diff --git a/.github_changelog_generator b/.github_changelog_generator new file mode 100644 index 0000000000..cd09ebcbfa --- /dev/null +++ b/.github_changelog_generator @@ -0,0 +1,7 @@ +pr-wo-labels=False +exclude-labels=duplicate,question,invalid,wontfix,weekly-digest +author=False +unreleased=False +since-tag=2.11.0 +release-branch=master +enhancement-label=**Enhancements:** diff --git a/.gitignore b/.gitignore index 4b2eb5453a..101c1e6224 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,31 @@ __pycache__/ *.py[cod] *$py.class +# Mac Stuff +########### +# General +.DS_Store +.AppleDouble +.LSOverride +# Icon must end with two \r +Icon +# Thumbnails +._* +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + # Documentation ############### /docs/build diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000000..ba86b85eec --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,565 @@ +# Changelog + +## [2.12.0](https://github.com/pypeclub/pype/tree/2.12.0) (2020-09-09) + +[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.8...2.12.0) + +**Enhancements:** + +- Less mongo connections [\#509](https://github.com/pypeclub/pype/pull/509) +- Nuke: adding image loader [\#499](https://github.com/pypeclub/pype/pull/499) +- Move launcher window to top if launcher action is clicked [\#450](https://github.com/pypeclub/pype/pull/450) +- Maya: better tile rendering support in Pype [\#446](https://github.com/pypeclub/pype/pull/446) +- Implementation of non QML launcher [\#443](https://github.com/pypeclub/pype/pull/443) +- Optional skip review on renders. 
[\#441](https://github.com/pypeclub/pype/pull/441) +- Ftrack: Option to push status from task to latest version [\#440](https://github.com/pypeclub/pype/pull/440) +- Properly containerize image plane loads. [\#434](https://github.com/pypeclub/pype/pull/434) +- Option to keep the review files. [\#426](https://github.com/pypeclub/pype/pull/426) +- Isolate view on instance members. [\#425](https://github.com/pypeclub/pype/pull/425) +- ftrack group is bcw compatible [\#418](https://github.com/pypeclub/pype/pull/418) +- Maya: Publishing of tile renderings on Deadline [\#398](https://github.com/pypeclub/pype/pull/398) +- Feature/little bit better logging gui [\#383](https://github.com/pypeclub/pype/pull/383) + +**Fixed bugs:** + +- Maya: Fix tile order for Draft Tile Assembler [\#511](https://github.com/pypeclub/pype/pull/511) +- Remove extra dash [\#501](https://github.com/pypeclub/pype/pull/501) +- Fix: strip dot from repre names in single frame renders [\#498](https://github.com/pypeclub/pype/pull/498) +- Better handling of destination during integrating [\#485](https://github.com/pypeclub/pype/pull/485) +- Fix: allow thumbnail creation for single frame renders [\#460](https://github.com/pypeclub/pype/pull/460) +- added missing argument to launch\_application in ftrack app handler [\#453](https://github.com/pypeclub/pype/pull/453) +- Burnins: Copy bit rate of input video to match quality. [\#448](https://github.com/pypeclub/pype/pull/448) +- Standalone publisher is now independent from tray [\#442](https://github.com/pypeclub/pype/pull/442) +- Bugfix/empty enumerator attributes [\#436](https://github.com/pypeclub/pype/pull/436) +- Fixed wrong order of "other" category collapssing in publisher [\#435](https://github.com/pypeclub/pype/pull/435) +- Multiple reviews where being overwritten to one. 
[\#424](https://github.com/pypeclub/pype/pull/424) +- Cleanup plugin fail on instances without staging dir [\#420](https://github.com/pypeclub/pype/pull/420) +- deprecated -intra parameter in ffmpeg to new `-g` [\#417](https://github.com/pypeclub/pype/pull/417) +- Delivery action can now work with entered path [\#397](https://github.com/pypeclub/pype/pull/397) + +**Merged pull requests:** + +- Review on instance.data [\#473](https://github.com/pypeclub/pype/pull/473) + +## [2.11.8](https://github.com/pypeclub/pype/tree/2.11.8) (2020-08-27) + +[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.7...2.11.8) + +**Enhancements:** + +- DWAA support for Maya [\#382](https://github.com/pypeclub/pype/issues/382) +- Isolate View on Playblast [\#367](https://github.com/pypeclub/pype/issues/367) +- Maya: Tile rendering [\#297](https://github.com/pypeclub/pype/issues/297) +- single pype instance running [\#47](https://github.com/pypeclub/pype/issues/47) +- PYPE-649: projects don't guarantee backwards compatible environment [\#8](https://github.com/pypeclub/pype/issues/8) +- PYPE-663: separate venv for each deployed version [\#7](https://github.com/pypeclub/pype/issues/7) + +**Fixed bugs:** + +- pyblish pype - other group is collapsed before plugins are done [\#431](https://github.com/pypeclub/pype/issues/431) +- Alpha white edges in harmony on PNGs [\#412](https://github.com/pypeclub/pype/issues/412) +- harmony image loader picks wrong representations [\#404](https://github.com/pypeclub/pype/issues/404) +- Clockify crash when response contain symbol not allowed by UTF-8 [\#81](https://github.com/pypeclub/pype/issues/81) + +## [2.11.7](https://github.com/pypeclub/pype/tree/2.11.7) (2020-08-21) + +[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.6...2.11.7) + +**Fixed bugs:** + +- Clean Up Baked Movie [\#369](https://github.com/pypeclub/pype/issues/369) +- celaction last workfile [\#459](https://github.com/pypeclub/pype/pull/459) + +## 
[2.11.6](https://github.com/pypeclub/pype/tree/2.11.6) (2020-08-18) + +[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.5...2.11.6) + +**Enhancements:** + +- publisher app [\#56](https://github.com/pypeclub/pype/issues/56) + +## [2.11.5](https://github.com/pypeclub/pype/tree/2.11.5) (2020-08-13) + +[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.4...2.11.5) + +**Enhancements:** + +- Switch from master to equivalent [\#220](https://github.com/pypeclub/pype/issues/220) +- Standalone publisher now only groups sequence if the extension is known [\#439](https://github.com/pypeclub/pype/pull/439) + +**Fixed bugs:** + +- Logs have been disable for editorial by default to speed up publishing [\#433](https://github.com/pypeclub/pype/pull/433) +- additional fixes for celaction [\#430](https://github.com/pypeclub/pype/pull/430) +- Harmony: invalid variable scope in validate scene settings [\#428](https://github.com/pypeclub/pype/pull/428) +- new representation name for audio was not accepted [\#427](https://github.com/pypeclub/pype/pull/427) + +## [2.11.4](https://github.com/pypeclub/pype/tree/2.11.4) (2020-08-10) + +[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.3...2.11.4) + +**Enhancements:** + +- WebSocket server [\#135](https://github.com/pypeclub/pype/issues/135) +- standalonepublisher: editorial family features expansion \[master branch\] [\#411](https://github.com/pypeclub/pype/pull/411) + +## [2.11.3](https://github.com/pypeclub/pype/tree/2.11.3) (2020-08-04) + +[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.2...2.11.3) + +**Fixed bugs:** + +- Harmony: publishing performance issues [\#408](https://github.com/pypeclub/pype/pull/408) + +## [2.11.2](https://github.com/pypeclub/pype/tree/2.11.2) (2020-07-31) + +[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.1...2.11.2) + +**Fixed bugs:** + +- Ftrack to Avalon bug [\#406](https://github.com/pypeclub/pype/issues/406) + +## 
[2.11.1](https://github.com/pypeclub/pype/tree/2.11.1) (2020-07-29) + +[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.0...2.11.1) + +**Merged pull requests:** + +- Celaction: metadata json folder fixes on path [\#393](https://github.com/pypeclub/pype/pull/393) +- CelAction - version up method taken fro pype.lib [\#391](https://github.com/pypeclub/pype/pull/391) + + +## 2.11.0 ## + +_**release date:** 27 July 2020_ + +**new:** +- _(blender)_ namespace support [\#341](https://github.com/pypeclub/pype/pull/341) +- _(blender)_ start end frames [\#330](https://github.com/pypeclub/pype/pull/330) +- _(blender)_ camera asset [\#322](https://github.com/pypeclub/pype/pull/322) +- _(pype)_ toggle instances per family in pyblish GUI [\#320](https://github.com/pypeclub/pype/pull/320) +- _(pype)_ current release version is now shown in the tray menu [#379](https://github.com/pypeclub/pype/pull/379) + + +**improved:** +- _(resolve)_ tagging for publish [\#239](https://github.com/pypeclub/pype/issues/239) +- _(pype)_ Support publishing a subset of shots with standalone editorial [\#336](https://github.com/pypeclub/pype/pull/336) +- _(harmony)_ Basic support for palettes [\#324](https://github.com/pypeclub/pype/pull/324) +- _(photoshop)_ Flag outdated containers on startup and publish. 
[\#309](https://github.com/pypeclub/pype/pull/309) +- _(harmony)_ Flag Outdated containers [\#302](https://github.com/pypeclub/pype/pull/302) +- _(photoshop)_ Publish review [\#298](https://github.com/pypeclub/pype/pull/298) +- _(pype)_ Optional Last workfile launch [\#365](https://github.com/pypeclub/pype/pull/365) + + +**fixed:** +- _(premiere)_ workflow fixes [\#346](https://github.com/pypeclub/pype/pull/346) +- _(pype)_ pype-setup does not work with space in path [\#327](https://github.com/pypeclub/pype/issues/327) +- _(ftrack)_ Ftrack delete action cause circular error [\#206](https://github.com/pypeclub/pype/issues/206) +- _(nuke)_ Priority was forced to 50 [\#345](https://github.com/pypeclub/pype/pull/345) +- _(nuke)_ Fix ValidateNukeWriteKnobs [\#340](https://github.com/pypeclub/pype/pull/340) +- _(maya)_ If camera attributes are connected, we can ignore them. [\#339](https://github.com/pypeclub/pype/pull/339) +- _(pype)_ stop appending of tools environment to existing env [\#337](https://github.com/pypeclub/pype/pull/337) +- _(ftrack)_ Ftrack timeout needs to look at AVALON\_TIMEOUT [\#325](https://github.com/pypeclub/pype/pull/325) +- _(harmony)_ Only zip files are supported. [\#310](https://github.com/pypeclub/pype/pull/310) +- _(pype)_ hotfix/Fix event server mongo uri [\#305](https://github.com/pypeclub/pype/pull/305) +- _(photoshop)_ Subset was not named or validated correctly. [\#304](https://github.com/pypeclub/pype/pull/304) + + + + +## 2.10.0 ## + +_**release date:** 17 June 2020_ + +**new:** +- _(harmony)_ **Toon Boom Harmony** has been greatly extended to support rigging, scene build, animation and rendering workflows. 
[#270](https://github.com/pypeclub/pype/issues/270) [#271](https://github.com/pypeclub/pype/issues/271) [#190](https://github.com/pypeclub/pype/issues/190) [#191](https://github.com/pypeclub/pype/issues/191) [#172](https://github.com/pypeclub/pype/issues/172) [#168](https://github.com/pypeclub/pype/issues/168) +- _(pype)_ Added support for rudimentary **edl publishing** into individual shots. [#265](https://github.com/pypeclub/pype/issues/265) +- _(celaction)_ Simple **Celaction** integration has been added with support for workfiles and rendering. [#255](https://github.com/pypeclub/pype/issues/255) +- _(maya)_ Support for multiple job types when submitting to the farm. We can now render Maya or Standalone render jobs for Vray and Arnold (limited support for arnold) [#204](https://github.com/pypeclub/pype/issues/204) +- _(photoshop)_ Added initial support for Photoshop [#232](https://github.com/pypeclub/pype/issues/232) + +**improved:** +- _(blender)_ Updated support for rigs and added support Layout family [#233](https://github.com/pypeclub/pype/issues/233) [#226](https://github.com/pypeclub/pype/issues/226) +- _(premiere)_ It is now possible to choose different storage root for workfiles of different task types. [#255](https://github.com/pypeclub/pype/issues/255) +- _(maya)_ Support for unmerged AOVs in Redshift multipart EXRs [#197](https://github.com/pypeclub/pype/issues/197) +- _(pype)_ Pype repository has been refactored in preparation for 3.0 release [#169](https://github.com/pypeclub/pype/issues/169) +- _(deadline)_ All file dependencies are now passed to deadline from maya to prevent premature start of rendering if caches or textures haven't been coppied over yet. [#195](https://github.com/pypeclub/pype/issues/195) +- _(nuke)_ Script validation can now be made optional. [#194](https://github.com/pypeclub/pype/issues/194) +- _(pype)_ Publishing can now be stopped at any time. 
[#194](https://github.com/pypeclub/pype/issues/194) + +**fix:** +- _(pype)_ Pyblish-lite has been integrated into pype repository, plus various publishing GUI fixes. [#274](https://github.com/pypeclub/pype/issues/274) [#275](https://github.com/pypeclub/pype/issues/275) [#268](https://github.com/pypeclub/pype/issues/268) [#227](https://github.com/pypeclub/pype/issues/227) [#238](https://github.com/pypeclub/pype/issues/238) +- _(maya)_ Alembic extractor was getting wrong frame range type in certain scenarios [#254](https://github.com/pypeclub/pype/issues/254) +- _(maya)_ Attaching a render to subset in maya was not passing validation in certain scenarios [#256](https://github.com/pypeclub/pype/issues/256) +- _(ftrack)_ Various small fixes to ftrack sync [#263](https://github.com/pypeclub/pype/issues/263) [#259](https://github.com/pypeclub/pype/issues/259) +- _(maya)_ Look extraction is now able to skp invalid connections in shaders [#207](https://github.com/pypeclub/pype/issues/207) + + + + +## 2.9.0 ## + +_**release date:** 25 May 2020_ + +**new:** +- _(pype)_ Support for **Multiroot projects**. You can now store project data on multiple physical or virtual storages and target individual publishes to these locations. For instance render can be stored on a faster storage than the rest of the project. [#145](https://github.com/pypeclub/pype/issues/145), [#38](https://github.com/pypeclub/pype/issues/38) +- _(harmony)_ Basic implementation of **Toon Boom Harmony** has been added. [#142](https://github.com/pypeclub/pype/issues/142) +- _(pype)_ OSX support is in public beta now. There are issues to be expected, but the main implementation should be functional. [#141](https://github.com/pypeclub/pype/issues/141) + + +**improved:** + +- _(pype)_ **Review extractor** has been completely rebuilt. It now supports granular filtering so you can create **multiple outputs** for different tasks, families or hosts. 
[#103](https://github.com/pypeclub/pype/issues/103), [#166](https://github.com/pypeclub/pype/issues/166), [#165](https://github.com/pypeclub/pype/issues/165) +- _(pype)_ **Burnin** generation had been extended to **support same multi-output filtering** as review extractor [#103](https://github.com/pypeclub/pype/issues/103) +- _(pype)_ Publishing file templates can now be specified in config for each individual family [#114](https://github.com/pypeclub/pype/issues/114) +- _(pype)_ Studio specific plugins can now be appended to pype standard publishing plugins. [#112](https://github.com/pypeclub/pype/issues/112) +- _(nukestudio)_ Reviewable clips no longer need to be previously cut, exported and re-imported to timeline. **Pype can now dynamically cut reviewable quicktimes** from continuous offline footage during publishing. [#23](https://github.com/pypeclub/pype/issues/23) +- _(deadline)_ Deadline can now correctly differentiate between staging and production pype. [#154](https://github.com/pypeclub/pype/issues/154) +- _(deadline)_ `PYPE_PYTHON_EXE` env variable can now be used to direct publishing to explicit python installation. [#120](https://github.com/pypeclub/pype/issues/120) +- _(nuke)_ Nuke now check for new version of loaded data on file open. [#140](https://github.com/pypeclub/pype/issues/140) +- _(nuke)_ frame range and limit checkboxes are now exposed on write node. 
[#119](https://github.com/pypeclub/pype/issues/119) + + + +**fix:** + +- _(nukestudio)_ Project Location was using backslashes which was breaking nukestudio native exporting in certains configurations [#82](https://github.com/pypeclub/pype/issues/82) +- _(nukestudio)_ Duplicity in hierarchy tags was prone to throwing publishing error [#130](https://github.com/pypeclub/pype/issues/130), [#144](https://github.com/pypeclub/pype/issues/144) +- _(ftrack)_ multiple stability improvements [#157](https://github.com/pypeclub/pype/issues/157), [#159](https://github.com/pypeclub/pype/issues/159), [#128](https://github.com/pypeclub/pype/issues/128), [#118](https://github.com/pypeclub/pype/issues/118), [#127](https://github.com/pypeclub/pype/issues/127) +- _(deadline)_ multipart EXRs were stopping review publishing on the farm. They are still not supported for automatic review generation, but the publish will go through correctly without the quicktime. [#155](https://github.com/pypeclub/pype/issues/155) +- _(deadline)_ If deadline is non-responsive it will no longer freeze host when publishing [#149](https://github.com/pypeclub/pype/issues/149) +- _(deadline)_ Sometimes deadline was trying to launch render before all the source data was coppied over. [#137](https://github.com/pypeclub/pype/issues/137) _(harmony)_ Basic implementation of **Toon Boom Harmony** has been added. [#142](https://github.com/pypeclub/pype/issues/142) +- _(nuke)_ Filepath knob wasn't updated properly. [#131](https://github.com/pypeclub/pype/issues/131) +- _(maya)_ When extracting animation, the "Write Color Set" options on the instance were not respected. [#108](https://github.com/pypeclub/pype/issues/108) +- _(maya)_ Attribute overrides for AOV only worked for the legacy render layers. 
Now it works for new render setup as well [#132](https://github.com/pypeclub/pype/issues/132) +- _(maya)_ Stability and usability improvements in yeti workflow [#104](https://github.com/pypeclub/pype/issues/104) + + + + +## 2.8.0 ## + +_**release date:** 20 April 2020_ + +**new:** + +- _(pype)_ Option to generate slates from json templates. [PYPE-628] [#26](https://github.com/pypeclub/pype/issues/26) +- _(pype)_ It is now possible to automate loading of published subsets into any scene. Documentation will follow :). [PYPE-611] [#24](https://github.com/pypeclub/pype/issues/24) + +**fix:** + +- _(maya)_ Some Redshift render tokens could break publishing. [PYPE-778] [#33](https://github.com/pypeclub/pype/issues/33) +- _(maya)_ Publish was not preserving maya file extension. [#39](https://github.com/pypeclub/pype/issues/39) +- _(maya)_ Rig output validator was failing on nodes without shapes. [#40](https://github.com/pypeclub/pype/issues/40) +- _(maya)_ Yeti caches can now be properly versioned up in the scene inventory. [#40](https://github.com/pypeclub/pype/issues/40) +- _(nuke)_ Build first workfiles was not accepting jpeg sequences. [#34](https://github.com/pypeclub/pype/issues/34) +- _(deadline)_ Trying to generate ffmpeg review from multipart EXRs no longer crashes publishing. [PYPE-781] +- _(deadline)_ Render publishing is more stable in multiplatform environments. [PYPE-775] + + + + +## 2.7.0 ## + +_**release date:** 30 March 2020_ + +**new:** + +- _(maya)_ Artist can now choose to load multiple references of the same subset at once [PYPE-646, PYPS-81] +- _(nuke)_ Option to use named OCIO colorspaces for review colour baking. [PYPS-82] +- _(pype)_ Pype can now work with `master` versions for publishing and loading. These are non-versioned publishes that are overwritten with the latest version during publish. These are now supported in all the GUIs, but their publishing is deactivated by default. 
[PYPE-653] +- _(blender)_ Added support for basic blender workflow. We currently support `rig`, `model` and `animation` families. [PYPE-768] +- _(pype)_ Source timecode can now be used in burn-ins. [PYPE-777] +- _(pype)_ Review outputs profiles can now specify delivery resolution different than project setting [PYPE-759] +- _(nuke)_ Bookmark to current context is now added automatically to all nuke browser windows. [PYPE-712] + +**change:** + +- _(maya)_ It is now possible to publish camera without. baking. Keep in mind that unbaked cameras can't be guaranteed to work in other hosts. [PYPE-595] +- _(maya)_ All the renders from maya are now grouped in the loader by their Layer name. [PYPE-482] +- _(nuke/hiero)_ Any publishes from nuke and hiero can now be versioned independently of the workfile. [PYPE-728] + + +**fix:** + +- _(nuke)_ Mixed slashes caused issues in ocio config path. +- _(pype)_ Intent field in pyblish GUI was passing label instead of value to ftrack. [PYPE-733] +- _(nuke)_ Publishing of pre-renders was inconsistent. [PYPE-766] +- _(maya)_ Handles and frame ranges were inconsistent in various places during publishing. +- _(nuke)_ Nuke was crashing if it ran into certain missing knobs. For example DPX output missing `autocrop` [PYPE-774] +- _(deadline)_ Project overrides were not working properly with farm render publishing. +- _(hiero)_ Problems with single frame plates publishing. +- _(maya)_ Redshift RenderPass token were breaking render publishing. [PYPE-778] +- _(nuke)_ Build first workfile was not accepting jpeg sequences. +- _(maya)_ Multipart (Multilayer) EXRs were breaking review publishing due to FFMPEG incompatiblity [PYPE-781] + + + +## 2.6.0 ## + +_**release date:** 9 March 2020_ + +**change:** +- _(maya)_ render publishing has been simplified and made more robust. 
Render setup layers are now automatically added to publishing subsets and `render globals` family has been replaced with simple `render` [PYPE-570] +- _(avalon)_ change context and workfiles apps, have been merged into one, that allows both actions to be performed at the same time. [PYPE-747] +- _(pype)_ thumbnails are now automatically propagate to asset from the last published subset in the loader +- _(ftrack)_ publishing comment and intent are now being published to ftrack note as well as describtion. [PYPE-727] +- _(pype)_ when overriding existing version new old representations are now overriden, instead of the new ones just being appended. (to allow this behaviour, the version validator need to be disabled. [PYPE-690]) +- _(pype)_ burnin preset has been significantly simplified. It now doesn't require passing function to each field, but only need the actual text template. to use this, all the current burnin PRESETS MUST BE UPDATED for all the projects. +- _(ftrack)_ credentials are now stored on a per server basis, so it's possible to switch between ftrack servers without having to log in and out. [PYPE-723] + + +**new:** +- _(pype)_ production and development deployments now have different colour of the tray icon. Orange for Dev and Green for production [PYPE-718] +- _(maya)_ renders can now be attached to a publishable subset rather than creating their own subset. For example it is possible to create a reviewable `look` or `model` render and have it correctly attached as a representation of the subsets [PYPE-451] +- _(maya)_ after saving current scene into a new context (as a new shot for instance), all the scene publishing subsets data gets re-generated automatically to match the new context [PYPE-532] +- _(pype)_ we now support project specific publish, load and create plugins [PYPE-740] +- _(ftrack)_ new action that allow archiving/deleting old published versions. User can keep how many of the latest version to keep when the action is ran. 
[PYPE-748, PYPE-715] +- _(ftrack)_ it is now possible to monitor and restart ftrack event server using ftrack action. [PYPE-658] +- _(pype)_ validator that prevent accidental overwrites of previously published versions. [PYPE-680] +- _(avalon)_ avalon core updated to version 5.6.0 +- _(maya)_ added validator to make sure that relative paths are used when publishing arnold standins. +- _(nukestudio)_ it is now possible to extract and publish audio family from clip in nuke studio [PYPE-682] + +**fix**: +- _(maya)_ maya set framerange button was ignoring handles [PYPE-719] +- _(ftrack)_ sync to avalon was sometime crashing when ran on empty project +- _(nukestudio)_ publishing same shots after they've been previously archived/deleted would result in a crash. [PYPE-737] +- _(nuke)_ slate workflow was breaking in certain scenarios. [PYPE-730] +- _(pype)_ rendering publish workflow has been significantly improved to prevent error resulting from implicit render collection. [PYPE-665, PYPE-746] +- _(pype)_ launching application on a non-synced project resulted in obscure [PYPE-528] +- _(pype)_ missing keys in burnins no longer result in an error. [PYPE-706] +- _(ftrack)_ create folder structure action was sometimes failing for project managers due to wrong permissions. +- _(Nukestudio)_ using `source` in the start frame tag could result in wrong frame range calculation +- _(ftrack)_ sync to avalon action and event have been improved by catching more edge cases and provessing them properly. + + + +## 2.5.0 ## + +_**release date:** 11 Feb 2020_ + +**change:** +- _(pype)_ added many logs for easier debugging +- _(pype)_ review presets can now be separated between 2d and 3d renders [PYPE-693] +- _(pype)_ anatomy module has been greatly improved to allow for more dynamic pulblishing and faster debugging [PYPE-685] +- _(pype)_ avalon schemas have been moved from `pype-config` to `pype` repository, for simplification. 
[PYPE-670] +- _(ftrack)_ updated to latest ftrack API +- _(ftrack)_ publishing comments now appear in ftrack also as a note on version with customisable category [PYPE-645] +- _(ftrack)_ delete asset/subset action had been improved. It is now able to remove multiple entities and descendants of the selected entities [PYPE-361, PYPS-72] +- _(workfiles)_ added date field to workfiles app [PYPE-603] +- _(maya)_ old deprecated loader have been removed in favour of a single unified reference loader (old scenes will upgrade automatically to the new loader upon opening) [PYPE-633, PYPE-697] +- _(avalon)_ core updated to 5.5.15 [PYPE-671] +- _(nuke)_ library loader is now available in nuke [PYPE-698] + + +**new:** +- _(pype)_ added pype render wrapper to allow rendering on mixed platform farms. [PYPE-634] +- _(pype)_ added `pype launch` command. It let's admin run applications with dynamically built environment based on the given context. [PYPE-634] +- _(pype)_ added support for extracting review sequences with burnins [PYPE-657] +- _(publish)_ users can now set intent next to a comment when publishing. This will then be reflected on an attribute in ftrack. [PYPE-632] +- _(burnin)_ timecode can now be added to burnin +- _(burnin)_ datetime keys can now be added to burnin and anatomy [PYPE-651] +- _(burnin)_ anatomy templates can now be used in burnins. [PYPE=626] +- _(nuke)_ new validator for render resolution +- _(nuke)_ support for attach slate to nuke renders [PYPE-630] +- _(nuke)_ png sequences were added to loaders +- _(maya)_ added maya 2020 compatibility [PYPE-677] +- _(maya)_ ability to publish and load .ASS standin sequences [PYPS-54] +- _(pype)_ thumbnails can now be published and are visible in the loader. 
`AVALON_THUMBNAIL_ROOT` environment variable needs to be set for this to work [PYPE-573, PYPE-132] +- _(blender)_ base implementation of blender was added with publishing and loading of .blend files [PYPE-612] +- _(ftrack)_ new action for preparing deliveries [PYPE-639] + + +**fix**: +- _(burnin)_ more robust way of finding ffmpeg for burnins. +- _(pype)_ improved UNC paths remapping when sending to farm. +- _(pype)_ float frames sometimes made their way to representation context in database, breaking loaders [PYPE-668] +- _(pype)_ `pype install --force` was failing sometimes [PYPE-600] +- _(pype)_ padding in published files got calculated wrongly sometimes. It is now instead being always read from project anatomy. [PYPE-667] +- _(publish)_ comment publishing was failing in certain situations +- _(ftrack)_ multiple edge case scenario fixes in auto sync and sync-to-avalon action +- _(ftrack)_ sync to avalon now works on empty projects +- _(ftrack)_ thumbnail update event was failing when deleting entities [PYPE-561] +- _(nuke)_ loader applies proper colorspaces from Presets +- _(nuke)_ publishing handles didn't always work correctly [PYPE-686] +- _(maya)_ assembly publishing and loading wasn't working correctly + + + + + +## 2.4.0 ## + +_**release date:** 9 Dec 2019_ + +**change:** +- _(ftrack)_ version to status ftrack event can now be configured from Presets + - based on preset `presets/ftracc/ftrack_config.json["status_version_to_task"]` +- _(ftrack)_ sync to avalon event has been completely re-written. It now supports most of the project management situations on ftrack including moving, renaming and deleting entities, updating attributes and working with tasks. +- _(ftrack)_ sync to avalon action has been also re-writen. It is now much faster (up to 100 times depending on a project structure), has much better logging and reporting on encountered problems, and is able to handle much more complex situations. 
+- _(ftrack)_ sync to avalon trigger by checking `auto-sync` toggle on ftrack [PYPE-504] +- _(pype)_ various new features in the REST api +- _(pype)_ new visual identity used across pype +- _(pype)_ started moving all requirements to pip installation rather than vendorising them in pype repository. Due to a few yet unreleased packages, this means that pype can temporarily be only installed in the offline mode. + +**new:** +- _(nuke)_ support for publishing gizmos and loading them as viewer processes +- _(nuke)_ support for publishing nuke nodes from backdrops and loading them back +- _(pype)_ burnins can now work with start and end frames as keys + - use keys `{frame_start}`, `{frame_end}` and `{current_frame}` in burnin preset to use them. [PYPS-44,PYPS-73, PYPE-602] +- _(pype)_ option to filter logs by user and level in loggin GUI +- _(pype)_ image family added to standalone publisher [PYPE-574] +- _(pype)_ matchmove family added to standalone publisher [PYPE-574] +- _(nuke)_ validator for comparing arbitrary knobs with values from presets +- _(maya)_ option to force maya to copy textures in the new look publish rather than hardlinking them +- _(pype)_ comments from pyblish GUI are now being added to ftrack version +- _(maya)_ validator for checking outdated containers in the scene +- _(maya)_ option to publish and load arnold standin sequence [PYPE-579, PYPS-54] + +**fix**: +- _(pype)_ burnins were not respecting codec of the input video +- _(nuke)_ lot's of various nuke and nuke studio fixes across the board [PYPS-45] +- _(pype)_ workfiles app is not launching with the start of the app by default [PYPE-569] +- _(ftrack)_ ftrack integration during publishing was failing under certain situations [PYPS-66] +- _(pype)_ minor fixes in REST api +- _(ftrack)_ status change event was crashing when the target status was missing [PYPS-68] +- _(ftrack)_ actions will try to reconnect if they fail for some reason +- _(maya)_ problems with fps mapping when using float FPS 
values +- _(deadline)_ overall improvements to deadline publishing +- _(setup)_ environment variables are now remapped on the fly based on the platform pype is running on. This fixes many issues in mixed platform environments. + + + +## 2.3.6 # + +_**release date:** 27 Nov 2019_ + +**hotfix**: +- _(ftrack)_ was hiding important debug logo +- _(nuke)_ crashes during workfile publishing +- _(ftrack)_ event server crashes because of signal problems +- _(muster)_ problems with muster render submissions +- _(ftrack)_ thumbnail update event syntax errors + + +## 2.3.0 ## +_release date: 6 Oct 2019_ + +**new**: +- _(maya)_ support for yeti rigs and yeti caches +- _(maya)_ validator for comparing arbitrary attributes against ftrack +- _(pype)_ burnins can now show current date and time +- _(muster)_ pools can now be set in render globals in maya +- _(pype)_ Rest API has been implemented in beta stage +- _(nuke)_ LUT loader has been added +- _(pype)_ rudimentary user module has been added as preparation for user management +- _(pype)_ a simple logging GUI has been added to pype tray +- _(nuke)_ nuke can now bake input process into mov +- _(maya)_ imported models now have selection handle displayed by defaulting +- _(avalon)_ it's is now possible to load multiple assets at once using loader +- _(maya)_ added ability to automatically connect yeti rig to a mesh upon loading + +**changed**: +- _(ftrack)_ event server now runs two parallel processes and is able to keep queue of events to process. 
+- _(nuke)_ task name is now added to all rendered subsets +- _(pype)_ adding more families to standalone publisher +- _(pype)_ standalone publisher now uses pyblish-lite +- _(pype)_ standalone publisher can now create review quicktimes +- _(ftrack)_ queries to ftrack were sped up +- _(ftrack)_ multiple ftrack actions have been deprecated +- _(avalon)_ avalon upstream has been updated to 5.5.0 +- _(nukestudio)_ published transforms can now be animated + +**fix**: +- _(maya)_ fps popup button didn't work in some cases +- _(maya)_ geometry instances and references in maya were losing shader assignments +- _(muster)_ muster rendering templates were not working correctly +- _(maya)_ arnold tx texture conversion wasn't respecting colorspace set by the artist +- _(pype)_ problems with avalon db sync +- _(maya)_ ftrack was rounding FPS making it inconsistent +- _(pype)_ wrong icon names in Creator +- _(maya)_ scene inventory wasn't showing anything if representation was removed from database after it's been loaded to the scene +- _(nukestudio)_ multiple bugs squashed +- _(loader)_ loader was taking a long time to show all the loading actions when first launched in maya + +## 2.2.0 ## +_release date: 8 Sept 2019_ + +**new**: +- _(pype)_ add customisable workflow for creating quicktimes from renders or playblasts +- _(nuke)_ option to choose deadline chunk size on write nodes +- _(nukestudio)_ added option to publish soft effects (subTrackItems) from NukeStudio as subsets including LUT files. these can then be loaded in nuke or NukeStudio +- _(nuke)_ option to build nuke script from previously published latest versions of plate and render subsets. +- _(nuke)_ nuke writes now have deadline tab. +- _(ftrack)_ Prepare Project action can now be used for creating the base folder structure on disk and in ftrack, setting up all the initial project attributes and it automatically prepares `pype_project_config` folder for the given project. 
+- _(clockify)_ Added support for time tracking in clockify. This currently in addition to ftrack time logs, but does not completely replace them. +- _(pype)_ any attributes in Creator and Loader plugins can now be customised using pype preset system + +**changed**: +- nukestudio now uses workio API for workfiles +- _(maya)_ "FIX FPS" prompt in maya now appears in the middle of the screen +- _(muster)_ can now be configured with custom templates +- _(pype)_ global publishing plugins can now be configured using presets as well as host specific ones + + +**fix**: +- wrong version retrieval from path in certain scenarios +- nuke reset resolution wasn't working in certain scenarios + +## 2.1.0 ## +_release date: 6 Aug 2019_ + +A large cleanup release. Most of the change are under the hood. + +**new**: +- _(pype)_ add customisable workflow for creating quicktimes from renders or playblasts +- _(pype)_ Added configurable option to add burnins to any generated quicktimes +- _(ftrack)_ Action that identifies what machines pype is running on. +- _(system)_ unify subprocess calls +- _(maya)_ add audio to review quicktimes +- _(nuke)_ add crop before write node to prevent overscan problems in ffmpeg +- **Nuke Studio** publishing and workfiles support +- **Muster** render manager support +- _(nuke)_ Framerange, FPS and Resolution are set automatically at startup +- _(maya)_ Ability to load published sequences as image planes +- _(system)_ Ftrack event that sets asset folder permissions based on task assignees in ftrack. 
+- _(maya)_ Pyblish plugin that allow validation of maya attributes +- _(system)_ added better startup logging to tray debug, including basic connection information +- _(avalon)_ option to group published subsets to groups in the loader +- _(avalon)_ loader family filters are working now + +**changed**: +- change multiple key attributes to unify their behaviour across the pipeline + - `frameRate` to `fps` + - `startFrame` to `frameStart` + - `endFrame` to `frameEnd` + - `fstart` to `frameStart` + - `fend` to `frameEnd` + - `handle_start` to `handleStart` + - `handle_end` to `handleEnd` + - `resolution_width` to `resolutionWidth` + - `resolution_height` to `resolutionHeight` + - `pixel_aspect` to `pixelAspect` + +- _(nuke)_ write nodes are now created inside group with only some attributes editable by the artist +- rendered frames are now deleted from temporary location after their publishing is finished. +- _(ftrack)_ RV action can now be launched from any entity +- after publishing only refresh button is now available in pyblish UI +- added context instance pyblish-lite so that artist knows if context plugin fails +- _(avalon)_ allow opening selected files using enter key +- _(avalon)_ core updated to v5.2.9 with our forked changes on top + +**fix**: +- faster hierarchy retrieval from db +- _(nuke)_ A lot of stability enhancements +- _(nuke studio)_ A lot of stability enhancements +- _(nuke)_ now only renders a single write node on farm +- _(ftrack)_ pype would crash when launcher project level task +- work directory was sometimes not being created correctly +- major pype.lib cleanup. Removing of unused functions, merging those that were doing the same and general house cleaning. 
+- _(avalon)_ subsets in maya 2019 weren't behaving correctly in the outliner + + +\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/HISTORY.md b/HISTORY.md new file mode 100644 index 0000000000..d60bd7b0c7 --- /dev/null +++ b/HISTORY.md @@ -0,0 +1,432 @@ + +## 2.11.0 ## + +_**release date:** 27 July 2020_ + +**new:** +- _(blender)_ namespace support [\#341](https://github.com/pypeclub/pype/pull/341) +- _(blender)_ start end frames [\#330](https://github.com/pypeclub/pype/pull/330) +- _(blender)_ camera asset [\#322](https://github.com/pypeclub/pype/pull/322) +- _(pype)_ toggle instances per family in pyblish GUI [\#320](https://github.com/pypeclub/pype/pull/320) +- _(pype)_ current release version is now shown in the tray menu [#379](https://github.com/pypeclub/pype/pull/379) + + +**improved:** +- _(resolve)_ tagging for publish [\#239](https://github.com/pypeclub/pype/issues/239) +- _(pype)_ Support publishing a subset of shots with standalone editorial [\#336](https://github.com/pypeclub/pype/pull/336) +- _(harmony)_ Basic support for palettes [\#324](https://github.com/pypeclub/pype/pull/324) +- _(photoshop)_ Flag outdated containers on startup and publish. 
[\#309](https://github.com/pypeclub/pype/pull/309) +- _(harmony)_ Flag Outdated containers [\#302](https://github.com/pypeclub/pype/pull/302) +- _(photoshop)_ Publish review [\#298](https://github.com/pypeclub/pype/pull/298) +- _(pype)_ Optional Last workfile launch [\#365](https://github.com/pypeclub/pype/pull/365) + + +**fixed:** +- _(premiere)_ workflow fixes [\#346](https://github.com/pypeclub/pype/pull/346) +- _(pype)_ pype-setup does not work with space in path [\#327](https://github.com/pypeclub/pype/issues/327) +- _(ftrack)_ Ftrack delete action cause circular error [\#206](https://github.com/pypeclub/pype/issues/206) +- _(nuke)_ Priority was forced to 50 [\#345](https://github.com/pypeclub/pype/pull/345) +- _(nuke)_ Fix ValidateNukeWriteKnobs [\#340](https://github.com/pypeclub/pype/pull/340) +- _(maya)_ If camera attributes are connected, we can ignore them. [\#339](https://github.com/pypeclub/pype/pull/339) +- _(pype)_ stop appending of tools environment to existing env [\#337](https://github.com/pypeclub/pype/pull/337) +- _(ftrack)_ Ftrack timeout needs to look at AVALON\_TIMEOUT [\#325](https://github.com/pypeclub/pype/pull/325) +- _(harmony)_ Only zip files are supported. [\#310](https://github.com/pypeclub/pype/pull/310) +- _(pype)_ hotfix/Fix event server mongo uri [\#305](https://github.com/pypeclub/pype/pull/305) +- _(photoshop)_ Subset was not named or validated correctly. [\#304](https://github.com/pypeclub/pype/pull/304) + + + + +## 2.10.0 ## + +_**release date:** 17 June 2020_ + +**new:** +- _(harmony)_ **Toon Boom Harmony** has been greatly extended to support rigging, scene build, animation and rendering workflows. 
[#270](https://github.com/pypeclub/pype/issues/270) [#271](https://github.com/pypeclub/pype/issues/271) [#190](https://github.com/pypeclub/pype/issues/190) [#191](https://github.com/pypeclub/pype/issues/191) [#172](https://github.com/pypeclub/pype/issues/172) [#168](https://github.com/pypeclub/pype/issues/168) +- _(pype)_ Added support for rudimentary **edl publishing** into individual shots. [#265](https://github.com/pypeclub/pype/issues/265) +- _(celaction)_ Simple **Celaction** integration has been added with support for workfiles and rendering. [#255](https://github.com/pypeclub/pype/issues/255) +- _(maya)_ Support for multiple job types when submitting to the farm. We can now render Maya or Standalone render jobs for Vray and Arnold (limited support for arnold) [#204](https://github.com/pypeclub/pype/issues/204) +- _(photoshop)_ Added initial support for Photoshop [#232](https://github.com/pypeclub/pype/issues/232) + +**improved:** +- _(blender)_ Updated support for rigs and added support Layout family [#233](https://github.com/pypeclub/pype/issues/233) [#226](https://github.com/pypeclub/pype/issues/226) +- _(premiere)_ It is now possible to choose different storage root for workfiles of different task types. [#255](https://github.com/pypeclub/pype/issues/255) +- _(maya)_ Support for unmerged AOVs in Redshift multipart EXRs [#197](https://github.com/pypeclub/pype/issues/197) +- _(pype)_ Pype repository has been refactored in preparation for 3.0 release [#169](https://github.com/pypeclub/pype/issues/169) +- _(deadline)_ All file dependencies are now passed to deadline from maya to prevent premature start of rendering if caches or textures haven't been coppied over yet. [#195](https://github.com/pypeclub/pype/issues/195) +- _(nuke)_ Script validation can now be made optional. [#194](https://github.com/pypeclub/pype/issues/194) +- _(pype)_ Publishing can now be stopped at any time. 
[#194](https://github.com/pypeclub/pype/issues/194) + +**fix:** +- _(pype)_ Pyblish-lite has been integrated into pype repository, plus various publishing GUI fixes. [#274](https://github.com/pypeclub/pype/issues/274) [#275](https://github.com/pypeclub/pype/issues/275) [#268](https://github.com/pypeclub/pype/issues/268) [#227](https://github.com/pypeclub/pype/issues/227) [#238](https://github.com/pypeclub/pype/issues/238) +- _(maya)_ Alembic extractor was getting wrong frame range type in certain scenarios [#254](https://github.com/pypeclub/pype/issues/254) +- _(maya)_ Attaching a render to subset in maya was not passing validation in certain scenarios [#256](https://github.com/pypeclub/pype/issues/256) +- _(ftrack)_ Various small fixes to ftrack sync [#263](https://github.com/pypeclub/pype/issues/263) [#259](https://github.com/pypeclub/pype/issues/259) +- _(maya)_ Look extraction is now able to skp invalid connections in shaders [#207](https://github.com/pypeclub/pype/issues/207) + + + + +## 2.9.0 ## + +_**release date:** 25 May 2020_ + +**new:** +- _(pype)_ Support for **Multiroot projects**. You can now store project data on multiple physical or virtual storages and target individual publishes to these locations. For instance render can be stored on a faster storage than the rest of the project. [#145](https://github.com/pypeclub/pype/issues/145), [#38](https://github.com/pypeclub/pype/issues/38) +- _(harmony)_ Basic implementation of **Toon Boom Harmony** has been added. [#142](https://github.com/pypeclub/pype/issues/142) +- _(pype)_ OSX support is in public beta now. There are issues to be expected, but the main implementation should be functional. [#141](https://github.com/pypeclub/pype/issues/141) + + +**improved:** + +- _(pype)_ **Review extractor** has been completely rebuilt. It now supports granular filtering so you can create **multiple outputs** for different tasks, families or hosts. 
[#103](https://github.com/pypeclub/pype/issues/103), [#166](https://github.com/pypeclub/pype/issues/166), [#165](https://github.com/pypeclub/pype/issues/165) +- _(pype)_ **Burnin** generation had been extended to **support same multi-output filtering** as review extractor [#103](https://github.com/pypeclub/pype/issues/103) +- _(pype)_ Publishing file templates can now be specified in config for each individual family [#114](https://github.com/pypeclub/pype/issues/114) +- _(pype)_ Studio specific plugins can now be appended to pype standard publishing plugins. [#112](https://github.com/pypeclub/pype/issues/112) +- _(nukestudio)_ Reviewable clips no longer need to be previously cut, exported and re-imported to timeline. **Pype can now dynamically cut reviewable quicktimes** from continuous offline footage during publishing. [#23](https://github.com/pypeclub/pype/issues/23) +- _(deadline)_ Deadline can now correctly differentiate between staging and production pype. [#154](https://github.com/pypeclub/pype/issues/154) +- _(deadline)_ `PYPE_PYTHON_EXE` env variable can now be used to direct publishing to explicit python installation. [#120](https://github.com/pypeclub/pype/issues/120) +- _(nuke)_ Nuke now check for new version of loaded data on file open. [#140](https://github.com/pypeclub/pype/issues/140) +- _(nuke)_ frame range and limit checkboxes are now exposed on write node. 
[#119](https://github.com/pypeclub/pype/issues/119) + + + +**fix:** + +- _(nukestudio)_ Project Location was using backslashes which was breaking nukestudio native exporting in certains configurations [#82](https://github.com/pypeclub/pype/issues/82) +- _(nukestudio)_ Duplicity in hierarchy tags was prone to throwing publishing error [#130](https://github.com/pypeclub/pype/issues/130), [#144](https://github.com/pypeclub/pype/issues/144) +- _(ftrack)_ multiple stability improvements [#157](https://github.com/pypeclub/pype/issues/157), [#159](https://github.com/pypeclub/pype/issues/159), [#128](https://github.com/pypeclub/pype/issues/128), [#118](https://github.com/pypeclub/pype/issues/118), [#127](https://github.com/pypeclub/pype/issues/127) +- _(deadline)_ multipart EXRs were stopping review publishing on the farm. They are still not supported for automatic review generation, but the publish will go through correctly without the quicktime. [#155](https://github.com/pypeclub/pype/issues/155) +- _(deadline)_ If deadline is non-responsive it will no longer freeze host when publishing [#149](https://github.com/pypeclub/pype/issues/149) +- _(deadline)_ Sometimes deadline was trying to launch render before all the source data was coppied over. [#137](https://github.com/pypeclub/pype/issues/137) _(harmony)_ Basic implementation of **Toon Boom Harmony** has been added. [#142](https://github.com/pypeclub/pype/issues/142) +- _(nuke)_ Filepath knob wasn't updated properly. [#131](https://github.com/pypeclub/pype/issues/131) +- _(maya)_ When extracting animation, the "Write Color Set" options on the instance were not respected. [#108](https://github.com/pypeclub/pype/issues/108) +- _(maya)_ Attribute overrides for AOV only worked for the legacy render layers. 
Now it works for new render setup as well [#132](https://github.com/pypeclub/pype/issues/132) +- _(maya)_ Stability and usability improvements in yeti workflow [#104](https://github.com/pypeclub/pype/issues/104) + + + + +## 2.8.0 ## + +_**release date:** 20 April 2020_ + +**new:** + +- _(pype)_ Option to generate slates from json templates. [PYPE-628] [#26](https://github.com/pypeclub/pype/issues/26) +- _(pype)_ It is now possible to automate loading of published subsets into any scene. Documentation will follow :). [PYPE-611] [#24](https://github.com/pypeclub/pype/issues/24) + +**fix:** + +- _(maya)_ Some Redshift render tokens could break publishing. [PYPE-778] [#33](https://github.com/pypeclub/pype/issues/33) +- _(maya)_ Publish was not preserving maya file extension. [#39](https://github.com/pypeclub/pype/issues/39) +- _(maya)_ Rig output validator was failing on nodes without shapes. [#40](https://github.com/pypeclub/pype/issues/40) +- _(maya)_ Yeti caches can now be properly versioned up in the scene inventory. [#40](https://github.com/pypeclub/pype/issues/40) +- _(nuke)_ Build first workfiles was not accepting jpeg sequences. [#34](https://github.com/pypeclub/pype/issues/34) +- _(deadline)_ Trying to generate ffmpeg review from multipart EXRs no longer crashes publishing. [PYPE-781] +- _(deadline)_ Render publishing is more stable in multiplatform environments. [PYPE-775] + + + + +## 2.7.0 ## + +_**release date:** 30 March 2020_ + +**new:** + +- _(maya)_ Artist can now choose to load multiple references of the same subset at once [PYPE-646, PYPS-81] +- _(nuke)_ Option to use named OCIO colorspaces for review colour baking. [PYPS-82] +- _(pype)_ Pype can now work with `master` versions for publishing and loading. These are non-versioned publishes that are overwritten with the latest version during publish. These are now supported in all the GUIs, but their publishing is deactivated by default. 
[PYPE-653] +- _(blender)_ Added support for basic blender workflow. We currently support `rig`, `model` and `animation` families. [PYPE-768] +- _(pype)_ Source timecode can now be used in burn-ins. [PYPE-777] +- _(pype)_ Review outputs profiles can now specify delivery resolution different than project setting [PYPE-759] +- _(nuke)_ Bookmark to current context is now added automatically to all nuke browser windows. [PYPE-712] + +**change:** + +- _(maya)_ It is now possible to publish camera without baking. Keep in mind that unbaked cameras can't be guaranteed to work in other hosts. [PYPE-595] +- _(maya)_ All the renders from maya are now grouped in the loader by their Layer name. [PYPE-482] +- _(nuke/hiero)_ Any publishes from nuke and hiero can now be versioned independently of the workfile. [PYPE-728] + + +**fix:** + +- _(nuke)_ Mixed slashes caused issues in ocio config path. +- _(pype)_ Intent field in pyblish GUI was passing label instead of value to ftrack. [PYPE-733] +- _(nuke)_ Publishing of pre-renders was inconsistent. [PYPE-766] +- _(maya)_ Handles and frame ranges were inconsistent in various places during publishing. +- _(nuke)_ Nuke was crashing if it ran into certain missing knobs. For example DPX output missing `autocrop` [PYPE-774] +- _(deadline)_ Project overrides were not working properly with farm render publishing. +- _(hiero)_ Problems with single frame plates publishing. +- _(maya)_ Redshift RenderPass tokens were breaking render publishing. [PYPE-778] +- _(nuke)_ Build first workfile was not accepting jpeg sequences. +- _(maya)_ Multipart (Multilayer) EXRs were breaking review publishing due to FFMPEG incompatibility [PYPE-781] + + + +## 2.6.0 ## + +_**release date:** 9 March 2020_ + +**change:** +- _(maya)_ render publishing has been simplified and made more robust. 
Render setup layers are now automatically added to publishing subsets and `render globals` family has been replaced with simple `render` [PYPE-570] +- _(avalon)_ change context and workfiles apps, have been merged into one, that allows both actions to be performed at the same time. [PYPE-747] +- _(pype)_ thumbnails are now automatically propagated to asset from the last published subset in the loader +- _(ftrack)_ publishing comment and intent are now being published to ftrack note as well as description. [PYPE-727] +- _(pype)_ when overriding an existing version, old representations are now overridden, instead of the new ones just being appended. (to allow this behaviour, the version validator needs to be disabled. [PYPE-690]) +- _(pype)_ burnin preset has been significantly simplified. It now doesn't require passing function to each field, but only needs the actual text template. to use this, all the current burnin PRESETS MUST BE UPDATED for all the projects. +- _(ftrack)_ credentials are now stored on a per server basis, so it's possible to switch between ftrack servers without having to log in and out. [PYPE-723] + + +**new:** +- _(pype)_ production and development deployments now have different colour of the tray icon. Orange for Dev and Green for production [PYPE-718] +- _(maya)_ renders can now be attached to a publishable subset rather than creating their own subset. For example it is possible to create a reviewable `look` or `model` render and have it correctly attached as a representation of the subsets [PYPE-451] +- _(maya)_ after saving current scene into a new context (as a new shot for instance), all the scene publishing subsets data gets re-generated automatically to match the new context [PYPE-532] +- _(pype)_ we now support project specific publish, load and create plugins [PYPE-740] +- _(ftrack)_ new action that allows archiving/deleting old published versions. User can choose how many of the latest versions to keep when the action is run. 
[PYPE-748, PYPE-715] +- _(ftrack)_ it is now possible to monitor and restart ftrack event server using ftrack action. [PYPE-658] +- _(pype)_ validator that prevents accidental overwrites of previously published versions. [PYPE-680] +- _(avalon)_ avalon core updated to version 5.6.0 +- _(maya)_ added validator to make sure that relative paths are used when publishing arnold standins. +- _(nukestudio)_ it is now possible to extract and publish audio family from clip in nuke studio [PYPE-682] + +**fix**: +- _(maya)_ maya set framerange button was ignoring handles [PYPE-719] +- _(ftrack)_ sync to avalon was sometimes crashing when run on an empty project +- _(nukestudio)_ publishing same shots after they've been previously archived/deleted would result in a crash. [PYPE-737] +- _(nuke)_ slate workflow was breaking in certain scenarios. [PYPE-730] +- _(pype)_ rendering publish workflow has been significantly improved to prevent error resulting from implicit render collection. [PYPE-665, PYPE-746] +- _(pype)_ launching application on a non-synced project resulted in an obscure error [PYPE-528] +- _(pype)_ missing keys in burnins no longer result in an error. [PYPE-706] +- _(ftrack)_ create folder structure action was sometimes failing for project managers due to wrong permissions. +- _(Nukestudio)_ using `source` in the start frame tag could result in wrong frame range calculation +- _(ftrack)_ sync to avalon action and event have been improved by catching more edge cases and processing them properly. + + + +## 2.5.0 ## + +_**release date:** 11 Feb 2020_ + +**change:** +- _(pype)_ added many logs for easier debugging +- _(pype)_ review presets can now be separated between 2d and 3d renders [PYPE-693] +- _(pype)_ anatomy module has been greatly improved to allow for more dynamic publishing and faster debugging [PYPE-685] +- _(pype)_ avalon schemas have been moved from `pype-config` to `pype` repository, for simplification. 
[PYPE-670] +- _(ftrack)_ updated to latest ftrack API +- _(ftrack)_ publishing comments now appear in ftrack also as a note on version with customisable category [PYPE-645] +- _(ftrack)_ delete asset/subset action had been improved. It is now able to remove multiple entities and descendants of the selected entities [PYPE-361, PYPS-72] +- _(workfiles)_ added date field to workfiles app [PYPE-603] +- _(maya)_ old deprecated loader have been removed in favour of a single unified reference loader (old scenes will upgrade automatically to the new loader upon opening) [PYPE-633, PYPE-697] +- _(avalon)_ core updated to 5.5.15 [PYPE-671] +- _(nuke)_ library loader is now available in nuke [PYPE-698] + + +**new:** +- _(pype)_ added pype render wrapper to allow rendering on mixed platform farms. [PYPE-634] +- _(pype)_ added `pype launch` command. It let's admin run applications with dynamically built environment based on the given context. [PYPE-634] +- _(pype)_ added support for extracting review sequences with burnins [PYPE-657] +- _(publish)_ users can now set intent next to a comment when publishing. This will then be reflected on an attribute in ftrack. [PYPE-632] +- _(burnin)_ timecode can now be added to burnin +- _(burnin)_ datetime keys can now be added to burnin and anatomy [PYPE-651] +- _(burnin)_ anatomy templates can now be used in burnins. [PYPE=626] +- _(nuke)_ new validator for render resolution +- _(nuke)_ support for attach slate to nuke renders [PYPE-630] +- _(nuke)_ png sequences were added to loaders +- _(maya)_ added maya 2020 compatibility [PYPE-677] +- _(maya)_ ability to publish and load .ASS standin sequences [PYPS-54] +- _(pype)_ thumbnails can now be published and are visible in the loader. 
`AVALON_THUMBNAIL_ROOT` environment variable needs to be set for this to work [PYPE-573, PYPE-132] +- _(blender)_ base implementation of blender was added with publishing and loading of .blend files [PYPE-612] +- _(ftrack)_ new action for preparing deliveries [PYPE-639] + + +**fix**: +- _(burnin)_ more robust way of finding ffmpeg for burnins. +- _(pype)_ improved UNC paths remapping when sending to farm. +- _(pype)_ float frames sometimes made their way to representation context in database, breaking loaders [PYPE-668] +- _(pype)_ `pype install --force` was failing sometimes [PYPE-600] +- _(pype)_ padding in published files got calculated wrongly sometimes. It is now instead being always read from project anatomy. [PYPE-667] +- _(publish)_ comment publishing was failing in certain situations +- _(ftrack)_ multiple edge case scenario fixes in auto sync and sync-to-avalon action +- _(ftrack)_ sync to avalon now works on empty projects +- _(ftrack)_ thumbnail update event was failing when deleting entities [PYPE-561] +- _(nuke)_ loader applies proper colorspaces from Presets +- _(nuke)_ publishing handles didn't always work correctly [PYPE-686] +- _(maya)_ assembly publishing and loading wasn't working correctly + + + + + +## 2.4.0 ## + +_**release date:** 9 Dec 2019_ + +**change:** +- _(ftrack)_ version to status ftrack event can now be configured from Presets + - based on preset `presets/ftracc/ftrack_config.json["status_version_to_task"]` +- _(ftrack)_ sync to avalon event has been completely re-written. It now supports most of the project management situations on ftrack including moving, renaming and deleting entities, updating attributes and working with tasks. +- _(ftrack)_ sync to avalon action has been also re-writen. It is now much faster (up to 100 times depending on a project structure), has much better logging and reporting on encountered problems, and is able to handle much more complex situations. 
+- _(ftrack)_ sync to avalon trigger by checking `auto-sync` toggle on ftrack [PYPE-504] +- _(pype)_ various new features in the REST api +- _(pype)_ new visual identity used across pype +- _(pype)_ started moving all requirements to pip installation rather than vendorising them in pype repository. Due to a few yet unreleased packages, this means that pype can temporarily be only installed in the offline mode. + +**new:** +- _(nuke)_ support for publishing gizmos and loading them as viewer processes +- _(nuke)_ support for publishing nuke nodes from backdrops and loading them back +- _(pype)_ burnins can now work with start and end frames as keys + - use keys `{frame_start}`, `{frame_end}` and `{current_frame}` in burnin preset to use them. [PYPS-44,PYPS-73, PYPE-602] +- _(pype)_ option to filter logs by user and level in loggin GUI +- _(pype)_ image family added to standalone publisher [PYPE-574] +- _(pype)_ matchmove family added to standalone publisher [PYPE-574] +- _(nuke)_ validator for comparing arbitrary knobs with values from presets +- _(maya)_ option to force maya to copy textures in the new look publish rather than hardlinking them +- _(pype)_ comments from pyblish GUI are now being added to ftrack version +- _(maya)_ validator for checking outdated containers in the scene +- _(maya)_ option to publish and load arnold standin sequence [PYPE-579, PYPS-54] + +**fix**: +- _(pype)_ burnins were not respecting codec of the input video +- _(nuke)_ lot's of various nuke and nuke studio fixes across the board [PYPS-45] +- _(pype)_ workfiles app is not launching with the start of the app by default [PYPE-569] +- _(ftrack)_ ftrack integration during publishing was failing under certain situations [PYPS-66] +- _(pype)_ minor fixes in REST api +- _(ftrack)_ status change event was crashing when the target status was missing [PYPS-68] +- _(ftrack)_ actions will try to reconnect if they fail for some reason +- _(maya)_ problems with fps mapping when using float FPS 
values +- _(deadline)_ overall improvements to deadline publishing +- _(setup)_ environment variables are now remapped on the fly based on the platform pype is running on. This fixes many issues in mixed platform environments. + + + +## 2.3.6 # + +_**release date:** 27 Nov 2019_ + +**hotfix**: +- _(ftrack)_ was hiding important debug logo +- _(nuke)_ crashes during workfile publishing +- _(ftrack)_ event server crashes because of signal problems +- _(muster)_ problems with muster render submissions +- _(ftrack)_ thumbnail update event syntax errors + + +## 2.3.0 ## +_release date: 6 Oct 2019_ + +**new**: +- _(maya)_ support for yeti rigs and yeti caches +- _(maya)_ validator for comparing arbitrary attributes against ftrack +- _(pype)_ burnins can now show current date and time +- _(muster)_ pools can now be set in render globals in maya +- _(pype)_ Rest API has been implemented in beta stage +- _(nuke)_ LUT loader has been added +- _(pype)_ rudimentary user module has been added as preparation for user management +- _(pype)_ a simple logging GUI has been added to pype tray +- _(nuke)_ nuke can now bake input process into mov +- _(maya)_ imported models now have selection handle displayed by defaulting +- _(avalon)_ it's is now possible to load multiple assets at once using loader +- _(maya)_ added ability to automatically connect yeti rig to a mesh upon loading + +**changed**: +- _(ftrack)_ event server now runs two parallel processes and is able to keep queue of events to process. 
+- _(nuke)_ task name is now added to all rendered subsets +- _(pype)_ adding more families to standalone publisher +- _(pype)_ standalone publisher now uses pyblish-lite +- _(pype)_ standalone publisher can now create review quicktimes +- _(ftrack)_ queries to ftrack were sped up +- _(ftrack)_ multiple ftrack action have been deprecated +- _(avalon)_ avalon upstream has been updated to 5.5.0 +- _(nukestudio)_ published transforms can now be animated +- + +**fix**: +- _(maya)_ fps popup button didn't work in some cases +- _(maya)_ geometry instances and references in maya were losing shader assignments +- _(muster)_ muster rendering templates were not working correctly +- _(maya)_ arnold tx texture conversion wasn't respecting colorspace set by the artist +- _(pype)_ problems with avalon db sync +- _(maya)_ ftrack was rounding FPS making it inconsistent +- _(pype)_ wrong icon names in Creator +- _(maya)_ scene inventory wasn't showing anything if representation was removed from database after it's been loaded to the scene +- _(nukestudio)_ multiple bugs squashed +- _(loader)_ loader was taking long time to show all the loading action when first launcher in maya + +## 2.2.0 ## +_release date: 8 Sept 2019_ + +**new**: +- _(pype)_ add customisable workflow for creating quicktimes from renders or playblasts +- _(nuke)_ option to choose deadline chunk size on write nodes +- _(nukestudio)_ added option to publish soft effects (subTrackItems) from NukeStudio as subsets including LUT files. these can then be loaded in nuke or NukeStudio +- _(nuke)_ option to build nuke script from previously published latest versions of plate and render subsets. +- _(nuke)_ nuke writes now have deadline tab. +- _(ftrack)_ Prepare Project action can now be used for creating the base folder structure on disk and in ftrack, setting up all the initial project attributes and it automatically prepares `pype_project_config` folder for the given project. 
+- _(clockify)_ Added support for time tracking in clockify. This currently in addition to ftrack time logs, but does not completely replace them. +- _(pype)_ any attributes in Creator and Loader plugins can now be customised using pype preset system + +**changed**: +- nukestudio now uses workio API for workfiles +- _(maya)_ "FIX FPS" prompt in maya now appears in the middle of the screen +- _(muster)_ can now be configured with custom templates +- _(pype)_ global publishing plugins can now be configured using presets as well as host specific ones + + +**fix**: +- wrong version retrieval from path in certain scenarios +- nuke reset resolution wasn't working in certain scenarios + +## 2.1.0 ## +_release date: 6 Aug 2019_ + +A large cleanup release. Most of the change are under the hood. + +**new**: +- _(pype)_ add customisable workflow for creating quicktimes from renders or playblasts +- _(pype)_ Added configurable option to add burnins to any generated quicktimes +- _(ftrack)_ Action that identifies what machines pype is running on. +- _(system)_ unify subprocess calls +- _(maya)_ add audio to review quicktimes +- _(nuke)_ add crop before write node to prevent overscan problems in ffmpeg +- **Nuke Studio** publishing and workfiles support +- **Muster** render manager support +- _(nuke)_ Framerange, FPS and Resolution are set automatically at startup +- _(maya)_ Ability to load published sequences as image planes +- _(system)_ Ftrack event that sets asset folder permissions based on task assignees in ftrack. 
- _(maya)_ Pyblish plugin that allows validation of maya attributes
+- _(avalon)_ subsets in maya 2019 weren't behaving correctly in the outliner diff --git a/changelog.md b/changelog.md deleted file mode 100644 index bdee041615..0000000000 --- a/changelog.md +++ /dev/null @@ -1,120 +0,0 @@ -# Pype changelog # -Welcome to pype changelog - -## 2.3.0 ## -_release date: 6 Oct 2019_ - -**new**: -- _(maya)_ support for yeti rigs and yeti caches -- _(maya)_ validator for comparing arbitrary attributes against ftrack -- _(pype)_ burnins can now show current date and time -- _(muster)_ pools can now be set in render globals in maya -- _(pype)_ Rest API has been implemented in beta stage -- _(nuke)_ LUT loader has been added -- _(pype)_ rudimentary user module has been added as preparation for user management -- _(pype)_ a simple logging GUI has been added to pype tray -- _(nuke)_ nuke can now bake input process into mov -- _(maya)_ imported models now have selection handle displayed by defaulting -- _(avalon)_ it's is now possible to load multiple assets at once using loader -- _(maya)_ added ability to automatically connect yeti rig to a mesh upon loading - -**changed**: -- _(ftrack)_ event server now runs two parallel processes and is able to keep queue of events to process. 
-- _(nuke)_ task name is now added to all rendered subsets -- _(pype)_ adding more families to standalone publisher -- _(pype)_ standalone publisher now uses pyblish-lite -- _(pype)_ standalone publisher can now create review quicktimes -- _(ftrack)_ queries to ftrack were sped up -- _(ftrack)_ multiple ftrack action have been deprecated -- _(avalon)_ avalon upstream has been updated to 5.5.0 -- _(nukestudio)_ published transforms can now be animated -- - -**fix**: -- _(maya)_ fps popup button didn't work in some cases -- _(maya)_ geometry instances and references in maya were losing shader assignments -- _(muster)_ muster rendering templates were not working correctly -- _(maya)_ arnold tx texture conversion wasn't respecting colorspace set by the artist -- _(pype)_ problems with avalon db sync -- _(maya)_ ftrack was rounding FPS making it inconsistent -- _(pype)_ wrong icon names in Creator -- _(maya)_ scene inventory wasn't showing anything if representation was removed from database after it's been loaded to the scene -- _(nukestudio)_ multiple bugs squashed -- _(loader)_ loader was taking long time to show all the loading action when first launcher in maya - -## 2.2.0 ## -_release date: 8 Sept 2019_ - -**new**: -- _(pype)_ add customisable workflow for creating quicktimes from renders or playblasts -- _(nuke)_ option to choose deadline chunk size on write nodes -- _(nukestudio)_ added option to publish soft effects (subTrackItems) from NukeStudio as subsets including LUT files. these can then be loaded in nuke or NukeStudio -- _(nuke)_ option to build nuke script from previously published latest versions of plate and render subsets. -- _(nuke)_ nuke writes now have deadline tab. -- _(ftrack)_ Prepare Project action can now be used for creating the base folder structure on disk and in ftrack, setting up all the initial project attributes and it automatically prepares `pype_project_config` folder for the given project. 
-- _(clockify)_ Added support for time tracking in clockify. This currently in addition to ftrack time logs, but does not completely replace them. -- _(pype)_ any attributes in Creator and Loader plugins can now be customised using pype preset system - -**changed**: -- nukestudio now uses workio API for workfiles -- _(maya)_ "FIX FPS" prompt in maya now appears in the middle of the screen -- _(muster)_ can now be configured with custom templates -- _(pype)_ global publishing plugins can now be configured using presets as well as host specific ones - - -**fix**: -- wrong version retrieval from path in certain scenarios -- nuke reset resolution wasn't working in certain scenarios - -## 2.1.0 ## -_release date: 6 Aug 2019_ - -A large cleanup release. Most of the change are under the hood. - -**new**: -- _(pype)_ add customisable workflow for creating quicktimes from renders or playblasts -- _(pype)_ Added configurable option to add burnins to any generated quicktimes -- _(ftrack)_ Action that identifies what machines pype is running on. -- _(system)_ unify subprocess calls -- _(maya)_ add audio to review quicktimes -- _(nuke)_ add crop before write node to prevent overscan problems in ffmpeg -- **Nuke Studio** publishing and workfiles support -- **Muster** render manager support -- _(nuke)_ Framerange, FPS and Resolution are set automatically at startup -- _(maya)_ Ability to load published sequences as image planes -- _(system)_ Ftrack event that sets asset folder permissions based on task assignees in ftrack. 
-- _(maya)_ Pyblish plugin that allow validation of maya attributes -- _(system)_ added better startup logging to tray debug, including basic connection information -- _(avalon)_ option to group published subsets to groups in the loader -- _(avalon)_ loader family filters are working now - -**changed**: -- change multiple key attributes to unify their behaviour across the pipeline - - `frameRate` to `fps` - - `startFrame` to `frameStart` - - `endFrame` to `frameEnd` - - `fstart` to `frameStart` - - `fend` to `frameEnd` - - `handle_start` to `handleStart` - - `handle_end` to `handleEnd` - - `resolution_width` to `resolutionWidth` - - `resolution_height` to `resolutionHeight` - - `pixel_aspect` to `pixelAspect` - -- _(nuke)_ write nodes are now created inside group with only some attributes editable by the artist -- rendered frames are now deleted from temporary location after their publishing is finished. -- _(ftrack)_ RV action can now be launched from any entity -- after publishing only refresh button is now available in pyblish UI -- added context instance pyblish-lite so that artist knows if context plugin fails -- _(avalon)_ allow opening selected files using enter key -- _(avalon)_ core updated to v5.2.9 with our forked changes on top - -**fix**: -- faster hierarchy retrieval from db -- _(nuke)_ A lot of stability enhancements -- _(nuke studio)_ A lot of stability enhancements -- _(nuke)_ now only renders a single write node on farm -- _(ftrack)_ pype would crash when launcher project level task -- work directory was sometimes not being created correctly -- major pype.lib cleanup. Removing of unused functions, merging those that were doing the same and general house cleaning. 
-- _(avalon)_ subsets in maya 2019 weren't behaving correctly in the outliner diff --git a/pype/api.py b/pype/api.py index 44a31f2626..021080b4d5 100644 --- a/pype/api.py +++ b/pype/api.py @@ -1,3 +1,7 @@ +from .settings import ( + system_settings, + project_settings +) from pypeapp import ( Logger, Anatomy, @@ -40,13 +44,18 @@ from .lib import ( get_version_from_path, get_last_version_from_path, modified_environ, - add_tool_to_environment + add_tool_to_environment, + source_hash, + get_latest_version ) # Special naming case for subprocess since its a built-in method. from .lib import _subprocess as subprocess __all__ = [ + "system_settings", + "project_settings", + "Logger", "Anatomy", "project_overrides_dir_path", @@ -58,6 +67,7 @@ __all__ = [ # Resources "resources", + # plugin classes "Extractor", # ordering @@ -84,6 +94,8 @@ __all__ = [ "get_last_version_from_path", "modified_environ", "add_tool_to_environment", + "source_hash", - "subprocess" + "subprocess", + "get_latest_version" ] diff --git a/pype/hooks/celaction/prelaunch.py b/pype/hooks/celaction/prelaunch.py index df9da6cbbf..c8541a9bc3 100644 --- a/pype/hooks/celaction/prelaunch.py +++ b/pype/hooks/celaction/prelaunch.py @@ -57,8 +57,8 @@ class CelactionPrelaunchHook(PypeHook): self.log.info(f"Work dir is: `{workdir}`") # get last version of workfile - workfile_last = get_last_version_from_path( - workdir, workfile.split(version)) + workfile_last = env.get("AVALON_LAST_WORKFILE") + self.log.debug(f"_ workfile_last: `{workfile_last}`") if workfile_last: workfile = workfile_last @@ -106,8 +106,8 @@ class CelactionPrelaunchHook(PypeHook): f"--project {project}", f"--asset {asset}", f"--task {task}", - "--currentFile \"*SCENE*\"", - "--chunk *CHUNK*", + "--currentFile \\\"\"*SCENE*\"\\\"", + "--chunk 10", "--frameStart *START*", "--frameEnd *END*", "--resolutionWidth *X*", diff --git a/pype/hooks/resolve/prelaunch.py b/pype/hooks/resolve/prelaunch.py index bddeccf4a3..a122b87868 100644 --- 
a/pype/hooks/resolve/prelaunch.py +++ b/pype/hooks/resolve/prelaunch.py @@ -46,13 +46,14 @@ class ResolvePrelaunch(PypeHook): "`RESOLVE_UTILITY_SCRIPTS_DIR` or reinstall DaVinci Resolve. \n" f"RESOLVE_UTILITY_SCRIPTS_DIR: `{us_dir}`" ) + self.log.debug(f"-- us_dir: `{us_dir}`") # correctly format path for pre python script pre_py_sc = os.path.normpath(env.get("PRE_PYTHON_SCRIPT", "")) env["PRE_PYTHON_SCRIPT"] = pre_py_sc - + self.log.debug(f"-- pre_py_sc: `{pre_py_sc}`...") try: - __import__("pype.resolve") + __import__("pype.hosts.resolve") __import__("pyblish") except ImportError as e: @@ -62,6 +63,7 @@ class ResolvePrelaunch(PypeHook): else: # Resolve Setup integration importlib.reload(utils) + self.log.debug(f"-- utils.__file__: `{utils.__file__}`") utils.setup(env) return True diff --git a/pype/hosts/blender/plugin.py b/pype/hosts/blender/plugin.py index ab53d49041..07080a86c4 100644 --- a/pype/hosts/blender/plugin.py +++ b/pype/hosts/blender/plugin.py @@ -45,8 +45,9 @@ def get_unique_number( def prepare_data(data, container_name): name = data.name - data = data.make_local() - data.name = f"{name}:{container_name}" + local_data = data.make_local() + local_data.name = f"{name}:{container_name}" + return local_data def create_blender_context(active: Optional[bpy.types.Object] = None, diff --git a/pype/hosts/celaction/cli.py b/pype/hosts/celaction/cli.py index 8cf2bcc791..42f7a1a385 100644 --- a/pype/hosts/celaction/cli.py +++ b/pype/hosts/celaction/cli.py @@ -46,9 +46,6 @@ def cli(): parser.add_argument("--resolutionHeight", help=("Height of resolution")) - # parser.add_argument("--programDir", - # help=("Directory with celaction program installation")) - celaction.kwargs = parser.parse_args(sys.argv[1:]).__dict__ @@ -78,7 +75,7 @@ def _prepare_publish_environments(): env["AVALON_WORKDIR"] = os.getenv("AVALON_WORKDIR") env["AVALON_HIERARCHY"] = hierarchy env["AVALON_PROJECTCODE"] = project_doc["data"].get("code", "") - env["AVALON_APP"] = publish_host + 
env["AVALON_APP"] = f"hosts.{publish_host}" env["AVALON_APP_NAME"] = "celaction_local" env["PYBLISH_HOSTS"] = publish_host diff --git a/pype/hosts/harmony/__init__.py b/pype/hosts/harmony/__init__.py index 3cae695852..7310e91e9b 100644 --- a/pype/hosts/harmony/__init__.py +++ b/pype/hosts/harmony/__init__.py @@ -18,12 +18,7 @@ def set_scene_settings(settings): if (args[0]["frameStart"] && args[0]["frameEnd"]) { var duration = args[0]["frameEnd"] - args[0]["frameStart"] + 1 - if (frame.numberOf() > duration) - { - frame.remove( - duration, frame.numberOf() - duration - ); - } + if (frame.numberOf() < duration) { frame.insert( @@ -151,27 +146,31 @@ def application_launch(): def export_template(backdrops, nodes, filepath): func = """function func(args) { - // Add an extra node just so a new group can be created. + var temp_node = node.add("Top", "temp_note", "NOTE", 0, 0, 0); var template_group = node.createGroup(temp_node, "temp_group"); node.deleteNode( template_group + "/temp_note" ); - // This will make Node View to focus on the new group. + selection.clearSelection(); + for (var f = 0; f < args[1].length; f++) + { + selection.addNodeToSelection(args[1][f]); + } + + Action.perform("copy()", "Node View"); + selection.clearSelection(); selection.addNodeToSelection(template_group); Action.perform("onActionEnterGroup()", "Node View"); + Action.perform("paste()", "Node View"); // Recreate backdrops in group. for (var i = 0 ; i < args[0].length; i++) { + MessageLog.trace(args[0][i]); Backdrop.addBackdrop(template_group, args[0][i]); }; - // Copy-paste the selected nodes into the new group. - var drag_object = copyPaste.copy(args[1], 1, frame.numberOf, ""); - copyPaste.pasteNewNodes(drag_object, template_group, ""); - - // Select all nodes within group and export as template. 
Action.perform( "selectAll()", "Node View" ); copyPaste.createTemplateFromSelection(args[2], args[3]); diff --git a/pype/hosts/maya/customize.py b/pype/hosts/maya/customize.py index 8bd7052d9e..ee3ad4f239 100644 --- a/pype/hosts/maya/customize.py +++ b/pype/hosts/maya/customize.py @@ -69,17 +69,38 @@ def override_component_mask_commands(): def override_toolbox_ui(): """Add custom buttons in Toolbox as replacement for Maya web help icon.""" + inventory = None + loader = None + launch_workfiles_app = None + mayalookassigner = None + try: + import avalon.tools.sceneinventory as inventory + except Exception: + log.warning("Could not import SceneInventory tool") - import pype - res = os.path.join(os.path.dirname(os.path.dirname(pype.__file__)), - "res") - icons = os.path.join(res, "icons") + try: + import avalon.tools.loader as loader + except Exception: + log.warning("Could not import Loader tool") - import avalon.tools.sceneinventory as inventory - import avalon.tools.loader as loader - from avalon.maya.pipeline import launch_workfiles_app - import mayalookassigner + try: + from avalon.maya.pipeline import launch_workfiles_app + except Exception: + log.warning("Could not import Workfiles tool") + try: + import mayalookassigner + except Exception: + log.warning("Could not import Maya Look assigner tool") + + from pype.api import resources + + icons = resources.get_resource("icons") + + if not any(( + mayalookassigner, launch_workfiles_app, loader, inventory + )): + return # Ensure the maya web icon on toolbox exists web_button = "ToolBox|MainToolboxLayout|mayaWebButton" @@ -99,65 +120,65 @@ def override_toolbox_ui(): # Create our controls background_color = (0.267, 0.267, 0.267) controls = [] + if mayalookassigner: + controls.append( + mc.iconTextButton( + "pype_toolbox_lookmanager", + annotation="Look Manager", + label="Look Manager", + image=os.path.join(icons, "lookmanager.png"), + command=lambda: mayalookassigner.show(), + bgc=background_color, + width=icon_size, + 
height=icon_size, + parent=parent + ) + ) - control = mc.iconTextButton( - "pype_toolbox_lookmanager", - annotation="Look Manager", - label="Look Manager", - image=os.path.join(icons, "lookmanager.png"), - command=lambda: mayalookassigner.show(), - bgc=background_color, - width=icon_size, - height=icon_size, - parent=parent) - controls.append(control) + if launch_workfiles_app: + controls.append( + mc.iconTextButton( + "pype_toolbox_workfiles", + annotation="Work Files", + label="Work Files", + image=os.path.join(icons, "workfiles.png"), + command=lambda: launch_workfiles_app(), + bgc=background_color, + width=icon_size, + height=icon_size, + parent=parent + ) + ) - control = mc.iconTextButton( - "pype_toolbox_workfiles", - annotation="Work Files", - label="Work Files", - image=os.path.join(icons, "workfiles.png"), - command=lambda: launch_workfiles_app(), - bgc=background_color, - width=icon_size, - height=icon_size, - parent=parent) - controls.append(control) + if loader: + controls.append( + mc.iconTextButton( + "pype_toolbox_loader", + annotation="Loader", + label="Loader", + image=os.path.join(icons, "loader.png"), + command=lambda: loader.show(use_context=True), + bgc=background_color, + width=icon_size, + height=icon_size, + parent=parent + ) + ) - control = mc.iconTextButton( - "pype_toolbox_loader", - annotation="Loader", - label="Loader", - image=os.path.join(icons, "loader.png"), - command=lambda: loader.show(use_context=True), - bgc=background_color, - width=icon_size, - height=icon_size, - parent=parent) - controls.append(control) - - control = mc.iconTextButton( - "pype_toolbox_manager", - annotation="Inventory", - label="Inventory", - image=os.path.join(icons, "inventory.png"), - command=lambda: inventory.show(), - bgc=background_color, - width=icon_size, - height=icon_size, - parent=parent) - controls.append(control) - - # control = mc.iconTextButton( - # "pype_toolbox", - # annotation="Kredenc", - # label="Kredenc", - # image=os.path.join(icons, 
"kredenc_logo.png"), - # bgc=background_color, - # width=icon_size, - # height=icon_size, - # parent=parent) - # controls.append(control) + if inventory: + controls.append( + mc.iconTextButton( + "pype_toolbox_manager", + annotation="Inventory", + label="Inventory", + image=os.path.join(icons, "inventory.png"), + command=lambda: inventory.show(), + bgc=background_color, + width=icon_size, + height=icon_size, + parent=parent + ) + ) # Add the buttons on the bottom and stack # them above each other with side padding diff --git a/pype/hosts/maya/expected_files.py b/pype/hosts/maya/expected_files.py index a7204cba93..77d55eb1c1 100644 --- a/pype/hosts/maya/expected_files.py +++ b/pype/hosts/maya/expected_files.py @@ -158,6 +158,25 @@ class AExpectedFiles: """To be implemented by renderer class.""" pass + def sanitize_camera_name(self, camera): + """Sanitize camera name. + + Remove Maya illegal characters from camera name. + + Args: + camera (str): Maya camera name. + + Returns: + (str): sanitized camera name + + Example: + >>> sanizite_camera_name('test:camera_01') + test_camera_01 + + """ + sanitized = re.sub('[^0-9a-zA-Z_]+', '_', camera) + return sanitized + def get_renderer_prefix(self): """Return prefix for specific renderer. 
@@ -252,7 +271,7 @@ class AExpectedFiles: mappings = ( (R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]), (R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]), - (R_SUBSTITUTE_CAMERA_TOKEN, cam), + (R_SUBSTITUTE_CAMERA_TOKEN, self.sanitize_camera_name(cam)), # this is required to remove unfilled aov token, for example # in Redshift (R_REMOVE_AOV_TOKEN, ""), @@ -287,7 +306,8 @@ class AExpectedFiles: mappings = ( (R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]), (R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]), - (R_SUBSTITUTE_CAMERA_TOKEN, cam), + (R_SUBSTITUTE_CAMERA_TOKEN, + self.sanitize_camera_name(cam)), (R_SUBSTITUTE_AOV_TOKEN, aov[0]), (R_CLEAN_FRAME_TOKEN, ""), (R_CLEAN_EXT_TOKEN, ""), @@ -314,7 +334,8 @@ class AExpectedFiles: # camera name to AOV to allow per camera AOVs. aov_name = aov[0] if len(layer_data["cameras"]) > 1: - aov_name = "{}_{}".format(aov[0], cam) + aov_name = "{}_{}".format(aov[0], + self.sanitize_camera_name(cam)) aov_file_list[aov_name] = aov_files file_prefix = layer_data["filePrefix"] diff --git a/pype/hosts/nuke/lib.py b/pype/hosts/nuke/lib.py index 72a8836a03..19a0784327 100644 --- a/pype/hosts/nuke/lib.py +++ b/pype/hosts/nuke/lib.py @@ -1,7 +1,6 @@ import os import re import sys -import getpass from collections import OrderedDict from avalon import api, io, lib @@ -1060,310 +1059,6 @@ def get_write_node_template_attr(node): return avalon.nuke.lib.fix_data_for_node_create(correct_data) -class BuildWorkfile(WorkfileSettings): - """ - Building first version of workfile. - - Settings are taken from presets and db. It will add all subsets - in last version for defined representaions - - Arguments: - variable (type): description - - """ - xpos = 0 - ypos = 0 - xpos_size = 80 - ypos_size = 90 - xpos_gap = 50 - ypos_gap = 50 - pos_layer = 10 - - def __init__(self, - root_path=None, - root_node=None, - nodes=None, - to_script=None, - **kwargs): - """ - A short description. - - A bit longer description. 
- - Argumetns: - root_path (str): description - root_node (nuke.Node): description - nodes (list): list of nuke.Node - nodes_effects (dict): dictionary with subsets - - Example: - nodes_effects = { - "plateMain": { - "nodes": [ - [("Class", "Reformat"), - ("resize", "distort"), - ("flip", True)], - - [("Class", "Grade"), - ("blackpoint", 0.5), - ("multiply", 0.4)] - ] - }, - } - - """ - - WorkfileSettings.__init__(self, - root_node=root_node, - nodes=nodes, - **kwargs) - self.to_script = to_script - # collect data for formating - self.data_tmp = { - "project": {"name": self._project["name"], - "code": self._project["data"].get("code", "")}, - "asset": self._asset or os.environ["AVALON_ASSET"], - "task": kwargs.get("task") or api.Session["AVALON_TASK"], - "hierarchy": kwargs.get("hierarchy") or pype.get_hierarchy(), - "version": kwargs.get("version", {}).get("name", 1), - "user": getpass.getuser(), - "comment": "firstBuild", - "ext": "nk" - } - - # get presets from anatomy - anatomy = get_anatomy() - # format anatomy - anatomy_filled = anatomy.format(self.data_tmp) - - # get dir and file for workfile - self.work_dir = anatomy_filled["work"]["folder"] - self.work_file = anatomy_filled["work"]["file"] - - def save_script_as(self, path=None): - # first clear anything in open window - nuke.scriptClear() - - if not path: - dir = self.work_dir - path = os.path.join( - self.work_dir, - self.work_file).replace("\\", "/") - else: - dir = os.path.dirname(path) - - # check if folder is created - if not os.path.exists(dir): - os.makedirs(dir) - - # save script to path - nuke.scriptSaveAs(path) - - def process(self, - regex_filter=None, - version=None, - representations=["exr", "dpx", "lutJson", "mov", - "preview", "png", "jpeg", "jpg"]): - """ - A short description. - - A bit longer description. 
- - Args: - regex_filter (raw string): regex pattern to filter out subsets - version (int): define a particular version, None gets last - representations (list): - - Returns: - type: description - - Raises: - Exception: description - - """ - - if not self.to_script: - # save the script - self.save_script_as() - - # create viewer and reset frame range - viewer = self.get_nodes(nodes_filter=["Viewer"]) - if not viewer: - vn = nuke.createNode("Viewer") - vn["xpos"].setValue(self.xpos) - vn["ypos"].setValue(self.ypos) - else: - vn = viewer[-1] - - # move position - self.position_up() - - wn = self.write_create() - wn["xpos"].setValue(self.xpos) - wn["ypos"].setValue(self.ypos) - wn["render"].setValue(True) - vn.setInput(0, wn) - - # adding backdrop under write - self.create_backdrop(label="Render write \n\n\n\nOUTPUT", - color='0xcc1102ff', layer=-1, - nodes=[wn]) - - # move position - self.position_up(4) - - # set frame range for new viewer - self.reset_frame_range_handles() - - # get all available representations - subsets = pype.get_subsets(self._asset, - regex_filter=regex_filter, - version=version, - representations=representations) - - for name, subset in subsets.items(): - log.debug("___________________") - log.debug(name) - log.debug(subset["version"]) - - nodes_backdrop = list() - for name, subset in subsets.items(): - if "lut" in name: - continue - log.info("Building Loader to: `{}`".format(name)) - version = subset["version"] - log.info("Version to: `{}`".format(version["name"])) - representations = subset["representaions"] - for repr in representations: - rn = self.read_loader(repr) - rn["xpos"].setValue(self.xpos) - rn["ypos"].setValue(self.ypos) - wn.setInput(0, rn) - - # get editional nodes - lut_subset = [s for n, s in subsets.items() - if "lut{}".format(name.lower()) in n.lower()] - log.debug(">> lut_subset: `{}`".format(lut_subset)) - - if len(lut_subset) > 0: - lsub = lut_subset[0] - fxn = self.effect_loader(lsub["representaions"][-1]) - fxn_ypos = 
fxn["ypos"].value() - fxn["ypos"].setValue(fxn_ypos - 100) - nodes_backdrop.append(fxn) - - nodes_backdrop.append(rn) - # move position - self.position_right() - - # adding backdrop under all read nodes - self.create_backdrop(label="Loaded Reads", - color='0x2d7702ff', layer=-1, - nodes=nodes_backdrop) - - def read_loader(self, representation): - """ - Gets Loader plugin for image sequence or mov - - Arguments: - representation (dict): avalon db entity - - """ - context = representation["context"] - - loader_name = "LoadSequence" - if "mov" in context["representation"]: - loader_name = "LoadMov" - - loader_plugin = None - for Loader in api.discover(api.Loader): - if Loader.__name__ != loader_name: - continue - - loader_plugin = Loader - - return api.load(Loader=loader_plugin, - representation=representation["_id"]) - - def effect_loader(self, representation): - """ - Gets Loader plugin for effects - - Arguments: - representation (dict): avalon db entity - - """ - loader_name = "LoadLuts" - - loader_plugin = None - for Loader in api.discover(api.Loader): - if Loader.__name__ != loader_name: - continue - - loader_plugin = Loader - - return api.load(Loader=loader_plugin, - representation=representation["_id"]) - - def write_create(self): - """ - Create render write - - Arguments: - representation (dict): avalon db entity - - """ - task = self.data_tmp["task"] - sanitized_task = re.sub('[^0-9a-zA-Z]+', '', task) - subset_name = "render{}Main".format( - sanitized_task.capitalize()) - - Create_name = "CreateWriteRender" - - creator_plugin = None - for Creator in api.discover(api.Creator): - if Creator.__name__ != Create_name: - continue - - creator_plugin = Creator - - # return api.create() - return creator_plugin(subset_name, self._asset).process() - - def create_backdrop(self, label="", color=None, layer=0, - nodes=None): - """ - Create Backdrop node - - Arguments: - color (str): nuke compatible string with color code - layer (int): layer of node usually used 
(self.pos_layer - 1) - label (str): the message - nodes (list): list of nodes to be wrapped into backdrop - - """ - assert isinstance(nodes, list), "`nodes` should be a list of nodes" - layer = self.pos_layer + layer - - create_backdrop(label=label, color=color, layer=layer, nodes=nodes) - - def position_reset(self, xpos=0, ypos=0): - self.xpos = xpos - self.ypos = ypos - - def position_right(self, multiply=1): - self.xpos += (self.xpos_size * multiply) + self.xpos_gap - - def position_left(self, multiply=1): - self.xpos -= (self.xpos_size * multiply) + self.xpos_gap - - def position_down(self, multiply=1): - self.ypos -= (self.ypos_size * multiply) + self.ypos_gap - - def position_up(self, multiply=1): - self.ypos -= (self.ypos_size * multiply) + self.ypos_gap - - class ExporterReview: """ Base class object for generating review data from Nuke @@ -1445,7 +1140,7 @@ class ExporterReview: anlib.reset_selection() ipn_orig = None for v in [n for n in nuke.allNodes() - if "Viewer" in n.Class()]: + if "Viewer" == n.Class()]: ip = v['input_process'].getValue() ipn = v['input_process_node'].getValue() if "VIEWER_INPUT" not in ipn and ip: diff --git a/pype/hosts/nuke/menu.py b/pype/hosts/nuke/menu.py index 7306add9fe..b1ef7f47c4 100644 --- a/pype/hosts/nuke/menu.py +++ b/pype/hosts/nuke/menu.py @@ -2,10 +2,12 @@ import nuke from avalon.api import Session from pype.hosts.nuke import lib +from ...lib import BuildWorkfile from pype.api import Logger log = Logger().get_logger(__name__, "nuke") + def install(): menubar = nuke.menu("Nuke") menu = menubar.findItem(Session["AVALON_LABEL"]) @@ -20,7 +22,11 @@ def install(): log.debug("Changing Item: {}".format(rm_item)) # rm_item[1].setEnabled(False) menu.removeItem(rm_item[1].name()) - menu.addCommand(new_name, lambda: workfile_settings().reset_resolution(), index=(rm_item[0])) + menu.addCommand( + new_name, + lambda: workfile_settings().reset_resolution(), + index=(rm_item[0]) + ) # replace reset frame range from avalon core to 
pype's name = "Reset Frame Range" @@ -31,33 +37,38 @@ def install(): log.debug("Changing Item: {}".format(rm_item)) # rm_item[1].setEnabled(False) menu.removeItem(rm_item[1].name()) - menu.addCommand(new_name, lambda: workfile_settings().reset_frame_range_handles(), index=(rm_item[0])) + menu.addCommand( + new_name, + lambda: workfile_settings().reset_frame_range_handles(), + index=(rm_item[0]) + ) # add colorspace menu item - name = "Set colorspace" + name = "Set Colorspace" menu.addCommand( name, lambda: workfile_settings().set_colorspace(), - index=(rm_item[0]+2) + index=(rm_item[0] + 2) ) log.debug("Adding menu item: {}".format(name)) # add workfile builder menu item - name = "Build First Workfile.." + name = "Build Workfile" menu.addCommand( - name, lambda: lib.BuildWorkfile().process(), - index=(rm_item[0]+7) + name, lambda: BuildWorkfile().process(), + index=(rm_item[0] + 7) ) log.debug("Adding menu item: {}".format(name)) # add item that applies all setting above - name = "Apply all settings" + name = "Apply All Settings" menu.addCommand( - name, lambda: workfile_settings().set_context_settings(), index=(rm_item[0]+3) + name, + lambda: workfile_settings().set_context_settings(), + index=(rm_item[0] + 3) ) log.debug("Adding menu item: {}".format(name)) - def uninstall(): menubar = nuke.menu("Nuke") diff --git a/pype/hosts/nuke/utils.py b/pype/hosts/nuke/utils.py index aa5bc1077e..72c7b7bc14 100644 --- a/pype/hosts/nuke/utils.py +++ b/pype/hosts/nuke/utils.py @@ -1,6 +1,7 @@ import os import nuke from avalon.nuke import lib as anlib +from pype.api import resources def set_context_favorites(favorites={}): @@ -9,9 +10,7 @@ def set_context_favorites(favorites={}): Argumets: favorites (dict): couples of {name:path} """ - dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) - icon_path = os.path.join(dir, 'res', 'icons', 'folder-favorite3.png') - + icon_path = resources.get_resource("icons", "folder-favorite3.png") for name, path in favorites.items(): 
nuke.addFavoriteDir( name, diff --git a/pype/hosts/nukestudio/tags.json b/pype/hosts/nukestudio/tags.json new file mode 100644 index 0000000000..56fcfcbce9 --- /dev/null +++ b/pype/hosts/nukestudio/tags.json @@ -0,0 +1,262 @@ +{ + "Hierarchy": { + "editable": "1", + "note": "{folder}/{sequence}/{shot}", + "icon": { + "path": "hierarchy.png" + }, + "metadata": { + "folder": "FOLDER_NAME", + "shot": "{clip}", + "track": "{track}", + "sequence": "{sequence}", + "episode": "EPISODE_NAME", + "root": "{projectroot}" + } + }, + "Source Resolution": { + "editable": "1", + "note": "Use source resolution", + "icon": { + "path": "resolution.png" + }, + "metadata": { + "family": "resolution" + } + }, + "Retiming": { + "editable": "1", + "note": "Clip has retime or TimeWarp effects (or multiple effects stacked on the clip)", + "icon": { + "path": "retiming.png" + }, + "metadata": { + "family": "retiming", + "marginIn": 1, + "marginOut": 1 + } + }, + "Frame start": { + "editable": "1", + "note": "Starting frame for comps. \n\n> Use `value` and add either number or write `source` (if you want to preserve source frame numbering)", + "icon": { + "path": "icons:TagBackground.png" + }, + "metadata": { + "family": "frameStart", + "value": "1001" + } + }, + "[Lenses]": { + "Set lense here": { + "editable": "1", + "note": "Adjust parameters of your lense and then drop to clip. Remember! You can always overwrite on clip", + "icon": { + "path": "lense.png" + }, + "metadata": { + "focalLengthMm": 57 + + } + } + }, + "[Subsets]": { + "Audio": { + "editable": "1", + "note": "Export with Audio", + "icon": { + "path": "volume.png" + }, + "metadata": { + "family": "audio", + "subset": "main" + } + }, + "plateFg": { + "editable": "1", + "note": "Add to publish to \"forground\" subset. 
Change metadata subset name if different order number", + "icon": { + "path": "z_layer_fg.png" + }, + "metadata": { + "family": "plate", + "subset": "Fg01" + } + }, + "plateBg": { + "editable": "1", + "note": "Add to publish to \"background\" subset. Change metadata subset name if different order number", + "icon": { + "path": "z_layer_bg.png" + }, + "metadata": { + "family": "plate", + "subset": "Bg01" + } + }, + "plateRef": { + "editable": "1", + "note": "Add to publish to \"reference\" subset.", + "icon": { + "path": "icons:Reference.png" + }, + "metadata": { + "family": "plate", + "subset": "Ref" + } + }, + "plateMain": { + "editable": "1", + "note": "Add to publish to \"main\" subset.", + "icon": { + "path": "z_layer_main.png" + }, + "metadata": { + "family": "plate", + "subset": "main" + } + }, + "plateProxy": { + "editable": "1", + "note": "Add to publish to \"proxy\" subset.", + "icon": { + "path": "z_layer_main.png" + }, + "metadata": { + "family": "plate", + "subset": "proxy" + } + }, + "review": { + "editable": "1", + "note": "Upload to Ftrack as review component.", + "icon": { + "path": "review.png" + }, + "metadata": { + "family": "review", + "track": "review" + } + } + }, + "[Handles]": { + "start: add 20 frames": { + "editable": "1", + "note": "Adding frames to start of selected clip", + "icon": { + "path": "3_add_handles_start.png" + }, + "metadata": { + "family": "handles", + "value": "20", + "args": "{'op':'add','where':'start'}" + } + }, + "start: add 10 frames": { + "editable": "1", + "note": "Adding frames to start of selected clip", + "icon": { + "path": "3_add_handles_start.png" + }, + "metadata": { + "family": "handles", + "value": "10", + "args": "{'op':'add','where':'start'}" + } + }, + "start: add 5 frames": { + "editable": "1", + "note": "Adding frames to start of selected clip", + "icon": { + "path": "3_add_handles_start.png" + }, + "metadata": { + "family": "handles", + "value": "5", + "args": "{'op':'add','where':'start'}" + } + }, + 
"start: add 0 frames": { + "editable": "1", + "note": "Adding frames to start of selected clip", + "icon": { + "path": "3_add_handles_start.png" + }, + "metadata": { + "family": "handles", + "value": "0", + "args": "{'op':'add','where':'start'}" + } + }, + "end: add 20 frames": { + "editable": "1", + "note": "Adding frames to end of selected clip", + "icon": { + "path": "1_add_handles_end.png" + }, + "metadata": { + "family": "handles", + "value": "20", + "args": "{'op':'add','where':'end'}" + } + }, + "end: add 10 frames": { + "editable": "1", + "note": "Adding frames to end of selected clip", + "icon": { + "path": "1_add_handles_end.png" + }, + "metadata": { + "family": "handles", + "value": "10", + "args": "{'op':'add','where':'end'}" + } + }, + "end: add 5 frames": { + "editable": "1", + "note": "Adding frames to end of selected clip", + "icon": { + "path": "1_add_handles_end.png" + }, + "metadata": { + "family": "handles", + "value": "5", + "args": "{'op':'add','where':'end'}" + } + }, + "end: add 0 frames": { + "editable": "1", + "note": "Adding frames to end of selected clip", + "icon": { + "path": "1_add_handles_end.png" + }, + "metadata": { + "family": "handles", + "value": "0", + "args": "{'op':'add','where':'end'}" + } + } + }, + "NukeScript": { + "editable": "1", + "note": "Collecting track items to Nuke scripts.", + "icon": { + "path": "icons:TagNuke.png" + }, + "metadata": { + "family": "nukescript", + "subset": "main" + } + }, + "Comment": { + "editable": "1", + "note": "Comment on a shot.", + "icon": { + "path": "icons:TagComment.png" + }, + "metadata": { + "family": "comment", + "subset": "main" + } + } +} diff --git a/pype/hosts/nukestudio/tags.py b/pype/hosts/nukestudio/tags.py index c97f13d17c..c2b1d0d728 100644 --- a/pype/hosts/nukestudio/tags.py +++ b/pype/hosts/nukestudio/tags.py @@ -1,16 +1,22 @@ import re import os +import json import hiero -from pype.api import ( - config, - Logger -) +from pype.api import Logger from avalon import io log 
= Logger().get_logger(__name__, "nukestudio") +def tag_data(): + current_dir = os.path.dirname(__file__) + json_path = os.path.join(current_dir, "tags.json") + with open(json_path, "r") as json_stream: + data = json.load(json_stream) + return data + + def create_tag(key, value): """ Creating Tag object. @@ -58,13 +64,9 @@ def add_tags_from_presets(): return log.debug("Setting default tags on project: {}".format(project.name())) - - # get all presets - presets = config.get_presets() - # get nukestudio tag.json from presets - nks_pres = presets["nukestudio"] - nks_pres_tags = nks_pres.get("tags", None) + # get nukestudio tags.json + nks_pres_tags = tag_data() # Get project task types. tasks = io.find_one({"type": "project"})["config"]["tasks"] diff --git a/pype/hosts/premiere/extensions/com.pype/jsx/pype.jsx b/pype/hosts/premiere/extensions/com.pype/jsx/pype.jsx index 684cef5e5a..3cd4502653 100644 --- a/pype/hosts/premiere/extensions/com.pype/jsx/pype.jsx +++ b/pype/hosts/premiere/extensions/com.pype/jsx/pype.jsx @@ -534,7 +534,9 @@ $.pype = { if (instances === null) { return null; } - if (audioOnly === true) { + + // make only audio representations + if (audioOnly === 'true') { $.pype.log('? 
looping if audio True'); for (var i = 0; i < instances.length; i++) { var subsetToRepresentations = instances[i].subsetToRepresentations; diff --git a/pype/hosts/resolve/__init__.py b/pype/hosts/resolve/__init__.py index 72d6314b5e..c8f45259ff 100644 --- a/pype/hosts/resolve/__init__.py +++ b/pype/hosts/resolve/__init__.py @@ -1,17 +1,34 @@ +from .utils import ( + setup, + get_resolve_module +) + from .pipeline import ( install, uninstall, ls, containerise, publish, - launch_workfiles_app + launch_workfiles_app, + maintained_selection ) -from .utils import ( - setup, - get_resolve_module +from .lib import ( + get_project_manager, + get_current_project, + get_current_sequence, + get_current_track_items, + create_current_sequence_media_bin, + create_compound_clip, + swap_clips, + get_pype_clip_metadata, + set_project_manager_to_folder_name ) +from .menu import launch_pype_menu + +from .plugin import Creator + from .workio import ( open_file, save_file, @@ -21,12 +38,8 @@ from .workio import ( work_root ) -from .lib import ( - get_project_manager, - set_project_manager_to_folder_name -) - -from .menu import launch_pype_menu +bmdvr = None +bmdvf = None __all__ = [ # pipeline @@ -37,6 +50,7 @@ __all__ = [ "reload_pipeline", "publish", "launch_workfiles_app", + "maintained_selection", # utils "setup", @@ -44,16 +58,30 @@ __all__ = [ # lib "get_project_manager", + "get_current_project", + "get_current_sequence", + "get_current_track_items", + "create_current_sequence_media_bin", + "create_compound_clip", + "swap_clips", + "get_pype_clip_metadata", "set_project_manager_to_folder_name", # menu "launch_pype_menu", + # plugin + "Creator", + # workio "open_file", "save_file", "current_file", "has_unsaved_changes", "file_extensions", - "work_root" + "work_root", + + # singleton with black magic resolve module + "bmdvr", + "bmdvf" ] diff --git a/pype/hosts/resolve/action.py b/pype/hosts/resolve/action.py index 31830937c1..a9803cef4e 100644 --- a/pype/hosts/resolve/action.py +++ 
b/pype/hosts/resolve/action.py @@ -21,9 +21,9 @@ class SelectInvalidAction(pyblish.api.Action): def process(self, context, plugin): try: - from pype.hosts.resolve.utils import get_resolve_module - resolve = get_resolve_module() - self.log.debug(resolve) + from . import get_project_manager + pm = get_project_manager() + self.log.debug(pm) except ImportError: raise ImportError("Current host is not Resolve") diff --git a/pype/hosts/resolve/lib.py b/pype/hosts/resolve/lib.py index 2576136df5..deb4fa6339 100644 --- a/pype/hosts/resolve/lib.py +++ b/pype/hosts/resolve/lib.py @@ -1,20 +1,406 @@ import sys -from .utils import get_resolve_module -from pypeapp import Logger +import json +from opentimelineio import opentime +from pprint import pformat + +from pype.api import Logger log = Logger().get_logger(__name__, "resolve") self = sys.modules[__name__] self.pm = None +self.rename_index = 0 +self.rename_add = 0 +self.pype_metadata_key = "VFX Notes" def get_project_manager(): + from . import bmdvr if not self.pm: - resolve = get_resolve_module() - self.pm = resolve.GetProjectManager() + self.pm = bmdvr.GetProjectManager() return self.pm +def get_current_project(): + # initialize project manager + get_project_manager() + + return self.pm.GetCurrentProject() + + +def get_current_sequence(): + # get current project + project = get_current_project() + + return project.GetCurrentTimeline() + + +def get_current_track_items( + filter=False, + track_type=None, + selecting_color=None): + """ Gets all available current timeline track items + """ + track_type = track_type or "video" + selecting_color = selecting_color or "Chocolate" + project = get_current_project() + sequence = get_current_sequence() + selected_clips = list() + + # get all tracks count filtered by track type + selected_track_count = sequence.GetTrackCount(track_type) + + # loop all tracks and get items + _clips = dict() + for track_index in range(1, (int(selected_track_count) + 1)): + track_name = 
sequence.GetTrackName(track_type, track_index) + track_track_items = sequence.GetItemListInTrack( + track_type, track_index) + _clips[track_index] = track_track_items + + _data = { + "project": project, + "sequence": sequence, + "track": { + "name": track_name, + "index": track_index, + "type": track_type} + } + # get track item object and its color + for clip_index, ti in enumerate(_clips[track_index]): + data = _data.copy() + data["clip"] = { + "item": ti, + "index": clip_index + } + ti_color = ti.GetClipColor() + if filter is True: + if selecting_color in ti_color: + selected_clips.append(data) + # ti.ClearClipColor() + else: + selected_clips.append(data) + + return selected_clips + + +def create_current_sequence_media_bin(sequence): + seq_name = sequence.GetName() + media_pool = get_current_project().GetMediaPool() + root_folder = media_pool.GetRootFolder() + sub_folders = root_folder.GetSubFolderList() + testing_names = list() + + print(f"_ sub_folders: {sub_folders}") + for subfolder in sub_folders: + subf_name = subfolder.GetName() + if seq_name in subf_name: + testing_names.append(subfolder) + else: + testing_names.append(False) + + matching = next((f for f in testing_names if f is not False), None) + + if not matching: + new_folder = media_pool.AddSubFolder(root_folder, seq_name) + media_pool.SetCurrentFolder(new_folder) + else: + media_pool.SetCurrentFolder(matching) + + return media_pool.GetCurrentFolder() + + +def get_name_with_data(clip_data, presets): + """ + Take hierarchy data from presets and build name with parents data + + Args: + clip_data (dict): clip data from `get_current_track_items()` + presets (dict): data from create plugin + + Returns: + list: name, data + + """ + def _replace_hash_to_expression(name, text): + _spl = text.split("#") + _len = (len(_spl) - 1) + _repl = f"{{{name}:0>{_len}}}" + new_text = text.replace(("#" * _len), _repl) + return new_text + + # presets data + clip_name = presets["clipName"] + hierarchy = 
presets["hierarchy"] + hierarchy_data = presets["hierarchyData"].copy() + count_from = presets["countFrom"] + steps = presets["steps"] + + # reset rename_add + if self.rename_add < count_from: + self.rename_add = count_from + + # shot num calculate + if self.rename_index == 0: + shot_num = self.rename_add + else: + shot_num = self.rename_add + steps + + print(f"shot_num: {shot_num}") + + # clip data + _data = { + "sequence": clip_data["sequence"].GetName(), + "track": clip_data["track"]["name"].replace(" ", "_"), + "shot": shot_num + } + + # solve # in test to pythonic explression + for k, v in hierarchy_data.items(): + if "#" not in v: + continue + hierarchy_data[k] = _replace_hash_to_expression(k, v) + + # fill up pythonic expresisons + for k, v in hierarchy_data.items(): + hierarchy_data[k] = v.format(**_data) + + # fill up clip name and hierarchy keys + hierarchy = hierarchy.format(**hierarchy_data) + clip_name = clip_name.format(**hierarchy_data) + + self.rename_add = shot_num + print(f"shot_num: {shot_num}") + + return (clip_name, { + "hierarchy": hierarchy, + "hierarchyData": hierarchy_data + }) + + +def create_compound_clip(clip_data, folder, rename=False, **kwargs): + """ + Convert timeline object into nested timeline object + + Args: + clip_data (dict): timeline item object packed into dict + with project, timeline (sequence) + folder (resolve.MediaPool.Folder): media pool folder object, + rename (bool)[optional]: renaming in sequence or not + kwargs (optional): additional data needed for rename=True (presets) + + Returns: + resolve.MediaPoolItem: media pool item with compound clip timeline(cct) + """ + # get basic objects form data + project = clip_data["project"] + sequence = clip_data["sequence"] + clip = clip_data["clip"] + + # get details of objects + clip_item = clip["item"] + track = clip_data["track"] + + mp = project.GetMediaPool() + + # get clip attributes + clip_attributes = get_clip_attributes(clip_item) + print(f"_ clip_attributes: 
{pformat(clip_attributes)}") + + if rename: + presets = kwargs.get("presets") + if presets: + name, data = get_name_with_data(clip_data, presets) + # add hirarchy data to clip attributes + clip_attributes.update(data) + else: + name = "{:0>3}_{:0>4}".format( + int(track["index"]), int(clip["index"])) + else: + # build name + clip_name_split = clip_item.GetName().split(".") + name = "_".join([ + track["name"], + str(track["index"]), + clip_name_split[0], + str(clip["index"])] + ) + + # get metadata + mp_item = clip_item.GetMediaPoolItem() + mp_props = mp_item.GetClipProperty() + + mp_first_frame = int(mp_props["Start"]) + mp_last_frame = int(mp_props["End"]) + + # initialize basic source timing for otio + ci_l_offset = clip_item.GetLeftOffset() + ci_duration = clip_item.GetDuration() + rate = float(mp_props["FPS"]) + + # source rational times + mp_in_rc = opentime.RationalTime((ci_l_offset), rate) + mp_out_rc = opentime.RationalTime((ci_l_offset + ci_duration - 1), rate) + + # get frame in and out for clip swaping + in_frame = opentime.to_frames(mp_in_rc) + out_frame = opentime.to_frames(mp_out_rc) + + # keep original sequence + sq_origin = sequence + + # Set current folder to input media_pool_folder: + mp.SetCurrentFolder(folder) + + # check if clip doesnt exist already: + clips = folder.GetClipList() + cct = next((c for c in clips + if c.GetName() in name), None) + + if cct: + print(f"_ cct exists: {cct}") + else: + # Create empty timeline in current folder and give name: + cct = mp.CreateEmptyTimeline(name) + + # check if clip doesnt exist already: + clips = folder.GetClipList() + cct = next((c for c in clips + if c.GetName() in name), None) + print(f"_ cct created: {cct}") + + # Set current timeline to created timeline: + project.SetCurrentTimeline(cct) + + # Add input clip to the current timeline: + mp.AppendToTimeline([{ + "mediaPoolItem": mp_item, + "startFrame": mp_first_frame, + "endFrame": mp_last_frame + }]) + + # Set current timeline to the working 
timeline: + project.SetCurrentTimeline(sq_origin) + + # Add collected metadata and attributes to the comound clip: + if mp_item.GetMetadata(self.pype_metadata_key): + clip_attributes[self.pype_metadata_key] = mp_item.GetMetadata( + self.pype_metadata_key)[self.pype_metadata_key] + + # stringify + clip_attributes = json.dumps(clip_attributes) + + # add attributes to metadata + for k, v in mp_item.GetMetadata().items(): + cct.SetMetadata(k, v) + + # add metadata to cct + cct.SetMetadata(self.pype_metadata_key, clip_attributes) + + # reset start timecode of the compound clip + cct.SetClipProperty("Start TC", mp_props["Start TC"]) + + # swap clips on timeline + swap_clips(clip_item, cct, name, in_frame, out_frame) + + cct.SetClipColor("Pink") + return cct + + +def swap_clips(from_clip, to_clip, to_clip_name, to_in_frame, to_out_frame): + """ + Swaping clips on timeline in timelineItem + + It will add take and activate it to the frame range which is inputted + + Args: + from_clip (resolve.mediaPoolItem) + to_clip (resolve.mediaPoolItem) + to_clip_name (str): name of to_clip + to_in_frame (float): cut in frame, usually `GetLeftOffset()` + to_out_frame (float): cut out frame, usually left offset plus duration + + Returns: + bool: True if successfully replaced + + """ + # add clip item as take to timeline + take = from_clip.AddTake( + to_clip, + float(to_in_frame), + float(to_out_frame) + ) + + if not take: + return False + + for take_index in range(1, (int(from_clip.GetTakesCount()) + 1)): + take_item = from_clip.GetTakeByIndex(take_index) + take_mp_item = take_item["mediaPoolItem"] + if to_clip_name in take_mp_item.GetName(): + from_clip.SelectTakeByIndex(take_index) + from_clip.FinalizeTake() + return True + return False + + +def validate_tc(x): + # Validate and reformat timecode string + + if len(x) != 11: + print('Invalid timecode. 
Try again.') + + c = ':' + colonized = x[:2] + c + x[3:5] + c + x[6:8] + c + x[9:] + + if colonized.replace(':', '').isdigit(): + print(f"_ colonized: {colonized}") + return colonized + else: + print('Invalid timecode. Try again.') + + +def get_pype_clip_metadata(clip): + """ + Get pype metadata created by creator plugin + + Attributes: + clip (resolve.TimelineItem): resolve's object + + Returns: + dict: hierarchy, orig clip attributes + """ + mp_item = clip.GetMediaPoolItem() + metadata = mp_item.GetMetadata() + + return metadata.get(self.pype_metadata_key) + + +def get_clip_attributes(clip): + """ + Collect basic atrributes from resolve timeline item + + Args: + clip (resolve.TimelineItem): timeline item object + + Returns: + dict: all collected attributres as key: values + """ + mp_item = clip.GetMediaPoolItem() + + data = { + "clipIn": clip.GetStart(), + "clipOut": clip.GetEnd(), + "clipLeftOffset": clip.GetLeftOffset(), + "clipRightOffset": clip.GetRightOffset(), + "clipMarkers": clip.GetMarkers(), + "clipFlags": clip.GetFlagList(), + "sourceId": mp_item.GetMediaId(), + "sourceProperties": mp_item.GetClipProperty() + } + return data + + def set_project_manager_to_folder_name(folder_name): """ Sets context of Project manager to given folder by name. 
diff --git a/pype/hosts/resolve/menu_style.qss b/pype/hosts/resolve/menu_style.qss index df4fd7e949..ea11c4ca2e 100644 --- a/pype/hosts/resolve/menu_style.qss +++ b/pype/hosts/resolve/menu_style.qss @@ -1,6 +1,7 @@ QWidget { background-color: #282828; border-radius: 3; + font-size: 13px; } QPushButton { @@ -20,10 +21,38 @@ QPushButton:hover { color: #e64b3d; } +QSpinBox { + border: 1px solid #090909; + background-color: #201f1f; + color: #ffffff; + padding: 2; + max-width: 8em; + qproperty-alignment: AlignCenter; +} + +QLineEdit { + border: 1px solid #090909; + border-radius: 3px; + background-color: #201f1f; + color: #ffffff; + padding: 2; + min-width: 10em; + qproperty-alignment: AlignCenter; +} + #PypeMenu { border: 1px solid #fef9ef; } -#Spacer { +QVBoxLayout { background-color: #282828; } + +#Devider { + border: 1px solid #090909; + background-color: #585858; +} + +QLabel { + color: #77776b; +} diff --git a/pype/hosts/resolve/pipeline.py b/pype/hosts/resolve/pipeline.py index 967aed1436..92bef2e13b 100644 --- a/pype/hosts/resolve/pipeline.py +++ b/pype/hosts/resolve/pipeline.py @@ -2,27 +2,23 @@ Basic avalon integration """ import os -# import sys +import contextlib from avalon.tools import workfiles from avalon import api as avalon from pyblish import api as pyblish -from pypeapp import Logger +import pype +from pype.api import Logger log = Logger().get_logger(__name__, "resolve") -# self = sys.modules[__name__] - AVALON_CONFIG = os.environ["AVALON_CONFIG"] -PARENT_DIR = os.path.dirname(__file__) -PACKAGE_DIR = os.path.dirname(PARENT_DIR) -PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins") -LOAD_PATH = os.path.join(PLUGINS_DIR, "resolve", "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "resolve", "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "resolve", "inventory") +LOAD_PATH = os.path.join(pype.PLUGINS_DIR, "resolve", "load") +CREATE_PATH = os.path.join(pype.PLUGINS_DIR, "resolve", "create") +INVENTORY_PATH = os.path.join(pype.PLUGINS_DIR, 
"resolve", "inventory") PUBLISH_PATH = os.path.join( - PLUGINS_DIR, "resolve", "publish" + pype.PLUGINS_DIR, "resolve", "publish" ).replace("\\", "/") AVALON_CONTAINERS = ":AVALON_CONTAINERS" @@ -40,11 +36,13 @@ def install(): See the Maya equivalent for inspiration on how to implement this. """ + from . import get_resolve_module # Disable all families except for the ones we explicitly want to see family_states = [ "imagesequence", - "mov" + "mov", + "clip" ] avalon.data["familiesStateDefault"] = False avalon.data["familiesStateToggled"] = family_states @@ -59,6 +57,8 @@ def install(): avalon.register_plugin_path(avalon.Creator, CREATE_PATH) avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH) + get_resolve_module() + def uninstall(): """Uninstall all tha was installed @@ -140,3 +140,26 @@ def publish(parent): """Shorthand to publish from within host""" from avalon.tools import publish return publish.show(parent) + + +@contextlib.contextmanager +def maintained_selection(): + """Maintain selection during context + + Example: + >>> with maintained_selection(): + ... 
node['selected'].setValue(True) + >>> print(node['selected'].value()) + False + """ + try: + # do the operation + yield + finally: + pass + + +def reset_selection(): + """Deselect all selected nodes + """ + pass diff --git a/pype/hosts/resolve/plugin.py b/pype/hosts/resolve/plugin.py index 628d4bdb26..72eec04896 100644 --- a/pype/hosts/resolve/plugin.py +++ b/pype/hosts/resolve/plugin.py @@ -1,6 +1,182 @@ +import re from avalon import api -# from pype.hosts.resolve import lib as drlib +from pype.hosts import resolve from avalon.vendor import qargparse +from pype.api import config + +from Qt import QtWidgets, QtCore + + +class CreatorWidget(QtWidgets.QDialog): + + # output items + items = dict() + + def __init__(self, name, info, presets, parent=None): + super(CreatorWidget, self).__init__(parent) + + self.setObjectName(name) + + self.setWindowFlags( + QtCore.Qt.Window + | QtCore.Qt.CustomizeWindowHint + | QtCore.Qt.WindowTitleHint + | QtCore.Qt.WindowCloseButtonHint + | QtCore.Qt.WindowStaysOnTopHint + ) + self.setWindowTitle(name or "Pype Creator Input") + + # Where inputs and labels are set + self.content_widget = [QtWidgets.QWidget(self)] + top_layout = QtWidgets.QFormLayout(self.content_widget[0]) + top_layout.setObjectName("ContentLayout") + top_layout.addWidget(Spacer(5, self)) + + # first add widget tag line + top_layout.addWidget(QtWidgets.QLabel(info)) + + top_layout.addWidget(Spacer(5, self)) + + # main dynamic layout + self.content_widget.append(QtWidgets.QWidget(self)) + content_layout = QtWidgets.QFormLayout(self.content_widget[-1]) + + # add preset data into input widget layout + self.items = self.add_presets_to_layout(content_layout, presets) + + # Confirmation buttons + btns_widget = QtWidgets.QWidget(self) + btns_layout = QtWidgets.QHBoxLayout(btns_widget) + + cancel_btn = QtWidgets.QPushButton("Cancel") + btns_layout.addWidget(cancel_btn) + + ok_btn = QtWidgets.QPushButton("Ok") + btns_layout.addWidget(ok_btn) + + # Main layout of the dialog + 
main_layout = QtWidgets.QVBoxLayout(self) + main_layout.setContentsMargins(10, 10, 10, 10) + main_layout.setSpacing(0) + + # adding content widget + for w in self.content_widget: + main_layout.addWidget(w) + + main_layout.addWidget(btns_widget) + + ok_btn.clicked.connect(self._on_ok_clicked) + cancel_btn.clicked.connect(self._on_cancel_clicked) + + stylesheet = resolve.menu.load_stylesheet() + self.setStyleSheet(stylesheet) + + def _on_ok_clicked(self): + self.result = self.value(self.items) + self.close() + + def _on_cancel_clicked(self): + self.result = None + self.close() + + def value(self, data): + for k, v in data.items(): + if isinstance(v, dict): + print(f"nested: {k}") + data[k] = self.value(v) + elif getattr(v, "value", None): + print(f"normal int: {k}") + result = v.value() + data[k] = result() + else: + print(f"normal text: {k}") + result = v.text() + data[k] = result() + return data + + def camel_case_split(self, text): + matches = re.finditer( + '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', text) + return " ".join([str(m.group(0)).capitalize() for m in matches]) + + def create_row(self, layout, type, text, **kwargs): + # get type attribute from qwidgets + attr = getattr(QtWidgets, type) + + # convert label text to normal capitalized text with spaces + label_text = self.camel_case_split(text) + + # assign the new text to lable widget + label = QtWidgets.QLabel(label_text) + label.setObjectName("LineLabel") + + # create attribute name text strip of spaces + attr_name = text.replace(" ", "") + + # create attribute and assign default values + setattr( + self, + attr_name, + attr(parent=self)) + + # assign the created attribute to variable + item = getattr(self, attr_name) + for func, val in kwargs.items(): + if getattr(item, func): + func_attr = getattr(item, func) + func_attr(val) + + # add to layout + layout.addRow(label, item) + + return item + + def add_presets_to_layout(self, content_layout, data): + for k, v in data.items(): + if 
isinstance(v, dict): + # adding spacer between sections + self.content_widget.append(QtWidgets.QWidget(self)) + devider = QtWidgets.QVBoxLayout(self.content_widget[-1]) + devider.addWidget(Spacer(5, self)) + devider.setObjectName("Devider") + + # adding nested layout with label + self.content_widget.append(QtWidgets.QWidget(self)) + nested_content_layout = QtWidgets.QFormLayout( + self.content_widget[-1]) + nested_content_layout.setObjectName("NestedContentLayout") + + # add nested key as label + self.create_row(nested_content_layout, "QLabel", k) + data[k] = self.add_presets_to_layout(nested_content_layout, v) + elif isinstance(v, str): + print(f"layout.str: {k}") + print(f"content_layout: {content_layout}") + data[k] = self.create_row( + content_layout, "QLineEdit", k, setText=v) + elif isinstance(v, int): + print(f"layout.int: {k}") + print(f"content_layout: {content_layout}") + data[k] = self.create_row( + content_layout, "QSpinBox", k, setValue=v) + return data + + +class Spacer(QtWidgets.QWidget): + def __init__(self, height, *args, **kwargs): + super(self.__class__, self).__init__(*args, **kwargs) + + self.setFixedHeight(height) + + real_spacer = QtWidgets.QWidget(self) + real_spacer.setObjectName("Spacer") + real_spacer.setFixedHeight(height) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(real_spacer) + + self.setLayout(layout) def get_reference_node_parents(ref): @@ -73,3 +249,25 @@ class SequenceLoader(api.Loader): """Remove an existing `container` """ pass + + +class Creator(api.Creator): + """Creator class wrapper + """ + marker_color = "Purple" + + def __init__(self, *args, **kwargs): + super(Creator, self).__init__(*args, **kwargs) + self.presets = config.get_presets()['plugins']["resolve"][ + "create"].get(self.__class__.__name__, {}) + + # adding basic current context resolve objects + self.project = resolve.get_current_project() + self.sequence = resolve.get_current_sequence() + + if 
(self.options or {}).get("useSelection"): + self.selected = resolve.get_current_track_items(filter=True) + else: + self.selected = resolve.get_current_track_items(filter=False) + + self.widget = CreatorWidget diff --git a/pype/hosts/resolve/preload_console.py b/pype/hosts/resolve/preload_console.py index ea1bd4f180..58975777b8 100644 --- a/pype/hosts/resolve/preload_console.py +++ b/pype/hosts/resolve/preload_console.py @@ -1,7 +1,7 @@ #!/usr/bin/env python import time from pype.hosts.resolve.utils import get_resolve_module -from pypeapp import Logger +from pype.api import Logger log = Logger().get_logger(__name__, "resolve") diff --git a/pype/hosts/resolve/utility_scripts/Pype_menu.py b/pype/hosts/resolve/utility_scripts/Pype_menu.py index 1f5cd36277..230a7a80f0 100644 --- a/pype/hosts/resolve/utility_scripts/Pype_menu.py +++ b/pype/hosts/resolve/utility_scripts/Pype_menu.py @@ -3,7 +3,7 @@ import sys import avalon.api as avalon import pype -from pypeapp import Logger +from pype.api import Logger log = Logger().get_logger(__name__) diff --git a/pype/hosts/resolve/utility_scripts/__dev_compound_clip.py b/pype/hosts/resolve/utility_scripts/__dev_compound_clip.py deleted file mode 100644 index fe47008c70..0000000000 --- a/pype/hosts/resolve/utility_scripts/__dev_compound_clip.py +++ /dev/null @@ -1,65 +0,0 @@ -#! 
python3 -# -*- coding: utf-8 -*- - - -# convert clip def -def convert_clip(timeline=None): - """Convert timeline item (clip) into compound clip pype container - - Args: - timeline (MediaPool.Timeline): Object of timeline - - Returns: - bool: `True` if success - - Raises: - Exception: description - - """ - pass - - -# decorator function create_current_timeline_media_bin() -def create_current_timeline_media_bin(timeline=None): - """Convert timeline item (clip) into compound clip pype container - - Args: - timeline (MediaPool.Timeline): Object of timeline - - Returns: - bool: `True` if success - - Raises: - Exception: description - - """ - pass - - -# decorator function get_selected_track_items() -def get_selected_track_items(): - """Convert timeline item (clip) into compound clip pype container - - Args: - timeline (MediaPool.Timeline): Object of timeline - - Returns: - bool: `True` if success - - Raises: - Exception: description - - """ - print("testText") - - -# PypeCompoundClip() class -class PypeCompoundClip(object): - """docstring for .""" - - def __init__(self, arg): - super(self).__init__() - self.arg = arg - - def create_compound_clip(self): - pass diff --git a/pype/hosts/resolve/utility_scripts/__test_subprocess.py b/pype/hosts/resolve/utility_scripts/__test_subprocess.py deleted file mode 100644 index bdc57bbf00..0000000000 --- a/pype/hosts/resolve/utility_scripts/__test_subprocess.py +++ /dev/null @@ -1,35 +0,0 @@ -#! 
python3 -# -*- coding: utf-8 -*- -import os -from pypeapp import execute, Logger -from pype.hosts.resolve.utils import get_resolve_module - -log = Logger().get_logger("Resolve") - -CURRENT_DIR = os.getenv("RESOLVE_UTILITY_SCRIPTS_DIR", "") -python_dir = os.getenv("PYTHON36_RESOLVE") -python_exe = os.path.normpath( - os.path.join(python_dir, "python.exe") -) - -resolve = get_resolve_module() -PM = resolve.GetProjectManager() -P = PM.GetCurrentProject() - -log.info(P.GetName()) - - -# ______________________________________________________ -# testing subprocessing Scripts -testing_py = os.path.join(CURRENT_DIR, "ResolvePageSwitcher.py") -testing_py = os.path.normpath(testing_py) -log.info(f"Testing path to script: `{testing_py}`") - -returncode = execute( - [python_exe, os.path.normpath(testing_py)], - env=dict(os.environ) -) - -# Check if output file exists -if returncode != 0: - log.error("Executing failed!") diff --git a/pype/hosts/resolve/utility_scripts/test.py b/pype/hosts/resolve/utility_scripts/test.py new file mode 100644 index 0000000000..69dc4768bd --- /dev/null +++ b/pype/hosts/resolve/utility_scripts/test.py @@ -0,0 +1,21 @@ +#! 
python3 +import sys +from pype.api import Logger +import DaVinciResolveScript as bmdvr + + +log = Logger().get_logger(__name__) + + +def main(): + import pype.hosts.resolve as bmdvr + bm = bmdvr.utils.get_resolve_module() + log.info(f"blackmagicmodule: {bm}") + + +print(f"_>> bmdvr.scriptapp(Resolve): {bmdvr.scriptapp('Resolve')}") + + +if __name__ == "__main__": + result = main() + sys.exit(not bool(result)) diff --git a/pype/hosts/resolve/utils.py b/pype/hosts/resolve/utils.py index f5add53a6b..e11cc64b3b 100644 --- a/pype/hosts/resolve/utils.py +++ b/pype/hosts/resolve/utils.py @@ -9,18 +9,16 @@ import os import shutil from pypeapp import Logger - log = Logger().get_logger(__name__, "resolve") -self = sys.modules[__name__] -self.bmd = None - def get_resolve_module(): + from pype.hosts import resolve # dont run if already loaded - if self.bmd: - return self.bmd - + if resolve.bmdvr: + log.info(("resolve module is assigned to " + f"`pype.hosts.resolve.bmdvr`: {resolve.bmdvr}")) + return resolve.bmdvr try: """ The PYTHONPATH needs to be set correctly for this import @@ -71,8 +69,14 @@ def get_resolve_module(): ) sys.exit() # assign global var and return - self.bmd = bmd.scriptapp("Resolve") - return self.bmd + bmdvr = bmd.scriptapp("Resolve") + # bmdvf = bmd.scriptapp("Fusion") + resolve.bmdvr = bmdvr + resolve.bmdvf = bmdvr.Fusion() + log.info(("Assigning resolve module to " + f"`pype.hosts.resolve.bmdvr`: {resolve.bmdvr}")) + log.info(("Assigning resolve module to " + f"`pype.hosts.resolve.bmdvf`: {resolve.bmdvf}")) def _sync_utility_scripts(env=None): diff --git a/pype/hosts/resolve/workio.py b/pype/hosts/resolve/workio.py index e1e30a8734..9d8d320a3c 100644 --- a/pype/hosts/resolve/workio.py +++ b/pype/hosts/resolve/workio.py @@ -2,8 +2,9 @@ import os from pypeapp import Logger -from .lib import ( +from . 
import ( get_project_manager, + get_current_project, set_project_manager_to_folder_name ) @@ -26,7 +27,7 @@ def save_file(filepath): pm = get_project_manager() file = os.path.basename(filepath) fname, _ = os.path.splitext(file) - project = pm.GetCurrentProject() + project = get_current_project() name = project.GetName() if "Untitled Project" not in name: diff --git a/pype/lib.py b/pype/lib.py index 87808e53f5..601c85f521 100644 --- a/pype/lib.py +++ b/pype/lib.py @@ -7,16 +7,19 @@ import json import collections import logging import itertools +import copy import contextlib import subprocess +import getpass import inspect +import acre +import platform from abc import ABCMeta, abstractmethod from avalon import io, pipeline import six import avalon.api -from .api import config - +from .api import config, Anatomy log = logging.getLogger(__name__) @@ -110,7 +113,9 @@ def _subprocess(*args, **kwargs): log.error(line) if proc.returncode != 0: - raise ValueError("\"{}\" was not successful: {}".format(args, output)) + raise ValueError( + "\"{}\" was not successful:\nOutput: {}\nError: {}".format( + args, output, error)) return output @@ -520,14 +525,6 @@ def set_io_database(): io.install() -def get_all_avalon_projects(): - db = get_avalon_database() - projects = [] - for name in db.collection_names(): - projects.append(db[name].find_one({'type': 'project'})) - return projects - - def filter_pyblish_plugins(plugins): """ This servers as plugin filter / modifier for pyblish. 
It will load plugin @@ -749,8 +746,9 @@ class PypeHook: def get_linked_assets(asset_entity): """Return linked assets for `asset_entity`.""" - # TODO implement - return [] + inputs = asset_entity["data"].get("inputs", []) + inputs = [io.find_one({"_id": x}) for x in inputs] + return inputs def map_subsets_by_family(subsets): @@ -1387,3 +1385,265 @@ def ffprobe_streams(path_to_file): popen_output = popen.communicate()[0] log.debug("FFprobe output: {}".format(popen_output)) return json.loads(popen_output)["streams"] + + +def source_hash(filepath, *args): + """Generate simple identifier for a source file. + This is used to identify whether a source file has previously been + processe into the pipeline, e.g. a texture. + The hash is based on source filepath, modification time and file size. + This is only used to identify whether a specific source file was already + published before from the same location with the same modification date. + We opt to do it this way as opposed to Avalanch C4 hash as this is much + faster and predictable enough for all our production use cases. + Args: + filepath (str): The source file path. + You can specify additional arguments in the function + to allow for specific 'processing' values to be included. + """ + # We replace dots with comma because . cannot be a key in a pymongo dict. + file_name = os.path.basename(filepath) + time = str(os.path.getmtime(filepath)) + size = str(os.path.getsize(filepath)) + return "|".join([file_name, time, size] + list(args)).replace(".", ",") + + +def get_latest_version(asset_name, subset_name): + """Retrieve latest version from `asset_name`, and `subset_name`. + + Args: + asset_name (str): Name of asset. + subset_name (str): Name of subset. 
+ """ + # Get asset + asset_name = io.find_one( + {"type": "asset", "name": asset_name}, projection={"name": True} + ) + + subset = io.find_one( + {"type": "subset", "name": subset_name, "parent": asset_name["_id"]}, + projection={"_id": True, "name": True}, + ) + + # Check if subsets actually exists. + assert subset, "No subsets found." + + # Get version + version_projection = { + "name": True, + "parent": True, + } + + version = io.find_one( + {"type": "version", "parent": subset["_id"]}, + projection=version_projection, + sort=[("name", -1)], + ) + + assert version, "No version found, this is a bug" + + return version + + +class ApplicationLaunchFailed(Exception): + pass + + +def launch_application(project_name, asset_name, task_name, app_name): + database = get_avalon_database() + project_document = database[project_name].find_one({"type": "project"}) + asset_document = database[project_name].find_one({ + "type": "asset", + "name": asset_name + }) + + asset_doc_parents = asset_document["data"].get("parents") + hierarchy = "/".join(asset_doc_parents) + + app_def = avalon.lib.get_application(app_name) + app_label = app_def.get("ftrack_label", app_def.get("label", app_name)) + + host_name = app_def["application_dir"] + data = { + "project": { + "name": project_document["name"], + "code": project_document["data"].get("code") + }, + "task": task_name, + "asset": asset_name, + "app": host_name, + "hierarchy": hierarchy + } + + try: + anatomy = Anatomy(project_name) + anatomy_filled = anatomy.format(data) + workdir = os.path.normpath(anatomy_filled["work"]["folder"]) + + except Exception as exc: + raise ApplicationLaunchFailed( + "Error in anatomy.format: {}".format(str(exc)) + ) + + try: + os.makedirs(workdir) + except FileExistsError: + pass + + last_workfile_path = None + extensions = avalon.api.HOST_WORKFILE_EXTENSIONS.get(host_name) + if extensions: + # Find last workfile + file_template = anatomy.templates["work"]["file"] + data.update({ + "version": 1, + 
"user": os.environ.get("PYPE_USERNAME") or getpass.getuser(), + "ext": extensions[0] + }) + + last_workfile_path = avalon.api.last_workfile( + workdir, file_template, data, extensions, True + ) + + # set environments for Avalon + prep_env = copy.deepcopy(os.environ) + prep_env.update({ + "AVALON_PROJECT": project_name, + "AVALON_ASSET": asset_name, + "AVALON_TASK": task_name, + "AVALON_APP": host_name, + "AVALON_APP_NAME": app_name, + "AVALON_HIERARCHY": hierarchy, + "AVALON_WORKDIR": workdir + }) + + start_last_workfile = avalon.api.should_start_last_workfile( + project_name, host_name, task_name + ) + # Store boolean as "0"(False) or "1"(True) + prep_env["AVALON_OPEN_LAST_WORKFILE"] = ( + str(int(bool(start_last_workfile))) + ) + + if ( + start_last_workfile + and last_workfile_path + and os.path.exists(last_workfile_path) + ): + prep_env["AVALON_LAST_WORKFILE"] = last_workfile_path + + prep_env.update(anatomy.roots_obj.root_environments()) + + # collect all the 'environment' attributes from parents + tools_attr = [prep_env["AVALON_APP"], prep_env["AVALON_APP_NAME"]] + tools_env = asset_document["data"].get("tools_env") or [] + tools_attr.extend(tools_env) + + tools_env = acre.get_tools(tools_attr) + env = acre.compute(tools_env) + env = acre.merge(env, current_env=dict(prep_env)) + + # Get path to execute + st_temp_path = os.environ["PYPE_CONFIG"] + os_plat = platform.system().lower() + + # Path to folder with launchers + path = os.path.join(st_temp_path, "launchers", os_plat) + + # Full path to executable launcher + execfile = None + + launch_hook = app_def.get("launch_hook") + if launch_hook: + log.info("launching hook: {}".format(launch_hook)) + ret_val = execute_hook(launch_hook, env=env) + if not ret_val: + raise ApplicationLaunchFailed( + "Hook didn't finish successfully {}".format(app_label) + ) + + if sys.platform == "win32": + for ext in os.environ["PATHEXT"].split(os.pathsep): + fpath = os.path.join(path.strip('"'), app_def["executable"] + ext) + if 
os.path.isfile(fpath) and os.access(fpath, os.X_OK): + execfile = fpath + break + + # Run SW if was found executable + if execfile is None: + raise ApplicationLaunchFailed( + "We didn't find launcher for {}".format(app_label) + ) + + popen = avalon.lib.launch( + executable=execfile, args=[], environment=env + ) + + elif ( + sys.platform.startswith("linux") + or sys.platform.startswith("darwin") + ): + execfile = os.path.join(path.strip('"'), app_def["executable"]) + # Run SW if was found executable + if execfile is None: + raise ApplicationLaunchFailed( + "We didn't find launcher for {}".format(app_label) + ) + + if not os.path.isfile(execfile): + raise ApplicationLaunchFailed( + "Launcher doesn't exist - {}".format(execfile) + ) + + try: + fp = open(execfile) + except PermissionError as perm_exc: + raise ApplicationLaunchFailed( + "Access denied on launcher {} - {}".format(execfile, perm_exc) + ) + + fp.close() + # check executable permission + if not os.access(execfile, os.X_OK): + raise ApplicationLaunchFailed( + "No executable permission - {}".format(execfile) + ) + + popen = avalon.lib.launch( # noqa: F841 + "/usr/bin/env", args=["bash", execfile], environment=env + ) + return popen + + +class ApplicationAction(avalon.api.Action): + """Default application launcher + + This is a convenience application Action that when "config" refers to a + parsed application `.toml` this can launch the application. 
+ + """ + + config = None + group = None + variant = None + required_session_keys = ( + "AVALON_PROJECT", + "AVALON_ASSET", + "AVALON_TASK" + ) + + def is_compatible(self, session): + for key in self.required_session_keys: + if key not in session: + return False + return True + + def process(self, session, **kwargs): + """Process the full Application action""" + + project_name = session["AVALON_PROJECT"] + asset_name = session["AVALON_ASSET"] + task_name = session["AVALON_TASK"] + return launch_application( + project_name, asset_name, task_name, self.name + ) diff --git a/pype/modules/adobe_communicator/lib/__init__.py b/pype/modules/adobe_communicator/lib/__init__.py index 23aee81275..f918e49a60 100644 --- a/pype/modules/adobe_communicator/lib/__init__.py +++ b/pype/modules/adobe_communicator/lib/__init__.py @@ -1,8 +1,6 @@ -from .io_nonsingleton import DbConnector from .rest_api import AdobeRestApi, PUBLISH_PATHS __all__ = [ "PUBLISH_PATHS", - "DbConnector", "AdobeRestApi" ] diff --git a/pype/modules/adobe_communicator/lib/io_nonsingleton.py b/pype/modules/adobe_communicator/lib/io_nonsingleton.py deleted file mode 100644 index da37c657c6..0000000000 --- a/pype/modules/adobe_communicator/lib/io_nonsingleton.py +++ /dev/null @@ -1,460 +0,0 @@ -""" -Wrapper around interactions with the database - -Copy of io module in avalon-core. - - In this case not working as singleton with api.Session! 
-""" - -import os -import time -import errno -import shutil -import logging -import tempfile -import functools -import contextlib - -from avalon import schema -from avalon.vendor import requests -from avalon.io import extract_port_from_url - -# Third-party dependencies -import pymongo - - -def auto_reconnect(func): - """Handling auto reconnect in 3 retry times""" - @functools.wraps(func) - def decorated(*args, **kwargs): - object = args[0] - for retry in range(3): - try: - return func(*args, **kwargs) - except pymongo.errors.AutoReconnect: - object.log.error("Reconnecting..") - time.sleep(0.1) - else: - raise - - return decorated - - -class DbConnector(object): - - log = logging.getLogger(__name__) - - def __init__(self): - self.Session = {} - self._mongo_client = None - self._sentry_client = None - self._sentry_logging_handler = None - self._database = None - self._is_installed = False - - def __getitem__(self, key): - # gives direct access to collection withou setting `active_table` - return self._database[key] - - def __getattribute__(self, attr): - # not all methods of PyMongo database are implemented with this it is - # possible to use them too - try: - return super(DbConnector, self).__getattribute__(attr) - except AttributeError: - cur_proj = self.Session["AVALON_PROJECT"] - return self._database[cur_proj].__getattribute__(attr) - - def install(self): - """Establish a persistent connection to the database""" - if self._is_installed: - return - - logging.basicConfig() - self.Session.update(self._from_environment()) - - timeout = int(self.Session["AVALON_TIMEOUT"]) - mongo_url = self.Session["AVALON_MONGO"] - kwargs = { - "host": mongo_url, - "serverSelectionTimeoutMS": timeout - } - - port = extract_port_from_url(mongo_url) - if port is not None: - kwargs["port"] = int(port) - - self._mongo_client = pymongo.MongoClient(**kwargs) - - for retry in range(3): - try: - t1 = time.time() - self._mongo_client.server_info() - - except Exception: - 
self.log.error("Retrying..") - time.sleep(1) - timeout *= 1.5 - - else: - break - - else: - raise IOError( - "ERROR: Couldn't connect to %s in " - "less than %.3f ms" % (self.Session["AVALON_MONGO"], timeout)) - - self.log.info("Connected to %s, delay %.3f s" % ( - self.Session["AVALON_MONGO"], time.time() - t1)) - - self._install_sentry() - - self._database = self._mongo_client[self.Session["AVALON_DB"]] - self._is_installed = True - - def _install_sentry(self): - if "AVALON_SENTRY" not in self.Session: - return - - try: - from raven import Client - from raven.handlers.logging import SentryHandler - from raven.conf import setup_logging - except ImportError: - # Note: There was a Sentry address in this Session - return self.log.warning("Sentry disabled, raven not installed") - - client = Client(self.Session["AVALON_SENTRY"]) - - # Transmit log messages to Sentry - handler = SentryHandler(client) - handler.setLevel(logging.WARNING) - - setup_logging(handler) - - self._sentry_client = client - self._sentry_logging_handler = handler - self.log.info( - "Connected to Sentry @ %s" % self.Session["AVALON_SENTRY"] - ) - - def _from_environment(self): - Session = { - item[0]: os.getenv(item[0], item[1]) - for item in ( - # Root directory of projects on disk - ("AVALON_PROJECTS", None), - - # Name of current Project - ("AVALON_PROJECT", ""), - - # Name of current Asset - ("AVALON_ASSET", ""), - - # Name of current silo - ("AVALON_SILO", ""), - - # Name of current task - ("AVALON_TASK", None), - - # Name of current app - ("AVALON_APP", None), - - # Path to working directory - ("AVALON_WORKDIR", None), - - # Name of current Config - # TODO(marcus): Establish a suitable default config - ("AVALON_CONFIG", "no_config"), - - # Name of Avalon in graphical user interfaces - # Use this to customise the visual appearance of Avalon - # to better integrate with your surrounding pipeline - ("AVALON_LABEL", "Avalon"), - - # Used during any connections to the outside world - 
("AVALON_TIMEOUT", "1000"), - - # Address to Asset Database - ("AVALON_MONGO", "mongodb://localhost:27017"), - - # Name of database used in MongoDB - ("AVALON_DB", "avalon"), - - # Address to Sentry - ("AVALON_SENTRY", None), - - # Address to Deadline Web Service - # E.g. http://192.167.0.1:8082 - ("AVALON_DEADLINE", None), - - # Enable features not necessarily stable. The user's own risk - ("AVALON_EARLY_ADOPTER", None), - - # Address of central asset repository, contains - # the following interface: - # /upload - # /download - # /manager (optional) - ("AVALON_LOCATION", "http://127.0.0.1"), - - # Boolean of whether to upload published material - # to central asset repository - ("AVALON_UPLOAD", None), - - # Generic username and password - ("AVALON_USERNAME", "avalon"), - ("AVALON_PASSWORD", "secret"), - - # Unique identifier for instances in working files - ("AVALON_INSTANCE_ID", "avalon.instance"), - ("AVALON_CONTAINER_ID", "avalon.container"), - - # Enable debugging - ("AVALON_DEBUG", None), - - ) if os.getenv(item[0], item[1]) is not None - } - - Session["schema"] = "avalon-core:session-2.0" - try: - schema.validate(Session) - except schema.ValidationError as e: - # TODO(marcus): Make this mandatory - self.log.warning(e) - - return Session - - def uninstall(self): - """Close any connection to the database""" - try: - self._mongo_client.close() - except AttributeError: - pass - - self._mongo_client = None - self._database = None - self._is_installed = False - - def active_project(self): - """Return the name of the active project""" - return self.Session["AVALON_PROJECT"] - - def activate_project(self, project_name): - self.Session["AVALON_PROJECT"] = project_name - - def projects(self): - """List available projects - - Returns: - list of project documents - - """ - - collection_names = self.collections() - for project in collection_names: - if project in ("system.indexes",): - continue - - # Each collection will have exactly one project document - document = 
self.find_project(project) - - if document is not None: - yield document - - def locate(self, path): - """Traverse a hierarchy from top-to-bottom - - Example: - representation = locate(["hulk", "Bruce", "modelDefault", 1, "ma"]) - - Returns: - representation (ObjectId) - - """ - - components = zip( - ("project", "asset", "subset", "version", "representation"), - path - ) - - parent = None - for type_, name in components: - latest = (type_ == "version") and name in (None, -1) - - try: - if latest: - parent = self.find_one( - filter={ - "type": type_, - "parent": parent - }, - projection={"_id": 1}, - sort=[("name", -1)] - )["_id"] - else: - parent = self.find_one( - filter={ - "type": type_, - "name": name, - "parent": parent - }, - projection={"_id": 1}, - )["_id"] - - except TypeError: - return None - - return parent - - @auto_reconnect - def collections(self): - return self._database.collection_names() - - @auto_reconnect - def find_project(self, project): - return self._database[project].find_one({"type": "project"}) - - @auto_reconnect - def insert_one(self, item): - assert isinstance(item, dict), "item must be of type " - schema.validate(item) - return self._database[self.Session["AVALON_PROJECT"]].insert_one(item) - - @auto_reconnect - def insert_many(self, items, ordered=True): - # check if all items are valid - assert isinstance(items, list), "`items` must be of type " - for item in items: - assert isinstance(item, dict), "`item` must be of type " - schema.validate(item) - - return self._database[self.Session["AVALON_PROJECT"]].insert_many( - items, - ordered=ordered) - - @auto_reconnect - def find(self, filter, projection=None, sort=None): - return self._database[self.Session["AVALON_PROJECT"]].find( - filter=filter, - projection=projection, - sort=sort - ) - - @auto_reconnect - def find_one(self, filter, projection=None, sort=None): - assert isinstance(filter, dict), "filter must be " - - return self._database[self.Session["AVALON_PROJECT"]].find_one( - 
filter=filter, - projection=projection, - sort=sort - ) - - @auto_reconnect - def save(self, *args, **kwargs): - return self._database[self.Session["AVALON_PROJECT"]].save( - *args, **kwargs) - - @auto_reconnect - def replace_one(self, filter, replacement): - return self._database[self.Session["AVALON_PROJECT"]].replace_one( - filter, replacement) - - @auto_reconnect - def update_many(self, filter, update): - return self._database[self.Session["AVALON_PROJECT"]].update_many( - filter, update) - - @auto_reconnect - def distinct(self, *args, **kwargs): - return self._database[self.Session["AVALON_PROJECT"]].distinct( - *args, **kwargs) - - @auto_reconnect - def drop(self, *args, **kwargs): - return self._database[self.Session["AVALON_PROJECT"]].drop( - *args, **kwargs) - - @auto_reconnect - def delete_many(self, *args, **kwargs): - return self._database[self.Session["AVALON_PROJECT"]].delete_many( - *args, **kwargs) - - def parenthood(self, document): - assert document is not None, "This is a bug" - - parents = list() - - while document.get("parent") is not None: - document = self.find_one({"_id": document["parent"]}) - - if document is None: - break - - if document.get("type") == "master_version": - _document = self.find_one({"_id": document["version_id"]}) - document["data"] = _document["data"] - - parents.append(document) - - return parents - - @contextlib.contextmanager - def tempdir(self): - tempdir = tempfile.mkdtemp() - try: - yield tempdir - finally: - shutil.rmtree(tempdir) - - def download(self, src, dst): - """Download `src` to `dst` - - Arguments: - src (str): URL to source file - dst (str): Absolute path to destination file - - Yields tuple (progress, error): - progress (int): Between 0-100 - error (Exception): Any exception raised when first making connection - - """ - - try: - response = requests.get( - src, - stream=True, - auth=requests.auth.HTTPBasicAuth( - self.Session["AVALON_USERNAME"], - self.Session["AVALON_PASSWORD"] - ) - ) - except 
requests.ConnectionError as e: - yield None, e - return - - with self.tempdir() as dirname: - tmp = os.path.join(dirname, os.path.basename(src)) - - with open(tmp, "wb") as f: - total_length = response.headers.get("content-length") - - if total_length is None: # no content length header - f.write(response.content) - else: - downloaded = 0 - total_length = int(total_length) - for data in response.iter_content(chunk_size=4096): - downloaded += len(data) - f.write(data) - - yield int(100.0 * downloaded / total_length), None - - try: - os.makedirs(os.path.dirname(dst)) - except OSError as e: - # An already existing destination directory is fine. - if e.errno != errno.EEXIST: - raise - - shutil.copy(tmp, dst) diff --git a/pype/modules/adobe_communicator/lib/rest_api.py b/pype/modules/adobe_communicator/lib/rest_api.py index 86739e4d80..35094d10dc 100644 --- a/pype/modules/adobe_communicator/lib/rest_api.py +++ b/pype/modules/adobe_communicator/lib/rest_api.py @@ -2,7 +2,7 @@ import os import sys import copy from pype.modules.rest_api import RestApi, route, abort, CallbackResult -from .io_nonsingleton import DbConnector +from avalon.api import AvalonMongoDB from pype.api import config, execute, Logger log = Logger().get_logger("AdobeCommunicator") @@ -14,7 +14,7 @@ PUBLISH_PATHS = [] class AdobeRestApi(RestApi): - dbcon = DbConnector() + dbcon = AvalonMongoDB() def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) diff --git a/pype/modules/avalon_apps/avalon_app.py b/pype/modules/avalon_apps/avalon_app.py index d103a84d90..7ed651f82b 100644 --- a/pype/modules/avalon_apps/avalon_app.py +++ b/pype/modules/avalon_apps/avalon_app.py @@ -1,10 +1,7 @@ -import os -import argparse -from Qt import QtGui, QtWidgets +from Qt import QtWidgets from avalon.tools import libraryloader from pype.api import Logger -from avalon import io -from launcher import launcher_widget, lib as launcher_lib +from pype.tools.launcher import LauncherWindow, actions class AvalonApps: @@ 
-12,7 +9,12 @@ class AvalonApps: self.log = Logger().get_logger(__name__) self.main_parent = main_parent self.parent = parent - self.app_launcher = None + + self.app_launcher = LauncherWindow() + + # actions.register_default_actions() + actions.register_config_actions() + actions.register_environment_actions() def process_modules(self, modules): if "RestApiServer" in modules: @@ -32,23 +34,22 @@ class AvalonApps: self.log.warning('Parent menu is not set') return - icon = QtGui.QIcon(launcher_lib.resource("icon", "main.png")) - aShowLauncher = QtWidgets.QAction(icon, "&Launcher", parent_menu) - aLibraryLoader = QtWidgets.QAction("Library", parent_menu) + action_launcher = QtWidgets.QAction("Launcher", parent_menu) + action_library_loader = QtWidgets.QAction( + "Library loader", parent_menu + ) - aShowLauncher.triggered.connect(self.show_launcher) - aLibraryLoader.triggered.connect(self.show_library_loader) + action_launcher.triggered.connect(self.show_launcher) + action_library_loader.triggered.connect(self.show_library_loader) - parent_menu.addAction(aShowLauncher) - parent_menu.addAction(aLibraryLoader) + parent_menu.addAction(action_launcher) + parent_menu.addAction(action_library_loader) def show_launcher(self): # if app_launcher don't exist create it/otherwise only show main window - if self.app_launcher is None: - io.install() - APP_PATH = launcher_lib.resource("qml", "main.qml") - self.app_launcher = launcher_widget.Launcher(APP_PATH) - self.app_launcher.window.show() + self.app_launcher.show() + self.app_launcher.raise_() + self.app_launcher.activateWindow() def show_library_loader(self): libraryloader.show( diff --git a/pype/modules/avalon_apps/rest_api.py b/pype/modules/avalon_apps/rest_api.py index 1cb9e544a7..2408e56bbc 100644 --- a/pype/modules/avalon_apps/rest_api.py +++ b/pype/modules/avalon_apps/rest_api.py @@ -4,14 +4,14 @@ import json import bson import bson.json_util from pype.modules.rest_api import RestApi, abort, CallbackResult -from 
pype.modules.ftrack.lib.io_nonsingleton import DbConnector +from avalon.api import AvalonMongoDB class AvalonRestApi(RestApi): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.dbcon = DbConnector() + self.dbcon = AvalonMongoDB() self.dbcon.install() @RestApi.route("/projects/", url_prefix="/avalon", methods="GET") diff --git a/pype/modules/clockify/__init__.py b/pype/modules/clockify/__init__.py index aab0d048de..8e11d2f5f4 100644 --- a/pype/modules/clockify/__init__.py +++ b/pype/modules/clockify/__init__.py @@ -1,14 +1,7 @@ -from .clockify_api import ClockifyAPI -from .widget_settings import ClockifySettings -from .widget_message import MessageWidget from .clockify import ClockifyModule -__all__ = [ - "ClockifyAPI", - "ClockifySettings", - "ClockifyModule", - "MessageWidget" -] - +CLASS_DEFINIION = ClockifyModule + + def tray_init(tray_widget, main_widget): return ClockifyModule(main_widget, tray_widget) diff --git a/pype/modules/clockify/clockify.py b/pype/modules/clockify/clockify.py index 2ab22702c1..fea15a1bea 100644 --- a/pype/modules/clockify/clockify.py +++ b/pype/modules/clockify/clockify.py @@ -3,17 +3,25 @@ import threading from pype.api import Logger from avalon import style from Qt import QtWidgets -from . 
import ClockifySettings, ClockifyAPI, MessageWidget +from .widgets import ClockifySettings, MessageWidget +from .clockify_api import ClockifyAPI +from .constants import CLOCKIFY_FTRACK_USER_PATH class ClockifyModule: + workspace_name = None def __init__(self, main_parent=None, parent=None): + if not self.workspace_name: + raise Exception("Clockify Workspace is not set in config.") + + os.environ["CLOCKIFY_WORKSPACE"] = self.workspace_name + self.log = Logger().get_logger(self.__class__.__name__, "PypeTray") self.main_parent = main_parent self.parent = parent - self.clockapi = ClockifyAPI() + self.clockapi = ClockifyAPI(master_parent=self) self.message_widget = None self.widget_settings = ClockifySettings(main_parent, self) self.widget_settings_required = None @@ -24,8 +32,6 @@ class ClockifyModule: self.bool_api_key_set = False self.bool_workspace_set = False self.bool_timer_run = False - - self.clockapi.set_master(self) self.bool_api_key_set = self.clockapi.set_api() def tray_start(self): @@ -43,14 +49,12 @@ class ClockifyModule: def process_modules(self, modules): if 'FtrackModule' in modules: - actions_path = os.path.sep.join([ - os.path.dirname(__file__), - 'ftrack_actions' - ]) current = os.environ.get('FTRACK_ACTIONS_PATH', '') if current: current += os.pathsep - os.environ['FTRACK_ACTIONS_PATH'] = current + actions_path + os.environ['FTRACK_ACTIONS_PATH'] = ( + current + CLOCKIFY_FTRACK_USER_PATH + ) if 'AvalonApps' in modules: from launcher import lib @@ -188,9 +192,10 @@ class ClockifyModule: ).format(project_name)) msg = ( - "Project \"{}\" is not in Clockify Workspace \"{}\"." + "Project \"{}\" is not" + " in Clockify Workspace \"{}\"." "

Please inform your Project Manager." - ).format(project_name, str(self.clockapi.workspace)) + ).format(project_name, str(self.clockapi.workspace_name)) self.message_widget = MessageWidget( self.main_parent, msg, "Clockify - Info Message" diff --git a/pype/modules/clockify/clockify_api.py b/pype/modules/clockify/clockify_api.py index f012efc002..d88b2ef8df 100644 --- a/pype/modules/clockify/clockify_api.py +++ b/pype/modules/clockify/clockify_api.py @@ -1,35 +1,39 @@ import os import re +import time import requests import json import datetime -import appdirs +from .constants import ( + CLOCKIFY_ENDPOINT, ADMIN_PERMISSION_NAMES, CREDENTIALS_JSON_PATH +) -class Singleton(type): - _instances = {} +def time_check(obj): + if obj.request_counter < 10: + obj.request_counter += 1 + return - def __call__(cls, *args, **kwargs): - if cls not in cls._instances: - cls._instances[cls] = super( - Singleton, cls - ).__call__(*args, **kwargs) - return cls._instances[cls] + wait_time = 1 - (time.time() - obj.request_time) + if wait_time > 0: + time.sleep(wait_time) + + obj.request_time = time.time() + obj.request_counter = 0 -class ClockifyAPI(metaclass=Singleton): - endpoint = "https://api.clockify.me/api/" - headers = {"X-Api-Key": None} - app_dir = os.path.normpath(appdirs.user_data_dir('pype-app', 'pype')) - file_name = 'clockify.json' - fpath = os.path.join(app_dir, file_name) - admin_permission_names = ['WORKSPACE_OWN', 'WORKSPACE_ADMIN'] - master_parent = None - workspace = None - workspace_id = None - - def set_master(self, master_parent): +class ClockifyAPI: + def __init__(self, api_key=None, master_parent=None): + self.workspace_name = None + self.workspace_id = None self.master_parent = master_parent + self.api_key = api_key + self.request_counter = 0 + self.request_time = time.time() + + @property + def headers(self): + return {"X-Api-Key": self.api_key} def verify_api(self): for key, value in self.headers.items(): @@ -42,7 +46,7 @@ class ClockifyAPI(metaclass=Singleton): 
api_key = self.get_api_key() if api_key is not None and self.validate_api_key(api_key) is True: - self.headers["X-Api-Key"] = api_key + self.api_key = api_key self.set_workspace() if self.master_parent: self.master_parent.signed_in() @@ -52,8 +56,9 @@ class ClockifyAPI(metaclass=Singleton): def validate_api_key(self, api_key): test_headers = {'X-Api-Key': api_key} action_url = 'workspaces/' + time_check(self) response = requests.get( - self.endpoint + action_url, + CLOCKIFY_ENDPOINT + action_url, headers=test_headers ) if response.status_code != 200: @@ -69,25 +74,27 @@ class ClockifyAPI(metaclass=Singleton): action_url = "/workspaces/{}/users/{}/permissions".format( workspace_id, user_id ) + time_check(self) response = requests.get( - self.endpoint + action_url, + CLOCKIFY_ENDPOINT + action_url, headers=self.headers ) user_permissions = response.json() for perm in user_permissions: - if perm['name'] in self.admin_permission_names: + if perm['name'] in ADMIN_PERMISSION_NAMES: return True return False def get_user_id(self): action_url = 'v1/user/' + time_check(self) response = requests.get( - self.endpoint + action_url, + CLOCKIFY_ENDPOINT + action_url, headers=self.headers ) # this regex is neccessary: UNICODE strings are crashing # during json serialization - id_regex ='\"{1}id\"{1}\:{1}\"{1}\w+\"{1}' + id_regex = '\"{1}id\"{1}\:{1}\"{1}\w+\"{1}' result = re.findall(id_regex, str(response.content)) if len(result) != 1: # replace with log and better message? 
@@ -98,9 +105,9 @@ class ClockifyAPI(metaclass=Singleton): def set_workspace(self, name=None): if name is None: name = os.environ.get('CLOCKIFY_WORKSPACE', None) - self.workspace = name + self.workspace_name = name self.workspace_id = None - if self.workspace is None: + if self.workspace_name is None: return try: result = self.validate_workspace() @@ -115,7 +122,7 @@ class ClockifyAPI(metaclass=Singleton): def validate_workspace(self, name=None): if name is None: - name = self.workspace + name = self.workspace_name all_workspaces = self.get_workspaces() if name in all_workspaces: return all_workspaces[name] @@ -124,25 +131,26 @@ class ClockifyAPI(metaclass=Singleton): def get_api_key(self): api_key = None try: - file = open(self.fpath, 'r') + file = open(CREDENTIALS_JSON_PATH, 'r') api_key = json.load(file).get('api_key', None) if api_key == '': api_key = None except Exception: - file = open(self.fpath, 'w') + file = open(CREDENTIALS_JSON_PATH, 'w') file.close() return api_key def save_api_key(self, api_key): data = {'api_key': api_key} - file = open(self.fpath, 'w') + file = open(CREDENTIALS_JSON_PATH, 'w') file.write(json.dumps(data)) file.close() def get_workspaces(self): action_url = 'workspaces/' + time_check(self) response = requests.get( - self.endpoint + action_url, + CLOCKIFY_ENDPOINT + action_url, headers=self.headers ) return { @@ -153,8 +161,9 @@ class ClockifyAPI(metaclass=Singleton): if workspace_id is None: workspace_id = self.workspace_id action_url = 'workspaces/{}/projects/'.format(workspace_id) + time_check(self) response = requests.get( - self.endpoint + action_url, + CLOCKIFY_ENDPOINT + action_url, headers=self.headers ) @@ -168,8 +177,9 @@ class ClockifyAPI(metaclass=Singleton): action_url = 'workspaces/{}/projects/{}/'.format( workspace_id, project_id ) + time_check(self) response = requests.get( - self.endpoint + action_url, + CLOCKIFY_ENDPOINT + action_url, headers=self.headers ) @@ -179,8 +189,9 @@ class ClockifyAPI(metaclass=Singleton): 
if workspace_id is None: workspace_id = self.workspace_id action_url = 'workspaces/{}/tags/'.format(workspace_id) + time_check(self) response = requests.get( - self.endpoint + action_url, + CLOCKIFY_ENDPOINT + action_url, headers=self.headers ) @@ -194,8 +205,9 @@ class ClockifyAPI(metaclass=Singleton): action_url = 'workspaces/{}/projects/{}/tasks/'.format( workspace_id, project_id ) + time_check(self) response = requests.get( - self.endpoint + action_url, + CLOCKIFY_ENDPOINT + action_url, headers=self.headers ) @@ -276,8 +288,9 @@ class ClockifyAPI(metaclass=Singleton): "taskId": task_id, "tagIds": tag_ids } + time_check(self) response = requests.post( - self.endpoint + action_url, + CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body ) @@ -293,8 +306,9 @@ class ClockifyAPI(metaclass=Singleton): action_url = 'workspaces/{}/timeEntries/inProgress'.format( workspace_id ) + time_check(self) response = requests.get( - self.endpoint + action_url, + CLOCKIFY_ENDPOINT + action_url, headers=self.headers ) try: @@ -323,8 +337,9 @@ class ClockifyAPI(metaclass=Singleton): "tagIds": current["tagIds"], "end": self.get_current_time() } + time_check(self) response = requests.put( - self.endpoint + action_url, + CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body ) @@ -336,8 +351,9 @@ class ClockifyAPI(metaclass=Singleton): if workspace_id is None: workspace_id = self.workspace_id action_url = 'workspaces/{}/timeEntries/'.format(workspace_id) + time_check(self) response = requests.get( - self.endpoint + action_url, + CLOCKIFY_ENDPOINT + action_url, headers=self.headers ) return response.json()[:quantity] @@ -348,8 +364,9 @@ class ClockifyAPI(metaclass=Singleton): action_url = 'workspaces/{}/timeEntries/{}'.format( workspace_id, tid ) + time_check(self) response = requests.delete( - self.endpoint + action_url, + CLOCKIFY_ENDPOINT + action_url, headers=self.headers ) return response.json() @@ -363,14 +380,15 @@ class ClockifyAPI(metaclass=Singleton): 
"clientId": "", "isPublic": "false", "estimate": { - # "estimate": "3600", + "estimate": 0, "type": "AUTO" }, "color": "#f44336", "billable": "true" } + time_check(self) response = requests.post( - self.endpoint + action_url, + CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body ) @@ -379,8 +397,9 @@ class ClockifyAPI(metaclass=Singleton): def add_workspace(self, name): action_url = 'workspaces/' body = {"name": name} + time_check(self) response = requests.post( - self.endpoint + action_url, + CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body ) @@ -398,8 +417,9 @@ class ClockifyAPI(metaclass=Singleton): "name": name, "projectId": project_id } + time_check(self) response = requests.post( - self.endpoint + action_url, + CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body ) @@ -412,8 +432,9 @@ class ClockifyAPI(metaclass=Singleton): body = { "name": name } + time_check(self) response = requests.post( - self.endpoint + action_url, + CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body ) @@ -427,8 +448,9 @@ class ClockifyAPI(metaclass=Singleton): action_url = '/workspaces/{}/projects/{}'.format( workspace_id, project_id ) + time_check(self) response = requests.delete( - self.endpoint + action_url, + CLOCKIFY_ENDPOINT + action_url, headers=self.headers, ) return response.json() diff --git a/pype/modules/clockify/constants.py b/pype/modules/clockify/constants.py new file mode 100644 index 0000000000..38ad4b64cf --- /dev/null +++ b/pype/modules/clockify/constants.py @@ -0,0 +1,17 @@ +import os +import appdirs + + +CLOCKIFY_FTRACK_SERVER_PATH = os.path.join( + os.path.dirname(__file__), "ftrack", "server" +) +CLOCKIFY_FTRACK_USER_PATH = os.path.join( + os.path.dirname(__file__), "ftrack", "user" +) +CREDENTIALS_JSON_PATH = os.path.normpath(os.path.join( + appdirs.user_data_dir("pype-app", "pype"), + "clockify.json" +)) + +ADMIN_PERMISSION_NAMES = ["WORKSPACE_OWN", "WORKSPACE_ADMIN"] +CLOCKIFY_ENDPOINT = 
"https://api.clockify.me/api/" diff --git a/pype/modules/clockify/ftrack/server/action_clockify_sync_server.py b/pype/modules/clockify/ftrack/server/action_clockify_sync_server.py new file mode 100644 index 0000000000..ae911f6258 --- /dev/null +++ b/pype/modules/clockify/ftrack/server/action_clockify_sync_server.py @@ -0,0 +1,166 @@ +import os +import json +from pype.modules.ftrack.lib import BaseAction +from pype.modules.clockify.clockify_api import ClockifyAPI + + +class SyncClocifyServer(BaseAction): + '''Synchronise project names and task types.''' + + identifier = "clockify.sync.server" + label = "Sync To Clockify (server)" + description = "Synchronise data to Clockify workspace" + + discover_role_list = ["Pypeclub", "Administrator", "project Manager"] + + def __init__(self, *args, **kwargs): + super(SyncClocifyServer, self).__init__(*args, **kwargs) + + workspace_name = os.environ.get("CLOCKIFY_WORKSPACE") + api_key = os.environ.get("CLOCKIFY_API_KEY") + self.clockapi = ClockifyAPI(api_key) + self.clockapi.set_workspace(workspace_name) + if api_key is None: + modified_key = "None" + else: + str_len = int(len(api_key) / 2) + start_replace = int(len(api_key) / 4) + modified_key = "" + for idx in range(len(api_key)): + if idx >= start_replace and idx < start_replace + str_len: + replacement = "X" + else: + replacement = api_key[idx] + modified_key += replacement + + self.log.info( + "Clockify info. 
Workspace: \"{}\" API key: \"{}\"".format( + str(workspace_name), str(modified_key) + ) + ) + + def discover(self, session, entities, event): + if ( + len(entities) != 1 + or entities[0].entity_type.lower() != "project" + ): + return False + + # Get user and check his roles + user_id = event.get("source", {}).get("user", {}).get("id") + if not user_id: + return False + + user = session.query("User where id is \"{}\"".format(user_id)).first() + if not user: + return False + + for role in user["user_security_roles"]: + if role["security_role"]["name"] in self.discover_role_list: + return True + return False + + def register(self): + self.session.event_hub.subscribe( + "topic=ftrack.action.discover", + self._discover, + priority=self.priority + ) + + launch_subscription = ( + "topic=ftrack.action.launch and data.actionIdentifier={}" + ).format(self.identifier) + self.session.event_hub.subscribe(launch_subscription, self._launch) + + def launch(self, session, entities, event): + if self.clockapi.workspace_id is None: + return { + "success": False, + "message": "Clockify Workspace or API key are not set!" + } + + if self.clockapi.validate_workspace_perm() is False: + return { + "success": False, + "message": "Missing permissions for this action!" 
+ } + + # JOB SETTINGS + user_id = event["source"]["user"]["id"] + user = session.query("User where id is " + user_id).one() + + job = session.create("Job", { + "user": user, + "status": "running", + "data": json.dumps({"description": "Sync Ftrack to Clockify"}) + }) + session.commit() + + project_entity = entities[0] + if project_entity.entity_type.lower() != "project": + project_entity = self.get_project_from_entity(project_entity) + + project_name = project_entity["full_name"] + self.log.info( + "Synchronization of project \"{}\" to clockify begins.".format( + project_name + ) + ) + task_types = ( + project_entity["project_schema"]["_task_type_schema"]["types"] + ) + task_type_names = [ + task_type["name"] for task_type in task_types + ] + try: + clockify_projects = self.clockapi.get_projects() + if project_name not in clockify_projects: + response = self.clockapi.add_project(project_name) + if "id" not in response: + self.log.warning( + "Project \"{}\" can't be created. Response: {}".format( + project_name, response + ) + ) + return { + "success": False, + "message": ( + "Can't create clockify project \"{}\"." + " Unexpected error." + ).format(project_name) + } + + clockify_workspace_tags = self.clockapi.get_tags() + for task_type_name in task_type_names: + if task_type_name in clockify_workspace_tags: + self.log.debug( + "Task \"{}\" already exist".format(task_type_name) + ) + continue + + response = self.clockapi.add_tag(task_type_name) + if "id" not in response: + self.log.warning( + "Task \"{}\" can't be created. 
Response: {}".format( + task_type_name, response + ) + ) + + job["status"] = "done" + + except Exception: + self.log.warning( + "Synchronization to clockify failed.", + exc_info=True + ) + + finally: + if job["status"] != "done": + job["status"] = "failed" + session.commit() + + return True + + +def register(session, **kw): + SyncClocifyServer(session).register() diff --git a/pype/modules/clockify/ftrack/user/action_clockify_sync_local.py b/pype/modules/clockify/ftrack/user/action_clockify_sync_local.py new file mode 100644 index 0000000000..e74bf3dbb3 --- /dev/null +++ b/pype/modules/clockify/ftrack/user/action_clockify_sync_local.py @@ -0,0 +1,122 @@ +import json +from pype.modules.ftrack.lib import BaseAction, statics_icon +from pype.modules.clockify.clockify_api import ClockifyAPI + + +class SyncClocifyLocal(BaseAction): + '''Synchronise project names and task types.''' + + #: Action identifier. + identifier = 'clockify.sync.local' + #: Action label. + label = 'Sync To Clockify (local)' + #: Action description. + description = 'Synchronise data to Clockify workspace' + #: roles that are allowed to register this action + role_list = ["Pypeclub", "Administrator", "project Manager"] + #: icon + icon = statics_icon("app_icons", "clockify-white.png") + + #: CLockifyApi + clockapi = ClockifyAPI() + + def discover(self, session, entities, event): + if ( + len(entities) == 1 + and entities[0].entity_type.lower() == "project" + ): + return True + return False + + def launch(self, session, entities, event): + self.clockapi.set_api() + if self.clockapi.workspace_id is None: + return { + "success": False, + "message": "Clockify Workspace or API key are not set!" + } + + if self.clockapi.validate_workspace_perm() is False: + return { + "success": False, + "message": "Missing permissions for this action!" 
+ } + + # JOB SETTINGS + userId = event['source']['user']['id'] + user = session.query('User where id is ' + userId).one() + + job = session.create('Job', { + 'user': user, + 'status': 'running', + 'data': json.dumps({ + 'description': 'Sync Ftrack to Clockify' + }) + }) + session.commit() + + project_entity = entities[0] + if project_entity.entity_type.lower() != "project": + project_entity = self.get_project_from_entity(project_entity) + + project_name = project_entity["full_name"] + self.log.info( + "Synchronization of project \"{}\" to clockify begins.".format( + project_name + ) + ) + task_types = ( + project_entity["project_schema"]["_task_type_schema"]["types"] + ) + task_type_names = [ + task_type["name"] for task_type in task_types + ] + try: + clockify_projects = self.clockapi.get_projects() + if project_name not in clockify_projects: + response = self.clockapi.add_project(project_name) + if "id" not in response: + self.log.warning( + "Project \"{}\" can't be created. Response: {}".format( + project_name, response + ) + ) + return { + "success": False, + "message": ( + "Can't create clockify project \"{}\"." + " Unexpected error." + ).format(project_name) + } + + clockify_workspace_tags = self.clockapi.get_tags() + for task_type_name in task_type_names: + if task_type_name in clockify_workspace_tags: + self.log.debug( + "Task \"{}\" already exist".format(task_type_name) + ) + continue + + response = self.clockapi.add_tag(task_type_name) + if "id" not in response: + self.log.warning( + "Task \"{}\" can't be created. 
Response: {}".format( + task_type_name, response + ) + ) + + job["status"] = "done" + + except Exception: + pass + + finally: + if job["status"] != "done": + job["status"] = "failed" + session.commit() + + return True + + +def register(session, **kw): + SyncClocifyLocal(session).register() diff --git a/pype/modules/clockify/ftrack_actions/action_clockify_sync.py b/pype/modules/clockify/ftrack_actions/action_clockify_sync.py deleted file mode 100644 index a041e6ada6..0000000000 --- a/pype/modules/clockify/ftrack_actions/action_clockify_sync.py +++ /dev/null @@ -1,155 +0,0 @@ -import os -import sys -import argparse -import logging -import json -import ftrack_api -from pype.modules.ftrack import BaseAction, MissingPermision -from pype.modules.clockify import ClockifyAPI - - -class SyncClocify(BaseAction): - '''Synchronise project names and task types.''' - - #: Action identifier. - identifier = 'clockify.sync' - #: Action label. - label = 'Sync To Clockify' - #: Action description. - description = 'Synchronise data to Clockify workspace' - #: roles that are allowed to register this action - role_list = ["Pypeclub", "Administrator", "project Manager"] - #: icon - icon = '{}/app_icons/clockify-white.png'.format( - os.environ.get('PYPE_STATICS_SERVER', '') - ) - #: CLockifyApi - clockapi = ClockifyAPI() - - def preregister(self): - if self.clockapi.workspace_id is None: - return "Clockify Workspace or API key are not set!" 
- - if self.clockapi.validate_workspace_perm() is False: - raise MissingPermision('Clockify') - - return True - - def discover(self, session, entities, event): - ''' Validation ''' - if len(entities) != 1: - return False - - if entities[0].entity_type.lower() != "project": - return False - return True - - def launch(self, session, entities, event): - # JOB SETTINGS - userId = event['source']['user']['id'] - user = session.query('User where id is ' + userId).one() - - job = session.create('Job', { - 'user': user, - 'status': 'running', - 'data': json.dumps({ - 'description': 'Sync Ftrack to Clockify' - }) - }) - session.commit() - try: - entity = entities[0] - - if entity.entity_type.lower() == 'project': - project = entity - else: - project = entity['project'] - project_name = project['full_name'] - - task_types = [] - for task_type in project['project_schema']['_task_type_schema'][ - 'types' - ]: - task_types.append(task_type['name']) - - clockify_projects = self.clockapi.get_projects() - - if project_name not in clockify_projects: - response = self.clockapi.add_project(project_name) - if 'id' not in response: - self.log.error('Project {} can\'t be created'.format( - project_name - )) - return { - 'success': False, - 'message': 'Can\'t create project, unexpected error' - } - project_id = response['id'] - else: - project_id = clockify_projects[project_name] - - clockify_workspace_tags = self.clockapi.get_tags() - for task_type in task_types: - if task_type not in clockify_workspace_tags: - response = self.clockapi.add_tag(task_type) - if 'id' not in response: - self.log.error('Task {} can\'t be created'.format( - task_type - )) - continue - except Exception: - job['status'] = 'failed' - session.commit() - return False - - job['status'] = 'done' - session.commit() - return True - - -def register(session, **kw): - '''Register plugin. 
Called when used as an plugin.''' - - if not isinstance(session, ftrack_api.session.Session): - return - - SyncClocify(session).register() - - -def main(arguments=None): - '''Set up logging and register action.''' - if arguments is None: - arguments = [] - - parser = argparse.ArgumentParser() - # Allow setting of logging level from arguments. - loggingLevels = {} - for level in ( - logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING, - logging.ERROR, logging.CRITICAL - ): - loggingLevels[logging.getLevelName(level).lower()] = level - - parser.add_argument( - '-v', '--verbosity', - help='Set the logging output verbosity.', - choices=loggingLevels.keys(), - default='info' - ) - namespace = parser.parse_args(arguments) - - # Set up basic logging - logging.basicConfig(level=loggingLevels[namespace.verbosity]) - - session = ftrack_api.Session() - register(session) - - # Wait for events - logging.info( - 'Registered actions and listening for events. Use Ctrl-C to abort.' - ) - session.event_hub.wait() - - -if __name__ == '__main__': - raise SystemExit(main(sys.argv[1:])) diff --git a/pype/modules/clockify/launcher_actions/ClockifyStart.py b/pype/modules/clockify/launcher_actions/ClockifyStart.py index d5e9164977..f97360662f 100644 --- a/pype/modules/clockify/launcher_actions/ClockifyStart.py +++ b/pype/modules/clockify/launcher_actions/ClockifyStart.py @@ -1,6 +1,6 @@ from avalon import api, io from pype.api import Logger -from pype.modules.clockify import ClockifyAPI +from pype.modules.clockify.clockify_api import ClockifyAPI log = Logger().get_logger(__name__, "clockify_start") diff --git a/pype/modules/clockify/launcher_actions/ClockifySync.py b/pype/modules/clockify/launcher_actions/ClockifySync.py index 0f20d1dce1..a77c038076 100644 --- a/pype/modules/clockify/launcher_actions/ClockifySync.py +++ b/pype/modules/clockify/launcher_actions/ClockifySync.py @@ -1,5 +1,5 @@ from avalon import api, io -from pype.modules.clockify import ClockifyAPI +from 
pype.modules.clockify.clockify_api import ClockifyAPI from pype.api import Logger log = Logger().get_logger(__name__, "clockify_sync") diff --git a/pype/modules/clockify/widget_message.py b/pype/modules/clockify/widget_message.py deleted file mode 100644 index 9e4fad1df1..0000000000 --- a/pype/modules/clockify/widget_message.py +++ /dev/null @@ -1,92 +0,0 @@ -from Qt import QtCore, QtGui, QtWidgets -from avalon import style -from pype.api import resources - - -class MessageWidget(QtWidgets.QWidget): - - SIZE_W = 300 - SIZE_H = 130 - - closed = QtCore.Signal() - - def __init__(self, parent=None, messages=[], title="Message"): - - super(MessageWidget, self).__init__() - - self._parent = parent - - # Icon - if parent and hasattr(parent, 'icon'): - self.setWindowIcon(parent.icon) - else: - icon = QtGui.QIcon(resources.pype_icon_filepath()) - self.setWindowIcon(icon) - - self.setWindowFlags( - QtCore.Qt.WindowCloseButtonHint | - QtCore.Qt.WindowMinimizeButtonHint - ) - - # Font - self.font = QtGui.QFont() - self.font.setFamily("DejaVu Sans Condensed") - self.font.setPointSize(9) - self.font.setBold(True) - self.font.setWeight(50) - self.font.setKerning(True) - - # Size setting - self.resize(self.SIZE_W, self.SIZE_H) - self.setMinimumSize(QtCore.QSize(self.SIZE_W, self.SIZE_H)) - self.setMaximumSize(QtCore.QSize(self.SIZE_W+100, self.SIZE_H+100)) - - # Style - self.setStyleSheet(style.load_stylesheet()) - - self.setLayout(self._ui_layout(messages)) - self.setWindowTitle(title) - - def _ui_layout(self, messages): - if not messages: - messages = ["*Misssing messages (This is a bug)*", ] - - elif not isinstance(messages, (tuple, list)): - messages = [messages, ] - - main_layout = QtWidgets.QVBoxLayout(self) - - labels = [] - for message in messages: - label = QtWidgets.QLabel(message) - label.setFont(self.font) - label.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor)) - label.setTextFormat(QtCore.Qt.RichText) - label.setWordWrap(True) - - labels.append(label) - 
main_layout.addWidget(label) - - btn_close = QtWidgets.QPushButton("Close") - btn_close.setToolTip('Close this window') - btn_close.clicked.connect(self.on_close_clicked) - - btn_group = QtWidgets.QHBoxLayout() - btn_group.addStretch(1) - btn_group.addWidget(btn_close) - - main_layout.addLayout(btn_group) - - self.labels = labels - self.btn_group = btn_group - self.btn_close = btn_close - self.main_layout = main_layout - - return main_layout - - def on_close_clicked(self): - self.close() - - def close(self, *args, **kwargs): - self.closed.emit() - super(MessageWidget, self).close(*args, **kwargs) diff --git a/pype/modules/clockify/widget_settings.py b/pype/modules/clockify/widgets.py similarity index 66% rename from pype/modules/clockify/widget_settings.py rename to pype/modules/clockify/widgets.py index 7e5ee300bb..dc57a48ecb 100644 --- a/pype/modules/clockify/widget_settings.py +++ b/pype/modules/clockify/widgets.py @@ -1,9 +1,97 @@ -import os from Qt import QtCore, QtGui, QtWidgets from avalon import style from pype.api import resources +class MessageWidget(QtWidgets.QWidget): + + SIZE_W = 300 + SIZE_H = 130 + + closed = QtCore.Signal() + + def __init__(self, parent=None, messages=[], title="Message"): + + super(MessageWidget, self).__init__() + + self._parent = parent + + # Icon + if parent and hasattr(parent, 'icon'): + self.setWindowIcon(parent.icon) + else: + icon = QtGui.QIcon(resources.pype_icon_filepath()) + self.setWindowIcon(icon) + + self.setWindowFlags( + QtCore.Qt.WindowCloseButtonHint | + QtCore.Qt.WindowMinimizeButtonHint + ) + + # Font + self.font = QtGui.QFont() + self.font.setFamily("DejaVu Sans Condensed") + self.font.setPointSize(9) + self.font.setBold(True) + self.font.setWeight(50) + self.font.setKerning(True) + + # Size setting + self.resize(self.SIZE_W, self.SIZE_H) + self.setMinimumSize(QtCore.QSize(self.SIZE_W, self.SIZE_H)) + self.setMaximumSize(QtCore.QSize(self.SIZE_W+100, self.SIZE_H+100)) + + # Style + 
self.setStyleSheet(style.load_stylesheet()) + + self.setLayout(self._ui_layout(messages)) + self.setWindowTitle(title) + + def _ui_layout(self, messages): + if not messages: + messages = ["*Misssing messages (This is a bug)*", ] + + elif not isinstance(messages, (tuple, list)): + messages = [messages, ] + + main_layout = QtWidgets.QVBoxLayout(self) + + labels = [] + for message in messages: + label = QtWidgets.QLabel(message) + label.setFont(self.font) + label.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor)) + label.setTextFormat(QtCore.Qt.RichText) + label.setWordWrap(True) + + labels.append(label) + main_layout.addWidget(label) + + btn_close = QtWidgets.QPushButton("Close") + btn_close.setToolTip('Close this window') + btn_close.clicked.connect(self.on_close_clicked) + + btn_group = QtWidgets.QHBoxLayout() + btn_group.addStretch(1) + btn_group.addWidget(btn_close) + + main_layout.addLayout(btn_group) + + self.labels = labels + self.btn_group = btn_group + self.btn_close = btn_close + self.main_layout = main_layout + + return main_layout + + def on_close_clicked(self): + self.close() + + def close(self, *args, **kwargs): + self.closed.emit() + super(MessageWidget, self).close(*args, **kwargs) + + class ClockifySettings(QtWidgets.QWidget): SIZE_W = 300 diff --git a/pype/modules/ftrack/actions/action_application_loader.py b/pype/modules/ftrack/actions/action_application_loader.py index ec7fc53fb6..ecc5a4fad3 100644 --- a/pype/modules/ftrack/actions/action_application_loader.py +++ b/pype/modules/ftrack/actions/action_application_loader.py @@ -3,8 +3,7 @@ import toml import time from pype.modules.ftrack.lib import AppAction from avalon import lib -from pype.api import Logger -from pype.lib import get_all_avalon_projects +from pype.api import Logger, config log = Logger().get_logger(__name__) @@ -49,17 +48,26 @@ def registerApp(app, session, plugins_presets): def register(session, plugins_presets={}): - # WARNING getting projects only helps to check connection to mongo 
- # - without will `discover` of ftrack apps actions take ages - result = get_all_avalon_projects() + app_usages = ( + config.get_presets() + .get("global", {}) + .get("applications") + ) or {} apps = [] - + missing_app_names = [] launchers_path = os.path.join(os.environ["PYPE_CONFIG"], "launchers") for file in os.listdir(launchers_path): filename, ext = os.path.splitext(file) if ext.lower() != ".toml": continue + + app_usage = app_usages.get(filename) + if not app_usage: + if app_usage is None: + missing_app_names.append(filename) + continue + loaded_data = toml.load(os.path.join(launchers_path, file)) app_data = { "name": filename, @@ -67,7 +75,17 @@ def register(session, plugins_presets={}): } apps.append(app_data) - apps = sorted(apps, key=lambda x: x['name']) + if missing_app_names: + log.debug( + "Apps not defined in applications usage. ({})".format( + ", ".join(( + "\"{}\"".format(app_name) + for app_name in missing_app_names + )) + ) + ) + + apps = sorted(apps, key=lambda app: app["name"]) app_counter = 0 for app in apps: try: @@ -76,7 +94,7 @@ def register(session, plugins_presets={}): time.sleep(0.1) app_counter += 1 except Exception as exc: - log.exception( + log.warning( "\"{}\" - not a proper App ({})".format(app['name'], str(exc)), exc_info=True ) diff --git a/pype/modules/ftrack/actions/action_clean_hierarchical_attributes.py b/pype/modules/ftrack/actions/action_clean_hierarchical_attributes.py index 86503ff5bc..e81e587f0a 100644 --- a/pype/modules/ftrack/actions/action_clean_hierarchical_attributes.py +++ b/pype/modules/ftrack/actions/action_clean_hierarchical_attributes.py @@ -1,7 +1,7 @@ import collections import ftrack_api from pype.modules.ftrack.lib import BaseAction, statics_icon -from pype.modules.ftrack.lib.avalon_sync import get_avalon_attr +from pype.modules.ftrack.lib.avalon_sync import get_pype_attr class CleanHierarchicalAttrsAction(BaseAction): @@ -48,7 +48,7 @@ class CleanHierarchicalAttrsAction(BaseAction): ) entity_ids_joined = ", 
".join(all_entities_ids) - attrs, hier_attrs = get_avalon_attr(session) + attrs, hier_attrs = get_pype_attr(session) for attr in hier_attrs: configuration_key = attr["key"] diff --git a/pype/modules/ftrack/actions/action_create_cust_attrs.py b/pype/modules/ftrack/actions/action_create_cust_attrs.py index 9845cc8876..21c4743725 100644 --- a/pype/modules/ftrack/actions/action_create_cust_attrs.py +++ b/pype/modules/ftrack/actions/action_create_cust_attrs.py @@ -1,99 +1,120 @@ +import os import collections +import toml import json import arrow import ftrack_api from pype.modules.ftrack.lib import BaseAction, statics_icon -from pype.modules.ftrack.lib.avalon_sync import CustAttrIdKey +from pype.modules.ftrack.lib.avalon_sync import ( + CUST_ATTR_ID_KEY, CUST_ATTR_GROUP, default_custom_attributes_definition +) from pype.api import config """ This action creates/updates custom attributes. -- first part take care about avalon_mongo_id attribute -- second part is based on json file in templates: - ~/PYPE-TEMPLATES/presets/ftrack/ftrack_custom_attributes.json - - you can add Custom attributes based on these conditions +## First part take care about special attributes + - `avalon_mongo_id` for storing Avalon MongoID + - `applications` based on applications usages + - `tools` based on tools usages + +## Second part is based on json file in ftrack module. +File location: `~/pype/pype/modules/ftrack/ftrack_custom_attributes.json` + +Data in json file is nested dictionary. Keys in first dictionary level +represents Ftrack entity type (task, show, assetversion, user, list, asset) +and dictionary value define attribute. + +There is special key for hierchical attributes `is_hierarchical`. + +Entity types `task` requires to define task object type (Folder, Shot, +Sequence, Task, Library, Milestone, Episode, Asset Build, etc.) at second +dictionary level, task's attributes are nested more. 
+ +*** Not Changeable ********************************************************* + +group (string) + - name of group + - based on attribute `pype.modules.ftrack.lib.CUST_ATTR_GROUP` + - "pype" by default *** Required *************************************************************** label (string) - - label that will show in ftrack + - label that will show in ftrack key (string) - - must contain only chars [a-z0-9_] + - must contain only chars [a-z0-9_] type (string) - - type of custom attribute - - possibilities: text, boolean, date, enumerator, dynamic enumerator, number + - type of custom attribute + - possibilities: + text, boolean, date, enumerator, dynamic enumerator, number *** Required with conditions *********************************************** -entity_type (string) - - if 'is_hierarchical' is set to False - - type of entity - - possibilities: task, show, assetversion, user, list, asset - config (dictionary) - - for each entity type different requirements and possibilities: - - enumerator: multiSelect = True/False(default: False) - data = {key_1:value_1,key_2:value_2,..,key_n:value_n} - - 'data' is Required value with enumerator - - 'key' must contain only chars [a-z0-9_] + - for each attribute type different requirements and possibilities: + - enumerator: + multiSelect = True/False(default: False) + data = {key_1:value_1,key_2:value_2,..,key_n:value_n} + - 'data' is Required value with enumerator + - 'key' must contain only chars [a-z0-9_] - - number: isdecimal = True/False(default: False) + - number: + isdecimal = True/False(default: False) - - text: markdown = True/False(default: False) + - text: + markdown = True/False(default: False) -object_type (string) - - IF ENTITY_TYPE is set to 'task' - - default possibilities: Folder, Shot, Sequence, Task, Library, - Milestone, Episode, Asset Build,... 
- -*** Optional *************************************************************** +*** Presetable keys ********************************************************** write_security_roles/read_security_roles (array of strings) - - default: ["ALL"] - - strings should be role names (e.g.: ["API", "Administrator"]) - - if set to ["ALL"] - all roles will be availabled - - if first is 'except' - roles will be set to all except roles in array - - Warning: Be carefull with except - roles can be different by company - - example: - write_security_roles = ["except", "User"] - read_security_roles = ["ALL"] - - User is unable to write but can read - -group (string) - - default: None - - name of group + - default: ["ALL"] + - strings should be role names (e.g.: ["API", "Administrator"]) + - if set to ["ALL"] - all roles will be availabled + - if first is 'except' - roles will be set to all except roles in array + - Warning: Be carefull with except - roles can be different by company + - example: + write_security_roles = ["except", "User"] + read_security_roles = ["ALL"] # (User is can only read) default - - default: None - - sets default value for custom attribute: - - text -> string - - number -> integer - - enumerator -> array with string of key/s - - boolean -> bool true/false - - date -> string in format: 'YYYY.MM.DD' or 'YYYY.MM.DD HH:mm:ss' - - example: "2018.12.24" / "2018.1.1 6:0:0" - - dynamic enumerator -> DON'T HAVE DEFAULT VALUE!!! + - default: None + - sets default value for custom attribute: + - text -> string + - number -> integer + - enumerator -> array with string of key/s + - boolean -> bool true/false + - date -> string in format: 'YYYY.MM.DD' or 'YYYY.MM.DD HH:mm:ss' + - example: "2018.12.24" / "2018.1.1 6:0:0" + - dynamic enumerator -> DON'T HAVE DEFAULT VALUE!!! 
-is_hierarchical (bool) - - default: False - - will set hierachical attribute - - False by default - -EXAMPLE: -{ +Example: +``` +"show": { "avalon_auto_sync": { - "label": "Avalon auto-sync", - "key": "avalon_auto_sync", - "type": "boolean", - "entity_type": "show", - "group": "avalon", - "default": false, - "write_security_role": ["API","Administrator"], - "read_security_role": ["API","Administrator"] + "label": "Avalon auto-sync", + "type": "boolean", + "write_security_role": ["API", "Administrator"], + "read_security_role": ["API", "Administrator"] + } +}, +"is_hierarchical": { + "fps": { + "label": "FPS", + "type": "number", + "config": {"isdecimal": true} + } +}, +"task": { + "library": { + "my_attr_name": { + "label": "My Attr", + "type": "number" + } } } +``` """ @@ -115,11 +136,15 @@ class CustomAttributes(BaseAction): role_list = ['Pypeclub', 'Administrator'] icon = statics_icon("ftrack", "action_icons", "PypeAdmin.svg") - required_keys = ['key', 'label', 'type'] - type_posibilities = [ - 'text', 'boolean', 'date', 'enumerator', - 'dynamic enumerator', 'number' - ] + required_keys = ("key", "label", "type") + + presetable_keys = ("default", "write_security_role", "read_security_role") + hierarchical_key = "is_hierarchical" + + type_posibilities = ( + "text", "boolean", "date", "enumerator", + "dynamic enumerator", "number" + ) def discover(self, session, entities, event): ''' @@ -141,21 +166,24 @@ class CustomAttributes(BaseAction): }) }) session.commit() + try: self.prepare_global_data(session) self.avalon_mongo_id_attributes(session, event) - self.custom_attributes_from_file(session, event) + self.applications_attribute(event) + self.tools_attribute(event) + self.intent_attribute(event) + self.custom_attributes_from_file(event) job['status'] = 'done' session.commit() - except Exception as exc: + except Exception: session.rollback() - job['status'] = 'failed' + job["status"] = "failed" session.commit() self.log.error( - 'Creating custom attributes failed 
({})'.format(exc), - exc_info=True + "Creating custom attributes failed ({})", exc_info=True ) return True @@ -182,20 +210,39 @@ class CustomAttributes(BaseAction): self.groups = {} + self.presets = config.get_presets() + self.attrs_presets = self.prepare_attribute_pressets() + + def prepare_attribute_pressets(self): + output = {} + + attr_presets = ( + self.presets.get("ftrack", {}).get("ftrack_custom_attributes") + ) or {} + for entity_type, preset in attr_presets.items(): + # Lower entity type + entity_type = entity_type.lower() + # Just store if entity type is not "task" + if entity_type != "task": + output[entity_type] = preset + continue + + # Prepare empty dictionary for entity type if not set yet + if entity_type not in output: + output[entity_type] = {} + + # Store presets per lowered object type + for obj_type, _preset in preset.items(): + output[entity_type][obj_type.lower()] = _preset + + return output + def avalon_mongo_id_attributes(self, session, event): + self.create_hierarchical_mongo_attr(session, event) + hierarchical_attr, object_type_attrs = ( self.mongo_id_custom_attributes(session) ) - - if hierarchical_attr is None: - self.create_hierarchical_mongo_attr(session) - hierarchical_attr, object_type_attrs = ( - self.mongo_id_custom_attributes(session) - ) - - if hierarchical_attr is None: - return - if object_type_attrs: self.convert_mongo_id_to_hierarchical( hierarchical_attr, object_type_attrs, session, event @@ -206,7 +253,7 @@ class CustomAttributes(BaseAction): "select id, entity_type, object_type_id, is_hierarchical, default" " from CustomAttributeConfiguration" " where key = \"{}\"" - ).format(CustAttrIdKey) + ).format(CUST_ATTR_ID_KEY) mongo_id_avalon_attr = session.query(cust_attrs_query).all() heirarchical_attr = None @@ -220,32 +267,22 @@ class CustomAttributes(BaseAction): return heirarchical_attr, object_type_attrs - def create_hierarchical_mongo_attr(self, session): - # Attribute Name and Label - cust_attr_label = "Avalon/Mongo ID" 
- + def create_hierarchical_mongo_attr(self, session, event): # Set security roles for attribute - role_list = ("API", "Administrator", "Pypeclub") - roles = self.get_security_roles(role_list) - # Set Text type of Attribute - custom_attribute_type = self.types_per_name["text"] - # Set group to 'avalon' - group = self.get_group("avalon") - + default_role_list = ("API", "Administrator", "Pypeclub") data = { - "key": CustAttrIdKey, - "label": cust_attr_label, - "type": custom_attribute_type, + "key": CUST_ATTR_ID_KEY, + "label": "Avalon/Mongo ID", + "type": "text", "default": "", - "write_security_roles": roles, - "read_security_roles": roles, - "group": group, + "write_security_roles": default_role_list, + "read_security_roles": default_role_list, + "group": CUST_ATTR_GROUP, "is_hierarchical": True, - "entity_type": "show", - "config": json.dumps({"markdown": False}) + "config": {"markdown": False} } - self.process_attribute(data) + self.process_attr_data(data, event) def convert_mongo_id_to_hierarchical( self, hierarchical_attr, object_type_attrs, session, event @@ -335,91 +372,261 @@ class CustomAttributes(BaseAction): exc_info=True ) - def custom_attributes_from_file(self, session, event): - presets = config.get_presets()['ftrack']['ftrack_custom_attributes'] + def application_definitions(self): + app_usages = self.presets.get("global", {}).get("applications") or {} - for cust_attr_data in presets: - cust_attr_name = cust_attr_data.get( - 'label', - cust_attr_data.get('key') + app_definitions = [] + launchers_path = os.path.join(os.environ["PYPE_CONFIG"], "launchers") + + missing_app_names = [] + for file in os.listdir(launchers_path): + app_name, ext = os.path.splitext(file) + if ext.lower() != ".toml": + continue + + if not app_usages.get(app_name): + missing_app_names.append(app_name) + continue + + loaded_data = toml.load(os.path.join(launchers_path, file)) + + ftrack_label = loaded_data.get("ftrack_label") + if ftrack_label: + parts = app_name.split("_") + if 
len(parts) > 1: + ftrack_label = " ".join((ftrack_label, parts[-1])) + else: + ftrack_label = loaded_data.get("label", app_name) + + app_definitions.append({app_name: ftrack_label}) + + if missing_app_names: + self.log.warning( + "Apps not defined in applications usage. ({})".format( + ", ".join(( + "\"{}\"".format(app_name) + for app_name in missing_app_names + )) + ) ) - try: - data = {} - # Get key, label, type - data.update(self.get_required(cust_attr_data)) - # Get hierachical/ entity_type/ object_id - data.update(self.get_entity_type(cust_attr_data)) - # Get group, default, security roles - data.update(self.get_optional(cust_attr_data)) - # Process data - self.process_attribute(data) - except CustAttrException as cae: - if cust_attr_name: - msg = 'Custom attribute error "{}" - {}'.format( - cust_attr_name, str(cae) - ) - else: - msg = 'Custom attribute error - {}'.format(str(cae)) - self.log.warning(msg, exc_info=True) - self.show_message(event, msg) + # Make sure there is at least one item + if not app_definitions: + app_definitions.append({"empty": "< Empty >"}) + return app_definitions - return True + def applications_attribute(self, event): + applications_custom_attr_data = { + "label": "Applications", + "key": "applications", + "type": "enumerator", + "entity_type": "show", + "group": CUST_ATTR_GROUP, + "config": { + "multiselect": True, + "data": self.application_definitions() + } + } + self.process_attr_data(applications_custom_attr_data, event) + + def tools_attribute(self, event): + tool_usages = self.presets.get("global", {}).get("tools") or {} + tools_data = [] + for tool_name, usage in tool_usages.items(): + if usage: + tools_data.append({tool_name: tool_name}) + + # Make sure there is at least one item + if not tools_data: + tools_data.append({"empty": "< Empty >"}) + + tools_custom_attr_data = { + "label": "Tools", + "key": "tools_env", + "type": "enumerator", + "is_hierarchical": True, + "group": CUST_ATTR_GROUP, + "config": { + "multiselect": 
True, + "data": tools_data + } + } + self.process_attr_data(tools_custom_attr_data, event) + + def intent_attribute(self, event): + intent_key_values = ( + self.presets + .get("global", {}) + .get("intent", {}) + .get("items", {}) + ) or {} + + intent_values = [] + for key, label in intent_key_values.items(): + if not key or not label: + self.log.info(( + "Skipping intent row: {{\"{}\": \"{}\"}}" + " because of empty key or label." + ).format(key, label)) + continue + + intent_values.append({key: label}) + + if not intent_values: + return + + intent_custom_attr_data = { + "label": "Intent", + "key": "intent", + "type": "enumerator", + "entity_type": "assetversion", + "group": CUST_ATTR_GROUP, + "config": { + "multiselect": False, + "data": intent_values + } + } + self.process_attr_data(intent_custom_attr_data, event) + + def custom_attributes_from_file(self, event): + # Load json with custom attributes configurations + cust_attr_def = default_custom_attributes_definition() + attrs_data = [] + + # Prepare data of hierarchical attributes + hierarchical_attrs = cust_attr_def.pop(self.hierarchical_key, {}) + for key, cust_attr_data in hierarchical_attrs.items(): + cust_attr_data["key"] = key + cust_attr_data["is_hierarchical"] = True + attrs_data.append(cust_attr_data) + + # Prepare data of entity specific attributes + for entity_type, cust_attr_datas in cust_attr_def.items(): + if entity_type.lower() != "task": + for key, cust_attr_data in cust_attr_datas.items(): + cust_attr_data["key"] = key + cust_attr_data["entity_type"] = entity_type + attrs_data.append(cust_attr_data) + continue + + # Task should have nested level for object type + for object_type, _cust_attr_datas in cust_attr_datas.items(): + for key, cust_attr_data in _cust_attr_datas.items(): + cust_attr_data["key"] = key + cust_attr_data["entity_type"] = entity_type + cust_attr_data["object_type"] = object_type + attrs_data.append(cust_attr_data) + + # Process prepared data + for cust_attr_data in 
attrs_data: + # Add group + cust_attr_data["group"] = CUST_ATTR_GROUP + self.process_attr_data(cust_attr_data, event) + + def presets_for_attr_data(self, attr_data): + output = {} + + attr_key = attr_data["key"] + if attr_data.get("is_hierarchical"): + entity_key = self.hierarchical_key + else: + entity_key = attr_data["entity_type"] + + entity_presets = self.attrs_presets.get(entity_key) or {} + if entity_key.lower() == "task": + object_type = attr_data["object_type"] + entity_presets = entity_presets.get(object_type.lower()) or {} + + key_presets = entity_presets.get(attr_key) or {} + + for key, value in key_presets.items(): + if key in self.presetable_keys and value: + output[key] = value + return output + + def process_attr_data(self, cust_attr_data, event): + attr_presets = self.presets_for_attr_data(cust_attr_data) + cust_attr_data.update(attr_presets) + + try: + data = {} + # Get key, label, type + data.update(self.get_required(cust_attr_data)) + # Get hierachical/ entity_type/ object_id + data.update(self.get_entity_type(cust_attr_data)) + # Get group, default, security roles + data.update(self.get_optional(cust_attr_data)) + # Process data + self.process_attribute(data) + + except CustAttrException as cae: + cust_attr_name = cust_attr_data.get("label", cust_attr_data["key"]) + + if cust_attr_name: + msg = 'Custom attribute error "{}" - {}'.format( + cust_attr_name, str(cae) + ) + else: + msg = 'Custom attribute error - {}'.format(str(cae)) + self.log.warning(msg, exc_info=True) + self.show_message(event, msg) def process_attribute(self, data): - existing_atr = self.session.query('CustomAttributeConfiguration').all() + existing_attrs = self.session.query( + "CustomAttributeConfiguration" + ).all() matching = [] - for attr in existing_atr: + for attr in existing_attrs: if ( - attr['key'] != data['key'] or - attr['type']['name'] != data['type']['name'] + attr["key"] != data["key"] or + attr["type"]["name"] != data["type"]["name"] ): continue - if 
data.get('is_hierarchical', False) is True: - if attr['is_hierarchical'] is True: + if data.get("is_hierarchical") is True: + if attr["is_hierarchical"] is True: matching.append(attr) - elif 'object_type_id' in data: + elif "object_type_id" in data: if ( - attr['entity_type'] == data['entity_type'] and - attr['object_type_id'] == data['object_type_id'] + attr["entity_type"] == data["entity_type"] and + attr["object_type_id"] == data["object_type_id"] ): matching.append(attr) else: - if attr['entity_type'] == data['entity_type']: + if attr["entity_type"] == data["entity_type"]: matching.append(attr) if len(matching) == 0: - self.session.create('CustomAttributeConfiguration', data) + self.session.create("CustomAttributeConfiguration", data) self.session.commit() self.log.debug( - '{}: "{}" created'.format(self.label, data['label']) + "Custom attribute \"{}\" created".format(data["label"]) ) elif len(matching) == 1: attr_update = matching[0] for key in data: - if ( - key not in [ - 'is_hierarchical', 'entity_type', 'object_type_id' - ] + if key not in ( + "is_hierarchical", "entity_type", "object_type_id" ): attr_update[key] = data[key] - self.log.debug( - '{}: "{}" updated'.format(self.label, data['label']) - ) self.session.commit() + self.log.debug( + "Custom attribute \"{}\" updated".format(data["label"]) + ) else: - raise CustAttrException('Is duplicated') + raise CustAttrException(( + "Custom attribute is duplicated. 
Key: \"{}\" Type: \"{}\"" + ).format(data["key"], data["type"]["name"])) def get_required(self, attr): output = {} for key in self.required_keys: if key not in attr: raise CustAttrException( - 'Key {} is required - please set'.format(key) + "BUG: Key \"{}\" is required".format(key) ) if attr['type'].lower() not in self.type_posibilities: @@ -593,17 +800,17 @@ class CustomAttributes(BaseAction): def get_optional(self, attr): output = {} - if 'group' in attr: - output['group'] = self.get_group(attr) - if 'default' in attr: - output['default'] = self.get_default(attr) + if "group" in attr: + output["group"] = self.get_group(attr) + if "default" in attr: + output["default"] = self.get_default(attr) roles_read = [] roles_write = [] - if 'read_security_roles' in output: - roles_read = attr['read_security_roles'] - if 'read_security_roles' in output: - roles_write = attr['write_security_roles'] + if "read_security_roles" in attr: + roles_read = attr["read_security_roles"] + if "write_security_roles" in attr: + roles_write = attr["write_security_roles"] output['read_security_roles'] = self.get_security_roles(roles_read) output['write_security_roles'] = self.get_security_roles(roles_write) diff --git a/pype/modules/ftrack/actions/action_delete_asset.py b/pype/modules/ftrack/actions/action_delete_asset.py index 27394770e1..7d2dac3320 100644 --- a/pype/modules/ftrack/actions/action_delete_asset.py +++ b/pype/modules/ftrack/actions/action_delete_asset.py @@ -5,7 +5,7 @@ from queue import Queue from bson.objectid import ObjectId from pype.modules.ftrack.lib import BaseAction, statics_icon -from pype.modules.ftrack.lib.io_nonsingleton import DbConnector +from avalon.api import AvalonMongoDB class DeleteAssetSubset(BaseAction): @@ -21,7 +21,7 @@ class DeleteAssetSubset(BaseAction): #: roles that are allowed to register this action role_list = ["Pypeclub", "Administrator", "Project Manager"] #: Db connection - dbcon = DbConnector() + dbcon = AvalonMongoDB() splitter = {"type": 
"label", "value": "---"} action_data_by_id = {} diff --git a/pype/modules/ftrack/actions/action_delete_old_versions.py b/pype/modules/ftrack/actions/action_delete_old_versions.py index 46652b136a..b55f091fdc 100644 --- a/pype/modules/ftrack/actions/action_delete_old_versions.py +++ b/pype/modules/ftrack/actions/action_delete_old_versions.py @@ -6,7 +6,7 @@ import clique from pymongo import UpdateOne from pype.modules.ftrack.lib import BaseAction, statics_icon -from pype.modules.ftrack.lib.io_nonsingleton import DbConnector +from avalon.api import AvalonMongoDB from pype.api import Anatomy import avalon.pipeline @@ -24,7 +24,7 @@ class DeleteOldVersions(BaseAction): role_list = ["Pypeclub", "Project Manager", "Administrator"] icon = statics_icon("ftrack", "action_icons", "PypeAdmin.svg") - dbcon = DbConnector() + dbcon = AvalonMongoDB() inteface_title = "Choose your preferences" splitter_item = {"type": "label", "value": "---"} @@ -105,11 +105,34 @@ class DeleteOldVersions(BaseAction): "value": False }) + items.append(self.splitter_item) + + items.append({ + "type": "label", + "value": ( + "This will NOT delete any files and only return the " + "total size of the files." 
+ ) + }) + items.append({ + "type": "boolean", + "name": "only_calculate", + "label": "Only calculate size of files.", + "value": False + }) + return { "items": items, "title": self.inteface_title } + def sizeof_fmt(self, num, suffix='B'): + for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: + if abs(num) < 1024.0: + return "%3.1f%s%s" % (num, unit, suffix) + num /= 1024.0 + return "%.1f%s%s" % (num, 'Yi', suffix) + def launch(self, session, entities, event): values = event["data"].get("values") if not values: @@ -117,6 +140,7 @@ class DeleteOldVersions(BaseAction): versions_count = int(values["last_versions_count"]) force_to_remove = values["force_delete_publish_folder"] + only_calculate = values["only_calculate"] _val1 = "OFF" if force_to_remove: @@ -318,10 +342,29 @@ class DeleteOldVersions(BaseAction): "Folder does not exist. Deleting it's files skipped: {}" ).format(paths_msg)) + # Size of files. + size = 0 + + if only_calculate: + if force_to_remove: + size = self.delete_whole_dir_paths( + dir_paths.values(), delete=False + ) + else: + size = self.delete_only_repre_files( + dir_paths, file_paths_by_dir, delete=False + ) + + msg = "Total size of files: " + self.sizeof_fmt(size) + + self.log.warning(msg) + + return {"success": True, "message": msg} + if force_to_remove: - self.delete_whole_dir_paths(dir_paths.values()) + size = self.delete_whole_dir_paths(dir_paths.values()) else: - self.delete_only_repre_files(dir_paths, file_paths_by_dir) + size = self.delete_only_repre_files(dir_paths, file_paths_by_dir) mongo_changes_bulk = [] for version in versions: @@ -383,17 +426,31 @@ class DeleteOldVersions(BaseAction): "message": msg } - return True + msg = "Total size of files deleted: " + self.sizeof_fmt(size) + + self.log.warning(msg) + + return {"success": True, "message": msg} + + def delete_whole_dir_paths(self, dir_paths, delete=True): + size = 0 - def delete_whole_dir_paths(self, dir_paths): for dir_path in dir_paths: # Delete all files and fodlers 
in dir path for root, dirs, files in os.walk(dir_path, topdown=False): for name in files: - os.remove(os.path.join(root, name)) + file_path = os.path.join(root, name) + size += os.path.getsize(file_path) + if delete: + os.remove(file_path) + self.log.debug("Removed file: {}".format(file_path)) for name in dirs: - os.rmdir(os.path.join(root, name)) + if delete: + os.rmdir(os.path.join(root, name)) + + if not delete: + continue # Delete even the folder and it's parents folders if they are empty while True: @@ -406,7 +463,11 @@ class DeleteOldVersions(BaseAction): os.rmdir(os.path.join(dir_path)) - def delete_only_repre_files(self, dir_paths, file_paths): + return size + + def delete_only_repre_files(self, dir_paths, file_paths, delete=True): + size = 0 + for dir_id, dir_path in dir_paths.items(): dir_files = os.listdir(dir_path) collections, remainders = clique.assemble(dir_files) @@ -420,8 +481,13 @@ class DeleteOldVersions(BaseAction): "File was not found: {}".format(file_path) ) continue - os.remove(file_path) - self.log.debug("Removed file: {}".format(file_path)) + + size += os.path.getsize(file_path) + + if delete: + os.remove(file_path) + self.log.debug("Removed file: {}".format(file_path)) + remainders.remove(file_path_base) continue @@ -440,21 +506,34 @@ class DeleteOldVersions(BaseAction): final_col.head = os.path.join(dir_path, final_col.head) for _file_path in final_col: if os.path.exists(_file_path): - os.remove(_file_path) + + size += os.path.getsize(_file_path) + + if delete: + os.remove(_file_path) + self.log.debug( + "Removed file: {}".format(_file_path) + ) + _seq_path = final_col.format("{head}{padding}{tail}") self.log.debug("Removed files: {}".format(_seq_path)) collections.remove(final_col) elif os.path.exists(file_path): - os.remove(file_path) - self.log.debug("Removed file: {}".format(file_path)) + size += os.path.getsize(file_path) + if delete: + os.remove(file_path) + self.log.debug("Removed file: {}".format(file_path)) else: 
self.log.warning( "File was not found: {}".format(file_path) ) # Delete as much as possible parent folders + if not delete: + return size + for dir_path in dir_paths.values(): while True: if not os.path.exists(dir_path): @@ -467,6 +546,8 @@ class DeleteOldVersions(BaseAction): self.log.debug("Removed folder: {}".format(dir_path)) os.rmdir(dir_path) + return size + def path_from_represenation(self, representation, anatomy): try: template = representation["data"]["template"] diff --git a/pype/modules/ftrack/actions/action_delivery.py b/pype/modules/ftrack/actions/action_delivery.py index a2048222e5..8812ce9bc7 100644 --- a/pype/modules/ftrack/actions/action_delivery.py +++ b/pype/modules/ftrack/actions/action_delivery.py @@ -1,5 +1,6 @@ import os import copy +import json import shutil import collections @@ -9,10 +10,10 @@ from bson.objectid import ObjectId from avalon import pipeline from avalon.vendor import filelink -from pype.api import Anatomy +from pype.api import Anatomy, config from pype.modules.ftrack.lib import BaseAction, statics_icon -from pype.modules.ftrack.lib.avalon_sync import CustAttrIdKey -from pype.modules.ftrack.lib.io_nonsingleton import DbConnector +from pype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY +from avalon.api import AvalonMongoDB class Delivery(BaseAction): @@ -23,7 +24,7 @@ class Delivery(BaseAction): role_list = ["Pypeclub", "Administrator", "Project manager"] icon = statics_icon("ftrack", "action_icons", "Delivery.svg") - db_con = DbConnector() + db_con = AvalonMongoDB() def discover(self, session, entities, event): for entity in entities: @@ -41,36 +42,22 @@ class Delivery(BaseAction): items = [] item_splitter = {"type": "label", "value": "---"} - # Prepare component names for processing - components = None - project = None - for entity in entities: - if project is None: - project_id = None - for ent_info in entity["link"]: - if ent_info["type"].lower() == "project": - project_id = ent_info["id"] - break + project_entity 
= self.get_project_from_entity(entities[0]) + project_name = project_entity["full_name"] + self.db_con.install() + self.db_con.Session["AVALON_PROJECT"] = project_name + project_doc = self.db_con.find_one({"type": "project"}) + if not project_doc: + return { + "success": False, + "message": ( + "Didn't found project \"{}\" in avalon." + ).format(project_name) + } - if project_id is None: - project = entity["asset"]["parent"]["project"] - else: - project = session.query(( - "select id, full_name from Project where id is \"{}\"" - ).format(project_id)).one() + repre_names = self._get_repre_names(entities) + self.db_con.uninstall() - _components = set( - [component["name"] for component in entity["components"]] - ) - if components is None: - components = _components - continue - - components = components.intersection(_components) - if not components: - break - - project_name = project["full_name"] items.append({ "type": "hidden", "name": "__project_name__", @@ -81,17 +68,19 @@ class Delivery(BaseAction): anatomy = Anatomy(project_name) new_anatomies = [] first = None - for key in (anatomy.templates.get("delivery") or {}): - new_anatomies.append({ - "label": key, - "value": key - }) - if first is None: - first = key + for key, template in (anatomy.templates.get("delivery") or {}).items(): + # Use only keys with `{root}` or `{root[*]}` in value + if isinstance(template, str) and "{root" in template: + new_anatomies.append({ + "label": key, + "value": key + }) + if first is None: + first = key skipped = False # Add message if there are any common components - if not components or not new_anatomies: + if not repre_names or not new_anatomies: skipped = True items.append({ "type": "label", @@ -104,7 +93,7 @@ class Delivery(BaseAction): "value": skipped }) - if not components: + if not repre_names: if len(entities) == 1: items.append({ "type": "label", @@ -141,12 +130,12 @@ class Delivery(BaseAction): "type": "label" }) - for component in components: + for repre_name in 
repre_names: items.append({ "type": "boolean", "value": False, - "label": component, - "name": component + "label": repre_name, + "name": repre_name }) items.append(item_splitter) @@ -196,27 +185,233 @@ class Delivery(BaseAction): "title": title } + def _get_repre_names(self, entities): + version_ids = self._get_interest_version_ids(entities) + repre_docs = self.db_con.find({ + "type": "representation", + "parent": {"$in": version_ids} + }) + return list(sorted(repre_docs.distinct("name"))) + + def _get_interest_version_ids(self, entities): + parent_ent_by_id = {} + subset_names = set() + version_nums = set() + for entity in entities: + asset = entity["asset"] + parent = asset["parent"] + parent_ent_by_id[parent["id"]] = parent + + subset_name = asset["name"] + subset_names.add(subset_name) + + version = entity["version"] + version_nums.add(version) + + asset_docs_by_ftrack_id = self._get_asset_docs(parent_ent_by_id) + subset_docs = self._get_subset_docs( + asset_docs_by_ftrack_id, subset_names, entities + ) + version_docs = self._get_version_docs( + asset_docs_by_ftrack_id, subset_docs, version_nums, entities + ) + + return [version_doc["_id"] for version_doc in version_docs] + + def _get_version_docs( + self, asset_docs_by_ftrack_id, subset_docs, version_nums, entities + ): + subset_docs_by_id = { + subset_doc["_id"]: subset_doc + for subset_doc in subset_docs + } + version_docs = list(self.db_con.find({ + "type": "version", + "parent": {"$in": list(subset_docs_by_id.keys())}, + "name": {"$in": list(version_nums)} + })) + version_docs_by_parent_id = collections.defaultdict(dict) + for version_doc in version_docs: + subset_doc = subset_docs_by_id[version_doc["parent"]] + + asset_id = subset_doc["parent"] + subset_name = subset_doc["name"] + version = version_doc["name"] + if version_docs_by_parent_id[asset_id].get(subset_name) is None: + version_docs_by_parent_id[asset_id][subset_name] = {} + + version_docs_by_parent_id[asset_id][subset_name][version] = ( + 
version_doc + ) + + filtered_versions = [] + for entity in entities: + asset = entity["asset"] + + parent = asset["parent"] + asset_doc = asset_docs_by_ftrack_id[parent["id"]] + + subsets_by_name = version_docs_by_parent_id.get(asset_doc["_id"]) + if not subsets_by_name: + continue + + subset_name = asset["name"] + version_docs_by_version = subsets_by_name.get(subset_name) + if not version_docs_by_version: + continue + + version = entity["version"] + version_doc = version_docs_by_version.get(version) + if version_doc: + filtered_versions.append(version_doc) + return filtered_versions + + def _get_subset_docs( + self, asset_docs_by_ftrack_id, subset_names, entities + ): + asset_doc_ids = list() + for asset_doc in asset_docs_by_ftrack_id.values(): + asset_doc_ids.append(asset_doc["_id"]) + + subset_docs = list(self.db_con.find({ + "type": "subset", + "parent": {"$in": asset_doc_ids}, + "name": {"$in": list(subset_names)} + })) + subset_docs_by_parent_id = collections.defaultdict(dict) + for subset_doc in subset_docs: + asset_id = subset_doc["parent"] + subset_name = subset_doc["name"] + subset_docs_by_parent_id[asset_id][subset_name] = subset_doc + + filtered_subsets = [] + for entity in entities: + asset = entity["asset"] + + parent = asset["parent"] + asset_doc = asset_docs_by_ftrack_id[parent["id"]] + + subsets_by_name = subset_docs_by_parent_id.get(asset_doc["_id"]) + if not subsets_by_name: + continue + + subset_name = asset["name"] + subset_doc = subsets_by_name.get(subset_name) + if subset_doc: + filtered_subsets.append(subset_doc) + return filtered_subsets + + def _get_asset_docs(self, parent_ent_by_id): + asset_docs = list(self.db_con.find({ + "type": "asset", + "data.ftrackId": {"$in": list(parent_ent_by_id.keys())} + })) + asset_docs_by_ftrack_id = { + asset_doc["data"]["ftrackId"]: asset_doc + for asset_doc in asset_docs + } + + entities_by_mongo_id = {} + entities_by_names = {} + for ftrack_id, entity in parent_ent_by_id.items(): + if ftrack_id not in 
asset_docs_by_ftrack_id: + parent_mongo_id = entity["custom_attributes"].get( + CUST_ATTR_ID_KEY + ) + if parent_mongo_id: + entities_by_mongo_id[ObjectId(parent_mongo_id)] = entity + else: + entities_by_names[entity["name"]] = entity + + expressions = [] + if entities_by_mongo_id: + expression = { + "type": "asset", + "_id": {"$in": list(entities_by_mongo_id.keys())} + } + expressions.append(expression) + + if entities_by_names: + expression = { + "type": "asset", + "name": {"$in": list(entities_by_names.keys())} + } + expressions.append(expression) + + if expressions: + if len(expressions) == 1: + filter = expressions[0] + else: + filter = {"$or": expressions} + + asset_docs = self.db_con.find(filter) + for asset_doc in asset_docs: + if asset_doc["_id"] in entities_by_mongo_id: + entity = entities_by_mongo_id[asset_doc["_id"]] + asset_docs_by_ftrack_id[entity["id"]] = asset_doc + + elif asset_doc["name"] in entities_by_names: + entity = entities_by_names[asset_doc["name"]] + asset_docs_by_ftrack_id[entity["id"]] = asset_doc + + return asset_docs_by_ftrack_id + def launch(self, session, entities, event): if "values" not in event["data"]: return - self.report_items = collections.defaultdict(list) - values = event["data"]["values"] skipped = values.pop("__skipped__") if skipped: return None - component_names = [] + user_id = event["source"]["user"]["id"] + user_entity = session.query( + "User where id is {}".format(user_id) + ).one() + + job = session.create("Job", { + "user": user_entity, + "status": "running", + "data": json.dumps({ + "description": "Delivery processing." 
+ }) + }) + session.commit() + + try: + self.db_con.install() + self.real_launch(session, entities, event) + job["status"] = "done" + + except Exception: + self.log.warning( + "Failed during processing delivery action.", + exc_info=True + ) + + finally: + if job["status"] != "done": + job["status"] = "failed" + session.commit() + self.db_con.uninstall() + + def real_launch(self, session, entities, event): + self.log.info("Delivery action just started.") + report_items = collections.defaultdict(list) + + values = event["data"]["values"] + location_path = values.pop("__location_path__") anatomy_name = values.pop("__new_anatomies__") project_name = values.pop("__project_name__") + repre_names = [] for key, value in values.items(): if value is True: - component_names.append(key) + repre_names.append(key) - if not component_names: + if not repre_names: return { "success": True, "message": "Not selected components to deliver." @@ -226,76 +421,44 @@ class Delivery(BaseAction): if location_path: location_path = os.path.normpath(location_path) if not os.path.exists(location_path): - return { - "success": False, - "message": ( - "Entered location path does not exists. 
\"{}\"" - ).format(location_path) - } + os.makedirs(location_path) - self.db_con.install() self.db_con.Session["AVALON_PROJECT"] = project_name - repres_to_deliver = [] - for entity in entities: - asset = entity["asset"] - subset_name = asset["name"] - version = entity["version"] - - parent = asset["parent"] - parent_mongo_id = parent["custom_attributes"].get(CustAttrIdKey) - if parent_mongo_id: - parent_mongo_id = ObjectId(parent_mongo_id) - else: - asset_ent = self.db_con.find_one({ - "type": "asset", - "data.ftrackId": parent["id"] - }) - if not asset_ent: - ent_path = "/".join( - [ent["name"] for ent in parent["link"]] - ) - msg = "Not synchronized entities to avalon" - self.report_items[msg].append(ent_path) - self.log.warning("{} <{}>".format(msg, ent_path)) - continue - - parent_mongo_id = asset_ent["_id"] - - subset_ent = self.db_con.find_one({ - "type": "subset", - "parent": parent_mongo_id, - "name": subset_name - }) - - version_ent = self.db_con.find_one({ - "type": "version", - "name": version, - "parent": subset_ent["_id"] - }) - - repre_ents = self.db_con.find({ - "type": "representation", - "parent": version_ent["_id"] - }) - - repres_by_name = {} - for repre in repre_ents: - repre_name = repre["name"] - repres_by_name[repre_name] = repre - - for component in entity["components"]: - comp_name = component["name"] - if comp_name not in component_names: - continue - - repre = repres_by_name.get(comp_name) - repres_to_deliver.append(repre) + self.log.debug("Collecting representations to process.") + version_ids = self._get_interest_version_ids(entities) + repres_to_deliver = list(self.db_con.find({ + "type": "representation", + "parent": {"$in": version_ids}, + "name": {"$in": repre_names} + })) anatomy = Anatomy(project_name) + + format_dict = {} + if location_path: + location_path = location_path.replace("\\", "/") + root_names = anatomy.root_names_from_templates( + anatomy.templates["delivery"] + ) + if root_names is None: + format_dict["root"] = 
location_path + else: + format_dict["root"] = {} + for name in root_names: + format_dict["root"][name] = location_path + + datetime_data = config.get_datetime_data() for repre in repres_to_deliver: + source_path = repre.get("data", {}).get("path") + debug_msg = "Processing representation {}".format(repre["_id"]) + if source_path: + debug_msg += " with published path {}.".format(source_path) + self.log.debug(debug_msg) + # Get destination repre path anatomy_data = copy.deepcopy(repre["context"]) + anatomy_data.update(datetime_data) anatomy_filled = anatomy.format_all(anatomy_data) test_path = anatomy_filled["delivery"][anatomy_name] @@ -322,7 +485,7 @@ class Delivery(BaseAction): "- Invalid value DataType: \"{}\"
" ).format(str(repre["_id"]), keys) - self.report_items[msg].append(sub_msg) + report_items[msg].append(sub_msg) self.log.warning( "{} Representation: \"{}\" Filled: <{}>".format( msg, str(repre["_id"]), str(test_path) @@ -339,25 +502,32 @@ class Delivery(BaseAction): repre_path = self.path_from_represenation(repre, anatomy) # TODO add backup solution where root of path from component # is repalced with root + args = ( + repre_path, + anatomy, + anatomy_name, + anatomy_data, + format_dict, + report_items + ) if not frame: - self.process_single_file( - repre_path, anatomy, anatomy_name, anatomy_data - ) - + self.process_single_file(*args) else: - self.process_sequence( - repre_path, anatomy, anatomy_name, anatomy_data - ) + self.process_sequence(*args) - self.db_con.uninstall() - - return self.report() + return self.report(report_items) def process_single_file( - self, repre_path, anatomy, anatomy_name, anatomy_data + self, repre_path, anatomy, anatomy_name, anatomy_data, format_dict, + report_items ): anatomy_filled = anatomy.format(anatomy_data) - delivery_path = anatomy_filled["delivery"][anatomy_name] + if format_dict: + template_result = anatomy_filled["delivery"][anatomy_name] + delivery_path = template_result.rootless.format(**format_dict) + else: + delivery_path = anatomy_filled["delivery"][anatomy_name] + delivery_folder = os.path.dirname(delivery_path) if not os.path.exists(delivery_folder): os.makedirs(delivery_folder) @@ -365,7 +535,8 @@ class Delivery(BaseAction): self.copy_file(repre_path, delivery_path) def process_sequence( - self, repre_path, anatomy, anatomy_name, anatomy_data + self, repre_path, anatomy, anatomy_name, anatomy_data, format_dict, + report_items ): dir_path, file_name = os.path.split(str(repre_path)) @@ -379,7 +550,7 @@ class Delivery(BaseAction): if not file_name_items: msg = "Source file was not found" - self.report_items[msg].append(repre_path) + report_items[msg].append(repre_path) self.log.warning("{} <{}>".format(msg, 
repre_path)) return @@ -399,7 +570,7 @@ class Delivery(BaseAction): if src_collection is None: # TODO log error! msg = "Source collection of files was not found" - self.report_items[msg].append(repre_path) + report_items[msg].append(repre_path) self.log.warning("{} <{}>".format(msg, repre_path)) return @@ -408,8 +579,12 @@ class Delivery(BaseAction): anatomy_data["frame"] = frame_indicator anatomy_filled = anatomy.format(anatomy_data) - delivery_path = anatomy_filled["delivery"][anatomy_name] - print(delivery_path) + if format_dict: + template_result = anatomy_filled["delivery"][anatomy_name] + delivery_path = template_result.rootless.format(**format_dict) + else: + delivery_path = anatomy_filled["delivery"][anatomy_name] + delivery_folder = os.path.dirname(delivery_path) dst_head, dst_tail = delivery_path.split(frame_indicator) dst_padding = src_collection.padding @@ -468,10 +643,10 @@ class Delivery(BaseAction): except OSError: shutil.copyfile(src_path, dst_path) - def report(self): + def report(self, report_items): items = [] title = "Delivery report" - for msg, _items in self.report_items.items(): + for msg, _items in report_items.items(): if not _items: continue diff --git a/pype/modules/ftrack/actions/action_prepare_project.py b/pype/modules/ftrack/actions/action_prepare_project.py index f51a9eb9a6..b3a2a20151 100644 --- a/pype/modules/ftrack/actions/action_prepare_project.py +++ b/pype/modules/ftrack/actions/action_prepare_project.py @@ -3,7 +3,7 @@ import json from pype.modules.ftrack.lib import BaseAction, statics_icon from pype.api import config, Anatomy, project_overrides_dir_path -from pype.modules.ftrack.lib.avalon_sync import get_avalon_attr +from pype.modules.ftrack.lib.avalon_sync import get_pype_attr class PrepareProject(BaseAction): @@ -221,7 +221,7 @@ class PrepareProject(BaseAction): def _attributes_to_set(self, project_defaults): attributes_to_set = {} - cust_attrs, hier_cust_attrs = get_avalon_attr(self.session, True) + cust_attrs, 
hier_cust_attrs = get_pype_attr(self.session, True) for attr in hier_cust_attrs: key = attr["key"] diff --git a/pype/modules/ftrack/actions/action_store_thumbnails_to_avalon.py b/pype/modules/ftrack/actions/action_store_thumbnails_to_avalon.py index b399dab7ce..36f7175768 100644 --- a/pype/modules/ftrack/actions/action_store_thumbnails_to_avalon.py +++ b/pype/modules/ftrack/actions/action_store_thumbnails_to_avalon.py @@ -6,9 +6,9 @@ import json from bson.objectid import ObjectId from pype.modules.ftrack.lib import BaseAction, statics_icon from pype.api import Anatomy -from pype.modules.ftrack.lib.io_nonsingleton import DbConnector +from avalon.api import AvalonMongoDB -from pype.modules.ftrack.lib.avalon_sync import CustAttrIdKey +from pype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY class StoreThumbnailsToAvalon(BaseAction): @@ -25,7 +25,7 @@ class StoreThumbnailsToAvalon(BaseAction): icon = statics_icon("ftrack", "action_icons", "PypeAdmin.svg") thumbnail_key = "AVALON_THUMBNAIL_ROOT" - db_con = DbConnector() + db_con = AvalonMongoDB() def discover(self, session, entities, event): for entity in entities: @@ -390,7 +390,7 @@ class StoreThumbnailsToAvalon(BaseAction): return output asset_ent = None - asset_mongo_id = parent["custom_attributes"].get(CustAttrIdKey) + asset_mongo_id = parent["custom_attributes"].get(CUST_ATTR_ID_KEY) if asset_mongo_id: try: asset_mongo_id = ObjectId(asset_mongo_id) diff --git a/pype/modules/ftrack/actions/action_thumbnail_to_parent.py b/pype/modules/ftrack/actions/action_thumbnail_to_parent.py index 8710fa9dcf..fb473f9aa5 100644 --- a/pype/modules/ftrack/actions/action_thumbnail_to_parent.py +++ b/pype/modules/ftrack/actions/action_thumbnail_to_parent.py @@ -41,9 +41,9 @@ class ThumbToParent(BaseAction): parent = None thumbid = None if entity.entity_type.lower() == 'assetversion': - try: - parent = entity['task'] - except Exception: + parent = entity['task'] + + if parent is None: par_ent = entity['link'][-2] parent = 
session.get(par_ent['type'], par_ent['id']) else: @@ -51,7 +51,7 @@ class ThumbToParent(BaseAction): parent = entity['parent'] except Exception as e: msg = ( - "Durin Action 'Thumb to Parent'" + "During Action 'Thumb to Parent'" " went something wrong" ) self.log.error(msg) @@ -62,7 +62,10 @@ class ThumbToParent(BaseAction): parent['thumbnail_id'] = thumbid status = 'done' else: - status = 'failed' + raise Exception( + "Parent or thumbnail id not found. Parent: {}. " + "Thumbnail id: {}".format(parent, thumbid) + ) # inform the user that the job is done job['status'] = status or 'done' diff --git a/pype/modules/ftrack/events/action_push_frame_values_to_task.py b/pype/modules/ftrack/events/action_push_frame_values_to_task.py new file mode 100644 index 0000000000..a55c1e46a6 --- /dev/null +++ b/pype/modules/ftrack/events/action_push_frame_values_to_task.py @@ -0,0 +1,437 @@ +import json +import collections +import ftrack_api +from pype.modules.ftrack.lib import BaseAction + + +class PushFrameValuesToTaskAction(BaseAction): + """Action for testing purpose or as base for new actions.""" + + # Ignore event handler by default + ignore_me = True + + identifier = "admin.push_frame_values_to_task" + label = "Pype Admin" + variant = "- Push Frame values to Task" + + entities_query = ( + "select id, name, parent_id, link from TypedContext" + " where project_id is \"{}\" and object_type_id in ({})" + ) + cust_attrs_query = ( + "select id, key, object_type_id, is_hierarchical, default" + " from CustomAttributeConfiguration" + " where key in ({})" + ) + cust_attr_value_query = ( + "select value, entity_id from CustomAttributeValue" + " where entity_id in ({}) and configuration_id in ({})" + ) + + pushing_entity_types = {"Shot"} + hierarchical_custom_attribute_keys = {"frameStart", "frameEnd"} + custom_attribute_mapping = { + "frameStart": "fstart", + "frameEnd": "fend" + } + discover_role_list = {"Pypeclub", "Administrator", "Project Manager"} + + def register(self): + 
modified_role_names = set() + for role_name in self.discover_role_list: + modified_role_names.add(role_name.lower()) + self.discover_role_list = modified_role_names + + self.session.event_hub.subscribe( + "topic=ftrack.action.discover", + self._discover, + priority=self.priority + ) + + launch_subscription = ( + "topic=ftrack.action.launch and data.actionIdentifier={0}" + ).format(self.identifier) + self.session.event_hub.subscribe(launch_subscription, self._launch) + + def discover(self, session, entities, event): + """ Validation """ + # Check if selection is valid + valid_selection = False + for ent in event["data"]["selection"]: + # Ignore entities that are not tasks or projects + if ent["entityType"].lower() == "show": + valid_selection = True + break + + if not valid_selection: + return False + + # Get user and check his roles + user_id = event.get("source", {}).get("user", {}).get("id") + if not user_id: + return False + + user = session.query("User where id is \"{}\"".format(user_id)).first() + if not user: + return False + + for role in user["user_security_roles"]: + lowered_role = role["security_role"]["name"].lower() + if lowered_role in self.discover_role_list: + return True + return False + + def launch(self, session, entities, event): + self.log.debug("{}: Creating job".format(self.label)) + + user_entity = session.query( + "User where id is {}".format(event["source"]["user"]["id"]) + ).one() + job = session.create("Job", { + "user": user_entity, + "status": "running", + "data": json.dumps({ + "description": "Propagation of Frame attribute values to task." 
+ }) + }) + session.commit() + + try: + project_entity = self.get_project_from_entity(entities[0]) + result = self.propagate_values(session, project_entity, event) + job["status"] = "done" + session.commit() + + return result + + except Exception: + session.rollback() + job["status"] = "failed" + session.commit() + + msg = "Pushing Custom attribute values to task Failed" + self.log.warning(msg, exc_info=True) + return { + "success": False, + "message": msg + } + + finally: + if job["status"] == "running": + job["status"] = "failed" + session.commit() + + def task_attributes(self, session): + task_object_type = session.query( + "ObjectType where name is \"Task\"" + ).one() + + hier_attr_names = list( + self.custom_attribute_mapping.keys() + ) + entity_type_specific_names = list( + self.custom_attribute_mapping.values() + ) + joined_keys = self.join_keys( + hier_attr_names + entity_type_specific_names + ) + attribute_entities = session.query( + self.cust_attrs_query.format(joined_keys) + ).all() + + hier_attrs = [] + task_attrs = {} + for attr in attribute_entities: + attr_key = attr["key"] + if attr["is_hierarchical"]: + if attr_key in hier_attr_names: + hier_attrs.append(attr) + elif attr["object_type_id"] == task_object_type["id"]: + if attr_key in entity_type_specific_names: + task_attrs[attr_key] = attr["id"] + return task_attrs, hier_attrs + + def join_keys(self, items): + return ",".join(["\"{}\"".format(item) for item in items]) + + def propagate_values(self, session, project_entity, event): + self.log.debug("Querying project's entities \"{}\".".format( + project_entity["full_name"] + )) + pushing_entity_types = tuple( + ent_type.lower() + for ent_type in self.pushing_entity_types + ) + destination_object_types = [] + all_object_types = session.query("ObjectType").all() + for object_type in all_object_types: + lowered_name = object_type["name"].lower() + if ( + lowered_name == "task" + or lowered_name in pushing_entity_types + ): + 
destination_object_types.append(object_type) + + destination_object_type_ids = tuple( + obj_type["id"] + for obj_type in destination_object_types + ) + entities = session.query(self.entities_query.format( + project_entity["id"], + self.join_keys(destination_object_type_ids) + )).all() + + entities_by_id = { + entity["id"]: entity + for entity in entities + } + + self.log.debug("Filtering Task entities.") + task_entities_by_parent_id = collections.defaultdict(list) + non_task_entities = [] + non_task_entity_ids = [] + for entity in entities: + if entity.entity_type.lower() != "task": + non_task_entities.append(entity) + non_task_entity_ids.append(entity["id"]) + continue + + parent_id = entity["parent_id"] + if parent_id in entities_by_id: + task_entities_by_parent_id[parent_id].append(entity) + + task_attr_id_by_keys, hier_attrs = self.task_attributes(session) + + self.log.debug("Getting Custom attribute values from tasks' parents.") + hier_values_by_entity_id = self.get_hier_values( + session, + hier_attrs, + non_task_entity_ids + ) + + self.log.debug("Setting parents' values to task.") + task_missing_keys = self.set_task_attr_values( + session, + task_entities_by_parent_id, + hier_values_by_entity_id, + task_attr_id_by_keys + ) + + self.log.debug("Setting values to entities themselves.") + missing_keys_by_object_name = self.push_values_to_entities( + session, + non_task_entities, + hier_values_by_entity_id + ) + if task_missing_keys: + missing_keys_by_object_name["Task"] = task_missing_keys + if missing_keys_by_object_name: + self.report(missing_keys_by_object_name, event) + return True + + def report(self, missing_keys_by_object_name, event): + splitter = {"type": "label", "value": "---"} + + title = "Push Custom Attribute values report:" + + items = [] + items.append({ + "type": "label", + "value": "# Pushing values was not complete" + }) + items.append({ + "type": "label", + "value": ( + "

It was due to missing custom" + " attribute configurations for specific entity type/s." + " These configurations are not created automatically.

" + ) + }) + + log_message_items = [] + log_message_item_template = ( + "Entity type \"{}\" does not have created Custom Attribute/s: {}" + ) + for object_name, missing_attr_names in ( + missing_keys_by_object_name.items() + ): + log_message_items.append(log_message_item_template.format( + object_name, self.join_keys(missing_attr_names) + )) + + items.append(splitter) + items.append({ + "type": "label", + "value": "## Entity type: {}".format(object_name) + }) + + items.append({ + "type": "label", + "value": "

{}

".format("
".join(missing_attr_names)) + }) + + self.log.warning(( + "Couldn't finish pushing attribute values because" + " few entity types miss Custom attribute configurations:\n{}" + ).format("\n".join(log_message_items))) + + self.show_interface(items, title, event) + + def get_hier_values(self, session, hier_attrs, focus_entity_ids): + joined_entity_ids = self.join_keys(focus_entity_ids) + hier_attr_ids = self.join_keys( + tuple(hier_attr["id"] for hier_attr in hier_attrs) + ) + hier_attrs_key_by_id = { + hier_attr["id"]: hier_attr["key"] + for hier_attr in hier_attrs + } + call_expr = [{ + "action": "query", + "expression": self.cust_attr_value_query.format( + joined_entity_ids, hier_attr_ids + ) + }] + if hasattr(session, "call"): + [values] = session.call(call_expr) + else: + [values] = session._call(call_expr) + + values_per_entity_id = {} + for item in values["data"]: + entity_id = item["entity_id"] + key = hier_attrs_key_by_id[item["configuration_id"]] + + if entity_id not in values_per_entity_id: + values_per_entity_id[entity_id] = {} + value = item["value"] + if value is not None: + values_per_entity_id[entity_id][key] = value + + output = {} + for entity_id in focus_entity_ids: + value = values_per_entity_id.get(entity_id) + if value: + output[entity_id] = value + + return output + + def set_task_attr_values( + self, + session, + task_entities_by_parent_id, + hier_values_by_entity_id, + task_attr_id_by_keys + ): + missing_keys = set() + for parent_id, values in hier_values_by_entity_id.items(): + task_entities = task_entities_by_parent_id[parent_id] + for hier_key, value in values.items(): + key = self.custom_attribute_mapping[hier_key] + if key not in task_attr_id_by_keys: + missing_keys.add(key) + continue + + for task_entity in task_entities: + _entity_key = collections.OrderedDict({ + "configuration_id": task_attr_id_by_keys[key], + "entity_id": task_entity["id"] + }) + + session.recorded_operations.push( + ftrack_api.operation.UpdateEntityOperation( + 
"ContextCustomAttributeValue", + _entity_key, + "value", + ftrack_api.symbol.NOT_SET, + value + ) + ) + session.commit() + + return missing_keys + + def push_values_to_entities( + self, + session, + non_task_entities, + hier_values_by_entity_id + ): + object_types = session.query( + "ObjectType where name in ({})".format( + self.join_keys(self.pushing_entity_types) + ) + ).all() + object_type_names_by_id = { + object_type["id"]: object_type["name"] + for object_type in object_types + } + joined_keys = self.join_keys( + self.custom_attribute_mapping.values() + ) + attribute_entities = session.query( + self.cust_attrs_query.format(joined_keys) + ).all() + + attrs_by_obj_id = {} + for attr in attribute_entities: + if attr["is_hierarchical"]: + continue + + obj_id = attr["object_type_id"] + if obj_id not in object_type_names_by_id: + continue + + if obj_id not in attrs_by_obj_id: + attrs_by_obj_id[obj_id] = {} + + attr_key = attr["key"] + attrs_by_obj_id[obj_id][attr_key] = attr["id"] + + entities_by_obj_id = collections.defaultdict(list) + for entity in non_task_entities: + entities_by_obj_id[entity["object_type_id"]].append(entity) + + missing_keys_by_object_id = collections.defaultdict(set) + for obj_type_id, attr_keys in attrs_by_obj_id.items(): + entities = entities_by_obj_id.get(obj_type_id) + if not entities: + continue + + for entity in entities: + values = hier_values_by_entity_id.get(entity["id"]) + if not values: + continue + + for hier_key, value in values.items(): + key = self.custom_attribute_mapping[hier_key] + if key not in attr_keys: + missing_keys_by_object_id[obj_type_id].add(key) + continue + + _entity_key = collections.OrderedDict({ + "configuration_id": attr_keys[key], + "entity_id": entity["id"] + }) + + session.recorded_operations.push( + ftrack_api.operation.UpdateEntityOperation( + "ContextCustomAttributeValue", + _entity_key, + "value", + ftrack_api.symbol.NOT_SET, + value + ) + ) + session.commit() + + missing_keys_by_object_name = {} + for 
obj_id, missing_keys in missing_keys_by_object_id.items(): + obj_name = object_type_names_by_id[obj_id] + missing_keys_by_object_name[obj_name] = missing_keys + + return missing_keys_by_object_name + + +def register(session, plugins_presets={}): + PushFrameValuesToTaskAction(session, plugins_presets).register() diff --git a/pype/modules/ftrack/events/action_sync_to_avalon.py b/pype/modules/ftrack/events/action_sync_to_avalon.py index a06b825d6a..4e119228c3 100644 --- a/pype/modules/ftrack/events/action_sync_to_avalon.py +++ b/pype/modules/ftrack/events/action_sync_to_avalon.py @@ -1,10 +1,8 @@ -import os import time import traceback from pype.modules.ftrack import BaseAction from pype.modules.ftrack.lib.avalon_sync import SyncEntitiesFactory -from pype.api import config class SyncToAvalonServer(BaseAction): @@ -38,17 +36,6 @@ class SyncToAvalonServer(BaseAction): variant = "- Sync To Avalon (Server)" #: Action description. description = "Send data from Ftrack to Avalon" - #: Action icon. 
- icon = "{}/ftrack/action_icons/PypeAdmin.svg".format( - os.environ.get( - "PYPE_STATICS_SERVER", - "http://localhost:{}".format( - config.get_presets().get("services", {}).get( - "rest_api", {} - ).get("default_port", 8021) - ) - ) - ) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) diff --git a/pype/modules/ftrack/events/event_del_avalon_id_from_new.py b/pype/modules/ftrack/events/event_del_avalon_id_from_new.py index 89bad52f29..ee82c9589d 100644 --- a/pype/modules/ftrack/events/event_del_avalon_id_from_new.py +++ b/pype/modules/ftrack/events/event_del_avalon_id_from_new.py @@ -1,5 +1,5 @@ from pype.modules.ftrack.lib import BaseEvent -from pype.modules.ftrack.lib.avalon_sync import CustAttrIdKey +from pype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY from pype.modules.ftrack.events.event_sync_to_avalon import SyncToAvalonEvent @@ -29,7 +29,7 @@ class DelAvalonIdFromNew(BaseEvent): elif ( entity.get('action', None) == 'update' and - CustAttrIdKey in entity['keys'] and + CUST_ATTR_ID_KEY in entity['keys'] and entity_id in created ): ftrack_entity = session.get( @@ -37,12 +37,9 @@ class DelAvalonIdFromNew(BaseEvent): entity_id ) - cust_attr = ftrack_entity['custom_attributes'][ - CustAttrIdKey - ] - - if cust_attr != '': - ftrack_entity['custom_attributes'][CustAttrIdKey] = '' + cust_attrs = ftrack_entity["custom_attributes"] + if cust_attrs[CUST_ATTR_ID_KEY]: + cust_attrs[CUST_ATTR_ID_KEY] = "" session.commit() except Exception: diff --git a/pype/modules/ftrack/events/event_next_task_update.py b/pype/modules/ftrack/events/event_next_task_update.py index dc1ab0a0d7..1f8407e559 100644 --- a/pype/modules/ftrack/events/event_next_task_update.py +++ b/pype/modules/ftrack/events/event_next_task_update.py @@ -1,92 +1,220 @@ -import ftrack_api -from pype.modules.ftrack import BaseEvent import operator +import collections +from pype.modules.ftrack import BaseEvent class NextTaskUpdate(BaseEvent): + def filter_entities_info(self, session, 
event): + # Filter if event contain relevant data + entities_info = event["data"].get("entities") + if not entities_info: + return - def get_next_task(self, task, session): - parent = task['parent'] - # tasks = parent['tasks'] - tasks = parent['children'] + first_filtered_entities = [] + for entity_info in entities_info: + # Care only about tasks + if entity_info.get("entityType") != "task": + continue - def sort_types(types): - data = {} - for t in types: - data[t] = t.get('sort') + # Care only about changes of status + changes = entity_info.get("changes") or {} + statusid_changes = changes.get("statusid") or {} + if ( + statusid_changes.get("new") is None + or statusid_changes.get("old") is None + ): + continue - data = sorted(data.items(), key=operator.itemgetter(1)) - results = [] - for item in data: - results.append(item[0]) - return results + first_filtered_entities.append(entity_info) - types_sorted = sort_types(session.query('Type')) - next_types = None - for t in types_sorted: - if t['id'] == task['type_id']: - next_types = types_sorted[(types_sorted.index(t) + 1):] + status_ids = [ + entity_info["changes"]["statusid"]["new"] + for entity_info in first_filtered_entities + ] + statuses_by_id = self.get_statuses_by_id( + session, status_ids=status_ids + ) - for nt in next_types: - for t in tasks: - if nt['id'] == t['type_id']: - return t + # Care only about tasks having status with state `Done` + filtered_entities = [] + for entity_info in first_filtered_entities: + status_id = entity_info["changes"]["statusid"]["new"] + status_entity = statuses_by_id[status_id] + if status_entity["state"]["name"].lower() == "done": + filtered_entities.append(entity_info) - return None + return filtered_entities + + def get_parents_by_id(self, session, entities_info): + parent_ids = [ + "\"{}\"".format(entity_info["parentId"]) + for entity_info in entities_info + ] + parent_entities = session.query( + "TypedContext where id in ({})".format(", ".join(parent_ids)) + ).all() + 
+ return { + entity["id"]: entity + for entity in parent_entities + } + + def get_tasks_by_id(self, session, parent_ids): + joined_parent_ids = ",".join([ + "\"{}\"".format(parent_id) + for parent_id in parent_ids + ]) + task_entities = session.query( + "Task where parent_id in ({})".format(joined_parent_ids) + ).all() + + return { + entity["id"]: entity + for entity in task_entities + } + + def get_statuses_by_id(self, session, task_entities=None, status_ids=None): + if task_entities is None and status_ids is None: + return {} + + if status_ids is None: + status_ids = [] + for task_entity in task_entities: + status_ids.append(task_entity["status_id"]) + + if not status_ids: + return {} + + status_entities = session.query( + "Status where id in ({})".format(", ".join(status_ids)) + ).all() + + return { + entity["id"]: entity + for entity in status_entities + } + + def get_sorted_task_types(self, session): + data = { + _type: _type.get("sort") + for _type in session.query("Type").all() + if _type.get("sort") is not None + } + + return [ + item[0] + for item in sorted(data.items(), key=operator.itemgetter(1)) + ] def launch(self, session, event): '''Propagates status from version to task when changed''' - # self.log.info(event) - # start of event procedure ---------------------------------- + entities_info = self.filter_entities_info(session, event) + if not entities_info: + return - for entity in event['data'].get('entities', []): - changes = entity.get('changes', None) - if changes is None: - continue - statusid_changes = changes.get('statusid', {}) - if ( - entity['entityType'] != 'task' or - 'statusid' not in (entity.get('keys') or []) or - statusid_changes.get('new', None) is None or - statusid_changes.get('old', None) is None - ): + parents_by_id = self.get_parents_by_id(session, entities_info) + tasks_by_id = self.get_tasks_by_id( + session, tuple(parents_by_id.keys()) + ) + + tasks_to_parent_id = collections.defaultdict(list) + for task_entity in 
tasks_by_id.values(): + tasks_to_parent_id[task_entity["parent_id"]].append(task_entity) + + statuses_by_id = self.get_statuses_by_id(session, tasks_by_id.values()) + + next_status_name = "Ready" + next_status = session.query( + "Status where name is \"{}\"".format(next_status_name) + ).first() + if not next_status: + self.log.warning("Couldn't find status with name \"{}\"".format( + next_status_name + )) + return + + for entity_info in entities_info: + parent_id = entity_info["parentId"] + task_id = entity_info["entityId"] + task_entity = tasks_by_id[task_id] + + all_same_type_taks_done = True + for parents_task in tasks_to_parent_id[parent_id]: + if ( + parents_task["id"] == task_id + or parents_task["type_id"] != task_entity["type_id"] + ): + continue + + parents_task_status = statuses_by_id[parents_task["status_id"]] + low_status_name = parents_task_status["name"].lower() + # Skip if task's status name "Omitted" + if low_status_name == "omitted": + continue + + low_state_name = parents_task_status["state"]["name"].lower() + if low_state_name != "done": + all_same_type_taks_done = False + break + + if not all_same_type_taks_done: continue - task = session.get('Task', entity['entityId']) + # Prepare all task types + sorted_task_types = self.get_sorted_task_types(session) + sorted_task_types_len = len(sorted_task_types) - status = session.get('Status', - entity['changes']['statusid']['new']) - state = status['state']['name'] + from_idx = None + for idx, task_type in enumerate(sorted_task_types): + if task_type["id"] == task_entity["type_id"]: + from_idx = idx + 1 + break - next_task = self.get_next_task(task, session) + # Current task type is last in order + if from_idx is None or from_idx >= sorted_task_types_len: + continue - # Setting next task to Ready, if on NOT READY - if next_task and state == 'Done': - if next_task['status']['name'].lower() == 'not ready': + next_task_type_id = None + next_task_type_tasks = [] + for idx in range(from_idx, 
sorted_task_types_len): + next_task_type = sorted_task_types[idx] + for parents_task in tasks_to_parent_id[parent_id]: + if next_task_type_id is None: + if parents_task["type_id"] != next_task_type["id"]: + continue + next_task_type_id = next_task_type["id"] - # Get path to task - path = task['name'] - for p in task['ancestors']: - path = p['name'] + '/' + path + if parents_task["type_id"] == next_task_type_id: + next_task_type_tasks.append(parents_task) - # Setting next task status - try: - query = 'Status where name is "{}"'.format('Ready') - status_to_set = session.query(query).one() - next_task['status'] = status_to_set - session.commit() - self.log.info(( - '>>> [ {} ] updated to [ Ready ]' - ).format(path)) - except Exception as e: - session.rollback() - self.log.warning(( - '!!! [ {} ] status couldnt be set: [ {} ]' - ).format(path, str(e)), exc_info=True) + if next_task_type_id is not None: + break + + for next_task_entity in next_task_type_tasks: + if next_task_entity["status"]["name"].lower() != "not ready": + continue + + ent_path = "/".join( + [ent["name"] for ent in next_task_entity["link"]] + ) + try: + next_task_entity["status"] = next_status + session.commit() + self.log.info( + "\"{}\" updated status to \"{}\"".format( + ent_path, next_status_name + ) + ) + except Exception: + session.rollback() + self.log.warning( + "\"{}\" status couldnt be set to \"{}\"".format( + ent_path, next_status_name + ), + exc_info=True + ) def register(session, plugins_presets): - '''Register plugin. 
Called when used as an plugin.''' - NextTaskUpdate(session, plugins_presets).register() diff --git a/pype/modules/ftrack/events/event_push_frame_values_to_task.py b/pype/modules/ftrack/events/event_push_frame_values_to_task.py new file mode 100644 index 0000000000..32993ef938 --- /dev/null +++ b/pype/modules/ftrack/events/event_push_frame_values_to_task.py @@ -0,0 +1,230 @@ +import collections +import ftrack_api +from pype.modules.ftrack import BaseEvent + + +class PushFrameValuesToTaskEvent(BaseEvent): + # Ignore event handler by default + ignore_me = True + + cust_attrs_query = ( + "select id, key, object_type_id, is_hierarchical, default" + " from CustomAttributeConfiguration" + " where key in ({}) and object_type_id in ({})" + ) + + interest_entity_types = {"Shot"} + interest_attributes = {"frameStart", "frameEnd"} + interest_attr_mapping = { + "frameStart": "fstart", + "frameEnd": "fend" + } + _cached_task_object_id = None + _cached_interest_object_ids = None + + @staticmethod + def join_keys(keys): + return ",".join(["\"{}\"".format(key) for key in keys]) + + @classmethod + def task_object_id(cls, session): + if cls._cached_task_object_id is None: + task_object_type = session.query( + "ObjectType where name is \"Task\"" + ).one() + cls._cached_task_object_id = task_object_type["id"] + return cls._cached_task_object_id + + @classmethod + def interest_object_ids(cls, session): + if cls._cached_interest_object_ids is None: + object_types = session.query( + "ObjectType where name in ({})".format( + cls.join_keys(cls.interest_entity_types) + ) + ).all() + cls._cached_interest_object_ids = tuple( + object_type["id"] + for object_type in object_types + ) + return cls._cached_interest_object_ids + + def launch(self, session, event): + interesting_data = self.extract_interesting_data(session, event) + if not interesting_data: + return + + entities = self.get_entities(session, interesting_data) + if not entities: + return + + entities_by_id = { + entity["id"]: entity + 
for entity in entities + } + for entity_id in tuple(interesting_data.keys()): + if entity_id not in entities_by_id: + interesting_data.pop(entity_id) + + task_entities = self.get_task_entities(session, interesting_data) + + attrs_by_obj_id = self.attrs_configurations(session) + if not attrs_by_obj_id: + self.log.warning(( + "There is not created Custom Attributes {}" + " for \"Task\" entity type." + ).format(self.join_keys(self.interest_attributes))) + return + + task_entities_by_parent_id = collections.defaultdict(list) + for task_entity in task_entities: + task_entities_by_parent_id[task_entity["parent_id"]].append( + task_entity + ) + + missing_keys_by_object_name = collections.defaultdict(set) + for parent_id, values in interesting_data.items(): + entities = task_entities_by_parent_id.get(parent_id) or [] + entities.append(entities_by_id[parent_id]) + + for hier_key, value in values.items(): + changed_ids = [] + for entity in entities: + key = self.interest_attr_mapping[hier_key] + entity_attrs_mapping = ( + attrs_by_obj_id.get(entity["object_type_id"]) + ) + if not entity_attrs_mapping: + missing_keys_by_object_name[entity.entity_type].add( + key + ) + continue + + configuration_id = entity_attrs_mapping.get(key) + if not configuration_id: + missing_keys_by_object_name[entity.entity_type].add( + key + ) + continue + + changed_ids.append(entity["id"]) + entity_key = collections.OrderedDict({ + "configuration_id": configuration_id, + "entity_id": entity["id"] + }) + if value is None: + op = ftrack_api.operation.DeleteEntityOperation( + "CustomAttributeValue", + entity_key + ) + else: + op = ftrack_api.operation.UpdateEntityOperation( + "ContextCustomAttributeValue", + entity_key, + "value", + ftrack_api.symbol.NOT_SET, + value + ) + + session.recorded_operations.push(op) + self.log.info(( + "Changing Custom Attribute \"{}\" to value" + " \"{}\" on entities: {}" + ).format(key, value, self.join_keys(changed_ids))) + try: + session.commit() + except Exception: + 
session.rollback() + self.log.warning( + "Changing of values failed.", + exc_info=True + ) + if not missing_keys_by_object_name: + return + + msg_items = [] + for object_name, missing_keys in missing_keys_by_object_name.items(): + msg_items.append( + "{}: ({})".format(object_name, self.join_keys(missing_keys)) + ) + + self.log.warning(( + "Missing Custom Attribute configuration" + " per specific object types: {}" + ).format(", ".join(msg_items))) + + def extract_interesting_data(self, session, event): + # Filter if event contain relevant data + entities_info = event["data"].get("entities") + if not entities_info: + return + + interesting_data = {} + for entity_info in entities_info: + # Care only about tasks + if entity_info.get("entityType") != "task": + continue + + # Care only about changes of status + changes = entity_info.get("changes") or {} + if not changes: + continue + + # Care only about changes if specific keys + entity_changes = {} + for key in self.interest_attributes: + if key in changes: + entity_changes[key] = changes[key]["new"] + + if not entity_changes: + continue + + # Do not care about "Task" entity_type + task_object_id = self.task_object_id(session) + if entity_info.get("objectTypeId") == task_object_id: + continue + + interesting_data[entity_info["entityId"]] = entity_changes + return interesting_data + + def get_entities(self, session, interesting_data): + entities = session.query( + "TypedContext where id in ({})".format( + self.join_keys(interesting_data.keys()) + ) + ).all() + + output = [] + interest_object_ids = self.interest_object_ids(session) + for entity in entities: + if entity["object_type_id"] in interest_object_ids: + output.append(entity) + return output + + def get_task_entities(self, session, interesting_data): + return session.query( + "Task where parent_id in ({})".format( + self.join_keys(interesting_data.keys()) + ) + ).all() + + def attrs_configurations(self, session): + object_ids = 
list(self.interest_object_ids(session)) + object_ids.append(self.task_object_id(session)) + + attrs = session.query(self.cust_attrs_query.format( + self.join_keys(self.interest_attr_mapping.values()), + self.join_keys(object_ids) + )).all() + + output = {} + for attr in attrs: + obj_id = attr["object_type_id"] + if obj_id not in output: + output[obj_id] = {} + output[obj_id][attr["key"]] = attr["id"] + return output + + +def register(session, plugins_presets): + PushFrameValuesToTaskEvent(session, plugins_presets).register() diff --git a/pype/modules/ftrack/events/event_sync_to_avalon.py b/pype/modules/ftrack/events/event_sync_to_avalon.py index 739ec69522..314871f5b3 100644 --- a/pype/modules/ftrack/events/event_sync_to_avalon.py +++ b/pype/modules/ftrack/events/event_sync_to_avalon.py @@ -14,17 +14,17 @@ from avalon import schema from pype.modules.ftrack.lib import avalon_sync from pype.modules.ftrack.lib.avalon_sync import ( - CustAttrIdKey, CustAttrAutoSync, EntitySchemas + CUST_ATTR_ID_KEY, CUST_ATTR_AUTO_SYNC, EntitySchemas ) import ftrack_api from pype.modules.ftrack import BaseEvent -from pype.modules.ftrack.lib.io_nonsingleton import DbConnector +from avalon.api import AvalonMongoDB class SyncToAvalonEvent(BaseEvent): - dbcon = DbConnector() + dbcon = AvalonMongoDB() interest_entTypes = ["show", "task"] ignore_ent_types = ["Milestone"] @@ -103,7 +103,7 @@ class SyncToAvalonEvent(BaseEvent): @property def avalon_cust_attrs(self): if self._avalon_cust_attrs is None: - self._avalon_cust_attrs = avalon_sync.get_avalon_attr( + self._avalon_cust_attrs = avalon_sync.get_pype_attr( self.process_session ) return self._avalon_cust_attrs @@ -220,7 +220,7 @@ class SyncToAvalonEvent(BaseEvent): def avalon_custom_attributes(self): """Return info about changeability of entity and it's parents.""" if self._avalon_custom_attributes is None: - self._avalon_custom_attributes = avalon_sync.get_avalon_attr( + self._avalon_custom_attributes = avalon_sync.get_pype_attr( 
self.process_session ) return self._avalon_custom_attributes @@ -557,10 +557,10 @@ class SyncToAvalonEvent(BaseEvent): continue changes = ent_info["changes"] - if CustAttrAutoSync not in changes: + if CUST_ATTR_AUTO_SYNC not in changes: continue - auto_sync = changes[CustAttrAutoSync]["new"] + auto_sync = changes[CUST_ATTR_AUTO_SYNC]["new"] if auto_sync == "1": # Trigger sync to avalon action if auto sync was turned on ft_project = self.cur_project @@ -593,16 +593,16 @@ class SyncToAvalonEvent(BaseEvent): ft_project = self.cur_project # Check if auto-sync custom attribute exists - if CustAttrAutoSync not in ft_project["custom_attributes"]: + if CUST_ATTR_AUTO_SYNC not in ft_project["custom_attributes"]: # TODO should we sent message to someone? self.log.error(( "Custom attribute \"{}\" is not created or user \"{}\" used" " for Event server don't have permissions to access it!" - ).format(CustAttrAutoSync, self.session.api_user)) + ).format(CUST_ATTR_AUTO_SYNC, self.session.api_user)) return True # Skip if auto-sync is not set - auto_sync = ft_project["custom_attributes"][CustAttrAutoSync] + auto_sync = ft_project["custom_attributes"][CUST_ATTR_AUTO_SYNC] if auto_sync is not True: return True @@ -844,7 +844,7 @@ class SyncToAvalonEvent(BaseEvent): new_entity["custom_attributes"][key] = val - new_entity["custom_attributes"][CustAttrIdKey] = ( + new_entity["custom_attributes"][CUST_ATTR_ID_KEY] = ( str(avalon_entity["_id"]) ) ent_path = self.get_ent_path(new_entity_id) @@ -1097,7 +1097,7 @@ class SyncToAvalonEvent(BaseEvent): continue final_entity["data"][key] = val - _mongo_id_str = cust_attrs.get(CustAttrIdKey) + _mongo_id_str = cust_attrs.get(CUST_ATTR_ID_KEY) if _mongo_id_str: try: _mongo_id = ObjectId(_mongo_id_str) @@ -1158,15 +1158,17 @@ class SyncToAvalonEvent(BaseEvent): self.log.debug("Entity was synchronized <{}>".format(ent_path)) mongo_id_str = str(mongo_id) - if mongo_id_str != ftrack_ent["custom_attributes"][CustAttrIdKey]: - 
ftrack_ent["custom_attributes"][CustAttrIdKey] = mongo_id_str + if mongo_id_str != ftrack_ent["custom_attributes"][CUST_ATTR_ID_KEY]: + ftrack_ent["custom_attributes"][CUST_ATTR_ID_KEY] = mongo_id_str try: self.process_session.commit() except Exception: self.process_session.rolback() # TODO logging # TODO report - error_msg = "Failed to store MongoID to entity's custom attribute" + error_msg = ( + "Failed to store MongoID to entity's custom attribute" + ) report_msg = ( "{}||SyncToAvalon action may solve this issue" ).format(error_msg) @@ -1245,7 +1247,7 @@ class SyncToAvalonEvent(BaseEvent): self.process_session, entity, hier_keys, defaults ) for key, val in hier_values.items(): - if key == CustAttrIdKey: + if key == CUST_ATTR_ID_KEY: continue output[key] = val @@ -1687,7 +1689,7 @@ class SyncToAvalonEvent(BaseEvent): if "_hierarchical" not in temp_dict: hier_mongo_id_configuration_id = None for attr in hier_attrs: - if attr["key"] == CustAttrIdKey: + if attr["key"] == CUST_ATTR_ID_KEY: hier_mongo_id_configuration_id = attr["id"] break temp_dict["_hierarchical"] = hier_mongo_id_configuration_id @@ -1704,7 +1706,7 @@ class SyncToAvalonEvent(BaseEvent): for attr in cust_attrs: key = attr["key"] - if key != CustAttrIdKey: + if key != CUST_ATTR_ID_KEY: continue if attr["entity_type"] != ent_info["entityType"]: diff --git a/pype/modules/ftrack/events/event_task_to_version_status.py b/pype/modules/ftrack/events/event_task_to_version_status.py new file mode 100644 index 0000000000..e07be67b18 --- /dev/null +++ b/pype/modules/ftrack/events/event_task_to_version_status.py @@ -0,0 +1,222 @@ +import collections +from pype.modules.ftrack import BaseEvent + + +class TaskToVersionStatus(BaseEvent): + """Changes status of task's latest AssetVersions on its status change.""" + + # Attribute for caching session user id + _cached_user_id = None + + # Presets usage + asset_types_of_focus = [] + + def register(self, *args, **kwargs): + # Skip registration if attribute 
`asset_types_of_focus` is not set + modified_asset_types_of_focus = list() + if self.asset_types_of_focus: + if isinstance(self.asset_types_of_focus, str): + self.asset_types_of_focus = [self.asset_types_of_focus] + + for asset_type_name in self.asset_types_of_focus: + modified_asset_types_of_focus.append( + asset_type_name.lower() + ) + + if not modified_asset_types_of_focus: + raise Exception(( + "Event handler \"{}\" does not" + " have set presets for attribute \"{}\"" + ).format(self.__class__.__name__, "asset_types_of_focus")) + + self.asset_types_of_focus = modified_asset_types_of_focus + return super(TaskToVersionStatus, self).register(*args, **kwargs) + + def is_event_invalid(self, session, event): + # Cache user id of currently running session + if self._cached_user_id is None: + session_user_entity = session.query( + "User where username is \"{}\"".format(session.api_user) + ).first() + if not session_user_entity: + self.log.warning( + "Couldn't query Ftrack user with username \"{}\"".format( + session.api_user + ) + ) + return False + self._cached_user_id = session_user_entity["id"] + + # Skip processing if current session user was the user who created + # the event + user_info = event["source"].get("user") or {} + user_id = user_info.get("id") + + # Mark as invalid if user is unknown + if user_id is None: + return True + return user_id == self._cached_user_id + + def filter_event_entities(self, event): + # Filter if event contain relevant data + entities_info = event["data"].get("entities") + if not entities_info: + return + + filtered_entities = [] + for entity_info in entities_info: + # Care only about tasks + if entity_info.get("entityType") != "task": + continue + + # Care only about changes of status + changes = entity_info.get("changes") or {} + statusid_changes = changes.get("statusid") or {} + if ( + statusid_changes.get("new") is None + or statusid_changes.get("old") is None + ): + continue + + filtered_entities.append(entity_info) + + return 
filtered_entities + + def _get_ent_path(self, entity): + return "/".join( + [ent["name"] for ent in entity["link"]] + ) + + def launch(self, session, event): + '''Propagates status from version to task when changed''' + if self.is_event_invalid(session, event): + return + + filtered_entity_infos = self.filter_event_entities(event) + if not filtered_entity_infos: + return + + task_ids = [ + entity_info["entityId"] + for entity_info in filtered_entity_infos + ] + joined_ids = ",".join( + ["\"{}\"".format(entity_id) for entity_id in task_ids] + ) + + # Query tasks' AssetVersions + asset_versions = session.query(( + "AssetVersion where task_id in ({}) order by version descending" + ).format(joined_ids)).all() + + last_asset_version_by_task_id = ( + self.last_asset_version_by_task_id(asset_versions, task_ids) + ) + if not last_asset_version_by_task_id: + return + + # Query Task entities for last asset versions + joined_filtered_ids = ",".join([ + "\"{}\"".format(entity_id) + for entity_id in last_asset_version_by_task_id.keys() + ]) + task_entities = session.query( + "Task where id in ({})".format(joined_filtered_ids) + ).all() + if not task_entities: + return + + # Final process of changing statuses + av_statuses_by_low_name = self.asset_version_statuses(task_entities[0]) + for task_entity in task_entities: + task_id = task_entity["id"] + task_path = self._get_ent_path(task_entity) + task_status_name = task_entity["status"]["name"] + task_status_name_low = task_status_name.lower() + + last_asset_versions = last_asset_version_by_task_id[task_id] + for last_asset_version in last_asset_versions: + self.log.debug(( + "Trying to change status of last AssetVersion {}" + " for task \"{}\"" + ).format(last_asset_version["version"], task_path)) + + new_asset_version_status = av_statuses_by_low_name.get( + task_status_name_low + ) + # Skip if tasks status is not available to AssetVersion + if not new_asset_version_status: + self.log.debug(( + "AssetVersion does not have matching 
status to \"{}\"" + ).format(task_status_name)) + continue + + av_ent_path = task_path + " Asset {} AssetVersion {}".format( + last_asset_version["asset"]["name"], + last_asset_version["version"] + ) + + # Skip if current AssetVersion's status is same + current_status_name = last_asset_version["status"]["name"] + if current_status_name.lower() == task_status_name_low: + self.log.debug(( + "AssetVersion already has set status \"{}\". \"{}\"" + ).format(current_status_name, av_ent_path)) + continue + + # Change the status + try: + last_asset_version["status"] = new_asset_version_status + session.commit() + self.log.info("[ {} ] Status updated to [ {} ]".format( + av_ent_path, new_asset_version_status["name"] + )) + except Exception: + session.rollback() + self.log.warning( + "[ {} ]Status couldn't be set to \"{}\"".format( + av_ent_path, new_asset_version_status["name"] + ), + exc_info=True + ) + + def asset_version_statuses(self, entity): + project_entity = self.get_project_from_entity(entity) + project_schema = project_entity["project_schema"] + # Get all available statuses for Task + statuses = project_schema.get_statuses("AssetVersion") + # map lowered status name with it's object + av_statuses_by_low_name = { + status["name"].lower(): status for status in statuses + } + return av_statuses_by_low_name + + def last_asset_version_by_task_id(self, asset_versions, task_ids): + last_asset_version_by_task_id = collections.defaultdict(list) + last_version_by_task_id = {} + poping_entity_ids = set(task_ids) + for asset_version in asset_versions: + asset_type_name_low = ( + asset_version["asset"]["type"]["name"].lower() + ) + if asset_type_name_low not in self.asset_types_of_focus: + continue + + task_id = asset_version["task_id"] + last_version = last_version_by_task_id.get(task_id) + if last_version is None: + last_version_by_task_id[task_id] = asset_version["version"] + + elif last_version != asset_version["version"]: + poping_entity_ids.remove(task_id) + + if not 
poping_entity_ids: + break + + if task_id in poping_entity_ids: + last_asset_version_by_task_id[task_id].append(asset_version) + return last_asset_version_by_task_id + + +def register(session, plugins_presets): + TaskToVersionStatus(session, plugins_presets).register() diff --git a/pype/modules/ftrack/events/event_user_assigment.py b/pype/modules/ftrack/events/event_user_assigment.py index e198ced618..19a67b745f 100644 --- a/pype/modules/ftrack/events/event_user_assigment.py +++ b/pype/modules/ftrack/events/event_user_assigment.py @@ -3,8 +3,8 @@ import re import subprocess from pype.modules.ftrack import BaseEvent -from pype.modules.ftrack.lib.avalon_sync import CustAttrIdKey -from pype.modules.ftrack.lib.io_nonsingleton import DbConnector +from pype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY +from avalon.api import AvalonMongoDB from bson.objectid import ObjectId @@ -37,7 +37,7 @@ class UserAssigmentEvent(BaseEvent): 3) path to publish files of task user was (de)assigned to """ - db_con = DbConnector() + db_con = AvalonMongoDB() def error(self, *err): for e in err: @@ -106,7 +106,7 @@ class UserAssigmentEvent(BaseEvent): self.db_con.Session['AVALON_PROJECT'] = task['project']['full_name'] avalon_entity = None - parent_id = parent['custom_attributes'].get(CustAttrIdKey) + parent_id = parent['custom_attributes'].get(CUST_ATTR_ID_KEY) if parent_id: parent_id = ObjectId(parent_id) avalon_entity = self.db_con.find_one({ diff --git a/pype/modules/ftrack/events/event_version_to_task_statuses.py b/pype/modules/ftrack/events/event_version_to_task_statuses.py index 3ff986f9c6..fdb48cbc37 100644 --- a/pype/modules/ftrack/events/event_version_to_task_statuses.py +++ b/pype/modules/ftrack/events/event_version_to_task_statuses.py @@ -84,6 +84,9 @@ class VersionToTaskStatus(BaseEvent): if not task: continue + if version["asset"]["type"]["short"].lower() == "scene": + continue + project_schema = task["project"]["project_schema"] # Get all available statuses for Task 
statuses = project_schema.get_statuses("Task", task["type_id"]) diff --git a/pype/modules/ftrack/ftrack_server/event_server_cli.py b/pype/modules/ftrack/ftrack_server/event_server_cli.py index 73c7abfc5d..bf51c37290 100644 --- a/pype/modules/ftrack/ftrack_server/event_server_cli.py +++ b/pype/modules/ftrack/ftrack_server/event_server_cli.py @@ -522,6 +522,21 @@ def main(argv): help="Load creadentials from apps dir", action="store_true" ) + parser.add_argument( + "-clockifyapikey", type=str, + help=( + "Enter API key for Clockify actions." + " (default from environment: $CLOCKIFY_API_KEY)" + ) + ) + parser.add_argument( + "-clockifyworkspace", type=str, + help=( + "Enter workspace for Clockify." + " (default from module presets or " + "environment: $CLOCKIFY_WORKSPACE)" + ) + ) ftrack_url = os.environ.get('FTRACK_SERVER') username = os.environ.get('FTRACK_API_USER') api_key = os.environ.get('FTRACK_API_KEY') @@ -546,6 +561,12 @@ def main(argv): if kwargs.ftrackapikey: api_key = kwargs.ftrackapikey + if kwargs.clockifyworkspace: + os.environ["CLOCKIFY_WORKSPACE"] = kwargs.clockifyworkspace + + if kwargs.clockifyapikey: + os.environ["CLOCKIFY_API_KEY"] = kwargs.clockifyapikey + legacy = kwargs.legacy # Check url regex and accessibility ftrack_url = check_ftrack_url(ftrack_url) diff --git a/pype/modules/ftrack/ftrack_server/socket_thread.py b/pype/modules/ftrack/ftrack_server/socket_thread.py index dda4c7db35..e66e8bc775 100644 --- a/pype/modules/ftrack/ftrack_server/socket_thread.py +++ b/pype/modules/ftrack/ftrack_server/socket_thread.py @@ -11,7 +11,7 @@ from pype.api import Logger class SocketThread(threading.Thread): """Thread that checks suprocess of storer of processor of events""" - MAX_TIMEOUT = 35 + MAX_TIMEOUT = int(os.environ.get("PYPE_FTRACK_SOCKET_TIMEOUT", 45)) def __init__(self, name, port, filepath, additional_args=[]): super(SocketThread, self).__init__() diff --git a/pype/modules/ftrack/ftrack_server/sub_event_processor.py 
b/pype/modules/ftrack/ftrack_server/sub_event_processor.py index d7bb7a53b3..4a3241dd4f 100644 --- a/pype/modules/ftrack/ftrack_server/sub_event_processor.py +++ b/pype/modules/ftrack/ftrack_server/sub_event_processor.py @@ -9,7 +9,7 @@ from pype.modules.ftrack.ftrack_server.lib import ( SocketSession, ProcessEventHub, TOPIC_STATUS_SERVER ) import ftrack_api -from pype.api import Logger +from pype.api import Logger, config log = Logger().get_logger("Event processor") @@ -55,6 +55,42 @@ def register(session): ) +def clockify_module_registration(): + module_name = "Clockify" + + menu_items = config.get_presets()["tray"]["menu_items"] + if not menu_items["item_usage"][module_name]: + return + + api_key = os.environ.get("CLOCKIFY_API_KEY") + if not api_key: + log.warning("Clockify API key is not set.") + return + + workspace_name = os.environ.get("CLOCKIFY_WORKSPACE") + if not workspace_name: + workspace_name = ( + menu_items + .get("attributes", {}) + .get(module_name, {}) + .get("workspace_name", {}) + ) + + if not workspace_name: + log.warning("Clockify Workspace is not set.") + return + + os.environ["CLOCKIFY_WORKSPACE"] = workspace_name + + from pype.modules.clockify.constants import CLOCKIFY_FTRACK_SERVER_PATH + + current = os.environ.get("FTRACK_EVENTS_PATH") or "" + if current: + current += os.pathsep + os.environ["FTRACK_EVENTS_PATH"] = current + CLOCKIFY_FTRACK_SERVER_PATH + return True + + def main(args): port = int(args[-1]) # Create a TCP/IP socket @@ -66,6 +102,11 @@ def main(args): sock.connect(server_address) sock.sendall(b"CreatedProcess") + try: + clockify_module_registration() + except Exception: + log.info("Clockify registration failed.", exc_info=True) + try: session = SocketSession( auto_connect_event_hub=True, sock=sock, Eventhub=ProcessEventHub diff --git a/pype/modules/ftrack/lib/__init__.py b/pype/modules/ftrack/lib/__init__.py index df546ab725..d8e9c7a11c 100644 --- a/pype/modules/ftrack/lib/__init__.py +++ 
b/pype/modules/ftrack/lib/__init__.py @@ -5,7 +5,7 @@ from .ftrack_event_handler import BaseEvent from .ftrack_action_handler import BaseAction, statics_icon from .ftrack_app_handler import AppAction -__all__ = [ +__all__ = ( "avalon_sync", "credentials", "BaseHandler", @@ -13,4 +13,4 @@ __all__ = [ "BaseAction", "statics_icon", "AppAction" -] +) diff --git a/pype/modules/ftrack/lib/avalon_sync.py b/pype/modules/ftrack/lib/avalon_sync.py index c5c9eb9054..65a59452da 100644 --- a/pype/modules/ftrack/lib/avalon_sync.py +++ b/pype/modules/ftrack/lib/avalon_sync.py @@ -1,10 +1,11 @@ import os import re import queue +import json import collections import copy -from pype.modules.ftrack.lib.io_nonsingleton import DbConnector +from avalon.api import AvalonMongoDB import avalon import avalon.api @@ -27,9 +28,21 @@ EntitySchemas = { "config": "avalon-core:config-1.0" } +# Group name of custom attributes +CUST_ATTR_GROUP = "pype" + # name of Custom attribute that stores mongo_id from avalon db -CustAttrIdKey = "avalon_mongo_id" -CustAttrAutoSync = "avalon_auto_sync" +CUST_ATTR_ID_KEY = "avalon_mongo_id" +CUST_ATTR_AUTO_SYNC = "avalon_auto_sync" + + +def default_custom_attributes_definition(): + json_file_path = os.path.join( + os.path.dirname(__file__), "custom_attributes.json" + ) + with open(json_file_path, "r") as json_stream: + data = json.load(json_stream) + return data def check_regex(name, entity_type, in_schema=None, schema_patterns=None): @@ -51,10 +64,11 @@ def check_regex(name, entity_type, in_schema=None, schema_patterns=None): if not schema_obj: name_pattern = default_pattern else: - name_pattern = schema_obj.get( - "properties", {}).get( - "name", {}).get( - "pattern", default_pattern + name_pattern = ( + schema_obj + .get("properties", {}) + .get("name", {}) + .get("pattern", default_pattern) ) if schema_patterns is not None: schema_patterns[schema_name] = name_pattern @@ -64,13 +78,14 @@ def check_regex(name, entity_type, in_schema=None, schema_patterns=None): 
return False -def get_avalon_attr(session, split_hierarchical=True): +def get_pype_attr(session, split_hierarchical=True): custom_attributes = [] hier_custom_attributes = [] + # TODO remove deprecated "avalon" group from query cust_attrs_query = ( "select id, entity_type, object_type_id, is_hierarchical, default" " from CustomAttributeConfiguration" - " where group.name = \"avalon\"" + " where group.name in (\"avalon\", \"pype\")" ) all_avalon_attr = session.query(cust_attrs_query).all() for cust_attr in all_avalon_attr: @@ -225,7 +240,7 @@ def get_hierarchical_attributes(session, entity, attr_names, attr_defaults={}): class SyncEntitiesFactory: - dbcon = DbConnector() + dbcon = AvalonMongoDB() project_query = ( "select full_name, name, custom_attributes" @@ -322,12 +337,12 @@ class SyncEntitiesFactory: "*** Synchronization initialization started <{}>." ).format(project_full_name)) # Check if `avalon_mongo_id` custom attribute exist or is accessible - if CustAttrIdKey not in ft_project["custom_attributes"]: + if CUST_ATTR_ID_KEY not in ft_project["custom_attributes"]: items = [] items.append({ "type": "label", "value": "# Can't access Custom attribute <{}>".format( - CustAttrIdKey + CUST_ATTR_ID_KEY ) }) items.append({ @@ -687,7 +702,7 @@ class SyncEntitiesFactory: def set_cutom_attributes(self): self.log.debug("* Preparing custom attributes") # Get custom attributes and values - custom_attrs, hier_attrs = get_avalon_attr(self.session) + custom_attrs, hier_attrs = get_pype_attr(self.session) ent_types = self.session.query("select id, name from ObjectType").all() ent_types_by_name = { ent_type["name"]: ent_type["id"] for ent_type in ent_types @@ -904,7 +919,7 @@ class SyncEntitiesFactory: project_values[key] = value for key in avalon_hier: - if key == CustAttrIdKey: + if key == CUST_ATTR_ID_KEY: continue value = self.entities_dict[top_id]["avalon_attrs"][key] if value is not None: @@ -1058,7 +1073,7 @@ class SyncEntitiesFactory: same_mongo_id = [] all_mongo_ids = {} 
for ftrack_id, entity_dict in self.entities_dict.items(): - mongo_id = entity_dict["avalon_attrs"].get(CustAttrIdKey) + mongo_id = entity_dict["avalon_attrs"].get(CUST_ATTR_ID_KEY) if not mongo_id: continue if mongo_id in all_mongo_ids: @@ -1089,7 +1104,7 @@ class SyncEntitiesFactory: entity_dict = self.entities_dict[ftrack_id] ent_path = self.get_ent_path(ftrack_id) - mongo_id = entity_dict["avalon_attrs"].get(CustAttrIdKey) + mongo_id = entity_dict["avalon_attrs"].get(CUST_ATTR_ID_KEY) av_ent_by_mongo_id = self.avalon_ents_by_id.get(mongo_id) if av_ent_by_mongo_id: av_ent_ftrack_id = av_ent_by_mongo_id.get("data", {}).get( @@ -1110,7 +1125,9 @@ class SyncEntitiesFactory: continue _entity_dict = self.entities_dict[_ftrack_id] - _mongo_id = _entity_dict["avalon_attrs"][CustAttrIdKey] + _mongo_id = ( + _entity_dict["avalon_attrs"][CUST_ATTR_ID_KEY] + ) _av_ent_by_mongo_id = self.avalon_ents_by_id.get( _mongo_id ) @@ -1503,11 +1520,11 @@ class SyncEntitiesFactory: avalon_attrs = self.entities_dict[ftrack_id]["avalon_attrs"] if ( - CustAttrIdKey not in avalon_attrs or - avalon_attrs[CustAttrIdKey] != avalon_id + CUST_ATTR_ID_KEY not in avalon_attrs or + avalon_attrs[CUST_ATTR_ID_KEY] != avalon_id ): configuration_id = self.entities_dict[ftrack_id][ - "avalon_attrs_id"][CustAttrIdKey] + "avalon_attrs_id"][CUST_ATTR_ID_KEY] _entity_key = collections.OrderedDict({ "configuration_id": configuration_id, @@ -1587,7 +1604,7 @@ class SyncEntitiesFactory: # avalon_archived_by_id avalon_archived_by_name current_id = ( - entity_dict["avalon_attrs"].get(CustAttrIdKey) or "" + entity_dict["avalon_attrs"].get(CUST_ATTR_ID_KEY) or "" ).strip() mongo_id = current_id name = entity_dict["name"] @@ -1623,14 +1640,14 @@ class SyncEntitiesFactory: if current_id != new_id_str: # store mongo id to ftrack entity configuration_id = self.hier_cust_attr_ids_by_key.get( - CustAttrIdKey + CUST_ATTR_ID_KEY ) if not configuration_id: - # NOTE this is for cases when CustAttrIdKey key is not + # NOTE 
this is for cases when CUST_ATTR_ID_KEY key is not # hierarchical custom attribute but per entity type configuration_id = self.entities_dict[ftrack_id][ "avalon_attrs_id" - ][CustAttrIdKey] + ][CUST_ATTR_ID_KEY] _entity_key = collections.OrderedDict({ "configuration_id": configuration_id, @@ -1739,7 +1756,7 @@ class SyncEntitiesFactory: project_item = self.entities_dict[self.ft_project_id]["final_entity"] mongo_id = ( self.entities_dict[self.ft_project_id]["avalon_attrs"].get( - CustAttrIdKey + CUST_ATTR_ID_KEY ) or "" ).strip() @@ -1770,7 +1787,7 @@ class SyncEntitiesFactory: # store mongo id to ftrack entity entity = self.entities_dict[self.ft_project_id]["entity"] - entity["custom_attributes"][CustAttrIdKey] = str(new_id) + entity["custom_attributes"][CUST_ATTR_ID_KEY] = str(new_id) def _bubble_changeability(self, unchangeable_ids): unchangeable_queue = queue.Queue() @@ -2151,7 +2168,7 @@ class SyncEntitiesFactory: if new_entity_id not in p_chilren: self.entities_dict[parent_id]["children"].append(new_entity_id) - cust_attr, hier_attrs = get_avalon_attr(self.session) + cust_attr, hier_attrs = get_pype_attr(self.session) for _attr in cust_attr: key = _attr["key"] if key not in av_entity["data"]: @@ -2167,7 +2184,7 @@ class SyncEntitiesFactory: new_entity["custom_attributes"][key] = value av_entity_id = str(av_entity["_id"]) - new_entity["custom_attributes"][CustAttrIdKey] = av_entity_id + new_entity["custom_attributes"][CUST_ATTR_ID_KEY] = av_entity_id self.ftrack_avalon_mapper[new_entity_id] = av_entity_id self.avalon_ftrack_mapper[av_entity_id] = new_entity_id diff --git a/pype/modules/ftrack/lib/custom_attributes.json b/pype/modules/ftrack/lib/custom_attributes.json new file mode 100644 index 0000000000..17ff6691d3 --- /dev/null +++ b/pype/modules/ftrack/lib/custom_attributes.json @@ -0,0 +1,60 @@ +{ + "show": { + "avalon_auto_sync": { + "label": "Avalon auto-sync", + "type": "boolean", + "write_security_role": ["API", "Administrator"], + "read_security_role": 
["API", "Administrator"] + }, + "library_project": { + "label": "Library Project", + "type": "boolean", + "write_security_role": ["API", "Administrator"], + "read_security_role": ["API", "Administrator"] + } + }, + "is_hierarchical": { + "fps": { + "label": "FPS", + "type": "number", + "config": {"isdecimal": true} + }, + "clipIn": { + "label": "Clip in", + "type": "number" + }, + "clipOut": { + "label": "Clip out", + "type": "number" + }, + "frameStart": { + "label": "Frame start", + "type": "number" + }, + "frameEnd": { + "label": "Frame end", + "type": "number" + }, + "resolutionWidth": { + "label": "Resolution Width", + "type": "number" + }, + "resolutionHeight": { + "label": "Resolution Height", + "type": "number" + }, + "pixelAspect": { + "label": "Pixel aspect", + "type": "number", + "config": {"isdecimal": true} + }, + "handleStart": { + "label": "Frame handles start", + "type": "number" + }, + "handleEnd": { + "label": "Frame handles end", + "type": "number" + } + } +} diff --git a/pype/modules/ftrack/lib/ftrack_app_handler.py b/pype/modules/ftrack/lib/ftrack_app_handler.py index 34ab8c5ee4..23776aced7 100644 --- a/pype/modules/ftrack/lib/ftrack_app_handler.py +++ b/pype/modules/ftrack/lib/ftrack_app_handler.py @@ -1,14 +1,6 @@ -import os -import sys -import copy -import platform -import avalon.lib -import acre -import getpass from pype import lib as pypelib -from pype.api import config, Anatomy +from pype.api import config from .ftrack_action_handler import BaseAction -from avalon.api import last_workfile, HOST_WORKFILE_EXTENSIONS class AppAction(BaseAction): @@ -84,7 +76,7 @@ class AppAction(BaseAction): if ( len(entities) != 1 - or entities[0].entity_type.lower() != 'task' + or entities[0].entity_type.lower() != "task" ): return False @@ -92,21 +84,31 @@ class AppAction(BaseAction): if entity["parent"].entity_type.lower() == "project": return False - ft_project = self.get_project_from_entity(entity) - database = pypelib.get_avalon_database() - 
project_name = ft_project["full_name"] - avalon_project = database[project_name].find_one({ - "type": "project" - }) + avalon_project_apps = event["data"].get("avalon_project_apps", None) + avalon_project_doc = event["data"].get("avalon_project_doc", None) + if avalon_project_apps is None: + if avalon_project_doc is None: + ft_project = self.get_project_from_entity(entity) + database = pypelib.get_avalon_database() + project_name = ft_project["full_name"] + avalon_project_doc = database[project_name].find_one({ + "type": "project" + }) or False + event["data"]["avalon_project_doc"] = avalon_project_doc - if not avalon_project: + if not avalon_project_doc: + return False + + project_apps_config = avalon_project_doc["config"].get("apps", []) + avalon_project_apps = [ + app["name"] for app in project_apps_config + ] or False + event["data"]["avalon_project_apps"] = avalon_project_apps + + if not avalon_project_apps: return False - project_apps = avalon_project["config"].get("apps", []) - apps = [app["name"] for app in project_apps] - if self.identifier in apps: - return True - return False + return self.identifier in avalon_project_apps def _launch(self, event): entities = self._translate_event(event) @@ -142,42 +144,25 @@ class AppAction(BaseAction): """ entity = entities[0] - project_name = entity["project"]["full_name"] - - database = pypelib.get_avalon_database() + task_name = entity["name"] asset_name = entity["parent"]["name"] - asset_document = database[project_name].find_one({ - "type": "asset", - "name": asset_name - }) - - hierarchy = "" - asset_doc_parents = asset_document["data"].get("parents") - if asset_doc_parents: - hierarchy = os.path.join(*asset_doc_parents) - - application = avalon.lib.get_application(self.identifier) - host_name = application["application_dir"] - data = { - "project": { - "name": entity["project"]["full_name"], - "code": entity["project"]["name"] - }, - "task": entity["name"], - "asset": asset_name, - "app": host_name, - 
"hierarchy": hierarchy - } - + project_name = entity["project"]["full_name"] try: - anatomy = Anatomy(project_name) - anatomy_filled = anatomy.format(data) - workdir = os.path.normpath(anatomy_filled["work"]["folder"]) + pypelib.launch_application( + project_name, asset_name, task_name, self.identifier + ) - except Exception as exc: - msg = "Error in anatomy.format: {}".format( - str(exc) + except pypelib.ApplicationLaunchFailed as exc: + self.log.error(str(exc)) + return { + "success": False, + "message": str(exc) + } + + except Exception: + msg = "Unexpected failure of application launch {}".format( + self.label ) self.log.error(msg, exc_info=True) return { @@ -185,146 +170,6 @@ class AppAction(BaseAction): "message": msg } - try: - os.makedirs(workdir) - except FileExistsError: - pass - - last_workfile_path = None - extensions = HOST_WORKFILE_EXTENSIONS.get(host_name) - if extensions: - # Find last workfile - file_template = anatomy.templates["work"]["file"] - data.update({ - "version": 1, - "user": getpass.getuser(), - "ext": extensions[0] - }) - - last_workfile_path = last_workfile( - workdir, file_template, data, extensions, True - ) - - # set environments for Avalon - prep_env = copy.deepcopy(os.environ) - prep_env.update({ - "AVALON_PROJECT": project_name, - "AVALON_ASSET": asset_name, - "AVALON_TASK": entity["name"], - "AVALON_APP": self.identifier.split("_")[0], - "AVALON_APP_NAME": self.identifier, - "AVALON_HIERARCHY": hierarchy, - "AVALON_WORKDIR": workdir - }) - if last_workfile_path: - prep_env["AVALON_LAST_WORKFILE"] = last_workfile_path - prep_env.update(anatomy.roots_obj.root_environments()) - - # collect all parents from the task - parents = [] - for item in entity['link']: - parents.append(session.get(item['type'], item['id'])) - - # collect all the 'environment' attributes from parents - tools_attr = [prep_env["AVALON_APP"], prep_env["AVALON_APP_NAME"]] - tools_env = asset_document["data"].get("tools_env") or [] - tools_attr.extend(tools_env) - 
- tools_env = acre.get_tools(tools_attr) - env = acre.compute(tools_env) - env = acre.merge(env, current_env=dict(prep_env)) - - # Get path to execute - st_temp_path = os.environ["PYPE_CONFIG"] - os_plat = platform.system().lower() - - # Path to folder with launchers - path = os.path.join(st_temp_path, "launchers", os_plat) - - # Full path to executable launcher - execfile = None - - if application.get("launch_hook"): - hook = application.get("launch_hook") - self.log.info("launching hook: {}".format(hook)) - ret_val = pypelib.execute_hook( - application.get("launch_hook"), env=env) - if not ret_val: - return { - 'success': False, - 'message': "Hook didn't finish successfully {0}" - .format(self.label) - } - - if sys.platform == "win32": - for ext in os.environ["PATHEXT"].split(os.pathsep): - fpath = os.path.join(path.strip('"'), self.executable + ext) - if os.path.isfile(fpath) and os.access(fpath, os.X_OK): - execfile = fpath - break - - # Run SW if was found executable - if execfile is None: - return { - "success": False, - "message": "We didn't find launcher for {0}".format( - self.label - ) - } - - popen = avalon.lib.launch( - executable=execfile, args=[], environment=env - ) - - elif (sys.platform.startswith("linux") - or sys.platform.startswith("darwin")): - execfile = os.path.join(path.strip('"'), self.executable) - if not os.path.isfile(execfile): - msg = "Launcher doesn't exist - {}".format(execfile) - - self.log.error(msg) - return { - "success": False, - "message": msg - } - - try: - fp = open(execfile) - except PermissionError as perm_exc: - msg = "Access denied on launcher {} - {}".format( - execfile, perm_exc - ) - - self.log.exception(msg, exc_info=True) - return { - "success": False, - "message": msg - } - - fp.close() - # check executable permission - if not os.access(execfile, os.X_OK): - msg = "No executable permission - {}".format(execfile) - - self.log.error(msg) - return { - "success": False, - "message": msg - } - - # Run SW if was found 
executable - if execfile is None: - return { - "success": False, - "message": "We didn't found launcher for {0}".format( - self.label - ) - } - - popen = avalon.lib.launch( # noqa: F841 - "/usr/bin/env", args=["bash", execfile], environment=env - ) - # Change status of task to In progress presets = config.get_presets()["ftrack"]["ftrack_config"] diff --git a/pype/modules/ftrack/lib/io_nonsingleton.py b/pype/modules/ftrack/lib/io_nonsingleton.py deleted file mode 100644 index da37c657c6..0000000000 --- a/pype/modules/ftrack/lib/io_nonsingleton.py +++ /dev/null @@ -1,460 +0,0 @@ -""" -Wrapper around interactions with the database - -Copy of io module in avalon-core. - - In this case not working as singleton with api.Session! -""" - -import os -import time -import errno -import shutil -import logging -import tempfile -import functools -import contextlib - -from avalon import schema -from avalon.vendor import requests -from avalon.io import extract_port_from_url - -# Third-party dependencies -import pymongo - - -def auto_reconnect(func): - """Handling auto reconnect in 3 retry times""" - @functools.wraps(func) - def decorated(*args, **kwargs): - object = args[0] - for retry in range(3): - try: - return func(*args, **kwargs) - except pymongo.errors.AutoReconnect: - object.log.error("Reconnecting..") - time.sleep(0.1) - else: - raise - - return decorated - - -class DbConnector(object): - - log = logging.getLogger(__name__) - - def __init__(self): - self.Session = {} - self._mongo_client = None - self._sentry_client = None - self._sentry_logging_handler = None - self._database = None - self._is_installed = False - - def __getitem__(self, key): - # gives direct access to collection withou setting `active_table` - return self._database[key] - - def __getattribute__(self, attr): - # not all methods of PyMongo database are implemented with this it is - # possible to use them too - try: - return super(DbConnector, self).__getattribute__(attr) - except AttributeError: - 
cur_proj = self.Session["AVALON_PROJECT"] - return self._database[cur_proj].__getattribute__(attr) - - def install(self): - """Establish a persistent connection to the database""" - if self._is_installed: - return - - logging.basicConfig() - self.Session.update(self._from_environment()) - - timeout = int(self.Session["AVALON_TIMEOUT"]) - mongo_url = self.Session["AVALON_MONGO"] - kwargs = { - "host": mongo_url, - "serverSelectionTimeoutMS": timeout - } - - port = extract_port_from_url(mongo_url) - if port is not None: - kwargs["port"] = int(port) - - self._mongo_client = pymongo.MongoClient(**kwargs) - - for retry in range(3): - try: - t1 = time.time() - self._mongo_client.server_info() - - except Exception: - self.log.error("Retrying..") - time.sleep(1) - timeout *= 1.5 - - else: - break - - else: - raise IOError( - "ERROR: Couldn't connect to %s in " - "less than %.3f ms" % (self.Session["AVALON_MONGO"], timeout)) - - self.log.info("Connected to %s, delay %.3f s" % ( - self.Session["AVALON_MONGO"], time.time() - t1)) - - self._install_sentry() - - self._database = self._mongo_client[self.Session["AVALON_DB"]] - self._is_installed = True - - def _install_sentry(self): - if "AVALON_SENTRY" not in self.Session: - return - - try: - from raven import Client - from raven.handlers.logging import SentryHandler - from raven.conf import setup_logging - except ImportError: - # Note: There was a Sentry address in this Session - return self.log.warning("Sentry disabled, raven not installed") - - client = Client(self.Session["AVALON_SENTRY"]) - - # Transmit log messages to Sentry - handler = SentryHandler(client) - handler.setLevel(logging.WARNING) - - setup_logging(handler) - - self._sentry_client = client - self._sentry_logging_handler = handler - self.log.info( - "Connected to Sentry @ %s" % self.Session["AVALON_SENTRY"] - ) - - def _from_environment(self): - Session = { - item[0]: os.getenv(item[0], item[1]) - for item in ( - # Root directory of projects on disk - 
("AVALON_PROJECTS", None), - - # Name of current Project - ("AVALON_PROJECT", ""), - - # Name of current Asset - ("AVALON_ASSET", ""), - - # Name of current silo - ("AVALON_SILO", ""), - - # Name of current task - ("AVALON_TASK", None), - - # Name of current app - ("AVALON_APP", None), - - # Path to working directory - ("AVALON_WORKDIR", None), - - # Name of current Config - # TODO(marcus): Establish a suitable default config - ("AVALON_CONFIG", "no_config"), - - # Name of Avalon in graphical user interfaces - # Use this to customise the visual appearance of Avalon - # to better integrate with your surrounding pipeline - ("AVALON_LABEL", "Avalon"), - - # Used during any connections to the outside world - ("AVALON_TIMEOUT", "1000"), - - # Address to Asset Database - ("AVALON_MONGO", "mongodb://localhost:27017"), - - # Name of database used in MongoDB - ("AVALON_DB", "avalon"), - - # Address to Sentry - ("AVALON_SENTRY", None), - - # Address to Deadline Web Service - # E.g. http://192.167.0.1:8082 - ("AVALON_DEADLINE", None), - - # Enable features not necessarily stable. 
The user's own risk - ("AVALON_EARLY_ADOPTER", None), - - # Address of central asset repository, contains - # the following interface: - # /upload - # /download - # /manager (optional) - ("AVALON_LOCATION", "http://127.0.0.1"), - - # Boolean of whether to upload published material - # to central asset repository - ("AVALON_UPLOAD", None), - - # Generic username and password - ("AVALON_USERNAME", "avalon"), - ("AVALON_PASSWORD", "secret"), - - # Unique identifier for instances in working files - ("AVALON_INSTANCE_ID", "avalon.instance"), - ("AVALON_CONTAINER_ID", "avalon.container"), - - # Enable debugging - ("AVALON_DEBUG", None), - - ) if os.getenv(item[0], item[1]) is not None - } - - Session["schema"] = "avalon-core:session-2.0" - try: - schema.validate(Session) - except schema.ValidationError as e: - # TODO(marcus): Make this mandatory - self.log.warning(e) - - return Session - - def uninstall(self): - """Close any connection to the database""" - try: - self._mongo_client.close() - except AttributeError: - pass - - self._mongo_client = None - self._database = None - self._is_installed = False - - def active_project(self): - """Return the name of the active project""" - return self.Session["AVALON_PROJECT"] - - def activate_project(self, project_name): - self.Session["AVALON_PROJECT"] = project_name - - def projects(self): - """List available projects - - Returns: - list of project documents - - """ - - collection_names = self.collections() - for project in collection_names: - if project in ("system.indexes",): - continue - - # Each collection will have exactly one project document - document = self.find_project(project) - - if document is not None: - yield document - - def locate(self, path): - """Traverse a hierarchy from top-to-bottom - - Example: - representation = locate(["hulk", "Bruce", "modelDefault", 1, "ma"]) - - Returns: - representation (ObjectId) - - """ - - components = zip( - ("project", "asset", "subset", "version", "representation"), - path - ) 
- - parent = None - for type_, name in components: - latest = (type_ == "version") and name in (None, -1) - - try: - if latest: - parent = self.find_one( - filter={ - "type": type_, - "parent": parent - }, - projection={"_id": 1}, - sort=[("name", -1)] - )["_id"] - else: - parent = self.find_one( - filter={ - "type": type_, - "name": name, - "parent": parent - }, - projection={"_id": 1}, - )["_id"] - - except TypeError: - return None - - return parent - - @auto_reconnect - def collections(self): - return self._database.collection_names() - - @auto_reconnect - def find_project(self, project): - return self._database[project].find_one({"type": "project"}) - - @auto_reconnect - def insert_one(self, item): - assert isinstance(item, dict), "item must be of type " - schema.validate(item) - return self._database[self.Session["AVALON_PROJECT"]].insert_one(item) - - @auto_reconnect - def insert_many(self, items, ordered=True): - # check if all items are valid - assert isinstance(items, list), "`items` must be of type " - for item in items: - assert isinstance(item, dict), "`item` must be of type " - schema.validate(item) - - return self._database[self.Session["AVALON_PROJECT"]].insert_many( - items, - ordered=ordered) - - @auto_reconnect - def find(self, filter, projection=None, sort=None): - return self._database[self.Session["AVALON_PROJECT"]].find( - filter=filter, - projection=projection, - sort=sort - ) - - @auto_reconnect - def find_one(self, filter, projection=None, sort=None): - assert isinstance(filter, dict), "filter must be " - - return self._database[self.Session["AVALON_PROJECT"]].find_one( - filter=filter, - projection=projection, - sort=sort - ) - - @auto_reconnect - def save(self, *args, **kwargs): - return self._database[self.Session["AVALON_PROJECT"]].save( - *args, **kwargs) - - @auto_reconnect - def replace_one(self, filter, replacement): - return self._database[self.Session["AVALON_PROJECT"]].replace_one( - filter, replacement) - - @auto_reconnect - def 
update_many(self, filter, update): - return self._database[self.Session["AVALON_PROJECT"]].update_many( - filter, update) - - @auto_reconnect - def distinct(self, *args, **kwargs): - return self._database[self.Session["AVALON_PROJECT"]].distinct( - *args, **kwargs) - - @auto_reconnect - def drop(self, *args, **kwargs): - return self._database[self.Session["AVALON_PROJECT"]].drop( - *args, **kwargs) - - @auto_reconnect - def delete_many(self, *args, **kwargs): - return self._database[self.Session["AVALON_PROJECT"]].delete_many( - *args, **kwargs) - - def parenthood(self, document): - assert document is not None, "This is a bug" - - parents = list() - - while document.get("parent") is not None: - document = self.find_one({"_id": document["parent"]}) - - if document is None: - break - - if document.get("type") == "master_version": - _document = self.find_one({"_id": document["version_id"]}) - document["data"] = _document["data"] - - parents.append(document) - - return parents - - @contextlib.contextmanager - def tempdir(self): - tempdir = tempfile.mkdtemp() - try: - yield tempdir - finally: - shutil.rmtree(tempdir) - - def download(self, src, dst): - """Download `src` to `dst` - - Arguments: - src (str): URL to source file - dst (str): Absolute path to destination file - - Yields tuple (progress, error): - progress (int): Between 0-100 - error (Exception): Any exception raised when first making connection - - """ - - try: - response = requests.get( - src, - stream=True, - auth=requests.auth.HTTPBasicAuth( - self.Session["AVALON_USERNAME"], - self.Session["AVALON_PASSWORD"] - ) - ) - except requests.ConnectionError as e: - yield None, e - return - - with self.tempdir() as dirname: - tmp = os.path.join(dirname, os.path.basename(src)) - - with open(tmp, "wb") as f: - total_length = response.headers.get("content-length") - - if total_length is None: # no content length header - f.write(response.content) - else: - downloaded = 0 - total_length = int(total_length) - for 
data in response.iter_content(chunk_size=4096): - downloaded += len(data) - f.write(data) - - yield int(100.0 * downloaded / total_length), None - - try: - os.makedirs(os.path.dirname(dst)) - except OSError as e: - # An already existing destination directory is fine. - if e.errno != errno.EEXIST: - raise - - shutil.copy(tmp, dst) diff --git a/pype/modules/ftrack/tray/ftrack_module.py b/pype/modules/ftrack/tray/ftrack_module.py index 674e8cbd4f..0b011c5b33 100644 --- a/pype/modules/ftrack/tray/ftrack_module.py +++ b/pype/modules/ftrack/tray/ftrack_module.py @@ -2,7 +2,7 @@ import os import time import datetime import threading -from Qt import QtCore, QtWidgets +from Qt import QtCore, QtWidgets, QtGui import ftrack_api from ..ftrack_server.lib import check_ftrack_url @@ -10,7 +10,7 @@ from ..ftrack_server import socket_thread from ..lib import credentials from . import login_dialog -from pype.api import Logger +from pype.api import Logger, resources log = Logger().get_logger("FtrackModule", "ftrack") @@ -19,7 +19,7 @@ log = Logger().get_logger("FtrackModule", "ftrack") class FtrackModule: def __init__(self, main_parent=None, parent=None): self.parent = parent - self.widget_login = login_dialog.Login_Dialog_ui(self) + self.thread_action_server = None self.thread_socket_server = None self.thread_timer = None @@ -29,8 +29,22 @@ class FtrackModule: self.bool_action_thread_running = False self.bool_timer_event = False + self.widget_login = login_dialog.CredentialsDialog() + self.widget_login.login_changed.connect(self.on_login_change) + self.widget_login.logout_signal.connect(self.on_logout) + + self.action_credentials = None + self.icon_logged = QtGui.QIcon( + resources.get_resource("icons", "circle_green.png") + ) + self.icon_not_logged = QtGui.QIcon( + resources.get_resource("icons", "circle_orange.png") + ) + def show_login_widget(self): self.widget_login.show() + self.widget_login.activateWindow() + self.widget_login.raise_() def validate(self): validation = False @@ 
-39,9 +53,10 @@ class FtrackModule: ft_api_key = cred.get("api_key") validation = credentials.check_credentials(ft_user, ft_api_key) if validation: + self.widget_login.set_credentials(ft_user, ft_api_key) credentials.set_env(ft_user, ft_api_key) log.info("Connected to Ftrack successfully") - self.loginChange() + self.on_login_change() return validation @@ -60,15 +75,28 @@ class FtrackModule: return validation # Necessary - login_dialog works with this method after logging in - def loginChange(self): + def on_login_change(self): self.bool_logged = True + + if self.action_credentials: + self.action_credentials.setIcon(self.icon_logged) + self.action_credentials.setToolTip( + "Logged as user \"{}\"".format( + self.widget_login.user_input.text() + ) + ) + self.set_menu_visibility() self.start_action_server() - def logout(self): + def on_logout(self): credentials.clear_credentials() self.stop_action_server() + if self.action_credentials: + self.action_credentials.setIcon(self.icon_not_logged) + self.action_credentials.setToolTip("Logged out") + log.info("Logged out of Ftrack") self.bool_logged = False self.set_menu_visibility() @@ -218,43 +246,45 @@ class FtrackModule: # Definition of Tray menu def tray_menu(self, parent_menu): # Menu for Tray App - self.menu = QtWidgets.QMenu('Ftrack', parent_menu) - self.menu.setProperty('submenu', 'on') - - # Actions - server - self.smActionS = self.menu.addMenu("Action server") - - self.aRunActionS = QtWidgets.QAction( - "Run action server", self.smActionS - ) - self.aResetActionS = QtWidgets.QAction( - "Reset action server", self.smActionS - ) - self.aStopActionS = QtWidgets.QAction( - "Stop action server", self.smActionS - ) - - self.aRunActionS.triggered.connect(self.start_action_server) - self.aResetActionS.triggered.connect(self.reset_action_server) - self.aStopActionS.triggered.connect(self.stop_action_server) - - self.smActionS.addAction(self.aRunActionS) - self.smActionS.addAction(self.aResetActionS) - 
self.smActionS.addAction(self.aStopActionS) + tray_menu = QtWidgets.QMenu("Ftrack", parent_menu) # Actions - basic - self.aLogin = QtWidgets.QAction("Login", self.menu) - self.aLogin.triggered.connect(self.validate) - self.aLogout = QtWidgets.QAction("Logout", self.menu) - self.aLogout.triggered.connect(self.logout) + action_credentials = QtWidgets.QAction("Credentials", tray_menu) + action_credentials.triggered.connect(self.show_login_widget) + if self.bool_logged: + icon = self.icon_logged + else: + icon = self.icon_not_logged + action_credentials.setIcon(icon) + tray_menu.addAction(action_credentials) + self.action_credentials = action_credentials - self.menu.addAction(self.aLogin) - self.menu.addAction(self.aLogout) + # Actions - server + tray_server_menu = tray_menu.addMenu("Action server") + self.action_server_run = QtWidgets.QAction( + "Run action server", tray_server_menu + ) + self.action_server_reset = QtWidgets.QAction( + "Reset action server", tray_server_menu + ) + self.action_server_stop = QtWidgets.QAction( + "Stop action server", tray_server_menu + ) + + self.action_server_run.triggered.connect(self.start_action_server) + self.action_server_reset.triggered.connect(self.reset_action_server) + self.action_server_stop.triggered.connect(self.stop_action_server) + + tray_server_menu.addAction(self.action_server_run) + tray_server_menu.addAction(self.action_server_reset) + tray_server_menu.addAction(self.action_server_stop) + + self.tray_server_menu = tray_server_menu self.bool_logged = False self.set_menu_visibility() - parent_menu.addMenu(self.menu) + parent_menu.addMenu(tray_menu) def tray_start(self): self.validate() @@ -264,19 +294,15 @@ class FtrackModule: # Definition of visibility of each menu actions def set_menu_visibility(self): - - self.smActionS.menuAction().setVisible(self.bool_logged) - self.aLogin.setVisible(not self.bool_logged) - self.aLogout.setVisible(self.bool_logged) - + self.tray_server_menu.menuAction().setVisible(self.bool_logged) 
if self.bool_logged is False: if self.bool_timer_event is True: self.stop_timer_thread() return - self.aRunActionS.setVisible(not self.bool_action_server_running) - self.aResetActionS.setVisible(self.bool_action_thread_running) - self.aStopActionS.setVisible(self.bool_action_server_running) + self.action_server_run.setVisible(not self.bool_action_server_running) + self.action_server_reset.setVisible(self.bool_action_thread_running) + self.action_server_stop.setVisible(self.bool_action_server_running) if self.bool_timer_event is False: self.start_timer_thread() diff --git a/pype/modules/ftrack/tray/login_dialog.py b/pype/modules/ftrack/tray/login_dialog.py index e0614513a3..7730ee1609 100644 --- a/pype/modules/ftrack/tray/login_dialog.py +++ b/pype/modules/ftrack/tray/login_dialog.py @@ -7,309 +7,314 @@ from pype.api import resources from Qt import QtCore, QtGui, QtWidgets -class Login_Dialog_ui(QtWidgets.QWidget): - +class CredentialsDialog(QtWidgets.QDialog): SIZE_W = 300 SIZE_H = 230 - loginSignal = QtCore.Signal(object, object, object) - _login_server_thread = None - inputs = [] - buttons = [] - labels = [] + login_changed = QtCore.Signal() + logout_signal = QtCore.Signal() - def __init__(self, parent=None, is_event=False): + def __init__(self, parent=None): + super(CredentialsDialog, self).__init__(parent) - super(Login_Dialog_ui, self).__init__() + self.setWindowTitle("Pype - Ftrack Login") - self.parent = parent - self.is_event = is_event + self._login_server_thread = None + self._is_logged = False + self._in_advance_mode = False - if hasattr(parent, 'icon'): - self.setWindowIcon(self.parent.icon) - elif hasattr(parent, 'parent') and hasattr(parent.parent, 'icon'): - self.setWindowIcon(self.parent.parent.icon) - else: - icon = QtGui.QIcon(resources.pype_icon_filepath()) - self.setWindowIcon(icon) + icon = QtGui.QIcon(resources.pype_icon_filepath()) + self.setWindowIcon(icon) self.setWindowFlags( QtCore.Qt.WindowCloseButtonHint | 
QtCore.Qt.WindowMinimizeButtonHint ) - self.loginSignal.connect(self.loginWithCredentials) - self._translate = QtCore.QCoreApplication.translate - - self.font = QtGui.QFont() - self.font.setFamily("DejaVu Sans Condensed") - self.font.setPointSize(9) - self.font.setBold(True) - self.font.setWeight(50) - self.font.setKerning(True) - - self.resize(self.SIZE_W, self.SIZE_H) self.setMinimumSize(QtCore.QSize(self.SIZE_W, self.SIZE_H)) - self.setMaximumSize(QtCore.QSize(self.SIZE_W+100, self.SIZE_H+100)) + self.setMaximumSize(QtCore.QSize(self.SIZE_W + 100, self.SIZE_H + 100)) self.setStyleSheet(style.load_stylesheet()) - self.setLayout(self._main()) - self.setWindowTitle('Pype - Ftrack Login') + self.login_changed.connect(self._on_login) - def _main(self): - self.main = QtWidgets.QVBoxLayout() - self.main.setObjectName("main") + self.ui_init() - self.form = QtWidgets.QFormLayout() - self.form.setContentsMargins(10, 15, 10, 5) - self.form.setObjectName("form") - - self.ftsite_label = QtWidgets.QLabel("FTrack URL:") - self.ftsite_label.setFont(self.font) - self.ftsite_label.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor)) - self.ftsite_label.setTextFormat(QtCore.Qt.RichText) - self.ftsite_label.setObjectName("user_label") + def ui_init(self): + self.ftsite_label = QtWidgets.QLabel("Ftrack URL:") + self.user_label = QtWidgets.QLabel("Username:") + self.api_label = QtWidgets.QLabel("API Key:") self.ftsite_input = QtWidgets.QLineEdit() - self.ftsite_input.setEnabled(True) - self.ftsite_input.setFrame(True) - self.ftsite_input.setEnabled(False) self.ftsite_input.setReadOnly(True) - self.ftsite_input.setObjectName("ftsite_input") - - self.user_label = QtWidgets.QLabel("Username:") - self.user_label.setFont(self.font) - self.user_label.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor)) - self.user_label.setTextFormat(QtCore.Qt.RichText) - self.user_label.setObjectName("user_label") + self.ftsite_input.setCursor(QtGui.QCursor(QtCore.Qt.IBeamCursor)) self.user_input = 
QtWidgets.QLineEdit() - self.user_input.setEnabled(True) - self.user_input.setFrame(True) - self.user_input.setObjectName("user_input") - self.user_input.setPlaceholderText( - self._translate("main", "user.name") - ) + self.user_input.setPlaceholderText("user.name") self.user_input.textChanged.connect(self._user_changed) - self.api_label = QtWidgets.QLabel("API Key:") - self.api_label.setFont(self.font) - self.api_label.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor)) - self.api_label.setTextFormat(QtCore.Qt.RichText) - self.api_label.setObjectName("api_label") - self.api_input = QtWidgets.QLineEdit() - self.api_input.setEnabled(True) - self.api_input.setFrame(True) - self.api_input.setObjectName("api_input") - self.api_input.setPlaceholderText(self._translate( - "main", "e.g. xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" - )) + self.api_input.setPlaceholderText( + "e.g. xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + ) self.api_input.textChanged.connect(self._api_changed) + input_layout = QtWidgets.QFormLayout() + input_layout.setContentsMargins(10, 15, 10, 5) + + input_layout.addRow(self.ftsite_label, self.ftsite_input) + input_layout.addRow(self.user_label, self.user_input) + input_layout.addRow(self.api_label, self.api_input) + + self.btn_advanced = QtWidgets.QPushButton("Advanced") + self.btn_advanced.clicked.connect(self._on_advanced_clicked) + + self.btn_simple = QtWidgets.QPushButton("Simple") + self.btn_simple.clicked.connect(self._on_simple_clicked) + + self.btn_login = QtWidgets.QPushButton("Login") + self.btn_login.setToolTip( + "Set Username and API Key with entered values" + ) + self.btn_login.clicked.connect(self._on_login_clicked) + + self.btn_ftrack_login = QtWidgets.QPushButton("Ftrack login") + self.btn_ftrack_login.setToolTip("Open browser for Login to Ftrack") + self.btn_ftrack_login.clicked.connect(self._on_ftrack_login_clicked) + + self.btn_logout = QtWidgets.QPushButton("Logout") + self.btn_logout.clicked.connect(self._on_logout_clicked) + + self.btn_close = 
QtWidgets.QPushButton("Close") + self.btn_close.setToolTip("Close this window") + self.btn_close.clicked.connect(self._close_widget) + + btns_layout = QtWidgets.QHBoxLayout() + btns_layout.addWidget(self.btn_advanced) + btns_layout.addWidget(self.btn_simple) + btns_layout.addStretch(1) + btns_layout.addWidget(self.btn_ftrack_login) + btns_layout.addWidget(self.btn_login) + btns_layout.addWidget(self.btn_logout) + btns_layout.addWidget(self.btn_close) + + self.note_label = QtWidgets.QLabel(( + "NOTE: Click on \"{}\" button to log with your default browser" + " or click on \"{}\" button to enter API key manually." + ).format(self.btn_ftrack_login.text(), self.btn_advanced.text())) + + self.note_label.setWordWrap(True) + self.note_label.hide() + self.error_label = QtWidgets.QLabel("") - self.error_label.setFont(self.font) - self.error_label.setTextFormat(QtCore.Qt.RichText) - self.error_label.setObjectName("error_label") self.error_label.setWordWrap(True) self.error_label.hide() - self.form.addRow(self.ftsite_label, self.ftsite_input) - self.form.addRow(self.user_label, self.user_input) - self.form.addRow(self.api_label, self.api_input) - self.form.addRow(self.error_label) + label_layout = QtWidgets.QVBoxLayout() + label_layout.setContentsMargins(10, 5, 10, 5) + label_layout.addWidget(self.note_label) + label_layout.addWidget(self.error_label) - self.btnGroup = QtWidgets.QHBoxLayout() - self.btnGroup.addStretch(1) - self.btnGroup.setObjectName("btnGroup") + main = QtWidgets.QVBoxLayout(self) + main.addLayout(input_layout) + main.addLayout(label_layout) + main.addStretch(1) + main.addLayout(btns_layout) - self.btnEnter = QtWidgets.QPushButton("Login") - self.btnEnter.setToolTip( - 'Set Username and API Key with entered values' - ) - self.btnEnter.clicked.connect(self.enter_credentials) + self.fill_ftrack_url() - self.btnClose = QtWidgets.QPushButton("Close") - self.btnClose.setToolTip('Close this window') - self.btnClose.clicked.connect(self._close_widget) + 
self.set_is_logged(self._is_logged) - self.btnFtrack = QtWidgets.QPushButton("Ftrack") - self.btnFtrack.setToolTip('Open browser for Login to Ftrack') - self.btnFtrack.clicked.connect(self.open_ftrack) + self.setLayout(main) - self.btnGroup.addWidget(self.btnFtrack) - self.btnGroup.addWidget(self.btnEnter) - self.btnGroup.addWidget(self.btnClose) + def fill_ftrack_url(self): + url = os.getenv("FTRACK_SERVER") + checked_url = self.check_url(url) - self.main.addLayout(self.form) - self.main.addLayout(self.btnGroup) + if checked_url is None: + checked_url = "" + self.btn_login.setEnabled(False) + self.btn_ftrack_login.setEnabled(False) - self.inputs.append(self.api_input) - self.inputs.append(self.user_input) - self.inputs.append(self.ftsite_input) + self.api_input.setEnabled(False) + self.user_input.setEnabled(False) + self.ftsite_input.setEnabled(False) - self.enter_site() - return self.main + self.ftsite_input.setText(checked_url) - def enter_site(self): - try: - url = os.getenv('FTRACK_SERVER') - newurl = self.checkUrl(url) + def set_advanced_mode(self, is_advanced): + self._in_advance_mode = is_advanced - if newurl is None: - self.btnEnter.setEnabled(False) - self.btnFtrack.setEnabled(False) - for input in self.inputs: - input.setEnabled(False) - newurl = url + self.error_label.setVisible(False) - self.ftsite_input.setText(newurl) + is_logged = self._is_logged - except Exception: - self.setError("FTRACK_SERVER is not set in templates") - self.btnEnter.setEnabled(False) - self.btnFtrack.setEnabled(False) - for input in self.inputs: - input.setEnabled(False) + self.note_label.setVisible(not is_logged and not is_advanced) + self.btn_ftrack_login.setVisible(not is_logged and not is_advanced) + self.btn_advanced.setVisible(not is_logged and not is_advanced) - def setError(self, msg): + self.btn_login.setVisible(not is_logged and is_advanced) + self.btn_simple.setVisible(not is_logged and is_advanced) + + self.user_label.setVisible(is_logged or is_advanced) + 
self.user_input.setVisible(is_logged or is_advanced) + self.api_label.setVisible(is_logged or is_advanced) + self.api_input.setVisible(is_logged or is_advanced) + if is_advanced: + self.user_input.setFocus() + else: + self.btn_ftrack_login.setFocus() + + def set_is_logged(self, is_logged): + self._is_logged = is_logged + + self.user_input.setReadOnly(is_logged) + self.api_input.setReadOnly(is_logged) + self.user_input.setCursor(QtGui.QCursor(QtCore.Qt.IBeamCursor)) + self.api_input.setCursor(QtGui.QCursor(QtCore.Qt.IBeamCursor)) + + self.btn_logout.setVisible(is_logged) + + self.set_advanced_mode(self._in_advance_mode) + + def set_error(self, msg): self.error_label.setText(msg) self.error_label.show() + def _on_logout_clicked(self): + self.user_input.setText("") + self.api_input.setText("") + self.set_is_logged(False) + self.logout_signal.emit() + + def _on_simple_clicked(self): + self.set_advanced_mode(False) + + def _on_advanced_clicked(self): + self.set_advanced_mode(True) + def _user_changed(self): - self.user_input.setStyleSheet("") + self._not_invalid_input(self.user_input) def _api_changed(self): - self.api_input.setStyleSheet("") + self._not_invalid_input(self.api_input) - def _invalid_input(self, entity): - entity.setStyleSheet("border: 1px solid red;") + def _not_invalid_input(self, input_widget): + input_widget.setStyleSheet("") - def enter_credentials(self): + def _invalid_input(self, input_widget): + input_widget.setStyleSheet("border: 1px solid red;") + + def _on_login(self): + self.set_is_logged(True) + self._close_widget() + + def _on_login_clicked(self): username = self.user_input.text().strip() - apiKey = self.api_input.text().strip() - msg = "You didn't enter " + api_key = self.api_input.text().strip() missing = [] if username == "": missing.append("Username") self._invalid_input(self.user_input) - if apiKey == "": + if api_key == "": missing.append("API Key") self._invalid_input(self.api_input) if len(missing) > 0: - self.setError("{0} 
{1}".format(msg, " and ".join(missing))) + self.set_error("You didn't enter {}".format(" and ".join(missing))) return - verification = credentials.check_credentials(username, apiKey) - - if verification: - credentials.save_credentials(username, apiKey, self.is_event) - credentials.set_env(username, apiKey) - if self.parent is not None: - self.parent.loginChange() - self._close_widget() - else: + if not self.login_with_credentials(username, api_key): self._invalid_input(self.user_input) self._invalid_input(self.api_input) - self.setError( + self.set_error( "We're unable to sign in to Ftrack with these credentials" ) - def open_ftrack(self): - url = self.ftsite_input.text() - self.loginWithCredentials(url, None, None) - - def checkUrl(self, url): - url = url.strip('/ ') - + def _on_ftrack_login_clicked(self): + url = self.check_url(self.ftsite_input.text()) if not url: - self.setError("There is no URL set in Templates") - return - - if 'http' not in url: - if url.endswith('ftrackapp.com'): - url = 'https://' + url - else: - url = 'https://{0}.ftrackapp.com'.format(url) - try: - result = requests.get( - url, - # Old python API will not work with redirect. - allow_redirects=False - ) - except requests.exceptions.RequestException: - self.setError( - 'The server URL set in Templates could not be reached.' - ) - return - - if ( - result.status_code != 200 or 'FTRACK_VERSION' not in result.headers - ): - self.setError( - 'The server URL set in Templates is not a valid ftrack server.' - ) - return - return url - - def loginWithCredentials(self, url, username, apiKey): - url = url.strip('/ ') - - if not url: - self.setError( - 'You need to specify a valid server URL, ' - 'for example https://server-name.ftrackapp.com' - ) - return - - if 'http' not in url: - if url.endswith('ftrackapp.com'): - url = 'https://' + url - else: - url = 'https://{0}.ftrackapp.com'.format(url) - try: - result = requests.get( - url, - # Old python API will not work with redirect. 
- allow_redirects=False - ) - except requests.exceptions.RequestException: - self.setError( - 'The server URL you provided could not be reached.' - ) - return - - if ( - result.status_code != 200 or 'FTRACK_VERSION' not in result.headers - ): - self.setError( - 'The server URL you provided is not a valid ftrack server.' - ) return # If there is an existing server thread running we need to stop it. if self._login_server_thread: - self._login_server_thread.quit() + self._login_server_thread.join() self._login_server_thread = None # If credentials are not properly set, try to get them using a http # server. - if not username or not apiKey: - self._login_server_thread = login_tools.LoginServerThread() - self._login_server_thread.loginSignal.connect(self.loginSignal) - self._login_server_thread.start(url) + self._login_server_thread = login_tools.LoginServerThread( + url, self._result_of_ftrack_thread + ) + self._login_server_thread.start() + + def _result_of_ftrack_thread(self, username, api_key): + if not self.login_with_credentials(username, api_key): + self._invalid_input(self.api_input) + self.set_error(( + "Something happened with Ftrack login." + " Try entering Username and API key manually." 
+ )) + + def login_with_credentials(self, username, api_key): + verification = credentials.check_credentials(username, api_key) + if verification: + credentials.save_credentials(username, api_key, False) + credentials.set_env(username, api_key) + self.set_credentials(username, api_key) + self.login_changed.emit() + return verification + + def set_credentials(self, username, api_key, is_logged=True): + self.user_input.setText(username) + self.api_input.setText(api_key) + + self.error_label.hide() + + self._not_invalid_input(self.ftsite_input) + self._not_invalid_input(self.user_input) + self._not_invalid_input(self.api_input) + + if is_logged is not None: + self.set_is_logged(is_logged) + + def check_url(self, url): + if url is not None: + url = url.strip("/ ") + + if not url: + self.set_error(( + "You need to specify a valid server URL, " + "for example https://server-name.ftrackapp.com" + )) return - verification = credentials.check_credentials(username, apiKey) + if "http" not in url: + if url.endswith("ftrackapp.com"): + url = "https://" + url + else: + url = "https://{}.ftrackapp.com".format(url) + try: + result = requests.get( + url, + # Old python API will not work with redirect. + allow_redirects=False + ) + except requests.exceptions.RequestException: + self.set_error( + "Specified URL could not be reached." + ) + return - if verification is True: - credentials.save_credentials(username, apiKey, self.is_event) - credentials.set_env(username, apiKey) - if self.parent is not None: - self.parent.loginChange() - self._close_widget() + if ( + result.status_code != 200 + or "FTRACK_VERSION" not in result.headers + ): + self.set_error( + "Specified URL does not lead to a valid Ftrack server." 
+ ) + return + return url def closeEvent(self, event): event.ignore() diff --git a/pype/modules/ftrack/tray/login_tools.py b/pype/modules/ftrack/tray/login_tools.py index 02982294f2..e7d22fbc19 100644 --- a/pype/modules/ftrack/tray/login_tools.py +++ b/pype/modules/ftrack/tray/login_tools.py @@ -2,7 +2,7 @@ from http.server import BaseHTTPRequestHandler, HTTPServer from urllib import parse import webbrowser import functools -from Qt import QtCore +import threading from pype.api import resources @@ -55,20 +55,17 @@ class LoginServerHandler(BaseHTTPRequestHandler): ) -class LoginServerThread(QtCore.QThread): +class LoginServerThread(threading.Thread): '''Login server thread.''' - # Login signal. - loginSignal = QtCore.Signal(object, object, object) - - def start(self, url): - '''Start thread.''' + def __init__(self, url, callback): self.url = url - super(LoginServerThread, self).start() + self.callback = callback + super(LoginServerThread, self).__init__() def _handle_login(self, api_user, api_key): '''Login to server with *api_user* and *api_key*.''' - self.loginSignal.emit(self.url, api_user, api_key) + self.callback(api_user, api_key) def run(self): '''Listen for events.''' diff --git a/pype/modules/logging/gui/app.py b/pype/modules/logging/gui/app.py index 99b0b230a9..c0e180c8a1 100644 --- a/pype/modules/logging/gui/app.py +++ b/pype/modules/logging/gui/app.py @@ -8,9 +8,9 @@ class LogsWindow(QtWidgets.QWidget): super(LogsWindow, self).__init__(parent) self.setStyleSheet(style.load_stylesheet()) - self.resize(1200, 800) - logs_widget = LogsWidget(parent=self) + self.resize(1400, 800) log_detail = OutputWidget(parent=self) + logs_widget = LogsWidget(log_detail, parent=self) main_layout = QtWidgets.QHBoxLayout() @@ -18,8 +18,6 @@ class LogsWindow(QtWidgets.QWidget): log_splitter.setOrientation(QtCore.Qt.Horizontal) log_splitter.addWidget(logs_widget) log_splitter.addWidget(log_detail) - log_splitter.setStretchFactor(0, 65) - log_splitter.setStretchFactor(1, 35) 
main_layout.addWidget(log_splitter) @@ -28,10 +26,3 @@ class LogsWindow(QtWidgets.QWidget): self.setLayout(main_layout) self.setWindowTitle("Logs") - - self.logs_widget.active_changed.connect(self.on_selection_changed) - - def on_selection_changed(self): - index = self.logs_widget.selected_log() - node = index.data(self.logs_widget.model.NodeRole) - self.log_detail.set_detail(node) diff --git a/pype/modules/logging/gui/lib.py b/pype/modules/logging/gui/lib.py deleted file mode 100644 index 85782e071e..0000000000 --- a/pype/modules/logging/gui/lib.py +++ /dev/null @@ -1,94 +0,0 @@ -import contextlib -from Qt import QtCore - - -def _iter_model_rows( - model, column, include_root=False -): - """Iterate over all row indices in a model""" - indices = [QtCore.QModelIndex()] # start iteration at root - - for index in indices: - # Add children to the iterations - child_rows = model.rowCount(index) - for child_row in range(child_rows): - child_index = model.index(child_row, column, index) - indices.append(child_index) - - if not include_root and not index.isValid(): - continue - - yield index - - -@contextlib.contextmanager -def preserve_states( - tree_view, column=0, role=None, - preserve_expanded=True, preserve_selection=True, - expanded_role=QtCore.Qt.DisplayRole, selection_role=QtCore.Qt.DisplayRole - -): - """Preserves row selection in QTreeView by column's data role. - - This function is created to maintain the selection status of - the model items. When refresh is triggered the items which are expanded - will stay expanded and vise versa. 
- - tree_view (QWidgets.QTreeView): the tree view nested in the application - column (int): the column to retrieve the data from - role (int): the role which dictates what will be returned - - Returns: - None - - """ - # When `role` is set then override both expanded and selection roles - if role: - expanded_role = role - selection_role = role - - model = tree_view.model() - selection_model = tree_view.selectionModel() - flags = selection_model.Select | selection_model.Rows - - expanded = set() - - if preserve_expanded: - for index in _iter_model_rows( - model, column=column, include_root=False - ): - if tree_view.isExpanded(index): - value = index.data(expanded_role) - expanded.add(value) - - selected = None - - if preserve_selection: - selected_rows = selection_model.selectedRows() - if selected_rows: - selected = set(row.data(selection_role) for row in selected_rows) - - try: - yield - finally: - if expanded: - for index in _iter_model_rows( - model, column=0, include_root=False - ): - value = index.data(expanded_role) - is_expanded = value in expanded - # skip if new index was created meanwhile - if is_expanded is None: - continue - tree_view.setExpanded(index, is_expanded) - - if selected: - # Go through all indices, select the ones with similar data - for index in _iter_model_rows( - model, column=column, include_root=False - ): - value = index.data(selection_role) - state = value in selected - if state: - tree_view.scrollTo(index) # Ensure item is visible - selection_model.select(index, flags) diff --git a/pype/modules/logging/gui/models.py b/pype/modules/logging/gui/models.py index ce1fa236a9..ae2666f501 100644 --- a/pype/modules/logging/gui/models.py +++ b/pype/modules/logging/gui/models.py @@ -1,21 +1,20 @@ import collections -from Qt import QtCore +from Qt import QtCore, QtGui from pype.api import Logger from pypeapp.lib.log import _bootstrap_mongo_log, LOG_COLLECTION_NAME log = Logger().get_logger("LogModel", "LoggingModule") -class 
LogModel(QtCore.QAbstractItemModel): - COLUMNS = [ +class LogModel(QtGui.QStandardItemModel): + COLUMNS = ( "process_name", "hostname", "hostip", "username", "system_name", "started" - ] - + ) colums_mapping = { "process_name": "Process Name", "process_id": "Process Id", @@ -25,30 +24,53 @@ class LogModel(QtCore.QAbstractItemModel): "system_name": "System name", "started": "Started at" } - process_keys = [ + process_keys = ( "process_id", "hostname", "hostip", "username", "system_name", "process_name" - ] - log_keys = [ + ) + log_keys = ( "timestamp", "level", "thread", "threadName", "message", "loggerName", "fileName", "module", "method", "lineNumber" - ] + ) default_value = "- Not set -" - NodeRole = QtCore.Qt.UserRole + 1 + + ROLE_LOGS = QtCore.Qt.UserRole + 2 + ROLE_PROCESS_ID = QtCore.Qt.UserRole + 3 def __init__(self, parent=None): super(LogModel, self).__init__(parent) - self._root_node = Node() + self.log_by_process = None self.dbcon = None + # Crash if connection is not possible to skip this module database = _bootstrap_mongo_log() if LOG_COLLECTION_NAME in database.list_collection_names(): self.dbcon = database[LOG_COLLECTION_NAME] - def add_log(self, log): - node = Node(log) - self._root_node.add_child(node) + def headerData(self, section, orientation, role): + if ( + role == QtCore.Qt.DisplayRole + and orientation == QtCore.Qt.Horizontal + ): + if section < len(self.COLUMNS): + key = self.COLUMNS[section] + return self.colums_mapping.get(key, key) + + super(LogModel, self).headerData(section, orientation, role) + + def add_process_logs(self, process_logs): + items = [] + first_item = True + for key in self.COLUMNS: + display_value = str(process_logs[key]) + item = QtGui.QStandardItem(display_value) + if first_item: + first_item = False + item.setData(process_logs["_logs"], self.ROLE_LOGS) + item.setData(process_logs["process_id"], self.ROLE_PROCESS_ID) + items.append(item) + self.appendRow(items) def refresh(self): self.log_by_process = 
collections.defaultdict(list) @@ -65,16 +87,13 @@ class LogModel(QtCore.QAbstractItemModel): continue if process_id not in self.process_info: - proc_dict = {} + proc_dict = {"_logs": []} for key in self.process_keys: proc_dict[key] = ( item.get(key) or self.default_value ) self.process_info[process_id] = proc_dict - if "_logs" not in self.process_info[process_id]: - self.process_info[process_id]["_logs"] = [] - log_item = {} for key in self.log_keys: log_item[key] = item.get(key) or self.default_value @@ -89,114 +108,29 @@ class LogModel(QtCore.QAbstractItemModel): item["_logs"], key=lambda item: item["timestamp"] ) item["started"] = item["_logs"][0]["timestamp"] - self.add_log(item) + self.add_process_logs(item) self.endResetModel() - def data(self, index, role): - if not index.isValid(): - return None - if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole: - node = index.internalPointer() - column = index.column() +class LogsFilterProxy(QtCore.QSortFilterProxyModel): + def __init__(self, *args, **kwargs): + super(LogsFilterProxy, self).__init__(*args, **kwargs) + self.col_usernames = None + self.filter_usernames = set() - key = self.COLUMNS[column] - if key == "started": - return str(node.get(key, None)) - return node.get(key, None) + def update_users_filter(self, users): + self.filter_usernames = set() + for user in users or tuple(): + self.filter_usernames.add(user) + self.invalidateFilter() - if role == self.NodeRole: - return index.internalPointer() - - def index(self, row, column, parent): - """Return index for row/column under parent""" - - if not parent.isValid(): - parent_node = self._root_node - else: - parent_node = parent.internalPointer() - - child_item = parent_node.child(row) - if child_item: - return self.createIndex(row, column, child_item) - return QtCore.QModelIndex() - - def rowCount(self, parent): - node = self._root_node - if parent.isValid(): - node = parent.internalPointer() - return node.childCount() - - def columnCount(self, 
parent): - return len(self.COLUMNS) - - def parent(self, index): - return QtCore.QModelIndex() - - def headerData(self, section, orientation, role): - if role == QtCore.Qt.DisplayRole: - if section < len(self.COLUMNS): - key = self.COLUMNS[section] - return self.colums_mapping.get(key, key) - - super(LogModel, self).headerData(section, orientation, role) - - def flags(self, index): - return (QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable) - - def clear(self): - self.beginResetModel() - self._root_node = Node() - self.endResetModel() - - -class Node(dict): - """A node that can be represented in a tree view. - - The node can store data just like a dictionary. - - >>> data = {"name": "John", "score": 10} - >>> node = Node(data) - >>> assert node["name"] == "John" - - """ - - def __init__(self, data=None): - super(Node, self).__init__() - - self._children = list() - self._parent = None - - if data is not None: - assert isinstance(data, dict) - self.update(data) - - def childCount(self): - return len(self._children) - - def child(self, row): - if row >= len(self._children): - log.warning("Invalid row as child: {0}".format(row)) - return - - return self._children[row] - - def children(self): - return self._children - - def parent(self): - return self._parent - - def row(self): - """ - Returns: - int: Index of this node under parent""" - if self._parent is not None: - siblings = self.parent().children() - return siblings.index(self) - - def add_child(self, child): - """Add a child to this node""" - child._parent = self - self._children.append(child) + def filterAcceptsRow(self, source_row, source_parent): + if self.col_usernames is not None: + index = self.sourceModel().index( + source_row, self.col_usernames, source_parent + ) + user = index.data(QtCore.Qt.DisplayRole) + if user not in self.filter_usernames: + return False + return True diff --git a/pype/modules/logging/gui/widgets.py b/pype/modules/logging/gui/widgets.py index cf20066397..cd0df283bf 100644 --- 
a/pype/modules/logging/gui/widgets.py +++ b/pype/modules/logging/gui/widgets.py @@ -1,6 +1,6 @@ -from Qt import QtCore, QtWidgets, QtGui -from PyQt5.QtCore import QVariant -from .models import LogModel +from Qt import QtCore, QtWidgets +from avalon.vendor import qtawesome +from .models import LogModel, LogsFilterProxy class SearchComboBox(QtWidgets.QComboBox): @@ -50,37 +50,6 @@ class SearchComboBox(QtWidgets.QComboBox): return text -class CheckableComboBox2(QtWidgets.QComboBox): - def __init__(self, parent=None): - super(CheckableComboBox, self).__init__(parent) - self.view().pressed.connect(self.handleItemPressed) - self._changed = False - - def handleItemPressed(self, index): - item = self.model().itemFromIndex(index) - if item.checkState() == QtCore.Qt.Checked: - item.setCheckState(QtCore.Qt.Unchecked) - else: - item.setCheckState(QtCore.Qt.Checked) - self._changed = True - - def hidePopup(self): - if not self._changed: - super(CheckableComboBox, self).hidePopup() - self._changed = False - - def itemChecked(self, index): - item = self.model().item(index, self.modelColumn()) - return item.checkState() == QtCore.Qt.Checked - - def setItemChecked(self, index, checked=True): - item = self.model().item(index, self.modelColumn()) - if checked: - item.setCheckState(QtCore.Qt.Checked) - else: - item.setCheckState(QtCore.Qt.Unchecked) - - class SelectableMenu(QtWidgets.QMenu): selection_changed = QtCore.Signal() @@ -137,144 +106,108 @@ class CustomCombo(QtWidgets.QWidget): yield action -class CheckableComboBox(QtWidgets.QComboBox): - def __init__(self, parent=None): - super(CheckableComboBox, self).__init__(parent) - - view = QtWidgets.QTreeView() - view.header().hide() - view.setRootIsDecorated(False) - - model = QtGui.QStandardItemModel() - - view.pressed.connect(self.handleItemPressed) - self._changed = False - - self.setView(view) - self.setModel(model) - - self.view = view - self.model = model - - def handleItemPressed(self, index): - item = 
self.model.itemFromIndex(index) - if item.checkState() == QtCore.Qt.Checked: - item.setCheckState(QtCore.Qt.Unchecked) - else: - item.setCheckState(QtCore.Qt.Checked) - self._changed = True - - def hidePopup(self): - if not self._changed: - super(CheckableComboBox, self).hidePopup() - self._changed = False - - def itemChecked(self, index): - item = self.model.item(index, self.modelColumn()) - return item.checkState() == QtCore.Qt.Checked - - def setItemChecked(self, index, checked=True): - item = self.model.item(index, self.modelColumn()) - if checked: - item.setCheckState(QtCore.Qt.Checked) - else: - item.setCheckState(QtCore.Qt.Unchecked) - - def addItems(self, items): - for text, checked in items: - text_item = QtGui.QStandardItem(text) - checked_item = QtGui.QStandardItem() - checked_item.setData(QVariant(checked), QtCore.Qt.CheckStateRole) - self.model.appendRow([text_item, checked_item]) - - class LogsWidget(QtWidgets.QWidget): """A widget that lists the published subsets for an asset""" - active_changed = QtCore.Signal() - - def __init__(self, parent=None): + def __init__(self, detail_widget, parent=None): super(LogsWidget, self).__init__(parent=parent) model = LogModel() + proxy_model = LogsFilterProxy() + proxy_model.setSourceModel(model) + proxy_model.col_usernames = model.COLUMNS.index("username") filter_layout = QtWidgets.QHBoxLayout() # user_filter = SearchComboBox(self, "Users") user_filter = CustomCombo("Users", self) - users = model.dbcon.distinct("user") + users = model.dbcon.distinct("username") user_filter.populate(users) - user_filter.selection_changed.connect(self.user_changed) + user_filter.selection_changed.connect(self._user_changed) + + proxy_model.update_users_filter(users) level_filter = CustomCombo("Levels", self) # levels = [(level, True) for level in model.dbcon.distinct("level")] levels = model.dbcon.distinct("level") level_filter.addItems(levels) + level_filter.selection_changed.connect(self._level_changed) - date_from_label = 
QtWidgets.QLabel("From:") - date_filter_from = QtWidgets.QDateTimeEdit() + detail_widget.update_level_filter(levels) - date_from_layout = QtWidgets.QVBoxLayout() - date_from_layout.addWidget(date_from_label) - date_from_layout.addWidget(date_filter_from) + spacer = QtWidgets.QWidget() - # now = datetime.datetime.now() - # QtCore.QDateTime( - # now.year, - # now.month, - # now.day, - # now.hour, - # now.minute, - # second=0, - # msec=0, - # timeSpec=0 - # ) - date_to_label = QtWidgets.QLabel("To:") - date_filter_to = QtWidgets.QDateTimeEdit() - - date_to_layout = QtWidgets.QVBoxLayout() - date_to_layout.addWidget(date_to_label) - date_to_layout.addWidget(date_filter_to) + icon = qtawesome.icon("fa.refresh", color="white") + refresh_btn = QtWidgets.QPushButton(icon, "") filter_layout.addWidget(user_filter) filter_layout.addWidget(level_filter) - - filter_layout.addLayout(date_from_layout) - filter_layout.addLayout(date_to_layout) + filter_layout.addWidget(spacer, 1) + filter_layout.addWidget(refresh_btn) view = QtWidgets.QTreeView(self) view.setAllColumnsShowFocus(True) + view.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers) layout = QtWidgets.QVBoxLayout(self) layout.setContentsMargins(0, 0, 0, 0) layout.addLayout(filter_layout) layout.addWidget(view) + view.setModel(proxy_model) + view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) view.setSortingEnabled(True) view.sortByColumn( model.COLUMNS.index("started"), - QtCore.Qt.AscendingOrder + QtCore.Qt.DescendingOrder ) - view.setModel(model) - view.pressed.connect(self._on_activated) - # prepare - model.refresh() + view.selectionModel().selectionChanged.connect(self._on_index_change) + refresh_btn.clicked.connect(self._on_refresh_clicked) # Store to memory self.model = model + self.proxy_model = proxy_model self.view = view self.user_filter = user_filter self.level_filter = level_filter - def _on_activated(self, *args, **kwargs): - self.active_changed.emit() + self.detail_widget = detail_widget + 
self.refresh_btn = refresh_btn - def user_changed(self): + # prepare + self.refresh() + + def refresh(self): + self.model.refresh() + self.detail_widget.refresh() + + def _on_refresh_clicked(self): + self.refresh() + + def _on_index_change(self, to_index, from_index): + index = self._selected_log() + if index: + logs = index.data(self.model.ROLE_LOGS) + else: + logs = [] + self.detail_widget.set_detail(logs) + + def _user_changed(self): + checked_values = set() for action in self.user_filter.items(): - print(action) + if action.isChecked(): + checked_values.add(action.text()) + self.proxy_model.update_users_filter(checked_values) + + def _level_changed(self): + checked_values = set() + for action in self.level_filter.items(): + if action.isChecked(): + checked_values.add(action.text()) + self.detail_widget.update_level_filter(checked_values) def on_context_menu(self, point): # TODO will be any actions? it's ready @@ -288,7 +221,7 @@ class LogsWidget(QtWidgets.QWidget): selection = self.view.selectionModel() rows = selection.selectedRows(column=0) - def selected_log(self): + def _selected_log(self): selection = self.view.selectionModel() rows = selection.selectedRows(column=0) if len(rows) == 1: @@ -300,22 +233,55 @@ class OutputWidget(QtWidgets.QWidget): def __init__(self, parent=None): super(OutputWidget, self).__init__(parent=parent) layout = QtWidgets.QVBoxLayout(self) + + show_timecode_checkbox = QtWidgets.QCheckBox("Show timestamp") + output_text = QtWidgets.QTextEdit() output_text.setReadOnly(True) # output_text.setLineWrapMode(QtWidgets.QTextEdit.FixedPixelWidth) + layout.addWidget(show_timecode_checkbox) layout.addWidget(output_text) + show_timecode_checkbox.stateChanged.connect( + self.on_show_timecode_change + ) self.setLayout(layout) self.output_text = output_text + self.show_timecode_checkbox = show_timecode_checkbox + + self.refresh() + + def refresh(self): + self.set_detail() + + def show_timecode(self): + return 
self.show_timecode_checkbox.isChecked() + + def on_show_timecode_change(self): + self.set_detail(self.las_logs) + + def update_level_filter(self, levels): + self.filter_levels = set() + for level in levels or tuple(): + self.filter_levels.add(level.lower()) + + self.set_detail(self.las_logs) def add_line(self, line): self.output_text.append(line) - def set_detail(self, node): + def set_detail(self, logs=None): + self.las_logs = logs self.output_text.clear() - for log in node["_logs"]: + if not logs: + return + + show_timecode = self.show_timecode() + for log in logs: level = log["level"].lower() + if level not in self.filter_levels: + continue line_f = "{message}" if level == "debug": @@ -353,66 +319,13 @@ class OutputWidget(QtWidgets.QWidget): line = line_f.format(**log) + if show_timecode: + timestamp = log["timestamp"] + line = timestamp.strftime("%Y-%d-%m %H:%M:%S") + " " + line + self.add_line(line) if not exc: continue for _line in exc["stackTrace"].split("\n"): self.add_line(_line) - - -class LogDetailWidget(QtWidgets.QWidget): - """A Widget that display information about a specific version""" - data_rows = [ - "user", - "message", - "level", - "logname", - "method", - "module", - "fileName", - "lineNumber", - "host", - "timestamp" - ] - - html_text = u""" -

{user} - {timestamp}

-User
{user}
-
Level
{level}
-
Message
{message}
-
Log Name
{logname}

Method
{method}
-
File
{fileName}
-
Line
{lineNumber}
-
Host
{host}
-
Timestamp
{timestamp}
-""" - - def __init__(self, parent=None): - super(LogDetailWidget, self).__init__(parent=parent) - - layout = QtWidgets.QVBoxLayout(self) - - label = QtWidgets.QLabel("Detail") - detail_widget = QtWidgets.QTextEdit() - detail_widget.setReadOnly(True) - layout.addWidget(label) - layout.addWidget(detail_widget) - - self.detail_widget = detail_widget - - self.setEnabled(True) - - self.set_detail(None) - - def set_detail(self, detail_data): - if not detail_data: - self.detail_widget.setText("") - return - - data = dict() - for row in self.data_rows: - value = detail_data.get(row) or "< Not set >" - data[row] = value - - self.detail_widget.setHtml(self.html_text.format(**data)) diff --git a/pype/modules/standalonepublish/__init__.py b/pype/modules/standalonepublish/__init__.py index 8e615afbea..4038b696d9 100644 --- a/pype/modules/standalonepublish/__init__.py +++ b/pype/modules/standalonepublish/__init__.py @@ -1,14 +1,5 @@ -PUBLISH_PATHS = [] - from .standalonepublish_module import StandAlonePublishModule -from .app import ( - show, - cli -) -__all__ = [ - "show", - "cli" -] + def tray_init(tray_widget, main_widget): return StandAlonePublishModule(main_widget, tray_widget) diff --git a/pype/modules/standalonepublish/__main__.py b/pype/modules/standalonepublish/__main__.py deleted file mode 100644 index d77bc585c5..0000000000 --- a/pype/modules/standalonepublish/__main__.py +++ /dev/null @@ -1,5 +0,0 @@ -from . import cli - -if __name__ == '__main__': - import sys - sys.exit(cli(sys.argv[1:])) diff --git a/pype/modules/standalonepublish/publish.py b/pype/modules/standalonepublish/publish.py deleted file mode 100644 index dd65030f7a..0000000000 --- a/pype/modules/standalonepublish/publish.py +++ /dev/null @@ -1,165 +0,0 @@ -import os -import sys -import json -import tempfile -import random -import string - -from avalon import io, api -from avalon.tools import publish as av_publish - -import pype -from pype.api import execute - -import pyblish.api -from . 
import PUBLISH_PATHS - - -def set_context(project, asset, task, app): - ''' Sets context for pyblish (must be done before pyblish is launched) - :param project: Name of `Project` where instance should be published - :type project: str - :param asset: Name of `Asset` where instance should be published - :type asset: str - ''' - os.environ["AVALON_PROJECT"] = project - io.Session["AVALON_PROJECT"] = project - os.environ["AVALON_ASSET"] = asset - io.Session["AVALON_ASSET"] = asset - if not task: - task = '' - os.environ["AVALON_TASK"] = task - io.Session["AVALON_TASK"] = task - - io.install() - - av_project = io.find_one({'type': 'project'}) - av_asset = io.find_one({ - "type": 'asset', - "name": asset - }) - - parents = av_asset['data']['parents'] - hierarchy = '' - if parents and len(parents) > 0: - hierarchy = os.path.sep.join(parents) - - os.environ["AVALON_HIERARCHY"] = hierarchy - io.Session["AVALON_HIERARCHY"] = hierarchy - - os.environ["AVALON_PROJECTCODE"] = av_project['data'].get('code', '') - io.Session["AVALON_PROJECTCODE"] = av_project['data'].get('code', '') - - io.Session["current_dir"] = os.path.normpath(os.getcwd()) - - os.environ["AVALON_APP"] = app - io.Session["AVALON_APP"] = app - - io.uninstall() - - -def publish(data, gui=True): - # cli pyblish seems like better solution - return cli_publish(data, gui) - # # this uses avalon pyblish launch tool - # avalon_api_publish(data, gui) - - -def avalon_api_publish(data, gui=True): - ''' Launches Pyblish (GUI by default) - :param data: Should include data for pyblish and standalone collector - :type data: dict - :param gui: Pyblish will be launched in GUI mode if set to True - :type gui: bool - ''' - io.install() - - # Create hash name folder in temp - chars = "".join([random.choice(string.ascii_letters) for i in range(15)]) - staging_dir = tempfile.mkdtemp(chars) - - # create also json and fill with data - json_data_path = staging_dir + os.path.basename(staging_dir) + '.json' - with open(json_data_path, 
'w') as outfile: - json.dump(data, outfile) - - args = [ - "-pp", os.pathsep.join(pyblish.api.registered_paths()) - ] - - envcopy = os.environ.copy() - envcopy["PYBLISH_HOSTS"] = "standalonepublisher" - envcopy["SAPUBLISH_INPATH"] = json_data_path - - if gui: - av_publish.show() - else: - returncode = execute([ - sys.executable, "-u", "-m", "pyblish" - ] + args, env=envcopy) - - io.uninstall() - - -def cli_publish(data, gui=True): - io.install() - - pyblish.api.deregister_all_plugins() - # Registers Global pyblish plugins - pype.install() - # Registers Standalone pyblish plugins - for path in PUBLISH_PATHS: - pyblish.api.register_plugin_path(path) - - project_plugins_paths = os.environ.get("PYPE_PROJECT_PLUGINS") - project_name = os.environ["AVALON_PROJECT"] - if project_plugins_paths and project_name: - for path in project_plugins_paths.split(os.pathsep): - if not path: - continue - plugin_path = os.path.join(path, project_name, "plugins") - if os.path.exists(plugin_path): - pyblish.api.register_plugin_path(plugin_path) - api.register_plugin_path(api.Loader, plugin_path) - api.register_plugin_path(api.Creator, plugin_path) - - # Create hash name folder in temp - chars = "".join([random.choice(string.ascii_letters) for i in range(15)]) - staging_dir = tempfile.mkdtemp(chars) - - # create json for return data - return_data_path = ( - staging_dir + os.path.basename(staging_dir) + 'return.json' - ) - # create also json and fill with data - json_data_path = staging_dir + os.path.basename(staging_dir) + '.json' - with open(json_data_path, 'w') as outfile: - json.dump(data, outfile) - - args = [ - "-pp", os.pathsep.join(pyblish.api.registered_paths()) - ] - - if gui: - args += ["gui"] - - envcopy = os.environ.copy() - envcopy["PYBLISH_HOSTS"] = "standalonepublisher" - envcopy["SAPUBLISH_INPATH"] = json_data_path - envcopy["SAPUBLISH_OUTPATH"] = return_data_path - envcopy["PYBLISH_GUI"] = "pyblish_pype" - - returncode = execute([ - sys.executable, "-u", "-m", "pyblish" - 
] + args, env=envcopy) - - result = {} - if os.path.exists(json_data_path): - with open(json_data_path, "r") as f: - result = json.load(f) - - io.uninstall() - # TODO: check if was pyblish successful - # if successful return True - print('Check result here') - return False diff --git a/pype/modules/standalonepublish/standalonepublish_module.py b/pype/modules/standalonepublish/standalonepublish_module.py index 64195bc271..ed997bfd9f 100644 --- a/pype/modules/standalonepublish/standalonepublish_module.py +++ b/pype/modules/standalonepublish/standalonepublish_module.py @@ -1,21 +1,22 @@ import os -from .app import show -from .widgets import QtWidgets +import sys +import subprocess import pype -from . import PUBLISH_PATHS +from pype import tools class StandAlonePublishModule: - def __init__(self, main_parent=None, parent=None): self.main_parent = main_parent self.parent_widget = parent - PUBLISH_PATHS.clear() - PUBLISH_PATHS.append(os.path.sep.join( - [pype.PLUGINS_DIR, "standalonepublisher", "publish"] - )) + self.publish_paths = [ + os.path.join( + pype.PLUGINS_DIR, "standalonepublisher", "publish" + ) + ] def tray_menu(self, parent_menu): + from Qt import QtWidgets self.run_action = QtWidgets.QAction( "Publish", parent_menu ) @@ -24,9 +25,17 @@ class StandAlonePublishModule: def process_modules(self, modules): if "FtrackModule" in modules: - PUBLISH_PATHS.append(os.path.sep.join( - [pype.PLUGINS_DIR, "ftrack", "publish"] + self.publish_paths.append(os.path.join( + pype.PLUGINS_DIR, "ftrack", "publish" )) def show(self): - show(self.main_parent, False) + standalone_publisher_tool_path = os.path.join( + os.path.dirname(tools.__file__), + "standalonepublish" + ) + subprocess.Popen([ + sys.executable, + standalone_publisher_tool_path, + os.pathsep.join(self.publish_paths).replace("\\", "/") + ]) diff --git a/pype/modules/standalonepublish/widgets/button_from_svgs.py b/pype/modules/standalonepublish/widgets/button_from_svgs.py deleted file mode 100644 index 
4255c5f29b..0000000000 --- a/pype/modules/standalonepublish/widgets/button_from_svgs.py +++ /dev/null @@ -1,113 +0,0 @@ -from xml.dom import minidom - -from . import QtGui, QtCore, QtWidgets -from PyQt5 import QtSvg, QtXml - - -class SvgResizable(QtSvg.QSvgWidget): - clicked = QtCore.Signal() - - def __init__(self, filepath, width=None, height=None, fill=None): - super().__init__() - self.xmldoc = minidom.parse(filepath) - itemlist = self.xmldoc.getElementsByTagName('svg') - for element in itemlist: - if fill: - element.setAttribute('fill', str(fill)) - # TODO auto scale if only one is set - if width is not None and height is not None: - self.setMaximumSize(width, height) - self.setMinimumSize(width, height) - xml_string = self.xmldoc.toxml() - svg_bytes = bytearray(xml_string, encoding='utf-8') - - self.load(svg_bytes) - - def change_color(self, color): - element = self.xmldoc.getElementsByTagName('svg')[0] - element.setAttribute('fill', str(color)) - xml_string = self.xmldoc.toxml() - svg_bytes = bytearray(xml_string, encoding='utf-8') - self.load(svg_bytes) - - def mousePressEvent(self, event): - self.clicked.emit() - - -class SvgButton(QtWidgets.QFrame): - clicked = QtCore.Signal() - def __init__( - self, filepath, width=None, height=None, fills=[], - parent=None, checkable=True - ): - super().__init__(parent) - self.checkable = checkable - self.checked = False - - xmldoc = minidom.parse(filepath) - element = xmldoc.getElementsByTagName('svg')[0] - c_actual = '#777777' - if element.hasAttribute('fill'): - c_actual = element.getAttribute('fill') - self.store_fills(fills, c_actual) - - self.installEventFilter(self) - self.svg_widget = SvgResizable(filepath, width, height, self.c_normal) - xmldoc = minidom.parse(filepath) - - layout = QtWidgets.QHBoxLayout(self) - layout.setSpacing(0) - layout.setContentsMargins(0, 0, 0, 0) - layout.addWidget(self.svg_widget) - - if width is not None and height is not None: - self.setMaximumSize(width, height) - 
self.setMinimumSize(width, height) - - def store_fills(self, fills, actual): - if len(fills) == 0: - fills = [actual, actual, actual, actual] - elif len(fills) == 1: - fills = [fills[0], fills[0], fills[0], fills[0]] - elif len(fills) == 2: - fills = [fills[0], fills[1], fills[1], fills[1]] - elif len(fills) == 3: - fills = [fills[0], fills[1], fills[2], fills[2]] - self.c_normal = fills[0] - self.c_hover = fills[1] - self.c_active = fills[2] - self.c_active_hover = fills[3] - - def eventFilter(self, object, event): - if event.type() == QtCore.QEvent.Enter: - self.hoverEnterEvent(event) - return True - elif event.type() == QtCore.QEvent.Leave: - self.hoverLeaveEvent(event) - return True - elif event.type() == QtCore.QEvent.MouseButtonRelease: - self.mousePressEvent(event) - return False - - def change_checked(self, hover=True): - if self.checkable: - self.checked = not self.checked - if hover: - self.hoverEnterEvent() - else: - self.hoverLeaveEvent() - - def hoverEnterEvent(self, event=None): - color = self.c_hover - if self.checked: - color = self.c_active_hover - self.svg_widget.change_color(color) - - def hoverLeaveEvent(self, event=None): - color = self.c_normal - if self.checked: - color = self.c_active - self.svg_widget.change_color(color) - - def mousePressEvent(self, event=None): - self.clicked.emit() diff --git a/pype/modules/websocket_server/__init__.py b/pype/modules/websocket_server/__init__.py new file mode 100644 index 0000000000..eb5a0d9f27 --- /dev/null +++ b/pype/modules/websocket_server/__init__.py @@ -0,0 +1,5 @@ +from .websocket_server import WebSocketServer + + +def tray_init(tray_widget, main_widget): + return WebSocketServer() diff --git a/pype/modules/websocket_server/hosts/__init__.py b/pype/modules/websocket_server/hosts/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pype/modules/websocket_server/hosts/external_app_1.py b/pype/modules/websocket_server/hosts/external_app_1.py new file mode 100644 index 
0000000000..9352787175 --- /dev/null +++ b/pype/modules/websocket_server/hosts/external_app_1.py @@ -0,0 +1,47 @@ +import asyncio + +from pype.api import Logger +from wsrpc_aiohttp import WebSocketRoute + +log = Logger().get_logger("WebsocketServer") + + +class ExternalApp1(WebSocketRoute): + """ + One route, mimicking external application (like Harmony, etc). + All functions could be called from client. + 'do_notify' function calls function on the client - mimicking + notification after long running job on the server or similar + """ + + def init(self, **kwargs): + # Python __init__ must be return "self". + # This method might return anything. + log.debug("someone called ExternalApp1 route") + return kwargs + + async def server_function_one(self): + log.info('In function one') + + async def server_function_two(self): + log.info('In function two') + return 'function two' + + async def server_function_three(self): + log.info('In function three') + asyncio.ensure_future(self.do_notify()) + return '{"message":"function tree"}' + + async def server_function_four(self, *args, **kwargs): + log.info('In function four args {} kwargs {}'.format(args, kwargs)) + ret = dict(**kwargs) + ret["message"] = "function four received arguments" + return str(ret) + + # This method calls function on the client side + async def do_notify(self): + import time + time.sleep(5) + log.info('Calling function on server after delay') + awesome = 'Somebody server_function_three method!' + await self.socket.call('notify', result=awesome) diff --git a/pype/modules/websocket_server/test_client/wsrpc_client.html b/pype/modules/websocket_server/test_client/wsrpc_client.html new file mode 100644 index 0000000000..9c3f469aca --- /dev/null +++ b/pype/modules/websocket_server/test_client/wsrpc_client.html @@ -0,0 +1,179 @@ + + + + + Title + + + + + + + + + + + + + +
+
Test of wsrpc javascript client
+ +
+ +
+
+
+
+

No return value

+
+
+
    +
  • Calls server_function_one
  • +
  • Function only logs on server
  • +
  • No return value
  • +
  •  
  • +
  •  
  • +
  •  
  • +
+ +
+
+
+
+

Return value

+
+
+
    +
  • Calls server_function_two
  • +
  • Function logs on server
  • +
  • Returns simple text value
  • +
  •  
  • +
  •  
  • +
  •  
  • +
+ +
+
+
+
+

Notify

+
+
+
    +
  • Calls server_function_three
  • +
  • Function logs on server
  • +
  • Returns json payload
  • +
  • Server then calls function ON the client after delay
  • +
  •  
  • +
+ +
+
+
+
+

Send value

+
+
+
    +
  • Calls server_function_four
  • +
  • Function logs on server
  • +
  • Returns modified sent values
  • +
  •  
  • +
  •  
  • +
  •  
  • +
+ +
+
+
+
+ + + \ No newline at end of file diff --git a/pype/modules/websocket_server/test_client/wsrpc_client.py b/pype/modules/websocket_server/test_client/wsrpc_client.py new file mode 100644 index 0000000000..ef861513ae --- /dev/null +++ b/pype/modules/websocket_server/test_client/wsrpc_client.py @@ -0,0 +1,34 @@ +import asyncio + +from wsrpc_aiohttp import WSRPCClient + +""" + Simple testing Python client for wsrpc_aiohttp + Calls sequentially multiple methods on server +""" + +loop = asyncio.get_event_loop() + + +async def main(): + print("main") + client = WSRPCClient("ws://127.0.0.1:8099/ws/", + loop=asyncio.get_event_loop()) + + client.add_route('notify', notify) + await client.connect() + print("connected") + print(await client.proxy.ExternalApp1.server_function_one()) + print(await client.proxy.ExternalApp1.server_function_two()) + print(await client.proxy.ExternalApp1.server_function_three()) + print(await client.proxy.ExternalApp1.server_function_four(foo="one")) + await client.close() + + +def notify(socket, *args, **kwargs): + print("called from server") + + +if __name__ == "__main__": + # loop.run_until_complete(main()) + asyncio.run(main()) diff --git a/pype/modules/websocket_server/websocket_server.py b/pype/modules/websocket_server/websocket_server.py new file mode 100644 index 0000000000..56e71ea895 --- /dev/null +++ b/pype/modules/websocket_server/websocket_server.py @@ -0,0 +1,187 @@ +from pype.api import config, Logger + +import threading +from aiohttp import web +import asyncio +from wsrpc_aiohttp import STATIC_DIR, WebSocketAsync + +import os +import sys +import pyclbr +import importlib + +log = Logger().get_logger("WebsocketServer") + + +class WebSocketServer(): + """ + Basic POC implementation of asychronic websocket RPC server. + Uses class in external_app_1.py to mimic implementation for single + external application. 
+ 'test_client' folder contains two test implementations of client + + WIP + """ + + def __init__(self): + self.qaction = None + self.failed_icon = None + self._is_running = False + default_port = 8099 + + try: + self.presets = config.get_presets()["services"]["websocket_server"] + except Exception: + self.presets = {"default_port": default_port, "exclude_ports": []} + log.debug(( + "There are not set presets for WebsocketServer." + " Using defaults \"{}\"" + ).format(str(self.presets))) + + self.app = web.Application() + + self.app.router.add_route("*", "/ws/", WebSocketAsync) + self.app.router.add_static("/js", STATIC_DIR) + self.app.router.add_static("/", ".") + + # add route with multiple methods for single "external app" + directories_with_routes = ['hosts'] + self.add_routes_for_directories(directories_with_routes) + + self.websocket_thread = WebsocketServerThread(self, default_port) + + def add_routes_for_directories(self, directories_with_routes): + """ Loops through selected directories to find all modules and + in them all classes implementing 'WebSocketRoute' that could be + used as route. + All methods in these classes are registered automatically. 
+ """ + for dir_name in directories_with_routes: + dir_name = os.path.join(os.path.dirname(__file__), dir_name) + for file_name in os.listdir(dir_name): + if '.py' in file_name and '__' not in file_name: + self.add_routes_for_module(file_name, dir_name) + + def add_routes_for_module(self, file_name, dir_name): + """ Auto routes for all classes implementing 'WebSocketRoute' + in 'file_name' in 'dir_name' + """ + module_name = file_name.replace('.py', '') + module_info = pyclbr.readmodule(module_name, [dir_name]) + + for class_name, cls_object in module_info.items(): + sys.path.append(dir_name) + if 'WebSocketRoute' in cls_object.super: + log.debug('Adding route for {}'.format(class_name)) + module = importlib.import_module(module_name) + cls = getattr(module, class_name) + WebSocketAsync.add_route(class_name, cls) + sys.path.pop() + + def tray_start(self): + self.websocket_thread.start() + + def tray_exit(self): + self.stop() + + def stop_websocket_server(self): + + self.stop() + + @property + def is_running(self): + return self.websocket_thread.is_running + + def stop(self): + if not self.is_running: + return + try: + log.debug("Stopping websocket server") + self.websocket_thread.is_running = False + self.websocket_thread.stop() + except Exception: + log.warning( + "Error has happened during Killing websocket server", + exc_info=True + ) + + def thread_stopped(self): + self._is_running = False + + +class WebsocketServerThread(threading.Thread): + """ Listener for websocket rpc requests. 
+ + It would be probably better to "attach" this to main thread (as for + example Harmony needs to run something on main thread), but currently + it creates separate thread and separate asyncio event loop + """ + def __init__(self, module, port): + super(WebsocketServerThread, self).__init__() + self.is_running = False + self.port = port + self.module = module + self.loop = None + self.runner = None + self.site = None + + def run(self): + self.is_running = True + + try: + log.info("Starting websocket server") + self.loop = asyncio.new_event_loop() # create new loop for thread + asyncio.set_event_loop(self.loop) + + self.loop.run_until_complete(self.start_server()) + + log.debug( + "Running Websocket server on URL:" + " \"ws://localhost:{}\"".format(self.port) + ) + + asyncio.ensure_future(self.check_shutdown(), loop=self.loop) + self.loop.run_forever() + except Exception: + log.warning( + "Websocket Server service has failed", exc_info=True + ) + finally: + self.loop.close() # optional + + self.is_running = False + self.module.thread_stopped() + log.info("Websocket server stopped") + + async def start_server(self): + """ Starts runner and TCPsite """ + self.runner = web.AppRunner(self.module.app) + await self.runner.setup() + self.site = web.TCPSite(self.runner, 'localhost', self.port) + await self.site.start() + + def stop(self): + """Sets is_running flag to false, 'check_shutdown' shuts server down""" + self.is_running = False + + async def check_shutdown(self): + """ Future that is running and checks if server should be running + periodically. 
+ """ + while self.is_running: + await asyncio.sleep(0.5) + + log.debug("Starting shutdown") + await self.site.stop() + log.debug("Site stopped") + await self.runner.cleanup() + log.debug("Runner stopped") + tasks = [task for task in asyncio.all_tasks() if + task is not asyncio.current_task()] + list(map(lambda task: task.cancel(), tasks)) # cancel all the tasks + results = await asyncio.gather(*tasks, return_exceptions=True) + log.debug(f'Finished awaiting cancelled tasks, results: {results}...') + await self.loop.shutdown_asyncgens() + # to really make sure everything else has time to stop + await asyncio.sleep(0.07) + self.loop.stop() diff --git a/pype/plugins/blender/load/load_camera.py b/pype/plugins/blender/load/load_camera.py index 9dd5c2bfd8..eb53870d5c 100644 --- a/pype/plugins/blender/load/load_camera.py +++ b/pype/plugins/blender/load/load_camera.py @@ -50,26 +50,26 @@ class BlendCameraLoader(pype.hosts.blender.plugin.AssetLoader): objects_list = [] for obj in camera_container.objects: - obj = obj.make_local() - obj.data.make_local() + local_obj = obj.make_local() + local_obj.data.make_local() - if not obj.get(blender.pipeline.AVALON_PROPERTY): - obj[blender.pipeline.AVALON_PROPERTY] = dict() + if not local_obj.get(blender.pipeline.AVALON_PROPERTY): + local_obj[blender.pipeline.AVALON_PROPERTY] = dict() - avalon_info = obj[blender.pipeline.AVALON_PROPERTY] + avalon_info = local_obj[blender.pipeline.AVALON_PROPERTY] avalon_info.update({"container_name": container_name}) if actions[0] is not None: - if obj.animation_data is None: - obj.animation_data_create() - obj.animation_data.action = actions[0] + if local_obj.animation_data is None: + local_obj.animation_data_create() + local_obj.animation_data.action = actions[0] if actions[1] is not None: - if obj.data.animation_data is None: - obj.data.animation_data_create() - obj.data.animation_data.action = actions[1] + if local_obj.data.animation_data is None: + local_obj.data.animation_data_create() + 
local_obj.data.animation_data.action = actions[1] - objects_list.append(obj) + objects_list.append(local_obj) camera_container.pop(blender.pipeline.AVALON_PROPERTY) @@ -189,7 +189,16 @@ class BlendCameraLoader(pype.hosts.blender.plugin.AssetLoader): camera = objects[0] - actions = (camera.animation_data.action, camera.data.animation_data.action) + camera_action = None + camera_data_action = None + + if camera.animation_data and camera.animation_data.action: + camera_action = camera.animation_data.action + + if camera.data.animation_data and camera.data.animation_data.action: + camera_data_action = camera.data.animation_data.action + + actions = (camera_action, camera_data_action) self._remove(objects, lib_container) diff --git a/pype/plugins/blender/load/load_layout.py b/pype/plugins/blender/load/load_layout.py index 6a51d7cf16..2c8948dd48 100644 --- a/pype/plugins/blender/load/load_layout.py +++ b/pype/plugins/blender/load/load_layout.py @@ -79,21 +79,21 @@ class BlendLayoutLoader(plugin.AssetLoader): # The armature is unparented for all the non-local meshes, # when it is made local. 
for obj in objects + armatures: - obj.make_local() + local_obj = obj.make_local() if obj.data: obj.data.make_local() - if not obj.get(blender.pipeline.AVALON_PROPERTY): - obj[blender.pipeline.AVALON_PROPERTY] = dict() + if not local_obj.get(blender.pipeline.AVALON_PROPERTY): + local_obj[blender.pipeline.AVALON_PROPERTY] = dict() - avalon_info = obj[blender.pipeline.AVALON_PROPERTY] + avalon_info = local_obj[blender.pipeline.AVALON_PROPERTY] avalon_info.update({"container_name": container_name}) - action = actions.get(obj.name, None) + action = actions.get(local_obj.name, None) + + if local_obj.type == 'ARMATURE' and action is not None: + local_obj.animation_data.action = action - if obj.type == 'ARMATURE' and action is not None: - obj.animation_data.action = action - layout_container.pop(blender.pipeline.AVALON_PROPERTY) bpy.ops.object.select_all(action='DESELECT') @@ -222,7 +222,8 @@ class BlendLayoutLoader(plugin.AssetLoader): for obj in objects: if obj.type == 'ARMATURE': - actions[obj.name] = obj.animation_data.action + if obj.animation_data and obj.animation_data.action: + actions[obj.name] = obj.animation_data.action self._remove(objects, obj_container) diff --git a/pype/plugins/blender/load/load_model.py b/pype/plugins/blender/load/load_model.py index 4ac86b3aef..59dc00726d 100644 --- a/pype/plugins/blender/load/load_model.py +++ b/pype/plugins/blender/load/load_model.py @@ -53,16 +53,16 @@ class BlendModelLoader(plugin.AssetLoader): model_container.name = container_name for obj in model_container.objects: - plugin.prepare_data(obj, container_name) - plugin.prepare_data(obj.data, container_name) + local_obj = plugin.prepare_data(obj, container_name) + plugin.prepare_data(local_obj.data, container_name) - for material_slot in obj.material_slots: + for material_slot in local_obj.material_slots: plugin.prepare_data(material_slot.material, container_name) if not obj.get(blender.pipeline.AVALON_PROPERTY): - obj[blender.pipeline.AVALON_PROPERTY] = dict() + 
local_obj[blender.pipeline.AVALON_PROPERTY] = dict() - avalon_info = obj[blender.pipeline.AVALON_PROPERTY] + avalon_info = local_obj[blender.pipeline.AVALON_PROPERTY] avalon_info.update({"container_name": container_name}) model_container.pop(blender.pipeline.AVALON_PROPERTY) diff --git a/pype/plugins/blender/load/load_rig.py b/pype/plugins/blender/load/load_rig.py index 6dc2273c6e..7b60b20064 100644 --- a/pype/plugins/blender/load/load_rig.py +++ b/pype/plugins/blender/load/load_rig.py @@ -63,25 +63,25 @@ class BlendRigLoader(plugin.AssetLoader): ] for child in rig_container.children: - plugin.prepare_data(child, container_name) - meshes.extend(child.objects) + local_child = plugin.prepare_data(child, container_name) + meshes.extend(local_child.objects) # Link meshes first, then armatures. # The armature is unparented for all the non-local meshes, # when it is made local. for obj in meshes + armatures: - plugin.prepare_data(obj, container_name) - plugin.prepare_data(obj.data, container_name) - - if not obj.get(blender.pipeline.AVALON_PROPERTY): - obj[blender.pipeline.AVALON_PROPERTY] = dict() - - avalon_info = obj[blender.pipeline.AVALON_PROPERTY] - avalon_info.update({"container_name": container_name}) - - if obj.type == 'ARMATURE' and action is not None: - obj.animation_data.action = action - + local_obj = plugin.prepare_data(obj, container_name) + plugin.prepare_data(local_obj.data, container_name) + + if not local_obj.get(blender.pipeline.AVALON_PROPERTY): + local_obj[blender.pipeline.AVALON_PROPERTY] = dict() + + avalon_info = local_obj[blender.pipeline.AVALON_PROPERTY] + avalon_info.update({"container_name": container_name}) + + if local_obj.type == 'ARMATURE' and action is not None: + local_obj.animation_data.action = action + rig_container.pop(blender.pipeline.AVALON_PROPERTY) bpy.ops.object.select_all(action='DESELECT') @@ -214,7 +214,9 @@ class BlendRigLoader(plugin.AssetLoader): armatures = [obj for obj in objects if obj.type == 'ARMATURE'] 
assert(len(armatures) == 1) - action = armatures[0].animation_data.action + action = None + if armatures[0].animation_data and armatures[0].animation_data.action: + action = armatures[0].animation_data.action parent = plugin.get_parent_collection(obj_container) diff --git a/pype/plugins/celaction/publish/collect_audio.py b/pype/plugins/celaction/publish/collect_audio.py index 610b81d056..c29e212d80 100644 --- a/pype/plugins/celaction/publish/collect_audio.py +++ b/pype/plugins/celaction/publish/collect_audio.py @@ -16,7 +16,7 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin): # get all available representations subsets = pype.get_subsets(asset_entity["name"], - representations=["audio"] + representations=["audio", "wav"] ) self.log.info(f"subsets is: {pformat(subsets)}") diff --git a/pype/plugins/celaction/publish/collect_celaction_instances.py b/pype/plugins/celaction/publish/collect_celaction_instances.py index 431ab722d3..d3d1d264c0 100644 --- a/pype/plugins/celaction/publish/collect_celaction_instances.py +++ b/pype/plugins/celaction/publish/collect_celaction_instances.py @@ -52,7 +52,7 @@ class CollectCelactionInstances(pyblish.api.ContextPlugin): "subset": subset, "label": scene_file, "family": family, - "families": [family], + "families": [family, "ftrack"], "representations": list() }) diff --git a/pype/plugins/celaction/publish/collect_render_path.py b/pype/plugins/celaction/publish/collect_render_path.py index d5fe6c07a5..9cbb0e4880 100644 --- a/pype/plugins/celaction/publish/collect_render_path.py +++ b/pype/plugins/celaction/publish/collect_render_path.py @@ -10,9 +10,14 @@ class CollectRenderPath(pyblish.api.InstancePlugin): order = pyblish.api.CollectorOrder + 0.495 families = ["render.farm"] + # Presets + anatomy_render_key = None + publish_render_metadata = None + def process(self, instance): anatomy = instance.context.data["anatomy"] anatomy_data = copy.deepcopy(instance.data["anatomyData"]) + anatomy_data["family"] = "render" padding = 
anatomy.templates.get("frame_padding", 4) anatomy_data.update({ "frame": f"%0{padding}d", @@ -21,12 +26,31 @@ class CollectRenderPath(pyblish.api.InstancePlugin): anatomy_filled = anatomy.format(anatomy_data) - render_dir = anatomy_filled["render_tmp"]["folder"] - render_path = anatomy_filled["render_tmp"]["path"] + # get anatomy rendering keys + anatomy_render_key = self.anatomy_render_key or "render" + publish_render_metadata = self.publish_render_metadata or "render" + + # get folder and path for rendering images from celaction + render_dir = anatomy_filled[anatomy_render_key]["folder"] + render_path = anatomy_filled[anatomy_render_key]["path"] # create dir if it doesnt exists - os.makedirs(render_dir, exist_ok=True) + try: + if not os.path.isdir(render_dir): + os.makedirs(render_dir, exist_ok=True) + except OSError: + # directory is not available + self.log.warning("Path is unreachable: `{}`".format(render_dir)) + # add rendering path to instance data instance.data["path"] = render_path + # get anatomy for published renders folder path + if anatomy_filled.get(publish_render_metadata): + instance.data["publishRenderMetadataFolder"] = anatomy_filled[ + publish_render_metadata]["folder"] + self.log.info("Metadata render path: `{}`".format( + instance.data["publishRenderMetadataFolder"] + )) + self.log.info(f"Render output path set to: `{render_path}`") diff --git a/pype/plugins/celaction/publish/integrate_version_up.py b/pype/plugins/celaction/publish/integrate_version_up.py index 7fb1efa8aa..e15c5d5bf6 100644 --- a/pype/plugins/celaction/publish/integrate_version_up.py +++ b/pype/plugins/celaction/publish/integrate_version_up.py @@ -1,68 +1,20 @@ import shutil -import re +import pype import pyblish.api class VersionUpScene(pyblish.api.ContextPlugin): - order = pyblish.api.IntegratorOrder + order = pyblish.api.IntegratorOrder + 0.5 label = 'Version Up Scene' - families = ['scene'] + families = ['workfile'] optional = True active = True def process(self, context): 
current_file = context.data.get('currentFile') - v_up = get_version_up(current_file) + v_up = pype.lib.version_up(current_file) self.log.debug('Current file is: {}'.format(current_file)) self.log.debug('Version up: {}'.format(v_up)) shutil.copy2(current_file, v_up) self.log.info('Scene saved into new version: {}'.format(v_up)) - - -def version_get(string, prefix, suffix=None): - """Extract version information from filenames used by DD (and Weta, apparently) - These are _v# or /v# or .v# where v is a prefix string, in our case - we use "v" for render version and "c" for camera track version. - See the version.py and camera.py plugins for usage.""" - - if string is None: - raise ValueError("Empty version string - no match") - - regex = r"[/_.]{}\d+".format(prefix) - matches = re.findall(regex, string, re.IGNORECASE) - if not len(matches): - msg = f"No `_{prefix}#` found in `{string}`" - raise ValueError(msg) - return (matches[-1:][0][1], re.search(r"\d+", matches[-1:][0]).group()) - - -def version_set(string, prefix, oldintval, newintval): - """Changes version information from filenames used by DD (and Weta, apparently) - These are _v# or /v# or .v# where v is a prefix string, in our case - we use "v" for render version and "c" for camera track version. 
- See the version.py and camera.py plugins for usage.""" - - regex = r"[/_.]{}\d+".format(prefix) - matches = re.findall(regex, string, re.IGNORECASE) - if not len(matches): - return "" - - # Filter to retain only version strings with matching numbers - matches = filter(lambda s: int(s[2:]) == oldintval, matches) - - # Replace all version strings with matching numbers - for match in matches: - # use expression instead of expr so 0 prefix does not make octal - fmt = "%%(#)0%dd" % (len(match) - 2) - newfullvalue = match[0] + prefix + str(fmt % {"#": newintval}) - string = re.sub(match, newfullvalue, string) - return string - - -def get_version_up(path): - """ Returns the next version of the path """ - - (prefix, v) = version_get(path, 'v') - v = int(v) - return version_set(path, prefix, v, v + 1) diff --git a/pype/plugins/celaction/publish/submit_celaction_deadline.py b/pype/plugins/celaction/publish/submit_celaction_deadline.py index c749ec111f..30e7175a60 100644 --- a/pype/plugins/celaction/publish/submit_celaction_deadline.py +++ b/pype/plugins/celaction/publish/submit_celaction_deadline.py @@ -34,6 +34,7 @@ class ExtractCelactionDeadline(pyblish.api.InstancePlugin): ] def process(self, instance): + instance.data["toBeRenderedOn"] = "deadline" context = instance.context DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL") @@ -74,6 +75,7 @@ class ExtractCelactionDeadline(pyblish.api.InstancePlugin): resolution_width = instance.data["resolutionWidth"] resolution_height = instance.data["resolutionHeight"] render_dir = os.path.normpath(os.path.dirname(render_path)) + render_path = os.path.normpath(render_path) script_name = os.path.basename(script_path) jobname = "%s - %s" % (script_name, instance.name) @@ -98,6 +100,7 @@ class ExtractCelactionDeadline(pyblish.api.InstancePlugin): args = [ f"{script_path}", "-a", + "-16", "-s ", "-e ", f"-d {render_dir}", @@ -135,8 +138,12 @@ class ExtractCelactionDeadline(pyblish.api.InstancePlugin): # Optional, enable 
double-click to preview rendered # frames from Deadline Monitor - "OutputFilename0": output_filename_0.replace("\\", "/") + "OutputFilename0": output_filename_0.replace("\\", "/"), + # # Asset dependency to wait for at least the scene file to sync. + # "AssetDependency0": script_path + "ScheduledType": "Once", + "JobDelay": "00:00:08:00" }, "PluginInfo": { # Input diff --git a/pype/plugins/ftrack/publish/collect_ftrack_api.py b/pype/plugins/ftrack/publish/collect_ftrack_api.py index 151b8882a3..bbda6da3b0 100644 --- a/pype/plugins/ftrack/publish/collect_ftrack_api.py +++ b/pype/plugins/ftrack/publish/collect_ftrack_api.py @@ -96,6 +96,6 @@ class CollectFtrackApi(pyblish.api.ContextPlugin): task_entity = None self.log.warning("Task name is not set.") - context.data["ftrackProject"] = asset_entity + context.data["ftrackProject"] = project_entity context.data["ftrackEntity"] = asset_entity context.data["ftrackTask"] = task_entity diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_api.py b/pype/plugins/ftrack/publish/integrate_ftrack_api.py index cd94b2a150..0c4c6d49b5 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_api.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_api.py @@ -54,8 +54,52 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): self.log.debug(query) return query - def process(self, instance): + def _set_task_status(self, instance, task_entity, session): + project_entity = instance.context.data.get("ftrackProject") + if not project_entity: + self.log.info("Task status won't be set, project is not known.") + return + if not task_entity: + self.log.info("Task status won't be set, task is not known.") + return + + status_name = instance.context.data.get("ftrackStatus") + if not status_name: + self.log.info("Ftrack status name is not set.") + return + + self.log.debug( + "Ftrack status name will be (maybe) set to \"{}\"".format( + status_name + ) + ) + + project_schema = project_entity["project_schema"] + task_statuses = 
project_schema.get_statuses( + "Task", task_entity["type_id"] + ) + task_statuses_by_low_name = { + status["name"].lower(): status for status in task_statuses + } + status = task_statuses_by_low_name.get(status_name.lower()) + if not status: + self.log.warning(( + "Task status \"{}\" won't be set," + " status is now allowed on task type \"{}\"." + ).format(status_name, task_entity["type"]["name"])) + return + + self.log.info("Setting task status to \"{}\"".format(status_name)) + task_entity["status"] = status + try: + session.commit() + except Exception: + tp, value, tb = sys.exc_info() + session.rollback() + six.reraise(tp, value, tb) + + def process(self, instance): session = instance.context.data["ftrackSession"] if instance.data.get("ftrackTask"): task = instance.data["ftrackTask"] @@ -78,9 +122,11 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): info_msg += ", metadata: {metadata}." used_asset_versions = [] + + self._set_task_status(instance, task, session) + # Iterate over components and publish for data in instance.data.get("ftrackComponentsList", []): - # AssetType # Get existing entity. assettype_data = {"short": "upload"} @@ -94,9 +140,9 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): # Create a new entity if none exits. if not assettype_entity: assettype_entity = session.create("AssetType", assettype_data) - self.log.debug( - "Created new AssetType with data: ".format(assettype_data) - ) + self.log.debug("Created new AssetType with data: {}".format( + assettype_data + )) # Asset # Get existing entity. 
diff --git a/pype/plugins/premiere/publish/integrate_ftrack_component_overwrite.py b/pype/plugins/ftrack/publish/integrate_ftrack_component_overwrite.py similarity index 100% rename from pype/plugins/premiere/publish/integrate_ftrack_component_overwrite.py rename to pype/plugins/ftrack/publish/integrate_ftrack_component_overwrite.py diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py index f5d7689678..2646dc90cc 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py @@ -30,7 +30,8 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): 'audio': 'audio', 'workfile': 'scene', 'animation': 'cache', - 'image': 'img' + 'image': 'img', + 'reference': 'reference' } def process(self, instance): @@ -87,8 +88,14 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): instance.data["frameEnd"] - instance.data["frameStart"] ) - if not comp.get('fps'): - comp['fps'] = instance.context.data['fps'] + fps = comp.get('fps') + if fps is None: + fps = instance.data.get( + "fps", instance.context.data['fps'] + ) + + comp['fps'] = fps + location = self.get_ftrack_location( 'ftrack.server', ft_session ) diff --git a/pype/plugins/ftrack/publish/integrate_hierarchy_ftrack.py b/pype/plugins/ftrack/publish/integrate_hierarchy_ftrack.py index a12fdfd36c..cc569ce2d1 100644 --- a/pype/plugins/ftrack/publish/integrate_hierarchy_ftrack.py +++ b/pype/plugins/ftrack/publish/integrate_hierarchy_ftrack.py @@ -1,9 +1,13 @@ import sys - import six import pyblish.api from avalon import io +try: + from pype.modules.ftrack.lib.avalon_sync import CUST_ATTR_AUTO_SYNC +except Exception: + CUST_ATTR_AUTO_SYNC = "avalon_auto_sync" + class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): """ @@ -31,7 +35,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): order = pyblish.api.IntegratorOrder - 0.04 label = 'Integrate 
Hierarchy To Ftrack' - families = ["clip", "shot"] + families = ["shot"] optional = False def process(self, context): @@ -39,15 +43,32 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): if "hierarchyContext" not in context.data: return + self.session = self.context.data["ftrackSession"] + project_name = self.context.data["projectEntity"]["name"] + query = 'Project where full_name is "{}"'.format(project_name) + project = self.session.query(query).one() + auto_sync_state = project[ + "custom_attributes"][CUST_ATTR_AUTO_SYNC] + if not io.Session: io.install() self.ft_project = None - self.session = context.data["ftrackSession"] input_data = context.data["hierarchyContext"] - self.import_to_ftrack(input_data) + # disable termporarily ftrack project's autosyncing + if auto_sync_state: + self.auto_sync_off(project) + + try: + # import ftrack hierarchy + self.import_to_ftrack(input_data) + except Exception: + raise + finally: + if auto_sync_state: + self.auto_sync_on(project) def import_to_ftrack(self, input_data, parent=None): for entity_name in input_data: @@ -217,3 +238,28 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): six.reraise(tp, value, tb) return entity + + def auto_sync_off(self, project): + project["custom_attributes"][CUST_ATTR_AUTO_SYNC] = False + + self.log.info("Ftrack autosync swithed off") + + try: + self.session.commit() + except Exception: + tp, value, tb = sys.exc_info() + self.session.rollback() + raise + + def auto_sync_on(self, project): + + project["custom_attributes"][CUST_ATTR_AUTO_SYNC] = True + + self.log.info("Ftrack autosync swithed on") + + try: + self.session.commit() + except Exception: + tp, value, tb = sys.exc_info() + self.session.rollback() + raise diff --git a/pype/plugins/fusion/publish/submit_deadline.py b/pype/plugins/fusion/publish/submit_deadline.py index e5deb1b070..0dd34ba713 100644 --- a/pype/plugins/fusion/publish/submit_deadline.py +++ b/pype/plugins/fusion/publish/submit_deadline.py @@ -22,7 
+22,7 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin): families = ["saver.deadline"] def process(self, instance): - + instance.data["toBeRenderedOn"] = "deadline" context = instance.context key = "__hasRun{}".format(self.__class__.__name__) diff --git a/pype/plugins/global/publish/cleanup.py b/pype/plugins/global/publish/cleanup.py index 3ab41f90ca..264a04b8bd 100644 --- a/pype/plugins/global/publish/cleanup.py +++ b/pype/plugins/global/publish/cleanup.py @@ -1,29 +1,9 @@ +# -*- coding: utf-8 -*- +"""Cleanup leftover files from publish.""" import os import shutil import pyblish.api - - -def clean_renders(instance): - transfers = instance.data.get("transfers", list()) - - current_families = instance.data.get("families", list()) - instance_family = instance.data.get("family", None) - dirnames = [] - - for src, dest in transfers: - if os.path.normpath(src) != os.path.normpath(dest): - if instance_family == 'render' or 'render' in current_families: - os.remove(src) - dirnames.append(os.path.dirname(src)) - - # make unique set - cleanup_dirs = set(dirnames) - for dir in cleanup_dirs: - try: - os.rmdir(dir) - except OSError: - # directory is not empty, skipping - continue +import re class CleanUp(pyblish.api.InstancePlugin): @@ -39,7 +19,11 @@ class CleanUp(pyblish.api.InstancePlugin): optional = True active = True + # Presets + paterns = None # list of regex paterns + def process(self, instance): + """Plugin entry point.""" # Get the errored instances failed = [] for result in instance.context.data["results"]: @@ -52,24 +36,100 @@ class CleanUp(pyblish.api.InstancePlugin): ) ) - self.log.info("Cleaning renders ...") - clean_renders(instance) + self.log.info("Cleaning renders new...") + self.clean_renders(instance) if [ef for ef in self.exclude_families if instance.data["family"] in ef]: return import tempfile + temp_root = tempfile.gettempdir() staging_dir = instance.data.get("stagingDir", None) - if not staging_dir or not os.path.exists(staging_dir): - 
self.log.info("No staging directory found: %s" % staging_dir) + + if not staging_dir: + self.log.info("Staging dir not set.") return - temp_root = tempfile.gettempdir() if not os.path.normpath(staging_dir).startswith(temp_root): self.log.info("Skipping cleanup. Staging directory is not in the " "temp folder: %s" % staging_dir) return - self.log.info("Removing staging directory ...") + if not os.path.exists(staging_dir): + self.log.info("No staging directory found: %s" % staging_dir) + return + + self.log.info("Removing staging directory {}".format(staging_dir)) shutil.rmtree(staging_dir) + + def clean_renders(self, instance): + transfers = instance.data.get("transfers", list()) + + current_families = instance.data.get("families", list()) + instance_family = instance.data.get("family", None) + dirnames = [] + transfers_dirs = [] + + for src, dest in transfers: + # fix path inconsistency + src = os.path.normpath(src) + dest = os.path.normpath(dest) + + # add src dir into clearing dir paths (regex paterns) + transfers_dirs.append(os.path.dirname(src)) + + # add dest dir into clearing dir paths (regex paterns) + transfers_dirs.append(os.path.dirname(dest)) + + if os.path.normpath(src) != os.path.normpath(dest): + if instance_family == 'render' or 'render' in current_families: + self.log.info("Removing src: `{}`...".format(src)) + os.remove(src) + + # add dir for cleanup + dirnames.append(os.path.dirname(src)) + + # clean by regex paterns + # make unique set + transfers_dirs = set(transfers_dirs) + + self.log.debug("__ transfers_dirs: `{}`".format(transfers_dirs)) + self.log.debug("__ self.paterns: `{}`".format(self.paterns)) + if self.paterns: + files = list() + # get list of all available content of dirs + for _dir in transfers_dirs: + if not os.path.exists(_dir): + continue + files.extend([ + os.path.join(_dir, f) + for f in os.listdir(_dir)]) + + self.log.debug("__ files: `{}`".format(files)) + + # remove all files which match regex patern + for f in files: + for p 
in self.paterns: + patern = re.compile(p) + if not patern.findall(f): + continue + if not os.path.exists(f): + continue + + self.log.info("Removing file by regex: `{}`".format(f)) + os.remove(f) + + # add dir for cleanup + dirnames.append(os.path.dirname(f)) + + # make unique set + cleanup_dirs = set(dirnames) + + # clean dirs which are empty + for dir in cleanup_dirs: + try: + os.rmdir(dir) + except OSError: + # directory is not empty, skipping + continue diff --git a/pype/plugins/global/publish/collect_anatomy_instance_data.py b/pype/plugins/global/publish/collect_anatomy_instance_data.py index 6528bede2e..44a4d43946 100644 --- a/pype/plugins/global/publish/collect_anatomy_instance_data.py +++ b/pype/plugins/global/publish/collect_anatomy_instance_data.py @@ -39,11 +39,21 @@ class CollectAnatomyInstanceData(pyblish.api.InstancePlugin): anatomy_data = copy.deepcopy(instance.context.data["anatomyData"]) project_entity = instance.context.data["projectEntity"] context_asset_entity = instance.context.data["assetEntity"] + instance_asset_entity = instance.data.get("assetEntity") asset_name = instance.data["asset"] + + # There is possibility that assetEntity on instance is already set + # which can happen in standalone publisher + if ( + instance_asset_entity + and instance_asset_entity["name"] == asset_name + ): + asset_entity = instance_asset_entity + # Check if asset name is the same as what is in context # - they may be different, e.g. 
in NukeStudio - if context_asset_entity["name"] == asset_name: + elif context_asset_entity["name"] == asset_name: asset_entity = context_asset_entity else: @@ -92,6 +102,12 @@ class CollectAnatomyInstanceData(pyblish.api.InstancePlugin): "subset": subset_name, "version": version_number } + if ( + asset_entity + and asset_entity["_id"] != context_asset_entity["_id"] + ): + parents = asset_entity["data"].get("parents") or list() + anatomy_updates["hierarchy"] = "/".join(parents) task_name = instance.data.get("task") if task_name: diff --git a/pype/plugins/global/publish/collect_resources_path.py b/pype/plugins/global/publish/collect_resources_path.py index 734d1f84e4..04a33cd5be 100644 --- a/pype/plugins/global/publish/collect_resources_path.py +++ b/pype/plugins/global/publish/collect_resources_path.py @@ -20,6 +20,40 @@ class CollectResourcesPath(pyblish.api.InstancePlugin): label = "Collect Resources Path" order = pyblish.api.CollectorOrder + 0.495 + families = ["workfile", + "pointcache", + "camera", + "animation", + "model", + "mayaAscii", + "setdress", + "layout", + "ass", + "vdbcache", + "scene", + "vrayproxy", + "render", + "prerender", + "imagesequence", + "rendersetup", + "rig", + "plate", + "look", + "lut", + "yetiRig", + "yeticache", + "nukenodes", + "gizmo", + "source", + "matchmove", + "image", + "source", + "assembly", + "fbx", + "textures", + "action", + "background" + ] def process(self, instance): anatomy = instance.context.data["anatomy"] diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 83ad4af1c2..4443cfe223 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -19,7 +19,15 @@ class ExtractBurnin(pype.api.Extractor): label = "Extract burnins" order = pyblish.api.ExtractorOrder + 0.03 families = ["review", "burnin"] - hosts = ["nuke", "maya", "shell", "nukestudio", "premiere"] + hosts = [ + "nuke", + "maya", + "shell", + 
"nukestudio", + "premiere", + "standalonepublisher", + "harmony" + ] optional = True positions = [ @@ -217,7 +225,7 @@ class ExtractBurnin(pype.api.Extractor): self.log.debug("Executing: {}".format(args)) # Run burnin script - output = pype.api.subprocess(args) + output = pype.api.subprocess(args, shell=True) self.log.debug("Output: {}".format(output)) for filepath in temp_data["full_input_paths"]: @@ -957,7 +965,7 @@ class ExtractBurnin(pype.api.Extractor): args = [executable, scriptpath, json_data] self.log.debug("Executing: {}".format(args)) - output = pype.api.subprocess(args) + output = pype.api.subprocess(args, shell=True) self.log.debug("Output: {}".format(output)) repre_update = { diff --git a/pype/plugins/global/publish/extract_hierarchy_avalon.py b/pype/plugins/global/publish/extract_hierarchy_avalon.py index 83cf03b042..1d8191f2e3 100644 --- a/pype/plugins/global/publish/extract_hierarchy_avalon.py +++ b/pype/plugins/global/publish/extract_hierarchy_avalon.py @@ -7,7 +7,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): order = pyblish.api.ExtractorOrder - 0.01 label = "Extract Hierarchy To Avalon" - families = ["clip", "shot", "editorial"] + families = ["clip", "shot"] def process(self, context): if "hierarchyContext" not in context.data: @@ -78,6 +78,11 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): if entity: # Do not override data, only update cur_entity_data = entity.get("data") or {} + new_tasks = data.pop("tasks", []) + if "tasks" in cur_entity_data and new_tasks: + for task_name in new_tasks: + if task_name not in cur_entity_data["tasks"]: + cur_entity_data["tasks"].append(task_name) cur_entity_data.update(data) data = cur_entity_data else: diff --git a/pype/plugins/global/publish/extract_jpeg.py b/pype/plugins/global/publish/extract_jpeg.py index 9b775f8b6f..89a4bbd664 100644 --- a/pype/plugins/global/publish/extract_jpeg.py +++ b/pype/plugins/global/publish/extract_jpeg.py @@ -26,6 +26,10 @@ class 
ExtractJpegEXR(pyblish.api.InstancePlugin): if instance.data.get("multipartExr") is True: return + # Skip review when requested. + if not instance.data.get("review", True): + return + # get representation and loop them representations = instance.data["representations"] @@ -44,10 +48,11 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): continue if not isinstance(repre['files'], (list, tuple)): - continue + input_file = repre['files'] + else: + input_file = repre['files'][0] stagingdir = os.path.normpath(repre.get("stagingDir")) - input_file = repre['files'][0] # input_file = ( # collections[0].format('{head}{padding}{tail}') % start @@ -83,7 +88,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): # run subprocess self.log.debug("{}".format(subprocess_jpeg)) - pype.api.subprocess(subprocess_jpeg) + pype.api.subprocess(subprocess_jpeg, shell=True) if "representations" not in instance.data: instance.data["representations"] = [] diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 30d1de8328..0bae1b2ddc 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -22,7 +22,15 @@ class ExtractReview(pyblish.api.InstancePlugin): label = "Extract Review" order = pyblish.api.ExtractorOrder + 0.02 families = ["review"] - hosts = ["nuke", "maya", "shell", "nukestudio", "premiere", "harmony"] + hosts = [ + "nuke", + "maya", + "shell", + "nukestudio", + "premiere", + "harmony", + "standalonepublisher" + ] # Supported extensions image_exts = ["exr", "jpg", "jpeg", "png", "dpx"] @@ -42,6 +50,10 @@ class ExtractReview(pyblish.api.InstancePlugin): to_height = 1080 def process(self, instance): + # Skip review when requested. 
+ if not instance.data.get("review", True): + return + # ffmpeg doesn't support multipart exrs if instance.data.get("multipartExr") is True: instance_label = ( @@ -111,6 +123,9 @@ class ExtractReview(pyblish.api.InstancePlugin): if "review" not in tags or "thumbnail" in tags: continue + if "passing" in tags: + continue + input_ext = repre["ext"] if input_ext.startswith("."): input_ext = input_ext[1:] @@ -164,7 +179,7 @@ class ExtractReview(pyblish.api.InstancePlugin): # run subprocess self.log.debug("Executing: {}".format(subprcs_cmd)) - output = pype.api.subprocess(subprcs_cmd) + output = pype.api.subprocess(subprcs_cmd, shell=True) self.log.debug("Output: {}".format(output)) output_name = output_def["filename_suffix"] @@ -182,6 +197,8 @@ class ExtractReview(pyblish.api.InstancePlugin): # Force to pop these key if are in new repre new_repre.pop("preview", None) new_repre.pop("thumbnail", None) + if "clean_name" in new_repre.get("tags", []): + new_repre.pop("outputName") # adding representation self.log.debug( @@ -331,6 +348,12 @@ class ExtractReview(pyblish.api.InstancePlugin): "-i \"{}\"".format(temp_data["full_input_path"]) ) + if temp_data["output_is_sequence"]: + # Set start frame + ffmpeg_input_args.append( + "-start_number {}".format(temp_data["output_frame_start"]) + ) + # Add audio arguments if there are any. Skipped when output are images. 
if not temp_data["output_ext_is_image"]: audio_in_args, audio_filters, audio_out_args = self.audio_args( @@ -1306,7 +1329,8 @@ class ExtractReview(pyblish.api.InstancePlugin): output_args.extend(profile.get('output', [])) # defining image ratios - resolution_ratio = (float(resolution_width) * pixel_aspect) / resolution_height + resolution_ratio = ( + float(resolution_width) * pixel_aspect) / resolution_height delivery_ratio = float(self.to_width) / float(self.to_height) self.log.debug( "__ resolution_ratio: `{}`".format(resolution_ratio)) @@ -1363,7 +1387,8 @@ class ExtractReview(pyblish.api.InstancePlugin): output_args.append("-shortest") if no_handles: - duration_sec = float(frame_end_handle - frame_start_handle + 1) / fps + duration_sec = float( + frame_end_handle - frame_start_handle + 1) / fps output_args.append("-t {:0.2f}".format(duration_sec)) @@ -1385,7 +1410,7 @@ class ExtractReview(pyblish.api.InstancePlugin): self.log.debug("lower then delivery") width_scale = int(self.to_width * scale_factor) width_half_pad = int(( - self.to_width - width_scale)/2) + self.to_width - width_scale) / 2) height_scale = self.to_height height_half_pad = 0 else: @@ -1400,7 +1425,7 @@ class ExtractReview(pyblish.api.InstancePlugin): height_scale = int( resolution_height * scale_factor) height_half_pad = int( - (self.to_height - height_scale)/2) + (self.to_height - height_scale) / 2) self.log.debug( "__ width_scale: `{}`".format(width_scale)) @@ -1417,11 +1442,11 @@ class ExtractReview(pyblish.api.InstancePlugin): scaling_arg = str( "scale={0}x{1}:flags=lanczos," "pad={2}:{3}:{4}:{5}:black,setsar=1" - ).format(width_scale, height_scale, - self.to_width, self.to_height, - width_half_pad, - height_half_pad - ) + ).format(width_scale, height_scale, + self.to_width, self.to_height, + width_half_pad, + height_half_pad + ) vf_back = self.add_video_filter_args( output_args, scaling_arg) @@ -1441,7 +1466,7 @@ class ExtractReview(pyblish.api.InstancePlugin): lut_arg = 
"lut3d=file='{}'".format( lut_path.replace( "\\", "/").replace(":/", "\\:/") - ) + ) lut_arg += ",colormatrix=bt601:bt709" vf_back = self.add_video_filter_args( @@ -1469,7 +1494,7 @@ class ExtractReview(pyblish.api.InstancePlugin): # run subprocess self.log.debug("Executing: {}".format(subprcs_cmd)) - output = pype.api.subprocess(subprcs_cmd) + output = pype.api.subprocess(subprcs_cmd, shell=True) self.log.debug("Output: {}".format(output)) # create representation data @@ -1496,7 +1521,7 @@ class ExtractReview(pyblish.api.InstancePlugin): "outputName": name + "_noHandles", "frameStartFtrack": frame_start, "frameEndFtrack": frame_end - }) + }) if repre_new.get('preview'): repre_new.pop("preview") if repre_new.get('thumbnail'): @@ -1509,6 +1534,8 @@ class ExtractReview(pyblish.api.InstancePlugin): for repre in representations_new: if "delete" in repre.get("tags", []): representations_new.remove(repre) + if "clean_name" in repre.get("tags", []): + repre_new.pop("outputName") instance.data.update({ "reviewToWidth": self.to_width, diff --git a/pype/plugins/global/publish/extract_review_slate.py b/pype/plugins/global/publish/extract_review_slate.py index f2ea6c0875..2e1fc25ae5 100644 --- a/pype/plugins/global/publish/extract_review_slate.py +++ b/pype/plugins/global/publish/extract_review_slate.py @@ -186,7 +186,7 @@ class ExtractReviewSlate(pype.api.Extractor): # run slate generation subprocess self.log.debug("Slate Executing: {}".format(slate_subprcs_cmd)) - slate_output = pype.api.subprocess(slate_subprcs_cmd) + slate_output = pype.api.subprocess(slate_subprcs_cmd, shell=True) self.log.debug("Slate Output: {}".format(slate_output)) # create ffmpeg concat text file path @@ -221,7 +221,7 @@ class ExtractReviewSlate(pype.api.Extractor): # ffmpeg concat subprocess self.log.debug("Executing concat: {}".format(concat_subprcs_cmd)) - concat_output = pype.api.subprocess(concat_subprcs_cmd) + concat_output = pype.api.subprocess(concat_subprcs_cmd, shell=True) 
self.log.debug("Output concat: {}".format(concat_output)) self.log.debug("__ repre[tags]: {}".format(repre["tags"])) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 9f20999f55..f92968e554 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -6,11 +6,15 @@ import copy import clique import errno import six +import re +import shutil from pymongo import DeleteOne, InsertOne import pyblish.api from avalon import io from avalon.vendor import filelink +import pype.api +from datetime import datetime # this is needed until speedcopy for linux is fixed if sys.platform == "win32": @@ -44,6 +48,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "frameStart" "frameEnd" 'fps' + "data": additional metadata for each representation. """ label = "Integrate Asset New" @@ -84,7 +89,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "action", "harmony.template", "harmony.palette", - "editorial" + "editorial", + "background" ] exclude_families = ["clip"] db_representation_context_keys = [ @@ -94,18 +100,28 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): default_template_name = "publish" template_name_profiles = None - def process(self, instance): + # file_url : file_size of all published and uploaded files + integrated_file_sizes = {} + TMP_FILE_EXT = 'tmp' # suffix to denote temporary files, use without '.' 
+ + def process(self, instance): + self.integrated_file_sizes = {} if [ef for ef in self.exclude_families if instance.data["family"] in ef]: return - self.register(instance) - - self.log.info("Integrating Asset in to the database ...") - self.log.info("instance.data: {}".format(instance.data)) - if instance.data.get('transfer', True): - self.integrate(instance) + try: + self.register(instance) + self.log.info("Integrated Asset in to the database ...") + self.log.info("instance.data: {}".format(instance.data)) + self.handle_destination_files(self.integrated_file_sizes, + 'finalize') + except Exception: + # clean destination + self.log.critical("Error when registering", exc_info=True) + self.handle_destination_files(self.integrated_file_sizes, 'remove') + six.reraise(*sys.exc_info()) def register(self, instance): # Required environment variables @@ -145,6 +161,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if task_name: anatomy_data["task"] = task_name + anatomy_data["family"] = instance.data.get("family") + stagingdir = instance.data.get("stagingDir") if not stagingdir: self.log.info(( @@ -266,13 +284,24 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): representations = [] destination_list = [] + orig_transfers = [] if 'transfers' not in instance.data: instance.data['transfers'] = [] + else: + orig_transfers = list(instance.data['transfers']) template_name = self.template_name_from_instance(instance) published_representations = {} for idx, repre in enumerate(instance.data["representations"]): + # reset transfers for next representation + # instance.data['transfers'] is used as a global variable + # in current codebase + instance.data['transfers'] = list(orig_transfers) + + if "delete" in repre.get("tags", []): + continue + published_files = [] # create template data for Anatomy @@ -345,7 +374,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): index_frame_start = None - if repre.get("frameStart"): + if repre.get("frameStart") is not None: 
frame_start_padding = int( anatomy.templates["render"].get( "frame_padding", @@ -372,7 +401,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): dst_padding = src_padding_exp % i - if index_frame_start: + if index_frame_start is not None: dst_padding_exp = "%0{}d".format(frame_start_padding) dst_padding = dst_padding_exp % index_frame_start index_frame_start += 1 @@ -453,13 +482,15 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if repre_id is None: repre_id = io.ObjectId() + data = repre.get("data") or {} + data.update({'path': dst, 'template': template}) representation = { "_id": repre_id, "schema": "pype:representation-2.0", "type": "representation", "parent": version_id, "name": repre['name'], - "data": {'path': dst, 'template': template}, + "data": data, "dependencies": instance.data.get("dependencies", "").split(), # Imprint shortcut to context @@ -475,6 +506,24 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): dst_padding_exp % int(repre.get("frameStart")) ) + # any file that should be physically copied is expected in + # 'transfers' or 'hardlinks' + if instance.data.get('transfers', False) or \ + instance.data.get('hardlinks', False): + # could throw exception, will be caught in 'process' + # all integration to DB is being done together lower, + # so no rollback needed + self.log.debug("Integrating source files to destination ...") + self.integrated_file_sizes.update(self.integrate(instance)) + self.log.debug("Integrated files {}". 
+ format(self.integrated_file_sizes)) + + # get 'files' info for representation and all attached resources + self.log.debug("Preparing files information ...") + representation["files"] = self.get_files_info( + instance, + self.integrated_file_sizes) + self.log.debug("__ representation: {}".format(representation)) destination_list.append(dst) self.log.debug("__ destination_list: {}".format(destination_list)) @@ -512,16 +561,19 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): Args: instance: the instance to integrate + Returns: + integrated_file_sizes: dictionary of destination file url and + its size in bytes """ - transfers = instance.data.get("transfers", list()) - + # store destination url and size for reporting and rollback + integrated_file_sizes = {} + transfers = list(instance.data.get("transfers", list())) for src, dest in transfers: if os.path.normpath(src) != os.path.normpath(dest): + dest = self.get_dest_temp_url(dest) self.copy_file(src, dest) - - transfers = instance.data.get("transfers", list()) - for src, dest in transfers: - self.copy_file(src, dest) + # TODO needs to be updated during site implementation + integrated_file_sizes[dest] = os.path.getsize(dest) # Produce hardlinked copies # Note: hardlink can only be produced between two files on the same @@ -530,8 +582,15 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # to ensure publishes remain safe and non-edited. hardlinks = instance.data.get("hardlinks", list()) for src, dest in hardlinks: - self.log.debug("Hardlinking file .. {} -> {}".format(src, dest)) - self.hardlink_file(src, dest) + dest = self.get_dest_temp_url(dest) + self.log.debug("Hardlinking file ... 
{} -> {}".format(src, dest)) + if not os.path.exists(dest): + self.hardlink_file(src, dest) + + # TODO needs to be updated during site implementation + integrated_file_sizes[dest] = os.path.getsize(dest) + + return integrated_file_sizes def copy_file(self, src, dst): """ Copy given source to destination @@ -544,7 +603,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): """ src = os.path.normpath(src) dst = os.path.normpath(dst) - self.log.debug("Copying file .. {} -> {}".format(src, dst)) + self.log.debug("Copying file ... {} -> {}".format(src, dst)) dirname = os.path.dirname(dst) try: os.makedirs(dirname) @@ -553,20 +612,21 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): pass else: self.log.critical("An unexpected error occurred.") - raise + six.reraise(*sys.exc_info()) # copy file with speedcopy and check if size of files are simetrical while True: + import shutil try: copyfile(src, dst) - except (OSError, AttributeError) as e: - self.log.warning(e) - # try it again with shutil - import shutil + except shutil.SameFileError: + self.log.critical("files are the same {} to {}".format(src, + dst)) + os.remove(dst) try: shutil.copyfile(src, dst) self.log.debug("Copying files with shutil...") - except (OSError) as e: + except OSError as e: self.log.critical("Cannot copy {} to {}".format(src, dst)) self.log.critical(e) six.reraise(*sys.exc_info()) @@ -583,7 +643,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): pass else: self.log.critical("An unexpected error occurred.") - raise + six.reraise(*sys.exc_info()) filelink.create(src, dst, filelink.HARDLINK) @@ -596,7 +656,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): }) if subset is None: - self.log.info("Subset '%s' not found, creating.." % subset_name) + self.log.info("Subset '%s' not found, creating ..." % subset_name) self.log.debug("families. %s" % instance.data.get('families')) self.log.debug( "families. 
%s" % type(instance.data.get('families'))) @@ -666,16 +726,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): else: source = context.data["currentFile"] anatomy = instance.context.data["anatomy"] - success, rootless_path = ( - anatomy.find_root_template_from_path(source) - ) - if success: - source = rootless_path - else: - self.log.warning(( - "Could not find root path for remapping \"{}\"." - " This may cause issues on farm." - ).format(source)) + source = self.get_rootless_path(anatomy, source) self.log.debug("Source: {}".format(source)) version_data = { @@ -746,6 +797,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): value += 1 if value > highest_value: + matching_profiles = {} highest_value = value if value == highest_value: @@ -773,3 +825,167 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): ).format(family, task_name, template_name)) return template_name + + def get_rootless_path(self, anatomy, path): + """ Returns, if possible, path without absolute portion from host + (eg. 'c:\' or '/opt/..') + This information is host dependent and shouldn't be captured. + Example: + 'c:/projects/MyProject1/Assets/publish...' > + '{root}/MyProject1/Assets...' + + Args: + anatomy: anatomy part from instance + path: path (absolute) + Returns: + path: modified path if possible, or unmodified path + + warning logged + """ + success, rootless_path = ( + anatomy.find_root_template_from_path(path) + ) + if success: + path = rootless_path + else: + self.log.warning(( + "Could not find root path for remapping \"{}\"." + " This may cause issues on farm." + ).format(path)) + return path + + def get_files_info(self, instance, integrated_file_sizes): + """ Prepare 'files' portion for attached resources and main asset. + Combining records from 'transfers' and 'hardlinks' parts from + instance. + All attached resources should be added, currently without + Context info. 
+ + Arguments: + instance: the current instance being published + integrated_file_sizes: dictionary of destination path (absolute) + and its file size + Returns: + output_resources: array of dictionaries to be added to 'files' key + in representation + """ + resources = list(instance.data.get("transfers", [])) + resources.extend(list(instance.data.get("hardlinks", []))) + + self.log.debug("get_resource_files_info.resources:{}". + format(resources)) + + output_resources = [] + anatomy = instance.context.data["anatomy"] + for _src, dest in resources: + path = self.get_rootless_path(anatomy, dest) + dest = self.get_dest_temp_url(dest) + file_hash = pype.api.source_hash(dest) + if self.TMP_FILE_EXT and \ + ',{}'.format(self.TMP_FILE_EXT) in file_hash: + file_hash = file_hash.replace(',{}'.format(self.TMP_FILE_EXT), + '') + + file_info = self.prepare_file_info(path, + integrated_file_sizes[dest], + file_hash) + output_resources.append(file_info) + + return output_resources + + def get_dest_temp_url(self, dest): + """ Enhance destination path with TMP_FILE_EXT to denote temporary + file. 
+ Temporary files will be renamed after successful registration + into DB and full copy to destination + + Arguments: + dest: destination url of published file (absolute) + Returns: + dest: destination path + '.TMP_FILE_EXT' + """ + if self.TMP_FILE_EXT and '.{}'.format(self.TMP_FILE_EXT) not in dest: + dest += '.{}'.format(self.TMP_FILE_EXT) + return dest + + def prepare_file_info(self, path, size=None, file_hash=None, sites=None): + """ Prepare information for one file (asset or resource) + + Arguments: + path: destination url of published file (rootless) + size(optional): size of file in bytes + file_hash(optional): hash of file for synchronization validation + sites(optional): array of published locations, + ['studio': {'created_dt':date}] by default + keys expected ['studio', 'site1', 'gdrive1'] + Returns: + rec: dictionary with filled info + """ + + rec = { + "_id": io.ObjectId(), + "path": path + } + if size: + rec["size"] = size + + if file_hash: + rec["hash"] = file_hash + + if sites: + rec["sites"] = sites + else: + meta = {"created_dt": datetime.now()} + rec["sites"] = {"studio": meta} + + return rec + + def handle_destination_files(self, integrated_file_sizes, mode): + """ Clean destination files + Called when error happened during integrating to DB or to disk + OR called to rename uploaded files from temporary name to final to + highlight publishing in progress/broken + Used to clean unwanted files + + Arguments: + integrated_file_sizes: dictionary, file urls as keys, size as value + mode: 'remove' - clean files, + 'finalize' - rename files, + remove TMP_FILE_EXT suffix denoting temp file + """ + if integrated_file_sizes: + for file_url, _file_size in integrated_file_sizes.items(): + if not os.path.exists(file_url): + self.log.debug( + "File {} was not found.".format(file_url) + ) + continue + + try: + if mode == 'remove': + self.log.debug("Removing file {}".format(file_url)) + os.remove(file_url) + if mode == 'finalize': + new_name = re.sub( + 
r'\.{}$'.format(self.TMP_FILE_EXT), + '', + file_url + ) + + if os.path.exists(new_name): + self.log.debug( + "Overwriting file {} to {}".format( + file_url, new_name + ) + ) + shutil.copy(file_url, new_name) + else: + self.log.debug( + "Renaming file {} to {}".format( + file_url, new_name + ) + ) + os.rename(file_url, new_name) + except OSError: + self.log.error("Cannot {} file {}".format(mode, file_url), + exc_info=True) + six.reraise(*sys.exc_info()) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 7a73e921e2..758872e717 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -12,7 +12,15 @@ from avalon.vendor import requests, clique import pyblish.api -def _get_script(): +def _get_script(path): + + # pass input path if exists + if path: + if os.path.exists(path): + return str(path) + else: + raise + """Get path to the image sequence script.""" try: from pathlib import Path @@ -192,7 +200,43 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): families_transfer = ["render3d", "render2d", "ftrack", "slate"] plugin_python_version = "3.7" - def _submit_deadline_post_job(self, instance, job): + # script path for publish_filesequence.py + publishing_script = None + + # poor man exclusion + skip_integration_repre_list = [] + + def _create_metadata_path(self, instance): + ins_data = instance.data + # Ensure output dir exists + output_dir = ins_data.get( + "publishRenderMetadataFolder", ins_data["outputDir"]) + + try: + if not os.path.isdir(output_dir): + os.makedirs(output_dir) + except OSError: + # directory is not available + self.log.warning("Path is unreachable: `{}`".format(output_dir)) + + metadata_filename = "{}_metadata.json".format(ins_data["subset"]) + + metadata_path = os.path.join(output_dir, metadata_filename) + + # Convert output dir to `{root}/rest/of/path/...` with Anatomy + success, roothless_mtdt_p = 
self.anatomy.find_root_template_from_path( + metadata_path) + if not success: + # `rootless_path` is not set to `output_dir` if none of roots match + self.log.warning(( + "Could not find root path for remapping \"{}\"." + " This may cause issues on farm." + ).format(output_dir)) + roothless_mtdt_p = metadata_path + + return (metadata_path, roothless_mtdt_p) + + def _submit_deadline_post_job(self, instance, job, instances): """Submit publish job to Deadline. Deadline specific code separated from :meth:`process` for sake of @@ -205,17 +249,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): job_name = "Publish - {subset}".format(subset=subset) output_dir = instance.data["outputDir"] - # Convert output dir to `{root}/rest/of/path/...` with Anatomy - success, rootless_path = ( - self.anatomy.find_root_template_from_path(output_dir) - ) - if not success: - # `rootless_path` is not set to `output_dir` if none of roots match - self.log.warning(( - "Could not find root path for remapping \"{}\"." - " This may cause issues on farm." 
- ).format(output_dir)) - rootless_path = output_dir # Generate the payload for Deadline submission payload = { @@ -223,7 +256,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "Plugin": "Python", "BatchName": job["Props"]["Batch"], "Name": job_name, - "JobDependency0": job["_id"], "UserName": job["Props"]["User"], "Comment": instance.context.data.get("comment", ""), @@ -239,7 +271,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): }, "PluginInfo": { "Version": self.plugin_python_version, - "ScriptFile": _get_script(), + "ScriptFile": _get_script(self.publishing_script), "Arguments": "", "SingleFrameOnly": "True", }, @@ -247,13 +279,22 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "AuxFiles": [], } + # add assembly jobs as dependencies + if instance.data.get("tileRendering"): + self.log.info("Adding tile assembly jobs as dependencies...") + job_index = 0 + for assembly_id in instance.data.get("assemblySubmissionJobs"): + payload["JobInfo"]["JobDependency{}".format(job_index)] = assembly_id # noqa: E501 + job_index += 1 + else: + payload["JobInfo"]["JobDependency0"] = job["_id"] + # Transfer the environment from the original job to this dependent # job so they use the same environment - metadata_filename = "{}_metadata.json".format(subset) - metadata_path = os.path.join(rootless_path, metadata_filename) - + metadata_path, roothless_metadata_path = self._create_metadata_path( + instance) environment = job["Props"].get("Env", {}) - environment["PYPE_METADATA_FILE"] = metadata_path + environment["PYPE_METADATA_FILE"] = roothless_metadata_path environment["AVALON_PROJECT"] = io.Session["AVALON_PROJECT"] environment["PYPE_LOG_NO_COLORS"] = "1" try: @@ -380,15 +421,22 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # go through aovs in expected files for aov, files in exp_files[0].items(): cols, rem = clique.assemble(files) - # we shouldn't have any reminders - if rem: - self.log.warning( - "skipping 
unexpected files found " - "in sequence: {}".format(rem)) - - # but we really expect only one collection, nothing else make sense - assert len(cols) == 1, "only one image sequence type is expected" + # we shouldn't have any reminders. And if we do, it should + # be just one item for single frame renders. + if not cols and rem: + assert len(rem) == 1, ("Found multiple non related files " + "to render, don't know what to do " + "with them.") + col = rem[0] + ext = os.path.splitext(col)[1].lstrip(".") + else: + # but we really expect only one collection. + # Nothing else make sense. + assert len(cols) == 1, "only one image sequence type is expected" # noqa: E501 + ext = cols[0].tail.lstrip(".") + col = list(cols[0]) + self.log.debug(col) # create subset name `familyTaskSubset_AOV` group_name = 'render{}{}{}{}'.format( task[0].upper(), task[1:], @@ -396,7 +444,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): subset_name = '{}_{}'.format(group_name, aov) - staging = os.path.dirname(list(cols[0])[0]) + if isinstance(col, (list, tuple)): + staging = os.path.dirname(col[0]) + else: + staging = os.path.dirname(col) + success, rootless_staging_dir = ( self.anatomy.find_root_template_from_path(staging) ) @@ -421,13 +473,16 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): new_instance["subset"] = subset_name new_instance["subsetGroup"] = group_name - ext = cols[0].tail.lstrip(".") - # create represenation + if isinstance(col, (list, tuple)): + files = [os.path.basename(f) for f in col] + else: + files = os.path.basename(col) + rep = { "name": ext, "ext": ext, - "files": [os.path.basename(f) for f in list(cols[0])], + "files": files, "frameStart": int(instance_data.get("frameStartHandle")), "frameEnd": int(instance_data.get("frameEndHandle")), # If expectedFile are absolute, we need only filenames @@ -436,6 +491,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "tags": ["review"] if preview else [] } + # poor man exclusion + if 
ext in self.skip_integration_repre_list: + rep["tags"].append("delete") + self._solve_families(new_instance, preview) new_instance["representations"] = [rep] @@ -488,7 +547,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if bake_render_path: preview = False - if "celaction" in self.hosts: + if "celaction" in pyblish.api.registered_hosts(): preview = True staging = os.path.dirname(list(collection)[0]) @@ -515,8 +574,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "tags": ["review", "preview"] if preview else [], } + # poor man exclusion + if ext in self.skip_integration_repre_list: + rep["tags"].append("delete") + if instance.get("multipartExr", False): - rep["tags"].append["multipartExr"] + rep["tags"].append("multipartExr") representations.append(rep) @@ -584,25 +647,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if hasattr(instance, "_log"): data['_log'] = instance._log - render_job = data.pop("deadlineSubmissionJob", None) - submission_type = "deadline" - if not render_job: - # No deadline job. Try Muster: musterSubmissionJob - render_job = data.pop("musterSubmissionJob", None) - submission_type = "muster" - assert render_job, ( - "Can't continue without valid Deadline " - "or Muster submission prior to this " - "plug-in." 
- ) - - if submission_type == "deadline": - self.DEADLINE_REST_URL = os.environ.get( - "DEADLINE_REST_URL", "http://localhost:8082" - ) - assert self.DEADLINE_REST_URL, "Requires DEADLINE_REST_URL" - - self._submit_deadline_post_job(instance, render_job) asset = data.get("asset") or api.Session["AVALON_ASSET"] subset = data.get("subset") @@ -673,7 +717,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "pixelAspect": data.get("pixelAspect", 1), "resolutionWidth": data.get("resolutionWidth", 1920), "resolutionHeight": data.get("resolutionHeight", 1080), - "multipartExr": data.get("multipartExr", False) + "multipartExr": data.get("multipartExr", False), + "jobBatchName": data.get("jobBatchName", "") } if "prerender" in instance.data["families"]: @@ -686,13 +731,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if item in instance.data.get("families", []): instance_skeleton_data["families"] += [item] - if "render.farm" in instance.data["families"]: - instance_skeleton_data.update({ - "family": "render2d", - "families": ["render"] + [f for f in instance.data["families"] - if "render.farm" not in f] - }) - # transfer specific properties from original instance based on # mapping dictionary `instance_transfer` for key, values in self.instance_transfer.items(): @@ -824,6 +862,66 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): at.get("subset"), at.get("version"))) instances = new_instances + r''' SUBMiT PUBLiSH JOB 2 D34DLiN3 + ____ + ' ' .---. .---. .--. .---. .--..--..--..--. .---. + | | --= \ | . \/ _|/ \| . \ || || \ |/ _| + | JOB | --= / | | || __| .. | | | |;_ || \ || __| + | | |____./ \.__|._||_.|___./|_____|||__|\__|\.___| + ._____. 
+ + ''' + + render_job = None + if instance.data.get("toBeRenderedOn") == "deadline": + render_job = data.pop("deadlineSubmissionJob", None) + submission_type = "deadline" + + if instance.data.get("toBeRenderedOn") == "muster": + render_job = data.pop("musterSubmissionJob", None) + submission_type = "muster" + + if not render_job and instance.data.get("tileRendering") is False: + raise AssertionError(("Cannot continue without valid Deadline " + "or Muster submission.")) + + if not render_job: + import getpass + + render_job = {} + self.log.info("Faking job data ...") + render_job["Props"] = {} + # Render job doesn't exist because we do not have prior submission. + # We still use data from it so lets fake it. + # + # Batch name reflect original scene name + + if instance.data.get("assemblySubmissionJobs"): + render_job["Props"]["Batch"] = instance.data.get( + "jobBatchName") + else: + render_job["Props"]["Batch"] = os.path.splitext( + os.path.basename(context.data.get("currentFile")))[0] + # User is deadline user + render_job["Props"]["User"] = context.data.get( + "deadlineUser", getpass.getuser()) + # Priority is now not handled at all + render_job["Props"]["Pri"] = instance.data.get("priority") + + render_job["Props"]["Env"] = { + "FTRACK_API_USER": os.environ.get("FTRACK_API_USER"), + "FTRACK_API_KEY": os.environ.get("FTRACK_API_KEY"), + "FTRACK_SERVER": os.environ.get("FTRACK_SERVER"), + } + + if submission_type == "deadline": + self.DEADLINE_REST_URL = os.environ.get( + "DEADLINE_REST_URL", "http://localhost:8082" + ) + assert self.DEADLINE_REST_URL, "Requires DEADLINE_REST_URL" + + self._submit_deadline_post_job(instance, render_job, instances) + # publish job file publish_job = { "asset": asset, @@ -835,7 +933,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "version": context.data["version"], # this is workfile version "intent": context.data.get("intent"), "comment": context.data.get("comment"), - "job": render_job, + "job": render_job or 
None, "session": api.Session.copy(), "instances": instances } @@ -854,14 +952,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): } publish_job.update({"ftrack": ftrack}) - # Ensure output dir exists - output_dir = instance.data["outputDir"] - if not os.path.isdir(output_dir): - os.makedirs(output_dir) + metadata_path, roothless_metadata_path = self._create_metadata_path( + instance) - metadata_filename = "{}_metadata.json".format(subset) - - metadata_path = os.path.join(output_dir, metadata_filename) self.log.info("Writing json file: {}".format(metadata_path)) with open(metadata_path, "w") as f: json.dump(publish_job, f, indent=4, sort_keys=True) diff --git a/pype/plugins/global/publish/validate_instance_in_context.py b/pype/plugins/global/publish/validate_instance_in_context.py new file mode 100644 index 0000000000..a4fc555161 --- /dev/null +++ b/pype/plugins/global/publish/validate_instance_in_context.py @@ -0,0 +1,133 @@ +# -*- coding: utf-8 -*- +"""Validate if instance asset is the same as context asset.""" +from __future__ import absolute_import + +import pyblish.api +import pype.api + + +class SelectInvalidInstances(pyblish.api.Action): + """Select invalid instances in Outliner.""" + + label = "Select Instances" + icon = "briefcase" + on = "failed" + + def process(self, context, plugin): + """Process invalid validators and select invalid instances.""" + # Get the errored instances + failed = [] + for result in context.data["results"]: + if result["error"] is None: + continue + if result["instance"] is None: + continue + if result["instance"] in failed: + continue + if result["plugin"] != plugin: + continue + + failed.append(result["instance"]) + + # Apply pyblish.logic to get the instances for the plug-in + instances = pyblish.api.instances_by_plugin(failed, plugin) + + if instances: + self.log.info( + "Selecting invalid nodes: %s" % ", ".join( + [str(x) for x in instances] + ) + ) + self.select(instances) + else: + self.log.info("No invalid 
nodes found.") + self.deselect() + + def select(self, instances): + if "nuke" in pyblish.api.registered_hosts(): + import avalon.nuke.lib + import nuke + avalon.nuke.lib.select_nodes( + [nuke.toNode(str(x)) for x in instances] + ) + + if "maya" in pyblish.api.registered_hosts(): + from maya import cmds + cmds.select(instances, replace=True, noExpand=True) + + def deselect(self): + if "nuke" in pyblish.api.registered_hosts(): + import avalon.nuke.lib + avalon.nuke.lib.reset_selection() + + if "maya" in pyblish.api.registered_hosts(): + from maya import cmds + cmds.select(deselect=True) + + +class RepairSelectInvalidInstances(pyblish.api.Action): + """Repair the instance asset.""" + + label = "Repair" + icon = "wrench" + on = "failed" + + def process(self, context, plugin): + # Get the errored instances + failed = [] + for result in context.data["results"]: + if result["error"] is None: + continue + if result["instance"] is None: + continue + if result["instance"] in failed: + continue + if result["plugin"] != plugin: + continue + + failed.append(result["instance"]) + + # Apply pyblish.logic to get the instances for the plug-in + instances = pyblish.api.instances_by_plugin(failed, plugin) + + context_asset = context.data["assetEntity"]["name"] + for instance in instances: + self.set_attribute(instance, context_asset) + + def set_attribute(self, instance, context_asset): + if "nuke" in pyblish.api.registered_hosts(): + import nuke + nuke.toNode( + instance.data.get("name") + )["avalon:asset"].setValue(context_asset) + + if "maya" in pyblish.api.registered_hosts(): + from maya import cmds + cmds.setAttr( + instance.data.get("name") + ".asset", + context_asset, + type="string" + ) + + +class ValidateInstanceInContext(pyblish.api.InstancePlugin): + """Validator to check if instance asset match context asset. + + When working in per-shot style you always publish data in context of + current asset (shot). This validator checks if this is so. 
It is optional + so it can be disabled when needed. + + Action on this validator will select invalid instances in Outliner. + """ + + order = pype.api.ValidateContentsOrder + label = "Instance in same Context" + optional = True + hosts = ["maya", "nuke"] + actions = [SelectInvalidInstances, RepairSelectInvalidInstances] + + def process(self, instance): + asset = instance.data.get("asset") + context_asset = instance.context.data["assetEntity"]["name"] + msg = "{} has asset {}".format(instance.name, asset) + assert asset == context_asset, msg diff --git a/pype/plugins/global/publish/validate_version.py b/pype/plugins/global/publish/validate_version.py index 9c7ce72307..6701041541 100644 --- a/pype/plugins/global/publish/validate_version.py +++ b/pype/plugins/global/publish/validate_version.py @@ -10,7 +10,7 @@ class ValidateVersion(pyblish.api.InstancePlugin): order = pyblish.api.ValidatorOrder label = "Validate Version" - hosts = ["nuke", "maya", "blender"] + hosts = ["nuke", "maya", "blender", "standalonepublisher"] def process(self, instance): version = instance.data.get("version") diff --git a/pype/plugins/harmony/load/load_audio.py b/pype/plugins/harmony/load/load_audio.py index a17af78964..600791e61a 100644 --- a/pype/plugins/harmony/load/load_audio.py +++ b/pype/plugins/harmony/load/load_audio.py @@ -31,7 +31,7 @@ func class ImportAudioLoader(api.Loader): """Import audio.""" - families = ["shot"] + families = ["shot", "audio"] representations = ["wav"] label = "Import Audio" @@ -40,3 +40,19 @@ class ImportAudioLoader(api.Loader): harmony.send( {"function": func, "args": [context["subset"]["name"], wav_file]} ) + + subset_name = context["subset"]["name"] + + return harmony.containerise( + subset_name, + namespace, + subset_name, + context, + self.__class__.__name__ + ) + + def update(self, container, representation): + pass + + def remove(self, container): + pass diff --git a/pype/plugins/harmony/load/load_background.py 
b/pype/plugins/harmony/load/load_background.py new file mode 100644 index 0000000000..f96fc275be --- /dev/null +++ b/pype/plugins/harmony/load/load_background.py @@ -0,0 +1,369 @@ +import os +import uuid + +import clique + +from avalon import api, harmony +import pype.lib +import json + +copy_files = """function copyFile(srcFilename, dstFilename) +{ + var srcFile = new PermanentFile(srcFilename); + var dstFile = new PermanentFile(dstFilename); + srcFile.copy(dstFile); +} +""" + +import_files = """var PNGTransparencyMode = 1; //Premultiplied wih Black +var TGATransparencyMode = 0; //Premultiplied wih Black +var SGITransparencyMode = 0; //Premultiplied wih Black +var LayeredPSDTransparencyMode = 1; //Straight +var FlatPSDTransparencyMode = 2; //Premultiplied wih White + +function getUniqueColumnName( column_prefix ) +{ + var suffix = 0; + // finds if unique name for a column + var column_name = column_prefix; + while(suffix < 2000) + { + if(!column.type(column_name)) + break; + + suffix = suffix + 1; + column_name = column_prefix + "_" + suffix; + } + return column_name; +} + +function import_files(args) +{ + var root = args[0]; + var files = args[1]; + var name = args[2]; + var start_frame = args[3]; + + var vectorFormat = null; + var extension = null; + var filename = files[0]; + + var pos = filename.lastIndexOf("."); + if( pos < 0 ) + return null; + + extension = filename.substr(pos+1).toLowerCase(); + + if(extension == "jpeg") + extension = "jpg"; + if(extension == "tvg") + { + vectorFormat = "TVG" + extension ="SCAN"; // element.add() will use this. + } + + var elemId = element.add( + name, + "BW", + scene.numberOfUnitsZ(), + extension.toUpperCase(), + vectorFormat + ); + if (elemId == -1) + { + // hum, unknown file type most likely -- let's skip it. + return null; // no read to add. 
+ } + + var uniqueColumnName = getUniqueColumnName(name); + column.add(uniqueColumnName , "DRAWING"); + column.setElementIdOfDrawing(uniqueColumnName, elemId); + + var read = node.add(root, name, "READ", 0, 0, 0); + var transparencyAttr = node.getAttr( + read, frame.current(), "READ_TRANSPARENCY" + ); + var opacityAttr = node.getAttr(read, frame.current(), "OPACITY"); + transparencyAttr.setValue(true); + opacityAttr.setValue(true); + + var alignmentAttr = node.getAttr(read, frame.current(), "ALIGNMENT_RULE"); + alignmentAttr.setValue("ASIS"); + + var transparencyModeAttr = node.getAttr( + read, frame.current(), "applyMatteToColor" + ); + if (extension == "png") + transparencyModeAttr.setValue(PNGTransparencyMode); + if (extension == "tga") + transparencyModeAttr.setValue(TGATransparencyMode); + if (extension == "sgi") + transparencyModeAttr.setValue(SGITransparencyMode); + if (extension == "psd") + transparencyModeAttr.setValue(FlatPSDTransparencyMode); + if (extension == "jpg") + transparencyModeAttr.setValue(LayeredPSDTransparencyMode); + + node.linkAttr(read, "DRAWING.ELEMENT", uniqueColumnName); + + if (files.length == 1) + { + // Create a drawing drawing, 'true' indicate that the file exists. + Drawing.create(elemId, 1, true); + // Get the actual path, in tmp folder. + var drawingFilePath = Drawing.filename(elemId, "1"); + copyFile(files[0], drawingFilePath); + // Expose the image for the entire frame range. + for( var i =0; i <= frame.numberOf() - 1; ++i) + { + timing = start_frame + i + column.setEntry(uniqueColumnName, 1, timing, "1"); + } + } else { + // Create a drawing for each file. + for( var i =0; i <= files.length - 1; ++i) + { + timing = start_frame + i + // Create a drawing drawing, 'true' indicate that the file exists. + Drawing.create(elemId, timing, true); + // Get the actual path, in tmp folder. 
+ var drawingFilePath = Drawing.filename(elemId, timing.toString()); + copyFile( files[i], drawingFilePath ); + + column.setEntry(uniqueColumnName, 1, timing, timing.toString()); + } + } + + var green_color = new ColorRGBA(0, 255, 0, 255); + node.setColor(read, green_color); + + return read; +} +import_files +""" + +replace_files = """var PNGTransparencyMode = 1; //Premultiplied wih Black +var TGATransparencyMode = 0; //Premultiplied wih Black +var SGITransparencyMode = 0; //Premultiplied wih Black +var LayeredPSDTransparencyMode = 1; //Straight +var FlatPSDTransparencyMode = 2; //Premultiplied wih White + +function replace_files(args) +{ + var files = args[0]; + MessageLog.trace(files); + MessageLog.trace(files.length); + var _node = args[1]; + var start_frame = args[2]; + + var _column = node.linkedColumn(_node, "DRAWING.ELEMENT"); + var elemId = column.getElementIdOfDrawing(_column); + + // Delete existing drawings. + var timings = column.getDrawingTimings(_column); + for( var i =0; i <= timings.length - 1; ++i) + { + column.deleteDrawingAt(_column, parseInt(timings[i])); + } + + + var filename = files[0]; + var pos = filename.lastIndexOf("."); + if( pos < 0 ) + return null; + var extension = filename.substr(pos+1).toLowerCase(); + + if(extension == "jpeg") + extension = "jpg"; + + var transparencyModeAttr = node.getAttr( + _node, frame.current(), "applyMatteToColor" + ); + if (extension == "png") + transparencyModeAttr.setValue(PNGTransparencyMode); + if (extension == "tga") + transparencyModeAttr.setValue(TGATransparencyMode); + if (extension == "sgi") + transparencyModeAttr.setValue(SGITransparencyMode); + if (extension == "psd") + transparencyModeAttr.setValue(FlatPSDTransparencyMode); + if (extension == "jpg") + transparencyModeAttr.setValue(LayeredPSDTransparencyMode); + + if (files.length == 1) + { + // Create a drawing drawing, 'true' indicate that the file exists. + Drawing.create(elemId, 1, true); + // Get the actual path, in tmp folder. 
+ var drawingFilePath = Drawing.filename(elemId, "1"); + copyFile(files[0], drawingFilePath); + MessageLog.trace(files[0]); + MessageLog.trace(drawingFilePath); + // Expose the image for the entire frame range. + for( var i =0; i <= frame.numberOf() - 1; ++i) + { + timing = start_frame + i + column.setEntry(_column, 1, timing, "1"); + } + } else { + // Create a drawing for each file. + for( var i =0; i <= files.length - 1; ++i) + { + timing = start_frame + i + // Create a drawing drawing, 'true' indicate that the file exists. + Drawing.create(elemId, timing, true); + // Get the actual path, in tmp folder. + var drawingFilePath = Drawing.filename(elemId, timing.toString()); + copyFile( files[i], drawingFilePath ); + + column.setEntry(_column, 1, timing, timing.toString()); + } + } + + var green_color = new ColorRGBA(0, 255, 0, 255); + node.setColor(_node, green_color); +} +replace_files +""" + + +class BackgroundLoader(api.Loader): + """Load images + Stores the imported asset in a container named after the asset. 
+ """ + families = ["background"] + representations = ["json"] + + def load(self, context, name=None, namespace=None, data=None): + + with open(self.fname) as json_file: + data = json.load(json_file) + + layers = list() + + for child in data['children']: + if child.get("filename"): + layers.append(child["filename"]) + else: + for layer in child['children']: + if layer.get("filename"): + layers.append(layer["filename"]) + + bg_folder = os.path.dirname(self.fname) + + subset_name = context["subset"]["name"] + # read_node_name += "_{}".format(uuid.uuid4()) + container_nodes = [] + + for layer in sorted(layers): + file_to_import = [os.path.join(bg_folder, layer).replace("\\", "/")] + + read_node = harmony.send( + { + "function": copy_files + import_files, + "args": ["Top", file_to_import, layer, 1] + } + )["result"] + container_nodes.append(read_node) + + return harmony.containerise( + subset_name, + namespace, + subset_name, + context, + self.__class__.__name__, + nodes=container_nodes + ) + + def update(self, container, representation): + + path = api.get_representation_path(representation) + + with open(path) as json_file: + data = json.load(json_file) + + layers = list() + + for child in data['children']: + if child.get("filename"): + print(child["filename"]) + layers.append(child["filename"]) + else: + for layer in child['children']: + if layer.get("filename"): + print(layer["filename"]) + layers.append(layer["filename"]) + + bg_folder = os.path.dirname(path) + + path = api.get_representation_path(representation) + + print(container) + + for layer in sorted(layers): + file_to_import = [os.path.join(bg_folder, layer).replace("\\", "/")] + print(20*"#") + print(f"FILE TO REPLACE: {file_to_import}") + print(f"LAYER: {layer}") + node = harmony.find_node_by_name(layer, "READ") + print(f"{node}") + + if node in container['nodes']: + harmony.send( + { + "function": copy_files + replace_files, + "args": [file_to_import, node, 1] + } + ) + else: + read_node = harmony.send( 
+ { + "function": copy_files + import_files, + "args": ["Top", file_to_import, layer, 1] + } + )["result"] + container['nodes'].append(read_node) + + + # Colour node. + func = """function func(args){ + for( var i =0; i <= args[0].length - 1; ++i) + { + var red_color = new ColorRGBA(255, 0, 0, 255); + var green_color = new ColorRGBA(0, 255, 0, 255); + if (args[1] == "red"){ + node.setColor(args[0], red_color); + } + if (args[1] == "green"){ + node.setColor(args[0], green_color); + } + } + } + func + """ + if pype.lib.is_latest(representation): + harmony.send({"function": func, "args": [node, "green"]}) + else: + harmony.send({"function": func, "args": [node, "red"]}) + + harmony.imprint( + container['name'], {"representation": str(representation["_id"]), + "nodes": container['nodes']} + ) + + def remove(self, container): + for node in container.get("nodes"): + + func = """function deleteNode(_node) + { + node.deleteNode(_node, true, true); + } + deleteNode + """ + harmony.send( + {"function": func, "args": [node]} + ) + harmony.imprint(container['name'], {}, remove=True) + + def switch(self, container, representation): + self.update(container, representation) diff --git a/pype/plugins/harmony/load/load_imagesequence.py b/pype/plugins/harmony/load/load_imagesequence.py index f81018d0fb..c5f50a7d23 100644 --- a/pype/plugins/harmony/load/load_imagesequence.py +++ b/pype/plugins/harmony/load/load_imagesequence.py @@ -14,7 +14,7 @@ copy_files = """function copyFile(srcFilename, dstFilename) } """ -import_files = """var PNGTransparencyMode = 0; //Premultiplied wih Black +import_files = """var PNGTransparencyMode = 1; //Premultiplied wih Black var TGATransparencyMode = 0; //Premultiplied wih Black var SGITransparencyMode = 0; //Premultiplied wih Black var LayeredPSDTransparencyMode = 1; //Straight @@ -141,7 +141,7 @@ function import_files(args) import_files """ -replace_files = """var PNGTransparencyMode = 0; //Premultiplied wih Black +replace_files = """var 
PNGTransparencyMode = 1; //Premultiplied wih Black var TGATransparencyMode = 0; //Premultiplied wih Black var SGITransparencyMode = 0; //Premultiplied wih Black var LayeredPSDTransparencyMode = 1; //Straight @@ -230,7 +230,7 @@ class ImageSequenceLoader(api.Loader): """Load images Stores the imported asset in a container named after the asset. """ - families = ["shot", "render", "image"] + families = ["shot", "render", "image", "plate", "reference"] representations = ["jpeg", "png", "jpg"] def load(self, context, name=None, namespace=None, data=None): @@ -267,7 +267,8 @@ class ImageSequenceLoader(api.Loader): namespace, read_node, context, - self.__class__.__name__ + self.__class__.__name__, + nodes=[read_node] ) def update(self, container, representation): @@ -336,6 +337,7 @@ class ImageSequenceLoader(api.Loader): harmony.send( {"function": func, "args": [node]} ) + harmony.imprint(node, {}, remove=True) def switch(self, container, representation): self.update(container, representation) diff --git a/pype/plugins/harmony/load/load_template_workfile.py b/pype/plugins/harmony/load/load_template_workfile.py index b727cf865c..3e79cc1903 100644 --- a/pype/plugins/harmony/load/load_template_workfile.py +++ b/pype/plugins/harmony/load/load_template_workfile.py @@ -9,7 +9,7 @@ from avalon import api, harmony class ImportTemplateLoader(api.Loader): """Import templates.""" - families = ["harmony.template"] + families = ["harmony.template", "workfile"] representations = ["*"] label = "Import Template" @@ -35,6 +35,22 @@ class ImportTemplateLoader(api.Loader): shutil.rmtree(temp_dir) + subset_name = context["subset"]["name"] + + return harmony.containerise( + subset_name, + namespace, + subset_name, + context, + self.__class__.__name__ + ) + + def update(self, container, representation): + pass + + def remove(self, container): + pass + class ImportWorkfileLoader(ImportTemplateLoader): """Import workfiles.""" diff --git a/pype/plugins/harmony/publish/extract_render.py 
b/pype/plugins/harmony/publish/extract_render.py index 7ca83d3f0f..70dceb9ca2 100644 --- a/pype/plugins/harmony/publish/extract_render.py +++ b/pype/plugins/harmony/publish/extract_render.py @@ -4,6 +4,7 @@ import subprocess import pyblish.api from avalon import harmony +import pype.lib import clique @@ -43,6 +44,9 @@ class ExtractRender(pyblish.api.InstancePlugin): frame_start = result[4] frame_end = result[5] audio_path = result[6] + if audio_path: + instance.data["audio"] = [{"filename": audio_path}] + instance.data["fps"] = frame_rate # Set output path to temp folder. path = tempfile.mkdtemp() @@ -72,24 +76,28 @@ class ExtractRender(pyblish.api.InstancePlugin): self.log.info(output.decode("utf-8")) # Collect rendered files. + self.log.debug(path) files = os.listdir(path) + self.log.debug(files) collections, remainder = clique.assemble(files, minimum_items=1) assert not remainder, ( "There should not be a remainder for {0}: {1}".format( instance[0], remainder ) ) - assert len(collections) == 1, ( - "There should only be one image sequence in {}. Found: {}".format( - path, len(collections) - ) - ) - collection = collections[0] + self.log.debug(collections) + if len(collections) > 1: + for col in collections: + if len(list(col)) > 1: + collection = col + else: + collection = collections[0] # Generate thumbnail. thumbnail_path = os.path.join(path, "thumbnail.png") + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") args = [ - "ffmpeg", "-y", + ffmpeg_path, "-y", "-i", os.path.join(path, list(collections[0])[0]), "-vf", "scale=300:-1", "-vframes", "1", @@ -109,48 +117,17 @@ class ExtractRender(pyblish.api.InstancePlugin): self.log.debug(output.decode("utf-8")) - # Generate mov. 
- mov_path = os.path.join(path, instance.data["name"] + ".mov") - args = [ - "ffmpeg", "-y", - "-i", audio_path, - "-i", - os.path.join(path, collection.head + "%04d" + collection.tail), - mov_path - ] - process = subprocess.Popen( - args, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - stdin=subprocess.PIPE - ) - - output = process.communicate()[0] - - if process.returncode != 0: - raise ValueError(output.decode("utf-8")) - - self.log.debug(output.decode("utf-8")) - # Generate representations. extension = collection.tail[1:] representation = { "name": extension, "ext": extension, "files": list(collection), - "stagingDir": path - } - movie = { - "name": "mov", - "ext": "mov", - "files": os.path.basename(mov_path), "stagingDir": path, - "frameStart": frame_start, - "frameEnd": frame_end, - "fps": frame_rate, - "preview": True, - "tags": ["review", "ftrackreview"] + "tags": ["review"], + "fps": frame_rate } + thumbnail = { "name": "thumbnail", "ext": "png", @@ -158,7 +135,7 @@ class ExtractRender(pyblish.api.InstancePlugin): "stagingDir": path, "tags": ["thumbnail"] } - instance.data["representations"] = [representation, movie, thumbnail] + instance.data["representations"] = [representation, thumbnail] # Required for extract_review plugin (L222 onwards). instance.data["frameStart"] = frame_start diff --git a/pype/plugins/harmony/publish/validate_audio.py b/pype/plugins/harmony/publish/validate_audio.py new file mode 100644 index 0000000000..ba113e7610 --- /dev/null +++ b/pype/plugins/harmony/publish/validate_audio.py @@ -0,0 +1,37 @@ +import json +import os + +import pyblish.api + +import avalon.harmony +import pype.hosts.harmony + + +class ValidateAudio(pyblish.api.InstancePlugin): + """Ensures that there is an audio file in the scene. 
If you are sure that you want to send render without audio, you can disable this validator before clicking on "publish" """ + + order = pyblish.api.ValidatorOrder + label = "Validate Audio" + families = ["render"] + hosts = ["harmony"] + optional = True + + def process(self, instance): + # Collect scene data. + func = """function func(write_node) + { + return [ + sound.getSoundtrackAll().path() + ] + } + func + """ + result = avalon.harmony.send( + {"function": func, "args": [instance[0]]} + )["result"] + + audio_path = result[0] + + msg = "You are missing audio file:\n{}".format(audio_path) + + assert os.path.isfile(audio_path), msg diff --git a/pype/plugins/harmony/publish/validate_scene_settings.py b/pype/plugins/harmony/publish/validate_scene_settings.py index aa9a70bd85..d7895804bd 100644 --- a/pype/plugins/harmony/publish/validate_scene_settings.py +++ b/pype/plugins/harmony/publish/validate_scene_settings.py @@ -28,8 +28,11 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin): hosts = ["harmony"] actions = [ValidateSceneSettingsRepair] + frame_check_filter = ["_ch_", "_pr_", "_intd_", "_extd_"] + def process(self, instance): expected_settings = pype.hosts.harmony.get_asset_settings() + self.log.info(expected_settings) # Harmony is expected to start at 1. 
frame_start = expected_settings["frameStart"] @@ -37,6 +40,12 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin): expected_settings["frameEnd"] = frame_end - frame_start + 1 expected_settings["frameStart"] = 1 + self.log.info(instance.context.data['anatomyData']['asset']) + + if any(string in instance.context.data['anatomyData']['asset'] + for string in self.frame_check_filter): + expected_settings.pop("frameEnd") + func = """function func() { return { diff --git a/pype/plugins/maya/create/create_render.py b/pype/plugins/maya/create/create_render.py index 3b2048d8f0..6826d33c58 100644 --- a/pype/plugins/maya/create/create_render.py +++ b/pype/plugins/maya/create/create_render.py @@ -40,6 +40,9 @@ class CreateRender(avalon.maya.Creator): vrscene (bool): Submit as ``vrscene`` file for standalone V-Ray renderer. ass (bool): Submit as ``ass`` file for standalone Arnold renderer. + tileRendering (bool): Instance is set to tile rendering mode. We + won't submit actuall render, but we'll make publish job to wait + for Tile Assemly job done and then publish. See Also: https://pype.club/docs/artist_hosts_maya#creating-basic-render-setup @@ -172,6 +175,7 @@ class CreateRender(avalon.maya.Creator): self.data["primaryPool"] = pool_names self.data["suspendPublishJob"] = False + self.data["review"] = True self.data["extendFrames"] = False self.data["overrideExistingFrame"] = True # self.data["useLegacyRenderLayers"] = True @@ -181,6 +185,9 @@ class CreateRender(avalon.maya.Creator): self.data["machineList"] = "" self.data["useMayaBatch"] = False self.data["vrayScene"] = False + self.data["tileRendering"] = False + self.data["tilesX"] = 2 + self.data["tilesY"] = 2 # Disable for now as this feature is not working yet # self.data["assScene"] = False @@ -189,8 +196,8 @@ class CreateRender(avalon.maya.Creator): def _load_credentials(self): """Load Muster credentials. 
- Load Muster credentials from file and set ```MUSTER_USER``, - ```MUSTER_PASSWORD``, ``MUSTER_REST_URL`` is loaded from presets. + Load Muster credentials from file and set ``MUSTER_USER``, + ``MUSTER_PASSWORD``, ``MUSTER_REST_URL`` is loaded from presets. Raises: RuntimeError: If loaded credentials are invalid. diff --git a/pype/plugins/maya/create/create_review.py b/pype/plugins/maya/create/create_review.py index 3e513032e1..97731d7950 100644 --- a/pype/plugins/maya/create/create_review.py +++ b/pype/plugins/maya/create/create_review.py @@ -11,6 +11,8 @@ class CreateReview(avalon.maya.Creator): family = "review" icon = "video-camera" defaults = ['Main'] + keepImages = False + isolate = False def __init__(self, *args, **kwargs): super(CreateReview, self).__init__(*args, **kwargs) @@ -21,4 +23,7 @@ class CreateReview(avalon.maya.Creator): for key, value in animation_data.items(): data[key] = value + data["isolate"] = self.isolate + data["keepImages"] = self.keepImages + self.data = data diff --git a/pype/plugins/maya/load/actions.py b/pype/plugins/maya/load/actions.py index 77d18b0ee3..d4525511f4 100644 --- a/pype/plugins/maya/load/actions.py +++ b/pype/plugins/maya/load/actions.py @@ -87,7 +87,7 @@ class ImportMayaLoader(api.Loader): so you could also use it as a new base. 
""" - representations = ["ma"] + representations = ["ma", "mb"] families = ["*"] label = "Import" diff --git a/pype/plugins/maya/load/load_ass.py b/pype/plugins/maya/load/load_ass.py index 210b1fde1e..ffe70c39e8 100644 --- a/pype/plugins/maya/load/load_ass.py +++ b/pype/plugins/maya/load/load_ass.py @@ -98,15 +98,19 @@ class AssProxyLoader(pype.hosts.maya.plugin.ReferenceLoader): node = container["objectName"] + representation["context"].pop("frame", None) path = api.get_representation_path(representation) + print(path) # path = self.fname + print(self.fname) proxyPath = os.path.splitext(path)[0] + ".ma" + print(proxyPath) # Get reference node from container members members = cmds.sets(node, query=True, nodesOnly=True) reference_node = self._get_reference_node(members) - assert os.path.exists(path), "%s does not exist." % proxyPath + assert os.path.exists(proxyPath), "%s does not exist." % proxyPath try: content = cmds.file(proxyPath, diff --git a/pype/plugins/maya/load/load_audio.py b/pype/plugins/maya/load/load_audio.py index e1860d0ca6..ca38082ed0 100644 --- a/pype/plugins/maya/load/load_audio.py +++ b/pype/plugins/maya/load/load_audio.py @@ -1,6 +1,9 @@ from maya import cmds, mel +import pymel.core as pc from avalon import api +from avalon.maya.pipeline import containerise +from avalon.maya import lib class AudioLoader(api.Loader): @@ -24,4 +27,48 @@ class AudioLoader(api.Loader): displaySound=True ) - return [sound_node] + asset = context["asset"]["name"] + namespace = namespace or lib.unique_namespace( + asset + "_", + prefix="_" if asset[0].isdigit() else "", + suffix="_", + ) + + return containerise( + name=name, + namespace=namespace, + nodes=[sound_node], + context=context, + loader=self.__class__.__name__ + ) + + def update(self, container, representation): + audio_node = None + for node in pc.PyNode(container["objectName"]).members(): + if node.nodeType() == "audio": + audio_node = node + + assert audio_node is not None, "Audio node not found." 
+ + path = api.get_representation_path(representation) + audio_node.filename.set(path) + cmds.setAttr( + container["objectName"] + ".representation", + str(representation["_id"]), + type="string" + ) + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + members = cmds.sets(container['objectName'], query=True) + cmds.lockNode(members, lock=False) + cmds.delete([container['objectName']] + members) + + # Clean up the namespace + try: + cmds.namespace(removeNamespace=container['namespace'], + deleteNamespaceContent=True) + except RuntimeError: + pass diff --git a/pype/plugins/maya/load/load_image_plane.py b/pype/plugins/maya/load/load_image_plane.py index 653a8d4128..17a6866f80 100644 --- a/pype/plugins/maya/load/load_image_plane.py +++ b/pype/plugins/maya/load/load_image_plane.py @@ -1,4 +1,9 @@ +import pymel.core as pc +import maya.cmds as cmds + from avalon import api +from avalon.maya.pipeline import containerise +from avalon.maya import lib from Qt import QtWidgets @@ -7,15 +12,19 @@ class ImagePlaneLoader(api.Loader): families = ["plate", "render"] label = "Create imagePlane on selected camera." - representations = ["mov", "exr"] + representations = ["mov", "exr", "preview"] icon = "image" color = "orange" def load(self, context, name, namespace, data): - import pymel.core as pc - new_nodes = [] image_plane_depth = 1000 + asset = context['asset']['name'] + namespace = namespace or lib.unique_namespace( + asset + "_", + prefix="_" if asset[0].isdigit() else "", + suffix="_", + ) # Getting camera from selection. 
selection = pc.ls(selection=True) @@ -74,12 +83,16 @@ class ImagePlaneLoader(api.Loader): image_plane_shape.frameOut.set(end_frame) image_plane_shape.useFrameExtension.set(1) - if context["representation"]["name"] == "mov": + movie_representations = ["mov", "preview"] + if context["representation"]["name"] in movie_representations: # Need to get "type" by string, because its a method as well. pc.Attribute(image_plane_shape + ".type").set(2) # Ask user whether to use sequence or still image. if context["representation"]["name"] == "exr": + # Ensure OpenEXRLoader plugin is loaded. + pc.loadPlugin("OpenEXRLoader.mll", quiet=True) + reply = QtWidgets.QMessageBox.information( None, "Frame Hold.", @@ -93,11 +106,51 @@ class ImagePlaneLoader(api.Loader): ) image_plane_shape.frameExtension.set(start_frame) - # Ensure OpenEXRLoader plugin is loaded. - pc.loadPlugin("OpenEXRLoader.mll", quiet=True) - new_nodes.extend( - [image_plane_transform.name(), image_plane_shape.name()] + [ + image_plane_transform.longName().split("|")[-1], + image_plane_shape.longName().split("|")[-1] + ] ) - return new_nodes + for node in new_nodes: + pc.rename(node, "{}:{}".format(namespace, node)) + + return containerise( + name=name, + namespace=namespace, + nodes=new_nodes, + context=context, + loader=self.__class__.__name__ + ) + + def update(self, container, representation): + image_plane_shape = None + for node in pc.PyNode(container["objectName"]).members(): + if node.nodeType() == "imagePlane": + image_plane_shape = node + + assert image_plane_shape is not None, "Image plane not found." 
+ + path = api.get_representation_path(representation) + image_plane_shape.imageName.set(path) + cmds.setAttr( + container["objectName"] + ".representation", + str(representation["_id"]), + type="string" + ) + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + members = cmds.sets(container['objectName'], query=True) + cmds.lockNode(members, lock=False) + cmds.delete([container['objectName']] + members) + + # Clean up the namespace + try: + cmds.namespace(removeNamespace=container['namespace'], + deleteNamespaceContent=True) + except RuntimeError: + pass diff --git a/pype/plugins/maya/load/load_reference.py b/pype/plugins/maya/load/load_reference.py index 5992980412..3a9b75a2f5 100644 --- a/pype/plugins/maya/load/load_reference.py +++ b/pype/plugins/maya/load/load_reference.py @@ -16,7 +16,7 @@ class ReferenceLoader(pype.hosts.maya.plugin.ReferenceLoader): "layout", "camera", "rig"] - representations = ["ma", "abc", "fbx"] + representations = ["ma", "abc", "fbx", "mb"] tool_names = ["loader"] label = "Reference" diff --git a/pype/plugins/maya/load/load_rendersetup.py b/pype/plugins/maya/load/load_rendersetup.py index b38e2988b1..45a314a9d1 100644 --- a/pype/plugins/maya/load/load_rendersetup.py +++ b/pype/plugins/maya/load/load_rendersetup.py @@ -1,14 +1,25 @@ -from avalon import api -import maya.app.renderSetup.model.renderSetup as renderSetup -from avalon.maya import lib -from maya import cmds +# -*- coding: utf-8 -*- +"""Load and update RenderSetup settings. + +Working with RenderSetup setting is Maya is done utilizing json files. +When this json is loaded, it will overwrite all settings on RenderSetup +instance. 
+""" + import json +import six +import sys + +from avalon import api +from avalon.maya import lib +from pype.hosts.maya import lib as pypelib + +from maya import cmds +import maya.app.renderSetup.model.renderSetup as renderSetup class RenderSetupLoader(api.Loader): - """ - This will load json preset for RenderSetup, overwriting current one. - """ + """Load json preset for RenderSetup overwriting current one.""" families = ["rendersetup"] representations = ["json"] @@ -19,7 +30,7 @@ class RenderSetupLoader(api.Loader): color = "orange" def load(self, context, name, namespace, data): - + """Load RenderSetup settings.""" from avalon.maya.pipeline import containerise # from pype.hosts.maya.lib import namespaced @@ -29,7 +40,7 @@ class RenderSetupLoader(api.Loader): prefix="_" if asset[0].isdigit() else "", suffix="_", ) - + self.log.info(">>> loading json [ {} ]".format(self.fname)) with open(self.fname, "r") as file: renderSetup.instance().decode( json.load(file), renderSetup.DECODE_AND_OVERWRITE, None) @@ -42,9 +53,56 @@ class RenderSetupLoader(api.Loader): if not nodes: return + self.log.info(">>> containerising [ {} ]".format(name)) return containerise( name=name, namespace=namespace, nodes=nodes, context=context, loader=self.__class__.__name__) + + def remove(self, container): + """Remove RenderSetup settings instance.""" + from maya import cmds + + container_name = container["objectName"] + + self.log.info("Removing '%s' from Maya.." % container["name"]) + + container_content = cmds.sets(container_name, query=True) + nodes = cmds.ls(container_content, long=True) + + nodes.append(container_name) + + try: + cmds.delete(nodes) + except ValueError: + # Already implicitly deleted by Maya upon removing reference + pass + + def update(self, container, representation): + """Update RenderSetup setting by overwriting existing settings.""" + pypelib.show_message( + "Render setup update", + "Render setup setting will be overwritten by new version. 
All " + "setting specified by user not included in loaded version " + "will be lost.") + path = api.get_representation_path(representation) + with open(path, "r") as file: + try: + renderSetup.instance().decode( + json.load(file), renderSetup.DECODE_AND_OVERWRITE, None) + except Exception: + self.log.error("There were errors during loading") + six.reraise(*sys.exc_info()) + + # Update metadata + node = container["objectName"] + cmds.setAttr("{}.representation".format(node), + str(representation["_id"]), + type="string") + self.log.info("... updated") + + def switch(self, container, representation): + """Switch representations.""" + self.update(container, representation) diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py index 03b14f76bb..91230fcc46 100644 --- a/pype/plugins/maya/publish/collect_render.py +++ b/pype/plugins/maya/publish/collect_render.py @@ -216,6 +216,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin): "attachTo": attach_to, "setMembers": layer_name, "multipartExr": ef.multipart, + "review": render_instance.data.get("review") or False, "publish": True, "handleStart": handle_start, @@ -242,6 +243,10 @@ class CollectMayaRender(pyblish.api.ContextPlugin): "resolutionWidth": cmds.getAttr("defaultResolution.width"), "resolutionHeight": cmds.getAttr("defaultResolution.height"), "pixelAspect": cmds.getAttr("defaultResolution.pixelAspect"), + "tileRendering": render_instance.data.get("tileRendering") or False, # noqa: E501 + "tilesX": render_instance.data.get("tilesX") or 2, + "tilesY": render_instance.data.get("tilesY") or 2, + "priority": render_instance.data.get("priority") } # Apply each user defined attribute as data diff --git a/pype/plugins/maya/publish/collect_review.py b/pype/plugins/maya/publish/collect_review.py index 063a854bd1..886e3b82dd 100644 --- a/pype/plugins/maya/publish/collect_review.py +++ b/pype/plugins/maya/publish/collect_review.py @@ -13,6 +13,7 @@ class 
CollectReview(pyblish.api.InstancePlugin): order = pyblish.api.CollectorOrder + 0.3 label = 'Collect Review Data' families = ["review"] + legacy = True def process(self, instance): @@ -63,13 +64,23 @@ class CollectReview(pyblish.api.InstancePlugin): data['handles'] = instance.data.get('handles', None) data['step'] = instance.data['step'] data['fps'] = instance.data['fps'] + data["isolate"] = instance.data["isolate"] cmds.setAttr(str(instance) + '.active', 1) self.log.debug('data {}'.format(instance.context[i].data)) instance.context[i].data.update(data) instance.data['remove'] = True i += 1 else: - instance.data['subset'] = task + 'Review' + if self.legacy: + instance.data['subset'] = task + 'Review' + else: + subset = "{}{}{}".format( + task, + instance.data["subset"][0].upper(), + instance.data["subset"][1:] + ) + instance.data['subset'] = subset + instance.data['review_camera'] = camera instance.data['frameStartFtrack'] = instance.data["frameStartHandle"] instance.data['frameEndFtrack'] = instance.data["frameEndHandle"] diff --git a/pype/plugins/maya/publish/extract_camera_alembic.py b/pype/plugins/maya/publish/extract_camera_alembic.py index cc090760ff..c61ec5e19e 100644 --- a/pype/plugins/maya/publish/extract_camera_alembic.py +++ b/pype/plugins/maya/publish/extract_camera_alembic.py @@ -19,6 +19,7 @@ class ExtractCameraAlembic(pype.api.Extractor): label = "Camera (Alembic)" hosts = ["maya"] families = ["camera"] + bake_attributes = [] def process(self, instance): @@ -66,6 +67,14 @@ class ExtractCameraAlembic(pype.api.Extractor): job_str += ' -file "{0}"'.format(path) + # bake specified attributes in preset + assert isinstance(self.bake_attributes, (list, tuple)), ( + "Attributes to bake must be specified as a list" + ) + for attr in self.bake_attributes: + self.log.info("Adding {} attribute".format(attr)) + job_str += " -attr {0}".format(attr) + with lib.evaluation("off"): with avalon.maya.suspended_refresh(): cmds.AbcExport(j=job_str, verbose=False) diff 
--git a/pype/plugins/maya/publish/extract_camera_mayaAscii.py b/pype/plugins/maya/publish/extract_camera_mayaScene.py similarity index 82% rename from pype/plugins/maya/publish/extract_camera_mayaAscii.py rename to pype/plugins/maya/publish/extract_camera_mayaScene.py index 973d8d452a..03dde031e9 100644 --- a/pype/plugins/maya/publish/extract_camera_mayaAscii.py +++ b/pype/plugins/maya/publish/extract_camera_mayaScene.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- +"""Extract camera as Maya Scene.""" import os from maya import cmds @@ -65,8 +67,8 @@ def unlock(plug): cmds.disconnectAttr(source, destination) -class ExtractCameraMayaAscii(pype.api.Extractor): - """Extract a Camera as Maya Ascii. +class ExtractCameraMayaScene(pype.api.Extractor): + """Extract a Camera as Maya Scene. This will create a duplicate of the camera that will be baked *with* substeps and handles for the required frames. This temporary duplicate @@ -81,13 +83,28 @@ class ExtractCameraMayaAscii(pype.api.Extractor): """ - label = "Camera (Maya Ascii)" + label = "Camera (Maya Scene)" hosts = ["maya"] families = ["camera"] + scene_type = "ma" def process(self, instance): - + """Plugin entry point.""" # get settings + ext_mapping = instance.context.data["presets"]["maya"].get("ext_mapping") # noqa: E501 + if ext_mapping: + self.log.info("Looking in presets for scene type ...") + # use extension mapping for first family found + for family in self.families: + try: + self.scene_type = ext_mapping[family] + self.log.info( + "Using {} as scene type".format(self.scene_type)) + break + except AttributeError: + # no preset found + pass + framerange = [instance.data.get("frameStart", 1), instance.data.get("frameEnd", 1)] handles = instance.data.get("handles", 0) @@ -95,7 +112,7 @@ class ExtractCameraMayaAscii(pype.api.Extractor): bake_to_worldspace = instance.data("bakeToWorldSpace", True) if not bake_to_worldspace: - self.log.warning("Camera (Maya Ascii) export only supports world" + self.log.warning("Camera 
(Maya Scene) export only supports world" "space baked camera extractions. The disabled " "bake to world space is ignored...") @@ -115,7 +132,7 @@ class ExtractCameraMayaAscii(pype.api.Extractor): # Define extract output file path dir_path = self.staging_dir(instance) - filename = "{0}.ma".format(instance.name) + filename = "{0}.{1}".format(instance.name, self.scene_type) path = os.path.join(dir_path, filename) # Perform extraction @@ -152,7 +169,7 @@ class ExtractCameraMayaAscii(pype.api.Extractor): cmds.select(baked_shapes, noExpand=True) cmds.file(path, force=True, - typ="mayaAscii", + typ="mayaAscii" if self.scene_type == "ma" else "mayaBinary", # noqa: E501 exportSelected=True, preserveReferences=False, constructionHistory=False, @@ -164,15 +181,15 @@ class ExtractCameraMayaAscii(pype.api.Extractor): # Delete the baked hierarchy if bake_to_worldspace: cmds.delete(baked) - - massage_ma_file(path) + if self.scene_type == "ma": + massage_ma_file(path) if "representations" not in instance.data: instance.data["representations"] = [] representation = { - 'name': 'ma', - 'ext': 'ma', + 'name': self.scene_type, + 'ext': self.scene_type, 'files': filename, "stagingDir": dir_path, } diff --git a/pype/plugins/maya/publish/extract_look.py b/pype/plugins/maya/publish/extract_look.py index f402f61329..6bd202093f 100644 --- a/pype/plugins/maya/publish/extract_look.py +++ b/pype/plugins/maya/publish/extract_look.py @@ -21,27 +21,6 @@ COPY = 1 HARDLINK = 2 -def source_hash(filepath, *args): - """Generate simple identifier for a source file. - This is used to identify whether a source file has previously been - processe into the pipeline, e.g. a texture. - The hash is based on source filepath, modification time and file size. - This is only used to identify whether a specific source file was already - published before from the same location with the same modification date. 
- We opt to do it this way as opposed to Avalanch C4 hash as this is much - faster and predictable enough for all our production use cases. - Args: - filepath (str): The source file path. - You can specify additional arguments in the function - to allow for specific 'processing' values to be included. - """ - # We replace dots with comma because . cannot be a key in a pymongo dict. - file_name = os.path.basename(filepath) - time = str(os.path.getmtime(filepath)) - size = str(os.path.getsize(filepath)) - return "|".join([file_name, time, size] + list(args)).replace(".", ",") - - def find_paths_by_hash(texture_hash): # Find the texture hash key in the dictionary and all paths that # originate from it. @@ -363,7 +342,7 @@ class ExtractLook(pype.api.Extractor): args = [] if do_maketx: args.append("maketx") - texture_hash = source_hash(filepath, *args) + texture_hash = pype.api.source_hash(filepath, *args) # If source has been published before with the same settings, # then don't reprocess but hardlink from the original diff --git a/pype/plugins/maya/publish/extract_maya_ascii_raw.py b/pype/plugins/maya/publish/extract_maya_scene_raw.py similarity index 60% rename from pype/plugins/maya/publish/extract_maya_ascii_raw.py rename to pype/plugins/maya/publish/extract_maya_scene_raw.py index 895b6acbfe..2971572552 100644 --- a/pype/plugins/maya/publish/extract_maya_ascii_raw.py +++ b/pype/plugins/maya/publish/extract_maya_scene_raw.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- +"""Extract data as Maya scene (raw).""" import os from maya import cmds @@ -6,24 +8,37 @@ import avalon.maya import pype.api -class ExtractMayaAsciiRaw(pype.api.Extractor): - """Extract as Maya Ascii (raw) +class ExtractMayaSceneRaw(pype.api.Extractor): + """Extract as Maya Scene (raw). This will preserve all references, construction history, etc. 
- """ - label = "Maya ASCII (Raw)" + label = "Maya Scene (Raw)" hosts = ["maya"] families = ["mayaAscii", "setdress", "layout"] + scene_type = "ma" def process(self, instance): - + """Plugin entry point.""" + ext_mapping = instance.context.data["presets"]["maya"].get("ext_mapping") # noqa: E501 + if ext_mapping: + self.log.info("Looking in presets for scene type ...") + # use extension mapping for first family found + for family in self.families: + try: + self.scene_type = ext_mapping[family] + self.log.info( + "Using {} as scene type".format(self.scene_type)) + break + except AttributeError: + # no preset found + pass # Define extract output file path dir_path = self.staging_dir(instance) - filename = "{0}.ma".format(instance.name) + filename = "{0}.{1}".format(instance.name, self.scene_type) path = os.path.join(dir_path, filename) # Whether to include all nodes in the instance (including those from @@ -38,12 +53,12 @@ class ExtractMayaAsciiRaw(pype.api.Extractor): members = instance[:] # Perform extraction - self.log.info("Performing extraction..") + self.log.info("Performing extraction ...") with avalon.maya.maintained_selection(): cmds.select(members, noExpand=True) cmds.file(path, force=True, - typ="mayaAscii", + typ="mayaAscii" if self.scene_type == "ma" else "mayaBinary", # noqa: E501 exportSelected=True, preserveReferences=True, constructionHistory=True, @@ -55,8 +70,8 @@ class ExtractMayaAsciiRaw(pype.api.Extractor): instance.data["representations"] = [] representation = { - 'name': 'ma', - 'ext': 'ma', + 'name': self.scene_type, + 'ext': self.scene_type, 'files': filename, "stagingDir": dir_path } diff --git a/pype/plugins/maya/publish/extract_model.py b/pype/plugins/maya/publish/extract_model.py index ba56194eea..330e471e53 100644 --- a/pype/plugins/maya/publish/extract_model.py +++ b/pype/plugins/maya/publish/extract_model.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- +"""Extract model as Maya Scene.""" import os from maya import cmds @@ -8,7 +10,7 @@ 
from pype.hosts.maya import lib class ExtractModel(pype.api.Extractor): - """Extract as Model (Maya Ascii) + """Extract as Model (Maya Scene). Only extracts contents based on the original "setMembers" data to ensure publishing the least amount of required shapes. From that it only takes @@ -22,19 +24,33 @@ class ExtractModel(pype.api.Extractor): """ - label = "Model (Maya ASCII)" + label = "Model (Maya Scene)" hosts = ["maya"] families = ["model"] + scene_type = "ma" def process(self, instance): - + """Plugin entry point.""" + ext_mapping = instance.context.data["presets"]["maya"].get("ext_mapping") # noqa: E501 + if ext_mapping: + self.log.info("Looking in presets for scene type ...") + # use extension mapping for first family found + for family in self.families: + try: + self.scene_type = ext_mapping[family] + self.log.info( + "Using {} as scene type".format(self.scene_type)) + break + except AttributeError: + # no preset found + pass # Define extract output file path stagingdir = self.staging_dir(instance) - filename = "{0}.ma".format(instance.name) + filename = "{0}.{1}".format(instance.name, self.scene_type) path = os.path.join(stagingdir, filename) # Perform extraction - self.log.info("Performing extraction..") + self.log.info("Performing extraction ...") # Get only the shape contents we need in such a way that we avoid # taking along intermediateObjects @@ -59,7 +75,7 @@ class ExtractModel(pype.api.Extractor): cmds.select(members, noExpand=True) cmds.file(path, force=True, - typ="mayaAscii", + typ="mayaAscii" if self.scene_type == "ma" else "mayaBinary", # noqa: E501 exportSelected=True, preserveReferences=False, channels=False, @@ -73,8 +89,8 @@ class ExtractModel(pype.api.Extractor): instance.data["representations"] = [] representation = { - 'name': 'ma', - 'ext': 'ma', + 'name': self.scene_type, + 'ext': self.scene_type, 'files': filename, "stagingDir": stagingdir, } diff --git a/pype/plugins/maya/publish/extract_playblast.py 
b/pype/plugins/maya/publish/extract_playblast.py index 8d45f98b90..8f8b7fcb36 100644 --- a/pype/plugins/maya/publish/extract_playblast.py +++ b/pype/plugins/maya/publish/extract_playblast.py @@ -53,7 +53,6 @@ class ExtractPlayblast(pype.api.Extractor): preset['camera'] = camera preset['format'] = "image" - # preset['compression'] = "qt" preset['quality'] = 95 preset['compression'] = "png" preset['start_frame'] = start @@ -77,6 +76,11 @@ class ExtractPlayblast(pype.api.Extractor): pm.currentTime(refreshFrameInt - 1, edit=True) pm.currentTime(refreshFrameInt, edit=True) + # Isolate view is requested by having objects in the set besides a + # camera. + if instance.data.get("isolate"): + preset["isolate"] = instance.data["setMembers"] + with maintained_time(): filename = preset.get("filename", "%TEMP%") @@ -102,6 +106,10 @@ class ExtractPlayblast(pype.api.Extractor): if "representations" not in instance.data: instance.data["representations"] = [] + tags = ["review"] + if not instance.data.get("keepImages"): + tags.append("delete") + representation = { 'name': 'png', 'ext': 'png', @@ -111,7 +119,7 @@ class ExtractPlayblast(pype.api.Extractor): "frameEnd": end, 'fps': fps, 'preview': True, - 'tags': ['review', 'delete'] + 'tags': tags } instance.data["representations"].append(representation) diff --git a/pype/plugins/maya/publish/extract_rig.py b/pype/plugins/maya/publish/extract_rig.py index c98e562313..8ebeae4184 100644 --- a/pype/plugins/maya/publish/extract_rig.py +++ b/pype/plugins/maya/publish/extract_rig.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- +"""Extract rig as Maya Scene.""" import os from maya import cmds @@ -7,26 +9,40 @@ import pype.api class ExtractRig(pype.api.Extractor): - """Extract rig as Maya Ascii""" + """Extract rig as Maya Scene.""" - label = "Extract Rig (Maya ASCII)" + label = "Extract Rig (Maya Scene)" hosts = ["maya"] families = ["rig"] + scene_type = "ma" def process(self, instance): - + """Plugin entry point.""" + ext_mapping = 
instance.context.data["presets"]["maya"].get("ext_mapping") # noqa: E501 + if ext_mapping: + self.log.info("Looking in presets for scene type ...") + # use extension mapping for first family found + for family in self.families: + try: + self.scene_type = ext_mapping[family] + self.log.info( + "Using {} as scene type".format(self.scene_type)) + break + except AttributeError: + # no preset found + pass # Define extract output file path dir_path = self.staging_dir(instance) - filename = "{0}.ma".format(instance.name) + filename = "{0}.{1}".format(instance.name, self.scene_type) path = os.path.join(dir_path, filename) # Perform extraction - self.log.info("Performing extraction..") + self.log.info("Performing extraction ...") with avalon.maya.maintained_selection(): cmds.select(instance, noExpand=True) cmds.file(path, force=True, - typ="mayaAscii", + typ="mayaAscii" if self.scene_type == "ma" else "mayaBinary", # noqa: E501 exportSelected=True, preserveReferences=False, channels=True, @@ -38,12 +54,11 @@ class ExtractRig(pype.api.Extractor): instance.data["representations"] = [] representation = { - 'name': 'ma', - 'ext': 'ma', + 'name': self.scene_type, + 'ext': self.scene_type, 'files': filename, "stagingDir": dir_path } instance.data["representations"].append(representation) - self.log.info("Extracted instance '%s' to: %s" % (instance.name, path)) diff --git a/pype/plugins/maya/publish/extract_thumbnail.py b/pype/plugins/maya/publish/extract_thumbnail.py index c0eb2a608e..524fc1e17c 100644 --- a/pype/plugins/maya/publish/extract_thumbnail.py +++ b/pype/plugins/maya/publish/extract_thumbnail.py @@ -77,6 +77,11 @@ class ExtractThumbnail(pype.api.Extractor): pm.currentTime(refreshFrameInt - 1, edit=True) pm.currentTime(refreshFrameInt, edit=True) + # Isolate view is requested by having objects in the set besides a + # camera. 
+ if instance.data.get("isolate"): + preset["isolate"] = instance.data["setMembers"] + with maintained_time(): filename = preset.get("filename", "%TEMP%") diff --git a/pype/plugins/maya/publish/extract_yeti_rig.py b/pype/plugins/maya/publish/extract_yeti_rig.py index d7bbcd6555..2f66d3e026 100644 --- a/pype/plugins/maya/publish/extract_yeti_rig.py +++ b/pype/plugins/maya/publish/extract_yeti_rig.py @@ -1,3 +1,6 @@ +# -*- coding: utf-8 -*- +"""Extract Yeti rig.""" + import os import json import contextlib @@ -11,7 +14,7 @@ import pype.hosts.maya.lib as maya @contextlib.contextmanager def disconnect_plugs(settings, members): - + """Disconnect and store attribute connections.""" members = cmds.ls(members, long=True) original_connections = [] try: @@ -55,7 +58,7 @@ def disconnect_plugs(settings, members): @contextlib.contextmanager def yetigraph_attribute_values(assumed_destination, resources): - + """Get values from Yeti attributes in graph.""" try: for resource in resources: if "graphnode" not in resource: @@ -89,14 +92,28 @@ def yetigraph_attribute_values(assumed_destination, resources): class ExtractYetiRig(pype.api.Extractor): - """Extract the Yeti rig to a MayaAscii and write the Yeti rig data""" + """Extract the Yeti rig to a Maya Scene and write the Yeti rig data.""" label = "Extract Yeti Rig" hosts = ["maya"] families = ["yetiRig"] + scene_type = "ma" def process(self, instance): - + """Plugin entry point.""" + ext_mapping = instance.context.data["presets"]["maya"].get("ext_mapping") # noqa: E501 + if ext_mapping: + self.log.info("Looking in presets for scene type ...") + # use extension mapping for first family found + for family in self.families: + try: + self.scene_type = ext_mapping[family] + self.log.info( + "Using {} as scene type".format(self.scene_type)) + break + except AttributeError: + # no preset found + pass yeti_nodes = cmds.ls(instance, type="pgYetiMaya") if not yeti_nodes: raise RuntimeError("No pgYetiMaya nodes found in the instance") @@ 
-106,7 +123,8 @@ class ExtractYetiRig(pype.api.Extractor): settings_path = os.path.join(dirname, "yeti.rigsettings") # Yeti related staging dirs - maya_path = os.path.join(dirname, "yeti_rig.ma") + maya_path = os.path.join( + dirname, "yeti_rig.{}".format(self.scene_type)) self.log.info("Writing metadata file") @@ -153,7 +171,7 @@ class ExtractYetiRig(pype.api.Extractor): cmds.file(maya_path, force=True, exportSelected=True, - typ="mayaAscii", + typ="mayaAscii" if self.scene_type == "ma" else "mayaBinary", # noqa: E501 preserveReferences=False, constructionHistory=True, shader=False) @@ -163,21 +181,21 @@ class ExtractYetiRig(pype.api.Extractor): if "representations" not in instance.data: instance.data["representations"] = [] - self.log.info("rig file: {}".format("yeti_rig.ma")) + self.log.info("rig file: {}".format(maya_path)) instance.data["representations"].append( { - 'name': "ma", - 'ext': 'ma', - 'files': "yeti_rig.ma", + 'name': self.scene_type, + 'ext': self.scene_type, + 'files': os.path.basename(maya_path), 'stagingDir': dirname } ) - self.log.info("settings file: {}".format("yeti.rigsettings")) + self.log.info("settings file: {}".format(settings)) instance.data["representations"].append( { 'name': 'rigsettings', 'ext': 'rigsettings', - 'files': 'yeti.rigsettings', + 'files': os.path.basename(settings), 'stagingDir': dirname } ) diff --git a/pype/plugins/maya/publish/submit_maya_deadline.py b/pype/plugins/maya/publish/submit_maya_deadline.py index d81d43749c..e4048592a7 100644 --- a/pype/plugins/maya/publish/submit_maya_deadline.py +++ b/pype/plugins/maya/publish/submit_maya_deadline.py @@ -16,10 +16,16 @@ Attributes: """ +from __future__ import print_function import os import json import getpass import copy +import re +import hashlib +from datetime import datetime +import itertools +from collections import OrderedDict import clique import requests @@ -44,6 +50,7 @@ payload_skeleton = { "Plugin": "MayaPype", "Frames": "{start}-{end}x{step}", "Comment": 
None, + "Priority": 50, }, "PluginInfo": { "SceneFile": None, # Input @@ -59,6 +66,98 @@ payload_skeleton = { } +def _format_tiles( + filename, index, tiles_x, tiles_y, + width, height, prefix): + """Generate tile entries for Deadline tile job. + + Returns two dictionaries - one that can be directly used in Deadline + job, second that can be used for Deadline Assembly job configuration + file. + + This will format tile names: + + Example:: + { + "OutputFilename0Tile0": "_tile_1x1_4x4_Main_beauty.1001.exr", + "OutputFilename0Tile1": "_tile_2x1_4x4_Main_beauty.1001.exr" + } + + And add tile prefixes like: + + Example:: + Image prefix is: + `maya///_` + + Result for tile 0 for 4x4 will be: + `maya///_tile_1x1_4x4__` + + Calculating coordinates is tricky as in Job they are defined as top, + left, bottom, right with zero being in top-left corner. But Assembler + configuration file takes tile coordinates as X, Y, Width and Height and + zero is bottom left corner. + + Args: + filename (str): Filename to process as tiles. + index (int): Index of that file if it is sequence. + tiles_x (int): Number of tiles in X. + tiles_y (int): Number if tikes in Y. + width (int): Width resolution of final image. + height (int): Height resolution of final image. + prefix (str): Image prefix. + + Returns: + (dict, dict): Tuple of two dictionaires - first can be used to + extend JobInfo, second has tiles x, y, width and height + used for assembler configuration. 
+ + """ + tile = 0 + out = {"JobInfo": {}, "PluginInfo": {}} + cfg = OrderedDict() + w_space = width / tiles_x + h_space = height / tiles_y + + cfg["TilesCropped"] = "False" + + for tile_x in range(1, tiles_x + 1): + for tile_y in reversed(range(1, tiles_y + 1)): + tile_prefix = "_tile_{}x{}_{}x{}_".format( + tile_x, tile_y, + tiles_x, + tiles_y + ) + out_tile_index = "OutputFilename{}Tile{}".format( + str(index), tile + ) + new_filename = "{}/{}{}".format( + os.path.dirname(filename), + tile_prefix, + os.path.basename(filename) + ) + out["JobInfo"][out_tile_index] = new_filename + out["PluginInfo"]["RegionPrefix{}".format(str(tile))] = \ + "/{}".format(tile_prefix).join(prefix.rsplit("/", 1)) + + out["PluginInfo"]["RegionTop{}".format(tile)] = int(height) - (tile_y * h_space) # noqa: E501 + out["PluginInfo"]["RegionBottom{}".format(tile)] = int(height) - ((tile_y - 1) * h_space) - 1 # noqa: E501 + out["PluginInfo"]["RegionLeft{}".format(tile)] = (tile_x - 1) * w_space # noqa: E501 + out["PluginInfo"]["RegionRight{}".format(tile)] = (tile_x * w_space) - 1 # noqa: E501 + + cfg["Tile{}".format(tile)] = new_filename + cfg["Tile{}Tile".format(tile)] = new_filename + cfg["Tile{}FileName".format(tile)] = new_filename + cfg["Tile{}X".format(tile)] = (tile_x - 1) * w_space + + cfg["Tile{}Y".format(tile)] = int(height) - (tile_y * h_space) + + cfg["Tile{}Width".format(tile)] = w_space + cfg["Tile{}Height".format(tile)] = h_space + + tile += 1 + return out, cfg + + def get_renderer_variables(renderlayer, root): """Retrieve the extension which has been set in the VRay settings. 
@@ -85,7 +184,8 @@ def get_renderer_variables(renderlayer, root): gin="#" * int(padding), lut=True, layer=renderlayer or lib.get_current_renderlayer())[0] - filename_0 = filename_0.replace('_', '_beauty') + filename_0 = re.sub('_', '_beauty', + filename_0, flags=re.IGNORECASE) prefix_attr = "defaultRenderGlobals.imageFilePrefix" if renderer == "vray": renderlayer = renderlayer.split("_")[-1] @@ -108,8 +208,8 @@ def get_renderer_variables(renderlayer, root): # does not work for vray. scene = cmds.file(query=True, sceneName=True) scene, _ = os.path.splitext(os.path.basename(scene)) - filename_0 = filename_prefix.replace('', scene) - filename_0 = filename_0.replace('', renderlayer) + filename_0 = re.sub('', scene, filename_prefix, flags=re.IGNORECASE) # noqa: E501 + filename_0 = re.sub('', renderlayer, filename_0, flags=re.IGNORECASE) # noqa: E501 filename_0 = "{}.{}.{}".format( filename_0, "#" * int(padding), extension) filename_0 = os.path.normpath(os.path.join(root, filename_0)) @@ -161,9 +261,11 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): optional = True use_published = True + tile_assembler_plugin = "PypeTileAssembler" def process(self, instance): """Plugin entry point.""" + instance.data["toBeRenderedOn"] = "deadline" self._instance = instance self._deadline_url = os.environ.get( "DEADLINE_REST_URL", "http://localhost:8082") @@ -172,6 +274,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): context = instance.context workspace = context.data["workspaceDir"] anatomy = context.data['anatomy'] + instance.data["toBeRenderedOn"] = "deadline" filepath = None @@ -298,10 +401,13 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): payload_skeleton["JobInfo"]["Name"] = jobname # Arbitrary username, for visualisation in Monitor payload_skeleton["JobInfo"]["UserName"] = deadline_user + # Set job priority + payload_skeleton["JobInfo"]["Priority"] = self._instance.data.get( + "priority", 50) # Optional, enable double-click to preview rendered # frames 
from Deadline Monitor payload_skeleton["JobInfo"]["OutputDirectory0"] = \ - os.path.dirname(output_filename_0) + os.path.dirname(output_filename_0).replace("\\", "/") payload_skeleton["JobInfo"]["OutputFilename0"] = \ output_filename_0.replace("\\", "/") @@ -368,42 +474,259 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): # Add list of expected files to job --------------------------------- exp = instance.data.get("expectedFiles") - - output_filenames = {} exp_index = 0 + output_filenames = {} if isinstance(exp[0], dict): # we have aovs and we need to iterate over them for _aov, files in exp[0].items(): - col = clique.assemble(files)[0][0] - output_file = col.format('{head}{padding}{tail}') - payload['JobInfo']['OutputFilename' + str(exp_index)] = output_file # noqa: E501 - output_filenames[exp_index] = output_file + col, rem = clique.assemble(files) + if not col and rem: + # we couldn't find any collections but have + # individual files. + assert len(rem) == 1, ("Found multiple non related files " + "to render, don't know what to do " + "with them.") + output_file = rem[0] + if not instance.data.get("tileRendering"): + payload['JobInfo']['OutputFilename' + str(exp_index)] = output_file # noqa: E501 + else: + output_file = col[0].format('{head}{padding}{tail}') + if not instance.data.get("tileRendering"): + payload['JobInfo']['OutputFilename' + str(exp_index)] = output_file # noqa: E501 + + output_filenames['OutputFilename' + str(exp_index)] = output_file # noqa: E501 exp_index += 1 else: - col = clique.assemble(files)[0][0] - output_file = col.format('{head}{padding}{tail}') - payload['JobInfo']['OutputFilename' + str(exp_index)] = output_file - # OutputFilenames[exp_index] = output_file + col, rem = clique.assemble(exp) + if not col and rem: + # we couldn't find any collections but have + # individual files. 
+ assert len(rem) == 1, ("Found multiple non related files " + "to render, don't know what to do " + "with them.") + + output_file = rem[0] + if not instance.data.get("tileRendering"): + payload['JobInfo']['OutputFilename' + str(exp_index)] = output_file # noqa: E501 + else: + output_file = col[0].format('{head}{padding}{tail}') + if not instance.data.get("tileRendering"): + payload['JobInfo']['OutputFilename' + str(exp_index)] = output_file # noqa: E501 + + output_filenames['OutputFilename' + str(exp_index)] = output_file plugin = payload["JobInfo"]["Plugin"] self.log.info("using render plugin : {}".format(plugin)) - self.preflight_check(instance) - - # Submit job to farm ------------------------------------------------ - self.log.info("Submitting ...") - self.log.debug(json.dumps(payload, indent=4, sort_keys=True)) - - # E.g. http://192.168.0.1:8082/api/jobs - url = "{}/api/jobs".format(self._deadline_url) - response = self._requests_post(url, json=payload) - if not response.ok: - raise Exception(response.text) - # Store output dir for unified publisher (filesequence) instance.data["outputDir"] = os.path.dirname(output_filename_0) - instance.data["deadlineSubmissionJob"] = response.json() + + self.preflight_check(instance) + + # Prepare tiles data ------------------------------------------------ + if instance.data.get("tileRendering"): + # if we have sequence of files, we need to create tile job for + # every frame + + payload["JobInfo"]["TileJob"] = True + payload["JobInfo"]["TileJobTilesInX"] = instance.data.get("tilesX") + payload["JobInfo"]["TileJobTilesInY"] = instance.data.get("tilesY") + payload["PluginInfo"]["ImageHeight"] = instance.data.get("resolutionHeight") # noqa: E501 + payload["PluginInfo"]["ImageWidth"] = instance.data.get("resolutionWidth") # noqa: E501 + payload["PluginInfo"]["RegionRendering"] = True + + assembly_payload = { + "AuxFiles": [], + "JobInfo": { + "BatchName": payload["JobInfo"]["BatchName"], + "Frames": 1, + "Name": "{} - Tile 
Assembly Job".format( + payload["JobInfo"]["Name"]), + "OutputDirectory0": + payload["JobInfo"]["OutputDirectory0"].replace( + "\\", "/"), + "Plugin": self.tile_assembler_plugin, + "MachineLimit": 1 + }, + "PluginInfo": { + "CleanupTiles": 1, + "ErrorOnMissing": True + } + } + assembly_payload["JobInfo"].update(output_filenames) + assembly_payload["JobInfo"]["Priority"] = self._instance.data.get( + "priority", 50) + assembly_payload["JobInfo"]["UserName"] = deadline_user + + frame_payloads = [] + assembly_payloads = [] + + R_FRAME_NUMBER = re.compile(r".+\.(?P[0-9]+)\..+") # noqa: N806, E501 + REPL_FRAME_NUMBER = re.compile(r"(.+\.)([0-9]+)(\..+)") # noqa: N806, E501 + + if isinstance(exp[0], dict): + # we have aovs and we need to iterate over them + # get files from `beauty` + files = exp[0].get("beauty") + # assembly files are used for assembly jobs as we need to put + # together all AOVs + assembly_files = list( + itertools.chain.from_iterable( + [f for _, f in exp[0].items()])) + if not files: + # if beauty doesn't exists, use first aov we found + files = exp[0].get(list(exp[0].keys())[0]) + else: + files = exp + assembly_files = files + + frame_jobs = {} + + file_index = 1 + for file in files: + frame = re.search(R_FRAME_NUMBER, file).group("frame") + new_payload = copy.deepcopy(payload) + new_payload["JobInfo"]["Name"] = \ + "{} (Frame {} - {} tiles)".format( + payload["JobInfo"]["Name"], + frame, + instance.data.get("tilesX") * instance.data.get("tilesY") # noqa: E501 + ) + self.log.info( + "... 
preparing job {}".format( + new_payload["JobInfo"]["Name"])) + new_payload["JobInfo"]["TileJobFrame"] = frame + + tiles_data = _format_tiles( + file, 0, + instance.data.get("tilesX"), + instance.data.get("tilesY"), + instance.data.get("resolutionWidth"), + instance.data.get("resolutionHeight"), + payload["PluginInfo"]["OutputFilePrefix"] + )[0] + new_payload["JobInfo"].update(tiles_data["JobInfo"]) + new_payload["PluginInfo"].update(tiles_data["PluginInfo"]) + + job_hash = hashlib.sha256("{}_{}".format(file_index, file)) + frame_jobs[frame] = job_hash.hexdigest() + new_payload["JobInfo"]["ExtraInfo0"] = job_hash.hexdigest() + new_payload["JobInfo"]["ExtraInfo1"] = file + + frame_payloads.append(new_payload) + file_index += 1 + + file_index = 1 + for file in assembly_files: + frame = re.search(R_FRAME_NUMBER, file).group("frame") + new_assembly_payload = copy.deepcopy(assembly_payload) + new_assembly_payload["JobInfo"]["Name"] = \ + "{} (Frame {})".format( + assembly_payload["JobInfo"]["Name"], + frame) + new_assembly_payload["JobInfo"]["OutputFilename0"] = re.sub( + REPL_FRAME_NUMBER, + "\\1{}\\3".format("#" * len(frame)), file) + + new_assembly_payload["PluginInfo"]["Renderer"] = self._instance.data["renderer"] # noqa: E501 + new_assembly_payload["JobInfo"]["ExtraInfo0"] = frame_jobs[frame] # noqa: E501 + new_assembly_payload["JobInfo"]["ExtraInfo1"] = file + assembly_payloads.append(new_assembly_payload) + file_index += 1 + + self.log.info( + "Submitting tile job(s) [{}] ...".format(len(frame_payloads))) + + url = "{}/api/jobs".format(self._deadline_url) + tiles_count = instance.data.get("tilesX") * instance.data.get("tilesY") # noqa: E501 + + for tile_job in frame_payloads: + response = self._requests_post(url, json=tile_job) + if not response.ok: + raise Exception(response.text) + + job_id = response.json()["_id"] + hash = response.json()["Props"]["Ex0"] + + for assembly_job in assembly_payloads: + if assembly_job["JobInfo"]["ExtraInfo0"] == hash: + 
assembly_job["JobInfo"]["JobDependency0"] = job_id + + for assembly_job in assembly_payloads: + file = assembly_job["JobInfo"]["ExtraInfo1"] + # write assembly job config files + now = datetime.now() + + config_file = os.path.join( + os.path.dirname(output_filename_0), + "{}_config_{}.txt".format( + os.path.splitext(file)[0], + now.strftime("%Y_%m_%d_%H_%M_%S") + ) + ) + + try: + if not os.path.isdir(os.path.dirname(config_file)): + os.makedirs(os.path.dirname(config_file)) + except OSError: + # directory is not available + self.log.warning( + "Path is unreachable: `{}`".format( + os.path.dirname(config_file))) + + # add config file as job auxFile + assembly_job["AuxFiles"] = [config_file] + + with open(config_file, "w") as cf: + print("TileCount={}".format(tiles_count), file=cf) + print("ImageFileName={}".format(file), file=cf) + print("ImageWidth={}".format( + instance.data.get("resolutionWidth")), file=cf) + print("ImageHeight={}".format( + instance.data.get("resolutionHeight")), file=cf) + + tiles = _format_tiles( + file, 0, + instance.data.get("tilesX"), + instance.data.get("tilesY"), + instance.data.get("resolutionWidth"), + instance.data.get("resolutionHeight"), + payload["PluginInfo"]["OutputFilePrefix"] + )[1] + sorted(tiles) + for k, v in tiles.items(): + print("{}={}".format(k, v), file=cf) + + job_idx = 1 + instance.data["assemblySubmissionJobs"] = [] + for ass_job in assembly_payloads: + self.log.info("submitting assembly job {} of {}".format( + job_idx, len(assembly_payloads) + )) + self.log.debug(json.dumps(ass_job, indent=4, sort_keys=True)) + response = self._requests_post(url, json=ass_job) + if not response.ok: + raise Exception(response.text) + + instance.data["assemblySubmissionJobs"].append( + response.json()["_id"]) + job_idx += 1 + + instance.data["jobBatchName"] = payload["JobInfo"]["BatchName"] + self.log.info("Setting batch name on instance: {}".format( + instance.data["jobBatchName"])) + else: + # Submit job to farm 
-------------------------------------------- + self.log.info("Submitting ...") + self.log.debug(json.dumps(payload, indent=4, sort_keys=True)) + + # E.g. http://192.168.0.1:8082/api/jobs + url = "{}/api/jobs".format(self._deadline_url) + response = self._requests_post(url, json=payload) + if not response.ok: + raise Exception(response.text) + instance.data["deadlineSubmissionJob"] = response.json() def _get_maya_payload(self, data): payload = copy.deepcopy(payload_skeleton) diff --git a/pype/plugins/maya/publish/submit_maya_muster.py b/pype/plugins/maya/publish/submit_maya_muster.py index 5a2e578793..ffe434048a 100644 --- a/pype/plugins/maya/publish/submit_maya_muster.py +++ b/pype/plugins/maya/publish/submit_maya_muster.py @@ -249,6 +249,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin): Authenticate with Muster, collect all data, prepare path for post render publish job and submit job to farm. """ + instance.data["toBeRenderedOn"] = "muster" # setup muster environment self.MUSTER_REST_URL = os.environ.get("MUSTER_REST_URL") diff --git a/pype/plugins/maya/publish/validate_attributes.py b/pype/plugins/maya/publish/validate_attributes.py index 6ecebfa107..a77fbe5e93 100644 --- a/pype/plugins/maya/publish/validate_attributes.py +++ b/pype/plugins/maya/publish/validate_attributes.py @@ -62,9 +62,16 @@ class ValidateAttributes(pyblish.api.ContextPlugin): for family in families: for preset in presets[family]: [node_name, attribute_name] = preset.split(".") - attributes.update( - {node_name: {attribute_name: presets[family][preset]}} - ) + try: + attributes[node_name].update( + {attribute_name: presets[family][preset]} + ) + except KeyError: + attributes.update({ + node_name: { + attribute_name: presets[family][preset] + } + }) # Get invalid attributes. 
nodes = pm.ls() diff --git a/pype/plugins/maya/publish/validate_frame_range.py b/pype/plugins/maya/publish/validate_frame_range.py index 0d51a83cf5..1ee6e2bd25 100644 --- a/pype/plugins/maya/publish/validate_frame_range.py +++ b/pype/plugins/maya/publish/validate_frame_range.py @@ -29,6 +29,12 @@ class ValidateFrameRange(pyblish.api.InstancePlugin): def process(self, instance): context = instance.context + if instance.data.get("tileRendering"): + self.log.info(( + "Skipping frame range validation because " + "tile rendering is enabled." + )) + return frame_start_handle = int(context.data.get("frameStartHandle")) frame_end_handle = int(context.data.get("frameEndHandle")) diff --git a/pype/plugins/maya/publish/validate_transform_naming_suffix.py b/pype/plugins/maya/publish/validate_transform_naming_suffix.py index 17066f6b12..120123af4b 100644 --- a/pype/plugins/maya/publish/validate_transform_naming_suffix.py +++ b/pype/plugins/maya/publish/validate_transform_naming_suffix.py @@ -103,9 +103,7 @@ class ValidateTransformNamingSuffix(pyblish.api.InstancePlugin): instance (:class:`pyblish.api.Instance`): published instance. 
""" - invalid = self.get_invalid(instance, - self.SUFFIX_NAMING_TABLE, - self.ALLOW_IF_NOT_IN_SUFFIX_TABLE) + invalid = self.get_invalid(instance) if invalid: raise ValueError("Incorrectly named geometry " "transforms: {0}".format(invalid)) diff --git a/pype/plugins/nuke/load/load_image.py b/pype/plugins/nuke/load/load_image.py new file mode 100644 index 0000000000..377d52aa14 --- /dev/null +++ b/pype/plugins/nuke/load/load_image.py @@ -0,0 +1,233 @@ +import re +import nuke + +from avalon.vendor import qargparse +from avalon import api, io + +from pype.hosts.nuke import presets + + +class LoadImage(api.Loader): + """Load still image into Nuke""" + + families = [ + "render2d", "source", "plate", + "render", "prerender", "review", + "image" + ] + representations = ["exr", "dpx", "jpg", "jpeg", "png", "psd"] + + label = "Load Image" + order = -10 + icon = "image" + color = "white" + + options = [ + qargparse.Integer( + "frame_number", + label="Frame Number", + default=int(nuke.root()["first_frame"].getValue()), + min=1, + max=999999, + help="What frame is reading from?" 
+ ) + ] + + def load(self, context, name, namespace, options): + from avalon.nuke import ( + containerise, + viewer_update_and_undo_stop + ) + self.log.info("__ options: `{}`".format(options)) + frame_number = options.get("frame_number", 1) + + version = context['version'] + version_data = version.get("data", {}) + repr_id = context["representation"]["_id"] + + self.log.info("version_data: {}\n".format(version_data)) + self.log.debug( + "Representation id `{}` ".format(repr_id)) + + last = first = int(frame_number) + + # Fallback to asset name when namespace is None + if namespace is None: + namespace = context['asset']['name'] + + file = self.fname + + if not file: + repr_id = context["representation"]["_id"] + self.log.warning( + "Representation id `{}` is failing to load".format(repr_id)) + return + + file = file.replace("\\", "/") + + repr_cont = context["representation"]["context"] + frame = repr_cont.get("frame") + if frame: + padding = len(frame) + file = file.replace( + frame, + format(frame_number, "0{}".format(padding))) + + read_name = "Read_{0}_{1}_{2}".format( + repr_cont["asset"], + repr_cont["subset"], + repr_cont["representation"]) + + # Create the Loader with the filename path set + with viewer_update_and_undo_stop(): + r = nuke.createNode( + "Read", + "name {}".format(read_name)) + r["file"].setValue(file) + + # Set colorspace defined in version data + colorspace = context["version"]["data"].get("colorspace") + if colorspace: + r["colorspace"].setValue(str(colorspace)) + + # load nuke presets for Read's colorspace + read_clrs_presets = presets.get_colorspace_preset().get( + "nuke", {}).get("read", {}) + + # check if any colorspace presets for read is mathing + preset_clrsp = next((read_clrs_presets[k] + for k in read_clrs_presets + if bool(re.search(k, file))), + None) + if preset_clrsp is not None: + r["colorspace"].setValue(str(preset_clrsp)) + + r["origfirst"].setValue(first) + r["first"].setValue(first) + r["origlast"].setValue(last) + 
r["last"].setValue(last) + + # add additional metadata from the version to imprint Avalon knob + add_keys = ["source", "colorspace", "author", "fps", "version"] + + data_imprint = { + "frameStart": first, + "frameEnd": last + } + for k in add_keys: + if k == 'version': + data_imprint.update({k: context["version"]['name']}) + else: + data_imprint.update( + {k: context["version"]['data'].get(k, str(None))}) + + data_imprint.update({"objectName": read_name}) + + r["tile_color"].setValue(int("0x4ecd25ff", 16)) + + return containerise(r, + name=name, + namespace=namespace, + context=context, + loader=self.__class__.__name__, + data=data_imprint) + + def switch(self, container, representation): + self.update(container, representation) + + def update(self, container, representation): + """Update the Loader's path + + Nuke automatically tries to reset some variables when changing + the loader's path to a new file. These automatic changes are to its + inputs: + + """ + + from avalon.nuke import ( + update_container + ) + + node = nuke.toNode(container["objectName"]) + frame_number = node["first"].value() + + assert node.Class() == "Read", "Must be Read" + + repr_cont = representation["context"] + + file = api.get_representation_path(representation) + + if not file: + repr_id = representation["_id"] + self.log.warning( + "Representation id `{}` is failing to load".format(repr_id)) + return + + file = file.replace("\\", "/") + + frame = repr_cont.get("frame") + if frame: + padding = len(frame) + file = file.replace( + frame, + format(frame_number, "0{}".format(padding))) + + # Get start frame from version data + version = io.find_one({ + "type": "version", + "_id": representation["parent"] + }) + + # get all versions in list + versions = io.find({ + "type": "version", + "parent": version["parent"] + }).distinct('name') + + max_version = max(versions) + + version_data = version.get("data", {}) + + last = first = int(frame_number) + + # Set the global in to the start frame of 
the sequence + node["origfirst"].setValue(first) + node["first"].setValue(first) + node["origlast"].setValue(last) + node["last"].setValue(last) + + updated_dict = {} + updated_dict.update({ + "representation": str(representation["_id"]), + "frameStart": str(first), + "frameEnd": str(last), + "version": str(version.get("name")), + "colorspace": version_data.get("colorspace"), + "source": version_data.get("source"), + "fps": str(version_data.get("fps")), + "author": version_data.get("author"), + "outputDir": version_data.get("outputDir"), + }) + + # change color of node + if version.get("name") not in [max_version]: + node["tile_color"].setValue(int("0xd84f20ff", 16)) + else: + node["tile_color"].setValue(int("0x4ecd25ff", 16)) + + # Update the imprinted representation + update_container( + node, + updated_dict + ) + self.log.info("udated to version: {}".format(version.get("name"))) + + def remove(self, container): + + from avalon.nuke import viewer_update_and_undo_stop + + node = nuke.toNode(container['objectName']) + assert node.Class() == "Read", "Must be Read" + + with viewer_update_and_undo_stop(): + nuke.delete(node) diff --git a/pype/plugins/nuke/load/load_sequence.py b/pype/plugins/nuke/load/load_sequence.py index aa79d8736a..c5ce288540 100644 --- a/pype/plugins/nuke/load/load_sequence.py +++ b/pype/plugins/nuke/load/load_sequence.py @@ -70,7 +70,7 @@ def loader_shift(node, frame, relative=True): class LoadSequence(api.Loader): """Load image sequence into Nuke""" - families = ["render2d", "source", "plate", "render", "prerender"] + families = ["render2d", "source", "plate", "render", "prerender", "review"] representations = ["exr", "dpx", "jpg", "jpeg", "png"] label = "Load sequence" @@ -120,12 +120,12 @@ class LoadSequence(api.Loader): if "#" not in file: frame = repr_cont.get("frame") padding = len(frame) - file = file.replace(frame, "#"*padding) + file = file.replace(frame, "#" * padding) read_name = "Read_{0}_{1}_{2}".format( - repr_cont["asset"], - 
repr_cont["subset"], - repr_cont["representation"]) + repr_cont["asset"], + repr_cont["subset"], + repr_cont["representation"]) # Create the Loader with the filename path set with viewer_update_and_undo_stop(): @@ -250,7 +250,7 @@ class LoadSequence(api.Loader): if "#" not in file: frame = repr_cont.get("frame") padding = len(frame) - file = file.replace(frame, "#"*padding) + file = file.replace(frame, "#" * padding) # Get start frame from version data version = io.find_one({ @@ -276,10 +276,10 @@ class LoadSequence(api.Loader): last = version_data.get("frameEnd") if first is None: - self.log.warning("Missing start frame for updated version" - "assuming starts at frame 0 for: " - "{} ({})".format( - node['name'].value(), representation)) + self.log.warning( + "Missing start frame for updated version" + "assuming starts at frame 0 for: " + "{} ({})".format(node['name'].value(), representation)) first = 0 first -= self.handle_start diff --git a/pype/plugins/nuke/publish/collect_instances.py b/pype/plugins/nuke/publish/collect_instances.py index 54891d189c..9085e12bd8 100644 --- a/pype/plugins/nuke/publish/collect_instances.py +++ b/pype/plugins/nuke/publish/collect_instances.py @@ -55,6 +55,12 @@ class CollectNukeInstances(pyblish.api.ContextPlugin): families_ak = avalon_knob_data.get("families") families = list() + if families_ak: + families.append(families_ak) + + families.append(family) + + # except disabled nodes but exclude backdrops in test if ("nukenodes" not in family) and (node["disable"].value()): continue @@ -70,18 +76,19 @@ class CollectNukeInstances(pyblish.api.ContextPlugin): if node.Class() == "Group": # only alter families for render family if "write" in families_ak: + if node["render"].value(): self.log.info("flagged for render") - add_family = "{}.local".format(family) + add_family = "{}.local".format("render") # dealing with local/farm rendering if node["render_farm"].value(): self.log.info("adding render farm family") - add_family = 
"{}.farm".format(family) + add_family = "{}.farm".format("render") instance.data["transfer"] = False families.append(add_family) - else: - # add family into families - families.insert(0, family) + if "render" in families: + families.remove("render") + family = "write" node.begin() for i in nuke.allNodes(): @@ -90,10 +97,6 @@ class CollectNukeInstances(pyblish.api.ContextPlugin): self.log.debug("__ families: `{}`".format(families)) - if families_ak: - families.append(families_ak) - else: - families.append(family) # Get format format = root['format'].value() @@ -103,7 +106,7 @@ class CollectNukeInstances(pyblish.api.ContextPlugin): instance.data.update({ "subset": subset, - "asset": os.environ["AVALON_ASSET"], + "asset": avalon_knob_data["asset"], "label": node.name(), "name": node.name(), "subset": subset, diff --git a/pype/plugins/nuke/publish/collect_review.py b/pype/plugins/nuke/publish/collect_review.py index c95c94541d..e7e8da19a1 100644 --- a/pype/plugins/nuke/publish/collect_review.py +++ b/pype/plugins/nuke/publish/collect_review.py @@ -1,4 +1,7 @@ import pyblish.api +import pype.api +from avalon import io, api + import nuke @@ -23,6 +26,21 @@ class CollectReview(pyblish.api.InstancePlugin): if not node["review"].value(): return + # Add audio to instance if it exists. 
+ try: + version = pype.api.get_latest_version( + instance.context.data["assetEntity"]["name"], "audioMain" + ) + representation = io.find_one( + {"type": "representation", "parent": version["_id"]} + ) + instance.data["audio"] = [{ + "offset": 0, + "filename": api.get_representation_path(representation) + }] + except AssertionError: + pass + instance.data["families"].append("review") instance.data['families'].append('ftrack') diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py index c70953d23f..fb00aeb1ae 100644 --- a/pype/plugins/nuke/publish/collect_writes.py +++ b/pype/plugins/nuke/publish/collect_writes.py @@ -118,7 +118,6 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): "colorspace": node["colorspace"].value(), } - instance.data["family"] = "write" group_node = [x for x in instance if x.Class() == "Group"][0] deadlineChunkSize = 1 if "deadlineChunkSize" in group_node.knobs(): @@ -128,8 +127,6 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): if "deadlinePriority" in group_node.knobs(): deadlinePriority = group_node["deadlinePriority"].value() - families = [f for f in instance.data["families"] if "write" not in f] - instance.data.update({ "versionData": version_data, "path": path, @@ -143,18 +140,12 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): "frameStartHandle": first_frame, "frameEndHandle": last_frame, "outputType": output_type, - "family": "write", "families": families, "colorspace": node["colorspace"].value(), "deadlineChunkSize": deadlineChunkSize, "deadlinePriority": deadlinePriority }) - if "render" in families: - instance.data["family"] = "render2d" - if "render" not in families: - instance.data["families"].insert(0, "render") - if "prerender" in families: instance.data.update({ "family": "prerender", diff --git a/pype/plugins/nuke/publish/extract_render_local.py b/pype/plugins/nuke/publish/extract_render_local.py index 37a6701380..79662d62a8 100644 --- 
a/pype/plugins/nuke/publish/extract_render_local.py +++ b/pype/plugins/nuke/publish/extract_render_local.py @@ -82,9 +82,9 @@ class NukeRenderLocal(pype.api.Extractor): # redefinition of families if "render.local" in families: - instance.data['family'] = 'render2d' + instance.data['family'] = 'render' families.remove('render.local') - families.insert(0, "render") + families.insert(0, "render2d") elif "prerender.local" in families: instance.data['family'] = 'prerender' families.remove('prerender.local') @@ -99,4 +99,5 @@ class NukeRenderLocal(pype.api.Extractor): instance.data['collection'] = collection self.log.info('Finished render') - return + + self.log.debug("instance extracted: {}".format(instance.data)) diff --git a/pype/plugins/nuke/publish/extract_thumbnail.py b/pype/plugins/nuke/publish/extract_thumbnail.py index 5e9302a01a..a3ef09bc9f 100644 --- a/pype/plugins/nuke/publish/extract_thumbnail.py +++ b/pype/plugins/nuke/publish/extract_thumbnail.py @@ -152,7 +152,7 @@ class ExtractThumbnail(pype.api.Extractor): ipn_orig = None for v in [n for n in nuke.allNodes() - if "Viewer" in n.Class()]: + if "Viewer" == n.Class()]: ip = v['input_process'].getValue() ipn = v['input_process_node'].getValue() if "VIEWER_INPUT" not in ipn and ip: diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py index 2b8efb4640..2c7d468d3a 100644 --- a/pype/plugins/nuke/publish/submit_nuke_deadline.py +++ b/pype/plugins/nuke/publish/submit_nuke_deadline.py @@ -28,6 +28,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): deadline_chunk_size = 1 def process(self, instance): + instance.data["toBeRenderedOn"] = "deadline" families = instance.data["families"] node = instance[0] @@ -49,6 +50,24 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): render_path = instance.data['path'] script_path = context.data["currentFile"] + for item in context: + if "workfile" in item.data["families"]: + msg = "Workfile (scene) must be 
published along" + assert item.data["publish"] is True, msg + + template_data = item.data.get("anatomyData") + rep = item.data.get("representations")[0].get("name") + template_data["representation"] = rep + template_data["ext"] = rep + template_data["comment"] = None + anatomy_filled = context.data["anatomy"].format(template_data) + template_filled = anatomy_filled["publish"]["path"] + script_path = os.path.normpath(template_filled) + + self.log.info( + "Using published scene for render {}".format(script_path) + ) + # exception for slate workflow if "slate" in instance.data["families"]: self._frame_start -= 1 diff --git a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py index 38040f8c51..a41e987bdb 100644 --- a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py +++ b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py @@ -210,6 +210,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): self.log.debug( "assets_shared: {assets_shared}".format(**locals())) + class CollectHierarchyContext(pyblish.api.ContextPlugin): '''Collecting Hierarchy from instaces and building context hierarchy tree diff --git a/pype/plugins/nukestudio/publish/collect_reviews.py b/pype/plugins/nukestudio/publish/collect_reviews.py index aa8c60767c..3167c66170 100644 --- a/pype/plugins/nukestudio/publish/collect_reviews.py +++ b/pype/plugins/nukestudio/publish/collect_reviews.py @@ -63,10 +63,14 @@ class CollectReviews(api.InstancePlugin): self.log.debug("Track item on plateMain") rev_inst = None for inst in instance.context[:]: - if inst.data["track"] in track: - rev_inst = inst - self.log.debug("Instance review: {}".format( - rev_inst.data["name"])) + if inst.data["track"] != track: + continue + + if inst.data["item"].name() != instance.data["item"].name(): + continue + + rev_inst = inst + break if rev_inst is None: raise RuntimeError(( @@ -82,7 +86,7 @@ class 
CollectReviews(api.InstancePlugin): ext = os.path.splitext(file)[-1][1:] # change label - instance.data["label"] = "{0} - {1} - ({2}) - review".format( + instance.data["label"] = "{0} - {1} - ({2})".format( instance.data['asset'], instance.data["subset"], ext ) @@ -99,7 +103,7 @@ class CollectReviews(api.InstancePlugin): "step": 1, "fps": rev_inst.data.get("fps"), "name": "preview", - "tags": ["preview"], + "tags": ["preview", "ftrackreview"], "ext": ext } diff --git a/pype/plugins/photoshop/publish/extract_image.py b/pype/plugins/photoshop/publish/extract_image.py index da3197c7da..6dfccdc4f2 100644 --- a/pype/plugins/photoshop/publish/extract_image.py +++ b/pype/plugins/photoshop/publish/extract_image.py @@ -13,6 +13,7 @@ class ExtractImage(pype.api.Extractor): label = "Extract Image" hosts = ["photoshop"] families = ["image"] + formats = ["png", "jpg"] def process(self, instance): @@ -32,20 +33,22 @@ class ExtractImage(pype.api.Extractor): if layer.id not in extract_ids: layer.Visible = False - save_options = { - "png": photoshop.com_objects.PNGSaveOptions(), - "jpg": photoshop.com_objects.JPEGSaveOptions() - } + save_options = {} + if "png" in self.formats: + save_options["png"] = photoshop.com_objects.PNGSaveOptions() + if "jpg" in self.formats: + save_options["jpg"] = photoshop.com_objects.JPEGSaveOptions() + file_basename = os.path.splitext( + photoshop.app().ActiveDocument.Name + )[0] for extension, save_option in save_options.items(): + _filename = "{}.{}".format(file_basename, extension) + files[extension] = _filename + + full_filename = os.path.join(staging_dir, _filename) photoshop.app().ActiveDocument.SaveAs( - staging_dir, save_option, True - ) - files[extension] = "{} copy.{}".format( - os.path.splitext( - photoshop.app().ActiveDocument.Name - )[0], - extension + full_filename, save_option, True ) representations = [] diff --git a/pype/plugins/photoshop/publish/extract_review.py b/pype/plugins/photoshop/publish/extract_review.py index 
49e932eb67..078ee53899 100644 --- a/pype/plugins/photoshop/publish/extract_review.py +++ b/pype/plugins/photoshop/publish/extract_review.py @@ -24,9 +24,10 @@ class ExtractReview(pype.api.Extractor): layers.append(image_instance[0]) # Perform extraction - output_image = "{} copy.jpg".format( + output_image = "{}.jpg".format( os.path.splitext(photoshop.app().ActiveDocument.Name)[0] ) + output_image_path = os.path.join(staging_dir, output_image) with photoshop.maintained_visibility(): # Hide all other layers. extract_ids = [ @@ -39,9 +40,13 @@ class ExtractReview(pype.api.Extractor): layer.Visible = False photoshop.app().ActiveDocument.SaveAs( - staging_dir, photoshop.com_objects.JPEGSaveOptions(), True + output_image_path, + photoshop.com_objects.JPEGSaveOptions(), + True ) + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") + instance.data["representations"].append({ "name": "jpg", "ext": "jpg", @@ -53,13 +58,13 @@ class ExtractReview(pype.api.Extractor): # Generate thumbnail. thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg") args = [ - "ffmpeg", "-y", - "-i", os.path.join(staging_dir, output_image), + ffmpeg_path, "-y", + "-i", output_image_path, "-vf", "scale=300:-1", "-vframes", "1", thumbnail_path ] - output = pype.lib._subprocess(args, cwd=os.environ["FFMPEG_PATH"]) + output = pype.lib._subprocess(args) self.log.debug(output) @@ -74,12 +79,13 @@ class ExtractReview(pype.api.Extractor): # Generate mov. 
mov_path = os.path.join(staging_dir, "review.mov") args = [ - "ffmpeg", "-y", - "-i", os.path.join(staging_dir, output_image), + ffmpeg_path, "-y", + "-i", output_image_path, + "-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2", "-vframes", "1", mov_path ] - output = pype.lib._subprocess(args, cwd=os.environ["FFMPEG_PATH"]) + output = pype.lib._subprocess(args) self.log.debug(output) diff --git a/pype/plugins/premiere/publish/collect_instance_representations.py b/pype/plugins/premiere/publish/collect_instance_representations.py index b62b47c473..a7aa033f02 100644 --- a/pype/plugins/premiere/publish/collect_instance_representations.py +++ b/pype/plugins/premiere/publish/collect_instance_representations.py @@ -53,7 +53,7 @@ class CollectClipRepresentations(pyblish.api.InstancePlugin): "fps": fps, "name": json_repr_subset, "ext": json_repr_ext, - "tags": ["review", "delete"] + "tags": ["review", "passing", "ftrackreview"] } else: representation = { diff --git a/pype/plugins/premiere/publish/validate_auto_sync_off.py b/pype/plugins/premiere/publish/validate_auto_sync_off.py deleted file mode 100644 index 1f3f0b58a5..0000000000 --- a/pype/plugins/premiere/publish/validate_auto_sync_off.py +++ /dev/null @@ -1,52 +0,0 @@ -import sys -import pyblish.api -import pype.api -import avalon.api -import six - - -class ValidateAutoSyncOff(pyblish.api.ContextPlugin): - """Ensure that autosync value in ftrack project is set to False. - - In case was set to True and event server with the sync to avalon event - is running will cause integration to avalon will be override. 
- - """ - - order = pyblish.api.ValidatorOrder - families = ['clip'] - label = 'Ftrack project\'s auto sync off' - actions = [pype.api.RepairAction] - - def process(self, context): - session = context.data["ftrackSession"] - project_name = avalon.api.Session["AVALON_PROJECT"] - query = 'Project where full_name is "{}"'.format(project_name) - project = session.query(query).one() - invalid = self.get_invalid(context) - - assert not invalid, ( - "Ftrack Project has 'Auto sync' set to On." - " That may cause issues during integration." - ) - - @staticmethod - def get_invalid(context): - session = context.data["ftrackSession"] - project_name = avalon.api.Session["AVALON_PROJECT"] - query = 'Project where full_name is "{}"'.format(project_name) - project = session.query(query).one() - - return project - - @classmethod - def repair(cls, context): - session = context.data["ftrackSession"] - invalid = cls.get_invalid(context) - invalid['custom_attributes']['avalon_auto_sync'] = False - try: - session.commit() - except Exception: - tp, value, tb = sys.exc_info() - session.rollback() - raise diff --git a/pype/plugins/resolve/create/create_shot_clip.py b/pype/plugins/resolve/create/create_shot_clip.py new file mode 100644 index 0000000000..bd2e013fac --- /dev/null +++ b/pype/plugins/resolve/create/create_shot_clip.py @@ -0,0 +1,79 @@ +from pprint import pformat +from pype.hosts import resolve +from pype.hosts.resolve import lib + + +class CreateShotClip(resolve.Creator): + """Publishable clip""" + + label = "Shot" + family = "clip" + icon = "film" + defaults = ["Main"] + + gui_name = "Pype sequencial rename with hirerarchy" + gui_info = "Define sequencial rename and fill hierarchy data." 
+ gui_inputs = { + "clipName": "{episode}{sequence}{shot}", + "hierarchy": "{folder}/{sequence}/{shot}", + "countFrom": 10, + "steps": 10, + "hierarchyData": { + "folder": "shots", + "shot": "sh####", + "track": "{track}", + "sequence": "sc010", + "episode": "ep01" + } + } + presets = None + + def process(self): + # solve gui inputs overwrites from presets + # overwrite gui inputs from presets + for k, v in self.gui_inputs.items(): + if isinstance(v, dict): + # nested dictionary (only one level allowed) + for _k, _v in v.items(): + if self.presets.get(_k): + self.gui_inputs[k][_k] = self.presets[_k] + if self.presets.get(k): + self.gui_inputs[k] = self.presets[k] + + # open widget for plugins inputs + widget = self.widget(self.gui_name, self.gui_info, self.gui_inputs) + widget.exec_() + + print(f"__ selected_clips: {self.selected}") + if len(self.selected) < 1: + return + + if not widget.result: + print("Operation aborted") + return + + # sequence attrs + sq_frame_start = self.sequence.GetStartFrame() + sq_markers = self.sequence.GetMarkers() + print(f"__ sq_frame_start: {pformat(sq_frame_start)}") + print(f"__ seq_markers: {pformat(sq_markers)}") + + # create media bin for compound clips (trackItems) + mp_folder = resolve.create_current_sequence_media_bin(self.sequence) + print(f"_ mp_folder: {mp_folder.GetName()}") + + lib.rename_add = 0 + for i, t_data in enumerate(self.selected): + lib.rename_index = i + + # clear color after it is done + t_data["clip"]["item"].ClearClipColor() + + # convert track item to timeline media pool item + resolve.create_compound_clip( + t_data, + mp_folder, + rename=True, + **dict( + {"presets": widget.result}) + ) diff --git a/pype/plugins/resolve/publish/collect_clips.py b/pype/plugins/resolve/publish/collect_clips.py new file mode 100644 index 0000000000..f86e5c8384 --- /dev/null +++ b/pype/plugins/resolve/publish/collect_clips.py @@ -0,0 +1,162 @@ +import os +from pyblish import api +from pype.hosts import resolve +import json + + 
+class CollectClips(api.ContextPlugin): + """Collect all Track items selection.""" + + order = api.CollectorOrder + 0.01 + label = "Collect Clips" + hosts = ["resolve"] + + def process(self, context): + # create asset_names conversion table + if not context.data.get("assetsShared"): + self.log.debug("Created `assetsShared` in context") + context.data["assetsShared"] = dict() + + projectdata = context.data["projectEntity"]["data"] + selection = resolve.get_current_track_items( + filter=True, selecting_color="Pink") + + for clip_data in selection: + data = dict() + + # get basic objects form data + project = clip_data["project"] + sequence = clip_data["sequence"] + clip = clip_data["clip"] + + # sequence attrs + sq_frame_start = sequence.GetStartFrame() + self.log.debug(f"sq_frame_start: {sq_frame_start}") + + sq_markers = sequence.GetMarkers() + + # get details of objects + clip_item = clip["item"] + track = clip_data["track"] + + mp = project.GetMediaPool() + + # get clip attributes + clip_metadata = resolve.get_pype_clip_metadata(clip_item) + clip_metadata = json.loads(clip_metadata) + self.log.debug(f"clip_metadata: {clip_metadata}") + + compound_source_prop = clip_metadata["sourceProperties"] + self.log.debug(f"compound_source_prop: {compound_source_prop}") + + asset_name = clip_item.GetName() + mp_item = clip_item.GetMediaPoolItem() + mp_prop = mp_item.GetClipProperty() + source_first = int(compound_source_prop["Start"]) + source_last = int(compound_source_prop["End"]) + source_duration = compound_source_prop["Frames"] + fps = float(mp_prop["FPS"]) + self.log.debug(f"source_first: {source_first}") + self.log.debug(f"source_last: {source_last}") + self.log.debug(f"source_duration: {source_duration}") + self.log.debug(f"fps: {fps}") + + source_path = os.path.normpath( + compound_source_prop["File Path"]) + source_name = compound_source_prop["File Name"] + source_id = clip_metadata["sourceId"] + self.log.debug(f"source_path: {source_path}") + 
self.log.debug(f"source_name: {source_name}") + self.log.debug(f"source_id: {source_id}") + + clip_left_offset = int(clip_item.GetLeftOffset()) + clip_right_offset = int(clip_item.GetRightOffset()) + self.log.debug(f"clip_left_offset: {clip_left_offset}") + self.log.debug(f"clip_right_offset: {clip_right_offset}") + + # source in/out + source_in = int(source_first + clip_left_offset) + source_out = int(source_first + clip_right_offset) + self.log.debug(f"source_in: {source_in}") + self.log.debug(f"source_out: {source_out}") + + clip_in = int(clip_item.GetStart() - sq_frame_start) + clip_out = int(clip_item.GetEnd() - sq_frame_start) + clip_duration = int(clip_item.GetDuration()) + self.log.debug(f"clip_in: {clip_in}") + self.log.debug(f"clip_out: {clip_out}") + self.log.debug(f"clip_duration: {clip_duration}") + + is_sequence = False + + self.log.debug( + "__ assets_shared: {}".format( + context.data["assetsShared"])) + + # Check for clips with the same range + # this is for testing if any vertically neighbouring + # clips has been already processed + clip_matching_with_range = next( + (k for k, v in context.data["assetsShared"].items() + if (v.get("_clipIn", 0) == clip_in) + and (v.get("_clipOut", 0) == clip_out) + ), False) + + # check if clip name is the same in matched + # vertically neighbouring clip + # if it is then it is correct and resent variable to False + # not to be rised wrong name exception + if asset_name in str(clip_matching_with_range): + clip_matching_with_range = False + + # rise wrong name exception if found one + assert (not clip_matching_with_range), ( + "matching clip: {asset}" + " timeline range ({clip_in}:{clip_out})" + " conflicting with {clip_matching_with_range}" + " >> rename any of clips to be the same as the other <<" + ).format( + **locals()) + + if ("[" in source_name) and ("]" in source_name): + is_sequence = True + + data.update({ + "name": "_".join([ + track["name"], asset_name, source_name]), + "item": clip_item, + "source": 
mp_item, + # "timecodeStart": str(source.timecodeStart()), + "timelineStart": sq_frame_start, + "sourcePath": source_path, + "sourceFileHead": source_name, + "isSequence": is_sequence, + "track": track["name"], + "trackIndex": track["index"], + "sourceFirst": source_first, + + "sourceIn": source_in, + "sourceOut": source_out, + "mediaDuration": source_duration, + "clipIn": clip_in, + "clipOut": clip_out, + "clipDuration": clip_duration, + "asset": asset_name, + "subset": "plateMain", + "family": "clip", + "families": [], + "handleStart": projectdata.get("handleStart", 0), + "handleEnd": projectdata.get("handleEnd", 0)}) + + instance = context.create_instance(**data) + + self.log.info("Created instance: {}".format(instance)) + self.log.info("Created instance.data: {}".format(instance.data)) + + context.data["assetsShared"][asset_name] = { + "_clipIn": clip_in, + "_clipOut": clip_out + } + self.log.info( + "context.data[\"assetsShared\"]: {}".format( + context.data["assetsShared"])) diff --git a/pype/plugins/resolve/publish/collect_host.py b/pype/plugins/resolve/publish/collect_host.py deleted file mode 100644 index a5c4b0936c..0000000000 --- a/pype/plugins/resolve/publish/collect_host.py +++ /dev/null @@ -1,17 +0,0 @@ -import pyblish.api -from pype.hosts.resolve.utils import get_resolve_module - - -class CollectProject(pyblish.api.ContextPlugin): - """Collect Project object""" - - order = pyblish.api.CollectorOrder - 0.1 - label = "Collect Project" - hosts = ["resolve"] - - def process(self, context): - resolve = get_resolve_module() - PM = resolve.GetProjectManager() - P = PM.GetCurrentProject() - - self.log.info(P.GetName()) diff --git a/pype/plugins/resolve/publish/collect_project.py b/pype/plugins/resolve/publish/collect_project.py new file mode 100644 index 0000000000..aa57f93619 --- /dev/null +++ b/pype/plugins/resolve/publish/collect_project.py @@ -0,0 +1,29 @@ +import os +import pyblish.api +from pype.hosts.resolve.utils import get_resolve_module + + +class 
CollectProject(pyblish.api.ContextPlugin): + """Collect Project object""" + + order = pyblish.api.CollectorOrder - 0.1 + label = "Collect Project" + hosts = ["resolve"] + + def process(self, context): + exported_projet_ext = ".drp" + current_dir = os.getenv("AVALON_WORKDIR") + resolve = get_resolve_module() + PM = resolve.GetProjectManager() + P = PM.GetCurrentProject() + name = P.GetName() + + fname = name + exported_projet_ext + current_file = os.path.join(current_dir, fname) + normalised = os.path.normpath(current_file) + + context.data["project"] = P + context.data["currentFile"] = normalised + + self.log.info(name) + self.log.debug(normalised) diff --git a/pype/plugins/standalonepublisher/publish/collect_clip_instances.py b/pype/plugins/standalonepublisher/publish/collect_clip_instances.py new file mode 100644 index 0000000000..a7af8df143 --- /dev/null +++ b/pype/plugins/standalonepublisher/publish/collect_clip_instances.py @@ -0,0 +1,187 @@ +import os +import opentimelineio as otio +import tempfile +import pyblish.api +from pype import lib as plib + + +class CollectClipInstances(pyblish.api.InstancePlugin): + """Collect Clips instances from editorial's OTIO sequence""" + + order = pyblish.api.CollectorOrder + 0.01 + label = "Collect Clips" + hosts = ["standalonepublisher"] + families = ["editorial"] + + # presets + subsets = { + "referenceMain": { + "family": "review", + "families": ["review", "ftrack"], + # "ftrackFamily": "review", + "extension": ".mp4" + }, + "audioMain": { + "family": "audio", + "families": ["ftrack"], + # "ftrackFamily": "audio", + "extension": ".wav", + # "version": 1 + }, + "shotMain": { + "family": "shot", + "families": [] + } + } + timeline_frame_offset = None # if 900000 for edl default then -900000 + custom_start_frame = None + + def process(self, instance): + staging_dir = os.path.normpath( + tempfile.mkdtemp(prefix="pyblish_tmp_") + ) + # get context + context = instance.context + + # attribute for checking duplicity during 
creation + if not context.data.get("assetNameCheck"): + context.data["assetNameCheck"] = list() + + # create asset_names conversion table + if not context.data.get("assetsShared"): + context.data["assetsShared"] = dict() + + # get timeline otio data + timeline = instance.data["otio_timeline"] + fps = plib.get_asset()["data"]["fps"] + + tracks = timeline.each_child( + descended_from_type=otio.schema.track.Track + ) + + # get data from avalon + asset_entity = instance.context.data["assetEntity"] + asset_data = asset_entity["data"] + asset_name = asset_entity["name"] + + # Timeline data. + handle_start = int(asset_data["handleStart"]) + handle_end = int(asset_data["handleEnd"]) + + instances = [] + for track in tracks: + try: + track_start_frame = ( + abs(track.source_range.start_time.value) + ) + except AttributeError: + track_start_frame = 0 + + for clip in track.each_child(): + if clip.name is None: + continue + + # skip all generators like black ampty + if isinstance( + clip.media_reference, + otio.schema.GeneratorReference): + continue + + # Transitions are ignored, because Clips have the full frame + # range. 
+ if isinstance(clip, otio.schema.transition.Transition): + continue + + # basic unique asset name + clip_name = os.path.splitext(clip.name)[0].lower() + name = f"{asset_name.split('_')[0]}_{clip_name}" + + if name not in context.data["assetNameCheck"]: + context.data["assetNameCheck"].append(name) + else: + self.log.warning(f"duplicate shot name: {name}") + + # frame ranges data + clip_in = clip.range_in_parent().start_time.value + clip_out = clip.range_in_parent().end_time_inclusive().value + + # add offset in case there is any + if self.timeline_frame_offset: + clip_in += self.timeline_frame_offset + clip_out += self.timeline_frame_offset + + clip_duration = clip.duration().value + self.log.info(f"clip duration: {clip_duration}") + + source_in = clip.trimmed_range().start_time.value + source_out = source_in + clip_duration + source_in_h = source_in - handle_start + source_out_h = source_out + handle_end + + clip_in_h = clip_in - handle_start + clip_out_h = clip_out + handle_end + + # define starting frame for future shot + if self.custom_start_frame is not None: + frame_start = self.custom_start_frame + else: + frame_start = clip_in + + frame_end = frame_start + (clip_duration - 1) + + # create shared new instance data + instance_data = { + "stagingDir": staging_dir, + + # shared attributes + "asset": name, + "assetShareName": name, + "editorialVideoPath": instance.data[ + "editorialVideoPath"], + "item": clip, + + # parent time properities + "trackStartFrame": track_start_frame, + "handleStart": handle_start, + "handleEnd": handle_end, + "fps": fps, + + # media source + "sourceIn": source_in, + "sourceOut": source_out, + "sourceInH": source_in_h, + "sourceOutH": source_out_h, + + # timeline + "clipIn": clip_in, + "clipOut": clip_out, + "clipDuration": clip_duration, + "clipInH": clip_in_h, + "clipOutH": clip_out_h, + "clipDurationH": clip_duration + handle_start + handle_end, + + # task + "frameStart": frame_start, + "frameEnd": frame_end, + "frameStartH": 
frame_start - handle_start, + "frameEndH": frame_end + handle_end + } + + # adding subsets to context as instances + for subset, properities in self.subsets.items(): + # adding Review-able instance + subset_instance_data = instance_data.copy() + subset_instance_data.update(properities) + subset_instance_data.update({ + # unique attributes + "name": f"{subset}_{name}", + "label": f"{subset} {name} ({clip_in}-{clip_out})", + "subset": subset + }) + instances.append(instance.context.create_instance( + **subset_instance_data)) + + context.data["assetsShared"][name] = { + "_clipIn": clip_in, + "_clipOut": clip_out + } diff --git a/pype/plugins/standalonepublisher/publish/collect_context.py b/pype/plugins/standalonepublisher/publish/collect_context.py index 8bd4e609ab..9dbeec93fb 100644 --- a/pype/plugins/standalonepublisher/publish/collect_context.py +++ b/pype/plugins/standalonepublisher/publish/collect_context.py @@ -17,10 +17,9 @@ import os import pyblish.api from avalon import io import json -import logging +import copy import clique - -log = logging.getLogger("collector") +from pprint import pformat class CollectContextDataSAPublish(pyblish.api.ContextPlugin): @@ -33,55 +32,109 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin): order = pyblish.api.CollectorOrder - 0.49 hosts = ["standalonepublisher"] + # presets + batch_extensions = ["edl", "xml", "psd"] + default_families = ["ftrack"] + def process(self, context): # get json paths from os and load them io.install() - input_json_path = os.environ.get("SAPUBLISH_INPATH") - output_json_path = os.environ.get("SAPUBLISH_OUTPATH") - # context.data["stagingDir"] = os.path.dirname(input_json_path) - context.data["returnJsonPath"] = output_json_path + # get json file context + input_json_path = os.environ.get("SAPUBLISH_INPATH") with open(input_json_path, "r") as f: in_data = json.load(f) + self.log.debug(f"_ in_data: {pformat(in_data)}") - asset_name = in_data["asset"] - family = in_data["family"] + # 
    def multiple_instances(self, context, in_data):
        """Split a batch publish payload into one payload per instance.

        Each representation of *in_data* becomes its own copy of the
        payload: representations whose extension is not listed in
        ``batch_extensions`` get subset name ``<ext><subset>``, and each
        individual file of a representation gets a numbered subset
        ``<ext><index><subset>`` kept unique through the context-wide
        ``subsetNamesCheck`` register.

        Args:
            context (pyblish.api.Context): publishing context; stores the
                ``subsetNamesCheck`` register of already used names.
            in_data (dict): payload loaded from the SAPUBLISH json file.
                Its "representations" key is consumed (popped).

        Returns:
            list: payload dicts, one per instance to create.
        """
        # avoid subset name duplicity
        if not context.data.get("subsetNamesCheck"):
            context.data["subsetNamesCheck"] = list()

        in_data_list = list()
        # "representations" is popped so the deep copies made below do not
        # drag the full representation list along with them
        representations = in_data.pop("representations")
        for repr in representations:
            in_data_copy = copy.deepcopy(in_data)
            # extension without the leading dot
            ext = repr["ext"][1:]
            subset = in_data_copy["subset"]
            # filter out non editorial files
            if ext not in self.batch_extensions:
                in_data_copy["representations"] = [repr]
                in_data_copy["subset"] = f"{ext}{subset}"
                in_data_list.append(in_data_copy)
            # NOTE(review): there is no `continue` here, so the per-file
            # split below also runs for non-batch extensions — confirm the
            # resulting duplication is intended.

            files = repr.get("files")

            # delete unneeded keys
            delete_repr_keys = ["frameStart", "frameEnd"]
            for k in delete_repr_keys:
                if repr.get(k):
                    repr.pop(k)

            # convert files to list if it isnt
            if not isinstance(files, (tuple, list)):
                files = [files]

            self.log.debug(f"_ files: {files}")
            for index, f in enumerate(files):
                # generated subset names are 1-based
                index += 1
                # copy dictionaries
                in_data_copy = copy.deepcopy(in_data_copy)
                repr_new = copy.deepcopy(repr)

                repr_new["files"] = f
                repr_new["name"] = ext
                in_data_copy["representations"] = [repr_new]

                # create subset Name
                new_subset = f"{ext}{index}{subset}"
                # bump the index until the name is unique in the context
                while new_subset in context.data["subsetNamesCheck"]:
                    index += 1
                    new_subset = f"{ext}{index}{subset}"

                context.data["subsetNamesCheck"].append(new_subset)
                in_data_copy["subset"] = new_subset
                in_data_list.append(in_data_copy)
                self.log.info(f"Creating subset: {ext}{index}{subset}")

        return in_data_list
class OTIO_View(pyblish.api.Action):
    """Open the failed instance's editorial file in ``otioview``.

    Currently disabled because OTIO requires PySide2. Issue on Qt.py:
    https://github.com/PixarAnimationStudios/OpenTimelineIO/issues/289
    """

    label = "OTIO View"
    icon = "wrench"
    on = "failed"

    def process(self, context, plugin):
        # The editorial file lives in the first representation of the
        # first instance in the publish context.
        repre = context[0].data["representations"][0]
        timeline_path = os.path.join(repre["stagingDir"], repre["files"])
        plib._subprocess(["otioview", timeline_path])
kwargs = {} + if extension == ".edl": + # EDL has no frame rate embedded so needs explicit + # frame rate else 24 is asssumed. + kwargs["rate"] = plib.get_asset()["data"]["fps"] + + instance.data["otio_timeline"] = otio.adapters.read_from_file( + file_path, **kwargs) + + self.log.info(f"Added OTIO timeline from: `{file_path}`") diff --git a/pype/plugins/standalonepublisher/publish/collect_hierarchy.py b/pype/plugins/standalonepublisher/publish/collect_hierarchy.py new file mode 100644 index 0000000000..ac7413706a --- /dev/null +++ b/pype/plugins/standalonepublisher/publish/collect_hierarchy.py @@ -0,0 +1,243 @@ +import pyblish.api +import re +import os +from avalon import io + + +class CollectHierarchyInstance(pyblish.api.ContextPlugin): + """Collecting hierarchy context from `parents` and `hierarchy` data + present in `clip` family instances coming from the request json data file + + It will add `hierarchical_context` into each instance for integrate + plugins to be able to create needed parents for the context if they + don't exist yet + """ + + label = "Collect Hierarchy Clip" + order = pyblish.api.CollectorOrder + 0.101 + hosts = ["standalonepublisher"] + families = ["shot"] + + # presets + shot_rename_template = None + shot_rename_search_patterns = None + shot_add_hierarchy = None + shot_add_tasks = None + + def convert_to_entity(self, key, value): + # ftrack compatible entity types + types = {"shot": "Shot", + "folder": "Folder", + "episode": "Episode", + "sequence": "Sequence", + "track": "Sequence", + } + # convert to entity type + entity_type = types.get(key, None) + + # return if any + if entity_type: + return {"entityType": entity_type, "entityName": value} + + def rename_with_hierarchy(self, instance): + search_text = "" + parent_name = instance.context.data["assetEntity"]["name"] + clip = instance.data["item"] + clip_name = os.path.splitext(clip.name)[0].lower() + if self.shot_rename_search_patterns: + search_text += parent_name + clip_name + 
    def create_hierarchy(self, instance):
        """Build ``parents``/``hierarchy``/``tasks`` data for a shot instance.

        Walks the visual-parent chain of the context asset up to the
        project entity, then appends extra parents from the
        ``shot_add_hierarchy`` preset (formatted with the instance's
        ``anatomyData``). Results are written into ``instance.data`` and
        ``anatomyData`` gets its ``asset``/``task`` keys updated.
        """
        parents = list()
        hierarchy = ""
        # chain starts at the asset selected in standalone publisher
        visual_hierarchy = [instance.context.data["assetEntity"]]
        while True:
            visual_parent = io.find_one(
                {"_id": visual_hierarchy[-1]["data"]["visualParent"]}
            )
            if visual_parent:
                visual_hierarchy.append(visual_parent)
            else:
                # no more visual parents: close the chain with the project
                visual_hierarchy.append(
                    instance.context.data["projectEntity"])
                break

        # add current selection context hierarchy from standalonepublisher
        for entity in reversed(visual_hierarchy):
            parents.append({
                "entityType": entity["data"]["entityType"],
                "entityName": entity["name"]
            })

        if self.shot_add_hierarchy:
            # fill the parents parts from presets
            shot_add_hierarchy = self.shot_add_hierarchy.copy()
            hierarchy_parents = shot_add_hierarchy["parents"].copy()
            for parent in hierarchy_parents:
                # resolve template tokens against the instance anatomy data
                hierarchy_parents[parent] = hierarchy_parents[parent].format(
                    **instance.data["anatomyData"])
                prnt = self.convert_to_entity(
                    parent, hierarchy_parents[parent])
                parents.append(prnt)

            hierarchy = shot_add_hierarchy[
                "parents_path"].format(**hierarchy_parents)

        instance.data["hierarchy"] = hierarchy
        instance.data["parents"] = parents
        self.log.debug(f"Hierarchy: {hierarchy}")

        # tasks come straight from the preset (or none at all)
        if self.shot_add_tasks:
            instance.data["tasks"] = self.shot_add_tasks
        else:
            instance.data["tasks"] = list()

        # updating hierarchy data
        instance.data["anatomyData"].update({
            "asset": instance.data["asset"],
            "task": "conform"
        })
+ + def processing_instance(self, instance): + self.log.info(f"_ instance: {instance}") + # adding anatomyData for burnins + instance.data["anatomyData"] = instance.context.data["anatomyData"] + + asset = instance.data["asset"] + assets_shared = instance.context.data.get("assetsShared") + + frame_start = instance.data["frameStart"] + frame_end = instance.data["frameEnd"] + + if self.shot_rename_template: + self.rename_with_hierarchy(instance) + + self.create_hierarchy(instance) + + shot_name = instance.data["asset"] + self.log.debug(f"Shot Name: {shot_name}") + + if instance.data["hierarchy"] not in shot_name: + self.log.warning("wrong parent") + + label = f"{shot_name} ({frame_start}-{frame_end})" + instance.data["label"] = label + + # dealing with shared attributes across instances + # with the same asset name + if assets_shared.get(asset): + asset_shared = assets_shared.get(asset) + else: + asset_shared = assets_shared[asset] + + asset_shared.update({ + "asset": instance.data["asset"], + "hierarchy": instance.data["hierarchy"], + "parents": instance.data["parents"], + "tasks": instance.data["tasks"] + }) + + +class CollectHierarchyContext(pyblish.api.ContextPlugin): + '''Collecting Hierarchy from instaces and building + context hierarchy tree + ''' + + label = "Collect Hierarchy Context" + order = pyblish.api.CollectorOrder + 0.102 + hosts = ["standalonepublisher"] + families = ["shot"] + + def update_dict(self, ex_dict, new_dict): + for key in ex_dict: + if key in new_dict and isinstance(ex_dict[key], dict): + new_dict[key] = self.update_dict(ex_dict[key], new_dict[key]) + else: + if ex_dict.get(key) and new_dict.get(key): + continue + else: + new_dict[key] = ex_dict[key] + + return new_dict + + def process(self, context): + instances = context + # create hierarchyContext attr if context has none + assets_shared = context.data.get("assetsShared") + final_context = {} + for instance in instances: + if 'editorial' in instance.data.get('family', ''): + continue + 
# inject assetsShared to other instances with + # the same `assetShareName` attribute in data + asset_shared_name = instance.data.get("assetShareName") + + s_asset_data = assets_shared.get(asset_shared_name) + if s_asset_data: + instance.data["asset"] = s_asset_data["asset"] + instance.data["parents"] = s_asset_data["parents"] + instance.data["hierarchy"] = s_asset_data["hierarchy"] + instance.data["tasks"] = s_asset_data["tasks"] + + # generate hierarchy data only on shot instances + if 'shot' not in instance.data.get('family', ''): + continue + + name = instance.data["asset"] + + # get handles + handle_start = int(instance.data["handleStart"]) + handle_end = int(instance.data["handleEnd"]) + + in_info = {} + + # suppose that all instances are Shots + in_info['entity_type'] = 'Shot' + + # get custom attributes of the shot + + in_info['custom_attributes'] = { + "handleStart": handle_start, + "handleEnd": handle_end, + "frameStart": instance.data["frameStart"], + "frameEnd": instance.data["frameEnd"], + "clipIn": instance.data["clipIn"], + "clipOut": instance.data["clipOut"], + 'fps': instance.data["fps"] + } + + in_info['tasks'] = instance.data['tasks'] + + parents = instance.data.get('parents', []) + + actual = {name: in_info} + + for parent in reversed(parents): + next_dict = {} + parent_name = parent["entityName"] + next_dict[parent_name] = {} + next_dict[parent_name]["entity_type"] = parent["entityType"] + next_dict[parent_name]["childs"] = actual + actual = next_dict + + final_context = self.update_dict(final_context, actual) + + # adding hierarchy context to instance + context.data["hierarchyContext"] = final_context + self.log.info("Hierarchy instance collected") diff --git a/pype/plugins/standalonepublisher/publish/collect_matching_asset.py b/pype/plugins/standalonepublisher/publish/collect_matching_asset.py new file mode 100644 index 0000000000..48065c4662 --- /dev/null +++ b/pype/plugins/standalonepublisher/publish/collect_matching_asset.py @@ -0,0 +1,82 
@@ +import os +import collections +import pyblish.api +from avalon import io +from pprint import pformat + + +class CollectMatchingAssetToInstance(pyblish.api.InstancePlugin): + """ + Collecting temp json data sent from a host context + and path for returning json data back to hostself. + """ + + label = "Collect Matching Asset to Instance" + order = pyblish.api.CollectorOrder - 0.05 + hosts = ["standalonepublisher"] + families = ["background_batch"] + + def process(self, instance): + source_file = os.path.basename(instance.data["source"]).lower() + self.log.info("Looking for asset document for file \"{}\"".format( + instance.data["source"] + )) + + asset_docs_by_name = self.selection_children_by_name(instance) + + matching_asset_doc = asset_docs_by_name.get(source_file) + if matching_asset_doc is None: + for asset_name_low, asset_doc in asset_docs_by_name.items(): + if asset_name_low in source_file: + matching_asset_doc = asset_doc + break + + if matching_asset_doc: + instance.data["asset"] = matching_asset_doc["name"] + instance.data["assetEntity"] = matching_asset_doc + self.log.info( + f"Matching asset found: {pformat(matching_asset_doc)}" + ) + + else: + # TODO better error message + raise AssertionError(( + "Filename \"{}\" does not match" + " any name of asset documents in database for your selection." 
+ ).format(instance.data["source"])) + + def selection_children_by_name(self, instance): + storing_key = "childrenDocsForSelection" + + children_docs = instance.context.data.get(storing_key) + if children_docs is None: + top_asset_doc = instance.context.data["assetEntity"] + assets_by_parent_id = self._asset_docs_by_parent_id(instance) + _children_docs = self._children_docs( + assets_by_parent_id, top_asset_doc + ) + children_docs = { + children_doc["name"].lower(): children_doc + for children_doc in _children_docs + } + instance.context.data[storing_key] = children_docs + return children_docs + + def _children_docs(self, documents_by_parent_id, parent_doc): + # Find all children in reverse order, last children is at first place. + output = [] + children = documents_by_parent_id.get(parent_doc["_id"]) or tuple() + for child in children: + output.extend( + self._children_docs(documents_by_parent_id, child) + ) + output.append(parent_doc) + return output + + def _asset_docs_by_parent_id(self, instance): + # Query all assets for project and store them by parent's id to list + asset_docs_by_parent_id = collections.defaultdict(list) + for asset_doc in io.find({"type": "asset"}): + parent_id = asset_doc["data"]["visualParent"] + asset_docs_by_parent_id[parent_id].append(asset_doc) + return asset_docs_by_parent_id diff --git a/pype/plugins/standalonepublisher/publish/collect_psd_instances.py b/pype/plugins/standalonepublisher/publish/collect_psd_instances.py new file mode 100644 index 0000000000..b5db437473 --- /dev/null +++ b/pype/plugins/standalonepublisher/publish/collect_psd_instances.py @@ -0,0 +1,66 @@ +import copy +import pyblish.api +from pprint import pformat + + +class CollectPsdInstances(pyblish.api.InstancePlugin): + """ + Collect all available instances from psd batch. 
+ """ + + label = "Collect Psd Instances" + order = pyblish.api.CollectorOrder + 0.489 + hosts = ["standalonepublisher"] + families = ["background_batch"] + + # presets + subsets = { + "backgroundLayout": { + "task": "background", + "family": "backgroundLayout" + }, + "backgroundComp": { + "task": "background", + "family": "backgroundComp" + }, + "workfileBackground": { + "task": "background", + "family": "workfile" + } + } + unchecked_by_default = [] + + def process(self, instance): + context = instance.context + asset_data = instance.data["assetEntity"] + asset_name = instance.data["asset"] + for subset_name, subset_data in self.subsets.items(): + instance_name = f"{asset_name}_{subset_name}" + task = subset_data.get("task", "background") + + # create new instance + new_instance = context.create_instance(instance_name) + + # add original instance data except name key + for key, value in instance.data.items(): + if key not in ["name"]: + # Make sure value is copy since value may be object which + # can be shared across all new created objects + new_instance.data[key] = copy.deepcopy(value) + + # add subset data from preset + new_instance.data.update(subset_data) + + new_instance.data["label"] = f"{instance_name}" + new_instance.data["subset"] = subset_name + new_instance.data["task"] = task + + + if subset_name in self.unchecked_by_default: + new_instance.data["publish"] = False + + self.log.info(f"Created new instance: {instance_name}") + self.log.debug(f"_ inst_data: {pformat(new_instance.data)}") + + # delete original instance + context.remove(instance) diff --git a/pype/plugins/standalonepublisher/publish/collect_shots.py b/pype/plugins/standalonepublisher/publish/collect_shots.py deleted file mode 100644 index 4f682bd808..0000000000 --- a/pype/plugins/standalonepublisher/publish/collect_shots.py +++ /dev/null @@ -1,147 +0,0 @@ -import os - -import opentimelineio as otio -from bson import json_util - -import pyblish.api -from pype import lib -from avalon 
import io - - -class OTIO_View(pyblish.api.Action): - """Currently disabled because OTIO requires PySide2. Issue on Qt.py: - https://github.com/PixarAnimationStudios/OpenTimelineIO/issues/289 - """ - - label = "OTIO View" - icon = "wrench" - on = "failed" - - def process(self, context, plugin): - instance = context[0] - representation = instance.data["representations"][0] - file_path = os.path.join( - representation["stagingDir"], representation["files"] - ) - lib._subprocess(["otioview", file_path]) - - -class CollectShots(pyblish.api.InstancePlugin): - """Collect Anatomy object into Context""" - - order = pyblish.api.CollectorOrder - label = "Collect Shots" - hosts = ["standalonepublisher"] - families = ["editorial"] - actions = [] - - def process(self, instance): - representation = instance.data["representations"][0] - file_path = os.path.join( - representation["stagingDir"], representation["files"] - ) - instance.context.data["editorialPath"] = file_path - - extension = os.path.splitext(file_path)[1][1:] - kwargs = {} - if extension == "edl": - # EDL has no frame rate embedded so needs explicit frame rate else - # 24 is asssumed. - kwargs["rate"] = lib.get_asset()["data"]["fps"] - - timeline = otio.adapters.read_from_file(file_path, **kwargs) - tracks = timeline.each_child( - descended_from_type=otio.schema.track.Track - ) - asset_entity = instance.context.data["assetEntity"] - asset_name = asset_entity["name"] - - # Ask user for sequence start. Usually 10:00:00:00. - sequence_start_frame = 900000 - - # Project specific prefix naming. This needs to be replaced with some - # options to be more flexible. - asset_name = asset_name.split("_")[0] - - instances = [] - for track in tracks: - track_start_frame = ( - abs(track.source_range.start_time.value) - sequence_start_frame - ) - for child in track.each_child(): - - # Transitions are ignored, because Clips have the full frame - # range. 
- if isinstance(child, otio.schema.transition.Transition): - continue - - if child.name is None: - continue - - # Hardcoded to expect a shot name of "[name].[extension]" - child_name = os.path.splitext(child.name)[0].lower() - name = f"{asset_name}_{child_name}" - - frame_start = track_start_frame - frame_start += child.range_in_parent().start_time.value - frame_end = track_start_frame - frame_end += child.range_in_parent().end_time_inclusive().value - - label = f"{name} (framerange: {frame_start}-{frame_end})" - instances.append( - instance.context.create_instance(**{ - "name": name, - "label": label, - "frameStart": frame_start, - "frameEnd": frame_end, - "family": "shot", - "families": ["review", "ftrack"], - "ftrackFamily": "review", - "asset": name, - "subset": "shotMain", - "representations": [], - "source": file_path - }) - ) - - visual_hierarchy = [asset_entity] - while True: - visual_parent = io.find_one( - {"_id": visual_hierarchy[-1]["data"]["visualParent"]} - ) - if visual_parent: - visual_hierarchy.append(visual_parent) - else: - visual_hierarchy.append(instance.context.data["projectEntity"]) - break - - context_hierarchy = None - for entity in visual_hierarchy: - childs = {} - if context_hierarchy: - name = context_hierarchy.pop("name") - childs = {name: context_hierarchy} - else: - for instance in instances: - childs[instance.data["name"]] = { - "childs": {}, - "entity_type": "Shot", - "custom_attributes": { - "frameStart": instance.data["frameStart"], - "frameEnd": instance.data["frameEnd"] - } - } - - context_hierarchy = { - "entity_type": entity["data"]["entityType"], - "childs": childs, - "name": entity["name"] - } - - name = context_hierarchy.pop("name") - context_hierarchy = {name: context_hierarchy} - instance.context.data["hierarchyContext"] = context_hierarchy - self.log.info( - "Hierarchy:\n" + - json_util.dumps(context_hierarchy, sort_keys=True, indent=4) - ) diff --git 
a/pype/plugins/standalonepublisher/publish/extract_bg_for_compositing.py b/pype/plugins/standalonepublisher/publish/extract_bg_for_compositing.py new file mode 100644 index 0000000000..064c226ff7 --- /dev/null +++ b/pype/plugins/standalonepublisher/publish/extract_bg_for_compositing.py @@ -0,0 +1,238 @@ +import os +import json +import copy +import pype.api +from avalon import io + +PSDImage = None + + +class ExtractBGForComp(pype.api.Extractor): + label = "Extract Background for Compositing" + families = ["backgroundComp"] + hosts = ["standalonepublisher"] + + new_instance_family = "background" + + # Presetable + allowed_group_names = [ + "OL", "BG", "MG", "FG", "SB", "UL", "SKY", "Field Guide", "Field_Guide", + "ANIM" + ] + + def process(self, instance): + # Check if python module `psd_tools` is installed + try: + global PSDImage + from psd_tools import PSDImage + except Exception: + raise AssertionError( + "BUG: Python module `psd-tools` is not installed!" + ) + + self.allowed_group_names = [ + name.lower() + for name in self.allowed_group_names + ] + + self.redo_global_plugins(instance) + + repres = instance.data.get("representations") + if not repres: + self.log.info("There are no representations on instance.") + return + + if not instance.data.get("transfers"): + instance.data["transfers"] = [] + + # Prepare staging dir + staging_dir = self.staging_dir(instance) + if not os.path.exists(staging_dir): + os.makedirs(staging_dir) + + for repre in tuple(repres): + # Skip all files without .psd extension + if repre["ext"] != ".psd": + continue + + # Prepare publish dir for transfers + publish_dir = instance.data["publishDir"] + + # Prepare json filepath where extracted metadata are stored + json_filename = "{}.json".format(instance.name) + json_full_path = os.path.join(staging_dir, json_filename) + + self.log.debug(f"`staging_dir` is \"{staging_dir}\"") + + # Prepare new repre data + new_repre = { + "name": "json", + "ext": "json", + "files": json_filename, + 
    def export_compositing_images(self, psd_object, output_dir, publish_dir):
        """Export allowed PSD group layers to PNGs and describe them in json.

        Iterates the PSD's top-level groups, keeps only visible groups
        whose (lowercased) name is in ``allowed_group_names``, exports each
        child layer with content as a numbered PNG into *output_dir* and
        schedules a transfer of that file into *publish_dir*.

        Args:
            psd_object: opened ``psd_tools.PSDImage``.
            output_dir (str): staging directory the PNGs are written to.
            publish_dir (str): final publish directory used for transfers.

        Returns:
            tuple: (json-serializable layer-tree dict, list of
            ``(src, dst)`` transfer pairs).
        """
        json_data = {
            "__schema_version__": 1,
            "children": []
        }
        transfers = []
        for main_idx, main_layer in enumerate(psd_object):
            # only visible, allowed top-level *groups* are exported
            if (
                not main_layer.is_visible()
                or main_layer.name.lower() not in self.allowed_group_names
                or not main_layer.is_group
            ):
                continue

            export_layers = []
            layers_idx = 0
            for layer in main_layer:
                # TODO this way may be added also layers next to "ADJ"
                # "ADJ" groups are flattened: their children are exported
                # as if they were direct children of the main group
                if layer.name.lower() == "adj":
                    for _layer in layer:
                        export_layers.append((layers_idx, _layer))
                        layers_idx += 1

                else:
                    export_layers.append((layers_idx, layer))
                    layers_idx += 1

            if not export_layers:
                continue

            main_layer_data = {
                "index": main_idx,
                "name": main_layer.name,
                "children": []
            }

            for layer_idx, layer in export_layers:
                # skip layers without pixel content
                has_size = layer.width > 0 and layer.height > 0
                if not has_size:
                    self.log.debug((
                        "Skipping layer \"{}\" because does "
                        "not have any content."
                    ).format(layer.name))
                    continue

                # spaces are not wanted in file names
                main_layer_name = main_layer.name.replace(" ", "_")
                layer_name = layer.name.replace(" ", "_")

                # 1-based, zero-padded indices keep files sortable
                filename = "{:0>2}_{}_{:0>2}_{}.png".format(
                    main_idx + 1, main_layer_name, layer_idx + 1, layer_name
                )
                layer_data = {
                    "index": layer_idx,
                    "name": layer.name,
                    "filename": filename
                }
                output_filepath = os.path.join(output_dir, filename)
                dst_filepath = os.path.join(publish_dir, filename)
                transfers.append((output_filepath, dst_filepath))

                # composite against the full canvas so every PNG aligns
                pil_object = layer.composite(viewport=psd_object.viewbox)
                pil_object.save(output_filepath, "PNG")

                main_layer_data["children"].append(layer_data)

            # groups whose layers were all skipped are left out entirely
            if main_layer_data["children"]:
                json_data["children"].append(main_layer_data)

        return json_data, transfers
template_data.update({ + "frame": "FRAME_TEMP", + "representation": "TEMP" + }) + anatomy_filled = anatomy.format(template_data) + if "folder" in anatomy.templates["publish"]: + publish_folder = anatomy_filled["publish"]["folder"] + else: + publish_folder = os.path.dirname(anatomy_filled["publish"]["path"]) + + publish_folder = os.path.normpath(publish_folder) + resources_folder = os.path.join(publish_folder, "resources") + + instance.data["publishDir"] = publish_folder + instance.data["resourcesDir"] = resources_folder + + self.log.debug("publishDir: \"{}\"".format(publish_folder)) + self.log.debug("resourcesDir: \"{}\"".format(resources_folder)) + + def find_last_version(self, subset_name, asset_doc): + subset_doc = io.find_one({ + "type": "subset", + "name": subset_name, + "parent": asset_doc["_id"] + }) + + if subset_doc is None: + self.log.debug("Subset entity does not exist yet.") + else: + version_doc = io.find_one( + { + "type": "version", + "parent": subset_doc["_id"] + }, + sort=[("name", -1)] + ) + if version_doc: + return int(version_doc["name"]) + return None diff --git a/pype/plugins/standalonepublisher/publish/extract_bg_main_groups.py b/pype/plugins/standalonepublisher/publish/extract_bg_main_groups.py new file mode 100644 index 0000000000..42530aeb14 --- /dev/null +++ b/pype/plugins/standalonepublisher/publish/extract_bg_main_groups.py @@ -0,0 +1,213 @@ +import os +import copy +import json +import pype.api +import pyblish.api +from avalon import io + +PSDImage = None + + +class ExtractBGMainGroups(pype.api.Extractor): + label = "Extract Background Layout" + order = pyblish.api.ExtractorOrder + 0.02 + families = ["backgroundLayout"] + hosts = ["standalonepublisher"] + + new_instance_family = "background" + + # Presetable + allowed_group_names = [ + "OL", "BG", "MG", "FG", "UL", "SB", "SKY", "Field Guide", "Field_Guide", + "ANIM" + ] + + def process(self, instance): + # Check if python module `psd_tools` is installed + try: + global PSDImage + from 
psd_tools import PSDImage + except Exception: + raise AssertionError( + "BUG: Python module `psd-tools` is not installed!" + ) + + self.allowed_group_names = [ + name.lower() + for name in self.allowed_group_names + ] + repres = instance.data.get("representations") + if not repres: + self.log.info("There are no representations on instance.") + return + + self.redo_global_plugins(instance) + + repres = instance.data.get("representations") + if not repres: + self.log.info("There are no representations on instance.") + return + + if not instance.data.get("transfers"): + instance.data["transfers"] = [] + + # Prepare staging dir + staging_dir = self.staging_dir(instance) + if not os.path.exists(staging_dir): + os.makedirs(staging_dir) + + # Prepare publish dir for transfers + publish_dir = instance.data["publishDir"] + + for repre in tuple(repres): + # Skip all files without .psd extension + if repre["ext"] != ".psd": + continue + + # Prepare json filepath where extracted metadata are stored + json_filename = "{}.json".format(instance.name) + json_full_path = os.path.join(staging_dir, json_filename) + + self.log.debug(f"`staging_dir` is \"{staging_dir}\"") + + # Prepare new repre data + new_repre = { + "name": "json", + "ext": "json", + "files": json_filename, + "stagingDir": staging_dir + } + + # TODO add check of list + psd_filename = repre["files"] + psd_folder_path = repre["stagingDir"] + psd_filepath = os.path.join(psd_folder_path, psd_filename) + self.log.debug(f"psd_filepath: \"{psd_filepath}\"") + psd_object = PSDImage.open(psd_filepath) + + json_data, transfers = self.export_compositing_images( + psd_object, staging_dir, publish_dir + ) + self.log.info("Json file path: {}".format(json_full_path)) + with open(json_full_path, "w") as json_filestream: + json.dump(json_data, json_filestream, indent=4) + + instance.data["transfers"].extend(transfers) + instance.data["representations"].remove(repre) + instance.data["representations"].append(new_repre) + + def 
export_compositing_images(self, psd_object, output_dir, publish_dir): + json_data = { + "__schema_version__": 1, + "children": [] + } + transfers = [] + for layer_idx, layer in enumerate(psd_object): + layer_name = layer.name.replace(" ", "_") + if ( + not layer.is_visible() + or layer_name.lower() not in self.allowed_group_names + ): + continue + + has_size = layer.width > 0 and layer.height > 0 + if not has_size: + self.log.debug(( + "Skipping layer \"{}\" because does not have any content." + ).format(layer.name)) + continue + + filename = "{:0>2}_{}.png".format(layer_idx, layer_name) + layer_data = { + "index": layer_idx, + "name": layer.name, + "filename": filename + } + + output_filepath = os.path.join(output_dir, filename) + dst_filepath = os.path.join(publish_dir, filename) + transfers.append((output_filepath, dst_filepath)) + + pil_object = layer.composite(viewport=psd_object.viewbox) + pil_object.save(output_filepath, "PNG") + + json_data["children"].append(layer_data) + + return json_data, transfers + + def redo_global_plugins(self, instance): + # TODO do this in collection phase + # Copy `families` and check if `family` is not in current families + families = instance.data.get("families") or list() + if families: + families = list(set(families)) + + if self.new_instance_family in families: + families.remove(self.new_instance_family) + + self.log.debug( + "Setting new instance families {}".format(str(families)) + ) + instance.data["families"] = families + + # Override instance data with new information + instance.data["family"] = self.new_instance_family + + subset_name = instance.data["anatomyData"]["subset"] + asset_doc = instance.data["assetEntity"] + latest_version = self.find_last_version(subset_name, asset_doc) + version_number = 1 + if latest_version is not None: + version_number += latest_version + + instance.data["latestVersion"] = latest_version + instance.data["version"] = version_number + + # Same data apply to anatomy data + 
instance.data["anatomyData"].update({ + "family": self.new_instance_family, + "version": version_number + }) + + # Redo publish and resources dir + anatomy = instance.context.data["anatomy"] + template_data = copy.deepcopy(instance.data["anatomyData"]) + template_data.update({ + "frame": "FRAME_TEMP", + "representation": "TEMP" + }) + anatomy_filled = anatomy.format(template_data) + if "folder" in anatomy.templates["publish"]: + publish_folder = anatomy_filled["publish"]["folder"] + else: + publish_folder = os.path.dirname(anatomy_filled["publish"]["path"]) + + publish_folder = os.path.normpath(publish_folder) + resources_folder = os.path.join(publish_folder, "resources") + + instance.data["publishDir"] = publish_folder + instance.data["resourcesDir"] = resources_folder + + self.log.debug("publishDir: \"{}\"".format(publish_folder)) + self.log.debug("resourcesDir: \"{}\"".format(resources_folder)) + + def find_last_version(self, subset_name, asset_doc): + subset_doc = io.find_one({ + "type": "subset", + "name": subset_name, + "parent": asset_doc["_id"] + }) + + if subset_doc is None: + self.log.debug("Subset entity does not exist yet.") + else: + version_doc = io.find_one( + { + "type": "version", + "parent": subset_doc["_id"] + }, + sort=[("name", -1)] + ) + if version_doc: + return int(version_doc["name"]) + return None diff --git a/pype/plugins/standalonepublisher/publish/extract_images_from_psd.py b/pype/plugins/standalonepublisher/publish/extract_images_from_psd.py new file mode 100644 index 0000000000..5a2109478c --- /dev/null +++ b/pype/plugins/standalonepublisher/publish/extract_images_from_psd.py @@ -0,0 +1,166 @@ +import os +import copy +import pype.api +import pyblish.api +from avalon import io + +PSDImage = None + + +class ExtractImagesFromPSD(pype.api.Extractor): + # PLUGIN is not currently enabled because was decided to use different + # approach + enabled = False + active = False + label = "Extract Images from PSD" + order = 
pyblish.api.ExtractorOrder + 0.02 + families = ["backgroundLayout"] + hosts = ["standalonepublisher"] + + new_instance_family = "image" + ignored_instance_data_keys = ("name", "label", "stagingDir", "version") + # Presetable + allowed_group_names = [ + "OL", "BG", "MG", "FG", "UL", "SKY", "Field Guide", "Field_Guide", + "ANIM" + ] + + def process(self, instance): + # Check if python module `psd_tools` is installed + try: + global PSDImage + from psd_tools import PSDImage + except Exception: + raise AssertionError( + "BUG: Python module `psd-tools` is not installed!" + ) + + self.allowed_group_names = [ + name.lower() + for name in self.allowed_group_names + ] + repres = instance.data.get("representations") + if not repres: + self.log.info("There are no representations on instance.") + return + + for repre in tuple(repres): + # Skip all files without .psd extension + if repre["ext"] != ".psd": + continue + + # TODO add check of list of "files" value + psd_filename = repre["files"] + psd_folder_path = repre["stagingDir"] + psd_filepath = os.path.join(psd_folder_path, psd_filename) + self.log.debug(f"psd_filepath: \"{psd_filepath}\"") + psd_object = PSDImage.open(psd_filepath) + + self.create_new_instances(instance, psd_object) + + # Remove the instance from context + instance.context.remove(instance) + + def create_new_instances(self, instance, psd_object): + asset_doc = instance.data["assetEntity"] + for layer in psd_object: + if ( + not layer.is_visible() + or layer.name.lower() not in self.allowed_group_names + ): + continue + + has_size = layer.width > 0 and layer.height > 0 + if not has_size: + self.log.debug(( + "Skipping layer \"{}\" because does " + "not have any content." 
+ ).format(layer.name)) + continue + + layer_name = layer.name.replace(" ", "_") + instance_name = subset_name = f"image{layer_name}" + self.log.info( + f"Creating new instance with name \"{instance_name}\"" + ) + new_instance = instance.context.create_instance(instance_name) + for key, value in instance.data.items(): + if key not in self.ignored_instance_data_keys: + new_instance.data[key] = copy.deepcopy(value) + + new_instance.data["label"] = " ".join( + (new_instance.data["asset"], instance_name) + ) + + # Find latest version + latest_version = self.find_last_version(subset_name, asset_doc) + version_number = 1 + if latest_version is not None: + version_number += latest_version + + self.log.info( + "Next version of instance \"{}\" will be {}".format( + instance_name, version_number + ) + ) + + # Set family and subset + new_instance.data["family"] = self.new_instance_family + new_instance.data["subset"] = subset_name + new_instance.data["version"] = version_number + new_instance.data["latestVersion"] = latest_version + + new_instance.data["anatomyData"].update({ + "subset": subset_name, + "family": self.new_instance_family, + "version": version_number + }) + + # Copy `families` and check if `family` is not in current families + families = new_instance.data.get("families") or list() + if families: + families = list(set(families)) + + if self.new_instance_family in families: + families.remove(self.new_instance_family) + new_instance.data["families"] = families + + # Prepare staging dir for new instance + staging_dir = self.staging_dir(new_instance) + + output_filename = "{}.png".format(layer_name) + output_filepath = os.path.join(staging_dir, output_filename) + pil_object = layer.composite(viewport=psd_object.viewbox) + pil_object.save(output_filepath, "PNG") + + new_repre = { + "name": "png", + "ext": "png", + "files": output_filename, + "stagingDir": staging_dir + } + self.log.debug( + "Creating new representation: {}".format(new_repre) + ) + 
new_instance.data["representations"] = [new_repre] + + def find_last_version(self, subset_name, asset_doc): + subset_doc = io.find_one({ + "type": "subset", + "name": subset_name, + "parent": asset_doc["_id"] + }) + + if subset_doc is None: + self.log.debug("Subset entity does not exist yet.") + else: + version_doc = io.find_one( + { + "type": "version", + "parent": subset_doc["_id"] + }, + sort=[("name", -1)] + ) + if version_doc: + return int(version_doc["name"]) + return None diff --git a/pype/plugins/standalonepublisher/publish/extract_review.py b/pype/plugins/standalonepublisher/publish/extract_review.py deleted file mode 100644 index 0f845afcb1..0000000000 --- a/pype/plugins/standalonepublisher/publish/extract_review.py +++ /dev/null @@ -1,199 +0,0 @@ -import os -import tempfile - -import pyblish.api -import clique -import pype.api -import pype.lib - - -class ExtractReviewSP(pyblish.api.InstancePlugin): - """Extracting Review mov file for Ftrack - - Compulsory attribute of representation is tags list with "review", - otherwise the representation is ignored. - - All new represetnations are created and encoded by ffmpeg following - presets found in `pype-config/presets/plugins/global/publish.json:ExtractReview:outputs`. 
To change the file extension - filter values use preset's attributes `ext_filter` - """ - - label = "Extract Review SP" - order = pyblish.api.ExtractorOrder + 0.02 - families = ["review"] - hosts = ["standalonepublisher"] - - def process(self, instance): - # adding plugin attributes from presets - presets = instance.context.data["presets"] - try: - publish_presets = presets["plugins"]["standalonepublisher"]["publish"] - plugin_attrs = publish_presets[self.__class__.__name__] - except KeyError: - raise KeyError("Preset for plugin \"{}\" are not set".format( - self.__class__.__name__ - )) - - output_profiles = plugin_attrs.get("outputs", {}) - - fps = instance.data.get("fps") - start_frame = instance.data.get("frameStart") - - self.log.debug("Families In: `{}`".format(instance.data["families"])) - - # get specific profile if was defined - specific_profiles = instance.data.get("repreProfiles", []) - - new_repres = [] - # filter out mov and img sequences - for repre in instance.data["representations"]: - tags = repre.get("tags", []) - if "review" not in tags: - continue - - staging_dir = repre["stagingDir"] - for name in specific_profiles: - profile = output_profiles.get(name) - if not profile: - self.log.warning( - "Profile \"{}\" was not found in presets".format(name) - ) - continue - - self.log.debug("Processing profile: {}".format(name)) - - ext = profile.get("ext", None) - if not ext: - ext = "mov" - self.log.debug(( - "`ext` attribute not in output profile \"{}\"." 
- " Setting to default ext: `mov`" - ).format(name)) - - if isinstance(repre["files"], list): - collections, remainder = clique.assemble(repre["files"]) - - full_input_path = os.path.join( - staging_dir, - collections[0].format("{head}{padding}{tail}") - ) - filename = collections[0].format('{head}') - if filename.endswith("."): - filename = filename[:-1] - else: - full_input_path = os.path.join(staging_dir, repre["files"]) - filename = repre["files"].split(".")[0] - - # prepare output file - repr_file = filename + "_{0}.{1}".format(name, ext) - out_stagigng_dir = tempfile.mkdtemp(prefix="extract_review_") - full_output_path = os.path.join(out_stagigng_dir, repr_file) - - self.log.info("input {}".format(full_input_path)) - self.log.info("output {}".format(full_output_path)) - - repre_new = repre.copy() - - new_tags = [x for x in tags if x != "delete"] - p_tags = profile.get("tags", []) - self.log.info("p_tags: `{}`".format(p_tags)) - - for _tag in p_tags: - if _tag not in new_tags: - new_tags.append(_tag) - - self.log.info("new_tags: `{}`".format(new_tags)) - - input_args = [] - - # overrides output file - input_args.append("-y") - - # preset's input data - input_args.extend(profile.get("input", [])) - - # necessary input data - # adds start arg only if image sequence - if isinstance(repre["files"], list): - input_args.extend([ - "-start_number {}".format(start_frame), - "-framerate {}".format(fps) - ]) - - input_args.append("-i {}".format(full_input_path)) - - output_args = [] - # preset's output data - output_args.extend(profile.get("output", [])) - - if isinstance(repre["files"], list): - # set length of video by len of inserted files - video_len = len(repre["files"]) - else: - video_len = repre["frameEnd"] - repre["frameStart"] + 1 - output_args.append( - "-frames {}".format(video_len) - ) - - # letter_box - lb_string = ( - "-filter:v " - "drawbox=0:0:iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black," - "drawbox=0:ih-round((ih-(iw*(1/{0})))/2):iw:" - 
"round((ih-(iw*(1/{0})))/2):t=fill:c=black" - ) - letter_box = profile.get("letter_box", None) - if letter_box: - output_args.append(lb_string.format(letter_box)) - - # output filename - output_args.append(full_output_path) - - ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") - mov_args = [ - ffmpeg_path, - " ".join(input_args), - " ".join(output_args) - ] - subprcs_cmd = " ".join(mov_args) - - # run subprocess - self.log.debug("Executing: {}".format(subprcs_cmd)) - output = pype.api.subprocess(subprcs_cmd) - self.log.debug("Output: {}".format(output)) - - # create representation data - repre_new.update({ - "name": name, - "ext": ext, - "files": repr_file, - "stagingDir": out_stagigng_dir, - "tags": new_tags, - "outputName": name, - "frameStartFtrack": 1, - "frameEndFtrack": video_len - }) - # cleanup thumbnail from new repre - if repre_new.get("thumbnail"): - repre_new.pop("thumbnail") - if "thumbnail" in repre_new["tags"]: - repre_new["tags"].remove("thumbnail") - - # adding representation - self.log.debug("Adding: {}".format(repre_new)) - # cleanup repre from preview - if "preview" in repre: - repre.pop("preview") - if "preview" in repre["tags"]: - repre["tags"].remove("preview") - new_repres.append(repre_new) - - for repre in instance.data["representations"]: - if "delete" in repre.get("tags", []): - instance.data["representations"].remove(repre) - - for repre in new_repres: - self.log.debug("Adding repre: \"{}\"".format( - repre - )) - instance.data["representations"].append(repre) diff --git a/pype/plugins/standalonepublisher/publish/extract_shot.py b/pype/plugins/standalonepublisher/publish/extract_shot.py deleted file mode 100644 index d58ddfe8d5..0000000000 --- a/pype/plugins/standalonepublisher/publish/extract_shot.py +++ /dev/null @@ -1,96 +0,0 @@ -import os - -import clique - -import pype.api -import pype.lib - - -class ExtractShot(pype.api.Extractor): - """Extract shot "mov" and "wav" files.""" - - label = "Extract Shot" - hosts = 
["standalonepublisher"] - families = ["shot"] - - def process(self, instance): - staging_dir = self.staging_dir(instance) - self.log.info("Outputting shot to {}".format(staging_dir)) - - editorial_path = instance.context.data["editorialPath"] - basename = os.path.splitext(os.path.basename(editorial_path))[0] - - # Generate mov file. - fps = pype.lib.get_asset()["data"]["fps"] - input_path = os.path.join( - os.path.dirname(editorial_path), basename + ".mov" - ) - shot_mov = os.path.join(staging_dir, instance.data["name"] + ".mov") - ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") - args = [ - ffmpeg_path, - "-ss", str(instance.data["frameStart"] / fps), - "-i", input_path, - "-t", str( - (instance.data["frameEnd"] - instance.data["frameStart"] + 1) / - fps - ), - "-crf", "18", - "-pix_fmt", "yuv420p", - shot_mov - ] - self.log.info(f"Processing: {args}") - output = pype.lib._subprocess(args) - self.log.info(output) - - instance.data["representations"].append({ - "name": "mov", - "ext": "mov", - "files": os.path.basename(shot_mov), - "stagingDir": staging_dir, - "frameStart": instance.data["frameStart"], - "frameEnd": instance.data["frameEnd"], - "fps": fps, - "thumbnail": True, - "tags": ["review", "ftrackreview"] - }) - - # Generate jpegs. - shot_jpegs = os.path.join( - staging_dir, instance.data["name"] + ".%04d.jpeg" - ) - args = [ffmpeg_path, "-i", shot_mov, shot_jpegs] - self.log.info(f"Processing: {args}") - output = pype.lib._subprocess(args) - self.log.info(output) - - collection = clique.Collection( - head=instance.data["name"] + ".", tail='.jpeg', padding=4 - ) - for f in os.listdir(staging_dir): - if collection.match(f): - collection.add(f) - - instance.data["representations"].append({ - "name": "jpeg", - "ext": "jpeg", - "files": list(collection), - "stagingDir": staging_dir - }) - - # Generate wav file. 
- shot_wav = os.path.join(staging_dir, instance.data["name"] + ".wav") - args = [ffmpeg_path, "-i", shot_mov, shot_wav] - self.log.info(f"Processing: {args}") - output = pype.lib._subprocess(args) - self.log.info(output) - - instance.data["representations"].append({ - "name": "wav", - "ext": "wav", - "files": os.path.basename(shot_wav), - "stagingDir": staging_dir - }) - - # Required for extract_review plugin (L222 onwards). - instance.data["fps"] = fps diff --git a/pype/plugins/standalonepublisher/publish/extract_shot_data.py b/pype/plugins/standalonepublisher/publish/extract_shot_data.py new file mode 100644 index 0000000000..c39247d6d6 --- /dev/null +++ b/pype/plugins/standalonepublisher/publish/extract_shot_data.py @@ -0,0 +1,92 @@ +import os +import clique +import pype.api + +from pprint import pformat + + +class ExtractShotData(pype.api.Extractor): + """Extract shot "mov" and "wav" files.""" + + label = "Extract Shot Data" + hosts = ["standalonepublisher"] + families = ["review", "audio"] + + # presets + + def process(self, instance): + representation = instance.data.get("representations") + self.log.debug(f"_ representation: {representation}") + + if not representation: + instance.data["representations"] = list() + + # get ffmpet path + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") + + # get staging dir + staging_dir = self.staging_dir(instance) + self.log.info("Staging dir set to: `{}`".format(staging_dir)) + + # Generate mov file. 
+ fps = instance.data["fps"] + video_file_path = instance.data["editorialVideoPath"] + ext = instance.data.get("extension", ".mov") + + clip_trimed_path = os.path.join( + staging_dir, instance.data["name"] + ext) + # + # # check video file metadata + # input_data = plib.ffprobe_streams(video_file_path)[0] + # self.log.debug(f"__ input_data: `{input_data}`") + + start = float(instance.data["clipInH"]) + dur = float(instance.data["clipDurationH"]) + + if ext in ".wav": + start += 0.5 + + args = [ + ffmpeg_path, + "-ss", str(start / fps), + "-i", f"\"{video_file_path}\"", + "-t", str(dur / fps) + ] + if ext in [".mov", ".mp4"]: + args.extend([ + "-crf", "18", + "-pix_fmt", "yuv420p"]) + elif ext in ".wav": + args.extend([ + "-vn -acodec pcm_s16le", + "-ar 48000 -ac 2" + ]) + + # add output path + args.append(f"\"{clip_trimed_path}\"") + + self.log.info(f"Processing: {args}") + ffmpeg_args = " ".join(args) + output = pype.api.subprocess(ffmpeg_args, shell=True) + self.log.info(output) + + repr = { + "name": ext[1:], + "ext": ext[1:], + "files": os.path.basename(clip_trimed_path), + "stagingDir": staging_dir, + "frameStart": int(instance.data["frameStart"]), + "frameEnd": int(instance.data["frameEnd"]), + "frameStartFtrack": int(instance.data["frameStartH"]), + "frameEndFtrack": int(instance.data["frameEndH"]), + "fps": fps, + } + + if ext[1:] in ["mov", "mp4"]: + repr.update({ + "thumbnail": True, + "tags": ["review", "ftrackreview", "delete"]}) + + instance.data["representations"].append(repr) + + self.log.debug(f"Instance data: {pformat(instance.data)}") diff --git a/pype/plugins/standalonepublisher/publish/validate_editorial_resources.py b/pype/plugins/standalonepublisher/publish/validate_editorial_resources.py index 961641b8fa..ebc449c4ec 100644 --- a/pype/plugins/standalonepublisher/publish/validate_editorial_resources.py +++ b/pype/plugins/standalonepublisher/publish/validate_editorial_resources.py @@ -9,20 +9,10 @@ class 
ValidateEditorialResources(pyblish.api.InstancePlugin): label = "Validate Editorial Resources" hosts = ["standalonepublisher"] - families = ["editorial"] + families = ["audio", "review"] order = pype.api.ValidateContentsOrder def process(self, instance): - representation = instance.data["representations"][0] - staging_dir = representation["stagingDir"] - basename = os.path.splitext( - os.path.basename(representation["files"]) - )[0] - - files = [x for x in os.listdir(staging_dir)] - - # Check for "mov" file. - filename = basename + ".mov" - filepath = os.path.join(staging_dir, filename) - msg = f"Missing \"{filepath}\"." - assert filename in files, msg + check_file = instance.data["editorialVideoPath"] + msg = f"Missing \"{check_file}\"." + assert check_file, msg diff --git a/pype/plugins/standalonepublisher/publish/validate_shots.py b/pype/plugins/standalonepublisher/publish/validate_shot_duplicates.py similarity index 77% rename from pype/plugins/standalonepublisher/publish/validate_shots.py rename to pype/plugins/standalonepublisher/publish/validate_shot_duplicates.py index 3267af7685..04d2f3ea6c 100644 --- a/pype/plugins/standalonepublisher/publish/validate_shots.py +++ b/pype/plugins/standalonepublisher/publish/validate_shot_duplicates.py @@ -2,10 +2,10 @@ import pyblish.api import pype.api -class ValidateShots(pyblish.api.ContextPlugin): - """Validate there is a "mov" next to the editorial file.""" +class ValidateShotDuplicates(pyblish.api.ContextPlugin): + """Validating no duplicate names are in context.""" - label = "Validate Shots" + label = "Validate Shot Duplicates" hosts = ["standalonepublisher"] order = pype.api.ValidateContentsOrder diff --git a/res/icons/folder-favorite.png b/pype/resources/icons/folder-favorite.png similarity index 100% rename from res/icons/folder-favorite.png rename to pype/resources/icons/folder-favorite.png diff --git a/res/icons/folder-favorite2.png b/pype/resources/icons/folder-favorite2.png similarity index 100% rename from 
res/icons/folder-favorite2.png rename to pype/resources/icons/folder-favorite2.png diff --git a/res/icons/folder-favorite3.png b/pype/resources/icons/folder-favorite3.png similarity index 100% rename from res/icons/folder-favorite3.png rename to pype/resources/icons/folder-favorite3.png diff --git a/res/icons/inventory.png b/pype/resources/icons/inventory.png similarity index 100% rename from res/icons/inventory.png rename to pype/resources/icons/inventory.png diff --git a/res/icons/loader.png b/pype/resources/icons/loader.png similarity index 100% rename from res/icons/loader.png rename to pype/resources/icons/loader.png diff --git a/res/icons/lookmanager.png b/pype/resources/icons/lookmanager.png similarity index 100% rename from res/icons/lookmanager.png rename to pype/resources/icons/lookmanager.png diff --git a/res/icons/workfiles.png b/pype/resources/icons/workfiles.png similarity index 100% rename from res/icons/workfiles.png rename to pype/resources/icons/workfiles.png diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py index 104ff0255c..156896a759 100644 --- a/pype/scripts/otio_burnin.py +++ b/pype/scripts/otio_burnin.py @@ -524,10 +524,17 @@ def burnins_from_data( profile_name = profile_name.replace(" ", "_").lower() ffmpeg_args.append("-profile:v {}".format(profile_name)) + bit_rate = burnin._streams[0].get("bit_rate") + if bit_rate: + ffmpeg_args.append("-b:v {}".format(bit_rate)) + pix_fmt = burnin._streams[0].get("pix_fmt") if pix_fmt: ffmpeg_args.append("-pix_fmt {}".format(pix_fmt)) + # Use group one (same as `-intra` argument, which is deprecated) + ffmpeg_args.append("-g 1") + ffmpeg_args_str = " ".join(ffmpeg_args) burnin.render( output_path, args=ffmpeg_args_str, overwrite=overwrite, **data diff --git a/pype/settings/__init__.py b/pype/settings/__init__.py new file mode 100644 index 0000000000..7e73d541a4 --- /dev/null +++ b/pype/settings/__init__.py @@ -0,0 +1,9 @@ +from .lib import ( + system_settings, + project_settings +) 
+ +__all__ = ( + "system_settings", + "project_settings" +) diff --git a/pype/settings/defaults/project_anatomy/colorspace.json b/pype/settings/defaults/project_anatomy/colorspace.json new file mode 100644 index 0000000000..8b934f810d --- /dev/null +++ b/pype/settings/defaults/project_anatomy/colorspace.json @@ -0,0 +1,42 @@ +{ + "nuke": { + "root": { + "colorManagement": "Nuke", + "OCIO_config": "nuke-default", + "defaultViewerLUT": "Nuke Root LUTs", + "monitorLut": "sRGB", + "int8Lut": "sRGB", + "int16Lut": "sRGB", + "logLut": "Cineon", + "floatLut": "linear" + }, + "viewer": { + "viewerProcess": "sRGB" + }, + "write": { + "render": { + "colorspace": "linear" + }, + "prerender": { + "colorspace": "linear" + }, + "still": { + "colorspace": "sRGB" + } + }, + "read": { + "[^-a-zA-Z0-9]beauty[^-a-zA-Z0-9]": "linear", + "[^-a-zA-Z0-9](P|N|Z|crypto)[^-a-zA-Z0-9]": "linear", + "[^-a-zA-Z0-9](plateRef)[^-a-zA-Z0-9]": "sRGB" + } + }, + "maya": { + + }, + "houdini": { + + }, + "resolve": { + + } +} diff --git a/pype/settings/defaults/project_anatomy/dataflow.json b/pype/settings/defaults/project_anatomy/dataflow.json new file mode 100644 index 0000000000..d2f470b5bc --- /dev/null +++ b/pype/settings/defaults/project_anatomy/dataflow.json @@ -0,0 +1,55 @@ +{ + "nuke": { + "nodes": { + "connected": true, + "modifymetadata": { + "_id": "connect_metadata", + "_previous": "ENDING", + "metadata.set.pype_studio_name": "{PYPE_STUDIO_NAME}", + "metadata.set.avalon_project_name": "{AVALON_PROJECT}", + "metadata.set.avalon_project_code": "{PYPE_STUDIO_CODE}", + "metadata.set.avalon_asset_name": "{AVALON_ASSET}" + }, + "crop": { + "_id": "connect_crop", + "_previous": "connect_metadata", + "box": [ + "{metadata.crop.x}", + "{metadata.crop.y}", + "{metadata.crop.right}", + "{metadata.crop.top}" + ] + }, + "write": { + "render": { + "_id": "output_write", + "_previous": "connect_crop", + "file_type": "exr", + "datatype": "16 bit half", + "compression": "Zip (1 scanline)", + "autocrop": 
true, + "tile_color": "0xff0000ff", + "channels": "rgb" + }, + "prerender": { + "_id": "output_write", + "_previous": "connect_crop", + "file_type": "exr", + "datatype": "16 bit half", + "compression": "Zip (1 scanline)", + "autocrop": false, + "tile_color": "0xc9892aff", + "channels": "rgba" + }, + "still": { + "_previous": "connect_crop", + "channels": "rgba", + "file_type": "tiff", + "datatype": "16 bit", + "compression": "LZW", + "tile_color": "0x4145afff" + } + } + } + } +} diff --git a/pype/settings/defaults/project_anatomy/roots.json b/pype/settings/defaults/project_anatomy/roots.json new file mode 100644 index 0000000000..0282471a60 --- /dev/null +++ b/pype/settings/defaults/project_anatomy/roots.json @@ -0,0 +1,5 @@ +{ + "windows": "C:/projects", + "linux": "/mnt/share/projects", + "darwin": "/Volumes/path" +} diff --git a/pype/settings/defaults/project_anatomy/templates.json b/pype/settings/defaults/project_anatomy/templates.json new file mode 100644 index 0000000000..0fff0265b3 --- /dev/null +++ b/pype/settings/defaults/project_anatomy/templates.json @@ -0,0 +1,30 @@ +{ + "version_padding": 3, + "version": "v{version:0>{@version_padding}}", + "frame_padding": 4, + "frame": "{frame:0>{@frame_padding}}", + "work": { + "folder": "{root}/{project[name]}/{hierarchy}/{asset}/work/{task}", + "file": "{project[code]}_{asset}_{task}_{@version}<_{comment}>.{ext}", + "path": "{@folder}/{@file}" + }, + "render": { + "folder": "{root}/{project[name]}/{hierarchy}/{asset}/publish/render/{subset}/{@version}", + "file": "{project[code]}_{asset}_{subset}_{@version}<_{output}><.{@frame}>.{representation}", + "path": "{@folder}/{@file}" + }, + "texture": { + "path": "{root}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{subset}" + }, + "publish": { + "folder": "{root}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{subset}/{@version}", + "file": "{project[code]}_{asset}_{subset}_{@version}<_{output}><.{@frame}>.{representation}", + "path": "{@folder}/{@file}", 
+ "thumbnail": "{thumbnail_root}/{project[name]}/{_id}_{thumbnail_type}{ext}" + }, + "master": { + "folder": "{root}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{subset}/master", + "file": "{project[code]}_{asset}_{subset}_master<_{output}><.{frame}>.{representation}", + "path": "{@folder}/{@file}" + } +} \ No newline at end of file diff --git a/pype/settings/defaults/project_settings/ftrack/ftrack_config.json b/pype/settings/defaults/project_settings/ftrack/ftrack_config.json new file mode 100644 index 0000000000..1ef3a9d69f --- /dev/null +++ b/pype/settings/defaults/project_settings/ftrack/ftrack_config.json @@ -0,0 +1,16 @@ +{ + "sync_to_avalon": { + "statuses_name_change": ["not ready", "ready"] + }, + + "status_update": { + "_ignore_": ["in progress", "ommited", "on hold"], + "Ready": ["not ready"], + "In Progress" : ["_any_"] + }, + "status_version_to_task": { + "__description__": "Status `from` (key) must be lowered!", + "in progress": "in progress", + "approved": "approved" + } +} diff --git a/pype/settings/defaults/project_settings/ftrack/ftrack_custom_attributes.json b/pype/settings/defaults/project_settings/ftrack/ftrack_custom_attributes.json new file mode 100644 index 0000000000..f03d473cd0 --- /dev/null +++ b/pype/settings/defaults/project_settings/ftrack/ftrack_custom_attributes.json @@ -0,0 +1,165 @@ +[{ + "label": "FPS", + "key": "fps", + "type": "number", + "is_hierarchical": true, + "group": "avalon", + "write_security_role": ["ALL"], + "read_security_role": ["ALL"], + "default": null, + "config": { + "isdecimal": true + } +}, { + "label": "Applications", + "key": "applications", + "type": "enumerator", + "entity_type": "show", + "group": "avalon", + "config": { + "multiselect": true, + "data": [ + {"blender_2.80": "Blender 2.80"}, + {"blender_2.81": "Blender 2.81"}, + {"blender_2.82": "Blender 2.82"}, + {"blender_2.83": "Blender 2.83"}, + {"celaction_local": "CelAction2D Local"}, + {"maya_2017": "Maya 2017"}, + {"maya_2018": "Maya 
2018"}, + {"maya_2019": "Maya 2019"}, + {"nuke_10.0": "Nuke 10.0"}, + {"nuke_11.2": "Nuke 11.2"}, + {"nuke_11.3": "Nuke 11.3"}, + {"nuke_12.0": "Nuke 12.0"}, + {"nukex_10.0": "NukeX 10.0"}, + {"nukex_11.2": "NukeX 11.2"}, + {"nukex_11.3": "NukeX 11.3"}, + {"nukex_12.0": "NukeX 12.0"}, + {"nukestudio_10.0": "NukeStudio 10.0"}, + {"nukestudio_11.2": "NukeStudio 11.2"}, + {"nukestudio_11.3": "NukeStudio 11.3"}, + {"nukestudio_12.0": "NukeStudio 12.0"}, + {"harmony_17": "Harmony 17"}, + {"houdini_16.5": "Houdini 16.5"}, + {"houdini_17": "Houdini 17"}, + {"houdini_18": "Houdini 18"}, + {"photoshop_2020": "Photoshop 2020"}, + {"python_3": "Python 3"}, + {"python_2": "Python 2"}, + {"premiere_2019": "Premiere Pro 2019"}, + {"premiere_2020": "Premiere Pro 2020"}, + {"resolve_16": "BM DaVinci Resolve 16"} + ] + } +}, { + "label": "Avalon auto-sync", + "key": "avalon_auto_sync", + "type": "boolean", + "entity_type": "show", + "group": "avalon", + "write_security_role": ["API", "Administrator"], + "read_security_role": ["API", "Administrator"] +}, { + "label": "Intent", + "key": "intent", + "type": "enumerator", + "entity_type": "assetversion", + "group": "avalon", + "config": { + "multiselect": false, + "data": [ + {"test": "Test"}, + {"wip": "WIP"}, + {"final": "Final"} + ] + } +}, { + "label": "Library Project", + "key": "library_project", + "type": "boolean", + "entity_type": "show", + "group": "avalon", + "write_security_role": ["API", "Administrator"], + "read_security_role": ["API", "Administrator"] +}, { + "label": "Clip in", + "key": "clipIn", + "type": "number", + "is_hierarchical": true, + "group": "avalon", + "default": null +}, { + "label": "Clip out", + "key": "clipOut", + "type": "number", + "is_hierarchical": true, + "group": "avalon", + "default": null +}, { + "label": "Frame start", + "key": "frameStart", + "type": "number", + "is_hierarchical": true, + "group": "avalon", + "default": null +}, { + "label": "Frame end", + "key": "frameEnd", + "type": 
"number", + "is_hierarchical": true, + "group": "avalon", + "default": null +}, { + "label": "Tools", + "key": "tools_env", + "type": "enumerator", + "is_hierarchical": true, + "group": "avalon", + "config": { + "multiselect": true, + "data": [ + {"mtoa_3.0.1": "mtoa_3.0.1"}, + {"mtoa_3.1.1": "mtoa_3.1.1"}, + {"mtoa_3.2.0": "mtoa_3.2.0"}, + {"yeti_2.1.2": "yeti_2.1"} + ] + } +}, { + "label": "Resolution Width", + "key": "resolutionWidth", + "type": "number", + "is_hierarchical": true, + "group": "avalon", + "default": null +}, { + "label": "Resolution Height", + "key": "resolutionHeight", + "type": "number", + "is_hierarchical": true, + "group": "avalon", + "default": null +}, { + "label": "Pixel aspect", + "key": "pixelAspect", + "type": "number", + "is_hierarchical": true, + "group": "avalon", + "config": { + "isdecimal": true + } +}, { + "label": "Frame handles start", + "key": "handleStart", + "type": "number", + "is_hierarchical": true, + "group": "avalon", + "default": null +}, { + "label": "Frame handles end", + "key": "handleEnd", + "type": "number", + "is_hierarchical": true, + "group": "avalon", + "default": null +} +] diff --git a/pype/settings/defaults/project_settings/ftrack/partnership_ftrack_cred.json b/pype/settings/defaults/project_settings/ftrack/partnership_ftrack_cred.json new file mode 100644 index 0000000000..6b3a32f181 --- /dev/null +++ b/pype/settings/defaults/project_settings/ftrack/partnership_ftrack_cred.json @@ -0,0 +1,5 @@ +{ + "server_url": "", + "api_key": "", + "api_user": "" +} diff --git a/pype/settings/defaults/project_settings/ftrack/plugins/server.json b/pype/settings/defaults/project_settings/ftrack/plugins/server.json new file mode 100644 index 0000000000..0967ef424b --- /dev/null +++ b/pype/settings/defaults/project_settings/ftrack/plugins/server.json @@ -0,0 +1 @@ +{} diff --git a/pype/settings/defaults/project_settings/ftrack/plugins/user.json b/pype/settings/defaults/project_settings/ftrack/plugins/user.json new file mode 
100644 index 0000000000..1ba8e9b511 --- /dev/null +++ b/pype/settings/defaults/project_settings/ftrack/plugins/user.json @@ -0,0 +1,5 @@ +{ + "TestAction": { + "ignore_me": true + } +} diff --git a/pype/settings/defaults/project_settings/ftrack/project_defaults.json b/pype/settings/defaults/project_settings/ftrack/project_defaults.json new file mode 100644 index 0000000000..a4e3aa3362 --- /dev/null +++ b/pype/settings/defaults/project_settings/ftrack/project_defaults.json @@ -0,0 +1,18 @@ +{ + "fps": 25, + "frameStart": 1001, + "frameEnd": 1100, + "clipIn": 1001, + "clipOut": 1100, + "handleStart": 10, + "handleEnd": 10, + + "resolutionHeight": 1080, + "resolutionWidth": 1920, + "pixelAspect": 1.0, + "applications": [ + "maya_2019", "nuke_11.3", "nukex_11.3", "nukestudio_11.3", "deadline" + ], + "tools_env": [], + "avalon_auto_sync": true +} diff --git a/pype/settings/defaults/project_settings/global/creator.json b/pype/settings/defaults/project_settings/global/creator.json new file mode 100644 index 0000000000..d14e779f01 --- /dev/null +++ b/pype/settings/defaults/project_settings/global/creator.json @@ -0,0 +1,8 @@ +{ + "Model": ["model"], + "Render Globals": ["light", "render"], + "Layout": ["layout"], + "Set Dress": ["setdress"], + "Look": ["look"], + "Rig": ["rigging"] +} diff --git a/pype/settings/defaults/project_settings/global/project_folder_structure.json b/pype/settings/defaults/project_settings/global/project_folder_structure.json new file mode 100644 index 0000000000..83bd5f12a9 --- /dev/null +++ b/pype/settings/defaults/project_settings/global/project_folder_structure.json @@ -0,0 +1,22 @@ +{ + "__project_root__": { + "prod" : {}, + "resources" : { + "footage": { + "plates": {}, + "offline": {} + }, + "audio": {}, + "art_dept": {} + }, + "editorial" : {}, + "assets[ftrack.Library]": { + "characters[ftrack]": {}, + "locations[ftrack]": {} + }, + "shots[ftrack.Sequence]": { + "scripts": {}, + "editorial[ftrack.Folder]": {} + } + } +} diff --git 
a/pype/settings/defaults/project_settings/global/sw_folders.json b/pype/settings/defaults/project_settings/global/sw_folders.json new file mode 100644 index 0000000000..a154935dce --- /dev/null +++ b/pype/settings/defaults/project_settings/global/sw_folders.json @@ -0,0 +1,8 @@ +{ + "compositing": ["nuke", "ae"], + "modeling": ["maya", "app2"], + "lookdev": ["substance"], + "animation": [], + "lighting": [], + "rigging": [] +} diff --git a/pype/settings/defaults/project_settings/global/workfiles.json b/pype/settings/defaults/project_settings/global/workfiles.json new file mode 100644 index 0000000000..393b2e3c10 --- /dev/null +++ b/pype/settings/defaults/project_settings/global/workfiles.json @@ -0,0 +1,7 @@ +{ + "last_workfile_on_startup": [ + { + "enabled": false + } + ] +} diff --git a/pype/settings/defaults/project_settings/maya/capture.json b/pype/settings/defaults/project_settings/maya/capture.json new file mode 100644 index 0000000000..b6c4893034 --- /dev/null +++ b/pype/settings/defaults/project_settings/maya/capture.json @@ -0,0 +1,108 @@ +{ + "Codec": { + "compression": "jpg", + "format": "image", + "quality": 95 + }, + "Display Options": { + "background": [ + 0.7137254901960784, + 0.7137254901960784, + 0.7137254901960784 + ], + "backgroundBottom": [ + 0.7137254901960784, + 0.7137254901960784, + 0.7137254901960784 + ], + "backgroundTop": [ + 0.7137254901960784, + 0.7137254901960784, + 0.7137254901960784 + ], + "override_display": true + }, + "Generic": { + "isolate_view": true, + "off_screen": true + }, + "IO": { + "name": "", + "open_finished": false, + "raw_frame_numbers": false, + "recent_playblasts": [], + "save_file": false + }, + "PanZoom": { + "pan_zoom": true + }, + "Renderer": { + "rendererName": "vp2Renderer" + }, + "Resolution": { + "height": 1080, + "mode": "Custom", + "percent": 1.0, + "width": 1920 + }, + "Time Range": { + "end_frame": 25, + "frame": "", + "start_frame": 0, + "time": "Time Slider" + }, + "Viewport Options": { + "cameras": 
false, + "clipGhosts": false, + "controlVertices": false, + "deformers": false, + "dimensions": false, + "displayLights": 0, + "dynamicConstraints": false, + "dynamics": false, + "fluids": false, + "follicles": false, + "gpuCacheDisplayFilter": false, + "greasePencils": false, + "grid": false, + "hairSystems": false, + "handles": false, + "high_quality": true, + "hud": false, + "hulls": false, + "ikHandles": false, + "imagePlane": false, + "joints": false, + "lights": false, + "locators": false, + "manipulators": false, + "motionTrails": false, + "nCloths": false, + "nParticles": false, + "nRigids": false, + "nurbsCurves": false, + "nurbsSurfaces": false, + "override_viewport_options": true, + "particleInstancers": false, + "pivots": false, + "planes": false, + "pluginShapes": false, + "polymeshes": true, + "shadows": false, + "strokes": false, + "subdivSurfaces": false, + "textures": false, + "twoSidedLighting": true + }, + "Camera Options": { + "displayGateMask": false, + "displayResolution": false, + "displayFilmGate": false, + "displayFieldChart": false, + "displaySafeAction": false, + "displaySafeTitle": false, + "displayFilmPivot": false, + "displayFilmOrigin": false, + "overscan": 1.0 + } +} diff --git a/pype/settings/defaults/project_settings/muster/templates_mapping.json b/pype/settings/defaults/project_settings/muster/templates_mapping.json new file mode 100644 index 0000000000..4edab9077d --- /dev/null +++ b/pype/settings/defaults/project_settings/muster/templates_mapping.json @@ -0,0 +1,19 @@ +{ + "3delight": 41, + "arnold": 46, + "arnold_sf": 57, + "gelato": 30, + "harware": 3, + "krakatoa": 51, + "file_layers": 7, + "mentalray": 2, + "mentalray_sf": 6, + "redshift": 55, + "renderman": 29, + "software": 1, + "software_sf": 5, + "turtle": 10, + "vector": 4, + "vray": 37, + "ffmpeg": 48 +} diff --git a/pype/settings/defaults/project_settings/plugins/celaction/publish.json b/pype/settings/defaults/project_settings/plugins/celaction/publish.json new file 
mode 100644 index 0000000000..fd1af23d84 --- /dev/null +++ b/pype/settings/defaults/project_settings/plugins/celaction/publish.json @@ -0,0 +1,11 @@ +{ + "ExtractCelactionDeadline": { + "enabled": true, + "deadline_department": "", + "deadline_priority": 50, + "deadline_pool": "", + "deadline_pool_secondary": "", + "deadline_group": "", + "deadline_chunk_size": 10 + } +} \ No newline at end of file diff --git a/pype/settings/defaults/project_settings/plugins/config.json b/pype/settings/defaults/project_settings/plugins/config.json new file mode 100644 index 0000000000..9e26dfeeb6 --- /dev/null +++ b/pype/settings/defaults/project_settings/plugins/config.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/pype/settings/defaults/project_settings/plugins/ftrack/publish.json b/pype/settings/defaults/project_settings/plugins/ftrack/publish.json new file mode 100644 index 0000000000..d8d93a36ee --- /dev/null +++ b/pype/settings/defaults/project_settings/plugins/ftrack/publish.json @@ -0,0 +1,7 @@ +{ + "IntegrateFtrackNote": { + "enabled": false, + "note_with_intent_template": "{intent}: {comment}", + "note_labels": [] + } +} \ No newline at end of file diff --git a/pype/settings/defaults/project_settings/plugins/global/create.json b/pype/settings/defaults/project_settings/plugins/global/create.json new file mode 100644 index 0000000000..0967ef424b --- /dev/null +++ b/pype/settings/defaults/project_settings/plugins/global/create.json @@ -0,0 +1 @@ +{} diff --git a/pype/settings/defaults/project_settings/plugins/global/filter.json b/pype/settings/defaults/project_settings/plugins/global/filter.json new file mode 100644 index 0000000000..0967ef424b --- /dev/null +++ b/pype/settings/defaults/project_settings/plugins/global/filter.json @@ -0,0 +1 @@ +{} diff --git a/pype/settings/defaults/project_settings/plugins/global/load.json b/pype/settings/defaults/project_settings/plugins/global/load.json new file mode 100644 index 0000000000..0967ef424b --- /dev/null +++ 
b/pype/settings/defaults/project_settings/plugins/global/load.json @@ -0,0 +1 @@ +{} diff --git a/pype/settings/defaults/project_settings/plugins/global/publish.json b/pype/settings/defaults/project_settings/plugins/global/publish.json new file mode 100644 index 0000000000..b946ac4b32 --- /dev/null +++ b/pype/settings/defaults/project_settings/plugins/global/publish.json @@ -0,0 +1,98 @@ +{ + "IntegrateMasterVersion": { + "enabled": false + }, + "ExtractJpegEXR": { + "enabled": true, + "ffmpeg_args": { + "input": [ + "-gamma 2.2" + ], + "output": [] + } + }, + "ExtractReview": { + "enabled": true, + "profiles": [ + { + "families": [], + "hosts": [], + "outputs": { + "h264": { + "filter": { + "families": [ + "render", + "review", + "ftrack" + ] + }, + "ext": "mp4", + "ffmpeg_args": { + "input": [ + "-gamma 2.2" + ], + "video_filters": [], + "audio_filters": [], + "output": [ + "-pix_fmt yuv420p", + "-crf 18", + "-intra" + ] + }, + "tags": [ + "burnin", + "ftrackreview" + ] + } + } + } + ] + }, + "ExtractBurnin": { + "enabled": false, + "options": { + "font_size": 42, + "opacity": 1, + "bg_opacity": 0, + "x_offset": 5, + "y_offset": 5, + "bg_padding": 5 + }, + "fields": {}, + "profiles": [ + { + "burnins": { + "burnin": { + "TOP_LEFT": "{yy}-{mm}-{dd}", + "TOP_RIGHT": "{anatomy[version]}", + "TOP_CENTERED": "", + "BOTTOM_RIGHT": "{frame_start}-{current_frame}-{frame_end}", + "BOTTOM_CENTERED": "{asset}", + "BOTTOM_LEFT": "{username}" + } + } + } + ] + }, + "IntegrateAssetNew": { + "template_name_profiles": { + "publish": { + "families": [], + "tasks": [] + }, + "render": { + "families": [ + "review", + "render", + "prerender" + ] + } + } + }, + "ProcessSubmittedJobOnFarm": { + "enabled": false, + "deadline_department": "", + "deadline_pool": "", + "deadline_group": "" + } +} \ No newline at end of file diff --git a/pype/settings/defaults/project_settings/plugins/maya/create.json b/pype/settings/defaults/project_settings/plugins/maya/create.json new file mode 100644 
index 0000000000..0967ef424b --- /dev/null +++ b/pype/settings/defaults/project_settings/plugins/maya/create.json @@ -0,0 +1 @@ +{} diff --git a/pype/settings/defaults/project_settings/plugins/maya/filter.json b/pype/settings/defaults/project_settings/plugins/maya/filter.json new file mode 100644 index 0000000000..83d6f05f31 --- /dev/null +++ b/pype/settings/defaults/project_settings/plugins/maya/filter.json @@ -0,0 +1,9 @@ +{ + "Preset n1": { + "ValidateNoAnimation": false, + "ValidateShapeDefaultNames": false + }, + "Preset n2": { + "ValidateNoAnimation": false + } +} diff --git a/pype/settings/defaults/project_settings/plugins/maya/load.json b/pype/settings/defaults/project_settings/plugins/maya/load.json new file mode 100644 index 0000000000..260fbb35ee --- /dev/null +++ b/pype/settings/defaults/project_settings/plugins/maya/load.json @@ -0,0 +1,18 @@ +{ + "colors": { + "model": [0.821, 0.518, 0.117], + "rig": [0.144, 0.443, 0.463], + "pointcache": [0.368, 0.821, 0.117], + "animation": [0.368, 0.821, 0.117], + "ass": [1.0, 0.332, 0.312], + "camera": [0.447, 0.312, 1.0], + "fbx": [1.0, 0.931, 0.312], + "mayaAscii": [0.312, 1.0, 0.747], + "setdress": [0.312, 1.0, 0.747], + "layout": [0.312, 1.0, 0.747], + "vdbcache": [0.312, 1.0, 0.428], + "vrayproxy": [0.258, 0.95, 0.541], + "yeticache": [0.2, 0.8, 0.3], + "yetiRig": [0, 0.8, 0.5] + } +} diff --git a/pype/settings/defaults/project_settings/plugins/maya/publish.json b/pype/settings/defaults/project_settings/plugins/maya/publish.json new file mode 100644 index 0000000000..2b3637ff80 --- /dev/null +++ b/pype/settings/defaults/project_settings/plugins/maya/publish.json @@ -0,0 +1,17 @@ +{ + "ValidateModelName": { + "enabled": false, + "material_file": "/path/to/shader_name_definition.txt", + "regex": "(.*)_(\\d)*_(?P.*)_(GEO)" + }, + "ValidateAssemblyName": { + "enabled": false + }, + "ValidateShaderName": { + "enabled": false, + "regex": "(?P.*)_(.*)_SHD" + }, + "ValidateMeshHasOverlappingUVs": { + "enabled": false 
+ } +} \ No newline at end of file diff --git a/pype/settings/defaults/project_settings/plugins/maya/workfile_build.json b/pype/settings/defaults/project_settings/plugins/maya/workfile_build.json new file mode 100644 index 0000000000..443bc2cb2c --- /dev/null +++ b/pype/settings/defaults/project_settings/plugins/maya/workfile_build.json @@ -0,0 +1,136 @@ +[ + { + "tasks": [ + "lighting" + ], + "current_context": [ + { + "subset_name_filters": [ + ".+[Mm]ain" + ], + "families": [ + "model" + ], + "repre_names": [ + "abc", + "ma" + ], + "loaders": [ + "ReferenceLoader" + ] + }, + { + "families": [ + "animation", + "pointcache" + ], + "repre_names": [ + "abc" + ], + "loaders": [ + "ReferenceLoader" + ] + }, + { + "families": [ + "rendersetup" + ], + "repre_names": [ + "json" + ], + "loaders": [ + "RenderSetupLoader" + ] + }, + { + "families": [ + "camera" + ], + "repre_names": [ + "abc" + ], + "loaders": [ + "ReferenceLoader" + ] + } + ], + "linked_assets": [ + { + "families": [ + "setdress" + ], + "repre_names": [ + "ma" + ], + "loaders": [ + "ReferenceLoader" + ] + }, + { + "families": [ + "ass" + ], + "repre_names": [ + "ass" + ], + "loaders": [ + "assLoader" + ] + } + ] + }, + { + "tasks": [ + "animation" + ], + "current_context": [ + { + "families": [ + "camera" + ], + "repre_names": [ + "abc", + "ma" + ], + "loaders": [ + "ReferenceLoader" + ] + }, + { + "families": [ + "audio" + ], + "repre_names": [ + "wav" + ], + "loaders": [ + "RenderSetupLoader" + ] + } + ], + "linked_assets": [ + { + "families": [ + "setdress" + ], + "repre_names": [ + "proxy" + ], + "loaders": [ + "ReferenceLoader" + ] + }, + { + "families": [ + "rig" + ], + "repre_names": [ + "ass" + ], + "loaders": [ + "rigLoader" + ] + } + ] + } +] \ No newline at end of file diff --git a/pype/settings/defaults/project_settings/plugins/nuke/create.json b/pype/settings/defaults/project_settings/plugins/nuke/create.json new file mode 100644 index 0000000000..79ab665696 --- /dev/null +++ 
b/pype/settings/defaults/project_settings/plugins/nuke/create.json @@ -0,0 +1,8 @@ +{ + "CreateWriteRender": { + "fpath_template": "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}" + }, + "CreateWritePrerender": { + "fpath_template": "{work}/prerenders/nuke/{subset}/{subset}.{frame}.{ext}" + } +} \ No newline at end of file diff --git a/pype/settings/defaults/project_settings/plugins/nuke/load.json b/pype/settings/defaults/project_settings/plugins/nuke/load.json new file mode 100644 index 0000000000..0967ef424b --- /dev/null +++ b/pype/settings/defaults/project_settings/plugins/nuke/load.json @@ -0,0 +1 @@ +{} diff --git a/pype/settings/defaults/project_settings/plugins/nuke/publish.json b/pype/settings/defaults/project_settings/plugins/nuke/publish.json new file mode 100644 index 0000000000..08a099a0a0 --- /dev/null +++ b/pype/settings/defaults/project_settings/plugins/nuke/publish.json @@ -0,0 +1,53 @@ +{ + "ExtractThumbnail": { + "enabled": true, + "nodes": { + "Reformat": [ + [ + "type", + "to format" + ], + [ + "format", + "HD_1080" + ], + [ + "filter", + "Lanczos6" + ], + [ + "black_outside", + true + ], + [ + "pbb", + false + ] + ] + } + }, + "ValidateNukeWriteKnobs": { + "enabled": false, + "knobs": { + "render": { + "review": true + } + } + }, + "ExtractReviewDataLut": { + "enabled": false + }, + "ExtractReviewDataMov": { + "enabled": true, + "viewer_lut_raw": false + }, + "ExtractSlateFrame": { + "viewer_lut_raw": false + }, + "NukeSubmitDeadline": { + "deadline_priority": 50, + "deadline_pool": "", + "deadline_pool_secondary": "", + "deadline_chunk_size": 1 + } +} \ No newline at end of file diff --git a/pype/settings/defaults/project_settings/plugins/nuke/workfile_build.json b/pype/settings/defaults/project_settings/plugins/nuke/workfile_build.json new file mode 100644 index 0000000000..4b48b46184 --- /dev/null +++ b/pype/settings/defaults/project_settings/plugins/nuke/workfile_build.json @@ -0,0 +1,23 @@ +[ + { + "tasks": [ + "compositing" + ], + 
"current_context": [ + { + "families": [ + "render", + "plate" + ], + "repre_names": [ + "exr", + "dpx" + ], + "loaders": [ + "LoadSequence" + ] + } + ], + "linked_assets": [] + } +] \ No newline at end of file diff --git a/pype/settings/defaults/project_settings/plugins/nukestudio/filter.json b/pype/settings/defaults/project_settings/plugins/nukestudio/filter.json new file mode 100644 index 0000000000..bd6a0dc1bd --- /dev/null +++ b/pype/settings/defaults/project_settings/plugins/nukestudio/filter.json @@ -0,0 +1,10 @@ +{ + "strict": { + "ValidateVersion": true, + "VersionUpWorkfile": true + }, + "benevolent": { + "ValidateVersion": false, + "VersionUpWorkfile": false + } +} \ No newline at end of file diff --git a/pype/settings/defaults/project_settings/plugins/nukestudio/publish.json b/pype/settings/defaults/project_settings/plugins/nukestudio/publish.json new file mode 100644 index 0000000000..d99a878c35 --- /dev/null +++ b/pype/settings/defaults/project_settings/plugins/nukestudio/publish.json @@ -0,0 +1,9 @@ +{ + "CollectInstanceVersion": { + "enabled": false + }, + "ExtractReviewCutUpVideo": { + "enabled": true, + "tags_addition": [] + } +} \ No newline at end of file diff --git a/pype/settings/defaults/project_settings/plugins/resolve/create.json b/pype/settings/defaults/project_settings/plugins/resolve/create.json new file mode 100644 index 0000000000..8ff5b15714 --- /dev/null +++ b/pype/settings/defaults/project_settings/plugins/resolve/create.json @@ -0,0 +1,7 @@ +{ + "CreateShotClip": { + "clipName": "{track}{sequence}{shot}", + "folder": "takes", + "steps": 20 + } +} \ No newline at end of file diff --git a/pype/settings/defaults/project_settings/plugins/standalonepublisher/publish.json b/pype/settings/defaults/project_settings/plugins/standalonepublisher/publish.json new file mode 100644 index 0000000000..2f1a3e7aca --- /dev/null +++ b/pype/settings/defaults/project_settings/plugins/standalonepublisher/publish.json @@ -0,0 +1,27 @@ +{ + 
"ExtractThumbnailSP": { + "ffmpeg_args": { + "input": [ + "-gamma 2.2" + ], + "output": [] + } + }, + "ExtractReviewSP": { + "outputs": { + "h264": { + "input": [ + "-gamma 2.2" + ], + "output": [ + "-pix_fmt yuv420p", + "-crf 18" + ], + "tags": [ + "preview" + ], + "ext": "mov" + } + } + } +} \ No newline at end of file diff --git a/pype/settings/defaults/project_settings/plugins/test/create.json b/pype/settings/defaults/project_settings/plugins/test/create.json new file mode 100644 index 0000000000..fa0b2fc05f --- /dev/null +++ b/pype/settings/defaults/project_settings/plugins/test/create.json @@ -0,0 +1,8 @@ +{ + "MyTestCreator": { + "my_test_property": "B", + "active": false, + "new_property": "new", + "family": "new_family" + } +} diff --git a/pype/settings/defaults/project_settings/plugins/test/publish.json b/pype/settings/defaults/project_settings/plugins/test/publish.json new file mode 100644 index 0000000000..3180dd5d8a --- /dev/null +++ b/pype/settings/defaults/project_settings/plugins/test/publish.json @@ -0,0 +1,10 @@ +{ + "MyTestPlugin": { + "label": "loaded from preset", + "optional": true, + "families": ["changed", "by", "preset"] + }, + "MyTestRemovedPlugin": { + "enabled": false + } +} diff --git a/pype/settings/defaults/project_settings/premiere/asset_default.json b/pype/settings/defaults/project_settings/premiere/asset_default.json new file mode 100644 index 0000000000..84d2bde3d8 --- /dev/null +++ b/pype/settings/defaults/project_settings/premiere/asset_default.json @@ -0,0 +1,5 @@ +{ + "frameStart": 1001, + "handleStart": 0, + "handleEnd": 0 +} diff --git a/pype/settings/defaults/project_settings/premiere/rules_tasks.json b/pype/settings/defaults/project_settings/premiere/rules_tasks.json new file mode 100644 index 0000000000..333c9cd70b --- /dev/null +++ b/pype/settings/defaults/project_settings/premiere/rules_tasks.json @@ -0,0 +1,21 @@ +{ + "defaultTasks": ["Layout", "Animation"], + "taskToSubsets": { + "Layout": ["reference", "audio"], + 
"Animation": ["audio"] + }, + "subsetToRepresentations": { + "reference": { + "preset": "h264", + "representation": "mp4" + }, + "thumbnail": { + "preset": "jpeg_thumb", + "representation": "jpg" + }, + "audio": { + "preset": "48khz", + "representation": "wav" + } + } +} diff --git a/pype/settings/defaults/project_settings/standalonepublisher/families.json b/pype/settings/defaults/project_settings/standalonepublisher/families.json new file mode 100644 index 0000000000..d05941cc26 --- /dev/null +++ b/pype/settings/defaults/project_settings/standalonepublisher/families.json @@ -0,0 +1,90 @@ +{ + "create_look": { + "name": "look", + "label": "Look", + "family": "look", + "icon": "paint-brush", + "defaults": ["Main"], + "help": "Shader connections defining shape look" + }, + "create_model": { + "name": "model", + "label": "Model", + "family": "model", + "icon": "cube", + "defaults": ["Main", "Proxy", "Sculpt"], + "help": "Polygonal static geometry" + }, + "create_workfile": { + "name": "workfile", + "label": "Workfile", + "family": "workfile", + "icon": "cube", + "defaults": ["Main"], + "help": "Working scene backup" + }, + "create_camera": { + "name": "camera", + "label": "Camera", + "family": "camera", + "icon": "video-camera", + "defaults": ["Main"], + "help": "Single baked camera" + }, + "create_pointcache": { + "name": "pointcache", + "label": "Pointcache", + "family": "pointcache", + "icon": "gears", + "defaults": ["Main"], + "help": "Alembic pointcache for animated data" + }, + "create_rig": { + "name": "rig", + "label": "Rig", + "family": "rig", + "icon": "wheelchair", + "defaults": ["Main"], + "help": "Artist-friendly rig with controls" + }, + "create_layout": { + "name": "layout", + "label": "Layout", + "family": "layout", + "icon": "cubes", + "defaults": ["Main"], + "help": "Simple scene for animators with camera" + }, + "create_plate": { + "name": "plate", + "label": "Plate", + "family": "plate", + "icon": "camera", + "defaults": ["Main", "BG", 
"Reference"], + "help": "Plates for compositors" + }, + "create_matchmove": { + "name": "matchmove", + "label": "Matchmove script", + "family": "matchmove", + "icon": "empire", + "defaults": ["Camera", "Object", "Mocap"], + "help": "Script exported from matchmoving application" + }, + "create_images": { + "name": "image", + "label": "Image file", + "family": "image", + "icon": "image", + "defaults": ["ConceptArt", "Reference", "Texture", "MattePaint"], + "help": "Holder for all kinds of image data" + }, + "create_editorial": { + "name": "editorial", + "label": "Editorial", + "family": "editorial", + "icon": "image", + "defaults": ["Main"], + "help": "Editorial files to generate shots." + } +} diff --git a/pype/settings/defaults/project_settings/tools/slates/example_HD.json b/pype/settings/defaults/project_settings/tools/slates/example_HD.json new file mode 100644 index 0000000000..b06391fb63 --- /dev/null +++ b/pype/settings/defaults/project_settings/tools/slates/example_HD.json @@ -0,0 +1,212 @@ +{ + "width": 1920, + "height": 1080, + "destination_path": "{destination_path}", + "style": { + "*": { + "font-family": "arial", + "font-color": "#ffffff", + "font-bold": false, + "font-italic": false, + "bg-color": "#0077ff", + "alignment-horizontal": "left", + "alignment-vertical": "top" + }, + "layer": { + "padding": 0, + "margin": 0 + }, + "rectangle": { + "padding": 0, + "margin": 0, + "bg-color": "#E9324B", + "fill": true + }, + "main_frame": { + "padding": 0, + "margin": 0, + "bg-color": "#252525" + }, + "table": { + "padding": 0, + "margin": 0, + "bg-color": "transparent" + }, + "table-item": { + "padding": 5, + "padding-bottom": 10, + "margin": 0, + "bg-color": "#212121", + "bg-alter-color": "#272727", + "font-color": "#dcdcdc", + "font-bold": false, + "font-italic": false, + "alignment-horizontal": "left", + "alignment-vertical": "top", + "word-wrap": false, + "ellide": true, + "max-lines": 1 + }, + "table-item-col[0]": { + "font-size": 20, + "font-color": 
"#898989", + "font-bold": true, + "ellide": false, + "word-wrap": true, + "max-lines": null + }, + "table-item-col[1]": { + "font-size": 40, + "padding-left": 10 + }, + "#colorbar": { + "bg-color": "#9932CC" + } + }, + "items": [{ + "type": "layer", + "direction": 1, + "name": "MainLayer", + "style": { + "#MainLayer": { + "width": 1094, + "height": 1000, + "margin": 25, + "padding": 0 + }, + "#LeftSide": { + "margin-right": 25 + } + }, + "items": [{ + "type": "layer", + "name": "LeftSide", + "items": [{ + "type": "layer", + "direction": 1, + "style": { + "table-item": { + "bg-color": "transparent", + "padding-bottom": 20 + }, + "table-item-col[0]": { + "font-size": 20, + "font-color": "#898989", + "alignment-horizontal": "right" + }, + "table-item-col[1]": { + "alignment-horizontal": "left", + "font-bold": true, + "font-size": 40 + } + }, + "items": [{ + "type": "table", + "values": [ + ["Show:", "{project[name]}"] + ], + "style": { + "table-item-field[0:0]": { + "width": 150 + }, + "table-item-field[0:1]": { + "width": 580 + } + } + }, { + "type": "table", + "values": [ + ["Submitting For:", "{intent}"] + ], + "style": { + "table-item-field[0:0]": { + "width": 160 + }, + "table-item-field[0:1]": { + "width": 218, + "alignment-horizontal": "right" + } + } + }] + }, { + "type": "rectangle", + "style": { + "bg-color": "#bc1015", + "width": 1108, + "height": 5, + "fill": true + } + }, { + "type": "table", + "use_alternate_color": true, + "values": [ + ["Version name:", "{version_name}"], + ["Date:", "{date}"], + ["Shot Types:", "{shot_type}"], + ["Submission Note:", "{submission_note}"] + ], + "style": { + "table-item": { + "padding-bottom": 20 + }, + "table-item-field[0:1]": { + "font-bold": true + }, + "table-item-field[3:0]": { + "word-wrap": true, + "ellide": true, + "max-lines": 4 + }, + "table-item-col[0]": { + "alignment-horizontal": "right", + "width": 150 + }, + "table-item-col[1]": { + "alignment-horizontal": "left", + "width": 958 + } + } + }] + }, { + 
"type": "layer", + "name": "RightSide", + "items": [{ + "type": "placeholder", + "name": "thumbnail", + "path": "{thumbnail_path}", + "style": { + "width": 730, + "height": 412 + } + }, { + "type": "placeholder", + "name": "colorbar", + "path": "{color_bar_path}", + "return_data": true, + "style": { + "width": 730, + "height": 55 + } + }, { + "type": "table", + "use_alternate_color": true, + "values": [ + ["Vendor:", "{vendor}"], + ["Shot Name:", "{shot_name}"], + ["Frames:", "{frame_start} - {frame_end} ({duration})"] + ], + "style": { + "table-item-col[0]": { + "alignment-horizontal": "left", + "width": 200 + }, + "table-item-col[1]": { + "alignment-horizontal": "right", + "width": 530, + "font-size": 30 + } + } + }] + }] + }] +} diff --git a/pype/settings/defaults/project_settings/unreal/project_setup.json b/pype/settings/defaults/project_settings/unreal/project_setup.json new file mode 100644 index 0000000000..8a4dffc526 --- /dev/null +++ b/pype/settings/defaults/project_settings/unreal/project_setup.json @@ -0,0 +1,4 @@ +{ + "dev_mode": false, + "install_unreal_python_engine": false +} diff --git a/pype/settings/defaults/system_settings/environments/avalon.json b/pype/settings/defaults/system_settings/environments/avalon.json new file mode 100644 index 0000000000..832ba07e71 --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/avalon.json @@ -0,0 +1,16 @@ +{ + "AVALON_CONFIG": "pype", + "AVALON_PROJECTS": "{PYPE_PROJECTS_PATH}", + "AVALON_USERNAME": "avalon", + "AVALON_PASSWORD": "secret", + "AVALON_DEBUG": "1", + "AVALON_MONGO": "mongodb://localhost:2707", + "AVALON_DB": "avalon", + "AVALON_DB_DATA": "{PYPE_SETUP_PATH}/../mongo_db_data", + "AVALON_EARLY_ADOPTER": "1", + "AVALON_SCHEMA": "{PYPE_MODULE_ROOT}/schema", + "AVALON_LOCATION": "http://127.0.0.1", + "AVALON_LABEL": "Pype", + "AVALON_TIMEOUT": "1000", + "AVALON_THUMBNAIL_ROOT": "" +} diff --git a/pype/settings/defaults/system_settings/environments/blender.json 
b/pype/settings/defaults/system_settings/environments/blender.json new file mode 100644 index 0000000000..6f4f6a012d --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/blender.json @@ -0,0 +1,7 @@ +{ + "BLENDER_USER_SCRIPTS": "{PYPE_SETUP_PATH}/repos/avalon-core/setup/blender", + "PYTHONPATH": [ + "{PYPE_SETUP_PATH}/repos/avalon-core/setup/blender", + "{PYTHONPATH}" + ] +} diff --git a/pype/settings/defaults/system_settings/environments/celaction.json b/pype/settings/defaults/system_settings/environments/celaction.json new file mode 100644 index 0000000000..cdd4e609ab --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/celaction.json @@ -0,0 +1,3 @@ +{ + "CELACTION_TEMPLATE": "{PYPE_MODULE_ROOT}/pype/hosts/celaction/celaction_template_scene.scn" +} diff --git a/pype/settings/defaults/system_settings/environments/deadline.json b/pype/settings/defaults/system_settings/environments/deadline.json new file mode 100644 index 0000000000..e8ef52805b --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/deadline.json @@ -0,0 +1,3 @@ +{ + "DEADLINE_REST_URL": "http://localhost:8082" +} diff --git a/pype/settings/defaults/system_settings/environments/ftrack.json b/pype/settings/defaults/system_settings/environments/ftrack.json new file mode 100644 index 0000000000..4f25de027b --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/ftrack.json @@ -0,0 +1,18 @@ +{ + "FTRACK_SERVER": "https://pype.ftrackapp.com", + "FTRACK_ACTIONS_PATH": [ + "{PYPE_MODULE_ROOT}/pype/modules/ftrack/actions" + ], + "FTRACK_EVENTS_PATH": [ + "{PYPE_MODULE_ROOT}/pype/modules/ftrack/events" + ], + "PYTHONPATH": [ + "{PYPE_MODULE_ROOT}/pype/vendor", + "{PYTHONPATH}" + ], + "PYBLISHPLUGINPATH": [ + "{PYPE_MODULE_ROOT}/pype/plugins/ftrack/publish" + ], + "FTRACK_EVENTS_MONGO_DB": "pype", + "FTRACK_EVENTS_MONGO_COL": "ftrack_events" +} diff --git a/pype/settings/defaults/system_settings/environments/global.json 
b/pype/settings/defaults/system_settings/environments/global.json new file mode 100644 index 0000000000..ef528e6857 --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/global.json @@ -0,0 +1,44 @@ +{ + "PYPE_STUDIO_NAME": "Studio Name", + "PYPE_STUDIO_CODE": "stu", + "PYPE_APP_ROOT": "{PYPE_SETUP_PATH}/pypeapp", + "PYPE_MODULE_ROOT": "{PYPE_SETUP_PATH}/repos/pype", + "PYPE_PROJECT_PLUGINS": "", + "STUDIO_SOFT": "{PYP_SETUP_ROOT}/soft", + "FFMPEG_PATH": { + "windows": "{VIRTUAL_ENV}/localized/ffmpeg_exec/windows/bin;{PYPE_SETUP_PATH}/vendor/ffmpeg_exec/windows/bin", + "darwin": "{VIRTUAL_ENV}/localized/ffmpeg_exec/darwin/bin:{PYPE_SETUP_PATH}/vendor/ffmpeg_exec/darwin/bin", + "linux": "{VIRTUAL_ENV}/localized/ffmpeg_exec/linux:{PYPE_SETUP_PATH}/vendor/ffmpeg_exec/linux" + }, + "DJV_PATH": { + "windows": [ + "C:/Program Files/djv-1.1.0-Windows-64/bin/djv_view.exe", + "C:/Program Files/DJV/bin/djv_view.exe", + "{STUDIO_SOFT}/djv/windows/bin/djv_view.exe" + ], + "linux": [ + "usr/local/djv/djv_view", + "{STUDIO_SOFT}/djv/linux/bin/djv_view" + ], + "darwin": "Application/DJV.app/Contents/MacOS/DJV" + }, + "PATH": [ + "{PYPE_CONFIG}/launchers", + "{PYPE_APP_ROOT}", + "{FFMPEG_PATH}", + "{PATH}" + ], + "PYPE_OCIO_CONFIG": "{STUDIO_SOFT}/OpenColorIO-Configs", + "PYTHONPATH": { + "windows": "{VIRTUAL_ENV}/Lib/site-packages;{PYPE_MODULE_ROOT}/pype/tools;{PYTHONPATH}", + "linux": "{VIRTUAL_ENV}/lib/python{PYTHON_VERSION}/site-packages:{PYPE_MODULE_ROOT}/pype/tools:{PYTHONPATH}", + "darwin": "{VIRTUAL_ENV}/lib/python{PYTHON_VERSION}/site-packages:{PYPE_MODULE_ROOT}/pype/tools:{PYTHONPATH}" + }, + "PYPE_PROJECT_CONFIGS": "{PYPE_SETUP_PATH}/../studio-project-configs", + "PYPE_PYTHON_EXE": { + "windows": "{VIRTUAL_ENV}/Scripts/python.exe", + "linux": "{VIRTUAL_ENV}/Scripts/python", + "darwin": "{VIRTUAL_ENV}/bin/python" + }, + "PYBLISH_GUI": "pyblish_pype" +} diff --git a/pype/settings/defaults/system_settings/environments/harmony.json 
b/pype/settings/defaults/system_settings/environments/harmony.json new file mode 100644 index 0000000000..d394343935 --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/harmony.json @@ -0,0 +1,4 @@ +{ + "AVALON_HARMONY_WORKFILES_ON_LAUNCH": "1", + "PYBLISH_GUI_ALWAYS_EXEC": "1" +} diff --git a/pype/settings/defaults/system_settings/environments/houdini.json b/pype/settings/defaults/system_settings/environments/houdini.json new file mode 100644 index 0000000000..95c7d19088 --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/houdini.json @@ -0,0 +1,12 @@ +{ + "HOUDINI_PATH": { + "darwin": "{PYPE_MODULE_ROOT}/setup/houdini:&", + "linux": "{PYPE_MODULE_ROOT}/setup/houdini:&", + "windows": "{PYPE_MODULE_ROOT}/setup/houdini;&" + }, + "HOUDINI_MENU_PATH": { + "darwin": "{PYPE_MODULE_ROOT}/setup/houdini:&", + "linux": "{PYPE_MODULE_ROOT}/setup/houdini:&", + "windows": "{PYPE_MODULE_ROOT}/setup/houdini;&" + } +} diff --git a/pype/settings/defaults/system_settings/environments/maya.json b/pype/settings/defaults/system_settings/environments/maya.json new file mode 100644 index 0000000000..7785b108f7 --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/maya.json @@ -0,0 +1,14 @@ +{ + "PYTHONPATH": [ + "{PYPE_SETUP_PATH}/repos/avalon-core/setup/maya", + "{PYPE_SETUP_PATH}/repos/maya-look-assigner", + "{PYTHON_ENV}/python2/Lib/site-packages", + "{PYTHONPATH}" + ], + "MAYA_DISABLE_CLIC_IPM": "Yes", + "MAYA_DISABLE_CIP": "Yes", + "MAYA_DISABLE_CER": "Yes", + "PYMEL_SKIP_MEL_INIT": "Yes", + "LC_ALL": "C", + "PYPE_LOG_NO_COLORS": "Yes" +} diff --git a/pype/settings/defaults/system_settings/environments/maya_2018.json b/pype/settings/defaults/system_settings/environments/maya_2018.json new file mode 100644 index 0000000000..72a0c57ce3 --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/maya_2018.json @@ -0,0 +1,11 @@ +{ + "MAYA_VERSION": "2018", + "MAYA_LOCATION": { + "darwin": 
"/Applications/Autodesk/maya{MAYA_VERSION}/Maya.app/Contents", + "linux": "/usr/autodesk/maya{MAYA_VERSION}", + "windows": "C:/Program Files/Autodesk/Maya{MAYA_VERSION}" + }, + "DYLD_LIBRARY_PATH": { + "darwin": "{MAYA_LOCATION}/MacOS" + } +} diff --git a/pype/settings/defaults/system_settings/environments/maya_2020.json b/pype/settings/defaults/system_settings/environments/maya_2020.json new file mode 100644 index 0000000000..efd0250bc8 --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/maya_2020.json @@ -0,0 +1,11 @@ +{ + "MAYA_VERSION": "2020", + "MAYA_LOCATION": { + "darwin": "/Applications/Autodesk/maya{MAYA_VERSION}/Maya.app/Contents", + "linux": "/usr/autodesk/maya{MAYA_VERSION}", + "windows": "C:/Program Files/Autodesk/Maya{MAYA_VERSION}" + }, + "DYLD_LIBRARY_PATH": { + "darwin": "{MAYA_LOCATION}/MacOS" + } +} diff --git a/pype/settings/defaults/system_settings/environments/mayabatch.json b/pype/settings/defaults/system_settings/environments/mayabatch.json new file mode 100644 index 0000000000..7785b108f7 --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/mayabatch.json @@ -0,0 +1,14 @@ +{ + "PYTHONPATH": [ + "{PYPE_SETUP_PATH}/repos/avalon-core/setup/maya", + "{PYPE_SETUP_PATH}/repos/maya-look-assigner", + "{PYTHON_ENV}/python2/Lib/site-packages", + "{PYTHONPATH}" + ], + "MAYA_DISABLE_CLIC_IPM": "Yes", + "MAYA_DISABLE_CIP": "Yes", + "MAYA_DISABLE_CER": "Yes", + "PYMEL_SKIP_MEL_INIT": "Yes", + "LC_ALL": "C", + "PYPE_LOG_NO_COLORS": "Yes" +} diff --git a/pype/settings/defaults/system_settings/environments/mayabatch_2019.json b/pype/settings/defaults/system_settings/environments/mayabatch_2019.json new file mode 100644 index 0000000000..aa7360a943 --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/mayabatch_2019.json @@ -0,0 +1,11 @@ +{ + "MAYA_VERSION": "2019", + "MAYA_LOCATION": { + "darwin": "/Applications/Autodesk/maya{MAYA_VERSION}/Maya.app/Contents", + "linux": 
"/usr/autodesk/maya{MAYA_VERSION}", + "windows": "C:/Program Files/Autodesk/Maya{MAYA_VERSION}" + }, + "DYLD_LIBRARY_PATH": { + "darwin": "{MAYA_LOCATION}/MacOS" + } +} diff --git a/pype/settings/defaults/system_settings/environments/mtoa_3.1.1.json b/pype/settings/defaults/system_settings/environments/mtoa_3.1.1.json new file mode 100644 index 0000000000..f7b9f94d4e --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/mtoa_3.1.1.json @@ -0,0 +1,23 @@ +{ + "MTOA": "{PYPE_STUDIO_SOFTWARE}/arnold/mtoa_{MAYA_VERSION}_{MTOA_VERSION}", + "MTOA_VERSION": "3.1.1", + "MAYA_RENDER_DESC_PATH": "{MTOA}", + "MAYA_MODULE_PATH": "{MTOA}", + "ARNOLD_PLUGIN_PATH": "{MTOA}/shaders", + "MTOA_EXTENSIONS_PATH": { + "darwin": "{MTOA}/extensions", + "linux": "{MTOA}/extensions", + "windows": "{MTOA}/extensions" + }, + "MTOA_EXTENSIONS": { + "darwin": "{MTOA}/extensions", + "linux": "{MTOA}/extensions", + "windows": "{MTOA}/extensions" + }, + "DYLD_LIBRARY_PATH": { + "darwin": "{MTOA}/bin" + }, + "PATH": { + "windows": "{PATH};{MTOA}/bin" + } +} diff --git a/pype/settings/defaults/system_settings/environments/muster.json b/pype/settings/defaults/system_settings/environments/muster.json new file mode 100644 index 0000000000..26f311146a --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/muster.json @@ -0,0 +1,3 @@ +{ + "MUSTER_REST_URL": "http://127.0.0.1:9890" +} diff --git a/pype/settings/defaults/system_settings/environments/nuke.json b/pype/settings/defaults/system_settings/environments/nuke.json new file mode 100644 index 0000000000..50dd31ac91 --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/nuke.json @@ -0,0 +1,15 @@ +{ + "NUKE_PATH": [ + "{PYPE_SETUP_PATH}/repos/avalon-core/setup/nuke/nuke_path", + "{PYPE_MODULE_ROOT}/setup/nuke/nuke_path", + "{PYPE_STUDIO_PLUGINS}/nuke" + ], + "PATH": { + "windows": "C:/Program Files (x86)/QuickTime/QTSystem/;{PATH}" + }, + "PYPE_LOG_NO_COLORS": "True", + "PYTHONPATH": { + "windows": 
"{VIRTUAL_ENV}/Lib/site-packages;{PYTHONPATH}", + "linux": "{VIRTUAL_ENV}/lib/python3.6/site-packages:{PYTHONPATH}" + } +} diff --git a/pype/settings/defaults/system_settings/environments/nukestudio.json b/pype/settings/defaults/system_settings/environments/nukestudio.json new file mode 100644 index 0000000000..b05e2411f0 --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/nukestudio.json @@ -0,0 +1,11 @@ +{ + "HIERO_PLUGIN_PATH": [ + "{PYPE_MODULE_ROOT}/setup/nukestudio/hiero_plugin_path" + ], + "PATH": { + "windows": "C:/Program Files (x86)/QuickTime/QTSystem/;{PATH}" + }, + "WORKFILES_STARTUP": "0", + "TAG_ASSETBUILD_STARTUP": "0", + "PYPE_LOG_NO_COLORS": "True" +} diff --git a/pype/settings/defaults/system_settings/environments/nukestudio_10.0.json b/pype/settings/defaults/system_settings/environments/nukestudio_10.0.json new file mode 100644 index 0000000000..9bdcef53c9 --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/nukestudio_10.0.json @@ -0,0 +1,4 @@ +{ + "PYPE_LOG_NO_COLORS": "Yes", + "QT_PREFERRED_BINDING": "PySide" +} \ No newline at end of file diff --git a/pype/settings/defaults/system_settings/environments/nukex.json b/pype/settings/defaults/system_settings/environments/nukex.json new file mode 100644 index 0000000000..2b77f44076 --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/nukex.json @@ -0,0 +1,10 @@ +{ + "NUKE_PATH": [ + "{PYPE_SETUP_PATH}/repos/avalon-core/setup/nuke/nuke_path", + "{PYPE_MODULE_ROOT}/setup/nuke/nuke_path", + "{PYPE_STUDIO_PLUGINS}/nuke" + ], + "PATH": { + "windows": "C:/Program Files (x86)/QuickTime/QTSystem/;{PATH}" + } +} diff --git a/pype/settings/defaults/system_settings/environments/nukex_10.0.json b/pype/settings/defaults/system_settings/environments/nukex_10.0.json new file mode 100644 index 0000000000..9bdcef53c9 --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/nukex_10.0.json @@ -0,0 +1,4 @@ +{ + "PYPE_LOG_NO_COLORS": "Yes", + 
"QT_PREFERRED_BINDING": "PySide" +} \ No newline at end of file diff --git a/pype/settings/defaults/system_settings/environments/photoshop.json b/pype/settings/defaults/system_settings/environments/photoshop.json new file mode 100644 index 0000000000..2208a88665 --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/photoshop.json @@ -0,0 +1,4 @@ +{ + "AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH": "1", + "PYTHONPATH": "{PYTHONPATH}" +} diff --git a/pype/settings/defaults/system_settings/environments/premiere.json b/pype/settings/defaults/system_settings/environments/premiere.json new file mode 100644 index 0000000000..27dc5c564b --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/premiere.json @@ -0,0 +1,11 @@ +{ + "EXTENSIONS_PATH": { + "windows": "{USERPROFILE}/AppData/Roaming/Adobe/CEP/extensions", + "darvin": "{USER}/Library/Application Support/Adobe/CEP/extensions" + }, + "EXTENSIONS_CACHE_PATH": { + "windows": "{USERPROFILE}/AppData/Local/Temp/cep_cache", + "darvin": "{USER}/Library/Application Support/Adobe/CEP/cep_cache" + }, + "installed_zxp": "" +} diff --git a/pype/settings/defaults/system_settings/environments/resolve.json b/pype/settings/defaults/system_settings/environments/resolve.json new file mode 100644 index 0000000000..1ff197dd5a --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/resolve.json @@ -0,0 +1,40 @@ +{ + "RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR": [ + "{STUDIO_SOFT}/davinci_resolve/scripts/python" + ], + "RESOLVE_SCRIPT_API": { + "windows": "{PROGRAMDATA}/Blackmagic Design/DaVinci Resolve/Support/Developer/Scripting", + "darvin": "/Library/Application Support/Blackmagic Design/DaVinci Resolve/Developer/Scripting", + "linux": "/opt/resolve/Developer/Scripting" + }, + "RESOLVE_SCRIPT_LIB": { + "windows": "C:/Program Files/Blackmagic Design/DaVinci Resolve/fusionscript.dll", + "darvin": "/Applications/DaVinci Resolve/DaVinci Resolve.app/Contents/Libraries/Fusion/fusionscript.so", + "linux": 
"/opt/resolve/libs/Fusion/fusionscript.so" + }, + "RESOLVE_UTILITY_SCRIPTS_DIR": { + "windows": "{PROGRAMDATA}/Blackmagic Design/DaVinci Resolve/Fusion/Scripts/Comp", + "darvin": "/Library/Application Support/Blackmagic Design/DaVinci Resolve/Fusion/Scripts/Comp", + "linux": "/opt/resolve/Fusion/Scripts/Comp" + }, + "PYTHON36_RESOLVE": { + "windows": "{LOCALAPPDATA}/Programs/Python/Python36", + "darvin": "~/Library/Python/3.6/bin", + "linux": "/opt/Python/3.6/bin" + }, + "PYTHONPATH": [ + "{PYTHON36_RESOLVE}/Lib/site-packages", + "{VIRTUAL_ENV}/Lib/site-packages", + "{PYTHONPATH}", + "{RESOLVE_SCRIPT_API}/Modules", + "{PYTHONPATH}" + ], + "PATH": [ + "{PYTHON36_RESOLVE}", + "{PYTHON36_RESOLVE}/Scripts", + "{PATH}" + ], + "PRE_PYTHON_SCRIPT": "{PYPE_MODULE_ROOT}/pype/resolve/preload_console.py", + "PYPE_LOG_NO_COLORS": "True", + "RESOLVE_DEV": "True" +} diff --git a/pype/settings/defaults/system_settings/environments/storyboardpro.json b/pype/settings/defaults/system_settings/environments/storyboardpro.json new file mode 100644 index 0000000000..581ad4db45 --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/storyboardpro.json @@ -0,0 +1,4 @@ +{ + "AVALON_TOONBOOM_WORKFILES_ON_LAUNCH": "1", + "PYBLISH_LITE_ALWAYS_EXEC": "1" +} diff --git a/pype/settings/defaults/system_settings/environments/unreal_4.24.json b/pype/settings/defaults/system_settings/environments/unreal_4.24.json new file mode 100644 index 0000000000..8feeb0230f --- /dev/null +++ b/pype/settings/defaults/system_settings/environments/unreal_4.24.json @@ -0,0 +1,5 @@ +{ + "AVALON_UNREAL_PLUGIN": "{PYPE_SETUP_PATH}/repos/avalon-unreal-integration", + "PYPE_LOG_NO_COLORS": "True", + "QT_PREFERRED_BINDING": "PySide" +} diff --git a/pype/settings/defaults/system_settings/environments/vray_4300.json b/pype/settings/defaults/system_settings/environments/vray_4300.json new file mode 100644 index 0000000000..3212188441 --- /dev/null +++ 
b/pype/settings/defaults/system_settings/environments/vray_4300.json @@ -0,0 +1,15 @@ +{ + "VRAY_VERSION": "43001", + "VRAY_ROOT": "C:/vray/vray_{VRAY_VERSION}", + "MAYA_RENDER_DESC_PATH": "{VRAY_ROOT}/maya_root/bin/rendererDesc", + "VRAY_FOR_MAYA2019_MAIN": "{VRAY_ROOT}/maya_vray", + "VRAY_FOR_MAYA2019_PLUGINS": "{VRAY_ROOT}/maya_vray/vrayplugins", + "VRAY_PLUGINS": "{VRAY_ROOT}/maya_vray/vrayplugins", + "VRAY_OSL_PATH_MAYA2019": "{VRAY_ROOT}/vray/opensl", + "PATH": "{VRAY_ROOT}/maya_root/bin;{PATH}", + "MAYA_PLUG_IN_PATH": "{VRAY_ROOT}/maya_vray/plug-ins", + "MAYA_SCRIPT_PATH": "{VRAY_ROOT}/maya_vray/scripts", + "PYTHONPATH": "{VRAY_ROOT}/maya_vray/scripts;{PYTHONPATH}", + "XBMLANGPATH": "{VRAY_ROOT}/maya_vray/icons;{XBMLANGPATH}", + "VRAY_AUTH_CLIENT_FILE_PATH": "{VRAY_ROOT}" +} diff --git a/pype/settings/defaults/system_settings/global/applications.json b/pype/settings/defaults/system_settings/global/applications.json new file mode 100644 index 0000000000..3a74a85468 --- /dev/null +++ b/pype/settings/defaults/system_settings/global/applications.json @@ -0,0 +1,34 @@ +{ + "blender_2.80": true, + "blender_2.81": true, + "blender_2.82": true, + "blender_2.83": true, + "celaction_local": true, + "celaction_remote": true, + "harmony_17": true, + "maya_2017": true, + "maya_2018": true, + "maya_2019": true, + "maya_2020": true, + "nuke_10.0": true, + "nuke_11.2": true, + "nuke_11.3": true, + "nuke_12.0": true, + "nukex_10.0": true, + "nukex_11.2": true, + "nukex_11.3": true, + "nukex_12.0": true, + "nukestudio_10.0": true, + "nukestudio_11.2": true, + "nukestudio_11.3": true, + "nukestudio_12.0": true, + "houdini_16": true, + "houdini_16.5": false, + "houdini_17": true, + "houdini_18": true, + "premiere_2019": true, + "premiere_2020": true, + "resolve_16": true, + "storyboardpro_7": true, + "unreal_4.24": true +} \ No newline at end of file diff --git a/pype/settings/defaults/system_settings/global/intent.json 
b/pype/settings/defaults/system_settings/global/intent.json new file mode 100644 index 0000000000..844bd1b518 --- /dev/null +++ b/pype/settings/defaults/system_settings/global/intent.json @@ -0,0 +1,8 @@ +{ + "items": { + "wip": "WIP", + "test": "TEST", + "final": "FINAL" + }, + "default": "wip" +} \ No newline at end of file diff --git a/pype/settings/defaults/system_settings/global/tools.json b/pype/settings/defaults/system_settings/global/tools.json new file mode 100644 index 0000000000..93895c0e81 --- /dev/null +++ b/pype/settings/defaults/system_settings/global/tools.json @@ -0,0 +1,6 @@ +{ + "mtoa_3.0.1": true, + "mtoa_3.1.1": true, + "mtoa_3.2.0": true, + "yeti_2.1.2": true +} \ No newline at end of file diff --git a/pype/settings/defaults/system_settings/global/tray_modules.json b/pype/settings/defaults/system_settings/global/tray_modules.json new file mode 100644 index 0000000000..0ff5b15552 --- /dev/null +++ b/pype/settings/defaults/system_settings/global/tray_modules.json @@ -0,0 +1,28 @@ +{ + "item_usage": { + "User settings": false, + "Ftrack": true, + "Muster": false, + "Avalon": true, + "Clockify": false, + "Standalone Publish": true, + "Logging": true, + "Idle Manager": true, + "Timers Manager": true, + "Rest Api": true, + "Adobe Communicator": true + }, + "attributes": { + "Rest Api": { + "default_port": 8021, + "exclude_ports": [] + }, + "Timers Manager": { + "full_time": 15.0, + "message_time": 0.5 + }, + "Clockify": { + "workspace_name": "" + } + } +} \ No newline at end of file diff --git a/pype/settings/defaults/system_settings/launchers/blender_2.80.toml b/pype/settings/defaults/system_settings/launchers/blender_2.80.toml new file mode 100644 index 0000000000..5fea78b7b0 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/blender_2.80.toml @@ -0,0 +1,7 @@ +application_dir = "blender" +executable = "blender_2.80" +schema = "avalon-core:application-1.0" +label = "Blender 2.80" +ftrack_label = "Blender" +icon ="blender" 
+ftrack_icon = '{}/app_icons/blender.png' diff --git a/pype/settings/defaults/system_settings/launchers/blender_2.81.toml b/pype/settings/defaults/system_settings/launchers/blender_2.81.toml new file mode 100644 index 0000000000..4f85ee5558 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/blender_2.81.toml @@ -0,0 +1,7 @@ +application_dir = "blender" +executable = "blender_2.81" +schema = "avalon-core:application-1.0" +label = "Blender 2.81" +ftrack_label = "Blender" +icon ="blender" +ftrack_icon = '{}/app_icons/blender.png' diff --git a/pype/settings/defaults/system_settings/launchers/blender_2.82.toml b/pype/settings/defaults/system_settings/launchers/blender_2.82.toml new file mode 100644 index 0000000000..840001452e --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/blender_2.82.toml @@ -0,0 +1,7 @@ +application_dir = "blender" +executable = "blender_2.82" +schema = "avalon-core:application-1.0" +label = "Blender 2.82" +ftrack_label = "Blender" +icon ="blender" +ftrack_icon = '{}/app_icons/blender.png' diff --git a/pype/settings/defaults/system_settings/launchers/blender_2.83.toml b/pype/settings/defaults/system_settings/launchers/blender_2.83.toml new file mode 100644 index 0000000000..7fc8bf87b9 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/blender_2.83.toml @@ -0,0 +1,7 @@ +application_dir = "blender" +executable = "blender_2.83" +schema = "avalon-core:application-1.0" +label = "Blender 2.83" +ftrack_label = "Blender" +icon ="blender" +ftrack_icon = '{}/app_icons/blender.png' diff --git a/pype/settings/defaults/system_settings/launchers/celaction_local.toml b/pype/settings/defaults/system_settings/launchers/celaction_local.toml new file mode 100644 index 0000000000..aef3548e08 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/celaction_local.toml @@ -0,0 +1,8 @@ +executable = "celaction_local" +schema = "avalon-core:application-1.0" +application_dir = "celaction" +label = 
"CelAction2D" +ftrack_label = "CelAction2D" +icon ="celaction_local" +launch_hook = "pype/hooks/celaction/prelaunch.py/CelactionPrelaunchHook" +ftrack_icon = '{}/app_icons/celaction_local.png' diff --git a/pype/settings/defaults/system_settings/launchers/celaction_publish.toml b/pype/settings/defaults/system_settings/launchers/celaction_publish.toml new file mode 100644 index 0000000000..86f4ae39e7 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/celaction_publish.toml @@ -0,0 +1,7 @@ +schema = "avalon-core:application-1.0" +application_dir = "shell" +executable = "celaction_publish" +label = "Shell" + +[environment] +CREATE_NEW_CONSOLE = "Yes" diff --git a/pype/settings/defaults/system_settings/launchers/darwin/blender_2.82 b/pype/settings/defaults/system_settings/launchers/darwin/blender_2.82 new file mode 100644 index 0000000000..8254411ea2 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/darwin/blender_2.82 @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +open -a blender $@ diff --git a/pype/settings/defaults/system_settings/launchers/darwin/harmony_17 b/pype/settings/defaults/system_settings/launchers/darwin/harmony_17 new file mode 100644 index 0000000000..b7eba2c2d0 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/darwin/harmony_17 @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +DIRNAME="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +set >~/environment.tmp +if [ $? -ne -0 ] ; then + echo "ERROR: cannot write to '~/environment.tmp'!" 
+ read -n 1 -s -r -p "Press any key to exit" + return +fi +open -a Terminal.app "$DIRNAME/harmony_17_launch" diff --git a/pype/settings/defaults/system_settings/launchers/darwin/harmony_17_launch b/pype/settings/defaults/system_settings/launchers/darwin/harmony_17_launch new file mode 100644 index 0000000000..5dcf5db57e --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/darwin/harmony_17_launch @@ -0,0 +1,5 @@ +#!/usr/bin/env bash +source ~/environment.tmp +export $(cut -d= -f1 ~/environment.tmp) +exe="/Applications/Toon Boom Harmony 17 Premium/Harmony Premium.app/Contents/MacOS/Harmony Premium" +$PYPE_PYTHON_EXE -c "import avalon.harmony;avalon.harmony.launch('$exe')" diff --git a/pype/settings/defaults/system_settings/launchers/darwin/python3 b/pype/settings/defaults/system_settings/launchers/darwin/python3 new file mode 100644 index 0000000000..c2b82c7638 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/darwin/python3 @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +open /usr/bin/python3 --args $@ diff --git a/pype/settings/defaults/system_settings/launchers/harmony_17.toml b/pype/settings/defaults/system_settings/launchers/harmony_17.toml new file mode 100644 index 0000000000..dbb76444a7 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/harmony_17.toml @@ -0,0 +1,8 @@ +application_dir = "harmony" +label = "Harmony 17" +ftrack_label = "Harmony" +schema = "avalon-core:application-1.0" +executable = "harmony_17" +description = "" +icon ="harmony_icon" +ftrack_icon = '{}/app_icons/harmony.png' diff --git a/pype/settings/defaults/system_settings/launchers/houdini_16.toml b/pype/settings/defaults/system_settings/launchers/houdini_16.toml new file mode 100644 index 0000000000..e29fa74cad --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/houdini_16.toml @@ -0,0 +1,7 @@ +executable = "houdini_16" +schema = "avalon-core:application-1.0" +application_dir = "houdini" +label = "Houdini 16" +ftrack_label = 
"Houdini" +icon = "houdini_icon" +ftrack_icon = '{}/app_icons/houdini.png' diff --git a/pype/settings/defaults/system_settings/launchers/houdini_17.toml b/pype/settings/defaults/system_settings/launchers/houdini_17.toml new file mode 100644 index 0000000000..5d01364330 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/houdini_17.toml @@ -0,0 +1,7 @@ +executable = "houdini_17" +schema = "avalon-core:application-1.0" +application_dir = "houdini" +label = "Houdini 17.0" +ftrack_label = "Houdini" +icon = "houdini_icon" +ftrack_icon = '{}/app_icons/houdini.png' diff --git a/pype/settings/defaults/system_settings/launchers/houdini_18.toml b/pype/settings/defaults/system_settings/launchers/houdini_18.toml new file mode 100644 index 0000000000..93b9a3334d --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/houdini_18.toml @@ -0,0 +1,7 @@ +executable = "houdini_18" +schema = "avalon-core:application-1.0" +application_dir = "houdini" +label = "Houdini 18" +ftrack_label = "Houdini" +icon = "houdini_icon" +ftrack_icon = '{}/app_icons/houdini.png' diff --git a/pype/settings/defaults/system_settings/launchers/linux/maya2016 b/pype/settings/defaults/system_settings/launchers/linux/maya2016 new file mode 100644 index 0000000000..98424304b1 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/linux/maya2016 @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +maya_path = "/usr/autodesk/maya2016/bin/maya" + +if [[ -z $PYPE_LOG_NO_COLORS ]]; then + $maya_path -file "$AVALON_LAST_WORKFILE" $@ +else + $maya_path $@ diff --git a/pype/settings/defaults/system_settings/launchers/linux/maya2017 b/pype/settings/defaults/system_settings/launchers/linux/maya2017 new file mode 100644 index 0000000000..7a2662a55e --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/linux/maya2017 @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +maya_path = "/usr/autodesk/maya2017/bin/maya" + +if [[ -z $AVALON_LAST_WORKFILE ]]; then + $maya_path -file 
"$AVALON_LAST_WORKFILE" $@ +else + $maya_path $@ diff --git a/pype/settings/defaults/system_settings/launchers/linux/maya2018 b/pype/settings/defaults/system_settings/launchers/linux/maya2018 new file mode 100644 index 0000000000..db832b3fe7 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/linux/maya2018 @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +maya_path = "/usr/autodesk/maya2018/bin/maya" + +if [[ -z $AVALON_LAST_WORKFILE ]]; then + $maya_path -file "$AVALON_LAST_WORKFILE" $@ +else + $maya_path $@ diff --git a/pype/settings/defaults/system_settings/launchers/linux/maya2019 b/pype/settings/defaults/system_settings/launchers/linux/maya2019 new file mode 100644 index 0000000000..8398734ab9 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/linux/maya2019 @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +maya_path = "/usr/autodesk/maya2019/bin/maya" + +if [[ -z $AVALON_LAST_WORKFILE ]]; then + $maya_path -file "$AVALON_LAST_WORKFILE" $@ +else + $maya_path $@ diff --git a/pype/settings/defaults/system_settings/launchers/linux/maya2020 b/pype/settings/defaults/system_settings/launchers/linux/maya2020 new file mode 100644 index 0000000000..18a1edd598 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/linux/maya2020 @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +maya_path = "/usr/autodesk/maya2020/bin/maya" + +if [[ -z $AVALON_LAST_WORKFILE ]]; then + $maya_path -file "$AVALON_LAST_WORKFILE" $@ +else + $maya_path $@ diff --git a/pype/settings/defaults/system_settings/launchers/linux/nuke11.3 b/pype/settings/defaults/system_settings/launchers/linux/nuke11.3 new file mode 100644 index 0000000000..b1c9a90d74 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/linux/nuke11.3 @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +gnome-terminal -e '/usr/local/Nuke11.3v5/Nuke11.3' diff --git a/pype/settings/defaults/system_settings/launchers/linux/nuke12.0 b/pype/settings/defaults/system_settings/launchers/linux/nuke12.0 new file mode 
100644 index 0000000000..99ea1a6b0c --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/linux/nuke12.0 @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +gnome-terminal -e '/usr/local/Nuke12.0v1/Nuke12.0' diff --git a/pype/settings/defaults/system_settings/launchers/linux/nukestudio11.3 b/pype/settings/defaults/system_settings/launchers/linux/nukestudio11.3 new file mode 100644 index 0000000000..750d54a7d5 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/linux/nukestudio11.3 @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +gnome-terminal -e '/usr/local/Nuke11.3v5/Nuke11.3 --studio' diff --git a/pype/settings/defaults/system_settings/launchers/linux/nukestudio12.0 b/pype/settings/defaults/system_settings/launchers/linux/nukestudio12.0 new file mode 100644 index 0000000000..ba5cf654a8 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/linux/nukestudio12.0 @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +gnome-terminal -e '/usr/local/Nuke12.0v1/Nuke12.0 --studio' diff --git a/pype/settings/defaults/system_settings/launchers/linux/nukex11.3 b/pype/settings/defaults/system_settings/launchers/linux/nukex11.3 new file mode 100644 index 0000000000..d913e4b961 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/linux/nukex11.3 @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +gnome-terminal -e '/usr/local/Nuke11.3v5/Nuke11.3 -nukex' diff --git a/pype/settings/defaults/system_settings/launchers/linux/nukex12.0 b/pype/settings/defaults/system_settings/launchers/linux/nukex12.0 new file mode 100644 index 0000000000..da2721c48b --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/linux/nukex12.0 @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +gnome-terminal -e '/usr/local/Nuke12.0v1/Nuke12.0 -nukex' diff --git a/pype/settings/defaults/system_settings/launchers/maya_2016.toml b/pype/settings/defaults/system_settings/launchers/maya_2016.toml new file mode 100644 index 0000000000..d69c4effaf --- /dev/null +++ 
b/pype/settings/defaults/system_settings/launchers/maya_2016.toml @@ -0,0 +1,26 @@ +application_dir = "maya" +default_dirs = [ + "scenes", + "data", + "renderData/shaders", + "images" +] +label = "Autodesk Maya 2016x64" +ftrack_label = "Maya" +schema = "avalon-core:application-1.0" +executable = "maya2016" +description = "" +icon ="maya_icon" +ftrack_icon = '{}/app_icons/maya.png' + +[copy] +"{PYPE_MODULE_ROOT}/pype/resources/maya/workspace.mel" = "workspace.mel" + +[environment] +MAYA_DISABLE_CLIC_IPM = "Yes" # Disable the AdSSO process +MAYA_DISABLE_CIP = "Yes" # Shorten time to boot +MAYA_DISABLE_CER = "Yes" +PYTHONPATH = [ + "{AVALON_CORE}/setup/maya", + "{PYTHONPATH}" +] diff --git a/pype/settings/defaults/system_settings/launchers/maya_2017.toml b/pype/settings/defaults/system_settings/launchers/maya_2017.toml new file mode 100644 index 0000000000..2d1c35b530 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/maya_2017.toml @@ -0,0 +1,28 @@ +application_dir = "maya" +default_dirs = [ + "scenes", + "data", + "renderData/shaders", + "images" +] +label = "Autodesk Maya 2017" +ftrack_label = "Maya" +schema = "avalon-core:application-1.0" +executable = "maya2017" +description = "" +icon ="maya_icon" +ftrack_icon = '{}/app_icons/maya.png' + +[copy] +"{PYPE_MODULE_ROOT}/pype/resources/maya/workspace.mel" = "workspace.mel" + +[environment] +MAYA_DISABLE_CLIC_IPM = "Yes" # Disable the AdSSO process +MAYA_DISABLE_CIP = "Yes" # Shorten time to boot +MAYA_DISABLE_CER = "Yes" +PYMEL_SKIP_MEL_INIT = "Yes" +LC_ALL= "C" # Mute color management warnings +PYTHONPATH = [ + "{AVALON_CORE}/setup/maya", + "{PYTHONPATH}" +] diff --git a/pype/settings/defaults/system_settings/launchers/maya_2018.toml b/pype/settings/defaults/system_settings/launchers/maya_2018.toml new file mode 100644 index 0000000000..f180263fa2 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/maya_2018.toml @@ -0,0 +1,14 @@ +application_dir = "maya" +default_dirs = [ + 
"renders" +] +label = "Autodesk Maya 2018" +ftrack_label = "Maya" +schema = "avalon-core:application-1.0" +executable = "maya2018" +description = "" +icon ="maya_icon" +ftrack_icon = '{}/app_icons/maya.png' + +[copy] +"{PYPE_MODULE_ROOT}/pype/resources/maya/workspace.mel" = "workspace.mel" diff --git a/pype/settings/defaults/system_settings/launchers/maya_2019.toml b/pype/settings/defaults/system_settings/launchers/maya_2019.toml new file mode 100644 index 0000000000..7ec2cbcedd --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/maya_2019.toml @@ -0,0 +1,14 @@ +application_dir = "maya" +default_dirs = [ + "renders" +] +label = "Autodesk Maya 2019" +ftrack_label = "Maya" +schema = "avalon-core:application-1.0" +executable = "maya2019" +description = "" +icon ="maya_icon" +ftrack_icon = '{}/app_icons/maya.png' + +[copy] +"{PYPE_MODULE_ROOT}/pype/resources/maya/workspace.mel" = "workspace.mel" diff --git a/pype/settings/defaults/system_settings/launchers/maya_2020.toml b/pype/settings/defaults/system_settings/launchers/maya_2020.toml new file mode 100644 index 0000000000..49d84ef9a0 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/maya_2020.toml @@ -0,0 +1,14 @@ +application_dir = "maya" +default_dirs = [ + "renders" +] +label = "Autodesk Maya 2020" +ftrack_label = "Maya" +schema = "avalon-core:application-1.0" +executable = "maya2020" +description = "" +icon ="maya_icon" +ftrack_icon = '{}/app_icons/maya.png' + +[copy] +"{PYPE_MODULE_ROOT}/pype/resources/maya/workspace.mel" = "workspace.mel" diff --git a/pype/settings/defaults/system_settings/launchers/mayabatch_2019.toml b/pype/settings/defaults/system_settings/launchers/mayabatch_2019.toml new file mode 100644 index 0000000000..a928618d2b --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/mayabatch_2019.toml @@ -0,0 +1,17 @@ +application_dir = "maya" +default_dirs = [ + "scenes", + "data", + "renderData/shaders", + "images" +] +label = "Autodesk Maya 2019x64" 
+schema = "avalon-core:application-1.0" +executable = "mayabatch2019" +description = "" + +[environment] +PYTHONPATH = [ + "{AVALON_CORE}/setup/maya", + "{PYTHONPATH}" +] diff --git a/pype/settings/defaults/system_settings/launchers/mayabatch_2020.toml b/pype/settings/defaults/system_settings/launchers/mayabatch_2020.toml new file mode 100644 index 0000000000..cd1e1e4474 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/mayabatch_2020.toml @@ -0,0 +1,17 @@ +application_dir = "maya" +default_dirs = [ + "scenes", + "data", + "renderData/shaders", + "images" +] +label = "Autodesk Maya 2020x64" +schema = "avalon-core:application-1.0" +executable = "mayabatch2020" +description = "" + +[environment] +PYTHONPATH = [ + "{AVALON_CORE}/setup/maya", + "{PYTHONPATH}" +] diff --git a/pype/settings/defaults/system_settings/launchers/mayapy2016.toml b/pype/settings/defaults/system_settings/launchers/mayapy2016.toml new file mode 100644 index 0000000000..ad1e3dee86 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/mayapy2016.toml @@ -0,0 +1,17 @@ +application_dir = "maya" +default_dirs = [ + "scenes", + "data", + "renderData/shaders", + "images" +] +label = "Autodesk Maya 2016x64" +schema = "avalon-core:application-1.0" +executable = "mayapy2016" +description = "" + +[environment] +PYTHONPATH = [ + "{AVALON_CORE}/setup/maya", + "{PYTHONPATH}" +] diff --git a/pype/settings/defaults/system_settings/launchers/mayapy2017.toml b/pype/settings/defaults/system_settings/launchers/mayapy2017.toml new file mode 100644 index 0000000000..8d2095ff47 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/mayapy2017.toml @@ -0,0 +1,17 @@ +application_dir = "maya" +default_dirs = [ + "scenes", + "data", + "renderData/shaders", + "images" +] +label = "Autodesk Maya 2017x64" +schema = "avalon-core:application-1.0" +executable = "mayapy2017" +description = "" + +[environment] +PYTHONPATH = [ + "{AVALON_CORE}/setup/maya", + "{PYTHONPATH}" +] diff 
--git a/pype/settings/defaults/system_settings/launchers/mayapy2018.toml b/pype/settings/defaults/system_settings/launchers/mayapy2018.toml new file mode 100644 index 0000000000..597744fd85 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/mayapy2018.toml @@ -0,0 +1,17 @@ +application_dir = "maya" +default_dirs = [ + "scenes", + "data", + "renderData/shaders", + "images" +] +label = "Autodesk Maya 2018x64" +schema = "avalon-core:application-1.0" +executable = "mayapy2018" +description = "" + +[environment] +PYTHONPATH = [ + "{AVALON_CORE}/setup/maya", + "{PYTHONPATH}" +] diff --git a/pype/settings/defaults/system_settings/launchers/mayapy2019.toml b/pype/settings/defaults/system_settings/launchers/mayapy2019.toml new file mode 100644 index 0000000000..3c8a9860f9 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/mayapy2019.toml @@ -0,0 +1,17 @@ +application_dir = "maya" +default_dirs = [ + "scenes", + "data", + "renderData/shaders", + "images" +] +label = "Autodesk Maya 2019x64" +schema = "avalon-core:application-1.0" +executable = "mayapy2019" +description = "" + +[environment] +PYTHONPATH = [ + "{AVALON_CORE}/setup/maya", + "{PYTHONPATH}" +] diff --git a/pype/settings/defaults/system_settings/launchers/mayapy2020.toml b/pype/settings/defaults/system_settings/launchers/mayapy2020.toml new file mode 100644 index 0000000000..8f2d2e4a67 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/mayapy2020.toml @@ -0,0 +1,17 @@ +application_dir = "maya" +default_dirs = [ + "scenes", + "data", + "renderData/shaders", + "images" +] +label = "Autodesk Maya 2020x64" +schema = "avalon-core:application-1.0" +executable = "mayapy2020" +description = "" + +[environment] +PYTHONPATH = [ + "{AVALON_CORE}/setup/maya", + "{PYTHONPATH}" +] diff --git a/pype/settings/defaults/system_settings/launchers/myapp.toml b/pype/settings/defaults/system_settings/launchers/myapp.toml new file mode 100644 index 0000000000..21da0d52b2 --- /dev/null
+++ b/pype/settings/defaults/system_settings/launchers/myapp.toml @@ -0,0 +1,5 @@ +executable = "python" +schema = "avalon-core:application-1.0" +application_dir = "myapp" +label = "My App" +arguments = [ "-c", "import sys\nfrom Qt import QtWidgets\napp = QtWidgets.QApplication(sys.argv)\nwindow = QtWidgets.QWidget()\nwindow.setWindowTitle(\"My App\")\nwindow.resize(400, 300)\nwindow.show()\napp.exec_()\n",] \ No newline at end of file diff --git a/pype/settings/defaults/system_settings/launchers/nuke_10.0.toml b/pype/settings/defaults/system_settings/launchers/nuke_10.0.toml new file mode 100644 index 0000000000..2195fd3e82 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/nuke_10.0.toml @@ -0,0 +1,7 @@ +executable = "nuke10.0" +schema = "avalon-core:application-1.0" +application_dir = "nuke" +label = "Nuke 10.0v4" +ftrack_label = "Nuke" +icon ="nuke_icon" +ftrack_icon = '{}/app_icons/nuke.png' diff --git a/pype/settings/defaults/system_settings/launchers/nuke_11.0.toml b/pype/settings/defaults/system_settings/launchers/nuke_11.0.toml new file mode 100644 index 0000000000..0c981b479a --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/nuke_11.0.toml @@ -0,0 +1,7 @@ +executable = "nuke11.0" +schema = "avalon-core:application-1.0" +application_dir = "nuke" +label = "Nuke 11.0" +ftrack_label = "Nuke" +icon ="nuke_icon" +ftrack_icon = '{}/app_icons/nuke.png' diff --git a/pype/settings/defaults/system_settings/launchers/nuke_11.2.toml b/pype/settings/defaults/system_settings/launchers/nuke_11.2.toml new file mode 100644 index 0000000000..57c962d126 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/nuke_11.2.toml @@ -0,0 +1,7 @@ +executable = "nuke11.2" +schema = "avalon-core:application-1.0" +application_dir = "nuke" +label = "Nuke 11.2" +ftrack_label = "Nuke" +icon ="nuke_icon" +ftrack_icon = '{}/app_icons/nuke.png' diff --git
a/pype/settings/defaults/system_settings/launchers/nuke_11.3.toml b/pype/settings/defaults/system_settings/launchers/nuke_11.3.toml new file mode 100644 index 0000000000..87f769c23b --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/nuke_11.3.toml @@ -0,0 +1,7 @@ +executable = "nuke11.3" +schema = "avalon-core:application-1.0" +application_dir = "nuke" +label = "Nuke 11.3" +ftrack_label = "Nuke" +icon ="nuke_icon" +ftrack_icon = '{}/app_icons/nuke.png' diff --git a/pype/settings/defaults/system_settings/launchers/nuke_12.0.toml b/pype/settings/defaults/system_settings/launchers/nuke_12.0.toml new file mode 100644 index 0000000000..62936b4cdb --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/nuke_12.0.toml @@ -0,0 +1,7 @@ +executable = "nuke12.0" +schema = "avalon-core:application-1.0" +application_dir = "nuke" +label = "Nuke 12.0" +ftrack_label = "Nuke" +icon ="nuke_icon" +ftrack_icon = '{}/app_icons/nuke.png' diff --git a/pype/settings/defaults/system_settings/launchers/nukestudio_10.0.toml b/pype/settings/defaults/system_settings/launchers/nukestudio_10.0.toml new file mode 100644 index 0000000000..41601e4d40 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/nukestudio_10.0.toml @@ -0,0 +1,7 @@ +executable = "nukestudio10.0" +schema = "avalon-core:application-1.0" +application_dir = "nukestudio" +label = "NukeStudio 10.0" +ftrack_label = "NukeStudio" +icon ="nuke_icon" +ftrack_icon = '{}/app_icons/nuke.png' diff --git a/pype/settings/defaults/system_settings/launchers/nukestudio_11.0.toml b/pype/settings/defaults/system_settings/launchers/nukestudio_11.0.toml new file mode 100644 index 0000000000..7a9d84707a --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/nukestudio_11.0.toml @@ -0,0 +1,7 @@ +executable = "nukestudio11.0" +schema = "avalon-core:application-1.0" +application_dir = "nukestudio" +label = "NukeStudio 11.0" +ftrack_label = "NukeStudio" +icon ="nuke_icon" +ftrack_icon = 
'{}/app_icons/nuke.png' diff --git a/pype/settings/defaults/system_settings/launchers/nukestudio_11.2.toml b/pype/settings/defaults/system_settings/launchers/nukestudio_11.2.toml new file mode 100644 index 0000000000..21557033ca --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/nukestudio_11.2.toml @@ -0,0 +1,7 @@ +executable = "nukestudio11.2" +schema = "avalon-core:application-1.0" +application_dir = "nukestudio" +label = "NukeStudio 11.2" +ftrack_label = "NukeStudio" +icon ="nuke_icon" +ftrack_icon = '{}/app_icons/nuke.png' diff --git a/pype/settings/defaults/system_settings/launchers/nukestudio_11.3.toml b/pype/settings/defaults/system_settings/launchers/nukestudio_11.3.toml new file mode 100644 index 0000000000..1946ad6c3b --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/nukestudio_11.3.toml @@ -0,0 +1,7 @@ +executable = "nukestudio11.3" +schema = "avalon-core:application-1.0" +application_dir = "nukestudio" +label = "NukeStudio 11.3" +ftrack_label = "NukeStudio" +icon ="nuke_icon" +ftrack_icon = '{}/app_icons/nuke.png' diff --git a/pype/settings/defaults/system_settings/launchers/nukestudio_12.0.toml b/pype/settings/defaults/system_settings/launchers/nukestudio_12.0.toml new file mode 100644 index 0000000000..4ce7f9b538 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/nukestudio_12.0.toml @@ -0,0 +1,7 @@ +executable = "nukestudio12.0" +schema = "avalon-core:application-1.0" +application_dir = "nukestudio" +label = "NukeStudio 12.0" +ftrack_label = "NukeStudio" +icon ="nuke_icon" +ftrack_icon = '{}/app_icons/nuke.png' diff --git a/pype/settings/defaults/system_settings/launchers/nukex_10.0.toml b/pype/settings/defaults/system_settings/launchers/nukex_10.0.toml new file mode 100644 index 0000000000..7dee22996d --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/nukex_10.0.toml @@ -0,0 +1,7 @@ +executable = "nukex10.0" +schema = "avalon-core:application-1.0" +application_dir = "nuke" 
+label = "NukeX 10.0" +ftrack_label = "NukeX" +icon ="nuke_icon" +ftrack_icon = '{}/app_icons/nukex.png' diff --git a/pype/settings/defaults/system_settings/launchers/nukex_11.0.toml b/pype/settings/defaults/system_settings/launchers/nukex_11.0.toml new file mode 100644 index 0000000000..c2b4970a26 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/nukex_11.0.toml @@ -0,0 +1,7 @@ +executable = "nukex11.0" +schema = "avalon-core:application-1.0" +application_dir = "nuke" +label = "NukeX 11.0" +ftrack_label = "NukeX" +icon ="nuke_icon" +ftrack_icon = '{}/app_icons/nukex.png' diff --git a/pype/settings/defaults/system_settings/launchers/nukex_11.2.toml b/pype/settings/defaults/system_settings/launchers/nukex_11.2.toml new file mode 100644 index 0000000000..3857b9995c --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/nukex_11.2.toml @@ -0,0 +1,7 @@ +executable = "nukex11.2" +schema = "avalon-core:application-1.0" +application_dir = "nuke" +label = "NukeX 11.2" +ftrack_label = "NukeX" +icon ="nuke_icon" +ftrack_icon = '{}/app_icons/nukex.png' diff --git a/pype/settings/defaults/system_settings/launchers/nukex_11.3.toml b/pype/settings/defaults/system_settings/launchers/nukex_11.3.toml new file mode 100644 index 0000000000..56428470eb --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/nukex_11.3.toml @@ -0,0 +1,7 @@ +executable = "nukex11.3" +schema = "avalon-core:application-1.0" +application_dir = "nuke" +label = "NukeX 11.3" +ftrack_label = "NukeX" +icon ="nuke_icon" +ftrack_icon = '{}/app_icons/nukex.png' diff --git a/pype/settings/defaults/system_settings/launchers/nukex_12.0.toml b/pype/settings/defaults/system_settings/launchers/nukex_12.0.toml new file mode 100644 index 0000000000..33d7fddb88 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/nukex_12.0.toml @@ -0,0 +1,7 @@ +executable = "nukex12.0" +schema = "avalon-core:application-1.0" +application_dir = "nuke" +label = "NukeX 12.0"
+ftrack_label = "NukeX" +icon ="nuke_icon" +ftrack_icon = '{}/app_icons/nukex.png' diff --git a/pype/settings/defaults/system_settings/launchers/photoshop_2020.toml b/pype/settings/defaults/system_settings/launchers/photoshop_2020.toml new file mode 100644 index 0000000000..117b668232 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/photoshop_2020.toml @@ -0,0 +1,8 @@ +executable = "photoshop_2020" +schema = "avalon-core:application-1.0" +application_dir = "photoshop" +label = "Adobe Photoshop 2020" +icon ="photoshop_icon" +ftrack_label = "Photoshop" +ftrack_icon = '{}/app_icons/photoshop.png' +launch_hook = "pype/hooks/photoshop/prelaunch.py/PhotoshopPrelaunch" diff --git a/pype/settings/defaults/system_settings/launchers/premiere_2019.toml b/pype/settings/defaults/system_settings/launchers/premiere_2019.toml new file mode 100644 index 0000000000..f4c19c62cb --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/premiere_2019.toml @@ -0,0 +1,8 @@ +executable = "premiere_pro_2019" +schema = "avalon-core:application-1.0" +application_dir = "premiere" +label = "Adobe Premiere Pro CC 2019" +icon ="premiere_icon" + +ftrack_label = "Premiere" +ftrack_icon = '{}/app_icons/premiere.png' diff --git a/pype/settings/defaults/system_settings/launchers/premiere_2020.toml b/pype/settings/defaults/system_settings/launchers/premiere_2020.toml new file mode 100644 index 0000000000..4d721c898f --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/premiere_2020.toml @@ -0,0 +1,9 @@ +executable = "premiere_pro_2020" +schema = "avalon-core:application-1.0" +application_dir = "premiere" +label = "Adobe Premiere Pro CC 2020" +launch_hook = "pype/hooks/premiere/prelaunch.py/PremierePrelaunch" +icon ="premiere_icon" + +ftrack_label = "Premiere" +ftrack_icon = '{}/app_icons/premiere.png' diff --git a/pype/settings/defaults/system_settings/launchers/python_2.toml b/pype/settings/defaults/system_settings/launchers/python_2.toml new file mode 
100644 index 0000000000..e9e8dd7899 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/python_2.toml @@ -0,0 +1,10 @@ +schema = "avalon-core:application-1.0" +application_dir = "python" +executable = "python" +label = "Python 2" +ftrack_label = "Python" +icon ="python_icon" +ftrack_icon = '{}/app_icons/python.png' + +[environment] +CREATE_NEW_CONSOLE = "Yes" diff --git a/pype/settings/defaults/system_settings/launchers/python_3.toml b/pype/settings/defaults/system_settings/launchers/python_3.toml new file mode 100644 index 0000000000..5cbd8b2943 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/python_3.toml @@ -0,0 +1,10 @@ +schema = "avalon-core:application-1.0" +application_dir = "python" +executable = "python3" +label = "Python 3" +ftrack_label = "Python" +icon ="python_icon" +ftrack_icon = '{}/app_icons/python.png' + +[environment] +CREATE_NEW_CONSOLE = "Yes" diff --git a/pype/settings/defaults/system_settings/launchers/resolve_16.toml b/pype/settings/defaults/system_settings/launchers/resolve_16.toml new file mode 100644 index 0000000000..430fd1a638 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/resolve_16.toml @@ -0,0 +1,9 @@ +executable = "resolve_16" +schema = "avalon-core:application-1.0" +application_dir = "resolve" +label = "BM DaVinci Resolve 16" +launch_hook = "pype/hooks/resolve/prelaunch.py/ResolvePrelaunch" +icon ="resolve" + +ftrack_label = "BM DaVinci Resolve" +ftrack_icon = '{}/app_icons/resolve.png' diff --git a/pype/settings/defaults/system_settings/launchers/shell.toml b/pype/settings/defaults/system_settings/launchers/shell.toml new file mode 100644 index 0000000000..959ad392ea --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/shell.toml @@ -0,0 +1,7 @@ +schema = "avalon-core:application-1.0" +application_dir = "shell" +executable = "shell" +label = "Shell" + +[environment] +CREATE_NEW_CONSOLE = "Yes" \ No newline at end of file diff --git 
a/pype/settings/defaults/system_settings/launchers/storyboardpro_7.toml b/pype/settings/defaults/system_settings/launchers/storyboardpro_7.toml new file mode 100644 index 0000000000..ce8e96a49d --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/storyboardpro_7.toml @@ -0,0 +1,8 @@ +application_dir = "storyboardpro" +label = "Storyboard Pro 7" +ftrack_label = "Storyboard Pro" +schema = "avalon-core:application-1.0" +executable = "storyboardpro_7" +description = "" +icon ="storyboardpro_icon" +ftrack_icon = '{}/app_icons/storyboardpro.png' diff --git a/pype/settings/defaults/system_settings/launchers/unreal_4.24.toml b/pype/settings/defaults/system_settings/launchers/unreal_4.24.toml new file mode 100644 index 0000000000..0a799e5dcb --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/unreal_4.24.toml @@ -0,0 +1,8 @@ +executable = "unreal" +schema = "avalon-core:application-1.0" +application_dir = "unreal" +label = "Unreal Editor 4.24" +ftrack_label = "UnrealEditor" +icon ="ue4_icon" +launch_hook = "pype/hooks/unreal/unreal_prelaunch.py/UnrealPrelaunch" +ftrack_icon = '{}/app_icons/ue4.png' diff --git a/pype/settings/defaults/system_settings/launchers/windows/blender_2.80.bat b/pype/settings/defaults/system_settings/launchers/windows/blender_2.80.bat new file mode 100644 index 0000000000..5b8a37356b --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/blender_2.80.bat @@ -0,0 +1,11 @@ +set __app__="Blender" +set __exe__="C:\Program Files\Blender Foundation\Blender 2.80\blender.exe" +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/blender_2.81.bat b/pype/settings/defaults/system_settings/launchers/windows/blender_2.81.bat new file mode 100644 index 0000000000..a900b18eda --- /dev/null +++ 
b/pype/settings/defaults/system_settings/launchers/windows/blender_2.81.bat @@ -0,0 +1,11 @@ +set __app__="Blender" +set __exe__="C:\Program Files\Blender Foundation\Blender 2.81\blender.exe" +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/blender_2.82.bat b/pype/settings/defaults/system_settings/launchers/windows/blender_2.82.bat new file mode 100644 index 0000000000..7105c1efe1 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/blender_2.82.bat @@ -0,0 +1,11 @@ +set __app__="Blender" +set __exe__="C:\Program Files\Blender Foundation\Blender 2.82\blender.exe" --python-use-system-env +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/blender_2.83.bat b/pype/settings/defaults/system_settings/launchers/windows/blender_2.83.bat new file mode 100644 index 0000000000..671952f0d7 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/blender_2.83.bat @@ -0,0 +1,11 @@ +set __app__="Blender" +set __exe__="C:\Program Files\Blender Foundation\Blender 2.83\blender.exe" --python-use-system-env +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/celaction_local.bat b/pype/settings/defaults/system_settings/launchers/windows/celaction_local.bat new file mode 100644 index 0000000000..8f2171617e --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/celaction_local.bat @@ -0,0 +1,19 @@ +set __app__="CelAction2D" +set __app_dir__="C:\Program Files (x86)\CelAction\" +set 
__exe__="C:\Program Files (x86)\CelAction\CelAction2D.exe" + +if not exist %__exe__% goto :missing_app + +pushd %__app_dir__% + +if "%PYPE_CELACTION_PROJECT_FILE%"=="" ( + start %__app__% %__exe__% %* +) else ( + start %__app__% %__exe__% "%PYPE_CELACTION_PROJECT_FILE%" %* +) + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/celaction_publish.bat b/pype/settings/defaults/system_settings/launchers/windows/celaction_publish.bat new file mode 100644 index 0000000000..77ec2ac24e --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/celaction_publish.bat @@ -0,0 +1,3 @@ +echo %* + +%PYPE_PYTHON_EXE% "%PYPE_MODULE_ROOT%\pype\hosts\celaction\cli.py" %* diff --git a/pype/settings/defaults/system_settings/launchers/windows/harmony_17.bat b/pype/settings/defaults/system_settings/launchers/windows/harmony_17.bat new file mode 100644 index 0000000000..0822650875 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/harmony_17.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="Harmony 17" +set __exe__="C:/Program Files (x86)/Toon Boom Animation/Toon Boom Harmony 17 Premium/win64/bin/HarmonyPremium.exe" +if not exist %__exe__% goto :missing_app + +start %__app__% cmd.exe /k "python -c ^"import avalon.harmony;avalon.harmony.launch("%__exe__%")^"" + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/houdini_16.bat b/pype/settings/defaults/system_settings/launchers/windows/houdini_16.bat new file mode 100644 index 0000000000..018ba08b4c --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/houdini_16.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="Houdini 16.0" +set __exe__="C:\Program Files\Side Effects Software\Houdini 16.0.621\bin\houdini.exe" +if not exist %__exe__% goto :missing_app + +start %__app__% 
%__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/houdini_17.bat b/pype/settings/defaults/system_settings/launchers/windows/houdini_17.bat new file mode 100644 index 0000000000..950a599623 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/houdini_17.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="Houdini 17.0" +set __exe__="C:\Program Files\Side Effects Software\Houdini 17.0.459\bin\houdini.exe" +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/houdini_18.bat b/pype/settings/defaults/system_settings/launchers/windows/houdini_18.bat new file mode 100644 index 0000000000..3d6b1ae258 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/houdini_18.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="Houdini 18.0" +set __exe__="C:\Program Files\Side Effects Software\Houdini 18.0.287\bin\houdini.exe" +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/maya2016.bat b/pype/settings/defaults/system_settings/launchers/windows/maya2016.bat new file mode 100644 index 0000000000..54f15cf269 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/maya2016.bat @@ -0,0 +1,17 @@ +@echo off + +set __app__="Maya 2016" +set __exe__="C:\Program Files\Autodesk\Maya2016\bin\maya.exe" +if not exist %__exe__% goto :missing_app + +if "%AVALON_LAST_WORKFILE%"=="" ( + start %__app__% %__exe__% %* +) else ( + start %__app__% %__exe__% -file "%AVALON_LAST_WORKFILE%" %* +) + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit 
/B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/maya2017.bat b/pype/settings/defaults/system_settings/launchers/windows/maya2017.bat new file mode 100644 index 0000000000..5c2aeb495c --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/maya2017.bat @@ -0,0 +1,17 @@ +@echo off + +set __app__="Maya 2017" +set __exe__="C:\Program Files\Autodesk\Maya2017\bin\maya.exe" +if not exist %__exe__% goto :missing_app + +if "%AVALON_LAST_WORKFILE%"=="" ( + start %__app__% %__exe__% %* +) else ( + start %__app__% %__exe__% -file "%AVALON_LAST_WORKFILE%" %* +) + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/maya2018.bat b/pype/settings/defaults/system_settings/launchers/windows/maya2018.bat new file mode 100644 index 0000000000..28cf776c77 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/maya2018.bat @@ -0,0 +1,17 @@ +@echo off + +set __app__="Maya 2018" +set __exe__="C:\Program Files\Autodesk\Maya2018\bin\maya.exe" +if not exist %__exe__% goto :missing_app + +if "%AVALON_LAST_WORKFILE%"=="" ( + start %__app__% %__exe__% %* +) else ( + start %__app__% %__exe__% -file "%AVALON_LAST_WORKFILE%" %* +) + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/maya2019.bat b/pype/settings/defaults/system_settings/launchers/windows/maya2019.bat new file mode 100644 index 0000000000..7e80dd2557 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/maya2019.bat @@ -0,0 +1,17 @@ +@echo off + +set __app__="Maya 2019" +set __exe__="C:\Program Files\Autodesk\Maya2019\bin\maya.exe" +if not exist %__exe__% goto :missing_app + +if "%AVALON_LAST_WORKFILE%"=="" ( + start %__app__% %__exe__% %* +) else ( + start %__app__% %__exe__% -file "%AVALON_LAST_WORKFILE%" %* +) + +goto :eof + 
+:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/maya2020.bat b/pype/settings/defaults/system_settings/launchers/windows/maya2020.bat new file mode 100644 index 0000000000..b2acb5df5a --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/maya2020.bat @@ -0,0 +1,17 @@ +@echo off + +set __app__="Maya 2020" +set __exe__="C:\Program Files\Autodesk\maya2020\bin\maya.exe" +if not exist %__exe__% goto :missing_app + +if "%AVALON_LAST_WORKFILE%"=="" ( + start %__app__% %__exe__% %* +) else ( + start %__app__% %__exe__% -file "%AVALON_LAST_WORKFILE%" %* +) + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/mayabatch2019.bat b/pype/settings/defaults/system_settings/launchers/windows/mayabatch2019.bat new file mode 100644 index 0000000000..ddd9b9b956 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/mayabatch2019.bat @@ -0,0 +1,14 @@ +@echo off + +set __app__="Maya Batch 2019" +set __exe__="C:\Program Files\Autodesk\Maya2019\bin\mayabatch.exe" +if not exist %__exe__% goto :missing_app + +echo "running maya : %*" +%__exe__% %* +echo "done." +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/mayabatch2020.bat b/pype/settings/defaults/system_settings/launchers/windows/mayabatch2020.bat new file mode 100644 index 0000000000..b1cbc6dbb6 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/mayabatch2020.bat @@ -0,0 +1,14 @@ +@echo off + +set __app__="Maya Batch 2020" +set __exe__="C:\Program Files\Autodesk\Maya2020\bin\mayabatch.exe" +if not exist %__exe__% goto :missing_app + +echo "running maya : %*" +%__exe__% %* +echo "done." 
+goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/mayapy2016.bat b/pype/settings/defaults/system_settings/launchers/windows/mayapy2016.bat new file mode 100644 index 0000000000..205991fd3d --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/mayapy2016.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="Mayapy 2016" +set __exe__="C:\Program Files\Autodesk\Maya2016\bin\mayapy.exe" +if not exist %__exe__% goto :missing_app + +call %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found at %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/mayapy2017.bat b/pype/settings/defaults/system_settings/launchers/windows/mayapy2017.bat new file mode 100644 index 0000000000..14aacc5a7f --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/mayapy2017.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="Mayapy 2017" +set __exe__="C:\Program Files\Autodesk\Maya2017\bin\mayapy.exe" +if not exist %__exe__% goto :missing_app + +call %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found at %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/mayapy2018.bat b/pype/settings/defaults/system_settings/launchers/windows/mayapy2018.bat new file mode 100644 index 0000000000..c47c472f46 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/mayapy2018.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="Mayapy 2018" +set __exe__="C:\Program Files\Autodesk\Maya2018\bin\mayapy.exe" +if not exist %__exe__% goto :missing_app + +call %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found at %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/mayapy2019.bat b/pype/settings/defaults/system_settings/launchers/windows/mayapy2019.bat new file mode 100644 index 
0000000000..73ca5b2d40 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/mayapy2019.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="Mayapy 2019" +set __exe__="C:\Program Files\Autodesk\Maya2019\bin\mayapy.exe" +if not exist %__exe__% goto :missing_app + +call %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found at %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/mayapy2020.bat b/pype/settings/defaults/system_settings/launchers/windows/mayapy2020.bat new file mode 100644 index 0000000000..770a03dcf5 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/mayapy2020.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="Mayapy 2020" +set __exe__="C:\Program Files\Autodesk\Maya2020\bin\mayapy.exe" +if not exist %__exe__% goto :missing_app + +call %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found at %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/nuke10.0.bat b/pype/settings/defaults/system_settings/launchers/windows/nuke10.0.bat new file mode 100644 index 0000000000..a47cbdfb20 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/nuke10.0.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="Nuke10.0v4" +set __exe__="C:\Program Files\Nuke10.0v4\Nuke10.0.exe" +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/nuke11.0.bat b/pype/settings/defaults/system_settings/launchers/windows/nuke11.0.bat new file mode 100644 index 0000000000..a374c5cf5b --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/nuke11.0.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="Nuke11.0v4" +set __exe__="C:\Program Files\Nuke11.0v4\Nuke11.0.exe" +if not exist %__exe__% goto :missing_app + +start %__app__%
%__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/nuke11.2.bat b/pype/settings/defaults/system_settings/launchers/windows/nuke11.2.bat new file mode 100644 index 0000000000..4c777ac28c --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/nuke11.2.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="Nuke11.2v3" +set __exe__="C:\Program Files\Nuke11.2v3\Nuke11.2.exe" +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/nuke11.3.bat b/pype/settings/defaults/system_settings/launchers/windows/nuke11.3.bat new file mode 100644 index 0000000000..a023f5f46f --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/nuke11.3.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="Nuke11.3v1" +set __exe__="C:\Program Files\Nuke11.3v1\Nuke11.3.exe" +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/nuke12.0.bat b/pype/settings/defaults/system_settings/launchers/windows/nuke12.0.bat new file mode 100644 index 0000000000..d8fb5772bb --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/nuke12.0.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="Nuke12.0v1" +set __exe__="C:\Program Files\Nuke12.0v1\Nuke12.0.exe" +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/nukestudio10.0.bat b/pype/settings/defaults/system_settings/launchers/windows/nukestudio10.0.bat new file mode 100644 
index 0000000000..82f833667c --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/nukestudio10.0.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="NukeStudio10.0v4" +set __exe__="C:\Program Files\Nuke10.0v4\Nuke10.0.exe" --studio +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/nukestudio11.0.bat b/pype/settings/defaults/system_settings/launchers/windows/nukestudio11.0.bat new file mode 100644 index 0000000000..b66797727e --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/nukestudio11.0.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="NukeStudio11.0v4" +set __exe__="C:\Program Files\Nuke11.0v4\Nuke11.0.exe" --studio +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/nukestudio11.2.bat b/pype/settings/defaults/system_settings/launchers/windows/nukestudio11.2.bat new file mode 100644 index 0000000000..a653d816b4 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/nukestudio11.2.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="NukeStudio11.2v3" +set __exe__="C:\Program Files\Nuke11.2v3\Nuke11.2.exe" --studio +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/nukestudio11.3.bat b/pype/settings/defaults/system_settings/launchers/windows/nukestudio11.3.bat new file mode 100644 index 0000000000..62c8718873 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/nukestudio11.3.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="NukeStudio11.3v1" +set
__exe__="C:\Program Files\Nuke11.3v1\Nuke11.3.exe" --studio +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/nukestudio12.0.bat b/pype/settings/defaults/system_settings/launchers/windows/nukestudio12.0.bat new file mode 100644 index 0000000000..488232bcbf --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/nukestudio12.0.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="NukeStudio12.0v1" +set __exe__="C:\Program Files\Nuke12.0v1\Nuke12.0.exe" --studio +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/nukex10.0.bat b/pype/settings/defaults/system_settings/launchers/windows/nukex10.0.bat new file mode 100644 index 0000000000..1759706a7b --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/nukex10.0.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="NukeX10.0v4" +set __exe__="C:\Program Files\Nuke10.0v4\Nuke10.0.exe" --nukex +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/nukex11.0.bat b/pype/settings/defaults/system_settings/launchers/windows/nukex11.0.bat new file mode 100644 index 0000000000..b554a7b6fa --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/nukex11.0.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="NukeX11.0v4" +set __exe__="C:\Program Files\Nuke11.0v4\Nuke11.0.exe" --nukex +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git
a/pype/settings/defaults/system_settings/launchers/windows/nukex11.2.bat b/pype/settings/defaults/system_settings/launchers/windows/nukex11.2.bat new file mode 100644 index 0000000000..a4cb5dec5c --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/nukex11.2.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="NukeX11.2v3" +set __exe__="C:\Program Files\Nuke11.2v3\Nuke11.2.exe" --nukex +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/nukex11.3.bat b/pype/settings/defaults/system_settings/launchers/windows/nukex11.3.bat new file mode 100644 index 0000000000..490b55cf4c --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/nukex11.3.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="NukeX11.3v1" +set __exe__="C:\Program Files\Nuke11.3v1\Nuke11.3.exe" --nukex +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/nukex12.0.bat b/pype/settings/defaults/system_settings/launchers/windows/nukex12.0.bat new file mode 100644 index 0000000000..26adf0d3f1 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/nukex12.0.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="NukeX12.0v1" +set __exe__="C:\Program Files\Nuke12.0v1\Nuke12.0.exe" --nukex +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/photoshop_2020.bat b/pype/settings/defaults/system_settings/launchers/windows/photoshop_2020.bat new file mode 100644 index 0000000000..6b90922ef6 --- /dev/null +++ 
b/pype/settings/defaults/system_settings/launchers/windows/photoshop_2020.bat @@ -0,0 +1,15 @@ +@echo off + +set __app__="Photoshop 2020" +set __exe__="C:\Program Files\Adobe\Adobe Photoshop 2020\Photoshop.exe" +if not exist %__exe__% goto :missing_app + +start %__app__% cmd.exe /k "%PYPE_PYTHON_EXE% -c ^"import avalon.photoshop;avalon.photoshop.launch("%__exe__%")^"" + +goto :eof + +pause + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/premiere_pro_2019.bat b/pype/settings/defaults/system_settings/launchers/windows/premiere_pro_2019.bat new file mode 100644 index 0000000000..4886737d2f --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/premiere_pro_2019.bat @@ -0,0 +1,14 @@ +@echo off + +set __app__="Adobe Premiere Pro" +set __exe__="C:\Program Files\Adobe\Adobe Premiere Pro CC 2019\Adobe Premiere Pro.exe" +if not exist %__exe__% goto :missing_app + +python -u %PREMIERA_PATH%\init.py +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/premiere_pro_2020.bat b/pype/settings/defaults/system_settings/launchers/windows/premiere_pro_2020.bat new file mode 100644 index 0000000000..14662d3be3 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/premiere_pro_2020.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="Adobe Premiere Pro" +set __exe__="C:\Program Files\Adobe\Adobe Premiere Pro 2020\Adobe Premiere Pro.exe" +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/python3.bat b/pype/settings/defaults/system_settings/launchers/windows/python3.bat new file mode 100644 index 0000000000..c7c116fe72 --- /dev/null +++ 
b/pype/settings/defaults/system_settings/launchers/windows/python3.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="Python36" +set __exe__="C:\Python36\python.exe" +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/resolve_16.bat b/pype/settings/defaults/system_settings/launchers/windows/resolve_16.bat new file mode 100644 index 0000000000..1a5d964e6b --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/resolve_16.bat @@ -0,0 +1,17 @@ +@echo off + +set __app__="Resolve" +set __appy__="Resolve Python Console" +set __exe__="C:/Program Files/Blackmagic Design/DaVinci Resolve/Resolve.exe" +set __py__="%PYTHON36_RESOLVE%/python.exe" + +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %* +IF "%RESOLVE_DEV%"=="True" (start %__appy__% %__py__% -i %PRE_PYTHON_SCRIPT%) + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/shell.bat b/pype/settings/defaults/system_settings/launchers/windows/shell.bat new file mode 100644 index 0000000000..eb0895364f --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/shell.bat @@ -0,0 +1,2 @@ +@echo off +start cmd diff --git a/pype/settings/defaults/system_settings/launchers/windows/storyboardpro_7.bat b/pype/settings/defaults/system_settings/launchers/windows/storyboardpro_7.bat new file mode 100644 index 0000000000..122edac572 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/storyboardpro_7.bat @@ -0,0 +1,13 @@ +@echo off + +set __app__="Storyboard Pro 7" +set __exe__="C:/Program Files (x86)/Toon Boom Animation/Toon Boom Storyboard Pro 7/win64/bin/StoryboardPro.exe" +if not exist %__exe__% goto :missing_app + +start %__app__% cmd.exe /k "python -c ^"import 
avalon.storyboardpro;avalon.storyboardpro.launch("%__exe__%")^"" + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/launchers/windows/unreal.bat b/pype/settings/defaults/system_settings/launchers/windows/unreal.bat new file mode 100644 index 0000000000..7771aaa5a5 --- /dev/null +++ b/pype/settings/defaults/system_settings/launchers/windows/unreal.bat @@ -0,0 +1,11 @@ +set __app__="Unreal Editor" +set __exe__="%AVALON_CURRENT_UNREAL_ENGINE%\Engine\Binaries\Win64\UE4Editor.exe" +if not exist %__exe__% goto :missing_app + +start %__app__% %__exe__% %PYPE_UNREAL_PROJECT_FILE% %* + +goto :eof + +:missing_app + echo ERROR: %__app__% not found in %__exe__% + exit /B 1 diff --git a/pype/settings/defaults/system_settings/muster/templates_mapping.json b/pype/settings/defaults/system_settings/muster/templates_mapping.json new file mode 100644 index 0000000000..0c09113515 --- /dev/null +++ b/pype/settings/defaults/system_settings/muster/templates_mapping.json @@ -0,0 +1,19 @@ +{ + "3delight": 41, + "arnold": 46, + "arnold_sf": 57, + "gelato": 30, + "harware": 3, + "krakatoa": 51, + "file_layers": 7, + "mentalray": 2, + "mentalray_sf": 6, + "redshift": 55, + "renderman": 29, + "software": 1, + "software_sf": 5, + "turtle": 10, + "vector": 4, + "vray": 37, + "ffmpeg": 48 +} \ No newline at end of file diff --git a/pype/settings/defaults/system_settings/standalone_publish/families.json b/pype/settings/defaults/system_settings/standalone_publish/families.json new file mode 100644 index 0000000000..d05941cc26 --- /dev/null +++ b/pype/settings/defaults/system_settings/standalone_publish/families.json @@ -0,0 +1,90 @@ +{ + "create_look": { + "name": "look", + "label": "Look", + "family": "look", + "icon": "paint-brush", + "defaults": ["Main"], + "help": "Shader connections defining shape look" + }, + "create_model": { + "name": "model", + "label": "Model", + "family": "model", + "icon": "cube", + 
"defaults": ["Main", "Proxy", "Sculpt"], + "help": "Polygonal static geometry" + }, + "create_workfile": { + "name": "workfile", + "label": "Workfile", + "family": "workfile", + "icon": "cube", + "defaults": ["Main"], + "help": "Working scene backup" + }, + "create_camera": { + "name": "camera", + "label": "Camera", + "family": "camera", + "icon": "video-camera", + "defaults": ["Main"], + "help": "Single baked camera" + }, + "create_pointcache": { + "name": "pointcache", + "label": "Pointcache", + "family": "pointcache", + "icon": "gears", + "defaults": ["Main"], + "help": "Alembic pointcache for animated data" + }, + "create_rig": { + "name": "rig", + "label": "Rig", + "family": "rig", + "icon": "wheelchair", + "defaults": ["Main"], + "help": "Artist-friendly rig with controls" + }, + "create_layout": { + "name": "layout", + "label": "Layout", + "family": "layout", + "icon": "cubes", + "defaults": ["Main"], + "help": "Simple scene for animators with camera" + }, + "create_plate": { + "name": "plate", + "label": "Plate", + "family": "plate", + "icon": "camera", + "defaults": ["Main", "BG", "Reference"], + "help": "Plates for compositors" + }, + "create_matchmove": { + "name": "matchmove", + "label": "Matchmove script", + "family": "matchmove", + "icon": "empire", + "defaults": ["Camera", "Object", "Mocap"], + "help": "Script exported from matchmoving application" + }, + "create_images": { + "name": "image", + "label": "Image file", + "family": "image", + "icon": "image", + "defaults": ["ConceptArt", "Reference", "Texture", "MattePaint"], + "help": "Holder for all kinds of image data" + }, + "create_editorial": { + "name": "editorial", + "label": "Editorial", + "family": "editorial", + "icon": "image", + "defaults": ["Main"], + "help": "Editorial files to generate shots." 
+ } +} diff --git a/pype/settings/lib.py b/pype/settings/lib.py new file mode 100644 index 0000000000..388557ca9b --- /dev/null +++ b/pype/settings/lib.py @@ -0,0 +1,258 @@ +import os +import json +import logging +import copy + +log = logging.getLogger(__name__) + +# Metadata keys for work with studio and project overrides +OVERRIDEN_KEY = "__overriden_keys__" +# NOTE key popping not implemented yet +POP_KEY = "__pop_key__" + +# Folder where studio overrides are stored +STUDIO_OVERRIDES_PATH = os.environ["PYPE_PROJECT_CONFIGS"] + +# File where studio's system overrides are stored +SYSTEM_SETTINGS_KEY = "system_settings" +SYSTEM_SETTINGS_PATH = os.path.join( + STUDIO_OVERRIDES_PATH, SYSTEM_SETTINGS_KEY + ".json" +) + +# File where studio's default project overrides are stored +PROJECT_SETTINGS_KEY = "project_settings" +PROJECT_SETTINGS_FILENAME = PROJECT_SETTINGS_KEY + ".json" +PROJECT_SETTINGS_PATH = os.path.join( + STUDIO_OVERRIDES_PATH, PROJECT_SETTINGS_FILENAME +) + +PROJECT_ANATOMY_KEY = "project_anatomy" +PROJECT_ANATOMY_FILENAME = PROJECT_ANATOMY_KEY + ".json" +PROJECT_ANATOMY_PATH = os.path.join( + STUDIO_OVERRIDES_PATH, PROJECT_ANATOMY_FILENAME +) + +# Path to default settings +DEFAULTS_DIR = os.path.join(os.path.dirname(__file__), "defaults") + +# Variable where cache of default settings are stored +_DEFAULT_SETTINGS = None + + +def reset_default_settings(): + global _DEFAULT_SETTINGS + _DEFAULT_SETTINGS = None + + +def default_settings(): + global _DEFAULT_SETTINGS + if _DEFAULT_SETTINGS is None: + _DEFAULT_SETTINGS = load_jsons_from_dir(DEFAULTS_DIR) + return _DEFAULT_SETTINGS + + +def load_json(fpath): + # Load json data + with open(fpath, "r") as opened_file: + lines = opened_file.read().splitlines() + + # prepare json string + standard_json = "" + for line in lines: + # Remove all whitespace on both sides + line = line.strip() + + # Skip blank lines + if len(line) == 0: + continue + + standard_json += line + + # Check if has extra commas + extra_comma 
= False + if ",]" in standard_json or ",}" in standard_json: + extra_comma = True + standard_json = standard_json.replace(",]", "]") + standard_json = standard_json.replace(",}", "}") + + if extra_comma: + log.error("Extra comma in json file: \"{}\"".format(fpath)) + + # return empty dict if file is empty + if standard_json == "": + return {} + + # Try to parse string + try: + return json.loads(standard_json) + + except json.decoder.JSONDecodeError: + # Return empty dict if it is first time that decode error happened + return {} + + # Repreduce the exact same exception but traceback contains better + # information about position of error in the loaded json + try: + with open(fpath, "r") as opened_file: + json.load(opened_file) + + except json.decoder.JSONDecodeError: + log.warning( + "File has invalid json format \"{}\"".format(fpath), + exc_info=True + ) + + return {} + + +def subkey_merge(_dict, value, keys): + key = keys.pop(0) + if not keys: + _dict[key] = value + return _dict + + if key not in _dict: + _dict[key] = {} + _dict[key] = subkey_merge(_dict[key], value, keys) + + return _dict + + +def load_jsons_from_dir(path, *args, **kwargs): + output = {} + + path = os.path.normpath(path) + if not os.path.exists(path): + # TODO warning + return output + + sub_keys = list(kwargs.pop("subkeys", args)) + for sub_key in tuple(sub_keys): + _path = os.path.join(path, sub_key) + if not os.path.exists(_path): + break + + path = _path + sub_keys.pop(0) + + base_len = len(path) + 1 + for base, _directories, filenames in os.walk(path): + base_items_str = base[base_len:] + if not base_items_str: + base_items = [] + else: + base_items = base_items_str.split(os.path.sep) + + for filename in filenames: + basename, ext = os.path.splitext(filename) + if ext == ".json": + full_path = os.path.join(base, filename) + value = load_json(full_path) + dict_keys = base_items + [basename] + output = subkey_merge(output, value, dict_keys) + + for sub_key in sub_keys: + output = 
output[sub_key] + return output + + +def studio_system_settings(): + if os.path.exists(SYSTEM_SETTINGS_PATH): + return load_json(SYSTEM_SETTINGS_PATH) + return {} + + +def studio_project_settings(): + if os.path.exists(PROJECT_SETTINGS_PATH): + return load_json(PROJECT_SETTINGS_PATH) + return {} + + +def studio_project_anatomy(): + if os.path.exists(PROJECT_ANATOMY_PATH): + return load_json(PROJECT_ANATOMY_PATH) + return {} + + +def path_to_project_overrides(project_name): + return os.path.join( + STUDIO_OVERRIDES_PATH, + project_name, + PROJECT_SETTINGS_FILENAME + ) + + +def path_to_project_anatomy(project_name): + return os.path.join( + STUDIO_OVERRIDES_PATH, + project_name, + PROJECT_ANATOMY_FILENAME + ) + + +def project_settings_overrides(project_name): + if not project_name: + return {} + + path_to_json = path_to_project_overrides(project_name) + if not os.path.exists(path_to_json): + return {} + return load_json(path_to_json) + + +def project_anatomy_overrides(project_name): + if not project_name: + return {} + + path_to_json = path_to_project_anatomy(project_name) + if not os.path.exists(path_to_json): + return {} + return load_json(path_to_json) + + +def merge_overrides(global_dict, override_dict): + if OVERRIDEN_KEY in override_dict: + overriden_keys = set(override_dict.pop(OVERRIDEN_KEY)) + else: + overriden_keys = set() + + for key, value in override_dict.items(): + if value == POP_KEY: + global_dict.pop(key) + + elif ( + key in overriden_keys + or key not in global_dict + ): + global_dict[key] = value + + elif isinstance(value, dict) and isinstance(global_dict[key], dict): + global_dict[key] = merge_overrides(global_dict[key], value) + + else: + global_dict[key] = value + return global_dict + + +def apply_overrides(source_data, override_data): + if not override_data: + return source_data + _source_data = copy.deepcopy(source_data) + return merge_overrides(_source_data, override_data) + + +def system_settings(): + default_values = 
default_settings()[SYSTEM_SETTINGS_KEY] + studio_values = studio_system_settings() + return apply_overrides(default_values, studio_values) + + +def project_settings(project_name): + default_values = default_settings()[PROJECT_SETTINGS_KEY] + studio_values = studio_project_settings() + + studio_overrides = apply_overrides(default_values, studio_values) + + project_overrides = project_settings_overrides(project_name) + + return apply_overrides(studio_overrides, project_overrides) diff --git a/pype/tests/test_mongo_performance.py b/pype/tests/test_mongo_performance.py new file mode 100644 index 0000000000..6b62f0fd1c --- /dev/null +++ b/pype/tests/test_mongo_performance.py @@ -0,0 +1,236 @@ +import pymongo +import bson +import random + + +class TestPerformance(): + ''' + Class for testing performance of representation and their 'files' parts. + Discussion is if embedded array: + 'files' : [ {'_id': '1111', 'path':'....}, + {'_id'...}] + OR documents: + 'files' : { + '1111': {'path':'....'}, + '2222': {'path':'...'} + } + is faster. + + Current results: without additional partial index documents is 3x faster + With index is array 50x faster than document + + Partial index something like: + db.getCollection('performance_test').createIndex + ({'files._id': 1}, + {partialFilterExpression: {'files': {'$exists': true}}) + !DIDNT work for me, had to create manually in Compass + + ''' + + MONGO_URL = 'mongodb://localhost:27017' + MONGO_DB = 'performance_test' + MONGO_COLLECTION = 'performance_test' + + inserted_ids = [] + + def __init__(self, version='array'): + ''' + It creates and fills collection, based on value of 'version'.
+ + :param version: 'array' - files as embedded array, + 'doc' - as document + ''' + self.client = pymongo.MongoClient(self.MONGO_URL) + self.db = self.client[self.MONGO_DB] + self.collection_name = self.MONGO_COLLECTION + + self.version = version + + if self.version != 'array': + self.collection_name = self.MONGO_COLLECTION + '_doc' + + self.collection = self.db[self.collection_name] + + self.ids = [] # for testing + self.inserted_ids = [] + + def prepare(self, no_of_records=100000): + ''' + Produce 'no_of_records' of representations with 'files' segment. + It depends on 'version' value in constructor, 'arrray' or 'doc' + :return: + ''' + print('Purging {} collection'.format(self.collection_name)) + self.collection.delete_many({}) + + id = bson.objectid.ObjectId() + + insert_recs = [] + for i in range(no_of_records): + file_id = bson.objectid.ObjectId() + file_id2 = bson.objectid.ObjectId() + file_id3 = bson.objectid.ObjectId() + + self.inserted_ids.extend([file_id, file_id2, file_id3]) + + document = {"files": self.get_files(self.version, i, + file_id, file_id2, file_id3) + , + "context": { + "subset": "workfileLookdev", + "username": "petrk", + "task": "lookdev", + "family": "workfile", + "hierarchy": "Assets", + "project": {"code": "test", "name": "Test"}, + "version": 1, + "asset": "Cylinder", + "representation": "mb", + "root": "C:/projects" + }, + "dependencies": [], + "name": "mb", + "parent": {"oid": '{}'.format(id)}, + "data": { + "path": "C:\\projects\\Test\\Assets\\Cylinder\\publish\\workfile\\workfileLookdev\\v001\\test_Cylinder_workfileLookdev_v001.mb", + "template": "{root}\\{project[name]}\\{hierarchy}\\{asset}\\publish\\{family}\\{subset}\\v{version:0>3}\\{project[code]}_{asset}_{subset}_v{version:0>3}<_{output}><.{frame:0>4}>.{representation}" + }, + "type": "representation", + "schema": "pype:representation-2.0" + } + + insert_recs.append(document) + + print('Prepared {} records in {} collection'. 
+ format(no_of_records, self.collection_name)) + + self.collection.insert_many(insert_recs) + # TODO refactore to produce real array and not needeing ugly regex + self.collection.insert_one({"inserted_id": self.inserted_ids}) + print('-' * 50) + + def run(self, queries=1000, loops=3): + ''' + Run X'queries' that are searching collection Y'loops' times + :param queries: how many times do ..find(...) + :param loops: loop of testing X queries + :return: None + ''' + print('Testing version {} on {}'.format(self.version, + self.collection_name)) + + inserted_ids = list(self.collection. + find({"inserted_id": {"$exists": True}})) + import re + self.ids = re.findall("'[0-9a-z]*'", str(inserted_ids)) + + import time + + found_cnt = 0 + for _ in range(loops): + start = time.time() + for _ in range(queries): + val = random.choice(self.ids) + val = val.replace("'", '') + + if (self.version == 'array'): + # prepared for partial index, without 'files': exists + # wont engage + found = self.collection.\ + find_one({'files': {"$exists": True}, + 'files._id': "{}".format(val)}) + else: + key = "files.{}".format(val) + found = self.collection.find_one({key: {"$exists": True}}) + if found: + found_cnt += 1 + + end = time.time() + print('duration per loop {}'.format(end - start)) + print("found_cnt {}".format(found_cnt)) + + def get_files(self, mode, i, file_id, file_id2, file_id3): + ''' + Wrapper to decide if 'array' or document version should be used + :param mode: 'array'|'doc' + :param i: step number + :param file_id: ObjectId of first dummy file + :param file_id2: .. + :param file_id3: .. 
+ :return: + ''' + if mode == 'array': + return self.get_files_array(i, file_id, file_id2, file_id3) + else: + return self.get_files_doc(i, file_id, file_id2, file_id3) + + def get_files_array(self, i, file_id, file_id2, file_id3): + return [ + { + "path": "c:/Test/Assets/Cylinder/publish/workfile/" + "workfileLookdev/v001/" + "test_CylinderA_workfileLookdev_v{0:03}.mb".format(i), + "_id": '{}'.format(file_id), + "hash": "temphash", + "sites": ["studio"], + "size":87236 + }, + { + "path": "c:/Test/Assets/Cylinder/publish/workfile/" + "workfileLookdev/v001/" + "test_CylinderB_workfileLookdev_v{0:03}.mb".format(i), + "_id": '{}'.format(file_id2), + "hash": "temphash", + "sites": ["studio"], + "size": 87236 + }, + { + "path": "c:/Test/Assets/Cylinder/publish/workfile/" + "workfileLookdev/v001/" + "test_CylinderC_workfileLookdev_v{0:03}.mb".format(i), + "_id": '{}'.format(file_id3), + "hash": "temphash", + "sites": ["studio"], + "size": 87236 + } + + ] + + def get_files_doc(self, i, file_id, file_id2, file_id3): + ret = {} + ret['{}'.format(file_id)] = { + "path": "c:/Test/Assets/Cylinder/publish/workfile/workfileLookdev/" + "v001/test_CylinderA_workfileLookdev_v{0:03}.mb".format(i), + "hash": "temphash", + "sites": ["studio"], + "size": 87236 + } + + ret['{}'.format(file_id2)] = { + "path": "c:/Test/Assets/Cylinder/publish/workfile/workfileLookdev/" + "v001/test_CylinderB_workfileLookdev_v{0:03}.mb".format(i), + "hash": "temphash", + "sites": ["studio"], + "size": 87236 + } + ret['{}'.format(file_id3)] = { + "path": "c:/Test/Assets/Cylinder/publish/workfile/workfileLookdev/" + "v001/test_CylinderC_workfileLookdev_v{0:03}.mb".format(i), + "hash": "temphash", + "sites": ["studio"], + "size": 87236 + } + + return ret + + +if __name__ == '__main__': + tp = TestPerformance('array') + tp.prepare() # enable to prepare data + tp.run(1000, 3) + + print('-'*50) + + tp = TestPerformance('doc') + tp.prepare() # enable to prepare data + tp.run(1000, 3) diff --git 
a/pype/tools/launcher/__init__.py b/pype/tools/launcher/__init__.py new file mode 100644 index 0000000000..109d642e86 --- /dev/null +++ b/pype/tools/launcher/__init__.py @@ -0,0 +1,7 @@ +from .window import LauncherWindow +from . import actions + +__all__ = [ + "LauncherWindow", + "actions" +] diff --git a/pype/tools/launcher/actions.py b/pype/tools/launcher/actions.py new file mode 100644 index 0000000000..80e6f71ae7 --- /dev/null +++ b/pype/tools/launcher/actions.py @@ -0,0 +1,104 @@ +import os +import importlib + +from avalon import api, lib + + +class ProjectManagerAction(api.Action): + name = "projectmanager" + label = "Project Manager" + icon = "gear" + order = 999 # at the end + + def is_compatible(self, session): + return "AVALON_PROJECT" in session + + def process(self, session, **kwargs): + return lib.launch( + executable="python", + args=[ + "-u", "-m", "avalon.tools.projectmanager", + session['AVALON_PROJECT'] + ] + ) + + +class LoaderAction(api.Action): + name = "loader" + label = "Loader" + icon = "cloud-download" + order = 998 + + def is_compatible(self, session): + return "AVALON_PROJECT" in session + + def process(self, session, **kwargs): + return lib.launch( + executable="python", + args=[ + "-u", "-m", "avalon.tools.loader", session['AVALON_PROJECT'] + ] + ) + + +class LoaderLibrary(api.Action): + name = "loader_os" + label = "Library Loader" + icon = "book" + order = 997 # at the end + + def is_compatible(self, session): + return True + + def process(self, session, **kwargs): + return lib.launch( + executable="python", + args=["-u", "-m", "avalon.tools.libraryloader"] + ) + + +def register_default_actions(): + """Register default actions for Launcher""" + api.register_plugin(api.Action, ProjectManagerAction) + api.register_plugin(api.Action, LoaderAction) + api.register_plugin(api.Action, LoaderLibrary) + + +def register_config_actions(): + """Register actions from the configuration for Launcher""" + + module_name = os.environ["AVALON_CONFIG"] 
+ config = importlib.import_module(module_name) + if not hasattr(config, "register_launcher_actions"): + print( + "Current configuration `%s` has no 'register_launcher_actions'" + % config.__name__ + ) + return + + config.register_launcher_actions() + + +def register_environment_actions(): + """Register actions from AVALON_ACTIONS for Launcher.""" + + paths = os.environ.get("AVALON_ACTIONS") + if not paths: + return + + for path in paths.split(os.pathsep): + api.register_plugin_path(api.Action, path) + + # Run "register" if found. + for module in lib.modules_from_path(path): + if "register" not in dir(module): + continue + + try: + module.register() + except Exception as e: + print( + "Register method in {0} failed: {1}".format( + module, str(e) + ) + ) diff --git a/pype/tools/launcher/delegates.py b/pype/tools/launcher/delegates.py new file mode 100644 index 0000000000..95ccde6445 --- /dev/null +++ b/pype/tools/launcher/delegates.py @@ -0,0 +1,50 @@ +from Qt import QtCore, QtWidgets, QtGui + + +class ActionDelegate(QtWidgets.QStyledItemDelegate): + extender_lines = 2 + extender_bg_brush = QtGui.QBrush(QtGui.QColor(100, 100, 100, 160)) + extender_fg = QtGui.QColor(255, 255, 255, 160) + + def __init__(self, group_roles, *args, **kwargs): + super(ActionDelegate, self).__init__(*args, **kwargs) + self.group_roles = group_roles + + def paint(self, painter, option, index): + super(ActionDelegate, self).paint(painter, option, index) + is_group = False + for group_role in self.group_roles: + is_group = index.data(group_role) + if is_group: + break + if not is_group: + return + + extender_width = int(option.decorationSize.width() / 2) + extender_height = int(option.decorationSize.height() / 2) + + exteder_rect = QtCore.QRectF( + option.rect.x() + (option.rect.width() / 10), + option.rect.y() + (option.rect.height() / 10), + extender_width, + extender_height + ) + path = QtGui.QPainterPath() + path.addRoundedRect(exteder_rect, 2, 2) + + painter.fillPath(path, 
self.extender_bg_brush) + + painter.setPen(self.extender_fg) + painter.drawPath(path) + + divider = (2 * self.extender_lines) + 1 + line_height = extender_height / divider + line_width = extender_width - (extender_width / 5) + pos_x = exteder_rect.x() + extender_width / 10 + pos_y = exteder_rect.y() + line_height + for _ in range(self.extender_lines): + line_rect = QtCore.QRectF( + pos_x, pos_y, line_width, round(line_height) + ) + painter.fillRect(line_rect, self.extender_fg) + pos_y += 2 * line_height diff --git a/pype/tools/launcher/flickcharm.py b/pype/tools/launcher/flickcharm.py new file mode 100644 index 0000000000..a5ea5a79d8 --- /dev/null +++ b/pype/tools/launcher/flickcharm.py @@ -0,0 +1,304 @@ +""" +This based on the flickcharm-python code from: + https://code.google.com/archive/p/flickcharm-python/ + +Which states: + This is a Python (PyQt) port of Ariya Hidayat's elegant FlickCharm + hack which adds kinetic scrolling to any scrollable Qt widget. + + Licensed under GNU GPL version 2 or later. + +It has been altered to fix edge cases where clicks and drags would not +propagate correctly under some conditions. It also allows a small "dead zone" +threshold in which it will still propagate the user pressed click if he or she +travelled only very slightly with the cursor. + +""" + +import copy +from Qt import QtWidgets, QtCore, QtGui + + +class FlickData(object): + Steady = 0 + Pressed = 1 + ManualScroll = 2 + AutoScroll = 3 + Stop = 4 + + def __init__(self): + self.state = FlickData.Steady + self.widget = None + self.pressPos = QtCore.QPoint(0, 0) + self.offset = QtCore.QPoint(0, 0) + self.dragPos = QtCore.QPoint(0, 0) + self.speed = QtCore.QPoint(0, 0) + self.travelled = 0 + self.ignored = [] + + +class FlickCharm(QtCore.QObject): + """Make scrollable widgets flickable. + + For example: + charm = FlickCharm() + charm.activateOn(widget) + + It can `activateOn` multiple widgets with a single FlickCharm instance. 
+ Be aware that the FlickCharm object must be kept around for it not + to get garbage collected and losing the flickable behavior. + + Flick away! + + """ + + def __init__(self, parent=None): + super(FlickCharm, self).__init__(parent=parent) + + self.flickData = {} + self.ticker = QtCore.QBasicTimer() + + # The flick button to use + self.button = QtCore.Qt.LeftButton + + # The time taken per update tick of flicking behavior + self.tick_time = 20 + + # Allow a item click/press directly when AutoScroll is slower than + # this threshold velocity + self.click_in_autoscroll_threshold = 10 + + # Allow an item click/press to propagate as opposed to scrolling + # when the cursor travelled less than this amount of pixels + # Note: back & forth motion increases the value too + self.travel_threshold = 20 + + self.max_speed = 64 # max scroll speed + self.drag = 1 # higher drag will stop autoscroll faster + + def activateOn(self, widget): + viewport = widget.viewport() + viewport.installEventFilter(self) + widget.installEventFilter(self) + self.flickData[viewport] = FlickData() + self.flickData[viewport].widget = widget + self.flickData[viewport].state = FlickData.Steady + + def deactivateFrom(self, widget): + + viewport = widget.viewport() + viewport.removeEventFilter(self) + widget.removeEventFilter(self) + self.flickData.pop(viewport) + + def eventFilter(self, obj, event): + + if not obj.isWidgetType(): + return False + + eventType = event.type() + if eventType != QtCore.QEvent.MouseButtonPress and \ + eventType != QtCore.QEvent.MouseButtonRelease and \ + eventType != QtCore.QEvent.MouseMove: + return False + + if event.modifiers() != QtCore.Qt.NoModifier: + return False + + if obj not in self.flickData: + return False + + data = self.flickData[obj] + found, newIgnored = removeAll(data.ignored, event) + if found: + data.ignored = newIgnored + return False + + if data.state == FlickData.Steady: + if eventType == QtCore.QEvent.MouseButtonPress: + if event.buttons() == 
self.button: + self._set_press_pos_and_offset(event, data) + data.state = FlickData.Pressed + return True + + elif data.state == FlickData.Pressed: + if eventType == QtCore.QEvent.MouseButtonRelease: + # User didn't actually scroll but clicked in + # the widget. Let the original press and release + # event be evaluated on the Widget + data.state = FlickData.Steady + event1 = QtGui.QMouseEvent(QtCore.QEvent.MouseButtonPress, + data.pressPos, + QtCore.Qt.LeftButton, + QtCore.Qt.LeftButton, + QtCore.Qt.NoModifier) + # Copy the current event + event2 = QtGui.QMouseEvent(QtCore.QEvent.MouseButtonRelease, + event.pos(), + event.button(), + event.buttons(), + event.modifiers()) + data.ignored.append(event1) + data.ignored.append(event2) + QtWidgets.QApplication.postEvent(obj, event1) + QtWidgets.QApplication.postEvent(obj, event2) + return True + elif eventType == QtCore.QEvent.MouseMove: + data.state = FlickData.ManualScroll + data.dragPos = QtGui.QCursor.pos() + if not self.ticker.isActive(): + self.ticker.start(self.tick_time, self) + return True + + elif data.state == FlickData.ManualScroll: + if eventType == QtCore.QEvent.MouseMove: + pos = event.pos() + delta = pos - data.pressPos + data.travelled += delta.manhattanLength() + setScrollOffset(data.widget, data.offset - delta) + return True + elif eventType == QtCore.QEvent.MouseButtonRelease: + + if data.travelled <= self.travel_threshold: + # If the user travelled less than the threshold + # don't go into autoscroll mode but assume the user + # intended to click instead + return self._propagate_click(obj, event, data) + + data.state = FlickData.AutoScroll + return True + + elif data.state == FlickData.AutoScroll: + if eventType == QtCore.QEvent.MouseButtonPress: + + # Allow pressing when auto scroll is already slower than + # the click in autoscroll threshold + velocity = data.speed.manhattanLength() + if velocity <= self.click_in_autoscroll_threshold: + self._set_press_pos_and_offset(event, data) + data.state = 
FlickData.Pressed + else: + data.state = FlickData.Stop + + data.speed = QtCore.QPoint(0, 0) + return True + elif eventType == QtCore.QEvent.MouseButtonRelease: + data.state = FlickData.Steady + data.speed = QtCore.QPoint(0, 0) + return True + + elif data.state == FlickData.Stop: + if eventType == QtCore.QEvent.MouseButtonRelease: + data.state = FlickData.Steady + + # If the user had a very limited scroll smaller than the + # threshold consider it a regular press and release. + if data.travelled < self.travel_threshold: + return self._propagate_click(obj, event, data) + + return True + elif eventType == QtCore.QEvent.MouseMove: + # Reset the press position and offset to allow us to "continue" + # the scroll from the new point the user clicked and then held + # down to continue scrolling after AutoScroll. + self._set_press_pos_and_offset(event, data) + data.state = FlickData.ManualScroll + + data.dragPos = QtGui.QCursor.pos() + if not self.ticker.isActive(): + self.ticker.start(self.tick_time, self) + return True + + return False + + def _set_press_pos_and_offset(self, event, data): + """Store current event position on Press""" + data.state = FlickData.Pressed + data.pressPos = copy.copy(event.pos()) + data.offset = scrollOffset(data.widget) + data.travelled = 0 + + def _propagate_click(self, obj, event, data): + """Propagate from Pressed state with MouseButtonRelease event. + + Use only on button release in certain states to propagate a click, + for example when the user dragged only a slight distance under the + travel threshold. 
+ + """ + + data.state = FlickData.Pressed + data.pressPos = copy.copy(event.pos()) + data.offset = scrollOffset(data.widget) + data.travelled = 0 + self.eventFilter(obj, event) + return True + + def timerEvent(self, event): + + count = 0 + for data in self.flickData.values(): + if data.state == FlickData.ManualScroll: + count += 1 + cursorPos = QtGui.QCursor.pos() + data.speed = cursorPos - data.dragPos + data.dragPos = cursorPos + elif data.state == FlickData.AutoScroll: + count += 1 + data.speed = deaccelerate(data.speed, + a=self.drag, + maxVal=self.max_speed) + p = scrollOffset(data.widget) + new_p = p - data.speed + setScrollOffset(data.widget, new_p) + + if scrollOffset(data.widget) == p: + # If this scroll resulted in no change on the widget + # we reached the end of the list and set the speed to + # zero. + data.speed = QtCore.QPoint(0, 0) + + if data.speed == QtCore.QPoint(0, 0): + data.state = FlickData.Steady + + if count == 0: + self.ticker.stop() + + super(FlickCharm, self).timerEvent(event) + + +def scrollOffset(widget): + x = widget.horizontalScrollBar().value() + y = widget.verticalScrollBar().value() + return QtCore.QPoint(x, y) + + +def setScrollOffset(widget, p): + widget.horizontalScrollBar().setValue(p.x()) + widget.verticalScrollBar().setValue(p.y()) + + +def deaccelerate(speed, a=1, maxVal=64): + + x = max(min(speed.x(), maxVal), -maxVal) + y = max(min(speed.y(), maxVal), -maxVal) + if x > 0: + x = max(0, x - a) + elif x < 0: + x = min(0, x + a) + if y > 0: + y = max(0, y - a) + elif y < 0: + y = min(0, y + a) + return QtCore.QPoint(x, y) + + +def removeAll(list, val): + found = False + ret = [] + for element in list: + if element == val: + found = True + else: + ret.append(element) + return found, ret diff --git a/pype/tools/launcher/lib.py b/pype/tools/launcher/lib.py new file mode 100644 index 0000000000..a6d6ff6865 --- /dev/null +++ b/pype/tools/launcher/lib.py @@ -0,0 +1,113 @@ +"""Utility script for updating database with configuration 
def get_application_actions(project):
    """Define dynamic Application classes for project using `.toml` files

    Args:
        project (dict): project document from the database

    Returns:
        list: ApplicationAction subclasses, one per loadable project app
    """

    apps = []
    for app in project["config"]["apps"]:
        try:
            app_name = app["name"]
            app_definition = lib.get_application(app_name)
        except Exception as exc:
            # BUGFIX: use .get() here - the failure above may be the
            # missing "name" key itself, and re-evaluating app["name"]
            # would raise KeyError inside this handler.
            print(
                "Unable to load application: %s - %s"
                % (app.get("name"), exc)
            )
            continue

        # Get from app definition, if not there from app in project
        icon = app_definition.get("icon", app.get("icon", "folder-o"))
        color = app_definition.get("color", app.get("color", None))
        order = app_definition.get("order", app.get("order", 0))
        label = app_definition.get("label") or app.get("label") or app_name
        label_variant = app_definition.get("label_variant")
        group = app_definition.get("group") or app.get("group")

        # Create a uniquely named Action class per application so the
        # launcher can discover and instantiate it like any other action.
        action = type(
            "app_{}".format(app_name),
            (ApplicationAction,),
            {
                "name": app_name,
                "label": label,
                "label_variant": label_variant,
                "group": group,
                "icon": icon,
                "color": color,
                "order": order,
                "config": app_definition.copy()
            }
        )

        apps.append(action)
    return apps
def get_action_label(action):
    """Human readable label for an action.

    Uses the action's `label` when set (falling back to its `name`) and
    appends `label_variant` when present.
    """
    label = getattr(action, "label", None)
    if not label:
        return action.name

    variant = getattr(action, "label_variant", None)
    if variant:
        return "{} {}".format(label, variant)
    return label
class TaskModel(QtGui.QStandardItemModel):
    """A model listing the tasks combined for a list of assets"""

    def __init__(self, dbcon, parent=None):
        super(TaskModel, self).__init__(parent=parent)
        self.dbcon = dbcon

        self._num_assets = 0

        # Fallback icons: default per-task icon and the "no tasks" marker.
        self.default_icon = qtawesome.icon(
            "fa.male", color=style.colors.default
        )
        self.no_task_icon = qtawesome.icon(
            "fa.exclamation-circle", color=style.colors.mid
        )

        self._icons = {}

        self._get_task_icons()

    def _get_task_icons(self):
        """Cache icons configured per task type in the project document."""
        if not self.dbcon.Session.get("AVALON_PROJECT"):
            return

        # Get the project configured icons from database
        project = self.dbcon.find_one({"type": "project"})
        for task in project["config"].get("tasks") or []:
            icon_name = task.get("icon")
            if icon_name:
                self._icons[task["name"]] = qtawesome.icon(
                    "fa.{}".format(icon_name), color=style.colors.default
                )

    def set_assets(self, asset_ids=None, asset_docs=None):
        """Set assets to track by their database id

        Arguments:
            asset_ids (list): List of asset ids.
            asset_docs (list): List of asset entities from MongoDB.

        Raises:
            ValueError: When any id in `asset_ids` is not found in the
                database.
        """

        if asset_docs is None and asset_ids is not None:
            # find assets in db by query
            asset_docs = list(self.dbcon.find({
                "type": "asset",
                "_id": {"$in": asset_ids}
            }))
            db_assets_ids = tuple(asset_doc["_id"] for asset_doc in asset_docs)

            # check if all assets were found
            not_found = tuple(
                str(asset_id)
                for asset_id in asset_ids
                if asset_id not in db_assets_ids
            )

            # BUGFIX: raise instead of `assert` - input validation must
            # also run when Python is started with -O (asserts stripped).
            if not_found:
                raise ValueError(
                    "Assets not found by id: {0}".format(", ".join(not_found))
                )

        self.clear()

        if not asset_docs:
            return

        # Union of task names across all tracked assets.
        task_names = set()
        for asset_doc in asset_docs:
            asset_tasks = asset_doc.get("data", {}).get("tasks") or set()
            task_names.update(asset_tasks)

        self.beginResetModel()

        if not task_names:
            # Show a disabled placeholder row so the view is not empty.
            item = QtGui.QStandardItem(self.no_task_icon, "No task")
            item.setEnabled(False)
            self.appendRow(item)

        else:
            for task_name in sorted(task_names):
                icon = self._icons.get(task_name, self.default_icon)
                item = QtGui.QStandardItem(icon, task_name)
                self.appendRow(item)

        self.endResetModel()

    def headerData(self, section, orientation, role):
        """Show "Tasks" as the horizontal header of the single column."""
        if (
            role == QtCore.Qt.DisplayRole
            and orientation == QtCore.Qt.Horizontal
            and section == 0
        ):
            return "Tasks"
        return super(TaskModel, self).headerData(section, orientation, role)
    def refresh(self):
        """Rebuild the model from actions compatible with the session.

        Actions are bucketed three ways: plain actions, actions sharing a
        `label` that differ only by `label_variant` (shown as one item
        with VARIANT_GROUP_ROLE set), and actions sharing a `group`
        (shown as one item with GROUP_ROLE set). Items are appended
        sorted by the lowest `order` in each bucket.
        """
        # Validate actions based on compatibility
        self.clear()

        self._groups.clear()

        actions = self.filter_compatible_actions(self._registered_actions)

        self.beginResetModel()

        single_actions = []
        varianted_actions = collections.defaultdict(list)
        grouped_actions = collections.defaultdict(list)
        for action in actions:
            # Groups
            group_name = getattr(action, "group", None)

            # Label variants
            label = getattr(action, "label", None)
            label_variant = getattr(action, "label_variant", None)
            if label_variant and not label:
                # A variant without a base label is a misconfigured
                # action; warn and treat it as a plain action.
                print((
                    "Invalid action \"{}\" has set `label_variant` to \"{}\""
                    ", but doesn't have set `label` attribute"
                ).format(action.name, label_variant))
                action.label_variant = None
                label_variant = None

            # Group membership wins over label-variant bucketing.
            if group_name:
                grouped_actions[group_name].append(action)

            elif label_variant:
                varianted_actions[label].append(action)
            else:
                single_actions.append(action)

        items_by_order = collections.defaultdict(list)
        # Iterate over a snapshot: single-member variant buckets are
        # popped and demoted to plain actions while iterating.
        for label, actions in tuple(varianted_actions.items()):
            if len(actions) == 1:
                varianted_actions.pop(label)
                single_actions.append(actions[0])
                continue

            # Use the first available icon and the lowest order of the
            # bucket for the combined item.
            icon = None
            order = None
            for action in actions:
                if icon is None:
                    _icon = lib.get_action_icon(action)
                    if _icon:
                        icon = _icon

                if order is None or action.order < order:
                    order = action.order

            if icon is None:
                icon = self.default_icon

            # NOTE: `action.label` (last loop value) equals the bucket
            # key `label` - all actions in this bucket share the label.
            item = QtGui.QStandardItem(icon, action.label)
            item.setData(actions, self.ACTION_ROLE)
            item.setData(True, self.VARIANT_GROUP_ROLE)
            items_by_order[order].append(item)

        for action in single_actions:
            icon = self.get_icon(action)
            item = QtGui.QStandardItem(icon, lib.get_action_label(action))
            item.setData(action, self.ACTION_ROLE)
            items_by_order[action.order].append(item)

        for group_name, actions in grouped_actions.items():
            # Same icon/order selection as for variant buckets.
            icon = None
            order = None
            for action in actions:
                if order is None or action.order < order:
                    order = action.order

                if icon is None:
                    _icon = lib.get_action_icon(action)
                    if _icon:
                        icon = _icon

            if icon is None:
                icon = self.default_icon

            item = QtGui.QStandardItem(icon, group_name)
            item.setData(actions, self.ACTION_ROLE)
            item.setData(True, self.GROUP_ROLE)

            items_by_order[order].append(item)

        # Append rows in ascending order; ties keep insertion order.
        for order in sorted(items_by_order.keys()):
            for item in items_by_order[order]:
                self.appendRow(item)

        self.endResetModel()
class ProjectModel(QtGui.QStandardItemModel):
    """List of projects"""

    def __init__(self, dbcon, parent=None):
        super(ProjectModel, self).__init__(parent=parent)

        self.dbcon = dbcon

        # When True, projects with data.visible == False are filtered out.
        self.hide_invisible = False
        self.project_icon = qtawesome.icon("fa.map", color="white")

    def refresh(self):
        """Re-query projects and rebuild the model rows."""
        self.clear()
        self.beginResetModel()

        for project_doc in self.get_projects():
            item = QtGui.QStandardItem(self.project_icon, project_doc["name"])
            self.appendRow(item)

        self.endResetModel()

    def get_projects(self):
        """Return project documents sorted by name.

        Honors `hide_invisible` by skipping projects whose data.visible
        is set to False.
        """
        project_docs = []
        for project_doc in sorted(
            self.dbcon.projects(), key=lambda x: x["name"]
        ):
            # BUGFIX: use .get() so project documents without a "data"
            # key don't raise KeyError; they default to visible.
            if (
                self.hide_invisible
                and not project_doc.get("data", {}).get("visible", True)
            ):
                continue
            project_docs.append(project_doc)

        return project_docs
import lib +from .models import TaskModel, ActionModel, ProjectModel +from .flickcharm import FlickCharm + + +class ProjectBar(QtWidgets.QWidget): + project_changed = QtCore.Signal(int) + + def __init__(self, dbcon, parent=None): + super(ProjectBar, self).__init__(parent) + + self.dbcon = dbcon + + self.model = ProjectModel(self.dbcon) + self.model.hide_invisible = True + + self.project_combobox = QtWidgets.QComboBox() + self.project_combobox.setModel(self.model) + self.project_combobox.setRootModelIndex(QtCore.QModelIndex()) + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(self.project_combobox) + + self.setSizePolicy( + QtWidgets.QSizePolicy.MinimumExpanding, + QtWidgets.QSizePolicy.Maximum + ) + + # Initialize + self.refresh() + + # Signals + self.project_combobox.currentIndexChanged.connect(self.project_changed) + + # Set current project by default if it's set. + project_name = self.dbcon.Session.get("AVALON_PROJECT") + if project_name: + self.set_project(project_name) + + def get_current_project(self): + return self.project_combobox.currentText() + + def set_project(self, project_name): + index = self.project_combobox.findText(project_name) + if index >= 0: + self.project_combobox.setCurrentIndex(index) + + def refresh(self): + prev_project_name = self.get_current_project() + + # Refresh without signals + self.project_combobox.blockSignals(True) + + self.model.refresh() + self.set_project(prev_project_name) + + self.project_combobox.blockSignals(False) + + self.project_changed.emit(self.project_combobox.currentIndex()) + + +class ActionBar(QtWidgets.QWidget): + """Launcher interface""" + + action_clicked = QtCore.Signal(object) + + def __init__(self, dbcon, parent=None): + super(ActionBar, self).__init__(parent) + + self.dbcon = dbcon + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(8, 0, 8, 0) + + view = QtWidgets.QListView(self) + view.setProperty("mode", "icon") + 
view.setObjectName("IconView") + view.setViewMode(QtWidgets.QListView.IconMode) + view.setResizeMode(QtWidgets.QListView.Adjust) + view.setSelectionMode(QtWidgets.QListView.NoSelection) + view.setEditTriggers(QtWidgets.QListView.NoEditTriggers) + view.setWrapping(True) + view.setGridSize(QtCore.QSize(70, 75)) + view.setIconSize(QtCore.QSize(30, 30)) + view.setSpacing(0) + view.setWordWrap(True) + + model = ActionModel(self.dbcon, self) + view.setModel(model) + + # TODO better group delegate + delegate = ActionDelegate( + [model.GROUP_ROLE, model.VARIANT_GROUP_ROLE], + self + ) + view.setItemDelegate(delegate) + + layout.addWidget(view) + + self.model = model + self.view = view + + # Make view flickable + flick = FlickCharm(parent=view) + flick.activateOn(view) + + self.set_row_height(1) + + view.clicked.connect(self.on_clicked) + + def set_row_height(self, rows): + self.setMinimumHeight(rows * 75) + + def on_clicked(self, index): + if not index.isValid(): + return + + is_group = index.data(self.model.GROUP_ROLE) + is_variant_group = index.data(self.model.VARIANT_GROUP_ROLE) + if not is_group and not is_variant_group: + action = index.data(self.model.ACTION_ROLE) + self.action_clicked.emit(action) + return + + actions = index.data(self.model.ACTION_ROLE) + + menu = QtWidgets.QMenu(self) + actions_mapping = {} + + if is_variant_group: + for action in actions: + menu_action = QtWidgets.QAction( + lib.get_action_label(action) + ) + menu.addAction(menu_action) + actions_mapping[menu_action] = action + else: + by_variant_label = collections.defaultdict(list) + orders = [] + for action in actions: + # Lable variants + label = getattr(action, "label", None) + label_variant = getattr(action, "label_variant", None) + if label_variant and not label: + label_variant = None + + if not label_variant: + orders.append(action) + continue + + if label not in orders: + orders.append(label) + by_variant_label[label].append(action) + + for action_item in orders: + actions = 
by_variant_label.get(action_item) + if not actions: + action = action_item + elif len(actions) == 1: + action = actions[0] + else: + action = None + + if action: + menu_action = QtWidgets.QAction( + lib.get_action_label(action) + ) + menu.addAction(menu_action) + actions_mapping[menu_action] = action + continue + + sub_menu = QtWidgets.QMenu(label, menu) + for action in actions: + menu_action = QtWidgets.QAction( + lib.get_action_label(action) + ) + sub_menu.addAction(menu_action) + actions_mapping[menu_action] = action + + menu.addMenu(sub_menu) + + result = menu.exec_(QtGui.QCursor.pos()) + if result: + action = actions_mapping[result] + self.action_clicked.emit(action) + + +class TasksWidget(QtWidgets.QWidget): + """Widget showing active Tasks""" + + task_changed = QtCore.Signal() + selection_mode = ( + QtCore.QItemSelectionModel.Select | QtCore.QItemSelectionModel.Rows + ) + + def __init__(self, dbcon, parent=None): + super(TasksWidget, self).__init__(parent) + + self.dbcon = dbcon + + view = QtWidgets.QTreeView(self) + view.setIndentation(0) + view.setEditTriggers(QtWidgets.QTreeView.NoEditTriggers) + model = TaskModel(self.dbcon) + view.setModel(model) + + layout = QtWidgets.QVBoxLayout(self) + layout.addWidget(view) + + view.selectionModel().selectionChanged.connect(self.task_changed) + + self.model = model + self.view = view + + self._last_selected_task = None + + def set_asset(self, asset_id): + if asset_id is None: + # Asset deselected + self.model.set_assets() + return + + # Try and preserve the last selected task and reselect it + # after switching assets. If there's no currently selected + # asset keep whatever the "last selected" was prior to it. + current = self.get_current_task() + if current: + self._last_selected_task = current + + self.model.set_assets([asset_id]) + + if self._last_selected_task: + self.select_task(self._last_selected_task) + + # Force a task changed emit. 
+ self.task_changed.emit() + + def select_task(self, task_name): + """Select a task by name. + + If the task does not exist in the current model then selection is only + cleared. + + Args: + task (str): Name of the task to select. + + """ + + # Clear selection + self.view.selectionModel().clearSelection() + + # Select the task + for row in range(self.model.rowCount()): + index = self.model.index(row, 0) + _task_name = index.data(QtCore.Qt.DisplayRole) + if _task_name == task_name: + self.view.selectionModel().select(index, self.selection_mode) + # Set the currently active index + self.view.setCurrentIndex(index) + break + + def get_current_task(self): + """Return name of task at current index (selected) + + Returns: + str: Name of the current task. + + """ + index = self.view.currentIndex() + if self.view.selectionModel().isSelected(index): + return index.data(QtCore.Qt.DisplayRole) + + +class ActionHistory(QtWidgets.QPushButton): + trigger_history = QtCore.Signal(tuple) + + def __init__(self, parent=None): + super(ActionHistory, self).__init__(parent=parent) + + self.max_history = 15 + + self.setFixedWidth(25) + self.setFixedHeight(25) + + self.setIcon(qtawesome.icon("fa.history", color="#CCCCCC")) + self.setIconSize(QtCore.QSize(15, 15)) + + self._history = [] + self.clicked.connect(self.show_history) + + def show_history(self): + # Show history popup + if not self._history: + return + + widget = QtWidgets.QListWidget() + widget.setSelectionMode(widget.NoSelection) + widget.setStyleSheet(""" + * { + font-family: "Courier New"; + } + """) + + largest_label_num_chars = 0 + largest_action_label = max(len(x[0].label) for x in self._history) + action_session_role = QtCore.Qt.UserRole + 1 + + for action, session in reversed(self._history): + project = session.get("AVALON_PROJECT") + asset = session.get("AVALON_ASSET") + task = session.get("AVALON_TASK") + breadcrumb = " > ".join(x for x in [project, asset, task] if x) + + m = "{{action:{0}}} | 
{{breadcrumb}}".format(largest_action_label) + label = m.format(action=action.label, breadcrumb=breadcrumb) + + icon = lib.get_action_icon(action) + item = QtWidgets.QListWidgetItem(icon, label) + item.setData(action_session_role, (action, session)) + + largest_label_num_chars = max(largest_label_num_chars, len(label)) + + widget.addItem(item) + + # Show history + dialog = QtWidgets.QDialog(parent=self) + dialog.setWindowTitle("Action History") + dialog.setWindowFlags( + QtCore.Qt.FramelessWindowHint | QtCore.Qt.Popup + ) + dialog.setSizePolicy( + QtWidgets.QSizePolicy.Ignored, + QtWidgets.QSizePolicy.Ignored + ) + + layout = QtWidgets.QVBoxLayout(dialog) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(widget) + + def on_clicked(index): + data = index.data(action_session_role) + self.trigger_history.emit(data) + dialog.close() + + widget.clicked.connect(on_clicked) + + # padding + icon + text + width = 40 + (largest_label_num_chars * 7) + entry_height = 21 + height = entry_height * len(self._history) + + point = QtGui.QCursor().pos() + dialog.setGeometry( + point.x() - width, + point.y() - height, + width, + height + ) + dialog.exec_() + + self.widget_popup = widget + + def add_action(self, action, session): + key = (action, copy.deepcopy(session)) + + # Remove entry if already exists + if key in self._history: + self._history.remove(key) + + self._history.append(key) + + # Slice the end of the list if we exceed the max history + if len(self._history) > self.max_history: + self._history = self._history[-self.max_history:] + + def clear_history(self): + self._history.clear() + + +class SlidePageWidget(QtWidgets.QStackedWidget): + """Stacked widget that nicely slides between its pages""" + + directions = { + "left": QtCore.QPoint(-1, 0), + "right": QtCore.QPoint(1, 0), + "up": QtCore.QPoint(0, 1), + "down": QtCore.QPoint(0, -1) + } + + def slide_view(self, index, direction="right"): + if self.currentIndex() == index: + return + + offset_direction = 
class IconListView(QtWidgets.QListView):
    """Styled ListView that allows to toggle between icon and list mode.

    Toggling between the two modes is done by Right Mouse Click.

    """

    IconMode = 0
    ListMode = 1

    def __init__(self, parent=None, mode=ListMode):
        super(IconListView, self).__init__(parent=parent)

        # Workaround for scrolling being super slow or fast when
        # toggling between the two visual modes
        self.setVerticalScrollMode(self.ScrollPerPixel)
        self.setObjectName("IconView")

        self._mode = None
        self.set_mode(mode)

    def set_mode(self, mode):
        """Apply IconMode or ListMode styling; no-op when unchanged.

        Args:
            mode (int): One of IconMode (0) or ListMode (1).
        """
        if mode == self._mode:
            return

        self._mode = mode

        if mode == self.IconMode:
            self.setViewMode(QtWidgets.QListView.IconMode)
            self.setResizeMode(QtWidgets.QListView.Adjust)
            self.setWrapping(True)
            self.setWordWrap(True)
            self.setGridSize(QtCore.QSize(151, 90))
            self.setIconSize(QtCore.QSize(50, 50))
            self.setSpacing(0)
            self.setAlternatingRowColors(False)

            self.setProperty("mode", "icon")
            self.style().polish(self)

            self.verticalScrollBar().setSingleStep(30)

        elif mode == self.ListMode:
            # BUGFIX: was `elif self.ListMode:` which is always truthy
            # (ListMode == 1); compare against the requested mode.
            self.setProperty("mode", "list")
            self.style().polish(self)

            self.setViewMode(QtWidgets.QListView.ListMode)
            self.setResizeMode(QtWidgets.QListView.Adjust)
            self.setWrapping(False)
            self.setWordWrap(False)
            self.setIconSize(QtCore.QSize(20, 20))
            self.setGridSize(QtCore.QSize(100, 25))
            self.setSpacing(0)
            self.setAlternatingRowColors(False)

            # BUGFIX: QScrollBar.setSingleStep() takes an int; 33.33
            # raises TypeError on strict Qt bindings.
            self.verticalScrollBar().setSingleStep(33)

    def mousePressEvent(self, event):
        """Right click toggles icon/list mode; other buttons act normally."""
        if event.button() == QtCore.Qt.RightButton:
            self.set_mode(int(not self._mode))
        return super(IconListView, self).mousePressEvent(event)
flick = FlickCharm(parent=self) + flick.activateOn(view) + model = ProjectModel(self.dbcon) + model.hide_invisible = True + model.refresh() + view.setModel(model) + + layout.addWidget(view) + + view.clicked.connect(self.on_clicked) + + self.model = model + self.view = view + + def on_clicked(self, index): + if index.isValid(): + project_name = index.data(QtCore.Qt.DisplayRole) + self.project_clicked.emit(project_name) + + +class AssetsPanel(QtWidgets.QWidget): + """Assets page""" + back_clicked = QtCore.Signal() + + def __init__(self, dbcon, parent=None): + super(AssetsPanel, self).__init__(parent=parent) + + self.dbcon = dbcon + + # project bar + project_bar_widget = QtWidgets.QWidget(self) + + layout = QtWidgets.QHBoxLayout(project_bar_widget) + layout.setSpacing(4) + + btn_back_icon = qtawesome.icon("fa.angle-left", color="white") + btn_back = QtWidgets.QPushButton(project_bar_widget) + btn_back.setIcon(btn_back_icon) + btn_back.setFixedWidth(23) + btn_back.setFixedHeight(23) + + project_bar = ProjectBar(self.dbcon, project_bar_widget) + + layout.addWidget(btn_back) + layout.addWidget(project_bar) + + # assets + assets_proxy_widgets = QtWidgets.QWidget(self) + assets_proxy_widgets.setContentsMargins(0, 0, 0, 0) + assets_layout = QtWidgets.QVBoxLayout(assets_proxy_widgets) + assets_widget = AssetWidget( + dbcon=self.dbcon, parent=assets_proxy_widgets + ) + + # Make assets view flickable + flick = FlickCharm(parent=self) + flick.activateOn(assets_widget.view) + assets_widget.view.setVerticalScrollMode( + assets_widget.view.ScrollPerPixel + ) + assets_layout.addWidget(assets_widget) + + # tasks + tasks_widget = TasksWidget(self.dbcon, self) + body = QtWidgets.QSplitter() + body.setContentsMargins(0, 0, 0, 0) + body.setSizePolicy( + QtWidgets.QSizePolicy.Expanding, + QtWidgets.QSizePolicy.Expanding + ) + body.setOrientation(QtCore.Qt.Horizontal) + body.addWidget(assets_proxy_widgets) + body.addWidget(tasks_widget) + body.setStretchFactor(0, 100) + 
body.setStretchFactor(1, 65) + + # main layout + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(0) + layout.addWidget(project_bar_widget) + layout.addWidget(body) + + self.project_bar = project_bar + self.assets_widget = assets_widget + self.tasks_widget = tasks_widget + + # signals + project_bar.project_changed.connect(self.on_project_changed) + assets_widget.selection_changed.connect(self.on_asset_changed) + btn_back.clicked.connect(self.back_clicked) + + # Force initial refresh for the assets since we might not be + # trigging a Project switch if we click the project that was set + # prior to launching the Launcher + # todo: remove this behavior when AVALON_PROJECT is not required + assets_widget.refresh() + + def set_project(self, project): + before = self.project_bar.get_current_project() + self.project_bar.set_project(project) + if project == before: + # Force a refresh on the assets if the project hasn't changed + self.assets_widget.refresh() + + def on_project_changed(self): + project_name = self.project_bar.get_current_project() + self.dbcon.Session["AVALON_PROJECT"] = project_name + self.assets_widget.refresh() + + # Force asset change callback to ensure tasks are correctly reset + tools_lib.schedule(self.on_asset_changed, 0.05, channel="assets") + + def on_asset_changed(self): + """Callback on asset selection changed + + This updates the task view. 
+ + """ + + print("Asset changed..") + + asset_doc = self.assets_widget.get_active_asset_document() + if asset_doc: + self.tasks_widget.set_asset(asset_doc["_id"]) + else: + self.tasks_widget.set_asset(None) + + def get_current_session(self): + asset_doc = self.assets_widget.get_active_asset_document() + session = copy.deepcopy(self.dbcon.Session) + + # Clear some values that we are about to collect if available + session.pop("AVALON_SILO", None) + session.pop("AVALON_ASSET", None) + session.pop("AVALON_TASK", None) + + if asset_doc: + session["AVALON_ASSET"] = asset_doc["name"] + task_name = self.tasks_widget.get_current_task() + if task_name: + session["AVALON_TASK"] = task_name + + return session + + +class LauncherWindow(QtWidgets.QDialog): + """Launcher interface""" + + def __init__(self, parent=None): + super(LauncherWindow, self).__init__(parent) + + self.log = logging.getLogger( + ".".join([__name__, self.__class__.__name__]) + ) + self.dbcon = AvalonMongoDB() + + self.setWindowTitle("Launcher") + self.setFocusPolicy(QtCore.Qt.StrongFocus) + self.setAttribute(QtCore.Qt.WA_DeleteOnClose, False) + + icon = QtGui.QIcon(resources.pype_icon_filepath()) + self.setWindowIcon(icon) + self.setStyleSheet(style.load_stylesheet()) + + # Allow minimize + self.setWindowFlags( + self.windowFlags() | QtCore.Qt.WindowMinimizeButtonHint + ) + + project_panel = ProjectsPanel(self.dbcon) + asset_panel = AssetsPanel(self.dbcon) + + page_slider = SlidePageWidget() + page_slider.addWidget(project_panel) + page_slider.addWidget(asset_panel) + + # actions + actions_bar = ActionBar(self.dbcon, self) + + # statusbar + statusbar = QtWidgets.QWidget() + layout = QtWidgets.QHBoxLayout(statusbar) + + message_label = QtWidgets.QLabel() + message_label.setFixedHeight(15) + + action_history = ActionHistory() + action_history.setStatusTip("Show Action History") + + layout.addWidget(message_label) + layout.addWidget(action_history) + + # Vertically split Pages and Actions + body = 
QtWidgets.QSplitter() + body.setContentsMargins(0, 0, 0, 0) + body.setSizePolicy( + QtWidgets.QSizePolicy.Expanding, + QtWidgets.QSizePolicy.Expanding + ) + body.setOrientation(QtCore.Qt.Vertical) + body.addWidget(page_slider) + body.addWidget(actions_bar) + + # Set useful default sizes and set stretch + # for the pages so that is the only one that + # stretches on UI resize. + body.setStretchFactor(0, 10) + body.setSizes([580, 160]) + + layout = QtWidgets.QVBoxLayout(self) + layout.addWidget(body) + layout.addWidget(statusbar) + layout.setSpacing(0) + layout.setContentsMargins(0, 0, 0, 0) + + self.message_label = message_label + self.project_panel = project_panel + self.asset_panel = asset_panel + self.actions_bar = actions_bar + self.action_history = action_history + self.page_slider = page_slider + self._page = 0 + + # signals + actions_bar.action_clicked.connect(self.on_action_clicked) + action_history.trigger_history.connect(self.on_history_action) + project_panel.project_clicked.connect(self.on_project_clicked) + asset_panel.back_clicked.connect(self.on_back_clicked) + + # Add some signals to propagate from the asset panel + for signal in ( + asset_panel.project_bar.project_changed, + asset_panel.assets_widget.selection_changed, + asset_panel.tasks_widget.task_changed + ): + signal.connect(self.on_session_changed) + + # todo: Simplify this callback connection + asset_panel.project_bar.project_changed.connect( + self.on_project_changed + ) + + self.resize(520, 740) + + def set_page(self, page): + current = self.page_slider.currentIndex() + if current == page and self._page == page: + return + + direction = "right" if page > current else "left" + self._page = page + self.page_slider.slide_view(page, direction=direction) + + def refresh(self): + self.asset_panel.assets_widget.refresh() + self.refresh_actions() + + def echo(self, message): + self.message_label.setText(str(message)) + QtCore.QTimer.singleShot(5000, lambda: self.message_label.setText("")) + 
self.log.debug(message) + + def on_project_changed(self): + project_name = self.asset_panel.project_bar.get_current_project() + self.dbcon.Session["AVALON_PROJECT"] = project_name + + # Update the Action plug-ins available for the current project + self.actions_bar.model.discover() + + def on_session_changed(self): + self.refresh_actions() + + def refresh_actions(self, delay=1): + tools_lib.schedule(self.on_refresh_actions, delay) + + def on_project_clicked(self, project_name): + self.dbcon.Session["AVALON_PROJECT"] = project_name + # Refresh projects + self.asset_panel.project_bar.refresh() + self.asset_panel.set_project(project_name) + self.set_page(1) + self.refresh_actions() + + def on_back_clicked(self): + self.set_page(0) + self.project_panel.model.refresh() # Refresh projects + self.refresh_actions() + + def on_refresh_actions(self): + session = self.get_current_session() + self.actions_bar.model.set_session(session) + self.actions_bar.model.refresh() + + def on_action_clicked(self, action): + self.echo("Running action: {}".format(action.name)) + self.run_action(action) + + def on_history_action(self, history_data): + action, session = history_data + app = QtWidgets.QApplication.instance() + modifiers = app.keyboardModifiers() + + is_control_down = QtCore.Qt.ControlModifier & modifiers + if is_control_down: + # Revert to that "session" location + self.set_session(session) + else: + # User is holding control, rerun the action + self.run_action(action, session=session) + + def get_current_session(self): + if self._page == 1: + # Assets page + return self.asset_panel.get_current_session() + + session = copy.deepcopy(self.dbcon.Session) + + # Remove some potential invalid session values + # that we know are not set when not browsing in + # a project. 
+ session.pop("AVALON_PROJECT", None) + session.pop("AVALON_ASSET", None) + session.pop("AVALON_SILO", None) + session.pop("AVALON_TASK", None) + + return session + + def run_action(self, action, session=None): + if session is None: + session = self.get_current_session() + + # Add to history + self.action_history.add_action(action, session) + + # Process the Action + try: + action().process(session) + except Exception as exc: + self.log.warning("Action launch failed.", exc_info=True) + self.echo("Failed: {}".format(str(exc))) + + def set_session(self, session): + project_name = session.get("AVALON_PROJECT") + silo = session.get("AVALON_SILO") + asset_name = session.get("AVALON_ASSET") + task_name = session.get("AVALON_TASK") + + if project_name: + # Force the "in project" view. + self.page_slider.slide_view(1, direction="right") + index = self.asset_panel.project_bar.project_combobox.findText( + project_name + ) + if index >= 0: + self.asset_panel.project_bar.project_combobox.setCurrentIndex( + index + ) + + if silo: + self.asset_panel.assets_widget.set_silo(silo) + + if asset_name: + self.asset_panel.assets_widget.select_assets([asset_name]) + + if task_name: + # requires a forced refresh first + self.asset_panel.on_asset_changed() + self.asset_panel.tasks_widget.select_task(task_name) diff --git a/pype/tools/pyblish_pype/control.py b/pype/tools/pyblish_pype/control.py index 5138b5cc4c..0162848f2b 100644 --- a/pype/tools/pyblish_pype/control.py +++ b/pype/tools/pyblish_pype/control.py @@ -183,7 +183,18 @@ class Controller(QtCore.QObject): plugins = pyblish.api.discover() targets = pyblish.logic.registered_targets() or ["default"] - self.plugins = pyblish.logic.plugins_by_targets(plugins, targets) + plugins_by_targets = pyblish.logic.plugins_by_targets(plugins, targets) + + _plugins = [] + for plugin in plugins_by_targets: + # Skip plugin if is not optional and not active + if ( + not getattr(plugin, "optional", False) + and not getattr(plugin, "active", True) + ): 
+ continue + _plugins.append(plugin) + self.plugins = _plugins def on_published(self): if self.is_running: @@ -239,6 +250,8 @@ class Controller(QtCore.QObject): self.processing["current_group_order"] is not None and plugin.order > self.processing["current_group_order"] ): + current_group_order = self.processing["current_group_order"] + new_next_group_order = None new_current_group_order = self.processing["next_group_order"] if new_current_group_order is not None: @@ -259,12 +272,13 @@ class Controller(QtCore.QObject): if self.collect_state == 0: self.collect_state = 1 self.switch_toggleability.emit(True) - self.passed_group.emit(new_current_group_order) + self.passed_group.emit(current_group_order) yield IterationBreak("Collected") - self.passed_group.emit(new_current_group_order) - if self.errored: - yield IterationBreak("Last group errored") + else: + self.passed_group.emit(current_group_order) + if self.errored: + yield IterationBreak("Last group errored") if self.collect_state == 1: self.collect_state = 2 diff --git a/pype/tools/pyblish_pype/model.py b/pype/tools/pyblish_pype/model.py index 9086003258..3c9d4806ac 100644 --- a/pype/tools/pyblish_pype/model.py +++ b/pype/tools/pyblish_pype/model.py @@ -105,11 +105,10 @@ class IntentModel(QtGui.QStandardItemModel): intents_preset = ( config.get_presets() - .get("tools", {}) - .get("pyblish", {}) - .get("ui", {}) - .get("intents", {}) + .get("global", {}) + .get("intent", {}) ) + default = intents_preset.get("default") items = intents_preset.get("items", {}) if not items: @@ -441,9 +440,6 @@ class PluginModel(QtGui.QStandardItemModel): if label is None: label = "Other" - if order is None: - order = 99999999999999 - group_item = self.group_items.get(label) if not group_item: group_item = GroupItem(label, order=order) @@ -874,13 +870,18 @@ class ArtistProxy(QtCore.QAbstractProxyModel): self.rowsInserted.emit(self.parent(), new_from, new_to + 1) def _remove_rows(self, parent_row, from_row, to_row): - removed_rows = [] 
increment_num = self.mapping_from[parent_row][from_row] + + to_end_index = len(self.mapping_from[parent_row]) - 1 + for _idx in range(0, parent_row): + to_end_index += len(self.mapping_from[_idx]) + + removed_rows = 0 _emit_last = None for row_num in reversed(range(from_row, to_row + 1)): row = self.mapping_from[parent_row].pop(row_num) _emit_last = row - removed_rows.append(row) + removed_rows += 1 _emit_first = int(increment_num) mapping_from_len = len(self.mapping_from) @@ -900,11 +901,8 @@ class ArtistProxy(QtCore.QAbstractProxyModel): self.mapping_from[idx_i][idx_j] = increment_num increment_num += 1 - first_to_row = None - for row in removed_rows: - if first_to_row is None: - first_to_row = row - self.mapping_to.pop(row) + for idx in range(removed_rows): + self.mapping_to.pop(to_end_index - idx) return (_emit_first, _emit_last) diff --git a/pype/tools/pyblish_pype/util.py b/pype/tools/pyblish_pype/util.py index d10e7a002a..5a4dbfb250 100644 --- a/pype/tools/pyblish_pype/util.py +++ b/pype/tools/pyblish_pype/util.py @@ -309,3 +309,12 @@ class OrderGroups: return group_range return float(group_range) + + +def env_variable_to_bool(env_key): + value = os.environ.get(env_key) + if value is not None: + value = value.lower() + if value in ("true", "1", "yes"): + return True + return False diff --git a/pype/tools/pyblish_pype/window.py b/pype/tools/pyblish_pype/window.py index 7d79e0e26c..76f31e2442 100644 --- a/pype/tools/pyblish_pype/window.py +++ b/pype/tools/pyblish_pype/window.py @@ -55,6 +55,7 @@ class Window(QtWidgets.QDialog): super(Window, self).__init__(parent=parent) self._suspend_logs = False + # Use plastique style for specific ocations # TODO set style name via environment variable low_keys = { @@ -511,6 +512,10 @@ class Window(QtWidgets.QDialog): self.tabs[current_page].setChecked(True) + self.apply_log_suspend_value( + util.env_variable_to_bool("PYBLISH_SUSPEND_LOGS") + ) + # ------------------------------------------------------------------------- # 
# Event handlers @@ -633,8 +638,11 @@ class Window(QtWidgets.QDialog): self.footer_button_play.setEnabled(False) self.footer_button_stop.setEnabled(False) - def on_suspend_clicked(self): - self._suspend_logs = not self._suspend_logs + def on_suspend_clicked(self, value=None): + self.apply_log_suspend_value(not self._suspend_logs) + + def apply_log_suspend_value(self, value): + self._suspend_logs = value if self.state["current_page"] == "terminal": self.on_tab_changed("overview") @@ -771,10 +779,10 @@ class Window(QtWidgets.QDialog): for group_item in self.plugin_model.group_items.values(): # TODO check only plugins from the group - if ( - group_item.publish_states & GroupStates.HasFinished - or (order is not None and group_item.order >= order) - ): + if group_item.publish_states & GroupStates.HasFinished: + continue + + if order != group_item.order: continue if group_item.publish_states & GroupStates.HasError: diff --git a/pype/tools/settings/__init__.py b/pype/tools/settings/__init__.py new file mode 100644 index 0000000000..7df121f06e --- /dev/null +++ b/pype/tools/settings/__init__.py @@ -0,0 +1,7 @@ +from settings import style, MainWidget + + +__all__ = ( + "style", + "MainWidget" +) diff --git a/pype/tools/settings/__main__.py b/pype/tools/settings/__main__.py new file mode 100644 index 0000000000..55a38b3604 --- /dev/null +++ b/pype/tools/settings/__main__.py @@ -0,0 +1,18 @@ +import sys + +import settings +from Qt import QtWidgets, QtGui + + +if __name__ == "__main__": + app = QtWidgets.QApplication(sys.argv) + + stylesheet = settings.style.load_stylesheet() + app.setStyleSheet(stylesheet) + app.setWindowIcon(QtGui.QIcon(settings.style.app_icon_path())) + + develop = "-d" in sys.argv or "--develop" in sys.argv + widget = settings.MainWidget(develop) + widget.show() + + sys.exit(app.exec_()) diff --git a/pype/tools/settings/settings/__init__.py b/pype/tools/settings/settings/__init__.py new file mode 100644 index 0000000000..0c2fd6d4bb --- /dev/null +++ 
b/pype/tools/settings/settings/__init__.py @@ -0,0 +1,8 @@ +from . import style +from .widgets import MainWidget + + +__all__ = ( + "style", + "MainWidget" +) diff --git a/pype/tools/settings/settings/gui_schemas/projects_schema/0_project_gui_schema.json b/pype/tools/settings/settings/gui_schemas/projects_schema/0_project_gui_schema.json new file mode 100644 index 0000000000..fa7c6a366d --- /dev/null +++ b/pype/tools/settings/settings/gui_schemas/projects_schema/0_project_gui_schema.json @@ -0,0 +1,32 @@ +{ + "key": "project", + "type": "dict-invisible", + "children": [ + { + "type": "anatomy", + "key": "project_anatomy", + "children": [ + { + "type": "anatomy_roots", + "key": "roots", + "is_file": true + }, { + "type": "anatomy_templates", + "key": "templates", + "is_file": true + } + ] + }, { + "type": "dict-invisible", + "key": "project_settings", + "children": [ + { + "type": "schema", + "children": [ + "1_plugins_gui_schema" + ] + } + ] + } + ] +} diff --git a/pype/tools/settings/settings/gui_schemas/projects_schema/1_plugins_gui_schema.json b/pype/tools/settings/settings/gui_schemas/projects_schema/1_plugins_gui_schema.json new file mode 100644 index 0000000000..721b0924e8 --- /dev/null +++ b/pype/tools/settings/settings/gui_schemas/projects_schema/1_plugins_gui_schema.json @@ -0,0 +1,677 @@ +{ + "type": "dict", + "collapsable": true, + "key": "plugins", + "label": "Plugins", + "children": [ + { + "type": "dict", + "collapsable": true, + "key": "celaction", + "label": "CelAction", + "children": [ + { + "type": "dict", + "collapsable": true, + "key": "publish", + "label": "Publish plugins", + "is_file": true, + "children": [ + { + "type": "dict", + "collapsable": true, + "checkbox_key": "enabled", + "key": "ExtractCelactionDeadline", + "label": "ExtractCelactionDeadline", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, { + "type": "dict-form", + "children": [ + { + "type": "text", + "key": 
"deadline_department", + "label": "Deadline apartment" + }, { + "type": "number", + "key": "deadline_priority", + "label": "Deadline priority" + }, { + "type": "text", + "key": "deadline_pool", + "label": "Deadline pool" + }, { + "type": "text", + "key": "deadline_pool_secondary", + "label": "Deadline pool (secondary)" + }, { + "type": "text", + "key": "deadline_group", + "label": "Deadline Group" + }, { + "type": "number", + "key": "deadline_chunk_size", + "label": "Deadline Chunk size" + } + ] + } + ] + } + ] + } + ] + }, { + "type": "dict", + "collapsable": true, + "key": "ftrack", + "label": "Ftrack", + "children": [ + { + "type": "dict", + "collapsable": true, + "key": "publish", + "label": "Publish plugins", + "is_file": true, + "children": [ + { + "type": "dict", + "collapsable": true, + "checkbox_key": "enabled", + "key": "IntegrateFtrackNote", + "label": "IntegrateFtrackNote", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, { + "type": "text", + "key": "note_with_intent_template", + "label": "Note with intent template" + }, { + "type": "list", + "object_type": "text", + "key": "note_labels", + "label": "Note labels" + } + ] + } + ] + } + ] + }, { + "type": "dict", + "collapsable": true, + "key": "global", + "label": "Global", + "children": [ + { + "type": "dict", + "collapsable": true, + "key": "publish", + "label": "Publish plugins", + "is_file": true, + "children": [ + { + "type": "dict", + "collapsable": true, + "checkbox_key": "enabled", + "key": "IntegrateMasterVersion", + "label": "IntegrateMasterVersion", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + }, { + "type": "dict", + "collapsable": true, + "checkbox_key": "enabled", + "key": "ExtractJpegEXR", + "label": "ExtractJpegEXR", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, { + "type": "dict-invisible", + "key": 
"ffmpeg_args", + "children": [ + { + "type": "list", + "object_type": "text", + "key": "input", + "label": "FFmpeg input arguments" + }, { + "type": "list", + "object_type": "text", + "key": "output", + "label": "FFmpeg output arguments" + } + ] + } + ] + }, { + "type": "dict", + "collapsable": true, + "key": "ExtractReview", + "label": "ExtractReview", + "checkbox_key": "enabled", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, { + "type": "raw-json", + "key": "profiles", + "label": "Profiles" + } + ] + }, { + "type": "dict", + "collapsable": true, + "key": "ExtractBurnin", + "label": "ExtractBurnin", + "checkbox_key": "enabled", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, { + "type": "dict", + "collapsable": true, + "key": "options", + "label": "Burnin formating options", + "children": [ + { + "type": "number", + "key": "font_size", + "label": "Font size" + }, { + "type": "number", + "key": "opacity", + "label": "Font opacity" + }, { + "type": "number", + "key": "bg_opacity", + "label": "Background opacity" + }, { + "type": "number", + "key": "x_offset", + "label": "X Offset" + }, { + "type": "number", + "key": "y_offset", + "label": "Y Offset" + }, { + "type": "number", + "key": "bg_padding", + "label": "Padding aroung text" + } + ] + }, { + "type": "raw-json", + "key": "profiles", + "label": "Burnin profiles" + } + ] + }, { + "type": "dict", + "collapsable": true, + "key": "IntegrateAssetNew", + "label": "IntegrateAssetNew", + "is_group": true, + "children": [ + { + "type": "raw-json", + "key": "template_name_profiles", + "label": "template_name_profiles" + } + ] + }, { + "type": "dict", + "collapsable": true, + "key": "ProcessSubmittedJobOnFarm", + "label": "ProcessSubmittedJobOnFarm", + "checkbox_key": "enabled", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, { + "type": "text", + 
"key": "deadline_department", + "label": "Deadline department" + }, { + "type": "text", + "key": "deadline_pool", + "label": "Deadline Pool" + }, { + "type": "text", + "key": "deadline_group", + "label": "Deadline Group" + } + ] + } + ] + } + ] + }, { + "type": "dict", + "collapsable": true, + "key": "maya", + "label": "Maya", + "children": [ + { + "type": "dict", + "collapsable": true, + "key": "publish", + "label": "Publish plugins", + "is_file": true, + "children": [ + { + "type": "dict", + "collapsable": true, + "key": "ValidateModelName", + "label": "Validate Model Name", + "checkbox_key": "enabled", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, { + "type": "text", + "key": "material_file", + "label": "Material File" + }, { + "type": "text", + "key": "regex", + "label": "Validation regex" + } + ] + }, { + "type": "dict", + "collapsable": true, + "key": "ValidateAssemblyName", + "label": "Validate Assembly Name", + "checkbox_key": "enabled", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + }, { + "type": "dict", + "collapsable": true, + "key": "ValidateShaderName", + "label": "ValidateShaderName", + "checkbox_key": "enabled", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, { + "type": "text", + "key": "regex", + "label": "Validation regex" + } + ] + }, { + "type": "dict", + "collapsable": true, + "key": "ValidateMeshHasOverlappingUVs", + "label": "ValidateMeshHasOverlappingUVs", + "checkbox_key": "enabled", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + } + ] + }, { + "type": "raw-json", + "key": "workfile_build", + "label": "Workfile Build logic", + "is_file": true + } + ] + }, { + "type": "dict", + "collapsable": true, + "key": "nuke", + "label": "Nuke", + "children": [ + { + "type": "dict", + "collapsable": true, + 
"key": "create", + "label": "Create plugins", + "is_file": true, + "children": [ + { + "type": "dict", + "collapsable": false, + "key": "CreateWriteRender", + "label": "CreateWriteRender", + "is_group": true, + "children": [ + { + "type": "text", + "key": "fpath_template", + "label": "Path template" + } + ] + }, { + "type": "dict", + "collapsable": false, + "key": "CreateWritePrerender", + "label": "CreateWritePrerender", + "is_group": true, + "children": [ + { + "type": "text", + "key": "fpath_template", + "label": "Path template" + } + ] + } + ] + }, { + "type": "dict", + "collapsable": true, + "key": "publish", + "label": "Publish plugins", + "is_file": true, + "children": [ + { + "type": "dict", + "collapsable": true, + "checkbox_key": "enabled", + "key": "ExtractThumbnail", + "label": "ExtractThumbnail", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, { + "type": "raw-json", + "key": "nodes", + "label": "Nodes" + } + ] + }, { + "type": "dict", + "collapsable": true, + "checkbox_key": "enabled", + "key": "ValidateNukeWriteKnobs", + "label": "ValidateNukeWriteKnobs", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, { + "type": "raw-json", + "key": "knobs", + "label": "Knobs" + } + ] + }, { + "type": "dict", + "collapsable": true, + "checkbox_key": "enabled", + "key": "ExtractReviewDataLut", + "label": "ExtractReviewDataLut", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + }, { + "type": "dict", + "collapsable": true, + "checkbox_key": "enabled", + "key": "ExtractReviewDataMov", + "label": "ExtractReviewDataMov", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, { + "type": "boolean", + "key": "viewer_lut_raw", + "label": "Viewer LUT raw" + } + ] + }, { + "type": "dict", + "collapsable": true, + "key": "ExtractSlateFrame", + 
"label": "ExtractSlateFrame", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "viewer_lut_raw", + "label": "Viewer LUT raw" + } + ] + }, { + "type": "dict", + "collapsable": true, + "key": "NukeSubmitDeadline", + "label": "NukeSubmitDeadline", + "is_group": true, + "children": [ + { + "type": "number", + "key": "deadline_priority", + "label": "deadline_priority" + }, { + "type": "text", + "key": "deadline_pool", + "label": "deadline_pool" + }, { + "type": "text", + "key": "deadline_pool_secondary", + "label": "deadline_pool_secondary" + }, { + "type": "number", + "key": "deadline_chunk_size", + "label": "deadline_chunk_size" + } + ] + } + ] + }, { + "type": "raw-json", + "key": "workfile_build", + "label": "Workfile Build logic", + "is_file": true + } + ] + }, { + "type": "dict", + "collapsable": true, + "key": "nukestudio", + "label": "NukeStudio", + "children": [ + { + "type": "raw-json", + "collapsable": true, + "key": "filter", + "label": "Publish GUI Filters", + "is_file": true + }, + { + "type": "dict", + "collapsable": true, + "key": "publish", + "label": "Publish plugins", + "is_file": true, + "children": [ + { + "type": "dict", + "collapsable": true, + "checkbox_key": "enabled", + "key": "CollectInstanceVersion", + "label": "Collect Instance Version", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + }, { + "type": "dict", + "collapsable": true, + "checkbox_key": "enabled", + "key": "ExtractReviewCutUpVideo", + "label": "Extract Review Cut Up Video", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, { + "type": "list", + "object_type": "text", + "key": "tags_addition", + "label": "Tags addition" + } + ] + } + ] + } + ] + }, { + "type": "dict", + "collapsable": true, + "key": "resolve", + "label": "DaVinci Resolve", + "children": [ + { + "type": "dict", + "collapsable": true, + "key": "create", + "label": "Creator 
plugins", + "is_file": true, + "children": [ + { + "type": "dict", + "collapsable": true, + "key": "CreateShotClip", + "label": "Create Shot Clip", + "is_group": true, + "children": [ + { + "type": "text", + "key": "clipName", + "label": "Clip name template" + }, { + "type": "text", + "key": "folder", + "label": "Folder" + }, { + "type": "number", + "key": "steps", + "label": "Steps" + } + ] + } + + ] + } + ] + }, + { + "type": "dict", + "collapsable": true, + "key": "standalonepublisher", + "label": "Standalone Publisher", + "children": [ + { + "type": "dict", + "collapsable": true, + "key": "publish", + "label": "Publish plugins", + "is_file": true, + "children": [ + { + "type": "dict", + "collapsable": true, + "key": "ExtractThumbnailSP", + "label": "ExtractThumbnailSP", + "is_group": true, + "children": [ + { + "type": "dict", + "collapsable": false, + "key": "ffmpeg_args", + "label": "ffmpeg_args", + "children": [ + { + "type": "dict-form", + "children": [ + { + "type": "list", + "object_type": "text", + "key": "input", + "label": "input" + }, + { + "type": "list", + "object_type": "text", + "key": "output", + "label": "output" + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] +} diff --git a/pype/tools/settings/settings/gui_schemas/system_schema/0_system_gui_schema.json b/pype/tools/settings/settings/gui_schemas/system_schema/0_system_gui_schema.json new file mode 100644 index 0000000000..b16545111c --- /dev/null +++ b/pype/tools/settings/settings/gui_schemas/system_schema/0_system_gui_schema.json @@ -0,0 +1,34 @@ +{ + "key": "system", + "type": "dict-invisible", + "children": [ + { + "type": "dict-invisible", + "key": "global", + "children": [{ + "type": "schema", + "children": [ + "1_tray_items", + "1_applications_gui_schema", + "1_tools_gui_schema", + "1_intents_gui_schema" + ] + }] + }, { + "type": "dict-invisible", + "key": "muster", + "children": [{ + "type": "dict-modifiable", + "object_type": "number", + "input_modifiers": { + "minimum": 0, + "maximum": 
300 + }, + "is_group": true, + "key": "templates_mapping", + "label": "Muster - Templates mapping", + "is_file": true + }] + } + ] +} diff --git a/pype/tools/settings/settings/gui_schemas/system_schema/1_applications_gui_schema.json b/pype/tools/settings/settings/gui_schemas/system_schema/1_applications_gui_schema.json new file mode 100644 index 0000000000..48f8ecbd7c --- /dev/null +++ b/pype/tools/settings/settings/gui_schemas/system_schema/1_applications_gui_schema.json @@ -0,0 +1,144 @@ +{ + "key": "applications", + "type": "dict", + "label": "Applications", + "collapsable": true, + "is_group": true, + "is_file": true, + "children": [ + { + "type": "dict-form", + "children": [ + { + "type": "boolean", + "key": "blender_2.80", + "label": "Blender 2.80" + }, { + "type": "boolean", + "key": "blender_2.81", + "label": "Blender 2.81" + }, { + "type": "boolean", + "key": "blender_2.82", + "label": "Blender 2.82" + }, { + "type": "boolean", + "key": "blender_2.83", + "label": "Blender 2.83" + }, { + "type": "boolean", + "key": "celaction_local", + "label": "Celaction Local" + }, { + "type": "boolean", + "key": "celaction_remote", + "label": "Celaction Remote" + }, { + "type": "boolean", + "key": "harmony_17", + "label": "Harmony 17" + }, { + "type": "boolean", + "key": "maya_2017", + "label": "Autodest Maya 2017" + }, { + "type": "boolean", + "key": "maya_2018", + "label": "Autodest Maya 2018" + }, { + "type": "boolean", + "key": "maya_2019", + "label": "Autodest Maya 2019" + }, { + "type": "boolean", + "key": "maya_2020", + "label": "Autodest Maya 2020" + }, { + "key": "nuke_10.0", + "type": "boolean", + "label": "Nuke 10.0" + }, { + "type": "boolean", + "key": "nuke_11.2", + "label": "Nuke 11.2" + }, { + "type": "boolean", + "key": "nuke_11.3", + "label": "Nuke 11.3" + }, { + "type": "boolean", + "key": "nuke_12.0", + "label": "Nuke 12.0" + }, { + "type": "boolean", + "key": "nukex_10.0", + "label": "NukeX 10.0" + }, { + "type": "boolean", + "key": "nukex_11.2", + 
"label": "NukeX 11.2" + }, { + "type": "boolean", + "key": "nukex_11.3", + "label": "NukeX 11.3" + }, { + "type": "boolean", + "key": "nukex_12.0", + "label": "NukeX 12.0" + }, { + "type": "boolean", + "key": "nukestudio_10.0", + "label": "NukeStudio 10.0" + }, { + "type": "boolean", + "key": "nukestudio_11.2", + "label": "NukeStudio 11.2" + }, { + "type": "boolean", + "key": "nukestudio_11.3", + "label": "NukeStudio 11.3" + }, { + "type": "boolean", + "key": "nukestudio_12.0", + "label": "NukeStudio 12.0" + }, { + "type": "boolean", + "key": "houdini_16", + "label": "Houdini 16" + }, { + "type": "boolean", + "key": "houdini_16.5", + "label": "Houdini 16.5" + }, { + "type": "boolean", + "key": "houdini_17", + "label": "Houdini 17" + }, { + "type": "boolean", + "key": "houdini_18", + "label": "Houdini 18" + }, { + "type": "boolean", + "key": "premiere_2019", + "label": "Premiere 2019" + }, { + "type": "boolean", + "key": "premiere_2020", + "label": "Premiere 2020" + }, { + "type": "boolean", + "key": "resolve_16", + "label": "BM DaVinci Resolve 16" + }, { + "type": "boolean", + "key": "storyboardpro_7", + "label": "Storyboard Pro 7" + }, { + "type": "boolean", + "key": "unreal_4.24", + "label": "Unreal Editor 4.24" + } + ] + } + ] +} diff --git a/pype/tools/settings/settings/gui_schemas/system_schema/1_examples.json b/pype/tools/settings/settings/gui_schemas/system_schema/1_examples.json new file mode 100644 index 0000000000..a884dcb31e --- /dev/null +++ b/pype/tools/settings/settings/gui_schemas/system_schema/1_examples.json @@ -0,0 +1,234 @@ +{ + "key": "example_dict", + "label": "Examples", + "type": "dict", + "is_file": true, + "children": [ + { + "key": "dict_wrapper", + "type": "dict-invisible", + "children": [ + { + "type": "boolean", + "key": "bool", + "label": "Boolean checkbox" + }, { + "type": "label", + "label": "NOTE: This is label" + }, { + "type": "splitter" + }, { + "type": "number", + "key": "integer", + "label": "Integer", + "decimal": 0, + 
"minimum": 0, + "maximum": 10 + }, { + "type": "number", + "key": "float", + "label": "Float (2 decimals)", + "decimal": 2, + "minimum": -10, + "maximum": -5 + }, { + "type": "text", + "key": "singleline_text", + "label": "Singleline text" + }, { + "type": "text", + "key": "multiline_text", + "label": "Multiline text", + "multiline": true + }, { + "type": "raw-json", + "key": "raw_json", + "label": "Raw json input" + }, { + "type": "list", + "key": "list_item_of_multiline_texts", + "label": "List of multiline texts", + "object_type": "text", + "input_modifiers": { + "multiline": true + } + }, { + "type": "list", + "key": "list_item_of_floats", + "label": "List of floats", + "object_type": "number", + "input_modifiers": { + "decimal": 3, + "minimum": 1000, + "maximum": 2000 + } + }, { + "type": "dict-modifiable", + "key": "modifiable_dict_of_integers", + "label": "Modifiable dict of integers", + "object_type": "number", + "input_modifiers": { + "decimal": 0, + "minimum": 10, + "maximum": 100 + } + }, { + "type": "path-widget", + "key": "single_path_input", + "label": "Single path input", + "multiplatform": false, + "multipath": false + }, { + "type": "path-widget", + "key": "multi_path_input", + "label": "Multi path input", + "multiplatform": false, + "multipath": true + }, { + "type": "path-widget", + "key": "single_os_specific_path_input", + "label": "Single OS specific path input", + "multiplatform": true, + "multipath": false + }, { + "type": "path-widget", + "key": "multi_os_specific_path_input", + "label": "Multi OS specific path input", + "multiplatform": true, + "multipath": true + }, { + "key": "collapsable", + "type": "dict", + "label": "collapsable dictionary", + "collapsable": true, + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "_nothing", + "label": "Exmaple input" + } + ] + }, { + "key": "collapsable_expanded", + "type": "dict", + "label": "collapsable dictionary, expanded on creation", + "collapsable": true, + "collapsed": 
false, + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "_nothing", + "label": "Exmaple input" + } + ] + }, { + "key": "not_collapsable", + "type": "dict", + "label": "Not collapsable", + "collapsable": false, + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "_nothing", + "label": "Exmaple input" + } + ] + }, { + "key": "nested_dict_lvl1", + "type": "dict", + "label": "Nested dictionary (level 1)", + "children": [ + { + "key": "nested_dict_lvl2", + "type": "dict", + "label": "Nested dictionary (level 2)", + "is_group": true, + "children": [ + { + "key": "nested_dict_lvl3", + "type": "dict", + "label": "Nested dictionary (level 3)", + "children": [ + { + "type": "boolean", + "key": "_nothing", + "label": "Exmaple input" + } + ] + }, { + "key": "nested_dict_lvl3_2", + "type": "dict", + "label": "Nested dictionary (level 3) (2)", + "children": [ + { + "type": "text", + "key": "_nothing", + "label": "Exmaple input" + }, { + "type": "text", + "key": "_nothing2", + "label": "Exmaple input 2" + } + ] + } + ] + } + ] + }, { + "key": "form_examples", + "type": "dict", + "label": "Form examples", + "children": [ + { + "key": "inputs_without_form_example", + "type": "dict", + "label": "Inputs without form", + "children": [ + { + "type": "text", + "key": "_nothing_1", + "label": "Example label" + }, { + "type": "text", + "key": "_nothing_2", + "label": "Example label ####" + }, { + "type": "text", + "key": "_nothing_3", + "label": "Example label ########" + } + ] + }, { + "key": "inputs_with_form_example", + "type": "dict", + "label": "Inputs with form", + "children": [ + { + "type": "dict-form", + "children": [ + { + "type": "text", + "key": "_nothing_1", + "label": "Example label" + }, { + "type": "text", + "key": "_nothing_2", + "label": "Example label ####" + }, { + "type": "text", + "key": "_nothing_3", + "label": "Example label ########" + } + ] + } + ] + } + ] + } + ] + } + ] +} diff --git 
a/pype/tools/settings/settings/gui_schemas/system_schema/1_intents_gui_schema.json b/pype/tools/settings/settings/gui_schemas/system_schema/1_intents_gui_schema.json new file mode 100644 index 0000000000..0c252d2ca9 --- /dev/null +++ b/pype/tools/settings/settings/gui_schemas/system_schema/1_intents_gui_schema.json @@ -0,0 +1,20 @@ +{ + "key": "intent", + "type": "dict", + "label": "Intent Setting", + "collapsable": true, + "is_group": true, + "is_file": true, + "children": [ + { + "type": "dict-modifiable", + "object_type": "text", + "key": "items", + "label": "Intent Key/Label" + }, { + "type": "text", + "key": "default", + "label": "Default intent" + } + ] +} diff --git a/pype/tools/settings/settings/gui_schemas/system_schema/1_tools_gui_schema.json b/pype/tools/settings/settings/gui_schemas/system_schema/1_tools_gui_schema.json new file mode 100644 index 0000000000..d9540eeb3e --- /dev/null +++ b/pype/tools/settings/settings/gui_schemas/system_schema/1_tools_gui_schema.json @@ -0,0 +1,32 @@ +{ + "key": "tools", + "type": "dict", + "label": "Tools", + "collapsable": true, + "is_group": true, + "is_file": true, + "children": [ + { + "type": "dict-form", + "children": [ + { + "key": "mtoa_3.0.1", + "type": "boolean", + "label": "Arnold Maya 3.0.1" + }, { + "key": "mtoa_3.1.1", + "type": "boolean", + "label": "Arnold Maya 3.1.1" + }, { + "key": "mtoa_3.2.0", + "type": "boolean", + "label": "Arnold Maya 3.2.0" + }, { + "key": "yeti_2.1.2", + "type": "boolean", + "label": "Yeti 2.1.2" + } + ] + } + ] +} diff --git a/pype/tools/settings/settings/gui_schemas/system_schema/1_tray_items.json b/pype/tools/settings/settings/gui_schemas/system_schema/1_tray_items.json new file mode 100644 index 0000000000..6da974a415 --- /dev/null +++ b/pype/tools/settings/settings/gui_schemas/system_schema/1_tray_items.json @@ -0,0 +1,125 @@ +{ + "key": "tray_modules", + "type": "dict", + "label": "Modules", + "collapsable": true, + "is_group": true, + "is_file": true, + "children": [ + { 
+ "key": "item_usage", + "type": "dict-invisible", + "children": [ + { + "type": "dict-form", + "children": [ + { + "type": "boolean", + "key": "User settings", + "label": "User settings" + }, { + "type": "boolean", + "key": "Ftrack", + "label": "Ftrack" + }, { + "type": "boolean", + "key": "Muster", + "label": "Muster" + }, { + "type": "boolean", + "key": "Avalon", + "label": "Avalon" + }, { + "type": "boolean", + "key": "Clockify", + "label": "Clockify" + }, { + "type": "boolean", + "key": "Standalone Publish", + "label": "Standalone Publish" + }, { + "type": "boolean", + "key": "Logging", + "label": "Logging" + }, { + "type": "boolean", + "key": "Idle Manager", + "label": "Idle Manager" + }, { + "type": "boolean", + "key": "Timers Manager", + "label": "Timers Manager" + }, { + "type": "boolean", + "key": "Rest Api", + "label": "Rest Api" + }, { + "type": "boolean", + "key": "Adobe Communicator", + "label": "Adobe Communicator" + } + ] + } + ] + }, { + "key": "attributes", + "type": "dict-invisible", + "children": [ + { + "type": "dict", + "key": "Rest Api", + "label": "Rest Api", + "collapsable": true, + "children": [ + { + "type": "number", + "key": "default_port", + "label": "Default Port", + "minimum": 1, + "maximum": 65535 + }, { + "type": "list", + "object_type": "number", + "key": "exclude_ports", + "label": "Exclude ports", + "input_modifiers": { + "minimum": 1, + "maximum": 65535 + } + } + ] + }, { + "type": "dict", + "key": "Timers Manager", + "label": "Timers Manager", + "collapsable": true, + "children": [ + { + "type": "number", + "decimal": 2, + "key": "full_time", + "label": "Max idle time" + }, { + "type": "number", + "decimal": 2, + "key": "message_time", + "label": "When dialog will show" + } + ] + }, { + "type": "dict", + "key": "Clockify", + "label": "Clockify", + "collapsable": true, + "children": [ + { + "type": "text", + "key": "workspace_name", + "label": "Workspace name" + } + ] + } + ] + } + ] +} diff --git 
a/pype/tools/settings/settings/style/__init__.py b/pype/tools/settings/settings/style/__init__.py new file mode 100644 index 0000000000..a8f202d97b --- /dev/null +++ b/pype/tools/settings/settings/style/__init__.py @@ -0,0 +1,12 @@ +import os + + +def load_stylesheet(): + style_path = os.path.join(os.path.dirname(__file__), "style.css") + with open(style_path, "r") as style_file: + stylesheet = style_file.read() + return stylesheet + + +def app_icon_path(): + return os.path.join(os.path.dirname(__file__), "pype_icon.png") diff --git a/pype/tools/settings/settings/style/pype_icon.png b/pype/tools/settings/settings/style/pype_icon.png new file mode 100644 index 0000000000..bfacf6eeed Binary files /dev/null and b/pype/tools/settings/settings/style/pype_icon.png differ diff --git a/pype/tools/settings/settings/style/style.css b/pype/tools/settings/settings/style/style.css new file mode 100644 index 0000000000..38f69fef50 --- /dev/null +++ b/pype/tools/settings/settings/style/style.css @@ -0,0 +1,315 @@ +QWidget { + color: #bfccd6; + background-color: #293742; + font-size: 12px; + border-radius: 0px; +} + +QMenu { + border: 1px solid #555555; + background-color: #1d272f; +} + +QMenu::item { + padding: 5px 10px 5px 10px; + border-left: 5px solid #313131; +} + +QMenu::item:selected { + border-left-color: #61839e; + background-color: #222d37; +} +QCheckBox { + spacing: 0px; +} +QCheckBox::indicator {} +QCheckBox::indicator:focus {} + +QLineEdit, QSpinBox, QDoubleSpinBox, QPlainTextEdit, QTextEdit { + border: 1px solid #aaaaaa; + border-radius: 3px; + background-color: #1d272f; +} + +QLineEdit:disabled, QSpinBox:disabled, QDoubleSpinBox:disabled, QPlainTextEdit:disabled, QTextEdit:disabled, QPushButton:disabled { + background-color: #4e6474; +} + +QLineEdit:focus, QSpinBox:focus, QDoubleSpinBox:focus, QPlainTextEdit:focus, QTextEdit:focus { + border: 1px solid #ffffff; +} +QToolButton { + background: transparent; +} + +QLabel { + background: transparent; + color: #7390a5; 
+} +QLabel:hover {color: #839caf;} + +QLabel[state="studio"] {color: #bfccd6;} +QLabel[state="studio"]:hover {color: #ffffff;} +QLabel[state="modified"] {color: #137cbd;} +QLabel[state="modified"]:hover {color: #1798e8;} +QLabel[state="overriden-modified"] {color: #137cbd;} +QLabel[state="overriden-modified"]:hover {color: #1798e8;} +QLabel[state="overriden"] {color: #ff8c1a;} +QLabel[state="overriden"]:hover {color: #ffa64d;} +QLabel[state="invalid"] {color: #ad2e2e;} +QLabel[state="invalid"]:hover {color: #ad2e2e;} + + +QWidget[input-state="studio"] {border-color: #bfccd6;} +QWidget[input-state="modified"] {border-color: #137cbd;} +QWidget[input-state="overriden-modified"] {border-color: #137cbd;} +QWidget[input-state="overriden"] {border-color: #ff8c1a;} +QWidget[input-state="invalid"] {border-color: #ad2e2e;} + +QPushButton { + border: 1px solid #aaaaaa; + border-radius: 3px; + padding: 5px; +} +QPushButton:hover { + background-color: #31424e; +} +QPushButton[btn-type="tool-item"] { + border: 1px solid #bfccd6; + border-radius: 10px; +} + +QPushButton[btn-type="tool-item"]:hover { + border-color: #137cbd; + color: #137cbd; + background-color: transparent; +} + +QPushButton[btn-type="expand-toggle"] { + background: #1d272f; +} + +#GroupWidget { + border-bottom: 1px solid #1d272f; +} + +#ProjectListWidget QListView { + border: 1px solid #aaaaaa; + background: #1d272f; +} +#ProjectListWidget QLabel { + background: transparent; + font-weight: bold; +} + +#DictKey[state="studio"] {border-color: #bfccd6;} +#DictKey[state="modified"] {border-color: #137cbd;} +#DictKey[state="overriden"] {border-color: #00f;} +#DictKey[state="overriden-modified"] {border-color: #0f0;} +#DictKey[state="invalid"] {border-color: #ad2e2e;} + +#DictLabel { + font-weight: bold; +} + +#ContentWidget { + background-color: transparent; +} +#ContentWidget[content_state="hightlighted"] { + background-color: rgba(19, 26, 32, 15%); +} + +#SideLineWidget { + background-color: #31424e; + 
border-style: solid; + border-color: #3b4f5e; + border-left-width: 3px; + border-bottom-width: 0px; + border-right-width: 0px; + border-top-width: 0px; +} + +#SideLineWidget:hover { + border-color: #58768d; +} + +#SideLineWidget[state="child-studio"] {border-color: #455c6e;} +#SideLineWidget[state="child-studio"]:hover {border-color: #62839d;} + +#SideLineWidget[state="child-modified"] {border-color: #106aa2;} +#SideLineWidget[state="child-modified"]:hover {border-color: #137cbd;} + +#SideLineWidget[state="child-invalid"] {border-color: #ad2e2e;} +#SideLineWidget[state="child-invalid"]:hover {border-color: #c93636;} + +#SideLineWidget[state="child-overriden"] {border-color: #e67300;} +#SideLineWidget[state="child-overriden"]:hover {border-color: #ff8c1a;} + +#SideLineWidget[state="child-overriden-modified"] {border-color: #106aa2;} +#SideLineWidget[state="child-overriden-modified"]:hover {border-color: #137cbd;} + +#MainWidget { + background: #141a1f; +} + +#SplitterItem { + background-color: #1d272f; +} + +QTabWidget::pane { + border-top-style: none; +} + +QTabBar { + background: transparent; +} + +QTabBar::tab { + border-top-left-radius: 4px; + border-top-right-radius: 4px; + padding: 5px; +} + +QTabBar::tab:selected { + background: #293742; + border-color: #9B9B9B; + border-bottom-color: #C2C7CB; +} + +QTabBar::tab:!selected { + margin-top: 2px; + background: #1d272f; +} + +QTabBar::tab:!selected:hover { + background: #3b4f5e; +} + + + +QTabBar::tab:first:selected { + margin-left: 0; +} + +QTabBar::tab:last:selected { + margin-right: 0; +} + +QTabBar::tab:only-one { + margin: 0; +} + +QScrollBar:horizontal { + height: 15px; + margin: 3px 15px 3px 15px; + border: 1px transparent #1d272f; + border-radius: 4px; + background-color: #1d272f; +} + +QScrollBar::handle:horizontal { + background-color: #61839e; + min-width: 5px; + border-radius: 4px; +} + +QScrollBar::add-line:horizontal { + margin: 0px 3px 0px 3px; + border-image: 
url(:/qss_icons/rc/right_arrow_disabled.png); + width: 10px; + height: 10px; + subcontrol-position: right; + subcontrol-origin: margin; +} + +QScrollBar::sub-line:horizontal { + margin: 0px 3px 0px 3px; + border-image: url(:/qss_icons/rc/left_arrow_disabled.png); + height: 10px; + width: 10px; + subcontrol-position: left; + subcontrol-origin: margin; +} + +QScrollBar::add-line:horizontal:hover,QScrollBar::add-line:horizontal:on { + border-image: url(:/qss_icons/rc/right_arrow.png); + height: 10px; + width: 10px; + subcontrol-position: right; + subcontrol-origin: margin; +} + +QScrollBar::sub-line:horizontal:hover, QScrollBar::sub-line:horizontal:on { + border-image: url(:/qss_icons/rc/left_arrow.png); + height: 10px; + width: 10px; + subcontrol-position: left; + subcontrol-origin: margin; +} + +QScrollBar::up-arrow:horizontal, QScrollBar::down-arrow:horizontal { + background: none; +} + +QScrollBar::add-page:horizontal, QScrollBar::sub-page:horizontal { + background: none; +} + +QScrollBar:vertical { + background-color: #1d272f; + width: 15px; + margin: 15px 3px 15px 3px; + border: 1px transparent #1d272f; + border-radius: 4px; +} + +QScrollBar::handle:vertical { + background-color: #61839e; + min-height: 5px; + border-radius: 4px; +} + +QScrollBar::sub-line:vertical { + margin: 3px 0px 3px 0px; + border-image: url(:/qss_icons/rc/up_arrow_disabled.png); + height: 10px; + width: 10px; + subcontrol-position: top; + subcontrol-origin: margin; +} + +QScrollBar::add-line:vertical { + margin: 3px 0px 3px 0px; + border-image: url(:/qss_icons/rc/down_arrow_disabled.png); + height: 10px; + width: 10px; + subcontrol-position: bottom; + subcontrol-origin: margin; +} + +QScrollBar::sub-line:vertical:hover,QScrollBar::sub-line:vertical:on { + + border-image: url(:/qss_icons/rc/up_arrow.png); + height: 10px; + width: 10px; + subcontrol-position: top; + subcontrol-origin: margin; +} + + +QScrollBar::add-line:vertical:hover, QScrollBar::add-line:vertical:on { + border-image: 
url(:/qss_icons/rc/down_arrow.png); + height: 10px; + width: 10px; + subcontrol-position: bottom; + subcontrol-origin: margin; +} + +QScrollBar::up-arrow:vertical, QScrollBar::down-arrow:vertical { + background: none; +} + + +QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { + background: none; +} diff --git a/pype/tools/settings/settings/widgets/__init__.py b/pype/tools/settings/settings/widgets/__init__.py new file mode 100644 index 0000000000..361fd9d23d --- /dev/null +++ b/pype/tools/settings/settings/widgets/__init__.py @@ -0,0 +1,9 @@ +from .window import MainWidget +from . import item_types +from . import anatomy_types + +__all__ = [ + "MainWidget", + "item_types", + "anatomy_types" +] diff --git a/pype/tools/settings/settings/widgets/anatomy_types.py b/pype/tools/settings/settings/widgets/anatomy_types.py new file mode 100644 index 0000000000..6d7b3292ce --- /dev/null +++ b/pype/tools/settings/settings/widgets/anatomy_types.py @@ -0,0 +1,758 @@ +from Qt import QtWidgets, QtCore +from .widgets import ExpandingWidget +from .item_types import ( + SettingObject, ModifiableDict, PathWidget, RawJsonWidget +) +from .lib import NOT_SET, TypeToKlass, CHILD_OFFSET, METADATA_KEY + + +class AnatomyWidget(QtWidgets.QWidget, SettingObject): + value_changed = QtCore.Signal(object) + template_keys = ( + "project[name]", + "project[code]", + "asset", + "task", + "subset", + "family", + "version", + "ext", + "representation" + ) + default_exmaple_data = { + "project": { + "name": "ProjectPype", + "code": "pp", + }, + "asset": "sq01sh0010", + "task": "compositing", + "subset": "renderMain", + "family": "render", + "version": 1, + "ext": ".png", + "representation": "png" + } + + def __init__( + self, input_data, parent, as_widget=False, label_widget=None + ): + if as_widget: + raise TypeError( + "`AnatomyWidget` does not allow to be used as widget." 
+ ) + super(AnatomyWidget, self).__init__(parent) + self.setObjectName("AnatomyWidget") + + self.initial_attributes(input_data, parent, as_widget) + + self.key = input_data["key"] + + children_data = input_data["children"] + roots_input_data = {} + templates_input_data = {} + for child in children_data: + if child["type"] == "anatomy_roots": + roots_input_data = child + elif child["type"] == "anatomy_templates": + templates_input_data = child + + self.root_widget = RootsWidget(roots_input_data, self) + self.templates_widget = TemplatesWidget(templates_input_data, self) + + self.setAttribute(QtCore.Qt.WA_StyledBackground) + + body_widget = ExpandingWidget("Anatomy", self) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(0) + layout.addWidget(body_widget) + + content_widget = QtWidgets.QWidget(body_widget) + content_layout = QtWidgets.QVBoxLayout(content_widget) + content_layout.setContentsMargins(CHILD_OFFSET, 5, 0, 0) + content_layout.setSpacing(5) + + content_layout.addWidget(self.root_widget) + content_layout.addWidget(self.templates_widget) + + body_widget.set_content_widget(content_widget) + + self.body_widget = body_widget + self.label_widget = body_widget.label_widget + + self.root_widget.value_changed.connect(self._on_value_change) + self.templates_widget.value_changed.connect(self._on_value_change) + + def update_default_values(self, parent_values): + self._state = None + self._child_state = None + + if isinstance(parent_values, dict): + value = parent_values.get(self.key, NOT_SET) + else: + value = NOT_SET + + self.root_widget.update_default_values(value) + self.templates_widget.update_default_values(value) + + def update_studio_values(self, parent_values): + self._state = None + self._child_state = None + + if isinstance(parent_values, dict): + value = parent_values.get(self.key, NOT_SET) + else: + value = NOT_SET + + self.root_widget.update_studio_values(value) + 
self.templates_widget.update_studio_values(value) + + def apply_overrides(self, parent_values): + # Make sure this is set to False + self._state = None + self._child_state = None + + value = NOT_SET + if parent_values is not NOT_SET: + value = parent_values.get(self.key, value) + + self.root_widget.apply_overrides(value) + self.templates_widget.apply_overrides(value) + + def set_value(self, value): + raise TypeError("AnatomyWidget does not allow to use `set_value`") + + def _on_value_change(self, item=None): + if self.ignore_value_changes: + return + + self.hierarchical_style_update() + + self.value_changed.emit(self) + + def update_style(self, is_overriden=None): + child_has_studio_override = self.child_has_studio_override + child_modified = self.child_modified + child_invalid = self.child_invalid + child_state = self.style_state( + child_has_studio_override, + child_invalid, + self.child_overriden, + child_modified + ) + if child_state: + child_state = "child-{}".format(child_state) + + if child_state != self._child_state: + self.body_widget.side_line_widget.setProperty("state", child_state) + self.body_widget.side_line_widget.style().polish( + self.body_widget.side_line_widget + ) + self._child_state = child_state + + def hierarchical_style_update(self): + self.root_widget.hierarchical_style_update() + self.templates_widget.hierarchical_style_update() + self.update_style() + + @property + def child_has_studio_override(self): + return ( + self.root_widget.child_has_studio_override + or self.templates_widget.child_has_studio_override + ) + + @property + def child_modified(self): + return ( + self.root_widget.child_modified + or self.templates_widget.child_modified + ) + + @property + def child_overriden(self): + return ( + self.root_widget.child_overriden + or self.templates_widget.child_overriden + ) + + @property + def child_invalid(self): + return ( + self.root_widget.child_invalid + or self.templates_widget.child_invalid + ) + + def set_as_overriden(self): + 
self.root_widget.set_as_overriden() + self.templates_widget.set_as_overriden() + + def remove_overrides(self): + self.root_widget.remove_overrides() + self.templates_widget.remove_overrides() + + def reset_to_pype_default(self): + self.root_widget.reset_to_pype_default() + self.templates_widget.reset_to_pype_default() + + def set_studio_default(self): + self.root_widget.set_studio_default() + self.templates_widget.set_studio_default() + + def discard_changes(self): + self.root_widget.discard_changes() + self.templates_widget.discard_changes() + + def overrides(self): + if self.child_overriden: + return self.config_value(), True + return NOT_SET, False + + def item_value(self): + output = {} + output.update(self.root_widget.config_value()) + output.update(self.templates_widget.config_value()) + return output + + def studio_overrides(self): + if ( + self.root_widget.child_has_studio_override + or self.templates_widget.child_has_studio_override + ): + groups = [self.root_widget.key, self.templates_widget.key] + value = self.config_value() + value[self.key][METADATA_KEY] = {"groups": groups} + return value, True + return NOT_SET, False + + def config_value(self): + return {self.key: self.item_value()} + + +class RootsWidget(QtWidgets.QWidget, SettingObject): + value_changed = QtCore.Signal(object) + + def __init__(self, input_data, parent): + super(RootsWidget, self).__init__(parent) + self.setObjectName("RootsWidget") + + input_data["is_group"] = True + self.initial_attributes(input_data, parent, False) + + self.key = input_data["key"] + + self._multiroot_state = None + self.default_is_multiroot = False + self.studio_is_multiroot = False + self.was_multiroot = NOT_SET + + checkbox_widget = QtWidgets.QWidget(self) + multiroot_label = QtWidgets.QLabel( + "Use multiple roots", checkbox_widget + ) + multiroot_checkbox = QtWidgets.QCheckBox(checkbox_widget) + + checkbox_layout = QtWidgets.QHBoxLayout(checkbox_widget) + checkbox_layout.addWidget(multiroot_label, 0) + 
checkbox_layout.addWidget(multiroot_checkbox, 1) + + body_widget = ExpandingWidget("Roots", self) + content_widget = QtWidgets.QWidget(body_widget) + + path_widget_data = { + "key": self.key, + "multipath": False, + "multiplatform": True + } + singleroot_widget = PathWidget( + path_widget_data, self, + as_widget=True, parent_widget=content_widget + ) + multiroot_data = { + "key": self.key, + "object_type": "path-widget", + "expandable": False, + "input_modifiers": { + "multiplatform": True + } + } + multiroot_widget = ModifiableDict( + multiroot_data, self, + as_widget=True, parent_widget=content_widget + ) + + content_layout = QtWidgets.QVBoxLayout(content_widget) + content_layout.setContentsMargins(0, 0, 0, 0) + content_layout.addWidget(checkbox_widget) + content_layout.addWidget(singleroot_widget) + content_layout.addWidget(multiroot_widget) + + body_widget.set_content_widget(content_widget) + self.label_widget = body_widget.label_widget + + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.addWidget(body_widget) + + self.body_widget = body_widget + self.multiroot_label = multiroot_label + self.multiroot_checkbox = multiroot_checkbox + self.singleroot_widget = singleroot_widget + self.multiroot_widget = multiroot_widget + + multiroot_checkbox.stateChanged.connect(self._on_multiroot_checkbox) + singleroot_widget.value_changed.connect(self._on_value_change) + multiroot_widget.value_changed.connect(self._on_value_change) + + self._on_multiroot_checkbox() + + @property + def is_multiroot(self): + return self.multiroot_checkbox.isChecked() + + def update_default_values(self, parent_values): + self._state = None + self._multiroot_state = None + self._is_modified = False + + if isinstance(parent_values, dict): + value = parent_values.get(self.key, NOT_SET) + else: + value = NOT_SET + + is_multiroot = False + if isinstance(value, dict): + for _value in value.values(): + if isinstance(_value, dict): + is_multiroot = True 
+ break + + self.default_is_multiroot = is_multiroot + self.was_multiroot = is_multiroot + self.set_multiroot(is_multiroot) + + self._has_studio_override = False + self._had_studio_override = False + if is_multiroot: + for _value in value.values(): + singleroot_value = _value + break + + multiroot_value = value + else: + singleroot_value = value + multiroot_value = {"": value} + + self.singleroot_widget.update_default_values(singleroot_value) + self.multiroot_widget.update_default_values(multiroot_value) + + def update_studio_values(self, parent_values): + self._state = None + self._multiroot_state = None + self._is_modified = False + + if isinstance(parent_values, dict): + value = parent_values.get(self.key, NOT_SET) + else: + value = NOT_SET + + if value is NOT_SET: + is_multiroot = self.default_is_multiroot + self.studio_is_multiroot = NOT_SET + self._has_studio_override = False + self._had_studio_override = False + else: + is_multiroot = False + if isinstance(value, dict): + for _value in value.values(): + if isinstance(_value, dict): + is_multiroot = True + break + self.studio_is_multiroot = is_multiroot + self._has_studio_override = True + self._had_studio_override = True + + self.was_multiroot = is_multiroot + self.set_multiroot(is_multiroot) + + if is_multiroot: + self.multiroot_widget.update_studio_values(value) + else: + self.singleroot_widget.update_studio_values(value) + + def apply_overrides(self, parent_values): + # Make sure this is set to False + self._state = None + self._multiroot_state = None + self._is_modified = False + + value = NOT_SET + if parent_values is not NOT_SET: + value = parent_values.get(self.key, value) + + if value is NOT_SET: + is_multiroot = self.studio_is_multiroot + if is_multiroot is NOT_SET: + is_multiroot = self.default_is_multiroot + else: + is_multiroot = False + if isinstance(value, dict): + for _value in value.values(): + if isinstance(_value, dict): + is_multiroot = True + break + + self.was_multiroot = is_multiroot + 
self.set_multiroot(is_multiroot) + + if is_multiroot: + self._is_overriden = value is not NOT_SET + self._was_overriden = bool(self._is_overriden) + self.multiroot_widget.apply_overrides(value) + else: + self._is_overriden = value is not NOT_SET + self._was_overriden = bool(self._is_overriden) + self.singleroot_widget.apply_overrides(value) + + def hierarchical_style_update(self): + self.singleroot_widget.hierarchical_style_update() + self.multiroot_widget.hierarchical_style_update() + self.update_style() + + def update_style(self): + multiroot_state = self.style_state( + self.has_studio_override, + False, + False, + self.was_multiroot != self.is_multiroot + ) + if multiroot_state != self._multiroot_state: + self.multiroot_label.setProperty("state", multiroot_state) + self.multiroot_label.style().polish(self.multiroot_label) + self._multiroot_state = multiroot_state + + state = self.style_state( + self.has_studio_override, + self.child_invalid, + self.is_overriden, + self.is_modified + ) + if self._state == state: + return + + if state: + child_state = "child-{}".format(state) + else: + child_state = "" + + self.body_widget.side_line_widget.setProperty("state", child_state) + self.body_widget.side_line_widget.style().polish( + self.body_widget.side_line_widget + ) + + self.label_widget.setProperty("state", state) + self.label_widget.style().polish(self.label_widget) + + self._state = state + + def _on_multiroot_checkbox(self): + self.set_multiroot() + + def _on_value_change(self, item=None): + if self.ignore_value_changes: + return + + if item is not None and ( + (self.is_multiroot and item != self.multiroot_widget) + or (not self.is_multiroot and item != self.singleroot_widget) + ): + return + + if self.is_group and self.is_overidable: + self._is_overriden = True + + self._is_modified = ( + self.was_multiroot != self.is_multiroot + or self.child_modified + ) + + self.update_style() + + self.value_changed.emit(self) + + def _from_single_to_multi(self): + 
single_value = self.singleroot_widget.item_value() + mutli_value = self.multiroot_widget.item_value() + first_key = None + for key in mutli_value.keys(): + first_key = key + break + + if first_key is None: + first_key = "" + + mutli_value[first_key] = single_value + + self.multiroot_widget.set_value(mutli_value) + + def _from_multi_to_single(self): + mutli_value = self.multiroot_widget.all_item_values() + for value in mutli_value.values(): + single_value = value + break + + self.singleroot_widget.set_value(single_value) + + def set_multiroot(self, is_multiroot=None): + if is_multiroot is None: + is_multiroot = self.is_multiroot + if is_multiroot: + self._from_single_to_multi() + else: + self._from_multi_to_single() + + if is_multiroot != self.is_multiroot: + self.multiroot_checkbox.setChecked(is_multiroot) + + self.singleroot_widget.setVisible(not is_multiroot) + self.multiroot_widget.setVisible(is_multiroot) + + self._on_value_change() + + @property + def child_has_studio_override(self): + if self.is_multiroot: + return self.multiroot_widget.has_studio_override + else: + return self.singleroot_widget.has_studio_override + + @property + def child_modified(self): + if self.is_multiroot: + return self.multiroot_widget.child_modified + else: + return self.singleroot_widget.child_modified + + @property + def child_overriden(self): + if self.is_multiroot: + return ( + self.multiroot_widget.is_overriden + or self.multiroot_widget.child_overriden + ) + else: + return ( + self.singleroot_widget.is_overriden + or self.singleroot_widget.child_overriden + ) + + @property + def child_invalid(self): + if self.is_multiroot: + return self.multiroot_widget.child_invalid + else: + return self.singleroot_widget.child_invalid + + def remove_overrides(self): + self._is_overriden = False + self._is_modified = False + + if self.studio_is_multiroot is NOT_SET: + self.set_multiroot(self.default_is_multiroot) + else: + self.set_multiroot(self.studio_is_multiroot) + + if self.is_multiroot: 
+ self.multiroot_widget.remove_overrides() + else: + self.singleroot_widget.remove_overrides() + + def reset_to_pype_default(self): + self.set_multiroot(self.default_is_multiroot) + if self.is_multiroot: + self.multiroot_widget.reset_to_pype_default() + else: + self.singleroot_widget.reset_to_pype_default() + self._has_studio_override = False + + def set_studio_default(self): + if self.is_multiroot: + self.multiroot_widget.reset_to_pype_default() + else: + self.singleroot_widget.reset_to_pype_default() + self._has_studio_override = True + + def discard_changes(self): + self._is_overriden = self._was_overriden + self._is_modified = False + if self._is_overriden: + self.set_multiroot(self.was_multiroot) + else: + if self.studio_is_multiroot is NOT_SET: + self.set_multiroot(self.default_is_multiroot) + else: + self.set_multiroot(self.studio_is_multiroot) + + if self.is_multiroot: + self.multiroot_widget.discard_changes() + else: + self.singleroot_widget.discard_changes() + + self._is_modified = self.child_modified + self._has_studio_override = self._had_studio_override + + def set_as_overriden(self): + self._is_overriden = True + self.singleroot_widget.set_as_overriden() + self.multiroot_widget.set_as_overriden() + + def item_value(self): + if self.is_multiroot: + return self.multiroot_widget.item_value() + else: + return self.singleroot_widget.item_value() + + def config_value(self): + return {self.key: self.item_value()} + + +class TemplatesWidget(QtWidgets.QWidget, SettingObject): + value_changed = QtCore.Signal(object) + + def __init__(self, input_data, parent): + super(TemplatesWidget, self).__init__(parent) + + input_data["is_group"] = True + self.initial_attributes(input_data, parent, False) + + self.key = input_data["key"] + + body_widget = ExpandingWidget("Templates", self) + content_widget = QtWidgets.QWidget(body_widget) + body_widget.set_content_widget(content_widget) + content_layout = QtWidgets.QVBoxLayout(content_widget) + + template_input_data = { + 
"key": self.key + } + self.body_widget = body_widget + self.label_widget = body_widget.label_widget + self.value_input = RawJsonWidget( + template_input_data, self, + label_widget=self.label_widget + ) + content_layout.addWidget(self.value_input) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(0) + + layout.addWidget(body_widget) + + self.value_input.value_changed.connect(self._on_value_change) + + def _on_value_change(self, item): + self.update_style() + + self.value_changed.emit(self) + + def update_default_values(self, values): + self._state = None + self.value_input.update_default_values(values) + + def update_studio_values(self, values): + self._state = None + self.value_input.update_studio_values(values) + + def apply_overrides(self, parent_values): + self._state = None + self.value_input.apply_overrides(parent_values) + + def hierarchical_style_update(self): + self.value_input.hierarchical_style_update() + self.update_style() + + def update_style(self): + state = self.style_state( + self.has_studio_override, + self.child_invalid, + self.child_overriden, + self.child_modified + ) + if self._state == state: + return + + if state: + child_state = "child-{}".format(state) + else: + child_state = "" + + self.body_widget.side_line_widget.setProperty("state", child_state) + self.body_widget.side_line_widget.style().polish( + self.body_widget.side_line_widget + ) + + self.label_widget.setProperty("state", state) + self.label_widget.style().polish(self.label_widget) + + self._state = state + + @property + def is_modified(self): + return self.value_input.is_modified + + @property + def is_overriden(self): + return self._is_overriden + + @property + def has_studio_override(self): + return self.value_input._has_studio_override + + @property + def child_has_studio_override(self): + return self.value_input.child_has_studio_override + + @property + def child_modified(self): + return self.value_input.child_modified + + 
@property + def child_overriden(self): + return self.value_input.child_overriden + + @property + def child_invalid(self): + return self.value_input.child_invalid + + def remove_overrides(self): + self.value_input.remove_overrides() + + def reset_to_pype_default(self): + self.value_input.reset_to_pype_default() + + def set_studio_default(self): + self.value_input.set_studio_default() + + def discard_changes(self): + self.value_input.discard_changes() + + def set_as_overriden(self): + self.value_input.set_as_overriden() + + def overrides(self): + if not self.child_overriden: + return NOT_SET, False + return self.config_value(), True + + def item_value(self): + return self.value_input.item_value() + + def config_value(self): + return self.value_input.config_value() + + +TypeToKlass.types["anatomy"] = AnatomyWidget +TypeToKlass.types["anatomy_roots"] = AnatomyWidget +TypeToKlass.types["anatomy_templates"] = AnatomyWidget diff --git a/pype/tools/settings/settings/widgets/base.py b/pype/tools/settings/settings/widgets/base.py new file mode 100644 index 0000000000..dbcc380daf --- /dev/null +++ b/pype/tools/settings/settings/widgets/base.py @@ -0,0 +1,735 @@ +import os +import json +from Qt import QtWidgets, QtCore, QtGui +from pype.settings.lib import ( + SYSTEM_SETTINGS_KEY, + SYSTEM_SETTINGS_PATH, + PROJECT_SETTINGS_KEY, + PROJECT_SETTINGS_PATH, + PROJECT_ANATOMY_KEY, + PROJECT_ANATOMY_PATH, + + DEFAULTS_DIR, + + reset_default_settings, + default_settings, + + studio_system_settings, + studio_project_settings, + studio_project_anatomy, + + project_settings_overrides, + project_anatomy_overrides, + + path_to_project_overrides, + path_to_project_anatomy +) +from .widgets import UnsavedChangesDialog +from . 
import lib +from avalon import io +from avalon.vendor import qtawesome + + +class SystemWidget(QtWidgets.QWidget): + is_overidable = False + has_studio_override = _has_studio_override = False + is_overriden = _is_overriden = False + is_group = _is_group = False + any_parent_is_group = _any_parent_is_group = False + + def __init__(self, develop_mode, parent=None): + super(SystemWidget, self).__init__(parent) + + self.develop_mode = develop_mode + self._hide_studio_overrides = False + self._ignore_value_changes = False + + self.input_fields = [] + + scroll_widget = QtWidgets.QScrollArea(self) + scroll_widget.setObjectName("GroupWidget") + content_widget = QtWidgets.QWidget(scroll_widget) + content_layout = QtWidgets.QVBoxLayout(content_widget) + content_layout.setContentsMargins(3, 3, 3, 3) + content_layout.setSpacing(0) + content_layout.setAlignment(QtCore.Qt.AlignTop) + content_widget.setLayout(content_layout) + + scroll_widget.setWidgetResizable(True) + scroll_widget.setWidget(content_widget) + + self.scroll_widget = scroll_widget + self.content_layout = content_layout + self.content_widget = content_widget + + footer_widget = QtWidgets.QWidget() + footer_layout = QtWidgets.QHBoxLayout(footer_widget) + + if self.develop_mode: + save_as_default_btn = QtWidgets.QPushButton("Save as Default") + save_as_default_btn.clicked.connect(self._save_as_defaults) + + refresh_icon = qtawesome.icon("fa.refresh", color="white") + refresh_button = QtWidgets.QPushButton() + refresh_button.setIcon(refresh_icon) + refresh_button.clicked.connect(self._on_refresh) + + hide_studio_overrides = QtWidgets.QCheckBox() + hide_studio_overrides.setChecked(self._hide_studio_overrides) + hide_studio_overrides.stateChanged.connect( + self._on_hide_studio_overrides + ) + + hide_studio_overrides_widget = QtWidgets.QWidget() + hide_studio_overrides_layout = QtWidgets.QHBoxLayout( + hide_studio_overrides_widget + ) + _label_widget = QtWidgets.QLabel( + "Hide studio overrides", 
hide_studio_overrides_widget + ) + hide_studio_overrides_layout.addWidget(_label_widget) + hide_studio_overrides_layout.addWidget(hide_studio_overrides) + + footer_layout.addWidget(save_as_default_btn, 0) + footer_layout.addWidget(refresh_button, 0) + footer_layout.addWidget(hide_studio_overrides_widget, 0) + + save_btn = QtWidgets.QPushButton("Save") + spacer_widget = QtWidgets.QWidget() + footer_layout.addWidget(spacer_widget, 1) + footer_layout.addWidget(save_btn, 0) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(0) + self.setLayout(layout) + + layout.addWidget(scroll_widget, 1) + layout.addWidget(footer_widget, 0) + + save_btn.clicked.connect(self._save) + + self.reset() + + def any_parent_overriden(self): + return False + + @property + def ignore_value_changes(self): + return self._ignore_value_changes + + @ignore_value_changes.setter + def ignore_value_changes(self, value): + self._ignore_value_changes = value + if value is False: + self.hierarchical_style_update() + + def hierarchical_style_update(self): + for input_field in self.input_fields: + input_field.hierarchical_style_update() + + def reset(self): + reset_default_settings() + + if self.content_layout.count() != 0: + for widget in self.input_fields: + self.content_layout.removeWidget(widget) + widget.deleteLater() + self.input_fields.clear() + + self.schema = lib.gui_schema("system_schema", "0_system_gui_schema") + self.keys = self.schema.get("keys", []) + self.add_children_gui(self.schema) + self._update_values() + self.hierarchical_style_update() + + def _save(self): + has_invalid = False + for item in self.input_fields: + if item.child_invalid: + has_invalid = True + + if has_invalid: + invalid_items = [] + for item in self.input_fields: + invalid_items.extend(item.get_invalid()) + msg_box = QtWidgets.QMessageBox( + QtWidgets.QMessageBox.Warning, + "Invalid input", + "There is invalid value in one of inputs." 
+ " Please lead red color and fix them." + ) + msg_box.setStandardButtons(QtWidgets.QMessageBox.Ok) + msg_box.exec_() + + first_invalid_item = invalid_items[0] + self.scroll_widget.ensureWidgetVisible(first_invalid_item) + if first_invalid_item.isVisible(): + first_invalid_item.setFocus(True) + return + + _data = {} + for input_field in self.input_fields: + value, is_group = input_field.studio_overrides() + if value is not lib.NOT_SET: + _data.update(value) + + values = lib.convert_gui_data_to_overrides(_data.get("system", {})) + + dirpath = os.path.dirname(SYSTEM_SETTINGS_PATH) + if not os.path.exists(dirpath): + os.makedirs(dirpath) + + print("Saving data to:", SYSTEM_SETTINGS_PATH) + with open(SYSTEM_SETTINGS_PATH, "w") as file_stream: + json.dump(values, file_stream, indent=4) + + self._update_values() + + def _on_refresh(self): + self.reset() + + def _on_hide_studio_overrides(self, state): + self._hide_studio_overrides = (state == QtCore.Qt.Checked) + self._update_values() + self.hierarchical_style_update() + + def _save_as_defaults(self): + output = {} + for item in self.input_fields: + output.update(item.config_value()) + + for key in reversed(self.keys): + _output = {key: output} + output = _output + + all_values = {} + for item in self.input_fields: + all_values.update(item.config_value()) + + for key in reversed(self.keys): + _all_values = {key: all_values} + all_values = _all_values + + # Skip first key + all_values = all_values["system"] + + prject_defaults_dir = os.path.join( + DEFAULTS_DIR, SYSTEM_SETTINGS_KEY + ) + keys_to_file = lib.file_keys_from_schema(self.schema) + for key_sequence in keys_to_file: + # Skip first key + key_sequence = key_sequence[1:] + subpath = "/".join(key_sequence) + ".json" + + new_values = all_values + for key in key_sequence: + new_values = new_values[key] + + output_path = os.path.join(prject_defaults_dir, subpath) + dirpath = os.path.dirname(output_path) + if not os.path.exists(dirpath): + os.makedirs(dirpath) + + 
print("Saving data to: ", subpath) + with open(output_path, "w") as file_stream: + json.dump(new_values, file_stream, indent=4) + + reset_default_settings() + + self._update_values() + self.hierarchical_style_update() + + def _update_values(self): + self.ignore_value_changes = True + + default_values = { + "system": default_settings()[SYSTEM_SETTINGS_KEY] + } + for input_field in self.input_fields: + input_field.update_default_values(default_values) + + if self._hide_studio_overrides: + system_values = lib.NOT_SET + else: + system_values = {"system": studio_system_settings()} + for input_field in self.input_fields: + input_field.update_studio_values(system_values) + + self.ignore_value_changes = False + + def add_children_gui(self, child_configuration): + item_type = child_configuration["type"] + klass = lib.TypeToKlass.types.get(item_type) + item = klass(child_configuration, self) + self.input_fields.append(item) + self.content_layout.addWidget(item) + + +class ProjectListView(QtWidgets.QListView): + left_mouse_released_at = QtCore.Signal(QtCore.QModelIndex) + + def mouseReleaseEvent(self, event): + if event.button() == QtCore.Qt.LeftButton: + index = self.indexAt(event.pos()) + self.left_mouse_released_at.emit(index) + super(ProjectListView, self).mouseReleaseEvent(event) + + +class ProjectListWidget(QtWidgets.QWidget): + default = "< Default >" + project_changed = QtCore.Signal() + + def __init__(self, parent): + self._parent = parent + + self.current_project = None + + super(ProjectListWidget, self).__init__(parent) + self.setObjectName("ProjectListWidget") + + label_widget = QtWidgets.QLabel("Projects") + label_widget.setProperty("state", "studio") + project_list = ProjectListView(self) + project_list.setModel(QtGui.QStandardItemModel()) + + # Do not allow editing + project_list.setEditTriggers( + QtWidgets.QAbstractItemView.EditTrigger.NoEditTriggers + ) + # Do not automatically handle selection + 
project_list.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection) + + layout = QtWidgets.QVBoxLayout(self) + layout.setSpacing(3) + layout.addWidget(label_widget, 0) + layout.addWidget(project_list, 1) + + project_list.left_mouse_released_at.connect(self.on_item_clicked) + + self.project_list = project_list + + self.refresh() + + def on_item_clicked(self, new_index): + new_project_name = new_index.data(QtCore.Qt.DisplayRole) + if new_project_name is None: + return + + if self.current_project == new_project_name: + return + + save_changes = False + change_project = False + if self.validate_context_change(): + change_project = True + + else: + dialog = UnsavedChangesDialog(self) + result = dialog.exec_() + if result == 1: + save_changes = True + change_project = True + + elif result == 2: + change_project = True + + if save_changes: + self._parent._save() + + if change_project: + self.select_project(new_project_name) + self.current_project = new_project_name + self.project_changed.emit() + else: + self.select_project(self.current_project) + + def validate_context_change(self): + # TODO add check if project can be changed (is modified) + for item in self._parent.input_fields: + is_modified = item.child_modified + if is_modified: + return False + return True + + def project_name(self): + if self.current_project == self.default: + return None + return self.current_project + + def select_project(self, project_name): + model = self.project_list.model() + found_items = model.findItems(project_name) + if not found_items: + found_items = model.findItems(self.default) + + index = model.indexFromItem(found_items[0]) + self.project_list.selectionModel().clear() + self.project_list.selectionModel().setCurrentIndex( + index, QtCore.QItemSelectionModel.SelectionFlag.SelectCurrent + ) + + def refresh(self): + selected_project = None + for index in self.project_list.selectedIndexes(): + selected_project = index.data(QtCore.Qt.DisplayRole) + break + + model = 
self.project_list.model() + model.clear() + items = [self.default] + io.install() + for project_doc in tuple(io.projects()): + items.append(project_doc["name"]) + + for item in items: + model.appendRow(QtGui.QStandardItem(item)) + + self.select_project(selected_project) + + self.current_project = self.project_list.currentIndex().data( + QtCore.Qt.DisplayRole + ) + + +class ProjectWidget(QtWidgets.QWidget): + has_studio_override = _has_studio_override = False + is_overriden = _is_overriden = False + is_group = _is_group = False + any_parent_is_group = _any_parent_is_group = False + + def __init__(self, develop_mode, parent=None): + super(ProjectWidget, self).__init__(parent) + + self.develop_mode = develop_mode + self._hide_studio_overrides = False + + self.is_overidable = False + self._ignore_value_changes = False + self.project_name = None + + self.input_fields = [] + + scroll_widget = QtWidgets.QScrollArea(self) + scroll_widget.setObjectName("GroupWidget") + content_widget = QtWidgets.QWidget(scroll_widget) + content_layout = QtWidgets.QVBoxLayout(content_widget) + content_layout.setContentsMargins(3, 3, 3, 3) + content_layout.setSpacing(0) + content_layout.setAlignment(QtCore.Qt.AlignTop) + content_widget.setLayout(content_layout) + + scroll_widget.setWidgetResizable(True) + scroll_widget.setWidget(content_widget) + + project_list_widget = ProjectListWidget(self) + content_layout.addWidget(project_list_widget) + + footer_widget = QtWidgets.QWidget() + footer_layout = QtWidgets.QHBoxLayout(footer_widget) + + if self.develop_mode: + save_as_default_btn = QtWidgets.QPushButton("Save as Default") + save_as_default_btn.clicked.connect(self._save_as_defaults) + + refresh_icon = qtawesome.icon("fa.refresh", color="white") + refresh_button = QtWidgets.QPushButton() + refresh_button.setIcon(refresh_icon) + refresh_button.clicked.connect(self._on_refresh) + + hide_studio_overrides = QtWidgets.QCheckBox() + hide_studio_overrides.setChecked(self._hide_studio_overrides) + 
hide_studio_overrides.stateChanged.connect( + self._on_hide_studio_overrides + ) + + hide_studio_overrides_widget = QtWidgets.QWidget() + hide_studio_overrides_layout = QtWidgets.QHBoxLayout( + hide_studio_overrides_widget + ) + _label_widget = QtWidgets.QLabel( + "Hide studio overrides", hide_studio_overrides_widget + ) + hide_studio_overrides_layout.addWidget(_label_widget) + hide_studio_overrides_layout.addWidget(hide_studio_overrides) + + footer_layout.addWidget(save_as_default_btn, 0) + footer_layout.addWidget(refresh_button, 0) + footer_layout.addWidget(hide_studio_overrides_widget, 0) + + save_btn = QtWidgets.QPushButton("Save") + spacer_widget = QtWidgets.QWidget() + footer_layout.addWidget(spacer_widget, 1) + footer_layout.addWidget(save_btn, 0) + + configurations_widget = QtWidgets.QWidget() + configurations_layout = QtWidgets.QVBoxLayout(configurations_widget) + configurations_layout.setContentsMargins(0, 0, 0, 0) + configurations_layout.setSpacing(0) + + configurations_layout.addWidget(scroll_widget, 1) + configurations_layout.addWidget(footer_widget, 0) + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(0) + self.setLayout(layout) + + layout.addWidget(project_list_widget, 0) + layout.addWidget(configurations_widget, 1) + + save_btn.clicked.connect(self._save) + project_list_widget.project_changed.connect(self._on_project_change) + + self.project_list_widget = project_list_widget + self.scroll_widget = scroll_widget + self.content_layout = content_layout + self.content_widget = content_widget + + self.reset() + + def any_parent_overriden(self): + return False + + @property + def ignore_value_changes(self): + return self._ignore_value_changes + + @ignore_value_changes.setter + def ignore_value_changes(self, value): + self._ignore_value_changes = value + if value is False: + self.hierarchical_style_update() + + def hierarchical_style_update(self): + for input_field in self.input_fields: + 
input_field.hierarchical_style_update() + + def reset(self): + if self.content_layout.count() != 0: + for widget in self.input_fields: + self.content_layout.removeWidget(widget) + widget.deleteLater() + self.input_fields.clear() + + self.schema = lib.gui_schema("projects_schema", "0_project_gui_schema") + self.keys = self.schema.get("keys", []) + self.add_children_gui(self.schema) + self._update_values() + self.hierarchical_style_update() + + def add_children_gui(self, child_configuration): + item_type = child_configuration["type"] + klass = lib.TypeToKlass.types.get(item_type) + item = klass(child_configuration, self) + self.input_fields.append(item) + self.content_layout.addWidget(item) + + def _on_project_change(self): + project_name = self.project_list_widget.project_name() + if project_name is None: + _project_overrides = lib.NOT_SET + _project_anatomy = lib.NOT_SET + self.is_overidable = False + else: + _project_overrides = project_settings_overrides(project_name) + _project_anatomy = project_anatomy_overrides(project_name) + self.is_overidable = True + + overrides = {"project": { + PROJECT_SETTINGS_KEY: lib.convert_overrides_to_gui_data( + _project_overrides + ), + PROJECT_ANATOMY_KEY: lib.convert_overrides_to_gui_data( + _project_anatomy + ) + }} + self.project_name = project_name + self.ignore_value_changes = True + for item in self.input_fields: + item.apply_overrides(overrides) + self.ignore_value_changes = False + + def _save_as_defaults(self): + output = {} + for item in self.input_fields: + output.update(item.config_value()) + + for key in reversed(self.keys): + _output = {key: output} + output = _output + + all_values = {} + for item in self.input_fields: + all_values.update(item.config_value()) + + for key in reversed(self.keys): + _all_values = {key: all_values} + all_values = _all_values + + # Skip first key + all_values = all_values["project"] + + keys_to_file = lib.file_keys_from_schema(self.schema) + for key_sequence in keys_to_file: + # Skip 
first key + key_sequence = key_sequence[1:] + subpath = "/".join(key_sequence) + ".json" + + new_values = all_values + for key in key_sequence: + new_values = new_values[key] + + output_path = os.path.join(DEFAULTS_DIR, subpath) + dirpath = os.path.dirname(output_path) + if not os.path.exists(dirpath): + os.makedirs(dirpath) + + print("Saving data to: ", subpath) + with open(output_path, "w") as file_stream: + json.dump(new_values, file_stream, indent=4) + + reset_default_settings() + + self._update_values() + self.hierarchical_style_update() + + def _save(self): + has_invalid = False + for item in self.input_fields: + if item.child_invalid: + has_invalid = True + + if has_invalid: + invalid_items = [] + for item in self.input_fields: + invalid_items.extend(item.get_invalid()) + msg_box = QtWidgets.QMessageBox( + QtWidgets.QMessageBox.Warning, + "Invalid input", + "There is invalid value in one of inputs." + " Please lead red color and fix them." + ) + msg_box.setStandardButtons(QtWidgets.QMessageBox.Ok) + msg_box.exec_() + + first_invalid_item = invalid_items[0] + self.scroll_widget.ensureWidgetVisible(first_invalid_item) + if first_invalid_item.isVisible(): + first_invalid_item.setFocus(True) + return + + if self.project_name is None: + self._save_studio_overrides() + else: + self._save_overrides() + + def _on_refresh(self): + self.reset() + + def _on_hide_studio_overrides(self, state): + self._hide_studio_overrides = (state == QtCore.Qt.Checked) + self._update_values() + self.hierarchical_style_update() + + def _save_overrides(self): + data = {} + for item in self.input_fields: + value, is_group = item.overrides() + if value is not lib.NOT_SET: + data.update(value) + + output_data = lib.convert_gui_data_to_overrides( + data.get("project") or {} + ) + + # Saving overrides data + project_overrides_data = output_data.get( + PROJECT_SETTINGS_KEY, {} + ) + project_overrides_json_path = path_to_project_overrides( + self.project_name + ) + dirpath = 
os.path.dirname(project_overrides_json_path) + if not os.path.exists(dirpath): + os.makedirs(dirpath) + + print("Saving data to:", project_overrides_json_path) + with open(project_overrides_json_path, "w") as file_stream: + json.dump(project_overrides_data, file_stream, indent=4) + + # Saving anatomy data + project_anatomy_data = output_data.get( + PROJECT_ANATOMY_KEY, {} + ) + project_anatomy_json_path = path_to_project_anatomy( + self.project_name + ) + dirpath = os.path.dirname(project_anatomy_json_path) + if not os.path.exists(dirpath): + os.makedirs(dirpath) + + print("Saving data to:", project_anatomy_json_path) + with open(project_anatomy_json_path, "w") as file_stream: + json.dump(project_anatomy_data, file_stream, indent=4) + + # Refill values with overrides + self._on_project_change() + + def _save_studio_overrides(self): + data = {} + for input_field in self.input_fields: + value, is_group = input_field.studio_overrides() + if value is not lib.NOT_SET: + data.update(value) + + output_data = lib.convert_gui_data_to_overrides( + data.get("project", {}) + ) + + # Project overrides data + project_overrides_data = output_data.get( + PROJECT_SETTINGS_KEY, {} + ) + dirpath = os.path.dirname(PROJECT_SETTINGS_PATH) + if not os.path.exists(dirpath): + os.makedirs(dirpath) + + print("Saving data to:", PROJECT_SETTINGS_PATH) + with open(PROJECT_SETTINGS_PATH, "w") as file_stream: + json.dump(project_overrides_data, file_stream, indent=4) + + # Project Anatomy data + project_anatomy_data = output_data.get( + PROJECT_ANATOMY_KEY, {} + ) + dirpath = os.path.dirname(PROJECT_ANATOMY_PATH) + if not os.path.exists(dirpath): + os.makedirs(dirpath) + + print("Saving data to:", PROJECT_ANATOMY_PATH) + with open(PROJECT_ANATOMY_PATH, "w") as file_stream: + json.dump(project_anatomy_data, file_stream, indent=4) + + # Update saved values + self._update_values() + + def _update_values(self): + self.ignore_value_changes = True + + default_values = {"project": default_settings()} + 
for input_field in self.input_fields: + input_field.update_default_values(default_values) + + if self._hide_studio_overrides: + studio_values = lib.NOT_SET + else: + studio_values = {"project": { + PROJECT_SETTINGS_KEY: studio_project_settings(), + PROJECT_ANATOMY_KEY: studio_project_anatomy() + }} + for input_field in self.input_fields: + input_field.update_studio_values(studio_values) + + self.ignore_value_changes = False diff --git a/pype/tools/settings/settings/widgets/item_types.py b/pype/tools/settings/settings/widgets/item_types.py new file mode 100644 index 0000000000..ea32d9c79c --- /dev/null +++ b/pype/tools/settings/settings/widgets/item_types.py @@ -0,0 +1,3111 @@ +import json +import logging +import collections +from Qt import QtWidgets, QtCore, QtGui +from .widgets import ( + ExpandingWidget, + NumberSpinBox, + PathInput +) +from .lib import NOT_SET, METADATA_KEY, TypeToKlass, CHILD_OFFSET +from avalon.vendor import qtawesome + + +class SettingObject: + """Partially abstract class for Setting's item type workflow.""" + # `is_input_type` attribute says if has implemented item type methods + is_input_type = True + # Each input must have implemented default value for development + # when defaults are not filled yet. + default_input_value = NOT_SET + # Will allow to show actions for the item type (disabled for proxies) else + # item is skipped and try to trigger actions on it's parent. + allow_actions = True + # All item types must have implemented Qt signal which is emitted when + # it's or it's children value has changed, + value_changed = None + + def _set_default_attributes(self): + """Create and reset attributes required for all item types. + + They may not be used in the item but are required to be set. 
+ """ + # Default input attributes + self._has_studio_override = False + self._had_studio_override = False + + self._is_overriden = False + self._was_overriden = False + + self._is_modified = False + self._is_invalid = False + + self._is_nullable = False + self._as_widget = False + self._is_group = False + + self._any_parent_is_group = None + + # Parent input + self._parent = None + + # States of inputs + self._state = None + self._child_state = None + + # Attributes where values are stored + self.default_value = NOT_SET + self.studio_value = NOT_SET + self.override_value = NOT_SET + + # Log object + self._log = None + + # Only for develop mode + self.defaults_not_set = False + + def initial_attributes(self, input_data, parent, as_widget): + """Prepare attributes based on entered arguments. + + This method should be same for each item type. Few item types + may require to extend with specific attributes for their case. + """ + self._set_default_attributes() + + self._parent = parent + self._as_widget = as_widget + + self._is_group = input_data.get("is_group", False) + # TODO not implemented yet + self._is_nullable = input_data.get("is_nullable", False) + + any_parent_is_group = parent.is_group + if not any_parent_is_group: + any_parent_is_group = parent.any_parent_is_group + + self._any_parent_is_group = any_parent_is_group + + @property + def develop_mode(self): + """Tool is in develop mode or not. + + Returns: + bool + + """ + return self._parent.develop_mode + + @property + def log(self): + """Auto created logger for debugging.""" + if self._log is None: + self._log = logging.getLogger(self.__class__.__name__) + return self._log + + @property + def had_studio_override(self): + """Item had studio overrides on refresh. + + Use attribute `_had_studio_override` which should be changed only + during methods `update_studio_values` and `update_default_values`. 
+ + Returns: + bool + + """ + return self._had_studio_override + + @property + def has_studio_override(self): + """Item has studio override at the moment. + + With combination of `had_studio_override` is possible to know if item + is modified (not value change). + + Returns: + bool + + """ + return self._has_studio_override or self._parent.has_studio_override + + @property + def is_group(self): + """Item represents key that can be overriden. + + Attribute `is_group` can be set to True only once in item hierarchy. + + Returns: + bool + + """ + return self._is_group + + @property + def any_parent_is_group(self): + """Any parent of item is group. + + Attribute holding this information is set during creation and + stored to `_any_parent_is_group`. + + Why is this information useful: If any parent is group and + the parent is set as overriden, this item is overriden too. + + Returns: + bool + + """ + if self._any_parent_is_group is None: + return super(SettingObject, self).any_parent_is_group + return self._any_parent_is_group + + @property + def is_modified(self): + """Has object any changes that require saving.""" + if self._is_modified or self.defaults_not_set: + return True + + if self.is_overidable: + return self.was_overriden != self.is_overriden + else: + return self.has_studio_override != self.had_studio_override + + @property + def is_overriden(self): + """Is object overriden so should be saved to overrides.""" + return self._is_overriden or self._parent.is_overriden + + @property + def was_overriden(self): + """Item had set value of project overrides on project change.""" + if self._as_widget: + return self._parent.was_overriden + return self._was_overriden + + @property + def is_invalid(self): + """Value set in is not valid.""" + return self._is_invalid + + @property + def is_nullable(self): + """Value of item can be set to None. + + NOT IMPLEMENTED! 
+ """ + return self._is_nullable + + @property + def is_overidable(self): + """ care about overrides.""" + + return self._parent.is_overidable + + def any_parent_overriden(self): + """Any of parent objects up to top hiearchy item is overriden. + + Returns: + bool + + """ + + if self._parent._is_overriden: + return True + return self._parent.any_parent_overriden() + + @property + def ignore_value_changes(self): + """Most of attribute changes are ignored on value change when True.""" + return self._parent.ignore_value_changes + + @ignore_value_changes.setter + def ignore_value_changes(self, value): + """Setter for global parent item to apply changes for all inputs.""" + self._parent.ignore_value_changes = value + + def config_value(self): + """Output for saving changes or overrides.""" + return {self.key: self.item_value()} + + @classmethod + def style_state( + cls, has_studio_override, is_invalid, is_overriden, is_modified + ): + """Return stylesheet state by intered booleans.""" + items = [] + if is_invalid: + items.append("invalid") + else: + if is_overriden: + items.append("overriden") + if is_modified: + items.append("modified") + + if not items and has_studio_override: + items.append("studio") + + return "-".join(items) or "" + + def mouseReleaseEvent(self, event): + if self.allow_actions and event.button() == QtCore.Qt.RightButton: + menu = QtWidgets.QMenu() + + actions_mapping = {} + if self.child_modified: + action = QtWidgets.QAction("Discard changes") + actions_mapping[action] = self._discard_changes + menu.addAction(action) + + if ( + self.is_overidable + and not self.is_overriden + and not self.any_parent_is_group + ): + action = QtWidgets.QAction("Set project override") + actions_mapping[action] = self._set_as_overriden + menu.addAction(action) + + if ( + not self.is_overidable + and ( + self.has_studio_override + ) + ): + action = QtWidgets.QAction("Reset to pype default") + actions_mapping[action] = self._reset_to_pype_default + menu.addAction(action) 
+ + if ( + not self.is_overidable + and not self.is_overriden + and not self.any_parent_is_group + and not self._had_studio_override + ): + action = QtWidgets.QAction("Set studio default") + actions_mapping[action] = self._set_studio_default + menu.addAction(action) + + if ( + not self.any_parent_overriden() + and (self.is_overriden or self.child_overriden) + ): + # TODO better label + action = QtWidgets.QAction("Remove project override") + actions_mapping[action] = self._remove_overrides + menu.addAction(action) + + if not actions_mapping: + action = QtWidgets.QAction("< No action >") + actions_mapping[action] = None + menu.addAction(action) + + result = menu.exec_(QtGui.QCursor.pos()) + if result: + to_run = actions_mapping[result] + if to_run: + to_run() + return + + mro = type(self).mro() + index = mro.index(self.__class__) + item = None + for idx in range(index + 1, len(mro)): + _item = mro[idx] + if hasattr(_item, "mouseReleaseEvent"): + item = _item + break + + if item: + return item.mouseReleaseEvent(self, event) + + def _discard_changes(self): + self.ignore_value_changes = True + self.discard_changes() + self.ignore_value_changes = False + + def discard_changes(self): + """Item's implementation to discard all changes made by user. + + Reset all values to same values as had when opened GUI + or when changed project. + + Must not affect `had_studio_override` value or `was_overriden` + value. It must be marked that there are keys/values which are not in + defaults or overrides. + """ + raise NotImplementedError( + "{} Method `discard_changes` not implemented!".format( + repr(self) + ) + ) + + def _set_studio_default(self): + self.ignore_value_changes = True + self.set_studio_default() + self.ignore_value_changes = False + + def set_studio_default(self): + """Item's implementation to set current values as studio's overrides. + + Mark item and it's children as they have studio overrides. 
+ """ + raise NotImplementedError( + "{} Method `set_studio_default` not implemented!".format( + repr(self) + ) + ) + + def _reset_to_pype_default(self): + self.ignore_value_changes = True + self.reset_to_pype_default() + self.ignore_value_changes = False + + def reset_to_pype_default(self): + """Item's implementation to remove studio overrides. + + Mark item as it does not have studio overrides unset studio + override values. + """ + raise NotImplementedError( + "{} Method `reset_to_pype_default` not implemented!".format( + repr(self) + ) + ) + + def _remove_overrides(self): + self.ignore_value_changes = True + self.remove_overrides() + self.ignore_value_changes = False + + def remove_overrides(self): + """Item's implementation to remove project overrides. + + Mark item as does not have project overrides. Must not change + `was_overriden` attribute value. + """ + raise NotImplementedError( + "{} Method `remove_overrides` not implemented!".format( + repr(self) + ) + ) + + def _set_as_overriden(self): + self.ignore_value_changes = True + self.set_as_overriden() + self.ignore_value_changes = False + + def set_as_overriden(self): + """Item's implementation to set values as overriden for project. + + Mark item and all it's children as they're overriden. Must skip + items with children items that has attributes `is_group` + and `any_parent_is_group` set to False. In that case those items + are not meant to be overridable and should trigger the method on it's + children. + + """ + raise NotImplementedError( + "{} Method `set_as_overriden` not implemented!".format(repr(self)) + ) + + def hierarchical_style_update(self): + """Trigger update style method down the hierarchy.""" + raise NotImplementedError( + "{} Method `hierarchical_style_update` not implemented!".format( + repr(self) + ) + ) + + def update_default_values(self, parent_values): + """Fill default values on startup or on refresh. 
+ + Default values stored in `pype` repository should update all items in + schema. Each item should take values for his key and set it's value or + pass values down to children items. + + Args: + parent_values (dict): Values of parent's item. But in case item is + used as widget, `parent_values` contain value for item. + """ + raise NotImplementedError( + "{} does not have implemented `update_default_values`".format(self) + ) + + def update_studio_values(self, parent_values): + """Fill studio override values on startup or on refresh. + + Set studio value if is not set to NOT_SET, in that case studio + overrides are not set yet. + + Args: + parent_values (dict): Values of parent's item. But in case item is + used as widget, `parent_values` contain value for item. + """ + raise NotImplementedError( + "{} does not have implemented `update_studio_values`".format(self) + ) + + def apply_overrides(self, parent_values): + """Fill project override values on startup, refresh or project change. + + Set project value if is not set to NOT_SET, in that case project + overrides are not set yet. + + Args: + parent_values (dict): Values of parent's item. But in case item is + used as widget, `parent_values` contain value for item. 
+ """ + raise NotImplementedError( + "{} does not have implemented `apply_overrides`".format(self) + ) + + @property + def child_has_studio_override(self): + """Any children item has studio overrides.""" + raise NotImplementedError( + "{} does not have implemented `child_has_studio_override`".format( + self + ) + ) + + @property + def child_modified(self): + """Any children item is modified.""" + raise NotImplementedError( + "{} does not have implemented `child_modified`".format(self) + ) + + @property + def child_overriden(self): + """Any children item has project overrides.""" + raise NotImplementedError( + "{} does not have implemented `child_overriden`".format(self) + ) + + @property + def child_invalid(self): + """Any children item does not have valid value.""" + raise NotImplementedError( + "{} does not have implemented `child_invalid`".format(self) + ) + + def get_invalid(self): + """Return invalid item types all down the hierarchy.""" + raise NotImplementedError( + "{} does not have implemented `get_invalid`".format(self) + ) + + def item_value(self): + """Value of an item without key.""" + raise NotImplementedError( + "Method `item_value` not implemented!" + ) + + def studio_value(self): + """Output for saving changes or overrides.""" + return {self.key: self.item_value()} + + +class InputObject(SettingObject): + """Class for inputs with pre-implemented methods. + + Class is for item types not creating or using other item types, most + of methods has same code in that case. 
+ """ + def update_default_values(self, parent_values): + self._state = None + self._is_modified = False + + value = NOT_SET + if self._as_widget: + value = parent_values + elif parent_values is not NOT_SET: + value = parent_values.get(self.key, NOT_SET) + + if value is NOT_SET: + if self.develop_mode: + value = self.default_input_value + self.defaults_not_set = True + if value is NOT_SET: + raise NotImplementedError(( + "{} Does not have implemented" + " attribute `default_input_value`" + ).format(self)) + + else: + raise ValueError( + "Default value is not set. This is implementation BUG." + ) + + self.default_value = value + self._has_studio_override = False + self._had_studio_override = False + self.set_value(value) + + def update_studio_values(self, parent_values): + self._state = None + self._is_modified = False + + value = NOT_SET + if self._as_widget: + value = parent_values + elif parent_values is not NOT_SET: + value = parent_values.get(self.key, NOT_SET) + + self.studio_value = value + if value is not NOT_SET: + self._has_studio_override = True + self._had_studio_override = True + self.set_value(value) + + else: + self._has_studio_override = False + self._had_studio_override = False + self.set_value(self.default_value) + + def apply_overrides(self, parent_values): + self._is_modified = False + self._state = None + self._had_studio_override = bool(self._has_studio_override) + if self._as_widget: + override_value = parent_values + elif parent_values is NOT_SET or self.key not in parent_values: + override_value = NOT_SET + else: + override_value = parent_values[self.key] + + self.override_value = override_value + + if override_value is NOT_SET: + self._is_overriden = False + self._was_overriden = False + if self.has_studio_override: + value = self.studio_value + else: + value = self.default_value + else: + self._is_overriden = True + self._was_overriden = True + value = override_value + + self.set_value(value) + + def _on_value_change(self, item=None): + if 
self.ignore_value_changes: + return + + if self.is_overidable: + self._is_overriden = True + else: + self._has_studio_override = True + + if self._is_invalid: + self._is_modified = True + elif self._is_overriden: + self._is_modified = self.item_value() != self.override_value + elif self._has_studio_override: + self._is_modified = self.item_value() != self.studio_value + else: + self._is_modified = self.item_value() != self.default_value + + self.update_style() + + self.value_changed.emit(self) + + def studio_overrides(self): + if not self.has_studio_override: + return NOT_SET, False + return self.config_value(), self.is_group + + def overrides(self): + if not self.is_overriden: + return NOT_SET, False + return self.config_value(), self.is_group + + def hierarchical_style_update(self): + self.update_style() + + def remove_overrides(self): + if self.has_studio_override: + self.set_value(self.studio_value) + else: + self.set_value(self.default_value) + self._is_overriden = False + self._is_modified = False + + def reset_to_pype_default(self): + self.set_value(self.default_value) + self._has_studio_override = False + + def set_studio_default(self): + self._has_studio_override = True + + def discard_changes(self): + self._is_overriden = self._was_overriden + self._has_studio_override = self._had_studio_override + if self.is_overidable: + if self._was_overriden and self.override_value is not NOT_SET: + self.set_value(self.override_value) + else: + if self._had_studio_override: + self.set_value(self.studio_value) + else: + self.set_value(self.default_value) + + if not self.is_overidable: + if self.has_studio_override: + self._is_modified = self.studio_value != self.item_value() + else: + self._is_modified = self.default_value != self.item_value() + self._is_overriden = False + return + + self._is_modified = False + self._is_overriden = self._was_overriden + + def set_as_overriden(self): + self._is_overriden = True + + @property + def child_has_studio_override(self): + 
return self._has_studio_override + + @property + def child_modified(self): + return self.is_modified + + @property + def child_overriden(self): + return self._is_overriden + + @property + def child_invalid(self): + return self.is_invalid + + def get_invalid(self): + output = [] + if self.is_invalid: + output.append(self) + return output + + def reset_children_attributes(self): + return + + +class BooleanWidget(QtWidgets.QWidget, InputObject): + default_input_value = True + value_changed = QtCore.Signal(object) + + def __init__( + self, input_data, parent, + as_widget=False, label_widget=None, parent_widget=None + ): + if parent_widget is None: + parent_widget = parent + super(BooleanWidget, self).__init__(parent_widget) + + self.initial_attributes(input_data, parent, as_widget) + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(5) + + if not self._as_widget: + self.key = input_data["key"] + if not label_widget: + label = input_data["label"] + label_widget = QtWidgets.QLabel(label) + label_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground) + layout.addWidget(label_widget, 0) + self.label_widget = label_widget + + self.checkbox = QtWidgets.QCheckBox(self) + spacer = QtWidgets.QWidget(self) + layout.addWidget(self.checkbox, 0) + layout.addWidget(spacer, 1) + + spacer.setAttribute(QtCore.Qt.WA_TranslucentBackground) + + self.setFocusProxy(self.checkbox) + + self.checkbox.stateChanged.connect(self._on_value_change) + + def set_value(self, value): + # Ignore value change because if `self.isChecked()` has same + # value as `value` the `_on_value_change` is not triggered + self.checkbox.setChecked(value) + + def update_style(self): + if self._as_widget: + if not self.isEnabled(): + state = self.style_state(False, False, False, False) + else: + state = self.style_state( + False, + self._is_invalid, + False, + self._is_modified + ) + else: + state = self.style_state( + self.has_studio_override, + self.is_invalid, + 
self.is_overriden, + self.is_modified + ) + if self._state == state: + return + + if self._as_widget: + property_name = "input-state" + else: + property_name = "state" + + self.label_widget.setProperty(property_name, state) + self.label_widget.style().polish(self.label_widget) + self._state = state + + def item_value(self): + return self.checkbox.isChecked() + + +class NumberWidget(QtWidgets.QWidget, InputObject): + default_input_value = 0 + value_changed = QtCore.Signal(object) + input_modifiers = ("minimum", "maximum", "decimal") + + def __init__( + self, input_data, parent, + as_widget=False, label_widget=None, parent_widget=None + ): + if parent_widget is None: + parent_widget = parent + super(NumberWidget, self).__init__(parent_widget) + + self.initial_attributes(input_data, parent, as_widget) + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(5) + + kwargs = { + modifier: input_data.get(modifier) + for modifier in self.input_modifiers + if input_data.get(modifier) + } + self.input_field = NumberSpinBox(self, **kwargs) + + self.setFocusProxy(self.input_field) + + if not self._as_widget: + self.key = input_data["key"] + if not label_widget: + label = input_data["label"] + label_widget = QtWidgets.QLabel(label) + layout.addWidget(label_widget, 0) + self.label_widget = label_widget + + layout.addWidget(self.input_field, 1) + + self.input_field.valueChanged.connect(self._on_value_change) + + def set_value(self, value): + self.input_field.setValue(value) + + def update_style(self): + if self._as_widget: + if not self.isEnabled(): + state = self.style_state(False, False, False, False) + else: + state = self.style_state( + False, + self._is_invalid, + False, + self._is_modified + ) + else: + state = self.style_state( + self.has_studio_override, + self.is_invalid, + self.is_overriden, + self.is_modified + ) + if self._state == state: + return + + if self._as_widget: + property_name = "input-state" + widget = 
self.input_field + else: + property_name = "state" + widget = self.label_widget + + widget.setProperty(property_name, state) + widget.style().polish(widget) + + def item_value(self): + return self.input_field.value() + + +class TextWidget(QtWidgets.QWidget, InputObject): + default_input_value = "" + value_changed = QtCore.Signal(object) + + def __init__( + self, input_data, parent, + as_widget=False, label_widget=None, parent_widget=None + ): + if parent_widget is None: + parent_widget = parent + super(TextWidget, self).__init__(parent_widget) + + self.initial_attributes(input_data, parent, as_widget) + + self.multiline = input_data.get("multiline", False) + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(5) + + if self.multiline: + self.text_input = QtWidgets.QPlainTextEdit(self) + else: + self.text_input = QtWidgets.QLineEdit(self) + + self.setFocusProxy(self.text_input) + + layout_kwargs = {} + if self.multiline: + layout_kwargs["alignment"] = QtCore.Qt.AlignTop + + if not self._as_widget: + self.key = input_data["key"] + if not label_widget: + label = input_data["label"] + label_widget = QtWidgets.QLabel(label) + layout.addWidget(label_widget, 0, **layout_kwargs) + self.label_widget = label_widget + + layout.addWidget(self.text_input, 1, **layout_kwargs) + + self.text_input.textChanged.connect(self._on_value_change) + + def set_value(self, value): + if self.multiline: + self.text_input.setPlainText(value) + else: + self.text_input.setText(value) + + def update_style(self): + if self._as_widget: + if not self.isEnabled(): + state = self.style_state(False, False, False, False) + else: + state = self.style_state( + False, + self._is_invalid, + False, + self._is_modified + ) + else: + state = self.style_state( + self.has_studio_override, + self.is_invalid, + self.is_overriden, + self.is_modified + ) + + if self._state == state: + return + + if self._as_widget: + property_name = "input-state" + widget = 
self.text_input + else: + property_name = "state" + widget = self.label_widget + + widget.setProperty(property_name, state) + widget.style().polish(widget) + + def item_value(self): + if self.multiline: + return self.text_input.toPlainText() + else: + return self.text_input.text() + + +class PathInputWidget(QtWidgets.QWidget, InputObject): + default_input_value = "" + value_changed = QtCore.Signal(object) + + def __init__( + self, input_data, parent, + as_widget=False, label_widget=None, parent_widget=None + ): + if parent_widget is None: + parent_widget = parent + super(PathInputWidget, self).__init__(parent_widget) + + self.initial_attributes(input_data, parent, as_widget) + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(5) + + if not self._as_widget: + self.key = input_data["key"] + if not label_widget: + label = input_data["label"] + label_widget = QtWidgets.QLabel(label) + layout.addWidget(label_widget, 0) + self.label_widget = label_widget + + self.path_input = PathInput(self) + self.setFocusProxy(self.path_input) + layout.addWidget(self.path_input, 1) + + self.path_input.textChanged.connect(self._on_value_change) + + def set_value(self, value): + self.path_input.setText(value) + + def focusOutEvent(self, event): + self.path_input.clear_end_path() + super(PathInput, self).focusOutEvent(event) + + def update_style(self): + if self._as_widget: + if not self.isEnabled(): + state = self.style_state(False, False, False, False) + else: + state = self.style_state( + False, + self._is_invalid, + False, + self._is_modified + ) + else: + state = self.style_state( + self.has_studio_override, + self.is_invalid, + self.is_overriden, + self.is_modified + ) + + if self._state == state: + return + + if self._as_widget: + property_name = "input-state" + widget = self.path_input + else: + property_name = "state" + widget = self.label_widget + + widget.setProperty(property_name, state) + widget.style().polish(widget) + + def 
item_value(self): + return self.path_input.text() + + +class RawJsonInput(QtWidgets.QPlainTextEdit): + tab_length = 4 + + def __init__(self, *args, **kwargs): + super(RawJsonInput, self).__init__(*args, **kwargs) + self.setObjectName("RawJsonInput") + self.setTabStopDistance( + QtGui.QFontMetricsF( + self.font() + ).horizontalAdvance(" ") * self.tab_length + ) + + def sizeHint(self): + document = self.document() + layout = document.documentLayout() + + height = document.documentMargin() + 2 * self.frameWidth() + 1 + block = document.begin() + while block != document.end(): + height += layout.blockBoundingRect(block).height() + block = block.next() + + hint = super(RawJsonInput, self).sizeHint() + hint.setHeight(height) + + return hint + + def set_value(self, value): + if value is NOT_SET: + value = "" + elif not isinstance(value, str): + try: + value = json.dumps(value, indent=4) + except Exception: + value = "" + self.setPlainText(value) + + def json_value(self): + return json.loads(self.toPlainText()) + + def has_invalid_value(self): + try: + self.json_value() + return False + except Exception: + return True + + def resizeEvent(self, event): + self.updateGeometry() + super(RawJsonInput, self).resizeEvent(event) + + +class RawJsonWidget(QtWidgets.QWidget, InputObject): + default_input_value = "{}" + value_changed = QtCore.Signal(object) + + def __init__( + self, input_data, parent, + as_widget=False, label_widget=None, parent_widget=None + ): + if parent_widget is None: + parent_widget = parent + super(RawJsonWidget, self).__init__(parent_widget) + + self.initial_attributes(input_data, parent, as_widget) + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(5) + + self.text_input = RawJsonInput(self) + self.text_input.setSizePolicy( + QtWidgets.QSizePolicy.Minimum, + QtWidgets.QSizePolicy.MinimumExpanding + ) + + self.setFocusProxy(self.text_input) + + if not self._as_widget: + self.key = input_data["key"] + if not 
label_widget: + label = input_data["label"] + label_widget = QtWidgets.QLabel(label) + layout.addWidget(label_widget, 0, alignment=QtCore.Qt.AlignTop) + self.label_widget = label_widget + layout.addWidget(self.text_input, 1, alignment=QtCore.Qt.AlignTop) + + self.text_input.textChanged.connect(self._on_value_change) + + def update_studio_values(self, parent_values): + self._is_invalid = self.text_input.has_invalid_value() + return super(RawJsonWidget, self).update_studio_values(parent_values) + + def set_value(self, value): + self.text_input.set_value(value) + + def _on_value_change(self, *args, **kwargs): + self._is_invalid = self.text_input.has_invalid_value() + return super(RawJsonWidget, self)._on_value_change(*args, **kwargs) + + def update_style(self): + if self._as_widget: + if not self.isEnabled(): + state = self.style_state(False, False, False, False) + else: + state = self.style_state( + False, + self._is_invalid, + False, + self._is_modified + ) + else: + state = self.style_state( + self.has_studio_override, + self.is_invalid, + self.is_overriden, + self.is_modified + ) + + if self._state == state: + return + + if self._as_widget: + property_name = "input-state" + widget = self.text_input + else: + property_name = "state" + widget = self.label_widget + + widget.setProperty(property_name, state) + widget.style().polish(widget) + + def item_value(self): + if self.is_invalid: + return NOT_SET + return self.text_input.json_value() + + +class ListItem(QtWidgets.QWidget, SettingObject): + _btn_size = 20 + value_changed = QtCore.Signal(object) + + def __init__(self, object_type, input_modifiers, config_parent, parent): + super(ListItem, self).__init__(parent) + + self._set_default_attributes() + + self._parent = config_parent + self._any_parent_is_group = True + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(3) + + char_up = qtawesome.charmap("fa.angle-up") + char_down = qtawesome.charmap("fa.angle-down") + + 
self.add_btn = QtWidgets.QPushButton("+") + self.remove_btn = QtWidgets.QPushButton("-") + self.up_btn = QtWidgets.QPushButton(char_up) + self.down_btn = QtWidgets.QPushButton(char_down) + + font_up_down = qtawesome.font("fa", 13) + self.up_btn.setFont(font_up_down) + self.down_btn.setFont(font_up_down) + + self.add_btn.setFocusPolicy(QtCore.Qt.ClickFocus) + self.remove_btn.setFocusPolicy(QtCore.Qt.ClickFocus) + self.up_btn.setFocusPolicy(QtCore.Qt.ClickFocus) + self.down_btn.setFocusPolicy(QtCore.Qt.ClickFocus) + + self.add_btn.setFixedSize(self._btn_size, self._btn_size) + self.remove_btn.setFixedSize(self._btn_size, self._btn_size) + self.up_btn.setFixedSize(self._btn_size, self._btn_size) + self.down_btn.setFixedSize(self._btn_size, self._btn_size) + + self.add_btn.setProperty("btn-type", "tool-item") + self.remove_btn.setProperty("btn-type", "tool-item") + self.up_btn.setProperty("btn-type", "tool-item") + self.down_btn.setProperty("btn-type", "tool-item") + + layout.addWidget(self.add_btn, 0) + layout.addWidget(self.remove_btn, 0) + + self.add_btn.clicked.connect(self._on_add_clicked) + self.remove_btn.clicked.connect(self._on_remove_clicked) + self.up_btn.clicked.connect(self._on_up_clicked) + self.down_btn.clicked.connect(self._on_down_clicked) + + ItemKlass = TypeToKlass.types[object_type] + self.value_input = ItemKlass( + input_modifiers, + self, + as_widget=True, + label_widget=None + ) + layout.addWidget(self.value_input, 1) + + layout.addWidget(self.up_btn, 0) + layout.addWidget(self.down_btn, 0) + + self.value_input.value_changed.connect(self._on_value_change) + + def set_as_empty(self, is_empty=True): + self.value_input.setEnabled(not is_empty) + self.remove_btn.setEnabled(not is_empty) + self.order_changed() + self._on_value_change() + + def order_changed(self): + row = self.row() + parent_row_count = self.parent_rows_count() + if parent_row_count == 1: + self.up_btn.setEnabled(False) + self.down_btn.setEnabled(False) + + elif row == 0: + 
self.up_btn.setEnabled(False) + self.down_btn.setEnabled(True) + + elif row == parent_row_count - 1: + self.up_btn.setEnabled(True) + self.down_btn.setEnabled(False) + + else: + self.up_btn.setEnabled(True) + self.down_btn.setEnabled(True) + + def _on_value_change(self, item=None): + self.value_changed.emit(self) + + def row(self): + return self._parent.input_fields.index(self) + + def parent_rows_count(self): + return len(self._parent.input_fields) + + def _on_add_clicked(self): + if self.value_input.isEnabled(): + self._parent.add_row(row=self.row() + 1) + else: + self.set_as_empty(False) + + def _on_remove_clicked(self): + self._parent.remove_row(self) + + def _on_up_clicked(self): + row = self.row() + self._parent.swap_rows(row - 1, row) + + def _on_down_clicked(self): + row = self.row() + self._parent.swap_rows(row, row + 1) + + def config_value(self): + if self.value_input.isEnabled(): + return self.value_input.item_value() + return NOT_SET + + @property + def child_has_studio_override(self): + return self.value_input.child_has_studio_override + + @property + def child_modified(self): + return self.value_input.child_modified + + @property + def child_overriden(self): + return self.value_input.child_overriden + + def hierarchical_style_update(self): + self.value_input.hierarchical_style_update() + + def mouseReleaseEvent(self, event): + return QtWidgets.QWidget.mouseReleaseEvent(self, event) + + def update_default_values(self, value): + self.value_input.update_default_values(value) + + def update_studio_values(self, value): + self.value_input.update_studio_values(value) + + def apply_overrides(self, value): + self.value_input.apply_overrides(value) + + +class ListWidget(QtWidgets.QWidget, InputObject): + default_input_value = [] + value_changed = QtCore.Signal(object) + + def __init__( + self, input_data, parent, + as_widget=False, label_widget=None, parent_widget=None + ): + if parent_widget is None: + parent_widget = parent + super(ListWidget, 
self).__init__(parent_widget) + self.setObjectName("ListWidget") + + self.initial_attributes(input_data, parent, as_widget) + + self.object_type = input_data["object_type"] + self.input_modifiers = input_data.get("input_modifiers") or {} + + self.key = input_data["key"] + + self.input_fields = [] + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 5) + layout.setSpacing(5) + + if not label_widget: + label_widget = QtWidgets.QLabel(input_data["label"], self) + layout.addWidget(label_widget, alignment=QtCore.Qt.AlignTop) + + self.label_widget = label_widget + + inputs_widget = QtWidgets.QWidget(self) + inputs_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground) + layout.addWidget(inputs_widget) + + inputs_layout = QtWidgets.QVBoxLayout(inputs_widget) + inputs_layout.setContentsMargins(0, 0, 0, 0) + inputs_layout.setSpacing(3) + + self.inputs_widget = inputs_widget + self.inputs_layout = inputs_layout + + self.add_row(is_empty=True) + + def count(self): + return len(self.input_fields) + + def update_studio_values(self, parent_values): + super(ListWidget, self).update_studio_values(parent_values) + + self.hierarchical_style_update() + + def set_value(self, value): + previous_inputs = tuple(self.input_fields) + for item_value in value: + self.add_row(value=item_value) + + for input_field in previous_inputs: + self.remove_row(input_field) + + if self.count() == 0: + self.add_row(is_empty=True) + + def swap_rows(self, row_1, row_2): + if row_1 == row_2: + return + + if row_1 > row_2: + row_1, row_2 = row_2, row_1 + + field_1 = self.input_fields[row_1] + field_2 = self.input_fields[row_2] + + self.input_fields[row_1] = field_2 + self.input_fields[row_2] = field_1 + + layout_index = self.inputs_layout.indexOf(field_1) + self.inputs_layout.insertWidget(layout_index + 1, field_1) + + field_1.order_changed() + field_2.order_changed() + + def add_row(self, row=None, value=None, is_empty=False): + # Create new item + item_widget = ListItem( + 
self.object_type, self.input_modifiers, self, self.inputs_widget + ) + if row is None: + if self.input_fields: + self.input_fields[-1].order_changed() + self.inputs_layout.addWidget(item_widget) + self.input_fields.append(item_widget) + else: + previous_field = None + if row > 0: + previous_field = self.input_fields[row - 1] + + next_field = None + max_index = self.count() + if row < max_index: + next_field = self.input_fields[row] + + self.inputs_layout.insertWidget(row, item_widget) + self.input_fields.insert(row, item_widget) + if previous_field: + previous_field.order_changed() + + if next_field: + next_field.order_changed() + + if is_empty: + item_widget.set_as_empty() + item_widget.value_changed.connect(self._on_value_change) + + item_widget.order_changed() + + previous_input = None + for input_field in self.input_fields: + if previous_input is not None: + self.setTabOrder( + previous_input, input_field.value_input.focusProxy() + ) + previous_input = input_field.value_input.focusProxy() + + # Set text if entered text is not None + # else (when add button clicked) trigger `_on_value_change` + if value is not None: + if self._is_overriden: + item_widget.apply_overrides(value) + elif not self._has_studio_override: + item_widget.update_default_values(value) + else: + item_widget.update_studio_values(value) + self.hierarchical_style_update() + else: + self._on_value_change() + self.updateGeometry() + + def remove_row(self, item_widget): + item_widget.value_changed.disconnect() + + row = self.input_fields.index(item_widget) + previous_field = None + next_field = None + if row > 0: + previous_field = self.input_fields[row - 1] + + if row != len(self.input_fields) - 1: + next_field = self.input_fields[row + 1] + + self.inputs_layout.removeWidget(item_widget) + self.input_fields.pop(row) + item_widget.setParent(None) + item_widget.deleteLater() + + if previous_field: + previous_field.order_changed() + + if next_field: + next_field.order_changed() + + if self.count() 
== 0: + self.add_row(is_empty=True) + + self._on_value_change() + self.updateGeometry() + + def apply_overrides(self, parent_values): + self._is_modified = False + if parent_values is NOT_SET or self.key not in parent_values: + override_value = NOT_SET + else: + override_value = parent_values[self.key] + + self.override_value = override_value + + if override_value is NOT_SET: + self._is_overriden = False + self._was_overriden = False + if self.has_studio_override: + value = self.studio_value + else: + value = self.default_value + else: + self._is_overriden = True + self._was_overriden = True + value = override_value + + self._is_modified = False + self._state = None + + self.set_value(value) + + def hierarchical_style_update(self): + for input_field in self.input_fields: + input_field.hierarchical_style_update() + self.update_style() + + def update_style(self): + if self._as_widget: + if not self.isEnabled(): + state = self.style_state(False, False, False, False) + else: + state = self.style_state( + False, + self._is_invalid, + False, + self._is_modified + ) + else: + state = self.style_state( + self.has_studio_override, + self.is_invalid, + self.is_overriden, + self.is_modified + ) + if self._state == state: + return + + self.label_widget.setProperty("state", state) + self.label_widget.style().polish(self.label_widget) + + def item_value(self): + output = [] + for item in self.input_fields: + value = item.config_value() + if value is not NOT_SET: + output.append(value) + return output + + +class ModifiableDictItem(QtWidgets.QWidget, SettingObject): + _btn_size = 20 + value_changed = QtCore.Signal(object) + + def __init__(self, object_type, input_modifiers, config_parent, parent): + super(ModifiableDictItem, self).__init__(parent) + + self._set_default_attributes() + self._parent = config_parent + + self.is_key_duplicated = False + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(3) + + ItemKlass = 
TypeToKlass.types[object_type]

        self.key_input = QtWidgets.QLineEdit(self)
        self.key_input.setObjectName("DictKey")

        # Value editor class is resolved from `object_type`; it is embedded
        # in "as_widget" mode (no own label).
        self.value_input = ItemKlass(
            input_modifiers,
            self,
            as_widget=True,
            label_widget=None
        )
        self.add_btn = QtWidgets.QPushButton("+")
        self.remove_btn = QtWidgets.QPushButton("-")

        self.add_btn.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.remove_btn.setFocusPolicy(QtCore.Qt.ClickFocus)

        self.add_btn.setProperty("btn-type", "tool-item")
        self.remove_btn.setProperty("btn-type", "tool-item")

        layout.addWidget(self.add_btn, 0)
        layout.addWidget(self.remove_btn, 0)
        layout.addWidget(self.key_input, 0)
        layout.addWidget(self.value_input, 1)

        self.setFocusProxy(self.value_input)

        self.add_btn.setFixedSize(self._btn_size, self._btn_size)
        self.remove_btn.setFixedSize(self._btn_size, self._btn_size)
        self.add_btn.clicked.connect(self.on_add_clicked)
        self.remove_btn.clicked.connect(self.on_remove_clicked)

        self.key_input.textChanged.connect(self._on_value_change)
        self.value_input.value_changed.connect(self._on_value_change)

        # Key the row was created with; compared in `is_key_modified`.
        self.origin_key = NOT_SET

    def key_value(self):
        # Current (possibly edited) dictionary key text.
        return self.key_input.text()

    def _is_enabled(self):
        # Disabled key input marks the row as an "empty" placeholder.
        return self.key_input.isEnabled()

    def is_key_invalid(self):
        # Empty or duplicated keys are invalid; disabled rows never are.
        if not self._is_enabled():
            return False

        if self.key_value() == "":
            return True

        if self.is_key_duplicated:
            return True
        return False

    def _on_value_change(self, item=None):
        self.update_style()
        self.value_changed.emit(self)

    def update_default_values(self, key, value):
        self.origin_key = key
        self.key_input.setText(key)
        self.value_input.update_default_values(value)

    def update_studio_values(self, key, value):
        self.origin_key = key
        self.key_input.setText(key)
        self.value_input.update_studio_values(value)

    def apply_overrides(self, key, value):
        self.origin_key = key
        self.key_input.setText(key)
        self.value_input.apply_overrides(value)

    @property
    def is_group(self):
        # Delegated to the owning ModifiableDict.
        return self._parent.is_group

    def on_add_clicked(self):
        # "+" on a disabled placeholder row enables the row instead of
        # inserting a new one below.
        if self._is_enabled():
            self._parent.add_row(row=self.row() + 1)
        else:
            self.set_as_empty(False)

    def on_remove_clicked(self):
        self._parent.remove_row(self)

    def set_as_empty(self, is_empty=True):
        # Placeholder rows are disabled and cannot be removed.
        self.key_input.setEnabled(not is_empty)
        self.value_input.setEnabled(not is_empty)
        self.remove_btn.setEnabled(not is_empty)
        self._on_value_change()

    @property
    def any_parent_is_group(self):
        return self._parent.any_parent_is_group

    def is_key_modified(self):
        return self.key_value() != self.origin_key

    def is_value_modified(self):
        return self.value_input.is_modified

    @property
    def is_modified(self):
        return self.is_value_modified() or self.is_key_modified()

    def hierarchical_style_update(self):
        self.value_input.hierarchical_style_update()
        self.update_style()

    @property
    def is_invalid(self):
        if not self._is_enabled():
            return False
        return self.is_key_invalid() or self.value_input.is_invalid

    def update_style(self):
        # Only the key input carries a row-level state property here;
        # the value input styles itself.
        state = ""
        if self._is_enabled():
            if self.is_key_invalid():
                state = "invalid"
            elif self.is_key_modified():
                state = "modified"

        self.key_input.setProperty("state", state)
        self.key_input.style().polish(self.key_input)

    def row(self):
        # Index of this row inside the parent widget's field list.
        return self._parent.input_fields.index(self)

    def item_value(self):
        key = self.key_input.text()
        value = self.value_input.item_value()
        return {key: value}

    def config_value(self):
        # Disabled (placeholder) rows contribute nothing to the output.
        if self._is_enabled():
            return self.item_value()
        return {}

    def mouseReleaseEvent(self, event):
        return QtWidgets.QWidget.mouseReleaseEvent(self, event)


class ModifiableDict(QtWidgets.QWidget, InputObject):
    """Editable mapping input: user-defined keys, one value type per entry."""

    default_input_value = {}
    # Should be used only for dictionary with one datatype as value
    # TODO this is actually input field (do not care if is group or not)
    value_changed = QtCore.Signal(object)

    def __init__(
        self, input_data, parent,
        as_widget=False, label_widget=None, 
parent_widget=None
    ):
        if parent_widget is None:
            parent_widget = parent
        super(ModifiableDict, self).__init__(parent_widget)
        self.setObjectName("ModifiableDict")

        self.initial_attributes(input_data, parent, as_widget)

        self.input_fields = []

        self.key = input_data["key"]

        main_layout = QtWidgets.QHBoxLayout(self)
        main_layout.setContentsMargins(0, 0, 0, 0)
        main_layout.setSpacing(0)

        content_widget = QtWidgets.QWidget(self)
        content_layout = QtWidgets.QVBoxLayout(content_widget)
        content_layout.setContentsMargins(CHILD_OFFSET, 3, 0, 3)

        if as_widget:
            main_layout.addWidget(content_widget)
            body_widget = None
        else:
            body_widget = ExpandingWidget(input_data["label"], self)
            main_layout.addWidget(body_widget)
            body_widget.set_content_widget(content_widget)

        self.body_widget = body_widget
        # NOTE(review): when `as_widget` is True, `body_widget` is None and
        # this attribute access would raise — presumably `as_widget` mode is
        # never reached here; confirm against callers.
        self.label_widget = body_widget.label_widget

        collapsable = input_data.get("collapsable", True)
        if collapsable:
            collapsed = input_data.get("collapsed", True)
            if not collapsed:
                body_widget.toggle_content()

        else:
            body_widget.hide_toolbox(hide_content=False)

        self.body_widget = body_widget
        self.content_widget = content_widget
        self.content_layout = content_layout

        self.setAttribute(QtCore.Qt.WA_TranslucentBackground)

        # Type of the value editor each row embeds.
        self.object_type = input_data["object_type"]
        self.input_modifiers = input_data.get("input_modifiers") or {}

        # Always start with one empty placeholder row.
        self.add_row(is_empty=True)

    def count(self):
        return len(self.input_fields)

    def set_value(self, value):
        # Build fresh rows from `value`, then drop the pre-existing rows.
        previous_inputs = tuple(self.input_fields)
        for item_key, item_value in value.items():
            self.add_row(key=item_key, value=item_value)

        for input_field in previous_inputs:
            self.remove_row(input_field)

        if self.count() == 0:
            self.add_row(is_empty=True)

    def _on_value_change(self, item=None):
        if self.ignore_value_changes:
            return

        # Group rows by key to detect duplicates and (un)mark them.
        fields_by_keys = collections.defaultdict(list)
        for input_field in self.input_fields:
            key = input_field.key_value()
            fields_by_keys[key].append(input_field)

        for fields in fields_by_keys.values():
            if len(fields) == 1:
                field = fields[0]
                if field.is_key_duplicated:
                    field.is_key_duplicated = False
                    field.update_style()
            else:
                for field in fields:
                    field.is_key_duplicated = True
                    field.update_style()

        if self.is_overidable:
            self._is_overriden = True
        else:
            self._has_studio_override = True

        # Modified is compared against whichever layer is currently active.
        if self._is_invalid:
            self._is_modified = True
        elif self._is_overriden:
            self._is_modified = self.item_value() != self.override_value
        elif self._has_studio_override:
            self._is_modified = self.item_value() != self.studio_value
        else:
            self._is_modified = self.item_value() != self.default_value

        self.update_style()

        self.value_changed.emit(self)

    def hierarchical_style_update(self):
        for input_field in self.input_fields:
            input_field.hierarchical_style_update()
        self.update_style()

    def update_style(self):
        if self._as_widget:
            if not self.isEnabled():
                state = self.style_state(False, False, False, False)
            else:
                state = self.style_state(
                    False,
                    self.is_invalid,
                    False,
                    self._is_modified
                )
        else:
            state = self.style_state(
                self.has_studio_override,
                self.is_invalid,
                self.is_overriden,
                self.is_modified
            )
        if self._state == state:
            return

        if state:
            child_state = "child-{}".format(state)
        else:
            child_state = ""

        if self.body_widget:
            self.body_widget.side_line_widget.setProperty("state", child_state)
            self.body_widget.side_line_widget.style().polish(
                self.body_widget.side_line_widget
            )

        if not self._as_widget:
            self.label_widget.setProperty("state", state)
            self.label_widget.style().polish(self.label_widget)

        self._state = state

    def all_item_values(self):
        # All rows, including disabled placeholder rows.
        output = {}
        for item in self.input_fields:
            output.update(item.item_value())
        return output

    def item_value(self):
        # Enabled rows only (`config_value` of a disabled row is empty).
        output = {}
        for item in self.input_fields:
            output.update(item.config_value())
        return output

    def add_row(self, 
row=None, key=None, value=None, is_empty=False): + # Create new item + item_widget = ModifiableDictItem( + self.object_type, self.input_modifiers, self, self.content_widget + ) + if is_empty: + item_widget.set_as_empty() + + item_widget.value_changed.connect(self._on_value_change) + + if row is None: + self.content_layout.addWidget(item_widget) + self.input_fields.append(item_widget) + else: + self.content_layout.insertWidget(row, item_widget) + self.input_fields.insert(row, item_widget) + + previous_input = None + for input_field in self.input_fields: + if previous_input is not None: + self.setTabOrder( + previous_input, input_field.key_input + ) + previous_input = input_field.value_input.focusProxy() + self.setTabOrder( + input_field.key_input, previous_input + ) + + # Set value if entered value is not None + # else (when add button clicked) trigger `_on_value_change` + if value is not None and key is not None: + if not self._has_studio_override: + item_widget.update_default_values(key, value) + elif self._is_overriden: + item_widget.apply_overrides(key, value) + else: + item_widget.update_studio_values(key, value) + self.hierarchical_style_update() + else: + self._on_value_change() + self.parent().updateGeometry() + + def remove_row(self, item_widget): + item_widget.value_changed.disconnect() + + self.content_layout.removeWidget(item_widget) + self.input_fields.remove(item_widget) + item_widget.setParent(None) + item_widget.deleteLater() + + if self.count() == 0: + self.add_row(is_empty=True) + + self._on_value_change() + self.parent().updateGeometry() + + @property + def is_invalid(self): + return self._is_invalid or self.child_invalid + + @property + def child_invalid(self): + for input_field in self.input_fields: + if input_field.is_invalid: + return True + return False + + +# Dictionaries +class DictWidget(QtWidgets.QWidget, SettingObject): + value_changed = QtCore.Signal(object) + + def __init__( + self, input_data, parent, + as_widget=False, 
label_widget=None, parent_widget=None + ): + if as_widget: + raise TypeError("Can't use \"{}\" as widget item.".format( + self.__class__.__name__ + )) + + if parent_widget is None: + parent_widget = parent + super(DictWidget, self).__init__(parent_widget) + self.setObjectName("DictWidget") + + self.initial_attributes(input_data, parent, as_widget) + + if input_data.get("highlight_content", False): + content_state = "hightlighted" + bottom_margin = 5 + else: + content_state = "" + bottom_margin = 0 + + self.input_fields = [] + + self.key = input_data["key"] + + main_layout = QtWidgets.QHBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.setSpacing(0) + + body_widget = ExpandingWidget(input_data["label"], self) + + main_layout.addWidget(body_widget) + + content_widget = QtWidgets.QWidget(body_widget) + content_widget.setObjectName("ContentWidget") + content_widget.setProperty("content_state", content_state) + content_layout = QtWidgets.QVBoxLayout(content_widget) + content_layout.setContentsMargins(CHILD_OFFSET, 5, 0, bottom_margin) + + body_widget.set_content_widget(content_widget) + + self.body_widget = body_widget + self.content_widget = content_widget + self.content_layout = content_layout + + self.label_widget = body_widget.label_widget + + self.checkbox_widget = None + self.checkbox_key = input_data.get("checkbox_key") + + for child_data in input_data.get("children", []): + self.add_children_gui(child_data) + + collapsable = input_data.get("collapsable", True) + if len(self.input_fields) == 1 and self.checkbox_widget: + body_widget.hide_toolbox(hide_content=True) + + elif collapsable: + collapsed = input_data.get("collapsed", True) + if not collapsed: + body_widget.toggle_content() + else: + body_widget.hide_toolbox(hide_content=False) + + def add_children_gui(self, child_configuration): + item_type = child_configuration["type"] + klass = TypeToKlass.types.get(item_type) + + if not klass.is_input_type: + item = klass(child_configuration, 
self)
            self.content_layout.addWidget(item)
            return item

        # The configured checkbox child is rendered next to the label
        # instead of inside the content area.
        if self.checkbox_key and not self.checkbox_widget:
            key = child_configuration.get("key")
            if key == self.checkbox_key:
                return self._add_checkbox_child(child_configuration)

        item = klass(child_configuration, self)
        item.value_changed.connect(self._on_value_change)
        self.content_layout.addWidget(item)

        self.input_fields.append(item)
        return item

    def _add_checkbox_child(self, child_configuration):
        # Boolean child placed right after the expanding label.
        item = BooleanWidget(
            child_configuration, self, label_widget=self.label_widget
        )
        item.value_changed.connect(self._on_value_change)

        self.body_widget.add_widget_after_label(item)
        self.checkbox_widget = item
        self.input_fields.append(item)
        return item

    def remove_overrides(self):
        self._is_overriden = False
        self._is_modified = False
        for input_field in self.input_fields:
            input_field.remove_overrides()

    def reset_to_pype_default(self):
        for input_field in self.input_fields:
            input_field.reset_to_pype_default()
        self._has_studio_override = False

    def set_studio_default(self):
        for input_field in self.input_fields:
            input_field.set_studio_default()

        if self.is_group:
            self._has_studio_override = True

    def discard_changes(self):
        self._is_overriden = self._was_overriden
        self._is_modified = False

        for input_field in self.input_fields:
            input_field.discard_changes()

        self._is_modified = self.child_modified

    def set_as_overriden(self):
        if self.is_overriden:
            return

        # A group is overridden as a whole; otherwise delegate to children.
        if self.is_group:
            self._is_overriden = True
            return

        for item in self.input_fields:
            item.set_as_overriden()

    def update_default_values(self, parent_values):
        value = NOT_SET
        if self._as_widget:
            value = parent_values
        elif parent_values is not NOT_SET:
            value = parent_values.get(self.key, NOT_SET)

        for item in self.input_fields:
            item.update_default_values(value)

    def update_studio_values(self, parent_values):
        value = NOT_SET
        if parent_values is not NOT_SET:
            value = parent_values.get(self.key, NOT_SET)

        self._has_studio_override = False
        if self.is_group and value is not NOT_SET:
            self._has_studio_override = True

        self._had_studio_override = bool(self._has_studio_override)

        for item in self.input_fields:
            item.update_studio_values(value)

    def apply_overrides(self, parent_values):
        # Make sure this is set to False
        self._state = None
        self._child_state = None

        metadata = {}
        groups = tuple()
        override_values = NOT_SET
        if parent_values is not NOT_SET:
            metadata = parent_values.get(METADATA_KEY) or metadata
            groups = metadata.get("groups") or groups
            override_values = parent_values.get(self.key, override_values)

        # Overridden when listed in the parent's "groups" metadata.
        self._is_overriden = self.key in groups

        for item in self.input_fields:
            item.apply_overrides(override_values)

        if not self._is_overriden:
            self._is_overriden = (
                self.is_group
                and self.is_overidable
                and self.child_overriden
            )
        self._was_overriden = bool(self._is_overriden)

    def _on_value_change(self, item=None):
        if self.ignore_value_changes:
            return

        if self.is_group:
            if self.is_overidable:
                self._is_overriden = True
            else:
                self._has_studio_override = True

        self.hierarchical_style_update()

        self.value_changed.emit(self)

        self.update_style()

    def hierarchical_style_update(self):
        for input_field in self.input_fields:
            input_field.hierarchical_style_update()
        self.update_style()

    def update_style(self, is_overriden=None):
        # Child state colours the side line; own state colours the label.
        child_has_studio_override = self.child_has_studio_override
        child_modified = self.child_modified
        child_invalid = self.child_invalid
        child_state = self.style_state(
            child_has_studio_override,
            child_invalid,
            self.child_overriden,
            child_modified
        )
        if child_state:
            child_state = "child-{}".format(child_state)

        if child_state != self._child_state:
            self.body_widget.side_line_widget.setProperty("state", child_state)
            self.body_widget.side_line_widget.style().polish(
                self.body_widget.side_line_widget
            )
            self._child_state = child_state

        state = self.style_state(
            self.had_studio_override,
            child_invalid,
            self.is_overriden,
            self.is_modified
        )
        if self._state == state:
            return

        self.label_widget.setProperty("state", state)
        self.label_widget.style().polish(self.label_widget)

        self._state = state

    @property
    def is_modified(self):
        # Only a group dict reports its own modification state.
        if self.is_group:
            return self._is_modified or self.child_modified
        return False

    @property
    def child_has_studio_override(self):
        for input_field in self.input_fields:
            if (
                input_field.has_studio_override
                or input_field.child_has_studio_override
            ):
                return True
        return False

    @property
    def child_modified(self):
        for input_field in self.input_fields:
            if input_field.child_modified:
                return True
        return False

    @property
    def child_overriden(self):
        for input_field in self.input_fields:
            if input_field.is_overriden or input_field.child_overriden:
                return True
        return False

    @property
    def child_invalid(self):
        for input_field in self.input_fields:
            if input_field.child_invalid:
                return True
        return False

    def get_invalid(self):
        output = []
        for input_field in self.input_fields:
            output.extend(input_field.get_invalid())
        return output

    def item_value(self):
        output = {}
        for input_field in self.input_fields:
            # TODO maybe merge instead of update should be used
            # NOTE merge is custom function which merges 2 dicts
            output.update(input_field.config_value())
        return output

    def studio_overrides(self):
        # Returns ({key: values-with-group-metadata}, is_group) or NOT_SET.
        if not self.has_studio_override and not self.child_has_studio_override:
            return NOT_SET, False

        values = {}
        groups = []
        for input_field in self.input_fields:
            value, is_group = input_field.studio_overrides()
            if value is not NOT_SET:
                values.update(value)
                if is_group:
                    groups.extend(value.keys())
        if groups:
            values[METADATA_KEY] = {"groups": groups}
        return {self.key: values}, self.is_group

    def 
overrides(self):
        # Project-override counterpart of `studio_overrides`.
        if not self.is_overriden and not self.child_overriden:
            return NOT_SET, False

        values = {}
        groups = []
        for input_field in self.input_fields:
            value, is_group = input_field.overrides()
            if value is not NOT_SET:
                values.update(value)
                if is_group:
                    groups.extend(value.keys())
        if groups:
            values[METADATA_KEY] = {"groups": groups}
        return {self.key: values}, self.is_group


class DictInvisible(QtWidgets.QWidget, SettingObject):
    """Dictionary wrapper without its own visual frame/label."""

    # TODO is not overridable by itself
    value_changed = QtCore.Signal(object)
    allow_actions = False

    def __init__(
        self, input_data, parent,
        as_widget=False, label_widget=None, parent_widget=None
    ):
        if parent_widget is None:
            parent_widget = parent
        super(DictInvisible, self).__init__(parent_widget)
        self.setObjectName("DictInvisible")

        self.initial_attributes(input_data, parent, as_widget)

        if self._is_group:
            raise TypeError("DictInvisible can't be marked as group input.")

        self.setAttribute(QtCore.Qt.WA_TranslucentBackground)

        layout = QtWidgets.QVBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setSpacing(5)

        self.input_fields = []

        self.key = input_data["key"]

        for child_data in input_data.get("children", []):
            self.add_children_gui(child_data)

    def add_children_gui(self, child_configuration):
        item_type = child_configuration["type"]
        klass = TypeToKlass.types.get(item_type)

        # Non-input children (e.g. labels/separators) are only displayed.
        if not klass.is_input_type:
            item = klass(child_configuration, self)
            self.layout().addWidget(item)
            return item

        item = klass(child_configuration, self)
        self.layout().addWidget(item)

        item.value_changed.connect(self._on_value_change)

        self.input_fields.append(item)
        return item

    def update_style(self, *args, **kwargs):
        # Invisible wrapper has nothing to style.
        return

    @property
    def child_has_studio_override(self):
        for input_field in self.input_fields:
            if (
                input_field.has_studio_override
                or input_field.child_has_studio_override
            ):
                return True
        return False

    @property
    def child_modified(self):
        for input_field in self.input_fields:
            if input_field.child_modified:
                return True
        return False

    @property
    def child_overriden(self):
        for input_field in self.input_fields:
            if input_field.is_overriden or input_field.child_overriden:
                return True
        return False

    @property
    def child_invalid(self):
        for input_field in self.input_fields:
            if input_field.child_invalid:
                return True
        return False

    def get_invalid(self):
        output = []
        for input_field in self.input_fields:
            output.extend(input_field.get_invalid())
        return output

    def item_value(self):
        output = {}
        for input_field in self.input_fields:
            # TODO maybe merge instead of update should be used
            # NOTE merge is custom function which merges 2 dicts
            output.update(input_field.config_value())
        return output

    def _on_value_change(self, item=None):
        if self.ignore_value_changes:
            return

        if self.is_group:
            if self.is_overidable:
                self._is_overriden = True
            else:
                self._has_studio_override = True
        self.hierarchical_style_update()

        self.value_changed.emit(self)

    def hierarchical_style_update(self):
        for input_field in self.input_fields:
            input_field.hierarchical_style_update()
        self.update_style()

    def remove_overrides(self):
        self._is_overriden = False
        self._is_modified = False
        for input_field in self.input_fields:
            input_field.remove_overrides()

    def reset_to_pype_default(self):
        for input_field in self.input_fields:
            input_field.reset_to_pype_default()
        self._has_studio_override = False

    def set_studio_default(self):
        for input_field in self.input_fields:
            input_field.set_studio_default()

        if self.is_group:
            self._has_studio_override = True

    def discard_changes(self):
        self._is_modified = False
        self._is_overriden = self._was_overriden

        for input_field in self.input_fields:
            input_field.discard_changes()

        self._is_modified = self.child_modified

    def set_as_overriden(self):
        if self.is_overriden:
            return

        if self.is_group:
            self._is_overriden = True
            return

        for item in self.input_fields:
            item.set_as_overriden()

    def update_default_values(self, parent_values):
        value = NOT_SET
        if self._as_widget:
            value = parent_values
        elif parent_values is not NOT_SET:
            value = parent_values.get(self.key, NOT_SET)

        for item in self.input_fields:
            item.update_default_values(value)

    def update_studio_values(self, parent_values):
        value = NOT_SET
        if parent_values is not NOT_SET:
            value = parent_values.get(self.key, NOT_SET)

        for item in self.input_fields:
            item.update_studio_values(value)

    def apply_overrides(self, parent_values):
        # Make sure this is set to False
        self._state = None
        self._child_state = None

        metadata = {}
        groups = tuple()
        override_values = NOT_SET
        if parent_values is not NOT_SET:
            metadata = parent_values.get(METADATA_KEY) or metadata
            groups = metadata.get("groups") or groups
            override_values = parent_values.get(self.key, override_values)

        self._is_overriden = self.key in groups

        for item in self.input_fields:
            item.apply_overrides(override_values)

        if not self._is_overriden:
            self._is_overriden = (
                self.is_group
                and self.is_overidable
                and self.child_overriden
            )
        self._was_overriden = bool(self._is_overriden)

    def studio_overrides(self):
        if not self.has_studio_override and not self.child_has_studio_override:
            return NOT_SET, False

        values = {}
        groups = []
        for input_field in self.input_fields:
            value, is_group = input_field.studio_overrides()
            if value is not NOT_SET:
                values.update(value)
                if is_group:
                    groups.extend(value.keys())
        if groups:
            values[METADATA_KEY] = {"groups": groups}
        return {self.key: values}, self.is_group

    def overrides(self):
        if not self.is_overriden and not self.child_overriden:
            return NOT_SET, False

        values = {}
        groups = []
        for input_field in self.input_fields:
            value, is_group = input_field.overrides()
            if value is not NOT_SET:
                values.update(value)
                if is_group:
                    groups.extend(value.keys())
        if groups:
            values[METADATA_KEY] = {"groups": groups}
        return {self.key: values}, self.is_group


class PathWidget(QtWidgets.QWidget, SettingObject):
    """Path input; optionally per-platform and/or multiple paths per value."""

    value_changed = QtCore.Signal(object)
    platforms = ("windows", "darwin", "linux")
    platform_labels_mapping = {
        "windows": "Windows",
        "darwin": "MacOS",
        "linux": "Linux"
    }

    def __init__(
        self, input_data, parent,
        as_widget=False, label_widget=None, parent_widget=None
    ):
        if parent_widget is None:
            parent_widget = parent
        super(PathWidget, self).__init__(parent_widget)

        self.initial_attributes(input_data, parent, as_widget)

        # This is partial input and dictionary input
        if not self.any_parent_is_group and not self._as_widget:
            self._is_group = True
        else:
            self._is_group = False

        # multiplatform: dict keyed by platform; multipath: list of paths.
        self.multiplatform = input_data.get("multiplatform", False)
        self.multipath = input_data.get("multipath", False)

        self.input_fields = []

        layout = QtWidgets.QHBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setSpacing(5)

        if not self._as_widget:
            self.key = input_data["key"]
            if not label_widget:
                label = input_data["label"]
                label_widget = QtWidgets.QLabel(label)
                label_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground)
                layout.addWidget(label_widget, 0, alignment=QtCore.Qt.AlignTop)
            self.label_widget = label_widget

        self.content_widget = QtWidgets.QWidget(self)
        self.content_layout = QtWidgets.QVBoxLayout(self.content_widget)
        self.content_layout.setSpacing(0)
        self.content_layout.setContentsMargins(0, 0, 0, 0)

        layout.addWidget(self.content_widget)

        self.create_gui()

    @property
    def default_input_value(self):
        # str / list per path, optionally wrapped in a per-platform dict.
        if self.multipath:
            value_type = list
        else:
            value_type = str

        if self.multiplatform:
            return {
                platform: value_type()
                for platform in self.platforms
            }
        else:
            return value_type()

    def create_gui(self):
        # Three layouts: single input, single-platform list, or a form with
        # one row (input or list) per platform.
        if not self.multiplatform and not self.multipath:
            input_data = {"key": self.key}
            path_input = PathInputWidget(
                input_data, self, label_widget=self.label_widget
            )
            self.setFocusProxy(path_input)
            self.content_layout.addWidget(path_input)
            self.input_fields.append(path_input)
            path_input.value_changed.connect(self._on_value_change)
            return

        input_data_for_list = {
            "object_type": "path-input"
        }
        if not self.multiplatform:
            input_data_for_list["key"] = self.key
            input_widget = ListWidget(
                input_data_for_list, self, label_widget=self.label_widget
            )
            self.setFocusProxy(input_widget)
            self.content_layout.addWidget(input_widget)
            self.input_fields.append(input_widget)
            input_widget.value_changed.connect(self._on_value_change)
            return

        proxy_widget = QtWidgets.QWidget(self.content_widget)
        proxy_layout = QtWidgets.QFormLayout(proxy_widget)
        for platform_key in self.platforms:
            platform_label = self.platform_labels_mapping[platform_key]
            label_widget = QtWidgets.QLabel(platform_label, proxy_widget)
            if self.multipath:
                input_data_for_list["key"] = platform_key
                input_widget = ListWidget(
                    input_data_for_list, self, label_widget=label_widget
                )
            else:
                input_data = {"key": platform_key}
                input_widget = PathInputWidget(
                    input_data, self, label_widget=label_widget
                )
            proxy_layout.addRow(label_widget, input_widget)
            self.input_fields.append(input_widget)
            input_widget.value_changed.connect(self._on_value_change)

        self.setFocusProxy(self.input_fields[0])
        self.content_layout.addWidget(proxy_widget)

    def update_default_values(self, parent_values):
        self._state = None
        self._child_state = None
        self._is_modified = False

        value = NOT_SET
        if self._as_widget:
            value = parent_values
        elif parent_values is not NOT_SET:
            if not self.multiplatform:
                value = parent_values
            else:
                value = parent_values.get(self.key, NOT_SET)

        if value is NOT_SET:
            # Developer mode fabricates defaults so schemas without them
            # can still be edited.
            if self.develop_mode:
                # NOTE(review): wrapping in {self.key: ...} for the
                # `_as_widget` case looks suspicious (`self.key` may not be
                # set when used as a widget) — confirm against callers.
                if self._as_widget or not self.multiplatform:
                    value = {self.key: self.default_input_value}
                else:
                    value = self.default_input_value
                self.defaults_not_set = True
                if value is NOT_SET:
                    raise NotImplementedError((
                        "{} Does not have implemented"
                        " attribute `default_input_value`"
                    ).format(self))

            else:
                raise ValueError(
                    "Default value is not set. This is implementation BUG."
                )

        self.default_value = value
        self._has_studio_override = False
        self._had_studio_override = False

        if not self.multiplatform:
            self.input_fields[0].update_default_values(value)
        else:
            for input_field in self.input_fields:
                input_field.update_default_values(value)

    def update_studio_values(self, parent_values):
        self._state = None
        self._child_state = None
        self._is_modified = False

        value = NOT_SET
        if self._as_widget:
            value = parent_values
        elif parent_values is not NOT_SET:
            if not self.multiplatform:
                value = parent_values
            else:
                value = parent_values.get(self.key, NOT_SET)

        self.studio_value = value
        if value is not NOT_SET:
            self._has_studio_override = True
            self._had_studio_override = True
        else:
            self._has_studio_override = False
            self._had_studio_override = False
            # Fall back to defaults when studio layer has no value.
            value = self.default_value

        if not self.multiplatform:
            self.input_fields[0].update_studio_values(value)
        else:
            for input_field in self.input_fields:
                input_field.update_studio_values(value)

    def apply_overrides(self, parent_values):
        self._is_modified = False
        self._state = None
        self._child_state = None

        override_values = NOT_SET
        if self._as_widget:
            override_values = parent_values
        elif parent_values is not NOT_SET:
            if not self.multiplatform:
                override_values = parent_values
            else:
                override_values = parent_values.get(self.key, NOT_SET)

        self._is_overriden = override_values is not NOT_SET
        self._was_overriden = bool(self._is_overriden)

        # For single-platform, `override_values` equals `parent_values`.
        if not self.multiplatform:
            self.input_fields[0].apply_overrides(parent_values)
        else:
            for input_field in self.input_fields:
                input_field.apply_overrides(override_values)

        if not self._is_overriden:
            self._is_overriden = (
                self.is_group
                and self.is_overidable
                and self.child_overriden
            )
        self._is_modified = False
        self._was_overriden = bool(self._is_overriden)

    def set_value(self, value):
        if not self.multiplatform:
            self.input_fields[0].set_value(value)

        else:
            # Per-platform dict: dispatch each platform to its field.
            for input_field in self.input_fields:
                _value = value[input_field.key]
                input_field.set_value(_value)

    def _on_value_change(self, item=None):
        if self.ignore_value_changes:
            return

        if self.is_overidable:
            self._is_overriden = True
        else:
            self._has_studio_override = True

        if self._is_invalid:
            self._is_modified = True
        elif self._is_overriden:
            self._is_modified = self.item_value() != self.override_value
        elif self._has_studio_override:
            self._is_modified = self.item_value() != self.studio_value
        else:
            self._is_modified = self.item_value() != self.default_value

        self.hierarchical_style_update()

        self.value_changed.emit(self)

    def update_style(self, is_overriden=None):
        child_has_studio_override = self.child_has_studio_override
        child_modified = self.child_modified
        child_invalid = self.child_invalid
        child_state = self.style_state(
            child_has_studio_override,
            child_invalid,
            self.child_overriden,
            child_modified
        )
        if child_state:
            child_state = "child-{}".format(child_state)

        if child_state != self._child_state:
            self.setProperty("state", child_state)
            self.style().polish(self)
            self._child_state = child_state

        if not self._as_widget:
            state = self.style_state(
                child_has_studio_override,
                child_invalid,
                self.is_overriden,
                self.is_modified
            )
            if self._state == state:
                return

            self.label_widget.setProperty("state", state)
            self.label_widget.style().polish(self.label_widget)

            self._state = state

    def remove_overrides(self):
        self._is_overriden = False
        self._is_modified = False
        for input_field in self.input_fields:
            input_field.remove_overrides()

    def reset_to_pype_default(self):
        for input_field in self.input_fields:
            input_field.reset_to_pype_default()
        self._has_studio_override = False

    def set_studio_default(self):
        for input_field in self.input_fields:
            input_field.set_studio_default()

        if self.is_group:
            self._has_studio_override = True

    def discard_changes(self):
        self._is_modified = False
        self._is_overriden = self._was_overriden

        for input_field in self.input_fields:
            input_field.discard_changes()

        self._is_modified = self.child_modified

    def set_as_overriden(self):
        self._is_overriden = True

    @property
    def child_has_studio_override(self):
        for input_field in self.input_fields:
            if (
                input_field.has_studio_override
                or input_field.child_has_studio_override
            ):
                return True
        return False

    @property
    def child_modified(self):
        for input_field in self.input_fields:
            if input_field.child_modified:
                return True
        return False

    @property
    def child_overriden(self):
        for input_field in self.input_fields:
            if input_field.child_overriden:
                return True
        return False

    @property
    def child_invalid(self):
        for input_field in self.input_fields:
            if input_field.child_invalid:
                return True
        return False

    def hierarchical_style_update(self):
        for input_field in self.input_fields:
            input_field.hierarchical_style_update()
        self.update_style()

    def item_value(self):
        # NOTE(review): the first two branches are identical — both return
        # the single field's value; likely a leftover from an earlier split.
        if not self.multiplatform and not self.multipath:
            return self.input_fields[0].item_value()

        if not self.multiplatform:
            return self.input_fields[0].item_value()

        output = {}
        for input_field in self.input_fields:
            output.update(input_field.config_value())
        return output

    def studio_overrides(self):
        if not self.has_studio_override and not self.child_has_studio_override:
            return NOT_SET, False

        value = self.item_value()
        if not self.multiplatform:
            value = {self.key: value}
        return value, self.is_group

    def overrides(self):
        if not self.is_overriden and not self.child_overriden:
            return NOT_SET, False

        value = self.item_value()
        if not self.multiplatform:
            value = {self.key: value}
        return value, self.is_group


# Proxy for form layout
class FormLabel(QtWidgets.QLabel):
    """Label that remembers the input item it belongs to (for context menu)."""

    def __init__(self, *args, **kwargs):
        super(FormLabel, self).__init__(*args, **kwargs)
        # Paired input widget; set by DictFormWidget.add_children_gui.
        self.item = None


class DictFormWidget(QtWidgets.QWidget, SettingObject):
    """Dictionary rendered as a label/input form (no expanding body)."""

    value_changed = QtCore.Signal(object)
    allow_actions = False

    def __init__(
        self, input_data, parent,
        as_widget=False, label_widget=None, parent_widget=None
    ):
        if parent_widget is None:
            parent_widget = parent
        super(DictFormWidget, self).__init__(parent_widget)

        self.initial_attributes(input_data, parent, as_widget)

        self._as_widget = False
        self._is_group = False

        self.input_fields = []
        self.content_layout = QtWidgets.QFormLayout(self)
        self.content_layout.setContentsMargins(0, 0, 0, 0)

        for child_data in input_data.get("children", []):
            self.add_children_gui(child_data)

        self.setAttribute(QtCore.Qt.WA_TranslucentBackground)

    def add_children_gui(self, child_configuration):
        item_type = child_configuration["type"]
        # Pop label to not be set in child
        label = child_configuration["label"]

        klass = TypeToKlass.types.get(item_type)

        label_widget = FormLabel(label, self)

        item = klass(child_configuration, self, label_widget=label_widget)
        label_widget.item = item

        item.value_changed.connect(self._on_value_change)
        self.content_layout.addRow(label_widget, item)
        self.input_fields.append(item)
        return item

    def mouseReleaseEvent(self, event):
        # Right-click on a form label forwards the event to its input item
        # so the item's context actions apply.
        if event.button() == QtCore.Qt.RightButton:
            position = self.mapFromGlobal(QtGui.QCursor().pos())
            widget = self.childAt(position)
            if widget and isinstance(widget, FormLabel):
                widget.item.mouseReleaseEvent(event)
                event.accept()
                return
        super(DictFormWidget, self).mouseReleaseEvent(event)

    def apply_overrides(self, parent_values):
for item in self.input_fields: + item.apply_overrides(parent_values) + + def discard_changes(self): + self._is_modified = False + self._is_overriden = self._was_overriden + + for item in self.input_fields: + item.discard_changes() + + self._is_modified = self.child_modified + + def remove_overrides(self): + self._is_overriden = False + self._is_modified = False + for input_field in self.input_fields: + input_field.remove_overrides() + + def reset_to_pype_default(self): + for input_field in self.input_fields: + input_field.reset_to_pype_default() + self._has_studio_override = False + + def set_studio_default(self): + for input_field in self.input_fields: + input_field.set_studio_default() + + if self.is_group: + self._has_studio_override = True + + def set_as_overriden(self): + if self.is_overriden: + return + + if self.is_group: + self._is_overriden = True + return + + for item in self.input_fields: + item.set_as_overriden() + + def update_default_values(self, value): + for item in self.input_fields: + item.update_default_values(value) + + def update_studio_values(self, value): + for item in self.input_fields: + item.update_studio_values(value) + + def _on_value_change(self, item=None): + if self.ignore_value_changes: + return + + self.value_changed.emit(self) + if self.any_parent_is_group: + self.hierarchical_style_update() + + @property + def child_has_studio_override(self): + for input_field in self.input_fields: + if ( + input_field.has_studio_override + or input_field.child_has_studio_override + ): + return True + return False + + @property + def child_modified(self): + for input_field in self.input_fields: + if input_field.child_modified: + return True + return False + + @property + def child_overriden(self): + for input_field in self.input_fields: + if input_field.is_overriden or input_field.child_overriden: + return True + return False + + @property + def child_invalid(self): + for input_field in self.input_fields: + if input_field.child_invalid: + return 
True + return False + + def get_invalid(self): + output = [] + for input_field in self.input_fields: + output.extend(input_field.get_invalid()) + return output + + def hierarchical_style_update(self): + for input_field in self.input_fields: + input_field.hierarchical_style_update() + + def item_value(self): + output = {} + for input_field in self.input_fields: + # TODO maybe merge instead of update should be used + # NOTE merge is custom function which merges 2 dicts + output.update(input_field.config_value()) + return output + + def config_value(self): + return self.item_value() + + def studio_overrides(self): + if not self.has_studio_override and not self.child_has_studio_override: + return NOT_SET, False + + values = {} + groups = [] + for input_field in self.input_fields: + value, is_group = input_field.studio_overrides() + if value is not NOT_SET: + values.update(value) + if is_group: + groups.extend(value.keys()) + if groups: + values[METADATA_KEY] = {"groups": groups} + return values, self.is_group + + def overrides(self): + if not self.is_overriden and not self.child_overriden: + return NOT_SET, False + + values = {} + groups = [] + for input_field in self.input_fields: + value, is_group = input_field.overrides() + if value is not NOT_SET: + values.update(value) + if is_group: + groups.extend(value.keys()) + if groups: + values[METADATA_KEY] = {"groups": groups} + return values, self.is_group + + +class LabelWidget(QtWidgets.QWidget): + is_input_type = False + + def __init__(self, configuration, parent=None): + super(LabelWidget, self).__init__(parent) + self.setObjectName("LabelWidget") + + label = configuration["label"] + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(5, 5, 5, 5) + label_widget = QtWidgets.QLabel(label, self) + layout.addWidget(label_widget) + + +class SplitterWidget(QtWidgets.QWidget): + is_input_type = False + _height = 2 + + def __init__(self, configuration, parent=None): + super(SplitterWidget, 
self).__init__(parent) + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(5, 5, 5, 5) + splitter_item = QtWidgets.QWidget(self) + splitter_item.setObjectName("SplitterItem") + splitter_item.setMinimumHeight(self._height) + splitter_item.setMaximumHeight(self._height) + layout.addWidget(splitter_item) + + +TypeToKlass.types["boolean"] = BooleanWidget +TypeToKlass.types["number"] = NumberWidget +TypeToKlass.types["text"] = TextWidget +TypeToKlass.types["path-input"] = PathInputWidget +TypeToKlass.types["raw-json"] = RawJsonWidget +TypeToKlass.types["list"] = ListWidget +TypeToKlass.types["dict-modifiable"] = ModifiableDict +TypeToKlass.types["dict"] = DictWidget +TypeToKlass.types["dict-invisible"] = DictInvisible +TypeToKlass.types["path-widget"] = PathWidget +TypeToKlass.types["dict-form"] = DictFormWidget + +TypeToKlass.types["label"] = LabelWidget +TypeToKlass.types["splitter"] = SplitterWidget diff --git a/pype/tools/settings/settings/widgets/lib.py b/pype/tools/settings/settings/widgets/lib.py new file mode 100644 index 0000000000..e225d65417 --- /dev/null +++ b/pype/tools/settings/settings/widgets/lib.py @@ -0,0 +1,311 @@ +import os +import json +import copy +from pype.settings.lib import OVERRIDEN_KEY +from queue import Queue + + +# Singleton database of available inputs +class TypeToKlass: + types = {} + + +NOT_SET = type("NOT_SET", (), {"__bool__": lambda obj: False})() +METADATA_KEY = type("METADATA_KEY", (), {}) +OVERRIDE_VERSION = 1 +CHILD_OFFSET = 15 + + +def convert_gui_data_to_overrides(data, first=True): + if not data or not isinstance(data, dict): + return data + + output = {} + if first: + output["__override_version__"] = OVERRIDE_VERSION + + if METADATA_KEY in data: + metadata = data.pop(METADATA_KEY) + for key, value in metadata.items(): + if key == "groups": + output[OVERRIDEN_KEY] = value + else: + KeyError("Unknown metadata key \"{}\"".format(key)) + + for key, value in data.items(): + output[key] = 
def convert_overrides_to_gui_data(data, first=True):
    """Return *data* converted from override-storage form to GUI form.

    A stored ``OVERRIDEN_KEY`` list is moved under
    ``output[METADATA_KEY]["groups"]``; nested dictionaries are converted
    recursively. Non-dict (or empty) values are returned unchanged.

    Fix: the original ``data.pop(OVERRIDEN_KEY)`` mutated the caller's
    dictionary; membership checks keep the input intact.

    NOTE(review): ``first`` is accepted for symmetry with
    ``convert_gui_data_to_overrides`` but is currently unused — a
    ``__override_version__`` entry (if present) is passed through as-is;
    confirm whether it should be stripped on the way back.
    """
    if not data or not isinstance(data, dict):
        return data

    output = {}
    if OVERRIDEN_KEY in data:
        output[METADATA_KEY] = {"groups": data[OVERRIDEN_KEY]}

    for key, value in data.items():
        if key == OVERRIDEN_KEY:
            continue
        output[key] = convert_overrides_to_gui_data(value, False)

    return output


def _fill_inner_schemas(schema_data, schema_collection):
    """Recursively expand ``{"type": "schema"}`` placeholders in-place.

    Each placeholder child lists schema names in its ``children``; those
    are looked up in *schema_collection* and spliced into the parent's
    ``children``. The top-level item itself must not be a placeholder.

    NOTE: mutates *schema_data* (its ``children`` list is replaced) and the
    entries of *schema_collection* that get expanded.
    """
    if schema_data["type"] == "schema":
        raise ValueError("First item in schema data can't be schema.")

    children = schema_data.get("children")
    if not children:
        return schema_data

    new_children = []
    for child in children:
        if child["type"] != "schema":
            new_children.append(_fill_inner_schemas(child, schema_collection))
            continue

        for schema_name in child["children"]:
            new_children.append(
                _fill_inner_schemas(
                    schema_collection[schema_name],
                    schema_collection
                )
            )

    schema_data["children"] = new_children
    return schema_data


class SchemaMissingFileInfo(Exception):
    """Raised when schema items do not declare an output file ("is_file").

    *invalid* is a list of key paths (each a list of keys) lacking the flag.
    """

    def __init__(self, invalid):
        full_path_keys = [
            "\"{}\"".format("/".join(item)) for item in invalid
        ]
        msg = (
            "Schema has missing definition of output file (\"is_file\" key)"
            " for keys. [{}]"
        ).format(", ".join(full_path_keys))
        super(SchemaMissingFileInfo, self).__init__(msg)
class SchemaDuplicatedKeys(Exception):
    """Raised when one hierarchy level of a schema defines the same key twice.

    *invalid* maps a "joined/key/path" string to the set of duplicated keys.
    """

    def __init__(self, invalid):
        parts = []
        for key_path, keys in invalid.items():
            joined = ", ".join("\"{}\"".format(key) for key in keys)
            parts.append("\"{}\" ({})".format(key_path, joined))

        msg = (
            "Schema items contain duplicated keys in one hierarchy level. {}"
        ).format(" || ".join(parts))
        super(SchemaDuplicatedKeys, self).__init__(msg)


def file_keys_from_schema(schema_data):
    """Collect key paths of all children flagged with ``is_file``.

    Returns a list of key paths (each a list of keys) relative to
    *schema_data*. Items whose widget class is not an input type yield
    nothing.
    """
    output = []
    klass = TypeToKlass.types[schema_data["type"]]
    if not klass.is_input_type:
        return output

    base_keys = []
    own_key = schema_data.get("key")
    if own_key:
        base_keys.append(own_key)

    for child in schema_data["children"]:
        if child.get("is_file"):
            output.append(base_keys + [child["key"]])
        else:
            for sub_path in file_keys_from_schema(child):
                output.append(base_keys + sub_path)
    return output


def validate_all_has_ending_file(schema_data, is_top=True):
    """Check that every input branch eventually ends in an ``is_file`` item.

    Nested calls return a list of offending key paths (or None); the
    top-level call raises SchemaMissingFileInfo when any were found.
    """
    klass = TypeToKlass.types[schema_data["type"]]
    if not klass.is_input_type:
        return None

    if schema_data.get("is_file"):
        return None

    children = schema_data.get("children")
    if not children:
        # Leaf input without the flag: report its own key.
        return [[schema_data["key"]]]

    invalid = []
    keyless = "key" not in schema_data
    for child in children:
        result = validate_all_has_ending_file(child, False)
        if result is None:
            continue

        if keyless:
            invalid.extend(result)
        else:
            invalid.extend(
                [schema_data["key"]] + item for item in result
            )

    if not invalid:
        return None

    if not is_top:
        return invalid

    raise SchemaMissingFileInfo(invalid)
def validate_keys_are_unique(schema_data, keys=None):
    """Validate that no hierarchy level of *schema_data* defines a key twice.

    Keyless wrapper items are traversed transparently (their children count
    toward the same level). Nested calls return a mapping of
    "joined/key/path" -> duplicated key set; the top-level call raises
    SchemaDuplicatedKeys when any duplicates exist and returns None
    otherwise.

    Fix: the original unconditionally read ``schema_data["key"]`` for the
    top-level item and crashed with KeyError on a keyless root; the key is
    now optional, consistent with the other validators.
    """
    children = schema_data.get("children")
    if not children:
        return None

    is_top = keys is None
    if keys is None:
        keys = []
    own_key = schema_data.get("key")
    if own_key is not None:
        keys.append(own_key)

    # Flatten keyless wrappers so their children compete on this level.
    child_queue = Queue()
    for child in children:
        child_queue.put(child)

    child_inputs = []
    while not child_queue.empty():
        child = child_queue.get()
        if "key" not in child:
            for _child in child.get("children") or []:
                child_queue.put(_child)
        else:
            child_inputs.append(child)

    duplicated_keys = set()
    seen_keys = set()
    for child in child_inputs:
        key = child["key"]
        if key in seen_keys:
            duplicated_keys.add(key)
        else:
            seen_keys.add(key)

    invalid = {}
    if duplicated_keys:
        invalid["/".join(keys)] = duplicated_keys

    for child in child_inputs:
        result = validate_keys_are_unique(child, copy.deepcopy(keys))
        if result:
            invalid.update(result)

    if not is_top:
        return invalid

    if invalid:
        raise SchemaDuplicatedKeys(invalid)
def gui_schema(subfolder, main_schema_name):
    """Load, compose and validate a GUI schema.

    Reads every ``*.json`` file from ``gui_schemas/<subfolder>`` next to
    this package, resolves nested ``schema`` references through
    ``_fill_inner_schemas`` and validates the composed result.

    Args:
        subfolder (str): Sub-directory of "gui_schemas" to read.
        main_schema_name (str): Basename (without extension) of the entry
            schema.

    Returns:
        dict: The validated, fully expanded schema.

    Raises:
        SchemaMissingFileInfo, SchemeGroupHierarchyBug, SchemaDuplicatedKeys:
            propagated from validate_schema when the schema is malformed.

    Fix: removed the original first statement ``subfolder,
    main_schema_name`` — a bare tuple expression with no effect (dead
    code left over from debugging).
    """
    dirpath = os.path.join(
        os.path.dirname(os.path.dirname(__file__)),
        "gui_schemas",
        subfolder
    )

    loaded_schemas = {}
    for filename in os.listdir(dirpath):
        basename, ext = os.path.splitext(filename)
        if ext != ".json":
            continue

        filepath = os.path.join(dirpath, filename)
        with open(filepath, "r") as json_stream:
            loaded_schemas[basename] = json.load(json_stream)

    main_schema = _fill_inner_schemas(
        loaded_schemas[main_schema_name],
        loaded_schemas
    )
    validate_schema(main_schema)
    return main_schema
class AddibleComboBox(QtWidgets.QComboBox):
    """Editable combo box with completer-backed, case-insensitive search.

    The line edit shows *placeholder* while empty.
    """

    def __init__(self, placeholder="", parent=None):
        super(AddibleComboBox, self).__init__(parent)

        self.setEditable(True)
        self.lineEdit().setPlaceholderText(placeholder)

        # Case-insensitive popup completion while typing.
        completer = self.completer()
        completer.setCompletionMode(completer.PopupCompletion)
        completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)

    def populate(self, items):
        """Replace the combo box content with *items*."""
        self.clear()
        self.addItems(items)

    def get_valid_value(self):
        """Return the current text if it matches an existing item, else None.

        NOTE(review): the original docstring claimed the empty placeholder
        value "returns as ''", but ``text or None`` maps the empty string
        to None as well — confirm which behavior callers expect. Behavior
        kept exactly as before.
        """
        text = self.currentText()
        known = {self.itemText(i) for i in range(self.count())}
        if text not in known:
            return None

        return text or None


class MultiselectEnum(QtWidgets.QWidget):
    """Tool button whose checkable menu acts as a multi-select enum."""

    selection_changed = QtCore.Signal()

    def __init__(self, title, parent=None):
        super(MultiselectEnum, self).__init__(parent)
        toolbutton = QtWidgets.QToolButton(self)
        toolbutton.setText(title)

        toolmenu = SelectableMenu(toolbutton)

        toolbutton.setMenu(toolmenu)
        toolbutton.setPopupMode(QtWidgets.QToolButton.MenuButtonPopup)

        layout = QtWidgets.QHBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(toolbutton)
        self.setLayout(layout)

        # Re-emit menu selection changes as this widget's signal.
        toolmenu.selection_changed.connect(self.selection_changed)

        self.toolbutton = toolbutton
        self.toolmenu = toolmenu
        self.main_layout = layout

    def populate(self, items):
        """Replace the menu content with checkable actions for *items*."""
        self.toolmenu.clear()
        self.addItems(items)

    def addItems(self, items):
        """Append a checked, checkable action for every item.

        Fix: the original called ``toolmenu.addAction`` twice per item —
        once via ``addAction(item)`` (which already creates AND inserts the
        action) and again with the created action object. The redundant
        second call is removed.
        """
        for item in items:
            action = self.toolmenu.addAction(item)
            action.setCheckable(True)
            action.setChecked(True)

    def items(self):
        """Yield every action currently held by the menu."""
        for action in self.toolmenu.actions():
            yield action


class NumberSpinBox(QtWidgets.QDoubleSpinBox):
    """Spin box for int or float values.

    Extra keyword arguments popped before the Qt constructor:
        minimum (default -99999), maximum (default 99999) and ``decimal``
        (default 0) — the decimal count; with 0, value() returns int.
    """

    def __init__(self, *args, **kwargs):
        min_value = kwargs.pop("minimum", -99999)
        max_value = kwargs.pop("maximum", 99999)
        decimals = kwargs.pop("decimal", 0)
        super(NumberSpinBox, self).__init__(*args, **kwargs)
        # StrongFocus so an unfocused spin box ignores wheel scrolling.
        self.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.setDecimals(decimals)
        self.setMinimum(min_value)
        self.setMaximum(max_value)

    def wheelEvent(self, event):
        # React to the wheel only while focused; otherwise let the
        # surrounding scroll area handle it.
        if self.hasFocus():
            super(NumberSpinBox, self).wheelEvent(event)
        else:
            event.ignore()

    def value(self):
        """Return the current value, as int when no decimals are shown."""
        output = super(NumberSpinBox, self).value()
        if self.decimals() == 0:
            output = int(output)
        return output
class ClickableWidget(QtWidgets.QWidget):
    """Widget that emits ``clicked`` on a left-button release."""

    clicked = QtCore.Signal()

    def mouseReleaseEvent(self, event):
        if event.button() == QtCore.Qt.LeftButton:
            self.clicked.emit()
        super(ClickableWidget, self).mouseReleaseEvent(event)


class ExpandingWidget(QtWidgets.QWidget):
    """Collapsible section: a clickable header row plus a content widget.

    The content widget must be provided via set_content_widget().

    Fix: ``self.content_widget`` is now initialized to None and guarded
    wherever it is dereferenced — the original raised AttributeError when
    hide_toolbox()/toggle_content()/resizeEvent() ran before
    set_content_widget() was called.
    """

    def __init__(self, label, parent):
        super(ExpandingWidget, self).__init__(parent)

        self.toolbox_hidden = False
        # Set later by set_content_widget(); guarded everywhere it is used.
        self.content_widget = None

        top_part = ClickableWidget(parent=self)

        button_size = QtCore.QSize(5, 5)
        button_toggle = QtWidgets.QToolButton(parent=top_part)
        button_toggle.setProperty("btn-type", "expand-toggle")
        button_toggle.setIconSize(button_size)
        button_toggle.setArrowType(QtCore.Qt.RightArrow)
        button_toggle.setCheckable(True)
        button_toggle.setChecked(False)

        label_widget = QtWidgets.QLabel(label, parent=top_part)
        label_widget.setObjectName("DictLabel")

        side_line_widget = QtWidgets.QWidget(top_part)
        side_line_widget.setObjectName("SideLineWidget")
        side_line_layout = QtWidgets.QHBoxLayout(side_line_widget)
        side_line_layout.setContentsMargins(5, 10, 0, 10)
        side_line_layout.addWidget(button_toggle)
        side_line_layout.addWidget(label_widget)

        top_part_layout = QtWidgets.QHBoxLayout(top_part)
        top_part_layout.setContentsMargins(0, 0, 0, 0)
        top_part_layout.addWidget(side_line_widget)

        self.setAttribute(QtCore.Qt.WA_TranslucentBackground)

        # Created lazily by _add_side_widget_subwidgets().
        self.top_part_ending = None
        self.after_label_layout = None
        self.end_of_layout = None

        self.side_line_widget = side_line_widget
        self.side_line_layout = side_line_layout
        self.button_toggle = button_toggle
        self.label_widget = label_widget

        top_part.clicked.connect(self._top_part_clicked)
        self.button_toggle.clicked.connect(self._btn_clicked)

        self.main_layout = QtWidgets.QVBoxLayout(self)
        self.main_layout.setContentsMargins(0, 0, 0, 0)
        self.main_layout.setSpacing(0)
        self.main_layout.addWidget(top_part)

    def hide_toolbox(self, hide_content=False):
        """Hide the expand arrow; optionally hide the content as well."""
        self.button_toggle.setArrowType(QtCore.Qt.NoArrow)
        self.toolbox_hidden = True
        if self.content_widget is not None:
            self.content_widget.setVisible(not hide_content)
        self.parent().updateGeometry()

    def set_content_widget(self, content_widget):
        """Attach the collapsible body (hidden until expanded)."""
        content_widget.setVisible(False)
        self.main_layout.addWidget(content_widget)
        self.content_widget = content_widget

    def _btn_clicked(self):
        self.toggle_content(self.button_toggle.isChecked())

    def _top_part_clicked(self):
        self.toggle_content()

    def toggle_content(self, *args):
        """Expand/collapse; optional bool arg forces the state."""
        if self.toolbox_hidden:
            return

        if len(args) > 0:
            checked = args[0]
        else:
            checked = not self.button_toggle.isChecked()

        arrow_type = QtCore.Qt.DownArrow if checked else QtCore.Qt.RightArrow
        self.button_toggle.setChecked(checked)
        self.button_toggle.setArrowType(arrow_type)
        if self.content_widget is not None:
            self.content_widget.setVisible(checked)
        self.parent().updateGeometry()

    def add_widget_after_label(self, widget):
        """Place *widget* in the header, right after the label."""
        self._add_side_widget_subwidgets()
        self.after_label_layout.addWidget(widget)

    def _add_side_widget_subwidgets(self):
        # One-time creation of the header's trailing area:
        # [after-label slot][stretching spacer][end-of-row slot]
        if self.top_part_ending is not None:
            return

        top_part_ending = QtWidgets.QWidget(self.side_line_widget)
        top_part_ending.setAttribute(QtCore.Qt.WA_TranslucentBackground)

        top_part_ending_layout = QtWidgets.QHBoxLayout(top_part_ending)
        top_part_ending_layout.setContentsMargins(0, 0, 0, 0)
        top_part_ending_layout.setSpacing(0)
        top_part_ending_layout.setAlignment(QtCore.Qt.AlignVCenter)

        after_label_widget = QtWidgets.QWidget(top_part_ending)
        spacer_item = QtWidgets.QWidget(top_part_ending)
        end_of_widget = QtWidgets.QWidget(top_part_ending)

        self.after_label_layout = QtWidgets.QVBoxLayout(after_label_widget)
        self.after_label_layout.setContentsMargins(0, 0, 0, 0)

        self.end_of_layout = QtWidgets.QVBoxLayout(end_of_widget)
        self.end_of_layout.setContentsMargins(0, 0, 0, 0)

        spacer_layout = QtWidgets.QVBoxLayout(spacer_item)
        spacer_layout.setContentsMargins(0, 0, 0, 0)

        top_part_ending_layout.addWidget(after_label_widget, 0)
        top_part_ending_layout.addWidget(spacer_item, 1)
        top_part_ending_layout.addWidget(end_of_widget, 0)

        top_part_ending.setAttribute(QtCore.Qt.WA_TranslucentBackground)
        after_label_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground)
        spacer_item.setAttribute(QtCore.Qt.WA_TranslucentBackground)
        end_of_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground)

        self.top_part_ending = top_part_ending
        self.side_line_layout.addWidget(top_part_ending)

    def resizeEvent(self, event):
        super(ExpandingWidget, self).resizeEvent(event)
        if self.content_widget is not None:
            self.content_widget.updateGeometry()


class UnsavedChangesDialog(QtWidgets.QDialog):
    """Modal dialog asking what to do with unsaved changes.

    Result codes: 0 = Cancel, 1 = Save, 2 = Discard.
    """

    message = "You have unsaved changes. What do you want to do with them?"

    def __init__(self, parent=None):
        super().__init__(parent)
        message_label = QtWidgets.QLabel(self.message)

        btns_widget = QtWidgets.QWidget(self)
        btns_layout = QtWidgets.QHBoxLayout(btns_widget)

        btn_ok = QtWidgets.QPushButton("Save")
        btn_ok.clicked.connect(self.on_ok_pressed)
        btn_discard = QtWidgets.QPushButton("Discard")
        btn_discard.clicked.connect(self.on_discard_pressed)
        btn_cancel = QtWidgets.QPushButton("Cancel")
        btn_cancel.clicked.connect(self.on_cancel_pressed)

        btns_layout.addWidget(btn_ok)
        btns_layout.addWidget(btn_discard)
        btns_layout.addWidget(btn_cancel)

        layout = QtWidgets.QVBoxLayout(self)
        layout.addWidget(message_label)
        layout.addWidget(btns_widget)

        self.state = None

    def on_cancel_pressed(self):
        self.done(0)

    def on_ok_pressed(self):
        self.done(1)

    def on_discard_pressed(self):
        self.done(2)
MainWidget(QtWidgets.QWidget): + widget_width = 1000 + widget_height = 600 + + def __init__(self, develop, parent=None): + super(MainWidget, self).__init__(parent) + self.setObjectName("MainWidget") + self.setWindowTitle("Pype Settings") + + self.resize(self.widget_width, self.widget_height) + + header_tab_widget = QtWidgets.QTabWidget(parent=self) + + studio_widget = SystemWidget(develop, header_tab_widget) + project_widget = ProjectWidget(develop, header_tab_widget) + header_tab_widget.addTab(studio_widget, "System") + header_tab_widget.addTab(project_widget, "Project") + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(5, 5, 5, 5) + layout.setSpacing(0) + layout.addWidget(header_tab_widget) + + self.setLayout(layout) diff --git a/pype/tools/standalonepublish/__init__.py b/pype/tools/standalonepublish/__init__.py new file mode 100644 index 0000000000..29a4e52904 --- /dev/null +++ b/pype/tools/standalonepublish/__init__.py @@ -0,0 +1,8 @@ +from .app import ( + show, + cli +) +__all__ = [ + "show", + "cli" +] diff --git a/pype/tools/standalonepublish/__main__.py b/pype/tools/standalonepublish/__main__.py new file mode 100644 index 0000000000..aba8e6c0a4 --- /dev/null +++ b/pype/tools/standalonepublish/__main__.py @@ -0,0 +1,24 @@ +import os +import sys +import app +import signal +from Qt import QtWidgets +from avalon import style + + +if __name__ == "__main__": + qt_app = QtWidgets.QApplication([]) + # app.setQuitOnLastWindowClosed(False) + qt_app.setStyleSheet(style.load_stylesheet()) + + def signal_handler(sig, frame): + print("You pressed Ctrl+C. 
Process ended.") + qt_app.quit() + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + window = app.Window(sys.argv[-1].split(os.pathsep)) + window.show() + + sys.exit(qt_app.exec_()) diff --git a/pype/modules/standalonepublish/app.py b/pype/tools/standalonepublish/app.py similarity index 80% rename from pype/modules/standalonepublish/app.py rename to pype/tools/standalonepublish/app.py index 60274f6b0a..feba46987f 100644 --- a/pype/modules/standalonepublish/app.py +++ b/pype/tools/standalonepublish/app.py @@ -1,18 +1,8 @@ -import os -import sys -import json -from subprocess import Popen from bson.objectid import ObjectId -from pype import lib as pypelib -from avalon.vendor.Qt import QtWidgets, QtCore -from avalon import api, style, schema -from avalon.tools import lib as parentlib -from .widgets import * -# Move this to pype lib? -from avalon.tools.libraryloader.io_nonsingleton import DbConnector +from Qt import QtWidgets, QtCore +from widgets import AssetWidget, FamilyWidget, ComponentsWidget, ShadowWidget +from avalon.api import AvalonMongoDB -module = sys.modules[__name__] -module.window = None class Window(QtWidgets.QDialog): """Main window of Standalone publisher. 
@@ -20,7 +10,7 @@ class Window(QtWidgets.QDialog): :param parent: Main widget that cares about all GUIs :type parent: QtWidgets.QMainWindow """ - _db = DbConnector() + _db = AvalonMongoDB() _jobs = {} valid_family = False valid_components = False @@ -28,14 +18,15 @@ class Window(QtWidgets.QDialog): WIDTH = 1100 HEIGHT = 500 - def __init__(self, parent=None): + def __init__(self, pyblish_paths, parent=None): super(Window, self).__init__(parent=parent) self._db.install() + self.pyblish_paths = pyblish_paths + self.setWindowTitle("Standalone Publish") self.setFocusPolicy(QtCore.Qt.StrongFocus) self.setAttribute(QtCore.Qt.WA_DeleteOnClose) - self.setStyleSheet(style.load_stylesheet()) # Validators self.valid_parent = False @@ -99,8 +90,14 @@ class Window(QtWidgets.QDialog): def resizeEvent(self, event=None): ''' Helps resize shadow widget ''' - position_x = (self.frameGeometry().width()-self.shadow_widget.frameGeometry().width())/2 - position_y = (self.frameGeometry().height()-self.shadow_widget.frameGeometry().height())/2 + position_x = ( + self.frameGeometry().width() + - self.shadow_widget.frameGeometry().width() + ) / 2 + position_y = ( + self.frameGeometry().height() + - self.shadow_widget.frameGeometry().height() + ) / 2 self.shadow_widget.move(position_x, position_y) w = self.frameGeometry().width() h = self.frameGeometry().height() @@ -144,7 +141,10 @@ class Window(QtWidgets.QDialog): - files/folders in clipboard (tested only on Windows OS) - copied path of file/folder in clipboard ('c:/path/to/folder') ''' - if event.key() == QtCore.Qt.Key_V and event.modifiers() == QtCore.Qt.ControlModifier: + if ( + event.key() == QtCore.Qt.Key_V + and event.modifiers() == QtCore.Qt.ControlModifier + ): clip = QtWidgets.QApplication.clipboard() self.widget_components.process_mime_data(clip) super().keyPressEvent(event) @@ -190,29 +190,3 @@ class Window(QtWidgets.QDialog): data.update(self.widget_components.collect_data()) return data - -def show(parent=None, debug=False): - 
try: - module.window.close() - del module.window - except (RuntimeError, AttributeError): - pass - - with parentlib.application(): - window = Window(parent) - window.show() - - module.window = window - - -def cli(args): - import argparse - parser = argparse.ArgumentParser() - parser.add_argument("project") - parser.add_argument("asset") - - args = parser.parse_args(args) - # project = args.project - # asset = args.asset - - show() diff --git a/pype/hosts/resolve/utility_scripts/__test_pyblish.py b/pype/tools/standalonepublish/publish.py similarity index 59% rename from pype/hosts/resolve/utility_scripts/__test_pyblish.py rename to pype/tools/standalonepublish/publish.py index a6fe991025..a4bb81ad3c 100644 --- a/pype/hosts/resolve/utility_scripts/__test_pyblish.py +++ b/pype/tools/standalonepublish/publish.py @@ -1,36 +1,15 @@ import os import sys -import pype -import importlib -import pyblish.api -import pyblish.util -import avalon.api -from avalon.tools import publish -from pypeapp import Logger -log = Logger().get_logger(__name__) +import pype +import pyblish.api def main(env): + from avalon.tools import publish # Registers pype's Global pyblish plugins pype.install() - # Register Host (and it's pyblish plugins) - host_name = env["AVALON_APP"] - # TODO not sure if use "pype." or "avalon." for host import - host_import_str = f"pype.{host_name}" - - try: - host_module = importlib.import_module(host_import_str) - except ModuleNotFoundError: - log.error(( - f"Host \"{host_name}\" can't be imported." - f" Import string \"{host_import_str}\" failed." 
- )) - return False - - avalon.api.install(host_module) - # Register additional paths addition_paths_str = env.get("PUBLISH_PATHS") or "" addition_paths = addition_paths_str.split(os.pathsep) @@ -38,7 +17,6 @@ def main(env): path = os.path.normpath(path) if not os.path.exists(path): continue - pyblish.api.register_plugin_path(path) # Register project specific plugins diff --git a/pype/modules/standalonepublish/widgets/__init__.py b/pype/tools/standalonepublish/widgets/__init__.py similarity index 84% rename from pype/modules/standalonepublish/widgets/__init__.py rename to pype/tools/standalonepublish/widgets/__init__.py index 9a71e0dee6..e61897f807 100644 --- a/pype/modules/standalonepublish/widgets/__init__.py +++ b/pype/tools/standalonepublish/widgets/__init__.py @@ -1,6 +1,4 @@ -from avalon.vendor.Qt import * -from avalon.vendor import qtawesome -from avalon import style +from Qt import QtCore HelpRole = QtCore.Qt.UserRole + 2 FamilyRole = QtCore.Qt.UserRole + 3 @@ -8,9 +6,6 @@ ExistsRole = QtCore.Qt.UserRole + 4 PluginRole = QtCore.Qt.UserRole + 5 PluginKeyRole = QtCore.Qt.UserRole + 6 -from ..resources import get_resource -from .button_from_svgs import SvgResizable, SvgButton - from .model_node import Node from .model_tree import TreeModel from .model_asset import AssetModel, _iter_model_rows diff --git a/pype/modules/standalonepublish/widgets/model_asset.py b/pype/tools/standalonepublish/widgets/model_asset.py similarity index 98% rename from pype/modules/standalonepublish/widgets/model_asset.py rename to pype/tools/standalonepublish/widgets/model_asset.py index 6bea35ebd7..44649b3dc3 100644 --- a/pype/modules/standalonepublish/widgets/model_asset.py +++ b/pype/tools/standalonepublish/widgets/model_asset.py @@ -1,8 +1,9 @@ import logging import collections -from . import QtCore, QtGui +from Qt import QtCore, QtGui from . import TreeModel, Node -from . 
import style, qtawesome +from avalon.vendor import qtawesome +from avalon import style log = logging.getLogger(__name__) diff --git a/pype/modules/standalonepublish/widgets/model_filter_proxy_exact_match.py b/pype/tools/standalonepublish/widgets/model_filter_proxy_exact_match.py similarity index 97% rename from pype/modules/standalonepublish/widgets/model_filter_proxy_exact_match.py rename to pype/tools/standalonepublish/widgets/model_filter_proxy_exact_match.py index 862e4071db..604ae30934 100644 --- a/pype/modules/standalonepublish/widgets/model_filter_proxy_exact_match.py +++ b/pype/tools/standalonepublish/widgets/model_filter_proxy_exact_match.py @@ -1,4 +1,4 @@ -from . import QtCore +from Qt import QtCore class ExactMatchesFilterProxyModel(QtCore.QSortFilterProxyModel): diff --git a/pype/modules/standalonepublish/widgets/model_filter_proxy_recursive_sort.py b/pype/tools/standalonepublish/widgets/model_filter_proxy_recursive_sort.py similarity index 97% rename from pype/modules/standalonepublish/widgets/model_filter_proxy_recursive_sort.py rename to pype/tools/standalonepublish/widgets/model_filter_proxy_recursive_sort.py index 9528e96ebf..71ecdf41dc 100644 --- a/pype/modules/standalonepublish/widgets/model_filter_proxy_recursive_sort.py +++ b/pype/tools/standalonepublish/widgets/model_filter_proxy_recursive_sort.py @@ -1,4 +1,4 @@ -from . 
import QtCore +from Qt import QtCore import re diff --git a/pype/modules/standalonepublish/widgets/model_node.py b/pype/tools/standalonepublish/widgets/model_node.py similarity index 100% rename from pype/modules/standalonepublish/widgets/model_node.py rename to pype/tools/standalonepublish/widgets/model_node.py diff --git a/pype/modules/standalonepublish/widgets/model_tasks_template.py b/pype/tools/standalonepublish/widgets/model_tasks_template.py similarity index 92% rename from pype/modules/standalonepublish/widgets/model_tasks_template.py rename to pype/tools/standalonepublish/widgets/model_tasks_template.py index 336921b37a..476f45391d 100644 --- a/pype/modules/standalonepublish/widgets/model_tasks_template.py +++ b/pype/tools/standalonepublish/widgets/model_tasks_template.py @@ -1,6 +1,7 @@ -from . import QtCore, TreeModel -from . import Node -from . import qtawesome, style +from Qt import QtCore +from . import Node, TreeModel +from avalon.vendor import qtawesome +from avalon import style class TasksTemplateModel(TreeModel): diff --git a/pype/modules/standalonepublish/widgets/model_tree.py b/pype/tools/standalonepublish/widgets/model_tree.py similarity index 99% rename from pype/modules/standalonepublish/widgets/model_tree.py rename to pype/tools/standalonepublish/widgets/model_tree.py index f37b7a00b2..efac0d6b78 100644 --- a/pype/modules/standalonepublish/widgets/model_tree.py +++ b/pype/tools/standalonepublish/widgets/model_tree.py @@ -1,4 +1,4 @@ -from . import QtCore +from Qt import QtCore from . 
import Node diff --git a/pype/modules/standalonepublish/widgets/model_tree_view_deselectable.py b/pype/tools/standalonepublish/widgets/model_tree_view_deselectable.py similarity index 93% rename from pype/modules/standalonepublish/widgets/model_tree_view_deselectable.py rename to pype/tools/standalonepublish/widgets/model_tree_view_deselectable.py index 78bec44d36..6a15916981 100644 --- a/pype/modules/standalonepublish/widgets/model_tree_view_deselectable.py +++ b/pype/tools/standalonepublish/widgets/model_tree_view_deselectable.py @@ -1,4 +1,4 @@ -from . import QtWidgets, QtCore +from Qt import QtWidgets, QtCore class DeselectableTreeView(QtWidgets.QTreeView): diff --git a/pype/modules/standalonepublish/resources/__init__.py b/pype/tools/standalonepublish/widgets/resources/__init__.py similarity index 100% rename from pype/modules/standalonepublish/resources/__init__.py rename to pype/tools/standalonepublish/widgets/resources/__init__.py diff --git a/pype/modules/standalonepublish/resources/edit.svg b/pype/tools/standalonepublish/widgets/resources/edit.svg similarity index 100% rename from pype/modules/standalonepublish/resources/edit.svg rename to pype/tools/standalonepublish/widgets/resources/edit.svg diff --git a/pype/modules/standalonepublish/resources/file.png b/pype/tools/standalonepublish/widgets/resources/file.png similarity index 100% rename from pype/modules/standalonepublish/resources/file.png rename to pype/tools/standalonepublish/widgets/resources/file.png diff --git a/pype/modules/standalonepublish/resources/files.png b/pype/tools/standalonepublish/widgets/resources/files.png similarity index 100% rename from pype/modules/standalonepublish/resources/files.png rename to pype/tools/standalonepublish/widgets/resources/files.png diff --git a/pype/modules/standalonepublish/resources/houdini.png b/pype/tools/standalonepublish/widgets/resources/houdini.png similarity index 100% rename from pype/modules/standalonepublish/resources/houdini.png rename to 
pype/tools/standalonepublish/widgets/resources/houdini.png diff --git a/pype/modules/standalonepublish/resources/image_file.png b/pype/tools/standalonepublish/widgets/resources/image_file.png similarity index 100% rename from pype/modules/standalonepublish/resources/image_file.png rename to pype/tools/standalonepublish/widgets/resources/image_file.png diff --git a/pype/modules/standalonepublish/resources/image_files.png b/pype/tools/standalonepublish/widgets/resources/image_files.png similarity index 100% rename from pype/modules/standalonepublish/resources/image_files.png rename to pype/tools/standalonepublish/widgets/resources/image_files.png diff --git a/pype/modules/standalonepublish/resources/information.svg b/pype/tools/standalonepublish/widgets/resources/information.svg similarity index 100% rename from pype/modules/standalonepublish/resources/information.svg rename to pype/tools/standalonepublish/widgets/resources/information.svg diff --git a/pype/modules/standalonepublish/resources/maya.png b/pype/tools/standalonepublish/widgets/resources/maya.png similarity index 100% rename from pype/modules/standalonepublish/resources/maya.png rename to pype/tools/standalonepublish/widgets/resources/maya.png diff --git a/pype/modules/standalonepublish/resources/menu.png b/pype/tools/standalonepublish/widgets/resources/menu.png similarity index 100% rename from pype/modules/standalonepublish/resources/menu.png rename to pype/tools/standalonepublish/widgets/resources/menu.png diff --git a/pype/modules/standalonepublish/resources/menu_disabled.png b/pype/tools/standalonepublish/widgets/resources/menu_disabled.png similarity index 100% rename from pype/modules/standalonepublish/resources/menu_disabled.png rename to pype/tools/standalonepublish/widgets/resources/menu_disabled.png diff --git a/pype/modules/standalonepublish/resources/menu_hover.png b/pype/tools/standalonepublish/widgets/resources/menu_hover.png similarity index 100% rename from 
pype/modules/standalonepublish/resources/menu_hover.png rename to pype/tools/standalonepublish/widgets/resources/menu_hover.png diff --git a/pype/modules/standalonepublish/resources/menu_pressed.png b/pype/tools/standalonepublish/widgets/resources/menu_pressed.png similarity index 100% rename from pype/modules/standalonepublish/resources/menu_pressed.png rename to pype/tools/standalonepublish/widgets/resources/menu_pressed.png diff --git a/pype/modules/standalonepublish/resources/menu_pressed_hover.png b/pype/tools/standalonepublish/widgets/resources/menu_pressed_hover.png similarity index 100% rename from pype/modules/standalonepublish/resources/menu_pressed_hover.png rename to pype/tools/standalonepublish/widgets/resources/menu_pressed_hover.png diff --git a/pype/modules/standalonepublish/resources/nuke.png b/pype/tools/standalonepublish/widgets/resources/nuke.png similarity index 100% rename from pype/modules/standalonepublish/resources/nuke.png rename to pype/tools/standalonepublish/widgets/resources/nuke.png diff --git a/pype/modules/standalonepublish/resources/premiere.png b/pype/tools/standalonepublish/widgets/resources/premiere.png similarity index 100% rename from pype/modules/standalonepublish/resources/premiere.png rename to pype/tools/standalonepublish/widgets/resources/premiere.png diff --git a/pype/modules/standalonepublish/resources/trash.png b/pype/tools/standalonepublish/widgets/resources/trash.png similarity index 100% rename from pype/modules/standalonepublish/resources/trash.png rename to pype/tools/standalonepublish/widgets/resources/trash.png diff --git a/pype/modules/standalonepublish/resources/trash_disabled.png b/pype/tools/standalonepublish/widgets/resources/trash_disabled.png similarity index 100% rename from pype/modules/standalonepublish/resources/trash_disabled.png rename to pype/tools/standalonepublish/widgets/resources/trash_disabled.png diff --git a/pype/modules/standalonepublish/resources/trash_hover.png 
b/pype/tools/standalonepublish/widgets/resources/trash_hover.png similarity index 100% rename from pype/modules/standalonepublish/resources/trash_hover.png rename to pype/tools/standalonepublish/widgets/resources/trash_hover.png diff --git a/pype/modules/standalonepublish/resources/trash_pressed.png b/pype/tools/standalonepublish/widgets/resources/trash_pressed.png similarity index 100% rename from pype/modules/standalonepublish/resources/trash_pressed.png rename to pype/tools/standalonepublish/widgets/resources/trash_pressed.png diff --git a/pype/modules/standalonepublish/resources/trash_pressed_hover.png b/pype/tools/standalonepublish/widgets/resources/trash_pressed_hover.png similarity index 100% rename from pype/modules/standalonepublish/resources/trash_pressed_hover.png rename to pype/tools/standalonepublish/widgets/resources/trash_pressed_hover.png diff --git a/pype/modules/standalonepublish/resources/video_file.png b/pype/tools/standalonepublish/widgets/resources/video_file.png similarity index 100% rename from pype/modules/standalonepublish/resources/video_file.png rename to pype/tools/standalonepublish/widgets/resources/video_file.png diff --git a/pype/modules/standalonepublish/widgets/widget_asset.py b/pype/tools/standalonepublish/widgets/widget_asset.py similarity index 97% rename from pype/modules/standalonepublish/widgets/widget_asset.py rename to pype/tools/standalonepublish/widgets/widget_asset.py index d9241bd91f..6f041a535f 100644 --- a/pype/modules/standalonepublish/widgets/widget_asset.py +++ b/pype/tools/standalonepublish/widgets/widget_asset.py @@ -1,7 +1,8 @@ import contextlib -from . import QtWidgets, QtCore +from Qt import QtWidgets, QtCore from . import RecursiveSortFilterProxyModel, AssetModel -from . import qtawesome, style +from avalon.vendor import qtawesome +from avalon import style from . import TasksTemplateModel, DeselectableTreeView from . 
import _iter_model_rows @@ -239,7 +240,7 @@ class AssetWidget(QtWidgets.QWidget): self.combo_projects.clear() if len(projects) > 0: self.combo_projects.addItems(projects) - self.dbcon.activate_project(projects[0]) + self.dbcon.Session["AVALON_PROJECT"] = projects[0] def on_project_change(self): projects = list() @@ -247,7 +248,7 @@ class AssetWidget(QtWidgets.QWidget): projects.append(project['name']) project_name = self.combo_projects.currentText() if project_name in projects: - self.dbcon.activate_project(project_name) + self.dbcon.Session["AVALON_PROJECT"] = project_name self.refresh() def _refresh_model(self): diff --git a/pype/modules/standalonepublish/widgets/widget_component_item.py b/pype/tools/standalonepublish/widgets/widget_component_item.py similarity index 92% rename from pype/modules/standalonepublish/widgets/widget_component_item.py rename to pype/tools/standalonepublish/widgets/widget_component_item.py index 40298520b1..3850d68b96 100644 --- a/pype/modules/standalonepublish/widgets/widget_component_item.py +++ b/pype/tools/standalonepublish/widgets/widget_component_item.py @@ -1,6 +1,6 @@ import os -from . import QtCore, QtGui, QtWidgets -from . 
import get_resource +from Qt import QtCore, QtGui, QtWidgets +from .resources import get_resource from avalon import style @@ -353,27 +353,37 @@ class LightingButton(QtWidgets.QPushButton): class PngFactory: - png_names = { - "trash": { - "normal": QtGui.QIcon(get_resource("trash.png")), - "hover": QtGui.QIcon(get_resource("trash_hover.png")), - "pressed": QtGui.QIcon(get_resource("trash_pressed.png")), - "pressed_hover": QtGui.QIcon( - get_resource("trash_pressed_hover.png") - ), - "disabled": QtGui.QIcon(get_resource("trash_disabled.png")) - }, + png_names = None - "menu": { - "normal": QtGui.QIcon(get_resource("menu.png")), - "hover": QtGui.QIcon(get_resource("menu_hover.png")), - "pressed": QtGui.QIcon(get_resource("menu_pressed.png")), - "pressed_hover": QtGui.QIcon( - get_resource("menu_pressed_hover.png") - ), - "disabled": QtGui.QIcon(get_resource("menu_disabled.png")) + @classmethod + def init(cls): + cls.png_names = { + "trash": { + "normal": QtGui.QIcon(get_resource("trash.png")), + "hover": QtGui.QIcon(get_resource("trash_hover.png")), + "pressed": QtGui.QIcon(get_resource("trash_pressed.png")), + "pressed_hover": QtGui.QIcon( + get_resource("trash_pressed_hover.png") + ), + "disabled": QtGui.QIcon(get_resource("trash_disabled.png")) + }, + + "menu": { + "normal": QtGui.QIcon(get_resource("menu.png")), + "hover": QtGui.QIcon(get_resource("menu_hover.png")), + "pressed": QtGui.QIcon(get_resource("menu_pressed.png")), + "pressed_hover": QtGui.QIcon( + get_resource("menu_pressed_hover.png") + ), + "disabled": QtGui.QIcon(get_resource("menu_disabled.png")) + } } - } + + @classmethod + def get_png(cls, name): + if cls.png_names is None: + cls.init() + return cls.png_names.get(name) class PngButton(QtWidgets.QPushButton): @@ -406,7 +416,7 @@ class PngButton(QtWidgets.QPushButton): png_dict = {} if name: - png_dict = PngFactory.png_names.get(name) or {} + png_dict = PngFactory.get_png(name) or {} if not png_dict: print(( "WARNING: There is not set icon with 
name \"{}\"" diff --git a/pype/modules/standalonepublish/widgets/widget_components.py b/pype/tools/standalonepublish/widgets/widget_components.py similarity index 57% rename from pype/modules/standalonepublish/widgets/widget_components.py rename to pype/tools/standalonepublish/widgets/widget_components.py index 90167f2fa6..7e0327f00a 100644 --- a/pype/modules/standalonepublish/widgets/widget_components.py +++ b/pype/tools/standalonepublish/widgets/widget_components.py @@ -1,7 +1,16 @@ -from . import QtWidgets, QtCore, QtGui -from . import DropDataFrame +import os +import sys +import json +import tempfile +import random +import string -from .. import publish +from Qt import QtWidgets, QtCore +from . import DropDataFrame +from avalon import io +from pype.api import execute, Logger + +log = Logger().get_logger("standalonepublisher") class ComponentsWidget(QtWidgets.QWidget): @@ -113,16 +122,103 @@ class ComponentsWidget(QtWidgets.QWidget): self.parent_widget.working_stop() def _publish(self): + log.info(self.parent_widget.pyblish_paths) self.working_start('Pyblish is running') try: data = self.parent_widget.collect_data() - publish.set_context( - data['project'], data['asset'], data['task'], 'standalonepublish' + set_context( + data['project'], + data['asset'], + data['task'] ) - result = publish.publish(data) + result = cli_publish(data, self.parent_widget.pyblish_paths) # Clear widgets from components list if publishing was successful if result: self.drop_frame.components_list.clear_widgets() self.drop_frame._refresh_view() finally: self.working_stop() + + +def set_context(project, asset, task): + ''' Sets context for pyblish (must be done before pyblish is launched) + :param project: Name of `Project` where instance should be published + :type project: str + :param asset: Name of `Asset` where instance should be published + :type asset: str + ''' + os.environ["AVALON_PROJECT"] = project + io.Session["AVALON_PROJECT"] = project + os.environ["AVALON_ASSET"] = asset + 
io.Session["AVALON_ASSET"] = asset + if not task: + task = '' + os.environ["AVALON_TASK"] = task + io.Session["AVALON_TASK"] = task + + io.install() + + av_project = io.find_one({'type': 'project'}) + av_asset = io.find_one({ + "type": 'asset', + "name": asset + }) + + parents = av_asset['data']['parents'] + hierarchy = '' + if parents and len(parents) > 0: + hierarchy = os.path.sep.join(parents) + + os.environ["AVALON_HIERARCHY"] = hierarchy + io.Session["AVALON_HIERARCHY"] = hierarchy + + os.environ["AVALON_PROJECTCODE"] = av_project['data'].get('code', '') + io.Session["AVALON_PROJECTCODE"] = av_project['data'].get('code', '') + + io.Session["current_dir"] = os.path.normpath(os.getcwd()) + + os.environ["AVALON_APP"] = "standalonepublish" + io.Session["AVALON_APP"] = "standalonepublish" + + io.uninstall() + + +def cli_publish(data, publish_paths, gui=True): + PUBLISH_SCRIPT_PATH = os.path.join( + os.path.dirname(os.path.dirname(__file__)), + "publish.py" + ) + io.install() + + # Create hash name folder in temp + chars = "".join([random.choice(string.ascii_letters) for i in range(15)]) + staging_dir = tempfile.mkdtemp(chars) + + # create also json and fill with data + json_data_path = staging_dir + os.path.basename(staging_dir) + '.json' + with open(json_data_path, 'w') as outfile: + json.dump(data, outfile) + + envcopy = os.environ.copy() + envcopy["PYBLISH_HOSTS"] = "standalonepublisher" + envcopy["SAPUBLISH_INPATH"] = json_data_path + envcopy["PYBLISHGUI"] = "pyblish_pype" + envcopy["PUBLISH_PATHS"] = os.pathsep.join(publish_paths) + if data.get("family", "").lower() == "editorial": + envcopy["PYBLISH_SUSPEND_LOGS"] = "1" + + result = execute( + [sys.executable, PUBLISH_SCRIPT_PATH], + env=envcopy + ) + + result = {} + if os.path.exists(json_data_path): + with open(json_data_path, "r") as f: + result = json.load(f) + + log.info(f"Publish result: {result}") + + io.uninstall() + + return False diff --git 
a/pype/modules/standalonepublish/widgets/widget_components_list.py b/pype/tools/standalonepublish/widgets/widget_components_list.py similarity index 98% rename from pype/modules/standalonepublish/widgets/widget_components_list.py rename to pype/tools/standalonepublish/widgets/widget_components_list.py index f85e9f0aa6..4e502a2e5f 100644 --- a/pype/modules/standalonepublish/widgets/widget_components_list.py +++ b/pype/tools/standalonepublish/widgets/widget_components_list.py @@ -1,4 +1,4 @@ -from . import QtCore, QtGui, QtWidgets +from Qt import QtWidgets class ComponentsList(QtWidgets.QTableWidget): diff --git a/pype/modules/standalonepublish/widgets/widget_drop_empty.py b/pype/tools/standalonepublish/widgets/widget_drop_empty.py similarity index 76% rename from pype/modules/standalonepublish/widgets/widget_drop_empty.py rename to pype/tools/standalonepublish/widgets/widget_drop_empty.py index a68b91da59..ed526f2a78 100644 --- a/pype/modules/standalonepublish/widgets/widget_drop_empty.py +++ b/pype/tools/standalonepublish/widgets/widget_drop_empty.py @@ -1,7 +1,4 @@ -import os -import logging -import clique -from . 
import QtWidgets, QtCore, QtGui +from Qt import QtWidgets, QtCore, QtGui class DropEmpty(QtWidgets.QWidget): @@ -25,14 +22,14 @@ class DropEmpty(QtWidgets.QWidget): self._label = QtWidgets.QLabel('Drag & Drop') self._label.setFont(font) self._label.setStyleSheet( - 'background-color: rgb(255, 255, 255, 0);' + 'background-color: transparent;' ) font.setPointSize(12) self._sub_label = QtWidgets.QLabel('(drop files here)') self._sub_label.setFont(font) self._sub_label.setStyleSheet( - 'background-color: rgb(255, 255, 255, 0);' + 'background-color: transparent;' ) layout.addWidget(self._label, alignment=BottomCenterAlignment) @@ -42,11 +39,13 @@ class DropEmpty(QtWidgets.QWidget): super().paintEvent(event) painter = QtGui.QPainter(self) pen = QtGui.QPen() - pen.setWidth(1); - pen.setBrush(QtCore.Qt.darkGray); - pen.setStyle(QtCore.Qt.DashLine); + pen.setWidth(1) + pen.setBrush(QtCore.Qt.darkGray) + pen.setStyle(QtCore.Qt.DashLine) painter.setPen(pen) painter.drawRect( - 10, 10, - self.rect().width()-15, self.rect().height()-15 + 10, + 10, + self.rect().width() - 15, + self.rect().height() - 15 ) diff --git a/pype/modules/standalonepublish/widgets/widget_drop_frame.py b/pype/tools/standalonepublish/widgets/widget_drop_frame.py similarity index 83% rename from pype/modules/standalonepublish/widgets/widget_drop_frame.py rename to pype/tools/standalonepublish/widgets/widget_drop_frame.py index c91e906f45..e13f701b30 100644 --- a/pype/modules/standalonepublish/widgets/widget_drop_frame.py +++ b/pype/tools/standalonepublish/widgets/widget_drop_frame.py @@ -3,9 +3,8 @@ import re import json import clique import subprocess -from pype.api import config import pype.lib -from . import QtWidgets, QtCore +from Qt import QtWidgets, QtCore from . 
import DropEmpty, ComponentsList, ComponentItem @@ -18,7 +17,7 @@ class DropDataFrame(QtWidgets.QFrame): ".jng", ".jpeg", ".jpeg-ls", ".jpeg", ".2000", ".jpg", ".xr", ".jpeg", ".xt", ".jpeg-hdr", ".kra", ".mng", ".miff", ".nrrd", ".ora", ".pam", ".pbm", ".pgm", ".ppm", ".pnm", ".pcx", ".pgf", - ".pictor", ".png", ".psd", ".psb", ".psp", ".qtvr", ".ras", + ".pictor", ".png", ".psb", ".psp", ".qtvr", ".ras", ".rgbe", ".logluv", ".tiff", ".sgi", ".tga", ".tiff", ".tiff/ep", ".tiff/it", ".ufo", ".ufp", ".wbmp", ".webp", ".xbm", ".xcf", ".xpm", ".xwd" @@ -173,10 +172,21 @@ class DropDataFrame(QtWidgets.QFrame): def _process_paths(self, in_paths): self.parent_widget.working_start() paths = self._get_all_paths(in_paths) - collections, remainders = clique.assemble(paths) + collectionable_paths = [] + non_collectionable_paths = [] + for path in in_paths: + ext = os.path.splitext(path)[1] + if ext in self.image_extensions: + collectionable_paths.append(path) + else: + non_collectionable_paths.append(path) + + collections, remainders = clique.assemble(collectionable_paths) + non_collectionable_paths.extend(remainders) for collection in collections: self._process_collection(collection) - for remainder in remainders: + + for remainder in non_collectionable_paths: self._process_remainder(remainder) self.parent_widget.working_stop() @@ -283,10 +293,7 @@ class DropDataFrame(QtWidgets.QFrame): if 'file_info' in data: file_info = data['file_info'] - if ( - ext in self.image_extensions - or ext in self.video_extensions - ): + if ext in self.image_extensions or ext in self.video_extensions: probe_data = self.load_data_with_probe(filepath) if 'fps' not in data: # default value @@ -344,61 +351,62 @@ class DropDataFrame(QtWidgets.QFrame): actions = [] found = False - for item in self.components_list.widgets(): - if data['ext'] != item.in_data['ext']: - continue - if data['folder_path'] != item.in_data['folder_path']: - continue - - ex_is_seq = item.in_data['is_sequence'] - - # If both 
are single files - if not new_is_seq and not ex_is_seq: - if data['name'] == item.in_data['name']: - found = True - break - paths = data['files'] - paths.extend(item.in_data['files']) - c, r = clique.assemble(paths) - if len(c) == 0: + if data["ext"] in self.image_extensions: + for item in self.components_list.widgets(): + if data['ext'] != item.in_data['ext']: continue - a_name = 'merge' - item.add_action(a_name) - if a_name not in actions: - actions.append(a_name) - - # If new is sequence and ex is single file - elif new_is_seq and not ex_is_seq: - if data['name'] not in item.in_data['name']: + if data['folder_path'] != item.in_data['folder_path']: continue - ex_file = item.in_data['files'][0] - a_name = 'merge' - item.add_action(a_name) - if a_name not in actions: - actions.append(a_name) - continue + ex_is_seq = item.in_data['is_sequence'] - # If new is single file existing is sequence - elif not new_is_seq and ex_is_seq: - if item.in_data['name'] not in data['name']: + # If both are single files + if not new_is_seq and not ex_is_seq: + if data['name'] == item.in_data['name']: + found = True + break + paths = list(data['files']) + paths.extend(item.in_data['files']) + c, r = clique.assemble(paths) + if len(c) == 0: + continue + a_name = 'merge' + item.add_action(a_name) + if a_name not in actions: + actions.append(a_name) + + # If new is sequence and ex is single file + elif new_is_seq and not ex_is_seq: + if data['name'] not in item.in_data['name']: + continue + ex_file = item.in_data['files'][0] + + a_name = 'merge' + item.add_action(a_name) + if a_name not in actions: + actions.append(a_name) continue - a_name = 'merge' - item.add_action(a_name) - if a_name not in actions: - actions.append(a_name) - # If both are sequence - else: - if data['name'] != item.in_data['name']: - continue - if data['files'] == item.in_data['files']: - found = True - break - a_name = 'merge' - item.add_action(a_name) - if a_name not in actions: - actions.append(a_name) + # If new 
is single file existing is sequence + elif not new_is_seq and ex_is_seq: + if item.in_data['name'] not in data['name']: + continue + a_name = 'merge' + item.add_action(a_name) + if a_name not in actions: + actions.append(a_name) + + # If both are sequence + else: + if data['name'] != item.in_data['name']: + continue + if data['files'] == list(item.in_data['files']): + found = True + break + a_name = 'merge' + item.add_action(a_name) + if a_name not in actions: + actions.append(a_name) if new_is_seq: actions.append('split') diff --git a/pype/modules/standalonepublish/widgets/widget_family.py b/pype/tools/standalonepublish/widgets/widget_family.py similarity index 99% rename from pype/modules/standalonepublish/widgets/widget_family.py rename to pype/tools/standalonepublish/widgets/widget_family.py index 29a0812a91..1c8f2238fc 100644 --- a/pype/modules/standalonepublish/widgets/widget_family.py +++ b/pype/tools/standalonepublish/widgets/widget_family.py @@ -1,10 +1,6 @@ -import os -import sys -import inspect -import json from collections import namedtuple -from . import QtWidgets, QtCore +from Qt import QtWidgets, QtCore from . import HelpRole, FamilyRole, ExistsRole, PluginRole, PluginKeyRole from . import FamilyDescriptionWidget diff --git a/pype/modules/standalonepublish/widgets/widget_family_desc.py b/pype/tools/standalonepublish/widgets/widget_family_desc.py similarity index 92% rename from pype/modules/standalonepublish/widgets/widget_family_desc.py rename to pype/tools/standalonepublish/widgets/widget_family_desc.py index 7c80dcfd57..8c95ddf2e4 100644 --- a/pype/modules/standalonepublish/widgets/widget_family_desc.py +++ b/pype/tools/standalonepublish/widgets/widget_family_desc.py @@ -1,13 +1,7 @@ -import os -import sys -import inspect -import json - -from . import QtWidgets, QtCore, QtGui -from . import HelpRole, FamilyRole, ExistsRole, PluginRole -from . import qtawesome +from Qt import QtWidgets, QtCore, QtGui +from . 
import FamilyRole, PluginRole +from avalon.vendor import qtawesome import six -from pype import lib as pypelib class FamilyDescriptionWidget(QtWidgets.QWidget): diff --git a/pype/modules/standalonepublish/widgets/widget_shadow.py b/pype/tools/standalonepublish/widgets/widget_shadow.py similarity index 85% rename from pype/modules/standalonepublish/widgets/widget_shadow.py rename to pype/tools/standalonepublish/widgets/widget_shadow.py index 1bb9cee44b..de5fdf6be0 100644 --- a/pype/modules/standalonepublish/widgets/widget_shadow.py +++ b/pype/tools/standalonepublish/widgets/widget_shadow.py @@ -1,4 +1,4 @@ -from . import QtWidgets, QtCore, QtGui +from Qt import QtWidgets, QtCore, QtGui class ShadowWidget(QtWidgets.QWidget): @@ -26,7 +26,9 @@ class ShadowWidget(QtWidgets.QWidget): painter.begin(self) painter.setFont(self.font) painter.setRenderHint(QtGui.QPainter.Antialiasing) - painter.fillRect(event.rect(), QtGui.QBrush(QtGui.QColor(0, 0, 0, 127))) + painter.fillRect( + event.rect(), QtGui.QBrush(QtGui.QColor(0, 0, 0, 127)) + ) painter.drawText( QtCore.QRectF( 0.0, @@ -34,7 +36,7 @@ class ShadowWidget(QtWidgets.QWidget): self.parent_widget.frameGeometry().width(), self.parent_widget.frameGeometry().height() ), - QtCore.Qt.AlignCenter|QtCore.Qt.AlignCenter, + QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter, self.message ) painter.end() diff --git a/pype/tools/tray/__main__.py b/pype/tools/tray/__main__.py index d0006c0afe..94d5461dc4 100644 --- a/pype/tools/tray/__main__.py +++ b/pype/tools/tray/__main__.py @@ -1,4 +1,12 @@ +import os import sys import pype_tray -sys.exit(pype_tray.PypeTrayApplication().exec_()) +app = pype_tray.PypeTrayApplication() +if os.name == "nt": + import ctypes + ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID( + u"pype_tray" + ) + +sys.exit(app.exec_()) diff --git a/pype/tools/tray/modules_imports.json b/pype/tools/tray/modules_imports.json index e7bdeda0d2..499e5fc08c 100644 --- a/pype/tools/tray/modules_imports.json +++ 
b/pype/tools/tray/modules_imports.json @@ -54,6 +54,11 @@ "type": "module", "import_path": "pype.modules.adobe_communicator", "fromlist": ["pype", "modules"] + }, { + "title": "Websocket Server", + "type": "module", + "import_path": "pype.modules.websocket_server", + "fromlist": ["pype", "modules"] }, { "title": "Sync Server", "type": "module", diff --git a/pype/tools/tray/pype_tray.py b/pype/tools/tray/pype_tray.py index 5b1185fa71..a4cf4eabfe 100644 --- a/pype/tools/tray/pype_tray.py +++ b/pype/tools/tray/pype_tray.py @@ -4,6 +4,11 @@ import platform from avalon import style from Qt import QtCore, QtGui, QtWidgets, QtSvg from pype.api import config, Logger, resources +import pype.version +try: + import configparser +except Exception: + import ConfigParser as configparser class TrayManager: @@ -100,6 +105,8 @@ class TrayManager: if items and self.services_submenu is not None: self.add_separator(self.tray_widget.menu) + self._add_version_item() + # Add Exit action to menu aExit = QtWidgets.QAction("&Exit", self.tray_widget) aExit.triggered.connect(self.tray_widget.exit) @@ -109,6 +116,34 @@ class TrayManager: self.connect_modules() self.start_modules() + def _add_version_item(self): + config_file_path = os.path.join( + os.environ["PYPE_SETUP_PATH"], "pypeapp", "config.ini" + ) + + default_config = {} + if os.path.exists(config_file_path): + config = configparser.ConfigParser() + config.read(config_file_path) + try: + default_config = config["CLIENT"] + except Exception: + pass + + subversion = default_config.get("subversion") + client_name = default_config.get("client_name") + + version_string = pype.version.__version__ + if subversion: + version_string += " ({})".format(subversion) + + if client_name: + version_string += ", {}".format(client_name) + + version_action = QtWidgets.QAction(version_string, self.tray_widget) + self.tray_widget.menu.addAction(version_action) + self.add_separator(self.tray_widget.menu) + def process_items(self, items, parent_menu): """ 
Loop through items and add them to parent_menu. @@ -203,7 +238,7 @@ class TrayManager: obj.set_qaction(action, self.icon_failed) self.modules[name] = obj self.log.info("{} - Module imported".format(title)) - except ImportError as ie: + except Exception as exc: if self.services_submenu is None: self.services_submenu = QtWidgets.QMenu( 'Services', self.tray_widget.menu @@ -212,7 +247,7 @@ class TrayManager: action.setIcon(self.icon_failed) self.services_submenu.addAction(action) self.log.warning( - "{} - Module import Error: {}".format(title, str(ie)), + "{} - Module import Error: {}".format(title, str(exc)), exc_info=True ) return False @@ -502,6 +537,14 @@ class PypeTrayApplication(QtWidgets.QApplication): super(self.__class__, self).__init__(sys.argv) # Allows to close widgets without exiting app self.setQuitOnLastWindowClosed(False) + + # Allow show icon istead of python icon in task bar (Windows) + if os.name == "nt": + import ctypes + ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID( + u"pype_tray" + ) + # Sets up splash splash_widget = self.set_splash() diff --git a/pype/version.py b/pype/version.py index 1c622223ba..95a6d3a792 100644 --- a/pype/version.py +++ b/pype/version.py @@ -1 +1 @@ -__version__ = "2.10.0" +__version__ = "2.12.0" diff --git a/res/icons/Thumbs.db b/res/icons/Thumbs.db deleted file mode 100644 index fa56c871f6..0000000000 Binary files a/res/icons/Thumbs.db and /dev/null differ