diff --git a/.github/weekly-digest.yml b/.github/weekly-digest.yml
deleted file mode 100644
index fe502fbc98..0000000000
--- a/.github/weekly-digest.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-# Configuration for weekly-digest - https://github.com/apps/weekly-digest
-publishDay: sun
-canPublishIssues: true
-canPublishPullRequests: true
-canPublishContributors: true
-canPublishStargazers: true
-canPublishCommits: true
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 70e23e0ff8..364555f8b2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,18 +1,264 @@
# Changelog
-## [2.14.0](https://github.com/pypeclub/pype/tree/2.14.0) (2020-11-24)
+## [2.16.1](https://github.com/pypeclub/pype/tree/2.16.1) (2021-04-13)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.16.0...2.16.1)
+
+**Enhancements:**
+
+- Nuke: comp renders mix up [\#1301](https://github.com/pypeclub/pype/pull/1301)
+- Validate project settings [\#1297](https://github.com/pypeclub/pype/pull/1297)
+- After Effects: added SubsetManager [\#1234](https://github.com/pypeclub/pype/pull/1234)
+
+**Fixed bugs:**
+
+- Ftrack custom attributes in bulks [\#1312](https://github.com/pypeclub/pype/pull/1312)
+- Ftrack optional pypclub role [\#1303](https://github.com/pypeclub/pype/pull/1303)
+- AE remove orphaned instance from workfile - fix self.stub [\#1282](https://github.com/pypeclub/pype/pull/1282)
+- Avalon schema names [\#1242](https://github.com/pypeclub/pype/pull/1242)
+- Handle duplication of Task name [\#1226](https://github.com/pypeclub/pype/pull/1226)
+- Modified path of plugin loads for Harmony and TVPaint [\#1217](https://github.com/pypeclub/pype/pull/1217)
+- Regex checks in profiles filtering [\#1214](https://github.com/pypeclub/pype/pull/1214)
+- Bulk mov strict task [\#1204](https://github.com/pypeclub/pype/pull/1204)
+- Update custom ftrack session attributes [\#1202](https://github.com/pypeclub/pype/pull/1202)
+- Nuke: write node colorspace ignore `default\(\)` label [\#1199](https://github.com/pypeclub/pype/pull/1199)
+- Nuke: reverse search to make it more versatile [\#1178](https://github.com/pypeclub/pype/pull/1178)
+
+**Merged pull requests:**
+
+- Forward compatible ftrack group [\#1243](https://github.com/pypeclub/pype/pull/1243)
+- Error message in pyblish UI [\#1206](https://github.com/pypeclub/pype/pull/1206)
+- Nuke: deadline submission with search replaced env values from preset [\#1194](https://github.com/pypeclub/pype/pull/1194)
+
+## [2.16.0](https://github.com/pypeclub/pype/tree/2.16.0) (2021-03-22)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.15.3...2.16.0)
+
+**Enhancements:**
+
+- Nuke: deadline submit limit group filter [\#1167](https://github.com/pypeclub/pype/pull/1167)
+- Maya: support for Deadline Group and Limit Groups - backport 2.x [\#1156](https://github.com/pypeclub/pype/pull/1156)
+- Maya: fixes for Redshift support [\#1152](https://github.com/pypeclub/pype/pull/1152)
+- Nuke: adding preset for a Read node name to all img and mov Loaders [\#1146](https://github.com/pypeclub/pype/pull/1146)
+- nuke deadline submit with environ var from presets overrides [\#1142](https://github.com/pypeclub/pype/pull/1142)
+- Change timers after task change [\#1138](https://github.com/pypeclub/pype/pull/1138)
+- Nuke: shortcuts for Pype menu [\#1127](https://github.com/pypeclub/pype/pull/1127)
+- Nuke: workfile template [\#1124](https://github.com/pypeclub/pype/pull/1124)
+- Sites local settings by site name [\#1117](https://github.com/pypeclub/pype/pull/1117)
+- Reset loader's asset selection on context change [\#1106](https://github.com/pypeclub/pype/pull/1106)
+- Bulk mov render publishing [\#1101](https://github.com/pypeclub/pype/pull/1101)
+- Photoshop: mark publishable instances [\#1093](https://github.com/pypeclub/pype/pull/1093)
+- Added ability to define BG color for extract review [\#1088](https://github.com/pypeclub/pype/pull/1088)
+- TVPaint extractor enhancement [\#1080](https://github.com/pypeclub/pype/pull/1080)
+- Photoshop: added support for .psb in workfiles [\#1078](https://github.com/pypeclub/pype/pull/1078)
+- Optionally add task to subset name [\#1072](https://github.com/pypeclub/pype/pull/1072)
+- Only extend clip range when collecting. [\#1008](https://github.com/pypeclub/pype/pull/1008)
+- Collect audio for farm reviews. [\#1073](https://github.com/pypeclub/pype/pull/1073)
+
+
+**Fixed bugs:**
+
+- Fix path spaces in jpeg extractor [\#1174](https://github.com/pypeclub/pype/pull/1174)
+- Maya: Bugfix: superclass for CreateCameraRig [\#1166](https://github.com/pypeclub/pype/pull/1166)
+- Maya: Submit to Deadline - fix typo in condition [\#1163](https://github.com/pypeclub/pype/pull/1163)
+- Avoid dot in repre extension [\#1125](https://github.com/pypeclub/pype/pull/1125)
+- Fix versions variable usage in standalone publisher [\#1090](https://github.com/pypeclub/pype/pull/1090)
+- Collect instance data fix subset query [\#1082](https://github.com/pypeclub/pype/pull/1082)
+- Fix getting the camera name. [\#1067](https://github.com/pypeclub/pype/pull/1067)
+- Nuke: Ensure "NUKE\_TEMP\_DIR" is not part of the Deadline job environment. [\#1064](https://github.com/pypeclub/pype/pull/1064)
+
+## [2.15.3](https://github.com/pypeclub/pype/tree/2.15.3) (2021-02-26)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.15.2...2.15.3)
+
+**Enhancements:**
+
+- Maya: speedup renderable camera collection [\#1053](https://github.com/pypeclub/pype/pull/1053)
+- Harmony - add regex search to filter allowed task names for collectin… [\#1047](https://github.com/pypeclub/pype/pull/1047)
+
+**Fixed bugs:**
+
+- Ftrack integrate hierarchy fix [\#1085](https://github.com/pypeclub/pype/pull/1085)
+- Explicit subset filter in anatomy instance data [\#1059](https://github.com/pypeclub/pype/pull/1059)
+- TVPaint frame offset [\#1057](https://github.com/pypeclub/pype/pull/1057)
+- Auto fix unicode strings [\#1046](https://github.com/pypeclub/pype/pull/1046)
+
+## [2.15.2](https://github.com/pypeclub/pype/tree/2.15.2) (2021-02-19)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.15.1...2.15.2)
+
+**Enhancements:**
+
+- Maya: Vray scene publishing [\#1013](https://github.com/pypeclub/pype/pull/1013)
+
+**Fixed bugs:**
+
+- Fix entity move under project [\#1040](https://github.com/pypeclub/pype/pull/1040)
+- smaller nuke fixes from production [\#1036](https://github.com/pypeclub/pype/pull/1036)
+- TVPaint thumbnail extract fix [\#1031](https://github.com/pypeclub/pype/pull/1031)
+
+## [2.15.1](https://github.com/pypeclub/pype/tree/2.15.1) (2021-02-12)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.15.0...2.15.1)
+
+**Enhancements:**
+
+- Delete version as loader action [\#1011](https://github.com/pypeclub/pype/pull/1011)
+- Delete old versions [\#445](https://github.com/pypeclub/pype/pull/445)
+
+**Fixed bugs:**
+
+- PS - remove obsolete functions from pywin32 [\#1006](https://github.com/pypeclub/pype/pull/1006)
+- Clone description of review session objects. [\#922](https://github.com/pypeclub/pype/pull/922)
+
+## [2.15.0](https://github.com/pypeclub/pype/tree/2.15.0) (2021-02-09)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.6...2.15.0)
+
+**Enhancements:**
+
+- Resolve - loading and updating clips [\#932](https://github.com/pypeclub/pype/pull/932)
+- Release/2.15.0 [\#926](https://github.com/pypeclub/pype/pull/926)
+- Photoshop: add option for template.psd and prelaunch hook [\#894](https://github.com/pypeclub/pype/pull/894)
+- Nuke: deadline presets [\#993](https://github.com/pypeclub/pype/pull/993)
+- Maya: Alembic only set attributes that exists. [\#986](https://github.com/pypeclub/pype/pull/986)
+- Harmony: render local and handle fixes [\#981](https://github.com/pypeclub/pype/pull/981)
+- PSD Bulk export of ANIM group [\#965](https://github.com/pypeclub/pype/pull/965)
+- AE - added prelaunch hook for opening last or workfile from template [\#944](https://github.com/pypeclub/pype/pull/944)
+- PS - safer handling of loading of workfile [\#941](https://github.com/pypeclub/pype/pull/941)
+- Maya: Handling Arnold referenced AOVs [\#938](https://github.com/pypeclub/pype/pull/938)
+- TVPaint: switch layer IDs for layer names during identification [\#903](https://github.com/pypeclub/pype/pull/903)
+- TVPaint audio/sound loader [\#893](https://github.com/pypeclub/pype/pull/893)
+- Clone review session with children. [\#891](https://github.com/pypeclub/pype/pull/891)
+- Simple compositing data packager for freelancers [\#884](https://github.com/pypeclub/pype/pull/884)
+- Harmony deadline submission [\#881](https://github.com/pypeclub/pype/pull/881)
+- Maya: Optionally hide image planes from reviews. [\#840](https://github.com/pypeclub/pype/pull/840)
+- Maya: handle referenced AOVs for Vray [\#824](https://github.com/pypeclub/pype/pull/824)
+- DWAA/DWAB support on windows [\#795](https://github.com/pypeclub/pype/pull/795)
+- Unreal: animation, layout and setdress updates [\#695](https://github.com/pypeclub/pype/pull/695)
+
+**Fixed bugs:**
+
+- Maya: Looks - disable hardlinks [\#995](https://github.com/pypeclub/pype/pull/995)
+- Fix Ftrack custom attribute update [\#982](https://github.com/pypeclub/pype/pull/982)
+- Prores ks in burnin script [\#960](https://github.com/pypeclub/pype/pull/960)
+- terminal.py crash on import [\#839](https://github.com/pypeclub/pype/pull/839)
+- Extract review handle bizarre pixel aspect ratio [\#990](https://github.com/pypeclub/pype/pull/990)
+- Nuke: add nuke related env var to sumbission [\#988](https://github.com/pypeclub/pype/pull/988)
+- Nuke: missing preset's variable [\#984](https://github.com/pypeclub/pype/pull/984)
+- Get creator by name fix [\#979](https://github.com/pypeclub/pype/pull/979)
+- Fix update of project's tasks on Ftrack sync [\#972](https://github.com/pypeclub/pype/pull/972)
+- nuke: wrong frame offset in mov loader [\#971](https://github.com/pypeclub/pype/pull/971)
+- Create project structure action fix multiroot [\#967](https://github.com/pypeclub/pype/pull/967)
+- PS: remove pywin installation from hook [\#964](https://github.com/pypeclub/pype/pull/964)
+- Prores ks in burnin script [\#959](https://github.com/pypeclub/pype/pull/959)
+- Subset family is now stored in subset document [\#956](https://github.com/pypeclub/pype/pull/956)
+- DJV new version arguments [\#954](https://github.com/pypeclub/pype/pull/954)
+- TV Paint: Fix single frame Sequence [\#953](https://github.com/pypeclub/pype/pull/953)
+- nuke: missing `file` knob update [\#933](https://github.com/pypeclub/pype/pull/933)
+- Photoshop: Create from single layer was failing [\#920](https://github.com/pypeclub/pype/pull/920)
+- Nuke: baking mov with correct colorspace inherited from write [\#909](https://github.com/pypeclub/pype/pull/909)
+- Launcher fix actions discover [\#896](https://github.com/pypeclub/pype/pull/896)
+- Get the correct file path for the updated mov. [\#889](https://github.com/pypeclub/pype/pull/889)
+- Maya: Deadline submitter - shared data access violation [\#831](https://github.com/pypeclub/pype/pull/831)
+- Maya: Take into account vray master AOV switch [\#822](https://github.com/pypeclub/pype/pull/822)
+
+**Merged pull requests:**
+
+- Refactor blender to 3.0 format [\#934](https://github.com/pypeclub/pype/pull/934)
+
+## [2.14.6](https://github.com/pypeclub/pype/tree/2.14.6) (2021-01-15)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.5...2.14.6)
+
+**Fixed bugs:**
+
+- Nuke: improving of hashing path [\#885](https://github.com/pypeclub/pype/pull/885)
+
+**Merged pull requests:**
+
+- Hiero: cut videos with correct secons [\#892](https://github.com/pypeclub/pype/pull/892)
+- Faster sync to avalon preparation [\#869](https://github.com/pypeclub/pype/pull/869)
+
+## [2.14.5](https://github.com/pypeclub/pype/tree/2.14.5) (2021-01-06)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.4...2.14.5)
+
+**Merged pull requests:**
+
+- Pype logger refactor [\#866](https://github.com/pypeclub/pype/pull/866)
+
+## [2.14.4](https://github.com/pypeclub/pype/tree/2.14.4) (2020-12-18)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.3...2.14.4)
+
+**Merged pull requests:**
+
+- Fix - AE - added explicit cast to int [\#837](https://github.com/pypeclub/pype/pull/837)
+
+## [2.14.3](https://github.com/pypeclub/pype/tree/2.14.3) (2020-12-16)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.2...2.14.3)
+
+**Fixed bugs:**
+
+- TVPaint repair invalid metadata [\#809](https://github.com/pypeclub/pype/pull/809)
+- Feature/push hier value to nonhier action [\#807](https://github.com/pypeclub/pype/pull/807)
+- Harmony: fix palette and image sequence loader [\#806](https://github.com/pypeclub/pype/pull/806)
+
+**Merged pull requests:**
+
+- respecting space in path [\#823](https://github.com/pypeclub/pype/pull/823)
+
+## [2.14.2](https://github.com/pypeclub/pype/tree/2.14.2) (2020-12-04)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.1...2.14.2)
+
+**Enhancements:**
+
+- Collapsible wrapper in settings [\#767](https://github.com/pypeclub/pype/pull/767)
+
+**Fixed bugs:**
+
+- Harmony: template extraction and palettes thumbnails on mac [\#768](https://github.com/pypeclub/pype/pull/768)
+- TVPaint store context to workfile metadata \(764\) [\#766](https://github.com/pypeclub/pype/pull/766)
+- Extract review audio cut fix [\#763](https://github.com/pypeclub/pype/pull/763)
+
+**Merged pull requests:**
+
+- AE: fix publish after background load [\#781](https://github.com/pypeclub/pype/pull/781)
+- TVPaint store members key [\#769](https://github.com/pypeclub/pype/pull/769)
+
+## [2.14.1](https://github.com/pypeclub/pype/tree/2.14.1) (2020-11-27)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.0...2.14.1)
+
+**Enhancements:**
+
+- Settings required keys in modifiable dict [\#770](https://github.com/pypeclub/pype/pull/770)
+- Extract review may not add audio to output [\#761](https://github.com/pypeclub/pype/pull/761)
+
+**Fixed bugs:**
+
+- After Effects: frame range, file format and render source scene fixes [\#760](https://github.com/pypeclub/pype/pull/760)
+- Hiero: trimming review with clip event number [\#754](https://github.com/pypeclub/pype/pull/754)
+- TVPaint: fix updating of loaded subsets [\#752](https://github.com/pypeclub/pype/pull/752)
+- Maya: Vray handling of default aov [\#748](https://github.com/pypeclub/pype/pull/748)
+- Maya: multiple renderable cameras in layer didn't work [\#744](https://github.com/pypeclub/pype/pull/744)
+- Ftrack integrate custom attributes fix [\#742](https://github.com/pypeclub/pype/pull/742)
+
+## [2.14.0](https://github.com/pypeclub/pype/tree/2.14.0) (2020-11-23)
[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.7...2.14.0)
**Enhancements:**
+- Render publish plugins abstraction [\#687](https://github.com/pypeclub/pype/pull/687)
- Shot asset build trigger status [\#736](https://github.com/pypeclub/pype/pull/736)
- Maya: add camera rig publishing option [\#721](https://github.com/pypeclub/pype/pull/721)
- Sort instances by label in pyblish gui [\#719](https://github.com/pypeclub/pype/pull/719)
- Synchronize ftrack hierarchical and shot attributes [\#716](https://github.com/pypeclub/pype/pull/716)
- 686 standalonepublisher editorial from image sequences [\#699](https://github.com/pypeclub/pype/pull/699)
-- TV Paint: initial implementation of creators and local rendering [\#693](https://github.com/pypeclub/pype/pull/693)
-- Render publish plugins abstraction [\#687](https://github.com/pypeclub/pype/pull/687)
- Ask user to select non-default camera from scene or create a new. [\#678](https://github.com/pypeclub/pype/pull/678)
- TVPaint: image loader with options [\#675](https://github.com/pypeclub/pype/pull/675)
- Maya: Camera name can be added to burnins. [\#674](https://github.com/pypeclub/pype/pull/674)
@@ -21,25 +267,33 @@
**Fixed bugs:**
+- Bugfix Hiero Review / Plate representation publish [\#743](https://github.com/pypeclub/pype/pull/743)
+- Asset fetch second fix [\#726](https://github.com/pypeclub/pype/pull/726)
- TVPaint extract review fix [\#740](https://github.com/pypeclub/pype/pull/740)
- After Effects: Review were not being sent to ftrack [\#738](https://github.com/pypeclub/pype/pull/738)
-- Asset fetch second fix [\#726](https://github.com/pypeclub/pype/pull/726)
- Maya: vray proxy was not loading [\#722](https://github.com/pypeclub/pype/pull/722)
- Maya: Vray expected file fixes [\#682](https://github.com/pypeclub/pype/pull/682)
+- Missing audio on farm submission. [\#639](https://github.com/pypeclub/pype/pull/639)
**Deprecated:**
- Removed artist view from pyblish gui [\#717](https://github.com/pypeclub/pype/pull/717)
- Maya: disable legacy override check for cameras [\#715](https://github.com/pypeclub/pype/pull/715)
+**Merged pull requests:**
+
+- Application manager [\#728](https://github.com/pypeclub/pype/pull/728)
+- Feature \#664 3.0 lib refactor [\#706](https://github.com/pypeclub/pype/pull/706)
+- Lib from illicit part 2 [\#700](https://github.com/pypeclub/pype/pull/700)
+- 3.0 lib refactor - path tools [\#697](https://github.com/pypeclub/pype/pull/697)
## [2.13.7](https://github.com/pypeclub/pype/tree/2.13.7) (2020-11-19)
[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.6...2.13.7)
-**Merged pull requests:**
+**Fixed bugs:**
-- fix\(SP\): getting fps from context instead of nonexistent entity [\#729](https://github.com/pypeclub/pype/pull/729)
+- Standalone Publisher: getting fps from context instead of nonexistent entity [\#729](https://github.com/pypeclub/pype/pull/729)
# Changelog
diff --git a/HISTORY.md b/HISTORY.md
index b8b96fb4c3..053059a9ea 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -1,3 +1,268 @@
+## [2.16.0](https://github.com/pypeclub/pype/tree/2.16.0) (2021-03-22)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.15.3...2.16.0)
+
+**Enhancements:**
+
+- Nuke: deadline submit limit group filter [\#1167](https://github.com/pypeclub/pype/pull/1167)
+- Maya: support for Deadline Group and Limit Groups - backport 2.x [\#1156](https://github.com/pypeclub/pype/pull/1156)
+- Maya: fixes for Redshift support [\#1152](https://github.com/pypeclub/pype/pull/1152)
+- Nuke: adding preset for a Read node name to all img and mov Loaders [\#1146](https://github.com/pypeclub/pype/pull/1146)
+- nuke deadline submit with environ var from presets overrides [\#1142](https://github.com/pypeclub/pype/pull/1142)
+- Change timers after task change [\#1138](https://github.com/pypeclub/pype/pull/1138)
+- Nuke: shortcuts for Pype menu [\#1127](https://github.com/pypeclub/pype/pull/1127)
+- Nuke: workfile template [\#1124](https://github.com/pypeclub/pype/pull/1124)
+- Sites local settings by site name [\#1117](https://github.com/pypeclub/pype/pull/1117)
+- Reset loader's asset selection on context change [\#1106](https://github.com/pypeclub/pype/pull/1106)
+- Bulk mov render publishing [\#1101](https://github.com/pypeclub/pype/pull/1101)
+- Photoshop: mark publishable instances [\#1093](https://github.com/pypeclub/pype/pull/1093)
+- Added ability to define BG color for extract review [\#1088](https://github.com/pypeclub/pype/pull/1088)
+- TVPaint extractor enhancement [\#1080](https://github.com/pypeclub/pype/pull/1080)
+- Photoshop: added support for .psb in workfiles [\#1078](https://github.com/pypeclub/pype/pull/1078)
+- Optionally add task to subset name [\#1072](https://github.com/pypeclub/pype/pull/1072)
+- Only extend clip range when collecting. [\#1008](https://github.com/pypeclub/pype/pull/1008)
+- Collect audio for farm reviews. [\#1073](https://github.com/pypeclub/pype/pull/1073)
+
+
+**Fixed bugs:**
+
+- Fix path spaces in jpeg extractor [\#1174](https://github.com/pypeclub/pype/pull/1174)
+- Maya: Bugfix: superclass for CreateCameraRig [\#1166](https://github.com/pypeclub/pype/pull/1166)
+- Maya: Submit to Deadline - fix typo in condition [\#1163](https://github.com/pypeclub/pype/pull/1163)
+- Avoid dot in repre extension [\#1125](https://github.com/pypeclub/pype/pull/1125)
+- Fix versions variable usage in standalone publisher [\#1090](https://github.com/pypeclub/pype/pull/1090)
+- Collect instance data fix subset query [\#1082](https://github.com/pypeclub/pype/pull/1082)
+- Fix getting the camera name. [\#1067](https://github.com/pypeclub/pype/pull/1067)
+- Nuke: Ensure "NUKE\_TEMP\_DIR" is not part of the Deadline job environment. [\#1064](https://github.com/pypeclub/pype/pull/1064)
+
+## [2.15.3](https://github.com/pypeclub/pype/tree/2.15.3) (2021-02-26)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.15.2...2.15.3)
+
+**Enhancements:**
+
+- Maya: speedup renderable camera collection [\#1053](https://github.com/pypeclub/pype/pull/1053)
+- Harmony - add regex search to filter allowed task names for collectin… [\#1047](https://github.com/pypeclub/pype/pull/1047)
+
+**Fixed bugs:**
+
+- Ftrack integrate hierarchy fix [\#1085](https://github.com/pypeclub/pype/pull/1085)
+- Explicit subset filter in anatomy instance data [\#1059](https://github.com/pypeclub/pype/pull/1059)
+- TVPaint frame offset [\#1057](https://github.com/pypeclub/pype/pull/1057)
+- Auto fix unicode strings [\#1046](https://github.com/pypeclub/pype/pull/1046)
+
+## [2.15.2](https://github.com/pypeclub/pype/tree/2.15.2) (2021-02-19)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.15.1...2.15.2)
+
+**Enhancements:**
+
+- Maya: Vray scene publishing [\#1013](https://github.com/pypeclub/pype/pull/1013)
+
+**Fixed bugs:**
+
+- Fix entity move under project [\#1040](https://github.com/pypeclub/pype/pull/1040)
+- smaller nuke fixes from production [\#1036](https://github.com/pypeclub/pype/pull/1036)
+- TVPaint thumbnail extract fix [\#1031](https://github.com/pypeclub/pype/pull/1031)
+
+## [2.15.1](https://github.com/pypeclub/pype/tree/2.15.1) (2021-02-12)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.15.0...2.15.1)
+
+**Enhancements:**
+
+- Delete version as loader action [\#1011](https://github.com/pypeclub/pype/pull/1011)
+- Delete old versions [\#445](https://github.com/pypeclub/pype/pull/445)
+
+**Fixed bugs:**
+
+- PS - remove obsolete functions from pywin32 [\#1006](https://github.com/pypeclub/pype/pull/1006)
+- Clone description of review session objects. [\#922](https://github.com/pypeclub/pype/pull/922)
+
+## [2.15.0](https://github.com/pypeclub/pype/tree/2.15.0) (2021-02-09)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.6...2.15.0)
+
+**Enhancements:**
+
+- Resolve - loading and updating clips [\#932](https://github.com/pypeclub/pype/pull/932)
+- Release/2.15.0 [\#926](https://github.com/pypeclub/pype/pull/926)
+- Photoshop: add option for template.psd and prelaunch hook [\#894](https://github.com/pypeclub/pype/pull/894)
+- Nuke: deadline presets [\#993](https://github.com/pypeclub/pype/pull/993)
+- Maya: Alembic only set attributes that exists. [\#986](https://github.com/pypeclub/pype/pull/986)
+- Harmony: render local and handle fixes [\#981](https://github.com/pypeclub/pype/pull/981)
+- PSD Bulk export of ANIM group [\#965](https://github.com/pypeclub/pype/pull/965)
+- AE - added prelaunch hook for opening last or workfile from template [\#944](https://github.com/pypeclub/pype/pull/944)
+- PS - safer handling of loading of workfile [\#941](https://github.com/pypeclub/pype/pull/941)
+- Maya: Handling Arnold referenced AOVs [\#938](https://github.com/pypeclub/pype/pull/938)
+- TVPaint: switch layer IDs for layer names during identification [\#903](https://github.com/pypeclub/pype/pull/903)
+- TVPaint audio/sound loader [\#893](https://github.com/pypeclub/pype/pull/893)
+- Clone review session with children. [\#891](https://github.com/pypeclub/pype/pull/891)
+- Simple compositing data packager for freelancers [\#884](https://github.com/pypeclub/pype/pull/884)
+- Harmony deadline submission [\#881](https://github.com/pypeclub/pype/pull/881)
+- Maya: Optionally hide image planes from reviews. [\#840](https://github.com/pypeclub/pype/pull/840)
+- Maya: handle referenced AOVs for Vray [\#824](https://github.com/pypeclub/pype/pull/824)
+- DWAA/DWAB support on windows [\#795](https://github.com/pypeclub/pype/pull/795)
+- Unreal: animation, layout and setdress updates [\#695](https://github.com/pypeclub/pype/pull/695)
+
+**Fixed bugs:**
+
+- Maya: Looks - disable hardlinks [\#995](https://github.com/pypeclub/pype/pull/995)
+- Fix Ftrack custom attribute update [\#982](https://github.com/pypeclub/pype/pull/982)
+- Prores ks in burnin script [\#960](https://github.com/pypeclub/pype/pull/960)
+- terminal.py crash on import [\#839](https://github.com/pypeclub/pype/pull/839)
+- Extract review handle bizarre pixel aspect ratio [\#990](https://github.com/pypeclub/pype/pull/990)
+- Nuke: add nuke related env var to sumbission [\#988](https://github.com/pypeclub/pype/pull/988)
+- Nuke: missing preset's variable [\#984](https://github.com/pypeclub/pype/pull/984)
+- Get creator by name fix [\#979](https://github.com/pypeclub/pype/pull/979)
+- Fix update of project's tasks on Ftrack sync [\#972](https://github.com/pypeclub/pype/pull/972)
+- nuke: wrong frame offset in mov loader [\#971](https://github.com/pypeclub/pype/pull/971)
+- Create project structure action fix multiroot [\#967](https://github.com/pypeclub/pype/pull/967)
+- PS: remove pywin installation from hook [\#964](https://github.com/pypeclub/pype/pull/964)
+- Prores ks in burnin script [\#959](https://github.com/pypeclub/pype/pull/959)
+- Subset family is now stored in subset document [\#956](https://github.com/pypeclub/pype/pull/956)
+- DJV new version arguments [\#954](https://github.com/pypeclub/pype/pull/954)
+- TV Paint: Fix single frame Sequence [\#953](https://github.com/pypeclub/pype/pull/953)
+- nuke: missing `file` knob update [\#933](https://github.com/pypeclub/pype/pull/933)
+- Photoshop: Create from single layer was failing [\#920](https://github.com/pypeclub/pype/pull/920)
+- Nuke: baking mov with correct colorspace inherited from write [\#909](https://github.com/pypeclub/pype/pull/909)
+- Launcher fix actions discover [\#896](https://github.com/pypeclub/pype/pull/896)
+- Get the correct file path for the updated mov. [\#889](https://github.com/pypeclub/pype/pull/889)
+- Maya: Deadline submitter - shared data access violation [\#831](https://github.com/pypeclub/pype/pull/831)
+- Maya: Take into account vray master AOV switch [\#822](https://github.com/pypeclub/pype/pull/822)
+
+**Merged pull requests:**
+
+- Refactor blender to 3.0 format [\#934](https://github.com/pypeclub/pype/pull/934)
+
+## [2.14.6](https://github.com/pypeclub/pype/tree/2.14.6) (2021-01-15)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.5...2.14.6)
+
+**Fixed bugs:**
+
+- Nuke: improving of hashing path [\#885](https://github.com/pypeclub/pype/pull/885)
+
+**Merged pull requests:**
+
+- Hiero: cut videos with correct secons [\#892](https://github.com/pypeclub/pype/pull/892)
+- Faster sync to avalon preparation [\#869](https://github.com/pypeclub/pype/pull/869)
+
+## [2.14.5](https://github.com/pypeclub/pype/tree/2.14.5) (2021-01-06)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.4...2.14.5)
+
+**Merged pull requests:**
+
+- Pype logger refactor [\#866](https://github.com/pypeclub/pype/pull/866)
+
+## [2.14.4](https://github.com/pypeclub/pype/tree/2.14.4) (2020-12-18)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.3...2.14.4)
+
+**Merged pull requests:**
+
+- Fix - AE - added explicit cast to int [\#837](https://github.com/pypeclub/pype/pull/837)
+
+## [2.14.3](https://github.com/pypeclub/pype/tree/2.14.3) (2020-12-16)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.2...2.14.3)
+
+**Fixed bugs:**
+
+- TVPaint repair invalid metadata [\#809](https://github.com/pypeclub/pype/pull/809)
+- Feature/push hier value to nonhier action [\#807](https://github.com/pypeclub/pype/pull/807)
+- Harmony: fix palette and image sequence loader [\#806](https://github.com/pypeclub/pype/pull/806)
+
+**Merged pull requests:**
+
+- respecting space in path [\#823](https://github.com/pypeclub/pype/pull/823)
+
+## [2.14.2](https://github.com/pypeclub/pype/tree/2.14.2) (2020-12-04)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.1...2.14.2)
+
+**Enhancements:**
+
+- Collapsible wrapper in settings [\#767](https://github.com/pypeclub/pype/pull/767)
+
+**Fixed bugs:**
+
+- Harmony: template extraction and palettes thumbnails on mac [\#768](https://github.com/pypeclub/pype/pull/768)
+- TVPaint store context to workfile metadata \(764\) [\#766](https://github.com/pypeclub/pype/pull/766)
+- Extract review audio cut fix [\#763](https://github.com/pypeclub/pype/pull/763)
+
+**Merged pull requests:**
+
+- AE: fix publish after background load [\#781](https://github.com/pypeclub/pype/pull/781)
+- TVPaint store members key [\#769](https://github.com/pypeclub/pype/pull/769)
+
+## [2.14.1](https://github.com/pypeclub/pype/tree/2.14.1) (2020-11-27)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.0...2.14.1)
+
+**Enhancements:**
+
+- Settings required keys in modifiable dict [\#770](https://github.com/pypeclub/pype/pull/770)
+- Extract review may not add audio to output [\#761](https://github.com/pypeclub/pype/pull/761)
+
+**Fixed bugs:**
+
+- After Effects: frame range, file format and render source scene fixes [\#760](https://github.com/pypeclub/pype/pull/760)
+- Hiero: trimming review with clip event number [\#754](https://github.com/pypeclub/pype/pull/754)
+- TVPaint: fix updating of loaded subsets [\#752](https://github.com/pypeclub/pype/pull/752)
+- Maya: Vray handling of default aov [\#748](https://github.com/pypeclub/pype/pull/748)
+- Maya: multiple renderable cameras in layer didn't work [\#744](https://github.com/pypeclub/pype/pull/744)
+- Ftrack integrate custom attributes fix [\#742](https://github.com/pypeclub/pype/pull/742)
+
+## [2.14.0](https://github.com/pypeclub/pype/tree/2.14.0) (2020-11-23)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.7...2.14.0)
+
+**Enhancements:**
+
+- Render publish plugins abstraction [\#687](https://github.com/pypeclub/pype/pull/687)
+- Shot asset build trigger status [\#736](https://github.com/pypeclub/pype/pull/736)
+- Maya: add camera rig publishing option [\#721](https://github.com/pypeclub/pype/pull/721)
+- Sort instances by label in pyblish gui [\#719](https://github.com/pypeclub/pype/pull/719)
+- Synchronize ftrack hierarchical and shot attributes [\#716](https://github.com/pypeclub/pype/pull/716)
+- 686 standalonepublisher editorial from image sequences [\#699](https://github.com/pypeclub/pype/pull/699)
+- Ask user to select non-default camera from scene or create a new. [\#678](https://github.com/pypeclub/pype/pull/678)
+- TVPaint: image loader with options [\#675](https://github.com/pypeclub/pype/pull/675)
+- Maya: Camera name can be added to burnins. [\#674](https://github.com/pypeclub/pype/pull/674)
+- After Effects: base integration with loaders [\#667](https://github.com/pypeclub/pype/pull/667)
+- Harmony: Javascript refactoring and overall stability improvements [\#666](https://github.com/pypeclub/pype/pull/666)
+
+**Fixed bugs:**
+
+- Bugfix Hiero Review / Plate representation publish [\#743](https://github.com/pypeclub/pype/pull/743)
+- Asset fetch second fix [\#726](https://github.com/pypeclub/pype/pull/726)
+- TVPaint extract review fix [\#740](https://github.com/pypeclub/pype/pull/740)
+- After Effects: Review were not being sent to ftrack [\#738](https://github.com/pypeclub/pype/pull/738)
+- Maya: vray proxy was not loading [\#722](https://github.com/pypeclub/pype/pull/722)
+- Maya: Vray expected file fixes [\#682](https://github.com/pypeclub/pype/pull/682)
+- Missing audio on farm submission. [\#639](https://github.com/pypeclub/pype/pull/639)
+
+**Deprecated:**
+
+- Removed artist view from pyblish gui [\#717](https://github.com/pypeclub/pype/pull/717)
+- Maya: disable legacy override check for cameras [\#715](https://github.com/pypeclub/pype/pull/715)
+
+**Merged pull requests:**
+
+- Application manager [\#728](https://github.com/pypeclub/pype/pull/728)
+- Feature \#664 3.0 lib refactor [\#706](https://github.com/pypeclub/pype/pull/706)
+- Lib from illicit part 2 [\#700](https://github.com/pypeclub/pype/pull/700)
+- 3.0 lib refactor - path tools [\#697](https://github.com/pypeclub/pype/pull/697)
+
+## [2.13.7](https://github.com/pypeclub/pype/tree/2.13.7) (2020-11-19)
+
+[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.6...2.13.7)
+
+**Fixed bugs:**
+
+- Standalone Publisher: getting fps from context instead of nonexistent entity [\#729](https://github.com/pypeclub/pype/pull/729)
+
# Changelog
## [2.13.6](https://github.com/pypeclub/pype/tree/2.13.6) (2020-11-15)
@@ -789,4 +1054,7 @@ A large cleanup release. Most of the change are under the hood.
- _(avalon)_ subsets in maya 2019 weren't behaving correctly in the outliner
+\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)*
+
+
\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)*
diff --git a/README.md b/README.md
index 73620d7885..566e226538 100644
--- a/README.md
+++ b/README.md
@@ -2,6 +2,10 @@
OpenPype
====
+[](https://github.com/pypeclub/pype/actions/workflows/documentation.yml)  
+
+
+
Introduction
------------
@@ -61,7 +65,8 @@ git clone --recurse-submodules git@github.com:Pypeclub/OpenPype.git
#### To build OpenPype:
1) Run `.\tools\create_env.ps1` to create virtual environment in `.\venv`
-2) Run `.\tools\build.ps1` to build OpenPype executables in `.\build\`
+2) Run `.\tools\fetch_thirdparty_libs.ps1` to download third-party dependencies like ffmpeg and oiio. Those will be included in build.
+3) Run `.\tools\build.ps1` to build OpenPype executables in `.\build\`
To create distributable OpenPype versions, run `./tools/create_zip.ps1` - that will
create zip file with name `openpype-vx.x.x.zip` parsed from current OpenPype repository and
@@ -116,8 +121,8 @@ pyenv local 3.7.9
#### To build OpenPype:
1) Run `.\tools\create_env.sh` to create virtual environment in `.\venv`
-2) Run `.\tools\build.sh` to build OpenPype executables in `.\build\`
-
+2) Run `./tools/fetch_thirdparty_libs.sh` to download third-party dependencies like ffmpeg and oiio. Those will be included in build.
+3) Run `./tools/build.sh` to build OpenPype executables in `./build/`
### Linux
diff --git a/openpype/hooks/pre_python2_vendor.py b/openpype/hooks/pre_python_2_prelaunch.py
similarity index 86%
rename from openpype/hooks/pre_python2_vendor.py
rename to openpype/hooks/pre_python_2_prelaunch.py
index 815682fef8..8232f35623 100644
--- a/openpype/hooks/pre_python2_vendor.py
+++ b/openpype/hooks/pre_python_2_prelaunch.py
@@ -4,14 +4,15 @@ from openpype.lib import PreLaunchHook
class PrePython2Vendor(PreLaunchHook):
"""Prepend python 2 dependencies for py2 hosts."""
- # WARNING This hook will probably be deprecated in OpenPype 3 - kept for test
+ # WARNING This hook will probably be deprecated in OpenPype 3 - kept for
+ # test
order = 10
- app_groups = ["hiero", "nuke", "nukex", "unreal"]
+ app_groups = ["hiero", "nuke", "nukex", "unreal", "maya", "houdini"]
def execute(self):
# Prepare vendor dir path
self.log.info("adding global python 2 vendor")
- pype_root = os.getenv("OPENPYPE_ROOT")
+ pype_root = os.getenv("OPENPYPE_REPOS_ROOT")
python_2_vendor = os.path.join(
pype_root,
"openpype",
diff --git a/openpype/hosts/maya/plugins/create/create_look.py b/openpype/hosts/maya/plugins/create/create_look.py
index 96266aa799..36a3120a9a 100644
--- a/openpype/hosts/maya/plugins/create/create_look.py
+++ b/openpype/hosts/maya/plugins/create/create_look.py
@@ -12,6 +12,7 @@ class CreateLook(plugin.Creator):
family = "look"
icon = "paint-brush"
defaults = ['Main']
+ make_tx = True
def __init__(self, *args, **kwargs):
super(CreateLook, self).__init__(*args, **kwargs)
@@ -19,7 +20,7 @@ class CreateLook(plugin.Creator):
self.data["renderlayer"] = lib.get_current_renderlayer()
# Whether to automatically convert the textures to .tx upon publish.
- self.data["maketx"] = True
+ self.data["maketx"] = self.make_tx
# Enable users to force a copy.
self.data["forceCopy"] = False
diff --git a/openpype/hosts/maya/plugins/load/load_look.py b/openpype/hosts/maya/plugins/load/load_look.py
index 4392d1f78d..c39bbc497e 100644
--- a/openpype/hosts/maya/plugins/load/load_look.py
+++ b/openpype/hosts/maya/plugins/load/load_look.py
@@ -105,7 +105,23 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
# Load relationships
shader_relation = api.get_representation_path(json_representation)
with open(shader_relation, "r") as f:
- relationships = json.load(f)
+ json_data = json.load(f)
+
+ for rel, data in json_data["relationships"].items():
+ # process only non-shading nodes
+ current_node = "{}:{}".format(container["namespace"], rel)
+ if current_node in shader_nodes:
+ continue
+ print("processing {}".format(rel))
+ current_members = set(cmds.ls(
+ cmds.sets(current_node, query=True) or [], long=True))
+ new_members = {"{}".format(
+ m["name"]) for m in data["members"] or []}
+ dif = new_members.difference(current_members)
+
+ # add to set
+ cmds.sets(
+ dif, forceElement="{}:{}".format(container["namespace"], rel))
# update of reference could result in failed edits - material is not
# present because of renaming etc.
@@ -120,7 +136,7 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
cmds.file(cr=reference_node) # cleanReference
# reapply shading groups from json representation on orig nodes
- openpype.hosts.maya.api.lib.apply_shaders(relationships,
+ openpype.hosts.maya.api.lib.apply_shaders(json_data,
shader_nodes,
orig_nodes)
@@ -128,12 +144,13 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
"All successful edits were kept intact.\n",
"Failed and removed edits:"]
msg.extend(failed_edits)
+
msg = ScrollMessageBox(QtWidgets.QMessageBox.Warning,
"Some reference edit failed",
msg)
msg.exec_()
- attributes = relationships.get("attributes", [])
+ attributes = json_data.get("attributes", [])
# region compute lookup
nodes_by_id = defaultdict(list)
diff --git a/openpype/hosts/maya/plugins/publish/collect_look.py b/openpype/hosts/maya/plugins/publish/collect_look.py
index acc6d8f128..238213c000 100644
--- a/openpype/hosts/maya/plugins/publish/collect_look.py
+++ b/openpype/hosts/maya/plugins/publish/collect_look.py
@@ -1,8 +1,10 @@
+# -*- coding: utf-8 -*-
+"""Maya look collector."""
import re
import os
import glob
-from maya import cmds
+from maya import cmds # noqa
import pyblish.api
from openpype.hosts.maya.api import lib
@@ -16,6 +18,11 @@ SHAPE_ATTRS = ["castsShadows",
"doubleSided",
"opposite"]
+RENDERER_NODE_TYPES = [
+ # redshift
+ "RedshiftMeshParameters"
+]
+
SHAPE_ATTRS = set(SHAPE_ATTRS)
@@ -29,7 +36,6 @@ def get_look_attrs(node):
list: Attribute names to extract
"""
-
# When referenced get only attributes that are "changed since file open"
# which includes any reference edits, otherwise take *all* user defined
# attributes
@@ -219,9 +225,13 @@ class CollectLook(pyblish.api.InstancePlugin):
with lib.renderlayer(instance.data["renderlayer"]):
self.collect(instance)
-
def collect(self, instance):
+ """Collect looks.
+ Args:
+ instance: Instance to collect.
+
+ """
self.log.info("Looking for look associations "
"for %s" % instance.data['name'])
@@ -235,48 +245,91 @@ class CollectLook(pyblish.api.InstancePlugin):
self.log.info("Gathering set relations..")
# Ensure iteration happen in a list so we can remove keys from the
# dict within the loop
- for objset in list(sets):
- self.log.debug("From %s.." % objset)
+
+ # skipped types of attribute on render specific nodes
+ disabled_types = ["message", "TdataCompound"]
+
+ for obj_set in list(sets):
+ self.log.debug("From {}".format(obj_set))
+
+ # if node is specified as renderer node type, it will be
+ # serialized with its attributes.
+ if cmds.nodeType(obj_set) in RENDERER_NODE_TYPES:
+ self.log.info("- {} is {}".format(
+ obj_set, cmds.nodeType(obj_set)))
+
+ node_attrs = []
+
+ # serialize its attributes so they can be recreated on look
+ # load.
+ for attr in cmds.listAttr(obj_set):
+ # skip publishedNodeInfo attributes as they break
+ # getAttr() and we don't need them anyway
+ if attr.startswith("publishedNodeInfo"):
+ continue
+
+ # skip attribute types defined in 'disabled_types' list
+ if cmds.getAttr("{}.{}".format(obj_set, attr), type=True) in disabled_types: # noqa
+ continue
+
+ node_attrs.append((
+ attr,
+ cmds.getAttr("{}.{}".format(obj_set, attr)),
+ cmds.getAttr(
+ "{}.{}".format(obj_set, attr), type=True)
+ ))
+
+ for member in cmds.ls(
+ cmds.sets(obj_set, query=True), long=True):
+ member_data = self.collect_member_data(member,
+ instance_lookup)
+ if not member_data:
+ continue
+
+ # Add information of the node to the members list
+ sets[obj_set]["members"].append(member_data)
# Get all nodes of the current objectSet (shadingEngine)
- for member in cmds.ls(cmds.sets(objset, query=True), long=True):
+ for member in cmds.ls(cmds.sets(obj_set, query=True), long=True):
member_data = self.collect_member_data(member,
instance_lookup)
if not member_data:
continue
# Add information of the node to the members list
- sets[objset]["members"].append(member_data)
+ sets[obj_set]["members"].append(member_data)
# Remove sets that didn't have any members assigned in the end
# Thus the data will be limited to only what we need.
- self.log.info("objset {}".format(sets[objset]))
- if not sets[objset]["members"] or (not objset.endswith("SG")):
- self.log.info("Removing redundant set information: "
- "%s" % objset)
- sets.pop(objset, None)
+ self.log.info("obj_set {}".format(sets[obj_set]))
+ if not sets[obj_set]["members"]:
+ self.log.info(
+ "Removing redundant set information: {}".format(obj_set))
+ sets.pop(obj_set, None)
self.log.info("Gathering attribute changes to instance members..")
attributes = self.collect_attributes_changed(instance)
# Store data on the instance
- instance.data["lookData"] = {"attributes": attributes,
- "relationships": sets}
+ instance.data["lookData"] = {
+ "attributes": attributes,
+ "relationships": sets
+ }
# Collect file nodes used by shading engines (if we have any)
- files = list()
- looksets = sets.keys()
- shaderAttrs = [
- "surfaceShader",
- "volumeShader",
- "displacementShader",
- "aiSurfaceShader",
- "aiVolumeShader"]
- materials = list()
+ files = []
+ look_sets = sets.keys()
+ shader_attrs = [
+ "surfaceShader",
+ "volumeShader",
+ "displacementShader",
+ "aiSurfaceShader",
+ "aiVolumeShader"]
+ if look_sets:
+ materials = []
- if looksets:
- for look in looksets:
- for at in shaderAttrs:
+ for look in look_sets:
+ for at in shader_attrs:
try:
con = cmds.listConnections("{}.{}".format(look, at))
except ValueError:
@@ -289,10 +342,10 @@ class CollectLook(pyblish.api.InstancePlugin):
self.log.info("Found materials:\n{}".format(materials))
- self.log.info("Found the following sets:\n{}".format(looksets))
+ self.log.info("Found the following sets:\n{}".format(look_sets))
# Get the entire node chain of the look sets
- # history = cmds.listHistory(looksets)
- history = list()
+ # history = cmds.listHistory(look_sets)
+ history = []
for material in materials:
history.extend(cmds.listHistory(material))
files = cmds.ls(history, type="file", long=True)
@@ -313,7 +366,7 @@ class CollectLook(pyblish.api.InstancePlugin):
# Ensure unique shader sets
# Add shader sets to the instance for unify ID validation
- instance.extend(shader for shader in looksets if shader
+ instance.extend(shader for shader in look_sets if shader
not in instance_lookup)
self.log.info("Collected look for %s" % instance)
@@ -331,7 +384,7 @@ class CollectLook(pyblish.api.InstancePlugin):
dict
"""
- sets = dict()
+ sets = {}
for node in instance:
related_sets = lib.get_related_sets(node)
if not related_sets:
@@ -427,6 +480,11 @@ class CollectLook(pyblish.api.InstancePlugin):
"""
self.log.debug("processing: {}".format(node))
+ if cmds.nodeType(node) not in ["file", "aiImage"]:
+ self.log.error(
+ "Unsupported file node: {}".format(cmds.nodeType(node)))
+ raise AssertionError("Unsupported file node")
+
if cmds.nodeType(node) == 'file':
self.log.debug(" - file node")
attribute = "{}.fileTextureName".format(node)
@@ -435,6 +493,7 @@ class CollectLook(pyblish.api.InstancePlugin):
self.log.debug("aiImage node")
attribute = "{}.filename".format(node)
computed_attribute = attribute
+
source = cmds.getAttr(attribute)
self.log.info(" - file source: {}".format(source))
color_space_attr = "{}.colorSpace".format(node)
diff --git a/openpype/hosts/maya/plugins/publish/extract_look.py b/openpype/hosts/maya/plugins/publish/extract_look.py
index 79488a372c..bdd061578e 100644
--- a/openpype/hosts/maya/plugins/publish/extract_look.py
+++ b/openpype/hosts/maya/plugins/publish/extract_look.py
@@ -1,13 +1,14 @@
+# -*- coding: utf-8 -*-
+"""Maya look extractor."""
import os
import sys
import json
-import copy
import tempfile
import contextlib
import subprocess
from collections import OrderedDict
-from maya import cmds
+from maya import cmds # noqa
import pyblish.api
import avalon.maya
@@ -22,23 +23,38 @@ HARDLINK = 2
def find_paths_by_hash(texture_hash):
- # Find the texture hash key in the dictionary and all paths that
- # originate from it.
+ """Find the texture hash key in the dictionary.
+
+ All paths that originate from it.
+
+ Args:
+ texture_hash (str): Hash of the texture.
+
+ Returns:
+ str: path to texture if found.
+
+ """
key = "data.sourceHashes.{0}".format(texture_hash)
return io.distinct(key, {"type": "version"})
def maketx(source, destination, *args):
- """Make .tx using maketx with some default settings.
+ """Make `.tx` using `maketx` with some default settings.
+
The settings are based on default as used in Arnold's
txManager in the scene.
This function requires the `maketx` executable to be
on the `PATH`.
+
Args:
source (str): Path to source file.
destination (str): Writing destination path.
- """
+ *args: Additional arguments for `maketx`.
+ Returns:
+ str: Output of `maketx` command.
+
+ """
cmd = [
"maketx",
"-v", # verbose
@@ -56,7 +72,7 @@ def maketx(source, destination, *args):
cmd = " ".join(cmd)
- CREATE_NO_WINDOW = 0x08000000
+ CREATE_NO_WINDOW = 0x08000000 # noqa
kwargs = dict(args=cmd, stderr=subprocess.STDOUT)
if sys.platform == "win32":
@@ -118,12 +134,58 @@ class ExtractLook(openpype.api.Extractor):
hosts = ["maya"]
families = ["look"]
order = pyblish.api.ExtractorOrder + 0.2
+ scene_type = "ma"
+
+ @staticmethod
+ def get_renderer_name():
+ """Get renderer name from Maya.
+
+ Returns:
+ str: Renderer name.
+
+ """
+ renderer = cmds.getAttr(
+ "defaultRenderGlobals.currentRenderer"
+ ).lower()
+ # handle various renderman names
+ if renderer.startswith("renderman"):
+ renderer = "renderman"
+ return renderer
+
+ def get_maya_scene_type(self, instance):
+ """Get Maya scene type from settings.
+
+ Args:
+ instance (pyblish.api.Instance): Instance with collected
+ project settings.
+
+ """
+ ext_mapping = (
+ instance.context.data["project_settings"]["maya"]["ext_mapping"]
+ )
+ if ext_mapping:
+ self.log.info("Looking in settings for scene type ...")
+ # use extension mapping for first family found
+ for family in self.families:
+ try:
+ self.scene_type = ext_mapping[family]
+ self.log.info(
+ "Using {} as scene type".format(self.scene_type))
+ break
+ except KeyError:
+ # no preset found
+ pass
def process(self, instance):
+ """Plugin entry point.
+ Args:
+ instance: Instance to process.
+
+ """
# Define extract output file path
dir_path = self.staging_dir(instance)
- maya_fname = "{0}.ma".format(instance.name)
+ maya_fname = "{0}.{1}".format(instance.name, self.scene_type)
json_fname = "{0}.json".format(instance.name)
# Make texture dump folder
@@ -148,7 +210,7 @@ class ExtractLook(openpype.api.Extractor):
# Collect all unique files used in the resources
files = set()
- files_metadata = dict()
+ files_metadata = {}
for resource in resources:
# Preserve color space values (force value after filepath change)
# This will also trigger in the same order at end of context to
@@ -162,35 +224,33 @@ class ExtractLook(openpype.api.Extractor):
# files.update(os.path.normpath(f))
# Process the resource files
- transfers = list()
- hardlinks = list()
- hashes = dict()
- forceCopy = instance.data.get("forceCopy", False)
+ transfers = []
+ hardlinks = []
+ hashes = {}
+ force_copy = instance.data.get("forceCopy", False)
self.log.info(files)
for filepath in files_metadata:
- cspace = files_metadata[filepath]["color_space"]
- linearise = False
- if do_maketx:
- if cspace == "sRGB":
- linearise = True
- # set its file node to 'raw' as tx will be linearized
- files_metadata[filepath]["color_space"] = "raw"
+ linearize = False
+ if do_maketx and files_metadata[filepath]["color_space"] == "sRGB": # noqa: E501
+ linearize = True
+ # set its file node to 'raw' as tx will be linearized
+ files_metadata[filepath]["color_space"] = "raw"
- source, mode, hash = self._process_texture(
+ source, mode, texture_hash = self._process_texture(
filepath,
do_maketx,
staging=dir_path,
- linearise=linearise,
- force=forceCopy
+ linearize=linearize,
+ force=force_copy
)
destination = self.resource_destination(instance,
source,
do_maketx)
# Force copy is specified.
- if forceCopy:
+ if force_copy:
mode = COPY
if mode == COPY:
@@ -202,10 +262,10 @@ class ExtractLook(openpype.api.Extractor):
# Store the hashes from hash to destination to include in the
# database
- hashes[hash] = destination
+ hashes[texture_hash] = destination
# Remap the resources to the destination path (change node attributes)
- destinations = dict()
+ destinations = {}
remap = OrderedDict() # needs to be ordered, see color space values
for resource in resources:
source = os.path.normpath(resource["source"])
@@ -222,7 +282,7 @@ class ExtractLook(openpype.api.Extractor):
color_space_attr = resource["node"] + ".colorSpace"
color_space = cmds.getAttr(color_space_attr)
if files_metadata[source]["color_space"] == "raw":
- # set colorpsace to raw if we linearized it
+ # set color space to raw if we linearized it
color_space = "Raw"
# Remap file node filename to destination
attr = resource["attribute"]
@@ -267,11 +327,11 @@ class ExtractLook(openpype.api.Extractor):
json.dump(data, f)
if "files" not in instance.data:
- instance.data["files"] = list()
+ instance.data["files"] = []
if "hardlinks" not in instance.data:
- instance.data["hardlinks"] = list()
+ instance.data["hardlinks"] = []
if "transfers" not in instance.data:
- instance.data["transfers"] = list()
+ instance.data["transfers"] = []
instance.data["files"].append(maya_fname)
instance.data["files"].append(json_fname)
@@ -311,14 +371,26 @@ class ExtractLook(openpype.api.Extractor):
maya_path))
def resource_destination(self, instance, filepath, do_maketx):
- anatomy = instance.context.data["anatomy"]
+ """Get resource destination path.
+ This is a utility function to change the path if the resource file name is
+ changed by some external tool like `maketx`.
+
+ Args:
+ instance: Current Instance.
+ filepath (str): Resource path
+ do_maketx (bool): Flag if resource is processed by `maketx`.
+
+ Returns:
+ str: Path to resource file
+
+ """
resources_dir = instance.data["resourcesDir"]
# Compute destination location
basename, ext = os.path.splitext(os.path.basename(filepath))
- # If maketx then the texture will always end with .tx
+ # If `maketx` then the texture will always end with .tx
if do_maketx:
ext = ".tx"
@@ -326,7 +398,7 @@ class ExtractLook(openpype.api.Extractor):
resources_dir, basename + ext
)
- def _process_texture(self, filepath, do_maketx, staging, linearise, force):
+ def _process_texture(self, filepath, do_maketx, staging, linearize, force):
"""Process a single texture file on disk for publishing.
This will:
1. Check whether it's already published, if so it will do hardlink
@@ -363,7 +435,7 @@ class ExtractLook(openpype.api.Extractor):
# Produce .tx file in staging if source file is not .tx
converted = os.path.join(staging, "resources", fname + ".tx")
- if linearise:
+ if linearize:
self.log.info("tx: converting sRGB -> linear")
colorconvert = "--colorconvert sRGB linear"
else:
diff --git a/openpype/hosts/maya/plugins/publish/validate_look_sets.py b/openpype/hosts/maya/plugins/publish/validate_look_sets.py
index 48431d0906..5e737ca876 100644
--- a/openpype/hosts/maya/plugins/publish/validate_look_sets.py
+++ b/openpype/hosts/maya/plugins/publish/validate_look_sets.py
@@ -73,8 +73,10 @@ class ValidateLookSets(pyblish.api.InstancePlugin):
# check if any objectSets are not present ion the relationships
missing_sets = [s for s in sets if s not in relationships]
if missing_sets:
- for set in missing_sets:
- if '_SET' not in set:
+ for missing_set in missing_sets:
+ cls.log.debug(missing_set)
+
+ if '_SET' not in missing_set:
# A set of this node is not coming along, this is wrong!
cls.log.error("Missing sets '{}' for node "
"'{}'".format(missing_sets, node))
@@ -82,8 +84,8 @@ class ValidateLookSets(pyblish.api.InstancePlugin):
continue
# Ensure the node is in the sets that are collected
- for shaderset, data in relationships.items():
- if shaderset not in sets:
+ for shader_set, data in relationships.items():
+ if shader_set not in sets:
# no need to check for a set if the node
# isn't in it anyway
continue
@@ -94,7 +96,7 @@ class ValidateLookSets(pyblish.api.InstancePlugin):
# The node is not found in the collected set
# relationships
cls.log.error("Missing '{}' in collected set node "
- "'{}'".format(node, shaderset))
+ "'{}'".format(node, shader_set))
invalid.append(node)
continue
diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py
index d95af6ec4c..34337f726f 100644
--- a/openpype/hosts/nuke/api/lib.py
+++ b/openpype/hosts/nuke/api/lib.py
@@ -390,16 +390,19 @@ def create_write_node(name, data, input=None, prenodes=None, review=True):
"inputName": input.name()})
prev_node = nuke.createNode(
"Input", "name {}".format(input.name()))
+ prev_node.hideControlPanel()
else:
# generic input node connected to nothing
prev_node = nuke.createNode(
"Input", "name {}".format("rgba"))
+ prev_node.hideControlPanel()
# creating pre-write nodes `prenodes`
if prenodes:
for name, klass, properties, set_output_to in prenodes:
# create node
now_node = nuke.createNode(klass, "name {}".format(name))
+ now_node.hideControlPanel()
# add data to knob
for k, v in properties:
@@ -421,17 +424,21 @@ def create_write_node(name, data, input=None, prenodes=None, review=True):
for i, node_name in enumerate(set_output_to):
input_node = nuke.createNode(
"Input", "name {}".format(node_name))
+ input_node.hideControlPanel()
connections.append({
"node": nuke.toNode(node_name),
"inputName": node_name})
now_node.setInput(1, input_node)
+
elif isinstance(set_output_to, str):
input_node = nuke.createNode(
"Input", "name {}".format(node_name))
+ input_node.hideControlPanel()
connections.append({
"node": nuke.toNode(set_output_to),
"inputName": set_output_to})
now_node.setInput(0, input_node)
+
else:
now_node.setInput(0, prev_node)
@@ -443,7 +450,7 @@ def create_write_node(name, data, input=None, prenodes=None, review=True):
"inside_{}".format(name),
**_data
)
-
+ write_node.hideControlPanel()
# connect to previous node
now_node.setInput(0, prev_node)
@@ -451,6 +458,7 @@ def create_write_node(name, data, input=None, prenodes=None, review=True):
prev_node = now_node
now_node = nuke.createNode("Output", "name Output1")
+ now_node.hideControlPanel()
# connect to previous node
now_node.setInput(0, prev_node)
@@ -664,8 +672,7 @@ class WorkfileSettings(object):
]
erased_viewers = []
- for v in [n for n in self._nodes
- if "Viewer" in n.Class()]:
+ for v in [n for n in nuke.allNodes(filter="Viewer")]:
v['viewerProcess'].setValue(str(viewer_dict["viewerProcess"]))
if str(viewer_dict["viewerProcess"]) \
not in v['viewerProcess'].value():
diff --git a/openpype/hosts/nuke/plugins/publish/precollect_instances.py b/openpype/hosts/nuke/plugins/publish/precollect_instances.py
index 92f96ea48d..cdb0589525 100644
--- a/openpype/hosts/nuke/plugins/publish/precollect_instances.py
+++ b/openpype/hosts/nuke/plugins/publish/precollect_instances.py
@@ -55,11 +55,6 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin):
families_ak = avalon_knob_data.get("families", [])
families = list()
- if families_ak:
- families.append(families_ak.lower())
-
- families.append(family)
-
# except disabled nodes but exclude backdrops in test
if ("nukenodes" not in family) and (node["disable"].value()):
continue
@@ -81,36 +76,33 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin):
# Add all nodes in group instances.
if node.Class() == "Group":
# only alter families for render family
- if "write" in families_ak:
+ if "write" in families_ak.lower():
target = node["render"].value()
if target == "Use existing frames":
# Local rendering
self.log.info("flagged for no render")
- families.append(family)
elif target == "Local":
# Local rendering
self.log.info("flagged for local render")
families.append("{}.local".format(family))
+ family = families_ak.lower()
elif target == "On farm":
# Farm rendering
self.log.info("flagged for farm render")
instance.data["transfer"] = False
families.append("{}.farm".format(family))
-
- # suffle family to `write` as it is main family
- # this will be changed later on in process
- if "render" in families:
- families.remove("render")
- family = "write"
- elif "prerender" in families:
- families.remove("prerender")
- family = "write"
+ family = families_ak.lower()
node.begin()
for i in nuke.allNodes():
instance.append(i)
node.end()
+ if not families and families_ak and family not in [
+ "render", "prerender"]:
+ families.append(families_ak.lower())
+
+ self.log.debug("__ family: `{}`".format(family))
self.log.debug("__ families: `{}`".format(families))
# Get format
@@ -124,7 +116,9 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin):
anlib.add_publish_knob(node)
# sync workfile version
- if not next((f for f in families
+ _families_test = [family] + families
+ self.log.debug("__ _families_test: `{}`".format(_families_test))
+ if not next((f for f in _families_test
if "prerender" in f),
None) and self.sync_workfile_version:
# get version to instance for integration
diff --git a/openpype/hosts/nuke/plugins/publish/precollect_writes.py b/openpype/hosts/nuke/plugins/publish/precollect_writes.py
index 57303bd42e..5eaac89e84 100644
--- a/openpype/hosts/nuke/plugins/publish/precollect_writes.py
+++ b/openpype/hosts/nuke/plugins/publish/precollect_writes.py
@@ -1,4 +1,5 @@
import os
+import re
import nuke
import pyblish.api
import openpype.api as pype
@@ -14,11 +15,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
hosts = ["nuke", "nukeassist"]
families = ["write"]
- # preset attributes
- sync_workfile_version = True
-
def process(self, instance):
- families = instance.data["families"]
+ _families_test = [instance.data["family"]] + instance.data["families"]
node = None
for x in instance:
@@ -63,7 +61,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
int(last_frame)
)
- if [fm for fm in families
+ if [fm for fm in _families_test
if fm in ["render", "prerender"]]:
if "representations" not in instance.data:
instance.data["representations"] = list()
@@ -91,9 +89,9 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
collected_frames_len))
# this will only run if slate frame is not already
# rendered from previews publishes
- if "slate" in instance.data["families"] \
+ if "slate" in _families_test \
and (frame_length == collected_frames_len) \
- and ("prerender" not in instance.data["families"]):
+ and ("prerender" not in _families_test):
frame_slate_str = "%0{}d".format(
len(str(last_frame))) % (first_frame - 1)
slate_frame = collected_frames[0].replace(
@@ -107,10 +105,17 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
self.log.debug("couldn't collect frames: {}".format(label))
# Add version data to instance
+ colorspace = node["colorspace"].value()
+
+ # remove default part of the string
+ if "default (" in colorspace:
+ colorspace = re.sub(r"default.\(|\)", "", colorspace)
+ self.log.debug("colorspace: `{}`".format(colorspace))
+
version_data = {
"families": [f.replace(".local", "").replace(".farm", "")
- for f in families if "write" not in f],
- "colorspace": node["colorspace"].value(),
+ for f in _families_test if "write" not in f],
+ "colorspace": colorspace
}
group_node = [x for x in instance if x.Class() == "Group"][0]
@@ -135,13 +140,12 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
"frameStartHandle": first_frame,
"frameEndHandle": last_frame,
"outputType": output_type,
- "families": families,
- "colorspace": node["colorspace"].value(),
+ "colorspace": colorspace,
"deadlineChunkSize": deadlineChunkSize,
"deadlinePriority": deadlinePriority
})
- if "prerender" in families:
+ if "prerender" in _families_test:
instance.data.update({
"family": "prerender",
"families": []
@@ -166,6 +170,4 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
"filename": api.get_representation_path(repre_doc)
}]
- self.log.debug("families: {}".format(families))
-
self.log.debug("instance.data: {}".format(instance.data))
diff --git a/openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py b/openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py
index 21afc5313b..8b71aff1ac 100644
--- a/openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py
+++ b/openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py
@@ -5,23 +5,50 @@ import clique
@pyblish.api.log
-class RepairCollectionAction(pyblish.api.Action):
- label = "Repair"
+class RepairActionBase(pyblish.api.Action):
on = "failed"
icon = "wrench"
+ @staticmethod
+ def get_instance(context, plugin):
+ # Get the errored instances
+ failed = []
+ for result in context.data["results"]:
+ if (result["error"] is not None and result["instance"] is not None
+ and result["instance"] not in failed):
+ failed.append(result["instance"])
+
+ # Apply pyblish.logic to get the instances for the plug-in
+ return pyblish.api.instances_by_plugin(failed, plugin)
+
+ def repair_knob(self, instances, state):
+ for instance in instances:
+ files_remove = [os.path.join(instance.data["outputDir"], f)
+ for r in instance.data.get("representations", [])
+ for f in r.get("files", [])
+ ]
+ self.log.info("Files to be removed: {}".format(files_remove))
+ for f in files_remove:
+ os.remove(f)
+ self.log.debug("removing file: {}".format(f))
+ instance[0]["render"].setValue(state)
+ self.log.info("Rendering toggled to `{}`".format(state))
+
+
+class RepairCollectionActionToLocal(RepairActionBase):
+ label = "Repair > rerender with `Local` machine"
+
def process(self, context, plugin):
- self.log.info(context[0][0])
- files_remove = [os.path.join(context[0].data["outputDir"], f)
- for r in context[0].data.get("representations", [])
- for f in r.get("files", [])
- ]
- self.log.info("Files to be removed: {}".format(files_remove))
- for f in files_remove:
- os.remove(f)
- self.log.debug("removing file: {}".format(f))
- context[0][0]["render"].setValue(True)
- self.log.info("Rendering toggled ON")
+ instances = self.get_instance(context, plugin)
+ self.repair_knob(instances, "Local")
+
+
+class RepairCollectionActionToFarm(RepairActionBase):
+ label = "Repair > rerender `On farm` with remote machines"
+
+ def process(self, context, plugin):
+ instances = self.get_instance(context, plugin)
+ self.repair_knob(instances, "On farm")
class ValidateRenderedFrames(pyblish.api.InstancePlugin):
@@ -32,26 +59,28 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
label = "Validate rendered frame"
hosts = ["nuke", "nukestudio"]
- actions = [RepairCollectionAction]
+ actions = [RepairCollectionActionToLocal, RepairCollectionActionToFarm]
+
def process(self, instance):
- for repre in instance.data.get('representations'):
+ for repre in instance.data["representations"]:
- if not repre.get('files'):
+ if not repre.get("files"):
msg = ("no frames were collected, "
"you need to render them")
self.log.error(msg)
raise ValidationException(msg)
collections, remainder = clique.assemble(repre["files"])
- self.log.info('collections: {}'.format(str(collections)))
- self.log.info('remainder: {}'.format(str(remainder)))
+ self.log.info("collections: {}".format(str(collections)))
+ self.log.info("remainder: {}".format(str(remainder)))
collection = collections[0]
frame_length = int(
- instance.data["frameEndHandle"] - instance.data["frameStartHandle"] + 1
+ instance.data["frameEndHandle"]
+ - instance.data["frameStartHandle"] + 1
)
if frame_length != 1:
@@ -65,15 +94,10 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
self.log.error(msg)
raise ValidationException(msg)
- # if len(remainder) != 0:
- # msg = "There are some extra files in folder"
- # self.log.error(msg)
- # raise ValidationException(msg)
-
collected_frames_len = int(len(collection.indexes))
- self.log.info('frame_length: {}'.format(frame_length))
+ self.log.info("frame_length: {}".format(frame_length))
self.log.info(
- 'len(collection.indexes): {}'.format(collected_frames_len)
+ "len(collection.indexes): {}".format(collected_frames_len)
)
if ("slate" in instance.data["families"]) \
@@ -84,6 +108,6 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
"{} missing frames. Use repair to render all frames"
).format(__name__)
- instance.data['collection'] = collection
+ instance.data["collection"] = collection
return
diff --git a/openpype/hosts/tvpaint/api/__init__.py b/openpype/hosts/tvpaint/api/__init__.py
index 068559049b..bd9ef51a76 100644
--- a/openpype/hosts/tvpaint/api/__init__.py
+++ b/openpype/hosts/tvpaint/api/__init__.py
@@ -1,14 +1,15 @@
import os
import logging
-from avalon.tvpaint.communication_server import register_localization_file
-from avalon.tvpaint import pipeline
import avalon.api
import pyblish.api
+from avalon.tvpaint import pipeline
+from avalon.tvpaint.communication_server import register_localization_file
+from .lib import set_context_settings
from openpype.hosts import tvpaint
-log = logging.getLogger("openpype.hosts.tvpaint")
+log = logging.getLogger(__name__)
HOST_DIR = os.path.dirname(os.path.abspath(tvpaint.__file__))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
@@ -34,6 +35,18 @@ def on_instance_toggle(instance, old_value, new_value):
pipeline._write_instances(current_instances)
+def initial_launch():
+ # Setup project settings if it's the template that's launched.
+ # TODO also check for template creation when it's possible to define
+ # templates
+ last_workfile = os.environ.get("AVALON_LAST_WORKFILE")
+ if not last_workfile or os.path.exists(last_workfile):
+ return
+
+ log.info("Setting up project...")
+ set_context_settings()
+
+
def install():
log.info("OpenPype - Installing TVPaint integration")
localization_file = os.path.join(HOST_DIR, "resources", "avalon.loc")
@@ -49,6 +62,8 @@ def install():
if on_instance_toggle not in registered_callbacks:
pyblish.api.register_callback("instanceToggled", on_instance_toggle)
+ avalon.api.on("application.launched", initial_launch)
+
def uninstall():
log.info("OpenPype - Uninstalling TVPaint integration")
diff --git a/openpype/hosts/tvpaint/api/lib.py b/openpype/hosts/tvpaint/api/lib.py
index 4267129fe6..cbc86f7b03 100644
--- a/openpype/hosts/tvpaint/api/lib.py
+++ b/openpype/hosts/tvpaint/api/lib.py
@@ -1,5 +1,8 @@
from PIL import Image
+import avalon.io
+from avalon.tvpaint.lib import execute_george
+
def composite_images(input_image_paths, output_filepath):
"""Composite images in order from passed list.
@@ -18,3 +21,64 @@ def composite_images(input_image_paths, output_filepath):
else:
img_obj.alpha_composite(_img_obj)
img_obj.save(output_filepath)
+
+
+def set_context_settings(asset_doc=None):
+ """Set workfile settings by asset document data.
+
+ Change fps, resolution and frame start/end.
+ """
+ if asset_doc is None:
+ # Use current session asset if not passed
+ asset_doc = avalon.io.find_one({
+ "type": "asset",
+ "name": avalon.io.Session["AVALON_ASSET"]
+ })
+
+ project_doc = avalon.io.find_one({"type": "project"})
+
+ framerate = asset_doc["data"].get("fps")
+ if framerate is None:
+ framerate = project_doc["data"].get("fps")
+
+ if framerate is not None:
+ execute_george(
+ "tv_framerate {} \"timestretch\"".format(framerate)
+ )
+ else:
+ print("Framerate was not found!")
+
+ width_key = "resolutionWidth"
+ height_key = "resolutionHeight"
+
+ width = asset_doc["data"].get(width_key)
+ height = asset_doc["data"].get(height_key)
+ if width is None or height is None:
+ width = project_doc["data"].get(width_key)
+ height = project_doc["data"].get(height_key)
+
+ if width is None or height is None:
+ print("Resolution was not found!")
+ else:
+ execute_george("tv_resizepage {} {} 0".format(width, height))
+
+ frame_start = asset_doc["data"].get("frameStart")
+ frame_end = asset_doc["data"].get("frameEnd")
+
+ if frame_start is None or frame_end is None:
+ print("Frame range was not found!")
+ return
+
+ handles = asset_doc["data"].get("handles") or 0
+ handle_start = asset_doc["data"].get("handleStart")
+ handle_end = asset_doc["data"].get("handleEnd")
+
+ if handle_start is None or handle_end is None:
+ handle_start = handles
+ handle_end = handles
+
+ frame_start -= int(handle_start)
+ frame_end += int(handle_end)
+
+ execute_george("tv_markin {} set".format(frame_start - 1))
+ execute_george("tv_markout {} set".format(frame_end - 1))
diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py
index 68c142c005..cc236734e5 100644
--- a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py
+++ b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py
@@ -34,8 +34,8 @@ class CollectInstances(pyblish.api.ContextPlugin):
instance_data["name"] = name
instance_data["label"] = "{} [{}-{}]".format(
name,
- context.data["sceneFrameStart"],
- context.data["sceneFrameEnd"]
+ context.data["sceneMarkIn"] + 1,
+ context.data["sceneMarkOut"] + 1
)
active = instance_data.get("active", True)
@@ -78,8 +78,16 @@ class CollectInstances(pyblish.api.ContextPlugin):
if instance is None:
continue
- instance.data["frameStart"] = context.data["sceneFrameStart"]
- instance.data["frameEnd"] = context.data["sceneFrameEnd"]
+ any_visible = False
+ for layer in instance.data["layers"]:
+ if layer["visible"]:
+ any_visible = True
+ break
+
+ instance.data["publish"] = any_visible
+
+ instance.data["frameStart"] = context.data["sceneMarkIn"] + 1
+ instance.data["frameEnd"] = context.data["sceneMarkOut"] + 1
self.log.debug("Created instance: {}\n{}".format(
instance, json.dumps(instance.data, indent=4)
@@ -108,7 +116,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
group_id = instance_data["group_id"]
group_layers = []
for layer in layers_data:
- if layer["group_id"] == group_id and layer["visible"]:
+ if layer["group_id"] == group_id:
group_layers.append(layer)
if not group_layers:
diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py b/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py
index e683c66ea9..13c6c9eb78 100644
--- a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py
+++ b/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py
@@ -57,7 +57,10 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
# Collect context from workfile metadata
self.log.info("Collecting workfile context")
+
workfile_context = pipeline.get_current_workfile_context()
+ # Store workfile context to pyblish context
+ context.data["workfile_context"] = workfile_context
if workfile_context:
# Change current context with context from workfile
key_map = (
@@ -67,16 +70,27 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
for env_key, key in key_map:
avalon.api.Session[env_key] = workfile_context[key]
os.environ[env_key] = workfile_context[key]
+ self.log.info("Context changed to: {}".format(workfile_context))
+
+ asset_name = workfile_context["asset"]
+ task_name = workfile_context["task"]
+
else:
+ asset_name = current_context["asset"]
+ task_name = current_context["task"]
# Handle older workfiles or workfiles without metadata
- self.log.warning(
+ self.log.warning((
"Workfile does not contain information about context."
" Using current Session context."
- )
- workfile_context = current_context.copy()
+ ))
- context.data["workfile_context"] = workfile_context
- self.log.info("Context changed to: {}".format(workfile_context))
+ # Store context asset name
+ context.data["asset"] = asset_name
+ self.log.info(
+ "Context is set to Asset: \"{}\" and Task: \"{}\"".format(
+ asset_name, task_name
+ )
+ )
# Collect instances
self.log.info("Collecting instance data from workfile")
@@ -122,36 +136,26 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
width = int(workfile_info_parts.pop(-1))
workfile_path = " ".join(workfile_info_parts).replace("\"", "")
- frame_start, frame_end = self.collect_clip_frames()
+ # Marks return as "{frame - 1} {state} ", example "0 set".
+ result = lib.execute_george("tv_markin")
+ mark_in_frame, mark_in_state, _ = result.split(" ")
+
+ result = lib.execute_george("tv_markout")
+ mark_out_frame, mark_out_state, _ = result.split(" ")
+
scene_data = {
"currentFile": workfile_path,
"sceneWidth": width,
"sceneHeight": height,
"scenePixelAspect": pixel_apsect,
- "sceneFrameStart": frame_start,
- "sceneFrameEnd": frame_end,
"sceneFps": frame_rate,
- "sceneFieldOrder": field_order
+ "sceneFieldOrder": field_order,
+ "sceneMarkIn": int(mark_in_frame),
+ "sceneMarkInState": mark_in_state == "set",
+ "sceneMarkOut": int(mark_out_frame),
+ "sceneMarkOutState": mark_out_state == "set"
}
self.log.debug(
"Scene data: {}".format(json.dumps(scene_data, indent=4))
)
context.data.update(scene_data)
-
- def collect_clip_frames(self):
- clip_info_str = lib.execute_george("tv_clipinfo")
- self.log.debug("Clip info: {}".format(clip_info_str))
- clip_info_items = clip_info_str.split(" ")
- # Color index - not used
- clip_info_items.pop(-1)
- clip_info_items.pop(-1)
-
- mark_out = int(clip_info_items.pop(-1))
- frame_end = mark_out + 1
- clip_info_items.pop(-1)
-
- mark_in = int(clip_info_items.pop(-1))
- frame_start = mark_in + 1
- clip_info_items.pop(-1)
-
- return frame_start, frame_end
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py b/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py
new file mode 100644
index 0000000000..4ce8d5347d
--- /dev/null
+++ b/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py
@@ -0,0 +1,55 @@
+import pyblish.api
+from avalon.tvpaint import pipeline
+
+
+class FixAssetNames(pyblish.api.Action):
+ """Repair the asset names.
+
+ Change instance metadata in the workfile.
+ """
+
+ label = "Repair"
+ icon = "wrench"
+ on = "failed"
+
+ def process(self, context, plugin):
+ context_asset_name = context.data["asset"]
+ old_instance_items = pipeline.list_instances()
+ new_instance_items = []
+ for instance_item in old_instance_items:
+ instance_asset_name = instance_item.get("asset")
+ if (
+ instance_asset_name
+ and instance_asset_name != context_asset_name
+ ):
+ instance_item["asset"] = context_asset_name
+ new_instance_items.append(instance_item)
+ pipeline._write_instances(new_instance_items)
+
+
+class ValidateMissingLayers(pyblish.api.ContextPlugin):
+ """Validate asset name present on instance.
+
+ Asset name on instance should be the same as context's.
+ """
+
+ label = "Validate Asset Names"
+ order = pyblish.api.ValidatorOrder
+ hosts = ["tvpaint"]
+ actions = [FixAssetNames]
+
+ def process(self, context):
+ context_asset_name = context.data["asset"]
+ for instance in context:
+ asset_name = instance.data.get("asset")
+ if asset_name and asset_name == context_asset_name:
+ continue
+
+ instance_label = (
+ instance.data.get("label") or instance.data["name"]
+ )
+ raise AssertionError((
+ "Different asset name on instance than context's."
+ " Instance \"{}\" has asset name: \"{}\""
+ " Context asset name is: \"{}\""
+ ).format(instance_label, asset_name, context_asset_name))
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py
new file mode 100644
index 0000000000..73486d1005
--- /dev/null
+++ b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py
@@ -0,0 +1,64 @@
+import json
+
+import pyblish.api
+from avalon.tvpaint import lib
+
+
+class ValidateMarksRepair(pyblish.api.Action):
+ """Repair the marks."""
+
+ label = "Repair"
+ icon = "wrench"
+ on = "failed"
+
+ def process(self, context, plugin):
+ expected_data = ValidateMarks.get_expected_data(context)
+
+ expected_data["markIn"] -= 1
+ expected_data["markOut"] -= 1
+
+ lib.execute_george("tv_markin {} set".format(expected_data["markIn"]))
+ lib.execute_george(
+ "tv_markout {} set".format(expected_data["markOut"])
+ )
+
+
+class ValidateMarks(pyblish.api.ContextPlugin):
+ """Validate mark in and out are enabled."""
+
+ label = "Validate Marks"
+ order = pyblish.api.ValidatorOrder
+ optional = True
+ actions = [ValidateMarksRepair]
+
+ @staticmethod
+ def get_expected_data(context):
+ return {
+ "markIn": int(context.data["frameStart"]),
+ "markInState": True,
+ "markOut": int(context.data["frameEnd"]),
+ "markOutState": True
+ }
+
+ def process(self, context):
+ current_data = {
+ "markIn": context.data["sceneMarkIn"] + 1,
+ "markInState": context.data["sceneMarkInState"],
+ "markOut": context.data["sceneMarkOut"] + 1,
+ "markOutState": context.data["sceneMarkOutState"]
+ }
+ expected_data = self.get_expected_data(context)
+ invalid = {}
+ for k in current_data.keys():
+ if current_data[k] != expected_data[k]:
+ invalid[k] = {
+ "current": current_data[k],
+ "expected": expected_data[k]
+ }
+
+ if invalid:
+ raise AssertionError(
+ "Marks does not match database:\n{}".format(
+ json.dumps(invalid, sort_keys=True, indent=4)
+ )
+ )
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_project_settings.py b/openpype/hosts/tvpaint/plugins/publish/validate_project_settings.py
index fead3393ae..84c03a9857 100644
--- a/openpype/hosts/tvpaint/plugins/publish/validate_project_settings.py
+++ b/openpype/hosts/tvpaint/plugins/publish/validate_project_settings.py
@@ -13,8 +13,6 @@ class ValidateProjectSettings(pyblish.api.ContextPlugin):
def process(self, context):
scene_data = {
- "frameStart": context.data.get("sceneFrameStart"),
- "frameEnd": context.data.get("sceneFrameEnd"),
"fps": context.data.get("sceneFps"),
"resolutionWidth": context.data.get("sceneWidth"),
"resolutionHeight": context.data.get("sceneHeight"),
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py
index 7c1032fcad..cc664d8030 100644
--- a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py
+++ b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py
@@ -13,7 +13,15 @@ class ValidateWorkfileProjectName(pyblish.api.ContextPlugin):
order = pyblish.api.ValidatorOrder
def process(self, context):
- workfile_context = context.data["workfile_context"]
+ workfile_context = context.data.get("workfile_context")
+ # If workfile context is missing than project is matching to
+ # `AVALON_PROJECT` value for 100%
+ if not workfile_context:
+ self.log.info(
+ "Workfile context (\"workfile_context\") is not filled."
+ )
+ return
+
workfile_project_name = workfile_context["project"]
env_project_name = os.environ["AVALON_PROJECT"]
if workfile_project_name == env_project_name:
diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py
index ce8f8ec2b6..f46c81bf7a 100644
--- a/openpype/lib/__init__.py
+++ b/openpype/lib/__init__.py
@@ -6,11 +6,21 @@ import sys
import os
import site
-# add Python version specific vendor folder
-site.addsitedir(
- os.path.join(
- os.getenv("OPENPYPE_ROOT", ""),
- "vendor", "python", "python_{}".format(sys.version[0])))
+# Add Python version specific vendor folder
+python_version_dir = os.path.join(
+ os.getenv("OPENPYPE_REPOS_ROOT", ""),
+ "openpype", "vendor", "python", "python_{}".format(sys.version[0])
+)
+# Prepend path in sys paths
+sys.path.insert(0, python_version_dir)
+site.addsitedir(python_version_dir)
+
+
+from .env_tools import (
+ env_value_to_bool,
+ get_paths_from_environ,
+ get_global_environments
+)
from .terminal import Terminal
from .execute import (
@@ -33,10 +43,11 @@ from .anatomy import (
from .config import get_datetime_data
-from .env_tools import (
- env_value_to_bool,
- get_paths_from_environ,
- get_global_environments
+from .vendor_bin_utils import (
+ get_vendor_bin_path,
+ get_oiio_tools_path,
+ get_ffmpeg_tool_path,
+ ffprobe_streams
)
from .python_module_tools import (
@@ -116,11 +127,6 @@ from .path_tools import (
get_last_version_from_path
)
-from .ffmpeg_utils import (
- get_ffmpeg_tool_path,
- ffprobe_streams
-)
-
from .editorial import (
is_overlapping_otio_ranges,
otio_range_to_frame_range,
@@ -143,6 +149,11 @@ __all__ = [
"get_paths_from_environ",
"get_global_environments",
+ "get_vendor_bin_path",
+ "get_oiio_tools_path",
+ "get_ffmpeg_tool_path",
+ "ffprobe_streams",
+
"modules_from_path",
"recursive_bases_from_class",
"classes_from_module",
@@ -199,9 +210,6 @@ __all__ = [
"get_version_from_path",
"get_last_version_from_path",
- "ffprobe_streams",
- "get_ffmpeg_tool_path",
-
"terminal",
"merge_dict",
diff --git a/openpype/lib/env_tools.py b/openpype/lib/env_tools.py
index 025c13a322..ede14e00b2 100644
--- a/openpype/lib/env_tools.py
+++ b/openpype/lib/env_tools.py
@@ -1,5 +1,4 @@
import os
-from openpype.settings import get_environments
def env_value_to_bool(env_key=None, value=None, default=False):
@@ -89,6 +88,7 @@ def get_global_environments(env=None):
"""
import acre
from openpype.modules import ModulesManager
+ from openpype.settings import get_environments
if env is None:
env = {}
diff --git a/openpype/lib/import_utils.py b/openpype/lib/import_utils.py
index 5fca0ae5f9..4e72618803 100644
--- a/openpype/lib/import_utils.py
+++ b/openpype/lib/import_utils.py
@@ -8,7 +8,7 @@ log = Logger().get_logger(__name__)
def discover_host_vendor_module(module_name):
host = os.environ["AVALON_APP"]
- pype_root = os.environ["OPENPYPE_ROOT"]
+ pype_root = os.environ["OPENPYPE_REPOS_ROOT"]
main_module = module_name.split(".")[0]
module_path = os.path.join(
pype_root, "hosts", host, "vendor", main_module)
diff --git a/openpype/lib/plugin_tools.py b/openpype/lib/plugin_tools.py
index 5c52088493..9a2d30d1a7 100644
--- a/openpype/lib/plugin_tools.py
+++ b/openpype/lib/plugin_tools.py
@@ -9,6 +9,7 @@ import tempfile
from .execute import run_subprocess
from .profiles_filtering import filter_profiles
+from .vendor_bin_utils import get_oiio_tools_path
from openpype.settings import get_project_settings
@@ -235,7 +236,7 @@ def oiio_supported():
Returns:
(bool)
"""
- oiio_path = os.getenv("OPENPYPE_OIIO_PATH", "")
+ oiio_path = get_oiio_tools_path()
if not oiio_path or not os.path.exists(oiio_path):
log.debug("OIIOTool is not configured or not present at {}".
format(oiio_path))
@@ -269,7 +270,7 @@ def decompress(target_dir, file_url,
(int(input_frame_end) > int(input_frame_start))
oiio_cmd = []
- oiio_cmd.append(os.getenv("OPENPYPE_OIIO_PATH"))
+ oiio_cmd.append(get_oiio_tools_path())
oiio_cmd.append("--compression none")
@@ -328,7 +329,7 @@ def should_decompress(file_url):
"""
if oiio_supported():
output = run_subprocess([
- os.getenv("OPENPYPE_OIIO_PATH"),
+ get_oiio_tools_path(),
"--info", "-v", file_url])
return "compression: \"dwaa\"" in output or \
"compression: \"dwab\"" in output
diff --git a/openpype/lib/pype_info.py b/openpype/lib/pype_info.py
index 93d669eb0d..c56782be9e 100644
--- a/openpype/lib/pype_info.py
+++ b/openpype/lib/pype_info.py
@@ -28,7 +28,7 @@ def get_pype_info():
"version": get_pype_version(),
"version_type": version_type,
"executable": executable_args[-1],
- "pype_root": os.environ["OPENPYPE_ROOT"],
+ "pype_root": os.environ["OPENPYPE_REPOS_ROOT"],
"mongo_url": os.environ["OPENPYPE_MONGO"]
}
diff --git a/openpype/lib/ffmpeg_utils.py b/openpype/lib/vendor_bin_utils.py
similarity index 50%
rename from openpype/lib/ffmpeg_utils.py
rename to openpype/lib/vendor_bin_utils.py
index ba9f24c5d7..3b923cb608 100644
--- a/openpype/lib/ffmpeg_utils.py
+++ b/openpype/lib/vendor_bin_utils.py
@@ -1,33 +1,60 @@
import os
import logging
import json
+import platform
import subprocess
-from . import get_paths_from_environ
-
log = logging.getLogger("FFmpeg utils")
-def get_ffmpeg_tool_path(tool="ffmpeg"):
- """Find path to ffmpeg tool in FFMPEG_PATH paths.
+def get_vendor_bin_path(bin_app):
+ """Path to OpenPype vendorized binaries.
- Function looks for tool in paths set in FFMPEG_PATH environment. If tool
- exists then returns it's full path.
+ Vendorized executables are expected in specific hierarchy inside build or
+ in code source.
+
+ "{OPENPYPE_ROOT}/vendor/bin/{name of vendorized app}/{platform}"
Args:
- tool (string): tool name
+ bin_app (str): Name of vendorized application.
Returns:
- (str): tool name itself when tool path was not found. (FFmpeg path
- may be set in PATH environment variable)
+ str: Path to vendorized binaries folder.
"""
- dir_paths = get_paths_from_environ("FFMPEG_PATH")
- for dir_path in dir_paths:
- for file_name in os.listdir(dir_path):
- base, _ext = os.path.splitext(file_name)
- if base.lower() == tool.lower():
- return os.path.join(dir_path, tool)
- return tool
+ return os.path.join(
+ os.environ["OPENPYPE_ROOT"],
+ "vendor",
+ "bin",
+ bin_app,
+ platform.system().lower()
+ )
+
+
+def get_oiio_tools_path(tool="oiiotool"):
+ """Path to vendorized OpenImageIO tool executables.
+
+ Args:
+ tool (string): Tool name (oiiotool, maketx, ...).
+ Default is "oiiotool".
+ """
+ oiio_dir = get_vendor_bin_path("oiio")
+ return os.path.join(oiio_dir, tool)
+
+
+def get_ffmpeg_tool_path(tool="ffmpeg"):
+ """Path to vendorized FFmpeg executable.
+
+ Args:
+ tool (string): Tool name (ffmpeg, ffprobe, ...).
+ Default is "ffmpeg".
+
+ Returns:
+ str: Full path to ffmpeg executable.
+ """
+ ffmpeg_dir = get_vendor_bin_path("ffmpeg")
+ if platform.system().lower() == "windows":
+ ffmpeg_dir = os.path.join(ffmpeg_dir, "bin")
+ return os.path.join(ffmpeg_dir, tool)
def ffprobe_streams(path_to_file, logger=None):
diff --git a/openpype/modules/__init__.py b/openpype/modules/__init__.py
index 4b120647e1..d7c6d99fe6 100644
--- a/openpype/modules/__init__.py
+++ b/openpype/modules/__init__.py
@@ -41,7 +41,7 @@ from .log_viewer import LogViewModule
from .muster import MusterModule
from .deadline import DeadlineModule
from .standalonepublish_action import StandAlonePublishAction
-from .sync_server import SyncServer
+from .sync_server import SyncServerModule
__all__ = (
@@ -82,5 +82,5 @@ __all__ = (
"DeadlineModule",
"StandAlonePublishAction",
- "SyncServer"
+ "SyncServerModule"
)
diff --git a/openpype/modules/clockify/clockify_api.py b/openpype/modules/clockify/clockify_api.py
index 29de5de0c9..3f0a9799b4 100644
--- a/openpype/modules/clockify/clockify_api.py
+++ b/openpype/modules/clockify/clockify_api.py
@@ -34,7 +34,12 @@ class ClockifyAPI:
self.request_counter = 0
self.request_time = time.time()
- self.secure_registry = OpenPypeSecureRegistry("clockify")
+ self._secure_registry = None
+
+ @property
+ def secure_registry(self):
+ if self._secure_registry is None:
+ self._secure_registry = OpenPypeSecureRegistry("clockify")
+ return self._secure_registry
@property
def headers(self):
diff --git a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py
new file mode 100644
index 0000000000..8248bf532e
--- /dev/null
+++ b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py
@@ -0,0 +1,365 @@
+import json
+
+from openpype.api import ProjectSettings
+
+from openpype.modules.ftrack.lib import ServerAction
+from openpype.modules.ftrack.lib.avalon_sync import (
+ get_pype_attr,
+ CUST_ATTR_AUTO_SYNC
+)
+
+
+class PrepareProjectServer(ServerAction):
+ """Prepare project attributes in Anatomy."""
+
+ identifier = "prepare.project.server"
+ label = "OpenPype Admin"
+ variant = "- Prepare Project (Server)"
+ description = "Set basic attributes on the project"
+
+ settings_key = "prepare_project"
+
+ role_list = ["Pypeclub", "Administrator", "Project Manager"]
+
+ # Key to store info about triggering create folder structure
+ item_splitter = {"type": "label", "value": "---"}
+
+ def discover(self, session, entities, event):
+ """Show only on project."""
+ if (
+ len(entities) != 1
+ or entities[0].entity_type.lower() != "project"
+ ):
+ return False
+
+ return self.valid_roles(session, entities, event)
+
+ def interface(self, session, entities, event):
+ if event['data'].get('values', {}):
+ return
+
+ # Inform user that this may take a while
+ self.show_message(event, "Preparing data... Please wait", True)
+ self.log.debug("Preparing data which will be shown")
+
+ self.log.debug("Loading custom attributes")
+
+ project_entity = entities[0]
+ project_name = project_entity["full_name"]
+
+ try:
+ project_settings = ProjectSettings(project_name)
+ except ValueError:
+ return {
+ "message": "Project is not synchronized yet",
+ "success": False
+ }
+
+ project_anatom_settings = project_settings["project_anatomy"]
+ root_items = self.prepare_root_items(project_anatom_settings)
+
+ ca_items, multiselect_enumerators = (
+ self.prepare_custom_attribute_items(project_anatom_settings)
+ )
+
+ self.log.debug("Heavy items are ready. Preparing last items group.")
+
+ title = "Prepare Project"
+ items = []
+
+ # Add root items
+ items.extend(root_items)
+
+ items.append(self.item_splitter)
+ items.append({
+ "type": "label",
+ "value": "<b>Set basic Attributes:</b>"
+ })
+
+ items.extend(ca_items)
+
+ # This item will be last (before enumerators)
+ # - sets value of auto synchronization
+ auto_sync_name = "avalon_auto_sync"
+ auto_sync_value = project_entity["custom_attributes"].get(
+ CUST_ATTR_AUTO_SYNC, False
+ )
+ auto_sync_item = {
+ "name": auto_sync_name,
+ "type": "boolean",
+ "value": auto_sync_value,
+ "label": "AutoSync to Avalon"
+ }
+ # Add autosync attribute
+ items.append(auto_sync_item)
+
+ # Add enumerator items at the end
+ for item in multiselect_enumerators:
+ items.append(item)
+
+ return {
+ "items": items,
+ "title": title
+ }
+
+ def prepare_root_items(self, project_anatom_settings):
+ self.log.debug("Root items preparation begins.")
+
+ root_items = []
+ root_items.append({
+ "type": "label",
+ "value": "<b>Check your Project root settings</b><br/>"
+ })
+ root_items.append({
+ "type": "label",
+ "value": (
+ "<b>NOTE:</b> Roots are crucial for path filling"
+ " (and creating folder structure).<br/>"
+ )
+ })
+ root_items.append({
+ "type": "label",
+ "value": (
+ "<b>WARNING:</b> Do not change roots on running project,"
+ " that will cause workflow issues.<br/>"
+ )
+ })
+
+ empty_text = "Enter root path here..."
+
+ roots_entity = project_anatom_settings["roots"]
+ for root_name, root_entity in roots_entity.items():
+ root_items.append(self.item_splitter)
+ root_items.append({
+ "type": "label",
+ "value": "Root: \"{}\"".format(root_name)
+ })
+ for platform_name, value_entity in root_entity.items():
+ root_items.append({
+ "label": platform_name,
+ "name": "__root__{}__{}".format(root_name, platform_name),
+ "type": "text",
+ "value": value_entity.value,
+ "empty_text": empty_text
+ })
+
+ root_items.append({
+ "type": "hidden",
+ "name": "__rootnames__",
+ "value": json.dumps(list(roots_entity.keys()))
+ })
+
+ self.log.debug("Root items preparation ended.")
+ return root_items
+
+ def _attributes_to_set(self, project_anatom_settings):
+ attributes_to_set = {}
+
+ attribute_values_by_key = {}
+ for key, entity in project_anatom_settings["attributes"].items():
+ attribute_values_by_key[key] = entity.value
+
+ cust_attrs, hier_cust_attrs = get_pype_attr(self.session, True)
+
+ for attr in hier_cust_attrs:
+ key = attr["key"]
+ if key.startswith("avalon_"):
+ continue
+ attributes_to_set[key] = {
+ "label": attr["label"],
+ "object": attr,
+ "default": attribute_values_by_key.get(key)
+ }
+
+ for attr in cust_attrs:
+ if attr["entity_type"].lower() != "show":
+ continue
+ key = attr["key"]
+ if key.startswith("avalon_"):
+ continue
+ attributes_to_set[key] = {
+ "label": attr["label"],
+ "object": attr,
+ "default": attribute_values_by_key.get(key)
+ }
+
+ # Sort by label
+ attributes_to_set = dict(sorted(
+ attributes_to_set.items(),
+ key=lambda x: x[1]["label"]
+ ))
+ return attributes_to_set
+
+ def prepare_custom_attribute_items(self, project_anatom_settings):
+ items = []
+ multiselect_enumerators = []
+ attributes_to_set = self._attributes_to_set(project_anatom_settings)
+
+ self.log.debug("Preparing interface for keys: \"{}\"".format(
+ str([key for key in attributes_to_set])
+ ))
+
+ for key, in_data in attributes_to_set.items():
+ attr = in_data["object"]
+
+ # initial item definition
+ item = {
+ "name": key,
+ "label": in_data["label"]
+ }
+
+ # cust attr type - may have different visualization
+ type_name = attr["type"]["name"].lower()
+ easy_types = ["text", "boolean", "date", "number"]
+
+ easy_type = False
+ if type_name in easy_types:
+ easy_type = True
+
+ elif type_name == "enumerator":
+
+ attr_config = json.loads(attr["config"])
+ attr_config_data = json.loads(attr_config["data"])
+
+ if attr_config["multiSelect"] is True:
+ multiselect_enumerators.append(self.item_splitter)
+ multiselect_enumerators.append({
+ "type": "label",
+ "value": in_data["label"]
+ })
+
+ default = in_data["default"]
+ names = []
+ for option in sorted(
+ attr_config_data, key=lambda x: x["menu"]
+ ):
+ name = option["value"]
+ new_name = "__{}__{}".format(key, name)
+ names.append(new_name)
+ item = {
+ "name": new_name,
+ "type": "boolean",
+ "label": "- {}".format(option["menu"])
+ }
+ if default:
+ if isinstance(default, (list, tuple)):
+ if name in default:
+ item["value"] = True
+ else:
+ if name == default:
+ item["value"] = True
+
+ multiselect_enumerators.append(item)
+
+ multiselect_enumerators.append({
+ "type": "hidden",
+ "name": "__hidden__{}".format(key),
+ "value": json.dumps(names)
+ })
+ else:
+ easy_type = True
+ item["data"] = attr_config_data
+
+ else:
+ self.log.warning((
+ "Custom attribute \"{}\" has type \"{}\"."
+ " I don't know how to handle"
+ ).format(key, type_name))
+ items.append({
+ "type": "label",
+ "value": (
+ "!!! Can't handle Custom attribute type \"{}\""
+ " (key: \"{}\")"
+ ).format(type_name, key)
+ })
+
+ if easy_type:
+ item["type"] = type_name
+
+ # default value in interface
+ default = in_data["default"]
+ if default is not None:
+ item["value"] = default
+
+ items.append(item)
+
+ return items, multiselect_enumerators
+
+ def launch(self, session, entities, event):
+ if not event['data'].get('values', {}):
+ return
+
+ in_data = event['data']['values']
+
+ root_values = {}
+ root_key = "__root__"
+ for key in tuple(in_data.keys()):
+ if key.startswith(root_key):
+ _key = key[len(root_key):]
+ root_values[_key] = in_data.pop(key)
+
+ root_names = in_data.pop("__rootnames__", None)
+ root_data = {}
+ for root_name in json.loads(root_names):
+ root_data[root_name] = {}
+ for key, value in tuple(root_values.items()):
+ prefix = "{}__".format(root_name)
+ if not key.startswith(prefix):
+ continue
+
+ _key = key[len(prefix):]
+ root_data[root_name][_key] = value
+
+ # Find hidden items for multiselect enumerators
+ keys_to_process = []
+ for key in in_data:
+ if key.startswith("__hidden__"):
+ keys_to_process.append(key)
+
+ self.log.debug("Preparing data for Multiselect Enumerators")
+ enumerators = {}
+ for key in keys_to_process:
+ new_key = key.replace("__hidden__", "")
+ enumerator_items = in_data.pop(key)
+ enumerators[new_key] = json.loads(enumerator_items)
+
+ # find values set for multiselect enumerator
+ for key, enumerator_items in enumerators.items():
+ in_data[key] = []
+
+ name = "__{}__".format(key)
+
+ for item in enumerator_items:
+ value = in_data.pop(item)
+ if value is True:
+ new_key = item.replace(name, "")
+ in_data[key].append(new_key)
+
+ self.log.debug("Setting Custom Attribute values")
+
+ project_name = entities[0]["full_name"]
+ project_settings = ProjectSettings(project_name)
+ project_anatomy_settings = project_settings["project_anatomy"]
+ project_anatomy_settings["roots"] = root_data
+
+ custom_attribute_values = {}
+ attributes_entity = project_anatomy_settings["attributes"]
+ for key, value in in_data.items():
+ if key not in attributes_entity:
+ custom_attribute_values[key] = value
+ else:
+ attributes_entity[key] = value
+
+ project_settings.save()
+
+ entity = entities[0]
+ for key, value in custom_attribute_values.items():
+ entity["custom_attributes"][key] = value
+ self.log.debug("- Key \"{}\" set to \"{}\"".format(key, value))
+
+ return True
+
+
+def register(session):
+ '''Register plugin. Called when used as a plugin.'''
+ PrepareProjectServer(session).register()
diff --git a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py
index 7f674310fc..bd25f995fe 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py
+++ b/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py
@@ -1,31 +1,34 @@
-import os
import json
-from openpype.modules.ftrack.lib import BaseAction, statics_icon
-from openpype.api import config, Anatomy
-from openpype.modules.ftrack.lib.avalon_sync import get_pype_attr
+from openpype.api import ProjectSettings
+
+from openpype.modules.ftrack.lib import (
+ BaseAction,
+ statics_icon
+)
+from openpype.modules.ftrack.lib.avalon_sync import (
+ get_pype_attr,
+ CUST_ATTR_AUTO_SYNC
+)
-class PrepareProject(BaseAction):
- '''Edit meta data action.'''
+class PrepareProjectLocal(BaseAction):
+ """Prepare project attributes in Anatomy."""
- #: Action identifier.
- identifier = 'prepare.project'
- #: Action label.
- label = 'Prepare Project'
- #: Action description.
- description = 'Set basic attributes on the project'
- #: roles that are allowed to register this action
+ identifier = "prepare.project.local"
+ label = "Prepare Project"
+ description = "Set basic attributes on the project"
icon = statics_icon("ftrack", "action_icons", "PrepareProject.svg")
+ role_list = ["Pypeclub", "Administrator", "Project Manager"]
+
settings_key = "prepare_project"
# Key to store info about trigerring create folder structure
- create_project_structure_key = "create_folder_structure"
- item_splitter = {'type': 'label', 'value': '---'}
+ item_splitter = {"type": "label", "value": "---"}
def discover(self, session, entities, event):
- ''' Validation '''
+ """Show only on project."""
if (
len(entities) != 1
or entities[0].entity_type.lower() != "project"
@@ -44,27 +47,22 @@ class PrepareProject(BaseAction):
self.log.debug("Loading custom attributes")
- project_name = entities[0]["full_name"]
+ project_entity = entities[0]
+ project_name = project_entity["full_name"]
- project_defaults = (
- config.get_presets(project_name)
- .get("ftrack", {})
- .get("project_defaults", {})
- )
-
- anatomy = Anatomy(project_name)
- if not anatomy.roots:
+ try:
+ project_settings = ProjectSettings(project_name)
+ except ValueError:
return {
- "success": False,
- "message": (
- "Have issues with loading Roots for project \"{}\"."
- ).format(anatomy.project_name)
+ "message": "Project is not synchronized yet",
+ "success": False
}
- root_items = self.prepare_root_items(anatomy)
+ project_anatom_settings = project_settings["project_anatomy"]
+ root_items = self.prepare_root_items(project_anatom_settings)
ca_items, multiselect_enumerators = (
- self.prepare_custom_attribute_items(project_defaults)
+ self.prepare_custom_attribute_items(project_anatom_settings)
)
self.log.debug("Heavy items are ready. Preparing last items group.")
@@ -74,19 +72,6 @@ class PrepareProject(BaseAction):
# Add root items
items.extend(root_items)
- items.append(self.item_splitter)
-
- # Ask if want to trigger Action Create Folder Structure
- items.append({
- "type": "label",
- "value": "Want to create basic Folder Structure?
"
- })
- items.append({
- "name": self.create_project_structure_key,
- "type": "boolean",
- "value": False,
- "label": "Check if Yes"
- })
items.append(self.item_splitter)
items.append({
@@ -99,10 +84,13 @@ class PrepareProject(BaseAction):
# This item will be last (before enumerators)
# - sets value of auto synchronization
auto_sync_name = "avalon_auto_sync"
+ auto_sync_value = project_entity["custom_attributes"].get(
+ CUST_ATTR_AUTO_SYNC, False
+ )
auto_sync_item = {
"name": auto_sync_name,
"type": "boolean",
- "value": project_defaults.get(auto_sync_name, False),
+ "value": auto_sync_value,
"label": "AutoSync to Avalon"
}
# Add autosync attribute
@@ -117,13 +105,10 @@ class PrepareProject(BaseAction):
"title": title
}
- def prepare_root_items(self, anatomy):
- root_items = []
+ def prepare_root_items(self, project_anatom_settings):
self.log.debug("Root items preparation begins.")
- root_names = anatomy.root_names()
- roots = anatomy.roots
-
+ root_items = []
root_items.append({
"type": "label",
"value": "Check your Project root settings
"
@@ -143,85 +128,40 @@ class PrepareProject(BaseAction):
)
})
- default_roots = anatomy.roots
- while isinstance(default_roots, dict):
- key = tuple(default_roots.keys())[0]
- default_roots = default_roots[key]
-
empty_text = "Enter root path here..."
- # Root names is None when anatomy templates contain "{root}"
- all_platforms = ["windows", "linux", "darwin"]
- if root_names is None:
- root_items.append(self.item_splitter)
- # find first possible key
- for platform in all_platforms:
- value = default_roots.raw_data.get(platform) or ""
- root_items.append({
- "label": platform,
- "name": "__root__{}".format(platform),
- "type": "text",
- "value": value,
- "empty_text": empty_text
- })
- return root_items
-
- root_name_data = {}
- missing_roots = []
- for root_name in root_names:
- root_name_data[root_name] = {}
- if not isinstance(roots, dict):
- missing_roots.append(root_name)
- continue
-
- root_item = roots.get(root_name)
- if not root_item:
- missing_roots.append(root_name)
- continue
-
- for platform in all_platforms:
- root_name_data[root_name][platform] = (
- root_item.raw_data.get(platform) or ""
- )
-
- if missing_roots:
- default_values = {}
- for platform in all_platforms:
- default_values[platform] = (
- default_roots.raw_data.get(platform) or ""
- )
-
- for root_name in missing_roots:
- root_name_data[root_name] = default_values
-
- root_names = list(root_name_data.keys())
- root_items.append({
- "type": "hidden",
- "name": "__rootnames__",
- "value": json.dumps(root_names)
- })
-
- for root_name, values in root_name_data.items():
+ roots_entity = project_anatom_settings["roots"]
+ for root_name, root_entity in roots_entity.items():
root_items.append(self.item_splitter)
root_items.append({
"type": "label",
"value": "Root: \"{}\"".format(root_name)
})
- for platform, value in values.items():
+ for platform_name, value_entity in root_entity.items():
root_items.append({
- "label": platform,
- "name": "__root__{}{}".format(root_name, platform),
+ "label": platform_name,
+ "name": "__root__{}__{}".format(root_name, platform_name),
"type": "text",
- "value": value,
+ "value": value_entity.value,
"empty_text": empty_text
})
+ root_items.append({
+ "type": "hidden",
+ "name": "__rootnames__",
+ "value": json.dumps(list(roots_entity.keys()))
+ })
+
self.log.debug("Root items preparation ended.")
return root_items
- def _attributes_to_set(self, project_defaults):
+ def _attributes_to_set(self, project_anatom_settings):
attributes_to_set = {}
+ attribute_values_by_key = {}
+ for key, entity in project_anatom_settings["attributes"].items():
+ attribute_values_by_key[key] = entity.value
+
cust_attrs, hier_cust_attrs = get_pype_attr(self.session, True)
for attr in hier_cust_attrs:
@@ -231,7 +171,7 @@ class PrepareProject(BaseAction):
attributes_to_set[key] = {
"label": attr["label"],
"object": attr,
- "default": project_defaults.get(key)
+ "default": attribute_values_by_key.get(key)
}
for attr in cust_attrs:
@@ -243,7 +183,7 @@ class PrepareProject(BaseAction):
attributes_to_set[key] = {
"label": attr["label"],
"object": attr,
- "default": project_defaults.get(key)
+ "default": attribute_values_by_key.get(key)
}
# Sort by label
@@ -253,10 +193,10 @@ class PrepareProject(BaseAction):
))
return attributes_to_set
- def prepare_custom_attribute_items(self, project_defaults):
+ def prepare_custom_attribute_items(self, project_anatom_settings):
items = []
multiselect_enumerators = []
- attributes_to_set = self._attributes_to_set(project_defaults)
+ attributes_to_set = self._attributes_to_set(project_anatom_settings)
self.log.debug("Preparing interface for keys: \"{}\"".format(
str([key for key in attributes_to_set])
@@ -363,24 +303,15 @@ class PrepareProject(BaseAction):
root_names = in_data.pop("__rootnames__", None)
root_data = {}
- if root_names:
- for root_name in json.loads(root_names):
- root_data[root_name] = {}
- for key, value in tuple(root_values.items()):
- if key.startswith(root_name):
- _key = key[len(root_name):]
- root_data[root_name][_key] = value
+ for root_name in json.loads(root_names):
+ root_data[root_name] = {}
+ for key, value in tuple(root_values.items()):
+ prefix = "{}__".format(root_name)
+ if not key.startswith(prefix):
+ continue
- else:
- for key, value in root_values.items():
- root_data[key] = value
-
- # TODO implement creating of anatomy for new projects
- # project_name = entities[0]["full_name"]
- # anatomy = Anatomy(project_name)
-
- # pop out info about creating project structure
- create_proj_struct = in_data.pop(self.create_project_structure_key)
+ _key = key[len(prefix):]
+ root_data[root_name][_key] = value
# Find hidden items for multiselect enumerators
keys_to_process = []
@@ -407,54 +338,31 @@ class PrepareProject(BaseAction):
new_key = item.replace(name, "")
in_data[key].append(new_key)
- self.log.debug("Setting Custom Attribute values:")
- entity = entities[0]
+ self.log.debug("Setting Custom Attribute values")
+
+ project_name = entities[0]["full_name"]
+ project_settings = ProjectSettings(project_name)
+ project_anatomy_settings = project_settings["project_anatomy"]
+ project_anatomy_settings["roots"] = root_data
+
+ custom_attribute_values = {}
+ attributes_entity = project_anatomy_settings["attributes"]
for key, value in in_data.items():
+ if key not in attributes_entity:
+ custom_attribute_values[key] = value
+ else:
+ attributes_entity[key] = value
+
+ project_settings.save()
+
+ entity = entities[0]
+ for key, value in custom_attribute_values.items():
entity["custom_attributes"][key] = value
self.log.debug("- Key \"{}\" set to \"{}\"".format(key, value))
- session.commit()
-
- # Create project structure
- self.create_project_specific_config(entities[0]["full_name"], in_data)
-
- # Trigger Create Project Structure action
- if create_proj_struct is True:
- self.trigger_action("create.project.structure", event)
-
return True
- def create_project_specific_config(self, project_name, json_data):
- self.log.debug("*** Creating project specifig configs ***")
- project_specific_path = project_overrides_dir_path(project_name)
- if not os.path.exists(project_specific_path):
- os.makedirs(project_specific_path)
- self.log.debug((
- "Project specific config folder for project \"{}\" created."
- ).format(project_name))
-
- # Presets ####################################
- self.log.debug("--- Processing Presets Begins: ---")
-
- project_defaults_dir = os.path.normpath(os.path.join(
- project_specific_path, "presets", "ftrack"
- ))
- project_defaults_path = os.path.normpath(os.path.join(
- project_defaults_dir, "project_defaults.json"
- ))
- # Create folder if not exist
- if not os.path.exists(project_defaults_dir):
- self.log.debug("Creating Ftrack Presets folder: \"{}\"".format(
- project_defaults_dir
- ))
- os.makedirs(project_defaults_dir)
-
- with open(project_defaults_path, 'w') as file_stream:
- json.dump(json_data, file_stream, indent=4)
-
- self.log.debug("*** Creating project specifig configs Finished ***")
-
def register(session):
'''Register plugin. Called when used as an plugin.'''
- PrepareProject(session).register()
+ PrepareProjectLocal(session).register()
diff --git a/openpype/modules/ftrack/ftrack_module.py b/openpype/modules/ftrack/ftrack_module.py
index cd383cbdc6..d242268048 100644
--- a/openpype/modules/ftrack/ftrack_module.py
+++ b/openpype/modules/ftrack/ftrack_module.py
@@ -42,7 +42,17 @@ class FtrackModule(
ftrack_settings = settings[self.name]
self.enabled = ftrack_settings["enabled"]
- self.ftrack_url = ftrack_settings["ftrack_server"].strip("/ ")
+ # Add http scheme
+ ftrack_url = ftrack_settings["ftrack_server"].strip("/ ")
+ if ftrack_url:
+ if "http" not in ftrack_url:
+ ftrack_url = "https://" + ftrack_url
+
+ # Check if "ftrackapp.com" is part of url
+ if "ftrackapp.com" not in ftrack_url:
+ ftrack_url = ftrack_url + ".ftrackapp.com"
+
+ self.ftrack_url = ftrack_url
current_dir = os.path.dirname(os.path.abspath(__file__))
server_event_handlers_paths = [
diff --git a/openpype/modules/ftrack/lib/credentials.py b/openpype/modules/ftrack/lib/credentials.py
index 2d719347e7..4e29e66382 100644
--- a/openpype/modules/ftrack/lib/credentials.py
+++ b/openpype/modules/ftrack/lib/credentials.py
@@ -15,7 +15,10 @@ API_KEY_KEY = "api_key"
def get_ftrack_hostname(ftrack_server=None):
if not ftrack_server:
- ftrack_server = os.environ["FTRACK_SERVER"]
+ ftrack_server = os.environ.get("FTRACK_SERVER")
+
+ if not ftrack_server:
+ return None
if "//" not in ftrack_server:
ftrack_server = "//" + ftrack_server
@@ -29,17 +32,24 @@ def _get_ftrack_secure_key(hostname, key):
def get_credentials(ftrack_server=None):
+ output = {
+ USERNAME_KEY: None,
+ API_KEY_KEY: None
+ }
hostname = get_ftrack_hostname(ftrack_server)
+ if not hostname:
+ return output
+
username_name = _get_ftrack_secure_key(hostname, USERNAME_KEY)
api_key_name = _get_ftrack_secure_key(hostname, API_KEY_KEY)
username_registry = OpenPypeSecureRegistry(username_name)
api_key_registry = OpenPypeSecureRegistry(api_key_name)
- return {
- USERNAME_KEY: username_registry.get_item(USERNAME_KEY, None),
- API_KEY_KEY: api_key_registry.get_item(API_KEY_KEY, None)
- }
+ output[USERNAME_KEY] = username_registry.get_item(USERNAME_KEY, None)
+ output[API_KEY_KEY] = api_key_registry.get_item(API_KEY_KEY, None)
+
+ return output
def save_credentials(username, api_key, ftrack_server=None):
@@ -77,9 +87,9 @@ def clear_credentials(ftrack_server=None):
def check_credentials(username, api_key, ftrack_server=None):
if not ftrack_server:
- ftrack_server = os.environ["FTRACK_SERVER"]
+ ftrack_server = os.environ.get("FTRACK_SERVER")
- if not username or not api_key:
+ if not ftrack_server or not username or not api_key:
return False
try:
diff --git a/openpype/modules/ftrack/tray/ftrack_tray.py b/openpype/modules/ftrack/tray/ftrack_tray.py
index ee27d8b730..34e4646767 100644
--- a/openpype/modules/ftrack/tray/ftrack_tray.py
+++ b/openpype/modules/ftrack/tray/ftrack_tray.py
@@ -289,12 +289,6 @@ class FtrackTrayWrapper:
parent_menu.addMenu(tray_menu)
- def tray_start(self):
- self.validate()
-
- def tray_exit(self):
- self.stop_action_server()
-
# Definition of visibility of each menu actions
def set_menu_visibility(self):
self.tray_server_menu.menuAction().setVisible(self.bool_logged)
diff --git a/openpype/modules/ftrack/tray/login_dialog.py b/openpype/modules/ftrack/tray/login_dialog.py
index ce91c6d012..a6360a7380 100644
--- a/openpype/modules/ftrack/tray/login_dialog.py
+++ b/openpype/modules/ftrack/tray/login_dialog.py
@@ -134,11 +134,11 @@ class CredentialsDialog(QtWidgets.QDialog):
def fill_ftrack_url(self):
url = os.getenv("FTRACK_SERVER")
- if url == self.ftsite_input.text():
+ checked_url = self.check_url(url)
+ if checked_url == self.ftsite_input.text():
return
- checked_url = self.check_url(url)
- self.ftsite_input.setText(checked_url or "")
+ self.ftsite_input.setText(checked_url or "< Not set >")
enabled = bool(checked_url)
@@ -147,7 +147,15 @@ class CredentialsDialog(QtWidgets.QDialog):
self.api_input.setEnabled(enabled)
self.user_input.setEnabled(enabled)
- self.ftsite_input.setEnabled(enabled)
+
+ if not url:
+ self.btn_advanced.hide()
+ self.btn_simple.hide()
+ self.btn_ftrack_login.hide()
+ self.btn_login.hide()
+ self.note_label.hide()
+ self.api_input.hide()
+ self.user_input.hide()
def set_advanced_mode(self, is_advanced):
self._in_advance_mode = is_advanced
@@ -293,10 +301,9 @@ class CredentialsDialog(QtWidgets.QDialog):
url = url.strip("/ ")
if not url:
- self.set_error((
- "You need to specify a valid server URL, "
- "for example https://server-name.ftrackapp.com"
- ))
+ self.set_error(
+ "Ftrack URL is not defined in settings!"
+ )
return
if "http" not in url:
diff --git a/openpype/modules/idle_manager/idle_module.py b/openpype/modules/idle_manager/idle_module.py
index c06dbed78c..5dd5160aa7 100644
--- a/openpype/modules/idle_manager/idle_module.py
+++ b/openpype/modules/idle_manager/idle_module.py
@@ -1,3 +1,4 @@
+import platform
import collections
from abc import ABCMeta, abstractmethod
@@ -40,7 +41,12 @@ class IdleManager(PypeModule, ITrayService):
name = "idle_manager"
def initialize(self, module_settings):
- self.enabled = True
+ enabled = True
+ # Ignore on macOS
+ # - pynput needs root permissions and enabled access for application
+ if platform.system().lower() == "darwin":
+ enabled = False
+ self.enabled = enabled
self.time_callbacks = collections.defaultdict(list)
self.idle_thread = None
diff --git a/openpype/modules/sync_server/__init__.py b/openpype/modules/sync_server/__init__.py
index 7123536fcf..a814f0db62 100644
--- a/openpype/modules/sync_server/__init__.py
+++ b/openpype/modules/sync_server/__init__.py
@@ -1,5 +1,5 @@
-from openpype.modules.sync_server.sync_server import SyncServer
+from openpype.modules.sync_server.sync_server_module import SyncServerModule
def tray_init(tray_widget, main_widget):
- return SyncServer()
+ return SyncServerModule()
diff --git a/openpype/modules/sync_server/providers/__init__.py b/openpype/modules/sync_server/providers/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/openpype/modules/sync_server/providers/abstract_provider.py b/openpype/modules/sync_server/providers/abstract_provider.py
index 001d4c4d50..a60595ba93 100644
--- a/openpype/modules/sync_server/providers/abstract_provider.py
+++ b/openpype/modules/sync_server/providers/abstract_provider.py
@@ -1,16 +1,23 @@
-from abc import ABCMeta, abstractmethod
+import abc
+import six
+from openpype.api import Logger
+
+log = Logger().get_logger("SyncServer")
-class AbstractProvider(metaclass=ABCMeta):
+@six.add_metaclass(abc.ABCMeta)
+class AbstractProvider:
- def __init__(self, site_name, tree=None, presets=None):
+ def __init__(self, project_name, site_name, tree=None, presets=None):
self.presets = None
self.active = False
self.site_name = site_name
self.presets = presets
- @abstractmethod
+ super(AbstractProvider, self).__init__()
+
+ @abc.abstractmethod
def is_active(self):
"""
Returns True if provider is activated, eg. has working credentials.
@@ -18,36 +25,54 @@ class AbstractProvider(metaclass=ABCMeta):
(boolean)
"""
- @abstractmethod
- def upload_file(self, source_path, target_path, overwrite=True):
+ @abc.abstractmethod
+ def upload_file(self, source_path, path,
+ server, collection, file, representation, site,
+ overwrite=False):
"""
Copy file from 'source_path' to 'target_path' on provider.
Use 'overwrite' boolean to rewrite existing file on provider
Args:
- source_path (string): absolute path on local system
- target_path (string): absolute path on provider (GDrive etc.)
- overwrite (boolean): True if overwite existing
+ source_path (string):
+ path (string): absolute path with or without name of the file
+ overwrite (boolean): replace existing file
+
+ arguments for saving progress:
+ server (SyncServer): server instance to call update_db on
+ collection (str): name of collection
+ file (dict): info about uploaded file (matches structure from db)
+ representation (dict): complete repre containing 'file'
+ site (str): site name
Returns:
(string) file_id of created file, raises exception
"""
pass
- @abstractmethod
- def download_file(self, source_path, local_path, overwrite=True):
+ @abc.abstractmethod
+ def download_file(self, source_path, local_path,
+ server, collection, file, representation, site,
+ overwrite=False):
"""
Download file from provider into local system
Args:
source_path (string): absolute path on provider
- local_path (string): absolute path on local
- overwrite (bool): default set to True
+ local_path (string): absolute path with or without name of the file
+ overwrite (boolean): replace existing file
+
+ arguments for saving progress:
+ server (SyncServer): server instance to call update_db on
+ collection (str): name of collection
+ file (dict): info about uploaded file (matches structure from db)
+ representation (dict): complete repre containing 'file'
+ site (str): site name
Returns:
None
"""
pass
- @abstractmethod
+ @abc.abstractmethod
def delete_file(self, path):
"""
Deletes file from 'path'. Expects path to specific file.
@@ -60,7 +85,7 @@ class AbstractProvider(metaclass=ABCMeta):
"""
pass
- @abstractmethod
+ @abc.abstractmethod
def list_folder(self, folder_path):
"""
List all files and subfolders of particular path non-recursively.
@@ -72,7 +97,7 @@ class AbstractProvider(metaclass=ABCMeta):
"""
pass
- @abstractmethod
+ @abc.abstractmethod
def create_folder(self, folder_path):
"""
Create all nonexistent folders and subfolders in 'path'.
@@ -85,7 +110,7 @@ class AbstractProvider(metaclass=ABCMeta):
"""
pass
- @abstractmethod
+ @abc.abstractmethod
def get_tree(self):
"""
Creates folder structure for providers which do not provide
@@ -94,16 +119,50 @@ class AbstractProvider(metaclass=ABCMeta):
"""
pass
- @abstractmethod
- def resolve_path(self, path, root_config, anatomy=None):
+ @abc.abstractmethod
+ def get_roots_config(self, anatomy=None):
"""
- Replaces root placeholders with appropriate real value from
- 'root_configs' (from Settings or Local Settings) or Anatomy
- (mainly for 'studio' site)
+ Returns root values for path resolving
- Args:
- path(string): path with '{root[work]}/...'
- root_config(dict): from Settings or Local Settings
- anatomy (Anatomy): prepared anatomy object for project
+ Takes value from Anatomy which takes values from Settings
+ overridden by Local Settings
+
+ Returns:
+ (dict) - {"root": {"root": "/My Drive"}}
+ OR
+ {"root": {"root_ONE": "value", "root_TWO": "value"}}
+ Format is important for usage of python's format ** approach
"""
pass
+
+ def resolve_path(self, path, root_config=None, anatomy=None):
+ """
+ Replaces all root placeholders with proper values
+
+ Args:
+ path(string): root[work]/folder...
+ root_config (dict): {'work': "c:/..."...}
+ anatomy (Anatomy): object of Anatomy
+ Returns:
+ (string): proper url
+ """
+ if not root_config:
+ root_config = self.get_roots_config(anatomy)
+
+ if root_config and not root_config.get("root"):
+ root_config = {"root": root_config}
+
+ try:
+ if not root_config:
+ raise KeyError
+
+ path = path.format(**root_config)
+ except KeyError:
+ try:
+ path = anatomy.fill_root(path)
+ except KeyError:
+ msg = "Error in resolving local root from anatomy"
+ log.error(msg)
+ raise ValueError(msg)
+
+ return path
diff --git a/openpype/modules/sync_server/providers/gdrive.py b/openpype/modules/sync_server/providers/gdrive.py
index 6c01bc4e6f..f1ea24f601 100644
--- a/openpype/modules/sync_server/providers/gdrive.py
+++ b/openpype/modules/sync_server/providers/gdrive.py
@@ -10,6 +10,7 @@ from openpype.api import get_system_settings
from ..utils import time_function
import time
+
SCOPES = ['https://www.googleapis.com/auth/drive.metadata.readonly',
'https://www.googleapis.com/auth/drive.file',
'https://www.googleapis.com/auth/drive.readonly'] # for write|delete
@@ -45,9 +46,10 @@ class GDriveHandler(AbstractProvider):
MY_DRIVE_STR = 'My Drive' # name of root folder of regular Google drive
CHUNK_SIZE = 2097152 # must be divisible by 256!
- def __init__(self, site_name, tree=None, presets=None):
+ def __init__(self, project_name, site_name, tree=None, presets=None):
self.presets = None
self.active = False
+ self.project_name = project_name
self.site_name = site_name
self.presets = presets
@@ -65,137 +67,6 @@ class GDriveHandler(AbstractProvider):
self._tree = tree
self.active = True
- def _get_gd_service(self):
- """
- Authorize client with 'credentials.json', uses service account.
- Service account needs to have target folder shared with.
- Produces service that communicates with GDrive API.
-
- Returns:
- None
- """
- creds = service_account.Credentials.from_service_account_file(
- self.presets["credentials_url"],
- scopes=SCOPES)
- service = build('drive', 'v3',
- credentials=creds, cache_discovery=False)
- return service
-
- def _prepare_root_info(self):
- """
- Prepare info about roots and theirs folder ids from 'presets'.
- Configuration might be for single or multiroot projects.
- Regular My Drive and Shared drives are implemented, their root
- folder ids need to be queried in slightly different way.
-
- Returns:
- (dicts) of dicts where root folders are keys
- """
- roots = {}
- for path in self.get_roots_config().values():
- if self.MY_DRIVE_STR in path:
- roots[self.MY_DRIVE_STR] = self.service.files()\
- .get(fileId='root').execute()
- else:
- shared_drives = []
- page_token = None
-
- while True:
- response = self.service.drives().list(
- pageSize=100,
- pageToken=page_token).execute()
- shared_drives.extend(response.get('drives', []))
- page_token = response.get('nextPageToken', None)
- if page_token is None:
- break
-
- folders = path.split('/')
- if len(folders) < 2:
- raise ValueError("Wrong root folder definition {}".
- format(path))
-
- for shared_drive in shared_drives:
- if folders[1] in shared_drive["name"]:
- roots[shared_drive["name"]] = {
- "name": shared_drive["name"],
- "id": shared_drive["id"]}
- if self.MY_DRIVE_STR not in roots: # add My Drive always
- roots[self.MY_DRIVE_STR] = self.service.files() \
- .get(fileId='root').execute()
-
- return roots
-
- @time_function
- def _build_tree(self, folders):
- """
- Create in-memory structure resolving paths to folder id as
- recursive querying might be slower.
- Initialized in the time of class initialization.
- Maybe should be persisted
- Tree is structure of path to id:
- '/ROOT': {'id': '1234567'}
- '/ROOT/PROJECT_FOLDER': {'id':'222222'}
- '/ROOT/PROJECT_FOLDER/Assets': {'id': '3434545'}
- Args:
- folders (list): list of dictionaries with folder metadata
- Returns:
- (dictionary) path as a key, folder id as a value
- """
- log.debug("build_tree len {}".format(len(folders)))
- root_ids = []
- default_root_id = None
- tree = {}
- ending_by = {}
- for root_name, root in self.root.items(): # might be multiple roots
- if root["id"] not in root_ids:
- tree["/" + root_name] = {"id": root["id"]}
- ending_by[root["id"]] = "/" + root_name
- root_ids.append(root["id"])
-
- if self.MY_DRIVE_STR == root_name:
- default_root_id = root["id"]
-
- no_parents_yet = {}
- while folders:
- folder = folders.pop(0)
- parents = folder.get("parents", [])
- # weird cases, shared folders, etc, parent under root
- if not parents:
- parent = default_root_id
- else:
- parent = parents[0]
-
- if folder["id"] in root_ids: # do not process root
- continue
-
- if parent in ending_by:
- path_key = ending_by[parent] + "/" + folder["name"]
- ending_by[folder["id"]] = path_key
- tree[path_key] = {"id": folder["id"]}
- else:
- no_parents_yet.setdefault(parent, []).append((folder["id"],
- folder["name"]))
- loop_cnt = 0
- # break if looped more then X times - safety against infinite loop
- while no_parents_yet and loop_cnt < 20:
-
- keys = list(no_parents_yet.keys())
- for parent in keys:
- if parent in ending_by.keys():
- subfolders = no_parents_yet.pop(parent)
- for folder_id, folder_name in subfolders:
- path_key = ending_by[parent] + "/" + folder_name
- ending_by[folder_id] = path_key
- tree[path_key] = {"id": folder_id}
- loop_cnt += 1
-
- if len(no_parents_yet) > 0:
- log.debug("Some folders path are not resolved {}".
- format(no_parents_yet))
- log.debug("Remove deleted folders from trash.")
-
- return tree
-
def is_active(self):
"""
Returns True if provider is activated, eg. has working credentials.
@@ -204,6 +75,21 @@ class GDriveHandler(AbstractProvider):
"""
return self.active
+ def get_roots_config(self, anatomy=None):
+ """
+ Returns root values for path resolving
+
+ Use only Settings as GDrive cannot be modified by Local Settings
+
+ Returns:
+ (dict) - {"root": {"root": "/My Drive"}}
+ OR
+ {"root": {"root_ONE": "value", "root_TWO": "value"}}
+ Format is important for usage of python's format ** approach
+ """
+ # GDrive roots cannot be locally overridden
+ return self.presets['root']
+
def get_tree(self):
"""
Building of the folder tree could be potentially expensive,
@@ -217,26 +103,6 @@ class GDriveHandler(AbstractProvider):
self._tree = self._build_tree(self.list_folders())
return self._tree
- def get_roots_config(self):
- """
- Returns value from presets of roots. It calculates with multi
- roots. Config should be simple key value, or dictionary.
-
- Examples:
- "root": "/My Drive"
- OR
- "root": {"root_ONE": "value", "root_TWO":"value}
- Returns:
- (dict) - {"root": {"root": "/My Drive"}}
- OR
- {"root": {"root_ONE": "value", "root_TWO":"value}}
- Format is importing for usage of python's format ** approach
- """
- roots = self.presets["root"]
- if isinstance(roots, str):
- roots = {"root": roots}
- return roots
-
def create_folder(self, path):
"""
Create all nonexistent folders and subfolders in 'path'.
@@ -510,20 +376,6 @@ class GDriveHandler(AbstractProvider):
self.service.files().delete(fileId=file["id"],
supportsAllDrives=True).execute()
- def _get_folder_metadata(self, path):
- """
- Get info about folder with 'path'
- Args:
- path (string):
-
- Returns:
- (dictionary) with metadata or raises ValueError
- """
- try:
- return self.get_tree()[path]
- except Exception:
- raise ValueError("Uknown folder id {}".format(id))
-
def list_folder(self, folder_path):
"""
List all files and subfolders of particular path non-recursively.
@@ -678,15 +530,151 @@ class GDriveHandler(AbstractProvider):
return
return provider_presets
- def resolve_path(self, path, root_config, anatomy=None):
- if not root_config.get("root"):
- root_config = {"root": root_config}
+ def _get_gd_service(self):
+ """
+ Authorize client with 'credentials.json', uses service account.
+ Service account needs to have target folder shared with.
+ Produces service that communicates with GDrive API.
+ Returns:
+ None
+ """
+ creds = service_account.Credentials.from_service_account_file(
+ self.presets["credentials_url"],
+ scopes=SCOPES)
+ service = build('drive', 'v3',
+ credentials=creds, cache_discovery=False)
+ return service
+
+ def _prepare_root_info(self):
+ """
+ Prepare info about roots and theirs folder ids from 'presets'.
+ Configuration might be for single or multiroot projects.
+ Regular My Drive and Shared drives are implemented, their root
+ folder ids need to be queried in slightly different way.
+
+ Returns:
+ (dicts) of dicts where root folders are keys
+ """
+ roots = {}
+ config_roots = self.get_roots_config()
+ for path in config_roots.values():
+ if self.MY_DRIVE_STR in path:
+ roots[self.MY_DRIVE_STR] = self.service.files()\
+ .get(fileId='root').execute()
+ else:
+ shared_drives = []
+ page_token = None
+
+ while True:
+ response = self.service.drives().list(
+ pageSize=100,
+ pageToken=page_token).execute()
+ shared_drives.extend(response.get('drives', []))
+ page_token = response.get('nextPageToken', None)
+ if page_token is None:
+ break
+
+ folders = path.split('/')
+ if len(folders) < 2:
+ raise ValueError("Wrong root folder definition {}".
+ format(path))
+
+ for shared_drive in shared_drives:
+ if folders[1] in shared_drive["name"]:
+ roots[shared_drive["name"]] = {
+ "name": shared_drive["name"],
+ "id": shared_drive["id"]}
+ if self.MY_DRIVE_STR not in roots: # add My Drive always
+ roots[self.MY_DRIVE_STR] = self.service.files() \
+ .get(fileId='root').execute()
+
+ return roots
+
+ @time_function
+ def _build_tree(self, folders):
+ """
+ Create in-memory structure resolving paths to folder id as
+ recursive querying might be slower.
+ Initialized in the time of class initialization.
+ Maybe should be persisted
+ Tree is structure of path to id:
+ '/ROOT': {'id': '1234567'}
+ '/ROOT/PROJECT_FOLDER': {'id':'222222'}
+ '/ROOT/PROJECT_FOLDER/Assets': {'id': '3434545'}
+ Args:
+ folders (list): list of dictionaries with folder metadata
+ Returns:
+ (dictionary) path as a key, folder id as a value
+ """
+ log.debug("build_tree len {}".format(len(folders)))
+ root_ids = []
+ default_root_id = None
+ tree = {}
+ ending_by = {}
+ for root_name, root in self.root.items(): # might be multiple roots
+ if root["id"] not in root_ids:
+ tree["/" + root_name] = {"id": root["id"]}
+ ending_by[root["id"]] = "/" + root_name
+ root_ids.append(root["id"])
+
+ if self.MY_DRIVE_STR == root_name:
+ default_root_id = root["id"]
+
+ no_parents_yet = {}
+ while folders:
+ folder = folders.pop(0)
+ parents = folder.get("parents", [])
+ # weird cases, shared folders, etc, parent under root
+ if not parents:
+ parent = default_root_id
+ else:
+ parent = parents[0]
+
+ if folder["id"] in root_ids: # do not process root
+ continue
+
+ if parent in ending_by:
+ path_key = ending_by[parent] + "/" + folder["name"]
+ ending_by[folder["id"]] = path_key
+ tree[path_key] = {"id": folder["id"]}
+ else:
+ no_parents_yet.setdefault(parent, []).append((folder["id"],
+ folder["name"]))
+ loop_cnt = 0
+ # break if looped more than X times - safety against infinite loop
+ while no_parents_yet and loop_cnt < 20:
+
+ keys = list(no_parents_yet.keys())
+ for parent in keys:
+ if parent in ending_by.keys():
+ subfolders = no_parents_yet.pop(parent)
+ for folder_id, folder_name in subfolders:
+ path_key = ending_by[parent] + "/" + folder_name
+ ending_by[folder_id] = path_key
+ tree[path_key] = {"id": folder_id}
+ loop_cnt += 1
+
+ if len(no_parents_yet) > 0:
+ log.debug("Some folders path are not resolved {}".
+ format(no_parents_yet))
+ log.debug("Remove deleted folders from trash.")
+
+ return tree
+
+ def _get_folder_metadata(self, path):
+ """
+ Get info about folder with 'path'
+ Args:
+ path (string):
+
+ Returns:
+ (dictionary) with metadata or raises ValueError
+ """
try:
- return path.format(**root_config)
- except KeyError:
- msg = "Error in resolving remote root, unknown key"
- log.error(msg)
+ return self.get_tree()[path]
+ except Exception:
+ raise ValueError("Uknown folder id {}".format(id))
def _handle_q(self, q, trashed=False):
""" API list call contain trashed and hidden files/folder by default.
diff --git a/openpype/modules/sync_server/providers/lib.py b/openpype/modules/sync_server/providers/lib.py
index 144594ecbe..58947e115d 100644
--- a/openpype/modules/sync_server/providers/lib.py
+++ b/openpype/modules/sync_server/providers/lib.py
@@ -1,4 +1,3 @@
-from enum import Enum
from .gdrive import GDriveHandler
from .local_drive import LocalDriveHandler
@@ -25,7 +24,8 @@ class ProviderFactory:
"""
self.providers[provider] = (creator, batch_limit)
- def get_provider(self, provider, site_name, tree=None, presets=None):
+ def get_provider(self, provider, project_name, site_name,
+ tree=None, presets=None):
"""
Returns new instance of provider client for specific site.
One provider could have multiple sites.
@@ -37,6 +37,7 @@ class ProviderFactory:
provider (string): 'gdrive','S3'
site_name (string): descriptor of site, different service accounts
must have different site name
+ project_name (string): different projects could have diff. sites
tree (dictionary): - folder paths to folder id structure
presets (dictionary): config for provider and site (eg.
"credentials_url"..)
@@ -44,7 +45,8 @@ class ProviderFactory:
(implementation of AbstractProvider)
"""
creator_info = self._get_creator_info(provider)
- site = creator_info[0](site_name, tree, presets) # call init
+ # call init
+ site = creator_info[0](project_name, site_name, tree, presets)
return site
diff --git a/openpype/modules/sync_server/providers/local_drive.py b/openpype/modules/sync_server/providers/local_drive.py
index fa8dd4c183..1f4fca80eb 100644
--- a/openpype/modules/sync_server/providers/local_drive.py
+++ b/openpype/modules/sync_server/providers/local_drive.py
@@ -4,7 +4,7 @@ import shutil
import threading
import time
-from openpype.api import Logger
+from openpype.api import Logger, Anatomy
from .abstract_provider import AbstractProvider
log = Logger().get_logger("SyncServer")
@@ -12,6 +12,14 @@ log = Logger().get_logger("SyncServer")
class LocalDriveHandler(AbstractProvider):
""" Handles required operations on mounted disks with OS """
+ def __init__(self, project_name, site_name, tree=None, presets=None):
+ self.presets = None
+ self.active = False
+ self.project_name = project_name
+ self.site_name = site_name
+
+ self.active = self.is_active()
+
def is_active(self):
return True
@@ -82,27 +90,37 @@ class LocalDriveHandler(AbstractProvider):
os.makedirs(folder_path, exist_ok=True)
return folder_path
+ def get_roots_config(self, anatomy=None):
+ """
+ Returns root values for path resolving
+
+ Takes value from Anatomy which takes values from Settings
+ overridden by Local Settings
+
+ Returns:
+ (dict) - {"root": {"root": "/My Drive"}}
+ OR
+ {"root": {"root_ONE": "value", "root_TWO": "value"}}
+ Format is important for usage of python's format ** approach
+ """
+ if not anatomy:
+ anatomy = Anatomy(self.project_name,
+ self._normalize_site_name(self.site_name))
+
+ return {'root': anatomy.roots}
+
def get_tree(self):
return
- def resolve_path(self, path, root_config, anatomy=None):
- if root_config and not root_config.get("root"):
- root_config = {"root": root_config}
+ def get_configurable_items_for_site(self):
+ """
+ Returns list of items that should be configurable by User
- try:
- if not root_config:
- raise KeyError
-
- path = path.format(**root_config)
- except KeyError:
- try:
- path = anatomy.fill_root(path)
- except KeyError:
- msg = "Error in resolving local root from anatomy"
- log.error(msg)
- raise ValueError(msg)
-
- return path
+ Returns:
+ (list of dict)
+ [{key:"root", label:"root", value:"valueFromSettings"}]
+ """
+ pass
def _copy(self, source_path, target_path):
print("copying {}->{}".format(source_path, target_path))
@@ -133,3 +151,9 @@ class LocalDriveHandler(AbstractProvider):
)
target_file_size = os.path.getsize(target_path)
time.sleep(0.5)
+
+ def _normalize_site_name(self, site_name):
+ """Transform user id to 'local' for Local settings"""
+ if site_name != 'studio':
+ return 'local'
+ return site_name
diff --git a/openpype/modules/sync_server/sync_server.py b/openpype/modules/sync_server/sync_server.py
index 62a5dc675c..e97c0e8844 100644
--- a/openpype/modules/sync_server/sync_server.py
+++ b/openpype/modules/sync_server/sync_server.py
@@ -1,1391 +1,225 @@
-from openpype.api import (
- Anatomy,
- get_project_settings,
- get_local_site_id)
-
+"""Python 3 only implementation."""
+import os
+import asyncio
import threading
import concurrent.futures
from concurrent.futures._base import CancelledError
-from enum import Enum
-from datetime import datetime
-
from .providers import lib
-import os
-from bson.objectid import ObjectId
-
-from avalon.api import AvalonMongoDB
-from .utils import time_function
-
-import six
from openpype.lib import PypeLogger
-from .. import PypeModule, ITrayModule
-from .providers.local_drive import LocalDriveHandler
-if six.PY2:
- web = asyncio = STATIC_DIR = WebSocketAsync = None
-else:
- import asyncio
+from .utils import SyncStatus
+
log = PypeLogger().get_logger("SyncServer")
-class SyncStatus(Enum):
- DO_NOTHING = 0
- DO_UPLOAD = 1
- DO_DOWNLOAD = 2
-
-
-class SyncServer(PypeModule, ITrayModule):
+async def upload(module, collection, file, representation, provider_name,
+ remote_site_name, tree=None, preset=None):
"""
- Synchronization server that is syncing published files from local to
- any of implemented providers (like GDrive, S3 etc.)
- Runs in the background and checks all representations, looks for files
- that are marked to be in different location than 'studio' (temporary),
- checks if 'created_dt' field is present denoting successful sync
- with provider destination.
- Sites structure is created during publish OR by calling 'add_site'
- method.
+ Upload single 'file' of a 'representation' to 'provider'.
+ Source url is taken from 'file' portion, where {root} placeholder
+ is replaced by 'representation.Context.root'
+ Provider could be one of implemented in provider.py.
- By default it will always contain 1 record with
- "name" == self.presets["active_site"] and
- filled "created_dt" AND 1 or multiple records for all defined
- remote sites, where "created_dt" is not present.
- This highlights that file should be uploaded to
- remote destination
+ Updates MongoDB, fills in id of file from provider (ie. file_id
+ from GDrive), 'created_dt' - time of upload
- ''' - example of synced file test_Cylinder_lookMain_v010.ma to GDrive
- "files" : [
- {
- "path" : "{root}/Test/Assets/Cylinder/publish/look/lookMain/v010/
- test_Cylinder_lookMain_v010.ma",
- "_id" : ObjectId("5eeb25e411e06a16209ab78f"),
- "hash" : "test_Cylinder_lookMain_v010,ma|1592468963,24|4822",
- "size" : NumberLong(4822),
- "sites" : [
- {
- "name": "john_local_XD4345",
- "created_dt" : ISODate("2020-05-22T08:05:44.000Z")
- },
- {
- "id" : ObjectId("5eeb25e411e06a16209ab78f"),
- "name": "gdrive",
- "created_dt" : ISODate("2020-05-55T08:54:35.833Z")
- ]
- }
- },
- '''
- Each Tray app has assigned its own self.presets["local_id"]
- used in sites as a name.
- Tray is searching only for records where name matches its
- self.presets["active_site"] + self.presets["remote_site"].
- "active_site" could be storage in studio ('studio'), or specific
- "local_id" when user is working disconnected from home.
- If the local record has its "created_dt" filled, it is a source and
- process will try to upload the file to all defined remote sites.
+ 'provider_name' doesn't have to match to 'site_name', single
+ provider (GDrive) might have multiple sites ('projectA',
+ 'projectB')
- Remote files "id" is real id that could be used in appropriate API.
- Local files have "id" too, for conformity, contains just file name.
- It is expected that multiple providers will be implemented in separate
- classes and registered in 'providers.py'.
+ Args:
+ module(SyncServerModule): object to run SyncServerModule API
+ collection (str): source collection
+ file (dictionary): of file from representation in Mongo
+ representation (dictionary): of representation
+ provider_name (string): gdrive, gdc etc.
+ site_name (string): site on provider, single provider(gdrive) could
+ have multiple sites (different accounts, credentials)
+ tree (dictionary): injected memory structure for performance
+ preset (dictionary): site config ('credentials_url', 'root'...)
"""
- # limit querying DB to look for X number of representations that should
- # be sync, we try to run more loops with less records
- # actual number of files synced could be lower as providers can have
- # different limits imposed by its API
- # set 0 to no limit
- REPRESENTATION_LIMIT = 100
- DEFAULT_SITE = 'studio'
- LOCAL_SITE = 'local'
- LOG_PROGRESS_SEC = 5 # how often log progress to DB
+ # create ids sequentially, upload file in parallel later
+ with module.lock:
+ # this part modifies structure on 'remote_site', only single
+ # thread can do that at a time, upload/download to prepared
+ # structure should be run in parallel
+ remote_handler = lib.factory.get_provider(provider_name,
+ collection,
+ remote_site_name,
+ tree=tree,
+ presets=preset)
- name = "sync_server"
- label = "Sync Server"
-
- def initialize(self, module_settings):
- """
- Called during Module Manager creation.
-
- Collects needed data, checks asyncio presence.
- Sets 'enabled' according to global settings for the module.
- Shouldnt be doing any initialization, thats a job for 'tray_init'
- """
- self.enabled = module_settings[self.name]["enabled"]
- if asyncio is None:
- raise AssertionError(
- "SyncServer module requires Python 3.5 or higher."
+ file_path = file.get("path", "")
+ try:
+ local_file_path, remote_file_path = resolve_paths(module,
+ file_path, collection, remote_site_name, remote_handler
)
- # some parts of code need to run sequentially, not in async
- self.lock = None
- self.connection = None # connection to avalon DB to update state
- # settings for all enabled projects for sync
- self.sync_project_settings = None
- self.sync_server_thread = None # asyncio requires new thread
-
- self.action_show_widget = None
- self._paused = False
- self._paused_projects = set()
- self._paused_representations = set()
- self._anatomies = {}
-
- """ Start of Public API """
- def add_site(self, collection, representation_id, site_name=None):
- """
- Adds new site to representation to be synced.
-
- 'collection' must have synchronization enabled (globally or
- project only)
-
- Used as a API endpoint from outside applications (Loader etc)
-
- Args:
- collection (string): project name (must match DB)
- representation_id (string): MongoDB _id value
- site_name (string): name of configured and active site
-
- Returns:
- throws ValueError if any issue
- """
- if not self.get_sync_project_setting(collection):
- raise ValueError("Project not configured")
-
- if not site_name:
- site_name = self.DEFAULT_SITE
-
- self.reset_provider_for_file(collection,
- representation_id,
- site_name=site_name)
-
- # public facing API
- def remove_site(self, collection, representation_id, site_name,
- remove_local_files=False):
- """
- Removes 'site_name' for particular 'representation_id' on
- 'collection'
-
- Args:
- collection (string): project name (must match DB)
- representation_id (string): MongoDB _id value
- site_name (string): name of configured and active site
- remove_local_files (bool): remove only files for 'local_id'
- site
-
- Returns:
- throws ValueError if any issue
- """
- if not self.get_sync_project_setting(collection):
- raise ValueError("Project not configured")
-
- self.reset_provider_for_file(collection,
- representation_id,
- site_name=site_name,
- remove=True)
- if remove_local_files:
- self._remove_local_file(collection, representation_id, site_name)
-
- def clear_project(self, collection, site_name):
- """
- Clear 'collection' of 'site_name' and its local files
-
- Works only on real local sites, not on 'studio'
- """
- query = {
- "type": "representation",
- "files.sites.name": site_name
- }
-
- representations = list(
- self.connection.database[collection].find(query))
- if not representations:
- self.log.debug("No repre found")
- return
-
- for repre in representations:
- self.remove_site(collection, repre.get("_id"), site_name, True)
-
- def pause_representation(self, collection, representation_id, site_name):
- """
- Sets 'representation_id' as paused, eg. no syncing should be
- happening on it.
-
- Args:
- collection (string): project name
- representation_id (string): MongoDB objectId value
- site_name (string): 'gdrive', 'studio' etc.
- """
- log.info("Pausing SyncServer for {}".format(representation_id))
- self._paused_representations.add(representation_id)
- self.reset_provider_for_file(collection, representation_id,
- site_name=site_name, pause=True)
-
- def unpause_representation(self, collection, representation_id, site_name):
- """
- Sets 'representation_id' as unpaused.
-
- Does not fail or warn if repre wasn't paused.
-
- Args:
- collection (string): project name
- representation_id (string): MongoDB objectId value
- site_name (string): 'gdrive', 'studio' etc.
- """
- log.info("Unpausing SyncServer for {}".format(representation_id))
- try:
- self._paused_representations.remove(representation_id)
- except KeyError:
- pass
- # self.paused_representations is not persistent
- self.reset_provider_for_file(collection, representation_id,
- site_name=site_name, pause=False)
-
- def is_representation_paused(self, representation_id,
- check_parents=False, project_name=None):
- """
- Returns if 'representation_id' is paused or not.
-
- Args:
- representation_id (string): MongoDB objectId value
- check_parents (bool): check if parent project or server itself
- are not paused
- project_name (string): project to check if paused
-
- if 'check_parents', 'project_name' should be set too
- Returns:
- (bool)
- """
- condition = representation_id in self._paused_representations
- if check_parents and project_name:
- condition = condition or \
- self.is_project_paused(project_name) or \
- self.is_paused()
- return condition
-
- def pause_project(self, project_name):
- """
- Sets 'project_name' as paused, eg. no syncing should be
- happening on all representation inside.
-
- Args:
- project_name (string): collection name
- """
- log.info("Pausing SyncServer for {}".format(project_name))
- self._paused_projects.add(project_name)
-
- def unpause_project(self, project_name):
- """
- Sets 'project_name' as unpaused
-
- Does not fail or warn if project wasn't paused.
-
- Args:
- project_name (string): collection name
- """
- log.info("Unpausing SyncServer for {}".format(project_name))
- try:
- self._paused_projects.remove(project_name)
- except KeyError:
- pass
-
- def is_project_paused(self, project_name, check_parents=False):
- """
- Returns if 'project_name' is paused or not.
-
- Args:
- project_name (string): collection name
- check_parents (bool): check if server itself
- is not paused
- Returns:
- (bool)
- """
- condition = project_name in self._paused_projects
- if check_parents:
- condition = condition or self.is_paused()
- return condition
-
- def pause_server(self):
- """
- Pause sync server
-
- It won't check anything, not uploading/downloading...
- """
- log.info("Pausing SyncServer")
- self._paused = True
-
- def unpause_server(self):
- """
- Unpause server
- """
- log.info("Unpausing SyncServer")
- self._paused = False
-
- def is_paused(self):
- """ Is server paused """
- return self._paused
-
- def get_active_sites(self, project_name):
- """
- Returns list of active sites for 'project_name'.
-
- By default it returns ['studio'], this site is default
- and always present even if SyncServer is not enabled. (for publish)
-
- Used mainly for Local settings for user override.
-
- Args:
- project_name (string):
-
- Returns:
- (list) of strings
- """
- return self.get_active_sites_from_settings(
- get_project_settings(project_name))
-
- def get_active_sites_from_settings(self, settings):
- """
- List available active sites from incoming 'settings'. Used for
- returning 'default' values for Local Settings
-
- Args:
- settings (dict): full settings (global + project)
- Returns:
- (list) of strings
- """
- sync_settings = self._parse_sync_settings_from_settings(settings)
-
- return self._get_active_sites_from_settings(sync_settings)
-
- def get_active_site(self, project_name):
- """
- Returns active (mine) site for 'project_name' from settings
-
- Returns:
- (string)
- """
- active_site = self.get_sync_project_setting(
- project_name)['config']['active_site']
- if active_site == self.LOCAL_SITE:
- return get_local_site_id()
- return active_site
-
- # remote sites
- def get_remote_sites(self, project_name):
- """
- Returns all remote sites configured on 'project_name'.
-
- If 'project_name' is not enabled for syncing returns [].
-
- Used by Local setting to allow user choose remote site.
-
- Args:
- project_name (string):
-
- Returns:
- (list) of strings
- """
- return self.get_remote_sites_from_settings(
- get_project_settings(project_name))
-
- def get_remote_sites_from_settings(self, settings):
- """
- Get remote sites for returning 'default' values for Local Settings
- """
- sync_settings = self._parse_sync_settings_from_settings(settings)
-
- return self._get_remote_sites_from_settings(sync_settings)
-
- def get_remote_site(self, project_name):
- """
- Returns remote (theirs) site for 'project_name' from settings
- """
- remote_site = self.get_sync_project_setting(
- project_name)['config']['remote_site']
- if remote_site == self.LOCAL_SITE:
- return get_local_site_id()
-
- return remote_site
-
- """ End of Public API """
-
- def get_local_file_path(self, collection, file_path):
- """
- Externalized for app
- """
- local_file_path, _ = self._resolve_paths(file_path, collection)
-
- return local_file_path
-
- def _get_remote_sites_from_settings(self, sync_settings):
- if not self.enabled or not sync_settings['enabled']:
- return []
-
- remote_sites = [self.DEFAULT_SITE, self.LOCAL_SITE]
- if sync_settings:
- remote_sites.extend(sync_settings.get("sites").keys())
-
- return list(set(remote_sites))
-
- def _get_active_sites_from_settings(self, sync_settings):
- sites = [self.DEFAULT_SITE]
- if self.enabled and sync_settings['enabled']:
- sites.append(self.LOCAL_SITE)
-
- return sites
-
- def connect_with_modules(self, *_a, **kw):
- return
-
- def tray_init(self):
- """
- Actual initialization of Sync Server.
-
- Called when tray is initialized, it checks if module should be
- enabled. If not, no initialization necessary.
- """
- if not self.enabled:
- return
-
- self.sync_project_settings = None
- self.lock = threading.Lock()
-
- self.connection = AvalonMongoDB()
- self.connection.install()
-
- try:
- self.set_sync_project_settings()
- self.sync_server_thread = SyncServerThread(self)
- from .tray.app import SyncServerWindow
- self.widget = SyncServerWindow(self)
- except ValueError:
- log.info("No system setting for sync. Not syncing.", exc_info=True)
- self.enabled = False
- except KeyError:
- log.info((
- "There are not set presets for SyncServer OR "
- "Credentials provided are invalid, "
- "no syncing possible").
- format(str(self.sync_project_settings)), exc_info=True)
- self.enabled = False
-
- def tray_start(self):
- """
- Triggered when Tray is started.
-
- Checks if configuration presets are available and if there is
- any provider ('gdrive', 'S3') that is activated
- (eg. has valid credentials).
+ except Exception as exp:
+ print(exp)
+
+ target_folder = os.path.dirname(remote_file_path)
+ folder_id = remote_handler.create_folder(target_folder)
+
+ if not folder_id:
+ err = "Folder {} wasn't created. Check permissions.". \
+ format(target_folder)
+ raise NotADirectoryError(err)
+
+ loop = asyncio.get_running_loop()
+ file_id = await loop.run_in_executor(None,
+ remote_handler.upload_file,
+ local_file_path,
+ remote_file_path,
+ module,
+ collection,
+ file,
+ representation,
+ remote_site_name,
+ True
+ )
+ return file_id
+
+
+async def download(module, collection, file, representation, provider_name,
+ remote_site_name, tree=None, preset=None):
+ """
+ Downloads file to local folder denoted in representation.Context.
+
+ Args:
+ module(SyncServerModule): object to run SyncServerModule API
+ collection (str): source collection
+ file (dictionary) : info about processed file
+ representation (dictionary): repr that 'file' belongs to
+ provider_name (string): 'gdrive' etc
+ site_name (string): site on provider, single provider(gdrive) could
+ have multiple sites (different accounts, credentials)
+ tree (dictionary): injected memory structure for performance
+ preset (dictionary): site config ('credentials_url', 'root'...)
Returns:
- None
- """
- if self.sync_project_settings and self.enabled:
- self.sync_server_thread.start()
- else:
- log.info("No presets or active providers. " +
- "Synchronization not possible.")
+ (string) - 'name' of local file
+ """
+ with module.lock:
+ remote_handler = lib.factory.get_provider(provider_name,
+ collection,
+ remote_site_name,
+ tree=tree,
+ presets=preset)
- def tray_exit(self):
- """
- Stops sync thread if running.
+ file_path = file.get("path", "")
+ local_file_path, remote_file_path = resolve_paths(
+ module, file_path, collection, remote_site_name, remote_handler
+ )
- Called from Module Manager
- """
- if not self.sync_server_thread:
- return
+ local_folder = os.path.dirname(local_file_path)
+ os.makedirs(local_folder, exist_ok=True)
- if not self.is_running:
- return
- try:
- log.info("Stopping sync server server")
- self.sync_server_thread.is_running = False
- self.sync_server_thread.stop()
- except Exception:
- log.warning(
- "Error has happened during Killing sync server",
- exc_info=True
- )
+ local_site = module.get_active_site(collection)
- def tray_menu(self, parent_menu):
- if not self.enabled:
- return
+ loop = asyncio.get_running_loop()
+ file_id = await loop.run_in_executor(None,
+ remote_handler.download_file,
+ remote_file_path,
+ local_file_path,
+ module,
+ collection,
+ file,
+ representation,
+ local_site,
+ True
+ )
+ return file_id
- from Qt import QtWidgets
- """Add menu or action to Tray(or parent)'s menu"""
- action = QtWidgets.QAction("SyncServer", parent_menu)
- action.triggered.connect(self.show_widget)
- parent_menu.addAction(action)
- parent_menu.addSeparator()
- self.action_show_widget = action
+def resolve_paths(module, file_path, collection,
+ remote_site_name=None, remote_handler=None):
+ """
+ Returns tuple of local and remote file paths with {root}
+ placeholders replaced with proper values from Settings or Anatomy
- @property
- def is_running(self):
- return self.sync_server_thread.is_running
+ Ejected here because of Python 2 hosts (GDriveHandler is an issue)
- def get_anatomy(self, project_name):
- """
- Get already created or newly created anatomy for project
-
- Args:
- project_name (string):
-
- Return:
- (Anatomy)
- """
- return self._anatomies.get('project_name') or Anatomy(project_name)
-
- def set_sync_project_settings(self):
- """
- Set sync_project_settings for all projects (caching)
-
- For performance
- """
- sync_project_settings = {}
- if not self.connection:
- self.connection = AvalonMongoDB()
- self.connection.install()
-
- for collection in self.connection.database.collection_names(False):
- sync_settings = self._parse_sync_settings_from_settings(
- get_project_settings(collection))
- if sync_settings:
- default_sites = self._get_default_site_configs()
- sync_settings['sites'].update(default_sites)
- sync_project_settings[collection] = sync_settings
-
- if not sync_project_settings:
- log.info("No enabled and configured projects for sync.")
-
- self.sync_project_settings = sync_project_settings
-
- def get_sync_project_settings(self, refresh=False):
- """
- Collects all projects which have enabled syncing and their settings
Args:
- refresh (bool): refresh presets from settings - used when user
- changes site in Local Settings or any time up-to-date values
- are necessary
+ module(SyncServerModule): object to run SyncServerModule API
+ file_path(string): path with {root}
+ collection(string): project name
+ remote_site_name(string): remote site
+ remote_handler(AbstractProvider): implementation
Returns:
- (dict): of settings, keys are project names
- {'projectA':{enabled: True, sites:{}...}
- """
- # presets set already, do not call again and again
- if refresh or not self.sync_project_settings:
- self.set_sync_project_settings()
+ (string, string) - proper absolute paths, remote path is optional
+ """
+ remote_file_path = ''
+ if remote_handler:
+ remote_file_path = remote_handler.resolve_path(file_path)
- return self.sync_project_settings
+ local_handler = lib.factory.get_provider(
+ 'local_drive', collection, module.get_active_site(collection))
+ local_file_path = local_handler.resolve_path(file_path)
- def get_sync_project_setting(self, project_name):
- """ Handles pulling sync_server's settings for enabled 'project_name'
+ return local_file_path, remote_file_path
- Args:
- project_name (str): used in project settings
- Returns:
- (dict): settings dictionary for the enabled project,
- empty if no settings or sync is disabled
- """
- # presets set already, do not call again and again
- # self.log.debug("project preset {}".format(self.presets))
- if self.sync_project_settings and \
- self.sync_project_settings.get(project_name):
- return self.sync_project_settings.get(project_name)
- settings = get_project_settings(project_name)
- return self._parse_sync_settings_from_settings(settings)
+def site_is_working(module, project_name, site_name):
+ """
+ Confirm that 'site_name' is configured correctly for 'project_name'.
- def site_is_working(self, project_name, site_name):
- """
- Confirm that 'site_name' is configured correctly for 'project_name'
- Args:
- project_name(string):
- site_name(string):
- Returns
- (bool)
- """
- if self._get_configured_sites(project_name).get(site_name):
- return True
- return False
+ Must be here as lib.factory access doesn't work in Python 2 hosts.
- def _parse_sync_settings_from_settings(self, settings):
- """ settings from api.get_project_settings, TOOD rename """
- sync_settings = settings.get("global").get("sync_server")
- if not sync_settings:
- log.info("No project setting not syncing.")
- return {}
- if sync_settings.get("enabled"):
- return sync_settings
+ Args:
+ module (SyncServerModule)
+ project_name(string):
+ site_name(string):
+ Returns
+ (bool)
+ """
+ if _get_configured_sites(module, project_name).get(site_name):
+ return True
+ return False
+
+def _get_configured_sites(module, project_name):
+ """
+ Loops through settings and looks for configured sites and checks
+ its handlers for particular 'project_name'.
+
+ Args:
+ project_setting(dict): dictionary from Settings
+ only_project_name(string, optional): only interested in
+ particular project
+ Returns:
+ (dict of dict)
+ {'ProjectA': {'studio':True, 'gdrive':False}}
+ """
+ settings = module.get_sync_project_setting(project_name)
+ return _get_configured_sites_from_setting(module, project_name, settings)
+
+
+def _get_configured_sites_from_setting(module, project_name, project_setting):
+ if not project_setting.get("enabled"):
return {}
- def _get_configured_sites(self, project_name):
- """
- Loops through settings and looks for configured sites and checks
- its handlers for particular 'project_name'.
-
- Args:
- project_setting(dict): dictionary from Settings
- only_project_name(string, optional): only interested in
- particular project
- Returns:
- (dict of dict)
- {'ProjectA': {'studio':True, 'gdrive':False}}
- """
- settings = self.get_sync_project_setting(project_name)
- return self._get_configured_sites_from_setting(settings)
-
- def _get_configured_sites_from_setting(self, project_setting):
- if not project_setting.get("enabled"):
- return {}
-
- initiated_handlers = {}
- configured_sites = {}
- all_sites = self._get_default_site_configs()
- all_sites.update(project_setting.get("sites"))
- for site_name, config in all_sites.items():
- handler = initiated_handlers. \
- get((config["provider"], site_name))
- if not handler:
- handler = lib.factory.get_provider(config["provider"],
- site_name,
- presets=config)
- initiated_handlers[(config["provider"], site_name)] = \
- handler
-
- if handler.is_active():
- configured_sites[site_name] = True
-
- return configured_sites
-
- def _get_default_site_configs(self):
- """
- Returns skeleton settings for 'studio' and user's local site
- """
- default_config = {'provider': 'local_drive'}
- all_sites = {self.DEFAULT_SITE: default_config,
- get_local_site_id(): default_config}
- return all_sites
-
- def get_provider_for_site(self, project_name, site):
- """
- Return provider name for site.
- """
- site_preset = self.get_sync_project_setting(project_name)["sites"].\
- get(site)
- if site_preset:
- return site_preset["provider"]
-
- return "NA"
-
- @time_function
- def get_sync_representations(self, collection, active_site, remote_site):
- """
- Get representations that should be synced, these could be
- recognised by presence of document in 'files.sites', where key is
- a provider (GDrive, S3) and value is empty document or document
- without 'created_dt' field. (Don't put null to 'created_dt'!).
-
- Querying of 'to-be-synched' files is offloaded to Mongod for
- better performance. Goal is to get as few representations as
- possible.
- Args:
- collection (string): name of collection (in most cases matches
- project name
- active_site (string): identifier of current active site (could be
- 'local_0' when working from home, 'studio' when working in the
- studio (default)
- remote_site (string): identifier of remote site I want to sync to
-
- Returns:
- (list) of dictionaries
- """
- log.debug("Check representations for : {}".format(collection))
- self.connection.Session["AVALON_PROJECT"] = collection
- # retry_cnt - number of attempts to sync specific file before giving up
- retries_arr = self._get_retries_arr(collection)
- query = {
- "type": "representation",
- "$or": [
- {"$and": [
- {
- "files.sites": {
- "$elemMatch": {
- "name": active_site,
- "created_dt": {"$exists": True}
- }
- }}, {
- "files.sites": {
- "$elemMatch": {
- "name": {"$in": [remote_site]},
- "created_dt": {"$exists": False},
- "tries": {"$in": retries_arr}
- }
- }
- }]},
- {"$and": [
- {
- "files.sites": {
- "$elemMatch": {
- "name": active_site,
- "created_dt": {"$exists": False},
- "tries": {"$in": retries_arr}
- }
- }}, {
- "files.sites": {
- "$elemMatch": {
- "name": {"$in": [remote_site]},
- "created_dt": {"$exists": True}
- }
- }
- }
- ]}
- ]
- }
- log.debug("active_site:{} - remote_site:{}".format(active_site,
- remote_site))
- log.debug("query: {}".format(query))
- representations = self.connection.find(query)
-
- return representations
-
- def check_status(self, file, local_site, remote_site, config_preset):
- """
- Check synchronization status for single 'file' of single
- 'representation' by single 'provider'.
- (Eg. check if 'scene.ma' of lookdev.v10 should be synced to GDrive
-
- Always is comparing local record, eg. site with
- 'name' == self.presets[PROJECT_NAME]['config']["active_site"]
-
- Args:
- file (dictionary): of file from representation in Mongo
- local_site (string): - local side of compare (usually 'studio')
- remote_site (string): - gdrive etc.
- config_preset (dict): config about active site, retries
- Returns:
- (string) - one of SyncStatus
- """
- sites = file.get("sites") or []
- # if isinstance(sites, list): # temporary, old format of 'sites'
- # return SyncStatus.DO_NOTHING
- _, remote_rec = self._get_site_rec(sites, remote_site) or {}
- if remote_rec: # sync remote target
- created_dt = remote_rec.get("created_dt")
- if not created_dt:
- tries = self._get_tries_count_from_rec(remote_rec)
- # file will be skipped if unsuccessfully tried over threshold
- # error metadata needs to be purged manually in DB to reset
- if tries < int(config_preset["retry_cnt"]):
- return SyncStatus.DO_UPLOAD
- else:
- _, local_rec = self._get_site_rec(sites, local_site) or {}
- if not local_rec or not local_rec.get("created_dt"):
- tries = self._get_tries_count_from_rec(local_rec)
- # file will be skipped if unsuccessfully tried over
- # threshold times, error metadata needs to be purged
- # manually in DB to reset
- if tries < int(config_preset["retry_cnt"]):
- return SyncStatus.DO_DOWNLOAD
-
- return SyncStatus.DO_NOTHING
-
- async def upload(self, collection, file, representation, provider_name,
- remote_site_name, tree=None, preset=None):
- """
- Upload single 'file' of a 'representation' to 'provider'.
- Source url is taken from 'file' portion, where {root} placeholder
- is replaced by 'representation.Context.root'
- Provider could be one of implemented in provider.py.
-
- Updates MongoDB, fills in id of file from provider (ie. file_id
- from GDrive), 'created_dt' - time of upload
-
- 'provider_name' doesn't have to match to 'site_name', single
- provider (GDrive) might have multiple sites ('projectA',
- 'projectB')
-
- Args:
- collection (str): source collection
- file (dictionary): of file from representation in Mongo
- representation (dictionary): of representation
- provider_name (string): gdrive, gdc etc.
- site_name (string): site on provider, single provider(gdrive) could
- have multiple sites (different accounts, credentials)
- tree (dictionary): injected memory structure for performance
- preset (dictionary): site config ('credentials_url', 'root'...)
-
- """
- # create ids sequentially, upload file in parallel later
- with self.lock:
- # this part modifies structure on 'remote_site', only single
- # thread can do that at a time, upload/download to prepared
- # structure should be run in parallel
- remote_handler = lib.factory.get_provider(provider_name,
- remote_site_name,
- tree=tree,
- presets=preset)
-
- file_path = file.get("path", "")
- local_file_path, remote_file_path = self._resolve_paths(
- file_path, collection, remote_site_name, remote_handler
- )
-
- target_folder = os.path.dirname(remote_file_path)
- folder_id = remote_handler.create_folder(target_folder)
-
- if not folder_id:
- err = "Folder {} wasn't created. Check permissions.".\
- format(target_folder)
- raise NotADirectoryError(err)
-
- loop = asyncio.get_running_loop()
- file_id = await loop.run_in_executor(None,
- remote_handler.upload_file,
- local_file_path,
- remote_file_path,
- self,
- collection,
- file,
- representation,
- remote_site_name,
- True
- )
- return file_id
-
- async def download(self, collection, file, representation, provider_name,
- remote_site_name, tree=None, preset=None):
- """
- Downloads file to local folder denoted in representation.Context.
-
- Args:
- collection (str): source collection
- file (dictionary) : info about processed file
- representation (dictionary): repr that 'file' belongs to
- provider_name (string): 'gdrive' etc
- site_name (string): site on provider, single provider(gdrive) could
- have multiple sites (different accounts, credentials)
- tree (dictionary): injected memory structure for performance
- preset (dictionary): site config ('credentials_url', 'root'...)
-
- Returns:
- (string) - 'name' of local file
- """
- with self.lock:
- remote_handler = lib.factory.get_provider(provider_name,
- remote_site_name,
- tree=tree,
- presets=preset)
-
- file_path = file.get("path", "")
- local_file_path, remote_file_path = self._resolve_paths(
- file_path, collection, remote_site_name, remote_handler
- )
-
- local_folder = os.path.dirname(local_file_path)
- os.makedirs(local_folder, exist_ok=True)
-
- local_site = self.get_active_site(collection)
-
- loop = asyncio.get_running_loop()
- file_id = await loop.run_in_executor(None,
- remote_handler.download_file,
- remote_file_path,
- local_file_path,
- self,
- collection,
- file,
- representation,
- local_site,
- True
- )
- return file_id
-
- def update_db(self, collection, new_file_id, file, representation,
- site, error=None, progress=None):
- """
- Update 'provider' portion of records in DB with success (file_id)
- or error (exception)
-
- Args:
- collection (string): name of project - force to db connection as
- each file might come from different collection
- new_file_id (string):
- file (dictionary): info about processed file (pulled from DB)
- representation (dictionary): parent repr of file (from DB)
- site (string): label ('gdrive', 'S3')
- error (string): exception message
- progress (float): 0-1 of progress of upload/download
-
- Returns:
- None
- """
- representation_id = representation.get("_id")
- file_id = file.get("_id")
- query = {
- "_id": representation_id
- }
-
- update = {}
- if new_file_id:
- update["$set"] = self._get_success_dict(new_file_id)
- # reset previous errors if any
- update["$unset"] = self._get_error_dict("", "", "")
- elif progress is not None:
- update["$set"] = self._get_progress_dict(progress)
- else:
- tries = self._get_tries_count(file, site)
- tries += 1
-
- update["$set"] = self._get_error_dict(error, tries)
-
- arr_filter = [
- {'s.name': site},
- {'f._id': ObjectId(file_id)}
- ]
-
- self.connection.database[collection].update_one(
- query,
- update,
- upsert=True,
- array_filters=arr_filter
- )
-
- if progress is not None:
- return
-
- status = 'failed'
- error_str = 'with error {}'.format(error)
- if new_file_id:
- status = 'succeeded with id {}'.format(new_file_id)
- error_str = ''
-
- source_file = file.get("path", "")
- log.debug("File for {} - {source_file} process {status} {error_str}".
- format(representation_id,
- status=status,
- source_file=source_file,
- error_str=error_str))
-
- def _get_file_info(self, files, _id):
- """
- Return record from list of records which name matches to 'provider'
- Could be possibly refactored with '_get_provider_rec' together.
-
- Args:
- files (list): of dictionaries with info about published files
- _id (string): _id of specific file
-
- Returns:
- (int, dictionary): index from list and record with metadata
- about site (if/when created, errors..)
- OR (-1, None) if not present
- """
- for index, rec in enumerate(files):
- if rec.get("_id") == _id:
- return index, rec
-
- return -1, None
-
- def _get_site_rec(self, sites, site_name):
- """
- Return record from list of records which name matches to
- 'remote_site_name'
-
- Args:
- sites (list): of dictionaries
- site_name (string): 'local_XXX', 'gdrive'
-
- Returns:
- (int, dictionary): index from list and record with metadata
- about site (if/when created, errors..)
- OR (-1, None) if not present
- """
- for index, rec in enumerate(sites):
- if rec.get("name") == site_name:
- return index, rec
-
- return -1, None
-
- def reset_provider_for_file(self, collection, representation_id,
- side=None, file_id=None, site_name=None,
- remove=False, pause=None):
- """
- Reset information about synchronization for particular 'file_id'
- and provider.
- Useful for testing or forcing file to be reuploaded.
-
- 'side' and 'site_name' are disjunctive.
-
- 'side' is used for resetting local or remote side for
- current user for repre.
-
- 'site_name' is used to set synchronization for particular site.
- Should be used when repre should be synced to new site.
-
- Args:
- collection (string): name of project (eg. collection) in DB
- representation_id(string): _id of representation
- file_id (string): file _id in representation
- side (string): local or remote side
- site_name (string): for adding new site
- remove (bool): if True remove site altogether
- pause (bool or None): if True - pause, False - unpause
-
- Returns:
- throws ValueError
- """
- query = {
- "_id": ObjectId(representation_id)
- }
-
- representation = list(self.connection.database[collection].find(query))
- if not representation:
- raise ValueError("Representation {} not found in {}".
- format(representation_id, collection))
- if side and site_name:
- raise ValueError("Misconfiguration, only one of side and " +
- "site_name arguments should be passed.")
-
- local_site = self.get_active_site(collection)
- remote_site = self.get_remote_site(collection)
-
- if side:
- if side == 'local':
- site_name = local_site
- else:
- site_name = remote_site
-
- elem = {"name": site_name}
-
- if file_id: # reset site for particular file
- self._reset_site_for_file(collection, query,
- elem, file_id, site_name)
- elif side: # reset site for whole representation
- self._reset_site(collection, query, elem, site_name)
- elif remove: # remove site for whole representation
- self._remove_site(collection, query, representation, site_name)
- elif pause is not None:
- self._pause_unpause_site(collection, query,
- representation, site_name, pause)
- else: # add new site to all files for representation
- self._add_site(collection, query, representation, elem, site_name)
-
- def _update_site(self, collection, query, update, arr_filter):
- """
- Auxiliary method to call update_one function on DB
-
- Used for refactoring ugly reset_provider_for_file
- """
- self.connection.database[collection].update_one(
- query,
- update,
- upsert=True,
- array_filters=arr_filter
- )
-
- def _reset_site_for_file(self, collection, query,
- elem, file_id, site_name):
- """
- Resets 'site_name' for 'file_id' on representation in 'query' on
- 'collection'
- """
- update = {
- "$set": {"files.$[f].sites.$[s]": elem}
- }
- arr_filter = [
- {'s.name': site_name},
- {'f._id': ObjectId(file_id)}
- ]
-
- self._update_site(collection, query, update, arr_filter)
-
- def _reset_site(self, collection, query, elem, site_name):
- """
- Resets 'site_name' for all files of representation in 'query'
- """
- update = {
- "$set": {"files.$[].sites.$[s]": elem}
- }
-
- arr_filter = [
- {'s.name': site_name}
- ]
-
- self._update_site(collection, query, update, arr_filter)
-
- def _remove_site(self, collection, query, representation, site_name):
- """
- Removes 'site_name' for 'representation' in 'query'
-
- Throws ValueError if 'site_name' not found on 'representation'
- """
- found = False
- for file in representation.pop().get("files"):
- for site in file.get("sites"):
- if site["name"] == site_name:
- found = True
- break
- if not found:
- msg = "Site {} not found".format(site_name)
- log.info(msg)
- raise ValueError(msg)
-
- update = {
- "$pull": {"files.$[].sites": {"name": site_name}}
- }
- arr_filter = []
-
- self._update_site(collection, query, update, arr_filter)
-
- def _pause_unpause_site(self, collection, query,
- representation, site_name, pause):
- """
- Pauses/unpauses all files for 'representation' based on 'pause'
-
- Throws ValueError if 'site_name' not found on 'representation'
- """
- found = False
- site = None
- for file in representation.pop().get("files"):
- for site in file.get("sites"):
- if site["name"] == site_name:
- found = True
- break
- if not found:
- msg = "Site {} not found".format(site_name)
- log.info(msg)
- raise ValueError(msg)
-
- if pause:
- site['paused'] = pause
- else:
- if site.get('paused'):
- site.pop('paused')
-
- update = {
- "$set": {"files.$[].sites.$[s]": site}
- }
-
- arr_filter = [
- {'s.name': site_name}
- ]
-
- self._update_site(collection, query, update, arr_filter)
-
- def _add_site(self, collection, query, representation, elem, site_name):
- """
- Adds 'site_name' to 'representation' on 'collection'
-
- Throws ValueError if already present
- """
- for file in representation.pop().get("files"):
- for site in file.get("sites"):
- if site["name"] == site_name:
- msg = "Site {} already present".format(site_name)
- log.info(msg)
- raise ValueError(msg)
-
- update = {
- "$push": {"files.$[].sites": elem}
- }
-
- arr_filter = []
-
- self._update_site(collection, query, update, arr_filter)
-
- def _remove_local_file(self, collection, representation_id, site_name):
- """
- Removes all local files for 'site_name' of 'representation_id'
-
- Args:
- collection (string): project name (must match DB)
- representation_id (string): MongoDB _id value
- site_name (string): name of configured and active site
-
- Returns:
- only logs, catches IndexError and OSError
- """
- my_local_site = get_local_site_id()
- if my_local_site != site_name:
- self.log.warning("Cannot remove non local file for {}".
- format(site_name))
- return
-
- provider_name = self.get_provider_for_site(collection, site_name)
- handler = lib.factory.get_provider(provider_name, site_name)
-
- if handler and isinstance(handler, LocalDriveHandler):
- query = {
- "_id": ObjectId(representation_id)
- }
-
- representation = list(
- self.connection.database[collection].find(query))
- if not representation:
- self.log.debug("No repre {} found".format(
- representation_id))
- return
-
- representation = representation.pop()
- local_file_path = ''
- for file in representation.get("files"):
- local_file_path, _ = self._resolve_paths(file.get("path", ""),
- collection
- )
- try:
- self.log.debug("Removing {}".format(local_file_path))
- os.remove(local_file_path)
- except IndexError:
- msg = "No file set for {}".format(representation_id)
- self.log.debug(msg)
- raise ValueError(msg)
- except OSError:
- msg = "File {} cannot be removed".format(file["path"])
- self.log.warning(msg)
- raise ValueError(msg)
-
- try:
- folder = os.path.dirname(local_file_path)
- os.rmdir(folder)
- except OSError:
- msg = "folder {} cannot be removed".format(folder)
- self.log.warning(msg)
- raise ValueError(msg)
-
- def get_loop_delay(self, project_name):
- """
- Return count of seconds before next synchronization loop starts
- after finish of previous loop.
- Returns:
- (int): in seconds
- """
- ld = self.sync_project_settings[project_name]["config"]["loop_delay"]
- return int(ld)
-
- def show_widget(self):
- """Show dialog to enter credentials"""
- self.widget.show()
-
- def _get_success_dict(self, new_file_id):
- """
- Provide success metadata ("id", "created_dt") to be stored in Db.
- Used in $set: "DICT" part of query.
- Sites are array inside of array(file), so real indexes for both
- file and site are needed for upgrade in DB.
- Args:
- new_file_id: id of created file
- Returns:
- (dictionary)
- """
- val = {"files.$[f].sites.$[s].id": new_file_id,
- "files.$[f].sites.$[s].created_dt": datetime.now()}
- return val
-
- def _get_error_dict(self, error="", tries="", progress=""):
- """
- Provide error metadata to be stored in Db.
- Used for set (error and tries provided) or unset mode.
- Args:
- error: (string) - message
- tries: how many times failed
- Returns:
- (dictionary)
- """
- val = {"files.$[f].sites.$[s].last_failed_dt": datetime.now(),
- "files.$[f].sites.$[s].error": error,
- "files.$[f].sites.$[s].tries": tries,
- "files.$[f].sites.$[s].progress": progress
- }
- return val
-
- def _get_tries_count_from_rec(self, rec):
- """
- Get number of failed attempts to sync from site record
- Args:
- rec (dictionary): info about specific site record
- Returns:
- (int) - number of failed attempts
- """
- if not rec:
- return 0
- return rec.get("tries", 0)
-
- def _get_tries_count(self, file, provider):
- """
- Get number of failed attempts to sync
- Args:
- file (dictionary): info about specific file
- provider (string): name of site ('gdrive' or specific user site)
- Returns:
- (int) - number of failed attempts
- """
- _, rec = self._get_site_rec(file.get("sites", []), provider)
- return rec.get("tries", 0)
-
- def _get_progress_dict(self, progress):
- """
- Provide progress metadata to be stored in Db.
- Used during upload/download for GUI to show.
- Args:
- progress: (float) - 0-1 progress of upload/download
- Returns:
- (dictionary)
- """
- val = {"files.$[f].sites.$[s].progress": progress}
- return val
-
- def _resolve_paths(self, file_path, collection,
- remote_site_name=None, remote_handler=None):
- """
- Returns tuple of local and remote file paths with {root}
- placeholders replaced with proper values from Settings or Anatomy
-
- Args:
- file_path(string): path with {root}
- collection(string): project name
- remote_site_name(string): remote site
- remote_handler(AbstractProvider): implementation
- Returns:
- (string, string) - proper absolute paths
- """
- remote_file_path = ''
- if remote_handler:
- root_configs = self._get_roots_config(self.sync_project_settings,
- collection,
- remote_site_name)
-
- remote_file_path = remote_handler.resolve_path(file_path,
- root_configs)
-
- local_handler = lib.factory.get_provider(
- 'local_drive', self.get_active_site(collection))
- local_file_path = local_handler.resolve_path(
- file_path, None, self.get_anatomy(collection))
-
- return local_file_path, remote_file_path
-
- def _get_retries_arr(self, project_name):
- """
- Returns array with allowed values in 'tries' field. If repre
- contains these values, it means it was tried to be synchronized
- but failed. We try up to 'self.presets["retry_cnt"]' times before
- giving up and skipping representation.
- Returns:
- (list)
- """
- retry_cnt = self.sync_project_settings[project_name].\
- get("config")["retry_cnt"]
- arr = [i for i in range(int(retry_cnt))]
- arr.append(None)
-
- return arr
-
- def _get_roots_config(self, presets, project_name, site_name):
- """
- Returns configured root(s) for 'project_name' and 'site_name' from
- settings ('presets')
- """
- return presets[project_name]['sites'][site_name]['root']
-
+ initiated_handlers = {}
+ configured_sites = {}
+ all_sites = module._get_default_site_configs()
+ all_sites.update(project_setting.get("sites"))
+ for site_name, config in all_sites.items():
+ handler = initiated_handlers. \
+ get((config["provider"], site_name))
+ if not handler:
+ handler = lib.factory.get_provider(config["provider"],
+ project_name,
+ site_name,
+ presets=config)
+ initiated_handlers[(config["provider"], site_name)] = \
+ handler
+
+ if handler.is_active():
+ configured_sites[site_name] = True
+
+ return configured_sites
class SyncServerThread(threading.Thread):
"""
@@ -1437,7 +271,7 @@ class SyncServerThread(threading.Thread):
import time
start_time = None
self.module.set_sync_project_settings() # clean cache
- for collection, preset in self.module.get_sync_project_settings().\
+ for collection, preset in self.module.sync_project_settings.\
items():
start_time = time.time()
local_site, remote_site = self._working_sites(collection)
@@ -1462,6 +296,7 @@ class SyncServerThread(threading.Thread):
site_preset = preset.get('sites')[remote_site]
remote_provider = site_preset['provider']
handler = lib.factory.get_provider(remote_provider,
+ collection,
remote_site,
presets=site_preset)
limit = lib.factory.get_provider_batch_limit(
@@ -1491,13 +326,14 @@ class SyncServerThread(threading.Thread):
tree = handler.get_tree()
limit -= 1
task = asyncio.create_task(
- self.module.upload(collection,
- file,
- sync,
- remote_provider,
- remote_site,
- tree,
- site_preset))
+ upload(self.module,
+ collection,
+ file,
+ sync,
+ remote_provider,
+ remote_site,
+ tree,
+ site_preset))
task_files_to_process.append(task)
# store info for exception handlingy
files_processed_info.append((file,
@@ -1510,13 +346,14 @@ class SyncServerThread(threading.Thread):
tree = handler.get_tree()
limit -= 1
task = asyncio.create_task(
- self.module.download(collection,
- file,
- sync,
- remote_provider,
- remote_site,
- tree,
- site_preset))
+ download(self.module,
+ collection,
+ file,
+ sync,
+ remote_provider,
+ remote_site,
+ tree,
+ site_preset))
task_files_to_process.append(task)
files_processed_info.append((file,
@@ -1592,8 +429,8 @@ class SyncServerThread(threading.Thread):
remote_site))
return None, None
- if not all([self.module.site_is_working(collection, local_site),
- self.module.site_is_working(collection, remote_site)]):
+ if not all([site_is_working(self.module, collection, local_site),
+ site_is_working(self.module, collection, remote_site)]):
log.debug("Some of the sites {} - {} is not ".format(local_site,
remote_site) +
"working properly")
diff --git a/openpype/modules/sync_server/sync_server_module.py b/openpype/modules/sync_server/sync_server_module.py
new file mode 100644
index 0000000000..59c3787789
--- /dev/null
+++ b/openpype/modules/sync_server/sync_server_module.py
@@ -0,0 +1,1193 @@
+import os
+from bson.objectid import ObjectId
+from datetime import datetime
+import threading
+
+from avalon.api import AvalonMongoDB
+
+from .. import PypeModule, ITrayModule
+from openpype.api import (
+ Anatomy,
+ get_project_settings,
+ get_local_site_id)
+from openpype.lib import PypeLogger
+
+from .providers.local_drive import LocalDriveHandler
+
+from .utils import time_function, SyncStatus
+
+
+log = PypeLogger().get_logger("SyncServer")
+
+
+class SyncServerModule(PypeModule, ITrayModule):
+ """
+ Synchronization server that is syncing published files from local to
+ any of implemented providers (like GDrive, S3 etc.)
+ Runs in the background and checks all representations, looks for files
+ that are marked to be in different location than 'studio' (temporary),
+ checks if 'created_dt' field is present denoting successful sync
+ with provider destination.
+ Sites structure is created during publish OR by calling 'add_site'
+ method.
+
+ By default it will always contain 1 record with
+ "name" == self.presets["active_site"] and
+ filled "created_dt" AND 1 or multiple records for all defined
+ remote sites, where "created_dt" is not present.
+ This highlights that file should be uploaded to
+ remote destination
+
+ ''' - example of synced file test_Cylinder_lookMain_v010.ma to GDrive
+ "files" : [
+ {
+ "path" : "{root}/Test/Assets/Cylinder/publish/look/lookMain/v010/
+ test_Cylinder_lookMain_v010.ma",
+ "_id" : ObjectId("5eeb25e411e06a16209ab78f"),
+ "hash" : "test_Cylinder_lookMain_v010,ma|1592468963,24|4822",
+ "size" : NumberLong(4822),
+ "sites" : [
+ {
+ "name": "john_local_XD4345",
+ "created_dt" : ISODate("2020-05-22T08:05:44.000Z")
+ },
+ {
+ "id" : ObjectId("5eeb25e411e06a16209ab78f"),
+ "name": "gdrive",
+ "created_dt" : ISODate("2020-05-55T08:54:35.833Z")
+ ]
+ }
+ },
+ '''
+ Each Tray app has assigned its own self.presets["local_id"]
+ used in sites as a name.
+ Tray is searching only for records where name matches its
+ self.presets["active_site"] + self.presets["remote_site"].
+ "active_site" could be storage in studio ('studio'), or specific
+ "local_id" when user is working disconnected from home.
+ If the local record has its "created_dt" filled, it is a source and
+ process will try to upload the file to all defined remote sites.
+
+ Remote files "id" is real id that could be used in appropriate API.
+ Local files have "id" too, for conformity, contains just file name.
+ It is expected that multiple providers will be implemented in separate
+ classes and registered in 'providers.py'.
+
+ """
+ # limit querying DB to look for X number of representations that should
+ # be sync, we try to run more loops with less records
+ # actual number of files synced could be lower as providers can have
+ # different limits imposed by its API
+ # set 0 to no limit
+ REPRESENTATION_LIMIT = 100
+ DEFAULT_SITE = 'studio'
+ LOCAL_SITE = 'local'
+ LOG_PROGRESS_SEC = 5 # how often log progress to DB
+
+ name = "sync_server"
+ label = "Sync Queue"
+
+ def initialize(self, module_settings):
+ """
+ Called during Module Manager creation.
+
+ Collects needed data, checks asyncio presence.
+ Sets 'enabled' according to global settings for the module.
+ Shouldnt be doing any initialization, thats a job for 'tray_init'
+ """
+ self.enabled = module_settings[self.name]["enabled"]
+
+ # some parts of code need to run sequentially, not in async
+ self.lock = None
+ # settings for all enabled projects for sync
+ self._sync_project_settings = None
+ self.sync_server_thread = None # asyncio requires new thread
+
+ self.action_show_widget = None
+ self._paused = False
+ self._paused_projects = set()
+ self._paused_representations = set()
+ self._anatomies = {}
+
+ self._connection = None
+
+ """ Start of Public API """
+ def add_site(self, collection, representation_id, site_name=None,
+ force=False):
+ """
+ Adds new site to representation to be synced.
+
+ 'collection' must have synchronization enabled (globally or
+ project only)
+
+ Used as a API endpoint from outside applications (Loader etc)
+
+ Args:
+ collection (string): project name (must match DB)
+ representation_id (string): MongoDB _id value
+ site_name (string): name of configured and active site
+ force (bool): reset site if exists
+
+ Returns:
+ throws ValueError if any issue
+ """
+ if not self.get_sync_project_setting(collection):
+ raise ValueError("Project not configured")
+
+ if not site_name:
+ site_name = self.DEFAULT_SITE
+
+ self.reset_provider_for_file(collection,
+ representation_id,
+ site_name=site_name, force=force)
+
+ # public facing API
+ def remove_site(self, collection, representation_id, site_name,
+ remove_local_files=False):
+ """
+ Removes 'site_name' for particular 'representation_id' on
+ 'collection'
+
+ Args:
+ collection (string): project name (must match DB)
+ representation_id (string): MongoDB _id value
+ site_name (string): name of configured and active site
+ remove_local_files (bool): remove only files for 'local_id'
+ site
+
+ Returns:
+ throws ValueError if any issue
+ """
+ if not self.get_sync_project_setting(collection):
+ raise ValueError("Project not configured")
+
+ self.reset_provider_for_file(collection,
+ representation_id,
+ site_name=site_name,
+ remove=True)
+ if remove_local_files:
+ self._remove_local_file(collection, representation_id, site_name)
+
+ def clear_project(self, collection, site_name):
+ """
+ Clear 'collection' of 'site_name' and its local files
+
+ Works only on real local sites, not on 'studio'
+ """
+ query = {
+ "type": "representation",
+ "files.sites.name": site_name
+ }
+
+ representations = list(
+ self.connection.database[collection].find(query))
+ if not representations:
+ self.log.debug("No repre found")
+ return
+
+ for repre in representations:
+ self.remove_site(collection, repre.get("_id"), site_name, True)
+
+ def pause_representation(self, collection, representation_id, site_name):
+ """
+ Sets 'representation_id' as paused, eg. no syncing should be
+ happening on it.
+
+ Args:
+ collection (string): project name
+ representation_id (string): MongoDB objectId value
+ site_name (string): 'gdrive', 'studio' etc.
+ """
+ log.info("Pausing SyncServer for {}".format(representation_id))
+ self._paused_representations.add(representation_id)
+ self.reset_provider_for_file(collection, representation_id,
+ site_name=site_name, pause=True)
+
+ def unpause_representation(self, collection, representation_id, site_name):
+ """
+ Sets 'representation_id' as unpaused.
+
+ Does not fail or warn if repre wasn't paused.
+
+ Args:
+ collection (string): project name
+ representation_id (string): MongoDB objectId value
+ site_name (string): 'gdrive', 'studio' etc.
+ """
+ log.info("Unpausing SyncServer for {}".format(representation_id))
+ try:
+ self._paused_representations.remove(representation_id)
+ except KeyError:
+ pass
+ # self.paused_representations is not persistent
+ self.reset_provider_for_file(collection, representation_id,
+ site_name=site_name, pause=False)
+
+ def is_representation_paused(self, representation_id,
+ check_parents=False, project_name=None):
+ """
+ Returns if 'representation_id' is paused or not.
+
+ Args:
+ representation_id (string): MongoDB objectId value
+ check_parents (bool): check if parent project or server itself
+ are not paused
+ project_name (string): project to check if paused
+
+ if 'check_parents', 'project_name' should be set too
+ Returns:
+ (bool)
+ """
+ condition = representation_id in self._paused_representations
+ if check_parents and project_name:
+ condition = condition or \
+ self.is_project_paused(project_name) or \
+ self.is_paused()
+ return condition
+
+ def pause_project(self, project_name):
+ """
+ Sets 'project_name' as paused, eg. no syncing should be
+ happening on all representation inside.
+
+ Args:
+ project_name (string): collection name
+ """
+ log.info("Pausing SyncServer for {}".format(project_name))
+ self._paused_projects.add(project_name)
+
+ def unpause_project(self, project_name):
+ """
+ Sets 'project_name' as unpaused
+
+ Does not fail or warn if project wasn't paused.
+
+ Args:
+ project_name (string): collection name
+ """
+ log.info("Unpausing SyncServer for {}".format(project_name))
+ try:
+ self._paused_projects.remove(project_name)
+ except KeyError:
+ pass
+
+ def is_project_paused(self, project_name, check_parents=False):
+ """
+ Returns if 'project_name' is paused or not.
+
+ Args:
+ project_name (string): collection name
+ check_parents (bool): check if server itself
+ is not paused
+ Returns:
+ (bool)
+ """
+ condition = project_name in self._paused_projects
+ if check_parents:
+ condition = condition or self.is_paused()
+ return condition
+
+ def pause_server(self):
+ """
+ Pause sync server
+
+ It won't check anything, not uploading/downloading...
+ """
+ log.info("Pausing SyncServer")
+ self._paused = True
+
+ def unpause_server(self):
+ """
+ Unpause server
+ """
+ log.info("Unpausing SyncServer")
+ self._paused = False
+
+ def is_paused(self):
+ """ Is server paused """
+ return self._paused
+
+ def get_active_sites(self, project_name):
+ """
+ Returns list of active sites for 'project_name'.
+
+ By default it returns ['studio'], this site is default
+ and always present even if SyncServer is not enabled. (for publish)
+
+ Used mainly for Local settings for user override.
+
+ Args:
+ project_name (string):
+
+ Returns:
+ (list) of strings
+ """
+ return self.get_active_sites_from_settings(
+ get_project_settings(project_name))
+
+ def get_active_sites_from_settings(self, settings):
+ """
+ List available active sites from incoming 'settings'. Used for
+ returning 'default' values for Local Settings
+
+ Args:
+ settings (dict): full settings (global + project)
+ Returns:
+ (list) of strings
+ """
+ sync_settings = self._parse_sync_settings_from_settings(settings)
+
+ return self._get_enabled_sites_from_settings(sync_settings)
+
+ def get_configurable_items_for_site(self, project_name, site_name):
+ """
+ Returns list of items that should be configurable by User
+
+ Returns:
+ (list of dict)
+ [{key:"root", label:"root", value:"valueFromSettings"}]
+ """
+ # if project_name is None: ..for get_default_project_settings
+ # return handler.get_configurable_items()
+ pass
+
+ def get_active_site(self, project_name):
+ """
+ Returns active (mine) site for 'project_name' from settings
+
+ Returns:
+ (string)
+ """
+ active_site = self.get_sync_project_setting(
+ project_name)['config']['active_site']
+ if active_site == self.LOCAL_SITE:
+ return get_local_site_id()
+ return active_site
+
+ # remote sites
+ def get_remote_sites(self, project_name):
+ """
+ Returns all remote sites configured on 'project_name'.
+
+ If 'project_name' is not enabled for syncing returns [].
+
+ Used by Local setting to allow user choose remote site.
+
+ Args:
+ project_name (string):
+
+ Returns:
+ (list) of strings
+ """
+ return self.get_remote_sites_from_settings(
+ get_project_settings(project_name))
+
+ def get_remote_sites_from_settings(self, settings):
+ """
+ Get remote sites for returning 'default' values for Local Settings
+ """
+ sync_settings = self._parse_sync_settings_from_settings(settings)
+
+ return self._get_remote_sites_from_settings(sync_settings)
+
+ def get_remote_site(self, project_name):
+ """
+ Returns remote (theirs) site for 'project_name' from settings
+ """
+ remote_site = self.get_sync_project_setting(
+ project_name)['config']['remote_site']
+ if remote_site == self.LOCAL_SITE:
+ return get_local_site_id()
+
+ return remote_site
+
+ """ End of Public API """
+
+ def get_local_file_path(self, collection, site_name, file_path):
+ """
+ Externalized for app
+ """
+ handler = LocalDriveHandler(collection, site_name)
+ local_file_path = handler.resolve_path(file_path)
+
+ return local_file_path
+
+    def _get_remote_sites_from_settings(self, sync_settings):
+        if not self.enabled or not sync_settings['enabled']:
+            return []
+
+        remote_sites = [self.DEFAULT_SITE, self.LOCAL_SITE]
+        # 'sites' may be missing from project settings; guard against None
+        remote_sites.extend((sync_settings.get("sites") or {}).keys())
+
+        return list(set(remote_sites))
+
+ def _get_enabled_sites_from_settings(self, sync_settings):
+ sites = [self.DEFAULT_SITE]
+ if self.enabled and sync_settings['enabled']:
+ sites.append(self.LOCAL_SITE)
+
+ return sites
+
+ def connect_with_modules(self, *_a, **kw):
+ return
+
+    def tray_init(self):
+        """
+        Actual initialization of Sync Server.
+
+        Called when tray is initialized, it checks if module should be
+        enabled. If not, no initialization necessary.
+        """
+        # import only in tray, because of Python2 hosts
+        from .sync_server import SyncServerThread
+
+        if not self.enabled:
+            return
+
+        self.lock = threading.Lock()
+
+        try:
+            self.sync_server_thread = SyncServerThread(self)
+            from .tray.app import SyncServerWindow
+            self.widget = SyncServerWindow(self)
+        except ValueError:
+            log.info("No system setting for sync. Not syncing.", exc_info=True)
+            self.enabled = False
+        except KeyError:
+            log.info((
+                "There are not set presets for SyncServer OR "
+                "Credentials provided are invalid, "
+                "no syncing possible: {}").
+                format(str(self.sync_project_settings)), exc_info=True)
+            self.enabled = False
+
+ def tray_start(self):
+ """
+ Triggered when Tray is started.
+
+ Checks if configuration presets are available and if there is
+ any provider ('gdrive', 'S3') that is activated
+ (eg. has valid credentials).
+
+ Returns:
+ None
+ """
+ if self.sync_project_settings and self.enabled:
+ self.sync_server_thread.start()
+ else:
+ log.info("No presets or active providers. " +
+ "Synchronization not possible.")
+
+ def tray_exit(self):
+ """
+ Stops sync thread if running.
+
+ Called from Module Manager
+ """
+ if not self.sync_server_thread:
+ return
+
+ if not self.is_running:
+ return
+ try:
+ log.info("Stopping sync server server")
+ self.sync_server_thread.is_running = False
+ self.sync_server_thread.stop()
+ except Exception:
+ log.warning(
+ "Error has happened during Killing sync server",
+ exc_info=True
+ )
+
+ def tray_menu(self, parent_menu):
+ if not self.enabled:
+ return
+
+ from Qt import QtWidgets
+ """Add menu or action to Tray(or parent)'s menu"""
+ action = QtWidgets.QAction(self.label, parent_menu)
+ action.triggered.connect(self.show_widget)
+ parent_menu.addAction(action)
+ parent_menu.addSeparator()
+
+ self.action_show_widget = action
+
+ @property
+ def is_running(self):
+ return self.sync_server_thread.is_running
+
+    def get_anatomy(self, project_name):
+        """
+        Get already created or newly created anatomy for project
+
+        Args:
+            project_name (string):
+
+        Return:
+            (Anatomy)
+        """
+        return self._anatomies.get(project_name) or Anatomy(project_name)
+
+ @property
+ def connection(self):
+ if self._connection is None:
+ self._connection = AvalonMongoDB()
+
+ return self._connection
+
+ @property
+ def sync_project_settings(self):
+ if self._sync_project_settings is None:
+ self.set_sync_project_settings()
+
+ return self._sync_project_settings
+
+ def set_sync_project_settings(self):
+ """
+ Set sync_project_settings for all projects (caching)
+
+ For performance
+ """
+ sync_project_settings = {}
+
+ for collection in self.connection.database.collection_names(False):
+ sync_settings = self._parse_sync_settings_from_settings(
+ get_project_settings(collection))
+ if sync_settings:
+ default_sites = self._get_default_site_configs()
+ sync_settings['sites'].update(default_sites)
+ sync_project_settings[collection] = sync_settings
+
+ if not sync_project_settings:
+ log.info("No enabled and configured projects for sync.")
+
+ self._sync_project_settings = sync_project_settings
+
+ def get_sync_project_setting(self, project_name):
+ """ Handles pulling sync_server's settings for enabled 'project_name'
+
+ Args:
+ project_name (str): used in project settings
+ Returns:
+ (dict): settings dictionary for the enabled project,
+ empty if no settings or sync is disabled
+ """
+ # presets set already, do not call again and again
+ # self.log.debug("project preset {}".format(self.presets))
+ if self.sync_project_settings and \
+ self.sync_project_settings.get(project_name):
+ return self.sync_project_settings.get(project_name)
+
+ settings = get_project_settings(project_name)
+ return self._parse_sync_settings_from_settings(settings)
+
+    def _parse_sync_settings_from_settings(self, settings):
+        """ settings from api.get_project_settings, TODO rename """
+        sync_settings = (settings.get("global") or {}).get("sync_server")
+        if not sync_settings:
+            log.info("No project sync setting, not syncing.")
+            return {}
+        if sync_settings.get("enabled"):
+            return sync_settings
+
+        return {}
+
+ def _get_default_site_configs(self):
+ """
+ Returns skeleton settings for 'studio' and user's local site
+ """
+ default_config = {'provider': 'local_drive'}
+ all_sites = {self.DEFAULT_SITE: default_config,
+ get_local_site_id(): default_config}
+ return all_sites
+
+ def get_provider_for_site(self, project_name, site):
+ """
+ Return provider name for site.
+ """
+ site_preset = self.get_sync_project_setting(project_name)["sites"].\
+ get(site)
+ if site_preset:
+ return site_preset["provider"]
+
+ return "NA"
+
+ @time_function
+ def get_sync_representations(self, collection, active_site, remote_site):
+ """
+ Get representations that should be synced, these could be
+ recognised by presence of document in 'files.sites', where key is
+ a provider (GDrive, S3) and value is empty document or document
+ without 'created_dt' field. (Don't put null to 'created_dt'!).
+
+ Querying of 'to-be-synched' files is offloaded to Mongod for
+ better performance. Goal is to get as few representations as
+ possible.
+ Args:
+ collection (string): name of collection (in most cases matches
+ project name
+ active_site (string): identifier of current active site (could be
+ 'local_0' when working from home, 'studio' when working in the
+ studio (default)
+ remote_site (string): identifier of remote site I want to sync to
+
+ Returns:
+ (list) of dictionaries
+ """
+ log.debug("Check representations for : {}".format(collection))
+ self.connection.Session["AVALON_PROJECT"] = collection
+ # retry_cnt - number of attempts to sync specific file before giving up
+ retries_arr = self._get_retries_arr(collection)
+ query = {
+ "type": "representation",
+ "$or": [
+ {"$and": [
+ {
+ "files.sites": {
+ "$elemMatch": {
+ "name": active_site,
+ "created_dt": {"$exists": True}
+ }
+ }}, {
+ "files.sites": {
+ "$elemMatch": {
+ "name": {"$in": [remote_site]},
+ "created_dt": {"$exists": False},
+ "tries": {"$in": retries_arr}
+ }
+ }
+ }]},
+ {"$and": [
+ {
+ "files.sites": {
+ "$elemMatch": {
+ "name": active_site,
+ "created_dt": {"$exists": False},
+ "tries": {"$in": retries_arr}
+ }
+ }}, {
+ "files.sites": {
+ "$elemMatch": {
+ "name": {"$in": [remote_site]},
+ "created_dt": {"$exists": True}
+ }
+ }
+ }
+ ]}
+ ]
+ }
+ log.debug("active_site:{} - remote_site:{}".format(active_site,
+ remote_site))
+ log.debug("query: {}".format(query))
+ representations = self.connection.find(query)
+
+ return representations
+
+    def check_status(self, file, local_site, remote_site, config_preset):
+        """
+        Check synchronization status for single 'file' of single
+        'representation' by single 'provider'.
+        (Eg. check if 'scene.ma' of lookdev.v10 should be synced to GDrive
+
+        Always is comparing local record, eg. site with
+        'name' == self.presets[PROJECT_NAME]['config']["active_site"]
+
+        Args:
+            file (dictionary):  of file from representation in Mongo
+            local_site (string):  - local side of compare (usually 'studio')
+            remote_site (string):  - gdrive etc.
+            config_preset (dict): config about active site, retries
+        Returns:
+            (string) - one of SyncStatus
+        """
+        sites = file.get("sites") or []
+        # if isinstance(sites, list):  # temporary, old format of 'sites'
+        #     return SyncStatus.DO_NOTHING
+        _, remote_rec = self._get_site_rec(sites, remote_site) or (-1, None)
+        if remote_rec:  # sync remote target
+            created_dt = remote_rec.get("created_dt")
+            if not created_dt:
+                tries = self._get_tries_count_from_rec(remote_rec)
+                # file will be skipped if unsuccessfully tried over threshold
+                # error metadata needs to be purged manually in DB to reset
+                if tries < int(config_preset["retry_cnt"]):
+                    return SyncStatus.DO_UPLOAD
+            else:
+                _, local_rec = self._get_site_rec(sites, local_site) or \
+                    (-1, None)
+                if not local_rec or not local_rec.get("created_dt"):
+                    tries = self._get_tries_count_from_rec(local_rec)
+                    # file will be skipped if unsuccessfully tried over
+                    # threshold times, error metadata needs to be purged
+                    # manually in DB to reset
+                    if tries < int(config_preset["retry_cnt"]):
+                        return SyncStatus.DO_DOWNLOAD
+
+        return SyncStatus.DO_NOTHING
+
+ def update_db(self, collection, new_file_id, file, representation,
+ site, error=None, progress=None):
+ """
+ Update 'provider' portion of records in DB with success (file_id)
+ or error (exception)
+
+ Args:
+ collection (string): name of project - force to db connection as
+ each file might come from different collection
+ new_file_id (string):
+ file (dictionary): info about processed file (pulled from DB)
+ representation (dictionary): parent repr of file (from DB)
+ site (string): label ('gdrive', 'S3')
+ error (string): exception message
+ progress (float): 0-1 of progress of upload/download
+
+ Returns:
+ None
+ """
+ representation_id = representation.get("_id")
+ file_id = file.get("_id")
+ query = {
+ "_id": representation_id
+ }
+
+ update = {}
+ if new_file_id:
+ update["$set"] = self._get_success_dict(new_file_id)
+ # reset previous errors if any
+ update["$unset"] = self._get_error_dict("", "", "")
+ elif progress is not None:
+ update["$set"] = self._get_progress_dict(progress)
+ else:
+ tries = self._get_tries_count(file, site)
+ tries += 1
+
+ update["$set"] = self._get_error_dict(error, tries)
+
+ arr_filter = [
+ {'s.name': site},
+ {'f._id': ObjectId(file_id)}
+ ]
+
+ self.connection.database[collection].update_one(
+ query,
+ update,
+ upsert=True,
+ array_filters=arr_filter
+ )
+
+ if progress is not None:
+ return
+
+ status = 'failed'
+ error_str = 'with error {}'.format(error)
+ if new_file_id:
+ status = 'succeeded with id {}'.format(new_file_id)
+ error_str = ''
+
+ source_file = file.get("path", "")
+ log.debug("File for {} - {source_file} process {status} {error_str}".
+ format(representation_id,
+ status=status,
+ source_file=source_file,
+ error_str=error_str))
+
+ def _get_file_info(self, files, _id):
+ """
+ Return record from list of records which name matches to 'provider'
+ Could be possibly refactored with '_get_provider_rec' together.
+
+ Args:
+ files (list): of dictionaries with info about published files
+ _id (string): _id of specific file
+
+ Returns:
+ (int, dictionary): index from list and record with metadata
+ about site (if/when created, errors..)
+ OR (-1, None) if not present
+ """
+ for index, rec in enumerate(files):
+ if rec.get("_id") == _id:
+ return index, rec
+
+ return -1, None
+
+ def _get_site_rec(self, sites, site_name):
+ """
+ Return record from list of records which name matches to
+ 'remote_site_name'
+
+ Args:
+ sites (list): of dictionaries
+ site_name (string): 'local_XXX', 'gdrive'
+
+ Returns:
+ (int, dictionary): index from list and record with metadata
+ about site (if/when created, errors..)
+ OR (-1, None) if not present
+ """
+ for index, rec in enumerate(sites):
+ if rec.get("name") == site_name:
+ return index, rec
+
+ return -1, None
+
+ def reset_provider_for_file(self, collection, representation_id,
+ side=None, file_id=None, site_name=None,
+ remove=False, pause=None, force=False):
+ """
+ Reset information about synchronization for particular 'file_id'
+ and provider.
+ Useful for testing or forcing file to be reuploaded.
+
+ 'side' and 'site_name' are disjunctive.
+
+ 'side' is used for resetting local or remote side for
+ current user for repre.
+
+ 'site_name' is used to set synchronization for particular site.
+ Should be used when repre should be synced to new site.
+
+ Args:
+ collection (string): name of project (eg. collection) in DB
+ representation_id(string): _id of representation
+ file_id (string): file _id in representation
+ side (string): local or remote side
+ site_name (string): for adding new site
+ remove (bool): if True remove site altogether
+ pause (bool or None): if True - pause, False - unpause
+ force (bool): hard reset - currently only for add_site
+
+ Returns:
+ throws ValueError
+ """
+ query = {
+ "_id": ObjectId(representation_id)
+ }
+
+ representation = list(self.connection.database[collection].find(query))
+ if not representation:
+ raise ValueError("Representation {} not found in {}".
+ format(representation_id, collection))
+ if side and site_name:
+ raise ValueError("Misconfiguration, only one of side and " +
+ "site_name arguments should be passed.")
+
+ local_site = self.get_active_site(collection)
+ remote_site = self.get_remote_site(collection)
+
+ if side:
+ if side == 'local':
+ site_name = local_site
+ else:
+ site_name = remote_site
+
+ elem = {"name": site_name}
+
+ if file_id: # reset site for particular file
+ self._reset_site_for_file(collection, query,
+ elem, file_id, site_name)
+ elif side: # reset site for whole representation
+ self._reset_site(collection, query, elem, site_name)
+ elif remove: # remove site for whole representation
+ self._remove_site(collection, query, representation, site_name)
+ elif pause is not None:
+ self._pause_unpause_site(collection, query,
+ representation, site_name, pause)
+ else: # add new site to all files for representation
+ self._add_site(collection, query, representation, elem, site_name,
+ force)
+
+ def _update_site(self, collection, query, update, arr_filter):
+ """
+ Auxiliary method to call update_one function on DB
+
+ Used for refactoring ugly reset_provider_for_file
+ """
+ self.connection.database[collection].update_one(
+ query,
+ update,
+ upsert=True,
+ array_filters=arr_filter
+ )
+
+ def _reset_site_for_file(self, collection, query,
+ elem, file_id, site_name):
+ """
+ Resets 'site_name' for 'file_id' on representation in 'query' on
+ 'collection'
+ """
+ update = {
+ "$set": {"files.$[f].sites.$[s]": elem}
+ }
+ arr_filter = [
+ {'s.name': site_name},
+ {'f._id': ObjectId(file_id)}
+ ]
+
+ self._update_site(collection, query, update, arr_filter)
+
+ def _reset_site(self, collection, query, elem, site_name):
+ """
+ Resets 'site_name' for all files of representation in 'query'
+ """
+ update = {
+ "$set": {"files.$[].sites.$[s]": elem}
+ }
+
+ arr_filter = [
+ {'s.name': site_name}
+ ]
+
+ self._update_site(collection, query, update, arr_filter)
+
+ def _remove_site(self, collection, query, representation, site_name):
+ """
+ Removes 'site_name' for 'representation' in 'query'
+
+ Throws ValueError if 'site_name' not found on 'representation'
+ """
+ found = False
+ for repre_file in representation.pop().get("files"):
+ for site in repre_file.get("sites"):
+ if site["name"] == site_name:
+ found = True
+ break
+ if not found:
+ msg = "Site {} not found".format(site_name)
+ log.info(msg)
+ raise ValueError(msg)
+
+ update = {
+ "$pull": {"files.$[].sites": {"name": site_name}}
+ }
+ arr_filter = []
+
+ self._update_site(collection, query, update, arr_filter)
+
+    def _pause_unpause_site(self, collection, query,
+                            representation, site_name, pause):
+        """
+        Pauses/unpauses all files for 'representation' based on 'pause'
+
+        Throws ValueError if 'site_name' not found on 'representation'
+        """
+        found, site = False, None
+        for repre_file in representation.pop().get("files"):
+            for site in repre_file.get("sites"):
+                if site["name"] == site_name:
+                    found = True
+                    break
+            if found:  # stop scanning files so 'site' keeps matched record
+                break
+        if not found:
+            msg = "Site {} not found".format(site_name)
+            log.info(msg)
+            raise ValueError(msg)
+
+        if pause:
+            site['paused'] = pause
+        elif site.get('paused'):
+            site.pop('paused')
+
+        update = {
+            "$set": {"files.$[].sites.$[s]": site}
+        }
+
+        arr_filter = [
+            {'s.name': site_name}
+        ]
+
+        self._update_site(collection, query, update, arr_filter)
+
+ def _add_site(self, collection, query, representation, elem, site_name,
+ force=False):
+ """
+ Adds 'site_name' to 'representation' on 'collection'
+
+ Use 'force' to remove existing or raises ValueError
+ """
+ for repre_file in representation.pop().get("files"):
+ for site in repre_file.get("sites"):
+ if site["name"] == site_name:
+ if force:
+ self._reset_site_for_file(collection, query,
+ elem, repre_file["_id"],
+ site_name)
+ return
+ else:
+ msg = "Site {} already present".format(site_name)
+ log.info(msg)
+ raise ValueError(msg)
+
+ update = {
+ "$push": {"files.$[].sites": elem}
+ }
+
+ arr_filter = []
+
+ self._update_site(collection, query, update, arr_filter)
+
+ def _remove_local_file(self, collection, representation_id, site_name):
+ """
+ Removes all local files for 'site_name' of 'representation_id'
+
+ Args:
+ collection (string): project name (must match DB)
+ representation_id (string): MongoDB _id value
+ site_name (string): name of configured and active site
+
+ Returns:
+ only logs, catches IndexError and OSError
+ """
+ my_local_site = get_local_site_id()
+ if my_local_site != site_name:
+ self.log.warning("Cannot remove non local file for {}".
+ format(site_name))
+ return
+
+ provider_name = self.get_provider_for_site(collection, site_name)
+
+ if provider_name == 'local_drive':
+ query = {
+ "_id": ObjectId(representation_id)
+ }
+
+ representation = list(
+ self.connection.database[collection].find(query))
+ if not representation:
+ self.log.debug("No repre {} found".format(
+ representation_id))
+ return
+
+ representation = representation.pop()
+ local_file_path = ''
+ for file in representation.get("files"):
+ local_file_path = self.get_local_file_path(collection,
+ site_name,
+ file.get("path", "")
+ )
+ try:
+ self.log.debug("Removing {}".format(local_file_path))
+ os.remove(local_file_path)
+ except IndexError:
+ msg = "No file set for {}".format(representation_id)
+ self.log.debug(msg)
+ raise ValueError(msg)
+ except OSError:
+ msg = "File {} cannot be removed".format(file["path"])
+ self.log.warning(msg)
+ raise ValueError(msg)
+
+ folder = None
+ try:
+ folder = os.path.dirname(local_file_path)
+ os.rmdir(folder)
+ except OSError:
+ msg = "folder {} cannot be removed".format(folder)
+ self.log.warning(msg)
+ raise ValueError(msg)
+
+ def get_loop_delay(self, project_name):
+ """
+ Return count of seconds before next synchronization loop starts
+ after finish of previous loop.
+ Returns:
+ (int): in seconds
+ """
+ ld = self.sync_project_settings[project_name]["config"]["loop_delay"]
+ return int(ld)
+
+ def show_widget(self):
+ """Show dialog to enter credentials"""
+ self.widget.show()
+
+ def _get_success_dict(self, new_file_id):
+ """
+ Provide success metadata ("id", "created_dt") to be stored in Db.
+ Used in $set: "DICT" part of query.
+ Sites are array inside of array(file), so real indexes for both
+ file and site are needed for upgrade in DB.
+ Args:
+ new_file_id: id of created file
+ Returns:
+ (dictionary)
+ """
+ val = {"files.$[f].sites.$[s].id": new_file_id,
+ "files.$[f].sites.$[s].created_dt": datetime.now()}
+ return val
+
+ def _get_error_dict(self, error="", tries="", progress=""):
+ """
+ Provide error metadata to be stored in Db.
+ Used for set (error and tries provided) or unset mode.
+ Args:
+ error: (string) - message
+ tries: how many times failed
+ Returns:
+ (dictionary)
+ """
+ val = {"files.$[f].sites.$[s].last_failed_dt": datetime.now(),
+ "files.$[f].sites.$[s].error": error,
+ "files.$[f].sites.$[s].tries": tries,
+ "files.$[f].sites.$[s].progress": progress
+ }
+ return val
+
+ def _get_tries_count_from_rec(self, rec):
+ """
+ Get number of failed attempts to sync from site record
+ Args:
+ rec (dictionary): info about specific site record
+ Returns:
+ (int) - number of failed attempts
+ """
+ if not rec:
+ return 0
+ return rec.get("tries", 0)
+
+    def _get_tries_count(self, file, provider):
+        """
+        Get number of failed attempts to sync
+        Args:
+            file (dictionary): info about specific file
+            provider (string): name of site ('gdrive' or specific user site)
+        Returns:
+            (int) - number of failed attempts
+        """
+        _, rec = self._get_site_rec(file.get("sites", []), provider)
+        return self._get_tries_count_from_rec(rec)
+
+ def _get_progress_dict(self, progress):
+ """
+ Provide progress metadata to be stored in Db.
+ Used during upload/download for GUI to show.
+ Args:
+ progress: (float) - 0-1 progress of upload/download
+ Returns:
+ (dictionary)
+ """
+ val = {"files.$[f].sites.$[s].progress": progress}
+ return val
+
+ def _get_retries_arr(self, project_name):
+ """
+ Returns array with allowed values in 'tries' field. If repre
+ contains these values, it means it was tried to be synchronized
+ but failed. We try up to 'self.presets["retry_cnt"]' times before
+ giving up and skipping representation.
+ Returns:
+ (list)
+ """
+ retry_cnt = self.sync_project_settings[project_name].\
+ get("config")["retry_cnt"]
+ arr = [i for i in range(int(retry_cnt))]
+ arr.append(None)
+
+ return arr
+
+ def _get_roots_config(self, presets, project_name, site_name):
+ """
+ Returns configured root(s) for 'project_name' and 'site_name' from
+ settings ('presets')
+ """
+ return presets[project_name]['sites'][site_name]['root']
diff --git a/openpype/modules/sync_server/tray/app.py b/openpype/modules/sync_server/tray/app.py
index 476e9d16e8..25fbf0e49a 100644
--- a/openpype/modules/sync_server/tray/app.py
+++ b/openpype/modules/sync_server/tray/app.py
@@ -1,35 +1,17 @@
from Qt import QtWidgets, QtCore, QtGui
-from Qt.QtCore import Qt
-import attr
-import os
-import sys
-import subprocess
-
-from openpype.tools.settings import (
- ProjectListWidget,
- style
-)
-
-from avalon.tools.delegates import PrettyTimeDelegate, pretty_timestamp
-from bson.objectid import ObjectId
+from openpype.tools.settings import style
from openpype.lib import PypeLogger
-from openpype.api import get_local_site_id
+from openpype import resources
+
+from openpype.modules.sync_server.tray.widgets import (
+ SyncProjectListWidget,
+ SyncRepresentationWidget
+)
log = PypeLogger().get_logger("SyncServer")
-STATUS = {
- 0: 'In Progress',
- 1: 'Failed',
- 2: 'Queued',
- 3: 'Paused',
- 4: 'Synced OK',
- -1: 'Not available'
-}
-
-DUMMY_PROJECT = "No project configured"
-
class SyncServerWindow(QtWidgets.QDialog):
"""
@@ -44,8 +26,8 @@ class SyncServerWindow(QtWidgets.QDialog):
self.setFocusPolicy(QtCore.Qt.StrongFocus)
self.setStyleSheet(style.load_stylesheet())
- self.setWindowIcon(QtGui.QIcon(style.app_icon_path()))
- self.resize(1400, 800)
+ self.setWindowIcon(QtGui.QIcon(resources.pype_icon_filepath()))
+ self.resize(1450, 700)
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self._hide_message)
@@ -134,1912 +116,3 @@ class SyncServerWindow(QtWidgets.QDialog):
"""
self.message.setText("")
self.message.hide()
-
-
-class SyncProjectListWidget(ProjectListWidget):
- """
- Lists all projects that are synchronized to choose from
- """
-
- def __init__(self, sync_server, parent):
- super(SyncProjectListWidget, self).__init__(parent)
- self.sync_server = sync_server
- self.project_list.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
- self.project_list.customContextMenuRequested.connect(
- self._on_context_menu)
- self.project_name = None
- self.local_site = None
- self.icons = {}
-
- def validate_context_change(self):
- return True
-
- def refresh(self):
- model = self.project_list.model()
- model.clear()
-
- project_name = None
- for project_name in self.sync_server.get_sync_project_settings().\
- keys():
- if self.sync_server.is_paused() or \
- self.sync_server.is_project_paused(project_name):
- icon = self._get_icon("paused")
- else:
- icon = self._get_icon("synced")
-
- model.appendRow(QtGui.QStandardItem(icon, project_name))
-
- if len(self.sync_server.get_sync_project_settings().keys()) == 0:
- model.appendRow(QtGui.QStandardItem(DUMMY_PROJECT))
-
- self.current_project = self.project_list.currentIndex().data(
- QtCore.Qt.DisplayRole
- )
- if not self.current_project:
- self.current_project = self.project_list.model().item(0). \
- data(QtCore.Qt.DisplayRole)
-
- if project_name:
- self.local_site = self.sync_server.get_active_site(project_name)
-
- def _get_icon(self, status):
- if not self.icons.get(status):
- resource_path = os.path.dirname(__file__)
- resource_path = os.path.join(resource_path, "..",
- "resources")
- pix_url = "{}/{}.png".format(resource_path, status)
- icon = QtGui.QIcon(pix_url)
- self.icons[status] = icon
- else:
- icon = self.icons[status]
- return icon
-
- def _on_context_menu(self, point):
- point_index = self.project_list.indexAt(point)
- if not point_index.isValid():
- return
-
- self.project_name = point_index.data(QtCore.Qt.DisplayRole)
-
- menu = QtWidgets.QMenu()
- actions_mapping = {}
-
- if self.sync_server.is_project_paused(self.project_name):
- action = QtWidgets.QAction("Unpause")
- actions_mapping[action] = self._unpause
- else:
- action = QtWidgets.QAction("Pause")
- actions_mapping[action] = self._pause
- menu.addAction(action)
-
- if self.local_site == get_local_site_id():
- action = QtWidgets.QAction("Clear local project")
- actions_mapping[action] = self._clear_project
- menu.addAction(action)
-
- result = menu.exec_(QtGui.QCursor.pos())
- if result:
- to_run = actions_mapping[result]
- if to_run:
- to_run()
-
- def _pause(self):
- if self.project_name:
- self.sync_server.pause_project(self.project_name)
- self.project_name = None
- self.refresh()
-
- def _unpause(self):
- if self.project_name:
- self.sync_server.unpause_project(self.project_name)
- self.project_name = None
- self.refresh()
-
- def _clear_project(self):
- if self.project_name:
- self.sync_server.clear_project(self.project_name, self.local_site)
- self.project_name = None
- self.refresh()
-
-
-class ProjectModel(QtCore.QAbstractListModel):
- def __init__(self, *args, projects=None, **kwargs):
- super(ProjectModel, self).__init__(*args, **kwargs)
- self.projects = projects or []
-
- def data(self, index, role):
- if role == Qt.DisplayRole:
- # See below for the data structure.
- status, text = self.projects[index.row()]
- # Return the todo text only.
- return text
-
- def rowCount(self, index):
- return len(self.todos)
-
-
-class SyncRepresentationWidget(QtWidgets.QWidget):
- """
- Summary dialog with list of representations that matches current
- settings 'local_site' and 'remote_site'.
- """
- active_changed = QtCore.Signal() # active index changed
- message_generated = QtCore.Signal(str)
-
- default_widths = (
- ("asset", 210),
- ("subset", 190),
- ("version", 10),
- ("representation", 90),
- ("created_dt", 100),
- ("sync_dt", 100),
- ("local_site", 60),
- ("remote_site", 70),
- ("files_count", 70),
- ("files_size", 70),
- ("priority", 20),
- ("state", 50)
- )
-
- def __init__(self, sync_server, project=None, parent=None):
- super(SyncRepresentationWidget, self).__init__(parent)
-
- self.sync_server = sync_server
-
- self._selected_id = None # keep last selected _id
- self.representation_id = None
- self.site_name = None # to pause/unpause representation
-
- self.filter = QtWidgets.QLineEdit()
- self.filter.setPlaceholderText("Filter representations..")
-
- top_bar_layout = QtWidgets.QHBoxLayout()
- top_bar_layout.addWidget(self.filter)
-
- self.table_view = QtWidgets.QTableView()
- headers = [item[0] for item in self.default_widths]
-
- model = SyncRepresentationModel(sync_server, headers, project)
- self.table_view.setModel(model)
- self.table_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
- self.table_view.setSelectionMode(
- QtWidgets.QAbstractItemView.SingleSelection)
- self.table_view.setSelectionBehavior(
- QtWidgets.QAbstractItemView.SelectRows)
- self.table_view.horizontalHeader().setSortIndicator(
- -1, Qt.AscendingOrder)
- self.table_view.setSortingEnabled(True)
- self.table_view.setAlternatingRowColors(True)
- self.table_view.verticalHeader().hide()
-
- time_delegate = PrettyTimeDelegate(self)
- column = self.table_view.model().get_header_index("created_dt")
- self.table_view.setItemDelegateForColumn(column, time_delegate)
- column = self.table_view.model().get_header_index("sync_dt")
- self.table_view.setItemDelegateForColumn(column, time_delegate)
-
- column = self.table_view.model().get_header_index("local_site")
- delegate = ImageDelegate(self)
- self.table_view.setItemDelegateForColumn(column, delegate)
-
- column = self.table_view.model().get_header_index("remote_site")
- delegate = ImageDelegate(self)
- self.table_view.setItemDelegateForColumn(column, delegate)
-
- column = self.table_view.model().get_header_index("files_size")
- delegate = SizeDelegate(self)
- self.table_view.setItemDelegateForColumn(column, delegate)
-
- for column_name, width in self.default_widths:
- idx = model.get_header_index(column_name)
- self.table_view.setColumnWidth(idx, width)
-
- layout = QtWidgets.QVBoxLayout(self)
- layout.setContentsMargins(0, 0, 0, 0)
- layout.addLayout(top_bar_layout)
- layout.addWidget(self.table_view)
-
- self.table_view.doubleClicked.connect(self._double_clicked)
- self.filter.textChanged.connect(lambda: model.set_filter(
- self.filter.text()))
- self.table_view.customContextMenuRequested.connect(
- self._on_context_menu)
-
- self.table_view.model().modelReset.connect(self._set_selection)
-
- self.selection_model = self.table_view.selectionModel()
- self.selection_model.selectionChanged.connect(self._selection_changed)
-
- def _selection_changed(self, new_selection):
- index = self.selection_model.currentIndex()
- self._selected_id = \
- self.table_view.model().data(index, Qt.UserRole)
-
- def _set_selection(self):
- """
- Sets selection to 'self._selected_id' if exists.
-
- Keep selection during model refresh.
- """
- if self._selected_id:
- index = self.table_view.model().get_index(self._selected_id)
- if index and index.isValid():
- mode = QtCore.QItemSelectionModel.Select | \
- QtCore.QItemSelectionModel.Rows
- self.selection_model.setCurrentIndex(index, mode)
- else:
- self._selected_id = None
-
- def _double_clicked(self, index):
- """
- Opens representation dialog with all files after doubleclick
- """
- _id = self.table_view.model().data(index, Qt.UserRole)
- detail_window = SyncServerDetailWindow(
- self.sync_server, _id, self.table_view.model()._project)
- detail_window.exec()
-
- def _on_context_menu(self, point):
- """
- Shows menu with loader actions on Right-click.
- """
- point_index = self.table_view.indexAt(point)
- if not point_index.isValid():
- return
-
- self.item = self.table_view.model()._data[point_index.row()]
- self.representation_id = self.item._id
- log.debug("menu representation _id:: {}".
- format(self.representation_id))
-
- menu = QtWidgets.QMenu()
- actions_mapping = {}
-
- action = QtWidgets.QAction("Open in explorer")
- actions_mapping[action] = self._open_in_explorer
- menu.addAction(action)
-
- local_site, local_progress = self.item.local_site.split()
- remote_site, remote_progress = self.item.remote_site.split()
- local_progress = float(local_progress)
- remote_progress = float(remote_progress)
-
- # progress smaller then 1.0 --> in progress or queued
- if local_progress < 1.0:
- self.site_name = local_site
- else:
- self.site_name = remote_site
-
- if self.item.state in [STATUS[0], STATUS[2]]:
- action = QtWidgets.QAction("Pause")
- actions_mapping[action] = self._pause
- menu.addAction(action)
-
- if self.item.state == STATUS[3]:
- action = QtWidgets.QAction("Unpause")
- actions_mapping[action] = self._unpause
- menu.addAction(action)
-
- # if self.item.state == STATUS[1]:
- # action = QtWidgets.QAction("Open error detail")
- # actions_mapping[action] = self._show_detail
- # menu.addAction(action)
-
- if remote_progress == 1.0:
- action = QtWidgets.QAction("Reset local site")
- actions_mapping[action] = self._reset_local_site
- menu.addAction(action)
-
- if local_progress == 1.0:
- action = QtWidgets.QAction("Reset remote site")
- actions_mapping[action] = self._reset_remote_site
- menu.addAction(action)
-
- if local_site != self.sync_server.DEFAULT_SITE:
- action = QtWidgets.QAction("Completely remove from local")
- actions_mapping[action] = self._remove_site
- menu.addAction(action)
- else:
- action = QtWidgets.QAction("Mark for sync to local")
- actions_mapping[action] = self._add_site
- menu.addAction(action)
-
- if not actions_mapping:
- action = QtWidgets.QAction("< No action >")
- actions_mapping[action] = None
- menu.addAction(action)
-
- result = menu.exec_(QtGui.QCursor.pos())
- if result:
- to_run = actions_mapping[result]
- if to_run:
- to_run()
-
- self.table_view.model().refresh()
-
- def _pause(self):
- self.sync_server.pause_representation(self.table_view.model()._project,
- self.representation_id,
- self.site_name)
- self.site_name = None
- self.message_generated.emit("Paused {}".format(self.representation_id))
-
- def _unpause(self):
- self.sync_server.unpause_representation(
- self.table_view.model()._project,
- self.representation_id,
- self.site_name)
- self.site_name = None
- self.message_generated.emit("Unpaused {}".format(
- self.representation_id))
-
- # temporary here for testing, will be removed TODO
- def _add_site(self):
- log.info(self.representation_id)
- project_name = self.table_view.model()._project
- local_site_name = self.sync_server.get_my_local_site()
- try:
- self.sync_server.add_site(
- project_name,
- self.representation_id,
- local_site_name
- )
- self.message_generated.emit(
- "Site {} added for {}".format(local_site_name,
- self.representation_id))
- except ValueError as exp:
- self.message_generated.emit("Error {}".format(str(exp)))
-
- def _remove_site(self):
- """
- Removes site record AND files.
-
- This is ONLY for representations stored on local site, which
- cannot be same as SyncServer.DEFAULT_SITE.
-
- This could only happen when artist work on local machine, not
- connected to studio mounted drives.
- """
- log.info("Removing {}".format(self.representation_id))
- try:
- local_site = get_local_site_id()
- self.sync_server.remove_site(
- self.table_view.model()._project,
- self.representation_id,
- local_site,
- True
- )
- self.message_generated.emit("Site {} removed".format(local_site))
- except ValueError as exp:
- self.message_generated.emit("Error {}".format(str(exp)))
-
- def _reset_local_site(self):
- """
- Removes errors or success metadata for particular file >> forces
- redo of upload/download
- """
- self.sync_server.reset_provider_for_file(
- self.table_view.model()._project,
- self.representation_id,
- 'local'
- )
-
- def _reset_remote_site(self):
- """
- Removes errors or success metadata for particular file >> forces
- redo of upload/download
- """
- self.sync_server.reset_provider_for_file(
- self.table_view.model()._project,
- self.representation_id,
- 'remote'
- )
-
- def _open_in_explorer(self):
- if not self.item:
- return
-
- fpath = self.item.path
- project = self.table_view.model()._project
- fpath = self.sync_server.get_local_file_path(project, fpath)
-
- fpath = os.path.normpath(os.path.dirname(fpath))
- if os.path.isdir(fpath):
- if 'win' in sys.platform: # windows
- subprocess.Popen('explorer "%s"' % fpath)
- elif sys.platform == 'darwin': # macOS
- subprocess.Popen(['open', fpath])
- else: # linux
- try:
- subprocess.Popen(['xdg-open', fpath])
- except OSError:
- raise OSError('unsupported xdg-open call??')
-
-
-class SyncRepresentationModel(QtCore.QAbstractTableModel):
- """
- Model for summary of representations.
-
- Groups files information per representation. Allows sorting and
- full text filtering.
-
- Allows pagination, most of heavy lifting is being done on DB side.
- Single model matches to single collection. When project is changed,
- model is reset and refreshed.
-
- Args:
- sync_server (SyncServer) - object to call server operations (update
- db status, set site status...)
- header (list) - names of visible columns
- project (string) - collection name, all queries must be called on
- a specific collection
-
- """
- PAGE_SIZE = 20 # default page size to query for
- REFRESH_SEC = 5000 # in seconds, requery DB for new status
- DEFAULT_SORT = {
- "updated_dt_remote": -1,
- "_id": 1
- }
- SORT_BY_COLUMN = [
- "context.asset", # asset
- "context.subset", # subset
- "context.version", # version
- "context.representation", # representation
- "updated_dt_local", # local created_dt
- "updated_dt_remote", # remote created_dt
- "avg_progress_local", # local progress
- "avg_progress_remote", # remote progress
- "files_count", # count of files
- "files_size", # file size of all files
- "context.asset", # priority TODO
- "status" # state
- ]
-
- @attr.s
- class SyncRepresentation:
- """
- Auxiliary object for easier handling.
-
- Fields must contain all header values (+ any arbitrary values).
- """
- _id = attr.ib()
- asset = attr.ib()
- subset = attr.ib()
- version = attr.ib()
- representation = attr.ib()
- created_dt = attr.ib(default=None)
- sync_dt = attr.ib(default=None)
- local_site = attr.ib(default=None)
- remote_site = attr.ib(default=None)
- files_count = attr.ib(default=None)
- files_size = attr.ib(default=None)
- priority = attr.ib(default=None)
- state = attr.ib(default=None)
- path = attr.ib(default=None)
-
- def __init__(self, sync_server, header, project=None):
- super(SyncRepresentationModel, self).__init__()
- self._header = header
- self._data = []
- self._project = project
- self._rec_loaded = 0
- self._total_records = 0 # how many documents query actually found
- self.filter = None
-
- self._initialized = False
- if not self._project or self._project == DUMMY_PROJECT:
- return
-
- self.sync_server = sync_server
- # TODO think about admin mode
- # this is for regular user, always only single local and single remote
- self.local_site = self.sync_server.get_active_site(self._project)
- self.remote_site = self.sync_server.get_remote_site(self._project)
-
- self.projection = self.get_default_projection()
-
- self.sort = self.DEFAULT_SORT
-
- self.query = self.get_default_query()
- self.default_query = list(self.get_default_query())
-
- representations = self.dbcon.aggregate(self.query)
- self.refresh(representations)
-
- self.timer = QtCore.QTimer()
- self.timer.timeout.connect(self.tick)
- self.timer.start(self.REFRESH_SEC)
-
- @property
- def dbcon(self):
- """
- Database object with preselected project (collection) to run DB
- operations (find, aggregate).
-
- All queries should go through this (because of collection).
- """
- return self.sync_server.connection.database[self._project]
-
- def data(self, index, role):
- item = self._data[index.row()]
-
- if role == Qt.DisplayRole:
- return attr.asdict(item)[self._header[index.column()]]
- if role == Qt.UserRole:
- return item._id
-
- def rowCount(self, index):
- return len(self._data)
-
- def columnCount(self, index):
- return len(self._header)
-
- def headerData(self, section, orientation, role):
- if role == Qt.DisplayRole:
- if orientation == Qt.Horizontal:
- return str(self._header[section])
-
- def tick(self):
- """
- Triggers refresh of model.
-
- Because of pagination, prepared (sorting, filtering) query needs
- to be run on DB every X seconds.
- """
- self.refresh(representations=None, load_records=self._rec_loaded)
- self.timer.start(self.REFRESH_SEC)
-
- def get_header_index(self, value):
- """
- Returns index of 'value' in headers
-
- Args:
- value (str): header name value
- Returns:
- (int)
- """
- return self._header.index(value)
-
- def refresh(self, representations=None, load_records=0):
- """
- Reloads representations from DB if necessary, adds them to model.
-
- Runs periodically (every X seconds) or by demand (change of
- sorting, filtering etc.)
-
- Emits 'modelReset' signal.
-
- Args:
- representations (PaginationResult object): pass result of
- aggregate query from outside - mostly for testing only
- load_records (int) - enforces how many records should be
- actually queried (scrolled a couple of times to list more
- than single page of records)
- """
- if self.sync_server.is_paused() or \
- self.sync_server.is_project_paused(self._project):
- return
-
- self.beginResetModel()
- self._data = []
- self._rec_loaded = 0
-
- if not representations:
- self.query = self.get_default_query(load_records)
- representations = self.dbcon.aggregate(self.query)
-
- self._add_page_records(self.local_site, self.remote_site,
- representations)
- self.endResetModel()
-
- def _add_page_records(self, local_site, remote_site, representations):
- """
- Process all records from 'representation' and add them to storage.
-
- Args:
- local_site (str): name of local site (mine)
- remote_site (str): name of cloud provider (theirs)
- representations (Mongo Cursor) - mimics result set, 1 object
- with paginatedResults array and totalCount array
- """
- result = representations.next()
- count = 0
- total_count = result.get("totalCount")
- if total_count:
- count = total_count.pop().get('count')
- self._total_records = count
-
- local_provider = _translate_provider_for_icon(self.sync_server,
- self._project,
- local_site)
- remote_provider = _translate_provider_for_icon(self.sync_server,
- self._project,
- remote_site)
-
- for repre in result.get("paginatedResults"):
- context = repre.get("context").pop()
- files = repre.get("files", [])
- if isinstance(files, dict): # aggregate returns dictionary
- files = [files]
-
- # representation without files doesnt concern us
- if not files:
- continue
-
- local_updated = remote_updated = None
- if repre.get('updated_dt_local'):
- local_updated = \
- repre.get('updated_dt_local').strftime("%Y%m%dT%H%M%SZ")
-
- if repre.get('updated_dt_remote'):
- remote_updated = \
- repre.get('updated_dt_remote').strftime("%Y%m%dT%H%M%SZ")
-
- avg_progress_remote = _convert_progress(
- repre.get('avg_progress_remote', '0'))
- avg_progress_local = _convert_progress(
- repre.get('avg_progress_local', '0'))
-
- if context.get("version"):
- version = "v{:0>3d}".format(context.get("version"))
- else:
- version = "hero"
-
- item = self.SyncRepresentation(
- repre.get("_id"),
- context.get("asset"),
- context.get("subset"),
- version,
- context.get("representation"),
- local_updated,
- remote_updated,
- '{} {}'.format(local_provider, avg_progress_local),
- '{} {}'.format(remote_provider, avg_progress_remote),
- repre.get("files_count", 1),
- repre.get("files_size", 0),
- 1,
- STATUS[repre.get("status", -1)],
- files[0].get('path')
- )
-
- self._data.append(item)
- self._rec_loaded += 1
-
- def canFetchMore(self, index):
- """
- Check if there are more records than currently loaded
- """
- # 'skip' might be suboptimal when representation hits 500k+
- return self._total_records > self._rec_loaded
-
- def fetchMore(self, index):
- """
- Add more record to model.
-
- Called when 'canFetchMore' returns true, which means there are
- more records in DB than loaded.
- """
- log.debug("fetchMore")
- items_to_fetch = min(self._total_records - self._rec_loaded,
- self.PAGE_SIZE)
- self.query = self.get_default_query(self._rec_loaded)
- representations = self.dbcon.aggregate(self.query)
- self.beginInsertRows(index,
- self._rec_loaded,
- self._rec_loaded + items_to_fetch - 1)
-
- self._add_page_records(self.local_site, self.remote_site,
- representations)
-
- self.endInsertRows()
-
- def sort(self, index, order):
- """
- Summary sort per representation.
-
- Sort is happening on a DB side, model is reset, db queried
- again.
-
- Args:
- index (int): column index
- order (int): 0|
- """
- # limit unwanted first re-sorting by view
- if index < 0:
- return
-
- self._rec_loaded = 0
- if order == 0:
- order = 1
- else:
- order = -1
-
- self.sort = {self.SORT_BY_COLUMN[index]: order, '_id': 1}
- self.query = self.get_default_query()
- # import json
- # log.debug(json.dumps(self.query, indent=4).replace('False', 'false').\
- # replace('True', 'true').replace('None', 'null'))
-
- representations = self.dbcon.aggregate(self.query)
- self.refresh(representations)
-
- def set_filter(self, filter):
- """
- Adds text value filtering
-
- Args:
- filter (str): string inputted by user
- """
- self.filter = filter
- self.refresh()
-
- def set_project(self, project):
- """
- Changes project, called after project selection is changed
-
- Args:
- project (str): name of project
- """
- self._project = project
- self.sync_server.set_sync_project_settings()
- self.local_site = self.sync_server.get_active_site(self._project)
- self.remote_site = self.sync_server.get_remote_site(self._project)
- self.refresh()
-
- def get_index(self, id):
- """
- Get index of 'id' value.
-
- Used for keeping selection after refresh.
-
- Args:
- id (str): MongoDB _id
- Returns:
- (QModelIndex)
- """
- for i in range(self.rowCount(None)):
- index = self.index(i, 0)
- value = self.data(index, Qt.UserRole)
- if value == id:
- return index
- return None
-
- def get_default_query(self, limit=0):
- """
- Returns basic aggregate query for main table.
-
- Main table provides summary information about representation,
- which could have multiple files. Details are accessible after
- double click on representation row.
- Columns:
- 'created_dt' - max of created or updated (when failed) per repr
- 'sync_dt' - same for remote side
- 'local_site' - progress of repr on local side, 1 = finished
- 'remote_site' - progress on remote side, calculates from files
- 'state' -
- 0 - in progress
- 1 - failed
- 2 - queued
- 3 - paused
- 4 - finished on both sides
-
- are calculated and must be calculated in DB because of
- pagination
-
- Args:
- limit (int): how many records should be returned, by default
- it 'PAGE_SIZE' for performance.
- Should be overridden by value of loaded records for refresh
- functionality (got more records by scrolling, refresh
- shouldn't reset that)
- """
- if limit == 0:
- limit = SyncRepresentationModel.PAGE_SIZE
-
- return [
- {"$match": self._get_match_part()},
- {'$unwind': '$files'},
- # merge potentially unwinded records back to single per repre
- {'$addFields': {
- 'order_remote': {
- '$filter': {'input': '$files.sites', 'as': 'p',
- 'cond': {'$eq': ['$$p.name', self.remote_site]}
- }},
- 'order_local': {
- '$filter': {'input': '$files.sites', 'as': 'p',
- 'cond': {'$eq': ['$$p.name', self.local_site]}
- }}
- }},
- {'$addFields': {
- # prepare progress per file, presence of 'created_dt' denotes
- # successfully finished load/download
- 'progress_remote': {'$first': {
- '$cond': [{'$size': "$order_remote.progress"},
- "$order_remote.progress",
- {'$cond': [
- {'$size': "$order_remote.created_dt"},
- [1],
- [0]
- ]}
- ]}},
- 'progress_local': {'$first': {
- '$cond': [{'$size': "$order_local.progress"},
- "$order_local.progress",
- {'$cond': [
- {'$size': "$order_local.created_dt"},
- [1],
- [0]
- ]}
- ]}},
- # file might be successfully created or failed, not both
- 'updated_dt_remote': {'$first': {
- '$cond': [{'$size': "$order_remote.created_dt"},
- "$order_remote.created_dt",
- {'$cond': [
- {'$size': "$order_remote.last_failed_dt"},
- "$order_remote.last_failed_dt",
- []
- ]}
- ]}},
- 'updated_dt_local': {'$first': {
- '$cond': [{'$size': "$order_local.created_dt"},
- "$order_local.created_dt",
- {'$cond': [
- {'$size': "$order_local.last_failed_dt"},
- "$order_local.last_failed_dt",
- []
- ]}
- ]}},
- 'files_size': {'$ifNull': ["$files.size", 0]},
- 'failed_remote': {
- '$cond': [{'$size': "$order_remote.last_failed_dt"},
- 1,
- 0]},
- 'failed_local': {
- '$cond': [{'$size': "$order_local.last_failed_dt"},
- 1,
- 0]},
- 'failed_local_tries': {
- '$cond': [{'$size': '$order_local.tries'},
- {'$first': '$order_local.tries'},
- 0]},
- 'failed_remote_tries': {
- '$cond': [{'$size': '$order_remote.tries'},
- {'$first': '$order_remote.tries'},
- 0]},
- 'paused_remote': {
- '$cond': [{'$size': "$order_remote.paused"},
- 1,
- 0]},
- 'paused_local': {
- '$cond': [{'$size': "$order_local.paused"},
- 1,
- 0]},
- }},
- {'$group': {
- '_id': '$_id',
- # pass through context - same for representation
- 'context': {'$addToSet': '$context'},
- 'data': {'$addToSet': '$data'},
- # pass through files as a list
- 'files': {'$addToSet': '$files'},
- # count how many files
- 'files_count': {'$sum': 1},
- 'files_size': {'$sum': '$files_size'},
- # sum avg progress, finished = 1
- 'avg_progress_remote': {'$avg': "$progress_remote"},
- 'avg_progress_local': {'$avg': "$progress_local"},
- # select last touch of file
- 'updated_dt_remote': {'$max': "$updated_dt_remote"},
- 'failed_remote': {'$sum': '$failed_remote'},
- 'failed_local': {'$sum': '$failed_local'},
- 'failed_remote_tries': {'$sum': '$failed_remote_tries'},
- 'failed_local_tries': {'$sum': '$failed_local_tries'},
- 'paused_remote': {'$sum': '$paused_remote'},
- 'paused_local': {'$sum': '$paused_local'},
- 'updated_dt_local': {'$max': "$updated_dt_local"}
- }},
- {"$project": self.projection},
- {"$sort": self.sort},
- {
- '$facet': {
- 'paginatedResults': [{'$skip': self._rec_loaded},
- {'$limit': limit}],
- 'totalCount': [{'$count': 'count'}]
- }
- }
- ]
-
- def _get_match_part(self):
- """
- Extend match part with filter if present.
-
- Filter is set by user input. Each model has different fields to be
- checked.
- If performance issues are found, '$text' and text indexes should
- be investigated.
-
- Fulltext searches in:
- context.subset
- context.asset
- context.representation names AND _id (ObjectId)
- """
- base_match = {
- "type": "representation",
- 'files.sites.name': {'$all': [self.local_site,
- self.remote_site]}
- }
- if not self.filter:
- return base_match
- else:
- regex_str = '.*{}.*'.format(self.filter)
- base_match['$or'] = [
- {'context.subset': {'$regex': regex_str, '$options': 'i'}},
- {'context.asset': {'$regex': regex_str, '$options': 'i'}},
- {'context.representation': {'$regex': regex_str,
- '$options': 'i'}}]
-
- if ObjectId.is_valid(self.filter):
- base_match['$or'] = [{'_id': ObjectId(self.filter)}]
-
- return base_match
-
- def get_default_projection(self):
- """
- Projection part for aggregate query.
-
- All fields with '1' will be returned, no others.
-
- Returns:
- (dict)
- """
- return {
- "context.subset": 1,
- "context.asset": 1,
- "context.version": 1,
- "context.representation": 1,
- "data.path": 1,
- "files": 1,
- 'files_count': 1,
- "files_size": 1,
- 'avg_progress_remote': 1,
- 'avg_progress_local': 1,
- 'updated_dt_remote': 1,
- 'updated_dt_local': 1,
- 'paused_remote': 1,
- 'paused_local': 1,
- 'status': {
- '$switch': {
- 'branches': [
- {
- 'case': {
- '$or': ['$paused_remote', '$paused_local']},
- 'then': 3 # Paused
- },
- {
- 'case': {
- '$or': [
- {'$gte': ['$failed_local_tries', 3]},
- {'$gte': ['$failed_remote_tries', 3]}
- ]},
- 'then': 1},
- {
- 'case': {
- '$or': [{'$eq': ['$avg_progress_remote', 0]},
- {'$eq': ['$avg_progress_local', 0]}]},
- 'then': 2 # Queued
- },
- {
- 'case': {'$or': [{'$and': [
- {'$gt': ['$avg_progress_remote', 0]},
- {'$lt': ['$avg_progress_remote', 1]}
- ]},
- {'$and': [
- {'$gt': ['$avg_progress_local', 0]},
- {'$lt': ['$avg_progress_local', 1]}
- ]}
- ]},
- 'then': 0 # In progress
- },
- {
- 'case': {'$and': [
- {'$eq': ['$avg_progress_remote', 1]},
- {'$eq': ['$avg_progress_local', 1]}
- ]},
- 'then': 4 # Synced OK
- },
- ],
- 'default': -1
- }
- }
- }
-
-
-class SyncServerDetailWindow(QtWidgets.QDialog):
- def __init__(self, sync_server, _id, project, parent=None):
- log.debug(
- "!!! SyncServerDetailWindow _id:: {}".format(_id))
- super(SyncServerDetailWindow, self).__init__(parent)
- self.setWindowFlags(QtCore.Qt.Window)
- self.setFocusPolicy(QtCore.Qt.StrongFocus)
-
- self.setStyleSheet(style.load_stylesheet())
- self.setWindowIcon(QtGui.QIcon(style.app_icon_path()))
- self.resize(1000, 400)
-
- body = QtWidgets.QWidget()
- footer = QtWidgets.QWidget()
- footer.setFixedHeight(20)
-
- container = SyncRepresentationDetailWidget(sync_server, _id, project,
- parent=self)
- body_layout = QtWidgets.QHBoxLayout(body)
- body_layout.addWidget(container)
- body_layout.setContentsMargins(0, 0, 0, 0)
-
- self.message = QtWidgets.QLabel()
- self.message.hide()
-
- footer_layout = QtWidgets.QVBoxLayout(footer)
- footer_layout.addWidget(self.message)
- footer_layout.setContentsMargins(0, 0, 0, 0)
-
- layout = QtWidgets.QVBoxLayout(self)
- layout.addWidget(body)
- layout.addWidget(footer)
-
- self.setLayout(body_layout)
- self.setWindowTitle("Sync Representation Detail")
-
-
-class SyncRepresentationDetailWidget(QtWidgets.QWidget):
- """
- Widget to display list of synchronizable files for single repre.
-
- Args:
- _id (str): representation _id
- project (str): name of project with repre
- parent (QDialog): SyncServerDetailWindow
- """
- active_changed = QtCore.Signal() # active index changed
-
- default_widths = (
- ("file", 290),
- ("created_dt", 120),
- ("sync_dt", 120),
- ("local_site", 60),
- ("remote_site", 60),
- ("size", 60),
- ("priority", 20),
- ("state", 90)
- )
-
- def __init__(self, sync_server, _id=None, project=None, parent=None):
- super(SyncRepresentationDetailWidget, self).__init__(parent)
-
- log.debug("Representation_id:{}".format(_id))
- self.representation_id = _id
- self.item = None # set to item that mouse was clicked over
- self.project = project
-
- self.sync_server = sync_server
-
- self._selected_id = None
-
- self.filter = QtWidgets.QLineEdit()
- self.filter.setPlaceholderText("Filter representation..")
-
- top_bar_layout = QtWidgets.QHBoxLayout()
- top_bar_layout.addWidget(self.filter)
-
- self.table_view = QtWidgets.QTableView()
- headers = [item[0] for item in self.default_widths]
-
- model = SyncRepresentationDetailModel(sync_server, headers, _id,
- project)
- self.table_view.setModel(model)
- self.table_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
- self.table_view.setSelectionMode(
- QtWidgets.QAbstractItemView.SingleSelection)
- self.table_view.setSelectionBehavior(
- QtWidgets.QTableView.SelectRows)
- self.table_view.horizontalHeader().setSortIndicator(-1,
- Qt.AscendingOrder)
- self.table_view.setSortingEnabled(True)
- self.table_view.setAlternatingRowColors(True)
- self.table_view.verticalHeader().hide()
-
- time_delegate = PrettyTimeDelegate(self)
- column = self.table_view.model().get_header_index("created_dt")
- self.table_view.setItemDelegateForColumn(column, time_delegate)
- column = self.table_view.model().get_header_index("sync_dt")
- self.table_view.setItemDelegateForColumn(column, time_delegate)
-
- column = self.table_view.model().get_header_index("local_site")
- delegate = ImageDelegate(self)
- self.table_view.setItemDelegateForColumn(column, delegate)
-
- column = self.table_view.model().get_header_index("remote_site")
- delegate = ImageDelegate(self)
- self.table_view.setItemDelegateForColumn(column, delegate)
-
- column = self.table_view.model().get_header_index("size")
- delegate = SizeDelegate(self)
- self.table_view.setItemDelegateForColumn(column, delegate)
-
- for column_name, width in self.default_widths:
- idx = model.get_header_index(column_name)
- self.table_view.setColumnWidth(idx, width)
-
- layout = QtWidgets.QVBoxLayout(self)
- layout.setContentsMargins(0, 0, 0, 0)
- layout.addLayout(top_bar_layout)
- layout.addWidget(self.table_view)
-
- self.filter.textChanged.connect(lambda: model.set_filter(
- self.filter.text()))
- self.table_view.customContextMenuRequested.connect(
- self._on_context_menu)
-
- self.table_view.model().modelReset.connect(self._set_selection)
-
- self.selection_model = self.table_view.selectionModel()
- self.selection_model.selectionChanged.connect(self._selection_changed)
-
- def _selection_changed(self):
- index = self.selection_model.currentIndex()
- self._selected_id = self.table_view.model().data(index, Qt.UserRole)
-
- def _set_selection(self):
- """
- Sets selection to 'self._selected_id' if exists.
-
- Keep selection during model refresh.
- """
- if self._selected_id:
- index = self.table_view.model().get_index(self._selected_id)
- if index.isValid():
- mode = QtCore.QItemSelectionModel.Select | \
- QtCore.QItemSelectionModel.Rows
- self.selection_model.setCurrentIndex(index, mode)
- else:
- self._selected_id = None
-
- def _show_detail(self):
- """
- Shows windows with error message for failed sync of a file.
- """
- dt = max(self.item.created_dt, self.item.sync_dt)
- detail_window = SyncRepresentationErrorWindow(self.item._id,
- self.project,
- dt,
- self.item.tries,
- self.item.error)
- detail_window.exec()
-
- def _on_context_menu(self, point):
- """
- Shows menu with loader actions on Right-click.
- """
- point_index = self.table_view.indexAt(point)
- if not point_index.isValid():
- return
-
- self.item = self.table_view.model()._data[point_index.row()]
-
- menu = QtWidgets.QMenu()
- actions_mapping = {}
-
- action = QtWidgets.QAction("Open in explorer")
- actions_mapping[action] = self._open_in_explorer
- menu.addAction(action)
-
- if self.item.state == STATUS[1]:
- action = QtWidgets.QAction("Open error detail")
- actions_mapping[action] = self._show_detail
- menu.addAction(action)
-
- remote_site, remote_progress = self.item.remote_site.split()
- if float(remote_progress) == 1.0:
- action = QtWidgets.QAction("Reset local site")
- actions_mapping[action] = self._reset_local_site
- menu.addAction(action)
-
- local_site, local_progress = self.item.local_site.split()
- if float(local_progress) == 1.0:
- action = QtWidgets.QAction("Reset remote site")
- actions_mapping[action] = self._reset_remote_site
- menu.addAction(action)
-
- if not actions_mapping:
- action = QtWidgets.QAction("< No action >")
- actions_mapping[action] = None
- menu.addAction(action)
-
- result = menu.exec_(QtGui.QCursor.pos())
- if result:
- to_run = actions_mapping[result]
- if to_run:
- to_run()
-
- def _reset_local_site(self):
- """
- Removes errors or success metadata for particular file >> forces
- redo of upload/download
- """
- self.sync_server.reset_provider_for_file(
- self.table_view.model()._project,
- self.representation_id,
- 'local',
- self.item._id)
- self.table_view.model().refresh()
-
- def _reset_remote_site(self):
- """
- Removes errors or success metadata for particular file >> forces
- redo of upload/download
- """
- self.sync_server.reset_provider_for_file(
- self.table_view.model()._project,
- self.representation_id,
- 'remote',
- self.item._id)
- self.table_view.model().refresh()
-
- def _open_in_explorer(self):
- if not self.item:
- return
-
- fpath = self.item.path
- project = self.table_view.model()._project
- fpath = self.sync_server.get_local_file_path(project, fpath)
-
- fpath = os.path.normpath(os.path.dirname(fpath))
- if os.path.isdir(fpath):
- if 'win' in sys.platform: # windows
- subprocess.Popen('explorer "%s"' % fpath)
- elif sys.platform == 'darwin': # macOS
- subprocess.Popen(['open', fpath])
- else: # linux
- try:
- subprocess.Popen(['xdg-open', fpath])
- except OSError:
- raise OSError('unsupported xdg-open call??')
-
-
-class SyncRepresentationDetailModel(QtCore.QAbstractTableModel):
- """
- List of all syncronizable files per single representation.
-
- Used in detail window accessible after clicking on single repre in the
- summary.
-
- Args:
- sync_server (SyncServer) - object to call server operations (update
- db status, set site status...)
- header (list) - names of visible columns
- _id (string) - MongoDB _id of representation
- project (string) - collection name, all queries must be called on
- a specific collection
- """
- PAGE_SIZE = 30
- # TODO add filter filename
- DEFAULT_SORT = {
- "files.path": 1
- }
- SORT_BY_COLUMN = [
- "files.path",
- "updated_dt_local", # local created_dt
- "updated_dt_remote", # remote created_dt
- "progress_local", # local progress
- "progress_remote", # remote progress
- "size", # remote progress
- "context.asset", # priority TODO
- "status" # state
- ]
-
- @attr.s
- class SyncRepresentationDetail:
- """
- Auxiliary object for easier handling.
-
- Fields must contain all header values (+ any arbitrary values).
- """
- _id = attr.ib()
- file = attr.ib()
- created_dt = attr.ib(default=None)
- sync_dt = attr.ib(default=None)
- local_site = attr.ib(default=None)
- remote_site = attr.ib(default=None)
- size = attr.ib(default=None)
- priority = attr.ib(default=None)
- state = attr.ib(default=None)
- tries = attr.ib(default=None)
- error = attr.ib(default=None)
- path = attr.ib(default=None)
-
- def __init__(self, sync_server, header, _id, project=None):
- super(SyncRepresentationDetailModel, self).__init__()
- self._header = header
- self._data = []
- self._project = project
- self._rec_loaded = 0
- self._total_records = 0 # how many documents query actually found
- self.filter = None
- self._id = _id
- self._initialized = False
-
- self.sync_server = sync_server
- # TODO think about admin mode
- # this is for regular user, always only single local and single remote
- self.local_site = self.sync_server.get_active_site(self._project)
- self.remote_site = self.sync_server.get_remote_site(self._project)
-
- self.sort = self.DEFAULT_SORT
-
- # in case we would like to hide/show some columns
- self.projection = self.get_default_projection()
-
- self.query = self.get_default_query()
- representations = self.dbcon.aggregate(self.query)
- self.refresh(representations)
-
- self.timer = QtCore.QTimer()
- self.timer.timeout.connect(self.tick)
- self.timer.start(SyncRepresentationModel.REFRESH_SEC)
-
- @property
- def dbcon(self):
- return self.sync_server.connection.database[self._project]
-
- def tick(self):
- self.refresh(representations=None, load_records=self._rec_loaded)
- self.timer.start(SyncRepresentationModel.REFRESH_SEC)
-
- def get_header_index(self, value):
- """
- Returns index of 'value' in headers
-
- Args:
- value (str): header name value
- Returns:
- (int)
- """
- return self._header.index(value)
-
- def data(self, index, role):
- item = self._data[index.row()]
- if role == Qt.DisplayRole:
- return attr.asdict(item)[self._header[index.column()]]
- if role == Qt.UserRole:
- return item._id
-
- def rowCount(self, index):
- return len(self._data)
-
- def columnCount(self, index):
- return len(self._header)
-
- def headerData(self, section, orientation, role):
- if role == Qt.DisplayRole:
- if orientation == Qt.Horizontal:
- return str(self._header[section])
-
- def refresh(self, representations=None, load_records=0):
- if self.sync_server.is_paused():
- return
-
- self.beginResetModel()
- self._data = []
- self._rec_loaded = 0
-
- if not representations:
- self.query = self.get_default_query(load_records)
- representations = self.dbcon.aggregate(self.query)
-
- self._add_page_records(self.local_site, self.remote_site,
- representations)
- self.endResetModel()
-
- def _add_page_records(self, local_site, remote_site, representations):
- """
- Process all records from 'representation' and add them to storage.
-
- Args:
- local_site (str): name of local site (mine)
- remote_site (str): name of cloud provider (theirs)
- representations (Mongo Cursor) - mimics result set, 1 object
- with paginatedResults array and totalCount array
- """
- # representations is a Cursor, get first
- result = representations.next()
- count = 0
- total_count = result.get("totalCount")
- if total_count:
- count = total_count.pop().get('count')
- self._total_records = count
-
- local_provider = _translate_provider_for_icon(self.sync_server,
- self._project,
- local_site)
- remote_provider = _translate_provider_for_icon(self.sync_server,
- self._project,
- remote_site)
-
- for repre in result.get("paginatedResults"):
- # log.info("!!! repre:: {}".format(repre))
- files = repre.get("files", [])
- if isinstance(files, dict): # aggregate returns dictionary
- files = [files]
-
- for file in files:
- local_updated = remote_updated = None
- if repre.get('updated_dt_local'):
- local_updated = \
- repre.get('updated_dt_local').strftime(
- "%Y%m%dT%H%M%SZ")
-
- if repre.get('updated_dt_remote'):
- remote_updated = \
- repre.get('updated_dt_remote').strftime(
- "%Y%m%dT%H%M%SZ")
-
- progress_remote = _convert_progress(
- repre.get('progress_remote', '0'))
- progress_local = _convert_progress(
- repre.get('progress_local', '0'))
-
- errors = []
- if repre.get('failed_remote_error'):
- errors.append(repre.get('failed_remote_error'))
- if repre.get('failed_local_error'):
- errors.append(repre.get('failed_local_error'))
-
- item = self.SyncRepresentationDetail(
- file.get("_id"),
- os.path.basename(file["path"]),
- local_updated,
- remote_updated,
- '{} {}'.format(local_provider, progress_local),
- '{} {}'.format(remote_provider, progress_remote),
- file.get('size', 0),
- 1,
- STATUS[repre.get("status", -1)],
- repre.get("tries"),
- '\n'.join(errors),
- file.get('path')
-
- )
- self._data.append(item)
- self._rec_loaded += 1
-
- def canFetchMore(self, index):
- """
- Check if there are more records than currently loaded
- """
- # 'skip' might be suboptimal when representation hits 500k+
- return self._total_records > self._rec_loaded
-
- def fetchMore(self, index):
- """
- Add more record to model.
-
- Called when 'canFetchMore' returns true, which means there are
- more records in DB than loaded.
- 'self._buffer' is used to stash cursor to limit requery
- """
- log.debug("fetchMore")
- items_to_fetch = min(self._total_records - self._rec_loaded,
- self.PAGE_SIZE)
- self.query = self.get_default_query(self._rec_loaded)
- representations = self.dbcon.aggregate(self.query)
- self.beginInsertRows(index,
- self._rec_loaded,
- self._rec_loaded + items_to_fetch - 1)
-
- self._add_page_records(self.local_site, self.remote_site,
- representations)
-
- self.endInsertRows()
-
- def sort(self, index, order):
- # limit unwanted first re-sorting by view
- if index < 0:
- return
-
- self._rec_loaded = 0 # change sort - reset from start
-
- if order == 0:
- order = 1
- else:
- order = -1
-
- self.sort = {self.SORT_BY_COLUMN[index]: order}
- self.query = self.get_default_query()
-
- representations = self.dbcon.aggregate(self.query)
- self.refresh(representations)
-
- def set_filter(self, filter):
- self.filter = filter
- self.refresh()
-
- def get_index(self, id):
- """
- Get index of 'id' value.
-
- Used for keeping selection after refresh.
-
- Args:
- id (str): MongoDB _id
- Returns:
- (QModelIndex)
- """
- for i in range(self.rowCount(None)):
- index = self.index(i, 0)
- value = self.data(index, Qt.UserRole)
- if value == id:
- return index
- return None
-
- def get_default_query(self, limit=0):
- """
- Gets query that gets used when no extra sorting, filtering or
- projecting is needed.
-
- Called for basic table view.
-
- Returns:
- [(dict)] - list with single dict - appropriate for aggregate
- function for MongoDB
- """
- if limit == 0:
- limit = SyncRepresentationModel.PAGE_SIZE
-
- return [
- {"$match": self._get_match_part()},
- {"$unwind": "$files"},
- {'$addFields': {
- 'order_remote': {
- '$filter': {'input': '$files.sites', 'as': 'p',
- 'cond': {'$eq': ['$$p.name', self.remote_site]}
- }},
- 'order_local': {
- '$filter': {'input': '$files.sites', 'as': 'p',
- 'cond': {'$eq': ['$$p.name', self.local_site]}
- }}
- }},
- {'$addFields': {
- # prepare progress per file, presence of 'created_dt' denotes
- # successfully finished load/download
- 'progress_remote': {'$first': {
- '$cond': [{'$size': "$order_remote.progress"},
- "$order_remote.progress",
- {'$cond': [
- {'$size': "$order_remote.created_dt"},
- [1],
- [0]
- ]}
- ]}},
- 'progress_local': {'$first': {
- '$cond': [{'$size': "$order_local.progress"},
- "$order_local.progress",
- {'$cond': [
- {'$size': "$order_local.created_dt"},
- [1],
- [0]
- ]}
- ]}},
- # file might be successfully created or failed, not both
- 'updated_dt_remote': {'$first': {
- '$cond': [
- {'$size': "$order_remote.created_dt"},
- "$order_remote.created_dt",
- {
- '$cond': [
- {'$size': "$order_remote.last_failed_dt"},
- "$order_remote.last_failed_dt",
- []
- ]
- }
- ]
- }},
- 'updated_dt_local': {'$first': {
- '$cond': [
- {'$size': "$order_local.created_dt"},
- "$order_local.created_dt",
- {
- '$cond': [
- {'$size': "$order_local.last_failed_dt"},
- "$order_local.last_failed_dt",
- []
- ]
- }
- ]
- }},
- 'paused_remote': {
- '$cond': [{'$size': "$order_remote.paused"},
- 1,
- 0]},
- 'paused_local': {
- '$cond': [{'$size': "$order_local.paused"},
- 1,
- 0]},
- 'failed_remote': {
- '$cond': [{'$size': "$order_remote.last_failed_dt"},
- 1,
- 0]},
- 'failed_local': {
- '$cond': [{'$size': "$order_local.last_failed_dt"},
- 1,
- 0]},
- 'failed_remote_error': {'$first': {
- '$cond': [{'$size': "$order_remote.error"},
- "$order_remote.error",
- [""]]}},
- 'failed_local_error': {'$first': {
- '$cond': [{'$size': "$order_local.error"},
- "$order_local.error",
- [""]]}},
- 'tries': {'$first': {
- '$cond': [
- {'$size': "$order_local.tries"},
- "$order_local.tries",
- {'$cond': [
- {'$size': "$order_remote.tries"},
- "$order_remote.tries",
- []
- ]}
- ]}}
- }},
- {"$project": self.projection},
- {"$sort": self.sort},
- {
- '$facet': {
- 'paginatedResults': [{'$skip': self._rec_loaded},
- {'$limit': limit}],
- 'totalCount': [{'$count': 'count'}]
- }
- }
- ]
-
- def _get_match_part(self):
- """
- Returns different content for 'match' portion if filtering by
- name is present
-
- Returns:
- (dict)
- """
- if not self.filter:
- return {
- "type": "representation",
- "_id": self._id
- }
- else:
- regex_str = '.*{}.*'.format(self.filter)
- return {
- "type": "representation",
- "_id": self._id,
- '$or': [{'files.path': {'$regex': regex_str, '$options': 'i'}}]
- }
-
- def get_default_projection(self):
- """
- Projection part for aggregate query.
-
- All fields with '1' will be returned, no others.
-
- Returns:
- (dict)
- """
- return {
- "files": 1,
- 'progress_remote': 1,
- 'progress_local': 1,
- 'updated_dt_remote': 1,
- 'updated_dt_local': 1,
- 'paused_remote': 1,
- 'paused_local': 1,
- 'failed_remote_error': 1,
- 'failed_local_error': 1,
- 'tries': 1,
- 'status': {
- '$switch': {
- 'branches': [
- {
- 'case': {
- '$or': ['$paused_remote', '$paused_local']},
- 'then': 3 # Paused
- },
- {
- 'case': {
- '$and': [{'$or': ['$failed_remote',
- '$failed_local']},
- {'$eq': ['$tries', 3]}]},
- 'then': 1 # Failed (3 tries)
- },
- {
- 'case': {
- '$or': [{'$eq': ['$progress_remote', 0]},
- {'$eq': ['$progress_local', 0]}]},
- 'then': 2 # Queued
- },
- {
- 'case': {
- '$or': ['$failed_remote', '$failed_local']},
- 'then': 1 # Failed
- },
- {
- 'case': {'$or': [{'$and': [
- {'$gt': ['$progress_remote', 0]},
- {'$lt': ['$progress_remote', 1]}
- ]},
- {'$and': [
- {'$gt': ['$progress_local', 0]},
- {'$lt': ['$progress_local', 1]}
- ]}
- ]},
- 'then': 0 # In Progress
- },
- {
- 'case': {'$and': [
- {'$eq': ['$progress_remote', 1]},
- {'$eq': ['$progress_local', 1]}
- ]},
- 'then': 4 # Synced OK
- },
- ],
- 'default': -1
- }
- },
- 'data.path': 1
- }
-
-
-class ImageDelegate(QtWidgets.QStyledItemDelegate):
- """
- Prints icon of site and progress of synchronization
- """
-
- def __init__(self, parent=None):
- super(ImageDelegate, self).__init__(parent)
- self.icons = {}
-
- def paint(self, painter, option, index):
- option = QtWidgets.QStyleOptionViewItem(option)
- option.showDecorationSelected = True
-
- if (option.showDecorationSelected and
- (option.state & QtWidgets.QStyle.State_Selected)):
- painter.setOpacity(0.20) # highlight color is a bit off
- painter.fillRect(option.rect,
- option.palette.highlight())
- painter.setOpacity(1)
-
- d = index.data(QtCore.Qt.DisplayRole)
- if d:
- provider, value = d.split()
- else:
- return
-
- if not self.icons.get(provider):
- resource_path = os.path.dirname(__file__)
- resource_path = os.path.join(resource_path, "..",
- "providers", "resources")
- pix_url = "{}/{}.png".format(resource_path, provider)
- pixmap = QtGui.QPixmap(pix_url)
- self.icons[provider] = pixmap
- else:
- pixmap = self.icons[provider]
-
- point = QtCore.QPoint(option.rect.x() +
- (option.rect.width() - pixmap.width()) / 2,
- option.rect.y() +
- (option.rect.height() - pixmap.height()) / 2)
- painter.drawPixmap(point, pixmap)
-
- painter.setOpacity(0.5)
- overlay_rect = option.rect
- overlay_rect.setHeight(overlay_rect.height() * (1.0 - float(value)))
- painter.fillRect(overlay_rect,
- QtGui.QBrush(QtGui.QColor(0, 0, 0, 200)))
- painter.setOpacity(1)
-
-
-class SyncRepresentationErrorWindow(QtWidgets.QDialog):
- def __init__(self, _id, project, dt, tries, msg, parent=None):
- super(SyncRepresentationErrorWindow, self).__init__(parent)
- self.setWindowFlags(QtCore.Qt.Window)
- self.setFocusPolicy(QtCore.Qt.StrongFocus)
-
- self.setStyleSheet(style.load_stylesheet())
- self.setWindowIcon(QtGui.QIcon(style.app_icon_path()))
- self.resize(250, 200)
-
- body = QtWidgets.QWidget()
- footer = QtWidgets.QWidget()
- footer.setFixedHeight(20)
-
- container = SyncRepresentationErrorWidget(_id, project, dt, tries, msg,
- parent=self)
- body_layout = QtWidgets.QHBoxLayout(body)
- body_layout.addWidget(container)
- body_layout.setContentsMargins(0, 0, 0, 0)
-
- message = QtWidgets.QLabel()
- message.hide()
-
- footer_layout = QtWidgets.QVBoxLayout(footer)
- footer_layout.addWidget(message)
- footer_layout.setContentsMargins(0, 0, 0, 0)
-
- layout = QtWidgets.QVBoxLayout(self)
- layout.addWidget(body)
- layout.addWidget(footer)
-
- self.setLayout(body_layout)
- self.setWindowTitle("Sync Representation Error Detail")
-
-
-class SyncRepresentationErrorWidget(QtWidgets.QWidget):
- """
- Dialog to show when sync error happened, prints error message
- """
-
- def __init__(self, _id, project, dt, tries, msg, parent=None):
- super(SyncRepresentationErrorWidget, self).__init__(parent)
-
- layout = QtWidgets.QFormLayout(self)
- layout.addRow(QtWidgets.QLabel("Last update date"),
- QtWidgets.QLabel(pretty_timestamp(dt)))
- layout.addRow(QtWidgets.QLabel("Retries"),
- QtWidgets.QLabel(str(tries)))
- layout.addRow(QtWidgets.QLabel("Error message"),
- QtWidgets.QLabel(msg))
-
-
-class SizeDelegate(QtWidgets.QStyledItemDelegate):
- """
- Pretty print for file size
- """
-
- def __init__(self, parent=None):
- super(SizeDelegate, self).__init__(parent)
-
- def displayText(self, value, locale):
- if value is None:
- # Ignore None value
- return
-
- return self._pretty_size(value)
-
- def _pretty_size(self, value, suffix='B'):
- for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
- if abs(value) < 1024.0:
- return "%3.1f%s%s" % (value, unit, suffix)
- value /= 1024.0
- return "%.1f%s%s" % (value, 'Yi', suffix)
-
-
-def _convert_progress(value):
- try:
- progress = float(value)
- except (ValueError, TypeError):
- progress = 0.0
-
- return progress
-
-
-def _translate_provider_for_icon(sync_server, project, site):
- """
- Get provider for 'site'
-
- This is used for getting icon, 'studio' should have different icon
- then local sites, even the provider 'local_drive' is same
-
- """
- if site == sync_server.DEFAULT_SITE:
- return sync_server.DEFAULT_SITE
- return sync_server.get_provider_for_site(project, site)
diff --git a/openpype/modules/sync_server/tray/lib.py b/openpype/modules/sync_server/tray/lib.py
new file mode 100644
index 0000000000..0282d79ea1
--- /dev/null
+++ b/openpype/modules/sync_server/tray/lib.py
@@ -0,0 +1,52 @@
+from Qt import QtCore
+
+from openpype.lib import PypeLogger
+
+
+log = PypeLogger().get_logger("SyncServer")
+
+STATUS = {
+ 0: 'In Progress',
+ 1: 'Queued',
+ 2: 'Failed',
+ 3: 'Paused',
+ 4: 'Synced OK',
+ -1: 'Not available'
+}
+
+DUMMY_PROJECT = "No project configured"
+
+ProviderRole = QtCore.Qt.UserRole + 2
+ProgressRole = QtCore.Qt.UserRole + 4
+DateRole = QtCore.Qt.UserRole + 6
+FailedRole = QtCore.Qt.UserRole + 8
+
+
+def pretty_size(value, suffix='B'):
+ for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
+ if abs(value) < 1024.0:
+ return "%3.1f%s%s" % (value, unit, suffix)
+ value /= 1024.0
+ return "%.1f%s%s" % (value, 'Yi', suffix)
+
+
+def convert_progress(value):
+ try:
+ progress = float(value)
+ except (ValueError, TypeError):
+ progress = 0.0
+
+ return progress
+
+
+def translate_provider_for_icon(sync_server, project, site):
+ """
+ Get provider for 'site'
+
+ This is used for getting icon, 'studio' should have different icon
+    than local sites, even if the provider 'local_drive' is the same
+
+ """
+ if site == sync_server.DEFAULT_SITE:
+ return sync_server.DEFAULT_SITE
+ return sync_server.get_provider_for_site(project, site)
diff --git a/openpype/modules/sync_server/tray/models.py b/openpype/modules/sync_server/tray/models.py
new file mode 100644
index 0000000000..3cc53c6ec4
--- /dev/null
+++ b/openpype/modules/sync_server/tray/models.py
@@ -0,0 +1,1124 @@
+import os
+import attr
+from bson.objectid import ObjectId
+
+from Qt import QtCore
+from Qt.QtCore import Qt
+
+from avalon.tools.delegates import pretty_timestamp
+
+from openpype.lib import PypeLogger
+
+from openpype.modules.sync_server.tray import lib
+
+
+log = PypeLogger().get_logger("SyncServer")
+
+
+class ProjectModel(QtCore.QAbstractListModel):
+    def __init__(self, *args, projects=None, **kwargs):
+        super(ProjectModel, self).__init__(*args, **kwargs)
+        self.projects = projects or []
+
+    def data(self, index, role):
+        if role == Qt.DisplayRole:
+            # each entry is a (status, text) tuple; display the name only
+            status, text = self.projects[index.row()]
+            return text
+
+    def rowCount(self, _index):
+        return len(self.projects)
+
+    def columnCount(self, _index):
+        # list model - always a single column
+        return 1
+
+
+class _SyncRepresentationModel(QtCore.QAbstractTableModel):
+
+ COLUMN_LABELS = []
+
+ PAGE_SIZE = 20 # default page size to query for
+    REFRESH_SEC = 5000  # in milliseconds (QTimer interval), requery DB for new status
+
+ @property
+ def dbcon(self):
+ """
+ Database object with preselected project (collection) to run DB
+ operations (find, aggregate).
+
+ All queries should go through this (because of collection).
+ """
+ return self.sync_server.connection.database[self.project]
+
+ @property
+ def project(self):
+ """Returns project"""
+ return self._project
+
+ def rowCount(self, _index):
+ return len(self._data)
+
+ def columnCount(self, _index):
+ return len(self._header)
+
+ def headerData(self, section, orientation, role):
+ if role == Qt.DisplayRole:
+ if orientation == Qt.Horizontal:
+ return self.COLUMN_LABELS[section][1]
+
+ def get_header_index(self, value):
+ """
+ Returns index of 'value' in headers
+
+ Args:
+ value (str): header name value
+ Returns:
+ (int)
+ """
+ return self._header.index(value)
+
+ def refresh(self, representations=None, load_records=0):
+ """
+ Reloads representations from DB if necessary, adds them to model.
+
+ Runs periodically (every X seconds) or by demand (change of
+ sorting, filtering etc.)
+
+ Emits 'modelReset' signal.
+
+ Args:
+ representations (PaginationResult object): pass result of
+ aggregate query from outside - mostly for testing only
+ load_records (int) - enforces how many records should be
+ actually queried (scrolled a couple of times to list more
+ than single page of records)
+ """
+ if self.sync_server.is_paused() or \
+ self.sync_server.is_project_paused(self.project):
+ return
+ self.refresh_started.emit()
+ self.beginResetModel()
+ self._data = []
+ self._rec_loaded = 0
+
+ if not representations:
+ self.query = self.get_default_query(load_records)
+ representations = self.dbcon.aggregate(self.query)
+
+ self.add_page_records(self.local_site, self.remote_site,
+ representations)
+ self.endResetModel()
+ self.refresh_finished.emit()
+
+ def tick(self):
+ """
+ Triggers refresh of model.
+
+ Because of pagination, prepared (sorting, filtering) query needs
+ to be run on DB every X seconds.
+ """
+ self.refresh(representations=None, load_records=self._rec_loaded)
+ self.timer.start(self.REFRESH_SEC)
+
+ def canFetchMore(self, _index):
+ """
+ Check if there are more records than currently loaded
+ """
+ # 'skip' might be suboptimal when representation hits 500k+
+ return self._total_records > self._rec_loaded
+
+ def fetchMore(self, index):
+ """
+ Add more record to model.
+
+ Called when 'canFetchMore' returns true, which means there are
+ more records in DB than loaded.
+ """
+ log.debug("fetchMore")
+ items_to_fetch = min(self._total_records - self._rec_loaded,
+ self.PAGE_SIZE)
+ self.query = self.get_default_query(self._rec_loaded)
+ representations = self.dbcon.aggregate(self.query)
+ self.beginInsertRows(index,
+ self._rec_loaded,
+ self._rec_loaded + items_to_fetch - 1)
+
+ self.add_page_records(self.local_site, self.remote_site,
+ representations)
+
+ self.endInsertRows()
+
+    def sort(self, index, order):
+        """
+        Summary sort per representation.
+
+        Sort is happening on a DB side, model is reset, db queried
+        again.
+
+        Args:
+            index (int): column index
+            order (int): 0 (ascending, default) | 1 (descending)
+        """
+        # limit unwanted first re-sorting by view
+        if index < 0:
+            return
+
+        self._rec_loaded = 0
+        if order == 0:
+            order = 1
+        else:
+            order = -1
+
+        self.sort = {self.SORT_BY_COLUMN[index]: order, '_id': 1}  # NOTE(review): assigning 'self.sort' shadows this sort() method on the instance - confirm Qt still dispatches to the class method, else rename the attribute
+        self.query = self.get_default_query()
+        # import json
+        # log.debug(json.dumps(self.query, indent=4).\
+        #    replace('False', 'false').\
+        #    replace('True', 'true').replace('None', 'null'))
+
+        representations = self.dbcon.aggregate(self.query)
+        self.refresh(representations)
+
+ def set_filter(self, word_filter):
+ """
+ Adds text value filtering
+
+ Args:
+ word_filter (str): string inputted by user
+ """
+ self.word_filter = word_filter
+ self.refresh()
+
+ def set_project(self, project):
+ """
+ Changes project, called after project selection is changed
+
+ Args:
+ project (str): name of project
+ """
+ self._project = project
+ self.sync_server.set_sync_project_settings()
+ self.local_site = self.sync_server.get_active_site(self.project)
+ self.remote_site = self.sync_server.get_remote_site(self.project)
+ self.refresh()
+
+ def get_index(self, id):
+ """
+ Get index of 'id' value.
+
+ Used for keeping selection after refresh.
+
+ Args:
+ id (str): MongoDB _id
+ Returns:
+ (QModelIndex)
+ """
+ for i in range(self.rowCount(None)):
+ index = self.index(i, 0)
+ value = self.data(index, Qt.UserRole)
+ if value == id:
+ return index
+ return None
+
+
+class SyncRepresentationSummaryModel(_SyncRepresentationModel):
+ """
+ Model for summary of representations.
+
+ Groups files information per representation. Allows sorting and
+ full text filtering.
+
+ Allows pagination, most of heavy lifting is being done on DB side.
+ Single model matches to single collection. When project is changed,
+ model is reset and refreshed.
+
+ Args:
+ sync_server (SyncServer) - object to call server operations (update
+ db status, set site status...)
+ header (list) - names of visible columns
+ project (string) - collection name, all queries must be called on
+ a specific collection
+
+ """
+ COLUMN_LABELS = [
+ ("asset", "Asset"),
+ ("subset", "Subset"),
+ ("version", "Version"),
+ ("representation", "Representation"),
+ ("local_site", "Active site"),
+ ("remote_site", "Remote site"),
+ ("files_count", "Files"),
+ ("files_size", "Size"),
+ ("priority", "Priority"),
+ ("state", "Status")
+ ]
+
+ DEFAULT_SORT = {
+ "updated_dt_remote": -1,
+ "_id": 1
+ }
+ SORT_BY_COLUMN = [
+ "context.asset", # asset
+ "context.subset", # subset
+ "context.version", # version
+ "context.representation", # representation
+ "updated_dt_local", # local created_dt
+ "updated_dt_remote", # remote created_dt
+ "files_count", # count of files
+ "files_size", # file size of all files
+ "context.asset", # priority TODO
+ "status" # state
+ ]
+
+ refresh_started = QtCore.Signal()
+ refresh_finished = QtCore.Signal()
+
+ @attr.s
+ class SyncRepresentation:
+ """
+ Auxiliary object for easier handling.
+
+ Fields must contain all header values (+ any arbitrary values).
+ """
+ _id = attr.ib()
+ asset = attr.ib()
+ subset = attr.ib()
+ version = attr.ib()
+ representation = attr.ib()
+ created_dt = attr.ib(default=None)
+ sync_dt = attr.ib(default=None)
+ local_site = attr.ib(default=None)
+ remote_site = attr.ib(default=None)
+ local_provider = attr.ib(default=None)
+ remote_provider = attr.ib(default=None)
+ local_progress = attr.ib(default=None)
+ remote_progress = attr.ib(default=None)
+ files_count = attr.ib(default=None)
+ files_size = attr.ib(default=None)
+ priority = attr.ib(default=None)
+ state = attr.ib(default=None)
+ path = attr.ib(default=None)
+
+ def __init__(self, sync_server, header, project=None):
+ super(SyncRepresentationSummaryModel, self).__init__()
+ self._header = header
+ self._data = []
+ self._project = project
+ self._rec_loaded = 0
+ self._total_records = 0 # how many documents query actually found
+ self.word_filter = None
+
+ self._initialized = False
+ if not self._project or self._project == lib.DUMMY_PROJECT:
+ return
+
+ self.sync_server = sync_server
+ # TODO think about admin mode
+ # this is for regular user, always only single local and single remote
+ self.local_site = self.sync_server.get_active_site(self.project)
+ self.remote_site = self.sync_server.get_remote_site(self.project)
+
+ self.projection = self.get_default_projection()
+
+ self.sort = self.DEFAULT_SORT
+
+ self.query = self.get_default_query()
+ self.default_query = list(self.get_default_query())
+
+ representations = self.dbcon.aggregate(self.query)
+ self.refresh(representations)
+
+ self.timer = QtCore.QTimer()
+ self.timer.timeout.connect(self.tick)
+ self.timer.start(self.REFRESH_SEC)
+
+ def data(self, index, role):
+ item = self._data[index.row()]
+
+ header_value = self._header[index.column()]
+ if role == lib.ProviderRole:
+ if header_value == 'local_site':
+ return item.local_provider
+ if header_value == 'remote_site':
+ return item.remote_provider
+
+ if role == lib.ProgressRole:
+ if header_value == 'local_site':
+ return item.local_progress
+ if header_value == 'remote_site':
+ return item.remote_progress
+
+ if role == lib.DateRole:
+ if header_value == 'local_site':
+ if item.created_dt:
+ return pretty_timestamp(item.created_dt)
+ if header_value == 'remote_site':
+ if item.sync_dt:
+ return pretty_timestamp(item.sync_dt)
+
+ if role == lib.FailedRole:
+ if header_value == 'local_site':
+ return item.state == lib.STATUS[2] and item.local_progress < 1
+ if header_value == 'remote_site':
+ return item.state == lib.STATUS[2] and item.remote_progress < 1
+
+ if role == Qt.DisplayRole:
+ # because of ImageDelegate
+ if header_value in ['remote_site', 'local_site']:
+ return ""
+
+ return attr.asdict(item)[self._header[index.column()]]
+ if role == Qt.UserRole:
+ return item._id
+
+ def add_page_records(self, local_site, remote_site, representations):
+ """
+ Process all records from 'representation' and add them to storage.
+
+ Args:
+ local_site (str): name of local site (mine)
+ remote_site (str): name of cloud provider (theirs)
+ representations (Mongo Cursor) - mimics result set, 1 object
+ with paginatedResults array and totalCount array
+ """
+ result = representations.next()
+ count = 0
+ total_count = result.get("totalCount")
+ if total_count:
+ count = total_count.pop().get('count')
+ self._total_records = count
+
+ local_provider = lib.translate_provider_for_icon(self.sync_server,
+ self.project,
+ local_site)
+ remote_provider = lib.translate_provider_for_icon(self.sync_server,
+ self.project,
+ remote_site)
+
+ for repre in result.get("paginatedResults"):
+ context = repre.get("context").pop()
+ files = repre.get("files", [])
+ if isinstance(files, dict): # aggregate returns dictionary
+ files = [files]
+
+            # representation without files doesn't concern us
+ if not files:
+ continue
+
+ local_updated = remote_updated = None
+ if repre.get('updated_dt_local'):
+ local_updated = \
+ repre.get('updated_dt_local').strftime("%Y%m%dT%H%M%SZ")
+
+ if repre.get('updated_dt_remote'):
+ remote_updated = \
+ repre.get('updated_dt_remote').strftime("%Y%m%dT%H%M%SZ")
+
+ avg_progress_remote = lib.convert_progress(
+ repre.get('avg_progress_remote', '0'))
+ avg_progress_local = lib.convert_progress(
+ repre.get('avg_progress_local', '0'))
+
+ if context.get("version"):
+ version = "v{:0>3d}".format(context.get("version"))
+ else:
+ version = "master"
+
+ item = self.SyncRepresentation(
+ repre.get("_id"),
+ context.get("asset"),
+ context.get("subset"),
+ version,
+ context.get("representation"),
+ local_updated,
+ remote_updated,
+ local_site,
+ remote_site,
+ local_provider,
+ remote_provider,
+ avg_progress_local,
+ avg_progress_remote,
+ repre.get("files_count", 1),
+ lib.pretty_size(repre.get("files_size", 0)),
+ 1,
+ lib.STATUS[repre.get("status", -1)],
+ files[0].get('path')
+ )
+
+ self._data.append(item)
+ self._rec_loaded += 1
+
+ def get_default_query(self, limit=0):
+ """
+ Returns basic aggregate query for main table.
+
+ Main table provides summary information about representation,
+ which could have multiple files. Details are accessible after
+ double click on representation row.
+ Columns:
+ 'created_dt' - max of created or updated (when failed) per repr
+ 'sync_dt' - same for remote side
+ 'local_site' - progress of repr on local side, 1 = finished
+ 'remote_site' - progress on remote side, calculates from files
+ 'state' -
+ 0 - in progress
+                1 - queued
+                2 - failed
+ 3 - paused
+ 4 - finished on both sides
+
+ are calculated and must be calculated in DB because of
+ pagination
+
+ Args:
+ limit (int): how many records should be returned, by default
+ it 'PAGE_SIZE' for performance.
+ Should be overridden by value of loaded records for refresh
+ functionality (got more records by scrolling, refresh
+ shouldn't reset that)
+ """
+ if limit == 0:
+ limit = SyncRepresentationSummaryModel.PAGE_SIZE
+
+ return [
+ {"$match": self.get_match_part()},
+ {'$unwind': '$files'},
+ # merge potentially unwinded records back to single per repre
+ {'$addFields': {
+ 'order_remote': {
+ '$filter': {'input': '$files.sites', 'as': 'p',
+ 'cond': {'$eq': ['$$p.name', self.remote_site]}
+ }},
+ 'order_local': {
+ '$filter': {'input': '$files.sites', 'as': 'p',
+ 'cond': {'$eq': ['$$p.name', self.local_site]}
+ }}
+ }},
+ {'$addFields': {
+ # prepare progress per file, presence of 'created_dt' denotes
+ # successfully finished load/download
+ 'progress_remote': {'$first': {
+ '$cond': [{'$size': "$order_remote.progress"},
+ "$order_remote.progress",
+ {'$cond': [
+ {'$size': "$order_remote.created_dt"},
+ [1],
+ [0]
+ ]}
+ ]}},
+ 'progress_local': {'$first': {
+ '$cond': [{'$size': "$order_local.progress"},
+ "$order_local.progress",
+ {'$cond': [
+ {'$size': "$order_local.created_dt"},
+ [1],
+ [0]
+ ]}
+ ]}},
+ # file might be successfully created or failed, not both
+ 'updated_dt_remote': {'$first': {
+ '$cond': [{'$size': "$order_remote.created_dt"},
+ "$order_remote.created_dt",
+ {'$cond': [
+ {'$size': "$order_remote.last_failed_dt"},
+ "$order_remote.last_failed_dt",
+ []
+ ]}
+ ]}},
+ 'updated_dt_local': {'$first': {
+ '$cond': [{'$size': "$order_local.created_dt"},
+ "$order_local.created_dt",
+ {'$cond': [
+ {'$size': "$order_local.last_failed_dt"},
+ "$order_local.last_failed_dt",
+ []
+ ]}
+ ]}},
+ 'files_size': {'$ifNull': ["$files.size", 0]},
+ 'failed_remote': {
+ '$cond': [{'$size': "$order_remote.last_failed_dt"},
+ 1,
+ 0]},
+ 'failed_local': {
+ '$cond': [{'$size': "$order_local.last_failed_dt"},
+ 1,
+ 0]},
+ 'failed_local_tries': {
+ '$cond': [{'$size': '$order_local.tries'},
+ {'$first': '$order_local.tries'},
+ 0]},
+ 'failed_remote_tries': {
+ '$cond': [{'$size': '$order_remote.tries'},
+ {'$first': '$order_remote.tries'},
+ 0]},
+ 'paused_remote': {
+ '$cond': [{'$size': "$order_remote.paused"},
+ 1,
+ 0]},
+ 'paused_local': {
+ '$cond': [{'$size': "$order_local.paused"},
+ 1,
+ 0]},
+ }},
+ {'$group': {
+ '_id': '$_id',
+ # pass through context - same for representation
+ 'context': {'$addToSet': '$context'},
+ 'data': {'$addToSet': '$data'},
+ # pass through files as a list
+ 'files': {'$addToSet': '$files'},
+ # count how many files
+ 'files_count': {'$sum': 1},
+ 'files_size': {'$sum': '$files_size'},
+ # sum avg progress, finished = 1
+ 'avg_progress_remote': {'$avg': "$progress_remote"},
+ 'avg_progress_local': {'$avg': "$progress_local"},
+ # select last touch of file
+ 'updated_dt_remote': {'$max': "$updated_dt_remote"},
+ 'failed_remote': {'$sum': '$failed_remote'},
+ 'failed_local': {'$sum': '$failed_local'},
+ 'failed_remote_tries': {'$sum': '$failed_remote_tries'},
+ 'failed_local_tries': {'$sum': '$failed_local_tries'},
+ 'paused_remote': {'$sum': '$paused_remote'},
+ 'paused_local': {'$sum': '$paused_local'},
+ 'updated_dt_local': {'$max': "$updated_dt_local"}
+ }},
+ {"$project": self.projection},
+ {"$sort": self.sort},
+ {
+ '$facet': {
+ 'paginatedResults': [{'$skip': self._rec_loaded},
+ {'$limit': limit}],
+ 'totalCount': [{'$count': 'count'}]
+ }
+ }
+ ]
+
+ def get_match_part(self):
+ """
+ Extend match part with word_filter if present.
+
+ Filter is set by user input. Each model has different fields to be
+ checked.
+ If performance issues are found, '$text' and text indexes should
+ be investigated.
+
+ Fulltext searches in:
+ context.subset
+ context.asset
+ context.representation names AND _id (ObjectId)
+ """
+ base_match = {
+ "type": "representation",
+ 'files.sites.name': {'$all': [self.local_site,
+ self.remote_site]}
+ }
+ if not self.word_filter:
+ return base_match
+ else:
+ regex_str = '.*{}.*'.format(self.word_filter)
+ base_match['$or'] = [
+ {'context.subset': {'$regex': regex_str, '$options': 'i'}},
+ {'context.asset': {'$regex': regex_str, '$options': 'i'}},
+ {'context.representation': {'$regex': regex_str,
+ '$options': 'i'}}]
+
+ if ObjectId.is_valid(self.word_filter):
+ base_match['$or'] = [{'_id': ObjectId(self.word_filter)}]
+
+ return base_match
+
+ def get_default_projection(self):
+ """
+ Projection part for aggregate query.
+
+ All fields with '1' will be returned, no others.
+
+ Returns:
+ (dict)
+ """
+ return {
+ "context.subset": 1,
+ "context.asset": 1,
+ "context.version": 1,
+ "context.representation": 1,
+ "data.path": 1,
+ "files": 1,
+ 'files_count': 1,
+ "files_size": 1,
+ 'avg_progress_remote': 1,
+ 'avg_progress_local': 1,
+ 'updated_dt_remote': 1,
+ 'updated_dt_local': 1,
+ 'paused_remote': 1,
+ 'paused_local': 1,
+ 'status': {
+ '$switch': {
+ 'branches': [
+ {
+ 'case': {
+ '$or': ['$paused_remote', '$paused_local']},
+ 'then': 3 # Paused
+ },
+ {
+ 'case': {
+ '$or': [
+ {'$gte': ['$failed_local_tries', 3]},
+ {'$gte': ['$failed_remote_tries', 3]}
+ ]},
+ 'then': 2}, # Failed
+ {
+ 'case': {
+ '$or': [{'$eq': ['$avg_progress_remote', 0]},
+ {'$eq': ['$avg_progress_local', 0]}]},
+ 'then': 1 # Queued
+ },
+ {
+ 'case': {'$or': [{'$and': [
+ {'$gt': ['$avg_progress_remote', 0]},
+ {'$lt': ['$avg_progress_remote', 1]}
+ ]},
+ {'$and': [
+ {'$gt': ['$avg_progress_local', 0]},
+ {'$lt': ['$avg_progress_local', 1]}
+ ]}
+ ]},
+ 'then': 0 # In progress
+ },
+ {
+ 'case': {'$and': [
+ {'$eq': ['$avg_progress_remote', 1]},
+ {'$eq': ['$avg_progress_local', 1]}
+ ]},
+ 'then': 4 # Synced OK
+ },
+ ],
+ 'default': -1
+ }
+ }
+ }
+
+
+class SyncRepresentationDetailModel(_SyncRepresentationModel):
+ """
+    List of all synchronizable files per single representation.
+
+ Used in detail window accessible after clicking on single repre in the
+ summary.
+
+ Args:
+ sync_server (SyncServer) - object to call server operations (update
+ db status, set site status...)
+ header (list) - names of visible columns
+ _id (string) - MongoDB _id of representation
+ project (string) - collection name, all queries must be called on
+ a specific collection
+ """
+ COLUMN_LABELS = [
+ ("file", "File name"),
+ ("local_site", "Active site"),
+ ("remote_site", "Remote site"),
+ ("files_size", "Size"),
+ ("priority", "Priority"),
+ ("state", "Status")
+ ]
+
+ PAGE_SIZE = 30
+ DEFAULT_SORT = {
+ "files.path": 1
+ }
+ SORT_BY_COLUMN = [
+ "files.path",
+ "updated_dt_local", # local created_dt
+ "updated_dt_remote", # remote created_dt
+ "size", # remote progress
+ "context.asset", # priority TODO
+ "status" # state
+ ]
+
+ refresh_started = QtCore.Signal()
+ refresh_finished = QtCore.Signal()
+
+ @attr.s
+ class SyncRepresentationDetail:
+ """
+ Auxiliary object for easier handling.
+
+ Fields must contain all header values (+ any arbitrary values).
+ """
+ _id = attr.ib()
+ file = attr.ib()
+ created_dt = attr.ib(default=None)
+ sync_dt = attr.ib(default=None)
+ local_site = attr.ib(default=None)
+ remote_site = attr.ib(default=None)
+ local_provider = attr.ib(default=None)
+ remote_provider = attr.ib(default=None)
+ local_progress = attr.ib(default=None)
+ remote_progress = attr.ib(default=None)
+ size = attr.ib(default=None)
+ priority = attr.ib(default=None)
+ state = attr.ib(default=None)
+ tries = attr.ib(default=None)
+ error = attr.ib(default=None)
+ path = attr.ib(default=None)
+
+ def __init__(self, sync_server, header, _id,
+ project=None):
+ super(SyncRepresentationDetailModel, self).__init__()
+ self._header = header
+ self._data = []
+ self._project = project
+ self._rec_loaded = 0
+ self._total_records = 0 # how many documents query actually found
+ self.word_filter = None
+ self._id = _id
+ self._initialized = False
+
+ self.sync_server = sync_server
+ # TODO think about admin mode
+ # this is for regular user, always only single local and single remote
+ self.local_site = self.sync_server.get_active_site(self.project)
+ self.remote_site = self.sync_server.get_remote_site(self.project)
+
+ self.sort = self.DEFAULT_SORT
+
+ # in case we would like to hide/show some columns
+ self.projection = self.get_default_projection()
+
+ self.query = self.get_default_query()
+ representations = self.dbcon.aggregate(self.query)
+ self.refresh(representations)
+
+ self.timer = QtCore.QTimer()
+ self.timer.timeout.connect(self.tick)
+ self.timer.start(SyncRepresentationSummaryModel.REFRESH_SEC)
+
+ def data(self, index, role):
+ item = self._data[index.row()]
+
+ header_value = self._header[index.column()]
+ if role == lib.ProviderRole:
+ if header_value == 'local_site':
+ return item.local_provider
+ if header_value == 'remote_site':
+ return item.remote_provider
+
+ if role == lib.ProgressRole:
+ if header_value == 'local_site':
+ return item.local_progress
+ if header_value == 'remote_site':
+ return item.remote_progress
+
+ if role == lib.DateRole:
+ if header_value == 'local_site':
+ if item.created_dt:
+ return pretty_timestamp(item.created_dt)
+ if header_value == 'remote_site':
+ if item.sync_dt:
+ return pretty_timestamp(item.sync_dt)
+
+ if role == lib.FailedRole:
+ if header_value == 'local_site':
+ return item.state == lib.STATUS[2] and item.local_progress < 1
+ if header_value == 'remote_site':
+ return item.state == lib.STATUS[2] and item.remote_progress < 1
+
+ if role == Qt.DisplayRole:
+ # because of ImageDelegate
+ if header_value in ['remote_site', 'local_site']:
+ return ""
+ return attr.asdict(item)[self._header[index.column()]]
+ if role == Qt.UserRole:
+ return item._id
+
+ def add_page_records(self, local_site, remote_site, representations):
+ """
+ Process all records from 'representations' and add them to storage.
+
+ Args:
+ local_site (str): name of local site (mine)
+ remote_site (str): name of cloud provider (theirs)
+ representations (Mongo Cursor) - mimics result set, 1 object
+ with paginatedResults array and totalCount array
+ """
+ # representations is a Cursor, get first
+ result = representations.next()
+ count = 0
+ total_count = result.get("totalCount")
+ if total_count:
+ count = total_count.pop().get('count')
+ self._total_records = count
+
+ local_provider = lib.translate_provider_for_icon(self.sync_server,
+ self.project,
+ local_site)
+ remote_provider = lib.translate_provider_for_icon(self.sync_server,
+ self.project,
+ remote_site)
+
+ for repre in result.get("paginatedResults"):
+ # log.info("!!! repre:: {}".format(repre))
+ files = repre.get("files", [])
+ if isinstance(files, dict): # aggregate returns dictionary
+ files = [files]
+
+ for file in files:
+ local_updated = remote_updated = None
+ if repre.get('updated_dt_local'):
+ local_updated = \
+ repre.get('updated_dt_local').strftime(
+ "%Y%m%dT%H%M%SZ")
+
+ if repre.get('updated_dt_remote'):
+ remote_updated = \
+ repre.get('updated_dt_remote').strftime(
+ "%Y%m%dT%H%M%SZ")
+
+ remote_progress = lib.convert_progress(
+ repre.get('progress_remote', '0'))
+ local_progress = lib.convert_progress(
+ repre.get('progress_local', '0'))
+
+ errors = []
+ if repre.get('failed_remote_error'):
+ errors.append(repre.get('failed_remote_error'))
+ if repre.get('failed_local_error'):
+ errors.append(repre.get('failed_local_error'))
+
+ item = self.SyncRepresentationDetail(
+ file.get("_id"),
+ os.path.basename(file["path"]),
+ local_updated,
+ remote_updated,
+ local_site,
+ remote_site,
+ local_provider,
+ remote_provider,
+ local_progress,
+ remote_progress,
+ lib.pretty_size(file.get('size', 0)),
+ 1,
+ lib.STATUS[repre.get("status", -1)],
+ repre.get("tries"),
+ '\n'.join(errors),
+ file.get('path')
+
+ )
+ self._data.append(item)
+ self._rec_loaded += 1
+
+ def get_default_query(self, limit=0):
+ """
+ Gets query that gets used when no extra sorting, filtering or
+ projecting is needed.
+
+ Called for basic table view.
+
+ Returns:
+ [(dict)] - list with single dict - appropriate for aggregate
+ function for MongoDB
+ """
+ if limit == 0:
+ limit = SyncRepresentationSummaryModel.PAGE_SIZE
+
+ return [
+ {"$match": self.get_match_part()},
+ {"$unwind": "$files"},
+ {'$addFields': {
+ 'order_remote': {
+ '$filter': {'input': '$files.sites', 'as': 'p',
+ 'cond': {'$eq': ['$$p.name', self.remote_site]}
+ }},
+ 'order_local': {
+ '$filter': {'input': '$files.sites', 'as': 'p',
+ 'cond': {'$eq': ['$$p.name', self.local_site]}
+ }}
+ }},
+ {'$addFields': {
+ # prepare progress per file, presence of 'created_dt' denotes
+ # successfully finished load/download
+ 'progress_remote': {'$first': {
+ '$cond': [{'$size': "$order_remote.progress"},
+ "$order_remote.progress",
+ {'$cond': [
+ {'$size': "$order_remote.created_dt"},
+ [1],
+ [0]
+ ]}
+ ]}},
+ 'progress_local': {'$first': {
+ '$cond': [{'$size': "$order_local.progress"},
+ "$order_local.progress",
+ {'$cond': [
+ {'$size': "$order_local.created_dt"},
+ [1],
+ [0]
+ ]}
+ ]}},
+ # file might be successfully created or failed, not both
+ 'updated_dt_remote': {'$first': {
+ '$cond': [
+ {'$size': "$order_remote.created_dt"},
+ "$order_remote.created_dt",
+ {
+ '$cond': [
+ {'$size': "$order_remote.last_failed_dt"},
+ "$order_remote.last_failed_dt",
+ []
+ ]
+ }
+ ]
+ }},
+ 'updated_dt_local': {'$first': {
+ '$cond': [
+ {'$size': "$order_local.created_dt"},
+ "$order_local.created_dt",
+ {
+ '$cond': [
+ {'$size': "$order_local.last_failed_dt"},
+ "$order_local.last_failed_dt",
+ []
+ ]
+ }
+ ]
+ }},
+ 'paused_remote': {
+ '$cond': [{'$size': "$order_remote.paused"},
+ 1,
+ 0]},
+ 'paused_local': {
+ '$cond': [{'$size': "$order_local.paused"},
+ 1,
+ 0]},
+ 'failed_remote': {
+ '$cond': [{'$size': "$order_remote.last_failed_dt"},
+ 1,
+ 0]},
+ 'failed_local': {
+ '$cond': [{'$size': "$order_local.last_failed_dt"},
+ 1,
+ 0]},
+ 'failed_remote_error': {'$first': {
+ '$cond': [{'$size': "$order_remote.error"},
+ "$order_remote.error",
+ [""]]}},
+ 'failed_local_error': {'$first': {
+ '$cond': [{'$size': "$order_local.error"},
+ "$order_local.error",
+ [""]]}},
+ 'tries': {'$first': {
+ '$cond': [
+ {'$size': "$order_local.tries"},
+ "$order_local.tries",
+ {'$cond': [
+ {'$size': "$order_remote.tries"},
+ "$order_remote.tries",
+ []
+ ]}
+ ]}}
+ }},
+ {"$project": self.projection},
+ {"$sort": self.sort},
+ {
+ '$facet': {
+ 'paginatedResults': [{'$skip': self._rec_loaded},
+ {'$limit': limit}],
+ 'totalCount': [{'$count': 'count'}]
+ }
+ }
+ ]
+
+ def get_match_part(self):
+ """
+ Returns different content for 'match' portion if filtering by
+ name is present
+
+ Returns:
+ (dict)
+ """
+ if not self.word_filter:
+ return {
+ "type": "representation",
+ "_id": self._id
+ }
+ else:
+ regex_str = '.*{}.*'.format(self.word_filter)
+ return {
+ "type": "representation",
+ "_id": self._id,
+ '$or': [{'files.path': {'$regex': regex_str, '$options': 'i'}}]
+ }
+
+ def get_default_projection(self):
+ """
+ Projection part for aggregate query.
+
+ All fields with '1' will be returned, no others.
+
+ Returns:
+ (dict)
+ """
+ return {
+ "files": 1,
+ 'progress_remote': 1,
+ 'progress_local': 1,
+ 'updated_dt_remote': 1,
+ 'updated_dt_local': 1,
+ 'paused_remote': 1,
+ 'paused_local': 1,
+ 'failed_remote_error': 1,
+ 'failed_local_error': 1,
+ 'tries': 1,
+ 'status': {
+ '$switch': {
+ 'branches': [
+ {
+ 'case': {
+ '$or': ['$paused_remote', '$paused_local']},
+ 'then': 3 # Paused
+ },
+ {
+ 'case': {
+ '$and': [{'$or': ['$failed_remote',
+ '$failed_local']},
+ {'$eq': ['$tries', 3]}]},
+ 'then': 2 # Failed (3 tries)
+ },
+ {
+ 'case': {
+ '$or': [{'$eq': ['$progress_remote', 0]},
+ {'$eq': ['$progress_local', 0]}]},
+ 'then': 1 # Queued
+ },
+ {
+ 'case': {
+ '$or': ['$failed_remote', '$failed_local']},
+ 'then': 2 # Failed
+ },
+ {
+ 'case': {'$or': [{'$and': [
+ {'$gt': ['$progress_remote', 0]},
+ {'$lt': ['$progress_remote', 1]}
+ ]},
+ {'$and': [
+ {'$gt': ['$progress_local', 0]},
+ {'$lt': ['$progress_local', 1]}
+ ]}
+ ]},
+ 'then': 0 # In Progress
+ },
+ {
+ 'case': {'$and': [
+ {'$eq': ['$progress_remote', 1]},
+ {'$eq': ['$progress_local', 1]}
+ ]},
+ 'then': 4 # Synced OK
+ },
+ ],
+ 'default': -1
+ }
+ },
+ 'data.path': 1
+ }
diff --git a/openpype/modules/sync_server/tray/widgets.py b/openpype/modules/sync_server/tray/widgets.py
new file mode 100644
index 0000000000..5071ffa2b0
--- /dev/null
+++ b/openpype/modules/sync_server/tray/widgets.py
@@ -0,0 +1,820 @@
+import os
+import subprocess
+import sys
+
+from Qt import QtWidgets, QtCore, QtGui
+from Qt.QtCore import Qt
+
+from openpype.tools.settings import (
+ ProjectListWidget,
+ style
+)
+
+from openpype.api import get_local_site_id
+from openpype.lib import PypeLogger
+
+from avalon.tools.delegates import pretty_timestamp
+
+from openpype.modules.sync_server.tray.models import (
+ SyncRepresentationSummaryModel,
+ SyncRepresentationDetailModel
+)
+
+from openpype.modules.sync_server.tray import lib
+
+log = PypeLogger().get_logger("SyncServer")
+
+
+class SyncProjectListWidget(ProjectListWidget):
+ """
+ Lists all projects that are synchronized to choose from
+ """
+
+ def __init__(self, sync_server, parent):
+ super(SyncProjectListWidget, self).__init__(parent)
+ self.sync_server = sync_server
+ self.project_list.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
+ self.project_list.customContextMenuRequested.connect(
+ self._on_context_menu)
+ self.project_name = None
+ self.local_site = None
+ self.icons = {}
+
+ def validate_context_change(self):
+ return True
+
+ def refresh(self):
+ model = self.project_list.model()
+ model.clear()
+
+ project_name = None
+ for project_name in self.sync_server.sync_project_settings.\
+ keys():
+ if self.sync_server.is_paused() or \
+ self.sync_server.is_project_paused(project_name):
+ icon = self._get_icon("paused")
+ else:
+ icon = self._get_icon("synced")
+
+ model.appendRow(QtGui.QStandardItem(icon, project_name))
+
+ if len(self.sync_server.sync_project_settings.keys()) == 0:
+ model.appendRow(QtGui.QStandardItem(lib.DUMMY_PROJECT))
+
+ self.current_project = self.project_list.currentIndex().data(
+ QtCore.Qt.DisplayRole
+ )
+ if not self.current_project:
+ self.current_project = self.project_list.model().item(0). \
+ data(QtCore.Qt.DisplayRole)
+
+ if project_name:
+ self.local_site = self.sync_server.get_active_site(project_name)
+
+ def _get_icon(self, status):
+ if not self.icons.get(status):
+ resource_path = os.path.dirname(__file__)
+ resource_path = os.path.join(resource_path, "..",
+ "resources")
+ pix_url = "{}/{}.png".format(resource_path, status)
+ icon = QtGui.QIcon(pix_url)
+ self.icons[status] = icon
+ else:
+ icon = self.icons[status]
+ return icon
+
+ def _on_context_menu(self, point):
+ point_index = self.project_list.indexAt(point)
+ if not point_index.isValid():
+ return
+
+ self.project_name = point_index.data(QtCore.Qt.DisplayRole)
+
+ menu = QtWidgets.QMenu()
+ menu.setStyleSheet(style.load_stylesheet())
+ actions_mapping = {}
+
+ if self.sync_server.is_project_paused(self.project_name):
+ action = QtWidgets.QAction("Unpause")
+ actions_mapping[action] = self._unpause
+ else:
+ action = QtWidgets.QAction("Pause")
+ actions_mapping[action] = self._pause
+ menu.addAction(action)
+
+ if self.local_site == get_local_site_id():
+ action = QtWidgets.QAction("Clear local project")
+ actions_mapping[action] = self._clear_project
+ menu.addAction(action)
+
+ result = menu.exec_(QtGui.QCursor.pos())
+ if result:
+ to_run = actions_mapping[result]
+ if to_run:
+ to_run()
+
+ def _pause(self):
+ if self.project_name:
+ self.sync_server.pause_project(self.project_name)
+ self.project_name = None
+ self.refresh()
+
+ def _unpause(self):
+ if self.project_name:
+ self.sync_server.unpause_project(self.project_name)
+ self.project_name = None
+ self.refresh()
+
+ def _clear_project(self):
+ if self.project_name:
+ self.sync_server.clear_project(self.project_name, self.local_site)
+ self.project_name = None
+ self.refresh()
+
+
+class SyncRepresentationWidget(QtWidgets.QWidget):
+ """
+ Summary dialog with list of representations that matches current
+ settings 'local_site' and 'remote_site'.
+ """
+ active_changed = QtCore.Signal() # active index changed
+ message_generated = QtCore.Signal(str)
+
+ default_widths = (
+ ("asset", 220),
+ ("subset", 190),
+ ("version", 55),
+ ("representation", 95),
+ ("local_site", 170),
+ ("remote_site", 170),
+ ("files_count", 50),
+ ("files_size", 60),
+ ("priority", 50),
+ ("state", 110)
+ )
+
+ def __init__(self, sync_server, project=None, parent=None):
+ super(SyncRepresentationWidget, self).__init__(parent)
+
+ self.sync_server = sync_server
+
+ self._selected_id = None # keep last selected _id
+ self.representation_id = None
+ self.site_name = None # to pause/unpause representation
+
+ self.filter = QtWidgets.QLineEdit()
+ self.filter.setPlaceholderText("Filter representations..")
+
+ self._scrollbar_pos = None
+
+ top_bar_layout = QtWidgets.QHBoxLayout()
+ top_bar_layout.addWidget(self.filter)
+
+ self.table_view = QtWidgets.QTableView()
+ headers = [item[0] for item in self.default_widths]
+
+ model = SyncRepresentationSummaryModel(sync_server, headers, project)
+ self.table_view.setModel(model)
+ self.table_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
+ self.table_view.setSelectionMode(
+ QtWidgets.QAbstractItemView.SingleSelection)
+ self.table_view.setSelectionBehavior(
+ QtWidgets.QAbstractItemView.SelectRows)
+ self.table_view.horizontalHeader().setSortIndicator(
+ -1, Qt.AscendingOrder)
+ self.table_view.setSortingEnabled(True)
+ self.table_view.horizontalHeader().setSortIndicatorShown(True)
+ self.table_view.setAlternatingRowColors(True)
+ self.table_view.verticalHeader().hide()
+
+ column = self.table_view.model().get_header_index("local_site")
+ delegate = ImageDelegate(self)
+ self.table_view.setItemDelegateForColumn(column, delegate)
+
+ column = self.table_view.model().get_header_index("remote_site")
+ delegate = ImageDelegate(self)
+ self.table_view.setItemDelegateForColumn(column, delegate)
+
+ for column_name, width in self.default_widths:
+ idx = model.get_header_index(column_name)
+ self.table_view.setColumnWidth(idx, width)
+
+ layout = QtWidgets.QVBoxLayout(self)
+ layout.setContentsMargins(0, 0, 0, 0)
+ layout.addLayout(top_bar_layout)
+ layout.addWidget(self.table_view)
+
+ self.table_view.doubleClicked.connect(self._double_clicked)
+ self.filter.textChanged.connect(lambda: model.set_filter(
+ self.filter.text()))
+ self.table_view.customContextMenuRequested.connect(
+ self._on_context_menu)
+
+ model.refresh_started.connect(self._save_scrollbar)
+ model.refresh_finished.connect(self._set_scrollbar)
+ self.table_view.model().modelReset.connect(self._set_selection)
+
+ self.selection_model = self.table_view.selectionModel()
+ self.selection_model.selectionChanged.connect(self._selection_changed)
+
+ def _selection_changed(self, _new_selection):
+ index = self.selection_model.currentIndex()
+ self._selected_id = \
+ self.table_view.model().data(index, Qt.UserRole)
+
+ def _set_selection(self):
+ """
+ Sets selection to 'self._selected_id' if exists.
+
+ Keep selection during model refresh.
+ """
+ if self._selected_id:
+ index = self.table_view.model().get_index(self._selected_id)
+ if index and index.isValid():
+ mode = QtCore.QItemSelectionModel.Select | \
+ QtCore.QItemSelectionModel.Rows
+ self.selection_model.setCurrentIndex(index, mode)
+ else:
+ self._selected_id = None
+
+ def _double_clicked(self, index):
+ """
+ Opens representation dialog with all files after double-click
+ """
+ _id = self.table_view.model().data(index, Qt.UserRole)
+ detail_window = SyncServerDetailWindow(
+ self.sync_server, _id, self.table_view.model().project)
+ detail_window.exec()
+
+ def _on_context_menu(self, point):
+ """
+ Shows menu with loader actions on Right-click.
+ """
+ point_index = self.table_view.indexAt(point)
+ if not point_index.isValid():
+ return
+
+ self.item = self.table_view.model()._data[point_index.row()]
+ self.representation_id = self.item._id
+ log.debug("menu representation _id:: {}".
+ format(self.representation_id))
+
+ menu = QtWidgets.QMenu()
+ menu.setStyleSheet(style.load_stylesheet())
+ actions_mapping = {}
+ actions_kwargs_mapping = {}
+
+ local_site = self.item.local_site
+ local_progress = self.item.local_progress
+ remote_site = self.item.remote_site
+ remote_progress = self.item.remote_progress
+
+ for site, progress in {local_site: local_progress,
+ remote_site: remote_progress}.items():
+ project = self.table_view.model().project
+ provider = self.sync_server.get_provider_for_site(project,
+ site)
+ if provider == 'local_drive':
+ if 'studio' in site:
+ txt = " studio version"
+ else:
+ txt = " local version"
+ action = QtWidgets.QAction("Open in explorer" + txt)
+ if progress == 1.0:
+ actions_mapping[action] = self._open_in_explorer
+ actions_kwargs_mapping[action] = {'site': site}
+ menu.addAction(action)
+
+ # progress smaller than 1.0 --> in progress or queued
+ if local_progress < 1.0:
+ self.site_name = local_site
+ else:
+ self.site_name = remote_site
+
+ if self.item.state in [lib.STATUS[0], lib.STATUS[1]]:
+ action = QtWidgets.QAction("Pause")
+ actions_mapping[action] = self._pause
+ menu.addAction(action)
+
+ if self.item.state == lib.STATUS[3]:
+ action = QtWidgets.QAction("Unpause")
+ actions_mapping[action] = self._unpause
+ menu.addAction(action)
+
+ # if self.item.state == lib.STATUS[1]:
+ # action = QtWidgets.QAction("Open error detail")
+ # actions_mapping[action] = self._show_detail
+ # menu.addAction(action)
+
+ if remote_progress == 1.0:
+ action = QtWidgets.QAction("Re-sync Active site")
+ actions_mapping[action] = self._reset_local_site
+ menu.addAction(action)
+
+ if local_progress == 1.0:
+ action = QtWidgets.QAction("Re-sync Remote site")
+ actions_mapping[action] = self._reset_remote_site
+ menu.addAction(action)
+
+ if local_site != self.sync_server.DEFAULT_SITE:
+ action = QtWidgets.QAction("Completely remove from local")
+ actions_mapping[action] = self._remove_site
+ menu.addAction(action)
+ else:
+ action = QtWidgets.QAction("Mark for sync to local")
+ actions_mapping[action] = self._add_site
+ menu.addAction(action)
+
+ if not actions_mapping:
+ action = QtWidgets.QAction("< No action >")
+ actions_mapping[action] = None
+ menu.addAction(action)
+
+ result = menu.exec_(QtGui.QCursor.pos())
+ if result:
+ to_run = actions_mapping[result]
+ to_run_kwargs = actions_kwargs_mapping.get(result, {})
+ if to_run:
+ to_run(**to_run_kwargs)
+
+ self.table_view.model().refresh()
+
+ def _pause(self):
+ self.sync_server.pause_representation(self.table_view.model().project,
+ self.representation_id,
+ self.site_name)
+ self.site_name = None
+ self.message_generated.emit("Paused {}".format(self.representation_id))
+
+ def _unpause(self):
+ self.sync_server.unpause_representation(
+ self.table_view.model().project,
+ self.representation_id,
+ self.site_name)
+ self.site_name = None
+ self.message_generated.emit("Unpaused {}".format(
+ self.representation_id))
+
+ # temporary here for testing, will be removed TODO
+ def _add_site(self):
+ log.info(self.representation_id)
+ project_name = self.table_view.model().project
+ local_site_name = get_local_site_id()
+ try:
+ self.sync_server.add_site(
+ project_name,
+ self.representation_id,
+ local_site_name
+ )
+ self.message_generated.emit(
+ "Site {} added for {}".format(local_site_name,
+ self.representation_id))
+ except ValueError as exp:
+ self.message_generated.emit("Error {}".format(str(exp)))
+
+ def _remove_site(self):
+ """
+ Removes site record AND files.
+
+ This is ONLY for representations stored on local site, which
+ cannot be same as SyncServer.DEFAULT_SITE.
+
+ This could only happen when artist work on local machine, not
+ connected to studio mounted drives.
+ """
+ log.info("Removing {}".format(self.representation_id))
+ try:
+ local_site = get_local_site_id()
+ self.sync_server.remove_site(
+ self.table_view.model().project,
+ self.representation_id,
+ local_site,
+ True)
+ self.message_generated.emit("Site {} removed".format(local_site))
+ except ValueError as exp:
+ self.message_generated.emit("Error {}".format(str(exp)))
+ self.table_view.model().refresh(
+ load_records=self.table_view.model()._rec_loaded)
+
+ def _reset_local_site(self):
+ """
+ Removes errors or success metadata for particular file >> forces
+ redo of upload/download
+ """
+ self.sync_server.reset_provider_for_file(
+ self.table_view.model().project,
+ self.representation_id,
+ 'local')
+ self.table_view.model().refresh(
+ load_records=self.table_view.model()._rec_loaded)
+
+ def _reset_remote_site(self):
+ """
+ Removes errors or success metadata for particular file >> forces
+ redo of upload/download
+ """
+ self.sync_server.reset_provider_for_file(
+ self.table_view.model().project,
+ self.representation_id,
+ 'remote')
+ self.table_view.model().refresh(
+ load_records=self.table_view.model()._rec_loaded)
+
+ def _open_in_explorer(self, site):
+ if not self.item:
+ return
+
+ fpath = self.item.path
+ project = self.table_view.model().project
+ fpath = self.sync_server.get_local_file_path(project,
+ site,
+ fpath)
+
+ fpath = os.path.normpath(os.path.dirname(fpath))
+ if os.path.isdir(fpath):
+ if 'win' in sys.platform: # windows
+ subprocess.Popen('explorer "%s"' % fpath)
+ elif sys.platform == 'darwin': # macOS
+ subprocess.Popen(['open', fpath])
+ else: # linux
+ try:
+ subprocess.Popen(['xdg-open', fpath])
+ except OSError:
+ raise OSError('unsupported xdg-open call??')
+
+ def _save_scrollbar(self):
+ self._scrollbar_pos = self.table_view.verticalScrollBar().value()
+
+ def _set_scrollbar(self):
+ if self._scrollbar_pos:
+ self.table_view.verticalScrollBar().setValue(self._scrollbar_pos)
+
+
+class SyncRepresentationDetailWidget(QtWidgets.QWidget):
+ """
+ Widget to display list of synchronizable files for single repre.
+
+ Args:
+ _id (str): representation _id
+ project (str): name of project with repre
+ parent (QDialog): SyncServerDetailWindow
+ """
+ active_changed = QtCore.Signal() # active index changed
+
+ default_widths = (
+ ("file", 290),
+ ("local_site", 185),
+ ("remote_site", 185),
+ ("size", 60),
+ ("priority", 25),
+ ("state", 110)
+ )
+
+ def __init__(self, sync_server, _id=None, project=None, parent=None):
+ super(SyncRepresentationDetailWidget, self).__init__(parent)
+
+ log.debug("Representation_id:{}".format(_id))
+ self.representation_id = _id
+ self.item = None # set to item that mouse was clicked over
+ self.project = project
+
+ self.sync_server = sync_server
+
+ self._selected_id = None
+
+ self.filter = QtWidgets.QLineEdit()
+ self.filter.setPlaceholderText("Filter representation..")
+
+ self._scrollbar_pos = None
+
+ top_bar_layout = QtWidgets.QHBoxLayout()
+ top_bar_layout.addWidget(self.filter)
+
+ self.table_view = QtWidgets.QTableView()
+ headers = [item[0] for item in self.default_widths]
+
+ model = SyncRepresentationDetailModel(sync_server, headers, _id,
+ project)
+ self.table_view.setModel(model)
+ self.table_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
+ self.table_view.setSelectionMode(
+ QtWidgets.QAbstractItemView.SingleSelection)
+ self.table_view.setSelectionBehavior(
+ QtWidgets.QTableView.SelectRows)
+ self.table_view.horizontalHeader().setSortIndicator(-1,
+ Qt.AscendingOrder)
+ self.table_view.setSortingEnabled(True)
+ self.table_view.horizontalHeader().setSortIndicatorShown(True)
+ self.table_view.setAlternatingRowColors(True)
+ self.table_view.verticalHeader().hide()
+
+ column = self.table_view.model().get_header_index("local_site")
+ delegate = ImageDelegate(self)
+ self.table_view.setItemDelegateForColumn(column, delegate)
+
+ column = self.table_view.model().get_header_index("remote_site")
+ delegate = ImageDelegate(self)
+ self.table_view.setItemDelegateForColumn(column, delegate)
+
+ for column_name, width in self.default_widths:
+ idx = model.get_header_index(column_name)
+ self.table_view.setColumnWidth(idx, width)
+
+ layout = QtWidgets.QVBoxLayout(self)
+ layout.setContentsMargins(0, 0, 0, 0)
+ layout.addLayout(top_bar_layout)
+ layout.addWidget(self.table_view)
+
+ self.filter.textChanged.connect(lambda: model.set_filter(
+ self.filter.text()))
+ self.table_view.customContextMenuRequested.connect(
+ self._on_context_menu)
+
+ model.refresh_started.connect(self._save_scrollbar)
+ model.refresh_finished.connect(self._set_scrollbar)
+ self.table_view.model().modelReset.connect(self._set_selection)
+
+ self.selection_model = self.table_view.selectionModel()
+ self.selection_model.selectionChanged.connect(self._selection_changed)
+
+ def _selection_changed(self):
+ index = self.selection_model.currentIndex()
+ self._selected_id = self.table_view.model().data(index, Qt.UserRole)
+
+ def _set_selection(self):
+ """
+ Sets selection to 'self._selected_id' if exists.
+
+ Keep selection during model refresh.
+ """
+ if self._selected_id:
+ index = self.table_view.model().get_index(self._selected_id)
+ if index and index.isValid():
+ mode = QtCore.QItemSelectionModel.Select | \
+ QtCore.QItemSelectionModel.Rows
+ self.selection_model.setCurrentIndex(index, mode)
+ else:
+ self._selected_id = None
+
+ def _show_detail(self):
+ """
+ Shows windows with error message for failed sync of a file.
+ """
+ dt = max(self.item.created_dt, self.item.sync_dt)
+ detail_window = SyncRepresentationErrorWindow(self.item._id,
+ self.project,
+ dt,
+ self.item.tries,
+ self.item.error)
+ detail_window.exec()
+
+ def _on_context_menu(self, point):
+ """
+ Shows menu with loader actions on Right-click.
+ """
+ point_index = self.table_view.indexAt(point)
+ if not point_index.isValid():
+ return
+
+ self.item = self.table_view.model()._data[point_index.row()]
+
+ menu = QtWidgets.QMenu()
+ menu.setStyleSheet(style.load_stylesheet())
+ actions_mapping = {}
+ actions_kwargs_mapping = {}
+
+ local_site = self.item.local_site
+ local_progress = self.item.local_progress
+ remote_site = self.item.remote_site
+ remote_progress = self.item.remote_progress
+
+ for site, progress in {local_site: local_progress,
+ remote_site: remote_progress}.items():
+ project = self.table_view.model().project
+ provider = self.sync_server.get_provider_for_site(project,
+ site)
+ if provider == 'local_drive':
+ if 'studio' in site:
+ txt = " studio version"
+ else:
+ txt = " local version"
+ action = QtWidgets.QAction("Open in explorer" + txt)
+ if progress == 1:
+ actions_mapping[action] = self._open_in_explorer
+ actions_kwargs_mapping[action] = {'site': site}
+ menu.addAction(action)
+
+ if self.item.state == lib.STATUS[2]:
+ action = QtWidgets.QAction("Open error detail")
+ actions_mapping[action] = self._show_detail
+ menu.addAction(action)
+
+ if float(remote_progress) == 1.0:
+ action = QtWidgets.QAction("Re-sync active site")
+ actions_mapping[action] = self._reset_local_site
+ menu.addAction(action)
+
+ if float(local_progress) == 1.0:
+ action = QtWidgets.QAction("Re-sync remote site")
+ actions_mapping[action] = self._reset_remote_site
+ menu.addAction(action)
+
+ if not actions_mapping:
+ action = QtWidgets.QAction("< No action >")
+ actions_mapping[action] = None
+ menu.addAction(action)
+
+ result = menu.exec_(QtGui.QCursor.pos())
+ if result:
+ to_run = actions_mapping[result]
+ to_run_kwargs = actions_kwargs_mapping.get(result, {})
+ if to_run:
+ to_run(**to_run_kwargs)
+
+ def _reset_local_site(self):
+ """
+ Removes errors or success metadata for particular file >> forces
+ redo of upload/download
+ """
+ self.sync_server.reset_provider_for_file(
+ self.table_view.model().project,
+ self.representation_id,
+ 'local',
+ self.item._id)
+ self.table_view.model().refresh(
+ load_records=self.table_view.model()._rec_loaded)
+
+ def _reset_remote_site(self):
+ """
+ Removes errors or success metadata for particular file >> forces
+ redo of upload/download
+ """
+ self.sync_server.reset_provider_for_file(
+ self.table_view.model().project,
+ self.representation_id,
+ 'remote',
+ self.item._id)
+ self.table_view.model().refresh(
+ load_records=self.table_view.model()._rec_loaded)
+
+ def _open_in_explorer(self, site):
+ if not self.item:
+ return
+
+ fpath = self.item.path
+ project = self.project
+ fpath = self.sync_server.get_local_file_path(project, site, fpath)
+
+ fpath = os.path.normpath(os.path.dirname(fpath))
+ if os.path.isdir(fpath):
+ if 'win' in sys.platform: # windows
+ subprocess.Popen('explorer "%s"' % fpath)
+ elif sys.platform == 'darwin': # macOS
+ subprocess.Popen(['open', fpath])
+ else: # linux
+ try:
+ subprocess.Popen(['xdg-open', fpath])
+ except OSError:
+ raise OSError('unsupported xdg-open call??')
+
+ def _save_scrollbar(self):
+ self._scrollbar_pos = self.table_view.verticalScrollBar().value()
+
+ def _set_scrollbar(self):
+ if self._scrollbar_pos:
+ self.table_view.verticalScrollBar().setValue(self._scrollbar_pos)
+
+
+class SyncRepresentationErrorWidget(QtWidgets.QWidget):
+ """
+ Dialog to show when sync error happened, prints error message
+ """
+
+ def __init__(self, _id, dt, tries, msg, parent=None):
+ super(SyncRepresentationErrorWidget, self).__init__(parent)
+
+ layout = QtWidgets.QHBoxLayout(self)
+
+ txts = []
+ txts.append("{}: {}".format("Last update date", pretty_timestamp(dt)))
+ txts.append("{}: {}".format("Retries", str(tries)))
+ txts.append("{}: {}".format("Error message", msg))
+
+ text_area = QtWidgets.QPlainTextEdit("\n\n".join(txts))
+ text_area.setReadOnly(True)
+ layout.addWidget(text_area)
+
+
+class ImageDelegate(QtWidgets.QStyledItemDelegate):
+ """
+ Prints icon of site and progress of synchronization
+ """
+
+ def __init__(self, parent=None):
+ super(ImageDelegate, self).__init__(parent)
+ self.icons = {}
+
+ def paint(self, painter, option, index):
+ super(ImageDelegate, self).paint(painter, option, index)
+ option = QtWidgets.QStyleOptionViewItem(option)
+ option.showDecorationSelected = True
+
+ provider = index.data(lib.ProviderRole)
+ value = index.data(lib.ProgressRole)
+ date_value = index.data(lib.DateRole)
+ is_failed = index.data(lib.FailedRole)
+
+ if not self.icons.get(provider):
+ resource_path = os.path.dirname(__file__)
+ resource_path = os.path.join(resource_path, "..",
+ "providers", "resources")
+ pix_url = "{}/{}.png".format(resource_path, provider)
+ pixmap = QtGui.QPixmap(pix_url)
+ self.icons[provider] = pixmap
+ else:
+ pixmap = self.icons[provider]
+
+ padding = 10
+ point = QtCore.QPoint(option.rect.x() + padding,
+ option.rect.y() +
+ (option.rect.height() - pixmap.height()) / 2)
+ painter.drawPixmap(point, pixmap)
+
+ overlay_rect = option.rect.translated(0, 0)
+ overlay_rect.setHeight(overlay_rect.height() * (1.0 - float(value)))
+ painter.fillRect(overlay_rect,
+ QtGui.QBrush(QtGui.QColor(0, 0, 0, 100)))
+ text_rect = option.rect.translated(10, 0)
+ painter.drawText(text_rect,
+ QtCore.Qt.AlignCenter,
+ date_value)
+
+ if is_failed:
+ overlay_rect = option.rect.translated(0, 0)
+ painter.fillRect(overlay_rect,
+ QtGui.QBrush(QtGui.QColor(255, 0, 0, 35)))
+
+
+class SyncServerDetailWindow(QtWidgets.QDialog):
+ def __init__(self, sync_server, _id, project, parent=None):
+ log.debug(
+ "!!! SyncServerDetailWindow _id:: {}".format(_id))
+ super(SyncServerDetailWindow, self).__init__(parent)
+ self.setWindowFlags(QtCore.Qt.Window)
+ self.setFocusPolicy(QtCore.Qt.StrongFocus)
+
+ self.setStyleSheet(style.load_stylesheet())
+ self.setWindowIcon(QtGui.QIcon(style.app_icon_path()))
+ self.resize(1000, 400)
+
+ body = QtWidgets.QWidget()
+ footer = QtWidgets.QWidget()
+ footer.setFixedHeight(20)
+
+ container = SyncRepresentationDetailWidget(sync_server, _id, project,
+ parent=self)
+ body_layout = QtWidgets.QHBoxLayout(body)
+ body_layout.addWidget(container)
+ body_layout.setContentsMargins(0, 0, 0, 0)
+
+ self.message = QtWidgets.QLabel()
+ self.message.hide()
+
+ footer_layout = QtWidgets.QVBoxLayout(footer)
+ footer_layout.addWidget(self.message)
+ footer_layout.setContentsMargins(0, 0, 0, 0)
+
+ layout = QtWidgets.QVBoxLayout(self)
+ layout.addWidget(body)
+ layout.addWidget(footer)
+
+ self.setLayout(body_layout)
+ self.setWindowTitle("Sync Representation Detail")
+
+
+class SyncRepresentationErrorWindow(QtWidgets.QDialog):
+ def __init__(self, _id, project, dt, tries, msg, parent=None):
+ super(SyncRepresentationErrorWindow, self).__init__(parent)
+ self.setWindowFlags(QtCore.Qt.Window)
+ self.setFocusPolicy(QtCore.Qt.StrongFocus)
+
+ self.setStyleSheet(style.load_stylesheet())
+ self.setWindowIcon(QtGui.QIcon(style.app_icon_path()))
+ self.resize(900, 150)
+
+ body = QtWidgets.QWidget()
+
+ container = SyncRepresentationErrorWidget(_id, dt, tries, msg,
+ parent=self)
+ body_layout = QtWidgets.QHBoxLayout(body)
+ body_layout.addWidget(container)
+ body_layout.setContentsMargins(0, 0, 0, 0)
+
+ message = QtWidgets.QLabel()
+ message.hide()
+
+ layout = QtWidgets.QVBoxLayout(self)
+ layout.addWidget(body)
+
+ self.setLayout(body_layout)
+ self.setWindowTitle("Sync Representation Error Detail")
diff --git a/openpype/modules/sync_server/utils.py b/openpype/modules/sync_server/utils.py
index 0762766783..36f3444399 100644
--- a/openpype/modules/sync_server/utils.py
+++ b/openpype/modules/sync_server/utils.py
@@ -1,8 +1,14 @@
import time
-from openpype.api import Logger
+from openpype.api import Logger
log = Logger().get_logger("SyncServer")
+class SyncStatus:
+ DO_NOTHING = 0
+ DO_UPLOAD = 1
+ DO_DOWNLOAD = 2
+
+
def time_function(method):
""" Decorator to print how much time function took.
For debugging.
diff --git a/openpype/plugins/load/add_site.py b/openpype/plugins/load/add_site.py
new file mode 100644
index 0000000000..09448d553c
--- /dev/null
+++ b/openpype/plugins/load/add_site.py
@@ -0,0 +1,33 @@
+from avalon import api
+from openpype.modules import ModulesManager
+
+
+class AddSyncSite(api.Loader):
+ """Add sync site to representation"""
+ representations = ["*"]
+ families = ["*"]
+
+ label = "Add Sync Site"
+ order = 2 # lower means better
+ icon = "download"
+ color = "#999999"
+
+ def load(self, context, name=None, namespace=None, data=None):
+ self.log.info("Adding {} to representation: {}".format(
+ data["site_name"], data["_id"]))
+ self.add_site_to_representation(data["project_name"],
+ data["_id"],
+ data["site_name"])
+ self.log.debug("Site added.")
+
+ @staticmethod
+ def add_site_to_representation(project_name, representation_id, site_name):
+ """Adds new site to representation_id, resets if exists"""
+ manager = ModulesManager()
+ sync_server = manager.modules_by_name["sync_server"]
+ sync_server.add_site(project_name, representation_id, site_name,
+ force=True)
+
+ def filepath_from_context(self, context):
+ """No real file loading"""
+ return ""
diff --git a/openpype/plugins/load/delete_old_versions.py b/openpype/plugins/load/delete_old_versions.py
index e5132e0f8a..8e3999e9c4 100644
--- a/openpype/plugins/load/delete_old_versions.py
+++ b/openpype/plugins/load/delete_old_versions.py
@@ -15,11 +15,12 @@ from openpype.api import Anatomy
class DeleteOldVersions(api.Loader):
-
+ """Deletes specific number of old version"""
representations = ["*"]
families = ["*"]
label = "Delete Old Versions"
+ order = 35
icon = "trash"
color = "#d8d8d8"
@@ -421,8 +422,9 @@ class DeleteOldVersions(api.Loader):
class CalculateOldVersions(DeleteOldVersions):
-
+ """Calculate file size of old versions"""
label = "Calculate Old Versions"
+ order = 30
options = [
qargparse.Integer(
diff --git a/openpype/plugins/load/remove_site.py b/openpype/plugins/load/remove_site.py
new file mode 100644
index 0000000000..aedb5d1f2f
--- /dev/null
+++ b/openpype/plugins/load/remove_site.py
@@ -0,0 +1,33 @@
+from avalon import api
+from openpype.modules import ModulesManager
+
+
+class RemoveSyncSite(api.Loader):
+ """Remove sync site and its files on representation"""
+ representations = ["*"]
+ families = ["*"]
+
+ label = "Remove Sync Site"
+ order = 4
+ icon = "download"
+ color = "#999999"
+
+ def load(self, context, name=None, namespace=None, data=None):
+ self.log.info("Removing {} on representation: {}".format(
+ data["site_name"], data["_id"]))
+ self.remove_site_on_representation(data["project_name"],
+ data["_id"],
+ data["site_name"])
+ self.log.debug("Site added.")
+
+ @staticmethod
+ def remove_site_on_representation(project_name, representation_id,
+ site_name):
+ manager = ModulesManager()
+ sync_server = manager.modules_by_name["sync_server"]
+ sync_server.remove_site(project_name, representation_id,
+ site_name, True)
+
+ def filepath_from_context(self, context):
+ """No real file loading"""
+ return ""
diff --git a/openpype/plugins/publish/extract_review.py b/openpype/plugins/publish/extract_review.py
index 23c8ed2a8e..a71b1db66b 100644
--- a/openpype/plugins/publish/extract_review.py
+++ b/openpype/plugins/publish/extract_review.py
@@ -2,12 +2,18 @@ import os
import re
import copy
import json
-import pyblish.api
+
import clique
+
+import pyblish.api
import openpype.api
-import openpype.lib
-from openpype.lib import should_decompress, \
- get_decompress_dir, decompress
+from openpype.lib import (
+ get_ffmpeg_tool_path,
+ ffprobe_streams,
+ should_decompress,
+ get_decompress_dir,
+ decompress
+)
class ExtractReview(pyblish.api.InstancePlugin):
@@ -43,17 +49,11 @@ class ExtractReview(pyblish.api.InstancePlugin):
supported_exts = image_exts + video_exts
# FFmpeg tools paths
- ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg")
+ ffmpeg_path = get_ffmpeg_tool_path("ffmpeg")
# Preset attributes
profiles = None
- # Legacy attributes
- outputs = {}
- ext_filter = []
- to_width = 1920
- to_height = 1080
-
def process(self, instance):
self.log.debug(instance.data["representations"])
# Skip review when requested.
@@ -72,10 +72,6 @@ class ExtractReview(pyblish.api.InstancePlugin):
).format(instance_label))
return
- # Use legacy processing when `profiles` is not set.
- if self.profiles is None:
- return self.legacy_process(instance)
-
# Run processing
self.main_process(instance)
@@ -726,7 +722,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
# NOTE Skipped using instance's resolution
full_input_path_single_file = temp_data["full_input_path_single_file"]
- input_data = openpype.lib.ffprobe_streams(
+ input_data = ffprobe_streams(
full_input_path_single_file, self.log
)[0]
input_width = int(input_data["width"])
@@ -1253,438 +1249,6 @@ class ExtractReview(pyblish.api.InstancePlugin):
return filtered_outputs
- def legacy_process(self, instance):
- self.log.warning("Legacy review presets are used.")
-
- output_profiles = self.outputs or {}
-
- inst_data = instance.data
- context_data = instance.context.data
- fps = float(inst_data.get("fps"))
- frame_start = inst_data.get("frameStart")
- frame_end = inst_data.get("frameEnd")
- handle_start = inst_data.get("handleStart",
- context_data.get("handleStart"))
- handle_end = inst_data.get("handleEnd",
- context_data.get("handleEnd"))
- pixel_aspect = inst_data.get("pixelAspect", 1)
- resolution_width = inst_data.get("resolutionWidth", self.to_width)
- resolution_height = inst_data.get("resolutionHeight", self.to_height)
- self.log.debug("Families In: `{}`".format(inst_data["families"]))
- self.log.debug("__ frame_start: {}".format(frame_start))
- self.log.debug("__ frame_end: {}".format(frame_end))
- self.log.debug("__ handle_start: {}".format(handle_start))
- self.log.debug("__ handle_end: {}".format(handle_end))
-
- # get representation and loop them
- representations = inst_data["representations"]
-
- ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg")
-
- # filter out mov and img sequences
- representations_new = representations[:]
- for repre in representations:
-
- if repre['ext'] not in self.ext_filter:
- continue
-
- tags = repre.get("tags", [])
-
- if inst_data.get("multipartExr") is True:
- # ffmpeg doesn't support multipart exrs
- continue
-
- if "thumbnail" in tags:
- continue
-
- self.log.info("Try repre: {}".format(repre))
-
- if "review" not in tags:
- continue
-
- staging_dir = repre["stagingDir"]
-
- # iterating preset output profiles
- for name, profile in output_profiles.items():
- repre_new = repre.copy()
- ext = profile.get("ext", None)
- p_tags = profile.get('tags', [])
-
- # append repre tags into profile tags
- for t in tags:
- if t not in p_tags:
- p_tags.append(t)
-
- self.log.info("p_tags: `{}`".format(p_tags))
-
- # adding control for presets to be sequence
- # or single file
- is_sequence = ("sequence" in p_tags) and (ext in (
- "png", "jpg", "jpeg"))
-
- # no handles switch from profile tags
- no_handles = "no-handles" in p_tags
-
- self.log.debug("Profile name: {}".format(name))
-
- if not ext:
- ext = "mov"
- self.log.warning(
- str("`ext` attribute not in output "
- "profile. Setting to default ext: `mov`"))
-
- self.log.debug(
- "instance.families: {}".format(
- instance.data['families']))
- self.log.debug(
- "profile.families: {}".format(profile['families']))
-
- profile_family_check = False
- for _family in profile['families']:
- if _family in instance.data['families']:
- profile_family_check = True
- break
-
- if not profile_family_check:
- continue
-
- if isinstance(repre["files"], list):
- collections, remainder = clique.assemble(
- repre["files"])
-
- full_input_path = os.path.join(
- staging_dir, collections[0].format(
- '{head}{padding}{tail}')
- )
-
- filename = collections[0].format('{head}')
- if filename.endswith('.'):
- filename = filename[:-1]
- else:
- full_input_path = os.path.join(
- staging_dir, repre["files"])
- filename = repre["files"].split(".")[0]
-
- repr_file = filename + "_{0}.{1}".format(name, ext)
- full_output_path = os.path.join(
- staging_dir, repr_file)
-
- if is_sequence:
- filename_base = filename + "_{0}".format(name)
- repr_file = filename_base + ".%08d.{0}".format(
- ext)
- repre_new["sequence_file"] = repr_file
- full_output_path = os.path.join(
- staging_dir, filename_base, repr_file)
-
- self.log.info("input {}".format(full_input_path))
- self.log.info("output {}".format(full_output_path))
-
- new_tags = [x for x in tags if x != "delete"]
-
- # add families
- [instance.data["families"].append(t)
- for t in p_tags
- if t not in instance.data["families"]]
-
- # add to
- [new_tags.append(t) for t in p_tags
- if t not in new_tags]
-
- self.log.info("new_tags: `{}`".format(new_tags))
-
- input_args = []
- output_args = []
-
- # overrides output file
- input_args.append("-y")
-
- # preset's input data
- input_args.extend(profile.get('input', []))
-
- # necessary input data
- # adds start arg only if image sequence
-
- frame_start_handle = frame_start - handle_start
- frame_end_handle = frame_end + handle_end
- if isinstance(repre["files"], list):
- if frame_start_handle != repre.get(
- "detectedStart", frame_start_handle):
- frame_start_handle = repre.get("detectedStart")
-
- # exclude handle if no handles defined
- if no_handles:
- frame_start_handle = frame_start
- frame_end_handle = frame_end
-
- input_args.append(
- "-start_number {0} -framerate {1}".format(
- frame_start_handle, fps))
- else:
- if no_handles:
- start_sec = float(handle_start) / fps
- input_args.append("-ss {:0.2f}".format(start_sec))
- frame_start_handle = frame_start
- frame_end_handle = frame_end
-
- input_args.append("-i {}".format(full_input_path))
-
- for audio in instance.data.get("audio", []):
- offset_frames = (
- instance.data.get("frameStartFtrack") -
- audio["offset"]
- )
- offset_seconds = offset_frames / fps
-
- if offset_seconds > 0:
- input_args.append("-ss")
- else:
- input_args.append("-itsoffset")
-
- input_args.append(str(abs(offset_seconds)))
-
- input_args.extend(
- ["-i", audio["filename"]]
- )
-
- # Need to merge audio if there are more
- # than 1 input.
- if len(instance.data["audio"]) > 1:
- input_args.extend(
- [
- "-filter_complex",
- "amerge",
- "-ac",
- "2"
- ]
- )
-
- codec_args = profile.get('codec', [])
- output_args.extend(codec_args)
- # preset's output data
- output_args.extend(profile.get('output', []))
-
- # defining image ratios
- resolution_ratio = (
- float(resolution_width) * pixel_aspect) / resolution_height
- delivery_ratio = float(self.to_width) / float(self.to_height)
- self.log.debug(
- "__ resolution_ratio: `{}`".format(resolution_ratio))
- self.log.debug(
- "__ delivery_ratio: `{}`".format(delivery_ratio))
-
- # get scale factor
- scale_factor = float(self.to_height) / (
- resolution_height * pixel_aspect)
-
- # shorten two decimals long float number for testing conditions
- resolution_ratio_test = float(
- "{:0.2f}".format(resolution_ratio))
- delivery_ratio_test = float(
- "{:0.2f}".format(delivery_ratio))
-
- if resolution_ratio_test != delivery_ratio_test:
- scale_factor = float(self.to_width) / (
- resolution_width * pixel_aspect)
- if int(scale_factor * 100) == 100:
- scale_factor = (
- float(self.to_height) / resolution_height
- )
-
- self.log.debug("__ scale_factor: `{}`".format(scale_factor))
-
- # letter_box
- lb = profile.get('letter_box', 0)
- if lb != 0:
- ffmpeg_width = self.to_width
- ffmpeg_height = self.to_height
- if "reformat" not in p_tags:
- lb /= pixel_aspect
- if resolution_ratio_test != delivery_ratio_test:
- ffmpeg_width = resolution_width
- ffmpeg_height = int(
- resolution_height * pixel_aspect)
- else:
- if resolution_ratio_test != delivery_ratio_test:
- lb /= scale_factor
- else:
- lb /= pixel_aspect
-
- output_args.append(str(
- "-filter:v scale={0}x{1}:flags=lanczos,"
- "setsar=1,drawbox=0:0:iw:"
- "round((ih-(iw*(1/{2})))/2):t=fill:"
- "c=black,drawbox=0:ih-round((ih-(iw*("
- "1/{2})))/2):iw:round((ih-(iw*(1/{2})))"
- "/2):t=fill:c=black").format(
- ffmpeg_width, ffmpeg_height, lb))
-
- # In case audio is longer than video.
- output_args.append("-shortest")
-
- if no_handles:
- duration_sec = float(
- frame_end_handle - frame_start_handle + 1) / fps
-
- output_args.append("-t {:0.2f}".format(duration_sec))
-
- # output filename
- output_args.append(full_output_path)
-
- self.log.debug(
- "__ pixel_aspect: `{}`".format(pixel_aspect))
- self.log.debug(
- "__ resolution_width: `{}`".format(
- resolution_width))
- self.log.debug(
- "__ resolution_height: `{}`".format(
- resolution_height))
-
- # scaling none square pixels and 1920 width
- if "reformat" in p_tags:
- if resolution_ratio_test < delivery_ratio_test:
- self.log.debug("lower then delivery")
- width_scale = int(self.to_width * scale_factor)
- width_half_pad = int((
- self.to_width - width_scale) / 2)
- height_scale = self.to_height
- height_half_pad = 0
- else:
- self.log.debug("heigher then delivery")
- width_scale = self.to_width
- width_half_pad = 0
- scale_factor = float(self.to_width) / (float(
- resolution_width) * pixel_aspect)
- self.log.debug(
- "__ scale_factor: `{}`".format(
- scale_factor))
- height_scale = int(
- resolution_height * scale_factor)
- height_half_pad = int(
- (self.to_height - height_scale) / 2)
-
- self.log.debug(
- "__ width_scale: `{}`".format(width_scale))
- self.log.debug(
- "__ width_half_pad: `{}`".format(
- width_half_pad))
- self.log.debug(
- "__ height_scale: `{}`".format(
- height_scale))
- self.log.debug(
- "__ height_half_pad: `{}`".format(
- height_half_pad))
-
- scaling_arg = str(
- "scale={0}x{1}:flags=lanczos,"
- "pad={2}:{3}:{4}:{5}:black,setsar=1"
- ).format(width_scale, height_scale,
- self.to_width, self.to_height,
- width_half_pad,
- height_half_pad
- )
-
- vf_back = self.add_video_filter_args(
- output_args, scaling_arg)
- # add it to output_args
- output_args.insert(0, vf_back)
-
- # baking lut file application
- lut_path = instance.data.get("lutPath")
- if lut_path and ("bake-lut" in p_tags):
- # removing Gama info as it is all baked in lut
- gamma = next((g for g in input_args
- if "-gamma" in g), None)
- if gamma:
- input_args.remove(gamma)
-
- # create lut argument
- lut_arg = "lut3d=file='{}'".format(
- lut_path.replace(
- "\\", "/").replace(":/", "\\:/")
- )
- lut_arg += ",colormatrix=bt601:bt709"
-
- vf_back = self.add_video_filter_args(
- output_args, lut_arg)
- # add it to output_args
- output_args.insert(0, vf_back)
- self.log.info("Added Lut to ffmpeg command")
- self.log.debug(
- "_ output_args: `{}`".format(output_args))
-
- if is_sequence:
- stg_dir = os.path.dirname(full_output_path)
-
- if not os.path.exists(stg_dir):
- self.log.debug(
- "creating dir: {}".format(stg_dir))
- os.mkdir(stg_dir)
-
- mov_args = [
- "\"{}\"".format(ffmpeg_path),
- " ".join(input_args),
- " ".join(output_args)
- ]
- subprcs_cmd = " ".join(mov_args)
-
- # run subprocess
- self.log.debug("Executing: {}".format(subprcs_cmd))
- openpype.api.run_subprocess(
- subprcs_cmd, shell=True, logger=self.log
- )
-
- # create representation data
- repre_new.update({
- 'name': name,
- 'ext': ext,
- 'files': repr_file,
- "tags": new_tags,
- "outputName": name,
- "codec": codec_args,
- "_profile": profile,
- "resolutionHeight": resolution_height,
- "resolutionWidth": resolution_width,
- "frameStartFtrack": frame_start_handle,
- "frameEndFtrack": frame_end_handle
- })
- if is_sequence:
- repre_new.update({
- "stagingDir": stg_dir,
- "files": os.listdir(stg_dir)
- })
- if no_handles:
- repre_new.update({
- "outputName": name + "_noHandles",
- "frameStartFtrack": frame_start,
- "frameEndFtrack": frame_end
- })
- if repre_new.get('preview'):
- repre_new.pop("preview")
- if repre_new.get('thumbnail'):
- repre_new.pop("thumbnail")
-
- # adding representation
- self.log.debug("Adding: {}".format(repre_new))
- representations_new.append(repre_new)
-
- for repre in representations_new:
- if "delete" in repre.get("tags", []):
- representations_new.remove(repre)
- if "clean_name" in repre.get("tags", []):
- repre_new.pop("outputName")
-
- instance.data.update({
- "reviewToWidth": self.to_width,
- "reviewToHeight": self.to_height
- })
-
- self.log.debug(
- "new representations: {}".format(representations_new))
- instance.data["representations"] = representations_new
-
- self.log.debug("Families Out: `{}`".format(instance.data["families"]))
-
def add_video_filter_args(self, args, inserting_arg):
"""
Fixing video filter arguments to be one long string
diff --git a/openpype/plugins/publish/extract_scanline_exr.py b/openpype/plugins/publish/extract_scanline_exr.py
index 404aa65ac2..a7f7de5188 100644
--- a/openpype/plugins/publish/extract_scanline_exr.py
+++ b/openpype/plugins/publish/extract_scanline_exr.py
@@ -45,7 +45,7 @@ class ExtractScanlineExr(pyblish.api.InstancePlugin):
stagingdir = os.path.normpath(repre.get("stagingDir"))
- oiio_tool_path = os.getenv("OPENPYPE_OIIO_PATH", "")
+ oiio_tool_path = openpype.lib.get_oiio_tools_path()
if not os.path.exists(oiio_tool_path):
self.log.error(
"OIIO tool not found in {}".format(oiio_tool_path))
diff --git a/openpype/plugins/publish/integrate_new.py b/openpype/plugins/publish/integrate_new.py
index 0d36828ccf..ea90f284b2 100644
--- a/openpype/plugins/publish/integrate_new.py
+++ b/openpype/plugins/publish/integrate_new.py
@@ -976,6 +976,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
local_site = local_site_id
remote_site = sync_server_presets["config"].get("remote_site")
+ if remote_site == local_site:
+ remote_site = None
+
if remote_site == 'local':
remote_site = local_site_id
diff --git a/openpype/settings/defaults/project_anatomy/attributes.json b/openpype/settings/defaults/project_anatomy/attributes.json
index 987021f25b..3ad6761331 100644
--- a/openpype/settings/defaults/project_anatomy/attributes.json
+++ b/openpype/settings/defaults/project_anatomy/attributes.json
@@ -10,17 +10,17 @@
"resolutionHeight": 1080,
"pixelAspect": 1.0,
"applications": [
- "maya_2020",
- "nuke_12-2",
- "nukex_12-2",
- "hiero_12-2",
- "resolve_16",
- "houdini_18-5",
- "blender_2-90",
- "harmony_20",
- "photoshop_2021",
- "aftereffects_2021",
- "unreal_4-24"
+ "maya/2020",
+ "nuke/12-2",
+ "nukex/12-2",
+ "hiero/12-2",
+ "resolve/16",
+ "houdini/18-5",
+ "blender/2-91",
+ "harmony/20",
+ "photoshop/2021",
+ "aftereffects/2021",
+ "unreal/4-24"
],
"tools_env": []
}
\ No newline at end of file
diff --git a/openpype/settings/defaults/project_settings/ftrack.json b/openpype/settings/defaults/project_settings/ftrack.json
index 03ac8f309f..8970aa8ac8 100644
--- a/openpype/settings/defaults/project_settings/ftrack.json
+++ b/openpype/settings/defaults/project_settings/ftrack.json
@@ -7,6 +7,14 @@
"not ready"
]
},
+ "prepare_project": {
+ "enabled": true,
+ "role_list": [
+ "Pypeclub",
+ "Administrator",
+ "Project manager"
+ ]
+ },
"sync_hier_entity_attributes": {
"enabled": true,
"interest_entity_types": [
@@ -195,7 +203,7 @@
"publish": {
"IntegrateFtrackNote": {
"enabled": true,
- "note_with_intent_template": "",
+ "note_with_intent_template": "{intent}: {comment}",
"note_labels": []
},
"ValidateFtrackAttributes": {
diff --git a/openpype/settings/defaults/project_settings/global.json b/openpype/settings/defaults/project_settings/global.json
index 8081f92ef7..ca1b258e72 100644
--- a/openpype/settings/defaults/project_settings/global.json
+++ b/openpype/settings/defaults/project_settings/global.json
@@ -6,7 +6,9 @@
"ExtractJpegEXR": {
"enabled": true,
"ffmpeg_args": {
- "input": [],
+ "input": [
+ "-gamma 2.2"
+ ],
"output": []
}
},
diff --git a/openpype/settings/defaults/project_settings/maya.json b/openpype/settings/defaults/project_settings/maya.json
index feddd2860a..dfece74f80 100644
--- a/openpype/settings/defaults/project_settings/maya.json
+++ b/openpype/settings/defaults/project_settings/maya.json
@@ -8,6 +8,13 @@
"yetiRig": "ma"
},
"create": {
+ "CreateLook": {
+ "enabled": true,
+ "make_tx": true,
+ "defaults": [
+ "Main"
+ ]
+ },
"CreateAnimation": {
"enabled": true,
"defaults": [
@@ -38,12 +45,6 @@
"Main"
]
},
- "CreateLook": {
- "enabled": true,
- "defaults": [
- "Main"
- ]
- },
"CreateMayaScene": {
"enabled": true,
"defaults": [
@@ -313,8 +314,8 @@
"rendererName": "vp2Renderer"
},
"Resolution": {
- "width": 1080,
- "height": 1920,
+ "width": 1920,
+ "height": 1080,
"percent": 1.0,
"mode": "Custom"
},
diff --git a/openpype/settings/defaults/project_settings/standalonepublisher.json b/openpype/settings/defaults/project_settings/standalonepublisher.json
index 08895bcba9..9d40d2ded6 100644
--- a/openpype/settings/defaults/project_settings/standalonepublisher.json
+++ b/openpype/settings/defaults/project_settings/standalonepublisher.json
@@ -116,7 +116,7 @@
"ExtractThumbnailSP": {
"ffmpeg_args": {
"input": [
- "gamma 2.2"
+ "-gamma 2.2"
],
"output": []
}
diff --git a/openpype/settings/defaults/project_settings/tvpaint.json b/openpype/settings/defaults/project_settings/tvpaint.json
index d4130c88be..4a424b1c03 100644
--- a/openpype/settings/defaults/project_settings/tvpaint.json
+++ b/openpype/settings/defaults/project_settings/tvpaint.json
@@ -1,6 +1,16 @@
{
"publish": {
- "ValidateMissingLayers": {
+ "ValidateProjectSettings": {
+ "enabled": true,
+ "optional": true,
+ "active": true
+ },
+ "ValidateMarks": {
+ "enabled": true,
+ "optional": true,
+ "active": true
+ },
+ "ValidateAssetName": {
"enabled": true,
"optional": true,
"active": true
diff --git a/openpype/settings/defaults/system_settings/applications.json b/openpype/settings/defaults/system_settings/applications.json
index 8034bc6368..58a9818465 100644
--- a/openpype/settings/defaults/system_settings/applications.json
+++ b/openpype/settings/defaults/system_settings/applications.json
@@ -6,9 +6,9 @@
"host_name": "maya",
"environment": {
"PYTHONPATH": [
- "{OPENPYPE_ROOT}/pype/hosts/maya/startup",
- "{OPENPYPE_ROOT}/repos/avalon-core/setup/maya",
- "{OPENPYPE_ROOT}/repos/maya-look-assigner",
+ "{OPENPYPE_REPOS_ROOT}/openpype/hosts/maya/startup",
+ "{OPENPYPE_REPOS_ROOT}/repos/avalon-core/setup/maya",
+ "{OPENPYPE_REPOS_ROOT}/repos/maya-look-assigner",
"{PYTHONPATH}"
],
"MAYA_DISABLE_CLIC_IPM": "Yes",
@@ -19,6 +19,25 @@
"OPENPYPE_LOG_NO_COLORS": "Yes"
},
"variants": {
+ "2022": {
+ "executables": {
+ "windows": [
+ "C:\\Program Files\\Autodesk\\Maya2022\\bin\\maya.exe"
+ ],
+ "darwin": [],
+ "linux": [
+ "/usr/autodesk/maya2022/bin/maya"
+ ]
+ },
+ "arguments": {
+ "windows": [],
+ "darwin": [],
+ "linux": []
+ },
+ "environment": {
+ "MAYA_VERSION": "2022"
+ }
+ },
"2020": {
"executables": {
"windows": [
@@ -75,6 +94,9 @@
"environment": {
"MAYA_VERSION": "2018"
}
+ },
+ "__dynamic_keys_labels__": {
+ "2022": "2022 (Testing Only)"
}
}
},
@@ -85,8 +107,8 @@
"host_name": "nuke",
"environment": {
"NUKE_PATH": [
- "{OPENPYPE_ROOT}/repos/avalon-core/setup/nuke/nuke_path",
- "{OPENPYPE_ROOT}/openpype/hosts/nuke/startup",
+ "{OPENPYPE_REPOS_ROOT}/repos/avalon-core/setup/nuke/nuke_path",
+ "{OPENPYPE_REPOS_ROOT}/openpype/hosts/nuke/startup",
"{OPENPYPE_STUDIO_PLUGINS}/nuke"
],
"PATH": {
@@ -95,6 +117,23 @@
"LOGLEVEL": "DEBUG"
},
"variants": {
+ "13-0": {
+ "executables": {
+ "windows": [
+ "C:\\Program Files\\Nuke13.0v1\\Nuke13.0.exe"
+ ],
+ "darwin": [],
+ "linux": [
+ "/usr/local/Nuke13.0v1/Nuke13.0"
+ ]
+ },
+ "arguments": {
+ "windows": [],
+ "darwin": [],
+ "linux": []
+ },
+ "environment": {}
+ },
"12-2": {
"executables": {
"windows": [
@@ -164,7 +203,9 @@
"__dynamic_keys_labels__": {
"12-2": "12.2",
"12-0": "12.0",
- "11-3": "11.3"
+ "11-3": "11.3",
+ "11-2": "11.2",
+ "13-0": "13.0 (Testing only)"
}
}
},
@@ -175,8 +216,8 @@
"host_name": "nuke",
"environment": {
"NUKE_PATH": [
- "{OPENPYPE_ROOT}/repos/avalon-core/setup/nuke/nuke_path",
- "{OPENPYPE_ROOT}/openpype/hosts/nuke/startup",
+ "{OPENPYPE_REPOS_ROOT}/repos/avalon-core/setup/nuke/nuke_path",
+ "{OPENPYPE_REPOS_ROOT}/openpype/hosts/nuke/startup",
"{OPENPYPE_STUDIO_PLUGINS}/nuke"
],
"PATH": {
@@ -185,6 +226,29 @@
"LOGLEVEL": "DEBUG"
},
"variants": {
+ "13-0": {
+ "executables": {
+ "windows": [
+ "C:\\Program Files\\Nuke13.0v1\\Nuke13.0.exe"
+ ],
+ "darwin": [],
+ "linux": [
+ "/usr/local/Nuke13.0v1/Nuke13.0"
+ ]
+ },
+ "arguments": {
+ "windows": [
+ "--nukex"
+ ],
+ "darwin": [
+ "--nukex"
+ ],
+ "linux": [
+ "--nukex"
+ ]
+ },
+ "environment": {}
+ },
"12-2": {
"executables": {
"windows": [
@@ -279,7 +343,8 @@
"12-2": "12.2",
"12-0": "12.0",
"11-3": "11.3",
- "11-2": "11.2"
+ "11-2": "11.2",
+ "13-0": "13.0 (Testing only)"
}
}
},
@@ -290,7 +355,7 @@
"host_name": "hiero",
"environment": {
"HIERO_PLUGIN_PATH": [
- "{OPENPYPE_ROOT}/openpype/hosts/hiero/startup"
+ "{OPENPYPE_REPOS_ROOT}/openpype/hosts/hiero/startup"
],
"PATH": {
"windows": "C:/Program Files (x86)/QuickTime/QTSystem/;{PATH}"
@@ -300,6 +365,29 @@
"LOGLEVEL": "DEBUG"
},
"variants": {
+ "13-0": {
+ "executables": {
+ "windows": [
+ "C:\\Program Files\\Nuke13.0v1\\Nuke13.0.exe"
+ ],
+ "darwin": [],
+ "linux": [
+ "/usr/local/Nuke13.0v1/Nuke13.0"
+ ]
+ },
+ "arguments": {
+ "windows": [
+ "--studio"
+ ],
+ "darwin": [
+ "--studio"
+ ],
+ "linux": [
+ "--studio"
+ ]
+ },
+ "environment": {}
+ },
"12-2": {
"executables": {
"windows": [
@@ -392,7 +480,8 @@
"12-2": "12.2",
"12-0": "12.0",
"11-3": "11.3",
- "11-2": "11.2"
+ "11-2": "11.2",
+ "13-0": "13.0 (Testing only)"
}
}
},
@@ -403,7 +492,7 @@
"host_name": "hiero",
"environment": {
"HIERO_PLUGIN_PATH": [
- "{OPENPYPE_ROOT}/openpype/hosts/hiero/startup"
+ "{OPENPYPE_REPOS_ROOT}/openpype/hosts/hiero/startup"
],
"PATH": {
"windows": "C:/Program Files (x86)/QuickTime/QTSystem/;{PATH}"
@@ -413,6 +502,29 @@
"LOGLEVEL": "DEBUG"
},
"variants": {
+ "13-0": {
+ "executables": {
+ "windows": [
+ "C:\\Program Files\\Nuke13.0v1\\Nuke13.0.exe"
+ ],
+ "darwin": [],
+ "linux": [
+ "/usr/local/Nuke13.0v1/Nuke13.0"
+ ]
+ },
+ "arguments": {
+ "windows": [
+ "--hiero"
+ ],
+ "darwin": [
+ "--hiero"
+ ],
+ "linux": [
+ "--hiero"
+ ]
+ },
+ "environment": {}
+ },
"12-2": {
"executables": {
"windows": [
@@ -507,7 +619,8 @@
"12-2": "12.2",
"12-0": "12.0",
"11-3": "11.3",
- "11-2": "11.2"
+ "11-2": "11.2",
+ "13-0": "13.0 (Testing only)"
}
}
},
@@ -614,7 +727,7 @@
"{PYTHON36_RESOLVE}/Scripts",
"{PATH}"
],
- "PRE_PYTHON_SCRIPT": "{OPENPYPE_ROOT}/openpype/resolve/preload_console.py",
+ "PRE_PYTHON_SCRIPT": "{OPENPYPE_REPOS_ROOT}/openpype/resolve/preload_console.py",
"OPENPYPE_LOG_NO_COLORS": "True",
"RESOLVE_DEV": "True"
},
@@ -645,14 +758,14 @@
"host_name": "houdini",
"environment": {
"HOUDINI_PATH": {
- "darwin": "{OPENPYPE_ROOT}/openpype/hosts/houdini/startup:&",
- "linux": "{OPENPYPE_ROOT}/openpype/hosts/houdini/startup:&",
- "windows": "{OPENPYPE_ROOT}/openpype/hosts/houdini/startup;&"
+ "darwin": "{OPENPYPE_REPOS_ROOT}/openpype/hosts/houdini/startup:&",
+ "linux": "{OPENPYPE_REPOS_ROOT}/openpype/hosts/houdini/startup:&",
+ "windows": "{OPENPYPE_REPOS_ROOT}/openpype/hosts/houdini/startup;&"
},
"HOUDINI_MENU_PATH": {
- "darwin": "{OPENPYPE_ROOT}/openpype/hosts/houdini/startup:&",
- "linux": "{OPENPYPE_ROOT}/openpype/hosts/houdini/startup:&",
- "windows": "{OPENPYPE_ROOT}/openpype/hosts/houdini/startup;&"
+ "darwin": "{OPENPYPE_REPOS_ROOT}/openpype/hosts/houdini/startup:&",
+ "linux": "{OPENPYPE_REPOS_ROOT}/openpype/hosts/houdini/startup:&",
+ "windows": "{OPENPYPE_REPOS_ROOT}/openpype/hosts/houdini/startup;&"
}
},
"variants": {
@@ -710,12 +823,12 @@
"icon": "{}/app_icons/blender.png",
"host_name": "blender",
"environment": {
- "BLENDER_USER_SCRIPTS": "{OPENPYPE_ROOT}/repos/avalon-core/setup/blender",
+ "BLENDER_USER_SCRIPTS": "{OPENPYPE_REPOS_ROOT}/repos/avalon-core/setup/blender",
"PYTHONPATH": [
- "{OPENPYPE_ROOT}/repos/avalon-core/setup/blender",
+ "{OPENPYPE_REPOS_ROOT}/repos/avalon-core/setup/blender",
"{PYTHONPATH}"
],
- "CREATE_NEW_CONSOLE": "yes"
+ "QT_PREFERRED_BINDING": "PySide2"
},
"variants": {
"2-83": {
@@ -760,9 +873,31 @@
},
"environment": {}
},
+ "2-91": {
+ "executables": {
+ "windows": [
+ "C:\\Program Files\\Blender Foundation\\Blender 2.91\\blender.exe"
+ ],
+ "darwin": [],
+ "linux": []
+ },
+ "arguments": {
+ "windows": [
+ "--python-use-system-env"
+ ],
+ "darwin": [
+ "--python-use-system-env"
+ ],
+ "linux": [
+ "--python-use-system-env"
+ ]
+ },
+ "environment": {}
+ },
"__dynamic_keys_labels__": {
"2-83": "2.83",
- "2-90": "2.90"
+ "2-90": "2.90",
+ "2-91": "2.91"
}
}
},
@@ -773,7 +908,7 @@
"host_name": "harmony",
"environment": {
"AVALON_HARMONY_WORKFILES_ON_LAUNCH": "1",
- "LIB_OPENHARMONY_PATH": "{OPENPYPE_ROOT}/pype/vendor/OpenHarmony"
+ "LIB_OPENHARMONY_PATH": "{OPENPYPE_REPOS_ROOT}/pype/vendor/OpenHarmony"
},
"variants": {
"20": {
@@ -957,7 +1092,7 @@
"icon": "app_icons/celaction.png",
"host_name": "celaction",
"environment": {
- "CELACTION_TEMPLATE": "{OPENPYPE_ROOT}/openpype/hosts/celaction/celaction_template_scene.scn"
+ "CELACTION_TEMPLATE": "{OPENPYPE_REPOS_ROOT}/openpype/hosts/celaction/celaction_template_scene.scn"
},
"variants": {
"local": {
@@ -983,7 +1118,7 @@
"icon": "{}/app_icons/ue4.png'",
"host_name": "unreal",
"environment": {
- "AVALON_UNREAL_PLUGIN": "{OPENPYPE_ROOT}/repos/avalon-unreal-integration",
+ "AVALON_UNREAL_PLUGIN": "{OPENPYPE_REPOS_ROOT}/repos/avalon-unreal-integration",
"OPENPYPE_LOG_NO_COLORS": "True",
"QT_PREFERRED_BINDING": "PySide"
},
diff --git a/openpype/settings/defaults/system_settings/general.json b/openpype/settings/defaults/system_settings/general.json
index d93d2a0c3a..2568e8b6a8 100644
--- a/openpype/settings/defaults/system_settings/general.json
+++ b/openpype/settings/defaults/system_settings/general.json
@@ -2,15 +2,9 @@
"studio_name": "Studio name",
"studio_code": "stu",
"environment": {
- "FFMPEG_PATH": {
- "windows": "{OPENPYPE_ROOT}/vendor/bin/ffmpeg_exec/windows/bin",
- "darwin": "{OPENPYPE_ROOT}/vendor/bin/ffmpeg_exec/darwin/bin",
- "linux": ":{OPENPYPE_ROOT}/vendor/bin/ffmpeg_exec/linux"
- },
"OPENPYPE_OCIO_CONFIG": "{STUDIO_SOFT}/OpenColorIO-Configs",
"__environment_keys__": {
"global": [
- "FFMPEG_PATH",
"OPENPYPE_OCIO_CONFIG"
]
}
diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json
index eefc0e12b7..a801175031 100644
--- a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json
+++ b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json
@@ -36,6 +36,25 @@
}
]
},
+ {
+ "type": "dict",
+ "key": "prepare_project",
+ "label": "Prepare Project",
+ "checkbox_key": "enabled",
+ "children": [
+ {
+ "type": "boolean",
+ "key": "enabled",
+ "label": "Enabled"
+ },
+ {
+ "type": "list",
+ "key": "role_list",
+ "label": "Roles",
+ "object_type": "text"
+ }
+ ]
+ },
{
"type": "dict",
"key": "sync_hier_entity_attributes",
diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_syncserver.json b/openpype/settings/entities/schemas/projects_schema/schema_project_syncserver.json
index fd728f3982..ea1b8fc9da 100644
--- a/openpype/settings/entities/schemas/projects_schema/schema_project_syncserver.json
+++ b/openpype/settings/entities/schemas/projects_schema/schema_project_syncserver.json
@@ -1,7 +1,7 @@
{
"type": "dict",
"key": "sync_server",
- "label": "Sync Server (currently unused)",
+ "label": "Site Sync (beta testing)",
"collapsible": true,
"checkbox_key": "enabled",
"is_file": true,
diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json b/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json
index b9fe26a57c..ab404f03ff 100644
--- a/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json
+++ b/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json
@@ -17,8 +17,31 @@
"name": "template_publish_plugin",
"template_data": [
{
- "key": "ValidateMissingLayers",
- "label": "ValidateMissingLayers"
+ "key": "ValidateProjectSettings",
+ "label": "ValidateProjectSettings",
+ "docstring": "Validate if FPS and Resolution match shot data"
+ }
+ ]
+ },
+ {
+ "type": "schema_template",
+ "name": "template_publish_plugin",
+ "template_data": [
+ {
+ "key": "ValidateMarks",
+ "label": "Validate MarkIn/Out",
+ "docstring": "Validate MarkIn/Out match Frame start/end on shot data"
+ }
+ ]
+ },
+ {
+ "type": "schema_template",
+ "name": "template_publish_plugin",
+ "template_data": [
+ {
+ "key": "ValidateAssetName",
+ "label": "ValidateAssetName",
+ "docstring": "Validate if shot on instances metadata is same as workfiles shot"
}
]
}
diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json
index 575e04c85d..d728f1def3 100644
--- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json
+++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json
@@ -4,6 +4,31 @@
"key": "create",
"label": "Creator plugins",
"children": [
+ {
+ "type": "dict",
+ "collapsible": true,
+ "key": "CreateLook",
+ "label": "Create Look",
+ "checkbox_key": "enabled",
+ "children": [
+ {
+ "type": "boolean",
+ "key": "enabled",
+ "label": "Enabled"
+ },
+ {
+ "type": "boolean",
+ "key": "make_tx",
+ "label": "Make tx files"
+ },
+ {
+ "type": "list",
+ "key": "defaults",
+ "label": "Default Subsets",
+ "object_type": "text"
+ }
+ ]
+ },
{
"type": "schema_template",
"name": "template_create_plugin",
@@ -28,10 +53,6 @@
"key": "CreateLayout",
"label": "Create Layout"
},
- {
- "key": "CreateLook",
- "label": "Create Look"
- },
{
"key": "CreateMayaScene",
"label": "Create Maya Scene"
diff --git a/openpype/settings/entities/schemas/system_schema/schema_modules.json b/openpype/settings/entities/schemas/system_schema/schema_modules.json
index 8512514ff3..a30cafd0c2 100644
--- a/openpype/settings/entities/schemas/system_schema/schema_modules.json
+++ b/openpype/settings/entities/schemas/system_schema/schema_modules.json
@@ -82,7 +82,7 @@
}, {
"type": "dict",
"key": "sync_server",
- "label": "Sync Server",
+ "label": "Site Sync",
"collapsible": true,
"checkbox_key": "enabled",
"children": [{
diff --git a/openpype/tools/settings/settings/style/__init__.py b/openpype/tools/settings/settings/style/__init__.py
index 9bb5e851b4..5a57642ee1 100644
--- a/openpype/tools/settings/settings/style/__init__.py
+++ b/openpype/tools/settings/settings/style/__init__.py
@@ -1,4 +1,5 @@
import os
+from openpype import resources
def load_stylesheet():
@@ -9,4 +10,4 @@ def load_stylesheet():
def app_icon_path():
- return os.path.join(os.path.dirname(__file__), "openpype_icon.png")
+ return resources.pype_icon_filepath()
diff --git a/openpype/tools/settings/settings/style/pype_icon.png b/openpype/tools/settings/settings/style/pype_icon.png
deleted file mode 100644
index bfacf6eeed..0000000000
Binary files a/openpype/tools/settings/settings/style/pype_icon.png and /dev/null differ
diff --git a/openpype/tools/settings/settings/widgets/multiselection_combobox.py b/openpype/tools/settings/settings/widgets/multiselection_combobox.py
index da9cdd75cf..30ecb7b84b 100644
--- a/openpype/tools/settings/settings/widgets/multiselection_combobox.py
+++ b/openpype/tools/settings/settings/widgets/multiselection_combobox.py
@@ -262,7 +262,10 @@ class MultiSelectionComboBox(QtWidgets.QComboBox):
self.lines[line] = [item]
line += 1
else:
- self.lines[line].append(item)
+ if line in self.lines:
+ self.lines[line].append(item)
+ else:
+ self.lines[line] = [item]
left_x = left_x + width + self.item_spacing
self.update()
diff --git a/openpype/tools/tray/pype_info_widget.py b/openpype/tools/tray/pype_info_widget.py
index a70a360378..bbb92f175f 100644
--- a/openpype/tools/tray/pype_info_widget.py
+++ b/openpype/tools/tray/pype_info_widget.py
@@ -111,6 +111,13 @@ class EnvironmentsView(QtWidgets.QTreeView):
else:
return super(EnvironmentsView, self).keyPressEvent(event)
+ def wheelEvent(self, event):
+ if not self.hasFocus():
+ event.ignore()
+ return
+ return super(EnvironmentsView, self).wheelEvent(event)
+
+
class ClickableWidget(QtWidgets.QWidget):
clicked = QtCore.Signal()
@@ -195,8 +202,6 @@ class CollapsibleWidget(QtWidgets.QWidget):
class PypeInfoWidget(QtWidgets.QWidget):
- not_applicable = "N/A"
-
def __init__(self, parent=None):
super(PypeInfoWidget, self).__init__(parent)
@@ -206,17 +211,21 @@ class PypeInfoWidget(QtWidgets.QWidget):
self.setWindowIcon(icon)
self.setWindowTitle("OpenPype info")
+ scroll_area = QtWidgets.QScrollArea(self)
+ info_widget = PypeInfoSubWidget(scroll_area)
+
+ scroll_area.setWidget(info_widget)
+ scroll_area.setWidgetResizable(True)
+
main_layout = QtWidgets.QVBoxLayout(self)
- main_layout.setAlignment(QtCore.Qt.AlignTop)
- main_layout.addWidget(self._create_openpype_info_widget(), 0)
- main_layout.addWidget(self._create_separator(), 0)
- main_layout.addWidget(self._create_workstation_widget(), 0)
- main_layout.addWidget(self._create_separator(), 0)
- main_layout.addWidget(self._create_local_settings_widget(), 0)
- main_layout.addWidget(self._create_separator(), 0)
- main_layout.addWidget(self._create_environ_widget(), 1)
+ main_layout.addWidget(scroll_area, 1)
main_layout.addWidget(self._create_btns_section(), 0)
+ self.resize(740, 540)
+
+ self.scroll_area = scroll_area
+ self.info_widget = info_widget
+
def _create_btns_section(self):
btns_widget = QtWidgets.QWidget(self)
btns_layout = QtWidgets.QHBoxLayout(btns_widget)
@@ -268,6 +277,24 @@ class PypeInfoWidget(QtWidgets.QWidget):
mime_data
)
+
+class PypeInfoSubWidget(QtWidgets.QWidget):
+ not_applicable = "N/A"
+
+ def __init__(self, parent=None):
+ super(PypeInfoSubWidget, self).__init__(parent)
+
+ main_layout = QtWidgets.QVBoxLayout(self)
+ main_layout.setContentsMargins(0, 0, 0, 0)
+ main_layout.setAlignment(QtCore.Qt.AlignTop)
+ main_layout.addWidget(self._create_openpype_info_widget(), 0)
+ main_layout.addWidget(self._create_separator(), 0)
+ main_layout.addWidget(self._create_workstation_widget(), 0)
+ main_layout.addWidget(self._create_separator(), 0)
+ main_layout.addWidget(self._create_local_settings_widget(), 0)
+ main_layout.addWidget(self._create_separator(), 0)
+ main_layout.addWidget(self._create_environ_widget(), 1)
+
def _create_separator(self):
separator_widget = QtWidgets.QWidget(self)
separator_widget.setStyleSheet("background: #222222;")
@@ -322,6 +349,7 @@ class PypeInfoWidget(QtWidgets.QWidget):
)
wokstation_info_widget.set_content_widget(info_widget)
+ wokstation_info_widget.toggle_content()
return wokstation_info_widget
@@ -342,6 +370,7 @@ class PypeInfoWidget(QtWidgets.QWidget):
env_widget = CollapsibleWidget("Environments", self)
env_view = EnvironmentsView(env_widget)
+ env_view.setMinimumHeight(300)
env_widget.set_content_widget(env_view)
diff --git a/openpype/vendor/python/python_2/dns/__init__.py b/openpype/vendor/python/python_2/dns/__init__.py
new file mode 100644
index 0000000000..c1ce8e6061
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/__init__.py
@@ -0,0 +1,56 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009, 2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""dnspython DNS toolkit"""
+
+__all__ = [
+ 'dnssec',
+ 'e164',
+ 'edns',
+ 'entropy',
+ 'exception',
+ 'flags',
+ 'hash',
+ 'inet',
+ 'ipv4',
+ 'ipv6',
+ 'message',
+ 'name',
+ 'namedict',
+ 'node',
+ 'opcode',
+ 'query',
+ 'rcode',
+ 'rdata',
+ 'rdataclass',
+ 'rdataset',
+ 'rdatatype',
+ 'renderer',
+ 'resolver',
+ 'reversename',
+ 'rrset',
+ 'set',
+ 'tokenizer',
+ 'tsig',
+ 'tsigkeyring',
+ 'ttl',
+ 'rdtypes',
+ 'update',
+ 'version',
+ 'wiredata',
+ 'zone',
+]
diff --git a/openpype/vendor/python/python_2/dns/_compat.py b/openpype/vendor/python/python_2/dns/_compat.py
new file mode 100644
index 0000000000..ca0931c2b5
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/_compat.py
@@ -0,0 +1,59 @@
+import sys
+import decimal
+from decimal import Context
+
+PY3 = sys.version_info[0] == 3
+PY2 = sys.version_info[0] == 2
+
+
+if PY3:
+ long = int
+ xrange = range
+else:
+ long = long # pylint: disable=long-builtin
+ xrange = xrange # pylint: disable=xrange-builtin
+
+# unicode / binary types
+if PY3:
+ text_type = str
+ binary_type = bytes
+ string_types = (str,)
+ unichr = chr
+ def maybe_decode(x):
+ return x.decode()
+ def maybe_encode(x):
+ return x.encode()
+ def maybe_chr(x):
+ return x
+ def maybe_ord(x):
+ return x
+else:
+ text_type = unicode # pylint: disable=unicode-builtin, undefined-variable
+ binary_type = str
+ string_types = (
+ basestring, # pylint: disable=basestring-builtin, undefined-variable
+ )
+ unichr = unichr # pylint: disable=unichr-builtin
+ def maybe_decode(x):
+ return x
+ def maybe_encode(x):
+ return x
+ def maybe_chr(x):
+ return chr(x)
+ def maybe_ord(x):
+ return ord(x)
+
+
+def round_py2_compat(what):
+ """
+ Python 2 and Python 3 use different rounding strategies in round(). This
+ function ensures that results are python2/3 compatible and backward
+ compatible with previous py2 releases
+ :param what: float
+ :return: rounded long
+ """
+ d = Context(
+ prec=len(str(long(what))), # round to integer with max precision
+ rounding=decimal.ROUND_HALF_UP
+ ).create_decimal(str(what)) # str(): python 2.6 compat
+ return long(d)
diff --git a/openpype/vendor/python/python_2/dns/dnssec.py b/openpype/vendor/python/python_2/dns/dnssec.py
new file mode 100644
index 0000000000..35da6b5a81
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/dnssec.py
@@ -0,0 +1,519 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Common DNSSEC-related functions and constants."""
+
+from io import BytesIO
+import struct
+import time
+
+import dns.exception
+import dns.name
+import dns.node
+import dns.rdataset
+import dns.rdata
+import dns.rdatatype
+import dns.rdataclass
+from ._compat import string_types
+
+
+class UnsupportedAlgorithm(dns.exception.DNSException):
+ """The DNSSEC algorithm is not supported."""
+
+
+class ValidationFailure(dns.exception.DNSException):
+ """The DNSSEC signature is invalid."""
+
+
+#: RSAMD5
+RSAMD5 = 1
+#: DH
+DH = 2
+#: DSA
+DSA = 3
+#: ECC
+ECC = 4
+#: RSASHA1
+RSASHA1 = 5
+#: DSANSEC3SHA1
+DSANSEC3SHA1 = 6
+#: RSASHA1NSEC3SHA1
+RSASHA1NSEC3SHA1 = 7
+#: RSASHA256
+RSASHA256 = 8
+#: RSASHA512
+RSASHA512 = 10
+#: ECDSAP256SHA256
+ECDSAP256SHA256 = 13
+#: ECDSAP384SHA384
+ECDSAP384SHA384 = 14
+#: INDIRECT
+INDIRECT = 252
+#: PRIVATEDNS
+PRIVATEDNS = 253
+#: PRIVATEOID
+PRIVATEOID = 254
+
+_algorithm_by_text = {
+ 'RSAMD5': RSAMD5,
+ 'DH': DH,
+ 'DSA': DSA,
+ 'ECC': ECC,
+ 'RSASHA1': RSASHA1,
+ 'DSANSEC3SHA1': DSANSEC3SHA1,
+ 'RSASHA1NSEC3SHA1': RSASHA1NSEC3SHA1,
+ 'RSASHA256': RSASHA256,
+ 'RSASHA512': RSASHA512,
+ 'INDIRECT': INDIRECT,
+ 'ECDSAP256SHA256': ECDSAP256SHA256,
+ 'ECDSAP384SHA384': ECDSAP384SHA384,
+ 'PRIVATEDNS': PRIVATEDNS,
+ 'PRIVATEOID': PRIVATEOID,
+}
+
+# We construct the inverse mapping programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mapping not to be true inverse.
+
+_algorithm_by_value = {y: x for x, y in _algorithm_by_text.items()}
+
+
+def algorithm_from_text(text):
+ """Convert text into a DNSSEC algorithm value.
+
+ Returns an ``int``.
+ """
+
+ value = _algorithm_by_text.get(text.upper())
+ if value is None:
+ value = int(text)
+ return value
+
+
+def algorithm_to_text(value):
+ """Convert a DNSSEC algorithm value to text
+
+ Returns a ``str``.
+ """
+
+ text = _algorithm_by_value.get(value)
+ if text is None:
+ text = str(value)
+ return text
+
+
+def _to_rdata(record, origin):
+ s = BytesIO()
+ record.to_wire(s, origin=origin)
+ return s.getvalue()
+
+
+def key_id(key, origin=None):
+ """Return the key id (a 16-bit number) for the specified key.
+
+ Note the *origin* parameter of this function is historical and
+ is not needed.
+
+ Returns an ``int`` between 0 and 65535.
+ """
+
+ rdata = _to_rdata(key, origin)
+ rdata = bytearray(rdata)
+ if key.algorithm == RSAMD5:
+ return (rdata[-3] << 8) + rdata[-2]
+ else:
+ total = 0
+ for i in range(len(rdata) // 2):
+ total += (rdata[2 * i] << 8) + \
+ rdata[2 * i + 1]
+ if len(rdata) % 2 != 0:
+ total += rdata[len(rdata) - 1] << 8
+ total += ((total >> 16) & 0xffff)
+ return total & 0xffff
+
+
+def make_ds(name, key, algorithm, origin=None):
+ """Create a DS record for a DNSSEC key.
+
+ *name* is the owner name of the DS record.
+
+ *key* is a ``dns.rdtypes.ANY.DNSKEY``.
+
+ *algorithm* is a string describing which hash algorithm to use. The
+ currently supported hashes are "SHA1" and "SHA256". Case does not
+ matter for these strings.
+
+ *origin* is a ``dns.name.Name`` and will be used as the origin
+ if *key* is a relative name.
+
+ Returns a ``dns.rdtypes.ANY.DS``.
+ """
+
+ if algorithm.upper() == 'SHA1':
+ dsalg = 1
+ hash = SHA1.new()
+ elif algorithm.upper() == 'SHA256':
+ dsalg = 2
+ hash = SHA256.new()
+ else:
+ raise UnsupportedAlgorithm('unsupported algorithm "%s"' % algorithm)
+
+ if isinstance(name, string_types):
+ name = dns.name.from_text(name, origin)
+ hash.update(name.canonicalize().to_wire())
+ hash.update(_to_rdata(key, origin))
+ digest = hash.digest()
+
+ dsrdata = struct.pack("!HBB", key_id(key), key.algorithm, dsalg) + digest
+ return dns.rdata.from_wire(dns.rdataclass.IN, dns.rdatatype.DS, dsrdata, 0,
+ len(dsrdata))
+
+
+def _find_candidate_keys(keys, rrsig):
+ candidate_keys = []
+ value = keys.get(rrsig.signer)
+ if value is None:
+ return None
+ if isinstance(value, dns.node.Node):
+ try:
+ rdataset = value.find_rdataset(dns.rdataclass.IN,
+ dns.rdatatype.DNSKEY)
+ except KeyError:
+ return None
+ else:
+ rdataset = value
+ for rdata in rdataset:
+ if rdata.algorithm == rrsig.algorithm and \
+ key_id(rdata) == rrsig.key_tag:
+ candidate_keys.append(rdata)
+ return candidate_keys
+
+
+def _is_rsa(algorithm):
+ return algorithm in (RSAMD5, RSASHA1,
+ RSASHA1NSEC3SHA1, RSASHA256,
+ RSASHA512)
+
+
+def _is_dsa(algorithm):
+ return algorithm in (DSA, DSANSEC3SHA1)
+
+
+def _is_ecdsa(algorithm):
+ return _have_ecdsa and (algorithm in (ECDSAP256SHA256, ECDSAP384SHA384))
+
+
+def _is_md5(algorithm):
+ return algorithm == RSAMD5
+
+
+def _is_sha1(algorithm):
+ return algorithm in (DSA, RSASHA1,
+ DSANSEC3SHA1, RSASHA1NSEC3SHA1)
+
+
+def _is_sha256(algorithm):
+ return algorithm in (RSASHA256, ECDSAP256SHA256)
+
+
+def _is_sha384(algorithm):
+ return algorithm == ECDSAP384SHA384
+
+
+def _is_sha512(algorithm):
+ return algorithm == RSASHA512
+
+
+def _make_hash(algorithm):
+ if _is_md5(algorithm):
+ return MD5.new()
+ if _is_sha1(algorithm):
+ return SHA1.new()
+ if _is_sha256(algorithm):
+ return SHA256.new()
+ if _is_sha384(algorithm):
+ return SHA384.new()
+ if _is_sha512(algorithm):
+ return SHA512.new()
+ raise ValidationFailure('unknown hash for algorithm %u' % algorithm)
+
+
+def _make_algorithm_id(algorithm):
+ if _is_md5(algorithm):
+ oid = [0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05]
+ elif _is_sha1(algorithm):
+ oid = [0x2b, 0x0e, 0x03, 0x02, 0x1a]
+ elif _is_sha256(algorithm):
+ oid = [0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01]
+ elif _is_sha512(algorithm):
+ oid = [0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03]
+ else:
+ raise ValidationFailure('unknown algorithm %u' % algorithm)
+ olen = len(oid)
+ dlen = _make_hash(algorithm).digest_size
+ idbytes = [0x30] + [8 + olen + dlen] + \
+ [0x30, olen + 4] + [0x06, olen] + oid + \
+ [0x05, 0x00] + [0x04, dlen]
+ return struct.pack('!%dB' % len(idbytes), *idbytes)
+
+
+def _validate_rrsig(rrset, rrsig, keys, origin=None, now=None):
+ """Validate an RRset against a single signature rdata
+
+ The owner name of *rrsig* is assumed to be the same as the owner name
+ of *rrset*.
+
+ *rrset* is the RRset to validate. It can be a ``dns.rrset.RRset`` or
+ a ``(dns.name.Name, dns.rdataset.Rdataset)`` tuple.
+
+ *rrsig* is a ``dns.rdata.Rdata``, the signature to validate.
+
+ *keys* is the key dictionary, used to find the DNSKEY associated with
+ a given name. The dictionary is keyed by a ``dns.name.Name``, and has
+ ``dns.node.Node`` or ``dns.rdataset.Rdataset`` values.
+
+ *origin* is a ``dns.name.Name``, the origin to use for relative names.
+
+ *now* is an ``int``, the time to use when validating the signatures,
+ in seconds since the UNIX epoch. The default is the current time.
+ """
+
+ if isinstance(origin, string_types):
+ origin = dns.name.from_text(origin, dns.name.root)
+
+ candidate_keys = _find_candidate_keys(keys, rrsig)
+ if candidate_keys is None:
+ raise ValidationFailure('unknown key')
+
+ for candidate_key in candidate_keys:
+ # For convenience, allow the rrset to be specified as a (name,
+ # rdataset) tuple as well as a proper rrset
+ if isinstance(rrset, tuple):
+ rrname = rrset[0]
+ rdataset = rrset[1]
+ else:
+ rrname = rrset.name
+ rdataset = rrset
+
+ if now is None:
+ now = time.time()
+ if rrsig.expiration < now:
+ raise ValidationFailure('expired')
+ if rrsig.inception > now:
+ raise ValidationFailure('not yet valid')
+
+ hash = _make_hash(rrsig.algorithm)
+
+ if _is_rsa(rrsig.algorithm):
+ keyptr = candidate_key.key
+ (bytes_,) = struct.unpack('!B', keyptr[0:1])
+ keyptr = keyptr[1:]
+ if bytes_ == 0:
+ (bytes_,) = struct.unpack('!H', keyptr[0:2])
+ keyptr = keyptr[2:]
+ rsa_e = keyptr[0:bytes_]
+ rsa_n = keyptr[bytes_:]
+ try:
+ pubkey = CryptoRSA.construct(
+ (number.bytes_to_long(rsa_n),
+ number.bytes_to_long(rsa_e)))
+ except ValueError:
+ raise ValidationFailure('invalid public key')
+ sig = rrsig.signature
+ elif _is_dsa(rrsig.algorithm):
+ keyptr = candidate_key.key
+ (t,) = struct.unpack('!B', keyptr[0:1])
+ keyptr = keyptr[1:]
+ octets = 64 + t * 8
+ dsa_q = keyptr[0:20]
+ keyptr = keyptr[20:]
+ dsa_p = keyptr[0:octets]
+ keyptr = keyptr[octets:]
+ dsa_g = keyptr[0:octets]
+ keyptr = keyptr[octets:]
+ dsa_y = keyptr[0:octets]
+ pubkey = CryptoDSA.construct(
+ (number.bytes_to_long(dsa_y),
+ number.bytes_to_long(dsa_g),
+ number.bytes_to_long(dsa_p),
+ number.bytes_to_long(dsa_q)))
+ sig = rrsig.signature[1:]
+ elif _is_ecdsa(rrsig.algorithm):
+ # use ecdsa for NIST-384p -- not currently supported by pycryptodome
+
+ keyptr = candidate_key.key
+
+ if rrsig.algorithm == ECDSAP256SHA256:
+ curve = ecdsa.curves.NIST256p
+ key_len = 32
+ elif rrsig.algorithm == ECDSAP384SHA384:
+ curve = ecdsa.curves.NIST384p
+ key_len = 48
+
+ x = number.bytes_to_long(keyptr[0:key_len])
+ y = number.bytes_to_long(keyptr[key_len:key_len * 2])
+ if not ecdsa.ecdsa.point_is_valid(curve.generator, x, y):
+ raise ValidationFailure('invalid ECDSA key')
+ point = ecdsa.ellipticcurve.Point(curve.curve, x, y, curve.order)
+ verifying_key = ecdsa.keys.VerifyingKey.from_public_point(point,
+ curve)
+ pubkey = ECKeyWrapper(verifying_key, key_len)
+ r = rrsig.signature[:key_len]
+ s = rrsig.signature[key_len:]
+ sig = ecdsa.ecdsa.Signature(number.bytes_to_long(r),
+ number.bytes_to_long(s))
+
+ else:
+ raise ValidationFailure('unknown algorithm %u' % rrsig.algorithm)
+
+ hash.update(_to_rdata(rrsig, origin)[:18])
+ hash.update(rrsig.signer.to_digestable(origin))
+
+ if rrsig.labels < len(rrname) - 1:
+ suffix = rrname.split(rrsig.labels + 1)[1]
+ rrname = dns.name.from_text('*', suffix)
+ rrnamebuf = rrname.to_digestable(origin)
+ rrfixed = struct.pack('!HHI', rdataset.rdtype, rdataset.rdclass,
+ rrsig.original_ttl)
+ rrlist = sorted(rdataset)
+ for rr in rrlist:
+ hash.update(rrnamebuf)
+ hash.update(rrfixed)
+ rrdata = rr.to_digestable(origin)
+ rrlen = struct.pack('!H', len(rrdata))
+ hash.update(rrlen)
+ hash.update(rrdata)
+
+ try:
+ if _is_rsa(rrsig.algorithm):
+ verifier = pkcs1_15.new(pubkey)
+ # will raise ValueError if verify fails:
+ verifier.verify(hash, sig)
+ elif _is_dsa(rrsig.algorithm):
+ verifier = DSS.new(pubkey, 'fips-186-3')
+ verifier.verify(hash, sig)
+ elif _is_ecdsa(rrsig.algorithm):
+ digest = hash.digest()
+ if not pubkey.verify(digest, sig):
+ raise ValueError
+ else:
+ # Raise here for code clarity; this won't actually ever happen
+ # since if the algorithm is really unknown we'd already have
+ # raised an exception above
+ raise ValidationFailure('unknown algorithm %u' % rrsig.algorithm)
+ # If we got here, we successfully verified so we can return without error
+ return
+ except ValueError:
+ # this happens on an individual validation failure
+ continue
+ # nothing verified -- raise failure:
+ raise ValidationFailure('verify failure')
+
+
+def _validate(rrset, rrsigset, keys, origin=None, now=None):
+ """Validate an RRset.
+
+ *rrset* is the RRset to validate. It can be a ``dns.rrset.RRset`` or
+ a ``(dns.name.Name, dns.rdataset.Rdataset)`` tuple.
+
+ *rrsigset* is the signature RRset to be validated. It can be a
+ ``dns.rrset.RRset`` or a ``(dns.name.Name, dns.rdataset.Rdataset)`` tuple.
+
+ *keys* is the key dictionary, used to find the DNSKEY associated with
+ a given name. The dictionary is keyed by a ``dns.name.Name``, and has
+ ``dns.node.Node`` or ``dns.rdataset.Rdataset`` values.
+
+ *origin* is a ``dns.name.Name``, the origin to use for relative names.
+
+ *now* is an ``int``, the time to use when validating the signatures,
+ in seconds since the UNIX epoch. The default is the current time.
+ """
+
+ if isinstance(origin, string_types):
+ origin = dns.name.from_text(origin, dns.name.root)
+
+ if isinstance(rrset, tuple):
+ rrname = rrset[0]
+ else:
+ rrname = rrset.name
+
+ if isinstance(rrsigset, tuple):
+ rrsigname = rrsigset[0]
+ rrsigrdataset = rrsigset[1]
+ else:
+ rrsigname = rrsigset.name
+ rrsigrdataset = rrsigset
+
+ rrname = rrname.choose_relativity(origin)
+ rrsigname = rrsigname.choose_relativity(origin)
+ if rrname != rrsigname:
+ raise ValidationFailure("owner names do not match")
+
+ for rrsig in rrsigrdataset:
+ try:
+ _validate_rrsig(rrset, rrsig, keys, origin, now)
+ return
+ except ValidationFailure:
+ pass
+ raise ValidationFailure("no RRSIGs validated")
+
+
+def _need_pycrypto(*args, **kwargs):
+ raise NotImplementedError("DNSSEC validation requires pycryptodome/pycryptodomex")
+
+
+try:
+ try:
+ # test we're using pycryptodome, not pycrypto (which misses SHA1 for example)
+ from Crypto.Hash import MD5, SHA1, SHA256, SHA384, SHA512
+ from Crypto.PublicKey import RSA as CryptoRSA, DSA as CryptoDSA
+ from Crypto.Signature import pkcs1_15, DSS
+ from Crypto.Util import number
+ except ImportError:
+ from Cryptodome.Hash import MD5, SHA1, SHA256, SHA384, SHA512
+ from Cryptodome.PublicKey import RSA as CryptoRSA, DSA as CryptoDSA
+ from Cryptodome.Signature import pkcs1_15, DSS
+ from Cryptodome.Util import number
+except ImportError:
+ validate = _need_pycrypto
+ validate_rrsig = _need_pycrypto
+ _have_pycrypto = False
+ _have_ecdsa = False
+else:
+ validate = _validate
+ validate_rrsig = _validate_rrsig
+ _have_pycrypto = True
+
+ try:
+ import ecdsa
+ import ecdsa.ecdsa
+ import ecdsa.ellipticcurve
+ import ecdsa.keys
+ except ImportError:
+ _have_ecdsa = False
+ else:
+ _have_ecdsa = True
+
+ class ECKeyWrapper(object):
+
+ def __init__(self, key, key_len):
+ self.key = key
+ self.key_len = key_len
+
+ def verify(self, digest, sig):
+ diglong = number.bytes_to_long(digest)
+ return self.key.pubkey.verifies(diglong, sig)
diff --git a/openpype/vendor/python/python_2/dns/e164.py b/openpype/vendor/python/python_2/dns/e164.py
new file mode 100644
index 0000000000..758c47a784
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/e164.py
@@ -0,0 +1,105 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2006-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS E.164 helpers."""
+
+import dns.exception
+import dns.name
+import dns.resolver
+from ._compat import string_types, maybe_decode
+
+#: The public E.164 domain.
+public_enum_domain = dns.name.from_text('e164.arpa.')
+
+
+def from_e164(text, origin=public_enum_domain):
+ """Convert an E.164 number in textual form into a Name object whose
+ value is the ENUM domain name for that number.
+
+ Non-digits in the text are ignored, i.e. "16505551212",
+ "+1.650.555.1212" and "1 (650) 555-1212" are all the same.
+
+ *text*, a ``text``, is an E.164 number in textual form.
+
+ *origin*, a ``dns.name.Name``, the domain in which the number
+ should be constructed. The default is ``e164.arpa.``.
+
+ Returns a ``dns.name.Name``.
+ """
+
+ parts = [d for d in text if d.isdigit()]
+ parts.reverse()
+ return dns.name.from_text('.'.join(parts), origin=origin)
+
+
+def to_e164(name, origin=public_enum_domain, want_plus_prefix=True):
+ """Convert an ENUM domain name into an E.164 number.
+
+ Note that dnspython does not have any information about preferred
+ number formats within national numbering plans, so all numbers are
+ emitted as a simple string of digits, prefixed by a '+' (unless
+ *want_plus_prefix* is ``False``).
+
+ *name* is a ``dns.name.Name``, the ENUM domain name.
+
+ *origin* is a ``dns.name.Name``, a domain containing the ENUM
+ domain name. The name is relativized to this domain before being
+ converted to text. If ``None``, no relativization is done.
+
+ *want_plus_prefix* is a ``bool``. If True, add a '+' to the beginning of
+ the returned number.
+
+ Returns a ``text``.
+
+ """
+ if origin is not None:
+ name = name.relativize(origin)
+ dlabels = [d for d in name.labels if d.isdigit() and len(d) == 1]
+ if len(dlabels) != len(name.labels):
+ raise dns.exception.SyntaxError('non-digit labels in ENUM domain name')
+ dlabels.reverse()
+ text = b''.join(dlabels)
+ if want_plus_prefix:
+ text = b'+' + text
+ return maybe_decode(text)
+
+
+def query(number, domains, resolver=None):
+ """Look for NAPTR RRs for the specified number in the specified domains.
+
+ e.g. lookup('16505551212', ['e164.dnspython.org.', 'e164.arpa.'])
+
+ *number*, a ``text`` is the number to look for.
+
+ *domains* is an iterable containing ``dns.name.Name`` values.
+
+ *resolver*, a ``dns.resolver.Resolver``, is the resolver to use. If
+ ``None``, the default resolver is used.
+ """
+
+ if resolver is None:
+ resolver = dns.resolver.get_default_resolver()
+ e_nx = dns.resolver.NXDOMAIN()
+ for domain in domains:
+ if isinstance(domain, string_types):
+ domain = dns.name.from_text(domain)
+ qname = dns.e164.from_e164(number, domain)
+ try:
+ return resolver.query(qname, 'NAPTR')
+ except dns.resolver.NXDOMAIN as e:
+ e_nx += e
+ raise e_nx
diff --git a/openpype/vendor/python/python_2/dns/edns.py b/openpype/vendor/python/python_2/dns/edns.py
new file mode 100644
index 0000000000..5660f7bb7a
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/edns.py
@@ -0,0 +1,269 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2009-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""EDNS Options"""
+
+from __future__ import absolute_import
+
+import math
+import struct
+
+import dns.inet
+
+#: NSID
+NSID = 3
+#: DAU
+DAU = 5
+#: DHU
+DHU = 6
+#: N3U
+N3U = 7
+#: ECS (client-subnet)
+ECS = 8
+#: EXPIRE
+EXPIRE = 9
+#: COOKIE
+COOKIE = 10
+#: KEEPALIVE
+KEEPALIVE = 11
+#: PADDING
+PADDING = 12
+#: CHAIN
+CHAIN = 13
+
class Option(object):

    """Base class for all EDNS option types."""

    def __init__(self, otype):
        """Initialize an option.

        *otype*, an ``int``, is the option type.
        """
        self.otype = otype

    def to_wire(self, file):
        """Convert an option to wire format.

        Subclasses must implement this.
        """
        raise NotImplementedError

    @classmethod
    def from_wire(cls, otype, wire, current, olen):
        """Build an EDNS option object from wire format.

        *otype*, an ``int``, is the option type.

        *wire*, a ``binary``, is the wire-format message.

        *current*, an ``int``, is the offset in *wire* of the beginning
        of the rdata.

        *olen*, an ``int``, is the length of the wire-format option data

        Returns a ``dns.edns.Option``.
        """

        raise NotImplementedError

    def _cmp(self, other):
        """Compare an EDNS option with another option of the same type.

        Returns < 0 if < *other*, 0 if == *other*, and > 0 if > *other*.
        """
        raise NotImplementedError

    def __eq__(self, other):
        if not isinstance(other, Option):
            return False
        if self.otype != other.otype:
            return False
        return self._cmp(other) == 0

    def __ne__(self, other):
        # Non-Option values and options of a different type are unequal.
        # The old code returned False here, which made *both* ``==`` and
        # ``!=`` false for such pairs, violating the requirement that the
        # two operators be consistent.
        if not isinstance(other, Option) or \
           self.otype != other.otype:
            return True
        return self._cmp(other) != 0

    def __lt__(self, other):
        if not isinstance(other, Option) or \
           self.otype != other.otype:
            return NotImplemented
        return self._cmp(other) < 0

    def __le__(self, other):
        if not isinstance(other, Option) or \
           self.otype != other.otype:
            return NotImplemented
        return self._cmp(other) <= 0

    def __ge__(self, other):
        if not isinstance(other, Option) or \
           self.otype != other.otype:
            return NotImplemented
        return self._cmp(other) >= 0

    def __gt__(self, other):
        if not isinstance(other, Option) or \
           self.otype != other.otype:
            return NotImplemented
        return self._cmp(other) > 0
+
+
class GenericOption(Option):

    """Generic Option Class

    This class is used for EDNS option types for which we have no better
    implementation.
    """

    def __init__(self, otype, data):
        """*otype*, an ``int``, is the option type; *data*, a ``binary``,
        is the raw option payload."""
        super(GenericOption, self).__init__(otype)
        self.data = data

    def to_wire(self, file):
        """Write the raw payload unchanged."""
        file.write(self.data)

    def to_text(self):
        """Return a short textual tag for the option."""
        return "Generic %d" % self.otype

    @classmethod
    def from_wire(cls, otype, wire, current, olen):
        """Capture *olen* bytes of option data starting at *current*."""
        payload = wire[current: current + olen]
        return cls(otype, payload)

    def _cmp(self, other):
        # Lexicographic comparison of the raw payloads.
        if self.data < other.data:
            return -1
        if self.data > other.data:
            return 1
        return 0
+
+
class ECSOption(Option):
    """EDNS Client Subnet (ECS, RFC7871)"""

    def __init__(self, address, srclen=None, scopelen=0):
        """*address*, a ``text``, is the client address information.

        *srclen*, an ``int``, the source prefix length, which is the
        leftmost number of bits of the address to be used for the
        lookup. The default is 24 for IPv4 and 56 for IPv6.

        *scopelen*, an ``int``, the scope prefix length. This value
        must be 0 in queries, and should be set in responses.
        """

        super(ECSOption, self).__init__(ECS)
        af = dns.inet.af_for_address(address)

        if af == dns.inet.AF_INET6:
            self.family = 2
            if srclen is None:
                srclen = 56
        elif af == dns.inet.AF_INET:
            self.family = 1
            if srclen is None:
                srclen = 24
        else:
            raise ValueError('Bad ip family')

        self.address = address
        self.srclen = srclen
        self.scopelen = scopelen

        addrdata = dns.inet.inet_pton(af, address)
        nbytes = int(math.ceil(srclen / 8.0))

        # Truncate to srclen and pad to the end of the last octet needed
        # See RFC section 6
        self.addrdata = addrdata[:nbytes]
        nbits = srclen % 8
        if nbits != 0:
            # Zero the bits of the final octet that fall outside the source
            # prefix.  The mask must keep the *high* nbits bits, so it is
            # ``0xff << (8 - nbits)``; the previous ``0xff << nbits`` kept
            # the wrong bits for every prefix length not a multiple of 8
            # (it was only coincidentally right for nbits == 4).
            last = struct.pack('B',
                               ord(self.addrdata[-1:]) &
                               (0xff << (8 - nbits)))
            self.addrdata = self.addrdata[:-1] + last

    def to_text(self):
        """Return the option as ``ECS <address>/<srclen> scope/<scopelen>``."""
        return "ECS {}/{} scope/{}".format(self.address, self.srclen,
                                           self.scopelen)

    def to_wire(self, file):
        """Write family, prefix lengths, and the truncated address."""
        file.write(struct.pack('!H', self.family))
        file.write(struct.pack('!BB', self.srclen, self.scopelen))
        file.write(self.addrdata)

    @classmethod
    def from_wire(cls, otype, wire, cur, olen):
        """Parse an ECS option from wire format.

        *otype* is unused (it is always ``ECS``); *wire* is the message,
        *cur* the offset of the option data, *olen* its length.
        """
        family, src, scope = struct.unpack('!HBB', wire[cur:cur + 4])
        cur += 4

        addrlen = int(math.ceil(src / 8.0))

        if family == 1:
            af = dns.inet.AF_INET
            pad = 4 - addrlen
        elif family == 2:
            af = dns.inet.AF_INET6
            pad = 16 - addrlen
        else:
            raise ValueError('unsupported family')

        # Pad the truncated wire address back to full length before
        # converting it to text.
        addr = dns.inet.inet_ntop(af, wire[cur:cur + addrlen] + b'\x00' * pad)
        return cls(addr, src, scope)

    def _cmp(self, other):
        if self.addrdata == other.addrdata:
            return 0
        if self.addrdata > other.addrdata:
            return 1
        return -1
+
# Option types with a dedicated implementation; everything else falls
# back to GenericOption.
_type_to_class = {
    ECS: ECSOption
}

def get_option_class(otype):
    """Return the class for the specified option type.

    The GenericOption class is used if a more specific class is not
    known.
    """

    return _type_to_class.get(otype, GenericOption)
+
+
def option_from_wire(otype, wire, current, olen):
    """Build an EDNS option object from wire format.

    *otype*, an ``int``, is the option type.

    *wire*, a ``binary``, is the wire-format message.

    *current*, an ``int``, is the offset in *wire* of the beginning
    of the rdata.

    *olen*, an ``int``, is the length of the wire-format option data

    Returns an instance of a subclass of ``dns.edns.Option``.
    """

    option_class = get_option_class(otype)
    return option_class.from_wire(otype, wire, current, olen)
diff --git a/openpype/vendor/python/python_2/dns/entropy.py b/openpype/vendor/python/python_2/dns/entropy.py
new file mode 100644
index 0000000000..00c6a4b389
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/entropy.py
@@ -0,0 +1,148 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2009-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import os
+import random
+import time
+from ._compat import long, binary_type
+try:
+ import threading as _threading
+except ImportError:
+ import dummy_threading as _threading
+
+
class EntropyPool(object):

    # This is an entropy pool for Python implementations that do not
    # have a working SystemRandom. I'm not sure there are any, but
    # leaving this code doesn't hurt anything as the library code
    # is used if present.

    def __init__(self, seed=None):
        """Create an entropy pool, optionally stirred with *seed* (bytes)."""
        # Rotating write index into the pool buffer.
        self.pool_index = 0
        # Last hash digest; bytes are handed out from it one at a time.
        self.digest = None
        self.next_byte = 0
        self.lock = _threading.Lock()
        # Pick the strongest available hash: hashlib (modern), then the
        # legacy sha/md5 modules for very old interpreters.
        try:
            import hashlib
            self.hash = hashlib.sha1()
            self.hash_len = 20
        except ImportError:
            try:
                import sha
                self.hash = sha.new()
                self.hash_len = 20
            except ImportError:
                import md5  # pylint: disable=import-error
                self.hash = md5.new()
                self.hash_len = 16
        self.pool = bytearray(b'\0' * self.hash_len)
        if seed is not None:
            self.stir(bytearray(seed))
            self.seeded = True
            self.seed_pid = os.getpid()
        else:
            self.seeded = False
            self.seed_pid = 0

    def stir(self, entropy, already_locked=False):
        """XOR the bytes of *entropy* into the pool.

        *already_locked* must be True when the caller already holds
        ``self.lock`` (used internally to avoid a deadlock).
        """
        if not already_locked:
            self.lock.acquire()
        try:
            for c in entropy:
                if self.pool_index == self.hash_len:
                    # Wrap around; the pool is a fixed-size ring.
                    self.pool_index = 0
                b = c & 0xff
                self.pool[self.pool_index] ^= b
                self.pool_index += 1
        finally:
            if not already_locked:
                self.lock.release()

    def _maybe_seed(self):
        # (Re)seed on first use and after a fork (pid change), so child
        # processes do not replay the parent's stream.  Must be called
        # with the lock held.
        if not self.seeded or self.seed_pid != os.getpid():
            try:
                seed = os.urandom(16)
            except Exception:
                try:
                    r = open('/dev/urandom', 'rb', 0)
                    try:
                        seed = r.read(16)
                    finally:
                        r.close()
                except Exception:
                    # Last resort: weak, time-based seed.
                    seed = str(time.time())
            self.seeded = True
            self.seed_pid = os.getpid()
            self.digest = None
            seed = bytearray(seed)
            self.stir(seed, True)

    def random_8(self):
        """Return a random integer in [0, 255]."""
        self.lock.acquire()
        try:
            self._maybe_seed()
            if self.digest is None or self.next_byte == self.hash_len:
                # Refill: hash the pool, stir the digest back in, and
                # start handing out digest bytes again.
                self.hash.update(binary_type(self.pool))
                self.digest = bytearray(self.hash.digest())
                self.stir(self.digest, True)
                self.next_byte = 0
            value = self.digest[self.next_byte]
            self.next_byte += 1
        finally:
            self.lock.release()
        return value

    def random_16(self):
        """Return a random integer in [0, 65535]."""
        return self.random_8() * 256 + self.random_8()

    def random_32(self):
        """Return a random integer in [0, 4294967295]."""
        return self.random_16() * 65536 + self.random_16()

    def random_between(self, first, last):
        """Return a random integer in [first, last] (inclusive).

        Raises ``ValueError`` if the range spans more than 2**32 values.
        """
        size = last - first + 1
        if size > long(4294967296):
            raise ValueError('too big')
        # Choose the narrowest generator that covers the range; ``max``
        # (which shadows the builtin here) is that generator's maximum.
        if size > 65536:
            rand = self.random_32
            max = long(4294967295)
        elif size > 256:
            rand = self.random_16
            max = 65535
        else:
            rand = self.random_8
            max = 255
        return first + size * rand() // (max + 1)
+
pool = EntropyPool()

try:
    system_random = random.SystemRandom()
except Exception:
    system_random = None

def random_16():
    """Return a random integer in [0, 65535], preferring the OS RNG."""
    if system_random is None:
        return pool.random_16()
    return system_random.randrange(0, 65536)

def between(first, last):
    """Return a random integer in [first, last], preferring the OS RNG."""
    if system_random is None:
        return pool.random_between(first, last)
    return system_random.randrange(first, last + 1)
diff --git a/openpype/vendor/python/python_2/dns/exception.py b/openpype/vendor/python/python_2/dns/exception.py
new file mode 100644
index 0000000000..71ff04f148
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/exception.py
@@ -0,0 +1,128 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Common DNS Exceptions.
+
+Dnspython modules may also define their own exceptions, which will
+always be subclasses of ``DNSException``.
+"""
+
class DNSException(Exception):
    """Abstract base class shared by all dnspython exceptions.

    It supports two basic modes of operation:

    a) Old/compatible mode is used if ``__init__`` was called with
    empty *kwargs*. In compatible mode all *args* are passed
    to the standard Python Exception class as before and all *args* are
    printed by the standard ``__str__`` implementation. Class variable
    ``msg`` (or doc string if ``msg`` is ``None``) is returned from ``str()``
    if *args* is empty.

    b) New/parametrized mode is used if ``__init__`` was called with
    non-empty *kwargs*.
    In the new mode *args* must be empty and all kwargs must match
    those set in class variable ``supp_kwargs``. All kwargs are stored inside
    ``self.kwargs`` and used in a new ``__str__`` implementation to construct
    a formatted message based on the ``fmt`` class variable, a ``string``.

    In the simplest case it is enough to override the ``supp_kwargs``
    and ``fmt`` class variables to get nice parametrized messages.
    """

    msg = None  # non-parametrized message
    supp_kwargs = set()  # accepted parameters for _fmt_kwargs (sanity check)
    fmt = None  # message parametrized with results from _fmt_kwargs

    def __init__(self, *args, **kwargs):
        self._check_params(*args, **kwargs)
        if kwargs:
            self.kwargs = self._check_kwargs(**kwargs)
            # __str__ formats ``fmt`` with the kwargs stored just above,
            # so this freezes the rendered message on the instance.
            self.msg = str(self)
        else:
            self.kwargs = dict()  # defined but empty for old mode exceptions
        if self.msg is None:
            # doc string is better implicit message than empty string
            self.msg = self.__doc__
        if args:
            super(DNSException, self).__init__(*args)
        else:
            super(DNSException, self).__init__(self.msg)

    def _check_params(self, *args, **kwargs):
        """Old exceptions supported only args and not kwargs.

        For sanity we do not allow to mix old and new behavior."""
        if args or kwargs:
            assert bool(args) != bool(kwargs), \
                'keyword arguments are mutually exclusive with positional args'

    def _check_kwargs(self, **kwargs):
        # Every supported keyword must be present — no more, no fewer.
        if kwargs:
            assert set(kwargs.keys()) == self.supp_kwargs, \
                'following set of keyword args is required: %s' % (
                    self.supp_kwargs)
        return kwargs

    def _fmt_kwargs(self, **kwargs):
        """Format kwargs before printing them.

        Resulting dictionary has to have keys necessary for str.format call
        on fmt class variable.
        """
        fmtargs = {}
        for kw, data in kwargs.items():
            if isinstance(data, (list, set)):
                # convert list of <someobj> to list of str(<someobj>)
                fmtargs[kw] = list(map(str, data))
                if len(fmtargs[kw]) == 1:
                    # remove list brackets [] from single-item lists
                    fmtargs[kw] = fmtargs[kw].pop()
            else:
                fmtargs[kw] = data
        return fmtargs

    def __str__(self):
        if self.kwargs and self.fmt:
            # provide custom message constructed from keyword arguments
            fmtargs = self._fmt_kwargs(**self.kwargs)
            return self.fmt.format(**fmtargs)
        else:
            # print *args directly in the same way as old DNSException
            return super(DNSException, self).__str__()
+
+
# Concrete exception types.  Note that ``SyntaxError`` below deliberately
# shadows the Python builtin of the same name inside this module; callers
# refer to it as ``dns.exception.SyntaxError``.


class FormError(DNSException):
    """DNS message is malformed."""


class SyntaxError(DNSException):
    """Text input is malformed."""


class UnexpectedEnd(SyntaxError):
    """Text input ended unexpectedly."""


class TooBig(DNSException):
    """The DNS message is too big."""


class Timeout(DNSException):
    """The DNS operation timed out."""
    # Parametrized mode (see DNSException): raise Timeout(timeout=5.0)
    # and the message is rendered from ``fmt``.
    supp_kwargs = {'timeout'}
    fmt = "The DNS operation timed out after {timeout} seconds"
diff --git a/openpype/vendor/python/python_2/dns/flags.py b/openpype/vendor/python/python_2/dns/flags.py
new file mode 100644
index 0000000000..0119dec71f
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/flags.py
@@ -0,0 +1,130 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Message Flags."""
+
+# Standard DNS flags
+
+#: Query Response
+QR = 0x8000
+#: Authoritative Answer
+AA = 0x0400
+#: Truncated Response
+TC = 0x0200
+#: Recursion Desired
+RD = 0x0100
+#: Recursion Available
+RA = 0x0080
+#: Authentic Data
+AD = 0x0020
+#: Checking Disabled
+CD = 0x0010
+
+# EDNS flags
+
+#: DNSSEC answer OK
+DO = 0x8000
+
+_by_text = {
+ 'QR': QR,
+ 'AA': AA,
+ 'TC': TC,
+ 'RD': RD,
+ 'RA': RA,
+ 'AD': AD,
+ 'CD': CD
+}
+
+_edns_by_text = {
+ 'DO': DO
+}
+
+
+# We construct the inverse mappings programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mappings not to be true inverses.
+
+_by_value = {y: x for x, y in _by_text.items()}
+
+_edns_by_value = {y: x for x, y in _edns_by_text.items()}
+
+
+def _order_flags(table):
+ order = list(table.items())
+ order.sort()
+ order.reverse()
+ return order
+
+_flags_order = _order_flags(_by_value)
+
+_edns_flags_order = _order_flags(_edns_by_value)
+
+
+def _from_text(text, table):
+ flags = 0
+ tokens = text.split()
+ for t in tokens:
+ flags = flags | table[t.upper()]
+ return flags
+
+
+def _to_text(flags, table, order):
+ text_flags = []
+ for k, v in order:
+ if flags & k != 0:
+ text_flags.append(v)
+ return ' '.join(text_flags)
+
+
def from_text(text):
    """Convert a space-separated list of flag text values into a flags
    value.

    Returns an ``int``
    """
    return _from_text(text, _by_text)


def to_text(flags):
    """Convert a flags value into a space-separated list of flag text
    values.

    Returns a ``text``.
    """
    return _to_text(flags, _by_value, _flags_order)


def edns_from_text(text):
    """Convert a space-separated list of EDNS flag text values into a EDNS
    flags value.

    Returns an ``int``
    """
    return _from_text(text, _edns_by_text)


def edns_to_text(flags):
    """Convert an EDNS flags value into a space-separated list of EDNS flag
    text values.

    Returns a ``text``.
    """
    return _to_text(flags, _edns_by_value, _edns_flags_order)
diff --git a/openpype/vendor/python/python_2/dns/grange.py b/openpype/vendor/python/python_2/dns/grange.py
new file mode 100644
index 0000000000..ffe8be7c46
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/grange.py
@@ -0,0 +1,69 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2012-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS GENERATE range conversion."""
+
+import dns
+
def from_text(text):
    """Convert the text form of a range in a ``$GENERATE`` statement to an
    integer.

    *text*, a ``str``, the textual range in ``$GENERATE`` form
    (``start-stop`` or ``start-stop/step``).

    Raises ``dns.exception.SyntaxError`` if the text is malformed.

    Returns a tuple of three ``int`` values ``(start, stop, step)``.
    """

    # TODO, figure out the bounds on start, stop and step.
    step = 1
    cur = ''
    state = 0
    # state 0: parsing the start value
    # state 2: parsing the stop value (after '-')
    # state 4: parsing the step value (after '/')

    if text and text[0] == '-':
        raise dns.exception.SyntaxError("Start cannot be a negative number")

    for c in text:
        if c == '-' and state == 0:
            if not cur:
                raise dns.exception.SyntaxError("empty start value")
            start = int(cur)
            cur = ''
            state = 2
        elif c == '/':
            # A '/' is only legal after a complete "start-stop" part.
            # Previously a '/' in state 0 set ``stop`` without ``start``
            # and the function later died with NameError instead of a
            # parse error.
            if state != 2 or not cur:
                raise dns.exception.SyntaxError("Could not parse %s" % (c))
            stop = int(cur)
            cur = ''
            state = 4
        elif c.isdigit():
            cur += c
        else:
            raise dns.exception.SyntaxError("Could not parse %s" % (c))

    if state == 0 or not cur:
        # No '-' was seen, or the text ended right after '-' or '/'.
        raise dns.exception.SyntaxError()

    if state == 2:
        stop = int(cur)
    else:
        step = int(cur)

    # Report invalid ranges as syntax errors rather than via ``assert``
    # (assertions disappear under ``python -O``).
    if step < 1:
        raise dns.exception.SyntaxError("step must be >= 1")
    if start > stop:
        raise dns.exception.SyntaxError("start must not be greater than stop")
    # TODO, can start == stop?

    return (start, stop, step)
diff --git a/openpype/vendor/python/python_2/dns/hash.py b/openpype/vendor/python/python_2/dns/hash.py
new file mode 100644
index 0000000000..1713e62894
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/hash.py
@@ -0,0 +1,37 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Hashing backwards compatibility wrapper"""
+
+import hashlib
+import warnings
+
+warnings.warn(
+ "dns.hash module will be removed in future versions. Please use hashlib instead.",
+ DeprecationWarning)
+
# Map of supported algorithm names to their hashlib constructors.
hashes = {
    'MD5': hashlib.md5,
    'SHA1': hashlib.sha1,
    'SHA224': hashlib.sha224,
    'SHA256': hashlib.sha256,
    'SHA384': hashlib.sha384,
    'SHA512': hashlib.sha512,
}


def get(algorithm):
    """Return the hashlib constructor for *algorithm* (case-insensitive).

    Raises ``KeyError`` for unknown algorithm names.
    """
    return hashes[algorithm.upper()]
diff --git a/openpype/vendor/python/python_2/dns/inet.py b/openpype/vendor/python/python_2/dns/inet.py
new file mode 100644
index 0000000000..c8d7c1b404
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/inet.py
@@ -0,0 +1,124 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Generic Internet address helper functions."""
+
+import socket
+
+import dns.ipv4
+import dns.ipv6
+
+from ._compat import maybe_ord
+
+# We assume that AF_INET is always defined.
+
+AF_INET = socket.AF_INET
+
+# AF_INET6 might not be defined in the socket module, but we need it.
+# We'll try to use the socket module's value, and if it doesn't work,
+# we'll use our own value.
+
+try:
+ AF_INET6 = socket.AF_INET6
+except AttributeError:
+ AF_INET6 = 9999
+
+
def inet_pton(family, text):
    """Convert the textual form of a network address into its binary form.

    *family* is an ``int``, the address family.

    *text* is a ``text``, the textual address.

    Raises ``NotImplementedError`` if the address family specified is not
    implemented.

    Returns a ``binary``.
    """

    if family == AF_INET:
        return dns.ipv4.inet_aton(text)
    if family == AF_INET6:
        return dns.ipv6.inet_aton(text)
    raise NotImplementedError
+
+
def inet_ntop(family, address):
    """Convert the binary form of a network address into its textual form.

    *family* is an ``int``, the address family.

    *address* is a ``binary``, the network address in binary form.

    Raises ``NotImplementedError`` if the address family specified is not
    implemented.

    Returns a ``text``.
    """

    if family == AF_INET:
        return dns.ipv4.inet_ntoa(address)
    if family == AF_INET6:
        return dns.ipv6.inet_ntoa(address)
    raise NotImplementedError
+
+
def af_for_address(text):
    """Determine the address family of a textual-form network address.

    *text*, a ``text``, the textual address.

    Raises ``ValueError`` if the address family cannot be determined
    from the input.

    Returns an ``int``.
    """

    try:
        dns.ipv4.inet_aton(text)
        return AF_INET
    except Exception:
        try:
            dns.ipv6.inet_aton(text)
            return AF_INET6
        except Exception:
            # Catch only real errors: the bare ``except:`` that was here
            # also swallowed KeyboardInterrupt/SystemExit.  This now
            # matches the handling in is_multicast() below.
            raise ValueError
+
+
def is_multicast(text):
    """Is the textual-form network address a multicast address?

    *text*, a ``text``, the textual address.

    Raises ``ValueError`` if the address family cannot be determined
    from the input.

    Returns a ``bool``.
    """

    # IPv4 multicast is 224.0.0.0/4; IPv6 multicast is ff00::/8.
    try:
        first_octet = maybe_ord(dns.ipv4.inet_aton(text)[0])
        return 224 <= first_octet <= 239
    except Exception:
        try:
            first_octet = maybe_ord(dns.ipv6.inet_aton(text)[0])
            return first_octet == 255
        except Exception:
            raise ValueError
diff --git a/openpype/vendor/python/python_2/dns/ipv4.py b/openpype/vendor/python/python_2/dns/ipv4.py
new file mode 100644
index 0000000000..8fc4f7dcfd
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/ipv4.py
@@ -0,0 +1,63 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""IPv4 helper functions."""
+
+import struct
+
+import dns.exception
+from ._compat import binary_type
+
def inet_ntoa(address):
    """Convert an IPv4 address in binary form to text form.

    *address*, a ``binary``, the IPv4 address in binary form.

    Raises ``dns.exception.SyntaxError`` if the input is not 4 bytes long.

    Returns a ``text``.
    """

    if len(address) != 4:
        raise dns.exception.SyntaxError
    # bytearray gives integer octets on both Python 2 and 3.
    octets = bytearray(address)
    return '%u.%u.%u.%u' % (octets[0], octets[1], octets[2], octets[3])
+
def inet_aton(text):
    """Convert an IPv4 address in text form to binary form.

    *text*, a ``text``, the IPv4 address in textual form.

    Raises ``dns.exception.SyntaxError`` if the input is not a valid
    dotted-quad IPv4 address.

    Returns a ``binary``.
    """

    # ``bytes`` is ``str`` on Python 2, so this works on both lines.
    if not isinstance(text, bytes):
        text = text.encode()
    parts = text.split(b'.')
    if len(parts) != 4:
        raise dns.exception.SyntaxError
    for part in parts:
        if not part.isdigit():
            raise dns.exception.SyntaxError
        if len(part) > 1 and part[0:1] == b'0':
            # No leading zeros.  The slice (rather than ``part[0]``) keeps
            # this check working on Python 3, where indexing bytes yields
            # an int and the old comparison with '0' was always false.
            raise dns.exception.SyntaxError
    try:
        octets = [int(part) for part in parts]
        return struct.pack('BBBB', *octets)
    except struct.error:
        # A component was outside 0-255.
        raise dns.exception.SyntaxError
diff --git a/openpype/vendor/python/python_2/dns/ipv6.py b/openpype/vendor/python/python_2/dns/ipv6.py
new file mode 100644
index 0000000000..128e56c8f1
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/ipv6.py
@@ -0,0 +1,181 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""IPv6 helper functions."""
+
+import re
+import binascii
+
+import dns.exception
+import dns.ipv4
+from ._compat import xrange, binary_type, maybe_decode
+
+_leading_zero = re.compile(r'0+([0-9a-f]+)')
+
def inet_ntoa(address):
    """Convert an IPv6 address in binary form to text form.

    *address*, a ``binary``, the IPv6 address in binary form.

    Raises ``ValueError`` if the address isn't 16 bytes long.
    Returns a ``text``.
    """

    if len(address) != 16:
        raise ValueError("IPv6 addresses are 16 bytes long")
    hex = binascii.hexlify(address)
    chunks = []
    i = 0
    l = len(hex)
    # Split the 32 hex digits into eight 4-digit chunks, one per 16-bit
    # group of the address.
    while i < l:
        chunk = maybe_decode(hex[i : i + 4])
        # strip leading zeros.  we do this with an re instead of
        # with lstrip() because lstrip() didn't support chars until
        # python 2.2.2
        m = _leading_zero.match(chunk)
        if not m is None:
            chunk = m.group(1)
        chunks.append(chunk)
        i += 4
    #
    # Compress the longest subsequence of 0-value chunks to ::
    #
    best_start = 0
    best_len = 0
    start = -1
    last_was_zero = False
    for i in xrange(8):
        if chunks[i] != '0':
            if last_was_zero:
                # A zero run just ended; remember it if it is the longest.
                end = i
                current_len = end - start
                if current_len > best_len:
                    best_start = start
                    best_len = current_len
                last_was_zero = False
        elif not last_was_zero:
            start = i
            last_was_zero = True
    if last_was_zero:
        # The address ended with a run of zero chunks.
        end = 8
        current_len = end - start
        if current_len > best_len:
            best_start = start
            best_len = current_len
    if best_len > 1:
        if best_start == 0 and \
           (best_len == 6 or
            best_len == 5 and chunks[5] == 'ffff'):
            # We have an embedded IPv4 address
            if best_len == 6:
                prefix = '::'
            else:
                prefix = '::ffff:'
            hex = prefix + dns.ipv4.inet_ntoa(address[12:])
        else:
            hex = ':'.join(chunks[:best_start]) + '::' + \
                  ':'.join(chunks[best_start + best_len:])
    else:
        # No run of two or more zero chunks; nothing to compress.
        hex = ':'.join(chunks)
    return hex
+
+_v4_ending = re.compile(br'(.*):(\d+\.\d+\.\d+\.\d+)$')
+_colon_colon_start = re.compile(br'::.*')
+_colon_colon_end = re.compile(br'.*::$')
+
def inet_aton(text):
    """Convert an IPv6 address in text form to binary form.

    *text*, a ``text``, the IPv6 address in textual form.

    Raises ``dns.exception.SyntaxError`` if the text is not a valid
    IPv6 address.

    Returns a ``binary``.
    """

    #
    # Our aim here is not something fast; we just want something that works.
    #
    if not isinstance(text, binary_type):
        text = text.encode()

    if text == b'::':
        text = b'0::'
    #
    # Get rid of the icky dot-quad syntax if we have it.
    #
    m = _v4_ending.match(text)
    if not m is None:
        b = bytearray(dns.ipv4.inet_aton(m.group(2)))
        text = (u"{}:{:02x}{:02x}:{:02x}{:02x}".format(m.group(1).decode(),
                                                       b[0], b[1], b[2],
                                                       b[3])).encode()
    #
    # Try to turn '::<whatever>' into ':<whatever>'; if no match try to
    # turn '<whatever>::' into '<whatever>:'
    #
    m = _colon_colon_start.match(text)
    if not m is None:
        text = text[1:]
    else:
        m = _colon_colon_end.match(text)
        if not m is None:
            text = text[:-1]
    #
    # Now canonicalize into 8 chunks of 4 hex digits each
    #
    chunks = text.split(b':')
    l = len(chunks)
    if l > 8:
        raise dns.exception.SyntaxError
    seen_empty = False
    canonical = []
    for c in chunks:
        if c == b'':
            # The (single) empty chunk is the '::'; expand it to however
            # many zero groups are needed to reach eight.
            if seen_empty:
                raise dns.exception.SyntaxError
            seen_empty = True
            for i in xrange(0, 8 - l + 1):
                canonical.append(b'0000')
        else:
            lc = len(c)
            if lc > 4:
                raise dns.exception.SyntaxError
            if lc != 4:
                # Pad with leading zeros to 4 hex digits.
                c = (b'0' * (4 - lc)) + c
            canonical.append(c)
    if l < 8 and not seen_empty:
        raise dns.exception.SyntaxError
    text = b''.join(canonical)

    #
    # Finally we can go to binary.
    #
    try:
        return binascii.unhexlify(text)
    except (binascii.Error, TypeError):
        raise dns.exception.SyntaxError
+
# An IPv4-mapped IPv6 address starts with 80 zero bits then 16 one bits.
_mapped_prefix = b'\x00' * 10 + b'\xff\xff'

def is_mapped(address):
    """Is the specified address a mapped IPv4 address?

    *address*, a ``binary`` is an IPv6 address in binary form.

    Returns a ``bool``.
    """

    return address[:12] == _mapped_prefix
diff --git a/openpype/vendor/python/python_2/dns/message.py b/openpype/vendor/python/python_2/dns/message.py
new file mode 100644
index 0000000000..9d2b2f43c9
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/message.py
@@ -0,0 +1,1175 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Messages"""
+
+from __future__ import absolute_import
+
from io import StringIO
import struct
import time

import dns.edns
import dns.entropy
import dns.exception
import dns.flags
import dns.name
import dns.opcode
import dns.rcode
import dns.rdata
import dns.rdataclass
import dns.rdatatype
import dns.renderer
import dns.rrset
import dns.tokenizer
import dns.tsig
import dns.wiredata

from ._compat import long, xrange, string_types
+
+
+class ShortHeader(dns.exception.FormError):
+ """The DNS packet passed to from_wire() is too short."""
+
+
+class TrailingJunk(dns.exception.FormError):
+ """The DNS packet passed to from_wire() has extra junk at the end of it."""
+
+
+class UnknownHeaderField(dns.exception.DNSException):
+ """The header field name was not recognized when converting from text
+ into a message."""
+
+
+class BadEDNS(dns.exception.FormError):
+ """An OPT record occurred somewhere other than the start of
+ the additional data section."""
+
+
+class BadTSIG(dns.exception.FormError):
+ """A TSIG record occurred somewhere other than the end of
+ the additional data section."""
+
+
+class UnknownTSIGKey(dns.exception.DNSException):
+ """A TSIG with an unknown key was received."""
+
+
+#: The question section number
+QUESTION = 0
+
+#: The answer section number
+ANSWER = 1
+
+#: The authority section number
+AUTHORITY = 2
+
+#: The additional section number
+ADDITIONAL = 3
+
+class Message(object):
+ """A DNS message."""
+
    def __init__(self, id=None):
        # *id*, an ``int`` or ``None``.  If ``None``, a random 16-bit
        # message id is generated.
        if id is None:
            self.id = dns.entropy.random_16()
        else:
            self.id = id
        self.flags = 0            # DNS header flags word
        # The four message sections, each a list of dns.rrset.RRset.
        self.question = []
        self.answer = []
        self.authority = []
        self.additional = []
        self.edns = -1            # EDNS level; -1 means EDNS is not in use
        self.ednsflags = 0
        self.payload = 0          # EDNS payload size we advertise
        self.options = []         # list of dns.edns.Option
        self.request_payload = 0  # EDNS payload size to use when sending
        # TSIG signing state (see use_tsig()).
        self.keyring = None
        self.keyname = None
        self.keyalgorithm = dns.tsig.default_algorithm
        self.request_mac = b''
        self.other_data = b''
        self.tsig_error = 0
        self.fudge = 300
        self.original_id = self.id
        self.mac = b''
        # Zone-transfer / wire-parsing state (see from_wire()).
        self.xfr = False
        self.origin = None
        self.tsig_ctx = None
        self.had_tsig = False
        self.multi = False
        self.first = True
        # (section#, name, rdclass, rdtype, covers, deleting) -> RRset,
        # used by find_rrset() to avoid linear scans.
        self.index = {}
+
+ def __repr__(self):
+ return ''
+
    def __str__(self):
        # Delegate to to_text() with default origin/relativization.
        return self.to_text()
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ """Convert the message to text.
+
+ The *origin*, *relativize*, and any other keyword
+ arguments are passed to the RRset ``to_wire()`` method.
+
+ Returns a ``text``.
+ """
+
+ s = StringIO()
+ s.write(u'id %d\n' % self.id)
+ s.write(u'opcode %s\n' %
+ dns.opcode.to_text(dns.opcode.from_flags(self.flags)))
+ rc = dns.rcode.from_flags(self.flags, self.ednsflags)
+ s.write(u'rcode %s\n' % dns.rcode.to_text(rc))
+ s.write(u'flags %s\n' % dns.flags.to_text(self.flags))
+ if self.edns >= 0:
+ s.write(u'edns %s\n' % self.edns)
+ if self.ednsflags != 0:
+ s.write(u'eflags %s\n' %
+ dns.flags.edns_to_text(self.ednsflags))
+ s.write(u'payload %d\n' % self.payload)
+ for opt in self.options:
+ s.write(u'option %s\n' % opt.to_text())
+ is_update = dns.opcode.is_update(self.flags)
+ if is_update:
+ s.write(u';ZONE\n')
+ else:
+ s.write(u';QUESTION\n')
+ for rrset in self.question:
+ s.write(rrset.to_text(origin, relativize, **kw))
+ s.write(u'\n')
+ if is_update:
+ s.write(u';PREREQ\n')
+ else:
+ s.write(u';ANSWER\n')
+ for rrset in self.answer:
+ s.write(rrset.to_text(origin, relativize, **kw))
+ s.write(u'\n')
+ if is_update:
+ s.write(u';UPDATE\n')
+ else:
+ s.write(u';AUTHORITY\n')
+ for rrset in self.authority:
+ s.write(rrset.to_text(origin, relativize, **kw))
+ s.write(u'\n')
+ s.write(u';ADDITIONAL\n')
+ for rrset in self.additional:
+ s.write(rrset.to_text(origin, relativize, **kw))
+ s.write(u'\n')
+ #
+ # We strip off the final \n so the caller can print the result without
+ # doing weird things to get around eccentricities in Python print
+ # formatting
+ #
+ return s.getvalue()[:-1]
+
+ def __eq__(self, other):
+ """Two messages are equal if they have the same content in the
+ header, question, answer, and authority sections.
+
+ Returns a ``bool``.
+ """
+
+ if not isinstance(other, Message):
+ return False
+ if self.id != other.id:
+ return False
+ if self.flags != other.flags:
+ return False
+ for n in self.question:
+ if n not in other.question:
+ return False
+ for n in other.question:
+ if n not in self.question:
+ return False
+ for n in self.answer:
+ if n not in other.answer:
+ return False
+ for n in other.answer:
+ if n not in self.answer:
+ return False
+ for n in self.authority:
+ if n not in other.authority:
+ return False
+ for n in other.authority:
+ if n not in self.authority:
+ return False
+ return True
+
    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__ automatically, so it
        # must be defined explicitly.
        return not self.__eq__(other)
+
    def is_response(self, other):
        """Is *other* a response to this message?

        Note: the code requires *other* to carry the QR bit, so *other*
        is the response and ``self`` is the query (the typical call is
        ``query.is_response(response)``).

        Returns a ``bool``.
        """

        # The response must have QR set, and must match our id and opcode.
        if other.flags & dns.flags.QR == 0 or \
           self.id != other.id or \
           dns.opcode.from_flags(self.flags) != \
           dns.opcode.from_flags(other.flags):
            return False
        # Error responses may legitimately lack or alter the question
        # section, so accept them without comparing questions.
        if dns.rcode.from_flags(other.flags, other.ednsflags) != \
                dns.rcode.NOERROR:
            return True
        # Dynamic update responses are not required to echo the zone.
        if dns.opcode.is_update(self.flags):
            return True
        # Otherwise the question sections must contain the same RRsets.
        for n in self.question:
            if n not in other.question:
                return False
        for n in other.question:
            if n not in self.question:
                return False
        return True
+
+ def section_number(self, section):
+ """Return the "section number" of the specified section for use
+ in indexing. The question section is 0, the answer section is 1,
+ the authority section is 2, and the additional section is 3.
+
+ *section* is one of the section attributes of this message.
+
+ Raises ``ValueError`` if the section isn't known.
+
+ Returns an ``int``.
+ """
+
+ if section is self.question:
+ return QUESTION
+ elif section is self.answer:
+ return ANSWER
+ elif section is self.authority:
+ return AUTHORITY
+ elif section is self.additional:
+ return ADDITIONAL
+ else:
+ raise ValueError('unknown section')
+
    def section_from_number(self, number):
        """Return the section list associated with the specified section
        number.  0 is the question section, 1 the answer section, 2 the
        authority section, and 3 the additional section.

        *number*, an ``int``, the section number (``QUESTION``, ``ANSWER``,
        ``AUTHORITY`` or ``ADDITIONAL``).

        Raises ``ValueError`` if the section isn't known.

        Returns a ``list`` of ``dns.rrset.RRset``.
        """
        # NOTE: the previous docstring was copied from section_number()
        # and described the inverse mapping (section -> number).

        if number == QUESTION:
            return self.question
        elif number == ANSWER:
            return self.answer
        elif number == AUTHORITY:
            return self.authority
        elif number == ADDITIONAL:
            return self.additional
        else:
            raise ValueError('unknown section')
+
+ def find_rrset(self, section, name, rdclass, rdtype,
+ covers=dns.rdatatype.NONE, deleting=None, create=False,
+ force_unique=False):
+ """Find the RRset with the given attributes in the specified section.
+
+ *section*, an ``int`` section number, or one of the section
+ attributes of this message. This specifies the
+ the section of the message to search. For example::
+
+ my_message.find_rrset(my_message.answer, name, rdclass, rdtype)
+ my_message.find_rrset(dns.message.ANSWER, name, rdclass, rdtype)
+
+ *name*, a ``dns.name.Name``, the name of the RRset.
+
+ *rdclass*, an ``int``, the class of the RRset.
+
+ *rdtype*, an ``int``, the type of the RRset.
+
+ *covers*, an ``int`` or ``None``, the covers value of the RRset.
+ The default is ``None``.
+
+ *deleting*, an ``int`` or ``None``, the deleting value of the RRset.
+ The default is ``None``.
+
+ *create*, a ``bool``. If ``True``, create the RRset if it is not found.
+ The created RRset is appended to *section*.
+
+ *force_unique*, a ``bool``. If ``True`` and *create* is also ``True``,
+ create a new RRset regardless of whether a matching RRset exists
+ already. The default is ``False``. This is useful when creating
+ DDNS Update messages, as order matters for them.
+
+ Raises ``KeyError`` if the RRset was not found and create was
+ ``False``.
+
+ Returns a ``dns.rrset.RRset object``.
+ """
+
+ if isinstance(section, int):
+ section_number = section
+ section = self.section_from_number(section_number)
+ else:
+ section_number = self.section_number(section)
+ key = (section_number, name, rdclass, rdtype, covers, deleting)
+ if not force_unique:
+ if self.index is not None:
+ rrset = self.index.get(key)
+ if rrset is not None:
+ return rrset
+ else:
+ for rrset in section:
+ if rrset.match(name, rdclass, rdtype, covers, deleting):
+ return rrset
+ if not create:
+ raise KeyError
+ rrset = dns.rrset.RRset(name, rdclass, rdtype, covers, deleting)
+ section.append(rrset)
+ if self.index is not None:
+ self.index[key] = rrset
+ return rrset
+
+ def get_rrset(self, section, name, rdclass, rdtype,
+ covers=dns.rdatatype.NONE, deleting=None, create=False,
+ force_unique=False):
+ """Get the RRset with the given attributes in the specified section.
+
+ If the RRset is not found, None is returned.
+
+ *section*, an ``int`` section number, or one of the section
+ attributes of this message. This specifies the
+ the section of the message to search. For example::
+
+ my_message.get_rrset(my_message.answer, name, rdclass, rdtype)
+ my_message.get_rrset(dns.message.ANSWER, name, rdclass, rdtype)
+
+ *name*, a ``dns.name.Name``, the name of the RRset.
+
+ *rdclass*, an ``int``, the class of the RRset.
+
+ *rdtype*, an ``int``, the type of the RRset.
+
+ *covers*, an ``int`` or ``None``, the covers value of the RRset.
+ The default is ``None``.
+
+ *deleting*, an ``int`` or ``None``, the deleting value of the RRset.
+ The default is ``None``.
+
+ *create*, a ``bool``. If ``True``, create the RRset if it is not found.
+ The created RRset is appended to *section*.
+
+ *force_unique*, a ``bool``. If ``True`` and *create* is also ``True``,
+ create a new RRset regardless of whether a matching RRset exists
+ already. The default is ``False``. This is useful when creating
+ DDNS Update messages, as order matters for them.
+
+ Returns a ``dns.rrset.RRset object`` or ``None``.
+ """
+
+ try:
+ rrset = self.find_rrset(section, name, rdclass, rdtype, covers,
+ deleting, create, force_unique)
+ except KeyError:
+ rrset = None
+ return rrset
+
+ def to_wire(self, origin=None, max_size=0, **kw):
+ """Return a string containing the message in DNS compressed wire
+ format.
+
+ Additional keyword arguments are passed to the RRset ``to_wire()``
+ method.
+
+ *origin*, a ``dns.name.Name`` or ``None``, the origin to be appended
+ to any relative names.
+
+ *max_size*, an ``int``, the maximum size of the wire format
+ output; default is 0, which means "the message's request
+ payload, if nonzero, or 65535".
+
+ Raises ``dns.exception.TooBig`` if *max_size* was exceeded.
+
+ Returns a ``binary``.
+ """
+
+ if max_size == 0:
+ if self.request_payload != 0:
+ max_size = self.request_payload
+ else:
+ max_size = 65535
+ if max_size < 512:
+ max_size = 512
+ elif max_size > 65535:
+ max_size = 65535
+ r = dns.renderer.Renderer(self.id, self.flags, max_size, origin)
+ for rrset in self.question:
+ r.add_question(rrset.name, rrset.rdtype, rrset.rdclass)
+ for rrset in self.answer:
+ r.add_rrset(dns.renderer.ANSWER, rrset, **kw)
+ for rrset in self.authority:
+ r.add_rrset(dns.renderer.AUTHORITY, rrset, **kw)
+ if self.edns >= 0:
+ r.add_edns(self.edns, self.ednsflags, self.payload, self.options)
+ for rrset in self.additional:
+ r.add_rrset(dns.renderer.ADDITIONAL, rrset, **kw)
+ r.write_header()
+ if self.keyname is not None:
+ r.add_tsig(self.keyname, self.keyring[self.keyname],
+ self.fudge, self.original_id, self.tsig_error,
+ self.other_data, self.request_mac,
+ self.keyalgorithm)
+ self.mac = r.mac
+ return r.get_wire()
+
+ def use_tsig(self, keyring, keyname=None, fudge=300,
+ original_id=None, tsig_error=0, other_data=b'',
+ algorithm=dns.tsig.default_algorithm):
+ """When sending, a TSIG signature using the specified keyring
+ and keyname should be added.
+
+ See the documentation of the Message class for a complete
+ description of the keyring dictionary.
+
+ *keyring*, a ``dict``, the TSIG keyring to use. If a
+ *keyring* is specified but a *keyname* is not, then the key
+ used will be the first key in the *keyring*. Note that the
+ order of keys in a dictionary is not defined, so applications
+ should supply a keyname when a keyring is used, unless they
+ know the keyring contains only one key.
+
+ *keyname*, a ``dns.name.Name`` or ``None``, the name of the TSIG key
+ to use; defaults to ``None``. The key must be defined in the keyring.
+
+ *fudge*, an ``int``, the TSIG time fudge.
+
+ *original_id*, an ``int``, the TSIG original id. If ``None``,
+ the message's id is used.
+
+ *tsig_error*, an ``int``, the TSIG error code.
+
+ *other_data*, a ``binary``, the TSIG other data.
+
+ *algorithm*, a ``dns.name.Name``, the TSIG algorithm to use.
+ """
+
+ self.keyring = keyring
+ if keyname is None:
+ self.keyname = list(self.keyring.keys())[0]
+ else:
+ if isinstance(keyname, string_types):
+ keyname = dns.name.from_text(keyname)
+ self.keyname = keyname
+ self.keyalgorithm = algorithm
+ self.fudge = fudge
+ if original_id is None:
+ self.original_id = self.id
+ else:
+ self.original_id = original_id
+ self.tsig_error = tsig_error
+ self.other_data = other_data
+
+ def use_edns(self, edns=0, ednsflags=0, payload=1280, request_payload=None,
+ options=None):
+ """Configure EDNS behavior.
+
+ *edns*, an ``int``, is the EDNS level to use. Specifying
+ ``None``, ``False``, or ``-1`` means "do not use EDNS", and in this case
+ the other parameters are ignored. Specifying ``True`` is
+ equivalent to specifying 0, i.e. "use EDNS0".
+
+ *ednsflags*, an ``int``, the EDNS flag values.
+
+ *payload*, an ``int``, is the EDNS sender's payload field, which is the
+ maximum size of UDP datagram the sender can handle. I.e. how big
+ a response to this message can be.
+
+ *request_payload*, an ``int``, is the EDNS payload size to use when
+ sending this message. If not specified, defaults to the value of
+ *payload*.
+
+ *options*, a list of ``dns.edns.Option`` objects or ``None``, the EDNS
+ options.
+ """
+
+ if edns is None or edns is False:
+ edns = -1
+ if edns is True:
+ edns = 0
+ if request_payload is None:
+ request_payload = payload
+ if edns < 0:
+ ednsflags = 0
+ payload = 0
+ request_payload = 0
+ options = []
+ else:
+ # make sure the EDNS version in ednsflags agrees with edns
+ ednsflags &= long(0xFF00FFFF)
+ ednsflags |= (edns << 16)
+ if options is None:
+ options = []
+ self.edns = edns
+ self.ednsflags = ednsflags
+ self.payload = payload
+ self.options = options
+ self.request_payload = request_payload
+
+ def want_dnssec(self, wanted=True):
+ """Enable or disable 'DNSSEC desired' flag in requests.
+
+ *wanted*, a ``bool``. If ``True``, then DNSSEC data is
+ desired in the response, EDNS is enabled if required, and then
+ the DO bit is set. If ``False``, the DO bit is cleared if
+ EDNS is enabled.
+ """
+
+ if wanted:
+ if self.edns < 0:
+ self.use_edns()
+ self.ednsflags |= dns.flags.DO
+ elif self.edns >= 0:
+ self.ednsflags &= ~dns.flags.DO
+
+ def rcode(self):
+ """Return the rcode.
+
+ Returns an ``int``.
+ """
+ return dns.rcode.from_flags(self.flags, self.ednsflags)
+
    def set_rcode(self, rcode):
        """Set the rcode.

        *rcode*, an ``int``, is the rcode to set.
        """
        # to_flags() splits the rcode into the 4-bit basic value and the
        # extended-rcode bits carried in the EDNS flags.
        (value, evalue) = dns.rcode.to_flags(rcode)
        # Replace the low 4 bits of the header flags...
        self.flags &= 0xFFF0
        self.flags |= value
        # ...and the extended-rcode byte (bits 24-31) of the EDNS flags.
        self.ednsflags &= long(0x00FFFFFF)
        self.ednsflags |= evalue
        # An extended rcode requires EDNS; enable it if necessary.
        if self.ednsflags != 0 and self.edns < 0:
            self.edns = 0
+
+ def opcode(self):
+ """Return the opcode.
+
+ Returns an ``int``.
+ """
+ return dns.opcode.from_flags(self.flags)
+
    def set_opcode(self, opcode):
        """Set the opcode.

        *opcode*, an ``int``, is the opcode to set.
        """
        # Clear the 4-bit opcode field (mask 0x7800) and insert the new
        # value, leaving all other header flag bits untouched.
        self.flags &= 0x87FF
        self.flags |= dns.opcode.to_flags(opcode)
+
+
+class _WireReader(object):
+
+ """Wire format reader.
+
+ wire: a binary, is the wire-format message.
+ message: The message object being built
+ current: When building a message object from wire format, this
+ variable contains the offset from the beginning of wire of the next octet
+ to be read.
+ updating: Is the message a dynamic update?
+ one_rr_per_rrset: Put each RR into its own RRset?
+ ignore_trailing: Ignore trailing junk at end of request?
+ zone_rdclass: The class of the zone in messages which are
+ DNS dynamic updates.
+ """
+
+ def __init__(self, wire, message, question_only=False,
+ one_rr_per_rrset=False, ignore_trailing=False):
+ self.wire = dns.wiredata.maybe_wrap(wire)
+ self.message = message
+ self.current = 0
+ self.updating = False
+ self.zone_rdclass = dns.rdataclass.IN
+ self.question_only = question_only
+ self.one_rr_per_rrset = one_rr_per_rrset
+ self.ignore_trailing = ignore_trailing
+
+ def _get_question(self, qcount):
+ """Read the next *qcount* records from the wire data and add them to
+ the question section.
+ """
+
+ if self.updating and qcount > 1:
+ raise dns.exception.FormError
+
+ for i in xrange(0, qcount):
+ (qname, used) = dns.name.from_wire(self.wire, self.current)
+ if self.message.origin is not None:
+ qname = qname.relativize(self.message.origin)
+ self.current = self.current + used
+ (rdtype, rdclass) = \
+ struct.unpack('!HH',
+ self.wire[self.current:self.current + 4])
+ self.current = self.current + 4
+ self.message.find_rrset(self.message.question, qname,
+ rdclass, rdtype, create=True,
+ force_unique=True)
+ if self.updating:
+ self.zone_rdclass = rdclass
+
+ def _get_section(self, section, count):
+ """Read the next I{count} records from the wire data and add them to
+ the specified section.
+
+ section: the section of the message to which to add records
+ count: the number of records to read
+ """
+
+ if self.updating or self.one_rr_per_rrset:
+ force_unique = True
+ else:
+ force_unique = False
+ seen_opt = False
+ for i in xrange(0, count):
+ rr_start = self.current
+ (name, used) = dns.name.from_wire(self.wire, self.current)
+ absolute_name = name
+ if self.message.origin is not None:
+ name = name.relativize(self.message.origin)
+ self.current = self.current + used
+ (rdtype, rdclass, ttl, rdlen) = \
+ struct.unpack('!HHIH',
+ self.wire[self.current:self.current + 10])
+ self.current = self.current + 10
+ if rdtype == dns.rdatatype.OPT:
+ if section is not self.message.additional or seen_opt:
+ raise BadEDNS
+ self.message.payload = rdclass
+ self.message.ednsflags = ttl
+ self.message.edns = (ttl & 0xff0000) >> 16
+ self.message.options = []
+ current = self.current
+ optslen = rdlen
+ while optslen > 0:
+ (otype, olen) = \
+ struct.unpack('!HH',
+ self.wire[current:current + 4])
+ current = current + 4
+ opt = dns.edns.option_from_wire(
+ otype, self.wire, current, olen)
+ self.message.options.append(opt)
+ current = current + olen
+ optslen = optslen - 4 - olen
+ seen_opt = True
+ elif rdtype == dns.rdatatype.TSIG:
+ if not (section is self.message.additional and
+ i == (count - 1)):
+ raise BadTSIG
+ if self.message.keyring is None:
+ raise UnknownTSIGKey('got signed message without keyring')
+ secret = self.message.keyring.get(absolute_name)
+ if secret is None:
+ raise UnknownTSIGKey("key '%s' unknown" % name)
+ self.message.keyname = absolute_name
+ (self.message.keyalgorithm, self.message.mac) = \
+ dns.tsig.get_algorithm_and_mac(self.wire, self.current,
+ rdlen)
+ self.message.tsig_ctx = \
+ dns.tsig.validate(self.wire,
+ absolute_name,
+ secret,
+ int(time.time()),
+ self.message.request_mac,
+ rr_start,
+ self.current,
+ rdlen,
+ self.message.tsig_ctx,
+ self.message.multi,
+ self.message.first)
+ self.message.had_tsig = True
+ else:
+ if ttl < 0:
+ ttl = 0
+ if self.updating and \
+ (rdclass == dns.rdataclass.ANY or
+ rdclass == dns.rdataclass.NONE):
+ deleting = rdclass
+ rdclass = self.zone_rdclass
+ else:
+ deleting = None
+ if deleting == dns.rdataclass.ANY or \
+ (deleting == dns.rdataclass.NONE and
+ section is self.message.answer):
+ covers = dns.rdatatype.NONE
+ rd = None
+ else:
+ rd = dns.rdata.from_wire(rdclass, rdtype, self.wire,
+ self.current, rdlen,
+ self.message.origin)
+ covers = rd.covers()
+ if self.message.xfr and rdtype == dns.rdatatype.SOA:
+ force_unique = True
+ rrset = self.message.find_rrset(section, name,
+ rdclass, rdtype, covers,
+ deleting, True, force_unique)
+ if rd is not None:
+ rrset.add(rd, ttl)
+ self.current = self.current + rdlen
+
    def read(self):
        """Read a wire format DNS message and build a dns.message.Message
        object.

        Raises ``ShortHeader`` if the wire data is shorter than the fixed
        12-octet header, and ``TrailingJunk`` if unread octets remain and
        ``ignore_trailing`` is false.
        """

        l = len(self.wire)
        if l < 12:
            raise ShortHeader
        # Fixed 12-octet header: id, flags, then the four section counts.
        (self.message.id, self.message.flags, qcount, ancount,
         aucount, adcount) = struct.unpack('!HHHHHH', self.wire[:12])
        self.current = 12
        if dns.opcode.is_update(self.message.flags):
            self.updating = True
        self._get_question(qcount)
        if self.question_only:
            return
        self._get_section(self.message.answer, ancount)
        self._get_section(self.message.authority, aucount)
        self._get_section(self.message.additional, adcount)
        if not self.ignore_trailing and self.current != l:
            raise TrailingJunk
        # In TSIG-signed multi-message streams (zone transfers), unsigned
        # intermediate messages still feed the running TSIG context.
        if self.message.multi and self.message.tsig_ctx and \
           not self.message.had_tsig:
            self.message.tsig_ctx.update(self.wire)
+
+
+def from_wire(wire, keyring=None, request_mac=b'', xfr=False, origin=None,
+ tsig_ctx=None, multi=False, first=True,
+ question_only=False, one_rr_per_rrset=False,
+ ignore_trailing=False):
+ """Convert a DNS wire format message into a message
+ object.
+
+ *keyring*, a ``dict``, the keyring to use if the message is signed.
+
+ *request_mac*, a ``binary``. If the message is a response to a
+ TSIG-signed request, *request_mac* should be set to the MAC of
+ that request.
+
+ *xfr*, a ``bool``, should be set to ``True`` if this message is part of
+ a zone transfer.
+
+ *origin*, a ``dns.name.Name`` or ``None``. If the message is part
+ of a zone transfer, *origin* should be the origin name of the
+ zone.
+
+ *tsig_ctx*, a ``hmac.HMAC`` objext, the ongoing TSIG context, used
+ when validating zone transfers.
+
+ *multi*, a ``bool``, should be set to ``True`` if this message
+ part of a multiple message sequence.
+
+ *first*, a ``bool``, should be set to ``True`` if this message is
+ stand-alone, or the first message in a multi-message sequence.
+
+ *question_only*, a ``bool``. If ``True``, read only up to
+ the end of the question section.
+
+ *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its
+ own RRset.
+
+ *ignore_trailing*, a ``bool``. If ``True``, ignore trailing
+ junk at end of the message.
+
+ Raises ``dns.message.ShortHeader`` if the message is less than 12 octets
+ long.
+
+ Raises ``dns.messaage.TrailingJunk`` if there were octets in the message
+ past the end of the proper DNS message, and *ignore_trailing* is ``False``.
+
+ Raises ``dns.message.BadEDNS`` if an OPT record was in the
+ wrong section, or occurred more than once.
+
+ Raises ``dns.message.BadTSIG`` if a TSIG record was not the last
+ record of the additional data section.
+
+ Returns a ``dns.message.Message``.
+ """
+
+ m = Message(id=0)
+ m.keyring = keyring
+ m.request_mac = request_mac
+ m.xfr = xfr
+ m.origin = origin
+ m.tsig_ctx = tsig_ctx
+ m.multi = multi
+ m.first = first
+
+ reader = _WireReader(wire, m, question_only, one_rr_per_rrset,
+ ignore_trailing)
+ reader.read()
+
+ return m
+
+
+class _TextReader(object):
+
+ """Text format reader.
+
+ tok: the tokenizer.
+ message: The message object being built.
+ updating: Is the message a dynamic update?
+ zone_rdclass: The class of the zone in messages which are
+ DNS dynamic updates.
+ last_name: The most recently read name when building a message object.
+ """
+
+ def __init__(self, text, message):
+ self.message = message
+ self.tok = dns.tokenizer.Tokenizer(text)
+ self.last_name = None
+ self.zone_rdclass = dns.rdataclass.IN
+ self.updating = False
+
+ def _header_line(self, section):
+ """Process one line from the text format header section."""
+
+ token = self.tok.get()
+ what = token.value
+ if what == 'id':
+ self.message.id = self.tok.get_int()
+ elif what == 'flags':
+ while True:
+ token = self.tok.get()
+ if not token.is_identifier():
+ self.tok.unget(token)
+ break
+ self.message.flags = self.message.flags | \
+ dns.flags.from_text(token.value)
+ if dns.opcode.is_update(self.message.flags):
+ self.updating = True
+ elif what == 'edns':
+ self.message.edns = self.tok.get_int()
+ self.message.ednsflags = self.message.ednsflags | \
+ (self.message.edns << 16)
+ elif what == 'eflags':
+ if self.message.edns < 0:
+ self.message.edns = 0
+ while True:
+ token = self.tok.get()
+ if not token.is_identifier():
+ self.tok.unget(token)
+ break
+ self.message.ednsflags = self.message.ednsflags | \
+ dns.flags.edns_from_text(token.value)
+ elif what == 'payload':
+ self.message.payload = self.tok.get_int()
+ if self.message.edns < 0:
+ self.message.edns = 0
+ elif what == 'opcode':
+ text = self.tok.get_string()
+ self.message.flags = self.message.flags | \
+ dns.opcode.to_flags(dns.opcode.from_text(text))
+ elif what == 'rcode':
+ text = self.tok.get_string()
+ self.message.set_rcode(dns.rcode.from_text(text))
+ else:
+ raise UnknownHeaderField
+ self.tok.get_eol()
+
+ def _question_line(self, section):
+ """Process one line from the text format question section."""
+
+ token = self.tok.get(want_leading=True)
+ if not token.is_whitespace():
+ self.last_name = dns.name.from_text(token.value, None)
+ name = self.last_name
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ # Class
+ try:
+ rdclass = dns.rdataclass.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except dns.exception.SyntaxError:
+ raise dns.exception.SyntaxError
+ except Exception:
+ rdclass = dns.rdataclass.IN
+ # Type
+ rdtype = dns.rdatatype.from_text(token.value)
+ self.message.find_rrset(self.message.question, name,
+ rdclass, rdtype, create=True,
+ force_unique=True)
+ if self.updating:
+ self.zone_rdclass = rdclass
+ self.tok.get_eol()
+
+ def _rr_line(self, section):
+ """Process one line from the text format answer, authority, or
+ additional data sections.
+ """
+
+ deleting = None
+ # Name
+ token = self.tok.get(want_leading=True)
+ if not token.is_whitespace():
+ self.last_name = dns.name.from_text(token.value, None)
+ name = self.last_name
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ # TTL
+ try:
+ ttl = int(token.value, 0)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except dns.exception.SyntaxError:
+ raise dns.exception.SyntaxError
+ except Exception:
+ ttl = 0
+ # Class
+ try:
+ rdclass = dns.rdataclass.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ if rdclass == dns.rdataclass.ANY or rdclass == dns.rdataclass.NONE:
+ deleting = rdclass
+ rdclass = self.zone_rdclass
+ except dns.exception.SyntaxError:
+ raise dns.exception.SyntaxError
+ except Exception:
+ rdclass = dns.rdataclass.IN
+ # Type
+ rdtype = dns.rdatatype.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_eol_or_eof():
+ self.tok.unget(token)
+ rd = dns.rdata.from_text(rdclass, rdtype, self.tok, None)
+ covers = rd.covers()
+ else:
+ rd = None
+ covers = dns.rdatatype.NONE
+ rrset = self.message.find_rrset(section, name,
+ rdclass, rdtype, covers,
+ deleting, True, self.updating)
+ if rd is not None:
+ rrset.add(rd, ttl)
+
+ def read(self):
+ """Read a text format DNS message and build a dns.message.Message
+ object."""
+
+ line_method = self._header_line
+ section = None
+ while 1:
+ token = self.tok.get(True, True)
+ if token.is_eol_or_eof():
+ break
+ if token.is_comment():
+ u = token.value.upper()
+ if u == 'HEADER':
+ line_method = self._header_line
+ elif u == 'QUESTION' or u == 'ZONE':
+ line_method = self._question_line
+ section = self.message.question
+ elif u == 'ANSWER' or u == 'PREREQ':
+ line_method = self._rr_line
+ section = self.message.answer
+ elif u == 'AUTHORITY' or u == 'UPDATE':
+ line_method = self._rr_line
+ section = self.message.authority
+ elif u == 'ADDITIONAL':
+ line_method = self._rr_line
+ section = self.message.additional
+ self.tok.get_eol()
+ continue
+ self.tok.unget(token)
+ line_method(section)
+
+
+def from_text(text):
+ """Convert the text format message into a message object.
+
+ *text*, a ``text``, the text format message.
+
+ Raises ``dns.message.UnknownHeaderField`` if a header is unknown.
+
+ Raises ``dns.exception.SyntaxError`` if the text is badly formed.
+
+ Returns a ``dns.message.Message object``
+ """
+
+ # 'text' can also be a file, but we don't publish that fact
+ # since it's an implementation detail. The official file
+ # interface is from_file().
+
+ m = Message()
+
+ reader = _TextReader(text, m)
+ reader.read()
+
+ return m
+
+
def from_file(f):
    """Read the next text format message from the specified file.

    *f*, a ``file`` or ``text``.  If *f* is text, it is treated as the
    pathname of a file to open.

    Raises ``dns.message.UnknownHeaderField`` if a header is unknown.

    Raises ``dns.exception.SyntaxError`` if the text is badly formed.

    Returns a ``dns.message.Message object``
    """

    # When given a pathname, open the file ourselves and close it when
    # done; a file object supplied by the caller stays open.
    want_close = isinstance(f, string_types)
    if want_close:
        f = open(f, 'rU')

    try:
        m = from_text(f)
    finally:
        if want_close:
            f.close()
    return m
+
+
def make_query(qname, rdtype, rdclass=dns.rdataclass.IN, use_edns=None,
               want_dnssec=False, ednsflags=None, payload=None,
               request_payload=None, options=None):
    """Make a query message.

    The query name, type, and class may all be specified either
    as objects of the appropriate type, or as strings.

    The query will have a randomly chosen query id, and its DNS flags
    will be set to dns.flags.RD.

    qname, a ``dns.name.Name`` or ``text``, the query name.

    *rdtype*, an ``int`` or ``text``, the desired rdata type.

    *rdclass*, an ``int`` or ``text``, the desired rdata class; the default
    is class IN.

    *use_edns*, an ``int``, ``bool`` or ``None``.  The EDNS level to use; the
    default is None (no EDNS).
    See the description of dns.message.Message.use_edns() for the possible
    values for use_edns and their meanings.

    *want_dnssec*, a ``bool``.  If ``True``, DNSSEC data is desired.

    *ednsflags*, an ``int``, the EDNS flag values.

    *payload*, an ``int``, is the EDNS sender's payload field, which is the
    maximum size of UDP datagram the sender can handle.  I.e. how big
    a response to this message can be.

    *request_payload*, an ``int``, is the EDNS payload size to use when
    sending this message.  If not specified, defaults to the value of
    *payload*.

    *options*, a list of ``dns.edns.Option`` objects or ``None``, the EDNS
    options.

    Returns a ``dns.message.Message``
    """

    if isinstance(qname, string_types):
        qname = dns.name.from_text(qname)
    if isinstance(rdtype, string_types):
        rdtype = dns.rdatatype.from_text(rdtype)
    if isinstance(rdclass, string_types):
        rdclass = dns.rdataclass.from_text(rdclass)
    m = Message()
    m.flags |= dns.flags.RD
    m.find_rrset(m.question, qname, rdclass, rdtype, create=True,
                 force_unique=True)
    # Only pass keywords on to use_edns if they have been set to a
    # non-None value.  Explicitly setting any EDNS field turns EDNS on
    # (level 0) unless the caller chose a level themselves.
    #
    # BUGFIX: the "if use_edns is None" guards must be nested inside the
    # corresponding "is not None" checks.  Previously they sat at the top
    # level, so use_edns was forced to 0 even when no EDNS argument was
    # given, enabling EDNS unconditionally and contradicting the
    # documented default of "None (no EDNS)".
    kwargs = {}
    if ednsflags is not None:
        kwargs['ednsflags'] = ednsflags
        if use_edns is None:
            use_edns = 0
    if payload is not None:
        kwargs['payload'] = payload
        if use_edns is None:
            use_edns = 0
    if request_payload is not None:
        kwargs['request_payload'] = request_payload
        if use_edns is None:
            use_edns = 0
    if options is not None:
        kwargs['options'] = options
        if use_edns is None:
            use_edns = 0
    kwargs['edns'] = use_edns
    m.use_edns(**kwargs)
    m.want_dnssec(want_dnssec)
    return m
+
+
def make_response(query, recursion_available=False, our_payload=8192,
                  fudge=300):
    """Make a message which is a response for the specified query.
    The message returned is really a response skeleton; it has all
    of the infrastructure required of a response, but none of the
    content.

    The response's question section is a shallow copy of the query's
    question section, so the query's question RRsets should not be
    changed.

    *query*, a ``dns.message.Message``, the query to respond to.

    *recursion_available*, a ``bool``, should RA be set in the response?

    *our_payload*, an ``int``, the payload size to advertise in EDNS
    responses.

    *fudge*, an ``int``, the TSIG time fudge.

    Returns a ``dns.message.Message`` object.
    """

    if query.flags & dns.flags.QR:
        raise dns.exception.FormError('specified query message is not a query')
    # Fresh message carrying the query's id, with QR set and RD copied
    # through from the query.
    r = Message(query.id)
    r.flags = dns.flags.QR | (query.flags & dns.flags.RD)
    if recursion_available:
        r.flags |= dns.flags.RA
    r.set_opcode(query.opcode())
    # Shallow copy: the RRset objects themselves are shared with the query.
    r.question = list(query.question)
    if query.edns >= 0:
        r.use_edns(0, 0, our_payload, query.payload)
    if query.had_tsig:
        # Sign the response with the same key the query used.
        r.use_tsig(query.keyring, query.keyname, fudge, None, 0, b'',
                   query.keyalgorithm)
        r.request_mac = query.mac
    return r
diff --git a/openpype/vendor/python/python_2/dns/name.py b/openpype/vendor/python/python_2/dns/name.py
new file mode 100644
index 0000000000..0bcfd83432
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/name.py
@@ -0,0 +1,994 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Names.
+"""
+
+from io import BytesIO
+import struct
+import sys
+import copy
+import encodings.idna
+try:
+ import idna
+ have_idna_2008 = True
+except ImportError:
+ have_idna_2008 = False
+
+import dns.exception
+import dns.wiredata
+
+from ._compat import long, binary_type, text_type, unichr, maybe_decode
+
+try:
+ maxint = sys.maxint # pylint: disable=sys-max-int
+except AttributeError:
+ maxint = (1 << (8 * struct.calcsize("P"))) // 2 - 1
+
+
+# fullcompare() result values
+
+#: The compared names have no relationship to each other.
+NAMERELN_NONE = 0
+#: the first name is a superdomain of the second.
+NAMERELN_SUPERDOMAIN = 1
+#: The first name is a subdomain of the second.
+NAMERELN_SUBDOMAIN = 2
+#: The compared names are equal.
+NAMERELN_EQUAL = 3
+#: The compared names have a common ancestor.
+NAMERELN_COMMONANCESTOR = 4
+
+
+class EmptyLabel(dns.exception.SyntaxError):
+ """A DNS label is empty."""
+
+
+class BadEscape(dns.exception.SyntaxError):
+ """An escaped code in a text format of DNS name is invalid."""
+
+
+class BadPointer(dns.exception.FormError):
+ """A DNS compression pointer points forward instead of backward."""
+
+
+class BadLabelType(dns.exception.FormError):
+ """The label type in DNS name wire format is unknown."""
+
+
+class NeedAbsoluteNameOrOrigin(dns.exception.DNSException):
+ """An attempt was made to convert a non-absolute name to
+ wire when there was also a non-absolute (or missing) origin."""
+
+
+class NameTooLong(dns.exception.FormError):
+ """A DNS name is > 255 octets long."""
+
+
+class LabelTooLong(dns.exception.SyntaxError):
+ """A DNS label is > 63 octets long."""
+
+
+class AbsoluteConcatenation(dns.exception.DNSException):
+ """An attempt was made to append anything other than the
+ empty name to an absolute DNS name."""
+
+
+class NoParent(dns.exception.DNSException):
+ """An attempt was made to get the parent of the root name
+ or the empty name."""
+
+class NoIDNA2008(dns.exception.DNSException):
+ """IDNA 2008 processing was requested but the idna module is not
+ available."""
+
+
+class IDNAException(dns.exception.DNSException):
+ """IDNA processing raised an exception."""
+
+ supp_kwargs = {'idna_exception'}
+ fmt = "IDNA processing exception: {idna_exception}"
+
+
+class IDNACodec(object):
+    """Abstract base class for IDNA encoder/decoders."""
+
+    def __init__(self):
+        pass
+
+    def encode(self, label):
+        raise NotImplementedError
+
+    def decode(self, label):
+        # We do not apply any IDNA policy on decode; we just punycode-decode
+        downcased = label.lower()
+        if downcased.startswith(b'xn--'):
+            try:
+                label = downcased[4:].decode('punycode')
+            except Exception as e:
+                raise IDNAException(idna_exception=e)
+        else:
+            label = maybe_decode(label)
+        return _escapify(label, True)
+
+
+class IDNA2003Codec(IDNACodec):
+ """IDNA 2003 encoder/decoder."""
+
+ def __init__(self, strict_decode=False):
+ """Initialize the IDNA 2003 encoder/decoder.
+
+ *strict_decode* is a ``bool``. If `True`, then IDNA2003 checking
+ is done when decoding. This can cause failures if the name
+ was encoded with IDNA2008. The default is `False`.
+ """
+
+ super(IDNA2003Codec, self).__init__()
+ self.strict_decode = strict_decode
+
+ def encode(self, label):
+ """Encode *label*."""
+
+ if label == '':
+ return b''
+ try:
+ return encodings.idna.ToASCII(label)
+ except UnicodeError:
+ raise LabelTooLong
+
+ def decode(self, label):
+ """Decode *label*."""
+ if not self.strict_decode:
+ return super(IDNA2003Codec, self).decode(label)
+ if label == b'':
+ return u''
+ try:
+ return _escapify(encodings.idna.ToUnicode(label), True)
+ except Exception as e:
+ raise IDNAException(idna_exception=e)
+
+
+class IDNA2008Codec(IDNACodec):
+ """IDNA 2008 encoder/decoder.
+
+ *uts_46* is a ``bool``. If True, apply Unicode IDNA
+ compatibility processing as described in Unicode Technical
+ Standard #46 (http://unicode.org/reports/tr46/).
+ If False, do not apply the mapping. The default is False.
+
+ *transitional* is a ``bool``: If True, use the
+ "transitional" mode described in Unicode Technical Standard
+ #46. The default is False.
+
+ *allow_pure_ascii* is a ``bool``. If True, then a label which
+ consists of only ASCII characters is allowed. This is less
+ strict than regular IDNA 2008, but is also necessary for mixed
+ names, e.g. a name with starting with "_sip._tcp." and ending
+ in an IDN suffix which would otherwise be disallowed. The
+ default is False.
+
+ *strict_decode* is a ``bool``: If True, then IDNA2008 checking
+ is done when decoding. This can cause failures if the name
+ was encoded with IDNA2003. The default is False.
+ """
+
+ def __init__(self, uts_46=False, transitional=False,
+ allow_pure_ascii=False, strict_decode=False):
+ """Initialize the IDNA 2008 encoder/decoder."""
+ super(IDNA2008Codec, self).__init__()
+ self.uts_46 = uts_46
+ self.transitional = transitional
+ self.allow_pure_ascii = allow_pure_ascii
+ self.strict_decode = strict_decode
+
+ def is_all_ascii(self, label):
+ for c in label:
+ if ord(c) > 0x7f:
+ return False
+ return True
+
+ def encode(self, label):
+ if label == '':
+ return b''
+ if self.allow_pure_ascii and self.is_all_ascii(label):
+ return label.encode('ascii')
+ if not have_idna_2008:
+ raise NoIDNA2008
+ try:
+ if self.uts_46:
+ label = idna.uts46_remap(label, False, self.transitional)
+ return idna.alabel(label)
+ except idna.IDNAError as e:
+ raise IDNAException(idna_exception=e)
+
+ def decode(self, label):
+ if not self.strict_decode:
+ return super(IDNA2008Codec, self).decode(label)
+ if label == b'':
+ return u''
+ if not have_idna_2008:
+ raise NoIDNA2008
+ try:
+ if self.uts_46:
+ label = idna.uts46_remap(label, False, False)
+ return _escapify(idna.ulabel(label), True)
+ except idna.IDNAError as e:
+ raise IDNAException(idna_exception=e)
+
+_escaped = bytearray(b'"().;\\@$')
+
+IDNA_2003_Practical = IDNA2003Codec(False)
+IDNA_2003_Strict = IDNA2003Codec(True)
+IDNA_2003 = IDNA_2003_Practical
+IDNA_2008_Practical = IDNA2008Codec(True, False, True, False)
+IDNA_2008_UTS_46 = IDNA2008Codec(True, False, False, False)
+IDNA_2008_Strict = IDNA2008Codec(False, False, False, True)
+IDNA_2008_Transitional = IDNA2008Codec(True, True, False, False)
+IDNA_2008 = IDNA_2008_Practical
+
+def _escapify(label, unicode_mode=False):
+    """Escape the characters in *label* which need it.
+    *unicode_mode*, a ``bool``: if True, escape only special and
+    whitespace (<= 0x20) characters, leaving other text unescaped.
+    Returns a ``text`` when *unicode_mode* is True, otherwise a
+    ``binary``."""
+    if not unicode_mode:
+        text = ''
+        if isinstance(label, text_type):
+            label = label.encode()
+        for c in bytearray(label):
+            if c in _escaped:
+                text += '\\' + chr(c)
+            elif c > 0x20 and c < 0x7F:
+                text += chr(c)
+            else:
+                text += '\\%03d' % c
+        return text.encode()
+
+    text = u''
+    if isinstance(label, binary_type):
+        label = label.decode()
+    for c in label:
+        if c > u'\x20' and c < u'\x7f':
+            text += c
+        else:
+            if c >= u'\x7f':
+                text += c
+            else:
+                text += u'\\%03d' % ord(c)
+    return text
+
+def _validate_labels(labels):
+ """Check for empty labels in the middle of a label sequence,
+ labels that are too long, and for too many labels.
+
+ Raises ``dns.name.NameTooLong`` if the name as a whole is too long.
+
+ Raises ``dns.name.EmptyLabel`` if a label is empty (i.e. the root
+ label) and appears in a position other than the end of the label
+ sequence
+
+ """
+
+ l = len(labels)
+ total = 0
+ i = -1
+ j = 0
+ for label in labels:
+ ll = len(label)
+ total += ll + 1
+ if ll > 63:
+ raise LabelTooLong
+ if i < 0 and label == b'':
+ i = j
+ j += 1
+ if total > 255:
+ raise NameTooLong
+ if i >= 0 and i != l - 1:
+ raise EmptyLabel
+
+
+def _maybe_convert_to_binary(label):
+ """If label is ``text``, convert it to ``binary``. If it is already
+ ``binary`` just return it.
+
+ """
+
+ if isinstance(label, binary_type):
+ return label
+ if isinstance(label, text_type):
+ return label.encode()
+ raise ValueError
+
+
+class Name(object):
+
+ """A DNS name.
+
+ The dns.name.Name class represents a DNS name as a tuple of
+ labels. Each label is a `binary` in DNS wire format. Instances
+ of the class are immutable.
+ """
+
+ __slots__ = ['labels']
+
+ def __init__(self, labels):
+ """*labels* is any iterable whose values are ``text`` or ``binary``.
+ """
+
+ labels = [_maybe_convert_to_binary(x) for x in labels]
+ super(Name, self).__setattr__('labels', tuple(labels))
+ _validate_labels(self.labels)
+
+ def __setattr__(self, name, value):
+ # Names are immutable
+ raise TypeError("object doesn't support attribute assignment")
+
+ def __copy__(self):
+ return Name(self.labels)
+
+ def __deepcopy__(self, memo):
+ return Name(copy.deepcopy(self.labels, memo))
+
+ def __getstate__(self):
+ # Names can be pickled
+ return {'labels': self.labels}
+
+ def __setstate__(self, state):
+ super(Name, self).__setattr__('labels', state['labels'])
+ _validate_labels(self.labels)
+
+ def is_absolute(self):
+ """Is the most significant label of this name the root label?
+
+ Returns a ``bool``.
+ """
+
+ return len(self.labels) > 0 and self.labels[-1] == b''
+
+ def is_wild(self):
+ """Is this name wild? (I.e. Is the least significant label '*'?)
+
+ Returns a ``bool``.
+ """
+
+ return len(self.labels) > 0 and self.labels[0] == b'*'
+
+ def __hash__(self):
+ """Return a case-insensitive hash of the name.
+
+ Returns an ``int``.
+ """
+
+ h = long(0)
+ for label in self.labels:
+ for c in bytearray(label.lower()):
+ h += (h << 3) + c
+ return int(h % maxint)
+
+ def fullcompare(self, other):
+ """Compare two names, returning a 3-tuple
+ ``(relation, order, nlabels)``.
+
+ *relation* describes the relation ship between the names,
+ and is one of: ``dns.name.NAMERELN_NONE``,
+ ``dns.name.NAMERELN_SUPERDOMAIN``, ``dns.name.NAMERELN_SUBDOMAIN``,
+ ``dns.name.NAMERELN_EQUAL``, or ``dns.name.NAMERELN_COMMONANCESTOR``.
+
+ *order* is < 0 if *self* < *other*, > 0 if *self* > *other*, and ==
+ 0 if *self* == *other*. A relative name is always less than an
+ absolute name. If both names have the same relativity, then
+ the DNSSEC order relation is used to order them.
+
+ *nlabels* is the number of significant labels that the two names
+ have in common.
+
+ Here are some examples. Names ending in "." are absolute names,
+ those not ending in "." are relative names.
+
+ ============= ============= =========== ===== =======
+ self other relation order nlabels
+ ============= ============= =========== ===== =======
+ www.example. www.example. equal 0 3
+ www.example. example. subdomain > 0 2
+ example. www.example. superdomain < 0 2
+ example1.com. example2.com. common anc. < 0 2
+ example1 example2. none < 0 0
+ example1. example2 none > 0 0
+ ============= ============= =========== ===== =======
+ """
+
+ sabs = self.is_absolute()
+ oabs = other.is_absolute()
+ if sabs != oabs:
+ if sabs:
+ return (NAMERELN_NONE, 1, 0)
+ else:
+ return (NAMERELN_NONE, -1, 0)
+ l1 = len(self.labels)
+ l2 = len(other.labels)
+ ldiff = l1 - l2
+ if ldiff < 0:
+ l = l1
+ else:
+ l = l2
+
+ order = 0
+ nlabels = 0
+ namereln = NAMERELN_NONE
+ while l > 0:
+ l -= 1
+ l1 -= 1
+ l2 -= 1
+ label1 = self.labels[l1].lower()
+ label2 = other.labels[l2].lower()
+ if label1 < label2:
+ order = -1
+ if nlabels > 0:
+ namereln = NAMERELN_COMMONANCESTOR
+ return (namereln, order, nlabels)
+ elif label1 > label2:
+ order = 1
+ if nlabels > 0:
+ namereln = NAMERELN_COMMONANCESTOR
+ return (namereln, order, nlabels)
+ nlabels += 1
+ order = ldiff
+ if ldiff < 0:
+ namereln = NAMERELN_SUPERDOMAIN
+ elif ldiff > 0:
+ namereln = NAMERELN_SUBDOMAIN
+ else:
+ namereln = NAMERELN_EQUAL
+ return (namereln, order, nlabels)
+
+ def is_subdomain(self, other):
+ """Is self a subdomain of other?
+
+ Note that the notion of subdomain includes equality, e.g.
+ "dnpython.org" is a subdomain of itself.
+
+ Returns a ``bool``.
+ """
+
+ (nr, o, nl) = self.fullcompare(other)
+ if nr == NAMERELN_SUBDOMAIN or nr == NAMERELN_EQUAL:
+ return True
+ return False
+
+ def is_superdomain(self, other):
+ """Is self a superdomain of other?
+
+ Note that the notion of superdomain includes equality, e.g.
+ "dnpython.org" is a superdomain of itself.
+
+ Returns a ``bool``.
+ """
+
+ (nr, o, nl) = self.fullcompare(other)
+ if nr == NAMERELN_SUPERDOMAIN or nr == NAMERELN_EQUAL:
+ return True
+ return False
+
+ def canonicalize(self):
+ """Return a name which is equal to the current name, but is in
+ DNSSEC canonical form.
+ """
+
+ return Name([x.lower() for x in self.labels])
+
+ def __eq__(self, other):
+ if isinstance(other, Name):
+ return self.fullcompare(other)[1] == 0
+ else:
+ return False
+
+ def __ne__(self, other):
+ if isinstance(other, Name):
+ return self.fullcompare(other)[1] != 0
+ else:
+ return True
+
+ def __lt__(self, other):
+ if isinstance(other, Name):
+ return self.fullcompare(other)[1] < 0
+ else:
+ return NotImplemented
+
+ def __le__(self, other):
+ if isinstance(other, Name):
+ return self.fullcompare(other)[1] <= 0
+ else:
+ return NotImplemented
+
+ def __ge__(self, other):
+ if isinstance(other, Name):
+ return self.fullcompare(other)[1] >= 0
+ else:
+ return NotImplemented
+
+ def __gt__(self, other):
+ if isinstance(other, Name):
+ return self.fullcompare(other)[1] > 0
+ else:
+ return NotImplemented
+
+ def __repr__(self):
+ return ''
+
+ def __str__(self):
+ return self.to_text(False)
+
+ def to_text(self, omit_final_dot=False):
+ """Convert name to DNS text format.
+
+ *omit_final_dot* is a ``bool``. If True, don't emit the final
+ dot (denoting the root label) for absolute names. The default
+ is False.
+
+ Returns a ``text``.
+ """
+
+ if len(self.labels) == 0:
+ return maybe_decode(b'@')
+ if len(self.labels) == 1 and self.labels[0] == b'':
+ return maybe_decode(b'.')
+ if omit_final_dot and self.is_absolute():
+ l = self.labels[:-1]
+ else:
+ l = self.labels
+ s = b'.'.join(map(_escapify, l))
+ return maybe_decode(s)
+
+ def to_unicode(self, omit_final_dot=False, idna_codec=None):
+ """Convert name to Unicode text format.
+
+ IDN ACE labels are converted to Unicode.
+
+ *omit_final_dot* is a ``bool``. If True, don't emit the final
+ dot (denoting the root label) for absolute names. The default
+ is False.
+ *idna_codec* specifies the IDNA encoder/decoder. If None, the
+ dns.name.IDNA_2003_Practical encoder/decoder is used.
+ The IDNA_2003_Practical decoder does
+ not impose any policy, it just decodes punycode, so if you
+ don't want checking for compliance, you can use this decoder
+ for IDNA2008 as well.
+
+ Returns a ``text``.
+ """
+
+ if len(self.labels) == 0:
+ return u'@'
+ if len(self.labels) == 1 and self.labels[0] == b'':
+ return u'.'
+ if omit_final_dot and self.is_absolute():
+ l = self.labels[:-1]
+ else:
+ l = self.labels
+ if idna_codec is None:
+ idna_codec = IDNA_2003_Practical
+ return u'.'.join([idna_codec.decode(x) for x in l])
+
+ def to_digestable(self, origin=None):
+ """Convert name to a format suitable for digesting in hashes.
+
+ The name is canonicalized and converted to uncompressed wire
+ format. All names in wire format are absolute. If the name
+ is a relative name, then an origin must be supplied.
+
+ *origin* is a ``dns.name.Name`` or ``None``. If the name is
+ relative and origin is not ``None``, then origin will be appended
+ to the name.
+
+ Raises ``dns.name.NeedAbsoluteNameOrOrigin`` if the name is
+ relative and no origin was provided.
+
+ Returns a ``binary``.
+ """
+
+ if not self.is_absolute():
+ if origin is None or not origin.is_absolute():
+ raise NeedAbsoluteNameOrOrigin
+ labels = list(self.labels)
+ labels.extend(list(origin.labels))
+ else:
+ labels = self.labels
+ dlabels = [struct.pack('!B%ds' % len(x), len(x), x.lower())
+ for x in labels]
+ return b''.join(dlabels)
+
+ def to_wire(self, file=None, compress=None, origin=None):
+ """Convert name to wire format, possibly compressing it.
+
+ *file* is the file where the name is emitted (typically a
+ BytesIO file). If ``None`` (the default), a ``binary``
+ containing the wire name will be returned.
+
+ *compress*, a ``dict``, is the compression table to use. If
+ ``None`` (the default), names will not be compressed.
+
+ *origin* is a ``dns.name.Name`` or ``None``. If the name is
+ relative and origin is not ``None``, then *origin* will be appended
+ to it.
+
+ Raises ``dns.name.NeedAbsoluteNameOrOrigin`` if the name is
+ relative and no origin was provided.
+
+ Returns a ``binary`` or ``None``.
+ """
+
+ if file is None:
+ file = BytesIO()
+ want_return = True
+ else:
+ want_return = False
+
+ if not self.is_absolute():
+ if origin is None or not origin.is_absolute():
+ raise NeedAbsoluteNameOrOrigin
+ labels = list(self.labels)
+ labels.extend(list(origin.labels))
+ else:
+ labels = self.labels
+ i = 0
+ for label in labels:
+ n = Name(labels[i:])
+ i += 1
+ if compress is not None:
+ pos = compress.get(n)
+ else:
+ pos = None
+ if pos is not None:
+ value = 0xc000 + pos
+ s = struct.pack('!H', value)
+ file.write(s)
+ break
+ else:
+ if compress is not None and len(n) > 1:
+ pos = file.tell()
+ if pos <= 0x3fff:
+ compress[n] = pos
+ l = len(label)
+ file.write(struct.pack('!B', l))
+ if l > 0:
+ file.write(label)
+ if want_return:
+ return file.getvalue()
+
+ def __len__(self):
+ """The length of the name (in labels).
+
+ Returns an ``int``.
+ """
+
+ return len(self.labels)
+
+ def __getitem__(self, index):
+ return self.labels[index]
+
+ def __add__(self, other):
+ return self.concatenate(other)
+
+ def __sub__(self, other):
+ return self.relativize(other)
+
+ def split(self, depth):
+ """Split a name into a prefix and suffix names at the specified depth.
+
+ *depth* is an ``int`` specifying the number of labels in the suffix
+
+ Raises ``ValueError`` if *depth* was not >= 0 and <= the length of the
+ name.
+
+ Returns the tuple ``(prefix, suffix)``.
+ """
+
+ l = len(self.labels)
+ if depth == 0:
+ return (self, dns.name.empty)
+ elif depth == l:
+ return (dns.name.empty, self)
+ elif depth < 0 or depth > l:
+ raise ValueError(
+ 'depth must be >= 0 and <= the length of the name')
+ return (Name(self[: -depth]), Name(self[-depth:]))
+
+ def concatenate(self, other):
+ """Return a new name which is the concatenation of self and other.
+
+ Raises ``dns.name.AbsoluteConcatenation`` if the name is
+ absolute and *other* is not the empty name.
+
+ Returns a ``dns.name.Name``.
+ """
+
+ if self.is_absolute() and len(other) > 0:
+ raise AbsoluteConcatenation
+ labels = list(self.labels)
+ labels.extend(list(other.labels))
+ return Name(labels)
+
+ def relativize(self, origin):
+ """If the name is a subdomain of *origin*, return a new name which is
+ the name relative to origin. Otherwise return the name.
+
+ For example, relativizing ``www.dnspython.org.`` to origin
+ ``dnspython.org.`` returns the name ``www``. Relativizing ``example.``
+ to origin ``dnspython.org.`` returns ``example.``.
+
+ Returns a ``dns.name.Name``.
+ """
+
+ if origin is not None and self.is_subdomain(origin):
+ return Name(self[: -len(origin)])
+ else:
+ return self
+
+ def derelativize(self, origin):
+ """If the name is a relative name, return a new name which is the
+ concatenation of the name and origin. Otherwise return the name.
+
+ For example, derelativizing ``www`` to origin ``dnspython.org.``
+ returns the name ``www.dnspython.org.``. Derelativizing ``example.``
+ to origin ``dnspython.org.`` returns ``example.``.
+
+ Returns a ``dns.name.Name``.
+ """
+
+ if not self.is_absolute():
+ return self.concatenate(origin)
+ else:
+ return self
+
+ def choose_relativity(self, origin=None, relativize=True):
+ """Return a name with the relativity desired by the caller.
+
+ If *origin* is ``None``, then the name is returned.
+ Otherwise, if *relativize* is ``True`` the name is
+ relativized, and if *relativize* is ``False`` the name is
+ derelativized.
+
+ Returns a ``dns.name.Name``.
+ """
+
+ if origin:
+ if relativize:
+ return self.relativize(origin)
+ else:
+ return self.derelativize(origin)
+ else:
+ return self
+
+ def parent(self):
+ """Return the parent of the name.
+
+ For example, the parent of ``www.dnspython.org.`` is ``dnspython.org``.
+
+ Raises ``dns.name.NoParent`` if the name is either the root name or the
+ empty name, and thus has no parent.
+
+ Returns a ``dns.name.Name``.
+ """
+
+ if self == root or self == empty:
+ raise NoParent
+ return Name(self.labels[1:])
+
+#: The root name, '.'
+root = Name([b''])
+
+#: The empty name.
+empty = Name([])
+
+def from_unicode(text, origin=root, idna_codec=None):
+ """Convert unicode text into a Name object.
+
+ Labels are encoded in IDN ACE form according to rules specified by
+ the IDNA codec.
+
+ *text*, a ``text``, is the text to convert into a name.
+
+ *origin*, a ``dns.name.Name``, specifies the origin to
+ append to non-absolute names. The default is the root name.
+
+ *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
+ encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
+ is used.
+
+ Returns a ``dns.name.Name``.
+ """
+
+ if not isinstance(text, text_type):
+ raise ValueError("input to from_unicode() must be a unicode string")
+ if not (origin is None or isinstance(origin, Name)):
+ raise ValueError("origin must be a Name or None")
+ labels = []
+ label = u''
+ escaping = False
+ edigits = 0
+ total = 0
+ if idna_codec is None:
+ idna_codec = IDNA_2003
+ if text == u'@':
+ text = u''
+ if text:
+ if text == u'.':
+ return Name([b'']) # no Unicode "u" on this constant!
+ for c in text:
+ if escaping:
+ if edigits == 0:
+ if c.isdigit():
+ total = int(c)
+ edigits += 1
+ else:
+ label += c
+ escaping = False
+ else:
+ if not c.isdigit():
+ raise BadEscape
+ total *= 10
+ total += int(c)
+ edigits += 1
+ if edigits == 3:
+ escaping = False
+ label += unichr(total)
+ elif c in [u'.', u'\u3002', u'\uff0e', u'\uff61']:
+ if len(label) == 0:
+ raise EmptyLabel
+ labels.append(idna_codec.encode(label))
+ label = u''
+ elif c == u'\\':
+ escaping = True
+ edigits = 0
+ total = 0
+ else:
+ label += c
+ if escaping:
+ raise BadEscape
+ if len(label) > 0:
+ labels.append(idna_codec.encode(label))
+ else:
+ labels.append(b'')
+
+ if (len(labels) == 0 or labels[-1] != b'') and origin is not None:
+ labels.extend(list(origin.labels))
+ return Name(labels)
+
+
+def from_text(text, origin=root, idna_codec=None):
+ """Convert text into a Name object.
+
+ *text*, a ``text``, is the text to convert into a name.
+
+ *origin*, a ``dns.name.Name``, specifies the origin to
+ append to non-absolute names. The default is the root name.
+
+ *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
+ encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
+ is used.
+
+ Returns a ``dns.name.Name``.
+ """
+
+ if isinstance(text, text_type):
+ return from_unicode(text, origin, idna_codec)
+ if not isinstance(text, binary_type):
+ raise ValueError("input to from_text() must be a string")
+ if not (origin is None or isinstance(origin, Name)):
+ raise ValueError("origin must be a Name or None")
+ labels = []
+ label = b''
+ escaping = False
+ edigits = 0
+ total = 0
+ if text == b'@':
+ text = b''
+ if text:
+ if text == b'.':
+ return Name([b''])
+ for c in bytearray(text):
+ byte_ = struct.pack('!B', c)
+ if escaping:
+ if edigits == 0:
+ if byte_.isdigit():
+ total = int(byte_)
+ edigits += 1
+ else:
+ label += byte_
+ escaping = False
+ else:
+ if not byte_.isdigit():
+ raise BadEscape
+ total *= 10
+ total += int(byte_)
+ edigits += 1
+ if edigits == 3:
+ escaping = False
+ label += struct.pack('!B', total)
+ elif byte_ == b'.':
+ if len(label) == 0:
+ raise EmptyLabel
+ labels.append(label)
+ label = b''
+ elif byte_ == b'\\':
+ escaping = True
+ edigits = 0
+ total = 0
+ else:
+ label += byte_
+ if escaping:
+ raise BadEscape
+ if len(label) > 0:
+ labels.append(label)
+ else:
+ labels.append(b'')
+ if (len(labels) == 0 or labels[-1] != b'') and origin is not None:
+ labels.extend(list(origin.labels))
+ return Name(labels)
+
+
+def from_wire(message, current):
+ """Convert possibly compressed wire format into a Name.
+
+ *message* is a ``binary`` containing an entire DNS message in DNS
+ wire form.
+
+ *current*, an ``int``, is the offset of the beginning of the name
+ from the start of the message
+
+ Raises ``dns.name.BadPointer`` if a compression pointer did not
+ point backwards in the message.
+
+ Raises ``dns.name.BadLabelType`` if an invalid label type was encountered.
+
+ Returns a ``(dns.name.Name, int)`` tuple consisting of the name
+ that was read and the number of bytes of the wire format message
+ which were consumed reading it.
+ """
+
+ if not isinstance(message, binary_type):
+ raise ValueError("input to from_wire() must be a byte string")
+ message = dns.wiredata.maybe_wrap(message)
+ labels = []
+ biggest_pointer = current
+ hops = 0
+ count = message[current]
+ current += 1
+ cused = 1
+ while count != 0:
+ if count < 64:
+ labels.append(message[current: current + count].unwrap())
+ current += count
+ if hops == 0:
+ cused += count
+ elif count >= 192:
+ current = (count & 0x3f) * 256 + message[current]
+ if hops == 0:
+ cused += 1
+ if current >= biggest_pointer:
+ raise BadPointer
+ biggest_pointer = current
+ hops += 1
+ else:
+ raise BadLabelType
+ count = message[current]
+ current += 1
+ if hops == 0:
+ cused += 1
+ labels.append('')
+ return (Name(labels), cused)
diff --git a/openpype/vendor/python/python_2/dns/namedict.py b/openpype/vendor/python/python_2/dns/namedict.py
new file mode 100644
index 0000000000..37a13104e6
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/namedict.py
@@ -0,0 +1,108 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+# Copyright (C) 2016 Coresec Systems AB
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND CORESEC SYSTEMS AB DISCLAIMS ALL
+# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL CORESEC
+# SYSTEMS AB BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS name dictionary"""
+
+import collections
+import dns.name
+from ._compat import xrange
+
+
+class NameDict(collections.MutableMapping):
+ """A dictionary whose keys are dns.name.Name objects.
+
+ In addition to being like a regular Python dictionary, this
+ dictionary can also get the deepest match for a given key.
+ """
+
+ __slots__ = ["max_depth", "max_depth_items", "__store"]
+
+ def __init__(self, *args, **kwargs):
+ super(NameDict, self).__init__()
+ self.__store = dict()
+ #: the maximum depth of the keys that have ever been added
+ self.max_depth = 0
+ #: the number of items of maximum depth
+ self.max_depth_items = 0
+ self.update(dict(*args, **kwargs))
+
+ def __update_max_depth(self, key):
+ if len(key) == self.max_depth:
+ self.max_depth_items = self.max_depth_items + 1
+ elif len(key) > self.max_depth:
+ self.max_depth = len(key)
+ self.max_depth_items = 1
+
+ def __getitem__(self, key):
+ return self.__store[key]
+
+ def __setitem__(self, key, value):
+ if not isinstance(key, dns.name.Name):
+ raise ValueError('NameDict key must be a name')
+ self.__store[key] = value
+ self.__update_max_depth(key)
+
+ def __delitem__(self, key):
+ value = self.__store.pop(key)
+ if len(value) == self.max_depth:
+ self.max_depth_items = self.max_depth_items - 1
+ if self.max_depth_items == 0:
+ self.max_depth = 0
+ for k in self.__store:
+ self.__update_max_depth(k)
+
+ def __iter__(self):
+ return iter(self.__store)
+
+ def __len__(self):
+ return len(self.__store)
+
+ def has_key(self, key):
+ return key in self.__store
+
+    def get_deepest_match(self, name):
+        """Find the deepest match to *name* in the dictionary.
+
+        The deepest match is the longest name in the dictionary which is
+        a superdomain of *name*.  Note that *superdomain* includes matching
+        *name* itself.
+
+        *name*, a ``dns.name.Name``, the name to find.
+
+        Returns a ``(key, value)`` where *key* is the deepest
+        ``dns.name.Name``, and *value* is the value associated with *key*.
+        """
+
+        depth = len(name)
+        if depth > self.max_depth:
+            depth = self.max_depth
+        for i in xrange(-depth, 0):
+            n = dns.name.Name(name[i:])
+            if n in self:
+                return (n, self[n])
+        v = self[dns.name.empty]
+        return (dns.name.empty, v)
diff --git a/openpype/vendor/python/python_2/dns/node.py b/openpype/vendor/python/python_2/dns/node.py
new file mode 100644
index 0000000000..8a7f19f523
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/node.py
@@ -0,0 +1,182 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS nodes. A node is a set of rdatasets."""
+
+from io import StringIO
+
+import dns.rdataset
+import dns.rdatatype
+import dns.renderer
+
+
+class Node(object):
+
+ """A Node is a set of rdatasets."""
+
+ __slots__ = ['rdatasets']
+
+ def __init__(self):
+ #: the set of rdatsets, represented as a list.
+ self.rdatasets = []
+
+ def to_text(self, name, **kw):
+ """Convert a node to text format.
+
+ Each rdataset at the node is printed. Any keyword arguments
+ to this method are passed on to the rdataset's to_text() method.
+
+ *name*, a ``dns.name.Name`` or ``text``, the owner name of the rdatasets.
+
+ Returns a ``text``.
+ """
+
+ s = StringIO()
+ for rds in self.rdatasets:
+ if len(rds) > 0:
+ s.write(rds.to_text(name, **kw))
+ s.write(u'\n')
+ return s.getvalue()[:-1]
+
+ def __repr__(self):
+ return ''
+
+ def __eq__(self, other):
+ #
+ # This is inefficient. Good thing we don't need to do it much.
+ #
+ for rd in self.rdatasets:
+ if rd not in other.rdatasets:
+ return False
+ for rd in other.rdatasets:
+ if rd not in self.rdatasets:
+ return False
+ return True
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __len__(self):
+ return len(self.rdatasets)
+
+ def __iter__(self):
+ return iter(self.rdatasets)
+
+ def find_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
+ create=False):
+ """Find an rdataset matching the specified properties in the
+ current node.
+
+ *rdclass*, an ``int``, the class of the rdataset.
+
+ *rdtype*, an ``int``, the type of the rdataset.
+
+ *covers*, an ``int``, the covered type. Usually this value is
+ dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
+ dns.rdatatype.RRSIG, then the covers value will be the rdata
+ type the SIG/RRSIG covers. The library treats the SIG and RRSIG
+ types as if they were a family of
+ types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much
+ easier to work with than if RRSIGs covering different rdata
+ types were aggregated into a single RRSIG rdataset.
+
+ *create*, a ``bool``. If True, create the rdataset if it is not found.
+
+ Raises ``KeyError`` if an rdataset of the desired type and class does
+ not exist and *create* is not ``True``.
+
+ Returns a ``dns.rdataset.Rdataset``.
+ """
+
+ for rds in self.rdatasets:
+ if rds.match(rdclass, rdtype, covers):
+ return rds
+ if not create:
+ raise KeyError
+ rds = dns.rdataset.Rdataset(rdclass, rdtype)
+ self.rdatasets.append(rds)
+ return rds
+
+ def get_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
+ create=False):
+ """Get an rdataset matching the specified properties in the
+ current node.
+
+ None is returned if an rdataset of the specified type and
+ class does not exist and *create* is not ``True``.
+
+ *rdclass*, an ``int``, the class of the rdataset.
+
+ *rdtype*, an ``int``, the type of the rdataset.
+
+ *covers*, an ``int``, the covered type. Usually this value is
+ dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
+ dns.rdatatype.RRSIG, then the covers value will be the rdata
+ type the SIG/RRSIG covers. The library treats the SIG and RRSIG
+ types as if they were a family of
+ types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much
+ easier to work with than if RRSIGs covering different rdata
+ types were aggregated into a single RRSIG rdataset.
+
+ *create*, a ``bool``. If True, create the rdataset if it is not found.
+
+ Returns a ``dns.rdataset.Rdataset`` or ``None``.
+ """
+
+ try:
+ rds = self.find_rdataset(rdclass, rdtype, covers, create)
+ except KeyError:
+ rds = None
+ return rds
+
+ def delete_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE):
+ """Delete the rdataset matching the specified properties in the
+ current node.
+
+ If a matching rdataset does not exist, it is not an error.
+
+ *rdclass*, an ``int``, the class of the rdataset.
+
+ *rdtype*, an ``int``, the type of the rdataset.
+
+ *covers*, an ``int``, the covered type.
+ """
+
+ rds = self.get_rdataset(rdclass, rdtype, covers)
+ if rds is not None:
+ self.rdatasets.remove(rds)
+
+ def replace_rdataset(self, replacement):
+ """Replace an rdataset.
+
+ It is not an error if there is no rdataset matching *replacement*.
+
+ Ownership of the *replacement* object is transferred to the node;
+ in other words, this method does not store a copy of *replacement*
+ at the node, it stores *replacement* itself.
+
+ *replacement*, a ``dns.rdataset.Rdataset``.
+
+ Raises ``ValueError`` if *replacement* is not a
+ ``dns.rdataset.Rdataset``.
+ """
+
+ if not isinstance(replacement, dns.rdataset.Rdataset):
+ raise ValueError('replacement is not an rdataset')
+ self.delete_rdataset(replacement.rdclass, replacement.rdtype,
+ replacement.covers)
+ self.rdatasets.append(replacement)
diff --git a/openpype/vendor/python/python_2/dns/opcode.py b/openpype/vendor/python/python_2/dns/opcode.py
new file mode 100644
index 0000000000..c0735ba47b
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/opcode.py
@@ -0,0 +1,119 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Opcodes."""
+
+import dns.exception
+
+#: Query
+QUERY = 0
+#: Inverse Query (historical)
+IQUERY = 1
+#: Server Status (unspecified and unimplemented anywhere)
+STATUS = 2
+#: Notify
+NOTIFY = 4
+#: Dynamic Update
+UPDATE = 5
+
+_by_text = {
+ 'QUERY': QUERY,
+ 'IQUERY': IQUERY,
+ 'STATUS': STATUS,
+ 'NOTIFY': NOTIFY,
+ 'UPDATE': UPDATE
+}
+
+# We construct the inverse mapping programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mapping not to be a true inverse.
+
+_by_value = {y: x for x, y in _by_text.items()}
+
+
+class UnknownOpcode(dns.exception.DNSException):
+ """An DNS opcode is unknown."""
+
+
+def from_text(text):
+ """Convert text into an opcode.
+
+ *text*, a ``text``, the textual opcode
+
+ Raises ``dns.opcode.UnknownOpcode`` if the opcode is unknown.
+
+ Returns an ``int``.
+ """
+
+ if text.isdigit():
+ value = int(text)
+ if value >= 0 and value <= 15:
+ return value
+ value = _by_text.get(text.upper())
+ if value is None:
+ raise UnknownOpcode
+ return value
+
+
+def from_flags(flags):
+ """Extract an opcode from DNS message flags.
+
+ *flags*, an ``int``, the DNS flags.
+
+ Returns an ``int``.
+ """
+
+ return (flags & 0x7800) >> 11
+
+
+def to_flags(value):
+ """Convert an opcode to a value suitable for ORing into DNS message
+ flags.
+
+ *value*, an ``int``, the DNS opcode value.
+
+ Returns an ``int``.
+ """
+
+ return (value << 11) & 0x7800
+
+
+def to_text(value):
+ """Convert an opcode to text.
+
+    *value*, an ``int``, the opcode value.
+
+ Raises ``dns.opcode.UnknownOpcode`` if the opcode is unknown.
+
+ Returns a ``text``.
+ """
+
+ text = _by_value.get(value)
+ if text is None:
+ text = str(value)
+ return text
+
+
+def is_update(flags):
+ """Is the opcode in flags UPDATE?
+
+ *flags*, an ``int``, the DNS message flags.
+
+ Returns a ``bool``.
+ """
+
+ return from_flags(flags) == UPDATE
diff --git a/openpype/vendor/python/python_2/dns/py.typed b/openpype/vendor/python/python_2/dns/py.typed
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/openpype/vendor/python/python_2/dns/query.py b/openpype/vendor/python/python_2/dns/query.py
new file mode 100644
index 0000000000..c0c517ccd4
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/query.py
@@ -0,0 +1,683 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Talk to a DNS server."""
+
+from __future__ import generators
+
+import errno
+import select
+import socket
+import struct
+import sys
+import time
+
+import dns.exception
+import dns.inet
+import dns.name
+import dns.message
+import dns.rcode
+import dns.rdataclass
+import dns.rdatatype
+from ._compat import long, string_types, PY3
+
+if PY3:
+ select_error = OSError
+else:
+ select_error = select.error
+
+# Function used to create a socket. Can be overridden if needed in special
+# situations.
+socket_factory = socket.socket
+
+class UnexpectedSource(dns.exception.DNSException):
+ """A DNS query response came from an unexpected address or port."""
+
+
+class BadResponse(dns.exception.FormError):
+ """A DNS query response does not respond to the question asked."""
+
+
+class TransferError(dns.exception.DNSException):
+ """A zone transfer response got a non-zero rcode."""
+
+ def __init__(self, rcode):
+ message = 'Zone transfer error: %s' % dns.rcode.to_text(rcode)
+ super(TransferError, self).__init__(message)
+ self.rcode = rcode
+
+
+def _compute_expiration(timeout):
+ if timeout is None:
+ return None
+ else:
+ return time.time() + timeout
+
+# This module can use either poll() or select() as the "polling backend".
+#
+# A backend function takes an fd, bools for readability, writability, and
+# error detection, and a timeout.
+
+def _poll_for(fd, readable, writable, error, timeout):
+ """Poll polling backend."""
+
+ event_mask = 0
+ if readable:
+ event_mask |= select.POLLIN
+ if writable:
+ event_mask |= select.POLLOUT
+ if error:
+ event_mask |= select.POLLERR
+
+ pollable = select.poll()
+ pollable.register(fd, event_mask)
+
+ if timeout:
+ event_list = pollable.poll(long(timeout * 1000))
+ else:
+ event_list = pollable.poll()
+
+ return bool(event_list)
+
+
+def _select_for(fd, readable, writable, error, timeout):
+ """Select polling backend."""
+
+ rset, wset, xset = [], [], []
+
+ if readable:
+ rset = [fd]
+ if writable:
+ wset = [fd]
+ if error:
+ xset = [fd]
+
+ if timeout is None:
+ (rcount, wcount, xcount) = select.select(rset, wset, xset)
+ else:
+ (rcount, wcount, xcount) = select.select(rset, wset, xset, timeout)
+
+ return bool((rcount or wcount or xcount))
+
+
+def _wait_for(fd, readable, writable, error, expiration):
+ # Use the selected polling backend to wait for any of the specified
+ # events. An "expiration" absolute time is converted into a relative
+ # timeout.
+
+ done = False
+ while not done:
+ if expiration is None:
+ timeout = None
+ else:
+ timeout = expiration - time.time()
+ if timeout <= 0.0:
+ raise dns.exception.Timeout
+ try:
+ if not _polling_backend(fd, readable, writable, error, timeout):
+ raise dns.exception.Timeout
+ except select_error as e:
+ if e.args[0] != errno.EINTR:
+ raise e
+ done = True
+
+
+def _set_polling_backend(fn):
+ # Internal API. Do not use.
+
+ global _polling_backend
+
+ _polling_backend = fn
+
+if hasattr(select, 'poll'):
+ # Prefer poll() on platforms that support it because it has no
+ # limits on the maximum value of a file descriptor (plus it will
+ # be more efficient for high values).
+ _polling_backend = _poll_for
+else:
+ _polling_backend = _select_for
+
+
+def _wait_for_readable(s, expiration):
+ _wait_for(s, True, False, True, expiration)
+
+
+def _wait_for_writable(s, expiration):
+ _wait_for(s, False, True, True, expiration)
+
+
+def _addresses_equal(af, a1, a2):
+ # Convert the first value of the tuple, which is a textual format
+ # address into binary form, so that we are not confused by different
+ # textual representations of the same address
+ try:
+ n1 = dns.inet.inet_pton(af, a1[0])
+ n2 = dns.inet.inet_pton(af, a2[0])
+ except dns.exception.SyntaxError:
+ return False
+ return n1 == n2 and a1[1:] == a2[1:]
+
+
+def _destination_and_source(af, where, port, source, source_port):
+ # Apply defaults and compute destination and source tuples
+ # suitable for use in connect(), sendto(), or bind().
+ if af is None:
+ try:
+ af = dns.inet.af_for_address(where)
+ except Exception:
+ af = dns.inet.AF_INET
+ if af == dns.inet.AF_INET:
+ destination = (where, port)
+ if source is not None or source_port != 0:
+ if source is None:
+ source = '0.0.0.0'
+ source = (source, source_port)
+ elif af == dns.inet.AF_INET6:
+ destination = (where, port, 0, 0)
+ if source is not None or source_port != 0:
+ if source is None:
+ source = '::'
+ source = (source, source_port, 0, 0)
+ return (af, destination, source)
+
+
+def send_udp(sock, what, destination, expiration=None):
+ """Send a DNS message to the specified UDP socket.
+
+ *sock*, a ``socket``.
+
+ *what*, a ``binary`` or ``dns.message.Message``, the message to send.
+
+ *destination*, a destination tuple appropriate for the address family
+ of the socket, specifying where to send the query.
+
+ *expiration*, a ``float`` or ``None``, the absolute time at which
+ a timeout exception should be raised. If ``None``, no timeout will
+ occur.
+
+ Returns an ``(int, float)`` tuple of bytes sent and the sent time.
+ """
+
+ if isinstance(what, dns.message.Message):
+ what = what.to_wire()
+ _wait_for_writable(sock, expiration)
+ sent_time = time.time()
+ n = sock.sendto(what, destination)
+ return (n, sent_time)
+
+
+def receive_udp(sock, destination, expiration=None,
+ ignore_unexpected=False, one_rr_per_rrset=False,
+ keyring=None, request_mac=b'', ignore_trailing=False):
+ """Read a DNS message from a UDP socket.
+
+ *sock*, a ``socket``.
+
+ *destination*, a destination tuple appropriate for the address family
+ of the socket, specifying where the associated query was sent.
+
+ *expiration*, a ``float`` or ``None``, the absolute time at which
+ a timeout exception should be raised. If ``None``, no timeout will
+ occur.
+
+ *ignore_unexpected*, a ``bool``. If ``True``, ignore responses from
+ unexpected sources.
+
+ *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own
+ RRset.
+
+ *keyring*, a ``dict``, the keyring to use for TSIG.
+
+ *request_mac*, a ``binary``, the MAC of the request (for TSIG).
+
+ *ignore_trailing*, a ``bool``. If ``True``, ignore trailing
+ junk at end of the received message.
+
+    Raises if the message is malformed, if network errors occur, or if
+ there is a timeout.
+
+ Returns a ``dns.message.Message`` object.
+ """
+
+ wire = b''
+ while 1:
+ _wait_for_readable(sock, expiration)
+ (wire, from_address) = sock.recvfrom(65535)
+ if _addresses_equal(sock.family, from_address, destination) or \
+ (dns.inet.is_multicast(destination[0]) and
+ from_address[1:] == destination[1:]):
+ break
+ if not ignore_unexpected:
+ raise UnexpectedSource('got a response from '
+ '%s instead of %s' % (from_address,
+ destination))
+ received_time = time.time()
+ r = dns.message.from_wire(wire, keyring=keyring, request_mac=request_mac,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing)
+ return (r, received_time)
+
+def udp(q, where, timeout=None, port=53, af=None, source=None, source_port=0,
+ ignore_unexpected=False, one_rr_per_rrset=False, ignore_trailing=False):
+ """Return the response obtained after sending a query via UDP.
+
+ *q*, a ``dns.message.Message``, the query to send
+
+ *where*, a ``text`` containing an IPv4 or IPv6 address, where
+ to send the message.
+
+ *timeout*, a ``float`` or ``None``, the number of seconds to wait before the
+ query times out. If ``None``, the default, wait forever.
+
+    *port*, an ``int``, the port to send the message to. The default is 53.
+
+ *af*, an ``int``, the address family to use. The default is ``None``,
+ which causes the address family to use to be inferred from the form of
+ *where*. If the inference attempt fails, AF_INET is used. This
+ parameter is historical; you need never set it.
+
+ *source*, a ``text`` containing an IPv4 or IPv6 address, specifying
+ the source address. The default is the wildcard address.
+
+ *source_port*, an ``int``, the port from which to send the message.
+ The default is 0.
+
+ *ignore_unexpected*, a ``bool``. If ``True``, ignore responses from
+ unexpected sources.
+
+ *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own
+ RRset.
+
+ *ignore_trailing*, a ``bool``. If ``True``, ignore trailing
+ junk at end of the received message.
+
+ Returns a ``dns.message.Message``.
+ """
+
+ wire = q.to_wire()
+ (af, destination, source) = _destination_and_source(af, where, port,
+ source, source_port)
+ s = socket_factory(af, socket.SOCK_DGRAM, 0)
+ received_time = None
+ sent_time = None
+ try:
+ expiration = _compute_expiration(timeout)
+ s.setblocking(0)
+ if source is not None:
+ s.bind(source)
+ (_, sent_time) = send_udp(s, wire, destination, expiration)
+ (r, received_time) = receive_udp(s, destination, expiration,
+ ignore_unexpected, one_rr_per_rrset,
+ q.keyring, q.mac, ignore_trailing)
+ finally:
+ if sent_time is None or received_time is None:
+ response_time = 0
+ else:
+ response_time = received_time - sent_time
+ s.close()
+ r.time = response_time
+ if not q.is_response(r):
+ raise BadResponse
+ return r
+
+
+def _net_read(sock, count, expiration):
+ """Read the specified number of bytes from sock. Keep trying until we
+ either get the desired amount, or we hit EOF.
+ A Timeout exception will be raised if the operation is not completed
+ by the expiration time.
+ """
+ s = b''
+ while count > 0:
+ _wait_for_readable(sock, expiration)
+ n = sock.recv(count)
+ if n == b'':
+ raise EOFError
+ count = count - len(n)
+ s = s + n
+ return s
+
+
+def _net_write(sock, data, expiration):
+ """Write the specified data to the socket.
+ A Timeout exception will be raised if the operation is not completed
+ by the expiration time.
+ """
+ current = 0
+ l = len(data)
+ while current < l:
+ _wait_for_writable(sock, expiration)
+ current += sock.send(data[current:])
+
+
+def send_tcp(sock, what, expiration=None):
+ """Send a DNS message to the specified TCP socket.
+
+ *sock*, a ``socket``.
+
+ *what*, a ``binary`` or ``dns.message.Message``, the message to send.
+
+ *expiration*, a ``float`` or ``None``, the absolute time at which
+ a timeout exception should be raised. If ``None``, no timeout will
+ occur.
+
+ Returns an ``(int, float)`` tuple of bytes sent and the sent time.
+ """
+
+ if isinstance(what, dns.message.Message):
+ what = what.to_wire()
+ l = len(what)
+ # copying the wire into tcpmsg is inefficient, but lets us
+ # avoid writev() or doing a short write that would get pushed
+ # onto the net
+ tcpmsg = struct.pack("!H", l) + what
+ _wait_for_writable(sock, expiration)
+ sent_time = time.time()
+ _net_write(sock, tcpmsg, expiration)
+ return (len(tcpmsg), sent_time)
+
+def receive_tcp(sock, expiration=None, one_rr_per_rrset=False,
+ keyring=None, request_mac=b'', ignore_trailing=False):
+ """Read a DNS message from a TCP socket.
+
+ *sock*, a ``socket``.
+
+ *expiration*, a ``float`` or ``None``, the absolute time at which
+ a timeout exception should be raised. If ``None``, no timeout will
+ occur.
+
+ *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own
+ RRset.
+
+ *keyring*, a ``dict``, the keyring to use for TSIG.
+
+ *request_mac*, a ``binary``, the MAC of the request (for TSIG).
+
+ *ignore_trailing*, a ``bool``. If ``True``, ignore trailing
+ junk at end of the received message.
+
+    Raises if the message is malformed, if network errors occur, or if
+ there is a timeout.
+
+ Returns a ``dns.message.Message`` object.
+ """
+
+ ldata = _net_read(sock, 2, expiration)
+ (l,) = struct.unpack("!H", ldata)
+ wire = _net_read(sock, l, expiration)
+ received_time = time.time()
+ r = dns.message.from_wire(wire, keyring=keyring, request_mac=request_mac,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing)
+ return (r, received_time)
+
+def _connect(s, address):
+ try:
+ s.connect(address)
+ except socket.error:
+ (ty, v) = sys.exc_info()[:2]
+
+ if hasattr(v, 'errno'):
+ v_err = v.errno
+ else:
+ v_err = v[0]
+ if v_err not in [errno.EINPROGRESS, errno.EWOULDBLOCK, errno.EALREADY]:
+ raise v
+
+
+def tcp(q, where, timeout=None, port=53, af=None, source=None, source_port=0,
+ one_rr_per_rrset=False, ignore_trailing=False):
+ """Return the response obtained after sending a query via TCP.
+
+ *q*, a ``dns.message.Message``, the query to send
+
+ *where*, a ``text`` containing an IPv4 or IPv6 address, where
+ to send the message.
+
+ *timeout*, a ``float`` or ``None``, the number of seconds to wait before the
+ query times out. If ``None``, the default, wait forever.
+
+    *port*, an ``int``, the port to send the message to. The default is 53.
+
+ *af*, an ``int``, the address family to use. The default is ``None``,
+ which causes the address family to use to be inferred from the form of
+ *where*. If the inference attempt fails, AF_INET is used. This
+ parameter is historical; you need never set it.
+
+ *source*, a ``text`` containing an IPv4 or IPv6 address, specifying
+ the source address. The default is the wildcard address.
+
+ *source_port*, an ``int``, the port from which to send the message.
+ The default is 0.
+
+ *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own
+ RRset.
+
+ *ignore_trailing*, a ``bool``. If ``True``, ignore trailing
+ junk at end of the received message.
+
+ Returns a ``dns.message.Message``.
+ """
+
+ wire = q.to_wire()
+ (af, destination, source) = _destination_and_source(af, where, port,
+ source, source_port)
+ s = socket_factory(af, socket.SOCK_STREAM, 0)
+ begin_time = None
+ received_time = None
+ try:
+ expiration = _compute_expiration(timeout)
+ s.setblocking(0)
+ begin_time = time.time()
+ if source is not None:
+ s.bind(source)
+ _connect(s, destination)
+ send_tcp(s, wire, expiration)
+ (r, received_time) = receive_tcp(s, expiration, one_rr_per_rrset,
+ q.keyring, q.mac, ignore_trailing)
+ finally:
+ if begin_time is None or received_time is None:
+ response_time = 0
+ else:
+ response_time = received_time - begin_time
+ s.close()
+ r.time = response_time
+ if not q.is_response(r):
+ raise BadResponse
+ return r
+
+
+def xfr(where, zone, rdtype=dns.rdatatype.AXFR, rdclass=dns.rdataclass.IN,
+ timeout=None, port=53, keyring=None, keyname=None, relativize=True,
+ af=None, lifetime=None, source=None, source_port=0, serial=0,
+ use_udp=False, keyalgorithm=dns.tsig.default_algorithm):
+ """Return a generator for the responses to a zone transfer.
+
+ *where*. If the inference attempt fails, AF_INET is used. This
+ parameter is historical; you need never set it.
+
+ *zone*, a ``dns.name.Name`` or ``text``, the name of the zone to transfer.
+
+ *rdtype*, an ``int`` or ``text``, the type of zone transfer. The
+ default is ``dns.rdatatype.AXFR``. ``dns.rdatatype.IXFR`` can be
+ used to do an incremental transfer instead.
+
+ *rdclass*, an ``int`` or ``text``, the class of the zone transfer.
+ The default is ``dns.rdataclass.IN``.
+
+ *timeout*, a ``float``, the number of seconds to wait for each
+ response message. If None, the default, wait forever.
+
+    *port*, an ``int``, the port to send the message to. The default is 53.
+
+ *keyring*, a ``dict``, the keyring to use for TSIG.
+
+ *keyname*, a ``dns.name.Name`` or ``text``, the name of the TSIG
+ key to use.
+
+ *relativize*, a ``bool``. If ``True``, all names in the zone will be
+ relativized to the zone origin. It is essential that the
+ relativize setting matches the one specified to
+ ``dns.zone.from_xfr()`` if using this generator to make a zone.
+
+ *af*, an ``int``, the address family to use. The default is ``None``,
+ which causes the address family to use to be inferred from the form of
+ *where*. If the inference attempt fails, AF_INET is used. This
+ parameter is historical; you need never set it.
+
+ *lifetime*, a ``float``, the total number of seconds to spend
+ doing the transfer. If ``None``, the default, then there is no
+ limit on the time the transfer may take.
+
+ *source*, a ``text`` containing an IPv4 or IPv6 address, specifying
+ the source address. The default is the wildcard address.
+
+ *source_port*, an ``int``, the port from which to send the message.
+ The default is 0.
+
+ *serial*, an ``int``, the SOA serial number to use as the base for
+ an IXFR diff sequence (only meaningful if *rdtype* is
+ ``dns.rdatatype.IXFR``).
+
+ *use_udp*, a ``bool``. If ``True``, use UDP (only meaningful for IXFR).
+
+ *keyalgorithm*, a ``dns.name.Name`` or ``text``, the TSIG algorithm to use.
+
+ Raises on errors, and so does the generator.
+
+ Returns a generator of ``dns.message.Message`` objects.
+ """
+
+ if isinstance(zone, string_types):
+ zone = dns.name.from_text(zone)
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ q = dns.message.make_query(zone, rdtype, rdclass)
+ if rdtype == dns.rdatatype.IXFR:
+ rrset = dns.rrset.from_text(zone, 0, 'IN', 'SOA',
+ '. . %u 0 0 0 0' % serial)
+ q.authority.append(rrset)
+ if keyring is not None:
+ q.use_tsig(keyring, keyname, algorithm=keyalgorithm)
+ wire = q.to_wire()
+ (af, destination, source) = _destination_and_source(af, where, port,
+ source, source_port)
+ if use_udp:
+ if rdtype != dns.rdatatype.IXFR:
+ raise ValueError('cannot do a UDP AXFR')
+ s = socket_factory(af, socket.SOCK_DGRAM, 0)
+ else:
+ s = socket_factory(af, socket.SOCK_STREAM, 0)
+ s.setblocking(0)
+ if source is not None:
+ s.bind(source)
+ expiration = _compute_expiration(lifetime)
+ _connect(s, destination)
+ l = len(wire)
+ if use_udp:
+ _wait_for_writable(s, expiration)
+ s.send(wire)
+ else:
+ tcpmsg = struct.pack("!H", l) + wire
+ _net_write(s, tcpmsg, expiration)
+ done = False
+ delete_mode = True
+ expecting_SOA = False
+ soa_rrset = None
+ if relativize:
+ origin = zone
+ oname = dns.name.empty
+ else:
+ origin = None
+ oname = zone
+ tsig_ctx = None
+ first = True
+ while not done:
+ mexpiration = _compute_expiration(timeout)
+ if mexpiration is None or mexpiration > expiration:
+ mexpiration = expiration
+ if use_udp:
+ _wait_for_readable(s, expiration)
+ (wire, from_address) = s.recvfrom(65535)
+ else:
+ ldata = _net_read(s, 2, mexpiration)
+ (l,) = struct.unpack("!H", ldata)
+ wire = _net_read(s, l, mexpiration)
+ is_ixfr = (rdtype == dns.rdatatype.IXFR)
+ r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
+ xfr=True, origin=origin, tsig_ctx=tsig_ctx,
+ multi=True, first=first,
+ one_rr_per_rrset=is_ixfr)
+ rcode = r.rcode()
+ if rcode != dns.rcode.NOERROR:
+ raise TransferError(rcode)
+ tsig_ctx = r.tsig_ctx
+ first = False
+ answer_index = 0
+ if soa_rrset is None:
+ if not r.answer or r.answer[0].name != oname:
+ raise dns.exception.FormError(
+ "No answer or RRset not for qname")
+ rrset = r.answer[0]
+ if rrset.rdtype != dns.rdatatype.SOA:
+ raise dns.exception.FormError("first RRset is not an SOA")
+ answer_index = 1
+ soa_rrset = rrset.copy()
+ if rdtype == dns.rdatatype.IXFR:
+ if soa_rrset[0].serial <= serial:
+ #
+ # We're already up-to-date.
+ #
+ done = True
+ else:
+ expecting_SOA = True
+ #
+ # Process SOAs in the answer section (other than the initial
+ # SOA in the first message).
+ #
+ for rrset in r.answer[answer_index:]:
+ if done:
+ raise dns.exception.FormError("answers after final SOA")
+ if rrset.rdtype == dns.rdatatype.SOA and rrset.name == oname:
+ if expecting_SOA:
+ if rrset[0].serial != serial:
+ raise dns.exception.FormError(
+ "IXFR base serial mismatch")
+ expecting_SOA = False
+ elif rdtype == dns.rdatatype.IXFR:
+ delete_mode = not delete_mode
+ #
+ # If this SOA RRset is equal to the first we saw then we're
+ # finished. If this is an IXFR we also check that we're seeing
+ # the record in the expected part of the response.
+ #
+ if rrset == soa_rrset and \
+ (rdtype == dns.rdatatype.AXFR or
+ (rdtype == dns.rdatatype.IXFR and delete_mode)):
+ done = True
+ elif expecting_SOA:
+ #
+ # We made an IXFR request and are expecting another
+ # SOA RR, but saw something else, so this must be an
+ # AXFR response.
+ #
+ rdtype = dns.rdatatype.AXFR
+ expecting_SOA = False
+ if done and q.keyring and not r.had_tsig:
+ raise dns.exception.FormError("missing TSIG")
+ yield r
+ s.close()
diff --git a/openpype/vendor/python/python_2/dns/rcode.py b/openpype/vendor/python/python_2/dns/rcode.py
new file mode 100644
index 0000000000..5191e1b18c
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rcode.py
@@ -0,0 +1,144 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Result Codes."""
+
+import dns.exception
+from ._compat import long
+
#: No error
NOERROR = 0
#: Form error
FORMERR = 1
#: Server failure
SERVFAIL = 2
#: Name does not exist ("Name Error" in RFC 1035 terminology).
NXDOMAIN = 3
#: Not implemented
NOTIMP = 4
#: Refused
REFUSED = 5
#: Name exists.
YXDOMAIN = 6
#: RRset exists.
YXRRSET = 7
#: RRset does not exist.
NXRRSET = 8
#: Not authoritative.
NOTAUTH = 9
#: Name not in zone.
NOTZONE = 10
#: Bad EDNS version.
BADVERS = 16

# Mapping from textual mnemonic to rcode value; used by from_text().
_by_text = {
    'NOERROR': NOERROR,
    'FORMERR': FORMERR,
    'SERVFAIL': SERVFAIL,
    'NXDOMAIN': NXDOMAIN,
    'NOTIMP': NOTIMP,
    'REFUSED': REFUSED,
    'YXDOMAIN': YXDOMAIN,
    'YXRRSET': YXRRSET,
    'NXRRSET': NXRRSET,
    'NOTAUTH': NOTAUTH,
    'NOTZONE': NOTZONE,
    'BADVERS': BADVERS
}

# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be a true inverse.

_by_value = {y: x for x, y in _by_text.items()}
+
+
class UnknownRcode(dns.exception.DNSException):
    """A DNS rcode is unknown."""
    # Raised by from_text() when the input is neither a known mnemonic
    # nor an integer in the 0-4095 range written in decimal form.
+
+
def from_text(text):
    """Convert text into an rcode.

    *text*, a ``text``, the textual rcode or an integer in textual form.

    Raises ``dns.rcode.UnknownRcode`` if the rcode mnemonic is unknown.

    Returns an ``int``.
    """

    # A plain decimal number is accepted directly if it fits in the
    # 12-bit extended-rcode range; otherwise fall through to the
    # mnemonic lookup.
    if text.isdigit():
        numeric = int(text)
        if 0 <= numeric <= 4095:
            return numeric
    value = _by_text.get(text.upper())
    if value is None:
        raise UnknownRcode
    return value
+
+
def from_flags(flags, ednsflags):
    """Return the rcode value encoded by flags and ednsflags.

    *flags*, an ``int``, the DNS flags field.

    *ednsflags*, an ``int``, the EDNS flags field.

    Raises ``ValueError`` if rcode is < 0 or > 4095.

    Returns an ``int``.
    """

    # The low 4 bits of the rcode come from the DNS header flags; the
    # EDNS "extended rcode" (bits 20-27 of ednsflags) supplies the
    # upper 8 bits.
    rcode = ((ednsflags >> 20) & 0xff0) | (flags & 0x000f)
    if rcode < 0 or rcode > 4095:
        raise ValueError('rcode must be >= 0 and <= 4095')
    return rcode
+
+
def to_flags(value):
    """Return a (flags, ednsflags) tuple which encodes the rcode.

    *value*, an ``int``, the rcode.

    Raises ``ValueError`` if rcode is < 0 or > 4095.

    Returns an ``(int, int)`` tuple.
    """

    if value < 0 or value > 4095:
        raise ValueError('rcode must be >= 0 and <= 4095')
    # Low 4 bits go in the DNS header flags; the high 8 bits are the
    # EDNS extended rcode, stored in bits 20-27 of the EDNS flags.
    return (value & 0xf, long(value & 0xff0) << 20)
+
+
def to_text(value):
    """Convert rcode into text.

    *value*, an ``int``, the rcode.

    Raises ``ValueError`` if rcode is < 0 or > 4095.

    Returns a ``text``.
    """

    if value < 0 or value > 4095:
        raise ValueError('rcode must be >= 0 and <= 4095')
    # Rcodes without a mnemonic are rendered as their decimal value.
    return _by_value.get(value, str(value))
diff --git a/openpype/vendor/python/python_2/dns/rdata.py b/openpype/vendor/python/python_2/dns/rdata.py
new file mode 100644
index 0000000000..ea1971dc5f
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdata.py
@@ -0,0 +1,456 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS rdata."""
+
+from io import BytesIO
+import base64
+import binascii
+
+import dns.exception
+import dns.name
+import dns.rdataclass
+import dns.rdatatype
+import dns.tokenizer
+import dns.wiredata
+from ._compat import xrange, string_types, text_type
+
+try:
+ import threading as _threading
+except ImportError:
+ import dummy_threading as _threading
+
+_hex_chunksize = 32
+
+
+def _hexify(data, chunksize=_hex_chunksize):
+ """Convert a binary string into its hex encoding, broken up into chunks
+ of chunksize characters separated by a space.
+ """
+
+ line = binascii.hexlify(data)
+ return b' '.join([line[i:i + chunksize]
+ for i
+ in range(0, len(line), chunksize)]).decode()
+
+_base64_chunksize = 32
+
+
+def _base64ify(data, chunksize=_base64_chunksize):
+ """Convert a binary string into its base64 encoding, broken up into chunks
+ of chunksize characters separated by a space.
+ """
+
+ line = base64.b64encode(data)
+ return b' '.join([line[i:i + chunksize]
+ for i
+ in range(0, len(line), chunksize)]).decode()
+
# Bytes that must be backslash-escaped inside a quoted string.
__escaped = bytearray(b'"\\')


def _escapify(qstring):
    """Escape the characters in a quoted string which need it."""

    # Normalize the input to a bytearray so iteration yields integers
    # on both Python 2 and Python 3.
    if isinstance(qstring, text_type):
        qstring = qstring.encode()
    if not isinstance(qstring, bytearray):
        qstring = bytearray(qstring)

    pieces = []
    for byte in qstring:
        if byte in __escaped:
            pieces.append('\\' + chr(byte))
        elif 0x20 <= byte < 0x7F:
            pieces.append(chr(byte))
        else:
            # Non-printable bytes use the \DDD decimal escape form.
            pieces.append('\\%03d' % byte)
    return ''.join(pieces)
+
+
+def _truncate_bitmap(what):
+ """Determine the index of greatest byte that isn't all zeros, and
+ return the bitmap that contains all the bytes less than that index.
+ """
+
+ for i in xrange(len(what) - 1, -1, -1):
+ if what[i] != 0:
+ return what[0: i + 1]
+ return what[0:1]
+
+
class Rdata(object):
    """Base class for all DNS rdata types."""

    __slots__ = ['rdclass', 'rdtype']

    def __init__(self, rdclass, rdtype):
        """Initialize an rdata.

        *rdclass*, an ``int`` is the rdataclass of the Rdata.

        *rdtype*, an ``int`` is the rdatatype of the Rdata.
        """

        self.rdclass = rdclass
        self.rdtype = rdtype

    def covers(self):
        """Return the type a Rdata covers.

        DNS SIG/RRSIG rdatas apply to a specific type; this type is
        returned by the covers() function. If the rdata type is not
        SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when
        creating rdatasets, allowing the rdataset to contain only RRSIGs
        of a particular type, e.g. RRSIG(NS).

        Returns an ``int``.
        """

        return dns.rdatatype.NONE

    def extended_rdatatype(self):
        """Return a 32-bit type value, the least significant 16 bits of
        which are the ordinary DNS type, and the upper 16 bits of which are
        the "covered" type, if any.

        Returns an ``int``.
        """

        return self.covers() << 16 | self.rdtype

    def to_text(self, origin=None, relativize=True, **kw):
        """Convert an rdata to text format.

        Returns a ``text``.
        """

        raise NotImplementedError

    def to_wire(self, file, compress=None, origin=None):
        """Convert an rdata to wire format.

        Returns a ``binary``.
        """

        raise NotImplementedError

    def to_digestable(self, origin=None):
        """Convert rdata to a format suitable for digesting in hashes. This
        is also the DNSSEC canonical form.

        Returns a ``binary``.
        """

        f = BytesIO()
        self.to_wire(f, None, origin)
        return f.getvalue()

    def validate(self):
        """Check that the current contents of the rdata's fields are
        valid.

        If you change an rdata by assigning to its fields,
        it is a good idea to call validate() when you are done making
        changes.

        Raises various exceptions if there are problems.

        Returns ``None``.
        """

        dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())

    def __repr__(self):
        covers = self.covers()
        if covers == dns.rdatatype.NONE:
            ctext = ''
        else:
            ctext = '(' + dns.rdatatype.to_text(covers) + ')'
        # Bug fix: the previous version computed ctext and then returned
        # '' unconditionally, discarding the informative representation.
        # Restore the standard dnspython repr string.
        return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
               dns.rdatatype.to_text(self.rdtype) + ctext + ' rdata: ' + \
               str(self) + '>'

    def __str__(self):
        return self.to_text()

    def _cmp(self, other):
        """Compare an rdata with another rdata of the same rdtype and
        rdclass.

        Return < 0 if self < other in the DNSSEC ordering, 0 if self
        == other, and > 0 if self > other.
        """

        # DNSSEC ordering is defined on the canonical (digestable) wire
        # form, compared as byte strings.
        our = self.to_digestable(dns.name.root)
        their = other.to_digestable(dns.name.root)
        if our == their:
            return 0
        elif our > their:
            return 1
        else:
            return -1

    def __eq__(self, other):
        if not isinstance(other, Rdata):
            return False
        if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return False
        return self._cmp(other) == 0

    def __ne__(self, other):
        if not isinstance(other, Rdata):
            return True
        if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return True
        return self._cmp(other) != 0

    def __lt__(self, other):
        if not isinstance(other, Rdata) or \
                self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return NotImplemented
        return self._cmp(other) < 0

    def __le__(self, other):
        if not isinstance(other, Rdata) or \
                self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return NotImplemented
        return self._cmp(other) <= 0

    def __ge__(self, other):
        if not isinstance(other, Rdata) or \
                self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return NotImplemented
        return self._cmp(other) >= 0

    def __gt__(self, other):
        if not isinstance(other, Rdata) or \
                self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return NotImplemented
        return self._cmp(other) > 0

    def __hash__(self):
        # Hash on the canonical wire form so equal rdatas hash equally.
        return hash(self.to_digestable(dns.name.root))

    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        raise NotImplementedError

    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        raise NotImplementedError

    def choose_relativity(self, origin=None, relativize=True):
        """Convert any domain names in the rdata to the specified
        relativization.
        """
+
class GenericRdata(Rdata):

    """Generic Rdata Class

    This class is used for rdata types for which we have no better
    implementation. It implements the DNS "unknown RRs" scheme.
    """

    __slots__ = ['data']

    def __init__(self, rdclass, rdtype, data):
        super(GenericRdata, self).__init__(rdclass, rdtype)
        self.data = data

    def to_text(self, origin=None, relativize=True, **kw):
        # Render in the RFC 3597 generic form: "\# <length> <hex data>".
        return r'\# %d ' % len(self.data) + _hexify(self.data)

    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        token = tok.get()
        if not token.is_identifier() or token.value != r'\#':
            raise dns.exception.SyntaxError(
                r'generic rdata does not start with \#')
        length = tok.get_int()
        # Gather all remaining hex tokens up to end of line/file.
        hex_chunks = []
        while True:
            token = tok.get()
            if token.is_eol_or_eof():
                break
            hex_chunks.append(token.value.encode())
        data = binascii.unhexlify(b''.join(hex_chunks))
        if len(data) != length:
            raise dns.exception.SyntaxError(
                'generic rdata hex data has wrong length')
        return cls(rdclass, rdtype, data)

    def to_wire(self, file, compress=None, origin=None):
        file.write(self.data)

    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        return cls(rdclass, rdtype, wire[current: current + rdlen])
+
# Cache of already-located implementation modules, keyed by
# (rdclass, rdtype); shared by get_rdata_class() and register_type().
_rdata_modules = {}
# Package under which per-type implementation modules live.
_module_prefix = 'dns.rdtypes'
# Serializes the dynamic imports performed by get_rdata_class().
_import_lock = _threading.Lock()
+
def get_rdata_class(rdclass, rdtype):
    """Return the class implementing the given rdata class and type.

    The implementation module is imported dynamically on first use and
    cached in ``_rdata_modules``.  Class-independent implementations
    live under the 'ANY' subpackage; if no implementation is found,
    ``GenericRdata`` is returned.
    """

    def import_module(name):
        # __import__ returns the top-level package, so walk down the
        # dotted path to reach the leaf module.  Imports are serialized
        # with a lock.
        with _import_lock:
            mod = __import__(name)
            components = name.split('.')
            for comp in components[1:]:
                mod = getattr(mod, comp)
            return mod

    mod = _rdata_modules.get((rdclass, rdtype))
    rdclass_text = dns.rdataclass.to_text(rdclass)
    rdtype_text = dns.rdatatype.to_text(rdtype)
    rdtype_text = rdtype_text.replace('-', '_')
    if not mod:
        # Consistency fix: class-independent implementations are cached
        # under (dns.rdataclass.ANY, rdtype) below, but were previously
        # looked up with dns.rdatatype.ANY, which only worked because
        # both constants happen to be 255.  Use the rdataclass key for
        # both the read and the write.
        mod = _rdata_modules.get((dns.rdataclass.ANY, rdtype))
    if not mod:
        try:
            mod = import_module('.'.join([_module_prefix,
                                          rdclass_text, rdtype_text]))
            _rdata_modules[(rdclass, rdtype)] = mod
        except ImportError:
            try:
                mod = import_module('.'.join([_module_prefix,
                                              'ANY', rdtype_text]))
                _rdata_modules[(dns.rdataclass.ANY, rdtype)] = mod
            except ImportError:
                mod = None
    if mod:
        cls = getattr(mod, rdtype_text)
    else:
        cls = GenericRdata
    return cls
+
+
def from_text(rdclass, rdtype, tok, origin=None, relativize=True):
    """Build an rdata object from text format.

    This function attempts to dynamically load a class which
    implements the specified rdata class and type. If there is no
    class-and-type-specific implementation, the GenericRdata class
    is used.

    Once a class is chosen, its from_text() class method is called
    with the parameters to this function.

    If *tok* is a ``text``, then a tokenizer is created and the string
    is used as its input.

    *rdclass*, an ``int``, the rdataclass.

    *rdtype*, an ``int``, the rdatatype.

    *tok*, a ``dns.tokenizer.Tokenizer`` or a ``text``.

    *origin*, a ``dns.name.Name`` (or ``None``), the
    origin to use for relative names.

    *relativize*, a ``bool``. If true, name will be relativized to
    the specified origin.

    Returns an instance of the chosen Rdata subclass.
    """

    if isinstance(tok, string_types):
        tok = dns.tokenizer.Tokenizer(tok)
    cls = get_rdata_class(rdclass, rdtype)
    if cls != GenericRdata:
        # Peek at the first token to detect the RFC 3597 generic
        # ("\#") syntax even for known types.
        token = tok.get()
        tok.unget(token)
        if token.is_identifier() and token.value == r'\#':
            # Known type written in the generic syntax: decode the wire
            # form from the generic text, then parse it with from_wire.
            rdata = GenericRdata.from_text(rdclass, rdtype, tok, origin,
                                           relativize)
            return from_wire(rdclass, rdtype, rdata.data, 0,
                             len(rdata.data), origin)
    return cls.from_text(rdclass, rdtype, tok, origin, relativize)
+
+
def from_wire(rdclass, rdtype, wire, current, rdlen, origin=None):
    """Build an rdata object from wire format.

    This function attempts to dynamically load a class which
    implements the specified rdata class and type. If there is no
    class-and-type-specific implementation, the GenericRdata class
    is used.

    Once a class is chosen, its from_wire() class method is called
    with the parameters to this function.

    *rdclass*, an ``int``, the rdataclass.

    *rdtype*, an ``int``, the rdatatype.

    *wire*, a ``binary``, the wire-format message.

    *current*, an ``int``, the offset in wire of the beginning of
    the rdata.

    *rdlen*, an ``int``, the length of the wire-format rdata.

    *origin*, a ``dns.name.Name`` (or ``None``). If not ``None``,
    then names will be relativized to this origin.

    Returns an instance of the chosen Rdata subclass.
    """

    wrapped = dns.wiredata.maybe_wrap(wire)
    implementation = get_rdata_class(rdclass, rdtype)
    return implementation.from_wire(rdclass, rdtype, wrapped, current,
                                    rdlen, origin)
+
+
class RdatatypeExists(dns.exception.DNSException):
    """DNS rdatatype already exists."""
    # Keyword arguments accepted by DNSException and substituted into
    # the fmt string below; raised by register_type().
    supp_kwargs = {'rdclass', 'rdtype'}
    fmt = "The rdata type with class {rdclass} and rdtype {rdtype} " + \
        "already exists."
+
+
def register_type(implementation, rdtype, rdtype_text, is_singleton=False,
                  rdclass=dns.rdataclass.IN):
    """Dynamically register a module to handle an rdatatype.

    *implementation*, a module implementing the type in the usual dnspython
    way.

    *rdtype*, an ``int``, the rdatatype to register.

    *rdtype_text*, a ``text``, the textual form of the rdatatype.

    *is_singleton*, a ``bool``, indicating if the type is a singleton (i.e.
    RRsets of the type can have only one member.)

    *rdclass*, the rdataclass of the type, or ``dns.rdataclass.ANY`` if
    it applies to all classes.

    Raises ``dns.rdata.RdatatypeExists`` if an implementation already
    handles this class and type.
    """

    # Anything other than the generic fallback means the (class, type)
    # pair is already implemented.
    if get_rdata_class(rdclass, rdtype) != GenericRdata:
        raise RdatatypeExists(rdclass=rdclass, rdtype=rdtype)
    _rdata_modules[(rdclass, rdtype)] = implementation
    dns.rdatatype.register_type(rdtype, rdtype_text, is_singleton)
diff --git a/openpype/vendor/python/python_2/dns/rdataclass.py b/openpype/vendor/python/python_2/dns/rdataclass.py
new file mode 100644
index 0000000000..b88aa85b7b
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdataclass.py
@@ -0,0 +1,122 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Rdata Classes."""
+
+import re
+
+import dns.exception
+
# RR class numbers, as assigned in the IANA "DNS CLASSes" registry.
RESERVED0 = 0
IN = 1
CH = 3
HS = 4
# NONE and ANY are metaclasses; see is_metaclass() below.
NONE = 254
ANY = 255

# Textual mnemonic -> class value; used by from_text().
_by_text = {
    'RESERVED0': RESERVED0,
    'IN': IN,
    'CH': CH,
    'HS': HS,
    'NONE': NONE,
    'ANY': ANY
}

# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be a true inverse.

_by_value = {y: x for x, y in _by_text.items()}

# Now that we've built the inverse map, we can add class aliases to
# the _by_text mapping.

_by_text.update({
    'INTERNET': IN,
    'CHAOS': CH,
    'HESIOD': HS
})

# Classes for which is_metaclass() returns True.
_metaclasses = {
    NONE: True,
    ANY: True
}

# Matches the generic class syntax, e.g. "CLASS1" (case-insensitive).
_unknown_class_pattern = re.compile('CLASS([0-9]+)$', re.I)
+
+
class UnknownRdataclass(dns.exception.DNSException):
    """A DNS class is unknown."""
    # Raised by from_text() when the input is neither a known mnemonic
    # nor the generic "CLASS<number>" syntax.
+
+
def from_text(text):
    """Convert text into a DNS rdata class value.

    The input text can be a defined DNS RR class mnemonic or
    instance of the DNS generic class syntax.

    For example, "IN" and "CLASS1" will both result in a value of 1.

    Raises ``dns.rdataclass.UnknownRdataclass`` if the class is unknown.

    Raises ``ValueError`` if the rdata class value is not >= 0 and <= 65535.

    Returns an ``int``.
    """

    # Known mnemonics are returned directly.
    value = _by_text.get(text.upper())
    if value is not None:
        return value
    # Otherwise, try the generic "CLASS<number>" syntax.
    match = _unknown_class_pattern.match(text)
    if match is None:
        raise UnknownRdataclass
    value = int(match.group(1))
    if value < 0 or value > 65535:
        raise ValueError("class must be between >= 0 and <= 65535")
    return value
+
+
def to_text(value):
    """Convert a DNS rdata class value to text.

    If the value has a known mnemonic, it will be used, otherwise the
    DNS generic class syntax will be used.

    Raises ``ValueError`` if the rdata class value is not >= 0 and <= 65535.

    Returns a ``str``.
    """

    if value < 0 or value > 65535:
        raise ValueError("class must be between >= 0 and <= 65535")
    # repr() of an int is its decimal form, matching the generic syntax.
    return _by_value.get(value, 'CLASS' + repr(value))
+
+
def is_metaclass(rdclass):
    """True if the specified class is a metaclass.

    The currently defined metaclasses are ANY and NONE.

    *rdclass* is an ``int``.
    """

    return rdclass in _metaclasses
diff --git a/openpype/vendor/python/python_2/dns/rdataset.py b/openpype/vendor/python/python_2/dns/rdataset.py
new file mode 100644
index 0000000000..f1afe24198
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdataset.py
@@ -0,0 +1,347 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS rdatasets (an rdataset is a set of rdatas of a given type and class)"""
+
+import random
+from io import StringIO
+import struct
+
+import dns.exception
+import dns.rdatatype
+import dns.rdataclass
+import dns.rdata
+import dns.set
+from ._compat import string_types
+
# define SimpleSet here for backwards compatibility: older code imported
# dns.rdataset.SimpleSet, which is now simply an alias of dns.set.Set.
SimpleSet = dns.set.Set
+
+
class DifferingCovers(dns.exception.DNSException):
    """An attempt was made to add a DNS SIG/RRSIG whose covered type
    is not the same as that of the other rdatas in the rdataset."""
    # Raised by Rdataset.add().
+
+
class IncompatibleTypes(dns.exception.DNSException):
    """An attempt was made to add DNS RR data of an incompatible type."""
    # Raised by Rdataset.add() when the rdata's class or type differs
    # from that of the rdataset.
+
+
class Rdataset(dns.set.Set):

    """A DNS rdataset."""

    __slots__ = ['rdclass', 'rdtype', 'covers', 'ttl']

    def __init__(self, rdclass, rdtype, covers=dns.rdatatype.NONE, ttl=0):
        """Create a new rdataset of the specified class and type.

        *rdclass*, an ``int``, the rdataclass.

        *rdtype*, an ``int``, the rdatatype.

        *covers*, an ``int``, the covered rdatatype.

        *ttl*, an ``int``, the TTL.
        """

        super(Rdataset, self).__init__()
        self.rdclass = rdclass
        self.rdtype = rdtype
        self.covers = covers
        self.ttl = ttl

    def _clone(self):
        # Extend dns.set.Set._clone() so copies carry the DNS-specific
        # attributes along with the set contents.
        obj = super(Rdataset, self)._clone()
        obj.rdclass = self.rdclass
        obj.rdtype = self.rdtype
        obj.covers = self.covers
        obj.ttl = self.ttl
        return obj

    def update_ttl(self, ttl):
        """Perform TTL minimization.

        Set the TTL of the rdataset to be the lesser of the set's current
        TTL or the specified TTL. If the set contains no rdatas, set the TTL
        to the specified TTL.

        *ttl*, an ``int``.
        """

        if len(self) == 0:
            self.ttl = ttl
        elif ttl < self.ttl:
            self.ttl = ttl

    def add(self, rd, ttl=None):
        """Add the specified rdata to the rdataset.

        If the optional *ttl* parameter is supplied, then
        ``self.update_ttl(ttl)`` will be called prior to adding the rdata.

        *rd*, a ``dns.rdata.Rdata``, the rdata

        *ttl*, an ``int``, the TTL.

        Raises ``dns.rdataset.IncompatibleTypes`` if the type and class
        do not match the type and class of the rdataset.

        Raises ``dns.rdataset.DifferingCovers`` if the type is a signature
        type and the covered type does not match that of the rdataset.
        """

        #
        # If we're adding a signature, do some special handling to
        # check that the signature covers the same type as the
        # other rdatas in this rdataset. If this is the first rdata
        # in the set, initialize the covers field.
        #
        if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype:
            raise IncompatibleTypes
        if ttl is not None:
            self.update_ttl(ttl)
        if self.rdtype == dns.rdatatype.RRSIG or \
           self.rdtype == dns.rdatatype.SIG:
            covers = rd.covers()
            if len(self) == 0 and self.covers == dns.rdatatype.NONE:
                self.covers = covers
            elif self.covers != covers:
                raise DifferingCovers
        # Singleton types hold at most one rdata; replace any existing one.
        if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0:
            self.clear()
        super(Rdataset, self).add(rd)

    def union_update(self, other):
        self.update_ttl(other.ttl)
        super(Rdataset, self).union_update(other)

    def intersection_update(self, other):
        self.update_ttl(other.ttl)
        super(Rdataset, self).intersection_update(other)

    def update(self, other):
        """Add all rdatas in other to self.

        *other*, a ``dns.rdataset.Rdataset``, the rdataset from which
        to update.
        """

        self.update_ttl(other.ttl)
        super(Rdataset, self).update(other)

    def __repr__(self):
        if self.covers == 0:
            ctext = ''
        else:
            ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
        # Bug fix: the previous version computed ctext and then returned
        # '' unconditionally, discarding the informative representation.
        # Restore the standard dnspython repr string.
        return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
               dns.rdatatype.to_text(self.rdtype) + ctext + ' rdataset>'

    def __str__(self):
        return self.to_text()

    def __eq__(self, other):
        if not isinstance(other, Rdataset):
            return False
        if self.rdclass != other.rdclass or \
           self.rdtype != other.rdtype or \
           self.covers != other.covers:
            return False
        return super(Rdataset, self).__eq__(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def to_text(self, name=None, origin=None, relativize=True,
                override_rdclass=None, **kw):
        """Convert the rdataset into DNS master file format.

        See ``dns.name.Name.choose_relativity`` for more information
        on how *origin* and *relativize* determine the way names
        are emitted.

        Any additional keyword arguments are passed on to the rdata
        ``to_text()`` method.

        *name*, a ``dns.name.Name``. If name is not ``None``, emit RRs with
        *name* as the owner name.

        *origin*, a ``dns.name.Name`` or ``None``, the origin for relative
        names.

        *relativize*, a ``bool``. If ``True``, names will be relativized
        to *origin*.
        """

        if name is not None:
            name = name.choose_relativity(origin, relativize)
            ntext = str(name)
            pad = ' '
        else:
            ntext = ''
            pad = ''
        s = StringIO()
        if override_rdclass is not None:
            rdclass = override_rdclass
        else:
            rdclass = self.rdclass
        if len(self) == 0:
            #
            # Empty rdatasets are used for the question section, and in
            # some dynamic updates, so we don't need to print out the TTL
            # (which is meaningless anyway).
            #
            s.write(u'{}{}{} {}\n'.format(ntext, pad,
                                          dns.rdataclass.to_text(rdclass),
                                          dns.rdatatype.to_text(self.rdtype)))
        else:
            for rd in self:
                s.write(u'%s%s%d %s %s %s\n' %
                        (ntext, pad, self.ttl, dns.rdataclass.to_text(rdclass),
                         dns.rdatatype.to_text(self.rdtype),
                         rd.to_text(origin=origin, relativize=relativize,
                                    **kw)))
        #
        # We strip off the final \n for the caller's convenience in printing
        #
        return s.getvalue()[:-1]

    def to_wire(self, name, file, compress=None, origin=None,
                override_rdclass=None, want_shuffle=True):
        """Convert the rdataset to wire format.

        *name*, a ``dns.name.Name`` is the owner name to use.

        *file* is the file where the name is emitted (typically a
        BytesIO file).

        *compress*, a ``dict``, is the compression table to use. If
        ``None`` (the default), names will not be compressed.

        *origin* is a ``dns.name.Name`` or ``None``. If the name is
        relative and origin is not ``None``, then *origin* will be appended
        to it.

        *override_rdclass*, an ``int``, is used as the class instead of the
        class of the rdataset. This is useful when rendering rdatasets
        associated with dynamic updates.

        *want_shuffle*, a ``bool``. If ``True``, then the order of the
        Rdatas within the Rdataset will be shuffled before rendering.

        Returns an ``int``, the number of records emitted.
        """

        if override_rdclass is not None:
            rdclass = override_rdclass
            want_shuffle = False
        else:
            rdclass = self.rdclass
        file.seek(0, 2)
        if len(self) == 0:
            name.to_wire(file, compress, origin)
            stuff = struct.pack("!HHIH", self.rdtype, rdclass, 0, 0)
            file.write(stuff)
            return 1
        else:
            if want_shuffle:
                rdatas = list(self)
                random.shuffle(rdatas)
            else:
                rdatas = self
            for rd in rdatas:
                name.to_wire(file, compress, origin)
                # Write a zero RDLENGTH placeholder, render the rdata,
                # then seek back and patch in the real length.
                stuff = struct.pack("!HHIH", self.rdtype, rdclass,
                                    self.ttl, 0)
                file.write(stuff)
                start = file.tell()
                rd.to_wire(file, compress, origin)
                end = file.tell()
                assert end - start < 65536
                file.seek(start - 2)
                stuff = struct.pack("!H", end - start)
                file.write(stuff)
                file.seek(0, 2)
            return len(self)

    def match(self, rdclass, rdtype, covers):
        """Returns ``True`` if this rdataset matches the specified class,
        type, and covers.
        """
        if self.rdclass == rdclass and \
           self.rdtype == rdtype and \
           self.covers == covers:
            return True
        return False
+
+
def from_text_list(rdclass, rdtype, ttl, text_rdatas):
    """Create an rdataset with the specified class, type, and TTL, and with
    the specified list of rdatas in text format.

    Returns a ``dns.rdataset.Rdataset`` object.
    """

    # Accept textual class/type mnemonics as well as integers.
    if isinstance(rdclass, string_types):
        rdclass = dns.rdataclass.from_text(rdclass)
    if isinstance(rdtype, string_types):
        rdtype = dns.rdatatype.from_text(rdtype)
    rdataset = Rdataset(rdclass, rdtype)
    rdataset.update_ttl(ttl)
    for text in text_rdatas:
        rdataset.add(dns.rdata.from_text(rdataset.rdclass,
                                         rdataset.rdtype, text))
    return rdataset
+
+
def from_text(rdclass, rdtype, ttl, *text_rdatas):
    """Create an rdataset with the specified class, type, and TTL, and with
    the specified rdatas in text format.

    Returns a ``dns.rdataset.Rdataset`` object.
    """

    # Delegate to the list-based constructor with the varargs tuple.
    return from_text_list(rdclass, rdtype, ttl, text_rdatas)
+
+
def from_rdata_list(ttl, rdatas):
    """Create an rdataset with the specified TTL, and with
    the specified list of rdata objects.

    Returns a ``dns.rdataset.Rdataset`` object.
    """

    if len(rdatas) == 0:
        raise ValueError("rdata list must not be empty")
    rdataset = None
    for rd in rdatas:
        # The first rdata determines the class and type of the set.
        if rdataset is None:
            rdataset = Rdataset(rd.rdclass, rd.rdtype)
            rdataset.update_ttl(ttl)
        rdataset.add(rd)
    return rdataset
+
+
def from_rdata(ttl, *rdatas):
    """Create an rdataset with the specified TTL, and with
    the specified rdata objects.

    Returns a ``dns.rdataset.Rdataset`` object.
    """

    # Delegate to the list-based constructor with the varargs tuple.
    return from_rdata_list(ttl, rdatas)
diff --git a/openpype/vendor/python/python_2/dns/rdatatype.py b/openpype/vendor/python/python_2/dns/rdatatype.py
new file mode 100644
index 0000000000..b247bc9c42
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdatatype.py
@@ -0,0 +1,287 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Rdata Types."""
+
+import re
+
+import dns.exception
+
# Standard DNS RR TYPE numbers, as assigned in the IANA "Resource Record
# (RR) TYPEs" registry.
NONE = 0
A = 1
NS = 2
MD = 3
MF = 4
CNAME = 5
SOA = 6
MB = 7
MG = 8
MR = 9
NULL = 10
WKS = 11
PTR = 12
HINFO = 13
MINFO = 14
MX = 15
TXT = 16
RP = 17
AFSDB = 18
X25 = 19
ISDN = 20
RT = 21
NSAP = 22
NSAP_PTR = 23
SIG = 24
KEY = 25
PX = 26
GPOS = 27
AAAA = 28
LOC = 29
NXT = 30
SRV = 33
NAPTR = 35
KX = 36
CERT = 37
A6 = 38
DNAME = 39
OPT = 41
APL = 42
DS = 43
SSHFP = 44
IPSECKEY = 45
RRSIG = 46
NSEC = 47
DNSKEY = 48
DHCID = 49
NSEC3 = 50
NSEC3PARAM = 51
TLSA = 52
HIP = 55
CDS = 59
CDNSKEY = 60
OPENPGPKEY = 61
CSYNC = 62
SPF = 99
UNSPEC = 103
EUI48 = 108
EUI64 = 109
TKEY = 249
TSIG = 250
IXFR = 251
AXFR = 252
MAILB = 253
MAILA = 254
ANY = 255
URI = 256
CAA = 257
AVC = 258
TA = 32768
DLV = 32769

# Mapping from textual mnemonic to type value.  Note 'NSAP-PTR', whose
# text form differs from its Python identifier (NSAP_PTR).
_by_text = {
    'NONE': NONE,
    'A': A,
    'NS': NS,
    'MD': MD,
    'MF': MF,
    'CNAME': CNAME,
    'SOA': SOA,
    'MB': MB,
    'MG': MG,
    'MR': MR,
    'NULL': NULL,
    'WKS': WKS,
    'PTR': PTR,
    'HINFO': HINFO,
    'MINFO': MINFO,
    'MX': MX,
    'TXT': TXT,
    'RP': RP,
    'AFSDB': AFSDB,
    'X25': X25,
    'ISDN': ISDN,
    'RT': RT,
    'NSAP': NSAP,
    'NSAP-PTR': NSAP_PTR,
    'SIG': SIG,
    'KEY': KEY,
    'PX': PX,
    'GPOS': GPOS,
    'AAAA': AAAA,
    'LOC': LOC,
    'NXT': NXT,
    'SRV': SRV,
    'NAPTR': NAPTR,
    'KX': KX,
    'CERT': CERT,
    'A6': A6,
    'DNAME': DNAME,
    'OPT': OPT,
    'APL': APL,
    'DS': DS,
    'SSHFP': SSHFP,
    'IPSECKEY': IPSECKEY,
    'RRSIG': RRSIG,
    'NSEC': NSEC,
    'DNSKEY': DNSKEY,
    'DHCID': DHCID,
    'NSEC3': NSEC3,
    'NSEC3PARAM': NSEC3PARAM,
    'TLSA': TLSA,
    'HIP': HIP,
    'CDS': CDS,
    'CDNSKEY': CDNSKEY,
    'OPENPGPKEY': OPENPGPKEY,
    'CSYNC': CSYNC,
    'SPF': SPF,
    'UNSPEC': UNSPEC,
    'EUI48': EUI48,
    'EUI64': EUI64,
    'TKEY': TKEY,
    'TSIG': TSIG,
    'IXFR': IXFR,
    'AXFR': AXFR,
    'MAILB': MAILB,
    'MAILA': MAILA,
    'ANY': ANY,
    'URI': URI,
    'CAA': CAA,
    'AVC': AVC,
    'TA': TA,
    'DLV': DLV,
}

# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be true inverse.

_by_value = {y: x for x, y in _by_text.items()}

# Metatypes outside the contiguous TKEY..ANY numeric range that
# is_metatype() checks; currently only OPT.
_metatypes = {
    OPT: True
}

# Types restricted to a single rdata per rdataset/RRset (see
# is_singleton() below).
_singletons = {
    SOA: True,
    NXT: True,
    DNAME: True,
    NSEC: True,
    CNAME: True,
}

# Matches the RFC 3597 generic form, e.g. 'TYPE1234' (case-insensitive).
_unknown_type_pattern = re.compile('TYPE([0-9]+)$', re.I)
+
+
class UnknownRdatatype(dns.exception.DNSException):
    """DNS resource record type is unknown.

    Raised by ``from_text()`` when the input is neither a known mnemonic
    nor an RFC 3597 'TYPEnnn' generic form.
    """
+
+
def from_text(text):
    """Convert text into a DNS rdata type value.

    The input text can be a defined DNS RR type mnemonic or
    instance of the DNS generic type syntax.

    For example, "NS" and "TYPE2" will both result in a value of 2.

    Raises ``dns.rdatatype.UnknownRdatatype`` if the type is unknown.

    Raises ``ValueError`` if the rdata type value is not >= 0 and <= 65535.

    Returns an ``int``.
    """

    # Known mnemonics take precedence and are matched case-insensitively.
    value = _by_text.get(text.upper())
    if value is not None:
        return value
    # Otherwise fall back to the RFC 3597 'TYPEnnn' generic syntax.
    match = _unknown_type_pattern.match(text)
    if match is None:
        raise UnknownRdatatype
    value = int(match.group(1))
    if not 0 <= value <= 65535:
        raise ValueError("type must be between >= 0 and <= 65535")
    return value
+
+
def to_text(value):
    """Convert a DNS rdata type value to text.

    If the value has a known mnemonic, it will be used, otherwise the
    DNS generic type syntax will be used.

    Raises ``ValueError`` if the rdata type value is not >= 0 and <= 65535.

    Returns a ``str``.
    """

    if not 0 <= value <= 65535:
        raise ValueError("type must be between >= 0 and <= 65535")
    mnemonic = _by_value.get(value)
    if mnemonic is not None:
        return mnemonic
    # repr() is kept (rather than str()) to preserve the original py2
    # formatting behavior for non-int integral values.
    return 'TYPE' + repr(value)
+
+
def is_metatype(rdtype):
    """True if the specified type is a metatype.

    *rdtype* is an ``int``.

    The currently defined metatypes are TKEY, TSIG, IXFR, AXFR, MAILA,
    MAILB, ANY, and OPT.

    Returns a ``bool``.
    """

    # All metatypes except OPT fall in the contiguous TKEY..ANY range;
    # OPT is handled via the _metatypes table.
    return (TKEY <= rdtype <= ANY) or rdtype in _metatypes
+
+
def is_singleton(rdtype):
    """Is the specified type a singleton type?

    Singleton types can only have a single rdata in an rdataset, or a single
    RR in an RRset.

    The currently defined singleton types are CNAME, DNAME, NSEC, NXT, and
    SOA.

    *rdtype* is an ``int``.

    Returns a ``bool``.
    """

    return rdtype in _singletons
+
+
def register_type(rdtype, rdtype_text, is_singleton=False):  # pylint: disable=redefined-outer-name
    """Dynamically register an rdatatype.

    *rdtype*, an ``int``, the rdatatype to register.

    *rdtype_text*, a ``text``, the textual form of the rdatatype.

    *is_singleton*, a ``bool``, indicating if the type is a singleton (i.e.
    RRsets of the type can have only one member.)
    """

    # Keep both direction tables in sync; singletons are additionally
    # recorded so is_singleton() recognizes the new type.
    _by_value[rdtype] = rdtype_text
    _by_text[rdtype_text] = rdtype
    if is_singleton:
        _singletons[rdtype] = True
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/AFSDB.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/AFSDB.py
new file mode 100644
index 0000000000..c6a700cf56
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/AFSDB.py
@@ -0,0 +1,55 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.mxbase
+
+
class AFSDB(dns.rdtypes.mxbase.UncompressedDowncasingMX):

    """AFSDB record

    @ivar subtype: the subtype value
    @type subtype: int
    @ivar hostname: the hostname name
    @type hostname: dns.name.Name object"""

    # Use the property mechanism to make "subtype" an alias for the
    # "preference" attribute, and "hostname" an alias for the "exchange"
    # attribute.
    #
    # This lets us inherit the UncompressedMX implementation but lets
    # the caller use appropriate attribute names for the rdata type.
    #
    # We probably lose some performance vs. a cut-and-paste
    # implementation, but this way we don't copy code, and that's
    # good.

    def get_subtype(self):
        """Return the subtype (stored as the inherited 'preference')."""
        return self.preference

    def set_subtype(self, subtype):
        """Set the subtype (stored as the inherited 'preference')."""
        self.preference = subtype

    subtype = property(get_subtype, set_subtype)

    def get_hostname(self):
        """Return the hostname (stored as the inherited 'exchange')."""
        return self.exchange

    def set_hostname(self, hostname):
        """Set the hostname (stored as the inherited 'exchange')."""
        self.exchange = hostname

    hostname = property(get_hostname, set_hostname)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/AVC.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/AVC.py
new file mode 100644
index 0000000000..7f340b39d2
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/AVC.py
@@ -0,0 +1,25 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2016 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.txtbase
+
+
class AVC(dns.rdtypes.txtbase.TXTBase):

    """AVC record

    Inherits all behavior (text/wire format) from the TXT base class.

    @see: U{http://www.iana.org/assignments/dns-parameters/AVC/avc-completed-template}"""
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/CAA.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/CAA.py
new file mode 100644
index 0000000000..0acf201ab1
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/CAA.py
@@ -0,0 +1,75 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+
+
class CAA(dns.rdata.Rdata):

    """CAA (Certification Authority Authorization) record

    @ivar flags: the flags
    @type flags: int
    @ivar tag: the tag
    @type tag: string
    @ivar value: the value
    @type value: string
    @see: RFC 6844"""

    __slots__ = ['flags', 'tag', 'value']

    def __init__(self, rdclass, rdtype, flags, tag, value):
        super(CAA, self).__init__(rdclass, rdtype)
        self.flags = flags
        self.tag = tag
        self.value = value

    def to_text(self, origin=None, relativize=True, **kw):
        # Presentation format: <flags> <tag> "<value>".
        return '%u %s "%s"' % (self.flags,
                               dns.rdata._escapify(self.tag),
                               dns.rdata._escapify(self.value))

    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        flags = tok.get_uint8()
        # The tag must fit in a single wire-format length octet and be
        # alphanumeric per RFC 6844.
        tag = tok.get_string().encode()
        if len(tag) > 255:
            raise dns.exception.SyntaxError("tag too long")
        if not tag.isalnum():
            raise dns.exception.SyntaxError("tag is not alphanumeric")
        value = tok.get_string().encode()
        return cls(rdclass, rdtype, flags, tag, value)

    def to_wire(self, file, compress=None, origin=None):
        # Wire format: flags (1 byte), tag length (1 byte), tag, then the
        # value occupying the remainder of the rdata.
        file.write(struct.pack('!B', self.flags))
        l = len(self.tag)
        assert l < 256
        file.write(struct.pack('!B', l))
        file.write(self.tag)
        file.write(self.value)

    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        (flags, l) = struct.unpack('!BB', wire[current: current + 2])
        current += 2
        tag = wire[current: current + l]
        # The value is whatever follows the tag: rdlen minus the two fixed
        # header bytes and the tag itself.
        value = wire[current + l:current + rdlen - 2]
        return cls(rdclass, rdtype, flags, tag, value)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/CDNSKEY.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/CDNSKEY.py
new file mode 100644
index 0000000000..653ae1be16
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/CDNSKEY.py
@@ -0,0 +1,27 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.dnskeybase
+from dns.rdtypes.dnskeybase import flags_to_text_set, flags_from_text_set
+
+
# Re-export the flag helpers so star-imports of this module expose the
# same names as the DNSKEY module does.
__all__ = ['flags_to_text_set', 'flags_from_text_set']


class CDNSKEY(dns.rdtypes.dnskeybase.DNSKEYBase):

    """CDNSKEY record

    Inherits all behavior from the DNSKEY base class."""
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/CDS.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/CDS.py
new file mode 100644
index 0000000000..a63041dd79
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/CDS.py
@@ -0,0 +1,23 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.dsbase
+
+
class CDS(dns.rdtypes.dsbase.DSBase):

    """CDS record

    Inherits all behavior from the DS base class."""
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/CERT.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/CERT.py
new file mode 100644
index 0000000000..eea27b52c3
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/CERT.py
@@ -0,0 +1,123 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+import base64
+
+import dns.exception
+import dns.dnssec
+import dns.rdata
+import dns.tokenizer
+
+_ctype_by_value = {
+ 1: 'PKIX',
+ 2: 'SPKI',
+ 3: 'PGP',
+ 253: 'URI',
+ 254: 'OID',
+}
+
+_ctype_by_name = {
+ 'PKIX': 1,
+ 'SPKI': 2,
+ 'PGP': 3,
+ 'URI': 253,
+ 'OID': 254,
+}
+
+
+def _ctype_from_text(what):
+ v = _ctype_by_name.get(what)
+ if v is not None:
+ return v
+ return int(what)
+
+
+def _ctype_to_text(what):
+ v = _ctype_by_value.get(what)
+ if v is not None:
+ return v
+ return str(what)
+
+
class CERT(dns.rdata.Rdata):

    """CERT record

    @ivar certificate_type: certificate type
    @type certificate_type: int
    @ivar key_tag: key tag
    @type key_tag: int
    @ivar algorithm: algorithm
    @type algorithm: int
    @ivar certificate: the certificate or CRL
    @type certificate: string
    @see: RFC 2538"""

    __slots__ = ['certificate_type', 'key_tag', 'algorithm', 'certificate']

    def __init__(self, rdclass, rdtype, certificate_type, key_tag, algorithm,
                 certificate):
        super(CERT, self).__init__(rdclass, rdtype)
        self.certificate_type = certificate_type
        self.key_tag = key_tag
        self.algorithm = algorithm
        self.certificate = certificate

    def to_text(self, origin=None, relativize=True, **kw):
        certificate_type = _ctype_to_text(self.certificate_type)
        return "%s %d %s %s" % (certificate_type, self.key_tag,
                                dns.dnssec.algorithm_to_text(self.algorithm),
                                dns.rdata._base64ify(self.certificate))

    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        certificate_type = _ctype_from_text(tok.get_string())
        key_tag = tok.get_uint16()
        algorithm = dns.dnssec.algorithm_from_text(tok.get_string())
        if algorithm < 0 or algorithm > 255:
            raise dns.exception.SyntaxError("bad algorithm type")
        # The certificate is the remainder of the rdata: base64 text,
        # possibly split across several tokens.
        chunks = []
        while 1:
            t = tok.get().unescape()
            if t.is_eol_or_eof():
                break
            if not t.is_identifier():
                raise dns.exception.SyntaxError
            chunks.append(t.value.encode())
        b64 = b''.join(chunks)
        certificate = base64.b64decode(b64)
        return cls(rdclass, rdtype, certificate_type, key_tag,
                   algorithm, certificate)

    def to_wire(self, file, compress=None, origin=None):
        # Wire format: type (2 bytes), key tag (2), algorithm (1),
        # then the raw certificate bytes.
        prefix = struct.pack("!HHB", self.certificate_type, self.key_tag,
                             self.algorithm)
        file.write(prefix)
        file.write(self.certificate)

    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        # The 5-byte fixed header must be present; anything after it is
        # the certificate.
        prefix = wire[current: current + 5].unwrap()
        current += 5
        rdlen -= 5
        if rdlen < 0:
            raise dns.exception.FormError
        (certificate_type, key_tag, algorithm) = struct.unpack("!HHB", prefix)
        certificate = wire[current: current + rdlen].unwrap()
        return cls(rdclass, rdtype, certificate_type, key_tag, algorithm,
                   certificate)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/CNAME.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/CNAME.py
new file mode 100644
index 0000000000..11d42aa7fd
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/CNAME.py
@@ -0,0 +1,27 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.nsbase
+
+
class CNAME(dns.rdtypes.nsbase.NSBase):

    """CNAME record (RFC 1035)

    Inherits all behavior from the NS base class.

    Note: although CNAME is officially a singleton type, dnspython allows
    non-singleton CNAME rdatasets because such sets have been commonly
    used by BIND and other nameservers for load balancing."""
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/CSYNC.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/CSYNC.py
new file mode 100644
index 0000000000..06292fb28c
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/CSYNC.py
@@ -0,0 +1,126 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2004-2007, 2009-2011, 2016 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.rdatatype
+import dns.name
+from dns._compat import xrange
+
class CSYNC(dns.rdata.Rdata):

    """CSYNC record (RFC 7477)

    @ivar serial: the SOA serial number
    @type serial: int
    @ivar flags: the CSYNC flags
    @type flags: int
    @ivar windows: the windowed bitmap list
    @type windows: list of (window number, string) tuples"""

    __slots__ = ['serial', 'flags', 'windows']

    def __init__(self, rdclass, rdtype, serial, flags, windows):
        super(CSYNC, self).__init__(rdclass, rdtype)
        self.serial = serial
        self.flags = flags
        self.windows = windows

    def to_text(self, origin=None, relativize=True, **kw):
        # The type bitmaps use NSEC-style window encoding: each window
        # covers 256 type values; each set bit names one rdatatype.
        text = ''
        for (window, bitmap) in self.windows:
            bits = []
            for i in xrange(0, len(bitmap)):
                byte = bitmap[i]
                for j in xrange(0, 8):
                    if byte & (0x80 >> j):
                        bits.append(dns.rdatatype.to_text(window * 256 +
                                                          i * 8 + j))
            text += (' ' + ' '.join(bits))
        return '%d %d%s' % (self.serial, self.flags, text)

    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        serial = tok.get_uint32()
        flags = tok.get_uint16()
        rdtypes = []
        while 1:
            token = tok.get().unescape()
            if token.is_eol_or_eof():
                break
            nrdtype = dns.rdatatype.from_text(token.value)
            if nrdtype == 0:
                raise dns.exception.SyntaxError("CSYNC with bit 0")
            if nrdtype > 65535:
                raise dns.exception.SyntaxError("CSYNC with bit > 65535")
            rdtypes.append(nrdtype)
        rdtypes.sort()
        window = 0
        octets = 0
        prior_rdtype = 0
        bitmap = bytearray(b'\0' * 32)
        windows = []
        # Pack the sorted, de-duplicated types into 256-type windows.
        # A window (trimmed to its last used octet) is flushed whenever
        # the type sequence crosses into the next window.
        for nrdtype in rdtypes:
            if nrdtype == prior_rdtype:
                continue
            prior_rdtype = nrdtype
            new_window = nrdtype // 256
            if new_window != window:
                windows.append((window, bitmap[0:octets]))
                bitmap = bytearray(b'\0' * 32)
                window = new_window
            offset = nrdtype % 256
            byte = offset // 8
            bit = offset % 8
            octets = byte + 1
            bitmap[byte] = bitmap[byte] | (0x80 >> bit)

        windows.append((window, bitmap[0:octets]))
        return cls(rdclass, rdtype, serial, flags, windows)

    def to_wire(self, file, compress=None, origin=None):
        # Wire format: serial (4 bytes), flags (2 bytes), then one
        # (window, length, bitmap) triple per window.
        file.write(struct.pack('!IH', self.serial, self.flags))
        for (window, bitmap) in self.windows:
            file.write(struct.pack('!BB', window, len(bitmap)))
            file.write(bitmap)

    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        if rdlen < 6:
            raise dns.exception.FormError("CSYNC too short")
        (serial, flags) = struct.unpack("!IH", wire[current: current + 6])
        current += 6
        rdlen -= 6
        windows = []
        while rdlen > 0:
            if rdlen < 3:
                raise dns.exception.FormError("CSYNC too short")
            window = wire[current]
            octets = wire[current + 1]
            # Each window's bitmap must be 1..32 octets.
            if octets == 0 or octets > 32:
                raise dns.exception.FormError("bad CSYNC octets")
            current += 2
            rdlen -= 2
            if rdlen < octets:
                raise dns.exception.FormError("bad CSYNC bitmap length")
            bitmap = bytearray(wire[current: current + octets].unwrap())
            current += octets
            rdlen -= octets
            windows.append((window, bitmap))
        return cls(rdclass, rdtype, serial, flags, windows)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/DLV.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/DLV.py
new file mode 100644
index 0000000000..1635212583
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/DLV.py
@@ -0,0 +1,23 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.dsbase
+
+
class DLV(dns.rdtypes.dsbase.DSBase):

    """DLV record

    Inherits all behavior from the DS base class."""
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/DNAME.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/DNAME.py
new file mode 100644
index 0000000000..2499283cfa
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/DNAME.py
@@ -0,0 +1,26 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.nsbase
+
+
class DNAME(dns.rdtypes.nsbase.UncompressedNS):

    """DNAME record"""

    def to_digestable(self, origin=None):
        # The digest form is that of the target name relative to *origin*.
        return self.target.to_digestable(origin)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/DNSKEY.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/DNSKEY.py
new file mode 100644
index 0000000000..e36f7bc5b1
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/DNSKEY.py
@@ -0,0 +1,27 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.dnskeybase
+from dns.rdtypes.dnskeybase import flags_to_text_set, flags_from_text_set
+
+
# Re-export the flag helpers so star-imports of this module expose them
# alongside the DNSKEY class.
__all__ = ['flags_to_text_set', 'flags_from_text_set']


class DNSKEY(dns.rdtypes.dnskeybase.DNSKEYBase):

    """DNSKEY record

    Inherits all behavior from the DNSKEY base class."""
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/DS.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/DS.py
new file mode 100644
index 0000000000..7d457b2281
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/DS.py
@@ -0,0 +1,23 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.dsbase
+
+
class DS(dns.rdtypes.dsbase.DSBase):

    """DS record

    Inherits all behavior from the DS base class."""
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/EUI48.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/EUI48.py
new file mode 100644
index 0000000000..aa260e205d
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/EUI48.py
@@ -0,0 +1,29 @@
+# Copyright (C) 2015 Red Hat, Inc.
+# Author: Petr Spacek
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED 'AS IS' AND RED HAT DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.euibase
+
+
class EUI48(dns.rdtypes.euibase.EUIBase):

    """EUI48 record

    @ivar fingerprint: 48-bit Extended Unique Identifier (EUI-48)
    @type fingerprint: string
    @see: rfc7043.txt"""

    # Fixed sizes consumed by the EUI base class.
    byte_len = 6  # 0123456789ab (in hex)
    text_len = byte_len * 3 - 1  # 01-23-45-67-89-ab
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/EUI64.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/EUI64.py
new file mode 100644
index 0000000000..5eba350d8f
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/EUI64.py
@@ -0,0 +1,29 @@
+# Copyright (C) 2015 Red Hat, Inc.
+# Author: Petr Spacek
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED 'AS IS' AND RED HAT DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.euibase
+
+
class EUI64(dns.rdtypes.euibase.EUIBase):

    """EUI64 record

    @ivar fingerprint: 64-bit Extended Unique Identifier (EUI-64)
    @type fingerprint: string
    @see: rfc7043.txt"""

    # Fixed sizes consumed by the EUI base class.
    byte_len = 8  # 0123456789abcdef (in hex)
    text_len = byte_len * 3 - 1  # 01-23-45-67-89-ab-cd-ef
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/GPOS.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/GPOS.py
new file mode 100644
index 0000000000..422822f03b
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/GPOS.py
@@ -0,0 +1,162 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+from dns._compat import long, text_type
+
+
+def _validate_float_string(what):
+    """Validate that *what* (bytes) is a decimal number, optionally signed
+    and with at most one decimal point.
+
+    Raises dns.exception.FormError on any malformed input.  The original
+    code leaked an IndexError for empty input and a ValueError (from the
+    2-tuple unpack of split) when the value contained zero or multiple
+    dots; both are now reported uniformly as FormError.
+    """
+    if len(what) == 0:
+        raise dns.exception.FormError
+    if what[0] == b'-'[0] or what[0] == b'+'[0]:
+        what = what[1:]
+    if what.isdigit():
+        return
+    try:
+        (left, right) = what.split(b'.')
+    except ValueError:
+        # no dot, or more than one dot -> not a valid float string
+        raise dns.exception.FormError
+    if left == b'' and right == b'':
+        raise dns.exception.FormError
+    if not left == b'' and not left.decode().isdigit():
+        raise dns.exception.FormError
+    if not right == b'' and not right.decode().isdigit():
+        raise dns.exception.FormError
+
+
+def _sanitize(value):
+    """Return *value* as bytes, encoding text input with the default codec."""
+    return value.encode() if isinstance(value, text_type) else value
+
+
+class GPOS(dns.rdata.Rdata):
+
+    """GPOS record
+
+    @ivar latitude: latitude
+    @type latitude: string
+    @ivar longitude: longitude
+    @type longitude: string
+    @ivar altitude: altitude
+    @type altitude: string
+    @see: RFC 1712"""
+
+    __slots__ = ['latitude', 'longitude', 'altitude']
+
+    def __init__(self, rdclass, rdtype, latitude, longitude, altitude):
+        # Coordinates may arrive as numbers, text, or bytes; numbers are
+        # stringified first so everything goes through the same
+        # sanitize/validate path before being stored as bytes.
+        super(GPOS, self).__init__(rdclass, rdtype)
+        if isinstance(latitude, float) or \
+           isinstance(latitude, int) or \
+           isinstance(latitude, long):
+            latitude = str(latitude)
+        if isinstance(longitude, float) or \
+           isinstance(longitude, int) or \
+           isinstance(longitude, long):
+            longitude = str(longitude)
+        if isinstance(altitude, float) or \
+           isinstance(altitude, int) or \
+           isinstance(altitude, long):
+            altitude = str(altitude)
+        latitude = _sanitize(latitude)
+        longitude = _sanitize(longitude)
+        altitude = _sanitize(altitude)
+        _validate_float_string(latitude)
+        _validate_float_string(longitude)
+        _validate_float_string(altitude)
+        self.latitude = latitude
+        self.longitude = longitude
+        self.altitude = altitude
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        return '{} {} {}'.format(self.latitude.decode(),
+                                 self.longitude.decode(),
+                                 self.altitude.decode())
+
+    @classmethod
+    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+        latitude = tok.get_string()
+        longitude = tok.get_string()
+        altitude = tok.get_string()
+        tok.get_eol()
+        return cls(rdclass, rdtype, latitude, longitude, altitude)
+
+    def to_wire(self, file, compress=None, origin=None):
+        # Each field is a DNS character-string: one length octet followed
+        # by the data, hence the < 256 limit on each field.
+        l = len(self.latitude)
+        assert l < 256
+        file.write(struct.pack('!B', l))
+        file.write(self.latitude)
+        l = len(self.longitude)
+        assert l < 256
+        file.write(struct.pack('!B', l))
+        file.write(self.longitude)
+        l = len(self.altitude)
+        assert l < 256
+        file.write(struct.pack('!B', l))
+        file.write(self.altitude)
+
+    @classmethod
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+        # Parse three length-prefixed character-strings; the last one must
+        # consume exactly the remaining rdata (l != rdlen check).
+        l = wire[current]
+        current += 1
+        rdlen -= 1
+        if l > rdlen:
+            raise dns.exception.FormError
+        latitude = wire[current: current + l].unwrap()
+        current += l
+        rdlen -= l
+        l = wire[current]
+        current += 1
+        rdlen -= 1
+        if l > rdlen:
+            raise dns.exception.FormError
+        longitude = wire[current: current + l].unwrap()
+        current += l
+        rdlen -= l
+        l = wire[current]
+        current += 1
+        rdlen -= 1
+        if l != rdlen:
+            raise dns.exception.FormError
+        altitude = wire[current: current + l].unwrap()
+        return cls(rdclass, rdtype, latitude, longitude, altitude)
+
+    def _get_float_latitude(self):
+        return float(self.latitude)
+
+    def _set_float_latitude(self, value):
+        self.latitude = str(value)
+
+    float_latitude = property(_get_float_latitude, _set_float_latitude,
+                              doc="latitude as a floating point value")
+
+    def _get_float_longitude(self):
+        return float(self.longitude)
+
+    def _set_float_longitude(self, value):
+        self.longitude = str(value)
+
+    float_longitude = property(_get_float_longitude, _set_float_longitude,
+                               doc="longitude as a floating point value")
+
+    def _get_float_altitude(self):
+        return float(self.altitude)
+
+    def _set_float_altitude(self, value):
+        self.altitude = str(value)
+
+    float_altitude = property(_get_float_altitude, _set_float_altitude,
+                              doc="altitude as a floating point value")
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/HINFO.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/HINFO.py
new file mode 100644
index 0000000000..e4e0b34a49
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/HINFO.py
@@ -0,0 +1,86 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+from dns._compat import text_type
+
+
+class HINFO(dns.rdata.Rdata):
+
+    """HINFO record
+
+    @ivar cpu: the CPU type
+    @type cpu: string
+    @ivar os: the OS type
+    @type os: string
+    @see: RFC 1035"""
+
+    __slots__ = ['cpu', 'os']
+
+    def __init__(self, rdclass, rdtype, cpu, os):
+        # Text input is normalized to bytes so to_wire can emit it directly.
+        super(HINFO, self).__init__(rdclass, rdtype)
+        if isinstance(cpu, text_type):
+            self.cpu = cpu.encode()
+        else:
+            self.cpu = cpu
+        if isinstance(os, text_type):
+            self.os = os.encode()
+        else:
+            self.os = os
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        return '"{}" "{}"'.format(dns.rdata._escapify(self.cpu),
+                                  dns.rdata._escapify(self.os))
+
+    @classmethod
+    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+        cpu = tok.get_string()
+        os = tok.get_string()
+        tok.get_eol()
+        return cls(rdclass, rdtype, cpu, os)
+
+    def to_wire(self, file, compress=None, origin=None):
+        # Two DNS character-strings: a length octet then the data.
+        l = len(self.cpu)
+        assert l < 256
+        file.write(struct.pack('!B', l))
+        file.write(self.cpu)
+        l = len(self.os)
+        assert l < 256
+        file.write(struct.pack('!B', l))
+        file.write(self.os)
+
+    @classmethod
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+        # The second character-string must consume the rest of the rdata.
+        l = wire[current]
+        current += 1
+        rdlen -= 1
+        if l > rdlen:
+            raise dns.exception.FormError
+        cpu = wire[current:current + l].unwrap()
+        current += l
+        rdlen -= l
+        l = wire[current]
+        current += 1
+        rdlen -= 1
+        if l != rdlen:
+            raise dns.exception.FormError
+        os = wire[current: current + l].unwrap()
+        return cls(rdclass, rdtype, cpu, os)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/HIP.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/HIP.py
new file mode 100644
index 0000000000..7c876b2d2f
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/HIP.py
@@ -0,0 +1,115 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2010, 2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+import base64
+import binascii
+
+import dns.exception
+import dns.rdata
+import dns.rdatatype
+
+
+class HIP(dns.rdata.Rdata):
+
+    """HIP record
+
+    @ivar hit: the host identity tag
+    @type hit: string
+    @ivar algorithm: the public key cryptographic algorithm
+    @type algorithm: int
+    @ivar key: the public key
+    @type key: string
+    @ivar servers: the rendezvous servers
+    @type servers: list of dns.name.Name objects
+    @see: RFC 5205"""
+
+    __slots__ = ['hit', 'algorithm', 'key', 'servers']
+
+    def __init__(self, rdclass, rdtype, hit, algorithm, key, servers):
+        super(HIP, self).__init__(rdclass, rdtype)
+        self.hit = hit
+        self.algorithm = algorithm
+        self.key = key
+        self.servers = servers
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        # The HIT is presented in hex; the key in base64 with line breaks
+        # stripped.
+        hit = binascii.hexlify(self.hit).decode()
+        key = base64.b64encode(self.key).replace(b'\n', b'').decode()
+        text = u''
+        servers = []
+        for server in self.servers:
+            servers.append(server.choose_relativity(origin, relativize))
+        if len(servers) > 0:
+            text += (u' ' + u' '.join((x.to_unicode() for x in servers)))
+        return u'%u %s %s%s' % (self.algorithm, hit, key, text)
+
+    @classmethod
+    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+        algorithm = tok.get_uint8()
+        hit = binascii.unhexlify(tok.get_string().encode())
+        if len(hit) > 255:
+            raise dns.exception.SyntaxError("HIT too long")
+        key = base64.b64decode(tok.get_string().encode())
+        servers = []
+        while 1:
+            token = tok.get()
+            if token.is_eol_or_eof():
+                break
+            # NOTE(review): dns.name is not imported by this module; this
+            # works only because another dns import binds the submodule --
+            # confirm against upstream dnspython.
+            server = dns.name.from_text(token.value, origin)
+            server.choose_relativity(origin, relativize)
+            servers.append(server)
+        return cls(rdclass, rdtype, hit, algorithm, key, servers)
+
+    def to_wire(self, file, compress=None, origin=None):
+        # Fixed header (HIT length, algorithm, key length) followed by the
+        # HIT, the key, and the server names written without compression.
+        lh = len(self.hit)
+        lk = len(self.key)
+        file.write(struct.pack("!BBH", lh, self.algorithm, lk))
+        file.write(self.hit)
+        file.write(self.key)
+        for server in self.servers:
+            server.to_wire(file, None, origin)
+
+    @classmethod
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+        (lh, algorithm, lk) = struct.unpack('!BBH',
+                                            wire[current: current + 4])
+        current += 4
+        rdlen -= 4
+        hit = wire[current: current + lh].unwrap()
+        current += lh
+        rdlen -= lh
+        key = wire[current: current + lk].unwrap()
+        current += lk
+        rdlen -= lk
+        # Remaining rdata is a sequence of rendezvous server names.
+        servers = []
+        while rdlen > 0:
+            (server, cused) = dns.name.from_wire(wire[: current + rdlen],
+                                                 current)
+            current += cused
+            rdlen -= cused
+            if origin is not None:
+                server = server.relativize(origin)
+            servers.append(server)
+        return cls(rdclass, rdtype, hit, algorithm, key, servers)
+
+    def choose_relativity(self, origin=None, relativize=True):
+        servers = []
+        for server in self.servers:
+            server = server.choose_relativity(origin, relativize)
+            servers.append(server)
+        self.servers = servers
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/ISDN.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/ISDN.py
new file mode 100644
index 0000000000..f5f5f8b9ea
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/ISDN.py
@@ -0,0 +1,99 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+from dns._compat import text_type
+
+
+class ISDN(dns.rdata.Rdata):
+
+    """ISDN record
+
+    @ivar address: the ISDN address
+    @type address: string
+    @ivar subaddress: the ISDN subaddress (or '' if not present)
+    @type subaddress: string
+    @see: RFC 1183"""
+
+    __slots__ = ['address', 'subaddress']
+
+    def __init__(self, rdclass, rdtype, address, subaddress):
+        # Both fields are normalized to bytes so to_wire can emit them
+        # directly.
+        super(ISDN, self).__init__(rdclass, rdtype)
+        if isinstance(address, text_type):
+            self.address = address.encode()
+        else:
+            self.address = address
+        # Bug fix: the original tested isinstance(address, text_type) here,
+        # so a text subaddress was stored un-encoded whenever the address
+        # was already bytes.  Test the subaddress itself.
+        if isinstance(subaddress, text_type):
+            self.subaddress = subaddress.encode()
+        else:
+            self.subaddress = subaddress
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        if self.subaddress:
+            return '"{}" "{}"'.format(dns.rdata._escapify(self.address),
+                                      dns.rdata._escapify(self.subaddress))
+        else:
+            return '"%s"' % dns.rdata._escapify(self.address)
+
+    @classmethod
+    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+        # The subaddress is optional in the presentation format.
+        address = tok.get_string()
+        t = tok.get()
+        if not t.is_eol_or_eof():
+            tok.unget(t)
+            subaddress = tok.get_string()
+        else:
+            tok.unget(t)
+            subaddress = ''
+        tok.get_eol()
+        return cls(rdclass, rdtype, address, subaddress)
+
+    def to_wire(self, file, compress=None, origin=None):
+        # Character-strings; the subaddress is omitted entirely when empty.
+        l = len(self.address)
+        assert l < 256
+        file.write(struct.pack('!B', l))
+        file.write(self.address)
+        l = len(self.subaddress)
+        if l > 0:
+            assert l < 256
+            file.write(struct.pack('!B', l))
+            file.write(self.subaddress)
+
+    @classmethod
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+        l = wire[current]
+        current += 1
+        rdlen -= 1
+        if l > rdlen:
+            raise dns.exception.FormError
+        address = wire[current: current + l].unwrap()
+        current += l
+        rdlen -= l
+        if rdlen > 0:
+            # An optional second character-string must consume exactly the
+            # rest of the rdata.
+            l = wire[current]
+            current += 1
+            rdlen -= 1
+            if l != rdlen:
+                raise dns.exception.FormError
+            subaddress = wire[current: current + l].unwrap()
+        else:
+            subaddress = ''
+        return cls(rdclass, rdtype, address, subaddress)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/LOC.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/LOC.py
new file mode 100644
index 0000000000..da9bb03a95
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/LOC.py
@@ -0,0 +1,327 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from __future__ import division
+
+import struct
+
+import dns.exception
+import dns.rdata
+from dns._compat import long, xrange, round_py2_compat
+
+
+# Powers of ten (10**0 .. 10**10) used by _exponent_of.
+_pows = tuple(long(10**i) for i in range(0, 11))
+
+# default values are in centimeters
+_default_size = 100.0
+_default_hprec = 1000000.0
+_default_vprec = 1000.0
+
+
+def _exponent_of(what, desc):
+    # Return the base-10 exponent of *what* (largest i with 10**i <= what).
+    # *desc* names the field for the SyntaxError raised when the value is
+    # outside the representable 10**0 .. 10**9 range.
+    if what == 0:
+        return 0
+    exp = None
+    for i in xrange(len(_pows)):
+        if what // _pows[i] == long(0):
+            exp = i - 1
+            break
+    if exp is None or exp < 0:
+        raise dns.exception.SyntaxError("%s value out of bounds" % desc)
+    return exp
+
+
+def _float_to_tuple(what):
+    # Convert a signed number of degrees into the internal
+    # (degrees, minutes, seconds, milliseconds, sign) tuple, with sign
+    # being 1 or -1.  Rounds to the nearest millisecond of arc first.
+    if what < 0:
+        sign = -1
+        what *= -1
+    else:
+        sign = 1
+    what = round_py2_compat(what * 3600000)
+    degrees = int(what // 3600000)
+    what -= degrees * 3600000
+    minutes = int(what // 60000)
+    what -= minutes * 60000
+    seconds = int(what // 1000)
+    what -= int(seconds * 1000)
+    what = int(what)
+    return (degrees, minutes, seconds, what, sign)
+
+
+def _tuple_to_float(what):
+    # Inverse of _float_to_tuple: collapse (degrees, minutes, seconds,
+    # milliseconds, sign) into a signed number of degrees.
+    (degrees, minutes, seconds, milliseconds, sign) = what
+    magnitude = float(degrees)
+    magnitude += minutes / 60.0
+    magnitude += seconds / 3600.0
+    magnitude += milliseconds / 3600000.0
+    return float(sign) * magnitude
+
+
+def _encode_size(what, desc):
+    # Pack a centimeter value into a single octet as base * 10**exponent:
+    # high nibble holds the base digit, low nibble the exponent.
+    what = long(what)
+    exponent = _exponent_of(what, desc) & 0xF
+    base = what // pow(10, exponent) & 0xF
+    return base * 16 + exponent
+
+
+def _decode_size(what, desc):
+    # Unpack a size octet (base nibble, exponent nibble) back into a
+    # centimeter value; both nibbles must be decimal digits.
+    exponent = what & 0x0F
+    if exponent > 9:
+        raise dns.exception.SyntaxError("bad %s exponent" % desc)
+    base = (what & 0xF0) >> 4
+    if base > 9:
+        raise dns.exception.SyntaxError("bad %s base" % desc)
+    return long(base) * pow(10, exponent)
+
+
+class LOC(dns.rdata.Rdata):
+
+    """LOC record
+
+    @ivar latitude: latitude
+    @type latitude: (int, int, int, int, sign) tuple specifying the degrees, minutes,
+    seconds, milliseconds, and sign of the coordinate.
+    @ivar longitude: longitude
+    @type longitude: (int, int, int, int, sign) tuple specifying the degrees,
+    minutes, seconds, milliseconds, and sign of the coordinate.
+    @ivar altitude: altitude
+    @type altitude: float
+    @ivar size: size of the sphere
+    @type size: float
+    @ivar horizontal_precision: horizontal precision
+    @type horizontal_precision: float
+    @ivar vertical_precision: vertical precision
+    @type vertical_precision: float
+    @see: RFC 1876"""
+
+    __slots__ = ['latitude', 'longitude', 'altitude', 'size',
+                 'horizontal_precision', 'vertical_precision']
+
+    def __init__(self, rdclass, rdtype, latitude, longitude, altitude,
+                 size=_default_size, hprec=_default_hprec,
+                 vprec=_default_vprec):
+        """Initialize a LOC record instance.
+
+        The parameters I{latitude} and I{longitude} may be either a 4-tuple
+        of integers specifying (degrees, minutes, seconds, milliseconds),
+        or they may be floating point values specifying the number of
+        degrees. The other parameters are floats. Size, horizontal precision,
+        and vertical precision are specified in centimeters."""
+
+        super(LOC, self).__init__(rdclass, rdtype)
+        # Numeric coordinates are normalized to the internal 5-tuple form.
+        if isinstance(latitude, int) or isinstance(latitude, long):
+            latitude = float(latitude)
+        if isinstance(latitude, float):
+            latitude = _float_to_tuple(latitude)
+        self.latitude = latitude
+        if isinstance(longitude, int) or isinstance(longitude, long):
+            longitude = float(longitude)
+        if isinstance(longitude, float):
+            longitude = _float_to_tuple(longitude)
+        self.longitude = longitude
+        self.altitude = float(altitude)
+        self.size = float(size)
+        self.horizontal_precision = float(hprec)
+        self.vertical_precision = float(vprec)
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        # The stored sign selects the hemisphere letter.
+        if self.latitude[4] > 0:
+            lat_hemisphere = 'N'
+        else:
+            lat_hemisphere = 'S'
+        if self.longitude[4] > 0:
+            long_hemisphere = 'E'
+        else:
+            long_hemisphere = 'W'
+        text = "%d %d %d.%03d %s %d %d %d.%03d %s %0.2fm" % (
+            self.latitude[0], self.latitude[1],
+            self.latitude[2], self.latitude[3], lat_hemisphere,
+            self.longitude[0], self.longitude[1], self.longitude[2],
+            self.longitude[3], long_hemisphere,
+            self.altitude / 100.0
+        )
+
+        # do not print default values
+        if self.size != _default_size or \
+           self.horizontal_precision != _default_hprec or \
+           self.vertical_precision != _default_vprec:
+            text += " {:0.2f}m {:0.2f}m {:0.2f}m".format(
+                self.size / 100.0, self.horizontal_precision / 100.0,
+                self.vertical_precision / 100.0
+            )
+        return text
+
+    @classmethod
+    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+        # Minutes, seconds/milliseconds, and the trailing size/precision
+        # fields are all optional in the presentation format, so parsing
+        # inspects each token's shape before consuming the next one.
+        latitude = [0, 0, 0, 0, 1]
+        longitude = [0, 0, 0, 0, 1]
+        size = _default_size
+        hprec = _default_hprec
+        vprec = _default_vprec
+
+        latitude[0] = tok.get_int()
+        t = tok.get_string()
+        if t.isdigit():
+            latitude[1] = int(t)
+            t = tok.get_string()
+            if '.' in t:
+                (seconds, milliseconds) = t.split('.')
+                if not seconds.isdigit():
+                    raise dns.exception.SyntaxError(
+                        'bad latitude seconds value')
+                latitude[2] = int(seconds)
+                if latitude[2] >= 60:
+                    raise dns.exception.SyntaxError('latitude seconds >= 60')
+                l = len(milliseconds)
+                if l == 0 or l > 3 or not milliseconds.isdigit():
+                    raise dns.exception.SyntaxError(
+                        'bad latitude milliseconds value')
+                # Scale 1-3 fractional digits to milliseconds.
+                if l == 1:
+                    m = 100
+                elif l == 2:
+                    m = 10
+                else:
+                    m = 1
+                latitude[3] = m * int(milliseconds)
+                t = tok.get_string()
+            elif t.isdigit():
+                latitude[2] = int(t)
+                t = tok.get_string()
+        if t == 'S':
+            latitude[4] = -1
+        elif t != 'N':
+            raise dns.exception.SyntaxError('bad latitude hemisphere value')
+
+        longitude[0] = tok.get_int()
+        t = tok.get_string()
+        if t.isdigit():
+            longitude[1] = int(t)
+            t = tok.get_string()
+            if '.' in t:
+                (seconds, milliseconds) = t.split('.')
+                if not seconds.isdigit():
+                    raise dns.exception.SyntaxError(
+                        'bad longitude seconds value')
+                longitude[2] = int(seconds)
+                if longitude[2] >= 60:
+                    raise dns.exception.SyntaxError('longitude seconds >= 60')
+                l = len(milliseconds)
+                if l == 0 or l > 3 or not milliseconds.isdigit():
+                    raise dns.exception.SyntaxError(
+                        'bad longitude milliseconds value')
+                if l == 1:
+                    m = 100
+                elif l == 2:
+                    m = 10
+                else:
+                    m = 1
+                longitude[3] = m * int(milliseconds)
+                t = tok.get_string()
+            elif t.isdigit():
+                longitude[2] = int(t)
+                t = tok.get_string()
+        if t == 'W':
+            longitude[4] = -1
+        elif t != 'E':
+            raise dns.exception.SyntaxError('bad longitude hemisphere value')
+
+        t = tok.get_string()
+        if t[-1] == 'm':
+            t = t[0: -1]
+        altitude = float(t) * 100.0        # m -> cm
+
+        token = tok.get().unescape()
+        if not token.is_eol_or_eof():
+            value = token.value
+            if value[-1] == 'm':
+                value = value[0: -1]
+            size = float(value) * 100.0        # m -> cm
+            token = tok.get().unescape()
+            if not token.is_eol_or_eof():
+                value = token.value
+                if value[-1] == 'm':
+                    value = value[0: -1]
+                hprec = float(value) * 100.0        # m -> cm
+                token = tok.get().unescape()
+                if not token.is_eol_or_eof():
+                    value = token.value
+                    if value[-1] == 'm':
+                        value = value[0: -1]
+                    vprec = float(value) * 100.0        # m -> cm
+                    tok.get_eol()
+
+        return cls(rdclass, rdtype, latitude, longitude, altitude,
+                   size, hprec, vprec)
+
+    def to_wire(self, file, compress=None, origin=None):
+        # Coordinates are encoded as milliseconds of arc biased by 2**31
+        # so the wire value is an unsigned 32-bit integer; altitude is in
+        # centimeters biased by 10,000,000.
+        milliseconds = (self.latitude[0] * 3600000 +
+                        self.latitude[1] * 60000 +
+                        self.latitude[2] * 1000 +
+                        self.latitude[3]) * self.latitude[4]
+        latitude = long(0x80000000) + milliseconds
+        milliseconds = (self.longitude[0] * 3600000 +
+                        self.longitude[1] * 60000 +
+                        self.longitude[2] * 1000 +
+                        self.longitude[3]) * self.longitude[4]
+        longitude = long(0x80000000) + milliseconds
+        altitude = long(self.altitude) + long(10000000)
+        size = _encode_size(self.size, "size")
+        hprec = _encode_size(self.horizontal_precision, "horizontal precision")
+        vprec = _encode_size(self.vertical_precision, "vertical precision")
+        wire = struct.pack("!BBBBIII", 0, size, hprec, vprec, latitude,
+                           longitude, altitude)
+        file.write(wire)
+
+    @classmethod
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+        # Reverse of to_wire: remove the 2**31 coordinate bias and the
+        # 10,000,000 cm altitude bias, then range-check the result.
+        (version, size, hprec, vprec, latitude, longitude, altitude) = \
+            struct.unpack("!BBBBIII", wire[current: current + rdlen])
+        if latitude > long(0x80000000):
+            latitude = float(latitude - long(0x80000000)) / 3600000
+        else:
+            latitude = -1 * float(long(0x80000000) - latitude) / 3600000
+        if latitude < -90.0 or latitude > 90.0:
+            raise dns.exception.FormError("bad latitude")
+        if longitude > long(0x80000000):
+            longitude = float(longitude - long(0x80000000)) / 3600000
+        else:
+            longitude = -1 * float(long(0x80000000) - longitude) / 3600000
+        if longitude < -180.0 or longitude > 180.0:
+            raise dns.exception.FormError("bad longitude")
+        altitude = float(altitude) - 10000000.0
+        size = _decode_size(size, "size")
+        hprec = _decode_size(hprec, "horizontal precision")
+        vprec = _decode_size(vprec, "vertical precision")
+        return cls(rdclass, rdtype, latitude, longitude, altitude,
+                   size, hprec, vprec)
+
+    def _get_float_latitude(self):
+        return _tuple_to_float(self.latitude)
+
+    def _set_float_latitude(self, value):
+        self.latitude = _float_to_tuple(value)
+
+    float_latitude = property(_get_float_latitude, _set_float_latitude,
+                              doc="latitude as a floating point value")
+
+    def _get_float_longitude(self):
+        return _tuple_to_float(self.longitude)
+
+    def _set_float_longitude(self, value):
+        self.longitude = _float_to_tuple(value)
+
+    float_longitude = property(_get_float_longitude, _set_float_longitude,
+                               doc="longitude as a floating point value")
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/MX.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/MX.py
new file mode 100644
index 0000000000..0a06494f73
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/MX.py
@@ -0,0 +1,23 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.mxbase
+
+
+class MX(dns.rdtypes.mxbase.MXBase):
+
+    """MX record
+
+    Behavior is inherited unchanged from dns.rdtypes.mxbase.MXBase."""
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/NS.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/NS.py
new file mode 100644
index 0000000000..f9fcf637f7
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/NS.py
@@ -0,0 +1,23 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.nsbase
+
+
+class NS(dns.rdtypes.nsbase.NSBase):
+
+    """NS record
+
+    Behavior is inherited unchanged from dns.rdtypes.nsbase.NSBase."""
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/NSEC.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/NSEC.py
new file mode 100644
index 0000000000..4e3da7296b
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/NSEC.py
@@ -0,0 +1,128 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.rdatatype
+import dns.name
+from dns._compat import xrange
+
+
+class NSEC(dns.rdata.Rdata):
+
+    """NSEC record
+
+    @ivar next: the next name
+    @type next: dns.name.Name object
+    @ivar windows: the windowed bitmap list
+    @type windows: list of (window number, string) tuples"""
+
+    __slots__ = ['next', 'windows']
+
+    def __init__(self, rdclass, rdtype, next, windows):
+        super(NSEC, self).__init__(rdclass, rdtype)
+        self.next = next
+        self.windows = windows
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        next = self.next.choose_relativity(origin, relativize)
+        text = ''
+        # Expand each (window, bitmap) pair into mnemonic type names:
+        # bit j of byte i in window w encodes type w*256 + i*8 + j.
+        for (window, bitmap) in self.windows:
+            bits = []
+            for i in xrange(0, len(bitmap)):
+                byte = bitmap[i]
+                for j in xrange(0, 8):
+                    if byte & (0x80 >> j):
+                        bits.append(dns.rdatatype.to_text(window * 256 +
+                                                          i * 8 + j))
+            text += (' ' + ' '.join(bits))
+        return '{}{}'.format(next, text)
+
+    @classmethod
+    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+        next = tok.get_name()
+        next = next.choose_relativity(origin, relativize)
+        rdtypes = []
+        while 1:
+            token = tok.get().unescape()
+            if token.is_eol_or_eof():
+                break
+            nrdtype = dns.rdatatype.from_text(token.value)
+            if nrdtype == 0:
+                raise dns.exception.SyntaxError("NSEC with bit 0")
+            if nrdtype > 65535:
+                raise dns.exception.SyntaxError("NSEC with bit > 65535")
+            rdtypes.append(nrdtype)
+        rdtypes.sort()
+        window = 0
+        octets = 0
+        prior_rdtype = 0
+        bitmap = bytearray(b'\0' * 32)
+        windows = []
+        # Build the windowed type bitmap: types are grouped into 256-bit
+        # windows, and each window's bitmap is trimmed to the last
+        # non-zero octet.
+        for nrdtype in rdtypes:
+            if nrdtype == prior_rdtype:
+                continue
+            prior_rdtype = nrdtype
+            new_window = nrdtype // 256
+            if new_window != window:
+                windows.append((window, bitmap[0:octets]))
+                bitmap = bytearray(b'\0' * 32)
+                window = new_window
+            offset = nrdtype % 256
+            byte = offset // 8
+            bit = offset % 8
+            octets = byte + 1
+            bitmap[byte] = bitmap[byte] | (0x80 >> bit)
+
+        windows.append((window, bitmap[0:octets]))
+        return cls(rdclass, rdtype, next, windows)
+
+    def to_wire(self, file, compress=None, origin=None):
+        # compress=None: the next name is always written uncompressed.
+        self.next.to_wire(file, None, origin)
+        for (window, bitmap) in self.windows:
+            file.write(struct.pack('!BB', window, len(bitmap)))
+            file.write(bitmap)
+
+    @classmethod
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+        (next, cused) = dns.name.from_wire(wire[: current + rdlen], current)
+        current += cused
+        rdlen -= cused
+        windows = []
+        # Each window is: window number octet, octet count (1-32), bitmap.
+        while rdlen > 0:
+            if rdlen < 3:
+                raise dns.exception.FormError("NSEC too short")
+            window = wire[current]
+            octets = wire[current + 1]
+            if octets == 0 or octets > 32:
+                raise dns.exception.FormError("bad NSEC octets")
+            current += 2
+            rdlen -= 2
+            if rdlen < octets:
+                raise dns.exception.FormError("bad NSEC bitmap length")
+            bitmap = bytearray(wire[current: current + octets].unwrap())
+            current += octets
+            rdlen -= octets
+            windows.append((window, bitmap))
+        if origin is not None:
+            next = next.relativize(origin)
+        return cls(rdclass, rdtype, next, windows)
+
+    def choose_relativity(self, origin=None, relativize=True):
+        self.next = self.next.choose_relativity(origin, relativize)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/NSEC3.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/NSEC3.py
new file mode 100644
index 0000000000..1c281c4a4d
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/NSEC3.py
@@ -0,0 +1,196 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2004-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import base64
+import binascii
+import string
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.rdatatype
+from dns._compat import xrange, text_type, PY3
+
+# pylint: disable=deprecated-string-function
+if PY3:
+ b32_hex_to_normal = bytes.maketrans(b'0123456789ABCDEFGHIJKLMNOPQRSTUV',
+ b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567')
+ b32_normal_to_hex = bytes.maketrans(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567',
+ b'0123456789ABCDEFGHIJKLMNOPQRSTUV')
+else:
+ b32_hex_to_normal = string.maketrans('0123456789ABCDEFGHIJKLMNOPQRSTUV',
+ 'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567')
+ b32_normal_to_hex = string.maketrans('ABCDEFGHIJKLMNOPQRSTUVWXYZ234567',
+ '0123456789ABCDEFGHIJKLMNOPQRSTUV')
+# pylint: enable=deprecated-string-function
+
+
+# hash algorithm constants
+SHA1 = 1
+
+# flag constants
+OPTOUT = 1
+
+
+class NSEC3(dns.rdata.Rdata):
+
+ """NSEC3 record
+
+ @ivar algorithm: the hash algorithm number
+ @type algorithm: int
+ @ivar flags: the flags
+ @type flags: int
+ @ivar iterations: the number of iterations
+ @type iterations: int
+ @ivar salt: the salt
+ @type salt: string
+ @ivar next: the next name hash
+ @type next: string
+ @ivar windows: the windowed bitmap list
+ @type windows: list of (window number, string) tuples"""
+
+ __slots__ = ['algorithm', 'flags', 'iterations', 'salt', 'next', 'windows']
+
+ def __init__(self, rdclass, rdtype, algorithm, flags, iterations, salt,
+ next, windows):
+ super(NSEC3, self).__init__(rdclass, rdtype)
+ self.algorithm = algorithm
+ self.flags = flags
+ self.iterations = iterations
+ if isinstance(salt, text_type):
+ self.salt = salt.encode()
+ else:
+ self.salt = salt
+ self.next = next
+ self.windows = windows
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ next = base64.b32encode(self.next).translate(
+ b32_normal_to_hex).lower().decode()
+ if self.salt == b'':
+ salt = '-'
+ else:
+ salt = binascii.hexlify(self.salt).decode()
+ text = u''
+ for (window, bitmap) in self.windows:
+ bits = []
+ for i in xrange(0, len(bitmap)):
+ byte = bitmap[i]
+ for j in xrange(0, 8):
+ if byte & (0x80 >> j):
+ bits.append(dns.rdatatype.to_text(window * 256 +
+ i * 8 + j))
+ text += (u' ' + u' '.join(bits))
+ return u'%u %u %u %s %s%s' % (self.algorithm, self.flags,
+ self.iterations, salt, next, text)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ algorithm = tok.get_uint8()
+ flags = tok.get_uint8()
+ iterations = tok.get_uint16()
+ salt = tok.get_string()
+ if salt == u'-':
+ salt = b''
+ else:
+ salt = binascii.unhexlify(salt.encode('ascii'))
+ next = tok.get_string().encode(
+ 'ascii').upper().translate(b32_hex_to_normal)
+ next = base64.b32decode(next)
+ rdtypes = []
+ while 1:
+ token = tok.get().unescape()
+ if token.is_eol_or_eof():
+ break
+ nrdtype = dns.rdatatype.from_text(token.value)
+ if nrdtype == 0:
+ raise dns.exception.SyntaxError("NSEC3 with bit 0")
+ if nrdtype > 65535:
+ raise dns.exception.SyntaxError("NSEC3 with bit > 65535")
+ rdtypes.append(nrdtype)
+ rdtypes.sort()
+ window = 0
+ octets = 0
+ prior_rdtype = 0
+ bitmap = bytearray(b'\0' * 32)
+ windows = []
+ for nrdtype in rdtypes:
+ if nrdtype == prior_rdtype:
+ continue
+ prior_rdtype = nrdtype
+ new_window = nrdtype // 256
+ if new_window != window:
+ if octets != 0:
+ windows.append((window, bitmap[0:octets]))
+ bitmap = bytearray(b'\0' * 32)
+ window = new_window
+ offset = nrdtype % 256
+ byte = offset // 8
+ bit = offset % 8
+ octets = byte + 1
+ bitmap[byte] = bitmap[byte] | (0x80 >> bit)
+ if octets != 0:
+ windows.append((window, bitmap[0:octets]))
+ return cls(rdclass, rdtype, algorithm, flags, iterations, salt, next,
+ windows)
+
+ def to_wire(self, file, compress=None, origin=None):
+ l = len(self.salt)
+ file.write(struct.pack("!BBHB", self.algorithm, self.flags,
+ self.iterations, l))
+ file.write(self.salt)
+ l = len(self.next)
+ file.write(struct.pack("!B", l))
+ file.write(self.next)
+ for (window, bitmap) in self.windows:
+ file.write(struct.pack("!BB", window, len(bitmap)))
+ file.write(bitmap)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ (algorithm, flags, iterations, slen) = \
+ struct.unpack('!BBHB', wire[current: current + 5])
+
+ current += 5
+ rdlen -= 5
+ salt = wire[current: current + slen].unwrap()
+ current += slen
+ rdlen -= slen
+ nlen = wire[current]
+ current += 1
+ rdlen -= 1
+ next = wire[current: current + nlen].unwrap()
+ current += nlen
+ rdlen -= nlen
+ windows = []
+ while rdlen > 0:
+ if rdlen < 3:
+ raise dns.exception.FormError("NSEC3 too short")
+ window = wire[current]
+ octets = wire[current + 1]
+ if octets == 0 or octets > 32:
+ raise dns.exception.FormError("bad NSEC3 octets")
+ current += 2
+ rdlen -= 2
+ if rdlen < octets:
+ raise dns.exception.FormError("bad NSEC3 bitmap length")
+ bitmap = bytearray(wire[current: current + octets].unwrap())
+ current += octets
+ rdlen -= octets
+ windows.append((window, bitmap))
+ return cls(rdclass, rdtype, algorithm, flags, iterations, salt, next,
+ windows)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/NSEC3PARAM.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/NSEC3PARAM.py
new file mode 100644
index 0000000000..87c36e5675
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/NSEC3PARAM.py
@@ -0,0 +1,90 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+import binascii
+
+import dns.exception
+import dns.rdata
+from dns._compat import text_type
+
+
+class NSEC3PARAM(dns.rdata.Rdata):
+
+ """NSEC3PARAM record
+
+ @ivar algorithm: the hash algorithm number
+ @type algorithm: int
+ @ivar flags: the flags
+ @type flags: int
+ @ivar iterations: the number of iterations
+ @type iterations: int
+ @ivar salt: the salt
+ @type salt: string"""
+
+ __slots__ = ['algorithm', 'flags', 'iterations', 'salt']
+
+ def __init__(self, rdclass, rdtype, algorithm, flags, iterations, salt):
+ super(NSEC3PARAM, self).__init__(rdclass, rdtype)
+ self.algorithm = algorithm
+ self.flags = flags
+ self.iterations = iterations
+ if isinstance(salt, text_type):
+ self.salt = salt.encode()
+ else:
+ self.salt = salt
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ if self.salt == b'':
+ salt = '-'
+ else:
+ salt = binascii.hexlify(self.salt).decode()
+ return '%u %u %u %s' % (self.algorithm, self.flags, self.iterations,
+ salt)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ algorithm = tok.get_uint8()
+ flags = tok.get_uint8()
+ iterations = tok.get_uint16()
+ salt = tok.get_string()
+ if salt == '-':
+ salt = ''
+ else:
+ salt = binascii.unhexlify(salt.encode())
+ tok.get_eol()
+ return cls(rdclass, rdtype, algorithm, flags, iterations, salt)
+
+ def to_wire(self, file, compress=None, origin=None):
+ l = len(self.salt)
+ file.write(struct.pack("!BBHB", self.algorithm, self.flags,
+ self.iterations, l))
+ file.write(self.salt)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ (algorithm, flags, iterations, slen) = \
+ struct.unpack('!BBHB',
+ wire[current: current + 5])
+ current += 5
+ rdlen -= 5
+ salt = wire[current: current + slen].unwrap()
+ current += slen
+ rdlen -= slen
+ if rdlen != 0:
+ raise dns.exception.FormError
+ return cls(rdclass, rdtype, algorithm, flags, iterations, salt)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/OPENPGPKEY.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/OPENPGPKEY.py
new file mode 100644
index 0000000000..a066cf98df
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/OPENPGPKEY.py
@@ -0,0 +1,60 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2016 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import base64
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+
+class OPENPGPKEY(dns.rdata.Rdata):
+
+ """OPENPGPKEY record
+
+ @ivar key: the key
+ @type key: bytes
+ @see: RFC 7929
+ """
+
+ def __init__(self, rdclass, rdtype, key):
+ super(OPENPGPKEY, self).__init__(rdclass, rdtype)
+ self.key = key
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return dns.rdata._base64ify(self.key)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ chunks = []
+ while 1:
+ t = tok.get().unescape()
+ if t.is_eol_or_eof():
+ break
+ if not t.is_identifier():
+ raise dns.exception.SyntaxError
+ chunks.append(t.value.encode())
+ b64 = b''.join(chunks)
+ key = base64.b64decode(b64)
+ return cls(rdclass, rdtype, key)
+
+ def to_wire(self, file, compress=None, origin=None):
+ file.write(self.key)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ key = wire[current: current + rdlen].unwrap()
+ return cls(rdclass, rdtype, key)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/PTR.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/PTR.py
new file mode 100644
index 0000000000..20cd50761d
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/PTR.py
@@ -0,0 +1,23 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.nsbase
+
+
+class PTR(dns.rdtypes.nsbase.NSBase):
+
+ """PTR record"""
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/RP.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/RP.py
new file mode 100644
index 0000000000..8f07be9071
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/RP.py
@@ -0,0 +1,82 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+
+class RP(dns.rdata.Rdata):
+
+ """RP record
+
+ @ivar mbox: The responsible person's mailbox
+ @type mbox: dns.name.Name object
+ @ivar txt: The owner name of a node with TXT records, or the root name
+ if no TXT records are associated with this RP.
+ @type txt: dns.name.Name object
+ @see: RFC 1183"""
+
+ __slots__ = ['mbox', 'txt']
+
+ def __init__(self, rdclass, rdtype, mbox, txt):
+ super(RP, self).__init__(rdclass, rdtype)
+ self.mbox = mbox
+ self.txt = txt
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ mbox = self.mbox.choose_relativity(origin, relativize)
+ txt = self.txt.choose_relativity(origin, relativize)
+ return "{} {}".format(str(mbox), str(txt))
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ mbox = tok.get_name()
+ txt = tok.get_name()
+ mbox = mbox.choose_relativity(origin, relativize)
+ txt = txt.choose_relativity(origin, relativize)
+ tok.get_eol()
+ return cls(rdclass, rdtype, mbox, txt)
+
+ def to_wire(self, file, compress=None, origin=None):
+ self.mbox.to_wire(file, None, origin)
+ self.txt.to_wire(file, None, origin)
+
+ def to_digestable(self, origin=None):
+ return self.mbox.to_digestable(origin) + \
+ self.txt.to_digestable(origin)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ (mbox, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ current += cused
+ rdlen -= cused
+ if rdlen <= 0:
+ raise dns.exception.FormError
+ (txt, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ if cused != rdlen:
+ raise dns.exception.FormError
+ if origin is not None:
+ mbox = mbox.relativize(origin)
+ txt = txt.relativize(origin)
+ return cls(rdclass, rdtype, mbox, txt)
+
+ def choose_relativity(self, origin=None, relativize=True):
+ self.mbox = self.mbox.choose_relativity(origin, relativize)
+ self.txt = self.txt.choose_relativity(origin, relativize)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/RRSIG.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/RRSIG.py
new file mode 100644
index 0000000000..d3756ece4e
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/RRSIG.py
@@ -0,0 +1,158 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import base64
+import calendar
+import struct
+import time
+
+import dns.dnssec
+import dns.exception
+import dns.rdata
+import dns.rdatatype
+
+
+class BadSigTime(dns.exception.DNSException):
+
+ """Time in DNS SIG or RRSIG resource record cannot be parsed."""
+
+
+def sigtime_to_posixtime(what):
+ if len(what) != 14:
+ raise BadSigTime
+ year = int(what[0:4])
+ month = int(what[4:6])
+ day = int(what[6:8])
+ hour = int(what[8:10])
+ minute = int(what[10:12])
+ second = int(what[12:14])
+ return calendar.timegm((year, month, day, hour, minute, second,
+ 0, 0, 0))
+
+
+def posixtime_to_sigtime(what):
+ return time.strftime('%Y%m%d%H%M%S', time.gmtime(what))
+
+
+class RRSIG(dns.rdata.Rdata):
+
+ """RRSIG record
+
+ @ivar type_covered: the rdata type this signature covers
+ @type type_covered: int
+ @ivar algorithm: the algorithm used for the sig
+ @type algorithm: int
+ @ivar labels: number of labels
+ @type labels: int
+ @ivar original_ttl: the original TTL
+ @type original_ttl: long
+ @ivar expiration: signature expiration time
+ @type expiration: long
+ @ivar inception: signature inception time
+ @type inception: long
+ @ivar key_tag: the key tag
+ @type key_tag: int
+ @ivar signer: the signer
+ @type signer: dns.name.Name object
+ @ivar signature: the signature
+ @type signature: string"""
+
+ __slots__ = ['type_covered', 'algorithm', 'labels', 'original_ttl',
+ 'expiration', 'inception', 'key_tag', 'signer',
+ 'signature']
+
+ def __init__(self, rdclass, rdtype, type_covered, algorithm, labels,
+ original_ttl, expiration, inception, key_tag, signer,
+ signature):
+ super(RRSIG, self).__init__(rdclass, rdtype)
+ self.type_covered = type_covered
+ self.algorithm = algorithm
+ self.labels = labels
+ self.original_ttl = original_ttl
+ self.expiration = expiration
+ self.inception = inception
+ self.key_tag = key_tag
+ self.signer = signer
+ self.signature = signature
+
+ def covers(self):
+ return self.type_covered
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return '%s %d %d %d %s %s %d %s %s' % (
+ dns.rdatatype.to_text(self.type_covered),
+ self.algorithm,
+ self.labels,
+ self.original_ttl,
+ posixtime_to_sigtime(self.expiration),
+ posixtime_to_sigtime(self.inception),
+ self.key_tag,
+ self.signer.choose_relativity(origin, relativize),
+ dns.rdata._base64ify(self.signature)
+ )
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ type_covered = dns.rdatatype.from_text(tok.get_string())
+ algorithm = dns.dnssec.algorithm_from_text(tok.get_string())
+ labels = tok.get_int()
+ original_ttl = tok.get_ttl()
+ expiration = sigtime_to_posixtime(tok.get_string())
+ inception = sigtime_to_posixtime(tok.get_string())
+ key_tag = tok.get_int()
+ signer = tok.get_name()
+ signer = signer.choose_relativity(origin, relativize)
+ chunks = []
+ while 1:
+ t = tok.get().unescape()
+ if t.is_eol_or_eof():
+ break
+ if not t.is_identifier():
+ raise dns.exception.SyntaxError
+ chunks.append(t.value.encode())
+ b64 = b''.join(chunks)
+ signature = base64.b64decode(b64)
+ return cls(rdclass, rdtype, type_covered, algorithm, labels,
+ original_ttl, expiration, inception, key_tag, signer,
+ signature)
+
+ def to_wire(self, file, compress=None, origin=None):
+ header = struct.pack('!HBBIIIH', self.type_covered,
+ self.algorithm, self.labels,
+ self.original_ttl, self.expiration,
+ self.inception, self.key_tag)
+ file.write(header)
+ self.signer.to_wire(file, None, origin)
+ file.write(self.signature)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ header = struct.unpack('!HBBIIIH', wire[current: current + 18])
+ current += 18
+ rdlen -= 18
+ (signer, cused) = dns.name.from_wire(wire[: current + rdlen], current)
+ current += cused
+ rdlen -= cused
+ if origin is not None:
+ signer = signer.relativize(origin)
+ signature = wire[current: current + rdlen].unwrap()
+ return cls(rdclass, rdtype, header[0], header[1], header[2],
+ header[3], header[4], header[5], header[6], signer,
+ signature)
+
+ def choose_relativity(self, origin=None, relativize=True):
+ self.signer = self.signer.choose_relativity(origin, relativize)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/RT.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/RT.py
new file mode 100644
index 0000000000..d0feb79e9d
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/RT.py
@@ -0,0 +1,23 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.mxbase
+
+
+class RT(dns.rdtypes.mxbase.UncompressedDowncasingMX):
+
+ """RT record"""
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/SOA.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/SOA.py
new file mode 100644
index 0000000000..aec81cad8a
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/SOA.py
@@ -0,0 +1,116 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+
+class SOA(dns.rdata.Rdata):
+
+ """SOA record
+
+ @ivar mname: the SOA MNAME (master name) field
+ @type mname: dns.name.Name object
+ @ivar rname: the SOA RNAME (responsible name) field
+ @type rname: dns.name.Name object
+ @ivar serial: The zone's serial number
+ @type serial: int
+ @ivar refresh: The zone's refresh value (in seconds)
+ @type refresh: int
+ @ivar retry: The zone's retry value (in seconds)
+ @type retry: int
+ @ivar expire: The zone's expiration value (in seconds)
+ @type expire: int
+ @ivar minimum: The zone's negative caching time (in seconds, called
+ "minimum" for historical reasons)
+ @type minimum: int
+ @see: RFC 1035"""
+
+ __slots__ = ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire',
+ 'minimum']
+
+ def __init__(self, rdclass, rdtype, mname, rname, serial, refresh, retry,
+ expire, minimum):
+ super(SOA, self).__init__(rdclass, rdtype)
+ self.mname = mname
+ self.rname = rname
+ self.serial = serial
+ self.refresh = refresh
+ self.retry = retry
+ self.expire = expire
+ self.minimum = minimum
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ mname = self.mname.choose_relativity(origin, relativize)
+ rname = self.rname.choose_relativity(origin, relativize)
+ return '%s %s %d %d %d %d %d' % (
+ mname, rname, self.serial, self.refresh, self.retry,
+ self.expire, self.minimum)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ mname = tok.get_name()
+ rname = tok.get_name()
+ mname = mname.choose_relativity(origin, relativize)
+ rname = rname.choose_relativity(origin, relativize)
+ serial = tok.get_uint32()
+ refresh = tok.get_ttl()
+ retry = tok.get_ttl()
+ expire = tok.get_ttl()
+ minimum = tok.get_ttl()
+ tok.get_eol()
+ return cls(rdclass, rdtype, mname, rname, serial, refresh, retry,
+ expire, minimum)
+
+ def to_wire(self, file, compress=None, origin=None):
+ self.mname.to_wire(file, compress, origin)
+ self.rname.to_wire(file, compress, origin)
+ five_ints = struct.pack('!IIIII', self.serial, self.refresh,
+ self.retry, self.expire, self.minimum)
+ file.write(five_ints)
+
+ def to_digestable(self, origin=None):
+ return self.mname.to_digestable(origin) + \
+ self.rname.to_digestable(origin) + \
+ struct.pack('!IIIII', self.serial, self.refresh,
+ self.retry, self.expire, self.minimum)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ (mname, cused) = dns.name.from_wire(wire[: current + rdlen], current)
+ current += cused
+ rdlen -= cused
+ (rname, cused) = dns.name.from_wire(wire[: current + rdlen], current)
+ current += cused
+ rdlen -= cused
+ if rdlen != 20:
+ raise dns.exception.FormError
+ five_ints = struct.unpack('!IIIII',
+ wire[current: current + rdlen])
+ if origin is not None:
+ mname = mname.relativize(origin)
+ rname = rname.relativize(origin)
+ return cls(rdclass, rdtype, mname, rname,
+ five_ints[0], five_ints[1], five_ints[2], five_ints[3],
+ five_ints[4])
+
+ def choose_relativity(self, origin=None, relativize=True):
+ self.mname = self.mname.choose_relativity(origin, relativize)
+ self.rname = self.rname.choose_relativity(origin, relativize)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/SPF.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/SPF.py
new file mode 100644
index 0000000000..41dee62387
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/SPF.py
@@ -0,0 +1,25 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.txtbase
+
+
+class SPF(dns.rdtypes.txtbase.TXTBase):
+
+ """SPF record
+
+ @see: RFC 4408"""
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/SSHFP.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/SSHFP.py
new file mode 100644
index 0000000000..c18311e906
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/SSHFP.py
@@ -0,0 +1,79 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2005-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+import binascii
+
+import dns.rdata
+import dns.rdatatype
+
+
+class SSHFP(dns.rdata.Rdata):
+
+ """SSHFP record
+
+ @ivar algorithm: the algorithm
+ @type algorithm: int
+ @ivar fp_type: the digest type
+ @type fp_type: int
+ @ivar fingerprint: the fingerprint
+ @type fingerprint: string
+ @see: draft-ietf-secsh-dns-05.txt"""
+
+ __slots__ = ['algorithm', 'fp_type', 'fingerprint']
+
+ def __init__(self, rdclass, rdtype, algorithm, fp_type,
+ fingerprint):
+ super(SSHFP, self).__init__(rdclass, rdtype)
+ self.algorithm = algorithm
+ self.fp_type = fp_type
+ self.fingerprint = fingerprint
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return '%d %d %s' % (self.algorithm,
+ self.fp_type,
+ dns.rdata._hexify(self.fingerprint,
+ chunksize=128))
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ algorithm = tok.get_uint8()
+ fp_type = tok.get_uint8()
+ chunks = []
+ while 1:
+ t = tok.get().unescape()
+ if t.is_eol_or_eof():
+ break
+ if not t.is_identifier():
+ raise dns.exception.SyntaxError
+ chunks.append(t.value.encode())
+ fingerprint = b''.join(chunks)
+ fingerprint = binascii.unhexlify(fingerprint)
+ return cls(rdclass, rdtype, algorithm, fp_type, fingerprint)
+
+ def to_wire(self, file, compress=None, origin=None):
+ header = struct.pack("!BB", self.algorithm, self.fp_type)
+ file.write(header)
+ file.write(self.fingerprint)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ header = struct.unpack("!BB", wire[current: current + 2])
+ current += 2
+ rdlen -= 2
+ fingerprint = wire[current: current + rdlen].unwrap()
+ return cls(rdclass, rdtype, header[0], header[1], fingerprint)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/TLSA.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/TLSA.py
new file mode 100644
index 0000000000..a135c2b3da
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/TLSA.py
@@ -0,0 +1,84 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2005-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+import binascii
+
+import dns.rdata
+import dns.rdatatype
+
+
+class TLSA(dns.rdata.Rdata):
+
+ """TLSA record
+
+ @ivar usage: The certificate usage
+ @type usage: int
+ @ivar selector: The selector field
+ @type selector: int
+ @ivar mtype: The 'matching type' field
+ @type mtype: int
+ @ivar cert: The 'Certificate Association Data' field
+ @type cert: string
+ @see: RFC 6698"""
+
+ __slots__ = ['usage', 'selector', 'mtype', 'cert']
+
+ def __init__(self, rdclass, rdtype, usage, selector,
+ mtype, cert):
+ super(TLSA, self).__init__(rdclass, rdtype)
+ self.usage = usage
+ self.selector = selector
+ self.mtype = mtype
+ self.cert = cert
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return '%d %d %d %s' % (self.usage,
+ self.selector,
+ self.mtype,
+ dns.rdata._hexify(self.cert,
+ chunksize=128))
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ usage = tok.get_uint8()
+ selector = tok.get_uint8()
+ mtype = tok.get_uint8()
+ cert_chunks = []
+ while 1:
+ t = tok.get().unescape()
+ if t.is_eol_or_eof():
+ break
+ if not t.is_identifier():
+ raise dns.exception.SyntaxError
+ cert_chunks.append(t.value.encode())
+ cert = b''.join(cert_chunks)
+ cert = binascii.unhexlify(cert)
+ return cls(rdclass, rdtype, usage, selector, mtype, cert)
+
+ def to_wire(self, file, compress=None, origin=None):
+ header = struct.pack("!BBB", self.usage, self.selector, self.mtype)
+ file.write(header)
+ file.write(self.cert)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ header = struct.unpack("!BBB", wire[current: current + 3])
+ current += 3
+ rdlen -= 3
+ cert = wire[current: current + rdlen].unwrap()
+ return cls(rdclass, rdtype, header[0], header[1], header[2], cert)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/TXT.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/TXT.py
new file mode 100644
index 0000000000..c5ae919c5e
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/TXT.py
@@ -0,0 +1,23 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.txtbase
+
+
+class TXT(dns.rdtypes.txtbase.TXTBase):
+
+ """TXT record"""
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/URI.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/URI.py
new file mode 100644
index 0000000000..f5b65ed6a9
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/URI.py
@@ -0,0 +1,82 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+# Copyright (C) 2015 Red Hat, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.name
+from dns._compat import text_type
+
+
+class URI(dns.rdata.Rdata):
+
+ """URI record
+
+ @ivar priority: the priority
+ @type priority: int
+ @ivar weight: the weight
+ @type weight: int
+ @ivar target: the target host
+ @type target: dns.name.Name object
+ @see: draft-faltstrom-uri-13"""
+
+ __slots__ = ['priority', 'weight', 'target']
+
+ def __init__(self, rdclass, rdtype, priority, weight, target):
+ super(URI, self).__init__(rdclass, rdtype)
+ self.priority = priority
+ self.weight = weight
+ if len(target) < 1:
+ raise dns.exception.SyntaxError("URI target cannot be empty")
+ if isinstance(target, text_type):
+ self.target = target.encode()
+ else:
+ self.target = target
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return '%d %d "%s"' % (self.priority, self.weight,
+ self.target.decode())
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ priority = tok.get_uint16()
+ weight = tok.get_uint16()
+ target = tok.get().unescape()
+ if not (target.is_quoted_string() or target.is_identifier()):
+ raise dns.exception.SyntaxError("URI target must be a string")
+ tok.get_eol()
+ return cls(rdclass, rdtype, priority, weight, target.value)
+
+ def to_wire(self, file, compress=None, origin=None):
+ two_ints = struct.pack("!HH", self.priority, self.weight)
+ file.write(two_ints)
+ file.write(self.target)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ if rdlen < 5:
+ raise dns.exception.FormError('URI RR is shorter than 5 octets')
+
+ (priority, weight) = struct.unpack('!HH', wire[current: current + 4])
+ current += 4
+ rdlen -= 4
+ target = wire[current: current + rdlen]
+ current += rdlen
+
+ return cls(rdclass, rdtype, priority, weight, target)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/X25.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/X25.py
new file mode 100644
index 0000000000..e530a2c2a6
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/X25.py
@@ -0,0 +1,66 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+from dns._compat import text_type
+
+
+class X25(dns.rdata.Rdata):
+
+ """X25 record
+
+ @ivar address: the PSDN address
+ @type address: string
+ @see: RFC 1183"""
+
+ __slots__ = ['address']
+
+ def __init__(self, rdclass, rdtype, address):
+ super(X25, self).__init__(rdclass, rdtype)
+ if isinstance(address, text_type):
+ self.address = address.encode()
+ else:
+ self.address = address
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return '"%s"' % dns.rdata._escapify(self.address)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ address = tok.get_string()
+ tok.get_eol()
+ return cls(rdclass, rdtype, address)
+
+ def to_wire(self, file, compress=None, origin=None):
+ l = len(self.address)
+ assert l < 256
+ file.write(struct.pack('!B', l))
+ file.write(self.address)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ l = wire[current]
+ current += 1
+ rdlen -= 1
+ if l != rdlen:
+ raise dns.exception.FormError
+ address = wire[current: current + l].unwrap()
+ return cls(rdclass, rdtype, address)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/ANY/__init__.py b/openpype/vendor/python/python_2/dns/rdtypes/ANY/__init__.py
new file mode 100644
index 0000000000..ca41ef8055
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/ANY/__init__.py
@@ -0,0 +1,57 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Class ANY (generic) rdata type classes."""
+
+__all__ = [
+ 'AFSDB',
+ 'AVC',
+ 'CAA',
+ 'CDNSKEY',
+ 'CDS',
+ 'CERT',
+ 'CNAME',
+ 'CSYNC',
+ 'DLV',
+ 'DNAME',
+ 'DNSKEY',
+ 'DS',
+ 'EUI48',
+ 'EUI64',
+ 'GPOS',
+ 'HINFO',
+ 'HIP',
+ 'ISDN',
+ 'LOC',
+ 'MX',
+ 'NS',
+ 'NSEC',
+ 'NSEC3',
+ 'NSEC3PARAM',
+ 'OPENPGPKEY',
+ 'PTR',
+ 'RP',
+ 'RRSIG',
+ 'RT',
+ 'SOA',
+ 'SPF',
+ 'SSHFP',
+ 'TLSA',
+ 'TXT',
+ 'URI',
+ 'X25',
+]
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/CH/A.py b/openpype/vendor/python/python_2/dns/rdtypes/CH/A.py
new file mode 100644
index 0000000000..e65d192d82
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/CH/A.py
@@ -0,0 +1,70 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.mxbase
+import struct
+
+class A(dns.rdtypes.mxbase.MXBase):
+
+ """A record for Chaosnet
+ @ivar domain: the domain of the address
+ @type domain: dns.name.Name object
+ @ivar address: the 16-bit address
+ @type address: int"""
+
+ __slots__ = ['domain', 'address']
+
+ def __init__(self, rdclass, rdtype, address, domain):
+ super(A, self).__init__(rdclass, rdtype, address, domain)
+ self.domain = domain
+ self.address = address
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ domain = self.domain.choose_relativity(origin, relativize)
+ return '%s %o' % (domain, self.address)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ domain = tok.get_name()
+ address = tok.get_uint16(base=8)
+ domain = domain.choose_relativity(origin, relativize)
+ tok.get_eol()
+ return cls(rdclass, rdtype, address, domain)
+
+ def to_wire(self, file, compress=None, origin=None):
+ self.domain.to_wire(file, compress, origin)
+ pref = struct.pack("!H", self.address)
+ file.write(pref)
+
+ def to_digestable(self, origin=None):
+ return self.domain.to_digestable(origin) + \
+ struct.pack("!H", self.address)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ (domain, cused) = dns.name.from_wire(wire[: current + rdlen-2],
+ current)
+ current += cused
+ (address,) = struct.unpack('!H', wire[current: current + 2])
+ if cused+2 != rdlen:
+ raise dns.exception.FormError
+ if origin is not None:
+ domain = domain.relativize(origin)
+ return cls(rdclass, rdtype, address, domain)
+
+ def choose_relativity(self, origin=None, relativize=True):
+ self.domain = self.domain.choose_relativity(origin, relativize)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/CH/__init__.py b/openpype/vendor/python/python_2/dns/rdtypes/CH/__init__.py
new file mode 100644
index 0000000000..7184a7332a
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/CH/__init__.py
@@ -0,0 +1,22 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Class CH rdata type classes."""
+
+__all__ = [
+ 'A',
+]
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/A.py b/openpype/vendor/python/python_2/dns/rdtypes/IN/A.py
new file mode 100644
index 0000000000..8998982462
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/IN/A.py
@@ -0,0 +1,54 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.ipv4
+import dns.rdata
+import dns.tokenizer
+
+
+class A(dns.rdata.Rdata):
+
+ """A record.
+
+ @ivar address: an IPv4 address
+ @type address: string (in the standard "dotted quad" format)"""
+
+ __slots__ = ['address']
+
+ def __init__(self, rdclass, rdtype, address):
+ super(A, self).__init__(rdclass, rdtype)
+ # check that it's OK
+ dns.ipv4.inet_aton(address)
+ self.address = address
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return self.address
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ address = tok.get_identifier()
+ tok.get_eol()
+ return cls(rdclass, rdtype, address)
+
+ def to_wire(self, file, compress=None, origin=None):
+ file.write(dns.ipv4.inet_aton(self.address))
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ address = dns.ipv4.inet_ntoa(wire[current: current + rdlen])
+ return cls(rdclass, rdtype, address)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/AAAA.py b/openpype/vendor/python/python_2/dns/rdtypes/IN/AAAA.py
new file mode 100644
index 0000000000..a77c5bf2a5
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/IN/AAAA.py
@@ -0,0 +1,55 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.inet
+import dns.rdata
+import dns.tokenizer
+
+
+class AAAA(dns.rdata.Rdata):
+
+ """AAAA record.
+
+ @ivar address: an IPv6 address
+ @type address: string (in the standard IPv6 format)"""
+
+ __slots__ = ['address']
+
+ def __init__(self, rdclass, rdtype, address):
+ super(AAAA, self).__init__(rdclass, rdtype)
+ # check that it's OK
+ dns.inet.inet_pton(dns.inet.AF_INET6, address)
+ self.address = address
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return self.address
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ address = tok.get_identifier()
+ tok.get_eol()
+ return cls(rdclass, rdtype, address)
+
+ def to_wire(self, file, compress=None, origin=None):
+ file.write(dns.inet.inet_pton(dns.inet.AF_INET6, self.address))
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ address = dns.inet.inet_ntop(dns.inet.AF_INET6,
+ wire[current: current + rdlen])
+ return cls(rdclass, rdtype, address)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/APL.py b/openpype/vendor/python/python_2/dns/rdtypes/IN/APL.py
new file mode 100644
index 0000000000..48faf88ab7
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/IN/APL.py
@@ -0,0 +1,165 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import binascii
+import codecs
+import struct
+
+import dns.exception
+import dns.inet
+import dns.rdata
+import dns.tokenizer
+from dns._compat import xrange, maybe_chr
+
+
+class APLItem(object):
+
+ """An APL list item.
+
+ @ivar family: the address family (IANA address family registry)
+ @type family: int
+ @ivar negation: is this item negated?
+ @type negation: bool
+ @ivar address: the address
+ @type address: string
+ @ivar prefix: the prefix length
+ @type prefix: int
+ """
+
+ __slots__ = ['family', 'negation', 'address', 'prefix']
+
+ def __init__(self, family, negation, address, prefix):
+ self.family = family
+ self.negation = negation
+ self.address = address
+ self.prefix = prefix
+
+ def __str__(self):
+ if self.negation:
+ return "!%d:%s/%s" % (self.family, self.address, self.prefix)
+ else:
+ return "%d:%s/%s" % (self.family, self.address, self.prefix)
+
+ def to_wire(self, file):
+ if self.family == 1:
+ address = dns.inet.inet_pton(dns.inet.AF_INET, self.address)
+ elif self.family == 2:
+ address = dns.inet.inet_pton(dns.inet.AF_INET6, self.address)
+ else:
+ address = binascii.unhexlify(self.address)
+ #
+ # Truncate least significant zero bytes.
+ #
+ last = 0
+ for i in xrange(len(address) - 1, -1, -1):
+ if address[i] != maybe_chr(0):
+ last = i + 1
+ break
+ address = address[0: last]
+ l = len(address)
+ assert l < 128
+ if self.negation:
+ l |= 0x80
+ header = struct.pack('!HBB', self.family, self.prefix, l)
+ file.write(header)
+ file.write(address)
+
+
+class APL(dns.rdata.Rdata):
+
+ """APL record.
+
+ @ivar items: a list of APL items
+ @type items: list of APL_Item
+ @see: RFC 3123"""
+
+ __slots__ = ['items']
+
+ def __init__(self, rdclass, rdtype, items):
+ super(APL, self).__init__(rdclass, rdtype)
+ self.items = items
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return ' '.join(map(str, self.items))
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ items = []
+ while 1:
+ token = tok.get().unescape()
+ if token.is_eol_or_eof():
+ break
+ item = token.value
+ if item[0] == '!':
+ negation = True
+ item = item[1:]
+ else:
+ negation = False
+ (family, rest) = item.split(':', 1)
+ family = int(family)
+ (address, prefix) = rest.split('/', 1)
+ prefix = int(prefix)
+ item = APLItem(family, negation, address, prefix)
+ items.append(item)
+
+ return cls(rdclass, rdtype, items)
+
+ def to_wire(self, file, compress=None, origin=None):
+ for item in self.items:
+ item.to_wire(file)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+
+ items = []
+ while 1:
+ if rdlen == 0:
+ break
+ if rdlen < 4:
+ raise dns.exception.FormError
+ header = struct.unpack('!HBB', wire[current: current + 4])
+ afdlen = header[2]
+ if afdlen > 127:
+ negation = True
+ afdlen -= 128
+ else:
+ negation = False
+ current += 4
+ rdlen -= 4
+ if rdlen < afdlen:
+ raise dns.exception.FormError
+ address = wire[current: current + afdlen].unwrap()
+ l = len(address)
+ if header[0] == 1:
+ if l < 4:
+ address += b'\x00' * (4 - l)
+ address = dns.inet.inet_ntop(dns.inet.AF_INET, address)
+ elif header[0] == 2:
+ if l < 16:
+ address += b'\x00' * (16 - l)
+ address = dns.inet.inet_ntop(dns.inet.AF_INET6, address)
+ else:
+ #
+ # This isn't really right according to the RFC, but it
+ # seems better than throwing an exception
+ #
+ address = codecs.encode(address, 'hex_codec')
+ current += afdlen
+ rdlen -= afdlen
+ item = APLItem(header[0], negation, address, header[1])
+ items.append(item)
+ return cls(rdclass, rdtype, items)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/DHCID.py b/openpype/vendor/python/python_2/dns/rdtypes/IN/DHCID.py
new file mode 100644
index 0000000000..cec64590f0
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/IN/DHCID.py
@@ -0,0 +1,61 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import base64
+
+import dns.exception
+
+
+class DHCID(dns.rdata.Rdata):
+
+ """DHCID record
+
+ @ivar data: the data (the content of the RR is opaque as far as the
+ DNS is concerned)
+ @type data: string
+ @see: RFC 4701"""
+
+ __slots__ = ['data']
+
+ def __init__(self, rdclass, rdtype, data):
+ super(DHCID, self).__init__(rdclass, rdtype)
+ self.data = data
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return dns.rdata._base64ify(self.data)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ chunks = []
+ while 1:
+ t = tok.get().unescape()
+ if t.is_eol_or_eof():
+ break
+ if not t.is_identifier():
+ raise dns.exception.SyntaxError
+ chunks.append(t.value.encode())
+ b64 = b''.join(chunks)
+ data = base64.b64decode(b64)
+ return cls(rdclass, rdtype, data)
+
+ def to_wire(self, file, compress=None, origin=None):
+ file.write(self.data)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ data = wire[current: current + rdlen].unwrap()
+ return cls(rdclass, rdtype, data)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/IPSECKEY.py b/openpype/vendor/python/python_2/dns/rdtypes/IN/IPSECKEY.py
new file mode 100644
index 0000000000..8f49ba137d
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/IN/IPSECKEY.py
@@ -0,0 +1,150 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+import base64
+
+import dns.exception
+import dns.inet
+import dns.name
+
+
+class IPSECKEY(dns.rdata.Rdata):
+
+ """IPSECKEY record
+
+ @ivar precedence: the precedence for this key data
+ @type precedence: int
+ @ivar gateway_type: the gateway type
+ @type gateway_type: int
+ @ivar algorithm: the algorithm to use
+ @type algorithm: int
+ @ivar gateway: the public key
+ @type gateway: None, IPv4 address, IPV6 address, or domain name
+ @ivar key: the public key
+ @type key: string
+ @see: RFC 4025"""
+
+ __slots__ = ['precedence', 'gateway_type', 'algorithm', 'gateway', 'key']
+
+ def __init__(self, rdclass, rdtype, precedence, gateway_type, algorithm,
+ gateway, key):
+ super(IPSECKEY, self).__init__(rdclass, rdtype)
+ if gateway_type == 0:
+ if gateway != '.' and gateway is not None:
+ raise SyntaxError('invalid gateway for gateway type 0')
+ gateway = None
+ elif gateway_type == 1:
+ # check that it's OK
+ dns.inet.inet_pton(dns.inet.AF_INET, gateway)
+ elif gateway_type == 2:
+ # check that it's OK
+ dns.inet.inet_pton(dns.inet.AF_INET6, gateway)
+ elif gateway_type == 3:
+ pass
+ else:
+ raise SyntaxError(
+ 'invalid IPSECKEY gateway type: %d' % gateway_type)
+ self.precedence = precedence
+ self.gateway_type = gateway_type
+ self.algorithm = algorithm
+ self.gateway = gateway
+ self.key = key
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ if self.gateway_type == 0:
+ gateway = '.'
+ elif self.gateway_type == 1:
+ gateway = self.gateway
+ elif self.gateway_type == 2:
+ gateway = self.gateway
+ elif self.gateway_type == 3:
+ gateway = str(self.gateway.choose_relativity(origin, relativize))
+ else:
+ raise ValueError('invalid gateway type')
+ return '%d %d %d %s %s' % (self.precedence, self.gateway_type,
+ self.algorithm, gateway,
+ dns.rdata._base64ify(self.key))
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ precedence = tok.get_uint8()
+ gateway_type = tok.get_uint8()
+ algorithm = tok.get_uint8()
+ if gateway_type == 3:
+ gateway = tok.get_name().choose_relativity(origin, relativize)
+ else:
+ gateway = tok.get_string()
+ chunks = []
+ while 1:
+ t = tok.get().unescape()
+ if t.is_eol_or_eof():
+ break
+ if not t.is_identifier():
+ raise dns.exception.SyntaxError
+ chunks.append(t.value.encode())
+ b64 = b''.join(chunks)
+ key = base64.b64decode(b64)
+ return cls(rdclass, rdtype, precedence, gateway_type, algorithm,
+ gateway, key)
+
+ def to_wire(self, file, compress=None, origin=None):
+ header = struct.pack("!BBB", self.precedence, self.gateway_type,
+ self.algorithm)
+ file.write(header)
+ if self.gateway_type == 0:
+ pass
+ elif self.gateway_type == 1:
+ file.write(dns.inet.inet_pton(dns.inet.AF_INET, self.gateway))
+ elif self.gateway_type == 2:
+ file.write(dns.inet.inet_pton(dns.inet.AF_INET6, self.gateway))
+ elif self.gateway_type == 3:
+ self.gateway.to_wire(file, None, origin)
+ else:
+ raise ValueError('invalid gateway type')
+ file.write(self.key)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ if rdlen < 3:
+ raise dns.exception.FormError
+ header = struct.unpack('!BBB', wire[current: current + 3])
+ gateway_type = header[1]
+ current += 3
+ rdlen -= 3
+ if gateway_type == 0:
+ gateway = None
+ elif gateway_type == 1:
+ gateway = dns.inet.inet_ntop(dns.inet.AF_INET,
+ wire[current: current + 4])
+ current += 4
+ rdlen -= 4
+ elif gateway_type == 2:
+ gateway = dns.inet.inet_ntop(dns.inet.AF_INET6,
+ wire[current: current + 16])
+ current += 16
+ rdlen -= 16
+ elif gateway_type == 3:
+ (gateway, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ current += cused
+ rdlen -= cused
+ else:
+ raise dns.exception.FormError('invalid IPSECKEY gateway type')
+ key = wire[current: current + rdlen].unwrap()
+ return cls(rdclass, rdtype, header[0], gateway_type, header[2],
+ gateway, key)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/KX.py b/openpype/vendor/python/python_2/dns/rdtypes/IN/KX.py
new file mode 100644
index 0000000000..1318a582e7
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/IN/KX.py
@@ -0,0 +1,23 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.mxbase
+
+
+ class KX(dns.rdtypes.mxbase.UncompressedMX):  # RFC 2230; MX-shaped rdata whose exchange name is written uncompressed
+
+ """KX record"""
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/NAPTR.py b/openpype/vendor/python/python_2/dns/rdtypes/IN/NAPTR.py
new file mode 100644
index 0000000000..32fa4745ea
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/IN/NAPTR.py
@@ -0,0 +1,127 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.name
+import dns.rdata
+from dns._compat import xrange, text_type
+
+
+ def _write_string(file, s):  # emit s as a DNS character-string: 1-byte length prefix followed by the data
+ l = len(s)
+ assert l < 256  # character-strings are limited to 255 bytes
+ file.write(struct.pack('!B', l))
+ file.write(s)
+
+
+ def _sanitize(value):  # normalize text input to bytes; non-text values pass through unchanged
+ if isinstance(value, text_type):
+ return value.encode()
+ return value
+
+
+ class NAPTR(dns.rdata.Rdata):
+
+ """NAPTR record
+
+ @ivar order: order
+ @type order: int
+ @ivar preference: preference
+ @type preference: int
+ @ivar flags: flags
+ @type flags: string
+ @ivar service: service
+ @type service: string
+ @ivar regexp: regular expression
+ @type regexp: string
+ @ivar replacement: replacement name
+ @type replacement: dns.name.Name object
+ @see: RFC 3403"""
+
+ __slots__ = ['order', 'preference', 'flags', 'service', 'regexp',
+ 'replacement']
+
+ def __init__(self, rdclass, rdtype, order, preference, flags, service,
+ regexp, replacement):
+ super(NAPTR, self).__init__(rdclass, rdtype)
+ self.flags = _sanitize(flags)  # text inputs are coerced to bytes
+ self.service = _sanitize(service)
+ self.regexp = _sanitize(regexp)
+ self.order = order
+ self.preference = preference
+ self.replacement = replacement
+
+ def to_text(self, origin=None, relativize=True, **kw):  # zone-file presentation form
+ replacement = self.replacement.choose_relativity(origin, relativize)
+ return '%d %d "%s" "%s" "%s" %s' % \
+ (self.order, self.preference,
+ dns.rdata._escapify(self.flags),  # the three strings are quoted and escaped
+ dns.rdata._escapify(self.service),
+ dns.rdata._escapify(self.regexp),
+ replacement)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):  # parse presentation form
+ order = tok.get_uint16()
+ preference = tok.get_uint16()
+ flags = tok.get_string()
+ service = tok.get_string()
+ regexp = tok.get_string()
+ replacement = tok.get_name()
+ replacement = replacement.choose_relativity(origin, relativize)
+ tok.get_eol()  # nothing may follow the replacement name
+ return cls(rdclass, rdtype, order, preference, flags, service,
+ regexp, replacement)
+
+ def to_wire(self, file, compress=None, origin=None):  # serialize to RFC 3403 wire form
+ two_ints = struct.pack("!HH", self.order, self.preference)
+ file.write(two_ints)
+ _write_string(file, self.flags)  # each string gets a 1-byte length prefix
+ _write_string(file, self.service)
+ _write_string(file, self.regexp)
+ self.replacement.to_wire(file, compress, origin)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):  # parse wire form
+ (order, preference) = struct.unpack('!HH', wire[current: current + 4])
+ current += 4
+ rdlen -= 4
+ strings = []
+ for i in xrange(3):  # flags, service, regexp: three length-prefixed strings
+ l = wire[current]  # length byte of the next character-string
+ current += 1
+ rdlen -= 1
+ if l > rdlen or rdlen < 0:  # string would run past the end of the rdata
+ raise dns.exception.FormError
+ s = wire[current: current + l].unwrap()
+ current += l
+ rdlen -= l
+ strings.append(s)
+ (replacement, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ if cused != rdlen:  # replacement must consume exactly the remaining rdata
+ raise dns.exception.FormError
+ if origin is not None:
+ replacement = replacement.relativize(origin)
+ return cls(rdclass, rdtype, order, preference, strings[0], strings[1],
+ strings[2], replacement)
+
+ def choose_relativity(self, origin=None, relativize=True):  # (de)relativize the embedded name in place
+ self.replacement = self.replacement.choose_relativity(origin,
+ relativize)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/NSAP.py b/openpype/vendor/python/python_2/dns/rdtypes/IN/NSAP.py
new file mode 100644
index 0000000000..336befc7f2
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/IN/NSAP.py
@@ -0,0 +1,60 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import binascii
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+
+
+ class NSAP(dns.rdata.Rdata):
+
+ """NSAP record.
+
+ @ivar address: an NSAP address
+ @type address: string
+ @see: RFC 1706"""
+
+ __slots__ = ['address']
+
+ def __init__(self, rdclass, rdtype, address):
+ super(NSAP, self).__init__(rdclass, rdtype)
+ self.address = address  # raw binary NSAP address
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return "0x%s" % binascii.hexlify(self.address).decode()  # presentation form is 0x-prefixed hex
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):  # parse "0x..." presentation form
+ address = tok.get_string()
+ tok.get_eol()
+ if address[0:2] != '0x':
+ raise dns.exception.SyntaxError('string does not start with 0x')
+ address = address[2:].replace('.', '')  # dots are cosmetic group separators; drop them
+ if len(address) % 2 != 0:
+ raise dns.exception.SyntaxError('hexstring has odd length')
+ address = binascii.unhexlify(address.encode())
+ return cls(rdclass, rdtype, address)
+
+ def to_wire(self, file, compress=None, origin=None):
+ file.write(self.address)  # wire form is the raw address bytes
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ address = wire[current: current + rdlen].unwrap()
+ return cls(rdclass, rdtype, address)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/NSAP_PTR.py b/openpype/vendor/python/python_2/dns/rdtypes/IN/NSAP_PTR.py
new file mode 100644
index 0000000000..a5b66c803f
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/IN/NSAP_PTR.py
@@ -0,0 +1,23 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.nsbase
+
+
+ class NSAP_PTR(dns.rdtypes.nsbase.UncompressedNS):  # RFC 1706; NS-shaped rdata whose target name is written uncompressed
+
+ """NSAP-PTR record"""
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/PX.py b/openpype/vendor/python/python_2/dns/rdtypes/IN/PX.py
new file mode 100644
index 0000000000..2dbaee6ce8
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/IN/PX.py
@@ -0,0 +1,89 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+
+ class PX(dns.rdata.Rdata):
+
+ """PX record.
+
+ @ivar preference: the preference value
+ @type preference: int
+ @ivar map822: the map822 name
+ @type map822: dns.name.Name object
+ @ivar mapx400: the mapx400 name
+ @type mapx400: dns.name.Name object
+ @see: RFC 2163"""
+
+ __slots__ = ['preference', 'map822', 'mapx400']
+
+ def __init__(self, rdclass, rdtype, preference, map822, mapx400):
+ super(PX, self).__init__(rdclass, rdtype)
+ self.preference = preference
+ self.map822 = map822
+ self.mapx400 = mapx400
+
+ def to_text(self, origin=None, relativize=True, **kw):  # presentation form: "preference map822 mapx400"
+ map822 = self.map822.choose_relativity(origin, relativize)
+ mapx400 = self.mapx400.choose_relativity(origin, relativize)
+ return '%d %s %s' % (self.preference, map822, mapx400)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ preference = tok.get_uint16()
+ map822 = tok.get_name()
+ map822 = map822.choose_relativity(origin, relativize)
+ mapx400 = tok.get_name(None)
+ mapx400 = mapx400.choose_relativity(origin, relativize)
+ tok.get_eol()
+ return cls(rdclass, rdtype, preference, map822, mapx400)
+
+ def to_wire(self, file, compress=None, origin=None):
+ pref = struct.pack("!H", self.preference)
+ file.write(pref)
+ self.map822.to_wire(file, None, origin)  # compress=None: names in PX rdata are never compressed
+ self.mapx400.to_wire(file, None, origin)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ (preference, ) = struct.unpack('!H', wire[current: current + 2])
+ current += 2
+ rdlen -= 2
+ (map822, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ if cused > rdlen:  # first name must leave room for the second
+ raise dns.exception.FormError
+ current += cused
+ rdlen -= cused
+ if origin is not None:
+ map822 = map822.relativize(origin)
+ (mapx400, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ if cused != rdlen:  # second name must consume exactly the remaining rdata
+ raise dns.exception.FormError
+ if origin is not None:
+ mapx400 = mapx400.relativize(origin)
+ return cls(rdclass, rdtype, preference, map822, mapx400)
+
+ def choose_relativity(self, origin=None, relativize=True):  # (de)relativize both embedded names in place
+ self.map822 = self.map822.choose_relativity(origin, relativize)
+ self.mapx400 = self.mapx400.choose_relativity(origin, relativize)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/SRV.py b/openpype/vendor/python/python_2/dns/rdtypes/IN/SRV.py
new file mode 100644
index 0000000000..b2c1bc9f0b
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/IN/SRV.py
@@ -0,0 +1,83 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+
+ class SRV(dns.rdata.Rdata):
+
+ """SRV record
+
+ @ivar priority: the priority
+ @type priority: int
+ @ivar weight: the weight
+ @type weight: int
+ @ivar port: the port of the service
+ @type port: int
+ @ivar target: the target host
+ @type target: dns.name.Name object
+ @see: RFC 2782"""
+
+ __slots__ = ['priority', 'weight', 'port', 'target']
+
+ def __init__(self, rdclass, rdtype, priority, weight, port, target):
+ super(SRV, self).__init__(rdclass, rdtype)
+ self.priority = priority
+ self.weight = weight
+ self.port = port
+ self.target = target
+
+ def to_text(self, origin=None, relativize=True, **kw):  # presentation form: "priority weight port target"
+ target = self.target.choose_relativity(origin, relativize)
+ return '%d %d %d %s' % (self.priority, self.weight, self.port,
+ target)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ priority = tok.get_uint16()
+ weight = tok.get_uint16()
+ port = tok.get_uint16()
+ target = tok.get_name(None)
+ target = target.choose_relativity(origin, relativize)
+ tok.get_eol()
+ return cls(rdclass, rdtype, priority, weight, port, target)
+
+ def to_wire(self, file, compress=None, origin=None):
+ three_ints = struct.pack("!HHH", self.priority, self.weight, self.port)  # three 16-bit fields, network order
+ file.write(three_ints)
+ self.target.to_wire(file, compress, origin)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ (priority, weight, port) = struct.unpack('!HHH',
+ wire[current: current + 6])
+ current += 6
+ rdlen -= 6
+ (target, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ if cused != rdlen:  # target must consume exactly the remaining rdata
+ raise dns.exception.FormError
+ if origin is not None:
+ target = target.relativize(origin)
+ return cls(rdclass, rdtype, priority, weight, port, target)
+
+ def choose_relativity(self, origin=None, relativize=True):  # (de)relativize the embedded target name in place
+ self.target = self.target.choose_relativity(origin, relativize)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/WKS.py b/openpype/vendor/python/python_2/dns/rdtypes/IN/WKS.py
new file mode 100644
index 0000000000..96f98ada70
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/IN/WKS.py
@@ -0,0 +1,107 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import socket
+import struct
+
+import dns.ipv4
+import dns.rdata
+from dns._compat import xrange
+
+_proto_tcp = socket.getprotobyname('tcp')
+_proto_udp = socket.getprotobyname('udp')
+
+
+ class WKS(dns.rdata.Rdata):
+
+ """WKS record
+
+ @ivar address: the address
+ @type address: string
+ @ivar protocol: the protocol
+ @type protocol: int
+ @ivar bitmap: the bitmap
+ @type bitmap: string
+ @see: RFC 1035"""
+
+ __slots__ = ['address', 'protocol', 'bitmap']
+
+ def __init__(self, rdclass, rdtype, address, protocol, bitmap):
+ super(WKS, self).__init__(rdclass, rdtype)
+ self.address = address
+ self.protocol = protocol
+ if not isinstance(bitmap, bytearray):
+ self.bitmap = bytearray(bitmap)  # normalize so bits can be set in place
+ else:
+ self.bitmap = bitmap
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ bits = []
+ for i in xrange(0, len(self.bitmap)):
+ byte = self.bitmap[i]
+ for j in xrange(0, 8):
+ if byte & (0x80 >> j):  # port 0 is the high bit of byte 0
+ bits.append(str(i * 8 + j))
+ text = ' '.join(bits)
+ return '%s %d %s' % (self.address, self.protocol, text)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ address = tok.get_string()
+ protocol = tok.get_string()
+ if protocol.isdigit():
+ protocol = int(protocol)
+ else:
+ protocol = socket.getprotobyname(protocol)  # accept protocol names like "tcp"
+ bitmap = bytearray()
+ while 1:  # remaining tokens are port numbers or service names
+ token = tok.get().unescape()
+ if token.is_eol_or_eof():
+ break
+ if token.value.isdigit():
+ serv = int(token.value)
+ else:
+ if protocol != _proto_udp and protocol != _proto_tcp:  # service-name lookup only works for TCP/UDP
+ raise NotImplementedError("protocol must be TCP or UDP")
+ if protocol == _proto_udp:
+ protocol_text = "udp"
+ else:
+ protocol_text = "tcp"
+ serv = socket.getservbyname(token.value, protocol_text)
+ i = serv // 8  # byte index of this port's bit
+ l = len(bitmap)
+ if l < i + 1:  # grow the bitmap with zero bytes as needed
+ for j in xrange(l, i + 1):
+ bitmap.append(0)
+ bitmap[i] = bitmap[i] | (0x80 >> (serv % 8))
+ bitmap = dns.rdata._truncate_bitmap(bitmap)  # presumably trims trailing zero bytes — see dns.rdata
+ return cls(rdclass, rdtype, address, protocol, bitmap)
+
+ def to_wire(self, file, compress=None, origin=None):
+ file.write(dns.ipv4.inet_aton(self.address))  # 4-byte IPv4 address
+ protocol = struct.pack('!B', self.protocol)
+ file.write(protocol)
+ file.write(self.bitmap)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ address = dns.ipv4.inet_ntoa(wire[current: current + 4])
+ protocol, = struct.unpack('!B', wire[current + 4: current + 5])
+ current += 5
+ rdlen -= 5
+ bitmap = wire[current: current + rdlen].unwrap()  # rest of rdata is the port bitmap
+ return cls(rdclass, rdtype, address, protocol, bitmap)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/IN/__init__.py b/openpype/vendor/python/python_2/dns/rdtypes/IN/__init__.py
new file mode 100644
index 0000000000..d7e69c9f60
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/IN/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Class IN rdata type classes."""
+
+__all__ = [
+ 'A',
+ 'AAAA',
+ 'APL',
+ 'DHCID',
+ 'IPSECKEY',
+ 'KX',
+ 'NAPTR',
+ 'NSAP',
+ 'NSAP_PTR',
+ 'PX',
+ 'SRV',
+ 'WKS',
+]
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/__init__.py b/openpype/vendor/python/python_2/dns/rdtypes/__init__.py
new file mode 100644
index 0000000000..1ac137f1fe
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/__init__.py
@@ -0,0 +1,27 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS rdata type classes"""
+
+__all__ = [
+ 'ANY',
+ 'IN',
+ 'CH',
+ 'euibase',
+ 'mxbase',
+ 'nsbase',
+]
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/dnskeybase.py b/openpype/vendor/python/python_2/dns/rdtypes/dnskeybase.py
new file mode 100644
index 0000000000..3e7e87ef15
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/dnskeybase.py
@@ -0,0 +1,138 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import base64
+import struct
+
+import dns.exception
+import dns.dnssec
+import dns.rdata
+
+# wildcard import
+__all__ = ["SEP", "REVOKE", "ZONE",
+ "flags_to_text_set", "flags_from_text_set"]
+
+# flag constants
+SEP = 0x0001
+REVOKE = 0x0080
+ZONE = 0x0100
+
+_flag_by_text = {
+ 'SEP': SEP,
+ 'REVOKE': REVOKE,
+ 'ZONE': ZONE
+}
+
+# We construct the inverse mapping programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mapping not to be true inverse.
+_flag_by_value = {y: x for x, y in _flag_by_text.items()}
+
+
+ def flags_to_text_set(flags):
+ """Convert a DNSKEY flags value to set texts
+ @rtype: set([string])"""
+
+ flags_set = set()
+ mask = 0x1
+ while mask <= 0x8000:  # test each of the 16 flag bits
+ if flags & mask:
+ text = _flag_by_value.get(mask)
+ if not text:  # unknown bits are rendered as their hex mask
+ text = hex(mask)
+ flags_set.add(text)
+ mask <<= 1
+ return flags_set
+
+
+ def flags_from_text_set(texts_set):
+ """Convert set of DNSKEY flag mnemonic texts to DNSKEY flag value
+ @rtype: int"""
+
+ flags = 0
+ for text in texts_set:
+ try:
+ flags += _flag_by_text[text]  # flags map to distinct bits and input is a set, so += acts as bitwise OR
+ except KeyError:
+ raise NotImplementedError(
+ "DNSKEY flag '%s' is not supported" % text)
+ return flags
+
+
+ class DNSKEYBase(dns.rdata.Rdata):
+
+ """Base class for rdata that is like a DNSKEY record
+
+ @ivar flags: the key flags
+ @type flags: int
+ @ivar protocol: the protocol for which this key may be used
+ @type protocol: int
+ @ivar algorithm: the algorithm used for the key
+ @type algorithm: int
+ @ivar key: the public key
+ @type key: string"""
+
+ __slots__ = ['flags', 'protocol', 'algorithm', 'key']
+
+ def __init__(self, rdclass, rdtype, flags, protocol, algorithm, key):
+ super(DNSKEYBase, self).__init__(rdclass, rdtype)
+ self.flags = flags
+ self.protocol = protocol
+ self.algorithm = algorithm
+ self.key = key
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return '%d %d %d %s' % (self.flags, self.protocol, self.algorithm,
+ dns.rdata._base64ify(self.key))  # key is rendered as base64
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ flags = tok.get_uint16()
+ protocol = tok.get_uint8()
+ algorithm = dns.dnssec.algorithm_from_text(tok.get_string())  # convert presentation-form algorithm to its numeric value
+ chunks = []
+ while 1:  # key may be split across several base64 tokens
+ t = tok.get().unescape()
+ if t.is_eol_or_eof():
+ break
+ if not t.is_identifier():
+ raise dns.exception.SyntaxError
+ chunks.append(t.value.encode())
+ b64 = b''.join(chunks)
+ key = base64.b64decode(b64)
+ return cls(rdclass, rdtype, flags, protocol, algorithm, key)
+
+ def to_wire(self, file, compress=None, origin=None):
+ header = struct.pack("!HBB", self.flags, self.protocol, self.algorithm)  # 4-byte fixed header
+ file.write(header)
+ file.write(self.key)  # key occupies the rest of the rdata
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ if rdlen < 4:  # must at least contain the fixed header
+ raise dns.exception.FormError
+ header = struct.unpack('!HBB', wire[current: current + 4])  # (flags, protocol, algorithm)
+ current += 4
+ rdlen -= 4
+ key = wire[current: current + rdlen].unwrap()
+ return cls(rdclass, rdtype, header[0], header[1], header[2],
+ key)
+
+ def flags_to_text_set(self):
+ """Convert a DNSKEY flags value to set texts
+ @rtype: set([string])"""
+ return flags_to_text_set(self.flags)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/dsbase.py b/openpype/vendor/python/python_2/dns/rdtypes/dsbase.py
new file mode 100644
index 0000000000..26ae9d5c7d
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/dsbase.py
@@ -0,0 +1,85 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2010, 2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+import binascii
+
+import dns.rdata
+import dns.rdatatype
+
+
+ class DSBase(dns.rdata.Rdata):
+
+ """Base class for rdata that is like a DS record
+
+ @ivar key_tag: the key tag
+ @type key_tag: int
+ @ivar algorithm: the algorithm
+ @type algorithm: int
+ @ivar digest_type: the digest type
+ @type digest_type: int
+ @ivar digest: the digest
+ @type digest: string
+ @see: draft-ietf-dnsext-delegation-signer-14.txt"""
+
+ __slots__ = ['key_tag', 'algorithm', 'digest_type', 'digest']
+
+ def __init__(self, rdclass, rdtype, key_tag, algorithm, digest_type,
+ digest):
+ super(DSBase, self).__init__(rdclass, rdtype)
+ self.key_tag = key_tag
+ self.algorithm = algorithm
+ self.digest_type = digest_type
+ self.digest = digest
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return '%d %d %d %s' % (self.key_tag, self.algorithm,
+ self.digest_type,
+ dns.rdata._hexify(self.digest,
+ chunksize=128))  # digest is rendered as hex
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ key_tag = tok.get_uint16()
+ algorithm = tok.get_uint8()
+ digest_type = tok.get_uint8()
+ chunks = []
+ while 1:  # digest may be split across several hex tokens
+ t = tok.get().unescape()
+ if t.is_eol_or_eof():
+ break
+ if not t.is_identifier():
+ raise dns.exception.SyntaxError  # NOTE(review): dns.exception is not imported by this module; reachable only via the dns package namespace — confirm
+ chunks.append(t.value.encode())
+ digest = b''.join(chunks)
+ digest = binascii.unhexlify(digest)
+ return cls(rdclass, rdtype, key_tag, algorithm, digest_type,
+ digest)
+
+ def to_wire(self, file, compress=None, origin=None):
+ header = struct.pack("!HBB", self.key_tag, self.algorithm,
+ self.digest_type)  # 4-byte fixed header
+ file.write(header)
+ file.write(self.digest)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ header = struct.unpack("!HBB", wire[current: current + 4])  # (key_tag, algorithm, digest_type)
+ current += 4
+ rdlen -= 4
+ digest = wire[current: current + rdlen].unwrap()
+ return cls(rdclass, rdtype, header[0], header[1], header[2], digest)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/euibase.py b/openpype/vendor/python/python_2/dns/rdtypes/euibase.py
new file mode 100644
index 0000000000..cc5fdaa63b
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/euibase.py
@@ -0,0 +1,71 @@
+# Copyright (C) 2015 Red Hat, Inc.
+# Author: Petr Spacek
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED 'AS IS' AND RED HAT DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import binascii
+
+import dns.rdata
+from dns._compat import xrange
+
+
+ class EUIBase(dns.rdata.Rdata):
+
+ """EUIxx record
+
+ @ivar eui: xx-bit Extended Unique Identifier (EUI-xx)
+ @type eui: string
+ @see: rfc7043.txt"""
+
+ __slots__ = ['eui']
+ # define these in subclasses
+ # byte_len = 6 # 0123456789ab (in hex)
+ # text_len = byte_len * 3 - 1 # 01-23-45-67-89-ab
+
+ def __init__(self, rdclass, rdtype, eui):
+ super(EUIBase, self).__init__(rdclass, rdtype)
+ if len(eui) != self.byte_len:  # NOTE(review): dns.exception is not imported by this module; reachable only via the dns package namespace — confirm
+ raise dns.exception.FormError('EUI%s rdata has to have %s bytes'
+ % (self.byte_len * 8, self.byte_len))
+ self.eui = eui
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return dns.rdata._hexify(self.eui, chunksize=2).replace(' ', '-')  # e.g. "01-23-45-67-89-ab"
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ text = tok.get_string()
+ tok.get_eol()
+ if len(text) != cls.text_len:
+ raise dns.exception.SyntaxError(
+ 'Input text must have %s characters' % cls.text_len)
+ expected_dash_idxs = xrange(2, cls.byte_len * 3 - 1, 3)  # a dash must separate each hex byte pair
+ for i in expected_dash_idxs:
+ if text[i] != '-':
+ raise dns.exception.SyntaxError('Dash expected at position %s'
+ % i)
+ text = text.replace('-', '')
+ try:
+ data = binascii.unhexlify(text.encode())
+ except (ValueError, TypeError) as ex:
+ raise dns.exception.SyntaxError('Hex decoding error: %s' % str(ex))
+ return cls(rdclass, rdtype, data)
+
+ def to_wire(self, file, compress=None, origin=None):
+ file.write(self.eui)  # wire form is the raw EUI bytes
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ eui = wire[current:current + rdlen].unwrap()
+ return cls(rdclass, rdtype, eui)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/mxbase.py b/openpype/vendor/python/python_2/dns/rdtypes/mxbase.py
new file mode 100644
index 0000000000..9a3fa62360
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/mxbase.py
@@ -0,0 +1,103 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""MX-like base classes."""
+
+from io import BytesIO
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+
+class MXBase(dns.rdata.Rdata):
+
+ """Base class for rdata that is like an MX record.
+
+ @ivar preference: the preference value
+ @type preference: int
+ @ivar exchange: the exchange name
+ @type exchange: dns.name.Name object"""
+
+ __slots__ = ['preference', 'exchange']
+
+ def __init__(self, rdclass, rdtype, preference, exchange):
+ super(MXBase, self).__init__(rdclass, rdtype)
+ self.preference = preference
+ self.exchange = exchange
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ exchange = self.exchange.choose_relativity(origin, relativize)
+ return '%d %s' % (self.preference, exchange)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ preference = tok.get_uint16()
+ exchange = tok.get_name()
+ exchange = exchange.choose_relativity(origin, relativize)
+ tok.get_eol()
+ return cls(rdclass, rdtype, preference, exchange)
+
+ def to_wire(self, file, compress=None, origin=None):
+ pref = struct.pack("!H", self.preference)
+ file.write(pref)
+ self.exchange.to_wire(file, compress, origin)
+
+ def to_digestable(self, origin=None):
+ return struct.pack("!H", self.preference) + \
+ self.exchange.to_digestable(origin)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ (preference, ) = struct.unpack('!H', wire[current: current + 2])
+ current += 2
+ rdlen -= 2
+ (exchange, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ if cused != rdlen:
+ raise dns.exception.FormError
+ if origin is not None:
+ exchange = exchange.relativize(origin)
+ return cls(rdclass, rdtype, preference, exchange)
+
+ def choose_relativity(self, origin=None, relativize=True):
+ self.exchange = self.exchange.choose_relativity(origin, relativize)
+
+
+class UncompressedMX(MXBase):
+
+ """Base class for rdata that is like an MX record, but whose name
+ is not compressed when converted to DNS wire format, and whose
+ digestable form is not downcased."""
+
+ def to_wire(self, file, compress=None, origin=None):
+ super(UncompressedMX, self).to_wire(file, None, origin)
+
+ def to_digestable(self, origin=None):
+ f = BytesIO()
+ self.to_wire(f, None, origin)
+ return f.getvalue()
+
+
+class UncompressedDowncasingMX(MXBase):
+
+ """Base class for rdata that is like an MX record, but whose name
+ is not compressed when convert to DNS wire format."""
+
+ def to_wire(self, file, compress=None, origin=None):
+ super(UncompressedDowncasingMX, self).to_wire(file, None, origin)
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/nsbase.py b/openpype/vendor/python/python_2/dns/rdtypes/nsbase.py
new file mode 100644
index 0000000000..97a2232638
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/nsbase.py
@@ -0,0 +1,83 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""NS-like base classes."""
+
+from io import BytesIO
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+
+class NSBase(dns.rdata.Rdata):
+
+ """Base class for rdata that is like an NS record.
+
+ @ivar target: the target name of the rdata
+ @type target: dns.name.Name object"""
+
+ __slots__ = ['target']
+
+ def __init__(self, rdclass, rdtype, target):
+ super(NSBase, self).__init__(rdclass, rdtype)
+ self.target = target
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ target = self.target.choose_relativity(origin, relativize)
+ return str(target)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ target = tok.get_name()
+ target = target.choose_relativity(origin, relativize)
+ tok.get_eol()
+ return cls(rdclass, rdtype, target)
+
+ def to_wire(self, file, compress=None, origin=None):
+ self.target.to_wire(file, compress, origin)
+
+ def to_digestable(self, origin=None):
+ return self.target.to_digestable(origin)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ (target, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ if cused != rdlen:
+ raise dns.exception.FormError
+ if origin is not None:
+ target = target.relativize(origin)
+ return cls(rdclass, rdtype, target)
+
+ def choose_relativity(self, origin=None, relativize=True):
+ self.target = self.target.choose_relativity(origin, relativize)
+
+
+class UncompressedNS(NSBase):
+
+ """Base class for rdata that is like an NS record, but whose name
+ is not compressed when convert to DNS wire format, and whose
+ digestable form is not downcased."""
+
+ def to_wire(self, file, compress=None, origin=None):
+ super(UncompressedNS, self).to_wire(file, None, origin)
+
+ def to_digestable(self, origin=None):
+ f = BytesIO()
+ self.to_wire(f, None, origin)
+ return f.getvalue()
diff --git a/openpype/vendor/python/python_2/dns/rdtypes/txtbase.py b/openpype/vendor/python/python_2/dns/rdtypes/txtbase.py
new file mode 100644
index 0000000000..645a57ecfc
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rdtypes/txtbase.py
@@ -0,0 +1,97 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2006-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""TXT-like base class."""
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+from dns._compat import binary_type, string_types
+
+
+class TXTBase(dns.rdata.Rdata):
+
+ """Base class for rdata that is like a TXT record
+
+ @ivar strings: the strings
+ @type strings: list of binary
+ @see: RFC 1035"""
+
+ __slots__ = ['strings']
+
+ def __init__(self, rdclass, rdtype, strings):
+ super(TXTBase, self).__init__(rdclass, rdtype)
+ if isinstance(strings, binary_type) or \
+ isinstance(strings, string_types):
+ strings = [strings]
+ self.strings = []
+ for string in strings:
+ if isinstance(string, string_types):
+ string = string.encode()
+ self.strings.append(string)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ txt = ''
+ prefix = ''
+ for s in self.strings:
+ txt += '{}"{}"'.format(prefix, dns.rdata._escapify(s))
+ prefix = ' '
+ return txt
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ strings = []
+ while 1:
+ token = tok.get().unescape()
+ if token.is_eol_or_eof():
+ break
+ if not (token.is_quoted_string() or token.is_identifier()):
+ raise dns.exception.SyntaxError("expected a string")
+ if len(token.value) > 255:
+ raise dns.exception.SyntaxError("string too long")
+ value = token.value
+ if isinstance(value, binary_type):
+ strings.append(value)
+ else:
+ strings.append(value.encode())
+ if len(strings) == 0:
+ raise dns.exception.UnexpectedEnd
+ return cls(rdclass, rdtype, strings)
+
+ def to_wire(self, file, compress=None, origin=None):
+ for s in self.strings:
+ l = len(s)
+ assert l < 256
+ file.write(struct.pack('!B', l))
+ file.write(s)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ strings = []
+ while rdlen > 0:
+ l = wire[current]
+ current += 1
+ rdlen -= 1
+ if l > rdlen:
+ raise dns.exception.FormError
+ s = wire[current: current + l].unwrap()
+ current += l
+ rdlen -= l
+ strings.append(s)
+ return cls(rdclass, rdtype, strings)
diff --git a/openpype/vendor/python/python_2/dns/renderer.py b/openpype/vendor/python/python_2/dns/renderer.py
new file mode 100644
index 0000000000..d7ef8c7f09
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/renderer.py
@@ -0,0 +1,291 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Help for building DNS wire format messages"""
+
+from io import BytesIO
+import struct
+import random
+import time
+
+import dns.exception
+import dns.tsig
+from ._compat import long
+
+
+QUESTION = 0
+ANSWER = 1
+AUTHORITY = 2
+ADDITIONAL = 3
+
+
+class Renderer(object):
+ """Helper class for building DNS wire-format messages.
+
+ Most applications can use the higher-level L{dns.message.Message}
+ class and its to_wire() method to generate wire-format messages.
+ This class is for those applications which need finer control
+ over the generation of messages.
+
+ Typical use::
+
+ r = dns.renderer.Renderer(id=1, flags=0x80, max_size=512)
+ r.add_question(qname, qtype, qclass)
+ r.add_rrset(dns.renderer.ANSWER, rrset_1)
+ r.add_rrset(dns.renderer.ANSWER, rrset_2)
+ r.add_rrset(dns.renderer.AUTHORITY, ns_rrset)
+ r.add_edns(0, 0, 4096)
+ r.add_rrset(dns.renderer.ADDTIONAL, ad_rrset_1)
+ r.add_rrset(dns.renderer.ADDTIONAL, ad_rrset_2)
+ r.write_header()
+ r.add_tsig(keyname, secret, 300, 1, 0, '', request_mac)
+ wire = r.get_wire()
+
+ output, a BytesIO, where rendering is written
+
+ id: the message id
+
+ flags: the message flags
+
+ max_size: the maximum size of the message
+
+ origin: the origin to use when rendering relative names
+
+ compress: the compression table
+
+ section: an int, the section currently being rendered
+
+ counts: list of the number of RRs in each section
+
+ mac: the MAC of the rendered message (if TSIG was used)
+ """
+
+ def __init__(self, id=None, flags=0, max_size=65535, origin=None):
+ """Initialize a new renderer."""
+
+ self.output = BytesIO()
+ if id is None:
+ self.id = random.randint(0, 65535)
+ else:
+ self.id = id
+ self.flags = flags
+ self.max_size = max_size
+ self.origin = origin
+ self.compress = {}
+ self.section = QUESTION
+ self.counts = [0, 0, 0, 0]
+ self.output.write(b'\x00' * 12)
+ self.mac = ''
+
+ def _rollback(self, where):
+ """Truncate the output buffer at offset *where*, and remove any
+ compression table entries that pointed beyond the truncation
+ point.
+ """
+
+ self.output.seek(where)
+ self.output.truncate()
+ keys_to_delete = []
+ for k, v in self.compress.items():
+ if v >= where:
+ keys_to_delete.append(k)
+ for k in keys_to_delete:
+ del self.compress[k]
+
+ def _set_section(self, section):
+ """Set the renderer's current section.
+
+ Sections must be rendered order: QUESTION, ANSWER, AUTHORITY,
+ ADDITIONAL. Sections may be empty.
+
+ Raises dns.exception.FormError if an attempt was made to set
+ a section value less than the current section.
+ """
+
+ if self.section != section:
+ if self.section > section:
+ raise dns.exception.FormError
+ self.section = section
+
+ def add_question(self, qname, rdtype, rdclass=dns.rdataclass.IN):
+ """Add a question to the message."""
+
+ self._set_section(QUESTION)
+ before = self.output.tell()
+ qname.to_wire(self.output, self.compress, self.origin)
+ self.output.write(struct.pack("!HH", rdtype, rdclass))
+ after = self.output.tell()
+ if after >= self.max_size:
+ self._rollback(before)
+ raise dns.exception.TooBig
+ self.counts[QUESTION] += 1
+
+ def add_rrset(self, section, rrset, **kw):
+ """Add the rrset to the specified section.
+
+ Any keyword arguments are passed on to the rdataset's to_wire()
+ routine.
+ """
+
+ self._set_section(section)
+ before = self.output.tell()
+ n = rrset.to_wire(self.output, self.compress, self.origin, **kw)
+ after = self.output.tell()
+ if after >= self.max_size:
+ self._rollback(before)
+ raise dns.exception.TooBig
+ self.counts[section] += n
+
+ def add_rdataset(self, section, name, rdataset, **kw):
+ """Add the rdataset to the specified section, using the specified
+ name as the owner name.
+
+ Any keyword arguments are passed on to the rdataset's to_wire()
+ routine.
+ """
+
+ self._set_section(section)
+ before = self.output.tell()
+ n = rdataset.to_wire(name, self.output, self.compress, self.origin,
+ **kw)
+ after = self.output.tell()
+ if after >= self.max_size:
+ self._rollback(before)
+ raise dns.exception.TooBig
+ self.counts[section] += n
+
+ def add_edns(self, edns, ednsflags, payload, options=None):
+ """Add an EDNS OPT record to the message."""
+
+ # make sure the EDNS version in ednsflags agrees with edns
+ ednsflags &= long(0xFF00FFFF)
+ ednsflags |= (edns << 16)
+ self._set_section(ADDITIONAL)
+ before = self.output.tell()
+ self.output.write(struct.pack('!BHHIH', 0, dns.rdatatype.OPT, payload,
+ ednsflags, 0))
+ if options is not None:
+ lstart = self.output.tell()
+ for opt in options:
+ stuff = struct.pack("!HH", opt.otype, 0)
+ self.output.write(stuff)
+ start = self.output.tell()
+ opt.to_wire(self.output)
+ end = self.output.tell()
+ assert end - start < 65536
+ self.output.seek(start - 2)
+ stuff = struct.pack("!H", end - start)
+ self.output.write(stuff)
+ self.output.seek(0, 2)
+ lend = self.output.tell()
+ assert lend - lstart < 65536
+ self.output.seek(lstart - 2)
+ stuff = struct.pack("!H", lend - lstart)
+ self.output.write(stuff)
+ self.output.seek(0, 2)
+ after = self.output.tell()
+ if after >= self.max_size:
+ self._rollback(before)
+ raise dns.exception.TooBig
+ self.counts[ADDITIONAL] += 1
+
+ def add_tsig(self, keyname, secret, fudge, id, tsig_error, other_data,
+ request_mac, algorithm=dns.tsig.default_algorithm):
+ """Add a TSIG signature to the message."""
+
+ s = self.output.getvalue()
+ (tsig_rdata, self.mac, ctx) = dns.tsig.sign(s,
+ keyname,
+ secret,
+ int(time.time()),
+ fudge,
+ id,
+ tsig_error,
+ other_data,
+ request_mac,
+ algorithm=algorithm)
+ self._write_tsig(tsig_rdata, keyname)
+
+ def add_multi_tsig(self, ctx, keyname, secret, fudge, id, tsig_error,
+ other_data, request_mac,
+ algorithm=dns.tsig.default_algorithm):
+ """Add a TSIG signature to the message. Unlike add_tsig(), this can be
+ used for a series of consecutive DNS envelopes, e.g. for a zone
+ transfer over TCP [RFC2845, 4.4].
+
+ For the first message in the sequence, give ctx=None. For each
+ subsequent message, give the ctx that was returned from the
+ add_multi_tsig() call for the previous message."""
+
+ s = self.output.getvalue()
+ (tsig_rdata, self.mac, ctx) = dns.tsig.sign(s,
+ keyname,
+ secret,
+ int(time.time()),
+ fudge,
+ id,
+ tsig_error,
+ other_data,
+ request_mac,
+ ctx=ctx,
+ first=ctx is None,
+ multi=True,
+ algorithm=algorithm)
+ self._write_tsig(tsig_rdata, keyname)
+ return ctx
+
+ def _write_tsig(self, tsig_rdata, keyname):
+ self._set_section(ADDITIONAL)
+ before = self.output.tell()
+
+ keyname.to_wire(self.output, self.compress, self.origin)
+ self.output.write(struct.pack('!HHIH', dns.rdatatype.TSIG,
+ dns.rdataclass.ANY, 0, 0))
+ rdata_start = self.output.tell()
+ self.output.write(tsig_rdata)
+
+ after = self.output.tell()
+ assert after - rdata_start < 65536
+ if after >= self.max_size:
+ self._rollback(before)
+ raise dns.exception.TooBig
+
+ self.output.seek(rdata_start - 2)
+ self.output.write(struct.pack('!H', after - rdata_start))
+ self.counts[ADDITIONAL] += 1
+ self.output.seek(10)
+ self.output.write(struct.pack('!H', self.counts[ADDITIONAL]))
+ self.output.seek(0, 2)
+
+ def write_header(self):
+ """Write the DNS message header.
+
+ Writing the DNS message header is done after all sections
+ have been rendered, but before the optional TSIG signature
+ is added.
+ """
+
+ self.output.seek(0)
+ self.output.write(struct.pack('!HHHHHH', self.id, self.flags,
+ self.counts[0], self.counts[1],
+ self.counts[2], self.counts[3]))
+ self.output.seek(0, 2)
+
+ def get_wire(self):
+ """Return the wire format message."""
+
+ return self.output.getvalue()
diff --git a/openpype/vendor/python/python_2/dns/resolver.py b/openpype/vendor/python/python_2/dns/resolver.py
new file mode 100644
index 0000000000..806e5b2b45
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/resolver.py
@@ -0,0 +1,1383 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS stub resolver."""
+
+import socket
+import sys
+import time
+import random
+
+try:
+ import threading as _threading
+except ImportError:
+ import dummy_threading as _threading
+
+import dns.exception
+import dns.flags
+import dns.ipv4
+import dns.ipv6
+import dns.message
+import dns.name
+import dns.query
+import dns.rcode
+import dns.rdataclass
+import dns.rdatatype
+import dns.reversename
+import dns.tsig
+from ._compat import xrange, string_types
+
+if sys.platform == 'win32':
+ try:
+ import winreg as _winreg
+ except ImportError:
+ import _winreg # pylint: disable=import-error
+
+class NXDOMAIN(dns.exception.DNSException):
+ """The DNS query name does not exist."""
+ supp_kwargs = {'qnames', 'responses'}
+ fmt = None # we have our own __str__ implementation
+
+ def _check_kwargs(self, qnames, responses=None):
+ if not isinstance(qnames, (list, tuple, set)):
+ raise AttributeError("qnames must be a list, tuple or set")
+ if len(qnames) == 0:
+ raise AttributeError("qnames must contain at least one element")
+ if responses is None:
+ responses = {}
+ elif not isinstance(responses, dict):
+ raise AttributeError("responses must be a dict(qname=response)")
+ kwargs = dict(qnames=qnames, responses=responses)
+ return kwargs
+
+ def __str__(self):
+ if 'qnames' not in self.kwargs:
+ return super(NXDOMAIN, self).__str__()
+ qnames = self.kwargs['qnames']
+ if len(qnames) > 1:
+ msg = 'None of DNS query names exist'
+ else:
+ msg = 'The DNS query name does not exist'
+ qnames = ', '.join(map(str, qnames))
+ return "{}: {}".format(msg, qnames)
+
+ def canonical_name(self):
+ if not 'qnames' in self.kwargs:
+ raise TypeError("parametrized exception required")
+ IN = dns.rdataclass.IN
+ CNAME = dns.rdatatype.CNAME
+ cname = None
+ for qname in self.kwargs['qnames']:
+ response = self.kwargs['responses'][qname]
+ for answer in response.answer:
+ if answer.rdtype != CNAME or answer.rdclass != IN:
+ continue
+ cname = answer.items[0].target.to_text()
+ if cname is not None:
+ return dns.name.from_text(cname)
+ return self.kwargs['qnames'][0]
+ canonical_name = property(canonical_name, doc=(
+ "Return the unresolved canonical name."))
+
+ def __add__(self, e_nx):
+ """Augment by results from another NXDOMAIN exception."""
+ qnames0 = list(self.kwargs.get('qnames', []))
+ responses0 = dict(self.kwargs.get('responses', {}))
+ responses1 = e_nx.kwargs.get('responses', {})
+ for qname1 in e_nx.kwargs.get('qnames', []):
+ if qname1 not in qnames0:
+ qnames0.append(qname1)
+ if qname1 in responses1:
+ responses0[qname1] = responses1[qname1]
+ return NXDOMAIN(qnames=qnames0, responses=responses0)
+
+ def qnames(self):
+ """All of the names that were tried.
+
+ Returns a list of ``dns.name.Name``.
+ """
+ return self.kwargs['qnames']
+
+ def responses(self):
+ """A map from queried names to their NXDOMAIN responses.
+
+ Returns a dict mapping a ``dns.name.Name`` to a
+ ``dns.message.Message``.
+ """
+ return self.kwargs['responses']
+
+ def response(self, qname):
+ """The response for query *qname*.
+
+ Returns a ``dns.message.Message``.
+ """
+ return self.kwargs['responses'][qname]
+
+
+class YXDOMAIN(dns.exception.DNSException):
+ """The DNS query name is too long after DNAME substitution."""
+
+# The definition of the Timeout exception has moved from here to the
+# dns.exception module. We keep dns.resolver.Timeout defined for
+# backwards compatibility.
+
+Timeout = dns.exception.Timeout
+
+
+class NoAnswer(dns.exception.DNSException):
+ """The DNS response does not contain an answer to the question."""
+ fmt = 'The DNS response does not contain an answer ' + \
+ 'to the question: {query}'
+ supp_kwargs = {'response'}
+
+ def _fmt_kwargs(self, **kwargs):
+ return super(NoAnswer, self)._fmt_kwargs(
+ query=kwargs['response'].question)
+
+
+class NoNameservers(dns.exception.DNSException):
+ """All nameservers failed to answer the query.
+
+ errors: list of servers and respective errors
+ The type of errors is
+ [(server IP address, any object convertible to string)].
+ Non-empty errors list will add explanatory message ()
+ """
+
+ msg = "All nameservers failed to answer the query."
+ fmt = "%s {query}: {errors}" % msg[:-1]
+ supp_kwargs = {'request', 'errors'}
+
+ def _fmt_kwargs(self, **kwargs):
+ srv_msgs = []
+ for err in kwargs['errors']:
+ srv_msgs.append('Server {} {} port {} answered {}'.format(err[0],
+ 'TCP' if err[1] else 'UDP', err[2], err[3]))
+ return super(NoNameservers, self)._fmt_kwargs(
+ query=kwargs['request'].question, errors='; '.join(srv_msgs))
+
+
+class NotAbsolute(dns.exception.DNSException):
+ """An absolute domain name is required but a relative name was provided."""
+
+
+class NoRootSOA(dns.exception.DNSException):
+ """There is no SOA RR at the DNS root name. This should never happen!"""
+
+
+class NoMetaqueries(dns.exception.DNSException):
+ """DNS metaqueries are not allowed."""
+
+
+class Answer(object):
+ """DNS stub resolver answer.
+
+ Instances of this class bundle up the result of a successful DNS
+ resolution.
+
+ For convenience, the answer object implements much of the sequence
+ protocol, forwarding to its ``rrset`` attribute. E.g.
+ ``for a in answer`` is equivalent to ``for a in answer.rrset``.
+ ``answer[i]`` is equivalent to ``answer.rrset[i]``, and
+ ``answer[i:j]`` is equivalent to ``answer.rrset[i:j]``.
+
+ Note that CNAMEs or DNAMEs in the response may mean that answer
+ RRset's name might not be the query name.
+ """
+
+ def __init__(self, qname, rdtype, rdclass, response,
+ raise_on_no_answer=True):
+ self.qname = qname
+ self.rdtype = rdtype
+ self.rdclass = rdclass
+ self.response = response
+ min_ttl = -1
+ rrset = None
+ for count in xrange(0, 15):
+ try:
+ rrset = response.find_rrset(response.answer, qname,
+ rdclass, rdtype)
+ if min_ttl == -1 or rrset.ttl < min_ttl:
+ min_ttl = rrset.ttl
+ break
+ except KeyError:
+ if rdtype != dns.rdatatype.CNAME:
+ try:
+ crrset = response.find_rrset(response.answer,
+ qname,
+ rdclass,
+ dns.rdatatype.CNAME)
+ if min_ttl == -1 or crrset.ttl < min_ttl:
+ min_ttl = crrset.ttl
+ for rd in crrset:
+ qname = rd.target
+ break
+ continue
+ except KeyError:
+ if raise_on_no_answer:
+ raise NoAnswer(response=response)
+ if raise_on_no_answer:
+ raise NoAnswer(response=response)
+ if rrset is None and raise_on_no_answer:
+ raise NoAnswer(response=response)
+ self.canonical_name = qname
+ self.rrset = rrset
+ if rrset is None:
+ while 1:
+ # Look for a SOA RR whose owner name is a superdomain
+ # of qname.
+ try:
+ srrset = response.find_rrset(response.authority, qname,
+ rdclass, dns.rdatatype.SOA)
+ if min_ttl == -1 or srrset.ttl < min_ttl:
+ min_ttl = srrset.ttl
+ if srrset[0].minimum < min_ttl:
+ min_ttl = srrset[0].minimum
+ break
+ except KeyError:
+ try:
+ qname = qname.parent()
+ except dns.name.NoParent:
+ break
+ self.expiration = time.time() + min_ttl
+
+ def __getattr__(self, attr):
+ if attr == 'name':
+ return self.rrset.name
+ elif attr == 'ttl':
+ return self.rrset.ttl
+ elif attr == 'covers':
+ return self.rrset.covers
+ elif attr == 'rdclass':
+ return self.rrset.rdclass
+ elif attr == 'rdtype':
+ return self.rrset.rdtype
+ else:
+ raise AttributeError(attr)
+
+ def __len__(self):
+ return self.rrset and len(self.rrset) or 0
+
+ def __iter__(self):
+ return self.rrset and iter(self.rrset) or iter(tuple())
+
+ def __getitem__(self, i):
+ if self.rrset is None:
+ raise IndexError
+ return self.rrset[i]
+
+ def __delitem__(self, i):
+ if self.rrset is None:
+ raise IndexError
+ del self.rrset[i]
+
+
+class Cache(object):
+ """Simple thread-safe DNS answer cache."""
+
+ def __init__(self, cleaning_interval=300.0):
+ """*cleaning_interval*, a ``float`` is the number of seconds between
+ periodic cleanings.
+ """
+
+ self.data = {}
+ self.cleaning_interval = cleaning_interval
+ self.next_cleaning = time.time() + self.cleaning_interval
+ self.lock = _threading.Lock()
+
+ def _maybe_clean(self):
+ """Clean the cache if it's time to do so."""
+
+ now = time.time()
+ if self.next_cleaning <= now:
+ keys_to_delete = []
+ for (k, v) in self.data.items():
+ if v.expiration <= now:
+ keys_to_delete.append(k)
+ for k in keys_to_delete:
+ del self.data[k]
+ now = time.time()
+ self.next_cleaning = now + self.cleaning_interval
+
+ def get(self, key):
+ """Get the answer associated with *key*.
+
+ Returns None if no answer is cached for the key.
+
+ *key*, a ``(dns.name.Name, int, int)`` tuple whose values are the
+ query name, rdtype, and rdclass respectively.
+
+ Returns a ``dns.resolver.Answer`` or ``None``.
+ """
+
+ try:
+ self.lock.acquire()
+ self._maybe_clean()
+ v = self.data.get(key)
+ if v is None or v.expiration <= time.time():
+ return None
+ return v
+ finally:
+ self.lock.release()
+
+ def put(self, key, value):
+ """Associate key and value in the cache.
+
+ *key*, a ``(dns.name.Name, int, int)`` tuple whose values are the
+ query name, rdtype, and rdclass respectively.
+
+ *value*, a ``dns.resolver.Answer``, the answer.
+ """
+
+ try:
+ self.lock.acquire()
+ self._maybe_clean()
+ self.data[key] = value
+ finally:
+ self.lock.release()
+
+ def flush(self, key=None):
+ """Flush the cache.
+
+ If *key* is not ``None``, only that item is flushed. Otherwise
+ the entire cache is flushed.
+
+ *key*, a ``(dns.name.Name, int, int)`` tuple whose values are the
+ query name, rdtype, and rdclass respectively.
+ """
+
+ try:
+ self.lock.acquire()
+ if key is not None:
+ if key in self.data:
+ del self.data[key]
+ else:
+ self.data = {}
+ self.next_cleaning = time.time() + self.cleaning_interval
+ finally:
+ self.lock.release()
+
+
+class LRUCacheNode(object):
+ """LRUCache node."""
+
+ def __init__(self, key, value):
+ self.key = key
+ self.value = value
+ self.prev = self
+ self.next = self
+
+ def link_before(self, node):
+ self.prev = node.prev
+ self.next = node
+ node.prev.next = self
+ node.prev = self
+
+ def link_after(self, node):
+ self.prev = node
+ self.next = node.next
+ node.next.prev = self
+ node.next = self
+
+ def unlink(self):
+ self.next.prev = self.prev
+ self.prev.next = self.next
+
+
+class LRUCache(object):
+ """Thread-safe, bounded, least-recently-used DNS answer cache.
+
+ This cache is better than the simple cache (above) if you're
+ running a web crawler or other process that does a lot of
+ resolutions. The LRUCache has a maximum number of nodes, and when
+ it is full, the least-recently used node is removed to make space
+ for a new one.
+ """
+
+ def __init__(self, max_size=100000):
+ """*max_size*, an ``int``, is the maximum number of nodes to cache;
+ it must be greater than 0.
+ """
+
+ self.data = {}
+ self.set_max_size(max_size)
+ self.sentinel = LRUCacheNode(None, None)
+ self.lock = _threading.Lock()
+
+ def set_max_size(self, max_size):
+ if max_size < 1:
+ max_size = 1
+ self.max_size = max_size
+
+ def get(self, key):
+ """Get the answer associated with *key*.
+
+ Returns None if no answer is cached for the key.
+
+ *key*, a ``(dns.name.Name, int, int)`` tuple whose values are the
+ query name, rdtype, and rdclass respectively.
+
+ Returns a ``dns.resolver.Answer`` or ``None``.
+ """
+
+ try:
+ self.lock.acquire()
+ node = self.data.get(key)
+ if node is None:
+ return None
+ # Unlink because we're either going to move the node to the front
+ # of the LRU list or we're going to free it.
+ node.unlink()
+ if node.value.expiration <= time.time():
+ del self.data[node.key]
+ return None
+ node.link_after(self.sentinel)
+ return node.value
+ finally:
+ self.lock.release()
+
+ def put(self, key, value):
+ """Associate key and value in the cache.
+
+ *key*, a ``(dns.name.Name, int, int)`` tuple whose values are the
+ query name, rdtype, and rdclass respectively.
+
+ *value*, a ``dns.resolver.Answer``, the answer.
+ """
+
+ try:
+ self.lock.acquire()
+ node = self.data.get(key)
+ if node is not None:
+ node.unlink()
+ del self.data[node.key]
+ while len(self.data) >= self.max_size:
+ node = self.sentinel.prev
+ node.unlink()
+ del self.data[node.key]
+ node = LRUCacheNode(key, value)
+ node.link_after(self.sentinel)
+ self.data[key] = node
+ finally:
+ self.lock.release()
+
+ def flush(self, key=None):
+ """Flush the cache.
+
+ If *key* is not ``None``, only that item is flushed. Otherwise
+ the entire cache is flushed.
+
+ *key*, a ``(dns.name.Name, int, int)`` tuple whose values are the
+ query name, rdtype, and rdclass respectively.
+ """
+
+ try:
+ self.lock.acquire()
+ if key is not None:
+ node = self.data.get(key)
+ if node is not None:
+ node.unlink()
+ del self.data[node.key]
+ else:
+ node = self.sentinel.next
+ while node != self.sentinel:
+ next = node.next
+ node.prev = None
+ node.next = None
+ node = next
+ self.data = {}
+ finally:
+ self.lock.release()
+
+
+class Resolver(object):
+ """DNS stub resolver."""
+
    def __init__(self, filename='/etc/resolv.conf', configure=True):
        """*filename*, a ``text`` or file object, specifying a file
        in standard /etc/resolv.conf format.  This parameter is meaningful
        only when *configure* is true and the platform is POSIX.

        *configure*, a ``bool``.  If True (the default), the resolver
        instance is configured in the normal fashion for the operating
        system the resolver is running on.  (I.e. by reading a
        /etc/resolv.conf file on POSIX systems and from the registry
        on Windows systems.)
        """

        # Declare every configuration attribute up front; reset() below
        # installs the real defaults.
        self.domain = None
        self.nameservers = None
        self.nameserver_ports = None
        self.port = None
        self.search = None
        self.timeout = None
        self.lifetime = None
        self.keyring = None
        self.keyname = None
        self.keyalgorithm = None
        self.edns = None
        self.ednsflags = None
        self.payload = None
        self.cache = None
        self.flags = None
        self.retry_servfail = False
        self.rotate = False

        self.reset()
        # Platform-specific configuration may then override the defaults.
        if configure:
            if sys.platform == 'win32':
                self.read_registry()
            elif filename:
                self.read_resolv_conf(filename)
+
    def reset(self):
        """Reset all resolver configuration to the defaults."""

        # Default domain: the local host name with its first label removed,
        # or the root if nothing is left.
        self.domain = \
            dns.name.Name(dns.name.from_text(socket.gethostname())[1:])
        if len(self.domain) == 0:
            self.domain = dns.name.root
        self.nameservers = []
        self.nameserver_ports = {}  # per-server port overrides (see query())
        self.port = 53
        self.search = []
        self.timeout = 2.0     # per-attempt timeout, seconds (see _compute_timeout)
        self.lifetime = 30.0   # total budget for one query(), seconds
        self.keyring = None
        self.keyname = None
        self.keyalgorithm = dns.tsig.default_algorithm
        self.edns = -1         # -1 means "do not use EDNS" (see use_edns)
        self.ednsflags = 0
        self.payload = 0
        self.cache = None
        self.flags = None
        self.retry_servfail = False
        self.rotate = False
+
+ def read_resolv_conf(self, f):
+ """Process *f* as a file in the /etc/resolv.conf format. If f is
+ a ``text``, it is used as the name of the file to open; otherwise it
+ is treated as the file itself."""
+
+ if isinstance(f, string_types):
+ try:
+ f = open(f, 'r')
+ except IOError:
+ # /etc/resolv.conf doesn't exist, can't be read, etc.
+ # We'll just use the default resolver configuration.
+ self.nameservers = ['127.0.0.1']
+ return
+ want_close = True
+ else:
+ want_close = False
+ try:
+ for l in f:
+ if len(l) == 0 or l[0] == '#' or l[0] == ';':
+ continue
+ tokens = l.split()
+
+ # Any line containing less than 2 tokens is malformed
+ if len(tokens) < 2:
+ continue
+
+ if tokens[0] == 'nameserver':
+ self.nameservers.append(tokens[1])
+ elif tokens[0] == 'domain':
+ self.domain = dns.name.from_text(tokens[1])
+ elif tokens[0] == 'search':
+ for suffix in tokens[1:]:
+ self.search.append(dns.name.from_text(suffix))
+ elif tokens[0] == 'options':
+ if 'rotate' in tokens[1:]:
+ self.rotate = True
+ finally:
+ if want_close:
+ f.close()
+ if len(self.nameservers) == 0:
+ self.nameservers.append('127.0.0.1')
+
+ def _determine_split_char(self, entry):
+ #
+ # The windows registry irritatingly changes the list element
+ # delimiter in between ' ' and ',' (and vice-versa) in various
+ # versions of windows.
+ #
+ if entry.find(' ') >= 0:
+ split_char = ' '
+ elif entry.find(',') >= 0:
+ split_char = ','
+ else:
+ # probably a singleton; treat as a space-separated list.
+ split_char = ' '
+ return split_char
+
+ def _config_win32_nameservers(self, nameservers):
+ # we call str() on nameservers to convert it from unicode to ascii
+ nameservers = str(nameservers)
+ split_char = self._determine_split_char(nameservers)
+ ns_list = nameservers.split(split_char)
+ for ns in ns_list:
+ if ns not in self.nameservers:
+ self.nameservers.append(ns)
+
    def _config_win32_domain(self, domain):
        # Set self.domain from a registry 'Domain' value.
        # str() converts the registry's unicode value to ascii before
        # it is parsed as a DNS name.
        self.domain = dns.name.from_text(str(domain))
+
+ def _config_win32_search(self, search):
+ # we call str() on search to convert it from unicode to ascii
+ search = str(search)
+ split_char = self._determine_split_char(search)
+ search_list = search.split(split_char)
+ for s in search_list:
+ if s not in self.search:
+ self.search.append(dns.name.from_text(s))
+
    def _config_win32_fromkey(self, key, always_try_domain):
        # Harvest resolver settings from one registry key.  Statically
        # configured values ('NameServer') take precedence over the
        # DHCP-assigned ones ('DhcpNameServer'); the DHCP values are only
        # consulted when no static servers are configured.
        try:
            servers, rtype = _winreg.QueryValueEx(key, 'NameServer')
        except WindowsError:  # pylint: disable=undefined-variable
            servers = None
        if servers:
            self._config_win32_nameservers(servers)
        if servers or always_try_domain:
            # The top-level parameters key passes always_try_domain=True so
            # the Domain value is read even without static servers.
            try:
                dom, rtype = _winreg.QueryValueEx(key, 'Domain')
                if dom:
                    self._config_win32_domain(dom)
            except WindowsError:  # pylint: disable=undefined-variable
                pass
        else:
            # No static configuration: fall back to the DHCP-supplied values.
            try:
                servers, rtype = _winreg.QueryValueEx(key, 'DhcpNameServer')
            except WindowsError:  # pylint: disable=undefined-variable
                servers = None
            if servers:
                self._config_win32_nameservers(servers)
                try:
                    dom, rtype = _winreg.QueryValueEx(key, 'DhcpDomain')
                    if dom:
                        self._config_win32_domain(dom)
                except WindowsError:  # pylint: disable=undefined-variable
                    pass
        try:
            search, rtype = _winreg.QueryValueEx(key, 'SearchList')
        except WindowsError:  # pylint: disable=undefined-variable
            search = None
        if search:
            self._config_win32_search(search)
+
+ def read_registry(self):
+ """Extract resolver configuration from the Windows registry."""
+
+ lm = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
+ want_scan = False
+ try:
+ try:
+ # XP, 2000
+ tcp_params = _winreg.OpenKey(lm,
+ r'SYSTEM\CurrentControlSet'
+ r'\Services\Tcpip\Parameters')
+ want_scan = True
+ except EnvironmentError:
+ # ME
+ tcp_params = _winreg.OpenKey(lm,
+ r'SYSTEM\CurrentControlSet'
+ r'\Services\VxD\MSTCP')
+ try:
+ self._config_win32_fromkey(tcp_params, True)
+ finally:
+ tcp_params.Close()
+ if want_scan:
+ interfaces = _winreg.OpenKey(lm,
+ r'SYSTEM\CurrentControlSet'
+ r'\Services\Tcpip\Parameters'
+ r'\Interfaces')
+ try:
+ i = 0
+ while True:
+ try:
+ guid = _winreg.EnumKey(interfaces, i)
+ i += 1
+ key = _winreg.OpenKey(interfaces, guid)
+ if not self._win32_is_nic_enabled(lm, guid, key):
+ continue
+ try:
+ self._config_win32_fromkey(key, False)
+ finally:
+ key.Close()
+ except EnvironmentError:
+ break
+ finally:
+ interfaces.Close()
+ finally:
+ lm.Close()
+
    def _win32_is_nic_enabled(self, lm, guid, interface_key):
        # Look in the Windows Registry to determine whether the network
        # interface corresponding to the given guid is enabled.
        # Returns a bool.
        #
        # (Code contributed by Paul Marks, thanks!)
        #
        try:
            # This hard-coded location seems to be consistent, at least
            # from Windows 2000 through Vista.
            connection_key = _winreg.OpenKey(
                lm,
                r'SYSTEM\CurrentControlSet\Control\Network'
                r'\{4D36E972-E325-11CE-BFC1-08002BE10318}'
                r'\%s\Connection' % guid)

            try:
                # The PnpInstanceID points to a key inside Enum
                (pnp_id, ttype) = _winreg.QueryValueEx(
                    connection_key, 'PnpInstanceID')

                # An unexpected value type falls through to the legacy
                # NTEContextList check below.
                if ttype != _winreg.REG_SZ:
                    raise ValueError

                device_key = _winreg.OpenKey(
                    lm, r'SYSTEM\CurrentControlSet\Enum\%s' % pnp_id)

                try:
                    # Get ConfigFlags for this device
                    (flags, ttype) = _winreg.QueryValueEx(
                        device_key, 'ConfigFlags')

                    if ttype != _winreg.REG_DWORD:
                        raise ValueError

                    # Based on experimentation, bit 0x1 indicates that the
                    # device is disabled.
                    return not flags & 0x1

                finally:
                    device_key.Close()
            finally:
                connection_key.Close()
        except (EnvironmentError, ValueError):
            # Pre-vista, enabled interfaces seem to have a non-empty
            # NTEContextList; this was how dnspython detected enabled
            # nics before the code above was contributed.  We've retained
            # the old method since we don't know if the code above works
            # on Windows 95/98/ME.
            try:
                (nte, ttype) = _winreg.QueryValueEx(interface_key,
                                                    'NTEContextList')
                return nte is not None
            except WindowsError:  # pylint: disable=undefined-variable
                return False
+
+ def _compute_timeout(self, start, lifetime=None):
+ lifetime = self.lifetime if lifetime is None else lifetime
+ now = time.time()
+ duration = now - start
+ if duration < 0:
+ if duration < -1:
+ # Time going backwards is bad. Just give up.
+ raise Timeout(timeout=duration)
+ else:
+ # Time went backwards, but only a little. This can
+ # happen, e.g. under vmware with older linux kernels.
+ # Pretend it didn't happen.
+ now = start
+ if duration >= lifetime:
+ raise Timeout(timeout=duration)
+ return min(lifetime - duration, self.timeout)
+
    def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
              tcp=False, source=None, raise_on_no_answer=True, source_port=0,
              lifetime=None):
        """Query nameservers to find the answer to the question.

        The *qname*, *rdtype*, and *rdclass* parameters may be objects
        of the appropriate type, or strings that can be converted into objects
        of the appropriate type.

        *qname*, a ``dns.name.Name`` or ``text``, the query name.

        *rdtype*, an ``int`` or ``text``, the query type.

        *rdclass*, an ``int`` or ``text``, the query class.

        *tcp*, a ``bool``.  If ``True``, use TCP to make the query.

        *source*, a ``text`` or ``None``.  If not ``None``, bind to this IP
        address when making queries.

        *raise_on_no_answer*, a ``bool``.  If ``True``, raise
        ``dns.resolver.NoAnswer`` if there's no answer to the question.

        *source_port*, an ``int``, the port from which to send the message.

        *lifetime*, a ``float``, how long query should run before timing out.

        Raises ``dns.exception.Timeout`` if no answers could be found
        in the specified lifetime.

        Raises ``dns.resolver.NXDOMAIN`` if the query name does not exist.

        Raises ``dns.resolver.YXDOMAIN`` if the query name is too long after
        DNAME substitution.

        Raises ``dns.resolver.NoAnswer`` if *raise_on_no_answer* is
        ``True`` and the query name exists but has no RRset of the
        desired type and class.

        Raises ``dns.resolver.NoNameservers`` if no non-broken
        nameservers are available to answer the question.

        Returns a ``dns.resolver.Answer`` instance.
        """

        # Normalize string arguments into their object forms, and reject
        # meta types/classes which cannot be queried for.
        if isinstance(qname, string_types):
            qname = dns.name.from_text(qname, None)
        if isinstance(rdtype, string_types):
            rdtype = dns.rdatatype.from_text(rdtype)
        if dns.rdatatype.is_metatype(rdtype):
            raise NoMetaqueries
        if isinstance(rdclass, string_types):
            rdclass = dns.rdataclass.from_text(rdclass)
        if dns.rdataclass.is_metaclass(rdclass):
            raise NoMetaqueries
        # Build the list of candidate names: an absolute name is tried
        # as-is; a relative one is qualified with the root (if multi-label)
        # and then with the search list or the default domain.
        qnames_to_try = []
        if qname.is_absolute():
            qnames_to_try.append(qname)
        else:
            if len(qname) > 1:
                qnames_to_try.append(qname.concatenate(dns.name.root))
            if self.search:
                for suffix in self.search:
                    qnames_to_try.append(qname.concatenate(suffix))
            else:
                qnames_to_try.append(qname.concatenate(self.domain))
        all_nxdomain = True
        nxdomain_responses = {}
        start = time.time()
        _qname = None  # make pylint happy
        for _qname in qnames_to_try:
            # A cached answer short-circuits the network entirely.
            if self.cache:
                answer = self.cache.get((_qname, rdtype, rdclass))
                if answer is not None:
                    if answer.rrset is None and raise_on_no_answer:
                        raise NoAnswer(response=answer.response)
                    else:
                        return answer
            request = dns.message.make_query(_qname, rdtype, rdclass)
            if self.keyname is not None:
                request.use_tsig(self.keyring, self.keyname,
                                 algorithm=self.keyalgorithm)
            request.use_edns(self.edns, self.ednsflags, self.payload)
            if self.flags is not None:
                request.flags = self.flags
            response = None
            #
            # make a copy of the servers list so we can alter it later.
            #
            nameservers = self.nameservers[:]
            errors = []
            if self.rotate:
                random.shuffle(nameservers)
            backoff = 0.10
            while response is None:
                if len(nameservers) == 0:
                    raise NoNameservers(request=request, errors=errors)
                # Iterate over a copy since broken servers are removed
                # from 'nameservers' as we go.
                for nameserver in nameservers[:]:
                    timeout = self._compute_timeout(start, lifetime)
                    port = self.nameserver_ports.get(nameserver, self.port)
                    try:
                        tcp_attempt = tcp
                        if tcp:
                            response = dns.query.tcp(request, nameserver,
                                                     timeout, port,
                                                     source=source,
                                                     source_port=source_port)
                        else:
                            response = dns.query.udp(request, nameserver,
                                                     timeout, port,
                                                     source=source,
                                                     source_port=source_port)
                            if response.flags & dns.flags.TC:
                                # Response truncated; retry with TCP.
                                tcp_attempt = True
                                timeout = self._compute_timeout(start,
                                                                lifetime)
                                response = \
                                    dns.query.tcp(request, nameserver,
                                                  timeout, port,
                                                  source=source,
                                                  source_port=source_port)
                    except (socket.error, dns.exception.Timeout) as ex:
                        #
                        # Communication failure or timeout.  Go to the
                        # next server
                        #
                        errors.append((nameserver, tcp_attempt, port, ex,
                                       response))
                        response = None
                        continue
                    except dns.query.UnexpectedSource as ex:
                        #
                        # Who knows?  Keep going.
                        #
                        errors.append((nameserver, tcp_attempt, port, ex,
                                       response))
                        response = None
                        continue
                    except dns.exception.FormError as ex:
                        #
                        # We don't understand what this server is
                        # saying.  Take it out of the mix and
                        # continue.
                        #
                        nameservers.remove(nameserver)
                        errors.append((nameserver, tcp_attempt, port, ex,
                                       response))
                        response = None
                        continue
                    except EOFError as ex:
                        #
                        # We're using TCP and they hung up on us.
                        # Probably they don't support TCP (though
                        # they're supposed to!).  Take it out of the
                        # mix and continue.
                        #
                        nameservers.remove(nameserver)
                        errors.append((nameserver, tcp_attempt, port, ex,
                                       response))
                        response = None
                        continue
                    rcode = response.rcode()
                    if rcode == dns.rcode.YXDOMAIN:
                        ex = YXDOMAIN()
                        errors.append((nameserver, tcp_attempt, port, ex,
                                       response))
                        raise ex
                    if rcode == dns.rcode.NOERROR or \
                       rcode == dns.rcode.NXDOMAIN:
                        break
                    #
                    # We got a response, but we're not happy with the
                    # rcode in it.  Remove the server from the mix if
                    # the rcode isn't SERVFAIL.
                    #
                    if rcode != dns.rcode.SERVFAIL or not self.retry_servfail:
                        nameservers.remove(nameserver)
                    errors.append((nameserver, tcp_attempt, port,
                                   dns.rcode.to_text(rcode), response))
                    response = None
                if response is not None:
                    break
                #
                # All nameservers failed!
                #
                if len(nameservers) > 0:
                    #
                    # But we still have servers to try.  Sleep a bit
                    # so we don't pound them!
                    #
                    timeout = self._compute_timeout(start, lifetime)
                    sleep_time = min(timeout, backoff)
                    backoff *= 2
                    time.sleep(sleep_time)
            if response.rcode() == dns.rcode.NXDOMAIN:
                # Remember the NXDOMAIN and try the next candidate name.
                nxdomain_responses[_qname] = response
                continue
            all_nxdomain = False
            break
        if all_nxdomain:
            raise NXDOMAIN(qnames=qnames_to_try, responses=nxdomain_responses)
        answer = Answer(_qname, rdtype, rdclass, response,
                        raise_on_no_answer)
        if self.cache:
            self.cache.put((_qname, rdtype, rdclass), answer)
        return answer
+
+ def use_tsig(self, keyring, keyname=None,
+ algorithm=dns.tsig.default_algorithm):
+ """Add a TSIG signature to the query.
+
+ See the documentation of the Message class for a complete
+ description of the keyring dictionary.
+
+ *keyring*, a ``dict``, the TSIG keyring to use. If a
+ *keyring* is specified but a *keyname* is not, then the key
+ used will be the first key in the *keyring*. Note that the
+ order of keys in a dictionary is not defined, so applications
+ should supply a keyname when a keyring is used, unless they
+ know the keyring contains only one key.
+
+ *keyname*, a ``dns.name.Name`` or ``None``, the name of the TSIG key
+ to use; defaults to ``None``. The key must be defined in the keyring.
+
+ *algorithm*, a ``dns.name.Name``, the TSIG algorithm to use.
+ """
+
+ self.keyring = keyring
+ if keyname is None:
+ self.keyname = list(self.keyring.keys())[0]
+ else:
+ self.keyname = keyname
+ self.keyalgorithm = algorithm
+
+ def use_edns(self, edns, ednsflags, payload):
+ """Configure EDNS behavior.
+
+ *edns*, an ``int``, is the EDNS level to use. Specifying
+ ``None``, ``False``, or ``-1`` means "do not use EDNS", and in this case
+ the other parameters are ignored. Specifying ``True`` is
+ equivalent to specifying 0, i.e. "use EDNS0".
+
+ *ednsflags*, an ``int``, the EDNS flag values.
+
+ *payload*, an ``int``, is the EDNS sender's payload field, which is the
+ maximum size of UDP datagram the sender can handle. I.e. how big
+ a response to this message can be.
+ """
+
+ if edns is None:
+ edns = -1
+ self.edns = edns
+ self.ednsflags = ednsflags
+ self.payload = payload
+
    def set_flags(self, flags):
        """Overrides the default flags with your own.

        *flags*, an ``int``, the message flags to use.  The stored value
        is applied to each outgoing request built by query().
        """

        self.flags = flags
+
+
#: The default resolver.  Lazily created by get_default_resolver().
default_resolver = None


def get_default_resolver():
    """Get the default resolver, initializing it if necessary.

    Returns a ``dns.resolver.Resolver``.
    """
    if default_resolver is None:
        reset_default_resolver()
    return default_resolver


def reset_default_resolver():
    """Re-initialize default resolver.

    Note that the resolver configuration (i.e. /etc/resolv.conf on UNIX
    systems) will be re-read immediately.
    """

    global default_resolver
    default_resolver = Resolver()
+
+
def query(qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
          tcp=False, source=None, raise_on_no_answer=True,
          source_port=0, lifetime=None):
    """Query nameservers to find the answer to the question.

    This is a convenience wrapper that forwards every argument to the
    default resolver's query() method; see
    ``dns.resolver.Resolver.query`` for details on the parameters.
    """

    resolver = get_default_resolver()
    return resolver.query(qname, rdtype, rdclass, tcp, source,
                          raise_on_no_answer, source_port, lifetime)
+
+
def zone_for_name(name, rdclass=dns.rdataclass.IN, tcp=False, resolver=None):
    """Find the name of the zone which contains the specified name.

    *name*, an absolute ``dns.name.Name`` or ``text``, the query name.

    *rdclass*, an ``int``, the query class.

    *tcp*, a ``bool``.  If ``True``, use TCP to make the query.

    *resolver*, a ``dns.resolver.Resolver`` or ``None``, the resolver to
    use.  If ``None``, the default resolver is used.

    Raises ``dns.resolver.NoRootSOA`` if there is no SOA RR at the DNS
    root.  (This is only likely to happen if you're using non-default
    root servers in your network and they are misconfigured.)

    Returns a ``dns.name.Name``.
    """

    if isinstance(name, string_types):
        name = dns.name.from_text(name, dns.name.root)
    if resolver is None:
        resolver = get_default_resolver()
    if not name.is_absolute():
        raise NotAbsolute(name)
    # Walk from the name toward the root, looking for the first label at
    # which an SOA record is owned by the name itself.
    while True:
        try:
            answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
            if answer.rrset.name == name:
                return name
            # otherwise we were CNAMEd or DNAMEd and need to look higher
        except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
            pass
        try:
            name = name.parent()
        except dns.name.NoParent:
            raise NoRootSOA
+
+#
+# Support for overriding the system resolver for all python code in the
+# running process.
+#
+
# Protocols usable with each socket type, for building getaddrinfo()-style
# result tuples.
# NOTE(review): socket.SOL_TCP / socket.SOL_UDP are not guaranteed to
# exist on every platform -- TODO confirm availability where this
# module is deployed.
_protocols_for_socktype = {
    socket.SOCK_DGRAM: [socket.SOL_UDP],
    socket.SOCK_STREAM: [socket.SOL_TCP],
}

# Resolver used by the replacement functions installed by
# override_system_resolver().
_resolver = None
# Originals saved so restore_system_resolver() can undo the monkeypatching.
_original_getaddrinfo = socket.getaddrinfo
_original_getnameinfo = socket.getnameinfo
_original_getfqdn = socket.getfqdn
_original_gethostbyname = socket.gethostbyname
_original_gethostbyname_ex = socket.gethostbyname_ex
_original_gethostbyaddr = socket.gethostbyaddr
+
+
def _getaddrinfo(host=None, service=None, family=socket.AF_UNSPEC, socktype=0,
                 proto=0, flags=0):
    # Replacement for socket.getaddrinfo() that resolves names via the
    # module-level _resolver.  Mirrors getaddrinfo()'s interface and
    # returns the same (family, socktype, proto, canonname, sockaddr)
    # tuples.
    # AI_ADDRCONFIG and AI_V4MAPPED are not supported by this replacement.
    if flags & (socket.AI_ADDRCONFIG | socket.AI_V4MAPPED) != 0:
        raise NotImplementedError
    if host is None and service is None:
        raise socket.gaierror(socket.EAI_NONAME)
    v6addrs = []
    v4addrs = []
    canonical_name = None
    try:
        # Is host None or a V6 address literal?
        if host is None:
            canonical_name = 'localhost'
            if flags & socket.AI_PASSIVE != 0:
                # Wildcard addresses, for listening sockets.
                v6addrs.append('::')
                v4addrs.append('0.0.0.0')
            else:
                v6addrs.append('::1')
                v4addrs.append('127.0.0.1')
        else:
            # Strip a '%scope' suffix, if any, before parsing as IPv6.
            parts = host.split('%')
            if len(parts) == 2:
                ahost = parts[0]
            else:
                ahost = host
            addr = dns.ipv6.inet_aton(ahost)
            v6addrs.append(host)
            canonical_name = host
    except Exception:
        try:
            # Is it a V4 address literal?
            addr = dns.ipv4.inet_aton(host)
            v4addrs.append(host)
            canonical_name = host
        except Exception:
            # Not a literal: do real DNS resolution, unless the caller
            # forbade it with AI_NUMERICHOST.
            if flags & socket.AI_NUMERICHOST == 0:
                try:
                    if family == socket.AF_INET6 or family == socket.AF_UNSPEC:
                        v6 = _resolver.query(host, dns.rdatatype.AAAA,
                                             raise_on_no_answer=False)
                        # Note that setting host ensures we query the same name
                        # for A as we did for AAAA.
                        host = v6.qname
                        canonical_name = v6.canonical_name.to_text(True)
                        if v6.rrset is not None:
                            for rdata in v6.rrset:
                                v6addrs.append(rdata.address)
                    if family == socket.AF_INET or family == socket.AF_UNSPEC:
                        v4 = _resolver.query(host, dns.rdatatype.A,
                                             raise_on_no_answer=False)
                        host = v4.qname
                        canonical_name = v4.canonical_name.to_text(True)
                        if v4.rrset is not None:
                            for rdata in v4.rrset:
                                v4addrs.append(rdata.address)
                except dns.resolver.NXDOMAIN:
                    raise socket.gaierror(socket.EAI_NONAME)
                except Exception:
                    raise socket.gaierror(socket.EAI_SYSTEM)
    port = None
    try:
        # Is it a port literal?
        if service is None:
            port = 0
        else:
            port = int(service)
    except Exception:
        # Not numeric: look the service name up, unless forbidden.
        if flags & socket.AI_NUMERICSERV == 0:
            try:
                port = socket.getservbyname(service)
            except Exception:
                pass
    if port is None:
        raise socket.gaierror(socket.EAI_NONAME)
    # Expand the collected addresses over the requested socket types and
    # their protocols.
    tuples = []
    if socktype == 0:
        socktypes = [socket.SOCK_DGRAM, socket.SOCK_STREAM]
    else:
        socktypes = [socktype]
    if flags & socket.AI_CANONNAME != 0:
        cname = canonical_name
    else:
        cname = ''
    if family == socket.AF_INET6 or family == socket.AF_UNSPEC:
        for addr in v6addrs:
            for socktype in socktypes:
                for proto in _protocols_for_socktype[socktype]:
                    tuples.append((socket.AF_INET6, socktype, proto,
                                   cname, (addr, port, 0, 0)))
    if family == socket.AF_INET or family == socket.AF_UNSPEC:
        for addr in v4addrs:
            for socktype in socktypes:
                for proto in _protocols_for_socktype[socktype]:
                    tuples.append((socket.AF_INET, socktype, proto,
                                   cname, (addr, port)))
    if len(tuples) == 0:
        raise socket.gaierror(socket.EAI_NONAME)
    return tuples
+
+
def _getnameinfo(sockaddr, flags=0):
    # Replacement for socket.getnameinfo() that reverse-resolves via the
    # module-level _resolver.  Returns a (hostname, service) pair.
    host = sockaddr[0]
    port = sockaddr[1]
    if len(sockaddr) == 4:
        # A 4-tuple (addr, port, flowinfo, scope) means IPv6.
        scope = sockaddr[3]
        family = socket.AF_INET6
    else:
        scope = None
        family = socket.AF_INET
    # Canonicalize the address via forward resolution.
    tuples = _getaddrinfo(host, port, family, socket.SOCK_STREAM,
                          socket.SOL_TCP, 0)
    if len(tuples) > 1:
        raise socket.error('sockaddr resolved to multiple addresses')
    addr = tuples[0][4][0]
    if flags & socket.NI_DGRAM:
        pname = 'udp'
    else:
        pname = 'tcp'
    qname = dns.reversename.from_address(addr)
    if flags & socket.NI_NUMERICHOST == 0:
        try:
            answer = _resolver.query(qname, 'PTR')
            hostname = answer.rrset[0].target.to_text(True)
        except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
            # No PTR record: fall back to the numeric address unless the
            # caller required a name.
            if flags & socket.NI_NAMEREQD:
                raise socket.gaierror(socket.EAI_NONAME)
            hostname = addr
            if scope is not None:
                hostname += '%' + str(scope)
    else:
        hostname = addr
        if scope is not None:
            hostname += '%' + str(scope)
    if flags & socket.NI_NUMERICSERV:
        service = str(port)
    else:
        service = socket.getservbyport(port, pname)
    return (hostname, service)
+
+
def _getfqdn(name=None):
    # Replacement for socket.getfqdn(): resolve *name* (defaulting to the
    # local host name) and return the resolved host name; like the
    # original, fall back to the input on any failure.
    if name is None:
        name = socket.gethostname()
    try:
        sockaddr = _getaddrinfo(name, 80)[0][4]
        return _getnameinfo(sockaddr)[0]
    except Exception:
        return name
+
+
def _gethostbyname(name):
    # Replacement for socket.gethostbyname(): first address from the
    # gethostbyname_ex()-style (canonical, aliases, addresses) triple.
    return _gethostbyname_ex(name)[2][0]
+
+
def _gethostbyname_ex(name):
    # Replacement for socket.gethostbyname_ex(): resolve IPv4 only and
    # return a (canonical_name, aliases, addresses) triple.
    tuples = _getaddrinfo(name, 0, socket.AF_INET, socket.SOCK_STREAM,
                          socket.SOL_TCP, socket.AI_CANONNAME)
    canonical = tuples[0][3]
    addresses = [entry[4][0] for entry in tuples]
    # XXX we just ignore aliases
    aliases = []
    return (canonical, aliases, addresses)
+
+
def _gethostbyaddr(ip):
    # Replacement for socket.gethostbyaddr(): reverse-resolve *ip* to a
    # name, then forward-resolve that name for the address list.
    try:
        # IPv6 literal?  Build a 4-tuple sockaddr if so.
        dns.ipv6.inet_aton(ip)
        sockaddr = (ip, 80, 0, 0)
        family = socket.AF_INET6
    except Exception:
        sockaddr = (ip, 80)
        family = socket.AF_INET
    (name, _service) = _getnameinfo(sockaddr, socket.NI_NAMEREQD)
    tuples = _getaddrinfo(name, 0, family, socket.SOCK_STREAM,
                          socket.SOL_TCP, socket.AI_CANONNAME)
    canonical = tuples[0][3]
    addresses = [entry[4][0] for entry in tuples]
    # XXX we just ignore aliases
    aliases = []
    return (canonical, aliases, addresses)
+
+
def override_system_resolver(resolver=None):
    """Override the system resolver routines in the socket module with
    versions which use dnspython's resolver.

    This can be useful in testing situations where you want to control
    the resolution behavior of python code without having to change
    the system's resolver settings (e.g. /etc/resolv.conf).

    The resolver to use may be specified; if it's not, the default
    resolver will be used.

    resolver, a ``dns.resolver.Resolver`` or ``None``, the resolver to use.
    """

    if resolver is None:
        resolver = get_default_resolver()
    global _resolver
    _resolver = resolver
    # Monkeypatch every resolution entry point in the socket module; the
    # originals were saved at import time for restore_system_resolver().
    socket.getaddrinfo = _getaddrinfo
    socket.getnameinfo = _getnameinfo
    socket.getfqdn = _getfqdn
    socket.gethostbyname = _gethostbyname
    socket.gethostbyname_ex = _gethostbyname_ex
    socket.gethostbyaddr = _gethostbyaddr
+
+
def restore_system_resolver():
    """Undo the effects of prior override_system_resolver()."""

    global _resolver
    _resolver = None
    # Reinstall the socket module functions saved at import time.
    socket.getaddrinfo = _original_getaddrinfo
    socket.getnameinfo = _original_getnameinfo
    socket.getfqdn = _original_getfqdn
    socket.gethostbyname = _original_gethostbyname
    socket.gethostbyname_ex = _original_gethostbyname_ex
    socket.gethostbyaddr = _original_gethostbyaddr
diff --git a/openpype/vendor/python/python_2/dns/reversename.py b/openpype/vendor/python/python_2/dns/reversename.py
new file mode 100644
index 0000000000..8f095fa91e
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/reversename.py
@@ -0,0 +1,96 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2006-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Reverse Map Names."""
+
+import binascii
+
+import dns.name
+import dns.ipv6
+import dns.ipv4
+
+from dns._compat import PY3
+
# Origin names under which IPv4 and IPv6 reverse-map (PTR) names live.
ipv4_reverse_domain = dns.name.from_text('in-addr.arpa.')
ipv6_reverse_domain = dns.name.from_text('ip6.arpa.')
+
+
def from_address(text):
    """Convert an IPv4 or IPv6 address in textual form into a Name object
    whose value is the reverse-map domain name of the address.

    *text*, a ``text``, is an IPv4 or IPv6 address in textual form
    (e.g. '127.0.0.1', '::1')

    Raises ``dns.exception.SyntaxError`` if the address is badly formed.

    Returns a ``dns.name.Name``.
    """

    try:
        v6 = dns.ipv6.inet_aton(text)
        if dns.ipv6.is_mapped(v6):
            # IPv4-mapped IPv6 address: reverse-map the embedded V4 part.
            tail = v6[12:]
            if PY3:
                labels = ['%d' % byte for byte in tail]
            else:
                labels = ['%d' % ord(byte) for byte in tail]
            origin = ipv4_reverse_domain
        else:
            # One label per hex nibble.
            labels = list(str(binascii.hexlify(v6).decode()))
            origin = ipv6_reverse_domain
    except Exception:
        # Not IPv6: parse as IPv4 (which raises on bad input).
        labels = ['%d' % byte
                  for byte in bytearray(dns.ipv4.inet_aton(text))]
        origin = ipv4_reverse_domain
    labels.reverse()
    return dns.name.from_text('.'.join(labels), origin=origin)
+
+
def to_address(name):
    """Convert a reverse map domain name into textual address form.

    *name*, a ``dns.name.Name``, an IPv4 or IPv6 address in reverse-map
    name form.

    Raises ``dns.exception.SyntaxError`` if the name does not have a
    reverse-map form.

    Returns a ``text``.
    """

    if name.is_subdomain(ipv4_reverse_domain):
        rel = name.relativize(ipv4_reverse_domain)
        labels = list(rel.labels)
        labels.reverse()
        text = b'.'.join(labels)
        # run through inet_aton() to check syntax and make pretty.
        return dns.ipv4.inet_ntoa(dns.ipv4.inet_aton(text))
    if name.is_subdomain(ipv6_reverse_domain):
        rel = name.relativize(ipv6_reverse_domain)
        labels = list(rel.labels)
        labels.reverse()
        # Join nibble labels into 4-digit groups, then the groups with ':'.
        groups = [b''.join(labels[i:i + 4])
                  for i in range(0, len(labels), 4)]
        text = b':'.join(groups)
        # run through inet_aton() to check syntax and make pretty.
        return dns.ipv6.inet_ntoa(dns.ipv6.inet_aton(text))
    raise dns.exception.SyntaxError('unknown reverse-map address family')
diff --git a/openpype/vendor/python/python_2/dns/rrset.py b/openpype/vendor/python/python_2/dns/rrset.py
new file mode 100644
index 0000000000..a53ec324b8
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/rrset.py
@@ -0,0 +1,189 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS RRsets (an RRset is a named rdataset)"""
+
+
+import dns.name
+import dns.rdataset
+import dns.rdataclass
+import dns.renderer
+from ._compat import string_types
+
+
class RRset(dns.rdataset.Rdataset):

    """A DNS RRset (named rdataset).

    RRset inherits from Rdataset, and RRsets can be treated as
    Rdatasets in most cases.  There are, however, a few notable
    exceptions.  RRsets have different to_wire() and to_text() method
    arguments, reflecting the fact that RRsets always have an owner
    name.
    """

    # name: the owner name of the RRset.
    # deleting: the deletion class, or None (used by dynamic update).
    __slots__ = ['name', 'deleting']

    def __init__(self, name, rdclass, rdtype, covers=dns.rdatatype.NONE,
                 deleting=None):
        """Create a new RRset.

        *name*, a ``dns.name.Name``, the owner name.

        *rdclass*, an ``int``, the rdata class.

        *rdtype*, an ``int``, the rdata type.

        *covers*, an ``int``, the covered rdata type.

        *deleting*, an ``int`` or ``None``, the deletion class.
        """

        super(RRset, self).__init__(rdclass, rdtype, covers)
        self.name = name
        self.deleting = deleting

    def _clone(self):
        # Extend Rdataset's clone protocol with the RRset-only attributes.
        obj = super(RRset, self)._clone()
        obj.name = self.name
        obj.deleting = self.deleting
        return obj

    def __repr__(self):
        if self.covers == 0:
            ctext = ''
        else:
            ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
        if self.deleting is not None:
            dtext = ' delete=' + dns.rdataclass.to_text(self.deleting)
        else:
            dtext = ''
        # Bug fix: the computed ctext/dtext were built and then an empty
        # string was returned, making the repr useless.  Emit the
        # standard descriptive form instead.
        return '<DNS ' + str(self.name) + ' ' + \
               dns.rdataclass.to_text(self.rdclass) + ' ' + \
               dns.rdatatype.to_text(self.rdtype) + ctext + dtext + \
               ' RRset>'

    def __str__(self):
        return self.to_text()

    def __eq__(self, other):
        # Equal only to other RRsets with the same name and an equal
        # underlying rdataset.
        if not isinstance(other, RRset):
            return False
        if self.name != other.name:
            return False
        return super(RRset, self).__eq__(other)

    def match(self, name, rdclass, rdtype, covers, deleting=None):
        """Returns ``True`` if this rrset matches the specified name, class,
        type, covers, and deletion state.
        """

        if not super(RRset, self).match(rdclass, rdtype, covers):
            return False
        if self.name != name or self.deleting != deleting:
            return False
        return True

    def to_text(self, origin=None, relativize=True, **kw):
        """Convert the RRset into DNS master file format.

        See ``dns.name.Name.choose_relativity`` for more information
        on how *origin* and *relativize* determine the way names
        are emitted.

        Any additional keyword arguments are passed on to the rdata
        ``to_text()`` method.

        *origin*, a ``dns.name.Name`` or ``None``, the origin for relative
        names.

        *relativize*, a ``bool``.  If ``True``, names will be relativized
        to *origin*.
        """

        return super(RRset, self).to_text(self.name, origin, relativize,
                                          self.deleting, **kw)

    def to_wire(self, file, compress=None, origin=None, **kw):
        """Convert the RRset to wire format.

        All keyword arguments are passed to ``dns.rdataset.to_wire()``; see
        that function for details.

        Returns an ``int``, the number of records emitted.
        """

        return super(RRset, self).to_wire(self.name, file, compress, origin,
                                          self.deleting, **kw)

    def to_rdataset(self):
        """Convert an RRset into an Rdataset.

        Returns a ``dns.rdataset.Rdataset``.
        """
        return dns.rdataset.from_rdata_list(self.ttl, list(self))
+
+
def from_text_list(name, ttl, rdclass, rdtype, text_rdatas,
                   idna_codec=None):
    """Create an RRset with the specified name, TTL, class, and type, and
    populate it from *text_rdatas*, a list of rdatas in text format.

    Returns a ``dns.rrset.RRset`` object.
    """

    # Accept textual name/class/type and convert them to object form.
    if isinstance(name, string_types):
        name = dns.name.from_text(name, None, idna_codec=idna_codec)
    if isinstance(rdclass, string_types):
        rdclass = dns.rdataclass.from_text(rdclass)
    if isinstance(rdtype, string_types):
        rdtype = dns.rdatatype.from_text(rdtype)
    rrset = RRset(name, rdclass, rdtype)
    rrset.update_ttl(ttl)
    for text in text_rdatas:
        rrset.add(dns.rdata.from_text(rrset.rdclass, rrset.rdtype, text))
    return rrset
+
+
def from_text(name, ttl, rdclass, rdtype, *text_rdatas):
    """Create an RRset with the specified name, TTL, class, and type and with
    the specified rdatas in text format.

    Returns a ``dns.rrset.RRset`` object.
    """

    # Thin varargs wrapper around from_text_list().
    return from_text_list(name, ttl, rdclass, rdtype, text_rdatas)
+
+
def from_rdata_list(name, ttl, rdatas, idna_codec=None):
    """Create an RRset with the specified name and TTL, populated from
    *rdatas*, a non-empty list of rdata objects.  The RRset's class and
    type are taken from the first rdata.

    Raises ``ValueError`` if *rdatas* is empty.

    Returns a ``dns.rrset.RRset`` object.
    """

    if isinstance(name, string_types):
        name = dns.name.from_text(name, None, idna_codec=idna_codec)

    if len(rdatas) == 0:
        raise ValueError("rdata list must not be empty")
    rrset = None
    for rd in rdatas:
        if rrset is None:
            rrset = RRset(name, rd.rdclass, rd.rdtype)
            rrset.update_ttl(ttl)
        rrset.add(rd)
    return rrset
+
+
def from_rdata(name, ttl, *rdatas):
    """Create an RRset with the specified name and TTL, and with
    the specified rdata objects.

    Returns a ``dns.rrset.RRset`` object.
    """

    # Thin varargs wrapper around from_rdata_list().
    return from_rdata_list(name, ttl, rdatas)
diff --git a/openpype/vendor/python/python_2/dns/set.py b/openpype/vendor/python/python_2/dns/set.py
new file mode 100644
index 0000000000..81329bf457
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/set.py
@@ -0,0 +1,261 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
class Set(object):

    """A simple set class.

    This class was originally used to deal with sets being missing in
    ancient versions of python, but dnspython will continue to use it
    as these sets are based on lists and are thus indexable, and this
    ability is widely used in dnspython applications.
    """

    __slots__ = ['items']

    def __init__(self, items=None):
        """Initialize the set.

        *items*, an iterable or ``None``, the initial set of items.
        """

        self.items = []
        if items is not None:
            for item in items:
                self.add(item)

    def __repr__(self):
        return "dns.simpleset.Set(%s)" % repr(self.items)

    def add(self, item):
        """Add an item to the set, ignoring it if already present."""

        if item not in self.items:
            self.items.append(item)

    def remove(self, item):
        """Remove an item from the set.

        Raises ``ValueError`` if the item is not in the set.
        """

        self.items.remove(item)

    def discard(self, item):
        """Remove an item from the set if present; do nothing otherwise."""

        try:
            self.items.remove(item)
        except ValueError:
            pass

    def _clone(self):
        """Make a (shallow) copy of the set.

        There is a 'clone protocol' that subclasses of this class
        should use.  To make a copy, first call your super's _clone()
        method, and use the object returned as the new instance.  Then
        make shallow copies of the attributes defined in the subclass.

        This protocol allows us to write the set algorithms that
        return new instances (e.g. union) once, and keep using them in
        subclasses.
        """

        cls = self.__class__
        # __new__ (not the constructor) so subclass __init__ side effects
        # are not re-run during cloning.
        obj = cls.__new__(cls)
        obj.items = list(self.items)
        return obj

    def __copy__(self):
        """Make a (shallow) copy of the set."""

        return self._clone()

    def copy(self):
        """Make a (shallow) copy of the set."""

        return self._clone()

    def union_update(self, other):
        """Update the set, adding any elements from other which are not
        already in the set.
        """

        if not isinstance(other, Set):
            raise ValueError('other must be a Set instance')
        if self is other:
            return
        for item in other.items:
            self.add(item)

    def intersection_update(self, other):
        """Update the set, removing any elements from other which are not
        in both sets.
        """

        if not isinstance(other, Set):
            raise ValueError('other must be a Set instance')
        if self is other:
            return
        # we make a copy of the list so that we can remove items from
        # the list without breaking the iterator.
        for item in list(self.items):
            if item not in other.items:
                self.items.remove(item)

    def difference_update(self, other):
        """Update the set, removing any elements from other which are in
        the set.
        """

        if not isinstance(other, Set):
            raise ValueError('other must be a Set instance')
        if self is other:
            self.items = []
        else:
            for item in other.items:
                self.discard(item)

    def union(self, other):
        """Return a new set which is the union of ``self`` and ``other``.

        Returns the same Set type as this set.
        """

        obj = self._clone()
        obj.union_update(other)
        return obj

    def intersection(self, other):
        """Return a new set which is the intersection of ``self`` and
        ``other``.

        Returns the same Set type as this set.
        """

        obj = self._clone()
        obj.intersection_update(other)
        return obj

    def difference(self, other):
        """Return a new set which ``self`` - ``other``, i.e. the items
        in ``self`` which are not also in ``other``.

        Returns the same Set type as this set.
        """

        obj = self._clone()
        obj.difference_update(other)
        return obj

    def __or__(self, other):
        return self.union(other)

    def __and__(self, other):
        return self.intersection(other)

    def __add__(self, other):
        return self.union(other)

    def __sub__(self, other):
        return self.difference(other)

    def __ior__(self, other):
        self.union_update(other)
        return self

    def __iand__(self, other):
        self.intersection_update(other)
        return self

    def __iadd__(self, other):
        self.union_update(other)
        return self

    def __isub__(self, other):
        self.difference_update(other)
        return self

    def update(self, other):
        """Update the set, adding any elements from other which are not
        already in the set.

        *other*, the collection of items with which to update the set, which
        may be any iterable type.
        """

        for item in other:
            self.add(item)

    def clear(self):
        """Make the set empty."""
        self.items = []

    def __eq__(self, other):
        # Fix: comparing against a non-Set used to raise AttributeError
        # (via other.items).  Return NotImplemented so Python falls back
        # to its default handling and ``Set(...) == something_else`` is
        # simply unequal instead of crashing.
        if not isinstance(other, Set):
            return NotImplemented
        # Yes, this is inefficient but the sets we're dealing with are
        # usually quite small, so it shouldn't hurt too much.
        for item in self.items:
            if item not in other.items:
                return False
        for item in other.items:
            if item not in self.items:
                return False
        return True

    def __ne__(self, other):
        eq = self.__eq__(other)
        if eq is NotImplemented:
            return eq
        return not eq

    def __len__(self):
        return len(self.items)

    def __iter__(self):
        return iter(self.items)

    def __getitem__(self, i):
        return self.items[i]

    def __delitem__(self, i):
        del self.items[i]

    def issubset(self, other):
        """Is this set a subset of *other*?

        Returns a ``bool``.
        """

        if not isinstance(other, Set):
            raise ValueError('other must be a Set instance')
        for item in self.items:
            if item not in other.items:
                return False
        return True

    def issuperset(self, other):
        """Is this set a superset of *other*?

        Returns a ``bool``.
        """

        if not isinstance(other, Set):
            raise ValueError('other must be a Set instance')
        for item in other.items:
            if item not in self.items:
                return False
        return True
diff --git a/openpype/vendor/python/python_2/dns/tokenizer.py b/openpype/vendor/python/python_2/dns/tokenizer.py
new file mode 100644
index 0000000000..880b71ce7a
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/tokenizer.py
@@ -0,0 +1,571 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Tokenize DNS master file format"""
+
+from io import StringIO
+import sys
+
+import dns.exception
+import dns.name
+import dns.ttl
+from ._compat import long, text_type, binary_type
+
+_DELIMITERS = {
+ ' ': True,
+ '\t': True,
+ '\n': True,
+ ';': True,
+ '(': True,
+ ')': True,
+ '"': True}
+
+_QUOTING_DELIMITERS = {'"': True}
+
+EOF = 0
+EOL = 1
+WHITESPACE = 2
+IDENTIFIER = 3
+QUOTED_STRING = 4
+COMMENT = 5
+DELIMITER = 6
+
+
+class UngetBufferFull(dns.exception.DNSException):
+ """An attempt was made to unget a token when the unget buffer was full."""
+
+
class Token(object):
    """A DNS master file format token.

    ttype: The token type
    value: The token value
    has_escape: Does the token value contain escapes?
    """

    def __init__(self, ttype, value='', has_escape=False):
        """Initialize a token instance."""

        self.ttype = ttype
        self.value = value
        self.has_escape = has_escape

    def is_eof(self):
        return self.ttype == EOF

    def is_eol(self):
        return self.ttype == EOL

    def is_whitespace(self):
        return self.ttype == WHITESPACE

    def is_identifier(self):
        return self.ttype == IDENTIFIER

    def is_quoted_string(self):
        return self.ttype == QUOTED_STRING

    def is_comment(self):
        return self.ttype == COMMENT

    def is_delimiter(self):
        return self.ttype == DELIMITER

    def is_eol_or_eof(self):
        return self.ttype == EOL or self.ttype == EOF

    def __eq__(self, other):
        if isinstance(other, Token):
            return self.ttype == other.ttype and self.value == other.value
        return False

    def __ne__(self, other):
        if isinstance(other, Token):
            return self.ttype != other.ttype or self.value != other.value
        return True

    def __str__(self):
        return '%d "%s"' % (self.ttype, self.value)

    def unescape(self):
        """Return a token with backslash escapes resolved.

        Supports both ``\\X`` character escapes and ``\\DDD`` decimal
        escapes.  Returns ``self`` unchanged when no escapes are present.
        """

        if not self.has_escape:
            return self
        length = len(self.value)
        pos = 0
        parts = []
        while pos < length:
            ch = self.value[pos]
            pos += 1
            if ch == '\\':
                if pos >= length:
                    raise dns.exception.UnexpectedEnd
                ch = self.value[pos]
                pos += 1
                if ch.isdigit():
                    # \DDD decimal escape: exactly two more digits needed.
                    if pos + 1 >= length:
                        raise dns.exception.UnexpectedEnd
                    d2 = self.value[pos]
                    d3 = self.value[pos + 1]
                    pos += 2
                    if not (d2.isdigit() and d3.isdigit()):
                        raise dns.exception.SyntaxError
                    ch = chr(int(ch) * 100 + int(d2) * 10 + int(d3))
            parts.append(ch)
        return Token(self.ttype, ''.join(parts))

    # compatibility for old-style tuple tokens

    def __len__(self):
        return 2

    def __iter__(self):
        return iter((self.ttype, self.value))

    def __getitem__(self, i):
        if i == 0:
            return self.ttype
        if i == 1:
            return self.value
        raise IndexError
+
+
class Tokenizer(object):
    """A DNS master file format tokenizer.

    A token object is basically a (type, value) tuple.  The valid
    types are EOF, EOL, WHITESPACE, IDENTIFIER, QUOTED_STRING,
    COMMENT, and DELIMITER.

    file: The file to tokenize

    ungotten_char: The most recently ungotten character, or None.

    ungotten_token: The most recently ungotten token, or None.

    multiline: The current multiline level.  This value is increased
    by one every time a '(' delimiter is read, and decreased by one every time
    a ')' delimiter is read.

    quoting: This variable is true if the tokenizer is currently
    reading a quoted string.

    eof: This variable is true if the tokenizer has encountered EOF.

    delimiters: The current delimiter dictionary.

    line_number: The current line number

    filename: A filename that will be returned by the where() method.
    """

    def __init__(self, f=sys.stdin, filename=None):
        """Initialize a tokenizer instance.

        f: The file to tokenize.  The default is sys.stdin.
        This parameter may also be a string, in which case the tokenizer
        will take its input from the contents of the string.

        filename: the name of the filename that the where() method
        will return.
        """

        # NOTE(review): the placeholder filenames below were restored from
        # upstream dnspython; the vendored copy had them mangled to ''.
        if isinstance(f, text_type):
            f = StringIO(f)
            if filename is None:
                filename = '<string>'
        elif isinstance(f, binary_type):
            f = StringIO(f.decode())
            if filename is None:
                filename = '<string>'
        else:
            if filename is None:
                if f is sys.stdin:
                    filename = '<stdin>'
                else:
                    filename = '<file>'
        self.file = f
        self.ungotten_char = None
        self.ungotten_token = None
        self.multiline = 0
        self.quoting = False
        self.eof = False
        self.delimiters = _DELIMITERS
        self.line_number = 1
        self.filename = filename

    def _get_char(self):
        """Read a character from input.
        """

        if self.ungotten_char is None:
            if self.eof:
                c = ''
            else:
                c = self.file.read(1)
                if c == '':
                    self.eof = True
                elif c == '\n':
                    self.line_number += 1
        else:
            c = self.ungotten_char
            self.ungotten_char = None
        return c

    def where(self):
        """Return the current location in the input.

        Returns a (string, int) tuple.  The first item is the filename of
        the input, the second is the current line number.
        """

        return (self.filename, self.line_number)

    def _unget_char(self, c):
        """Unget a character.

        The unget buffer for characters is only one character large; it is
        an error to try to unget a character when the unget buffer is not
        empty.

        c: the character to unget
        raises UngetBufferFull: there is already an ungotten char
        """

        if self.ungotten_char is not None:
            raise UngetBufferFull
        self.ungotten_char = c

    def skip_whitespace(self):
        """Consume input until a non-whitespace character is encountered.

        The non-whitespace character is then ungotten, and the number of
        whitespace characters consumed is returned.

        If the tokenizer is in multiline mode, then newlines are whitespace.

        Returns the number of characters skipped.
        """

        skipped = 0
        while True:
            c = self._get_char()
            if c != ' ' and c != '\t':
                if (c != '\n') or not self.multiline:
                    self._unget_char(c)
                    return skipped
            skipped += 1

    def get(self, want_leading=False, want_comment=False):
        """Get the next token.

        want_leading: If True, return a WHITESPACE token if the
        first character read is whitespace.  The default is False.

        want_comment: If True, return a COMMENT token if the
        first token read is a comment.  The default is False.

        Raises dns.exception.UnexpectedEnd: input ended prematurely

        Raises dns.exception.SyntaxError: input was badly formed

        Returns a Token.
        """

        if self.ungotten_token is not None:
            token = self.ungotten_token
            self.ungotten_token = None
            if token.is_whitespace():
                if want_leading:
                    return token
            elif token.is_comment():
                if want_comment:
                    return token
            else:
                return token
        skipped = self.skip_whitespace()
        if want_leading and skipped > 0:
            return Token(WHITESPACE, ' ')
        token = ''
        ttype = IDENTIFIER
        has_escape = False
        while True:
            c = self._get_char()
            if c == '' or c in self.delimiters:
                if c == '' and self.quoting:
                    raise dns.exception.UnexpectedEnd
                if token == '' and ttype != QUOTED_STRING:
                    if c == '(':
                        self.multiline += 1
                        self.skip_whitespace()
                        continue
                    elif c == ')':
                        if self.multiline <= 0:
                            raise dns.exception.SyntaxError
                        self.multiline -= 1
                        self.skip_whitespace()
                        continue
                    elif c == '"':
                        if not self.quoting:
                            self.quoting = True
                            self.delimiters = _QUOTING_DELIMITERS
                            ttype = QUOTED_STRING
                            continue
                        else:
                            self.quoting = False
                            self.delimiters = _DELIMITERS
                            self.skip_whitespace()
                            continue
                    elif c == '\n':
                        return Token(EOL, '\n')
                    elif c == ';':
                        while 1:
                            c = self._get_char()
                            if c == '\n' or c == '':
                                break
                            token += c
                        if want_comment:
                            self._unget_char(c)
                            return Token(COMMENT, token)
                        elif c == '':
                            if self.multiline:
                                raise dns.exception.SyntaxError(
                                    'unbalanced parentheses')
                            return Token(EOF)
                        elif self.multiline:
                            self.skip_whitespace()
                            token = ''
                            continue
                        else:
                            return Token(EOL, '\n')
                    else:
                        # This code exists in case we ever want a
                        # delimiter to be returned.  It never produces
                        # a token currently.
                        token = c
                        ttype = DELIMITER
                else:
                    self._unget_char(c)
                break
            elif self.quoting:
                if c == '\\':
                    c = self._get_char()
                    if c == '':
                        raise dns.exception.UnexpectedEnd
                    if c.isdigit():
                        c2 = self._get_char()
                        if c2 == '':
                            raise dns.exception.UnexpectedEnd
                        c3 = self._get_char()
                        # Fix: this previously re-tested c (always a digit
                        # here) instead of c3, so a stream ending mid-escape
                        # slipped past the UnexpectedEnd check.
                        if c3 == '':
                            raise dns.exception.UnexpectedEnd
                        if not (c2.isdigit() and c3.isdigit()):
                            raise dns.exception.SyntaxError
                        c = chr(int(c) * 100 + int(c2) * 10 + int(c3))
                elif c == '\n':
                    raise dns.exception.SyntaxError('newline in quoted string')
            elif c == '\\':
                #
                # It's an escape.  Put it and the next character into
                # the token; it will be checked later for goodness.
                #
                token += c
                has_escape = True
                c = self._get_char()
                if c == '' or c == '\n':
                    raise dns.exception.UnexpectedEnd
            token += c
        if token == '' and ttype != QUOTED_STRING:
            if self.multiline:
                raise dns.exception.SyntaxError('unbalanced parentheses')
            ttype = EOF
        return Token(ttype, token, has_escape)

    def unget(self, token):
        """Unget a token.

        The unget buffer for tokens is only one token large; it is
        an error to try to unget a token when the unget buffer is not
        empty.

        token: the token to unget

        Raises UngetBufferFull: there is already an ungotten token
        """

        if self.ungotten_token is not None:
            raise UngetBufferFull
        self.ungotten_token = token

    def next(self):
        """Return the next item in an iteration.

        Returns a Token.
        """

        token = self.get()
        if token.is_eof():
            raise StopIteration
        return token

    __next__ = next

    def __iter__(self):
        return self

    # Helpers

    def get_int(self, base=10):
        """Read the next token and interpret it as an integer.

        Raises dns.exception.SyntaxError if not an integer.

        Returns an int.
        """

        token = self.get().unescape()
        if not token.is_identifier():
            raise dns.exception.SyntaxError('expecting an identifier')
        # NOTE(review): isdigit() restricts input to decimal digits even
        # when base != 10, so e.g. hex values are rejected; this matches
        # the historical behavior of this vendored version.
        if not token.value.isdigit():
            raise dns.exception.SyntaxError('expecting an integer')
        return int(token.value, base)

    def get_uint8(self):
        """Read the next token and interpret it as an 8-bit unsigned
        integer.

        Raises dns.exception.SyntaxError if not an 8-bit unsigned integer.

        Returns an int.
        """

        value = self.get_int()
        if value < 0 or value > 255:
            raise dns.exception.SyntaxError(
                '%d is not an unsigned 8-bit integer' % value)
        return value

    def get_uint16(self, base=10):
        """Read the next token and interpret it as a 16-bit unsigned
        integer.

        Raises dns.exception.SyntaxError if not a 16-bit unsigned integer.

        Returns an int.
        """

        value = self.get_int(base=base)
        if value < 0 or value > 65535:
            if base == 8:
                raise dns.exception.SyntaxError(
                    '%o is not an octal unsigned 16-bit integer' % value)
            else:
                raise dns.exception.SyntaxError(
                    '%d is not an unsigned 16-bit integer' % value)
        return value

    def get_uint32(self):
        """Read the next token and interpret it as a 32-bit unsigned
        integer.

        Raises dns.exception.SyntaxError if not a 32-bit unsigned integer.

        Returns an int.
        """

        token = self.get().unescape()
        if not token.is_identifier():
            raise dns.exception.SyntaxError('expecting an identifier')
        if not token.value.isdigit():
            raise dns.exception.SyntaxError('expecting an integer')
        value = long(token.value)
        # Fix: the upper bound was off by one (4294967296); the largest
        # unsigned 32-bit value is 2^32 - 1 = 4294967295.
        if value < 0 or value > long(4294967295):
            raise dns.exception.SyntaxError(
                '%d is not an unsigned 32-bit integer' % value)
        return value

    def get_string(self, origin=None):
        """Read the next token and interpret it as a string.

        Raises dns.exception.SyntaxError if not a string.

        Returns a string.
        """

        token = self.get().unescape()
        if not (token.is_identifier() or token.is_quoted_string()):
            raise dns.exception.SyntaxError('expecting a string')
        return token.value

    def get_identifier(self, origin=None):
        """Read the next token, which should be an identifier.

        Raises dns.exception.SyntaxError if not an identifier.

        Returns a string.
        """

        token = self.get().unescape()
        if not token.is_identifier():
            raise dns.exception.SyntaxError('expecting an identifier')
        return token.value

    def get_name(self, origin=None):
        """Read the next token and interpret it as a DNS name.

        Raises dns.exception.SyntaxError if not a name.

        Returns a dns.name.Name.
        """

        token = self.get()
        if not token.is_identifier():
            raise dns.exception.SyntaxError('expecting an identifier')
        return dns.name.from_text(token.value, origin)

    def get_eol(self):
        """Read the next token and raise an exception if it isn't EOL or
        EOF.

        Returns a string.
        """

        token = self.get()
        if not token.is_eol_or_eof():
            raise dns.exception.SyntaxError(
                'expected EOL or EOF, got %d "%s"' % (token.ttype,
                                                      token.value))
        return token.value

    def get_ttl(self):
        """Read the next token and interpret it as a DNS TTL.

        Raises dns.exception.SyntaxError or dns.ttl.BadTTL if not an
        identifier or badly formed.

        Returns an int.
        """

        token = self.get().unescape()
        if not token.is_identifier():
            raise dns.exception.SyntaxError('expecting an identifier')
        return dns.ttl.from_text(token.value)
diff --git a/openpype/vendor/python/python_2/dns/tsig.py b/openpype/vendor/python/python_2/dns/tsig.py
new file mode 100644
index 0000000000..3daa387855
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/tsig.py
@@ -0,0 +1,236 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS TSIG support."""
+
+import hashlib
+import hmac
+import struct
+
+import dns.exception
+import dns.rdataclass
+import dns.name
+from ._compat import long, string_types, text_type
+
class BadTime(dns.exception.DNSException):

    """The current time is not within the TSIG's validity time."""


class BadSignature(dns.exception.DNSException):

    """The TSIG signature fails to verify."""


class PeerError(dns.exception.DNSException):

    """Base class for all TSIG errors generated by the remote peer"""


class PeerBadKey(PeerError):

    """The peer didn't know the key we used"""


class PeerBadSignature(PeerError):

    """The peer didn't like the signature we sent"""


class PeerBadTime(PeerError):

    """The peer didn't like the time we sent"""


class PeerBadTruncation(PeerError):

    """The peer didn't like amount of truncation in the TSIG we sent"""

# TSIG Algorithms

HMAC_MD5 = dns.name.from_text("HMAC-MD5.SIG-ALG.REG.INT")
HMAC_SHA1 = dns.name.from_text("hmac-sha1")
HMAC_SHA224 = dns.name.from_text("hmac-sha224")
HMAC_SHA256 = dns.name.from_text("hmac-sha256")
HMAC_SHA384 = dns.name.from_text("hmac-sha384")
HMAC_SHA512 = dns.name.from_text("hmac-sha512")

# Maps TSIG algorithm name -> hashlib constructor used for the HMAC.
_hashes = {
    HMAC_SHA224: hashlib.sha224,
    HMAC_SHA256: hashlib.sha256,
    HMAC_SHA384: hashlib.sha384,
    HMAC_SHA512: hashlib.sha512,
    HMAC_SHA1: hashlib.sha1,
    HMAC_MD5: hashlib.md5,
}

# NOTE(review): HMAC-MD5 remains the default only for backwards
# compatibility with RFC 2845 peers; prefer a SHA-2 algorithm when both
# ends support it.
default_algorithm = HMAC_MD5

# TSIG error codes carried in the TSIG RR (RFC 2845; BADTRUNC per RFC 4635).
BADSIG = 16
BADKEY = 17
BADTIME = 18
BADTRUNC = 22
+
+
def sign(wire, keyname, secret, time, fudge, original_id, error,
         other_data, request_mac, ctx=None, multi=False, first=True,
         algorithm=default_algorithm):
    """Return a (tsig_rdata, mac, ctx) tuple containing the HMAC TSIG rdata
    for the input parameters, the HMAC MAC calculated by applying the
    TSIG signature algorithm, and the TSIG digest context.

    The order in which data is fed to the HMAC below is fixed by the TSIG
    protocol (RFC 2845) and must not be rearranged.  *first* is False only
    for continuation messages of a multi-message exchange (e.g. AXFR),
    where a previously returned *ctx* is reused; *multi* requests a fresh
    context seeded with this MAC for signing the next message.

    @rtype: (string, string, hmac.HMAC object)
    @raises ValueError: I{other_data} is too long
    @raises NotImplementedError: I{algorithm} is not supported
    """

    if isinstance(other_data, text_type):
        other_data = other_data.encode()
    (algorithm_name, digestmod) = get_algorithm(algorithm)
    if first:
        ctx = hmac.new(secret, digestmod=digestmod)
        ml = len(request_mac)
        if ml > 0:
            # The request MAC (if any) is digested as a length-prefixed blob.
            ctx.update(struct.pack('!H', ml))
            ctx.update(request_mac)
    # Digest the message with its original (pre-TSIG) message id.
    id = struct.pack('!H', original_id)
    ctx.update(id)
    ctx.update(wire[2:])
    if first:
        # TSIG RR pseudo-header: key name, class ANY, TTL 0 (RFC 2845 3.4.2).
        ctx.update(keyname.to_digestable())
        ctx.update(struct.pack('!H', dns.rdataclass.ANY))
        ctx.update(struct.pack('!I', 0))
    # Signing time is a 48-bit quantity split into 16 high / 32 low bits.
    long_time = time + long(0)
    upper_time = (long_time >> 32) & long(0xffff)
    lower_time = long_time & long(0xffffffff)
    time_mac = struct.pack('!HIH', upper_time, lower_time, fudge)
    pre_mac = algorithm_name + time_mac
    ol = len(other_data)
    if ol > 65535:
        raise ValueError('TSIG Other Data is > 65535 bytes')
    post_mac = struct.pack('!HH', error, ol) + other_data
    if first:
        ctx.update(pre_mac)
        ctx.update(post_mac)
    else:
        # Continuation messages digest only the time fields (RFC 2845 4.4).
        ctx.update(time_mac)
    mac = ctx.digest()
    mpack = struct.pack('!H', len(mac))
    tsig_rdata = pre_mac + mpack + mac + id + post_mac
    if multi:
        # Seed the context for the next message with this message's MAC.
        ctx = hmac.new(secret, digestmod=digestmod)
        ml = len(mac)
        ctx.update(struct.pack('!H', ml))
        ctx.update(mac)
    else:
        ctx = None
    return (tsig_rdata, mac, ctx)
+
+
def hmac_md5(wire, keyname, secret, time, fudge, original_id, error,
             other_data, request_mac, ctx=None, multi=False, first=True,
             algorithm=default_algorithm):
    # Backwards-compatible alias for sign().  Despite the name, it signs
    # with whatever *algorithm* is passed (HMAC-MD5 only by default).
    return sign(wire, keyname, secret, time, fudge, original_id, error,
                other_data, request_mac, ctx, multi, first, algorithm)
+
+
def validate(wire, keyname, secret, now, request_mac, tsig_start, tsig_rdata,
             tsig_rdlen, ctx=None, multi=False, first=True):
    """Validate the specified TSIG rdata against the other input parameters.

    Recomputes the MAC over the message as it looked before the TSIG RR
    was appended and compares it with the MAC carried in the TSIG rdata.

    @raises FormError: The TSIG is badly formed.
    @raises BadTime: There is too much time skew between the client and the
    server.
    @raises BadSignature: The TSIG signature did not validate
    @rtype: hmac.HMAC object"""

    (adcount,) = struct.unpack("!H", wire[10:12])
    if adcount == 0:
        raise dns.exception.FormError
    adcount -= 1
    # Rebuild the wire form without the TSIG RR (and with the decremented
    # additional-record count), since that is what the peer signed.
    new_wire = wire[0:10] + struct.pack("!H", adcount) + wire[12:tsig_start]
    current = tsig_rdata
    (aname, used) = dns.name.from_wire(wire, current)
    current = current + used
    # TSIG rdata layout: time(48) fudge(16) mac_size(16) mac
    #                    original_id(16) error(16) other_size(16) other_data
    (upper_time, lower_time, fudge, mac_size) = \
        struct.unpack("!HIHH", wire[current:current + 10])
    time = ((upper_time + long(0)) << 32) + (lower_time + long(0))
    current += 10
    mac = wire[current:current + mac_size]
    current += mac_size
    (original_id, error, other_size) = \
        struct.unpack("!HHH", wire[current:current + 6])
    current += 6
    other_data = wire[current:current + other_size]
    current += other_size
    if current != tsig_rdata + tsig_rdlen:
        raise dns.exception.FormError
    if error != 0:
        # The peer reported a TSIG error; surface it before checking
        # time or signature.
        if error == BADSIG:
            raise PeerBadSignature
        elif error == BADKEY:
            raise PeerBadKey
        elif error == BADTIME:
            raise PeerBadTime
        elif error == BADTRUNC:
            raise PeerBadTruncation
        else:
            raise PeerError('unknown TSIG error code %d' % error)
    # Reject messages whose signing time is outside now +/- fudge.
    time_low = time - fudge
    time_high = time + fudge
    if now < time_low or now > time_high:
        raise BadTime
    # aname (the algorithm name from the wire) selects the digest.
    (junk, our_mac, ctx) = sign(new_wire, keyname, secret, time, fudge,
                                original_id, error, other_data,
                                request_mac, ctx, multi, first, aname)
    # NOTE(review): this is not a constant-time comparison; consider
    # hmac.compare_digest if timing side channels are a concern here.
    if our_mac != mac:
        raise BadSignature
    return ctx
+
+
def get_algorithm(algorithm):
    """Returns the wire format string and the hash module to use for the
    specified TSIG algorithm

    @rtype: (string, hash constructor)
    @raises NotImplementedError: I{algorithm} is not supported
    """

    if isinstance(algorithm, string_types):
        algorithm = dns.name.from_text(algorithm)

    digestmod = _hashes.get(algorithm)
    if digestmod is None:
        raise NotImplementedError("TSIG algorithm " + str(algorithm) +
                                  " is not supported")
    return (algorithm.to_digestable(), digestmod)
+
+
def get_algorithm_and_mac(wire, tsig_rdata, tsig_rdlen):
    """Return the tsig algorithm for the specified tsig_rdata
    @raises FormError: The TSIG is badly formed.
    """
    (aname, used) = dns.name.from_wire(wire, tsig_rdata)
    current = tsig_rdata + used
    # Skip time (48 bits) and fudge (16 bits); keep the MAC size.
    (upper_time, lower_time, fudge, mac_size) = \
        struct.unpack("!HIHH", wire[current:current + 10])
    current += 10
    mac = wire[current:current + mac_size]
    if current + mac_size > tsig_rdata + tsig_rdlen:
        raise dns.exception.FormError
    return (aname, mac)
diff --git a/openpype/vendor/python/python_2/dns/tsigkeyring.py b/openpype/vendor/python/python_2/dns/tsigkeyring.py
new file mode 100644
index 0000000000..5e5fe1cbe4
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/tsigkeyring.py
@@ -0,0 +1,50 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""A place to store TSIG keys."""
+
+from dns._compat import maybe_decode, maybe_encode
+
+import base64
+
+import dns.name
+
+
def from_text(textring):
    """Convert a dictionary containing (textual DNS name, base64 secret) pairs
    into a binary keyring which has (dns.name.Name, binary secret) pairs.
    @rtype: dict"""

    keyring = {}
    for keytext, secret_text in textring.items():
        # decodestring (not b64decode) is deliberate: it tolerates
        # newlines embedded in the base64 text.
        secret = base64.decodestring(maybe_encode(secret_text))
        keyring[dns.name.from_text(keytext)] = secret
    return keyring
+
+
def to_text(keyring):
    """Convert a dictionary containing (dns.name.Name, binary secret) pairs
    into a text keyring which has (textual DNS name, base64 secret) pairs.
    @rtype: dict"""

    textring = {}
    for keyname, secret in keyring.items():
        keytext = maybe_decode(keyname.to_text())
        textring[keytext] = maybe_decode(base64.encodestring(secret))
    return textring
diff --git a/openpype/vendor/python/python_2/dns/ttl.py b/openpype/vendor/python/python_2/dns/ttl.py
new file mode 100644
index 0000000000..4be16bee5b
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/ttl.py
@@ -0,0 +1,70 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS TTL conversion."""
+
+import dns.exception
+from ._compat import long
+
+
# Raised by from_text() below when a TTL string cannot be parsed.
class BadTTL(dns.exception.SyntaxError):
    """DNS TTL value is not well-formed."""
+
+
def from_text(text):
    """Convert the text form of a TTL to an integer.

    The BIND 8 units syntax for TTLs (e.g. '1w6d4h3m10s') is supported.

    *text*, a ``text``, the textual TTL.

    Raises ``dns.ttl.BadTTL`` if the TTL is not well-formed.

    Returns an ``int``.
    """

    if text.isdigit():
        total = long(text)
    else:
        # Fix: an empty string used to raise IndexError via text[0];
        # raise the documented BadTTL instead.
        if not text or not text[0].isdigit():
            raise BadTTL
        # Seconds-per-unit for the BIND 8 suffixes.
        multipliers = {'w': 604800, 'd': 86400, 'h': 3600, 'm': 60, 's': 1}
        total = long(0)
        current = long(0)
        for c in text:
            if c.isdigit():
                current = current * 10 + long(c)
            else:
                c = c.lower()
                if c not in multipliers:
                    raise BadTTL("unknown unit '%s'" % c)
                total += current * long(multipliers[c])
                current = 0
        if not current == 0:
            # Digits with no trailing unit, e.g. '1w30'.
            raise BadTTL("trailing integer")
    if total < long(0) or total > long(2147483647):
        raise BadTTL("TTL should be between 0 and 2^31 - 1 (inclusive)")
    return total
diff --git a/openpype/vendor/python/python_2/dns/update.py b/openpype/vendor/python/python_2/dns/update.py
new file mode 100644
index 0000000000..96a00d5dbe
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/update.py
@@ -0,0 +1,279 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Dynamic Update Support"""
+
+
+import dns.message
+import dns.name
+import dns.opcode
+import dns.rdata
+import dns.rdataclass
+import dns.rdataset
+import dns.tsig
+from ._compat import string_types
+
+
+class Update(dns.message.Message):
+
+ def __init__(self, zone, rdclass=dns.rdataclass.IN, keyring=None,
+ keyname=None, keyalgorithm=dns.tsig.default_algorithm):
+ """Initialize a new DNS Update object.
+
+ See the documentation of the Message class for a complete
+ description of the keyring dictionary.
+
+ *zone*, a ``dns.name.Name`` or ``text``, the zone which is being
+ updated.
+
+ *rdclass*, an ``int`` or ``text``, the class of the zone.
+
+ *keyring*, a ``dict``, the TSIG keyring to use. If a
+ *keyring* is specified but a *keyname* is not, then the key
+ used will be the first key in the *keyring*. Note that the
+ order of keys in a dictionary is not defined, so applications
+ should supply a keyname when a keyring is used, unless they
+ know the keyring contains only one key.
+
+ *keyname*, a ``dns.name.Name`` or ``None``, the name of the TSIG key
+ to use; defaults to ``None``. The key must be defined in the keyring.
+
+ *keyalgorithm*, a ``dns.name.Name``, the TSIG algorithm to use.
+ """
+ super(Update, self).__init__()
+ self.flags |= dns.opcode.to_flags(dns.opcode.UPDATE)
+ if isinstance(zone, string_types):
+ zone = dns.name.from_text(zone)
+ self.origin = zone
+ if isinstance(rdclass, string_types):
+ rdclass = dns.rdataclass.from_text(rdclass)
+ self.zone_rdclass = rdclass
+ self.find_rrset(self.question, self.origin, rdclass, dns.rdatatype.SOA,
+ create=True, force_unique=True)
+ if keyring is not None:
+ self.use_tsig(keyring, keyname, algorithm=keyalgorithm)
+
+ def _add_rr(self, name, ttl, rd, deleting=None, section=None):
+ """Add a single RR to the update section."""
+
+ if section is None:
+ section = self.authority
+ covers = rd.covers()
+ rrset = self.find_rrset(section, name, self.zone_rdclass, rd.rdtype,
+ covers, deleting, True, True)
+ rrset.add(rd, ttl)
+
+ def _add(self, replace, section, name, *args):
+ """Add records.
+
+ *replace* is the replacement mode. If ``False``,
+ RRs are added to an existing RRset; if ``True``, the RRset
+ is replaced with the specified contents. The second
+ argument is the section to add to. The third argument
+ is always a name. The other arguments can be:
+
+ - rdataset...
+
+ - ttl, rdata...
+
+ - ttl, rdtype, string...
+ """
+
+ if isinstance(name, string_types):
+ name = dns.name.from_text(name, None)
+ if isinstance(args[0], dns.rdataset.Rdataset):
+ for rds in args:
+ if replace:
+ self.delete(name, rds.rdtype)
+ for rd in rds:
+ self._add_rr(name, rds.ttl, rd, section=section)
+ else:
+ args = list(args)
+ ttl = int(args.pop(0))
+ if isinstance(args[0], dns.rdata.Rdata):
+ if replace:
+ self.delete(name, args[0].rdtype)
+ for rd in args:
+ self._add_rr(name, ttl, rd, section=section)
+ else:
+ rdtype = args.pop(0)
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ if replace:
+ self.delete(name, rdtype)
+ for s in args:
+ rd = dns.rdata.from_text(self.zone_rdclass, rdtype, s,
+ self.origin)
+ self._add_rr(name, ttl, rd, section=section)
+
+ def add(self, name, *args):
+ """Add records.
+
+ The first argument is always a name. The other
+ arguments can be:
+
+ - rdataset...
+
+ - ttl, rdata...
+
+ - ttl, rdtype, string...
+ """
+
+ self._add(False, self.authority, name, *args)
+
+ def delete(self, name, *args):
+ """Delete records.
+
+ The first argument is always a name. The other
+ arguments can be:
+
+ - *empty*
+
+ - rdataset...
+
+ - rdata...
+
+ - rdtype, [string...]
+ """
+
+ if isinstance(name, string_types):
+ name = dns.name.from_text(name, None)
+ if len(args) == 0:
+ self.find_rrset(self.authority, name, dns.rdataclass.ANY,
+ dns.rdatatype.ANY, dns.rdatatype.NONE,
+ dns.rdatatype.ANY, True, True)
+ elif isinstance(args[0], dns.rdataset.Rdataset):
+ for rds in args:
+ for rd in rds:
+ self._add_rr(name, 0, rd, dns.rdataclass.NONE)
+ else:
+ args = list(args)
+ if isinstance(args[0], dns.rdata.Rdata):
+ for rd in args:
+ self._add_rr(name, 0, rd, dns.rdataclass.NONE)
+ else:
+ rdtype = args.pop(0)
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ if len(args) == 0:
+ self.find_rrset(self.authority, name,
+ self.zone_rdclass, rdtype,
+ dns.rdatatype.NONE,
+ dns.rdataclass.ANY,
+ True, True)
+ else:
+ for s in args:
+ rd = dns.rdata.from_text(self.zone_rdclass, rdtype, s,
+ self.origin)
+ self._add_rr(name, 0, rd, dns.rdataclass.NONE)
+
+ def replace(self, name, *args):
+ """Replace records.
+
+ The first argument is always a name. The other
+ arguments can be:
+
+ - rdataset...
+
+ - ttl, rdata...
+
+ - ttl, rdtype, string...
+
+ Note that if you want to replace the entire node, you should do
+ a delete of the name followed by one or more calls to add.
+ """
+
+ self._add(True, self.authority, name, *args)
+
+ def present(self, name, *args):
+ """Require that an owner name (and optionally an rdata type,
+ or specific rdataset) exists as a prerequisite to the
+ execution of the update.
+
+ The first argument is always a name.
+ The other arguments can be:
+
+ - rdataset...
+
+ - rdata...
+
+ - rdtype, string...
+ """
+
+ if isinstance(name, string_types):
+ name = dns.name.from_text(name, None)
+ if len(args) == 0:
+ self.find_rrset(self.answer, name,
+ dns.rdataclass.ANY, dns.rdatatype.ANY,
+ dns.rdatatype.NONE, None,
+ True, True)
+ elif isinstance(args[0], dns.rdataset.Rdataset) or \
+ isinstance(args[0], dns.rdata.Rdata) or \
+ len(args) > 1:
+ if not isinstance(args[0], dns.rdataset.Rdataset):
+ # Add a 0 TTL
+ args = list(args)
+ args.insert(0, 0)
+ self._add(False, self.answer, name, *args)
+ else:
+ rdtype = args[0]
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ self.find_rrset(self.answer, name,
+ dns.rdataclass.ANY, rdtype,
+ dns.rdatatype.NONE, None,
+ True, True)
+
+ def absent(self, name, rdtype=None):
+ """Require that an owner name (and optionally an rdata type) does
+ not exist as a prerequisite to the execution of the update."""
+
+ if isinstance(name, string_types):
+ name = dns.name.from_text(name, None)
+ if rdtype is None:
+ self.find_rrset(self.answer, name,
+ dns.rdataclass.NONE, dns.rdatatype.ANY,
+ dns.rdatatype.NONE, None,
+ True, True)
+ else:
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ self.find_rrset(self.answer, name,
+ dns.rdataclass.NONE, rdtype,
+ dns.rdatatype.NONE, None,
+ True, True)
+
+ def to_wire(self, origin=None, max_size=65535):
+ """Return a string containing the update in DNS compressed wire
+ format.
+
+ *origin*, a ``dns.name.Name`` or ``None``, the origin to be
+ appended to any relative names. If *origin* is ``None``, then
+ the origin of the ``dns.update.Update`` message object is used
+ (i.e. the *zone* parameter passed when the Update object was
+ created).
+
+    *max_size*, an ``int``, the maximum size of the wire format
+    output; default is 65535, the largest possible size of a DNS
+    message.
+
+ Returns a ``binary``.
+ """
+
+ if origin is None:
+ origin = self.origin
+ return super(Update, self).to_wire(origin, max_size)
diff --git a/openpype/vendor/python/python_2/dns/version.py b/openpype/vendor/python/python_2/dns/version.py
new file mode 100644
index 0000000000..f116904b46
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/version.py
@@ -0,0 +1,43 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""dnspython release version information."""
+
+#: MAJOR
+MAJOR = 1
+#: MINOR
+MINOR = 16
+#: MICRO
+MICRO = 0
+#: RELEASELEVEL
+RELEASELEVEL = 0x0f
+#: SERIAL
+SERIAL = 0
+
+if RELEASELEVEL == 0x0f:
+ #: version
+ version = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
+elif RELEASELEVEL == 0x00:
+ version = '%d.%d.%dx%d' % \
+ (MAJOR, MINOR, MICRO, SERIAL)
+else:
+ version = '%d.%d.%d%x%d' % \
+ (MAJOR, MINOR, MICRO, RELEASELEVEL, SERIAL)
+
+#: hexversion
+hexversion = MAJOR << 24 | MINOR << 16 | MICRO << 8 | RELEASELEVEL << 4 | \
+ SERIAL
diff --git a/openpype/vendor/python/python_2/dns/wiredata.py b/openpype/vendor/python/python_2/dns/wiredata.py
new file mode 100644
index 0000000000..ea3c1e67d6
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/wiredata.py
@@ -0,0 +1,103 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2011,2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Wire Data Helper"""
+
+import dns.exception
+from ._compat import binary_type, string_types, PY2
+
+# Figure out what constant python passes for an unspecified slice bound.
+# It's supposed to be sys.maxint, yet on 64-bit windows sys.maxint is 2^31 - 1
+# but Python uses 2^63 - 1 as the constant. Rather than making pointless
+# extra comparisons, duplicating code, or weakening WireData, we just figure
+# out what constant Python will use.
+
+
+class _SliceUnspecifiedBound(binary_type):
+
+ def __getitem__(self, key):
+ return key.stop
+
+ if PY2:
+ def __getslice__(self, i, j): # pylint: disable=getslice-method
+ return self.__getitem__(slice(i, j))
+
+_unspecified_bound = _SliceUnspecifiedBound()[1:]
+
+
+class WireData(binary_type):
+ # WireData is a binary type with stricter slicing
+
+ def __getitem__(self, key):
+ try:
+ if isinstance(key, slice):
+ # make sure we are not going outside of valid ranges,
+ # do stricter control of boundaries than python does
+ # by default
+ start = key.start
+ stop = key.stop
+
+ if PY2:
+ if stop == _unspecified_bound:
+ # handle the case where the right bound is unspecified
+ stop = len(self)
+
+ if start < 0 or stop < 0:
+ raise dns.exception.FormError
+ # If it's not an empty slice, access left and right bounds
+ # to make sure they're valid
+ if start != stop:
+ super(WireData, self).__getitem__(start)
+ super(WireData, self).__getitem__(stop - 1)
+ else:
+ for index in (start, stop):
+ if index is None:
+ continue
+ elif abs(index) > len(self):
+ raise dns.exception.FormError
+
+ return WireData(super(WireData, self).__getitem__(
+ slice(start, stop)))
+ return bytearray(self.unwrap())[key]
+ except IndexError:
+ raise dns.exception.FormError
+
+ if PY2:
+ def __getslice__(self, i, j): # pylint: disable=getslice-method
+ return self.__getitem__(slice(i, j))
+
+ def __iter__(self):
+ i = 0
+ while 1:
+ try:
+ yield self[i]
+ i += 1
+ except dns.exception.FormError:
+ raise StopIteration
+
+ def unwrap(self):
+ return binary_type(self)
+
+
+def maybe_wrap(wire):
+ if isinstance(wire, WireData):
+ return wire
+ elif isinstance(wire, binary_type):
+ return WireData(wire)
+ elif isinstance(wire, string_types):
+ return WireData(wire.encode())
+ raise ValueError("unhandled type %s" % type(wire))
diff --git a/openpype/vendor/python/python_2/dns/zone.py b/openpype/vendor/python/python_2/dns/zone.py
new file mode 100644
index 0000000000..1e2fe78168
--- /dev/null
+++ b/openpype/vendor/python/python_2/dns/zone.py
@@ -0,0 +1,1127 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Zones."""
+
+from __future__ import generators
+
+import sys
+import re
+import os
+from io import BytesIO
+
+import dns.exception
+import dns.name
+import dns.node
+import dns.rdataclass
+import dns.rdatatype
+import dns.rdata
+import dns.rdtypes.ANY.SOA
+import dns.rrset
+import dns.tokenizer
+import dns.ttl
+import dns.grange
+from ._compat import string_types, text_type, PY3
+
+
+class BadZone(dns.exception.DNSException):
+
+ """The DNS zone is malformed."""
+
+
+class NoSOA(BadZone):
+
+ """The DNS zone has no SOA RR at its origin."""
+
+
+class NoNS(BadZone):
+
+ """The DNS zone has no NS RRset at its origin."""
+
+
+class UnknownOrigin(BadZone):
+
+ """The DNS zone's origin is unknown."""
+
+
+class Zone(object):
+
+ """A DNS zone.
+
+ A Zone is a mapping from names to nodes. The zone object may be
+ treated like a Python dictionary, e.g. zone[name] will retrieve
+    the node associated with that name.  The I{name} may be a
+    dns.name.Name object, or it may be a string.  In either case,
+ if the name is relative it is treated as relative to the origin of
+ the zone.
+
+ @ivar rdclass: The zone's rdata class; the default is class IN.
+ @type rdclass: int
+ @ivar origin: The origin of the zone.
+ @type origin: dns.name.Name object
+ @ivar nodes: A dictionary mapping the names of nodes in the zone to the
+ nodes themselves.
+ @type nodes: dict
+ @ivar relativize: should names in the zone be relativized?
+ @type relativize: bool
+ @cvar node_factory: the factory used to create a new node
+ @type node_factory: class or callable
+ """
+
+ node_factory = dns.node.Node
+
+ __slots__ = ['rdclass', 'origin', 'nodes', 'relativize']
+
+ def __init__(self, origin, rdclass=dns.rdataclass.IN, relativize=True):
+ """Initialize a zone object.
+
+ @param origin: The origin of the zone.
+ @type origin: dns.name.Name object
+ @param rdclass: The zone's rdata class; the default is class IN.
+ @type rdclass: int"""
+
+ if origin is not None:
+ if isinstance(origin, string_types):
+ origin = dns.name.from_text(origin)
+ elif not isinstance(origin, dns.name.Name):
+ raise ValueError("origin parameter must be convertible to a "
+ "DNS name")
+ if not origin.is_absolute():
+ raise ValueError("origin parameter must be an absolute name")
+ self.origin = origin
+ self.rdclass = rdclass
+ self.nodes = {}
+ self.relativize = relativize
+
+ def __eq__(self, other):
+ """Two zones are equal if they have the same origin, class, and
+ nodes.
+ @rtype: bool
+ """
+
+ if not isinstance(other, Zone):
+ return False
+ if self.rdclass != other.rdclass or \
+ self.origin != other.origin or \
+ self.nodes != other.nodes:
+ return False
+ return True
+
+ def __ne__(self, other):
+ """Are two zones not equal?
+ @rtype: bool
+ """
+
+ return not self.__eq__(other)
+
+ def _validate_name(self, name):
+ if isinstance(name, string_types):
+ name = dns.name.from_text(name, None)
+ elif not isinstance(name, dns.name.Name):
+ raise KeyError("name parameter must be convertible to a DNS name")
+ if name.is_absolute():
+ if not name.is_subdomain(self.origin):
+ raise KeyError(
+ "name parameter must be a subdomain of the zone origin")
+ if self.relativize:
+ name = name.relativize(self.origin)
+ return name
+
+ def __getitem__(self, key):
+ key = self._validate_name(key)
+ return self.nodes[key]
+
+ def __setitem__(self, key, value):
+ key = self._validate_name(key)
+ self.nodes[key] = value
+
+ def __delitem__(self, key):
+ key = self._validate_name(key)
+ del self.nodes[key]
+
+ def __iter__(self):
+ return self.nodes.__iter__()
+
+ def iterkeys(self):
+ if PY3:
+ return self.nodes.keys() # pylint: disable=dict-keys-not-iterating
+ else:
+ return self.nodes.iterkeys() # pylint: disable=dict-iter-method
+
+ def keys(self):
+ return self.nodes.keys() # pylint: disable=dict-keys-not-iterating
+
+ def itervalues(self):
+ if PY3:
+ return self.nodes.values() # pylint: disable=dict-values-not-iterating
+ else:
+ return self.nodes.itervalues() # pylint: disable=dict-iter-method
+
+ def values(self):
+ return self.nodes.values() # pylint: disable=dict-values-not-iterating
+
+ def items(self):
+ return self.nodes.items() # pylint: disable=dict-items-not-iterating
+
+ iteritems = items
+
+ def get(self, key):
+ key = self._validate_name(key)
+ return self.nodes.get(key)
+
+ def __contains__(self, other):
+ return other in self.nodes
+
+ def find_node(self, name, create=False):
+ """Find a node in the zone, possibly creating it.
+
+ @param name: the name of the node to find
+ @type name: dns.name.Name object or string
+ @param create: should the node be created if it doesn't exist?
+ @type create: bool
+ @raises KeyError: the name is not known and create was not specified.
+ @rtype: dns.node.Node object
+ """
+
+ name = self._validate_name(name)
+ node = self.nodes.get(name)
+ if node is None:
+ if not create:
+ raise KeyError
+ node = self.node_factory()
+ self.nodes[name] = node
+ return node
+
+ def get_node(self, name, create=False):
+ """Get a node in the zone, possibly creating it.
+
+ This method is like L{find_node}, except it returns None instead
+ of raising an exception if the node does not exist and creation
+ has not been requested.
+
+ @param name: the name of the node to find
+ @type name: dns.name.Name object or string
+ @param create: should the node be created if it doesn't exist?
+ @type create: bool
+ @rtype: dns.node.Node object or None
+ """
+
+ try:
+ node = self.find_node(name, create)
+ except KeyError:
+ node = None
+ return node
+
+ def delete_node(self, name):
+ """Delete the specified node if it exists.
+
+ It is not an error if the node does not exist.
+ """
+
+ name = self._validate_name(name)
+ if name in self.nodes:
+ del self.nodes[name]
+
+ def find_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,
+ create=False):
+ """Look for rdata with the specified name and type in the zone,
+ and return an rdataset encapsulating it.
+
+ The I{name}, I{rdtype}, and I{covers} parameters may be
+ strings, in which case they will be converted to their proper
+ type.
+
+ The rdataset returned is not a copy; changes to it will change
+ the zone.
+
+ KeyError is raised if the name or type are not found.
+ Use L{get_rdataset} if you want to have None returned instead.
+
+ @param name: the owner name to look for
+ @type name: DNS.name.Name object or string
+ @param rdtype: the rdata type desired
+ @type rdtype: int or string
+ @param covers: the covered type (defaults to None)
+ @type covers: int or string
+ @param create: should the node and rdataset be created if they do not
+ exist?
+ @type create: bool
+ @raises KeyError: the node or rdata could not be found
+ @rtype: dns.rdataset.Rdataset object
+ """
+
+ name = self._validate_name(name)
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ if isinstance(covers, string_types):
+ covers = dns.rdatatype.from_text(covers)
+ node = self.find_node(name, create)
+ return node.find_rdataset(self.rdclass, rdtype, covers, create)
+
+ def get_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,
+ create=False):
+ """Look for rdata with the specified name and type in the zone,
+ and return an rdataset encapsulating it.
+
+ The I{name}, I{rdtype}, and I{covers} parameters may be
+ strings, in which case they will be converted to their proper
+ type.
+
+ The rdataset returned is not a copy; changes to it will change
+ the zone.
+
+ None is returned if the name or type are not found.
+ Use L{find_rdataset} if you want to have KeyError raised instead.
+
+ @param name: the owner name to look for
+ @type name: DNS.name.Name object or string
+ @param rdtype: the rdata type desired
+ @type rdtype: int or string
+ @param covers: the covered type (defaults to None)
+ @type covers: int or string
+ @param create: should the node and rdataset be created if they do not
+ exist?
+ @type create: bool
+ @rtype: dns.rdataset.Rdataset object or None
+ """
+
+ try:
+ rdataset = self.find_rdataset(name, rdtype, covers, create)
+ except KeyError:
+ rdataset = None
+ return rdataset
+
+ def delete_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE):
+ """Delete the rdataset matching I{rdtype} and I{covers}, if it
+ exists at the node specified by I{name}.
+
+ The I{name}, I{rdtype}, and I{covers} parameters may be
+ strings, in which case they will be converted to their proper
+ type.
+
+ It is not an error if the node does not exist, or if there is no
+ matching rdataset at the node.
+
+ If the node has no rdatasets after the deletion, it will itself
+ be deleted.
+
+ @param name: the owner name to look for
+ @type name: DNS.name.Name object or string
+ @param rdtype: the rdata type desired
+ @type rdtype: int or string
+ @param covers: the covered type (defaults to None)
+ @type covers: int or string
+ """
+
+ name = self._validate_name(name)
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ if isinstance(covers, string_types):
+ covers = dns.rdatatype.from_text(covers)
+ node = self.get_node(name)
+ if node is not None:
+ node.delete_rdataset(self.rdclass, rdtype, covers)
+ if len(node) == 0:
+ self.delete_node(name)
+
+ def replace_rdataset(self, name, replacement):
+ """Replace an rdataset at name.
+
+ It is not an error if there is no rdataset matching I{replacement}.
+
+ Ownership of the I{replacement} object is transferred to the zone;
+ in other words, this method does not store a copy of I{replacement}
+ at the node, it stores I{replacement} itself.
+
+ If the I{name} node does not exist, it is created.
+
+ @param name: the owner name
+ @type name: DNS.name.Name object or string
+ @param replacement: the replacement rdataset
+ @type replacement: dns.rdataset.Rdataset
+ """
+
+ if replacement.rdclass != self.rdclass:
+ raise ValueError('replacement.rdclass != zone.rdclass')
+ node = self.find_node(name, True)
+ node.replace_rdataset(replacement)
+
+ def find_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):
+ """Look for rdata with the specified name and type in the zone,
+ and return an RRset encapsulating it.
+
+ The I{name}, I{rdtype}, and I{covers} parameters may be
+ strings, in which case they will be converted to their proper
+ type.
+
+ This method is less efficient than the similar
+ L{find_rdataset} because it creates an RRset instead of
+ returning the matching rdataset. It may be more convenient
+ for some uses since it returns an object which binds the owner
+ name to the rdata.
+
+ This method may not be used to create new nodes or rdatasets;
+ use L{find_rdataset} instead.
+
+ KeyError is raised if the name or type are not found.
+ Use L{get_rrset} if you want to have None returned instead.
+
+ @param name: the owner name to look for
+ @type name: DNS.name.Name object or string
+ @param rdtype: the rdata type desired
+ @type rdtype: int or string
+ @param covers: the covered type (defaults to None)
+ @type covers: int or string
+ @raises KeyError: the node or rdata could not be found
+ @rtype: dns.rrset.RRset object
+ """
+
+ name = self._validate_name(name)
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ if isinstance(covers, string_types):
+ covers = dns.rdatatype.from_text(covers)
+ rdataset = self.nodes[name].find_rdataset(self.rdclass, rdtype, covers)
+ rrset = dns.rrset.RRset(name, self.rdclass, rdtype, covers)
+ rrset.update(rdataset)
+ return rrset
+
+ def get_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):
+ """Look for rdata with the specified name and type in the zone,
+ and return an RRset encapsulating it.
+
+ The I{name}, I{rdtype}, and I{covers} parameters may be
+ strings, in which case they will be converted to their proper
+ type.
+
+ This method is less efficient than the similar L{get_rdataset}
+ because it creates an RRset instead of returning the matching
+ rdataset. It may be more convenient for some uses since it
+ returns an object which binds the owner name to the rdata.
+
+ This method may not be used to create new nodes or rdatasets;
+ use L{find_rdataset} instead.
+
+ None is returned if the name or type are not found.
+ Use L{find_rrset} if you want to have KeyError raised instead.
+
+ @param name: the owner name to look for
+ @type name: DNS.name.Name object or string
+ @param rdtype: the rdata type desired
+ @type rdtype: int or string
+ @param covers: the covered type (defaults to None)
+ @type covers: int or string
+ @rtype: dns.rrset.RRset object
+ """
+
+ try:
+ rrset = self.find_rrset(name, rdtype, covers)
+ except KeyError:
+ rrset = None
+ return rrset
+
+ def iterate_rdatasets(self, rdtype=dns.rdatatype.ANY,
+ covers=dns.rdatatype.NONE):
+ """Return a generator which yields (name, rdataset) tuples for
+ all rdatasets in the zone which have the specified I{rdtype}
+ and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default,
+ then all rdatasets will be matched.
+
+ @param rdtype: int or string
+ @type rdtype: int or string
+ @param covers: the covered type (defaults to None)
+ @type covers: int or string
+ """
+
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ if isinstance(covers, string_types):
+ covers = dns.rdatatype.from_text(covers)
+ for (name, node) in self.iteritems(): # pylint: disable=dict-iter-method
+ for rds in node:
+ if rdtype == dns.rdatatype.ANY or \
+ (rds.rdtype == rdtype and rds.covers == covers):
+ yield (name, rds)
+
+ def iterate_rdatas(self, rdtype=dns.rdatatype.ANY,
+ covers=dns.rdatatype.NONE):
+ """Return a generator which yields (name, ttl, rdata) tuples for
+ all rdatas in the zone which have the specified I{rdtype}
+ and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default,
+ then all rdatas will be matched.
+
+ @param rdtype: int or string
+ @type rdtype: int or string
+ @param covers: the covered type (defaults to None)
+ @type covers: int or string
+ """
+
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ if isinstance(covers, string_types):
+ covers = dns.rdatatype.from_text(covers)
+ for (name, node) in self.iteritems(): # pylint: disable=dict-iter-method
+ for rds in node:
+ if rdtype == dns.rdatatype.ANY or \
+ (rds.rdtype == rdtype and rds.covers == covers):
+ for rdata in rds:
+ yield (name, rds.ttl, rdata)
+
+ def to_file(self, f, sorted=True, relativize=True, nl=None):
+ """Write a zone to a file.
+
+ @param f: file or string. If I{f} is a string, it is treated
+ as the name of a file to open.
+ @param sorted: if True, the file will be written with the
+ names sorted in DNSSEC order from least to greatest. Otherwise
+ the names will be written in whatever order they happen to have
+ in the zone's dictionary.
+ @param relativize: if True, domain names in the output will be
+ relativized to the zone's origin (if possible).
+ @type relativize: bool
+ @param nl: The end of line string. If not specified, the
+ output will use the platform's native end-of-line marker (i.e.
+ LF on POSIX, CRLF on Windows, CR on Macintosh).
+ @type nl: string or None
+ """
+
+ if isinstance(f, string_types):
+ f = open(f, 'wb')
+ want_close = True
+ else:
+ want_close = False
+
+ # must be in this way, f.encoding may contain None, or even attribute
+ # may not be there
+ file_enc = getattr(f, 'encoding', None)
+ if file_enc is None:
+ file_enc = 'utf-8'
+
+ if nl is None:
+ nl_b = os.linesep.encode(file_enc) # binary mode, '\n' is not enough
+ nl = u'\n'
+ elif isinstance(nl, string_types):
+ nl_b = nl.encode(file_enc)
+ else:
+ nl_b = nl
+ nl = nl.decode()
+
+ try:
+ if sorted:
+ names = list(self.keys())
+ names.sort()
+ else:
+ names = self.iterkeys() # pylint: disable=dict-iter-method
+ for n in names:
+ l = self[n].to_text(n, origin=self.origin,
+ relativize=relativize)
+ if isinstance(l, text_type):
+ l_b = l.encode(file_enc)
+ else:
+ l_b = l
+ l = l.decode()
+
+ try:
+ f.write(l_b)
+ f.write(nl_b)
+ except TypeError: # textual mode
+ f.write(l)
+ f.write(nl)
+ finally:
+ if want_close:
+ f.close()
+
+ def to_text(self, sorted=True, relativize=True, nl=None):
+ """Return a zone's text as though it were written to a file.
+
+ @param sorted: if True, the file will be written with the
+ names sorted in DNSSEC order from least to greatest. Otherwise
+ the names will be written in whatever order they happen to have
+ in the zone's dictionary.
+ @param relativize: if True, domain names in the output will be
+ relativized to the zone's origin (if possible).
+ @type relativize: bool
+ @param nl: The end of line string. If not specified, the
+ output will use the platform's native end-of-line marker (i.e.
+ LF on POSIX, CRLF on Windows, CR on Macintosh).
+ @type nl: string or None
+ """
+ temp_buffer = BytesIO()
+ self.to_file(temp_buffer, sorted, relativize, nl)
+ return_value = temp_buffer.getvalue()
+ temp_buffer.close()
+ return return_value
+
+ def check_origin(self):
+ """Do some simple checking of the zone's origin.
+
+ @raises dns.zone.NoSOA: there is no SOA RR
+ @raises dns.zone.NoNS: there is no NS RRset
+ @raises KeyError: there is no origin node
+ """
+ if self.relativize:
+ name = dns.name.empty
+ else:
+ name = self.origin
+ if self.get_rdataset(name, dns.rdatatype.SOA) is None:
+ raise NoSOA
+ if self.get_rdataset(name, dns.rdatatype.NS) is None:
+ raise NoNS
+
+
+class _MasterReader(object):
+
+ """Read a DNS master file
+
+ @ivar tok: The tokenizer
+ @type tok: dns.tokenizer.Tokenizer object
+ @ivar last_ttl: The last seen explicit TTL for an RR
+ @type last_ttl: int
+ @ivar last_ttl_known: Has last TTL been detected
+ @type last_ttl_known: bool
+ @ivar default_ttl: The default TTL from a $TTL directive or SOA RR
+ @type default_ttl: int
+ @ivar default_ttl_known: Has default TTL been detected
+ @type default_ttl_known: bool
+ @ivar last_name: The last name read
+ @type last_name: dns.name.Name object
+ @ivar current_origin: The current origin
+ @type current_origin: dns.name.Name object
+ @ivar relativize: should names in the zone be relativized?
+ @type relativize: bool
+ @ivar zone: the zone
+ @type zone: dns.zone.Zone object
+ @ivar saved_state: saved reader state (used when processing $INCLUDE)
+ @type saved_state: list of (tokenizer, current_origin, last_name, file,
+ last_ttl, last_ttl_known, default_ttl, default_ttl_known) tuples.
+ @ivar current_file: the file object of the $INCLUDed file being parsed
+ (None if no $INCLUDE is active).
+ @ivar allow_include: is $INCLUDE allowed?
+ @type allow_include: bool
+ @ivar check_origin: should sanity checks of the origin node be done?
+ The default is True.
+ @type check_origin: bool
+ """
+
+ def __init__(self, tok, origin, rdclass, relativize, zone_factory=Zone,
+ allow_include=False, check_origin=True):
+ if isinstance(origin, string_types):
+ origin = dns.name.from_text(origin)
+ self.tok = tok
+ self.current_origin = origin
+ self.relativize = relativize
+ self.last_ttl = 0
+ self.last_ttl_known = False
+ self.default_ttl = 0
+ self.default_ttl_known = False
+ self.last_name = self.current_origin
+ self.zone = zone_factory(origin, rdclass, relativize=relativize)
+ self.saved_state = []
+ self.current_file = None
+ self.allow_include = allow_include
+ self.check_origin = check_origin
+
+ def _eat_line(self):
+ while 1:
+ token = self.tok.get()
+ if token.is_eol_or_eof():
+ break
+
+ def _rr_line(self):
+ """Process one line from a DNS master file."""
+ # Name
+ if self.current_origin is None:
+ raise UnknownOrigin
+ token = self.tok.get(want_leading=True)
+ if not token.is_whitespace():
+ self.last_name = dns.name.from_text(
+ token.value, self.current_origin)
+ else:
+ token = self.tok.get()
+ if token.is_eol_or_eof():
+ # treat leading WS followed by EOL/EOF as if they were EOL/EOF.
+ return
+ self.tok.unget(token)
+ name = self.last_name
+ if not name.is_subdomain(self.zone.origin):
+ self._eat_line()
+ return
+ if self.relativize:
+ name = name.relativize(self.zone.origin)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ # TTL
+ try:
+ ttl = dns.ttl.from_text(token.value)
+ self.last_ttl = ttl
+ self.last_ttl_known = True
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except dns.ttl.BadTTL:
+ if not (self.last_ttl_known or self.default_ttl_known):
+ raise dns.exception.SyntaxError("Missing default TTL value")
+ if self.default_ttl_known:
+ ttl = self.default_ttl
+ else:
+ ttl = self.last_ttl
+ # Class
+ try:
+ rdclass = dns.rdataclass.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except dns.exception.SyntaxError:
+ raise dns.exception.SyntaxError
+ except Exception:
+ rdclass = self.zone.rdclass
+ if rdclass != self.zone.rdclass:
+ raise dns.exception.SyntaxError("RR class is not zone's class")
+ # Type
+ try:
+ rdtype = dns.rdatatype.from_text(token.value)
+ except:
+ raise dns.exception.SyntaxError(
+ "unknown rdatatype '%s'" % token.value)
+ n = self.zone.nodes.get(name)
+ if n is None:
+ n = self.zone.node_factory()
+ self.zone.nodes[name] = n
+ try:
+ rd = dns.rdata.from_text(rdclass, rdtype, self.tok,
+ self.current_origin, False)
+ except dns.exception.SyntaxError:
+ # Catch and reraise.
+ (ty, va) = sys.exc_info()[:2]
+ raise va
+ except:
+ # All exceptions that occur in the processing of rdata
+ # are treated as syntax errors. This is not strictly
+ # correct, but it is correct almost all of the time.
+ # We convert them to syntax errors so that we can emit
+ # helpful filename:line info.
+ (ty, va) = sys.exc_info()[:2]
+ raise dns.exception.SyntaxError(
+ "caught exception {}: {}".format(str(ty), str(va)))
+
+ if not self.default_ttl_known and isinstance(rd, dns.rdtypes.ANY.SOA.SOA):
+ # The pre-RFC2308 and pre-BIND9 behavior inherits the zone default
+ # TTL from the SOA minttl if no $TTL statement is present before the
+ # SOA is parsed.
+ self.default_ttl = rd.minimum
+ self.default_ttl_known = True
+
+ rd.choose_relativity(self.zone.origin, self.relativize)
+ covers = rd.covers()
+ rds = n.find_rdataset(rdclass, rdtype, covers, True)
+ rds.add(rd, ttl)
+
+ def _parse_modify(self, side):
+ # Here we catch everything in '{' '}' in a group so we can replace it
+ # with ''.
+ is_generate1 = re.compile("^.*\$({(\+|-?)(\d+),(\d+),(.)}).*$")
+ is_generate2 = re.compile("^.*\$({(\+|-?)(\d+)}).*$")
+ is_generate3 = re.compile("^.*\$({(\+|-?)(\d+),(\d+)}).*$")
+ # Sometimes there are modifiers in the hostname. These come after
+ # the dollar sign. They are in the form: ${offset[,width[,base]]}.
+ # Make names
+ g1 = is_generate1.match(side)
+ if g1:
+ mod, sign, offset, width, base = g1.groups()
+ if sign == '':
+ sign = '+'
+ g2 = is_generate2.match(side)
+ if g2:
+ mod, sign, offset = g2.groups()
+ if sign == '':
+ sign = '+'
+ width = 0
+ base = 'd'
+ g3 = is_generate3.match(side)
+ if g3:
+ mod, sign, offset, width = g1.groups()
+ if sign == '':
+ sign = '+'
+ width = g1.groups()[2]
+ base = 'd'
+
+ if not (g1 or g2 or g3):
+ mod = ''
+ sign = '+'
+ offset = 0
+ width = 0
+ base = 'd'
+
+ if base != 'd':
+ raise NotImplementedError()
+
+ return mod, sign, offset, width, base
+
+ def _generate_line(self):
+ # range lhs [ttl] [class] type rhs [ comment ]
+ """Process one line containing the GENERATE statement from a DNS
+ master file."""
+ if self.current_origin is None:
+ raise UnknownOrigin
+
+ token = self.tok.get()
+ # Range (required)
+ try:
+ start, stop, step = dns.grange.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except:
+ raise dns.exception.SyntaxError
+
+ # lhs (required)
+ try:
+ lhs = token.value
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except:
+ raise dns.exception.SyntaxError
+
+ # TTL
+ try:
+ ttl = dns.ttl.from_text(token.value)
+ self.last_ttl = ttl
+ self.last_ttl_known = True
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except dns.ttl.BadTTL:
+ if not (self.last_ttl_known or self.default_ttl_known):
+ raise dns.exception.SyntaxError("Missing default TTL value")
+ if self.default_ttl_known:
+ ttl = self.default_ttl
+ else:
+ ttl = self.last_ttl
+ # Class
+ try:
+ rdclass = dns.rdataclass.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except dns.exception.SyntaxError:
+ raise dns.exception.SyntaxError
+ except Exception:
+ rdclass = self.zone.rdclass
+ if rdclass != self.zone.rdclass:
+ raise dns.exception.SyntaxError("RR class is not zone's class")
+ # Type
+ try:
+ rdtype = dns.rdatatype.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except Exception:
+ raise dns.exception.SyntaxError("unknown rdatatype '%s'" %
+ token.value)
+
+ # lhs (required)
+ try:
+ rhs = token.value
+ except:
+ raise dns.exception.SyntaxError
+
+ lmod, lsign, loffset, lwidth, lbase = self._parse_modify(lhs)
+ rmod, rsign, roffset, rwidth, rbase = self._parse_modify(rhs)
+ for i in range(start, stop + 1, step):
+ # +1 because bind is inclusive and python is exclusive
+
+ if lsign == u'+':
+ lindex = i + int(loffset)
+ elif lsign == u'-':
+ lindex = i - int(loffset)
+
+ if rsign == u'-':
+ rindex = i - int(roffset)
+ elif rsign == u'+':
+ rindex = i + int(roffset)
+
+ lzfindex = str(lindex).zfill(int(lwidth))
+ rzfindex = str(rindex).zfill(int(rwidth))
+
+ name = lhs.replace(u'$%s' % (lmod), lzfindex)
+ rdata = rhs.replace(u'$%s' % (rmod), rzfindex)
+
+ self.last_name = dns.name.from_text(name, self.current_origin)
+ name = self.last_name
+ if not name.is_subdomain(self.zone.origin):
+ self._eat_line()
+ return
+ if self.relativize:
+ name = name.relativize(self.zone.origin)
+
+ n = self.zone.nodes.get(name)
+ if n is None:
+ n = self.zone.node_factory()
+ self.zone.nodes[name] = n
+ try:
+ rd = dns.rdata.from_text(rdclass, rdtype, rdata,
+ self.current_origin, False)
+ except dns.exception.SyntaxError:
+ # Catch and reraise.
+ (ty, va) = sys.exc_info()[:2]
+ raise va
+ except:
+ # All exceptions that occur in the processing of rdata
+ # are treated as syntax errors. This is not strictly
+ # correct, but it is correct almost all of the time.
+ # We convert them to syntax errors so that we can emit
+ # helpful filename:line info.
+ (ty, va) = sys.exc_info()[:2]
+ raise dns.exception.SyntaxError("caught exception %s: %s" %
+ (str(ty), str(va)))
+
+ rd.choose_relativity(self.zone.origin, self.relativize)
+ covers = rd.covers()
+ rds = n.find_rdataset(rdclass, rdtype, covers, True)
+ rds.add(rd, ttl)
+
+ def read(self):
+ """Read a DNS master file and build a zone object.
+
+ @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
+ @raises dns.zone.NoNS: No NS RRset was found at the zone origin
+ """
+
+ try:
+ while 1:
+ token = self.tok.get(True, True)
+ if token.is_eof():
+ if self.current_file is not None:
+ self.current_file.close()
+ if len(self.saved_state) > 0:
+ (self.tok,
+ self.current_origin,
+ self.last_name,
+ self.current_file,
+ self.last_ttl,
+ self.last_ttl_known,
+ self.default_ttl,
+ self.default_ttl_known) = self.saved_state.pop(-1)
+ continue
+ break
+ elif token.is_eol():
+ continue
+ elif token.is_comment():
+ self.tok.get_eol()
+ continue
+ elif token.value[0] == u'$':
+ c = token.value.upper()
+ if c == u'$TTL':
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError("bad $TTL")
+ self.default_ttl = dns.ttl.from_text(token.value)
+ self.default_ttl_known = True
+ self.tok.get_eol()
+ elif c == u'$ORIGIN':
+ self.current_origin = self.tok.get_name()
+ self.tok.get_eol()
+ if self.zone.origin is None:
+ self.zone.origin = self.current_origin
+ elif c == u'$INCLUDE' and self.allow_include:
+ token = self.tok.get()
+ filename = token.value
+ token = self.tok.get()
+ if token.is_identifier():
+ new_origin =\
+ dns.name.from_text(token.value,
+ self.current_origin)
+ self.tok.get_eol()
+ elif not token.is_eol_or_eof():
+ raise dns.exception.SyntaxError(
+ "bad origin in $INCLUDE")
+ else:
+ new_origin = self.current_origin
+ self.saved_state.append((self.tok,
+ self.current_origin,
+ self.last_name,
+ self.current_file,
+ self.last_ttl,
+ self.last_ttl_known,
+ self.default_ttl,
+ self.default_ttl_known))
+ self.current_file = open(filename, 'r')
+ self.tok = dns.tokenizer.Tokenizer(self.current_file,
+ filename)
+ self.current_origin = new_origin
+ elif c == u'$GENERATE':
+ self._generate_line()
+ else:
+ raise dns.exception.SyntaxError(
+ "Unknown master file directive '" + c + "'")
+ continue
+ self.tok.unget(token)
+ self._rr_line()
+ except dns.exception.SyntaxError as detail:
+ (filename, line_number) = self.tok.where()
+ if detail is None:
+ detail = "syntax error"
+ raise dns.exception.SyntaxError(
+ "%s:%d: %s" % (filename, line_number, detail))
+
+ # Now that we're done reading, do some basic checking of the zone.
+ if self.check_origin:
+ self.zone.check_origin()
+
+
+def from_text(text, origin=None, rdclass=dns.rdataclass.IN,
+ relativize=True, zone_factory=Zone, filename=None,
+ allow_include=False, check_origin=True):
+ """Build a zone object from a master file format string.
+
+ @param text: the master file format input
+ @type text: string.
+ @param origin: The origin of the zone; if not specified, the first
+ $ORIGIN statement in the master file will determine the origin of the
+ zone.
+ @type origin: dns.name.Name object or string
+ @param rdclass: The zone's rdata class; the default is class IN.
+ @type rdclass: int
+ @param relativize: should names be relativized? The default is True
+ @type relativize: bool
+ @param zone_factory: The zone factory to use
+ @type zone_factory: function returning a Zone
+ @param filename: The filename to emit when describing where an error
+ occurred; the default is ''.
+ @type filename: string
+ @param allow_include: is $INCLUDE allowed?
+ @type allow_include: bool
+ @param check_origin: should sanity checks of the origin node be done?
+ The default is True.
+ @type check_origin: bool
+ @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
+ @raises dns.zone.NoNS: No NS RRset was found at the zone origin
+ @rtype: dns.zone.Zone object
+ """
+
+ # 'text' can also be a file, but we don't publish that fact
+ # since it's an implementation detail. The official file
+ # interface is from_file().
+
+ if filename is None:
+ filename = ''
+ tok = dns.tokenizer.Tokenizer(text, filename)
+ reader = _MasterReader(tok, origin, rdclass, relativize, zone_factory,
+ allow_include=allow_include,
+ check_origin=check_origin)
+ reader.read()
+ return reader.zone
+
+
+def from_file(f, origin=None, rdclass=dns.rdataclass.IN,
+ relativize=True, zone_factory=Zone, filename=None,
+ allow_include=True, check_origin=True):
+ """Read a master file and build a zone object.
+
+ @param f: file or string. If I{f} is a string, it is treated
+ as the name of a file to open.
+ @param origin: The origin of the zone; if not specified, the first
+ $ORIGIN statement in the master file will determine the origin of the
+ zone.
+ @type origin: dns.name.Name object or string
+ @param rdclass: The zone's rdata class; the default is class IN.
+ @type rdclass: int
+ @param relativize: should names be relativized? The default is True
+ @type relativize: bool
+ @param zone_factory: The zone factory to use
+ @type zone_factory: function returning a Zone
+ @param filename: The filename to emit when describing where an error
+ occurred; the default is '', or the value of I{f} if I{f} is a
+ string.
+ @type filename: string
+ @param allow_include: is $INCLUDE allowed?
+ @type allow_include: bool
+ @param check_origin: should sanity checks of the origin node be done?
+ The default is True.
+ @type check_origin: bool
+ @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
+ @raises dns.zone.NoNS: No NS RRset was found at the zone origin
+ @rtype: dns.zone.Zone object
+ """
+
+ str_type = string_types
+ if PY3:
+ opts = 'r'
+ else:
+ opts = 'rU'
+
+ if isinstance(f, str_type):
+ if filename is None:
+ filename = f
+ f = open(f, opts)
+ want_close = True
+ else:
+ if filename is None:
+ filename = ''
+ want_close = False
+
+ try:
+ z = from_text(f, origin, rdclass, relativize, zone_factory,
+ filename, allow_include, check_origin)
+ finally:
+ if want_close:
+ f.close()
+ return z
+
+
+def from_xfr(xfr, zone_factory=Zone, relativize=True, check_origin=True):
+ """Convert the output of a zone transfer generator into a zone object.
+
+ @param xfr: The xfr generator
+ @type xfr: generator of dns.message.Message objects
+ @param relativize: should names be relativized? The default is True.
+ It is essential that the relativize setting matches the one specified
+ to dns.query.xfr().
+ @type relativize: bool
+ @param check_origin: should sanity checks of the origin node be done?
+ The default is True.
+ @type check_origin: bool
+ @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
+ @raises dns.zone.NoNS: No NS RRset was found at the zone origin
+ @rtype: dns.zone.Zone object
+ """
+
+ z = None
+ for r in xfr:
+ if z is None:
+ if relativize:
+ origin = r.origin
+ else:
+ origin = r.answer[0].name
+ rdclass = r.answer[0].rdclass
+ z = zone_factory(origin, rdclass, relativize=relativize)
+ for rrset in r.answer:
+ znode = z.nodes.get(rrset.name)
+ if not znode:
+ znode = z.node_factory()
+ z.nodes[rrset.name] = znode
+ zrds = znode.find_rdataset(rrset.rdclass, rrset.rdtype,
+ rrset.covers, True)
+ zrds.update_ttl(rrset.ttl)
+ for rd in rrset:
+ rd.choose_relativity(z.origin, relativize)
+ zrds.add(rd)
+ if check_origin:
+ z.check_origin()
+ return z
diff --git a/openpype/version.py b/openpype/version.py
index f85ea13ac8..dedf799055 100644
--- a/openpype/version.py
+++ b/openpype/version.py
@@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring Pype version."""
-__version__ = "3.0.0-beta"
+__version__ = "3.0.0-beta2"
diff --git a/poetry.lock b/poetry.lock
index 767aeee500..41a1f636ec 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -80,7 +80,7 @@ python-dateutil = ">=2.7.0"
[[package]]
name = "astroid"
-version = "2.5.2"
+version = "2.5.3"
description = "An abstract syntax tree for Python with inference support."
category = "dev"
optional = false
@@ -272,15 +272,24 @@ test = ["pytest (>=6.0)", "pytest-cov", "pytest-subtests", "pytest-xdist", "pret
[[package]]
name = "cx-freeze"
-version = "6.5.3"
+version = "6.6"
description = "Create standalone executables from Python scripts"
category = "dev"
optional = false
python-versions = ">=3.6"
[package.dependencies]
+cx-Logging = {version = ">=3.0", markers = "sys_platform == \"win32\""}
importlib-metadata = ">=3.1.1"
+[[package]]
+name = "cx-logging"
+version = "3.0"
+description = "Python and C interfaces for logging"
+category = "dev"
+optional = false
+python-versions = "*"
+
[[package]]
name = "dnspython"
version = "2.1.0"
@@ -298,12 +307,24 @@ trio = ["trio (>=0.14.0)", "sniffio (>=1.1)"]
[[package]]
name = "docutils"
-version = "0.17"
+version = "0.16"
description = "Docutils -- Python Documentation Utilities"
category = "dev"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+[[package]]
+name = "enlighten"
+version = "1.9.0"
+description = "Enlighten Progress Bar"
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+blessed = ">=1.17.7"
+prefixed = ">=0.3.2"
+
[[package]]
name = "evdev"
version = "1.4.0"
@@ -314,7 +335,7 @@ python-versions = "*"
[[package]]
name = "flake8"
-version = "3.9.0"
+version = "3.9.1"
description = "the modular source code checker: pep8 pyflakes and co"
category = "dev"
optional = false
@@ -392,7 +413,7 @@ uritemplate = ">=3.0.0,<4dev"
[[package]]
name = "google-auth"
-version = "1.28.0"
+version = "1.29.0"
description = "Google Authentication Library"
category = "main"
optional = false
@@ -407,6 +428,7 @@ six = ">=1.9.0"
[package.extras]
aiohttp = ["aiohttp (>=3.6.2,<4.0.0dev)"]
pyopenssl = ["pyopenssl (>=20.0.0)"]
+reauth = ["pyu2f (>=0.1.5)"]
[[package]]
name = "google-auth-httplib2"
@@ -464,7 +486,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
[[package]]
name = "importlib-metadata"
-version = "3.10.0"
+version = "4.0.0"
description = "Read metadata from Python packages"
category = "main"
optional = false
@@ -540,7 +562,7 @@ i18n = ["Babel (>=0.8)"]
[[package]]
name = "jinxed"
-version = "1.0.1"
+version = "1.1.0"
description = "Jinxed Terminal Library"
category = "main"
optional = false
@@ -704,9 +726,17 @@ importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""}
[package.extras]
dev = ["pre-commit", "tox"]
+[[package]]
+name = "prefixed"
+version = "0.3.2"
+description = "Prefixed alternative numeric library"
+category = "main"
+optional = false
+python-versions = "*"
+
[[package]]
name = "protobuf"
-version = "3.15.7"
+version = "3.15.8"
description = "Protocol Buffers"
category = "main"
optional = false
@@ -1120,7 +1150,7 @@ python-versions = "*"
[[package]]
name = "sphinx"
-version = "3.5.3"
+version = "3.5.4"
description = "Python documentation generator"
category = "dev"
optional = false
@@ -1130,7 +1160,7 @@ python-versions = ">=3.5"
alabaster = ">=0.7,<0.8"
babel = ">=1.3"
colorama = {version = ">=0.3.5", markers = "sys_platform == \"win32\""}
-docutils = ">=0.12"
+docutils = ">=0.12,<0.17"
imagesize = "*"
Jinja2 = ">=2.3"
packaging = "*"
@@ -1163,13 +1193,14 @@ sphinx = "*"
[[package]]
name = "sphinx-rtd-theme"
-version = "0.5.1"
+version = "0.5.2"
description = "Read the Docs theme for Sphinx"
category = "dev"
optional = false
python-versions = "*"
[package.dependencies]
+docutils = "<0.17"
sphinx = "*"
[package.extras]
@@ -1277,22 +1308,9 @@ category = "dev"
optional = false
python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
-[[package]]
-name = "tqdm"
-version = "4.60.0"
-description = "Fast, Extensible Progress Meter"
-category = "dev"
-optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
-
-[package.extras]
-dev = ["py-make (>=0.1.0)", "twine", "wheel"]
-notebook = ["ipywidgets (>=6)"]
-telegram = ["requests"]
-
[[package]]
name = "typed-ast"
-version = "1.4.2"
+version = "1.4.3"
description = "a fork of Python 2 and 3 ast modules with type comment support"
category = "dev"
optional = false
@@ -1399,7 +1417,7 @@ testing = ["pytest (>=4.6)", "pytest-checkdocs (>=1.2.3)", "pytest-flake8", "pyt
[metadata]
lock-version = "1.1"
python-versions = "3.7.*"
-content-hash = "a8c9915ce3096b74b9328a632911a759780844d368fa1d6d0fbd7c5d7d4536cf"
+content-hash = "80fde42aade7fc90bb68d85f0d9b3feb27fc3744d72eb5af6a11b6c9d9836aca"
[metadata.files]
acre = []
@@ -1463,8 +1481,8 @@ arrow = [
{file = "arrow-0.17.0.tar.gz", hash = "sha256:ff08d10cda1d36c68657d6ad20d74fbea493d980f8b2d45344e00d6ed2bf6ed4"},
]
astroid = [
- {file = "astroid-2.5.2-py3-none-any.whl", hash = "sha256:cd80bf957c49765dce6d92c43163ff9d2abc43132ce64d4b1b47717c6d2522df"},
- {file = "astroid-2.5.2.tar.gz", hash = "sha256:6b0ed1af831570e500e2437625979eaa3b36011f66ddfc4ce930128610258ca9"},
+ {file = "astroid-2.5.3-py3-none-any.whl", hash = "sha256:bea3f32799fbb8581f58431c12591bc20ce11cbc90ad82e2ea5717d94f2080d5"},
+ {file = "astroid-2.5.3.tar.gz", hash = "sha256:ad63b8552c70939568966811a088ef0bc880f99a24a00834abd0e3681b514f91"},
]
async-timeout = [
{file = "async-timeout-3.0.1.tar.gz", hash = "sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f"},
@@ -1630,30 +1648,49 @@ cryptography = [
{file = "cryptography-3.4.7.tar.gz", hash = "sha256:3d10de8116d25649631977cb37da6cbdd2d6fa0e0281d014a5b7d337255ca713"},
]
cx-freeze = [
- {file = "cx_Freeze-6.5.3-cp36-cp36m-win32.whl", hash = "sha256:0a1babae574546b622303da53e1a9829aa3a7e53e62b41eb260250220f83164b"},
- {file = "cx_Freeze-6.5.3-cp36-cp36m-win_amd64.whl", hash = "sha256:2671e46cd491c181c632df3f0df2847bad7066897faa07eb1d50f60f5082596f"},
- {file = "cx_Freeze-6.5.3-cp37-cp37m-win32.whl", hash = "sha256:abf5f95f914573cdff5bd9845144977b875fc655417d0e66f935865af1de64d5"},
- {file = "cx_Freeze-6.5.3-cp37-cp37m-win_amd64.whl", hash = "sha256:65c4560bc7b18e2a7bbe3546313cbc01d3fca244d199b39508cfa2ae561887ce"},
- {file = "cx_Freeze-6.5.3-cp38-cp38-win32.whl", hash = "sha256:7e2592fe1b65bd45c729934b391579fde5aed6b4c9e3e4d990738fc7fec718ea"},
- {file = "cx_Freeze-6.5.3-cp38-cp38-win_amd64.whl", hash = "sha256:d3bb71349dace28e545eb1e4549255f0dd915f925f8505b1a342b3d2fbd4734b"},
- {file = "cx_Freeze-6.5.3-cp39-cp39-win32.whl", hash = "sha256:df3872d8e8f87a3f89e6758bed130b5b95ee7473054e2a7eee5b1a8d1c4ecf9e"},
- {file = "cx_Freeze-6.5.3-cp39-cp39-win_amd64.whl", hash = "sha256:507bbaace2fd27edb0e6b024898ab2e4831d45d7238264f578a5e4fa70f065e5"},
- {file = "cx_Freeze-6.5.3.tar.gz", hash = "sha256:e0d03cabcdf9b9c21354807ed9f06fa9481a8fd5a0838968a830f01a70820ff1"},
+ {file = "cx_Freeze-6.6-cp36-cp36m-win32.whl", hash = "sha256:b3d3a6bcd1a07c50b4e1c907f14842642156110e63a99cd5c73b8a24751e9b97"},
+ {file = "cx_Freeze-6.6-cp36-cp36m-win_amd64.whl", hash = "sha256:1935266ec644ea4f7e584985f44cefc0622a449a09980d990833a1a2afcadac8"},
+ {file = "cx_Freeze-6.6-cp37-cp37m-win32.whl", hash = "sha256:1eac2b0f254319cc641ce25bd83337effd7936092562fde701f3ffb40e0274ec"},
+ {file = "cx_Freeze-6.6-cp37-cp37m-win_amd64.whl", hash = "sha256:2bc46ef6d510811b6002f34a3ae4cbfdea44e18644febd2a404d3ee8e48a9fc4"},
+ {file = "cx_Freeze-6.6-cp38-cp38-win32.whl", hash = "sha256:46eb50ebc46f7ae236d16c6a52671ab0f7bb479bea668da19f4b6de3cc413e9e"},
+ {file = "cx_Freeze-6.6-cp38-cp38-win_amd64.whl", hash = "sha256:8c3b00476ce385bb58595bffce55aed031e5a6e16ab6e14d8bee9d1d569e46c3"},
+ {file = "cx_Freeze-6.6-cp39-cp39-win32.whl", hash = "sha256:6e9340cbcf52d4836980ecc83ddba4f7704ff6654dd41168c146b74f512977ce"},
+ {file = "cx_Freeze-6.6-cp39-cp39-win_amd64.whl", hash = "sha256:2fcf1c8b77ae5c06f45be3a9aff79e1dd808c0d624e97561f840dec5ea9b214a"},
+ {file = "cx_Freeze-6.6.tar.gz", hash = "sha256:c4af8ad3f7e7d71e291c1dec5d0fb26bbe92df834b098ed35434c901fbd6762f"},
+]
+cx-logging = [
+ {file = "cx_Logging-3.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:9fcd297e5c51470521c47eff0f86ba844aeca6be97e13c3e2114ebdf03fa3c96"},
+ {file = "cx_Logging-3.0-cp36-cp36m-win32.whl", hash = "sha256:0df4be47c5022cc54316949e283403214568ef599817ced0c0972183d6d4fabb"},
+ {file = "cx_Logging-3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:203ca92ee7c15d5dfe1fcdfcef7b39d0123eba5c6d8c2388b6e7db6b961a5362"},
+ {file = "cx_Logging-3.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:20daa71b2a30f61d09bcf55dbda002c10f0c7c691f53cb393fc6485410fa2484"},
+ {file = "cx_Logging-3.0-cp37-cp37m-win32.whl", hash = "sha256:5be5f905e8d34a3326e28d428674cdc2d57912fdf6e25b8676d63f76294eb4e0"},
+ {file = "cx_Logging-3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:04e4b61e2636dc8ae135937655af6626362aefc7f6175e86888a244b61001823"},
+ {file = "cx_Logging-3.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:1bf0ebc79a7baa331c7deaf57088c234b82710286dfad453ff0c55eee0122b72"},
+ {file = "cx_Logging-3.0-cp38-cp38-win32.whl", hash = "sha256:d98a59a47e99fa430b3f6d2a979e27509852d2c43e204f43bd0168e7ec97f469"},
+ {file = "cx_Logging-3.0-cp38-cp38-win_amd64.whl", hash = "sha256:bb2e91019e5905415f795eef994de60ace5ae186fc4fe3d358e2d8feebb24992"},
+ {file = "cx_Logging-3.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:b6f4a9b750e02a180517f779d174a1c7db651981cd37e5623235b87da9774dfd"},
+ {file = "cx_Logging-3.0-cp39-cp39-win32.whl", hash = "sha256:e7cca28e8ee4082654b6062cc4d06f83d48f1a7e2d152bab020c9e3e373afb90"},
+ {file = "cx_Logging-3.0-cp39-cp39-win_amd64.whl", hash = "sha256:302e9c4f65a936c288a4fa59a90e7e142d9ef994aa29676731acafdcccdbb3f5"},
+ {file = "cx_Logging-3.0.tar.gz", hash = "sha256:ba8a7465facf7b98d8f494030fb481a2e8aeee29dc191e10383bb54ed42bdb34"},
]
dnspython = [
{file = "dnspython-2.1.0-py3-none-any.whl", hash = "sha256:95d12f6ef0317118d2a1a6fc49aac65ffec7eb8087474158f42f26a639135216"},
{file = "dnspython-2.1.0.zip", hash = "sha256:e4a87f0b573201a0f3727fa18a516b055fd1107e0e5477cded4a2de497df1dd4"},
]
docutils = [
- {file = "docutils-0.17-py2.py3-none-any.whl", hash = "sha256:a71042bb7207c03d5647f280427f14bfbd1a65c9eb84f4b341d85fafb6bb4bdf"},
- {file = "docutils-0.17.tar.gz", hash = "sha256:e2ffeea817964356ba4470efba7c2f42b6b0de0b04e66378507e3e2504bbff4c"},
+ {file = "docutils-0.16-py2.py3-none-any.whl", hash = "sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af"},
+ {file = "docutils-0.16.tar.gz", hash = "sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc"},
+]
+enlighten = [
+ {file = "enlighten-1.9.0-py2.py3-none-any.whl", hash = "sha256:5c59e41505702243c6b26437403e371d2a146ac72de5f706376f738ea8f32659"},
+ {file = "enlighten-1.9.0.tar.gz", hash = "sha256:539cc308ccc0c3bfb50feb1b2da94c1a1ac21e80fe95e984221de8966d48f428"},
]
evdev = [
{file = "evdev-1.4.0.tar.gz", hash = "sha256:8782740eb1a86b187334c07feb5127d3faa0b236e113206dfe3ae8f77fb1aaf1"},
]
flake8 = [
- {file = "flake8-3.9.0-py2.py3-none-any.whl", hash = "sha256:12d05ab02614b6aee8df7c36b97d1a3b2372761222b19b58621355e82acddcff"},
- {file = "flake8-3.9.0.tar.gz", hash = "sha256:78873e372b12b093da7b5e5ed302e8ad9e988b38b063b61ad937f26ca58fc5f0"},
+ {file = "flake8-3.9.1-py2.py3-none-any.whl", hash = "sha256:3b9f848952dddccf635be78098ca75010f073bfe14d2c6bda867154bea728d2a"},
+ {file = "flake8-3.9.1.tar.gz", hash = "sha256:1aa8990be1e689d96c745c5682b687ea49f2e05a443aff1f8251092b0014e378"},
]
ftrack-python-api = [
{file = "ftrack-python-api-2.0.0.tar.gz", hash = "sha256:dd6f02c31daf5a10078196dc9eac4671e4297c762fbbf4df98de668ac12281d9"},
@@ -1671,8 +1708,8 @@ google-api-python-client = [
{file = "google_api_python_client-1.12.8-py2.py3-none-any.whl", hash = "sha256:3c4c4ca46b5c21196bec7ee93453443e477d82cbfa79234d1ce0645f81170eaf"},
]
google-auth = [
- {file = "google-auth-1.28.0.tar.gz", hash = "sha256:9bd436d19ab047001a1340720d2b629eb96dd503258c524921ec2af3ee88a80e"},
- {file = "google_auth-1.28.0-py2.py3-none-any.whl", hash = "sha256:dcaba3aa9d4e0e96fd945bf25a86b6f878fcb05770b67adbeb50a63ca4d28a5e"},
+ {file = "google-auth-1.29.0.tar.gz", hash = "sha256:010f011c4e27d3d5eb01106fba6aac39d164842dfcd8709955c4638f5b11ccf8"},
+ {file = "google_auth-1.29.0-py2.py3-none-any.whl", hash = "sha256:f30a672a64d91cc2e3137765d088c5deec26416246f7a9e956eaf69a8d7ed49c"},
]
google-auth-httplib2 = [
{file = "google-auth-httplib2-0.1.0.tar.gz", hash = "sha256:a07c39fd632becacd3f07718dfd6021bf396978f03ad3ce4321d060015cc30ac"},
@@ -1695,8 +1732,8 @@ imagesize = [
{file = "imagesize-1.2.0.tar.gz", hash = "sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1"},
]
importlib-metadata = [
- {file = "importlib_metadata-3.10.0-py3-none-any.whl", hash = "sha256:d2d46ef77ffc85cbf7dac7e81dd663fde71c45326131bea8033b9bad42268ebe"},
- {file = "importlib_metadata-3.10.0.tar.gz", hash = "sha256:c9db46394197244adf2f0b08ec5bc3cf16757e9590b02af1fca085c16c0d600a"},
+ {file = "importlib_metadata-4.0.0-py3-none-any.whl", hash = "sha256:19192b88d959336bfa6bdaaaef99aeafec179eca19c47c804e555703ee5f07ef"},
+ {file = "importlib_metadata-4.0.0.tar.gz", hash = "sha256:2e881981c9748d7282b374b68e759c87745c25427b67ecf0cc67fb6637a1bff9"},
]
iniconfig = [
{file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"},
@@ -1719,8 +1756,8 @@ jinja2 = [
{file = "Jinja2-2.11.3.tar.gz", hash = "sha256:a6d58433de0ae800347cab1fa3043cebbabe8baa9d29e668f1c768cb87a333c6"},
]
jinxed = [
- {file = "jinxed-1.0.1-py2.py3-none-any.whl", hash = "sha256:602f2cb3523c1045456f7b6d79ac19297fd8e933ae3bd9159845dc857f2d519c"},
- {file = "jinxed-1.0.1.tar.gz", hash = "sha256:bc523c74fe676c99ccc69c68c2dcd7d4d2d7b2541f6dbef74ef211aedd8ad0d3"},
+ {file = "jinxed-1.1.0-py2.py3-none-any.whl", hash = "sha256:6a61ccf963c16aa885304f27e6e5693783676897cea0c7f223270c8b8e78baf8"},
+ {file = "jinxed-1.1.0.tar.gz", hash = "sha256:d8f1731f134e9e6b04d95095845ae6c10eb15cb223a5f0cabdea87d4a279c305"},
]
jsonschema = [
{file = "jsonschema-3.2.0-py2.py3-none-any.whl", hash = "sha256:4e5b3cf8216f577bee9ce139cbe72eca3ea4f292ec60928ff24758ce626cd163"},
@@ -1906,27 +1943,31 @@ pluggy = [
{file = "pluggy-0.13.1-py2.py3-none-any.whl", hash = "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"},
{file = "pluggy-0.13.1.tar.gz", hash = "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0"},
]
+prefixed = [
+ {file = "prefixed-0.3.2-py2.py3-none-any.whl", hash = "sha256:5e107306462d63f2f03c529dbf11b0026fdfec621a9a008ca639d71de22995c3"},
+ {file = "prefixed-0.3.2.tar.gz", hash = "sha256:ca48277ba5fa8346dd4b760847da930c7b84416387c39e93affef086add2c029"},
+]
protobuf = [
- {file = "protobuf-3.15.7-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a14141d5c967362d2eedff8825d2b69cc36a5b3ed6b1f618557a04e58a3cf787"},
- {file = "protobuf-3.15.7-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:d54d78f621852ec4fdd1484d1263ca04d4bf5ffdf7abffdbb939e444b6ff3385"},
- {file = "protobuf-3.15.7-cp35-cp35m-macosx_10_9_intel.whl", hash = "sha256:462085acdb410b06335315fe7e63cb281a1902856e0f4657f341c283cedc1d56"},
- {file = "protobuf-3.15.7-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:849c92ce112e1ef648705c29ce044248e350f71d9d54a2026830623198f0bd38"},
- {file = "protobuf-3.15.7-cp35-cp35m-win32.whl", hash = "sha256:1f6083382f7714700deadf3014e921711e2f807de7f27e40c32b744701ae5b99"},
- {file = "protobuf-3.15.7-cp35-cp35m-win_amd64.whl", hash = "sha256:e17f60f00081adcb32068ee0bb51e418f6474acf83424244ff3512ffd2166385"},
- {file = "protobuf-3.15.7-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:6c75e563c6fb2ca5b8f21dd75c15659aa2c4a0025b9da3a7711ae661cd6a488d"},
- {file = "protobuf-3.15.7-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:d939f41b4108350841c4790ebbadb61729e1363522fdb8434eb4e6f2065d0db1"},
- {file = "protobuf-3.15.7-cp36-cp36m-win32.whl", hash = "sha256:24f14c09d4c0a3641f1b0e9b552d026361de65b01686fdd3e5fdf8f9512cd79b"},
- {file = "protobuf-3.15.7-cp36-cp36m-win_amd64.whl", hash = "sha256:1247170191bcb2a8d978d11a58afe391004ec6c2184e4d961baf8102d43ff500"},
- {file = "protobuf-3.15.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:364cadaeec0756afdc099cbd88cb5659bd1bb7d547168d063abcb0272ccbb2f6"},
- {file = "protobuf-3.15.7-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:0c3a6941b1e6e6e22d812a8e5c46bfe83082ea60d262a46f2cfb22d9b9fb17db"},
- {file = "protobuf-3.15.7-cp37-cp37m-win32.whl", hash = "sha256:eb5668f3f6a83b6603ca2e09be5b20de89521ea5914aabe032cce981e4129cc8"},
- {file = "protobuf-3.15.7-cp37-cp37m-win_amd64.whl", hash = "sha256:1001e671cf8476edce7fb72778358d026390649cc35a79d47b2a291684ccfbb2"},
- {file = "protobuf-3.15.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a5ba7dd6f97964655aa7b234c95d80886425a31b7010764f042cdeb985314d18"},
- {file = "protobuf-3.15.7-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:46674bd6fcf8c63b4b9869ba579685db67cf51ae966443dd6bd9a8fa00fcef62"},
- {file = "protobuf-3.15.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4c4399156fb27e3768313b7a59352c861a893252bda6fb9f3643beb3ebb7047e"},
- {file = "protobuf-3.15.7-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:85cd29faf056036167d87445d5a5059034c298881c044e71a73d3b61a4be1c23"},
- {file = "protobuf-3.15.7-py2.py3-none-any.whl", hash = "sha256:22054432b923c0086f9cf1e1c0c52d39bf3c6e31014ea42eec2dabc22ee26d78"},
- {file = "protobuf-3.15.7.tar.gz", hash = "sha256:2d03fc2591543cd2456d0b72230b50c4519546a8d379ac6fd3ecd84c6df61e5d"},
+ {file = "protobuf-3.15.8-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:fad4f971ec38d8df7f4b632c819bf9bbf4f57cfd7312cf526c69ce17ef32436a"},
+ {file = "protobuf-3.15.8-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:f17b352d7ce33c81773cf81d536ca70849de6f73c96413f17309f4b43ae7040b"},
+ {file = "protobuf-3.15.8-cp35-cp35m-macosx_10_9_intel.whl", hash = "sha256:4a054b0b5900b7ea7014099e783fb8c4618e4209fffcd6050857517b3f156e18"},
+ {file = "protobuf-3.15.8-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:efa4c4d4fc9ba734e5e85eaced70e1b63fb3c8d08482d839eb838566346f1737"},
+ {file = "protobuf-3.15.8-cp35-cp35m-win32.whl", hash = "sha256:07eec4e2ccbc74e95bb9b3afe7da67957947ee95bdac2b2e91b038b832dd71f0"},
+ {file = "protobuf-3.15.8-cp35-cp35m-win_amd64.whl", hash = "sha256:f9cadaaa4065d5dd4d15245c3b68b967b3652a3108e77f292b58b8c35114b56c"},
+ {file = "protobuf-3.15.8-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:2dc0e8a9e4962207bdc46a365b63a3f1aca6f9681a5082a326c5837ef8f4b745"},
+ {file = "protobuf-3.15.8-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:f80afc0a0ba13339bbab25ca0409e9e2836b12bb012364c06e97c2df250c3343"},
+ {file = "protobuf-3.15.8-cp36-cp36m-win32.whl", hash = "sha256:c5566f956a26cda3abdfacc0ca2e21db6c9f3d18f47d8d4751f2209d6c1a5297"},
+ {file = "protobuf-3.15.8-cp36-cp36m-win_amd64.whl", hash = "sha256:dab75b56a12b1ceb3e40808b5bd9dfdaef3a1330251956e6744e5b6ed8f8830b"},
+ {file = "protobuf-3.15.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3053f13207e7f13dc7be5e9071b59b02020172f09f648e85dc77e3fcb50d1044"},
+ {file = "protobuf-3.15.8-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:1f0b5d156c3df08cc54bc2c8b8b875648ea4cd7ebb2a9a130669f7547ec3488c"},
+ {file = "protobuf-3.15.8-cp37-cp37m-win32.whl", hash = "sha256:90270fe5732c1f1ff664a3bd7123a16456d69b4e66a09a139a00443a32f210b8"},
+ {file = "protobuf-3.15.8-cp37-cp37m-win_amd64.whl", hash = "sha256:f42c2f5fb67da5905bfc03733a311f72fa309252bcd77c32d1462a1ad519521e"},
+ {file = "protobuf-3.15.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f6077db37bfa16494dca58a4a02bfdacd87662247ad6bc1f7f8d13ff3f0013e1"},
+ {file = "protobuf-3.15.8-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:510e66491f1a5ac5953c908aa8300ec47f793130097e4557482803b187a8ee05"},
+ {file = "protobuf-3.15.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5ff9fa0e67fcab442af9bc8d4ec3f82cb2ff3be0af62dba047ed4187f0088b7d"},
+ {file = "protobuf-3.15.8-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:1c0e9e56202b9dccbc094353285a252e2b7940b74fdf75f1b4e1b137833fabd7"},
+ {file = "protobuf-3.15.8-py2.py3-none-any.whl", hash = "sha256:a0a08c6b2e6d6c74a6eb5bf6184968eefb1569279e78714e239d33126e753403"},
+ {file = "protobuf-3.15.8.tar.gz", hash = "sha256:0277f62b1e42210cafe79a71628c1d553348da81cbd553402a7f7549c50b11d0"},
]
py = [
{file = "py-1.10.0-py2.py3-none-any.whl", hash = "sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a"},
@@ -2208,16 +2249,16 @@ speedcopy = [
{file = "speedcopy-2.1.0.tar.gz", hash = "sha256:8bb1a6c735900b83901a7be84ba2175ed3887c13c6786f97dea48f2ea7d504c2"},
]
sphinx = [
- {file = "Sphinx-3.5.3-py3-none-any.whl", hash = "sha256:3f01732296465648da43dec8fb40dc451ba79eb3e2cc5c6d79005fd98197107d"},
- {file = "Sphinx-3.5.3.tar.gz", hash = "sha256:ce9c228456131bab09a3d7d10ae58474de562a6f79abb3dc811ae401cf8c1abc"},
+ {file = "Sphinx-3.5.4-py3-none-any.whl", hash = "sha256:2320d4e994a191f4b4be27da514e46b3d6b420f2ff895d064f52415d342461e8"},
+ {file = "Sphinx-3.5.4.tar.gz", hash = "sha256:19010b7b9fa0dc7756a6e105b2aacd3a80f798af3c25c273be64d7beeb482cb1"},
]
sphinx-qt-documentation = [
{file = "sphinx_qt_documentation-0.3-py3-none-any.whl", hash = "sha256:bee247cb9e4fc03fc496d07adfdb943100e1103320c3e5e820e0cfa7c790d9b6"},
{file = "sphinx_qt_documentation-0.3.tar.gz", hash = "sha256:f09a0c9d9e989172ba3e282b92bf55613bb23ad47315ec5b0d38536b343ac6c8"},
]
sphinx-rtd-theme = [
- {file = "sphinx_rtd_theme-0.5.1-py2.py3-none-any.whl", hash = "sha256:fa6bebd5ab9a73da8e102509a86f3fcc36dec04a0b52ea80e5a033b2aba00113"},
- {file = "sphinx_rtd_theme-0.5.1.tar.gz", hash = "sha256:eda689eda0c7301a80cf122dad28b1861e5605cbf455558f3775e1e8200e83a5"},
+ {file = "sphinx_rtd_theme-0.5.2-py2.py3-none-any.whl", hash = "sha256:4a05bdbe8b1446d77a01e20a23ebc6777c74f43237035e76be89699308987d6f"},
+ {file = "sphinx_rtd_theme-0.5.2.tar.gz", hash = "sha256:32bd3b5d13dc8186d7a42fc816a23d32e83a4827d7d9882948e7b837c232da5a"},
]
sphinxcontrib-applehelp = [
{file = "sphinxcontrib-applehelp-1.0.2.tar.gz", hash = "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58"},
@@ -2254,41 +2295,37 @@ toml = [
{file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"},
{file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"},
]
-tqdm = [
- {file = "tqdm-4.60.0-py2.py3-none-any.whl", hash = "sha256:daec693491c52e9498632dfbe9ccfc4882a557f5fa08982db1b4d3adbe0887c3"},
- {file = "tqdm-4.60.0.tar.gz", hash = "sha256:ebdebdb95e3477ceea267decfc0784859aa3df3e27e22d23b83e9b272bf157ae"},
-]
typed-ast = [
- {file = "typed_ast-1.4.2-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:7703620125e4fb79b64aa52427ec192822e9f45d37d4b6625ab37ef403e1df70"},
- {file = "typed_ast-1.4.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:c9aadc4924d4b5799112837b226160428524a9a45f830e0d0f184b19e4090487"},
- {file = "typed_ast-1.4.2-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:9ec45db0c766f196ae629e509f059ff05fc3148f9ffd28f3cfe75d4afb485412"},
- {file = "typed_ast-1.4.2-cp35-cp35m-win32.whl", hash = "sha256:85f95aa97a35bdb2f2f7d10ec5bbdac0aeb9dafdaf88e17492da0504de2e6400"},
- {file = "typed_ast-1.4.2-cp35-cp35m-win_amd64.whl", hash = "sha256:9044ef2df88d7f33692ae3f18d3be63dec69c4fb1b5a4a9ac950f9b4ba571606"},
- {file = "typed_ast-1.4.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:c1c876fd795b36126f773db9cbb393f19808edd2637e00fd6caba0e25f2c7b64"},
- {file = "typed_ast-1.4.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:5dcfc2e264bd8a1db8b11a892bd1647154ce03eeba94b461effe68790d8b8e07"},
- {file = "typed_ast-1.4.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:8db0e856712f79c45956da0c9a40ca4246abc3485ae0d7ecc86a20f5e4c09abc"},
- {file = "typed_ast-1.4.2-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:d003156bb6a59cda9050e983441b7fa2487f7800d76bdc065566b7d728b4581a"},
- {file = "typed_ast-1.4.2-cp36-cp36m-win32.whl", hash = "sha256:4c790331247081ea7c632a76d5b2a265e6d325ecd3179d06e9cf8d46d90dd151"},
- {file = "typed_ast-1.4.2-cp36-cp36m-win_amd64.whl", hash = "sha256:d175297e9533d8d37437abc14e8a83cbc68af93cc9c1c59c2c292ec59a0697a3"},
- {file = "typed_ast-1.4.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cf54cfa843f297991b7388c281cb3855d911137223c6b6d2dd82a47ae5125a41"},
- {file = "typed_ast-1.4.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:b4fcdcfa302538f70929eb7b392f536a237cbe2ed9cba88e3bf5027b39f5f77f"},
- {file = "typed_ast-1.4.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:987f15737aba2ab5f3928c617ccf1ce412e2e321c77ab16ca5a293e7bbffd581"},
- {file = "typed_ast-1.4.2-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:37f48d46d733d57cc70fd5f30572d11ab8ed92da6e6b28e024e4a3edfb456e37"},
- {file = "typed_ast-1.4.2-cp37-cp37m-win32.whl", hash = "sha256:36d829b31ab67d6fcb30e185ec996e1f72b892255a745d3a82138c97d21ed1cd"},
- {file = "typed_ast-1.4.2-cp37-cp37m-win_amd64.whl", hash = "sha256:8368f83e93c7156ccd40e49a783a6a6850ca25b556c0fa0240ed0f659d2fe496"},
- {file = "typed_ast-1.4.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:963c80b583b0661918718b095e02303d8078950b26cc00b5e5ea9ababe0de1fc"},
- {file = "typed_ast-1.4.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:e683e409e5c45d5c9082dc1daf13f6374300806240719f95dc783d1fc942af10"},
- {file = "typed_ast-1.4.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:84aa6223d71012c68d577c83f4e7db50d11d6b1399a9c779046d75e24bed74ea"},
- {file = "typed_ast-1.4.2-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:a38878a223bdd37c9709d07cd357bb79f4c760b29210e14ad0fb395294583787"},
- {file = "typed_ast-1.4.2-cp38-cp38-win32.whl", hash = "sha256:a2c927c49f2029291fbabd673d51a2180038f8cd5a5b2f290f78c4516be48be2"},
- {file = "typed_ast-1.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:c0c74e5579af4b977c8b932f40a5464764b2f86681327410aa028a22d2f54937"},
- {file = "typed_ast-1.4.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:07d49388d5bf7e863f7fa2f124b1b1d89d8aa0e2f7812faff0a5658c01c59aa1"},
- {file = "typed_ast-1.4.2-cp39-cp39-manylinux1_i686.whl", hash = "sha256:240296b27397e4e37874abb1df2a608a92df85cf3e2a04d0d4d61055c8305ba6"},
- {file = "typed_ast-1.4.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:d746a437cdbca200622385305aedd9aef68e8a645e385cc483bdc5e488f07166"},
- {file = "typed_ast-1.4.2-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:14bf1522cdee369e8f5581238edac09150c765ec1cb33615855889cf33dcb92d"},
- {file = "typed_ast-1.4.2-cp39-cp39-win32.whl", hash = "sha256:cc7b98bf58167b7f2db91a4327da24fb93368838eb84a44c472283778fc2446b"},
- {file = "typed_ast-1.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:7147e2a76c75f0f64c4319886e7639e490fee87c9d25cb1d4faef1d8cf83a440"},
- {file = "typed_ast-1.4.2.tar.gz", hash = "sha256:9fc0b3cb5d1720e7141d103cf4819aea239f7d136acf9ee4a69b047b7986175a"},
+ {file = "typed_ast-1.4.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:2068531575a125b87a41802130fa7e29f26c09a2833fea68d9a40cf33902eba6"},
+ {file = "typed_ast-1.4.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:c907f561b1e83e93fad565bac5ba9c22d96a54e7ea0267c708bffe863cbe4075"},
+ {file = "typed_ast-1.4.3-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:1b3ead4a96c9101bef08f9f7d1217c096f31667617b58de957f690c92378b528"},
+ {file = "typed_ast-1.4.3-cp35-cp35m-win32.whl", hash = "sha256:dde816ca9dac1d9c01dd504ea5967821606f02e510438120091b84e852367428"},
+ {file = "typed_ast-1.4.3-cp35-cp35m-win_amd64.whl", hash = "sha256:777a26c84bea6cd934422ac2e3b78863a37017618b6e5c08f92ef69853e765d3"},
+ {file = "typed_ast-1.4.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f8afcf15cc511ada719a88e013cec87c11aff7b91f019295eb4530f96fe5ef2f"},
+ {file = "typed_ast-1.4.3-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:52b1eb8c83f178ab787f3a4283f68258525f8d70f778a2f6dd54d3b5e5fb4341"},
+ {file = "typed_ast-1.4.3-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:01ae5f73431d21eead5015997ab41afa53aa1fbe252f9da060be5dad2c730ace"},
+ {file = "typed_ast-1.4.3-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:c190f0899e9f9f8b6b7863debfb739abcb21a5c054f911ca3596d12b8a4c4c7f"},
+ {file = "typed_ast-1.4.3-cp36-cp36m-win32.whl", hash = "sha256:398e44cd480f4d2b7ee8d98385ca104e35c81525dd98c519acff1b79bdaac363"},
+ {file = "typed_ast-1.4.3-cp36-cp36m-win_amd64.whl", hash = "sha256:bff6ad71c81b3bba8fa35f0f1921fb24ff4476235a6e94a26ada2e54370e6da7"},
+ {file = "typed_ast-1.4.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0fb71b8c643187d7492c1f8352f2c15b4c4af3f6338f21681d3681b3dc31a266"},
+ {file = "typed_ast-1.4.3-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:760ad187b1041a154f0e4d0f6aae3e40fdb51d6de16e5c99aedadd9246450e9e"},
+ {file = "typed_ast-1.4.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:5feca99c17af94057417d744607b82dd0a664fd5e4ca98061480fd8b14b18d04"},
+ {file = "typed_ast-1.4.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:95431a26309a21874005845c21118c83991c63ea800dd44843e42a916aec5899"},
+ {file = "typed_ast-1.4.3-cp37-cp37m-win32.whl", hash = "sha256:aee0c1256be6c07bd3e1263ff920c325b59849dc95392a05f258bb9b259cf39c"},
+ {file = "typed_ast-1.4.3-cp37-cp37m-win_amd64.whl", hash = "sha256:9ad2c92ec681e02baf81fdfa056fe0d818645efa9af1f1cd5fd6f1bd2bdfd805"},
+ {file = "typed_ast-1.4.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b36b4f3920103a25e1d5d024d155c504080959582b928e91cb608a65c3a49e1a"},
+ {file = "typed_ast-1.4.3-cp38-cp38-manylinux1_i686.whl", hash = "sha256:067a74454df670dcaa4e59349a2e5c81e567d8d65458d480a5b3dfecec08c5ff"},
+ {file = "typed_ast-1.4.3-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7538e495704e2ccda9b234b82423a4038f324f3a10c43bc088a1636180f11a41"},
+ {file = "typed_ast-1.4.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:af3d4a73793725138d6b334d9d247ce7e5f084d96284ed23f22ee626a7b88e39"},
+ {file = "typed_ast-1.4.3-cp38-cp38-win32.whl", hash = "sha256:f2362f3cb0f3172c42938946dbc5b7843c2a28aec307c49100c8b38764eb6927"},
+ {file = "typed_ast-1.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:dd4a21253f42b8d2b48410cb31fe501d32f8b9fbeb1f55063ad102fe9c425e40"},
+ {file = "typed_ast-1.4.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f328adcfebed9f11301eaedfa48e15bdece9b519fb27e6a8c01aa52a17ec31b3"},
+ {file = "typed_ast-1.4.3-cp39-cp39-manylinux1_i686.whl", hash = "sha256:2c726c276d09fc5c414693a2de063f521052d9ea7c240ce553316f70656c84d4"},
+ {file = "typed_ast-1.4.3-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:cae53c389825d3b46fb37538441f75d6aecc4174f615d048321b716df2757fb0"},
+ {file = "typed_ast-1.4.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:b9574c6f03f685070d859e75c7f9eeca02d6933273b5e69572e5ff9d5e3931c3"},
+ {file = "typed_ast-1.4.3-cp39-cp39-win32.whl", hash = "sha256:209596a4ec71d990d71d5e0d312ac935d86930e6eecff6ccc7007fe54d703808"},
+ {file = "typed_ast-1.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:9c6d1a54552b5330bc657b7ef0eae25d00ba7ffe85d9ea8ae6540d2197a3788c"},
+ {file = "typed_ast-1.4.3.tar.gz", hash = "sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65"},
]
typing-extensions = [
{file = "typing_extensions-3.7.4.3-py2-none-any.whl", hash = "sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f"},
diff --git a/pyproject.toml b/pyproject.toml
index 6df6db5a18..12b9c4446d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "OpenPype"
-version = "3.0.0-alpha1"
+version = "3.0.0-beta2"
description = "Multi-platform open-source pipeline built around the Avalon platform, expanding it with extra features and integrations."
authors = ["OpenPype Team "]
license = "MIT License"
@@ -43,12 +43,13 @@ jinxed = [
{ version = "^1.0.1", markers = "sys_platform == 'linux'" }
]
python3-xlib = { version="*", markers = "sys_platform == 'linux'"}
+enlighten = "^1.9.0"
[tool.poetry.dev-dependencies]
flake8 = "^3.7"
autopep8 = "^1.4"
coverage = "*"
-cx_freeze = "^6.5"
+cx_freeze = "^6.6"
jedi = "^0.13"
Jinja2 = "^2.11"
pycodestyle = "^2.5.0"
@@ -62,8 +63,8 @@ sphinx-rtd-theme = "*"
sphinxcontrib-websupport = "*"
sphinx-qt-documentation = "*"
recommonmark = "*"
-tqdm = "*"
wheel = "*"
+enlighten = "*" # cool terminal progress bars
[tool.poetry.urls]
"Bug Tracker" = "https://github.com/pypeclub/openpype/issues"
@@ -76,3 +77,29 @@ url = "https://distribute.openpype.io/wheels/"
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
+
+[openpype]
+
+[openpype.thirdparty.ffmpeg.windows]
+url = "https://distribute.openpype.io/thirdparty/ffmpeg-4.4-windows.zip"
+hash = "dd51ba29d64ee238e7c4c3c7301b19754c3f0ee2e2a729c20a0e2789e72db925"
+
+[openpype.thirdparty.ffmpeg.linux]
+url = "https://distribute.openpype.io/thirdparty/ffmpeg-4.4-linux.tgz"
+hash = "10b9beda57cfbb69b9ed0ce896c0c8d99227b26ca8b9f611040c4752e365cbe9"
+
+[openpype.thirdparty.ffmpeg.darwin]
+url = "https://distribute.openpype.io/thirdparty/ffmpeg-4.4-macos.tgz"
+hash = "95f43568338c275f80dc0cab1e1836a2e2270f856f0e7b204440d881dd74fbdb"
+
+[openpype.thirdparty.oiio.windows]
+url = "https://distribute.openpype.io/thirdparty/oiio_tools-2.2.0-windows.zip"
+hash = "fd2e00278e01e85dcee7b4a6969d1a16f13016ec16700fb0366dbb1b1f3c37ad"
+
+[openpype.thirdparty.oiio.linux]
+url = "https://distribute.openpype.io/thirdparty/oiio-2.2.0-linux.tgz"
+hash = "sha256:..."
+
+[openpype.thirdparty.oiio.darwin]
+url = "https://distribute.openpype.io/thirdparty/oiio-2.2.0-darwin.tgz"
+hash = "sha256:..."
\ No newline at end of file
diff --git a/repos/avalon-core b/repos/avalon-core
index bbba8765c4..911bd8999a 160000
--- a/repos/avalon-core
+++ b/repos/avalon-core
@@ -1 +1 @@
-Subproject commit bbba8765c431ee124590e4f12d2e56db4d62eacd
+Subproject commit 911bd8999ab0030d0f7412dde6fd545c1a73b62d
diff --git a/start.py b/start.py
index a892d3de8e..a2a03f112c 100644
--- a/start.py
+++ b/start.py
@@ -100,10 +100,20 @@ import subprocess
import site
from pathlib import Path
-# add dependencies folder to sys.pat for frozen code
-if getattr(sys, 'frozen', False):
+# OPENPYPE_ROOT is variable pointing to build (or code) directory
+# WARNING `OPENPYPE_ROOT` must be defined before igniter import
+# - igniter changes cwd, which causes the filepath of this script to no
+# longer lead to the right directory
+if not getattr(sys, 'frozen', False):
+ # Code root defined by `start.py` directory
+ OPENPYPE_ROOT = os.path.dirname(os.path.abspath(__file__))
+else:
+ OPENPYPE_ROOT = os.path.dirname(sys.executable)
+
+    # add dependencies folder to sys.path for frozen code
frozen_libs = os.path.normpath(
- os.path.join(os.path.dirname(sys.executable), "dependencies"))
+ os.path.join(OPENPYPE_ROOT, "dependencies")
+ )
sys.path.append(frozen_libs)
# add stuff from `/dependencies` to PYTHONPATH.
pythonpath = os.getenv("PYTHONPATH", "")
@@ -316,23 +326,25 @@ def _determine_mongodb() -> str:
def _initialize_environment(openpype_version: OpenPypeVersion) -> None:
version_path = openpype_version.path
os.environ["OPENPYPE_VERSION"] = openpype_version.version
- # set OPENPYPE_ROOT to point to currently used OpenPype version.
- os.environ["OPENPYPE_ROOT"] = os.path.normpath(version_path.as_posix())
+ # set OPENPYPE_REPOS_ROOT to point to currently used OpenPype version.
+ os.environ["OPENPYPE_REPOS_ROOT"] = os.path.normpath(
+ version_path.as_posix()
+ )
# inject version to Python environment (sys.path, ...)
print(">>> Injecting OpenPype version to running environment ...")
bootstrap.add_paths_from_directory(version_path)
- # Additional sys paths related to OPENPYPE_ROOT directory
- # TODO move additional paths to `boot` part when OPENPYPE_ROOT will point
- # to same hierarchy from code and from frozen OpenPype
+ # Additional sys paths related to OPENPYPE_REPOS_ROOT directory
+ # TODO move additional paths to `boot` part when OPENPYPE_REPOS_ROOT will
+ # point to same hierarchy from code and from frozen OpenPype
additional_paths = [
- os.environ["OPENPYPE_ROOT"],
+ os.environ["OPENPYPE_REPOS_ROOT"],
# add OpenPype tools
- os.path.join(os.environ["OPENPYPE_ROOT"], "openpype", "tools"),
+ os.path.join(os.environ["OPENPYPE_REPOS_ROOT"], "openpype", "tools"),
# add common OpenPype vendor
# (common for multiple Python interpreter versions)
os.path.join(
- os.environ["OPENPYPE_ROOT"],
+ os.environ["OPENPYPE_REPOS_ROOT"],
"openpype",
"vendor",
"python",
@@ -353,7 +365,7 @@ def _find_frozen_openpype(use_version: str = None,
"""Find OpenPype to run from frozen code.
This will process and modify environment variables:
- ``PYTHONPATH``, ``OPENPYPE_VERSION``, ``OPENPYPE_ROOT``
+ ``PYTHONPATH``, ``OPENPYPE_VERSION``, ``OPENPYPE_REPOS_ROOT``
Args:
use_version (str, optional): Try to use specified version.
@@ -465,16 +477,10 @@ def _bootstrap_from_code(use_version):
# run through repos and add them to `sys.path` and `PYTHONPATH`
# set root
if getattr(sys, 'frozen', False):
- openpype_root = os.path.normpath(
- os.path.dirname(sys.executable))
- local_version = bootstrap.get_version(Path(openpype_root))
+ local_version = bootstrap.get_version(Path(OPENPYPE_ROOT))
print(f" - running version: {local_version}")
assert local_version
else:
- openpype_root = os.path.normpath(
- os.path.dirname(
- os.path.dirname(
- os.path.realpath(igniter.__file__))))
# get current version of OpenPype
local_version = bootstrap.get_local_live_version()
@@ -488,15 +494,18 @@ def _bootstrap_from_code(use_version):
bootstrap.add_paths_from_directory(version_path)
os.environ["OPENPYPE_VERSION"] = use_version
else:
- version_path = openpype_root
- os.environ["OPENPYPE_ROOT"] = openpype_root
- repos = os.listdir(os.path.join(openpype_root, "repos"))
- repos = [os.path.join(openpype_root, "repos", repo) for repo in repos]
+ version_path = OPENPYPE_ROOT
+
+ repos = os.listdir(os.path.join(OPENPYPE_ROOT, "repos"))
+ repos = [os.path.join(OPENPYPE_ROOT, "repos", repo) for repo in repos]
# add self to python paths
- repos.insert(0, openpype_root)
+ repos.insert(0, OPENPYPE_ROOT)
for repo in repos:
sys.path.insert(0, repo)
+ # Set OPENPYPE_REPOS_ROOT to code root
+ os.environ["OPENPYPE_REPOS_ROOT"] = OPENPYPE_ROOT
+
# add venv 'site-packages' to PYTHONPATH
python_path = os.getenv("PYTHONPATH", "")
split_paths = python_path.split(os.pathsep)
@@ -507,15 +516,15 @@ def _bootstrap_from_code(use_version):
# in case when we are running without any version installed.
if not getattr(sys, 'frozen', False):
split_paths.append(site.getsitepackages()[-1])
- # TODO move additional paths to `boot` part when OPENPYPE_ROOT will point
- # to same hierarchy from code and from frozen OpenPype
+ # TODO move additional paths to `boot` part when OPENPYPE_ROOT will
+ # point to same hierarchy from code and from frozen OpenPype
additional_paths = [
# add OpenPype tools
- os.path.join(os.environ["OPENPYPE_ROOT"], "openpype", "tools"),
+ os.path.join(OPENPYPE_ROOT, "openpype", "tools"),
# add common OpenPype vendor
# (common for multiple Python interpreter versions)
os.path.join(
- os.environ["OPENPYPE_ROOT"],
+ OPENPYPE_ROOT,
"openpype",
"vendor",
"python",
@@ -534,6 +543,11 @@ def _bootstrap_from_code(use_version):
def boot():
"""Bootstrap OpenPype."""
+ # ------------------------------------------------------------------------
+ # Set environment to OpenPype root path
+ # ------------------------------------------------------------------------
+ os.environ["OPENPYPE_ROOT"] = OPENPYPE_ROOT
+
# ------------------------------------------------------------------------
# Play animation
# ------------------------------------------------------------------------
@@ -564,16 +578,6 @@ def boot():
os.environ["OPENPYPE_MONGO"] = openpype_mongo
os.environ["OPENPYPE_DATABASE_NAME"] = "openpype" # name of Pype database
- # ------------------------------------------------------------------------
- # Set environments - load OpenPype path from database (if set)
- # ------------------------------------------------------------------------
- # set OPENPYPE_ROOT to running location until proper version can be
- # determined.
- if getattr(sys, 'frozen', False):
- os.environ["OPENPYPE_ROOT"] = os.path.dirname(sys.executable)
- else:
- os.environ["OPENPYPE_ROOT"] = os.path.dirname(__file__)
-
# Get openpype path from database and set it to environment so openpype can
# find its versions there and bootstrap them.
openpype_path = get_openpype_path_from_db(openpype_mongo)
@@ -586,7 +590,7 @@ def boot():
# ------------------------------------------------------------------------
# Find OpenPype versions
# ------------------------------------------------------------------------
- # WARNING: Environment OPENPYPE_ROOT may change if frozen OpenPype
+ # WARNING: Environment OPENPYPE_REPOS_ROOT may change if frozen OpenPype
# is executed
if getattr(sys, 'frozen', False):
# find versions of OpenPype to be used with frozen code
@@ -603,12 +607,6 @@ def boot():
# or to `openpype` or `openpype_console` in case of frozen code
os.environ["OPENPYPE_EXECUTABLE"] = sys.executable
- if getattr(sys, 'frozen', False):
- os.environ["OPENPYPE_REPOS_ROOT"] = os.environ["OPENPYPE_ROOT"]
- else:
- os.environ["OPENPYPE_REPOS_ROOT"] = os.path.join(
- os.environ["OPENPYPE_ROOT"], "repos")
-
# delete OpenPype module and it's submodules from cache so it is used from
# specific version
modules_to_del = [
@@ -678,7 +676,9 @@ def get_info() -> list:
inf.append(("OpenPype variant", "staging"))
else:
inf.append(("OpenPype variant", "production"))
- inf.append(("Running OpenPype from", os.environ.get('OPENPYPE_ROOT')))
+ inf.append(
+ ("Running OpenPype from", os.environ.get('OPENPYPE_REPOS_ROOT'))
+ )
inf.append(("Using mongodb", components["host"]))
if os.environ.get("FTRACK_SERVER"):
diff --git a/tools/fetch_thirdparty_libs.ps1 b/tools/fetch_thirdparty_libs.ps1
new file mode 100644
index 0000000000..d1b914fac2
--- /dev/null
+++ b/tools/fetch_thirdparty_libs.ps1
@@ -0,0 +1,20 @@
+<#
+.SYNOPSIS
+ Download and extract third-party dependencies for OpenPype.
+
+.DESCRIPTION
+ This will download third-party dependencies specified in pyproject.toml
+ and extract them to vendor/bin folder.
+
+.EXAMPLE
+
+PS> .\fetch_thirdparty_libs.ps1
+
+#>
+$current_dir = Get-Location
+$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent
+$openpype_root = (Get-Item $script_dir).parent.FullName
+Set-Location -Path $openpype_root
+
+& poetry run python "$($openpype_root)\tools\fetch_thirdparty_libs.py"
+Set-Location -Path $current_dir
diff --git a/tools/fetch_thirdparty_libs.py b/tools/fetch_thirdparty_libs.py
new file mode 100644
index 0000000000..75ee052950
--- /dev/null
+++ b/tools/fetch_thirdparty_libs.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+"""Fetch, verify and process third-party dependencies of OpenPype.
+
+Those should be defined in `pyproject.toml` in OpenPype sources root.
+
+"""
+import os
+import sys
+import toml
+import shutil
+from pathlib import Path
+from urllib.parse import urlparse
+import requests
+import enlighten
+import platform
+import blessed
+import tempfile
+import math
+import hashlib
+import tarfile
+import zipfile
+import time
+
+
+term = blessed.Terminal()
+manager = enlighten.get_manager()
+hash_buffer_size = 65536
+
+
+def sha256_sum(filename: Path):
+ """Calculate sha256 hash for given file.
+
+ Args:
+ filename (Path): path to file.
+
+ Returns:
+ str: hex hash.
+
+ """
+ _hash = hashlib.sha256()
+ with open(filename, 'rb', buffering=0) as f:
+ buffer = bytearray(128 * 1024)
+ mv = memoryview(buffer)
+ for n in iter(lambda: f.readinto(mv), 0):
+ _hash.update(mv[:n])
+ return _hash.hexdigest()
+
+
+def _print(msg: str, message_type: int = 0) -> None:
+ """Print message to console.
+
+ Args:
+ msg (str): message to print
+ message_type (int): type of message (0 info, 1 error, 2 note)
+
+ """
+ if message_type == 0:
+ header = term.aquamarine3(">>> ")
+ elif message_type == 1:
+ header = term.orangered2("!!! ")
+ elif message_type == 2:
+ header = term.tan1("... ")
+ else:
+ header = term.darkolivegreen3("--- ")
+
+ print("{}{}".format(header, msg))
+
+
+_print("Processing third-party dependencies ...")
+start_time = time.time_ns()
+openpype_root = Path(os.path.dirname(__file__)).parent
+pyproject = toml.load(openpype_root / "pyproject.toml")
+platform_name = platform.system().lower()
+
+try:
+ thirdparty = pyproject["openpype"]["thirdparty"]
+except AttributeError:
+ _print("No third-party libraries specified in pyproject.toml", 1)
+ sys.exit(1)
+
+for k, v in thirdparty.items():
+ _print(f"processing {k}")
+ destination_path = openpype_root / "vendor" / "bin" / k / platform_name
+ url = v.get(platform_name).get("url")
+
+ if not v.get(platform_name):
+ _print(("missing definition for current "
+ f"platform [ {platform_name} ]"), 1)
+ sys.exit(1)
+
+ parsed_url = urlparse(url)
+
+ # check if file is already extracted in /vendor/bin
+ if destination_path.exists():
+ _print("destination path already exists, deleting ...", 2)
+ if destination_path.is_dir():
+ try:
+ shutil.rmtree(destination_path)
+ except OSError as e:
+ _print("cannot delete folder.", 1)
+ raise SystemExit(e)
+
+ # download file
+ _print(f"Downloading {url} ...")
+ with tempfile.TemporaryDirectory() as temp_dir:
+ temp_file = Path(temp_dir) / Path(parsed_url.path).name
+
+ r = requests.get(url, stream=True)
+ content_len = int(r.headers.get('Content-Length', '0')) or None
+ with manager.counter(color='green',
+ total=content_len and math.ceil(content_len / 2 ** 20), # noqa: E501
+ unit='MiB', leave=False) as counter:
+ with open(temp_file, 'wb', buffering=2 ** 24) as file_handle:
+ for chunk in r.iter_content(chunk_size=2 ** 20):
+ file_handle.write(chunk)
+ counter.update()
+
+ # get file with checksum
+ _print("Calculating sha256 ...", 2)
+ calc_checksum = sha256_sum(temp_file)
+ if v.get(platform_name).get("hash") != calc_checksum:
+ _print("Downloaded files checksum invalid.")
+ sys.exit(1)
+
+ _print("File OK", 3)
+ if not destination_path.exists():
+ destination_path.mkdir(parents=True)
+
+ # extract to destination
+ archive_type = temp_file.suffix.lstrip(".")
+ _print(f"Extracting {archive_type} file to {destination_path}")
+ if archive_type in ['zip']:
+ zip_file = zipfile.ZipFile(temp_file)
+ zip_file.extractall(destination_path)
+ zip_file.close()
+
+ elif archive_type in [
+ 'tar', 'tgz', 'tar.gz', 'tar.xz', 'tar.bz2'
+ ]:
+ if archive_type == 'tar':
+ tar_type = 'r:'
+ elif archive_type.endswith('xz'):
+ tar_type = 'r:xz'
+ elif archive_type.endswith('gz'):
+ tar_type = 'r:gz'
+ elif archive_type.endswith('bz2'):
+ tar_type = 'r:bz2'
+ else:
+ tar_type = 'r:*'
+ try:
+ tar_file = tarfile.open(temp_file, tar_type)
+ except tarfile.ReadError:
+ raise SystemExit("corrupted archive")
+ tar_file.extractall(destination_path)
+ tar_file.close()
+ _print("Extraction OK", 3)
+
+end_time = time.time_ns()
+total_time = (end_time - start_time) / 1000000000
+_print(f"Downloading and extracting took {total_time} secs.")
diff --git a/tools/fetch_thirdparty_libs.sh b/tools/fetch_thirdparty_libs.sh
new file mode 100755
index 0000000000..e305b4b3e4
--- /dev/null
+++ b/tools/fetch_thirdparty_libs.sh
@@ -0,0 +1,129 @@
+#!/usr/bin/env bash
+
+# Fetch third-party libraries used by Pype
+
+
+# Print the PYPE ASCII-art banner to stdout.
+art () {
+  cat <<-EOF
+             . .   ..     .    ..
+        _v.  .   ..   .
+  .__._v ___  ______ ______
+ /\\ \\ \\/_\\ \\
+  \\ \\ _____/  ______ ___ ___ ___
+   \\ \\ \\___/ /\\ \\  \\ \\\\ \\\\ \\
+    \\ \\____\\ \\ \\_____\\ \\__\\\\__\\\\__\\
+     \\/____/  \\/_____/ . PYPE Club .
+
+EOF
+}
+
+# Colors for terminal
+
+RST='\033[0m' # Text Reset
+
+# Regular Colors
+Black='\033[0;30m' # Black
+Red='\033[0;31m' # Red
+Green='\033[0;32m' # Green
+Yellow='\033[0;33m' # Yellow
+Blue='\033[0;34m' # Blue
+Purple='\033[0;35m' # Purple
+Cyan='\033[0;36m' # Cyan
+White='\033[0;37m' # White
+
+# Bold
+BBlack='\033[1;30m' # Black
+BRed='\033[1;31m' # Red
+BGreen='\033[1;32m' # Green
+BYellow='\033[1;33m' # Yellow
+BBlue='\033[1;34m' # Blue
+BPurple='\033[1;35m' # Purple
+BCyan='\033[1;36m' # Cyan
+BWhite='\033[1;37m' # White
+
+# Bold High Intensity
+BIBlack='\033[1;90m' # Black
+BIRed='\033[1;91m' # Red
+BIGreen='\033[1;92m' # Green
+BIYellow='\033[1;93m' # Yellow
+BIBlue='\033[1;94m' # Blue
+BIPurple='\033[1;95m' # Purple
+BICyan='\033[1;96m' # Cyan
+BIWhite='\033[1;97m' # White
+
+
+##############################################################################
+# Detect required version of python
+# Globals:
+# colors
+# PYTHON
+# Arguments:
+# None
+# Returns:
+# None
+###############################################################################
+detect_python () {
+ echo -e "${BIGreen}>>>${RST} Using python \c"
+ local version_command="import sys;print('{0}.{1}'.format(sys.version_info[0], sys.version_info[1]))"
+ local python_version="$(python3 <<< ${version_command})"
+ oIFS="$IFS"
+ IFS=.
+ set -- $python_version
+ IFS="$oIFS"
+ if [ "$1" -ge "3" ] && [ "$2" -ge "6" ] ; then
+ if [ "$2" -gt "7" ] ; then
+ echo -e "${BIWhite}[${RST} ${BIRed}$1.$2 ${BIWhite}]${RST} - ${BIRed}FAILED${RST} ${BIYellow}Version is new and unsupported, use${RST} ${BIPurple}3.7.x${RST}"; return 1;
+ else
+ echo -e "${BIWhite}[${RST} ${BIGreen}$1.$2${RST} ${BIWhite}]${RST}"
+ fi
+ PYTHON="python3"
+ else
+ command -v python3 >/dev/null 2>&1 || { echo -e "${BIRed}$1.$2$ - ${BIRed}FAILED${RST} ${BIYellow}Version is old and unsupported${RST}"; return 1; }
+ fi
+}
+
+##############################################################################
+# Clean pyc files in specified directory
+# Globals:
+# None
+# Arguments:
+# Optional path to clean
+# Returns:
+# None
+###############################################################################
+clean_pyc () {
+ local path
+ path=$pype_root
+ echo -e "${BIGreen}>>>${RST} Cleaning pyc at [ ${BIWhite}$path${RST} ] ... \c"
+ find "$path" -regex '^.*\(__pycache__\|\.py[co]\)$' -delete
+ echo -e "${BIGreen}DONE${RST}"
+}
+
+##############################################################################
+# Return absolute path
+# Globals:
+#   None
+# Arguments:
+#   Path to resolve
+# Returns:
+#   Absolute path echoed to stdout
+###############################################################################
+realpath () {
+  # Quote every expansion so paths containing spaces resolve correctly, and
+  # chain with && so a missing directory fails instead of echoing a path
+  # relative to the caller's CWD.
+  echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")"
+}
+
+# Main entry point: print banner, verify python, then run the fetch tool
+# through poetry from the repository root.
+main () {
+  echo -e "${BGreen}"
+  art
+  echo -e "${RST}"
+  detect_python || return 1
+
+  # Directories — nested substitutions are quoted so a checkout path that
+  # contains spaces does not get word-split.
+  pype_root=$(realpath "$(dirname "$(dirname "${BASH_SOURCE[0]}")")")
+  pushd "$pype_root" > /dev/null || return > /dev/null
+
+  echo -e "${BIGreen}>>>${RST} Running Pype tool ..."
+  poetry run python3 "$pype_root/tools/fetch_thirdparty_libs.py"
+}
+
+main
\ No newline at end of file
diff --git a/website/docs/artist_hosts_blender.md b/website/docs/artist_hosts_blender.md
new file mode 100644
index 0000000000..877e99bff4
--- /dev/null
+++ b/website/docs/artist_hosts_blender.md
@@ -0,0 +1,226 @@
+---
+id: artist_hosts_blender
+title: Blender
+sidebar_label: Blender
+---
+
+## OpenPype global tools
+
+- [Set Context](artist_tools.md#set-context)
+- [Work Files](artist_tools.md#workfiles)
+- [Create](artist_tools.md#creator)
+- [Load](artist_tools.md#loader)
+- [Manage (Inventory)](artist_tools.md#inventory)
+- [Publish](artist_tools.md#publisher)
+- [Library Loader](artist_tools.md#library-loader)
+
+## Working with OpenPype in Blender
+
+OpenPype is here to ease the burden of working on projects with lots of
+collaborators, worrying about naming, setting stuff, browsing through endless
+directories, loading and exporting and so on. To achieve that, OpenPype uses the
+concept of being _"data driven"_. This means that what happens when publishing
+is influenced by data in the scene. This can be slightly confusing, so let's get
+into it with a few examples.
+
+
+## Setting scene data
+
+Blender settings concerning framerate, resolution and frame range are handled
+by OpenPype. If set correctly in Ftrack, Blender will automatically set the
+values for you.
+
+
+## Publishing models
+
+### Intro
+
+Publishing models in Blender is pretty straightforward. Create your model as you
+need. You might need to adhere to specifications of your studio that can be different
+between studios and projects but by default your geometry does not need any
+other convention.
+
+
+
+### Creating instance
+
+Now create **Model instance** from it to let OpenPype know what in the scene you want to
+publish. Go **OpenPype → Create... → Model**.
+
+
+
+`Asset` field is the name of the asset you are working on - it should already be
+filled with the correct name as you've started Blender or switched context to a
+specific asset. You can edit that field to change it to a different asset (but
+that one must already exist).
+
+`Subset` field is a name you can decide on. It should describe what kind of data you
+have in the model. For example, you can name it `Proxy` to indicate that this is
+low resolution stuff. See [Subset](artist_concepts#subset).
+
+
+
+The read-only field just under it shows the final subset name, which appends the
+subset field to the name of the group you have selected.
+
+`Use selection` checkbox will use whatever you have selected in Outliner to be
+wrapped in Model instance. This is usually what you want. Click on **Create** button.
+
+You'll notice then after you've created new Model instance, there is a new
+collection in Outliner called after your asset and subset, in our case it is
+`character1_modelDefault`. The assets selected when creating the Model instance
+are linked in the new collection.
+
+And that's it, you have your first model ready to publish.
+
+Now save your scene (if you didn't do it already). You will notice that path
+in Save dialog is already set to place where scenes related to modeling task on
+your asset should reside. As in our case we are working on asset called
+**character1** and on task **modeling**, path relative to your project directory will be
+`project_XY/assets/character1/work/modeling`. The default name for the file will
+be `project_XY_asset_task_version`, so in our case
+`simonetest_character1_modeling_v001.blend`. Let's save it.
+
+
+
+### Publishing models
+
+Now let's publish it. Go **OpenPype → Publish...**. You will be presented with following window:
+
+
+
+Note that the content of this window can differ based on your pipeline
+configuration. For more detail see [Publisher](artist_tools#publisher).
+
+Items in the left column are instances you will be publishing. You can disable
+them by clicking on the square next to them. A white filled square indicates the
+instance is ready for publishing, red means something went wrong either during
+the collection phase or the publishing phase. An empty one with gray text is disabled.
+
+See that in this case we are publishing from the scene file
+`simonetest_character1_modeling_v001.blend` the Blender model named
+`character1_modelDefault`.
+
+Right column lists all tasks that are run during collection, validation,
+extraction and integration phase. White items are optional and you can disable
+them by clicking on them.
+
+Lets do dry-run on publishing to see if we pass all validators. Click on flask
+icon at the bottom. Validators are run. Ideally you will end up with everything
+green in validator section.
+
+### Fixing problems
+
+For the sake of demonstration, I intentionally kept the model in Edit Mode, to
+trigger the validator designed to check just this.
+
+
+
+You can see our model is now marked red in left column and in right we have
+red box next to `Mesh is in Object Mode` validator.
+
+You can click on arrow next to it to see more details:
+
+
+
+From there you can see in **Records** entry that there is problem with the
+object `Suzanne`. Some validators have option to fix problem for you or just
+select objects that cause trouble. This is the case with our failed validator.
+
+In main overview you can notice little A in a circle next to validator
+name. Right click on it and you can see menu item `select invalid`. This
+will select offending object in Blender.
+
+Fix is easy. Without closing Publisher window we just turn back the Object Mode.
+Then we need to reset it to make it notice changes we've made. Click on arrow
+circle button at the bottom and it will reset the Publisher to initial state. Run
+validators again (flask icon) to see if everything is ok.
+
+It should be OK now. Write a comment if you want and click the play icon button
+when ready.
+
+Publish process will now take its course. Depending on data you are publishing
+it can take a while. You should end up with everything green and message
+**Finished successfully ...** You can now close publisher window.
+
+To check for yourself that model is published, open
+[Asset Loader](artist_tools#loader) - **OpenPype → Load...**.
+There you should see your model, named `modelDefault`.
+
+### Loading models
+
+You can load model with [Loader](artist_tools.md#loader). Go **OpenPype → Load...**,
+select your rig, right click on it and click **Link model (blend)**.
+
+## Creating Rigs
+
+Creating and publishing rigs with OpenPype follows similar workflow as with
+other data types. Create your rig and mark parts of your hierarchy in sets to
+help OpenPype validators and extractors to check it and publish it.
+
+### Preparing rig for publish
+
+When creating rigs in Blender, it is important to keep a specific structure for
+the bones and the geometry. Let's first create a model and its rig. For
+demonstration, I'll create a simple model for a robotic arm made of simple boxes.
+
+
+
+I have now created the armature `RIG_RobotArm`. While the naming is not important,
+you can just adhere to your naming conventions, the hierarchy is. Once the models
+are skinned to the armature, the geometry must be organized in a separate Collection.
+In this case, I have the armature in the main Collection, and the geometry in
+the `Geometry` Collection.
+
+
+
+When you've prepared your hierarchy, it's time to create *Rig instance* in OpenPype.
+Select your whole rig hierarchy and go **OpenPype → Create...**. Select **Rig**.
+
+
+
+A new collection named after the selected Asset and Subset should have been created.
+In our case, it is `character1_rigDefault`. All the selected armature and models
+have been linked in this new collection. You should end up with something like
+this:
+
+
+
+### Publishing rigs
+
+Publishing a rig is done in the same way as publishing everything else. Save your scene
+and go **OpenPype → Publish**. For more detail see [Publisher](artist_tools#publisher).
+
+### Loading rigs
+
+You can load rig with [Loader](artist_tools.md#loader). Go **OpenPype → Load...**,
+select your rig, right click on it and click **Link rig (blend)**.
+
+## Layouts in Blender
+
+A layout is a set of elements that populate a scene. OpenPype allows to version
+and manage those sets.
+
+### Publishing a layout
+
+Working with Layout is easy. Just load your assets into scene with
+[Loader](artist_tools.md#loader) (**OpenPype → Load...**). Populate your scene as
+you wish, translate each piece to fit your need. When ready, select all imported
+stuff and go **OpenPype → Create...** and select **Layout**. When selecting rigs,
+you need to select only the armature, the geometry will automatically be included.
+This will create set containing your selection and marking it for publishing.
+
+Now you can publish it with **OpenPype → Publish**.
+
+### Loading layouts
+
+You can load a Layout using [Loader](artist_tools.md#loader)
+(**OpenPype → Load...**). Select your layout, right click on it and
+select **Link Layout (blend)**. This will populate your scene with all those
+models you've put into layout.
\ No newline at end of file
diff --git a/website/docs/assets/blender-model_create_instance.jpg b/website/docs/assets/blender-model_create_instance.jpg
new file mode 100644
index 0000000000..d0891c5d05
Binary files /dev/null and b/website/docs/assets/blender-model_create_instance.jpg differ
diff --git a/website/docs/assets/blender-model_error_details.jpg b/website/docs/assets/blender-model_error_details.jpg
new file mode 100644
index 0000000000..1756254e5f
Binary files /dev/null and b/website/docs/assets/blender-model_error_details.jpg differ
diff --git a/website/docs/assets/blender-model_example.jpg b/website/docs/assets/blender-model_example.jpg
new file mode 100644
index 0000000000..98d98e903f
Binary files /dev/null and b/website/docs/assets/blender-model_example.jpg differ
diff --git a/website/docs/assets/blender-model_pre_publish.jpg b/website/docs/assets/blender-model_pre_publish.jpg
new file mode 100644
index 0000000000..11233229c5
Binary files /dev/null and b/website/docs/assets/blender-model_pre_publish.jpg differ
diff --git a/website/docs/assets/blender-model_publish_error.jpg b/website/docs/assets/blender-model_publish_error.jpg
new file mode 100644
index 0000000000..260d9b9996
Binary files /dev/null and b/website/docs/assets/blender-model_publish_error.jpg differ
diff --git a/website/docs/assets/blender-rig_create.jpg b/website/docs/assets/blender-rig_create.jpg
new file mode 100644
index 0000000000..169ddae84f
Binary files /dev/null and b/website/docs/assets/blender-rig_create.jpg differ
diff --git a/website/docs/assets/blender-rig_hierarchy_before_publish.jpg b/website/docs/assets/blender-rig_hierarchy_before_publish.jpg
new file mode 100644
index 0000000000..81f3916c9e
Binary files /dev/null and b/website/docs/assets/blender-rig_hierarchy_before_publish.jpg differ
diff --git a/website/docs/assets/blender-rig_hierarchy_example.jpg b/website/docs/assets/blender-rig_hierarchy_example.jpg
new file mode 100644
index 0000000000..6ab6897650
Binary files /dev/null and b/website/docs/assets/blender-rig_hierarchy_example.jpg differ
diff --git a/website/docs/assets/blender-rig_model_setup.jpg b/website/docs/assets/blender-rig_model_setup.jpg
new file mode 100644
index 0000000000..6f967cdab4
Binary files /dev/null and b/website/docs/assets/blender-rig_model_setup.jpg differ
diff --git a/website/docs/assets/blender-save_modelling_file.jpg b/website/docs/assets/blender-save_modelling_file.jpg
new file mode 100644
index 0000000000..d7f2401c51
Binary files /dev/null and b/website/docs/assets/blender-save_modelling_file.jpg differ
diff --git a/website/sidebars.js b/website/sidebars.js
index ec608f0a13..82f063e252 100644
--- a/website/sidebars.js
+++ b/website/sidebars.js
@@ -19,6 +19,7 @@ module.exports = {
"artist_hosts_nukestudio",
"artist_hosts_nuke",
"artist_hosts_maya",
+ "artist_hosts_blender",
"artist_hosts_harmony",
"artist_hosts_aftereffects",
"artist_hosts_photoshop",