Mirror of https://github.com/wakatime/sublime-wakatime.git, synced 2023-08-10 21:13:02 +03:00.

Compare commits: 7.0.6...feature/ap (240 commits)
SHA1:
a387c08b44, 3c2947cf79, 54e6772a80, b576dfafe6, 5299efd6fa, 74583a6845, 31f1f8ecdc, c1f58fd05d,
28063e3ac4, 4d56aca1a1, e15c514ef3, fe582c84b9, d7ee8675d8, da17125b97, 847223cdce, b405f99cff,
b5210b77ce, fac1192228, 4d6533b2ee, 7d2bd0b7c5, 66f5f48f33, e337afcc53, 033c07f070, c09767b58d,
ac80c4268b, e5331d3086, bebb46dda6, 9767790063, 75c219055d, ef77e1f178, 4025decc12, 086c700151,
650bb6fa26, 389c84673e, 6fa1321a95, f1a8fcab44, 3937b083c5, 711aab0d18, cb8ce3a54e, 7db5fe0a5d,
cebcfaa0e9, f8faed6e47, fb303e048f, 4f11222c2b, 72b72dc9f0, 1b07d0442b, fbd8e84ea1, 01c0e7758e,
28556de3b6, 809e43cfe5, 22ddbe27b0, ddaf60b8b0, 2e6a87c67e, 01503b1c20, 5a4ac9c11d, fa0a3aacb5,
d588451468, 483d8f596e, 03f2d6d580, e1390d7647, c87bdd041c, 3a65395636, 0b2f3aa9a4, 58ef2cd794,
04173d3bcc, 3206a07476, 9330236816, 885c11f01a, 8acda0157a, 935ddbd5f6, b57b1eb696, 6ec097b9d1,
b3ed36d3b2, 3669e4df6a, 3504096082, 5990947706, 2246e31244, b55fe702d3, e0fbbb50bb, 32c0cb5a97,
67d8b0d24f, b8b2f4944b, a20161164c, 405211bb07, ffc879c4eb, 1e23919694, b2086a3cd2, 005b07520c,
60608bd322, cde8f8f1de, 4adfca154c, f7b3924a30, db00024455, 9a6be7ca4e, 1ea9b2a761, bd5e87e030,
0256ff4a6a, 9d170b3276, c54e575210, 07513d8f10, 30902cc050, aa7962d49a, d8c662f3db, 10d88ebf2d,
2f28c561b1, 24968507df, 641cd539ed, 0c65d7e5b2, f0532f5b8e, 8094db9680, bf20551849, 2b6e32b578,
363c3d38e2, 88466d7db2, 122fcbbee5, c41fcec5d8, be09b34d44, e1ee1c1216, a37061924b, da01fa268b,
c279418651, 5cf2c8f7ac, d1455e77a8, 8499e7bafe, abc26a0864, 71ad97ffe9, 3ec5995c99, 195cf4de36,
b39eefb4f5, bbf5761e26, c4df1dc633, 360a491cda, f61a34eda7, 48123d7409, c8a15d7ac0, 202df81e04,
5e34f3f6a7, d4441e5575, 9eac8e2bd3, 11d8fc3a09, d1f1f51f23, b10bb36c09, dc9474befa, b910807e98,
bc770515f0, 9e102d7c5c, 5c1770fb48, 683397534c, 1c92017543, fda1307668, 1c84d457c5, 1e680ce739,
376adbb7d7, e0040e185b, c4a88541d0, 0cf621d177, db9d6cec97, 2c17f49a6b, 95116d6007, 8c52596f8f,
3109817dc7, 0c0f965763, 1573e9c825, a0b8f349c2, 2fb60b1589, 02786a744e, 729a4360ba, 8f45de85ec,
4672f70c87, 46a9aae942, 9e77ce2697, 385ba818cc, 7492c3ce12, 03eed88917, 60a7ad96b5, 2d1d5d336a,
e659759b2d, a290e5d86d, d5b922bb10, ec7b5e3530, aa3f2e8af6, f4e53cd682, aba72b0f1e, 5b9d86a57d,
fa40874635, 6d4a4cf9eb, f628b8dd11, f932ee9fc6, 2f14009279, 453d96bf9c, 9de153f156, dcc782338d,
9d0dba988a, e76f2e514e, 224f7cd82a, 3cce525a84, ce885501ad, c9448a9a19, 04f8c61ebc, 04a4630024,
02138220fd, d0b162bdd8, 1b8895cd38, 938bbb73d1, 008fdc6b49, a788625dd0, bcbce681c3, 35299db832,
eb7814624c, 1c092b2fd8, 507ef95f71, 9777bc7788, 20b78defa6, 8cb1c557d9, 20a1965f13, 0b802a554e,
30186c9b2c, 311a0b5309, b7602d89fb, 305de46e32, c574234927, a69c50f470, f4b40089f3, 08394357b7,
205d4eb163, c4c27e4e9e, 9167eb2558, eaa3bb5180, 7755971d11, 7634be5446, 5e17ad88f6, 24d0f65116,
a326046733, 9bab00fd8b, b4a13a48b9, 21601f9688, 4c3ec87341, b149d7fc87, 52e6107c6e, b340637331
AUTHORS (2)

@@ -13,3 +13,5 @@ Patches and Suggestions
- Jimmy Selgen Nielsen <jimmy.selgen@gmail.com>
- Patrik Kernstock <info@pkern.at>
- Krishna Glick <krishnaglick@gmail.com>
- Carlos Henrique Gandarez <gandarez@gmail.com>
HISTORY.rst (529)

@@ -3,6 +3,535 @@ History
-------


11.1.0 (2022-11-11)
++++++++++++++++++

- Support for api key vault cmd config


11.0.8 (2022-08-23)
++++++++++++++++++

- Bugfix to prevent using empty selection object.
  `#116 <https://github.com/wakatime/sublime-wakatime/issues/116>`_


11.0.7 (2022-06-25)
++++++++++++++++++

- Check wakatime-cli versions in background thread.
  `#115 <https://github.com/wakatime/sublime-wakatime/issues/115>`_


11.0.6 (2022-06-08)
++++++++++++++++++

- Fix call to log helper.
  `#113 <https://github.com/wakatime/sublime-wakatime/issues/113>`_


11.0.5 (2022-04-29)
++++++++++++++++++

- Bugfix to not overwrite global urlopener in embedded Python.
  `#110 <https://github.com/wakatime/sublime-wakatime/issues/110>`_
- Chmod wakatime-cli to be executable after updating on non-Windows platforms.


11.0.4 (2022-01-06)
++++++++++++++++++

- Copy wakatime-cli when symlink fails on Windows.
  `vim-wakatime#122 <https://github.com/wakatime/vim-wakatime/issues/122>`_
- Fix lineno, cursorpos, and lines-in-file arguments.


11.0.3 (2021-12-31)
++++++++++++++++++

- Bugfix to not delete old wakatime-cli until finished downloading new version.
  `#107 <https://github.com/wakatime/sublime-wakatime/issues/107>`_


11.0.2 (2021-11-17)
++++++++++++++++++

- Bugfix to encode extra heartbeats cursorpos as int not str when sending to wakatime-cli.


11.0.1 (2021-11-16)
++++++++++++++++++

- Bugfix for install script when using system Python3 and duplicate INI keys.


11.0.0 (2021-10-31)
++++++++++++++++++

- Use new Go wakatime-cli.


10.0.1 (2020-12-28)
++++++++++++++++++

- Improve readme subtitle.


10.0.0 (2020-12-28)
++++++++++++++++++

- Support for standalone wakatime-cli, disabled by default.


9.1.2 (2020-02-13)
++++++++++++++++++

- Upgrade wakatime-cli to v13.0.7.
- Split bundled pygments library for Python 2.7+.
- Upgrade pygments for py27+ to v2.5.2 development master.
- Force requests to use bundled ca cert from certifi by default.
- Upgrade bundled certifi to v2019.11.28.


9.1.1 (2020-02-11)
++++++++++++++++++

- Fix typo in python detection on Windows platform.


9.1.0 (2020-02-09)
++++++++++++++++++

- Detect python in Windows LocalAppData install locations.
- Upgrade wakatime-cli to v13.0.4.
- Bundle cryptography, pyopenssl, and ipaddress packages for improved SSL
  support on Python2.


9.0.2 (2019-12-04)
++++++++++++++++++

- Upgrade wakatime-cli to v13.0.3.
- Support slashes in Mercurial and Git branch names.
  `wakatime#199 <https://github.com/wakatime/wakatime/issues/199>`_


9.0.1 (2019-11-24)
++++++++++++++++++

- Upgrade wakatime-cli to v13.0.2.
- Filter dependencies longer than 200 characters.
- Close sqlite connection even when error raised.
  `wakatime#196 <https://github.com/wakatime/wakatime/issues/196>`_
- Detect ColdFusion as root language instead of HTML.
- New arguments for reading and writing ini config file.
- Today argument shows categories when available.
- Prevent unnecessary debug log when syncing offline heartbeats.
- Support for Python 3.7.


9.0.0 (2019-06-23)
++++++++++++++++++

- New optional config option hide_branch_names.
  `wakatime#183 <https://github.com/wakatime/wakatime/issues/183>`_


8.7.0 (2019-05-29)
++++++++++++++++++

- Prevent creating user sublime-settings file when api key already exists in
  common wakatime.cfg file.
  `#98 <https://github.com/wakatime/sublime-wakatime/issues/98>`_


8.6.1 (2019-05-28)
++++++++++++++++++

- Fix parsing common wakatime.cfg file.
  `#98 <https://github.com/wakatime/sublime-wakatime/issues/98>`_


8.6.0 (2019-05-27)
++++++++++++++++++

- Prevent prompting for api key when found from config file.
  `#98 <https://github.com/wakatime/sublime-wakatime/issues/98>`_


8.5.0 (2019-05-10)
++++++++++++++++++

- Remove clock icon from status bar.
- Use wakatime-cli to fetch status bar coding time.


8.4.2 (2019-05-07)
++++++++++++++++++

- Upgrade wakatime-cli to v11.0.0.
- Rename argument --show-time-today to --today.
- New argument --show-time-today for printing Today's coding time.


8.4.1 (2019-05-01)
++++++++++++++++++

- Use api subdomain for fetching today's coding activity.


8.4.0 (2019-05-01)
++++++++++++++++++

- Show today's coding time in status bar.


8.3.6 (2019-04-30)
++++++++++++++++++

- Upgrade wakatime-cli to v10.8.4.
- Use wakatime fork of certifi package.
  `#95 <https://github.com/wakatime/sublime-wakatime/issues/95>`_


8.3.5 (2019-04-30)
++++++++++++++++++

- Upgrade wakatime-cli to v10.8.3.
- Upgrade certifi to version 2019.03.09.


8.3.4 (2019-03-30)
++++++++++++++++++

- Upgrade wakatime-cli to v10.8.2.
- Detect go.mod files as Go language.
  `jetbrains-wakatime#119 <https://github.com/wakatime/jetbrains-wakatime/issues/119>`_
- Detect C++ language from all C++ file extensions.
  `vscode-wakatime#87 <https://github.com/wakatime/vscode-wakatime/issues/87>`_
- Add ssl_certs_file arg and config for custom ca bundles.
  `wakatime#164 <https://github.com/wakatime/wakatime/issues/164>`_
- Fix bug causing random project names when hide project names enabled.
  `vscode-wakatime#162 <https://github.com/wakatime/vscode-wakatime/issues/61>`_
- Add support for UNC network shares without drive letter mapped on Windows.
  `wakatime#162 <https://github.com/wakatime/wakatime/issues/162>`_


8.3.3 (2018-12-19)
++++++++++++++++++

- Upgrade wakatime-cli to v10.6.1.
- Correctly parse include_only_with_project_file when set to false.
  `wakatime#161 <https://github.com/wakatime/wakatime/issues/161>`_
- Support language argument for non-file entity types.
- Send 25 heartbeats per API request.
- New category "Writing Tests".
  `wakatime#156 <https://github.com/wakatime/wakatime/issues/156>`_
- Fix bug caused by git config section without any submodule option defined.
  `wakatime#152 <https://github.com/wakatime/wakatime/issues/152>`_


8.3.2 (2018-10-06)
++++++++++++++++++

- Send buffered heartbeats to API every 30 seconds.


8.3.1 (2018-10-05)
++++++++++++++++++

- Upgrade wakatime-cli to v10.4.1.
- Send 50 offline heartbeats to API per request with 1 second delay in between.


8.3.0 (2018-10-03)
++++++++++++++++++

- Upgrade wakatime-cli to v10.4.0.
- Support logging coding activity to remote network drive files on Windows
  platform by detecting UNC path from drive letter.
  `wakatime#72 <https://github.com/wakatime/wakatime/issues/72>`_


8.2.0 (2018-09-30)
++++++++++++++++++

- Prevent opening cmd window on Windows when running wakatime-cli.
  `#91 <https://github.com/wakatime/sublime-wakatime/issues/91>`_
- Upgrade wakatime-cli to v10.3.0.
- Re-enable detecting projects from Subversion folder on Windows platform.
- Prevent opening cmd window on Windows when detecting project from Subversion.
- Run tests on Windows using Appveyor.


8.1.2 (2018-09-20)
++++++++++++++++++

- Upgrade wakatime-cli to v10.2.4.
- Default --sync-offline-activity to 100 instead of 5, so offline coding is
  synced to dashboard faster.
- Batch heartbeats in groups of 10 per api request.
- New config hide_project_name and argument --hide-project-names for
  obfuscating project names when sending coding activity to api.
- Fix misspelled Gosu language.
  `wakatime#137 <https://github.com/wakatime/wakatime/issues/137>`_
- Remove metadata when hiding project or file names.
- New --local-file argument to be used when --entity is a remote file.
- New argument --sync-offline-activity for configuring the maximum offline
  heartbeats to sync to the WakaTime API.


8.1.1 (2018-04-26)
++++++++++++++++++

- Upgrade wakatime-cli to v10.2.1.
- Force forward slash for file paths.
- New --category argument.
- New --exclude-unknown-project argument and corresponding config setting.
- Support for project detection from git worktree folders.


8.1.0 (2018-04-03)
++++++++++++++++++

- Prefer Python3 over Python2 when running wakatime-cli core.
- Improve detection of Python3 on Ubuntu 17.10 platforms.


8.0.8 (2018-03-15)
++++++++++++++++++

- Upgrade wakatime-cli to v10.1.3.
- Smarter C vs C++ vs Objective-C language detection.


8.0.7 (2018-03-15)
++++++++++++++++++

- Upgrade wakatime-cli to v10.1.2.
- Detect dependencies from Swift, Objective-C, TypeScript and JavaScript files.
- Categorize .mjs files as JavaScript.
  `wakatime#121 <https://github.com/wakatime/wakatime/issues/121>`_
- Detect dependencies from Elm, Haskell, Haxe, Kotlin, Rust, and Scala files.
- Improved Matlab vs Objective-C language detection.
  `wakatime#129 <https://github.com/wakatime/wakatime/issues/129>`_


8.0.6 (2018-01-04)
++++++++++++++++++

- Upgrade wakatime-cli to v10.1.0.
- Ability to only track folders containing a .wakatime-project file using new
  include_only_with_project_file argument and config option.


8.0.5 (2017-11-24)
++++++++++++++++++

- Upgrade wakatime-cli to v10.0.5.
- Fix bug that caused heartbeats to be cached locally instead of sent to API.


8.0.4 (2017-11-23)
++++++++++++++++++

- Upgrade wakatime-cli to v10.0.4.
- Improve Java dependency detection.
- Skip null or missing heartbeats from extra heartbeats argument.


8.0.3 (2017-11-22)
++++++++++++++++++

- Upgrade wakatime-cli to v10.0.3.
- Support saving unicode heartbeats when working offline.
  `wakatime#112 <https://github.com/wakatime/wakatime/issues/112>`_


8.0.2 (2017-11-15)
++++++++++++++++++

- Upgrade wakatime-cli to v10.0.2.
- Limit bulk syncing to 5 heartbeats per request.
  `wakatime#109 <https://github.com/wakatime/wakatime/issues/109>`_


8.0.1 (2017-11-09)
++++++++++++++++++

- Upgrade wakatime-cli to v10.0.1.
- Parse array of results from bulk heartbeats endpoint, only saving heartbeats
  to local offline cache when they were not accepted by the api.


8.0.0 (2017-11-08)
++++++++++++++++++

- Upgrade wakatime-cli to v10.0.0.
- Upload multiple heartbeats to bulk endpoint for improved network performance.
  `wakatime#107 <https://github.com/wakatime/wakatime/issues/107>`_


7.0.26 (2017-11-07)
++++++++++++++++++

- Upgrade wakatime-cli to v9.0.1.
- Fix bug causing 401 response when hidefilenames is enabled.
  `wakatime#106 <https://github.com/wakatime/wakatime/issues/106>`_


7.0.25 (2017-11-05)
++++++++++++++++++

- Ability to override python binary location in sublime-settings file.
  `#78 <https://github.com/wakatime/sublime-wakatime/issues/78>`_
- Upgrade wakatime-cli to v9.0.0.
- Detect project and branch names from git submodules.
  `wakatime#105 <https://github.com/wakatime/wakatime/issues/105>`_


7.0.24 (2017-10-29)
++++++++++++++++++

- Upgrade wakatime-cli to v8.0.5.
- Allow passing string arguments wrapped in extra quotes for plugins which
  cannot properly escape spaces in arguments.
- Upgrade pytz to v2017.2.
- Upgrade requests to v2.18.4.
- Upgrade tzlocal to v1.4.
- Use WAKATIME_HOME env variable for offline and session caching.
  `wakatime#102 <https://github.com/wakatime/wakatime/issues/102>`_


7.0.23 (2017-09-14)
++++++++++++++++++

- Add "include" setting to bypass ignored files.
  `#89 <https://github.com/wakatime/sublime-wakatime/issues/89>`_


7.0.22 (2017-06-08)
++++++++++++++++++

- Upgrade wakatime-cli to v8.0.3.
- Improve Matlab language detection.


7.0.21 (2017-05-24)
++++++++++++++++++

- Upgrade wakatime-cli to v8.0.2.
- Only treat proxy string as NTLM proxy after unable to connect with HTTPS and
  SOCKS proxy.
- Support running automated tests on Linux, OS X, and Windows.
- Ability to disable SSL cert verification.
  `wakatime#90 <https://github.com/wakatime/wakatime/issues/90>`_
- Disable line count stats for files larger than 2MB to improve performance.
- Print error saying Python needs upgrading when requests can't be imported.


7.0.20 (2017-04-10)
++++++++++++++++++

- Fix install instructions formatting.


7.0.19 (2017-04-10)
++++++++++++++++++

- Remove /var/www/ from default ignored folders.


7.0.18 (2017-03-16)
++++++++++++++++++

- Upgrade wakatime-cli to v8.0.0.
- No longer creating ~/.wakatime.cfg file, since only using Sublime settings.


7.0.17 (2017-03-01)
++++++++++++++++++

- Upgrade wakatime-cli to v7.0.4.


7.0.16 (2017-02-20)
++++++++++++++++++

- Upgrade wakatime-cli to v7.0.2.


7.0.15 (2017-02-13)
++++++++++++++++++

- Upgrade wakatime-cli to v6.2.2.
- Upgrade pygments library to v2.2.0 for improved language detection.


7.0.14 (2017-02-08)
++++++++++++++++++

- Upgrade wakatime-cli to v6.2.1.
- Allow boolean or list of regex patterns for hidefilenames config setting.


7.0.13 (2016-11-11)
++++++++++++++++++

- Support old Sublime Text with Python 2.6.
- Fix bug that prevented reading default api key from existing config file.


7.0.12 (2016-10-24)
++++++++++++++++++

- Upgrade wakatime-cli to v6.2.0.
- Exit with status code 104 when api key is missing or invalid. Exit with
  status code 103 when config file missing or invalid.
- New WAKATIME_HOME env variable for setting path to config and log files.
- Improve debug warning message from unsupported dependency parsers.


7.0.11 (2016-09-23)
++++++++++++++++++

- Handle UnicodeDecodeError when logging.
  `#68 <https://github.com/wakatime/sublime-wakatime/issues/68>`_


7.0.10 (2016-09-22)
++++++++++++++++++

- Handle UnicodeDecodeError when looking for python.
  `#68 <https://github.com/wakatime/sublime-wakatime/issues/68>`_
- Upgrade wakatime-cli to v6.0.9.


7.0.9 (2016-09-02)
++++++++++++++++++

- Upgrade wakatime-cli to v6.0.8.


7.0.8 (2016-07-21)
++++++++++++++++++

- Upgrade wakatime-cli to master version to fix debug logging encoding bug.


7.0.7 (2016-07-06)
++++++++++++++++++

- Upgrade wakatime-cli to v6.0.7.
- Handle unknown exceptions from requests library by deleting cached session
  object because it could be from a previous conflicting version.
- New hostname setting in config file to set machine hostname. Hostname
  argument takes priority over hostname from config file.
- Prevent logging unrelated exception when logging tracebacks.
- Use correct namespace for pygments.lexers.ClassNotFound exception so it is
  caught when dependency detection not available for a language.


7.0.6 (2016-06-13)
++++++++++++++++++
LICENSE (3)

@@ -1,7 +1,6 @@
BSD 3-Clause License

Copyright (c) 2014 by the respective authors (see AUTHORS file).
All rights reserved.
Copyright (c) 2014 Alan Hamlett.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
README.md (48)

@@ -1,49 +1,38 @@
sublime-wakatime
================
# sublime-wakatime

Metrics, insights, and time tracking automatically generated from your programming activity.
[](https://wakatime.com/badge/github/wakatime/sublime-wakatime)

[WakaTime][wakatime] is an open source Sublime Text plugin for metrics, insights, and time tracking automatically generated from your programming activity.

Installation
------------
## Installation

1. Install [Package Control](https://packagecontrol.io/installation).

2. Using [Package Control](https://packagecontrol.io/docs/usage):
2. In Sublime, press `ctrl+shift+p`(Windows, Linux) or `cmd+shift+p`(OS X).

a) Inside Sublime, press `ctrl+shift+p`(Windows, Linux) or `cmd+shift+p`(OS X).
3. Type `install`, then press `enter` with `Package Control: Install Package` selected.

b) Type `install`, then press `enter` with `Package Control: Install Package` selected.
4. Type `wakatime`, then press `enter` with the `WakaTime` plugin selected.

c) Type `wakatime`, then press `enter` with the `WakaTime` plugin selected.
5. Enter your [api key](https://wakatime.com/settings#apikey), then press `enter`.

3. Enter your [api key](https://wakatime.com/settings#apikey), then press `enter`.
6. Use Sublime and your coding activity will be displayed on your [WakaTime dashboard](https://wakatime.com).

4. Use Sublime and your time will be tracked for you automatically.

5. Visit https://wakatime.com/dashboard to see your logged time.


Screen Shots
------------
## Screen Shots



Unresponsive Plugin Warning
---------------------------
## Unresponsive Plugin Warning

In Sublime Text 2, if you get a warning message:

A plugin (WakaTime) may be making Sublime Text unresponsive by taking too long (0.017332s) in its on_modified callback.

To fix this, go to `Preferences > Settings - User` then add the following setting:
To fix this, go to `Preferences → Settings - User` then add the following setting:

`"detect_slow_plugins": false`


Troubleshooting
---------------
## Troubleshooting

First, turn on debug mode in your `WakaTime.sublime-settings` file.

@@ -51,6 +40,13 @@ First, turn on debug mode in your `WakaTime.sublime-settings` file.

Add the line: `"debug": true`

Then, open your Sublime Console with `View -> Show Console` to see the plugin executing the wakatime cli process when sending a heartbeat. Also, tail your `$HOME/.wakatime.log` file to debug wakatime cli problems.
Then, open your Sublime Console with `View → Show Console` ( CTRL + \` ) to see the plugin executing the wakatime cli process when sending a heartbeat.
Also, tail your `$HOME/.wakatime.log` file to debug wakatime cli problems.

For more general troubleshooting information, see [wakatime/wakatime#troubleshooting](https://github.com/wakatime/wakatime#troubleshooting).
The [How to Debug Plugins][how to debug] guide shows how to check when coding activity was last received from your editor using the [User Agents API][user agents api].
For more general troubleshooting info, see the [wakatime-cli Troubleshooting Section][wakatime-cli-help].

[wakatime]: https://wakatime.com/sublime-text
[wakatime-cli-help]: https://github.com/wakatime/wakatime#troubleshooting
[how to debug]: https://wakatime.com/faq#debug-plugins
[user agents api]: https://wakatime.com/developers#user_agents
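As a quick sanity check that heartbeats are actually reaching WakaTime, the User Agents API referenced above can also be queried from a terminal. The sketch below only assumes how that call looks: it presumes the `https://api.wakatime.com/api/v1/users/current/user_agents` endpoint accepts the api key via HTTP Basic auth, so verify the exact contract against the linked developer docs before relying on it.

```python
# Sketch: list the plugins WakaTime has recently received heartbeats from.
# The endpoint path and Basic-auth-with-api-key scheme are assumptions here;
# confirm them at https://wakatime.com/developers#user_agents.
import base64
import json
import urllib.request

API_KEY = 'your-api-key'  # placeholder, not a real key

req = urllib.request.Request(
    'https://api.wakatime.com/api/v1/users/current/user_agents',
    headers={'Authorization': 'Basic ' + base64.b64encode(API_KEY.encode()).decode()},
)
with urllib.request.urlopen(req) as resp:
    for agent in json.load(resp).get('data', []):
        print(agent)
```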
WakaTime.py (965)

File diff suppressed because it is too large
@@ -3,21 +3,38 @@
// This settings file will be overwritten when upgrading.

{
    // Your api key from https://wakatime.com/#apikey
    // Your api key from https://wakatime.com/api-key
    // Set this in your User specific WakaTime.sublime-settings file.
    "api_key": "",

    // Ignore files; Files (including absolute paths) that match one of these
    // POSIX regular expressions will not be logged.
    "ignore": ["^/tmp/", "^/etc/", "^/var/", "COMMIT_EDITMSG$", "PULLREQ_EDITMSG$", "MERGE_MSG$", "TAG_EDITMSG$"],

    // Debug mode. Set to true for verbose logging. Defaults to false.
    "debug": false,

    // Status bar message. Set to false to hide status bar message.

    // Proxy with format https://user:pass@host:port or socks5://user:pass@host:port or domain\\user:pass.
    "proxy": "",

    // Ignore files; Files (including absolute paths) that match one of these
    // POSIX regular expressions will not be logged.
    "ignore": ["^/tmp/", "^/etc/", "^/var/(?!www/).*", "COMMIT_EDITMSG$", "PULLREQ_EDITMSG$", "MERGE_MSG$", "TAG_EDITMSG$"],

    // Include files; Files (including absolute paths) that match one of these
    // POSIX regular expressions will bypass your ignore setting.
    "include": [".*"],

    // Status bar for surfacing errors and displaying today's coding time. Set
    // to false to hide. Defaults to true.
    "status_bar_enabled": true,

    // Show today's coding activity in WakaTime status bar item.
    // Defaults to true.
    "status_bar_message": true,

    // Status bar message format.
    "status_bar_message_fmt": "WakaTime {status} %I:%M %p"
    "status_bar_coding_activity": true,

    // Obfuscate file paths when sending to API. Your dashboard will no longer display coding activity per file.
    "hidefilenames": false,

    // Python binary location. Uses python from your PATH by default.
    "python_binary": "",

    // Use standalone compiled Python wakatime-cli (Will not work on ARM Macs)
    "standalone": false
}
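Inside the plugin, these values are read back through Sublime's settings API. The snippet below is a minimal sketch of that lookup using the setting names from the file above; the defaults passed to `get()` are illustrative and not taken from the plugin source.

```python
# Sketch: reading the keys defined in WakaTime.sublime-settings from plugin
# code via Sublime's settings API. Defaults shown here are illustrative.
import sublime


def read_wakatime_settings():
    settings = sublime.load_settings('WakaTime.sublime-settings')
    return {
        'api_key': settings.get('api_key', ''),
        'debug': settings.get('debug', False),
        'hidefilenames': settings.get('hidefilenames', False),
        'ignore': settings.get('ignore', []),
        'include': settings.get('include', ['.*']),
        'status_bar_enabled': settings.get('status_bar_enabled', True),
    }
```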
@@ -1,9 +0,0 @@
__title__ = 'wakatime'
__description__ = 'Common interface to the WakaTime api.'
__url__ = 'https://github.com/wakatime/wakatime'
__version_info__ = ('6', '0', '5')
__version__ = '.'.join(__version_info__)
__author__ = 'Alan Hamlett'
__author_email__ = 'alan@wakatime.com'
__license__ = 'BSD'
__copyright__ = 'Copyright 2016 Alan Hamlett'
@@ -1,17 +0,0 @@
# -*- coding: utf-8 -*-
"""
    wakatime
    ~~~~~~~~

    Common interface to the WakaTime api.
    http://wakatime.com

    :copyright: (c) 2013 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""


__all__ = ['main']


from .main import execute
@@ -1,35 +0,0 @@
# -*- coding: utf-8 -*-
"""
    wakatime.cli
    ~~~~~~~~~~~~

    Command-line entry point.

    :copyright: (c) 2013 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""

import os
import sys


# get path to local wakatime package
package_folder = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# add local wakatime package to sys.path
sys.path.insert(0, package_folder)

# import local wakatime package
try:
    import wakatime
except (TypeError, ImportError):
    # on Windows, non-ASCII characters in import path can be fixed using
    # the script path from sys.argv[0].
    # More info at https://github.com/wakatime/wakatime/issues/32
    package_folder = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
    sys.path.insert(0, package_folder)
    import wakatime


if __name__ == '__main__':
    sys.exit(wakatime.execute(sys.argv[1:]))
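The plugin runs this entry point as a separate process so the editor never blocks on the network. The following is a hedged sketch of such an invocation; `--entity`, `--key`, `--plugin`, and `--write` were flags of the legacy Python wakatime-cli shown here and may differ in the newer Go wakatime-cli.

```python
# Sketch: spawning the bundled legacy CLI the way an editor plugin might.
# Flag names are from the legacy Python wakatime-cli; the newer Go
# wakatime-cli takes similar but not identical arguments.
import subprocess
import sys


def send_heartbeat(cli_path, entity, api_key, is_write=False, python_binary=None):
    cmd = [
        python_binary or sys.executable,
        cli_path,                       # e.g. .../packages/wakatime/cli.py
        '--entity', entity,
        '--key', api_key,
        '--plugin', 'sublime-wakatime/7.0.6',
    ]
    if is_write:
        cmd.append('--write')
    # Fire and forget; the exit code can be checked later via Popen.poll().
    return subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
```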
@@ -1,93 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
wakatime.compat
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
For working with Python2 and Python3.
|
||||
|
||||
:copyright: (c) 2014 Alan Hamlett.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
import codecs
|
||||
import sys
|
||||
|
||||
|
||||
is_py2 = (sys.version_info[0] == 2)
|
||||
is_py3 = (sys.version_info[0] == 3)
|
||||
|
||||
|
||||
if is_py2: # pragma: nocover
|
||||
|
||||
def u(text):
|
||||
if text is None:
|
||||
return None
|
||||
try:
|
||||
return text.decode('utf-8')
|
||||
except:
|
||||
try:
|
||||
return text.decode(sys.getdefaultencoding())
|
||||
except:
|
||||
try:
|
||||
return unicode(text)
|
||||
except:
|
||||
return text
|
||||
open = codecs.open
|
||||
basestring = basestring
|
||||
|
||||
|
||||
elif is_py3: # pragma: nocover
|
||||
|
||||
def u(text):
|
||||
if text is None:
|
||||
return None
|
||||
if isinstance(text, bytes):
|
||||
try:
|
||||
return text.decode('utf-8')
|
||||
except:
|
||||
try:
|
||||
return text.decode(sys.getdefaultencoding())
|
||||
except:
|
||||
pass
|
||||
try:
|
||||
return str(text)
|
||||
except:
|
||||
return text
|
||||
open = open
|
||||
basestring = (str, bytes)
|
||||
|
||||
|
||||
try:
|
||||
from importlib import import_module
|
||||
except ImportError: # pragma: nocover
|
||||
def _resolve_name(name, package, level):
|
||||
"""Return the absolute name of the module to be imported."""
|
||||
if not hasattr(package, 'rindex'):
|
||||
raise ValueError("'package' not set to a string")
|
||||
dot = len(package)
|
||||
for x in xrange(level, 1, -1):
|
||||
try:
|
||||
dot = package.rindex('.', 0, dot)
|
||||
except ValueError:
|
||||
raise ValueError("attempted relative import beyond top-level "
|
||||
"package")
|
||||
return "%s.%s" % (package[:dot], name)
|
||||
|
||||
def import_module(name, package=None):
|
||||
"""Import a module.
|
||||
The 'package' argument is required when performing a relative import.
|
||||
It specifies the package to use as the anchor point from which to
|
||||
resolve the relative import to an absolute import.
|
||||
"""
|
||||
if name.startswith('.'):
|
||||
if not package:
|
||||
raise TypeError("relative imports require the 'package' "
|
||||
+ "argument")
|
||||
level = 0
|
||||
for character in name:
|
||||
if character != '.':
|
||||
break
|
||||
level += 1
|
||||
name = _resolve_name(name[level:], package, level)
|
||||
__import__(name)
|
||||
return sys.modules[name]
|
@@ -1,18 +0,0 @@
# -*- coding: utf-8 -*-
"""
    wakatime.constants
    ~~~~~~~~~~~~~~~~~~

    Constant variable definitions.

    :copyright: (c) 2016 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""


SUCCESS = 0
API_ERROR = 102
CONFIG_FILE_PARSE_ERROR = 103
AUTH_ERROR = 104
UNKNOWN_ERROR = 105
MALFORMED_HEARTBEAT_ERROR = 106
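A caller that spawns wakatime-cli can map these exit codes back to log messages. The sketch below reuses the constant values defined above; the message strings themselves are illustrative, not taken from the plugin.

```python
# Sketch: turning wakatime-cli exit codes (the constants above) into log
# messages. The wording of each message is illustrative.
EXIT_MESSAGES = {
    0: 'heartbeat sent',                                      # SUCCESS
    102: 'api error; heartbeat queued for offline syncing',   # API_ERROR
    103: 'could not parse the config file',                   # CONFIG_FILE_PARSE_ERROR
    104: 'api key missing or invalid',                        # AUTH_ERROR
    105: 'unknown error; check ~/.wakatime.log',              # UNKNOWN_ERROR
    106: 'malformed heartbeat payload',                       # MALFORMED_HEARTBEAT_ERROR
}


def describe_exit_code(code):
    return EXIT_MESSAGES.get(code, 'unexpected exit code: {0}'.format(code))
```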
@@ -1,130 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
wakatime.dependencies
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Parse dependencies from a source code file.
|
||||
|
||||
:copyright: (c) 2014 Alan Hamlett.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import re
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
from ..compat import u, open, import_module
|
||||
from ..exceptions import NotYetImplemented
|
||||
|
||||
|
||||
log = logging.getLogger('WakaTime')
|
||||
|
||||
|
||||
class TokenParser(object):
|
||||
"""The base class for all dependency parsers. To add support for your
|
||||
language, inherit from this class and implement the :meth:`parse` method
|
||||
to return a list of dependency strings.
|
||||
"""
|
||||
exclude = []
|
||||
|
||||
def __init__(self, source_file, lexer=None):
|
||||
self._tokens = None
|
||||
self.dependencies = []
|
||||
self.source_file = source_file
|
||||
self.lexer = lexer
|
||||
self.exclude = [re.compile(x, re.IGNORECASE) for x in self.exclude]
|
||||
|
||||
@property
|
||||
def tokens(self):
|
||||
if self._tokens is None:
|
||||
self._tokens = self._extract_tokens()
|
||||
return self._tokens
|
||||
|
||||
def parse(self, tokens=[]):
|
||||
""" Should return a list of dependencies.
|
||||
"""
|
||||
raise NotYetImplemented()
|
||||
|
||||
def append(self, dep, truncate=False, separator=None, truncate_to=None,
|
||||
strip_whitespace=True):
|
||||
self._save_dependency(
|
||||
dep,
|
||||
truncate=truncate,
|
||||
truncate_to=truncate_to,
|
||||
separator=separator,
|
||||
strip_whitespace=strip_whitespace,
|
||||
)
|
||||
|
||||
def partial(self, token):
|
||||
return u(token).split('.')[-1]
|
||||
|
||||
def _extract_tokens(self):
|
||||
if self.lexer:
|
||||
try:
|
||||
with open(self.source_file, 'r', encoding='utf-8') as fh:
|
||||
return self.lexer.get_tokens_unprocessed(fh.read(512000))
|
||||
except:
|
||||
pass
|
||||
try:
|
||||
with open(self.source_file, 'r', encoding=sys.getfilesystemencoding()) as fh:
|
||||
return self.lexer.get_tokens_unprocessed(fh.read(512000))
|
||||
except:
|
||||
pass
|
||||
return []
|
||||
|
||||
def _save_dependency(self, dep, truncate=False, separator=None,
|
||||
truncate_to=None, strip_whitespace=True):
|
||||
if truncate:
|
||||
if separator is None:
|
||||
separator = u('.')
|
||||
separator = u(separator)
|
||||
dep = dep.split(separator)
|
||||
if truncate_to is None or truncate_to < 1:
|
||||
truncate_to = 1
|
||||
if truncate_to > len(dep):
|
||||
truncate_to = len(dep)
|
||||
dep = dep[0] if len(dep) == 1 else separator.join(dep[:truncate_to])
|
||||
if strip_whitespace:
|
||||
dep = dep.strip()
|
||||
if dep and (not separator or not dep.startswith(separator)):
|
||||
should_exclude = False
|
||||
for compiled in self.exclude:
|
||||
if compiled.search(dep):
|
||||
should_exclude = True
|
||||
break
|
||||
if not should_exclude:
|
||||
self.dependencies.append(dep)
|
||||
|
||||
|
||||
class DependencyParser(object):
|
||||
source_file = None
|
||||
lexer = None
|
||||
parser = None
|
||||
|
||||
def __init__(self, source_file, lexer):
|
||||
self.source_file = source_file
|
||||
self.lexer = lexer
|
||||
|
||||
if self.lexer:
|
||||
module_name = self.lexer.__module__.rsplit('.', 1)[-1]
|
||||
class_name = self.lexer.__class__.__name__.replace('Lexer', 'Parser', 1)
|
||||
else:
|
||||
module_name = 'unknown'
|
||||
class_name = 'UnknownParser'
|
||||
|
||||
try:
|
||||
module = import_module('.%s' % module_name, package=__package__)
|
||||
try:
|
||||
self.parser = getattr(module, class_name)
|
||||
except AttributeError:
|
||||
log.debug('Module {0} is missing class {1}'.format(module.__name__, class_name))
|
||||
except ImportError:
|
||||
log.debug(traceback.format_exc())
|
||||
|
||||
def parse(self):
|
||||
if self.parser:
|
||||
plugin = self.parser(self.source_file, lexer=self.lexer)
|
||||
dependencies = plugin.parse()
|
||||
return list(set(dependencies))
|
||||
return []
|
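The TokenParser docstring above describes the extension point: subclass it and implement `parse()` to return dependency strings. Below is a minimal, hypothetical subclass (not one shipped in this package) showing how `tokens`, `append()`, and the `exclude` patterns fit together.

```python
# Sketch: a hypothetical TokenParser subclass, illustrating the extension
# point described above. It scans Makefile tokens for well-known build tools
# and records them as dependencies; it is not part of this package.
import re

from . import TokenParser


class MakefileParser(TokenParser):
    exclude = [
        r'^make$',  # recursive make calls are not an external dependency
    ]
    KNOWN_TOOLS = re.compile(r'\b(gcc|g\+\+|clang|javac|go|cargo|make)\b')

    def parse(self):
        for index, token, content in self.tokens:
            for tool in self.KNOWN_TOOLS.findall(content):
                # append() applies the exclude patterns; DependencyParser.parse()
                # de-duplicates the final list.
                self.append(tool)
        return self.dependencies
```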
@@ -1,51 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
wakatime.languages.c_cpp
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Parse dependencies from C++ code.
|
||||
|
||||
:copyright: (c) 2014 Alan Hamlett.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
from . import TokenParser
|
||||
|
||||
|
||||
class CParser(TokenParser):
|
||||
exclude = [
|
||||
r'^stdio\.h$',
|
||||
r'^stdlib\.h$',
|
||||
r'^string\.h$',
|
||||
r'^time\.h$',
|
||||
]
|
||||
state = None
|
||||
|
||||
def parse(self):
|
||||
for index, token, content in self.tokens:
|
||||
self._process_token(token, content)
|
||||
return self.dependencies
|
||||
|
||||
def _process_token(self, token, content):
|
||||
if self.partial(token) == 'Preproc' or self.partial(token) == 'PreprocFile':
|
||||
self._process_preproc(token, content)
|
||||
else:
|
||||
self._process_other(token, content)
|
||||
|
||||
def _process_preproc(self, token, content):
|
||||
if self.state == 'include':
|
||||
if content != '\n' and content != '#':
|
||||
content = content.strip().strip('"').strip('<').strip('>').strip()
|
||||
self.append(content, truncate=True, separator='/')
|
||||
self.state = None
|
||||
elif content.strip().startswith('include'):
|
||||
self.state = 'include'
|
||||
else:
|
||||
self.state = None
|
||||
|
||||
def _process_other(self, token, content):
|
||||
pass
|
||||
|
||||
|
||||
class CppParser(CParser):
|
||||
pass
|
@@ -1,64 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
wakatime.languages.data
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Parse dependencies from data files.
|
||||
|
||||
:copyright: (c) 2014 Alan Hamlett.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
from . import TokenParser
|
||||
from ..compat import u
|
||||
|
||||
|
||||
FILES = {
|
||||
'bower.json': {'exact': True, 'dependency': 'bower'},
|
||||
'component.json': {'exact': True, 'dependency': 'bower'},
|
||||
'package.json': {'exact': True, 'dependency': 'npm'},
|
||||
}
|
||||
|
||||
|
||||
class JsonParser(TokenParser):
|
||||
state = None
|
||||
level = 0
|
||||
|
||||
def parse(self):
|
||||
self._process_file_name(os.path.basename(self.source_file))
|
||||
for index, token, content in self.tokens:
|
||||
self._process_token(token, content)
|
||||
return self.dependencies
|
||||
|
||||
def _process_file_name(self, file_name):
|
||||
for key, value in FILES.items():
|
||||
found = (key == file_name) if value.get('exact') else (key.lower() in file_name.lower())
|
||||
if found:
|
||||
self.append(value['dependency'])
|
||||
|
||||
def _process_token(self, token, content):
|
||||
if u(token) == 'Token.Name.Tag':
|
||||
self._process_tag(token, content)
|
||||
elif u(token) == 'Token.Literal.String.Single' or u(token) == 'Token.Literal.String.Double':
|
||||
self._process_literal_string(token, content)
|
||||
elif u(token) == 'Token.Punctuation':
|
||||
self._process_punctuation(token, content)
|
||||
|
||||
def _process_tag(self, token, content):
|
||||
if content.strip('"').strip("'") == 'dependencies' or content.strip('"').strip("'") == 'devDependencies':
|
||||
self.state = 'dependencies'
|
||||
elif self.state == 'dependencies' and self.level == 2:
|
||||
self.append(content.strip('"').strip("'"))
|
||||
|
||||
def _process_literal_string(self, token, content):
|
||||
pass
|
||||
|
||||
def _process_punctuation(self, token, content):
|
||||
if content == '{':
|
||||
self.level += 1
|
||||
elif content == '}':
|
||||
self.level -= 1
|
||||
if self.state is not None and self.level <= 1:
|
||||
self.state = None
|
@@ -1,64 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
wakatime.languages.dotnet
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Parse dependencies from .NET code.
|
||||
|
||||
:copyright: (c) 2014 Alan Hamlett.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
from . import TokenParser
|
||||
from ..compat import u
|
||||
|
||||
|
||||
class CSharpParser(TokenParser):
|
||||
exclude = [
|
||||
r'^system$',
|
||||
r'^microsoft$',
|
||||
]
|
||||
state = None
|
||||
buffer = u('')
|
||||
|
||||
def parse(self):
|
||||
for index, token, content in self.tokens:
|
||||
self._process_token(token, content)
|
||||
return self.dependencies
|
||||
|
||||
def _process_token(self, token, content):
|
||||
if self.partial(token) == 'Keyword':
|
||||
self._process_keyword(token, content)
|
||||
if self.partial(token) == 'Namespace' or self.partial(token) == 'Name':
|
||||
self._process_namespace(token, content)
|
||||
elif self.partial(token) == 'Punctuation':
|
||||
self._process_punctuation(token, content)
|
||||
else:
|
||||
self._process_other(token, content)
|
||||
|
||||
def _process_keyword(self, token, content):
|
||||
if content == 'using':
|
||||
self.state = 'import'
|
||||
self.buffer = u('')
|
||||
|
||||
def _process_namespace(self, token, content):
|
||||
if self.state == 'import':
|
||||
if u(content) != u('import') and u(content) != u('package') and u(content) != u('namespace') and u(content) != u('static'):
|
||||
if u(content) == u(';'): # pragma: nocover
|
||||
self._process_punctuation(token, content)
|
||||
else:
|
||||
self.buffer += u(content)
|
||||
|
||||
def _process_punctuation(self, token, content):
|
||||
if self.state == 'import':
|
||||
if u(content) == u(';'):
|
||||
self.append(self.buffer, truncate=True)
|
||||
self.buffer = u('')
|
||||
self.state = None
|
||||
elif u(content) == u('='):
|
||||
self.buffer = u('')
|
||||
else:
|
||||
self.buffer += u(content)
|
||||
|
||||
def _process_other(self, token, content):
|
||||
pass
|
@@ -1,77 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
wakatime.languages.go
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Parse dependencies from Go code.
|
||||
|
||||
:copyright: (c) 2016 Alan Hamlett.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
from . import TokenParser
|
||||
|
||||
|
||||
class GoParser(TokenParser):
|
||||
state = None
|
||||
parens = 0
|
||||
aliases = 0
|
||||
exclude = [
|
||||
r'^"fmt"$',
|
||||
]
|
||||
|
||||
def parse(self):
|
||||
for index, token, content in self.tokens:
|
||||
self._process_token(token, content)
|
||||
return self.dependencies
|
||||
|
||||
def _process_token(self, token, content):
|
||||
if self.partial(token) == 'Namespace':
|
||||
self._process_namespace(token, content)
|
||||
elif self.partial(token) == 'Punctuation':
|
||||
self._process_punctuation(token, content)
|
||||
elif self.partial(token) == 'String':
|
||||
self._process_string(token, content)
|
||||
elif self.partial(token) == 'Text':
|
||||
self._process_text(token, content)
|
||||
elif self.partial(token) == 'Other':
|
||||
self._process_other(token, content)
|
||||
else:
|
||||
self._process_misc(token, content)
|
||||
|
||||
def _process_namespace(self, token, content):
|
||||
self.state = content
|
||||
self.parens = 0
|
||||
self.aliases = 0
|
||||
|
||||
def _process_string(self, token, content):
|
||||
if self.state == 'import':
|
||||
self.append(content, truncate=False)
|
||||
|
||||
def _process_punctuation(self, token, content):
|
||||
if content == '(':
|
||||
self.parens += 1
|
||||
elif content == ')':
|
||||
self.parens -= 1
|
||||
elif content == '.':
|
||||
self.aliases += 1
|
||||
else:
|
||||
self.state = None
|
||||
|
||||
def _process_text(self, token, content):
|
||||
if self.state == 'import':
|
||||
if content == "\n" and self.parens <= 0:
|
||||
self.state = None
|
||||
self.parens = 0
|
||||
self.aliases = 0
|
||||
else:
|
||||
self.state = None
|
||||
|
||||
def _process_other(self, token, content):
|
||||
if self.state == 'import':
|
||||
self.aliases += 1
|
||||
else:
|
||||
self.state = None
|
||||
|
||||
def _process_misc(self, token, content):
|
||||
self.state = None
|
@@ -1,96 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
wakatime.languages.java
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Parse dependencies from Java code.
|
||||
|
||||
:copyright: (c) 2014 Alan Hamlett.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
from . import TokenParser
|
||||
from ..compat import u
|
||||
|
||||
|
||||
class JavaParser(TokenParser):
|
||||
exclude = [
|
||||
r'^java\.',
|
||||
r'^javax\.',
|
||||
r'^import$',
|
||||
r'^package$',
|
||||
r'^namespace$',
|
||||
r'^static$',
|
||||
]
|
||||
state = None
|
||||
buffer = u('')
|
||||
|
||||
def parse(self):
|
||||
for index, token, content in self.tokens:
|
||||
self._process_token(token, content)
|
||||
return self.dependencies
|
||||
|
||||
def _process_token(self, token, content):
|
||||
if self.partial(token) == 'Namespace':
|
||||
self._process_namespace(token, content)
|
||||
if self.partial(token) == 'Name':
|
||||
self._process_name(token, content)
|
||||
elif self.partial(token) == 'Attribute':
|
||||
self._process_attribute(token, content)
|
||||
elif self.partial(token) == 'Operator':
|
||||
self._process_operator(token, content)
|
||||
else:
|
||||
self._process_other(token, content)
|
||||
|
||||
def _process_namespace(self, token, content):
|
||||
if u(content) == u('import'):
|
||||
self.state = 'import'
|
||||
|
||||
elif self.state == 'import':
|
||||
keywords = [
|
||||
u('package'),
|
||||
u('namespace'),
|
||||
u('static'),
|
||||
]
|
||||
if u(content) in keywords:
|
||||
return
|
||||
self.buffer = u('{0}{1}').format(self.buffer, u(content))
|
||||
|
||||
elif self.state == 'import-finished':
|
||||
content = content.split(u('.'))
|
||||
|
||||
if len(content) == 1:
|
||||
self.append(content[0])
|
||||
|
||||
elif len(content) > 1:
|
||||
if len(content[0]) == 3:
|
||||
content = content[1:]
|
||||
if content[-1] == u('*'):
|
||||
content = content[:len(content) - 1]
|
||||
|
||||
if len(content) == 1:
|
||||
self.append(content[0])
|
||||
elif len(content) > 1:
|
||||
self.append(u('.').join(content[:2]))
|
||||
|
||||
self.state = None
|
||||
|
||||
def _process_name(self, token, content):
|
||||
if self.state == 'import':
|
||||
self.buffer = u('{0}{1}').format(self.buffer, u(content))
|
||||
|
||||
def _process_attribute(self, token, content):
|
||||
if self.state == 'import':
|
||||
self.buffer = u('{0}{1}').format(self.buffer, u(content))
|
||||
|
||||
def _process_operator(self, token, content):
|
||||
if u(content) == u(';'):
|
||||
self.state = 'import-finished'
|
||||
self._process_namespace(token, self.buffer)
|
||||
self.state = None
|
||||
self.buffer = u('')
|
||||
elif self.state == 'import':
|
||||
self.buffer = u('{0}{1}').format(self.buffer, u(content))
|
||||
|
||||
def _process_other(self, token, content):
|
||||
pass
|
@@ -1,85 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
wakatime.languages.php
|
||||
~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Parse dependencies from PHP code.
|
||||
|
||||
:copyright: (c) 2014 Alan Hamlett.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
from . import TokenParser
|
||||
from ..compat import u
|
||||
|
||||
|
||||
class PhpParser(TokenParser):
|
||||
state = None
|
||||
parens = 0
|
||||
|
||||
def parse(self):
|
||||
for index, token, content in self.tokens:
|
||||
self._process_token(token, content)
|
||||
return self.dependencies
|
||||
|
||||
def _process_token(self, token, content):
|
||||
if self.partial(token) == 'Keyword':
|
||||
self._process_keyword(token, content)
|
||||
elif u(token) == 'Token.Literal.String.Single' or u(token) == 'Token.Literal.String.Double':
|
||||
self._process_literal_string(token, content)
|
||||
elif u(token) == 'Token.Name.Other':
|
||||
self._process_name(token, content)
|
||||
elif u(token) == 'Token.Name.Function':
|
||||
self._process_function(token, content)
|
||||
elif self.partial(token) == 'Punctuation':
|
||||
self._process_punctuation(token, content)
|
||||
elif self.partial(token) == 'Text':
|
||||
self._process_text(token, content)
|
||||
else:
|
||||
self._process_other(token, content)
|
||||
|
||||
def _process_name(self, token, content):
|
||||
if self.state == 'use':
|
||||
self.append(content, truncate=True, separator=u("\\"))
|
||||
|
||||
def _process_function(self, token, content):
|
||||
if self.state == 'use function':
|
||||
self.append(content, truncate=True, separator=u("\\"))
|
||||
self.state = 'use'
|
||||
|
||||
def _process_keyword(self, token, content):
|
||||
if content == 'include' or content == 'include_once' or content == 'require' or content == 'require_once':
|
||||
self.state = 'include'
|
||||
elif content == 'use':
|
||||
self.state = 'use'
|
||||
elif content == 'as':
|
||||
self.state = 'as'
|
||||
elif self.state == 'use' and content == 'function':
|
||||
self.state = 'use function'
|
||||
else:
|
||||
self.state = None
|
||||
|
||||
def _process_literal_string(self, token, content):
|
||||
if self.state == 'include':
|
||||
if content != '"' and content != "'":
|
||||
content = content.strip()
|
||||
if u(token) == 'Token.Literal.String.Double':
|
||||
content = u("'{0}'").format(content)
|
||||
self.append(content)
|
||||
self.state = None
|
||||
|
||||
def _process_punctuation(self, token, content):
|
||||
if content == '(':
|
||||
self.parens += 1
|
||||
elif content == ')':
|
||||
self.parens -= 1
|
||||
elif (self.state == 'use' or self.state == 'as') and content == ',':
|
||||
self.state = 'use'
|
||||
else:
|
||||
self.state = None
|
||||
|
||||
def _process_text(self, token, content):
|
||||
pass
|
||||
|
||||
def _process_other(self, token, content):
|
||||
self.state = None
|
@@ -1,86 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
wakatime.languages.python
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Parse dependencies from Python code.
|
||||
|
||||
:copyright: (c) 2014 Alan Hamlett.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
from . import TokenParser
|
||||
|
||||
|
||||
class PythonParser(TokenParser):
|
||||
state = None
|
||||
parens = 0
|
||||
nonpackage = False
|
||||
exclude = [
|
||||
r'^os$',
|
||||
r'^sys$',
|
||||
r'^sys\.',
|
||||
]
|
||||
|
||||
def parse(self):
|
||||
for index, token, content in self.tokens:
|
||||
self._process_token(token, content)
|
||||
return self.dependencies
|
||||
|
||||
def _process_token(self, token, content):
|
||||
if self.partial(token) == 'Namespace':
|
||||
self._process_namespace(token, content)
|
||||
elif self.partial(token) == 'Operator':
|
||||
self._process_operator(token, content)
|
||||
elif self.partial(token) == 'Punctuation':
|
||||
self._process_punctuation(token, content)
|
||||
elif self.partial(token) == 'Text':
|
||||
self._process_text(token, content)
|
||||
else:
|
||||
self._process_other(token, content)
|
||||
|
||||
def _process_namespace(self, token, content):
|
||||
if self.state is None:
|
||||
self.state = content
|
||||
else:
|
||||
if content == 'as':
|
||||
self.nonpackage = True
|
||||
else:
|
||||
self._process_import(token, content)
|
||||
|
||||
def _process_operator(self, token, content):
|
||||
if self.state is not None:
|
||||
if content == '.':
|
||||
self.nonpackage = True
|
||||
|
||||
def _process_punctuation(self, token, content):
|
||||
if content == '(':
|
||||
self.parens += 1
|
||||
elif content == ')':
|
||||
self.parens -= 1
|
||||
self.nonpackage = False
|
||||
|
||||
def _process_text(self, token, content):
|
||||
if self.state is not None:
|
||||
if content == "\n" and self.parens == 0:
|
||||
self.state = None
|
||||
self.nonpackage = False
|
||||
|
||||
def _process_other(self, token, content):
|
||||
pass
|
||||
|
||||
def _process_import(self, token, content):
|
||||
if not self.nonpackage:
|
||||
if self.state == 'from':
|
||||
self.append(content, truncate=True, truncate_to=1)
|
||||
self.state = 'from-2'
|
||||
elif self.state == 'from-2' and content != 'import':
|
||||
self.append(content, truncate=True, truncate_to=1)
|
||||
elif self.state == 'import':
|
||||
self.append(content, truncate=True, truncate_to=1)
|
||||
self.state = 'import-2'
|
||||
elif self.state == 'import-2':
|
||||
self.append(content, truncate=True, truncate_to=1)
|
||||
else:
|
||||
self.state = None
|
||||
self.nonpackage = False
|
@@ -1,210 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
wakatime.languages.templates
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Parse dependencies from Templates.
|
||||
|
||||
:copyright: (c) 2014 Alan Hamlett.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
from . import TokenParser
|
||||
from ..compat import u
|
||||
|
||||
|
||||
""" If these keywords are found in the source file, treat them as a dependency.
|
||||
Must be lower-case strings.
|
||||
"""
|
||||
KEYWORDS = [
|
||||
'_',
|
||||
'$',
|
||||
'angular',
|
||||
'assert', # probably mocha
|
||||
'backbone',
|
||||
'batman',
|
||||
'c3',
|
||||
'can',
|
||||
'casper',
|
||||
'chai',
|
||||
'chaplin',
|
||||
'd3',
|
||||
'define', # probably require
|
||||
'describe', # mocha or jasmine
|
||||
'eco',
|
||||
'ember',
|
||||
'espresso',
|
||||
'expect', # probably jasmine
|
||||
'exports', # probably npm
|
||||
'express',
|
||||
'gulp',
|
||||
'handlebars',
|
||||
'highcharts',
|
||||
'jasmine',
|
||||
'jquery',
|
||||
'jstz',
|
||||
'ko', # probably knockout
|
||||
'm', # probably mithril
|
||||
'marionette',
|
||||
'meteor',
|
||||
'moment',
|
||||
'monitorio',
|
||||
'mustache',
|
||||
'phantom',
|
||||
'pickadate',
|
||||
'pikaday',
|
||||
'qunit',
|
||||
'react',
|
||||
'reactive',
|
||||
'require', # probably the commonjs spec
|
||||
'ripple',
|
||||
'rivets',
|
||||
'socketio',
|
||||
'spine',
|
||||
'thorax',
|
||||
'underscore',
|
||||
'vue',
|
||||
'way',
|
||||
'zombie',
|
||||
]
|
||||
|
||||
|
||||
class HtmlDjangoParser(TokenParser):
|
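||||
# tracks open tags and their attributes so scripts referenced via <script src="..."> are appended as dependencies
|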
||||
tags = []
|
||||
opening_tag = False
|
||||
getting_attrs = False
|
||||
current_attr = None
|
||||
current_attr_value = None
|
||||
|
||||
def parse(self):
|
||||
for index, token, content in self.tokens:
|
||||
self._process_token(token, content)
|
||||
return self.dependencies
|
||||
|
||||
def _process_token(self, token, content):
|
||||
if u(token) == 'Token.Punctuation':
|
||||
self._process_punctuation(token, content)
|
||||
elif u(token) == 'Token.Name.Tag':
|
||||
self._process_tag(token, content)
|
||||
elif u(token) == 'Token.Literal.String':
|
||||
self._process_string(token, content)
|
||||
elif u(token) == 'Token.Name.Attribute':
|
||||
self._process_attribute(token, content)
|
||||
|
||||
@property
|
||||
def current_tag(self):
|
||||
return None if len(self.tags) == 0 else self.tags[0]
|
||||
|
||||
def _process_punctuation(self, token, content):
|
||||
if content.startswith('</') or content.startswith('/'):
|
||||
try:
|
||||
self.tags.pop(0)
|
||||
except IndexError:
|
||||
# ignore errors from malformed markup
|
||||
pass
|
||||
self.opening_tag = False
|
||||
self.getting_attrs = False
|
||||
elif content.startswith('<'):
|
||||
self.opening_tag = True
|
||||
elif content.startswith('>'):
|
||||
self.opening_tag = False
|
||||
self.getting_attrs = False
|
||||
|
||||
def _process_tag(self, token, content):
|
||||
if self.opening_tag:
|
||||
self.tags.insert(0, content.replace('<', '', 1).strip().lower())
|
||||
self.getting_attrs = True
|
||||
elif content.startswith('>'):
|
||||
self.opening_tag = False
|
||||
self.getting_attrs = False
|
||||
self.current_attr = None
|
||||
|
||||
def _process_attribute(self, token, content):
|
||||
if self.getting_attrs:
|
||||
self.current_attr = content.lower().strip('=')
|
||||
else:
|
||||
self.current_attr = None
|
||||
self.current_attr_value = None
|
||||
|
||||
def _process_string(self, token, content):
|
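||||
# the quoted src value may arrive as one token or split across several, so accumulate until the closing quote
|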
||||
if self.getting_attrs and self.current_attr is not None:
|
||||
if content.endswith('"') or content.endswith("'"):
|
||||
if self.current_attr_value is not None:
|
||||
self.current_attr_value += content
|
||||
if self.current_tag == 'script' and self.current_attr == 'src':
|
||||
self.append(self.current_attr_value)
|
||||
self.current_attr = None
|
||||
self.current_attr_value = None
|
||||
else:
|
||||
if len(content) == 1:
|
||||
self.current_attr_value = content
|
||||
else:
|
||||
if self.current_tag == 'script' and self.current_attr == 'src':
|
||||
self.append(content)
|
||||
self.current_attr = None
|
||||
self.current_attr_value = None
|
||||
elif content.startswith('"') or content.startswith("'"):
|
||||
if self.current_attr_value is None:
|
||||
self.current_attr_value = content
|
||||
else:
|
||||
self.current_attr_value += content
|
||||
|
||||
|
||||
class VelocityHtmlParser(HtmlDjangoParser):
|
||||
pass
|
||||
|
||||
|
||||
class MyghtyHtmlParser(HtmlDjangoParser):
|
||||
pass
|
||||
|
||||
|
||||
class MasonParser(HtmlDjangoParser):
|
||||
pass
|
||||
|
||||
|
||||
class MakoHtmlParser(HtmlDjangoParser):
|
||||
pass
|
||||
|
||||
|
||||
class CheetahHtmlParser(HtmlDjangoParser):
|
||||
pass
|
||||
|
||||
|
||||
class HtmlGenshiParser(HtmlDjangoParser):
|
||||
pass
|
||||
|
||||
|
||||
class RhtmlParser(HtmlDjangoParser):
|
||||
pass
|
||||
|
||||
|
||||
class HtmlPhpParser(HtmlDjangoParser):
|
||||
pass
|
||||
|
||||
|
||||
class HtmlSmartyParser(HtmlDjangoParser):
|
||||
pass
|
||||
|
||||
|
||||
class EvoqueHtmlParser(HtmlDjangoParser):
|
||||
pass
|
||||
|
||||
|
||||
class ColdfusionHtmlParser(HtmlDjangoParser):
|
||||
pass
|
||||
|
||||
|
||||
class LassoHtmlParser(HtmlDjangoParser):
|
||||
pass
|
||||
|
||||
|
||||
class HandlebarsHtmlParser(HtmlDjangoParser):
|
||||
pass
|
||||
|
||||
|
||||
class YamlJinjaParser(HtmlDjangoParser):
|
||||
pass
|
||||
|
||||
|
||||
class TwigHtmlParser(HtmlDjangoParser):
|
||||
pass
|
@@ -1,33 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
wakatime.languages.unknown
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Parse dependencies from files of unknown language.
|
||||
|
||||
:copyright: (c) 2014 Alan Hamlett.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
from . import TokenParser
|
||||
|
||||
|
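||||
# well-known file names mapped to dependencies; 'exact' True requires an exact name match, otherwise a case-insensitive substring match is used
|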
||||
FILES = {
|
||||
'bower': {'exact': False, 'dependency': 'bower'},
|
||||
'grunt': {'exact': False, 'dependency': 'grunt'},
|
||||
}
|
||||
|
||||
|
||||
class UnknownParser(TokenParser):
|
||||
|
||||
def parse(self):
|
||||
self._process_file_name(os.path.basename(self.source_file))
|
||||
return self.dependencies
|
||||
|
||||
def _process_file_name(self, file_name):
|
||||
for key, value in FILES.items():
|
||||
found = (key == file_name) if value.get('exact') else (key.lower() in file_name.lower())
|
||||
if found:
|
||||
self.append(value['dependency'])
|
@@ -1,14 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
wakatime.exceptions
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Custom exceptions.
|
||||
|
||||
:copyright: (c) 2015 Alan Hamlett.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
|
||||
class NotYetImplemented(Exception):
|
||||
"""This method needs to be implemented."""
|
@@ -1,80 +0,0 @@
|
||||
{
|
||||
"ActionScript": "ActionScript",
|
||||
"ApacheConf": "ApacheConf",
|
||||
"AppleScript": "AppleScript",
|
||||
"ASP": "ASP",
|
||||
"Assembly": "Assembly",
|
||||
"Awk": "Awk",
|
||||
"Bash": "Bash",
|
||||
"Basic": "Basic",
|
||||
"BrightScript": "BrightScript",
|
||||
"C": "C",
|
||||
"C#": "C#",
|
||||
"C++": "C++",
|
||||
"Clojure": "Clojure",
|
||||
"Cocoa": "Cocoa",
|
||||
"CoffeeScript": "CoffeeScript",
|
||||
"ColdFusion": "ColdFusion",
|
||||
"Common Lisp": "Common Lisp",
|
||||
"CSHTML": "CSHTML",
|
||||
"CSS": "CSS",
|
||||
"Dart": "Dart",
|
||||
"Delphi": "Delphi",
|
||||
"Elixir": "Elixir",
|
||||
"Elm": "Elm",
|
||||
"Emacs Lisp": "Emacs Lisp",
|
||||
"Erlang": "Erlang",
|
||||
"F#": "F#",
|
||||
"Fortran": "Fortran",
|
||||
"Go": "Go",
|
||||
"Gous": "Gosu",
|
||||
"Groovy": "Groovy",
|
||||
"Haml": "Haml",
|
||||
"HaXe": "HaXe",
|
||||
"Haskell": "Haskell",
|
||||
"HTML": "HTML",
|
||||
"INI": "INI",
|
||||
"Jade": "Jade",
|
||||
"Java": "Java",
|
||||
"JavaScript": "JavaScript",
|
||||
"JSON": "JSON",
|
||||
"JSX": "JSX",
|
||||
"Kotlin": "Kotlin",
|
||||
"LESS": "LESS",
|
||||
"Lua": "Lua",
|
||||
"Markdown": "Markdown",
|
||||
"Matlab": "Matlab",
|
||||
"Mustache": "Mustache",
|
||||
"OCaml": "OCaml",
|
||||
"Objective-C": "Objective-C",
|
||||
"Objective-C++": "Objective-C++",
|
||||
"Objective-J": "Objective-J",
|
||||
"Perl": "Perl",
|
||||
"PHP": "PHP",
|
||||
"PowerShell": "PowerShell",
|
||||
"Prolog": "Prolog",
|
||||
"Puppet": "Puppet",
|
||||
"Python": "Python",
|
||||
"R": "R",
|
||||
"reStructuredText": "reStructuredText",
|
||||
"Ruby": "Ruby",
|
||||
"Rust": "Rust",
|
||||
"Sass": "Sass",
|
||||
"Scala": "Scala",
|
||||
"Scheme": "Scheme",
|
||||
"SCSS": "SCSS",
|
||||
"Shell": "Shell",
|
||||
"Slim": "Slim",
|
||||
"Smalltalk": "Smalltalk",
|
||||
"SQL": "SQL",
|
||||
"Swift": "Swift",
|
||||
"Text": "Text",
|
||||
"Turing": "Turing",
|
||||
"Twig": "Twig",
|
||||
"TypeScript": "TypeScript",
|
||||
"VB.net": "VB.net",
|
||||
"VimL": "VimL",
|
||||
"XAML": "XAML",
|
||||
"XML": "XML",
|
||||
"YAML": "YAML"
|
||||
}
|
@@ -1,531 +0,0 @@
|
||||
{
|
||||
"a2ps": null,
|
||||
"a65": "Assembly",
|
||||
"aap": null,
|
||||
"abap": null,
|
||||
"abaqus": null,
|
||||
"abc": null,
|
||||
"abel": null,
|
||||
"acedb": null,
|
||||
"ada": null,
|
||||
"aflex": null,
|
||||
"ahdl": null,
|
||||
"alsaconf": null,
|
||||
"amiga": null,
|
||||
"aml": null,
|
||||
"ampl": null,
|
||||
"ant": null,
|
||||
"antlr": null,
|
||||
"apache": null,
|
||||
"apachestyle": null,
|
||||
"arch": null,
|
||||
"art": null,
|
||||
"asm": "Assembly",
|
||||
"asm68k": "Assembly",
|
||||
"asmh8300": "Assembly",
|
||||
"asn": null,
|
||||
"aspperl": null,
|
||||
"aspvbs": null,
|
||||
"asterisk": null,
|
||||
"asteriskvm": null,
|
||||
"atlas": null,
|
||||
"autohotkey": null,
|
||||
"autoit": null,
|
||||
"automake": null,
|
||||
"ave": null,
|
||||
"awk": null,
|
||||
"ayacc": null,
|
||||
"b": null,
|
||||
"baan": null,
|
||||
"basic": "Basic",
|
||||
"bc": null,
|
||||
"bdf": null,
|
||||
"bib": null,
|
||||
"bindzone": null,
|
||||
"blank": null,
|
||||
"bst": null,
|
||||
"btm": null,
|
||||
"bzr": null,
|
||||
"c": "C",
|
||||
"cabal": null,
|
||||
"calendar": null,
|
||||
"catalog": null,
|
||||
"cdl": null,
|
||||
"cdrdaoconf": null,
|
||||
"cdrtoc": null,
|
||||
"cf": null,
|
||||
"cfg": null,
|
||||
"ch": null,
|
||||
"chaiscript": null,
|
||||
"change": null,
|
||||
"changelog": null,
|
||||
"chaskell": null,
|
||||
"cheetah": null,
|
||||
"chill": null,
|
||||
"chordpro": null,
|
||||
"cl": null,
|
||||
"clean": null,
|
||||
"clipper": null,
|
||||
"cmake": null,
|
||||
"cmusrc": null,
|
||||
"cobol": null,
|
||||
"coco": null,
|
||||
"conaryrecipe": null,
|
||||
"conf": null,
|
||||
"config": null,
|
||||
"context": null,
|
||||
"cpp": "C++",
|
||||
"crm": null,
|
||||
"crontab": "Crontab",
|
||||
"cs": "C#",
|
||||
"csc": null,
|
||||
"csh": null,
|
||||
"csp": null,
|
||||
"css": null,
|
||||
"cterm": null,
|
||||
"ctrlh": null,
|
||||
"cucumber": null,
|
||||
"cuda": null,
|
||||
"cupl": null,
|
||||
"cuplsim": null,
|
||||
"cvs": null,
|
||||
"cvsrc": null,
|
||||
"cweb": null,
|
||||
"cynlib": null,
|
||||
"cynpp": null,
|
||||
"d": null,
|
||||
"datascript": null,
|
||||
"dcd": null,
|
||||
"dcl": null,
|
||||
"debchangelog": null,
|
||||
"debcontrol": null,
|
||||
"debsources": null,
|
||||
"def": null,
|
||||
"denyhosts": null,
|
||||
"desc": null,
|
||||
"desktop": null,
|
||||
"dictconf": null,
|
||||
"dictdconf": null,
|
||||
"diff": null,
|
||||
"dircolors": null,
|
||||
"diva": null,
|
||||
"django": null,
|
||||
"dns": null,
|
||||
"docbk": null,
|
||||
"docbksgml": null,
|
||||
"docbkxml": null,
|
||||
"dosbatch": null,
|
||||
"dosini": null,
|
||||
"dot": null,
|
||||
"doxygen": null,
|
||||
"dracula": null,
|
||||
"dsl": null,
|
||||
"dtd": null,
|
||||
"dtml": null,
|
||||
"dtrace": null,
|
||||
"dylan": null,
|
||||
"dylanintr": null,
|
||||
"dylanlid": null,
|
||||
"ecd": null,
|
||||
"edif": null,
|
||||
"eiffel": null,
|
||||
"elf": null,
|
||||
"elinks": null,
|
||||
"elmfilt": null,
|
||||
"erlang": null,
|
||||
"eruby": null,
|
||||
"esmtprc": null,
|
||||
"esqlc": null,
|
||||
"esterel": null,
|
||||
"eterm": null,
|
||||
"eviews": null,
|
||||
"exim": null,
|
||||
"expect": null,
|
||||
"exports": null,
|
||||
"fan": null,
|
||||
"fasm": null,
|
||||
"fdcc": null,
|
||||
"fetchmail": null,
|
||||
"fgl": null,
|
||||
"flexwiki": null,
|
||||
"focexec": null,
|
||||
"form": null,
|
||||
"forth": null,
|
||||
"fortran": null,
|
||||
"foxpro": null,
|
||||
"framescript": null,
|
||||
"freebasic": null,
|
||||
"fstab": null,
|
||||
"fvwm": null,
|
||||
"fvwm2m4": null,
|
||||
"gdb": null,
|
||||
"gdmo": null,
|
||||
"gedcom": null,
|
||||
"git": null,
|
||||
"gitcommit": null,
|
||||
"gitconfig": null,
|
||||
"gitrebase": null,
|
||||
"gitsendemail": null,
|
||||
"gkrellmrc": null,
|
||||
"gnuplot": null,
|
||||
"gp": null,
|
||||
"gpg": null,
|
||||
"grads": null,
|
||||
"gretl": null,
|
||||
"groff": null,
|
||||
"groovy": null,
|
||||
"group": null,
|
||||
"grub": null,
|
||||
"gsp": null,
|
||||
"gtkrc": null,
|
||||
"haml": "Haml",
|
||||
"hamster": null,
|
||||
"haskell": "Haskell",
|
||||
"haste": null,
|
||||
"hastepreproc": null,
|
||||
"hb": null,
|
||||
"help": null,
|
||||
"hercules": null,
|
||||
"hex": null,
|
||||
"hog": null,
|
||||
"hostconf": null,
|
||||
"html": "HTML",
|
||||
"htmlcheetah": "HTML",
|
||||
"htmldjango": "HTML",
|
||||
"htmlm4": "HTML",
|
||||
"htmlos": null,
|
||||
"ia64": null,
|
||||
"ibasic": null,
|
||||
"icemenu": null,
|
||||
"icon": null,
|
||||
"idl": null,
|
||||
"idlang": null,
|
||||
"indent": null,
|
||||
"inform": null,
|
||||
"initex": null,
|
||||
"initng": null,
|
||||
"inittab": null,
|
||||
"ipfilter": null,
|
||||
"ishd": null,
|
||||
"iss": null,
|
||||
"ist": null,
|
||||
"jal": null,
|
||||
"jam": null,
|
||||
"jargon": null,
|
||||
"java": "Java",
|
||||
"javacc": null,
|
||||
"javascript": "JavaScript",
|
||||
"jess": null,
|
||||
"jgraph": null,
|
||||
"jproperties": null,
|
||||
"jsp": null,
|
||||
"kconfig": null,
|
||||
"kix": null,
|
||||
"kscript": null,
|
||||
"kwt": null,
|
||||
"lace": null,
|
||||
"latte": null,
|
||||
"ld": null,
|
||||
"ldapconf": null,
|
||||
"ldif": null,
|
||||
"lex": null,
|
||||
"lftp": null,
|
||||
"lhaskell": "Haskell",
|
||||
"libao": null,
|
||||
"lifelines": null,
|
||||
"lilo": null,
|
||||
"limits": null,
|
||||
"liquid": null,
|
||||
"lisp": null,
|
||||
"lite": null,
|
||||
"litestep": null,
|
||||
"loginaccess": null,
|
||||
"logindefs": null,
|
||||
"logtalk": null,
|
||||
"lotos": null,
|
||||
"lout": null,
|
||||
"lpc": null,
|
||||
"lprolog": null,
|
||||
"lscript": null,
|
||||
"lsl": null,
|
||||
"lss": null,
|
||||
"lua": null,
|
||||
"lynx": null,
|
||||
"m4": null,
|
||||
"mail": null,
|
||||
"mailaliases": null,
|
||||
"mailcap": null,
|
||||
"make": null,
|
||||
"man": null,
|
||||
"manconf": null,
|
||||
"manual": null,
|
||||
"maple": null,
|
||||
"markdown": "Markdown",
|
||||
"masm": null,
|
||||
"mason": null,
|
||||
"master": null,
|
||||
"matlab": null,
|
||||
"maxima": null,
|
||||
"mel": null,
|
||||
"messages": null,
|
||||
"mf": null,
|
||||
"mgl": null,
|
||||
"mgp": null,
|
||||
"mib": null,
|
||||
"mma": null,
|
||||
"mmix": null,
|
||||
"mmp": null,
|
||||
"modconf": null,
|
||||
"model": null,
|
||||
"modsim3": null,
|
||||
"modula2": null,
|
||||
"modula3": null,
|
||||
"monk": null,
|
||||
"moo": null,
|
||||
"mp": null,
|
||||
"mplayerconf": null,
|
||||
"mrxvtrc": null,
|
||||
"msidl": null,
|
||||
"msmessages": null,
|
||||
"msql": null,
|
||||
"mupad": null,
|
||||
"mush": null,
|
||||
"muttrc": null,
|
||||
"mysql": null,
|
||||
"named": null,
|
||||
"nanorc": null,
|
||||
"nasm": null,
|
||||
"nastran": null,
|
||||
"natural": null,
|
||||
"ncf": null,
|
||||
"netrc": null,
|
||||
"netrw": null,
|
||||
"nosyntax": null,
|
||||
"nqc": null,
|
||||
"nroff": null,
|
||||
"nsis": null,
|
||||
"obj": null,
|
||||
"objc": "Objective-C",
|
||||
"objcpp": "Objective-C++",
|
||||
"ocaml": "OCaml",
|
||||
"occam": null,
|
||||
"omnimark": null,
|
||||
"openroad": null,
|
||||
"opl": null,
|
||||
"ora": null,
|
||||
"pamconf": null,
|
||||
"papp": null,
|
||||
"pascal": null,
|
||||
"passwd": null,
|
||||
"pcap": null,
|
||||
"pccts": null,
|
||||
"pdf": null,
|
||||
"perl": "Perl",
|
||||
"perl6": "Perl",
|
||||
"pf": null,
|
||||
"pfmain": null,
|
||||
"php": "PHP",
|
||||
"phtml": "PHP",
|
||||
"pic": null,
|
||||
"pike": null,
|
||||
"pilrc": null,
|
||||
"pine": null,
|
||||
"pinfo": null,
|
||||
"plaintex": null,
|
||||
"plm": null,
|
||||
"plp": null,
|
||||
"plsql": null,
|
||||
"po": null,
|
||||
"pod": null,
|
||||
"postscr": null,
|
||||
"pov": null,
|
||||
"povini": null,
|
||||
"ppd": null,
|
||||
"ppwiz": null,
|
||||
"prescribe": null,
|
||||
"privoxy": null,
|
||||
"procmail": null,
|
||||
"progress": null,
|
||||
"prolog": "Prolog",
|
||||
"promela": null,
|
||||
"protocols": null,
|
||||
"psf": null,
|
||||
"ptcap": null,
|
||||
"purifylog": null,
|
||||
"pyrex": null,
|
||||
"python": "Python",
|
||||
"qf": null,
|
||||
"quake": null,
|
||||
"r": "R",
|
||||
"racc": null,
|
||||
"radiance": null,
|
||||
"ratpoison": null,
|
||||
"rc": null,
|
||||
"rcs": null,
|
||||
"rcslog": null,
|
||||
"readline": null,
|
||||
"rebol": null,
|
||||
"registry": null,
|
||||
"remind": null,
|
||||
"resolv": null,
|
||||
"reva": null,
|
||||
"rexx": null,
|
||||
"rhelp": null,
|
||||
"rib": null,
|
||||
"rnc": null,
|
||||
"rnoweb": null,
|
||||
"robots": null,
|
||||
"rpcgen": null,
|
||||
"rpl": null,
|
||||
"rst": null,
|
||||
"rtf": null,
|
||||
"ruby": "Ruby",
|
||||
"samba": null,
|
||||
"sas": null,
|
||||
"sass": "Sass",
|
||||
"sather": null,
|
||||
"scheme": "Scheme",
|
||||
"scilab": null,
|
||||
"screen": null,
|
||||
"scss": "SCSS",
|
||||
"sd": null,
|
||||
"sdc": null,
|
||||
"sdl": null,
|
||||
"sed": null,
|
||||
"sendpr": null,
|
||||
"sensors": null,
|
||||
"services": null,
|
||||
"setserial": null,
|
||||
"sgml": null,
|
||||
"sgmldecl": null,
|
||||
"sgmllnx": null,
|
||||
"sh": null,
|
||||
"sicad": null,
|
||||
"sieve": null,
|
||||
"simula": null,
|
||||
"sinda": null,
|
||||
"sindacmp": null,
|
||||
"sindaout": null,
|
||||
"sisu": null,
|
||||
"skill": "SKILL",
|
||||
"sl": null,
|
||||
"slang": null,
|
||||
"slice": null,
|
||||
"slpconf": null,
|
||||
"slpreg": null,
|
||||
"slpspi": null,
|
||||
"slrnrc": null,
|
||||
"slrnsc": null,
|
||||
"sm": null,
|
||||
"smarty": null,
|
||||
"smcl": null,
|
||||
"smil": null,
|
||||
"smith": null,
|
||||
"sml": null,
|
||||
"snnsnet": null,
|
||||
"snnspat": null,
|
||||
"snnsres": null,
|
||||
"snobol4": null,
|
||||
"spec": null,
|
||||
"specman": null,
|
||||
"spice": null,
|
||||
"splint": null,
|
||||
"spup": null,
|
||||
"spyce": null,
|
||||
"sql": null,
|
||||
"sqlanywhere": null,
|
||||
"sqlforms": null,
|
||||
"sqlinformix": null,
|
||||
"sqlj": null,
|
||||
"sqloracle": null,
|
||||
"sqr": null,
|
||||
"squid": null,
|
||||
"sshconfig": null,
|
||||
"sshdconfig": null,
|
||||
"st": null,
|
||||
"stata": null,
|
||||
"stp": null,
|
||||
"strace": null,
|
||||
"sudoers": null,
|
||||
"svg": null,
|
||||
"svn": null,
|
||||
"syncolor": null,
|
||||
"synload": null,
|
||||
"syntax": null,
|
||||
"sysctl": null,
|
||||
"tads": null,
|
||||
"tags": null,
|
||||
"tak": null,
|
||||
"takcmp": null,
|
||||
"takout": null,
|
||||
"tar": null,
|
||||
"taskdata": null,
|
||||
"taskedit": null,
|
||||
"tasm": null,
|
||||
"tcl": null,
|
||||
"tcsh": null,
|
||||
"terminfo": null,
|
||||
"tex": null,
|
||||
"texinfo": null,
|
||||
"texmf": null,
|
||||
"tf": null,
|
||||
"tidy": null,
|
||||
"tilde": null,
|
||||
"tli": null,
|
||||
"tpp": null,
|
||||
"trasys": null,
|
||||
"trustees": null,
|
||||
"tsalt": null,
|
||||
"tsscl": null,
|
||||
"tssgm": null,
|
||||
"tssop": null,
|
||||
"uc": null,
|
||||
"udevconf": null,
|
||||
"udevperm": null,
|
||||
"udevrules": null,
|
||||
"uil": null,
|
||||
"updatedb": null,
|
||||
"valgrind": null,
|
||||
"vb": "VB.net",
|
||||
"vera": null,
|
||||
"verilog": null,
|
||||
"verilogams": null,
|
||||
"vgrindefs": null,
|
||||
"vhdl": null,
|
||||
"vim": "VimL",
|
||||
"viminfo": null,
|
||||
"virata": null,
|
||||
"vmasm": null,
|
||||
"voscm": null,
|
||||
"vrml": null,
|
||||
"vsejcl": null,
|
||||
"wdiff": null,
|
||||
"web": null,
|
||||
"webmacro": null,
|
||||
"wget": null,
|
||||
"winbatch": null,
|
||||
"wml": null,
|
||||
"wsh": null,
|
||||
"wsml": null,
|
||||
"wvdial": null,
|
||||
"xbl": null,
|
||||
"xdefaults": null,
|
||||
"xf86conf": null,
|
||||
"xhtml": "HTML",
|
||||
"xinetd": null,
|
||||
"xkb": null,
|
||||
"xmath": null,
|
||||
"xml": "XML",
|
||||
"xmodmap": null,
|
||||
"xpm": null,
|
||||
"xpm2": null,
|
||||
"xquery": null,
|
||||
"xs": null,
|
||||
"xsd": null,
|
||||
"xslt": null,
|
||||
"xxd": null,
|
||||
"yacc": null,
|
||||
"yaml": "YAML",
|
||||
"z8a": null,
|
||||
"zsh": null
|
||||
}
|
@@ -1,136 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
wakatime.logger
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
Provides the configured logger for writing JSON to the log file.
|
||||
|
||||
:copyright: (c) 2013 Alan Hamlett.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import traceback
|
||||
|
||||
from .compat import u
|
||||
from .packages.requests.packages import urllib3
|
||||
try:
|
||||
from collections import OrderedDict # pragma: nocover
|
||||
except ImportError: # pragma: nocover
|
||||
from .packages.ordereddict import OrderedDict
|
||||
try:
|
||||
from .packages import simplejson as json # pragma: nocover
|
||||
except (ImportError, SyntaxError): # pragma: nocover
|
||||
import json
|
||||
|
||||
|
||||
class CustomEncoder(json.JSONEncoder):
|
||||
|
||||
def default(self, obj):
|
||||
if isinstance(obj, bytes): # pragma: nocover
|
||||
obj = u(obj)
|
||||
return json.dumps(obj)
|
||||
try: # pragma: nocover
|
||||
encoded = super(CustomEncoder, self).default(obj)
|
||||
except UnicodeDecodeError: # pragma: nocover
|
||||
obj = u(obj)
|
||||
encoded = super(CustomEncoder, self).default(obj)
|
||||
return encoded
|
||||
|
||||
|
||||
class JsonFormatter(logging.Formatter):
|
||||
|
||||
def setup(self, timestamp, is_write, entity, version, plugin, verbose,
|
||||
warnings=False):
|
||||
self.timestamp = timestamp
|
||||
self.is_write = is_write
|
||||
self.entity = entity
|
||||
self.version = version
|
||||
self.plugin = plugin
|
||||
self.verbose = verbose
|
||||
self.warnings = warnings
|
||||
|
||||
def format(self, record, *args):
|
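||||
# build the record as an OrderedDict so the JSON log fields always appear in a stable order
|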
||||
data = OrderedDict([
|
||||
('now', self.formatTime(record, self.datefmt)),
|
||||
])
|
||||
data['version'] = self.version
|
||||
data['plugin'] = self.plugin
|
||||
data['time'] = self.timestamp
|
||||
if self.verbose:
|
||||
data['caller'] = record.pathname
|
||||
data['lineno'] = record.lineno
|
||||
data['is_write'] = self.is_write
|
||||
data['file'] = self.entity
|
||||
if not self.is_write:
|
||||
del data['is_write']
|
||||
data['level'] = record.levelname
|
||||
data['message'] = record.getMessage() if self.warnings else record.msg
|
||||
if not self.plugin:
|
||||
del data['plugin']
|
||||
return CustomEncoder().encode(data)
|
||||
|
||||
|
||||
def traceback_formatter(*args, **kwargs):
|
||||
if 'level' in kwargs and (kwargs['level'].lower() == 'warn' or kwargs['level'].lower() == 'warning'):
|
||||
logging.getLogger('WakaTime').warning(traceback.format_exc())
|
||||
elif 'level' in kwargs and kwargs['level'].lower() == 'info':
|
||||
logging.getLogger('WakaTime').info(traceback.format_exc())
|
||||
elif 'level' in kwargs and kwargs['level'].lower() == 'debug':
|
||||
logging.getLogger('WakaTime').debug(traceback.format_exc())
|
||||
else:
|
||||
logging.getLogger('WakaTime').error(traceback.format_exc())
|
||||
|
||||
|
||||
def set_log_level(logger, args):
|
||||
level = logging.WARN
|
||||
if args.verbose:
|
||||
level = logging.DEBUG
|
||||
logger.setLevel(level)
|
||||
|
||||
|
||||
def setup_logging(args, version):
|
||||
urllib3.disable_warnings()
|
||||
logger = logging.getLogger('WakaTime')
|
||||
for handler in logger.handlers:
|
||||
logger.removeHandler(handler)
|
||||
set_log_level(logger, args)
|
||||
logfile = args.logfile
|
||||
if not logfile:
|
||||
logfile = '~/.wakatime.log'
|
||||
handler = logging.FileHandler(os.path.expanduser(logfile))
|
||||
formatter = JsonFormatter(datefmt='%Y/%m/%d %H:%M:%S %z')
|
||||
formatter.setup(
|
||||
timestamp=args.timestamp,
|
||||
is_write=args.is_write,
|
||||
entity=args.entity,
|
||||
version=version,
|
||||
plugin=args.plugin,
|
||||
verbose=args.verbose,
|
||||
)
|
||||
handler.setFormatter(formatter)
|
||||
logger.addHandler(handler)
|
||||
|
||||
# add custom traceback logging method
|
||||
logger.traceback = traceback_formatter
|
||||
|
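||||
# also route captured Python warnings into the same log file with their own formatter
|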
||||
warnings_formatter = JsonFormatter(datefmt='%Y/%m/%d %H:%M:%S %z')
|
||||
warnings_formatter.setup(
|
||||
timestamp=args.timestamp,
|
||||
is_write=args.is_write,
|
||||
entity=args.entity,
|
||||
version=version,
|
||||
plugin=args.plugin,
|
||||
verbose=args.verbose,
|
||||
warnings=True,
|
||||
)
|
||||
warnings_handler = logging.FileHandler(os.path.expanduser(logfile))
|
||||
warnings_handler.setFormatter(warnings_formatter)
|
||||
logging.getLogger('py.warnings').addHandler(warnings_handler)
|
||||
try:
|
||||
logging.captureWarnings(True)
|
||||
except AttributeError: # pragma: nocover
|
||||
pass # Python >= 2.7 is needed to capture warnings
|
||||
|
||||
return logger
|
@@ -1,535 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
wakatime.main
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
wakatime module entry point.
|
||||
|
||||
:copyright: (c) 2013 Alan Hamlett.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import base64
|
||||
import logging
|
||||
import os
|
||||
import platform
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
import socket
|
||||
try:
|
||||
import ConfigParser as configparser
|
||||
except ImportError: # pragma: nocover
|
||||
import configparser
|
||||
|
||||
pwd = os.path.dirname(os.path.abspath(__file__))
|
||||
sys.path.insert(0, os.path.dirname(pwd))
|
||||
sys.path.insert(0, os.path.join(pwd, 'packages'))
|
||||
|
||||
from .__about__ import __version__
|
||||
from .compat import u, open, is_py3
|
||||
from .constants import (
|
||||
API_ERROR,
|
||||
AUTH_ERROR,
|
||||
CONFIG_FILE_PARSE_ERROR,
|
||||
SUCCESS,
|
||||
UNKNOWN_ERROR,
|
||||
MALFORMED_HEARTBEAT_ERROR,
|
||||
)
|
||||
from .logger import setup_logging
|
||||
from .offlinequeue import Queue
|
||||
from .packages import argparse
|
||||
from .packages import requests
|
||||
from .packages.requests.exceptions import RequestException
|
||||
from .project import get_project_info
|
||||
from .session_cache import SessionCache
|
||||
from .stats import get_file_stats
|
||||
try:
|
||||
from .packages import simplejson as json # pragma: nocover
|
||||
except (ImportError, SyntaxError): # pragma: nocover
|
||||
import json
|
||||
from .packages import tzlocal
|
||||
|
||||
|
||||
log = logging.getLogger('WakaTime')
|
||||
|
||||
|
||||
class FileAction(argparse.Action):
|
||||
|
||||
def __call__(self, parser, namespace, values, option_string=None):
|
||||
try:
|
||||
if os.path.isfile(values):
|
||||
values = os.path.realpath(values)
|
||||
except: # pragma: nocover
|
||||
pass
|
||||
setattr(namespace, self.dest, values)
|
||||
|
||||
|
||||
def parseConfigFile(configFile=None):
|
||||
"""Returns a configparser.SafeConfigParser instance with configs
|
||||
read from the config file. Default location of the config file is
|
||||
at ~/.wakatime.cfg.
|
||||
"""
|
||||
|
||||
if not configFile:
|
||||
configFile = os.path.join(os.path.expanduser('~'), '.wakatime.cfg')
|
||||
|
||||
configs = configparser.SafeConfigParser()
|
||||
try:
|
||||
with open(configFile, 'r', encoding='utf-8') as fh:
|
||||
try:
|
||||
configs.readfp(fh)
|
||||
except configparser.Error:
|
||||
print(traceback.format_exc())
|
||||
return None
|
||||
except IOError:
|
||||
print(u('Error: Could not read from config file {0}').format(u(configFile)))
|
||||
return configs
|
||||
|
||||
|
||||
def parseArguments():
|
||||
"""Parse command line arguments and configs from ~/.wakatime.cfg.
|
||||
Command line arguments take precedence over config file settings.
|
||||
Returns instances of ArgumentParser and SafeConfigParser.
|
||||
"""
|
||||
|
||||
# define supported command line arguments
|
||||
parser = argparse.ArgumentParser(
|
||||
description='Common interface for the WakaTime api.')
|
||||
parser.add_argument('--entity', dest='entity', metavar='FILE',
|
||||
action=FileAction,
|
||||
help='absolute path to file for the heartbeat; can also be a '+
|
||||
'url, domain, or app when --entity-type is not file')
|
||||
parser.add_argument('--file', dest='file', action=FileAction,
|
||||
help=argparse.SUPPRESS)
|
||||
parser.add_argument('--key', dest='key',
|
||||
help='your wakatime api key; uses api_key from '+
|
||||
'~/.wakatime.cfg by default')
|
||||
parser.add_argument('--write', dest='is_write',
|
||||
action='store_true',
|
||||
help='when set, tells api this heartbeat was triggered from '+
|
||||
'writing to a file')
|
||||
parser.add_argument('--plugin', dest='plugin',
|
||||
help='optional text editor plugin name and version '+
|
||||
'for User-Agent header')
|
||||
parser.add_argument('--time', dest='timestamp', metavar='time',
|
||||
type=float,
|
||||
help='optional floating-point unix epoch timestamp; '+
|
||||
'uses current time by default')
|
||||
parser.add_argument('--lineno', dest='lineno',
|
||||
help='optional line number; current line being edited')
|
||||
parser.add_argument('--cursorpos', dest='cursorpos',
|
||||
help='optional cursor position in the current file')
|
||||
parser.add_argument('--entity-type', dest='entity_type',
|
||||
help='entity type for this heartbeat. can be one of "file", '+
|
||||
'"domain", or "app"; defaults to file.')
|
||||
parser.add_argument('--proxy', dest='proxy',
|
||||
help='optional proxy configuration. Supports HTTPS '+
|
||||
'and SOCKS proxies. For example: '+
|
||||
'https://user:pass@host:port or '+
|
||||
'socks5://user:pass@host:port')
|
||||
parser.add_argument('--project', dest='project',
|
||||
help='optional project name')
|
||||
parser.add_argument('--alternate-project', dest='alternate_project',
|
||||
help='optional alternate project name; auto-discovered project '+
|
||||
'takes priority')
|
||||
parser.add_argument('--alternate-language', dest='alternate_language',
|
||||
help='optional alternate language name; auto-detected language '+
|
||||
'takes priority')
|
||||
parser.add_argument('--hostname', dest='hostname', help='hostname of '+
|
||||
'current machine.')
|
||||
parser.add_argument('--disableoffline', dest='offline',
|
||||
action='store_false',
|
||||
help='disables offline time logging instead of queuing logged time')
|
||||
parser.add_argument('--hidefilenames', dest='hidefilenames',
|
||||
action='store_true',
|
||||
help='obfuscate file names; will not send file names to api')
|
||||
parser.add_argument('--exclude', dest='exclude', action='append',
|
||||
help='filename patterns to exclude from logging; POSIX regex '+
|
||||
'syntax; can be used more than once')
|
||||
parser.add_argument('--include', dest='include', action='append',
|
||||
help='filename patterns to log; when used in combination with '+
|
||||
'--exclude, files matching include will still be logged; '+
|
||||
'POSIX regex syntax; can be used more than once')
|
||||
parser.add_argument('--ignore', dest='ignore', action='append',
|
||||
help=argparse.SUPPRESS)
|
||||
parser.add_argument('--extra-heartbeats', dest='extra_heartbeats',
|
||||
action='store_true',
|
||||
help='reads extra heartbeats from STDIN as a JSON array until EOF')
|
||||
parser.add_argument('--logfile', dest='logfile',
|
||||
help='defaults to ~/.wakatime.log')
|
||||
parser.add_argument('--apiurl', dest='api_url',
|
||||
help='heartbeats api url; for debugging with a local server')
|
||||
parser.add_argument('--timeout', dest='timeout', type=int,
|
||||
help='number of seconds to wait when sending heartbeats to api; '+
|
||||
'defaults to 60 seconds')
|
||||
parser.add_argument('--config', dest='config',
|
||||
help='defaults to ~/.wakatime.cfg')
|
||||
parser.add_argument('--verbose', dest='verbose', action='store_true',
|
||||
help='turns on debug messages in log file')
|
||||
parser.add_argument('--version', action='version', version=__version__)
|
||||
|
||||
# parse command line arguments
|
||||
args = parser.parse_args()
|
||||
|
||||
# use current unix epoch timestamp by default
|
||||
if not args.timestamp:
|
||||
args.timestamp = time.time()
|
||||
|
||||
# parse ~/.wakatime.cfg file
|
||||
configs = parseConfigFile(args.config)
|
||||
if configs is None:
|
||||
return args, configs
|
||||
|
||||
# update args from configs
|
||||
if not args.key:
|
||||
default_key = None
|
||||
if configs.has_option('settings', 'api_key'):
|
||||
default_key = configs.get('settings', 'api_key')
|
||||
elif configs.has_option('settings', 'apikey'):
|
||||
default_key = configs.get('settings', 'apikey')
|
||||
if default_key:
|
||||
args.key = default_key
|
||||
else:
|
||||
parser.error('Missing api key')
|
||||
if not args.entity:
|
||||
if args.file:
|
||||
args.entity = args.file
|
||||
else:
|
||||
parser.error('argument --entity is required')
|
||||
if not args.exclude:
|
||||
args.exclude = []
|
||||
if configs.has_option('settings', 'ignore'):
|
||||
try:
|
||||
for pattern in configs.get('settings', 'ignore').split("\n"):
|
||||
if pattern.strip() != '':
|
||||
args.exclude.append(pattern)
|
||||
except TypeError: # pragma: nocover
|
||||
pass
|
||||
if configs.has_option('settings', 'exclude'):
|
||||
try:
|
||||
for pattern in configs.get('settings', 'exclude').split("\n"):
|
||||
if pattern.strip() != '':
|
||||
args.exclude.append(pattern)
|
||||
except TypeError: # pragma: nocover
|
||||
pass
|
||||
if not args.include:
|
||||
args.include = []
|
||||
if configs.has_option('settings', 'include'):
|
||||
try:
|
||||
for pattern in configs.get('settings', 'include').split("\n"):
|
||||
if pattern.strip() != '':
|
||||
args.include.append(pattern)
|
||||
except TypeError: # pragma: nocover
|
||||
pass
|
||||
if args.offline and configs.has_option('settings', 'offline'):
|
||||
args.offline = configs.getboolean('settings', 'offline')
|
||||
if not args.hidefilenames and configs.has_option('settings', 'hidefilenames'):
|
||||
args.hidefilenames = configs.getboolean('settings', 'hidefilenames')
|
||||
if not args.proxy and configs.has_option('settings', 'proxy'):
|
||||
args.proxy = configs.get('settings', 'proxy')
|
||||
if not args.verbose and configs.has_option('settings', 'verbose'):
|
||||
args.verbose = configs.getboolean('settings', 'verbose')
|
||||
if not args.verbose and configs.has_option('settings', 'debug'):
|
||||
args.verbose = configs.getboolean('settings', 'debug')
|
||||
if not args.logfile and configs.has_option('settings', 'logfile'):
|
||||
args.logfile = configs.get('settings', 'logfile')
|
||||
if not args.api_url and configs.has_option('settings', 'api_url'):
|
||||
args.api_url = configs.get('settings', 'api_url')
|
||||
if not args.timeout and configs.has_option('settings', 'timeout'):
|
||||
try:
|
||||
args.timeout = int(configs.get('settings', 'timeout'))
|
||||
except ValueError:
|
||||
print(traceback.format_exc())
|
||||
|
||||
return args, configs
|
||||
|
||||
|
||||
def should_exclude(entity, include, exclude):
|
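||||
# returns False when the entity should be logged, otherwise the exclude pattern that matched
|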
||||
if entity is not None and entity.strip() != '':
|
||||
try:
|
||||
for pattern in include:
|
||||
try:
|
||||
compiled = re.compile(pattern, re.IGNORECASE)
|
||||
if compiled.search(entity):
|
||||
return False
|
||||
except re.error as ex:
|
||||
log.warning(u('Regex error ({msg}) for include pattern: {pattern}').format(
|
||||
msg=u(ex),
|
||||
pattern=u(pattern),
|
||||
))
|
||||
except TypeError: # pragma: nocover
|
||||
pass
|
||||
try:
|
||||
for pattern in exclude:
|
||||
try:
|
||||
compiled = re.compile(pattern, re.IGNORECASE)
|
||||
if compiled.search(entity):
|
||||
return pattern
|
||||
except re.error as ex:
|
||||
log.warning(u('Regex error ({msg}) for exclude pattern: {pattern}').format(
|
||||
msg=u(ex),
|
||||
pattern=u(pattern),
|
||||
))
|
||||
except TypeError: # pragma: nocover
|
||||
pass
|
||||
return False
|
||||
|
||||
|
||||
def get_user_agent(plugin):
|
||||
ver = sys.version_info
|
||||
python_version = '%d.%d.%d.%s.%d' % (ver[0], ver[1], ver[2], ver[3], ver[4])
|
||||
user_agent = u('wakatime/{ver} ({platform}) Python{py_ver}').format(
|
||||
ver=u(__version__),
|
||||
platform=u(platform.platform()),
|
||||
py_ver=python_version,
|
||||
)
|
||||
if plugin:
|
||||
user_agent = u('{user_agent} {plugin}').format(
|
||||
user_agent=user_agent,
|
||||
plugin=u(plugin),
|
||||
)
|
||||
else:
|
||||
user_agent = u('{user_agent} Unknown/0').format(
|
||||
user_agent=user_agent,
|
||||
)
|
||||
return user_agent
|
||||
|
||||
|
||||
def send_heartbeat(project=None, branch=None, hostname=None, stats={}, key=None,
|
||||
entity=None, timestamp=None, is_write=None, plugin=None,
|
||||
offline=None, entity_type='file', hidefilenames=None,
|
||||
proxy=None, api_url=None, timeout=None, **kwargs):
|
||||
"""Sends heartbeat as POST request to WakaTime api server.
|
||||
|
||||
Returns `SUCCESS` when heartbeat was sent, otherwise returns an
|
||||
error code constant.
|
||||
"""
|
||||
|
||||
if not api_url:
|
||||
api_url = 'https://api.wakatime.com/api/v1/heartbeats'
|
||||
if not timeout:
|
||||
timeout = 60
|
||||
log.debug('Sending heartbeat to api at %s' % api_url)
|
||||
data = {
|
||||
'time': timestamp,
|
||||
'entity': entity,
|
||||
'type': entity_type,
|
||||
}
|
||||
if hidefilenames and entity is not None and entity_type == 'file':
|
||||
extension = u(os.path.splitext(data['entity'])[1])
|
||||
data['entity'] = u('HIDDEN{0}').format(extension)
|
||||
if stats.get('lines'):
|
||||
data['lines'] = stats['lines']
|
||||
if stats.get('language'):
|
||||
data['language'] = stats['language']
|
||||
if stats.get('dependencies'):
|
||||
data['dependencies'] = stats['dependencies']
|
||||
if stats.get('lineno'):
|
||||
data['lineno'] = stats['lineno']
|
||||
if stats.get('cursorpos'):
|
||||
data['cursorpos'] = stats['cursorpos']
|
||||
if is_write:
|
||||
data['is_write'] = is_write
|
||||
if project:
|
||||
data['project'] = project
|
||||
if branch:
|
||||
data['branch'] = branch
|
||||
log.debug(data)
|
||||
|
||||
# setup api request
|
||||
request_body = json.dumps(data)
|
||||
api_key = u(base64.b64encode(str.encode(key) if is_py3 else key))
|
||||
auth = u('Basic {api_key}').format(api_key=api_key)
|
||||
headers = {
|
||||
'User-Agent': get_user_agent(plugin),
|
||||
'Content-Type': 'application/json',
|
||||
'Accept': 'application/json',
|
||||
'Authorization': auth,
|
||||
}
|
||||
if hostname:
|
||||
headers['X-Machine-Name'] = u(hostname).encode('utf-8')
|
||||
proxies = {}
|
||||
if proxy:
|
||||
proxies['https'] = proxy
|
||||
|
||||
# add Olson timezone to request
|
||||
try:
|
||||
tz = tzlocal.get_localzone()
|
||||
except:
|
||||
tz = None
|
||||
if tz:
|
||||
headers['TimeZone'] = u(tz.zone).encode('utf-8')
|
||||
|
||||
session_cache = SessionCache()
|
||||
session = session_cache.get()
|
||||
|
||||
# log time to api
|
||||
response = None
|
||||
try:
|
||||
response = session.post(api_url, data=request_body, headers=headers,
|
||||
proxies=proxies, timeout=timeout)
|
||||
except RequestException:
|
||||
exception_data = {
|
||||
sys.exc_info()[0].__name__: u(sys.exc_info()[1]),
|
||||
}
|
||||
if log.isEnabledFor(logging.DEBUG):
|
||||
exception_data['traceback'] = traceback.format_exc()
|
||||
if offline:
|
||||
queue = Queue()
|
||||
queue.push(data, json.dumps(stats), plugin)
|
||||
if log.isEnabledFor(logging.DEBUG):
|
||||
log.warn(exception_data)
|
||||
else:
|
||||
log.error(exception_data)
|
||||
else:
|
||||
code = response.status_code if response is not None else None
|
||||
content = response.text if response is not None else None
|
||||
if code == requests.codes.created or code == requests.codes.accepted:
|
||||
log.debug({
|
||||
'response_code': code,
|
||||
})
|
||||
session_cache.save(session)
|
||||
return SUCCESS
|
||||
if offline:
|
||||
if code != 400:
|
||||
queue = Queue()
|
||||
queue.push(data, json.dumps(stats), plugin)
|
||||
if code == 401:
|
||||
log.error({
|
||||
'response_code': code,
|
||||
'response_content': content,
|
||||
})
|
||||
session_cache.delete()
|
||||
return AUTH_ERROR
|
||||
elif log.isEnabledFor(logging.DEBUG):
|
||||
log.warn({
|
||||
'response_code': code,
|
||||
'response_content': content,
|
||||
})
|
||||
else:
|
||||
log.error({
|
||||
'response_code': code,
|
||||
'response_content': content,
|
||||
})
|
||||
else:
|
||||
log.error({
|
||||
'response_code': code,
|
||||
'response_content': content,
|
||||
})
|
||||
session_cache.delete()
|
||||
return API_ERROR
|
||||
|
||||
|
||||
def sync_offline_heartbeats(args, hostname):
|
||||
"""Sends all heartbeats which were cached in the offline Queue."""
|
||||
|
||||
queue = Queue()
|
||||
while True:
|
||||
heartbeat = queue.pop()
|
||||
if heartbeat is None:
|
||||
break
|
||||
status = send_heartbeat(
|
||||
project=heartbeat['project'],
|
||||
entity=heartbeat['entity'],
|
||||
timestamp=heartbeat['time'],
|
||||
branch=heartbeat['branch'],
|
||||
hostname=hostname,
|
||||
stats=json.loads(heartbeat['stats']),
|
||||
key=args.key,
|
||||
is_write=heartbeat['is_write'],
|
||||
plugin=heartbeat['plugin'],
|
||||
offline=args.offline,
|
||||
hidefilenames=args.hidefilenames,
|
||||
entity_type=heartbeat['type'],
|
||||
proxy=args.proxy,
|
||||
api_url=args.api_url,
|
||||
timeout=args.timeout,
|
||||
)
|
||||
if status != SUCCESS:
|
||||
if status == AUTH_ERROR:
|
||||
return AUTH_ERROR
|
||||
break
|
||||
return SUCCESS
|
||||
|
||||
|
||||
def process_heartbeat(args, configs, hostname, heartbeat):
|
||||
exclude = should_exclude(heartbeat['entity'], args.include, args.exclude)
|
||||
if exclude is not False:
|
||||
log.debug(u('Skipping because matches exclude pattern: {pattern}').format(
|
||||
pattern=u(exclude),
|
||||
))
|
||||
return SUCCESS
|
||||
|
||||
if heartbeat.get('entity_type') not in ['file', 'domain', 'app']:
|
||||
heartbeat['entity_type'] = 'file'
|
||||
|
||||
if heartbeat['entity_type'] != 'file' or os.path.isfile(heartbeat['entity']):
|
||||
|
||||
stats = get_file_stats(heartbeat['entity'],
|
||||
entity_type=heartbeat['entity_type'],
|
||||
lineno=heartbeat.get('lineno'),
|
||||
cursorpos=heartbeat.get('cursorpos'),
|
||||
plugin=args.plugin,
|
||||
alternate_language=heartbeat.get('alternate_language'))
|
||||
|
||||
project = heartbeat.get('project') or heartbeat.get('alternate_project')
|
||||
branch = None
|
||||
if heartbeat['entity_type'] == 'file':
|
||||
project, branch = get_project_info(configs, heartbeat)
|
||||
|
||||
heartbeat['project'] = project
|
||||
heartbeat['branch'] = branch
|
||||
heartbeat['stats'] = stats
|
||||
heartbeat['hostname'] = hostname
|
||||
heartbeat['timeout'] = args.timeout
|
||||
heartbeat['key'] = args.key
|
||||
heartbeat['plugin'] = args.plugin
|
||||
heartbeat['offline'] = args.offline
|
||||
heartbeat['hidefilenames'] = args.hidefilenames
|
||||
heartbeat['proxy'] = args.proxy
|
||||
heartbeat['api_url'] = args.api_url
|
||||
|
||||
return send_heartbeat(**heartbeat)
|
||||
|
||||
else:
|
||||
log.debug('File does not exist; ignoring this heartbeat.')
|
||||
return SUCCESS
|
||||
|
||||
|
||||
def execute(argv=None):
|
||||
if argv:
|
||||
sys.argv = ['wakatime'] + argv
|
||||
|
||||
args, configs = parseArguments()
|
||||
if configs is None:
|
||||
return CONFIG_FILE_PARSE_ERROR
|
||||
|
||||
setup_logging(args, __version__)
|
||||
|
||||
try:
|
||||
|
||||
hostname = args.hostname or socket.gethostname()
|
||||
|
||||
heartbeat = vars(args)
|
||||
retval = process_heartbeat(args, configs, hostname, heartbeat)
|
||||
|
||||
if args.extra_heartbeats:
|
||||
try:
|
||||
for heartbeat in json.loads(sys.stdin.readline()):
|
||||
retval = process_heartbeat(args, configs, hostname, heartbeat)
|
||||
except json.JSONDecodeError:
|
||||
retval = MALFORMED_HEARTBEAT_ERROR
|
||||
|
||||
if retval == SUCCESS:
|
||||
retval = sync_offline_heartbeats(args, hostname)
|
||||
|
||||
return retval
|
||||
|
||||
except:
|
||||
log.traceback()
|
||||
print(traceback.format_exc())
|
||||
return UNKNOWN_ERROR
|
@@ -1,129 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
wakatime.offlinequeue
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Queue for saving heartbeats while offline.
|
||||
|
||||
:copyright: (c) 2014 Alan Hamlett.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
|
||||
import logging
|
||||
import os
|
||||
import traceback
|
||||
from time import sleep
|
||||
|
||||
try:
|
||||
import sqlite3
|
||||
HAS_SQL = True
|
||||
except ImportError: # pragma: nocover
|
||||
HAS_SQL = False
|
||||
|
||||
from .compat import u
|
||||
|
||||
|
||||
log = logging.getLogger('WakaTime')
|
||||
|
||||
|
||||
class Queue(object):
|
||||
db_file = os.path.join(os.path.expanduser('~'), '.wakatime.db')
|
||||
table_name = 'heartbeat_1'
|
||||
|
||||
def get_db_file(self):
|
||||
return self.db_file
|
||||
|
||||
def connect(self):
|
||||
conn = sqlite3.connect(self.get_db_file())
|
||||
c = conn.cursor()
|
||||
c.execute('''CREATE TABLE IF NOT EXISTS {0} (
|
||||
entity text,
|
||||
type text,
|
||||
time real,
|
||||
project text,
|
||||
branch text,
|
||||
is_write integer,
|
||||
stats text,
|
||||
misc text,
|
||||
plugin text)
|
||||
'''.format(self.table_name))
|
||||
return (conn, c)
|
||||
|
||||
def push(self, data, stats, plugin, misc=None):
|
||||
if not HAS_SQL: # pragma: nocover
|
||||
return
|
||||
try:
|
||||
conn, c = self.connect()
|
||||
heartbeat = {
|
||||
'entity': u(data.get('entity')),
|
||||
'type': u(data.get('type')),
|
||||
'time': data.get('time'),
|
||||
'project': u(data.get('project')),
|
||||
'branch': u(data.get('branch')),
|
||||
'is_write': 1 if data.get('is_write') else 0,
|
||||
'stats': u(stats),
|
||||
'misc': u(misc),
|
||||
'plugin': u(plugin),
|
||||
}
|
||||
c.execute('INSERT INTO {0} VALUES (:entity,:type,:time,:project,:branch,:is_write,:stats,:misc,:plugin)'.format(self.table_name), heartbeat)
|
||||
conn.commit()
|
||||
conn.close()
|
||||
except sqlite3.Error:
|
||||
log.error(traceback.format_exc())
|
||||
|
||||
def pop(self):
|
||||
if not HAS_SQL: # pragma: nocover
|
||||
return None
|
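||||
# retry a few times with a short sleep in case the sqlite database is temporarily locked
|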
||||
tries = 3
|
||||
wait = 0.1
|
||||
heartbeat = None
|
||||
try:
|
||||
conn, c = self.connect()
|
||||
except sqlite3.Error:
|
||||
log.debug(traceback.format_exc())
|
||||
return None
|
||||
loop = True
|
||||
while loop and tries > -1:
|
||||
try:
|
||||
c.execute('BEGIN IMMEDIATE')
|
||||
c.execute('SELECT * FROM {0} LIMIT 1'.format(self.table_name))
|
||||
row = c.fetchone()
|
||||
if row is not None:
|
||||
values = []
|
||||
clauses = []
|
||||
index = 0
|
||||
for row_name in ['entity', 'type', 'time', 'project', 'branch', 'is_write']:
|
||||
if row[index] is not None:
|
||||
clauses.append('{0}=?'.format(row_name))
|
||||
values.append(row[index])
|
||||
else: # pragma: nocover
|
||||
clauses.append('{0} IS NULL'.format(row_name))
|
||||
index += 1
|
||||
if len(values) > 0:
|
||||
c.execute('DELETE FROM {0} WHERE {1}'.format(self.table_name, ' AND '.join(clauses)), values)
|
||||
else: # pragma: nocover
|
||||
c.execute('DELETE FROM {0} WHERE {1}'.format(self.table_name, ' AND '.join(clauses)))
|
||||
conn.commit()
|
||||
if row is not None:
|
||||
heartbeat = {
|
||||
'entity': row[0],
|
||||
'type': row[1],
|
||||
'time': row[2],
|
||||
'project': row[3],
|
||||
'branch': row[4],
|
||||
'is_write': True if row[5] == 1 else False,
|
||||
'stats': row[6],
|
||||
'misc': row[7],
|
||||
'plugin': row[8],
|
||||
}
|
||||
loop = False
|
||||
except sqlite3.Error: # pragma: nocover
|
||||
log.debug(traceback.format_exc())
|
||||
sleep(wait)
|
||||
tries -= 1
|
||||
try:
|
||||
conn.close()
|
||||
except sqlite3.Error: # pragma: nocover
|
||||
log.debug(traceback.format_exc())
|
||||
return heartbeat
|
@@ -1,14 +0,0 @@
|
||||
import os
|
||||
import sys
|
||||
|
||||
from ..compat import is_py2
|
||||
|
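||||
# prepend the Python 2 or Python 3 specific vendored packages to sys.path before importing them
|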
||||
if is_py2:
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'py2'))
|
||||
else:
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'py3'))
|
||||
|
||||
import tzlocal
|
||||
from pygments.lexers import get_lexer_by_name, guess_lexer_for_filename
|
||||
from pygments.modeline import get_filetype_from_buffer
|
||||
from pygments.util import ClassNotFound
|
File diff suppressed because it is too large
@@ -1,127 +0,0 @@
|
||||
# Copyright (c) 2009 Raymond Hettinger
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person
|
||||
# obtaining a copy of this software and associated documentation files
|
||||
# (the "Software"), to deal in the Software without restriction,
|
||||
# including without limitation the rights to use, copy, modify, merge,
|
||||
# publish, distribute, sublicense, and/or sell copies of the Software,
|
||||
# and to permit persons to whom the Software is furnished to do so,
|
||||
# subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be
|
||||
# included in all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
||||
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
||||
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
||||
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
# OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
from UserDict import DictMixin
|
||||
|
||||
class OrderedDict(dict, DictMixin):
|
||||
|
||||
def __init__(self, *args, **kwds):
|
||||
if len(args) > 1:
|
||||
raise TypeError('expected at most 1 arguments, got %d' % len(args))
|
||||
try:
|
||||
self.__end
|
||||
except AttributeError:
|
||||
self.clear()
|
||||
self.update(*args, **kwds)
|
||||
|
||||
def clear(self):
|
||||
self.__end = end = []
|
||||
end += [None, end, end] # sentinel node for doubly linked list
|
||||
self.__map = {} # key --> [key, prev, next]
|
||||
dict.clear(self)
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
if key not in self:
|
||||
end = self.__end
|
||||
curr = end[1]
|
||||
curr[2] = end[1] = self.__map[key] = [key, curr, end]
|
||||
dict.__setitem__(self, key, value)
|
||||
|
||||
def __delitem__(self, key):
|
||||
dict.__delitem__(self, key)
|
||||
key, prev, next = self.__map.pop(key)
|
||||
prev[2] = next
|
||||
next[1] = prev
|
||||
|
||||
def __iter__(self):
|
||||
end = self.__end
|
||||
curr = end[2]
|
||||
while curr is not end:
|
||||
yield curr[0]
|
||||
curr = curr[2]
|
||||
|
||||
def __reversed__(self):
|
||||
end = self.__end
|
||||
curr = end[1]
|
||||
while curr is not end:
|
||||
yield curr[0]
|
||||
curr = curr[1]
|
||||
|
||||
def popitem(self, last=True):
|
||||
if not self:
|
||||
raise KeyError('dictionary is empty')
|
||||
if last:
|
||||
key = reversed(self).next()
|
||||
else:
|
||||
key = iter(self).next()
|
||||
value = self.pop(key)
|
||||
return key, value
|
||||
|
||||
def __reduce__(self):
|
||||
items = [[k, self[k]] for k in self]
|
||||
tmp = self.__map, self.__end
|
||||
del self.__map, self.__end
|
||||
inst_dict = vars(self).copy()
|
||||
self.__map, self.__end = tmp
|
||||
if inst_dict:
|
||||
return (self.__class__, (items,), inst_dict)
|
||||
return self.__class__, (items,)
|
||||
|
||||
def keys(self):
|
||||
return list(self)
|
||||
|
||||
setdefault = DictMixin.setdefault
|
||||
update = DictMixin.update
|
||||
pop = DictMixin.pop
|
||||
values = DictMixin.values
|
||||
items = DictMixin.items
|
||||
iterkeys = DictMixin.iterkeys
|
||||
itervalues = DictMixin.itervalues
|
||||
iteritems = DictMixin.iteritems
|
||||
|
||||
def __repr__(self):
|
||||
if not self:
|
||||
return '%s()' % (self.__class__.__name__,)
|
||||
return '%s(%r)' % (self.__class__.__name__, self.items())
|
||||
|
||||
def copy(self):
|
||||
return self.__class__(self)
|
||||
|
||||
@classmethod
|
||||
def fromkeys(cls, iterable, value=None):
|
||||
d = cls()
|
||||
for key in iterable:
|
||||
d[key] = value
|
||||
return d
|
||||
|
||||
def __eq__(self, other):
|
||||
if isinstance(other, OrderedDict):
|
||||
if len(self) != len(other):
|
||||
return False
|
||||
for p, q in zip(self.items(), other.items()):
|
||||
if p != q:
|
||||
return False
|
||||
return True
|
||||
return dict.__eq__(self, other)
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self == other
|
File diff suppressed because it is too large
@@ -1,48 +0,0 @@
|
||||
'''
|
||||
Custom exceptions raised by pytz.
|
||||
'''
|
||||
|
||||
__all__ = [
|
||||
'UnknownTimeZoneError', 'InvalidTimeError', 'AmbiguousTimeError',
|
||||
'NonExistentTimeError',
|
||||
]
|
||||
|
||||
|
||||
class UnknownTimeZoneError(KeyError):
|
||||
'''Exception raised when pytz is passed an unknown timezone.
|
||||
|
||||
>>> isinstance(UnknownTimeZoneError(), LookupError)
|
||||
True
|
||||
|
||||
This class is actually a subclass of KeyError to provide backwards
|
||||
compatibility with code relying on the undocumented behavior of earlier
|
||||
pytz releases.
|
||||
|
||||
>>> isinstance(UnknownTimeZoneError(), KeyError)
|
||||
True
|
||||
'''
|
||||
pass
|
||||
|
||||
|
||||
class InvalidTimeError(Exception):
|
||||
'''Base class for invalid time exceptions.'''
|
||||
|
||||
|
||||
class AmbiguousTimeError(InvalidTimeError):
|
||||
'''Exception raised when attempting to create an ambiguous wallclock time.
|
||||
|
||||
At the end of a DST transition period, a particular wallclock time will
|
||||
occur twice (once before the clocks are set back, once after). Both
|
||||
possibilities may be correct, unless further information is supplied.
|
||||
|
||||
See DstTzInfo.normalize() for more info
|
||||
'''
|
||||
|
||||
|
||||
class NonExistentTimeError(InvalidTimeError):
|
||||
'''Exception raised when attempting to create a wallclock time that
|
||||
cannot exist.
|
||||
|
||||
At the start of a DST transition period, the wallclock time jumps forward.
|
||||
The instants jumped over never occur.
|
||||
'''
|
@@ -1,168 +0,0 @@
|
||||
from threading import RLock
|
||||
try:
|
||||
from UserDict import DictMixin
|
||||
except ImportError:
|
||||
from collections import Mapping as DictMixin
|
||||
|
||||
|
||||
# With lazy loading, we might end up with multiple threads triggering
|
||||
# it at the same time. We need a lock.
|
||||
_fill_lock = RLock()
|
||||
|
||||
|
||||
class LazyDict(DictMixin):
|
||||
"""Dictionary populated on first use."""
|
||||
data = None
|
||||
def __getitem__(self, key):
|
||||
if self.data is None:
|
||||
_fill_lock.acquire()
|
||||
try:
|
||||
if self.data is None:
|
||||
self._fill()
|
||||
finally:
|
||||
_fill_lock.release()
|
||||
return self.data[key.upper()]
|
||||
|
||||
def __contains__(self, key):
|
||||
if self.data is None:
|
||||
_fill_lock.acquire()
|
||||
try:
|
||||
if self.data is None:
|
||||
self._fill()
|
||||
finally:
|
||||
_fill_lock.release()
|
||||
return key in self.data
|
||||
|
||||
def __iter__(self):
|
||||
if self.data is None:
|
||||
_fill_lock.acquire()
|
||||
try:
|
||||
if self.data is None:
|
||||
self._fill()
|
||||
finally:
|
||||
_fill_lock.release()
|
||||
return iter(self.data)
|
||||
|
||||
def __len__(self):
|
||||
if self.data is None:
|
||||
_fill_lock.acquire()
|
||||
try:
|
||||
if self.data is None:
|
||||
self._fill()
|
||||
finally:
|
||||
_fill_lock.release()
|
||||
return len(self.data)
|
||||
|
||||
def keys(self):
|
||||
if self.data is None:
|
||||
_fill_lock.acquire()
|
||||
try:
|
||||
if self.data is None:
|
||||
self._fill()
|
||||
finally:
|
||||
_fill_lock.release()
|
||||
return self.data.keys()
|
||||
|
||||
|
||||
class LazyList(list):
|
||||
"""List populated on first use."""
|
||||
|
||||
_props = [
|
||||
'__str__', '__repr__', '__unicode__',
|
||||
'__hash__', '__sizeof__', '__cmp__',
|
||||
'__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',
|
||||
'append', 'count', 'index', 'extend', 'insert', 'pop', 'remove',
|
||||
'reverse', 'sort', '__add__', '__radd__', '__iadd__', '__mul__',
|
||||
'__rmul__', '__imul__', '__contains__', '__len__', '__nonzero__',
|
||||
'__getitem__', '__setitem__', '__delitem__', '__iter__',
|
||||
'__reversed__', '__getslice__', '__setslice__', '__delslice__']
|
||||
|
||||
def __new__(cls, fill_iter=None):
|
||||
|
||||
if fill_iter is None:
|
||||
return list()
|
||||
|
||||
# We need a new class as we will be dynamically messing with its
|
||||
# methods.
|
||||
class LazyList(list):
|
||||
pass
|
||||
|
||||
fill_iter = [fill_iter]
|
||||
|
||||
def lazy(name):
|
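||||
# wrap each list method so the first call fills the list from fill_iter, then the wrappers are removed and later calls go straight to list
|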
||||
def _lazy(self, *args, **kw):
|
||||
_fill_lock.acquire()
|
||||
try:
|
||||
if len(fill_iter) > 0:
|
||||
list.extend(self, fill_iter.pop())
|
||||
for method_name in cls._props:
|
||||
delattr(LazyList, method_name)
|
||||
finally:
|
||||
_fill_lock.release()
|
||||
return getattr(list, name)(self, *args, **kw)
|
||||
return _lazy
|
||||
|
||||
for name in cls._props:
|
||||
setattr(LazyList, name, lazy(name))
|
||||
|
||||
new_list = LazyList()
|
||||
return new_list
|
||||
|
||||
# Not all versions of Python declare the same magic methods.
|
||||
# Filter out properties that don't exist in this version of Python
|
||||
# from the list.
|
||||
LazyList._props = [prop for prop in LazyList._props if hasattr(list, prop)]
|
||||
|
||||
|
||||
class LazySet(set):
|
||||
"""Set populated on first use."""
|
||||
|
||||
_props = (
|
||||
'__str__', '__repr__', '__unicode__',
|
||||
'__hash__', '__sizeof__', '__cmp__',
|
||||
'__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',
|
||||
'__contains__', '__len__', '__nonzero__',
|
||||
'__getitem__', '__setitem__', '__delitem__', '__iter__',
|
||||
'__sub__', '__and__', '__xor__', '__or__',
|
||||
'__rsub__', '__rand__', '__rxor__', '__ror__',
|
||||
'__isub__', '__iand__', '__ixor__', '__ior__',
|
||||
'add', 'clear', 'copy', 'difference', 'difference_update',
|
||||
'discard', 'intersection', 'intersection_update', 'isdisjoint',
|
||||
'issubset', 'issuperset', 'pop', 'remove',
|
||||
'symmetric_difference', 'symmetric_difference_update',
|
||||
'union', 'update')
|
||||
|
||||
def __new__(cls, fill_iter=None):
|
||||
|
||||
if fill_iter is None:
|
||||
return set()
|
||||
|
||||
class LazySet(set):
|
||||
pass
|
||||
|
||||
fill_iter = [fill_iter]
|
||||
|
||||
def lazy(name):
|
||||
def _lazy(self, *args, **kw):
|
||||
_fill_lock.acquire()
|
||||
try:
|
||||
if len(fill_iter) > 0:
|
||||
for i in fill_iter.pop():
|
||||
set.add(self, i)
|
||||
for method_name in cls._props:
|
||||
delattr(LazySet, method_name)
|
||||
finally:
|
||||
_fill_lock.release()
|
||||
return getattr(set, name)(self, *args, **kw)
|
||||
return _lazy
|
||||
|
||||
for name in cls._props:
|
||||
setattr(LazySet, name, lazy(name))
|
||||
|
||||
new_set = LazySet()
|
||||
return new_set
|
||||
|
||||
# Not all versions of Python declare the same magic methods.
|
||||
# Filter out properties that don't exist in this version of Python
|
||||
# from the list.
|
||||
LazySet._props = [prop for prop in LazySet._props if hasattr(set, prop)]
|
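
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: subclasses are meant
# to supply a _fill() method that populates self.data exactly once; the
# double-checked locking above makes the first lookup thread safe. The class
# name and payload below are hypothetical (pytz itself uses subclasses like
# this for its country name and zone tables).
#
# class _CountryNames(LazyDict):
#     def _fill(self):
#         self.data = {'NZ': 'New Zealand', 'US': 'United States'}
#
# names = _CountryNames()
# names['nz']   # first access triggers _fill(); lookups upper-case the key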
@@ -1,127 +0,0 @@
'''
Reference tzinfo implementations from the Python docs.
Used for testing against as they are only correct for the years
1987 to 2006. Do not use these for real code.
'''

from datetime import tzinfo, timedelta, datetime
from pytz import utc, UTC, HOUR, ZERO

# A class building tzinfo objects for fixed-offset time zones.
# Note that FixedOffset(0, "UTC") is a different way to build a
# UTC tzinfo object.


class FixedOffset(tzinfo):
    """Fixed offset in minutes east from UTC."""

    def __init__(self, offset, name):
        self.__offset = timedelta(minutes=offset)
        self.__name = name

    def utcoffset(self, dt):
        return self.__offset

    def tzname(self, dt):
        return self.__name

    def dst(self, dt):
        return ZERO

# A class capturing the platform's idea of local time.

import time as _time

STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
    DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
    DSTOFFSET = STDOFFSET

DSTDIFF = DSTOFFSET - STDOFFSET


class LocalTimezone(tzinfo):

    def utcoffset(self, dt):
        if self._isdst(dt):
            return DSTOFFSET
        else:
            return STDOFFSET

    def dst(self, dt):
        if self._isdst(dt):
            return DSTDIFF
        else:
            return ZERO

    def tzname(self, dt):
        return _time.tzname[self._isdst(dt)]

    def _isdst(self, dt):
        tt = (dt.year, dt.month, dt.day,
              dt.hour, dt.minute, dt.second,
              dt.weekday(), 0, -1)
        stamp = _time.mktime(tt)
        tt = _time.localtime(stamp)
        return tt.tm_isdst > 0

Local = LocalTimezone()

# A complete implementation of current DST rules for major US time zones.


def first_sunday_on_or_after(dt):
    days_to_go = 6 - dt.weekday()
    if days_to_go:
        dt += timedelta(days_to_go)
    return dt

# In the US, DST starts at 2am (standard time) on the first Sunday in April.
DSTSTART = datetime(1, 4, 1, 2)
# and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct.
# which is the first Sunday on or after Oct 25.
DSTEND = datetime(1, 10, 25, 1)


class USTimeZone(tzinfo):

    def __init__(self, hours, reprname, stdname, dstname):
        self.stdoffset = timedelta(hours=hours)
        self.reprname = reprname
        self.stdname = stdname
        self.dstname = dstname

    def __repr__(self):
        return self.reprname

    def tzname(self, dt):
        if self.dst(dt):
            return self.dstname
        else:
            return self.stdname

    def utcoffset(self, dt):
        return self.stdoffset + self.dst(dt)

    def dst(self, dt):
        if dt is None or dt.tzinfo is None:
            # An exception may be sensible here, in one or both cases.
            # It depends on how you want to treat them. The default
            # fromutc() implementation (called by the default astimezone()
            # implementation) passes a datetime with dt.tzinfo is self.
            return ZERO
        assert dt.tzinfo is self

        # Find first Sunday in April & the last in October.
        start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year))
        end = first_sunday_on_or_after(DSTEND.replace(year=dt.year))

        # Can't compare naive to aware objects, so strip the timezone from
        # dt first.
        if start <= dt.replace(tzinfo=None) < end:
            return HOUR
        else:
            return ZERO

Eastern = USTimeZone(-5, "Eastern", "EST", "EDT")
Central = USTimeZone(-6, "Central", "CST", "CDT")
Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
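
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: these reference
# implementations exist only so the test suite has something independent to
# compare pytz against (and the US rules above are only valid 1987-2006).
#
# from datetime import datetime
# ist = FixedOffset(330, "IST")                  # fixed +5:30, never DST
# ist.utcoffset(None)                            # timedelta of 5 hours 30 minutes
# Eastern.utcoffset(datetime(1995, 7, 1, tzinfo=Eastern))
#                                                # -5h standard + 1h DST = -4h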
@@ -1,137 +0,0 @@
#!/usr/bin/env python
'''
$Id: tzfile.py,v 1.8 2004/06/03 00:15:24 zenzen Exp $
'''

try:
    from cStringIO import StringIO
except ImportError:
    from io import StringIO
from datetime import datetime, timedelta
from struct import unpack, calcsize

from pytz.tzinfo import StaticTzInfo, DstTzInfo, memorized_ttinfo
from pytz.tzinfo import memorized_datetime, memorized_timedelta


def _byte_string(s):
    """Cast a string or byte string to an ASCII byte string."""
    return s.encode('US-ASCII')

_NULL = _byte_string('\0')


def _std_string(s):
    """Cast a string or byte string to an ASCII string."""
    return str(s.decode('US-ASCII'))


def build_tzinfo(zone, fp):
    head_fmt = '>4s c 15x 6l'
    head_size = calcsize(head_fmt)
    (magic, format, ttisgmtcnt, ttisstdcnt, leapcnt, timecnt,
        typecnt, charcnt) = unpack(head_fmt, fp.read(head_size))

    # Make sure it is a tzfile(5) file
    assert magic == _byte_string('TZif'), 'Got magic %s' % repr(magic)

    # Read out the transition times, localtime indices and ttinfo structures.
    data_fmt = '>%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds' % dict(
        timecnt=timecnt, ttinfo='lBB'*typecnt, charcnt=charcnt)
    data_size = calcsize(data_fmt)
    data = unpack(data_fmt, fp.read(data_size))

    # make sure we unpacked the right number of values
    assert len(data) == 2 * timecnt + 3 * typecnt + 1
    transitions = [memorized_datetime(trans)
                   for trans in data[:timecnt]]
    lindexes = list(data[timecnt:2 * timecnt])
    ttinfo_raw = data[2 * timecnt:-1]
    tznames_raw = data[-1]
    del data

    # Process ttinfo into separate structs
    ttinfo = []
    tznames = {}
    i = 0
    while i < len(ttinfo_raw):
        # have we looked up this timezone name yet?
        tzname_offset = ttinfo_raw[i+2]
        if tzname_offset not in tznames:
            nul = tznames_raw.find(_NULL, tzname_offset)
            if nul < 0:
                nul = len(tznames_raw)
            tznames[tzname_offset] = _std_string(
                tznames_raw[tzname_offset:nul])
        ttinfo.append((ttinfo_raw[i],
                       bool(ttinfo_raw[i+1]),
                       tznames[tzname_offset]))
        i += 3

    # Now build the timezone object
    if len(transitions) == 0:
        ttinfo[0][0], ttinfo[0][2]
        cls = type(zone, (StaticTzInfo,), dict(
            zone=zone,
            _utcoffset=memorized_timedelta(ttinfo[0][0]),
            _tzname=ttinfo[0][2]))
    else:
        # Early dates use the first standard time ttinfo
        i = 0
        while ttinfo[i][1]:
            i += 1
        if ttinfo[i] == ttinfo[lindexes[0]]:
            transitions[0] = datetime.min
        else:
            transitions.insert(0, datetime.min)
            lindexes.insert(0, i)

        # calculate transition info
        transition_info = []
        for i in range(len(transitions)):
            inf = ttinfo[lindexes[i]]
            utcoffset = inf[0]
            if not inf[1]:
                dst = 0
            else:
                for j in range(i-1, -1, -1):
                    prev_inf = ttinfo[lindexes[j]]
                    if not prev_inf[1]:
                        break
                dst = inf[0] - prev_inf[0]  # dst offset

                # Bad dst? Look further. DST > 24 hours happens when
                # a timezone has moved across the international dateline.
                if dst <= 0 or dst > 3600*3:
                    for j in range(i+1, len(transitions)):
                        stdinf = ttinfo[lindexes[j]]
                        if not stdinf[1]:
                            dst = inf[0] - stdinf[0]
                            if dst > 0:
                                break  # Found a useful std time.

            tzname = inf[2]

            # Round utcoffset and dst to the nearest minute or the
            # datetime library will complain. Conversions to these timezones
            # might be up to plus or minus 30 seconds out, but it is
            # the best we can do.
            utcoffset = int((utcoffset + 30) // 60) * 60
            dst = int((dst + 30) // 60) * 60
            transition_info.append(memorized_ttinfo(utcoffset, dst, tzname))

        cls = type(zone, (DstTzInfo,), dict(
            zone=zone,
            _utc_transition_times=transitions,
            _transition_info=transition_info))

    return cls()

if __name__ == '__main__':
    import os.path
    from pprint import pprint
    base = os.path.join(os.path.dirname(__file__), 'zoneinfo')
    tz = build_tzinfo('Australia/Melbourne',
                      open(os.path.join(base, 'Australia', 'Melbourne'), 'rb'))
    tz = build_tzinfo('US/Eastern',
                      open(os.path.join(base, 'US', 'Eastern'), 'rb'))
    pprint(tz._utc_transition_times)
    #print tz.asPython(4)
    #print tz.transitions_mapping
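
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: the header unpacked
# at the top of build_tzinfo() is the fixed 44-byte tzfile(5) preamble - the
# b'TZif' magic, a version byte, 15 reserved bytes, then six big-endian
# 32-bit counts. A standalone reader for just the preamble could look like
# this (the zoneinfo path is only an example):
#
# from struct import calcsize, unpack
# with open('/usr/share/zoneinfo/UTC', 'rb') as fp:
#     fmt = '>4s c 15x 6l'
#     (magic, version, ttisgmtcnt, ttisstdcnt,
#      leapcnt, timecnt, typecnt, charcnt) = unpack(fmt, fp.read(calcsize(fmt)))
#     assert magic == b'TZif'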
@@ -1,564 +0,0 @@
'''Base classes and helpers for building zone specific tzinfo classes'''

from datetime import datetime, timedelta, tzinfo
from bisect import bisect_right
try:
    set
except NameError:
    from sets import Set as set

import pytz
from pytz.exceptions import AmbiguousTimeError, NonExistentTimeError

__all__ = []

_timedelta_cache = {}
def memorized_timedelta(seconds):
    '''Create only one instance of each distinct timedelta'''
    try:
        return _timedelta_cache[seconds]
    except KeyError:
        delta = timedelta(seconds=seconds)
        _timedelta_cache[seconds] = delta
        return delta

_epoch = datetime.utcfromtimestamp(0)
_datetime_cache = {0: _epoch}
def memorized_datetime(seconds):
    '''Create only one instance of each distinct datetime'''
    try:
        return _datetime_cache[seconds]
    except KeyError:
        # NB. We can't just do datetime.utcfromtimestamp(seconds) as this
        # fails with negative values under Windows (Bug #90096)
        dt = _epoch + timedelta(seconds=seconds)
        _datetime_cache[seconds] = dt
        return dt

_ttinfo_cache = {}
def memorized_ttinfo(*args):
    '''Create only one instance of each distinct tuple'''
    try:
        return _ttinfo_cache[args]
    except KeyError:
        ttinfo = (
            memorized_timedelta(args[0]),
            memorized_timedelta(args[1]),
            args[2]
        )
        _ttinfo_cache[args] = ttinfo
        return ttinfo

_notime = memorized_timedelta(0)


def _to_seconds(td):
    '''Convert a timedelta to seconds'''
    return td.seconds + td.days * 24 * 60 * 60
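
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: the memorized_*
# helpers above intern their results, so the many zones that share an offset
# or transition tuple also share a single object instead of duplicating it.
#
# >>> memorized_timedelta(3600) is memorized_timedelta(3600)
# True
# >>> memorized_datetime(0) == datetime(1970, 1, 1)
# True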

class BaseTzInfo(tzinfo):
    # Overridden in subclass
    _utcoffset = None
    _tzname = None
    zone = None

    def __str__(self):
        return self.zone


class StaticTzInfo(BaseTzInfo):
    '''A timezone that has a constant offset from UTC

    These timezones are rare, as most locations have changed their
    offset at some point in their history.
    '''
    def fromutc(self, dt):
        '''See datetime.tzinfo.fromutc'''
        if dt.tzinfo is not None and dt.tzinfo is not self:
            raise ValueError('fromutc: dt.tzinfo is not self')
        return (dt + self._utcoffset).replace(tzinfo=self)

    def utcoffset(self, dt, is_dst=None):
        '''See datetime.tzinfo.utcoffset

        is_dst is ignored for StaticTzInfo, and exists only to
        retain compatibility with DstTzInfo.
        '''
        return self._utcoffset

    def dst(self, dt, is_dst=None):
        '''See datetime.tzinfo.dst

        is_dst is ignored for StaticTzInfo, and exists only to
        retain compatibility with DstTzInfo.
        '''
        return _notime

    def tzname(self, dt, is_dst=None):
        '''See datetime.tzinfo.tzname

        is_dst is ignored for StaticTzInfo, and exists only to
        retain compatibility with DstTzInfo.
        '''
        return self._tzname

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime.

        This is normally a no-op, as StaticTzInfo timezones never have
        ambiguous cases to correct:

        >>> from pytz import timezone
        >>> gmt = timezone('GMT')
        >>> isinstance(gmt, StaticTzInfo)
        True
        >>> dt = datetime(2011, 5, 8, 1, 2, 3, tzinfo=gmt)
        >>> gmt.normalize(dt) is dt
        True

        The supported method of converting between timezones is to use
        datetime.astimezone(). Currently normalize() also works:

        >>> la = timezone('America/Los_Angeles')
        >>> dt = la.localize(datetime(2011, 5, 7, 1, 2, 3))
        >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
        >>> gmt.normalize(dt).strftime(fmt)
        '2011-05-07 08:02:03 GMT (+0000)'
        '''
        if dt.tzinfo is self:
            return dt
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')
        return dt.astimezone(self)

    def __repr__(self):
        return '<StaticTzInfo %r>' % (self.zone,)

    def __reduce__(self):
        # Special pickle so the zone remains a singleton and to cope with
        # database changes.
        return pytz._p, (self.zone,)

class DstTzInfo(BaseTzInfo):
    '''A timezone that has a variable offset from UTC

    The offset might change if daylight saving time comes into effect,
    or at a point in history when the region decides to change their
    timezone definition.
    '''
    # Overridden in subclass
    _utc_transition_times = None  # Sorted list of DST transition times in UTC
    _transition_info = None  # [(utcoffset, dstoffset, tzname)] corresponding
                             # to _utc_transition_times entries
    zone = None

    # Set in __init__
    _tzinfos = None
    _dst = None  # DST offset

    def __init__(self, _inf=None, _tzinfos=None):
        if _inf:
            self._tzinfos = _tzinfos
            self._utcoffset, self._dst, self._tzname = _inf
        else:
            _tzinfos = {}
            self._tzinfos = _tzinfos
            self._utcoffset, self._dst, self._tzname = self._transition_info[0]
            _tzinfos[self._transition_info[0]] = self
            for inf in self._transition_info[1:]:
                if inf not in _tzinfos:
                    _tzinfos[inf] = self.__class__(inf, _tzinfos)

    def fromutc(self, dt):
        '''See datetime.tzinfo.fromutc'''
        if (dt.tzinfo is not None
                and getattr(dt.tzinfo, '_tzinfos', None) is not self._tzinfos):
            raise ValueError('fromutc: dt.tzinfo is not self')
        dt = dt.replace(tzinfo=None)
        idx = max(0, bisect_right(self._utc_transition_times, dt) - 1)
        inf = self._transition_info[idx]
        return (dt + inf[0]).replace(tzinfo=self._tzinfos[inf])

    def normalize(self, dt):
        '''Correct the timezone information on the given datetime

        If date arithmetic crosses DST boundaries, the tzinfo
        is not magically adjusted. This method normalizes the
        tzinfo to the correct one.

        To test, first we need to do some setup

        >>> from pytz import timezone
        >>> utc = timezone('UTC')
        >>> eastern = timezone('US/Eastern')
        >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'

        We next create a datetime right on an end-of-DST transition point,
        the instant when the wallclocks are wound back one hour.

        >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
        >>> loc_dt = utc_dt.astimezone(eastern)
        >>> loc_dt.strftime(fmt)
        '2002-10-27 01:00:00 EST (-0500)'

        Now, if we subtract a few minutes from it, note that the timezone
        information has not changed.

        >>> before = loc_dt - timedelta(minutes=10)
        >>> before.strftime(fmt)
        '2002-10-27 00:50:00 EST (-0500)'

        But we can fix that by calling the normalize method

        >>> before = eastern.normalize(before)
        >>> before.strftime(fmt)
        '2002-10-27 01:50:00 EDT (-0400)'

        The supported method of converting between timezones is to use
        datetime.astimezone(). Currently, normalize() also works:

        >>> th = timezone('Asia/Bangkok')
        >>> am = timezone('Europe/Amsterdam')
        >>> dt = th.localize(datetime(2011, 5, 7, 1, 2, 3))
        >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
        >>> am.normalize(dt).strftime(fmt)
        '2011-05-06 20:02:03 CEST (+0200)'
        '''
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')

        # Convert dt in localtime to UTC
        offset = dt.tzinfo._utcoffset
        dt = dt.replace(tzinfo=None)
        dt = dt - offset
        # convert it back, and return it
        return self.fromutc(dt)

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time.

        This method should be used to construct localtimes, rather
        than passing a tzinfo argument to a datetime constructor.

        is_dst is used to determine the correct timezone in the ambiguous
        period at the end of daylight saving time.

        >>> from pytz import timezone
        >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
        >>> amdam = timezone('Europe/Amsterdam')
        >>> dt = datetime(2004, 10, 31, 2, 0, 0)
        >>> loc_dt1 = amdam.localize(dt, is_dst=True)
        >>> loc_dt2 = amdam.localize(dt, is_dst=False)
        >>> loc_dt1.strftime(fmt)
        '2004-10-31 02:00:00 CEST (+0200)'
        >>> loc_dt2.strftime(fmt)
        '2004-10-31 02:00:00 CET (+0100)'
        >>> str(loc_dt2 - loc_dt1)
        '1:00:00'

        Use is_dst=None to raise an AmbiguousTimeError for ambiguous
        times at the end of daylight saving time

        >>> try:
        ...     loc_dt1 = amdam.localize(dt, is_dst=None)
        ... except AmbiguousTimeError:
        ...     print('Ambiguous')
        Ambiguous

        is_dst defaults to False

        >>> amdam.localize(dt) == amdam.localize(dt, False)
        True

        is_dst is also used to determine the correct timezone in the
        wallclock times jumped over at the start of daylight saving time.

        >>> pacific = timezone('US/Pacific')
        >>> dt = datetime(2008, 3, 9, 2, 0, 0)
        >>> ploc_dt1 = pacific.localize(dt, is_dst=True)
        >>> ploc_dt2 = pacific.localize(dt, is_dst=False)
        >>> ploc_dt1.strftime(fmt)
        '2008-03-09 02:00:00 PDT (-0700)'
        >>> ploc_dt2.strftime(fmt)
        '2008-03-09 02:00:00 PST (-0800)'
        >>> str(ploc_dt2 - ploc_dt1)
        '1:00:00'

        Use is_dst=None to raise a NonExistentTimeError for these skipped
        times.

        >>> try:
        ...     loc_dt1 = pacific.localize(dt, is_dst=None)
        ... except NonExistentTimeError:
        ...     print('Non-existent')
        Non-existent
        '''
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')

        # Find the two best possibilities.
        possible_loc_dt = set()
        for delta in [timedelta(days=-1), timedelta(days=1)]:
            loc_dt = dt + delta
            idx = max(0, bisect_right(
                self._utc_transition_times, loc_dt) - 1)
            inf = self._transition_info[idx]
            tzinfo = self._tzinfos[inf]
            loc_dt = tzinfo.normalize(dt.replace(tzinfo=tzinfo))
            if loc_dt.replace(tzinfo=None) == dt:
                possible_loc_dt.add(loc_dt)

        if len(possible_loc_dt) == 1:
            return possible_loc_dt.pop()

        # If there are no possibly correct timezones, we are attempting
        # to convert a time that never happened - the time period jumped
        # during the start-of-DST transition period.
        if len(possible_loc_dt) == 0:
            # If we refuse to guess, raise an exception.
            if is_dst is None:
                raise NonExistentTimeError(dt)

            # If we are forcing the pre-DST side of the DST transition, we
            # obtain the correct timezone by winding the clock forward a few
            # hours.
            elif is_dst:
                return self.localize(
                    dt + timedelta(hours=6), is_dst=True) - timedelta(hours=6)

            # If we are forcing the post-DST side of the DST transition, we
            # obtain the correct timezone by winding the clock back.
            else:
                return self.localize(
                    dt - timedelta(hours=6), is_dst=False) + timedelta(hours=6)

        # If we get this far, we have multiple possible timezones - this
        # is an ambiguous case occurring during the end-of-DST transition.

        # If told to be strict, raise an exception since we have an
        # ambiguous case
        if is_dst is None:
            raise AmbiguousTimeError(dt)

        # Filter out the possibilities that don't match the requested
        # is_dst
        filtered_possible_loc_dt = [
            p for p in possible_loc_dt
            if bool(p.tzinfo._dst) == is_dst
        ]

        # Hopefully we only have one possibility left. Return it.
        if len(filtered_possible_loc_dt) == 1:
            return filtered_possible_loc_dt[0]

        if len(filtered_possible_loc_dt) == 0:
            filtered_possible_loc_dt = list(possible_loc_dt)

        # If we get this far, we are in a weird timezone transition
        # where the clocks have been wound back but is_dst is the same
        # in both (e.g. Europe/Warsaw 1915 when they switched to CET).
        # At this point, we just have to guess unless we allow more
        # hints to be passed in (such as the UTC offset or abbreviation),
        # but that is just getting silly.
        #
        # Choose the earliest (by UTC) applicable timezone if is_dst=True
        # Choose the latest (by UTC) applicable timezone if is_dst=False
        # i.e., behave like end-of-DST transition
        dates = {}  # utc -> local
        for local_dt in filtered_possible_loc_dt:
            utc_time = local_dt.replace(tzinfo=None) - local_dt.tzinfo._utcoffset
            assert utc_time not in dates
            dates[utc_time] = local_dt
        return dates[[min, max][not is_dst](dates)]

    def utcoffset(self, dt, is_dst=None):
        '''See datetime.tzinfo.utcoffset

        The is_dst parameter may be used to remove ambiguity during DST
        transitions.

        >>> from pytz import timezone
        >>> tz = timezone('America/St_Johns')
        >>> ambiguous = datetime(2009, 10, 31, 23, 30)

        >>> tz.utcoffset(ambiguous, is_dst=False)
        datetime.timedelta(-1, 73800)

        >>> tz.utcoffset(ambiguous, is_dst=True)
        datetime.timedelta(-1, 77400)

        >>> try:
        ...     tz.utcoffset(ambiguous)
        ... except AmbiguousTimeError:
        ...     print('Ambiguous')
        Ambiguous

        '''
        if dt is None:
            return None
        elif dt.tzinfo is not self:
            dt = self.localize(dt, is_dst)
            return dt.tzinfo._utcoffset
        else:
            return self._utcoffset

    def dst(self, dt, is_dst=None):
        '''See datetime.tzinfo.dst

        The is_dst parameter may be used to remove ambiguity during DST
        transitions.

        >>> from pytz import timezone
        >>> tz = timezone('America/St_Johns')

        >>> normal = datetime(2009, 9, 1)

        >>> tz.dst(normal)
        datetime.timedelta(0, 3600)
        >>> tz.dst(normal, is_dst=False)
        datetime.timedelta(0, 3600)
        >>> tz.dst(normal, is_dst=True)
        datetime.timedelta(0, 3600)

        >>> ambiguous = datetime(2009, 10, 31, 23, 30)

        >>> tz.dst(ambiguous, is_dst=False)
        datetime.timedelta(0)
        >>> tz.dst(ambiguous, is_dst=True)
        datetime.timedelta(0, 3600)
        >>> try:
        ...     tz.dst(ambiguous)
        ... except AmbiguousTimeError:
        ...     print('Ambiguous')
        Ambiguous

        '''
        if dt is None:
            return None
        elif dt.tzinfo is not self:
            dt = self.localize(dt, is_dst)
            return dt.tzinfo._dst
        else:
            return self._dst

    def tzname(self, dt, is_dst=None):
        '''See datetime.tzinfo.tzname

        The is_dst parameter may be used to remove ambiguity during DST
        transitions.

        >>> from pytz import timezone
        >>> tz = timezone('America/St_Johns')

        >>> normal = datetime(2009, 9, 1)

        >>> tz.tzname(normal)
        'NDT'
        >>> tz.tzname(normal, is_dst=False)
        'NDT'
        >>> tz.tzname(normal, is_dst=True)
        'NDT'

        >>> ambiguous = datetime(2009, 10, 31, 23, 30)

        >>> tz.tzname(ambiguous, is_dst=False)
        'NST'
        >>> tz.tzname(ambiguous, is_dst=True)
        'NDT'
        >>> try:
        ...     tz.tzname(ambiguous)
        ... except AmbiguousTimeError:
        ...     print('Ambiguous')
        Ambiguous
        '''
        if dt is None:
            return self.zone
        elif dt.tzinfo is not self:
            dt = self.localize(dt, is_dst)
            return dt.tzinfo._tzname
        else:
            return self._tzname

    def __repr__(self):
        if self._dst:
            dst = 'DST'
        else:
            dst = 'STD'
        if self._utcoffset > _notime:
            return '<DstTzInfo %r %s+%s %s>' % (
                self.zone, self._tzname, self._utcoffset, dst
            )
        else:
            return '<DstTzInfo %r %s%s %s>' % (
                self.zone, self._tzname, self._utcoffset, dst
            )

    def __reduce__(self):
        # Special pickle so the zone remains a singleton and to cope with
        # database changes.
        return pytz._p, (
            self.zone,
            _to_seconds(self._utcoffset),
            _to_seconds(self._dst),
            self._tzname
        )


def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None):
    """Factory function for unpickling pytz tzinfo instances.

    This is shared for both StaticTzInfo and DstTzInfo instances, because
    database changes could cause a zone's implementation to switch between
    these two base classes and we can't break pickles on a pytz version
    upgrade.
    """
    # Raises a KeyError if zone no longer exists, which should never happen
    # and would be a bug.
    tz = pytz.timezone(zone)

    # A StaticTzInfo - just return it
    if utcoffset is None:
        return tz

    # This pickle was created from a DstTzInfo. We need to
    # determine which of the list of tzinfo instances for this zone
    # to use in order to restore the state of any datetime instances using
    # it correctly.
    utcoffset = memorized_timedelta(utcoffset)
    dstoffset = memorized_timedelta(dstoffset)
    try:
        return tz._tzinfos[(utcoffset, dstoffset, tzname)]
    except KeyError:
        # The particular state requested in this timezone no longer exists.
        # This indicates a corrupt pickle, or the timezone database has been
        # corrected violently enough to make this particular
        # (utcoffset, dstoffset) no longer exist in the zone, or the
        # abbreviation has been changed.
        pass

    # See if we can find an entry differing only by tzname. Abbreviations
    # get changed from the initial guess by the database maintainers to
    # match reality when this information is discovered.
    for localized_tz in tz._tzinfos.values():
        if (localized_tz._utcoffset == utcoffset
                and localized_tz._dst == dstoffset):
            return localized_tz

    # This (utcoffset, dstoffset) information has been removed from the
    # zone. Add it back. This might occur when the database maintainers have
    # corrected incorrect information. datetime instances using this
    # incorrect information will continue to do so, exactly as they were
    # before being pickled. This is purely an overly paranoid safety net - I
    # doubt this will ever be needed in real life.
    inf = (utcoffset, dstoffset, tzname)
    tz._tzinfos[inf] = tz.__class__(inf, tz._tzinfos)
    return tz._tzinfos[inf]
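
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: __reduce__ on the
# classes above routes pickling through pytz._p, a thin wrapper around this
# unpickler, so loading a pickle hands back the existing singleton for the
# zone instead of a fresh object.
#
# import pickle
# import pytz
# eastern = pytz.timezone('US/Eastern')
# assert pickle.loads(pickle.dumps(eastern)) is eastern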
Binary file not shown.
Some files were not shown because too many files have changed in this diff.