Compare commits

...

256 Commits

SHA1 Message Date
c100213351 v11.1.0 2022-11-12 12:34:41 -06:00
43237a05c5 update changes 2022-11-12 12:34:07 -06:00
c34eccad4a Merge pull request #117 from wakatime/feature/api-key-vault-cmd
Support for api key vault cmd config
2022-11-12 12:32:59 -06:00
5077c5e290 read api key vault cmd from sublime settings and wakatime config file 2022-11-12 12:32:43 -06:00
a387c08b44 changes for v11.1.0 2022-11-11 21:57:09 -03:00
3c2947cf79 Support for api key vault cmd config 2022-11-11 21:55:29 -03:00
0744d7209b Exit code 112 should be ignored same as 102 2022-11-10 10:45:54 -06:00
54e6772a80 v11.0.8 2022-08-23 15:14:53 +02:00
b576dfafe6 changes for v11.0.8 2022-08-23 15:14:40 +02:00
5299efd6fa sometimes selection object can have no elements but eval truthy 2022-08-23 15:13:44 +02:00
74583a6845 v11.0.7 2022-06-25 13:46:38 -03:00
31f1f8ecdc changes for v11.0.7 2022-06-25 13:46:25 -03:00
c1f58fd05d check wakatime-cli versions in background thread to prevent blocking UI 2022-06-25 13:45:29 -03:00
28063e3ac4 v11.0.6 2022-06-08 07:44:41 -04:00
4d56aca1a1 changes for v11.0.6 2022-06-08 07:44:01 -04:00
e15c514ef3 Fix call to log helper 2022-06-08 07:43:01 -04:00
fe582c84b9 v11.0.5 2022-04-29 11:19:26 -07:00
d7ee8675d8 changes for v11.0.5 2022-04-29 11:19:05 -07:00
da17125b97 Prevent installing global url opener 2022-04-29 11:14:55 -07:00
847223cdce Clean up legacy wakatime-cli directory if exists 2022-02-21 20:08:48 -08:00
b405f99cff chmod wakatime-cli after copying 2022-01-21 15:12:51 -08:00
b5210b77ce v11.0.4 2022-01-06 18:40:32 -08:00
fac1192228 changes for v11.0.4 2022-01-06 18:40:18 -08:00
4d6533b2ee fix lineno, cursorpos, and lines-in-file arguments 2022-01-06 18:37:52 -08:00
7d2bd0b7c5 Copy wakatime-cli when symlink fails 2022-01-06 18:16:02 -08:00
66f5f48f33 create symlink to wakatime-cli 2022-01-06 10:23:09 -08:00
e337afcc53 Fix Popen override 2022-01-06 07:56:45 -08:00
033c07f070 prevent using empty cached version 2022-01-05 04:15:10 -08:00
c09767b58d v11.0.3 2021-12-31 16:55:16 -08:00
ac80c4268b changes for v11.0.3 2021-12-31 16:55:05 -08:00
e5331d3086 only remove wakatime-cli after finished downloading new one 2021-12-31 16:42:11 -08:00
bebb46dda6 v11.0.2 2021-11-17 14:01:24 -08:00
9767790063 changes for v11.0.2 2021-11-17 14:01:08 -08:00
75c219055d encode cursorpos in json as int not str 2021-11-17 13:52:49 -08:00
ef77e1f178 v11.0.1 2021-11-16 09:47:29 -08:00
4025decc12 changes for v11.0.1 2021-11-16 09:47:14 -08:00
086c700151 turn off strict config parsing in py3 2021-11-16 09:43:20 -08:00
650bb6fa26 log error response when fail to check wakatime-cli version or download binary 2021-11-01 17:27:12 -07:00
389c84673e retry httperrors in py2 without ssl 2021-11-01 17:18:06 -07:00
6fa1321a95 v11.0.0 2021-10-31 12:59:57 -07:00
f1a8fcab44 changes for v11.0.0 2021-10-31 12:59:36 -07:00
3937b083c5 Finish implementation of using new Go wakatime-cli 2021-10-31 12:57:28 -07:00
711aab0d18 Remove unused python packages folder 2021-10-31 11:42:11 -07:00
cb8ce3a54e Switch to new Go wakatime-cli 2021-10-31 11:41:21 -07:00
7db5fe0a5d strip newlines from today output before displaying in status bar 2021-10-30 15:18:26 -07:00
cebcfaa0e9 Merge pull request #106 from jamesa/master
Fix Python 3.8 warning when getting Unix time zone
2021-10-25 21:32:02 -07:00
f8faed6e47 Fix Python 3.8 warning when getting Unix time zone 2021-10-25 17:19:39 -04:00
fb303e048f v10.0.1 2020-12-28 18:54:50 -08:00
4f11222c2b changes for v10.0.1 2020-12-28 18:54:37 -08:00
72b72dc9f0 add sublime text to subtitle 2020-12-28 18:54:07 -08:00
1b07d0442b v10.0.0 2020-12-28 18:46:13 -08:00
fbd8e84ea1 changes for v10.0.0 2020-12-28 18:44:55 -08:00
01c0e7758e Partially revert "Use wakatime-cli standalone" to support both
standalone and Python source wakatime-cli versions.

This partially reverts commit d588451468.
2020-12-28 18:42:00 -08:00
28556de3b6 add back python binary location setting 2020-12-28 17:57:46 -08:00
809e43cfe5 fix link to api key 2020-12-28 17:49:31 -08:00
22ddbe27b0 link directly to wakatime install instructions 2020-12-28 17:44:00 -08:00
ddaf60b8b0 remove ide setting files from revision control 2020-07-30 15:53:04 -07:00
2e6a87c67e prevent executing wakatime-cli before it has been downloaded 2020-03-01 10:01:52 -08:00
01503b1c20 Merge pull request #99 from gandarez/standalone
Use wakatime-cli standalone
2020-03-01 10:57:14 -05:00
5a4ac9c11d check if cli is installed before sending heartbeat 2020-02-29 20:01:34 -05:00
fa0a3aacb5 add api_key into sublime-settings 2020-02-29 19:22:32 -05:00
d588451468 Use wakatime-cli standalone 2020-02-29 19:15:40 -05:00
483d8f596e v9.1.2 2020-02-13 09:24:11 -08:00
03f2d6d580 changes for v9.1.2 2020-02-13 09:23:58 -08:00
e1390d7647 Upgrade wakatime-cli to v13.0.7 2020-02-13 09:22:15 -08:00
c87bdd041c Upgrade wakatime-cli to v13.0.5 2020-02-11 21:26:01 -08:00
3a65395636 v9.1.1 2020-02-11 08:12:07 -08:00
0b2f3aa9a4 changes for v9.1.1 2020-02-11 08:11:52 -08:00
58ef2cd794 fix typo 2020-02-11 08:10:39 -08:00
04173d3bcc v9.1.0 2020-02-09 23:37:17 -08:00
3206a07476 changes for v9.1.0 2020-02-09 23:36:29 -08:00
9330236816 upgrade wakatime-cli to v13.0.4 2020-02-09 23:35:43 -08:00
885c11f01a Detect python in Windows LocalAppData install locations 2020-02-09 23:33:39 -08:00
8acda0157a upgrade embedded python to v3.8.1 2020-02-09 23:17:13 -08:00
935ddbd5f6 v9.0.2 2019-12-04 22:04:13 -08:00
b57b1eb696 changes for v9.0.2 2019-12-04 22:03:49 -08:00
6ec097b9d1 upgrade wakatime-cli to v13.0.3 2019-12-04 22:02:51 -08:00
b3ed36d3b2 fix badge locations 2019-11-27 22:28:20 -08:00
3669e4df6a add wakatime badge 2019-11-25 21:30:54 -08:00
3504096082 v9.0.1 2019-11-24 07:49:27 -08:00
5990947706 changes for v9.0.1 2019-11-24 07:48:58 -08:00
2246e31244 upgrade wakatime-cli to v13.0.2 2019-11-24 07:46:13 -08:00
b55fe702d3 v9.0.0 2019-06-23 19:03:26 -07:00
e0fbbb50bb changes for v9.0.0 2019-06-23 19:03:08 -07:00
32c0cb5a97 upgrade wakatime-cli to v12.0.0 2019-06-23 18:56:57 -07:00
67d8b0d24f v8.7.0 2019-05-29 06:44:07 -07:00
b8b2f4944b changes for v8.7.0 2019-05-29 06:43:54 -07:00
a20161164c prevent creating user settings file when api key found from common config 2019-05-29 06:42:38 -07:00
405211bb07 v8.6.1 2019-05-28 20:54:57 -07:00
ffc879c4eb changes for v8.6.1 2019-05-28 20:54:43 -07:00
1e23919694 fix import path of parseConfigFile 2019-05-28 20:53:58 -07:00
b2086a3cd2 v8.6.0 2019-05-27 12:41:14 -07:00
005b07520c changes for v8.6.0 2019-05-27 12:40:55 -07:00
60608bd322 Skip prompting for api key when found from common config file 2019-05-27 12:39:48 -07:00
cde8f8f1de v8.5.0 2019-05-10 09:45:11 -07:00
4adfca154c changes for v8.5.0 2019-05-10 09:44:57 -07:00
f7b3924a30 use wakatime-cli to fetch today coding time 2019-05-10 09:43:30 -07:00
db00024455 remove clock icon from status bar 2019-05-10 09:18:38 -07:00
9a6be7ca4e v8.4.2 2019-05-07 18:44:24 -07:00
1ea9b2a761 changes for v8.4.2 2019-05-07 18:43:57 -07:00
bd5e87e030 upgrade wakatime-cli to v11.0.0 2019-05-07 18:42:31 -07:00
0256ff4a6a v8.4.1 2019-05-01 10:57:50 -07:00
9d170b3276 changes for v8.4.1 2019-05-01 10:57:38 -07:00
c54e575210 use api subdomain for fetching summaries 2019-05-01 10:57:11 -07:00
07513d8f10 v8.4.0 2019-05-01 10:49:59 -07:00
30902cc050 changes for v8.4.0 2019-05-01 10:49:40 -07:00
aa7962d49a show today coding activity time in status bar 2019-05-01 10:48:28 -07:00
d8c662f3db v8.3.6 2019-04-30 16:25:28 -07:00
10d88ebf2d changes for v8.3.6 2019-04-30 16:25:14 -07:00
2f28c561b1 upgrade wakatime-cli to v10.8.4 2019-04-30 16:24:35 -07:00
24968507df v8.3.5 2019-04-30 09:21:47 -07:00
641cd539ed changes for v8.3.5 2019-04-30 09:21:33 -07:00
0c65d7e5b2 Upgrade wakatime-cli to v10.8.3 2019-04-30 09:20:29 -07:00
f0532f5b8e v8.3.4 2019-03-30 19:17:06 -07:00
8094db9680 changes for v8.3.4 2019-03-30 19:16:49 -07:00
bf20551849 upgrade wakatime-cli to v10.8.2 2019-03-30 18:53:40 -07:00
2b6e32b578 remove redundant line 2019-03-13 09:36:51 +09:00
363c3d38e2 update license file with copyright owner inline 2019-03-13 09:20:50 +09:00
88466d7db2 v8.3.3 2018-12-19 07:43:01 -08:00
122fcbbee5 changes for v8.3.3 2018-12-19 07:42:30 -08:00
c41fcec5d8 upgrade wakatime-cli to v10.6.1 2018-12-19 07:38:18 -08:00
be09b34d44 v8.3.2 2018-10-06 20:40:57 -07:00
e1ee1c1216 changes for v8.3.2 2018-10-06 20:40:37 -07:00
a37061924b Send buffered heartbeats to API every 30 seconds. 2018-10-06 20:40:12 -07:00
da01fa268b v8.3.1 2018-10-05 00:07:11 -07:00
c279418651 changes for v8.3.1 2018-10-05 00:06:46 -07:00
5cf2c8f7ac upgrade wakatime-cli to v10.4.1 2018-10-05 00:06:02 -07:00
d1455e77a8 v8.3.0 2018-10-03 00:55:02 -07:00
8499e7bafe changes for v8.3.0 2018-10-03 00:54:40 -07:00
abc26a0864 upgrade wakatime-cli to v10.4.0 2018-10-03 00:47:20 -07:00
71ad97ffe9 v8.2.0 2018-09-30 22:03:01 -07:00
3ec5995c99 changes for v8.2.0 2018-09-30 22:02:53 -07:00
195cf4de36 upgrade wakatime-cli to v10.3.0 2018-09-30 21:48:20 -07:00
b39eefb4f5 cross-platform Popen with hidden window 2018-09-30 21:29:04 -07:00
bbf5761e26 v8.1.2 2018-09-20 22:31:14 -07:00
c4df1dc633 changes for v8.1.2 2018-09-20 22:30:57 -07:00
360a491cda upgrade wakatime-cli to v10.2.4 2018-09-20 22:29:34 -07:00
f61a34eda7 build subprocess stdin near heartbeats 2018-09-13 19:39:05 -07:00
48123d7409 v8.1.1 2018-04-26 08:42:51 -07:00
c8a15d7ac0 changes for v8.1.1 2018-04-26 08:42:00 -07:00
202df81e04 upgrade wakatime-cli to v10.2.1 2018-04-26 08:40:02 -07:00
5e34f3f6a7 v8.1.0 2018-04-03 23:43:43 -07:00
d4441e5575 changes for v8.1.0 2018-04-03 23:43:26 -07:00
9eac8e2bd3 prefer python3 when running wakatime-cli 2018-04-03 23:42:02 -07:00
11d8fc3a09 v8.0.8 2018-03-15 01:51:31 -07:00
d1f1f51f23 changes for v8.0.8 2018-03-15 01:51:20 -07:00
b10bb36c09 Upgrade wakatime-cli to v10.1.3 2018-03-15 01:50:36 -07:00
dc9474befa v8.0.7 2018-03-15 01:32:56 -07:00
b910807e98 changes for v8.0.7 2018-03-15 01:32:37 -07:00
bc770515f0 Upgrade wakatime-cli to v10.1.2 2018-03-15 01:31:17 -07:00
9e102d7c5c v8.0.6 2018-01-04 23:34:40 -08:00
5c1770fb48 changes for v8.0.6 2018-01-04 23:34:13 -08:00
683397534c upgrade wakatime-cli to v10.1.0 2018-01-04 23:33:07 -08:00
1c92017543 v8.0.5 2017-11-24 16:16:47 -08:00
fda1307668 changes for v8.0.5 2017-11-24 16:16:34 -08:00
1c84d457c5 upgrade wakatime-cli to v10.0.5 2017-11-24 16:16:04 -08:00
1e680ce739 v8.0.4 2017-11-23 12:49:07 -08:00
376adbb7d7 changes for v8.0.4 2017-11-23 12:48:44 -08:00
e0040e185b upgrade wakatime-cli to v10.0.4 2017-11-23 12:41:59 -08:00
c4a88541d0 v8.0.3 2017-11-22 13:12:09 -08:00
0cf621d177 changes for v8.0.3 2017-11-22 13:11:48 -08:00
db9d6cec97 upgrade wakatime-cli to v10.0.3 2017-11-22 13:09:17 -08:00
2c17f49a6b v8.0.2 2017-11-15 18:36:43 -08:00
95116d6007 changes for v8.0.2 2017-11-15 18:36:28 -08:00
8c52596f8f upgrade wakatime-cli to v10.0.2 2017-11-15 18:35:43 -08:00
3109817dc7 v8.0.1 2017-11-09 09:10:19 -08:00
0c0f965763 changes for v8.0.1 2017-11-09 09:10:06 -08:00
1573e9c825 upgrade wakatime-cli to v10.0.1 2017-11-09 09:09:28 -08:00
a0b8f349c2 v8.0.0 2017-11-08 23:14:04 -08:00
2fb60b1589 changes for v8.0.0 2017-11-08 23:13:47 -08:00
02786a744e upgrade wakatime-cli to v10.0.0 2017-11-08 23:12:05 -08:00
729a4360ba v7.0.26 2017-11-07 18:55:25 -08:00
8f45de85ec changes for v7.0.26 2017-11-07 18:55:03 -08:00
4672f70c87 upgrade wakatime-cli to v9.0.1 2017-11-07 18:54:17 -08:00
46a9aae942 v7.0.25 2017-11-05 20:10:51 -08:00
9e77ce2697 changes for v7.0.25 2017-11-05 20:08:25 -08:00
385ba818cc ability to override python binary location 2017-11-05 20:04:48 -08:00
7492c3ce12 upgrade wakatime-cli to v9.0.0 2017-11-05 19:51:43 -08:00
03eed88917 v7.0.24 2017-10-29 11:38:11 -07:00
60a7ad96b5 changes for v7.0.24 2017-10-29 11:37:31 -07:00
2d1d5d336a link to issues from changelog 2017-10-29 11:35:38 -07:00
e659759b2d upgrade wakatime-cli to v8.0.5 2017-10-29 11:32:03 -07:00
a290e5d86d v7.0.23 2017-09-14 17:54:06 -07:00
d5b922bb10 changes for 7.0.23 2017-09-14 17:53:51 -07:00
ec7b5e3530 add missing setting description 2017-09-14 17:51:22 -07:00
aa3f2e8af6 Merge pull request #76 from krishnaglick/master
Adding "Include" Functionality
2017-09-14 17:46:55 -07:00
f4e53cd682 Added 'include' functionality 2017-09-14 13:54:50 -04:00
aba72b0f1e v7.0.22 2017-06-08 00:21:33 -07:00
5b9d86a57d changes for v7.0.22 2017-06-08 00:21:15 -07:00
fa40874635 upgrade wakatime-cli to v8.0.3 2017-06-08 00:20:10 -07:00
6d4a4cf9eb link to FAQ from troubleshooting section 2017-05-26 07:40:14 -07:00
f628b8dd11 v7.0.21 2017-05-24 23:55:36 -07:00
f932ee9fc6 changes for v7.0.21 2017-05-24 23:55:17 -07:00
2f14009279 Upgrade wakatime-cli to v8.0.2 2017-05-24 23:53:28 -07:00
453d96bf9c use unicode arrow 2017-05-02 19:24:54 -07:00
9de153f156 use unicode arrow 2017-04-15 10:13:42 -07:00
dcc782338d v7.0.20 2017-04-10 19:04:00 -07:00
9d0dba988a remove sub-list install instructions 2017-04-10 19:03:05 -07:00
e76f2e514e v7.0.19 2017-04-10 18:59:05 -07:00
224f7cd82a Remove /var/www/ from default ignored folders 2017-04-10 18:55:24 -07:00
3cce525a84 update dashboard step for install instructions 2017-03-30 00:23:41 -07:00
ce885501ad v7.0.18 2017-03-16 08:40:05 -07:00
c9448a9a19 changes for v7.0.18 2017-03-16 08:37:29 -07:00
04f8c61ebc no need to create empty common config file 2017-03-16 08:36:17 -07:00
04a4630024 upgrade wakatime-cli to v8.0.0 2017-03-16 08:26:24 -07:00
02138220fd allow setting a proxy in sublime-settings file 2017-03-10 08:11:40 -08:00
d0b162bdd8 Merge pull request #71 from marcus-at-localhorst/master
Use correct argument --exclude instead of --ignore
2017-03-10 08:02:44 -08:00
1b8895cd38 add --hidefilenames in config 2017-03-04 17:21:21 +01:00
938bbb73d1 Use correct argument --exclude instead of --ignore
https://github.com/wakatime/sublime-wakatime/issues/70
2017-03-04 17:02:04 +01:00
008fdc6b49 v7.0.17 2017-03-01 22:32:08 -08:00
a788625dd0 changes for v7.0.17 2017-03-01 22:31:49 -08:00
bcbce681c3 upgrade wakatime-cli to v7.0.4 2017-03-01 22:30:57 -08:00
35299db832 don't wrap ctrl in code blocks 2017-02-26 20:23:51 -08:00
eb7814624c escape backtick with backslash 2017-02-26 20:23:01 -08:00
1c092b2fd8 escape backslash correctly 2017-02-26 20:21:29 -08:00
507ef95f71 keyboard shortcut for opening console window 2017-02-26 20:08:15 -08:00
9777bc7788 v7.0.16 2017-02-20 16:20:09 -08:00
20b78defa6 changes for v7.0.16 2017-02-20 16:19:55 -08:00
8cb1c557d9 upgrade wakatime-cli to v7.0.2 2017-02-20 16:18:38 -08:00
20a1965f13 v7.0.15 2017-02-13 23:31:26 -08:00
0b802a554e changes for v7.0.15 2017-02-13 23:31:09 -08:00
30186c9b2c upgrade wakatime-cli to v6.2.2 2017-02-13 23:30:21 -08:00
311a0b5309 v7.0.14 2017-02-08 19:25:55 -08:00
b7602d89fb changes for v7.0.14 2017-02-08 19:25:42 -08:00
305de46e32 upgrade wakatime-cli to v6.2.1 2017-02-08 19:25:31 -08:00
c574234927 prevent crashing when logging object unable to be converted to unicode 2017-02-08 19:24:26 -08:00
a69c50f470 use str on objects which are not strings 2017-01-10 10:05:24 -08:00
f4b40089f3 v7.0.13 2016-11-11 11:31:43 +01:00
08394357b7 support old Sublime Text with Python 2.6 2016-11-11 11:29:51 +01:00
205d4eb163 fix import namespace 2016-11-11 11:16:17 +01:00
c4c27e4e9e v7.0.12 2016-10-24 12:41:46 +02:00
9167eb2558 upgrade wakatime-cli to v6.2.0 2016-10-24 12:39:37 +02:00
eaa3bb5180 use python v3.5.2 2016-10-15 16:16:39 +02:00
7755971d11 v7.0.11 2016-09-23 08:37:30 +02:00
7634be5446 handle UnicodeDecodeError exceptions when printing log messages 2016-09-23 08:36:23 +02:00
5e17ad88f6 v7.0.10 2016-09-22 10:28:04 +02:00
24d0f65116 upgrade wakatime-cli to v6.0.9 2016-09-22 10:26:57 +02:00
a326046733 handle UnicodeDecodeError when looking for python to fix #68 2016-09-22 10:24:49 +02:00
9bab00fd8b v7.0.9 2016-09-02 10:54:32 +02:00
b4a13a48b9 upgrade wakatime-cli to v6.0.8 2016-09-02 10:50:54 +02:00
21601f9688 v7.0.8 2016-07-21 15:17:59 +02:00
4c3ec87341 upgrade wakatime-cli to master 2016-07-21 15:16:03 +02:00
b149d7fc87 v7.0.7 2016-07-06 23:26:41 +02:00
52e6107c6e upgrade wakatime-cli to v6.0.7 2016-07-06 23:25:21 +02:00
b340637331 upgrade wakatime-cli to v6.0.6 2016-06-17 10:17:29 +02:00
044867449a v7.0.6 2016-06-13 16:50:10 +02:00
9e3f438823 upgrade wakatime-cli to v6.0.5 2016-06-13 16:48:53 +02:00
887d55c3f3 v7.0.5 2016-06-08 20:46:57 +02:00
19d54f3310 upgrade wakatime-cli to master version to fix unhandled retry exception 2016-06-08 20:43:24 +02:00
514a8762eb update settings screenshot 2016-05-21 15:56:16 +02:00
957c74d226 v7.0.4 2016-05-21 14:32:47 +02:00
7b0432d6ff upgrade wakatime-cli to v6.0.3 2016-05-21 14:28:50 +02:00
09754849be v7.0.3 2016-05-16 16:09:32 +02:00
25ad48a97a upgrade wakatime-cli to v6.0.2 2016-05-16 16:08:58 +02:00
3b2520afa9 use common resources folder location 2016-05-12 12:07:24 +02:00
77c2041ad3 rename activity callback 2016-04-30 00:01:25 +02:00
1655 changed files with 1286 additions and 179616 deletions

View File

@@ -13,3 +13,5 @@ Patches and Suggestions
- Jimmy Selgen Nielsen <jimmy.selgen@gmail.com>
- Patrik Kernstock <info@pkern.at>
- Krishna Glick <krishnaglick@gmail.com>
- Carlos Henrique Gandarez <gandarez@gmail.com>

View File

@@ -3,6 +3,566 @@ History
-------
11.1.0 (2022-11-12)
++++++++++++++++++
- Support for api key vault cmd config.
`#117 <https://github.com/wakatime/sublime-wakatime/pull/117>`_
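An illustrative sketch (not part of this release's diff; the
``api_key_vault_cmd`` setting name follows wakatime-cli), reading the api key
from a password manager via the ``[settings]`` section of ``~/.wakatime.cfg``::

    [settings]
    api_key_vault_cmd = pass show wakatime/api-key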
11.0.8 (2022-08-23)
++++++++++++++++++
- Bugfix to prevent using empty selection object.
`#116 <https://github.com/wakatime/sublime-wakatime/issues/116>`_
11.0.7 (2022-06-25)
++++++++++++++++++
- Check wakatime-cli versions in background thread.
`#115 <https://github.com/wakatime/sublime-wakatime/issues/115>`_
11.0.6 (2022-06-08)
++++++++++++++++++
- Fix call to log helper.
`#113 <https://github.com/wakatime/sublime-wakatime/issues/113>`_
11.0.5 (2022-04-29)
++++++++++++++++++
- Bugfix to not overwrite global urlopener in embedded Python.
`#110 <https://github.com/wakatime/sublime-wakatime/issues/110>`_
- Chmod wakatime-cli to be executable after updating on non-Windows platforms.
11.0.4 (2022-01-06)
++++++++++++++++++
- Copy wakatime-cli when symlink fails on Windows.
`vim-wakatime#122 <https://github.com/wakatime/vim-wakatime/issues/122>`_
- Fix lineno, cursorpos, and lines-in-file arguments.
11.0.3 (2021-12-31)
++++++++++++++++++
- Bugfix to not delete old wakatime-cli until finished downloading new version.
`#107 <https://github.com/wakatime/sublime-wakatime/issues/107>`_
11.0.2 (2021-11-17)
++++++++++++++++++
- Bugfix to encode extra heartbeats cursorpos as int not str when sending to wakatime-cli.
11.0.1 (2021-11-16)
++++++++++++++++++
- Bugfix for install script when using system Python3 and duplicate INI keys.
11.0.0 (2021-10-31)
++++++++++++++++++
- Use new Go wakatime-cli.
10.0.1 (2020-12-28)
++++++++++++++++++
- Improve readme subtitle.
10.0.0 (2020-12-28)
++++++++++++++++++
- Support for standalone wakatime-cli, disabled by default.
9.1.2 (2020-02-13)
++++++++++++++++++
- Upgrade wakatime-cli to v13.0.7.
- Split bundled pygments library for Python 2.7+.
- Upgrade pygments for py27+ to v2.5.2 development master.
- Force requests to use bundled ca cert from certifi by default.
- Upgrade bundled certifi to v2019.11.28.
9.1.1 (2020-02-11)
++++++++++++++++++
- Fix typo in python detection on Windows platform.
9.1.0 (2020-02-09)
++++++++++++++++++
- Detect python in Windows LocalAppData install locations.
- Upgrade wakatime-cli to v13.0.4.
- Bundle cryptography, pyopenssl, and ipaddress packages for improved SSL
support on Python2.
9.0.2 (2019-12-04)
++++++++++++++++++
- Upgrade wakatime-cli to v13.0.3.
- Support slashes in Mercurial and Git branch names.
`wakatime#199 <https://github.com/wakatime/wakatime/issues/199>`_
9.0.1 (2019-11-24)
++++++++++++++++++
- Upgrade wakatime-cli to v13.0.2.
- Filter dependencies longer than 200 characters.
- Close sqlite connection even when error raised.
`wakatime#196 <https://github.com/wakatime/wakatime/issues/196>`_
- Detect ColdFusion as root language instead of HTML.
- New arguments for reading and writing ini config file.
- Today argument shows categories when available.
- Prevent unnecessary debug log when syncing offline heartbeats.
- Support for Python 3.7.
9.0.0 (2019-06-23)
++++++++++++++++++
- New optional config option hide_branch_names.
`wakatime#183 <https://github.com/wakatime/wakatime/issues/183>`_
8.7.0 (2019-05-29)
++++++++++++++++++
- Prevent creating user sublime-settings file when api key already exists in
common wakatime.cfg file.
`#98 <https://github.com/wakatime/sublime-wakatime/issues/98>`_
8.6.1 (2019-05-28)
++++++++++++++++++
- Fix parsing common wakatime.cfg file.
`#98 <https://github.com/wakatime/sublime-wakatime/issues/98>`_
8.6.0 (2019-05-27)
++++++++++++++++++
- Prevent prompting for api key when found from config file.
`#98 <https://github.com/wakatime/sublime-wakatime/issues/98>`_
8.5.0 (2019-05-10)
++++++++++++++++++
- Remove clock icon from status bar.
- Use wakatime-cli to fetch status bar coding time.
8.4.2 (2019-05-07)
++++++++++++++++++
- Upgrade wakatime-cli to v11.0.0.
- Rename argument --show-time-today to --today.
- New argument --show-time-today for printing Today's coding time.
8.4.1 (2019-05-01)
++++++++++++++++++
- Use api subdomain for fetching today's coding activity.
8.4.0 (2019-05-01)
++++++++++++++++++
- Show today's coding time in status bar.
8.3.6 (2019-04-30)
++++++++++++++++++
- Upgrade wakatime-cli to v10.8.4.
- Use wakatime fork of certifi package.
`#95 <https://github.com/wakatime/sublime-wakatime/issues/95>`_
8.3.5 (2019-04-30)
++++++++++++++++++
- Upgrade wakatime-cli to v10.8.3.
- Upgrade certifi to version 2019.03.09.
8.3.4 (2019-03-30)
++++++++++++++++++
- Upgrade wakatime-cli to v10.8.2.
- Detect go.mod files as Go language.
`jetbrains-wakatime#119 <https://github.com/wakatime/jetbrains-wakatime/issues/119>`_
- Detect C++ language from all C++ file extensions.
`vscode-wakatime#87 <https://github.com/wakatime/vscode-wakatime/issues/87>`_
- Add ssl_certs_file arg and config for custom ca bundles.
`wakatime#164 <https://github.com/wakatime/wakatime/issues/164>`_
- Fix bug causing random project names when hide project names enabled.
`vscode-wakatime#162 <https://github.com/wakatime/vscode-wakatime/issues/162>`_
- Add support for UNC network shares without drive letter mapped on Windows.
`wakatime#162 <https://github.com/wakatime/wakatime/issues/162>`_
8.3.3 (2018-12-19)
++++++++++++++++++
- Upgrade wakatime-cli to v10.6.1.
- Correctly parse include_only_with_project_file when set to false.
`wakatime#161 <https://github.com/wakatime/wakatime/issues/161>`_
- Support language argument for non-file entity types.
- Send 25 heartbeats per API request.
- New category "Writing Tests".
`wakatime#156 <https://github.com/wakatime/wakatime/issues/156>`_
- Fix bug caused by git config section without any submodule option defined.
`wakatime#152 <https://github.com/wakatime/wakatime/issues/152>`_
8.3.2 (2018-10-06)
++++++++++++++++++
- Send buffered heartbeats to API every 30 seconds.
8.3.1 (2018-10-05)
++++++++++++++++++
- Upgrade wakatime-cli to v10.4.1.
- Send 50 offline heartbeats to API per request with 1 second delay in between.
8.3.0 (2018-10-03)
++++++++++++++++++
- Upgrade wakatime-cli to v10.4.0.
- Support logging coding activity to remote network drive files on Windows
platform by detecting UNC path from drive letter.
`wakatime#72 <https://github.com/wakatime/wakatime/issues/72>`_
8.2.0 (2018-09-30)
++++++++++++++++++
- Prevent opening cmd window on Windows when running wakatime-cli.
`#91 <https://github.com/wakatime/sublime-wakatime/issues/91>`_
- Upgrade wakatime-cli to v10.3.0.
- Re-enable detecting projects from Subversion folder on Windows platform.
- Prevent opening cmd window on Windows when detecting project from Subversion.
- Run tests on Windows using Appveyor.
8.1.2 (2018-09-20)
++++++++++++++++++
- Upgrade wakatime-cli to v10.2.4.
- Default --sync-offline-activity to 100 instead of 5, so offline coding is
synced to dashboard faster.
- Batch heartbeats in groups of 10 per api request.
- New config hide_project_name and argument --hide-project-names for
obfuscating project names when sending coding activity to api.
- Fix misspelled Gosu language.
`wakatime#137 <https://github.com/wakatime/wakatime/issues/137>`_
- Remove metadata when hiding project or file names.
- New --local-file argument to be used when --entity is a remote file.
- New argument --sync-offline-activity for configuring the maximum offline
heartbeats to sync to the WakaTime API.
8.1.1 (2018-04-26)
++++++++++++++++++
- Upgrade wakatime-cli to v10.2.1.
- Force forward slash for file paths.
- New --category argument.
- New --exclude-unknown-project argument and corresponding config setting.
- Support for project detection from git worktree folders.
8.1.0 (2018-04-03)
++++++++++++++++++
- Prefer Python3 over Python2 when running wakatime-cli core.
- Improve detection of Python3 on Ubuntu 17.10 platforms.
8.0.8 (2018-03-15)
++++++++++++++++++
- Upgrade wakatime-cli to v10.1.3.
- Smarter C vs C++ vs Objective-C language detection.
8.0.7 (2018-03-15)
++++++++++++++++++
- Upgrade wakatime-cli to v10.1.2.
- Detect dependencies from Swift, Objective-C, TypeScript and JavaScript files.
- Categorize .mjs files as JavaScript.
`wakatime#121 <https://github.com/wakatime/wakatime/issues/121>`_
- Detect dependencies from Elm, Haskell, Haxe, Kotlin, Rust, and Scala files.
- Improved Matlab vs Objective-C language detection.
`wakatime#129 <https://github.com/wakatime/wakatime/issues/129>`_
8.0.6 (2018-01-04)
++++++++++++++++++
- Upgrade wakatime-cli to v10.1.0.
- Ability to only track folders containing a .wakatime-project file using new
include_only_with_project_file argument and config option.
8.0.5 (2017-11-24)
++++++++++++++++++
- Upgrade wakatime-cli to v10.0.5.
- Fix bug that caused heartbeats to be cached locally instead of sent to API.
8.0.4 (2017-11-23)
++++++++++++++++++
- Upgrade wakatime-cli to v10.0.4.
- Improve Java dependency detection.
- Skip null or missing heartbeats from extra heartbeats argument.
8.0.3 (2017-11-22)
++++++++++++++++++
- Upgrade wakatime-cli to v10.0.3.
- Support saving unicode heartbeats when working offline.
`wakatime#112 <https://github.com/wakatime/wakatime/issues/112>`_
8.0.2 (2017-11-15)
++++++++++++++++++
- Upgrade wakatime-cli to v10.0.2.
- Limit bulk syncing to 5 heartbeats per request.
`wakatime#109 <https://github.com/wakatime/wakatime/issues/109>`_
8.0.1 (2017-11-09)
++++++++++++++++++
- Upgrade wakatime-cli to v10.0.1.
- Parse array of results from bulk heartbeats endpoint, only saving heartbeats
to local offline cache when they were not accepted by the api.
8.0.0 (2017-11-08)
++++++++++++++++++
- Upgrade wakatime-cli to v10.0.0.
- Upload multiple heartbeats to bulk endpoint for improved network performance.
`wakatime#107 <https://github.com/wakatime/wakatime/issues/107>`_
7.0.26 (2017-11-07)
++++++++++++++++++
- Upgrade wakatime-cli to v9.0.1.
- Fix bug causing 401 response when hidefilenames is enabled.
`wakatime#106 <https://github.com/wakatime/wakatime/issues/106>`_
7.0.25 (2017-11-05)
++++++++++++++++++
- Ability to override python binary location in sublime-settings file.
`#78 <https://github.com/wakatime/sublime-wakatime/issues/78>`_
- Upgrade wakatime-cli to v9.0.0.
- Detect project and branch names from git submodules.
`wakatime#105 <https://github.com/wakatime/wakatime/issues/105>`_
7.0.24 (2017-10-29)
++++++++++++++++++
- Upgrade wakatime-cli to v8.0.5.
- Allow passing string arguments wrapped in extra quotes for plugins which
cannot properly escape spaces in arguments.
- Upgrade pytz to v2017.2.
- Upgrade requests to v2.18.4.
- Upgrade tzlocal to v1.4.
- Use WAKATIME_HOME env variable for offline and session caching.
`wakatime#102 <https://github.com/wakatime/wakatime/issues/102>`_
7.0.23 (2017-09-14)
++++++++++++++++++
- Add "include" setting to bypass ignored files.
`#89 <https://github.com/wakatime/sublime-wakatime/issues/89>`_
7.0.22 (2017-06-08)
++++++++++++++++++
- Upgrade wakatime-cli to v8.0.3.
- Improve Matlab language detection.
7.0.21 (2017-05-24)
++++++++++++++++++
- Upgrade wakatime-cli to v8.0.2.
- Only treat proxy string as NTLM proxy after unable to connect with HTTPS and
SOCKS proxy.
- Support running automated tests on Linux, OS X, and Windows.
- Ability to disable SSL cert verification.
`wakatime#90 <https://github.com/wakatime/wakatime/issues/90>`_
- Disable line count stats for files larger than 2MB to improve performance.
- Print error saying Python needs upgrading when requests can't be imported.
7.0.20 (2017-04-10)
++++++++++++++++++
- Fix install instructions formatting.
7.0.19 (2017-04-10)
++++++++++++++++++
- Remove /var/www/ from default ignored folders.
7.0.18 (2017-03-16)
++++++++++++++++++
- Upgrade wakatime-cli to v8.0.0.
- No longer creating ~/.wakatime.cfg file, since only using Sublime settings.
7.0.17 (2017-03-01)
++++++++++++++++++
- Upgrade wakatime-cli to v7.0.4.
7.0.16 (2017-02-20)
++++++++++++++++++
- Upgrade wakatime-cli to v7.0.2.
7.0.15 (2017-02-13)
++++++++++++++++++
- Upgrade wakatime-cli to v6.2.2.
- Upgrade pygments library to v2.2.0 for improved language detection.
7.0.14 (2017-02-08)
++++++++++++++++++
- Upgrade wakatime-cli to v6.2.1.
- Allow boolean or list of regex patterns for hidefilenames config setting.
7.0.13 (2016-11-11)
++++++++++++++++++
- Support old Sublime Text with Python 2.6.
- Fix bug that prevented reading default api key from existing config file.
7.0.12 (2016-10-24)
++++++++++++++++++
- Upgrade wakatime-cli to v6.2.0.
- Exit with status code 104 when api key is missing or invalid. Exit with
status code 103 when config file missing or invalid.
- New WAKATIME_HOME env variable for setting path to config and log files.
- Improve debug warning message from unsupported dependency parsers.
7.0.11 (2016-09-23)
++++++++++++++++++
- Handle UnicodeDecodeError when logging.
`#68 <https://github.com/wakatime/sublime-wakatime/issues/68>`_
7.0.10 (2016-09-22)
++++++++++++++++++
- Handle UnicodeDecodeError when looking for python.
`#68 <https://github.com/wakatime/sublime-wakatime/issues/68>`_
- Upgrade wakatime-cli to v6.0.9.
7.0.9 (2016-09-02)
++++++++++++++++++
- Upgrade wakatime-cli to v6.0.8.
7.0.8 (2016-07-21)
++++++++++++++++++
- Upgrade wakatime-cli to master version to fix debug logging encoding bug.
7.0.7 (2016-07-06)
++++++++++++++++++
- Upgrade wakatime-cli to v6.0.7.
- Handle unknown exceptions from requests library by deleting cached session
object because it could be from a previous conflicting version.
- New hostname setting in config file to set machine hostname. Hostname
argument takes priority over hostname from config file.
- Prevent logging unrelated exception when logging tracebacks.
- Use correct namespace for pygments.lexers.ClassNotFound exception so it is
caught when dependency detection not available for a language.
7.0.6 (2016-06-13)
++++++++++++++++++
- Upgrade wakatime-cli to v6.0.5.
- Upgrade pygments to v2.1.3 for better language coverage.
7.0.5 (2016-06-08)
++++++++++++++++++
- Upgrade wakatime-cli to master version to fix bug in urllib3 package causing
unhandled retry exceptions.
- Prevent tracking git branch with detached head.
7.0.4 (2016-05-21)
++++++++++++++++++
- Upgrade wakatime-cli to v6.0.3.
- Upgrade requests dependency to v2.10.0.
- Support for SOCKS proxies.
7.0.3 (2016-05-16)
++++++++++++++++++
- Upgrade wakatime-cli to v6.0.2.
- Prevent popup on Mac when xcode-tools is not installed.
7.0.2 (2016-04-29)
++++++++++++++++++

View File

@@ -1,7 +1,6 @@
BSD 3-Clause License
Copyright (c) 2014 by the respective authors (see AUTHORS file).
All rights reserved.
Copyright (c) 2014 Alan Hamlett.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

View File

@@ -1,56 +1,52 @@
sublime-wakatime
================
# sublime-wakatime
Metrics, insights, and time tracking automatically generated from your programming activity.
[![Coding time tracker](https://wakatime.com/badge/github/wakatime/sublime-wakatime.svg)](https://wakatime.com/badge/github/wakatime/sublime-wakatime)
[WakaTime][wakatime] is an open source Sublime Text plugin for metrics, insights, and time tracking automatically generated from your programming activity.
Installation
------------
## Installation
1. Install [Package Control](https://packagecontrol.io/installation).
2. Using [Package Control](https://packagecontrol.io/docs/usage):
2. In Sublime, press `ctrl+shift+p`(Windows, Linux) or `cmd+shift+p`(OS X).
a) Inside Sublime, press `ctrl+shift+p`(Windows, Linux) or `cmd+shift+p`(OS X).
3. Type `install`, then press `enter` with `Package Control: Install Package` selected.
b) Type `install`, then press `enter` with `Package Control: Install Package` selected.
4. Type `wakatime`, then press `enter` with the `WakaTime` plugin selected.
c) Type `wakatime`, then press `enter` with the `WakaTime` plugin selected.
5. Enter your [api key](https://wakatime.com/settings#apikey), then press `enter`.
3. Enter your [api key](https://wakatime.com/settings#apikey), then press `enter`.
6. Use Sublime and your coding activity will be displayed on your [WakaTime dashboard](https://wakatime.com).
4. Use Sublime and your time will be tracked for you automatically.
5. Visit https://wakatime.com/dashboard to see your logged time.
Screen Shots
------------
## Screen Shots
![Project Overview](https://wakatime.com/static/img/ScreenShots/Screen-Shot-2016-03-21.png)
Unresponsive Plugin Warning
---------------------------
## Unresponsive Plugin Warning
In Sublime Text 2, if you get a warning message:
A plugin (WakaTime) may be making Sublime Text unresponsive by taking too long (0.017332s) in its on_modified callback.
To fix this, go to `Preferences > Settings - User` then add the following setting:
To fix this, go to `Preferences → Settings - User` then add the following setting:
`"detect_slow_plugins": false`
Troubleshooting
---------------
## Troubleshooting
First, turn on debug mode in your `WakaTime.sublime-settings` file.
![sublime user settings](https://wakatime.com/static/img/ScreenShots/sublime-wakatime-settings-menu.png?v=2)
![sublime user settings](https://wakatime.com/static/img/ScreenShots/sublime-wakatime-settings-menu.png?v=3)
Add the line: `"debug": true`
Then, open your Sublime Console with `View -> Show Console` to see the plugin executing the wakatime cli process when sending a heartbeat. Also, tail your `$HOME/.wakatime.log` file to debug wakatime cli problems.
Then, open your Sublime Console with `View → Show Console` ( CTRL + \` ) to see the plugin executing the wakatime cli process when sending a heartbeat.
Also, tail your `$HOME/.wakatime.log` file to debug wakatime cli problems.
For more general troubleshooting information, see [wakatime/wakatime#troubleshooting](https://github.com/wakatime/wakatime#troubleshooting).
The [How to Debug Plugins][how to debug] guide shows how to check when coding activity was last received from your editor using the [User Agents API][user agents api].
For more general troubleshooting info, see the [wakatime-cli Troubleshooting Section][wakatime-cli-help].
[wakatime]: https://wakatime.com/sublime-text
[wakatime-cli-help]: https://github.com/wakatime/wakatime#troubleshooting
[how to debug]: https://wakatime.com/faq#debug-plugins
[user agents api]: https://wakatime.com/developers#user_agents

File diff suppressed because it is too large

View File

@@ -3,21 +3,38 @@
// This settings file will be overwritten when upgrading.
{
// Your api key from https://wakatime.com/#apikey
// Your api key from https://wakatime.com/api-key
// Set this in your User specific WakaTime.sublime-settings file.
"api_key": "",
// Ignore files; Files (including absolute paths) that match one of these
// POSIX regular expressions will not be logged.
"ignore": ["^/tmp/", "^/etc/", "^/var/", "COMMIT_EDITMSG$", "PULLREQ_EDITMSG$", "MERGE_MSG$", "TAG_EDITMSG$"],
// Debug mode. Set to true for verbose logging. Defaults to false.
"debug": false,
// Status bar message. Set to false to hide status bar message.
// Proxy with format https://user:pass@host:port or socks5://user:pass@host:port or domain\\user:pass.
"proxy": "",
// Ignore files; Files (including absolute paths) that match one of these
// POSIX regular expressions will not be logged.
"ignore": ["^/tmp/", "^/etc/", "^/var/(?!www/).*", "COMMIT_EDITMSG$", "PULLREQ_EDITMSG$", "MERGE_MSG$", "TAG_EDITMSG$"],
// Include files; Files (including absolute paths) that match one of these
// POSIX regular expressions will bypass your ignore setting.
"include": [".*"],
// Status bar for surfacing errors and displaying today's coding time. Set
// to false to hide. Defaults to true.
"status_bar_enabled": true,
// Show today's coding activity in WakaTime status bar item.
// Defaults to true.
"status_bar_message": true,
// Status bar message format.
"status_bar_message_fmt": "WakaTime {status} %I:%M %p"
"status_bar_coding_activity": true,
// Obfuscate file paths when sending to API. Your dashboard will no longer display coding activity per file.
"hidefilenames": false,
// Python binary location. Uses python from your PATH by default.
"python_binary": "",
// Use standalone compiled Python wakatime-cli (Will not work on ARM Macs)
"standalone": false
}
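
For reference, a minimal user override might look like the following (a sketch; the api key value is a placeholder, and any of the defaults above can be set in your User WakaTime.sublime-settings file):

{
    "api_key": "your-api-key-here",
    "debug": true
}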

View File

@@ -1,9 +0,0 @@
__title__ = 'wakatime'
__description__ = 'Common interface to the WakaTime api.'
__url__ = 'https://github.com/wakatime/wakatime'
__version_info__ = ('6', '0', '1')
__version__ = '.'.join(__version_info__)
__author__ = 'Alan Hamlett'
__author_email__ = 'alan@wakatime.com'
__license__ = 'BSD'
__copyright__ = 'Copyright 2016 Alan Hamlett'

View File

@@ -1,17 +0,0 @@
# -*- coding: utf-8 -*-
"""
    wakatime
    ~~~~~~~~

    Common interface to the WakaTime api.

    http://wakatime.com

    :copyright: (c) 2013 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""

__all__ = ['main']

from .main import execute

View File

@@ -1,35 +0,0 @@
# -*- coding: utf-8 -*-
"""
    wakatime.cli
    ~~~~~~~~~~~~

    Command-line entry point.

    :copyright: (c) 2013 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""

import os
import sys

# get path to local wakatime package
package_folder = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# add local wakatime package to sys.path
sys.path.insert(0, package_folder)

# import local wakatime package
try:
    import wakatime
except (TypeError, ImportError):
    # on Windows, non-ASCII characters in import path can be fixed using
    # the script path from sys.argv[0].
    # More info at https://github.com/wakatime/wakatime/issues/32
    package_folder = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
    sys.path.insert(0, package_folder)
    import wakatime


if __name__ == '__main__':
    sys.exit(wakatime.execute(sys.argv[1:]))

View File

@@ -1,93 +0,0 @@
# -*- coding: utf-8 -*-
"""
    wakatime.compat
    ~~~~~~~~~~~~~~~

    For working with Python2 and Python3.

    :copyright: (c) 2014 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""

import codecs
import sys


is_py2 = (sys.version_info[0] == 2)
is_py3 = (sys.version_info[0] == 3)


if is_py2: # pragma: nocover

    def u(text):
        if text is None:
            return None
        try:
            return text.decode('utf-8')
        except:
            try:
                return text.decode(sys.getdefaultencoding())
            except:
                try:
                    return unicode(text)
                except:
                    return text
    open = codecs.open
    basestring = basestring

elif is_py3: # pragma: nocover

    def u(text):
        if text is None:
            return None
        if isinstance(text, bytes):
            try:
                return text.decode('utf-8')
            except:
                try:
                    return text.decode(sys.getdefaultencoding())
                except:
                    pass
        try:
            return str(text)
        except:
            return text
    open = open
    basestring = (str, bytes)


try:
    from importlib import import_module
except ImportError: # pragma: nocover
    def _resolve_name(name, package, level):
        """Return the absolute name of the module to be imported."""
        if not hasattr(package, 'rindex'):
            raise ValueError("'package' not set to a string")
        dot = len(package)
        for x in xrange(level, 1, -1):
            try:
                dot = package.rindex('.', 0, dot)
            except ValueError:
                raise ValueError("attempted relative import beyond top-level "
                                 "package")
        return "%s.%s" % (package[:dot], name)

    def import_module(name, package=None):
        """Import a module.

        The 'package' argument is required when performing a relative import.
        It specifies the package to use as the anchor point from which to
        resolve the relative import to an absolute import.
        """
        if name.startswith('.'):
            if not package:
                raise TypeError("relative imports require the 'package' "
                                + "argument")
            level = 0
            for character in name:
                if character != '.':
                    break
                level += 1
            name = _resolve_name(name[level:], package, level)
        __import__(name)
        return sys.modules[name]

View File

@@ -1,18 +0,0 @@
# -*- coding: utf-8 -*-
"""
    wakatime.constants
    ~~~~~~~~~~~~~~~~~~

    Constant variable definitions.

    :copyright: (c) 2016 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""


SUCCESS = 0
API_ERROR = 102
CONFIG_FILE_PARSE_ERROR = 103
AUTH_ERROR = 104
UNKNOWN_ERROR = 105
MALFORMED_HEARTBEAT_ERROR = 106

View File

@@ -1,130 +0,0 @@
# -*- coding: utf-8 -*-
"""
    wakatime.dependencies
    ~~~~~~~~~~~~~~~~~~~~~

    Parse dependencies from a source code file.

    :copyright: (c) 2014 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""

import logging
import re
import sys
import traceback

from ..compat import u, open, import_module
from ..exceptions import NotYetImplemented


log = logging.getLogger('WakaTime')


class TokenParser(object):
    """The base class for all dependency parsers. To add support for your
    language, inherit from this class and implement the :meth:`parse` method
    to return a list of dependency strings.
    """
    exclude = []

    def __init__(self, source_file, lexer=None):
        self._tokens = None
        self.dependencies = []
        self.source_file = source_file
        self.lexer = lexer
        self.exclude = [re.compile(x, re.IGNORECASE) for x in self.exclude]

    @property
    def tokens(self):
        if self._tokens is None:
            self._tokens = self._extract_tokens()
        return self._tokens

    def parse(self, tokens=[]):
        """ Should return a list of dependencies.
        """
        raise NotYetImplemented()

    def append(self, dep, truncate=False, separator=None, truncate_to=None,
               strip_whitespace=True):
        self._save_dependency(
            dep,
            truncate=truncate,
            truncate_to=truncate_to,
            separator=separator,
            strip_whitespace=strip_whitespace,
        )

    def partial(self, token):
        return u(token).split('.')[-1]

    def _extract_tokens(self):
        if self.lexer:
            try:
                with open(self.source_file, 'r', encoding='utf-8') as fh:
                    return self.lexer.get_tokens_unprocessed(fh.read(512000))
            except:
                pass
            try:
                with open(self.source_file, 'r', encoding=sys.getfilesystemencoding()) as fh:
                    return self.lexer.get_tokens_unprocessed(fh.read(512000))
            except:
                pass
        return []

    def _save_dependency(self, dep, truncate=False, separator=None,
                         truncate_to=None, strip_whitespace=True):
        if truncate:
            if separator is None:
                separator = u('.')
            separator = u(separator)
            dep = dep.split(separator)
            if truncate_to is None or truncate_to < 1:
                truncate_to = 1
            if truncate_to > len(dep):
                truncate_to = len(dep)
            dep = dep[0] if len(dep) == 1 else separator.join(dep[:truncate_to])
        if strip_whitespace:
            dep = dep.strip()
        if dep and (not separator or not dep.startswith(separator)):
            should_exclude = False
            for compiled in self.exclude:
                if compiled.search(dep):
                    should_exclude = True
                    break
            if not should_exclude:
                self.dependencies.append(dep)


class DependencyParser(object):
    source_file = None
    lexer = None
    parser = None

    def __init__(self, source_file, lexer):
        self.source_file = source_file
        self.lexer = lexer

        if self.lexer:
            module_name = self.lexer.__module__.rsplit('.', 1)[-1]
            class_name = self.lexer.__class__.__name__.replace('Lexer', 'Parser', 1)
        else:
            module_name = 'unknown'
            class_name = 'UnknownParser'

        try:
            module = import_module('.%s' % module_name, package=__package__)
            try:
                self.parser = getattr(module, class_name)
            except AttributeError:
                log.debug('Module {0} is missing class {1}'.format(module.__name__, class_name))
        except ImportError:
            log.debug(traceback.format_exc())

    def parse(self):
        if self.parser:
            plugin = self.parser(self.source_file, lexer=self.lexer)
            dependencies = plugin.parse()
            return list(set(dependencies))
        return []
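
As the TokenParser docstring above describes, language support is added by subclassing it and implementing parse(). A minimal hypothetical sketch (ExampleParser is not part of this diff; the exact token names depend on the Pygments lexer in use):

from . import TokenParser


class ExampleParser(TokenParser):
    """Hypothetical parser: records each string that follows an 'import' keyword."""
    exclude = [
        r'^__future__$', # filtered out by append() before saving
    ]
    state = None

    def parse(self):
        for index, token, content in self.tokens:
            if self.partial(token) == 'Keyword' and content == 'import':
                self.state = 'import'
            elif self.state == 'import' and self.partial(token) in ('Single', 'Double'):
                # append() strips whitespace and applies the exclude patterns
                self.append(content.strip('"').strip("'"))
                self.state = None
        return self.dependencies

DependencyParser then finds such a class by naming convention, replacing 'Lexer' with 'Parser' in the Pygments lexer's class name, and calls parse() to collect the deduplicated dependency list.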

View File

@@ -1,68 +0,0 @@
# -*- coding: utf-8 -*-
"""
    wakatime.languages.c_cpp
    ~~~~~~~~~~~~~~~~~~~~~~~~

    Parse dependencies from C++ code.

    :copyright: (c) 2014 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""

from . import TokenParser


class CppParser(TokenParser):
    exclude = [
        r'^stdio\.h$',
        r'^stdlib\.h$',
        r'^string\.h$',
        r'^time\.h$',
    ]

    def parse(self):
        for index, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        if self.partial(token) == 'Preproc':
            self._process_preproc(token, content)
        else:
            self._process_other(token, content)

    def _process_preproc(self, token, content):
        if content.strip().startswith('include ') or content.strip().startswith("include\t"):
            content = content.replace('include', '', 1).strip().strip('"').strip('<').strip('>').strip()
            self.append(content)

    def _process_other(self, token, content):
        pass


class CParser(TokenParser):
    exclude = [
        r'^stdio\.h$',
        r'^stdlib\.h$',
        r'^string\.h$',
        r'^time\.h$',
    ]

    def parse(self):
        for index, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        if self.partial(token) == 'Preproc':
            self._process_preproc(token, content)
        else:
            self._process_other(token, content)

    def _process_preproc(self, token, content):
        if content.strip().startswith('include ') or content.strip().startswith("include\t"):
            content = content.replace('include', '', 1).strip().strip('"').strip('<').strip('>').strip()
            self.append(content)

    def _process_other(self, token, content):
        pass

View File

@@ -1,64 +0,0 @@
# -*- coding: utf-8 -*-
"""
    wakatime.languages.data
    ~~~~~~~~~~~~~~~~~~~~~~~

    Parse dependencies from data files.

    :copyright: (c) 2014 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""

import os

from . import TokenParser
from ..compat import u


FILES = {
    'bower.json': {'exact': True, 'dependency': 'bower'},
    'component.json': {'exact': True, 'dependency': 'bower'},
    'package.json': {'exact': True, 'dependency': 'npm'},
}


class JsonParser(TokenParser):
    state = None
    level = 0

    def parse(self):
        self._process_file_name(os.path.basename(self.source_file))
        for index, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_file_name(self, file_name):
        for key, value in FILES.items():
            found = (key == file_name) if value.get('exact') else (key.lower() in file_name.lower())
            if found:
                self.append(value['dependency'])

    def _process_token(self, token, content):
        if u(token) == 'Token.Name.Tag':
            self._process_tag(token, content)
        elif u(token) == 'Token.Literal.String.Single' or u(token) == 'Token.Literal.String.Double':
            self._process_literal_string(token, content)
        elif u(token) == 'Token.Punctuation':
            self._process_punctuation(token, content)

    def _process_tag(self, token, content):
        if content.strip('"').strip("'") == 'dependencies' or content.strip('"').strip("'") == 'devDependencies':
            self.state = 'dependencies'
        elif self.state == 'dependencies' and self.level == 2:
            self.append(content.strip('"').strip("'"))

    def _process_literal_string(self, token, content):
        pass

    def _process_punctuation(self, token, content):
        if content == '{':
            self.level += 1
        elif content == '}':
            self.level -= 1
            if self.state is not None and self.level <= 1:
                self.state = None

View File

@@ -1,64 +0,0 @@
# -*- coding: utf-8 -*-
"""
    wakatime.languages.dotnet
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    Parse dependencies from .NET code.

    :copyright: (c) 2014 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""

from . import TokenParser
from ..compat import u


class CSharpParser(TokenParser):
    exclude = [
        r'^system$',
        r'^microsoft$',
    ]
    state = None
    buffer = u('')

    def parse(self):
        for index, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        if self.partial(token) == 'Keyword':
            self._process_keyword(token, content)
        if self.partial(token) == 'Namespace' or self.partial(token) == 'Name':
            self._process_namespace(token, content)
        elif self.partial(token) == 'Punctuation':
            self._process_punctuation(token, content)
        else:
            self._process_other(token, content)

    def _process_keyword(self, token, content):
        if content == 'using':
            self.state = 'import'
            self.buffer = u('')

    def _process_namespace(self, token, content):
        if self.state == 'import':
            if u(content) != u('import') and u(content) != u('package') and u(content) != u('namespace') and u(content) != u('static'):
                if u(content) == u(';'): # pragma: nocover
                    self._process_punctuation(token, content)
                else:
                    self.buffer += u(content)

    def _process_punctuation(self, token, content):
        if self.state == 'import':
            if u(content) == u(';'):
                self.append(self.buffer, truncate=True)
                self.buffer = u('')
                self.state = None
            elif u(content) == u('='):
                self.buffer = u('')
            else:
                self.buffer += u(content)

    def _process_other(self, token, content):
        pass

View File

@@ -1,77 +0,0 @@
# -*- coding: utf-8 -*-
"""
    wakatime.languages.go
    ~~~~~~~~~~~~~~~~~~~~~

    Parse dependencies from Go code.

    :copyright: (c) 2016 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""

from . import TokenParser


class GoParser(TokenParser):
    state = None
    parens = 0
    aliases = 0
    exclude = [
        r'^"fmt"$',
    ]

    def parse(self):
        for index, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        if self.partial(token) == 'Namespace':
            self._process_namespace(token, content)
        elif self.partial(token) == 'Punctuation':
            self._process_punctuation(token, content)
        elif self.partial(token) == 'String':
            self._process_string(token, content)
        elif self.partial(token) == 'Text':
            self._process_text(token, content)
        elif self.partial(token) == 'Other':
            self._process_other(token, content)
        else:
            self._process_misc(token, content)

    def _process_namespace(self, token, content):
        self.state = content
        self.parens = 0
        self.aliases = 0

    def _process_string(self, token, content):
        if self.state == 'import':
            self.append(content, truncate=False)

    def _process_punctuation(self, token, content):
        if content == '(':
            self.parens += 1
        elif content == ')':
            self.parens -= 1
        elif content == '.':
            self.aliases += 1
        else:
            self.state = None

    def _process_text(self, token, content):
        if self.state == 'import':
            if content == "\n" and self.parens <= 0:
                self.state = None
                self.parens = 0
                self.aliases = 0
        else:
            self.state = None

    def _process_other(self, token, content):
        if self.state == 'import':
            self.aliases += 1
        else:
            self.state = None

    def _process_misc(self, token, content):
        self.state = None

View File

@@ -1,96 +0,0 @@
# -*- coding: utf-8 -*-
"""
    wakatime.languages.java
    ~~~~~~~~~~~~~~~~~~~~~~~

    Parse dependencies from Java code.

    :copyright: (c) 2014 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""

from . import TokenParser
from ..compat import u


class JavaParser(TokenParser):
    exclude = [
        r'^java\.',
        r'^javax\.',
        r'^import$',
        r'^package$',
        r'^namespace$',
        r'^static$',
    ]
    state = None
    buffer = u('')

    def parse(self):
        for index, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        if self.partial(token) == 'Namespace':
            self._process_namespace(token, content)
        if self.partial(token) == 'Name':
            self._process_name(token, content)
        elif self.partial(token) == 'Attribute':
            self._process_attribute(token, content)
        elif self.partial(token) == 'Operator':
            self._process_operator(token, content)
        else:
            self._process_other(token, content)

    def _process_namespace(self, token, content):
        if u(content) == u('import'):
            self.state = 'import'

        elif self.state == 'import':
            keywords = [
                u('package'),
                u('namespace'),
                u('static'),
            ]
            if u(content) in keywords:
                return
            self.buffer = u('{0}{1}').format(self.buffer, u(content))

        elif self.state == 'import-finished':
            content = content.split(u('.'))

            if len(content) == 1:
                self.append(content[0])

            elif len(content) > 1:
                if len(content[0]) == 3:
                    content = content[1:]
                if content[-1] == u('*'):
                    content = content[:len(content) - 1]

                if len(content) == 1:
                    self.append(content[0])
                elif len(content) > 1:
                    self.append(u('.').join(content[:2]))

            self.state = None

    def _process_name(self, token, content):
        if self.state == 'import':
            self.buffer = u('{0}{1}').format(self.buffer, u(content))

    def _process_attribute(self, token, content):
        if self.state == 'import':
            self.buffer = u('{0}{1}').format(self.buffer, u(content))

    def _process_operator(self, token, content):
        if u(content) == u(';'):
            self.state = 'import-finished'
            self._process_namespace(token, self.buffer)
            self.state = None
            self.buffer = u('')
        elif self.state == 'import':
            self.buffer = u('{0}{1}').format(self.buffer, u(content))

    def _process_other(self, token, content):
        pass

View File

@@ -1,85 +0,0 @@
# -*- coding: utf-8 -*-
"""
    wakatime.languages.php
    ~~~~~~~~~~~~~~~~~~~~~~

    Parse dependencies from PHP code.

    :copyright: (c) 2014 Alan Hamlett.
    :license: BSD, see LICENSE for more details.
"""

from . import TokenParser
from ..compat import u


class PhpParser(TokenParser):
    state = None
    parens = 0

    def parse(self):
        for index, token, content in self.tokens:
            self._process_token(token, content)
        return self.dependencies

    def _process_token(self, token, content):
        if self.partial(token) == 'Keyword':
            self._process_keyword(token, content)
        elif u(token) == 'Token.Literal.String.Single' or u(token) == 'Token.Literal.String.Double':
            self._process_literal_string(token, content)
        elif u(token) == 'Token.Name.Other':
            self._process_name(token, content)
        elif u(token) == 'Token.Name.Function':
            self._process_function(token, content)
        elif self.partial(token) == 'Punctuation':
            self._process_punctuation(token, content)
        elif self.partial(token) == 'Text':
            self._process_text(token, content)
        else:
            self._process_other(token, content)

    def _process_name(self, token, content):
        if self.state == 'use':
            self.append(content, truncate=True, separator=u("\\"))

    def _process_function(self, token, content):
        if self.state == 'use function':
            self.append(content, truncate=True, separator=u("\\"))
            self.state = 'use'

    def _process_keyword(self, token, content):
        if content == 'include' or content == 'include_once' or content == 'require' or content == 'require_once':
            self.state = 'include'
        elif content == 'use':
            self.state = 'use'
        elif content == 'as':
            self.state = 'as'
        elif self.state == 'use' and content == 'function':
            self.state = 'use function'
        else:
            self.state = None

    def _process_literal_string(self, token, content):
        if self.state == 'include':
            if content != '"' and content != "'":
                content = content.strip()
                if u(token) == 'Token.Literal.String.Double':
                    content = u("'{0}'").format(content)
                self.append(content)
            self.state = None

    def _process_punctuation(self, token, content):
        if content == '(':
            self.parens += 1
        elif content == ')':
            self.parens -= 1
        elif (self.state == 'use' or self.state == 'as') and content == ',':
            self.state = 'use'
        else:
            self.state = None

    def _process_text(self, token, content):
        pass

    def _process_other(self, token, content):
        self.state = None

View File

@ -1,85 +0,0 @@
# -*- coding: utf-8 -*-
"""
wakatime.languages.python
~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Python code.
:copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
from . import TokenParser
class PythonParser(TokenParser):
state = None
parens = 0
nonpackage = False
exclude = [
r'^os$',
r'^sys\.',
]
def parse(self):
for index, token, content in self.tokens:
self._process_token(token, content)
return self.dependencies
def _process_token(self, token, content):
if self.partial(token) == 'Namespace':
self._process_namespace(token, content)
elif self.partial(token) == 'Operator':
self._process_operator(token, content)
elif self.partial(token) == 'Punctuation':
self._process_punctuation(token, content)
elif self.partial(token) == 'Text':
self._process_text(token, content)
else:
self._process_other(token, content)
def _process_namespace(self, token, content):
if self.state is None:
self.state = content
else:
if content == 'as':
self.nonpackage = True
else:
self._process_import(token, content)
def _process_operator(self, token, content):
if self.state is not None:
if content == '.':
self.nonpackage = True
def _process_punctuation(self, token, content):
if content == '(':
self.parens += 1
elif content == ')':
self.parens -= 1
self.nonpackage = False
def _process_text(self, token, content):
if self.state is not None:
if content == "\n" and self.parens == 0:
self.state = None
self.nonpackage = False
def _process_other(self, token, content):
pass
def _process_import(self, token, content):
if not self.nonpackage:
if self.state == 'from':
self.append(content, truncate=True, truncate_to=1)
self.state = 'from-2'
elif self.state == 'from-2' and content != 'import':
self.append(content, truncate=True, truncate_to=1)
elif self.state == 'import':
self.append(content, truncate=True, truncate_to=1)
self.state = 'import-2'
elif self.state == 'import-2':
self.append(content, truncate=True, truncate_to=1)
else:
self.state = None
self.nonpackage = False
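
Not part of the diff: the exclude patterns above are plain regular expressions applied to each collected dependency (the matching helper lives in the TokenParser base class, not shown here); a quick sketch of their effect, assuming re.search semantics:

import re

exclude = [r'^os$', r'^sys\.']
for dep in ['os', 'sys.path', 'django', 'flask']:
    skipped = any(re.search(pattern, dep) for pattern in exclude)
    print(dep, 'skipped' if skipped else 'kept')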


@@ -1,220 +0,0 @@
# -*- coding: utf-8 -*-
"""
wakatime.languages.templates
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Templates.
:copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
from . import TokenParser
from ..compat import u
""" If these keywords are found in the source file, treat them as a dependency.
Must be lower-case strings.
"""
KEYWORDS = [
'_',
'$',
'angular',
'assert', # probably mocha
'backbone',
'batman',
'c3',
'can',
'casper',
'chai',
'chaplin',
'd3',
'define', # probably require
'describe', # mocha or jasmine
'eco',
'ember',
'espresso',
'expect', # probably jasmine
'exports', # probably npm
'express',
'gulp',
'handlebars',
'highcharts',
'jasmine',
'jquery',
'jstz',
'ko', # probably knockout
'm', # probably mithril
'marionette',
'meteor',
'moment',
'monitorio',
'mustache',
'phantom',
'pickadate',
'pikaday',
'qunit',
'react',
'reactive',
'require', # probably the commonjs spec
'ripple',
'rivets',
'socketio',
'spine',
'thorax',
'underscore',
'vue',
'way',
'zombie',
]
class LassoJavascriptParser(TokenParser):
def parse(self):
for index, token, content in self.tokens:
self._process_token(token, content)
return self.dependencies
def _process_token(self, token, content):
if u(token) == 'Token.Name.Other':
self._process_name(token, content)
elif u(token) == 'Token.Literal.String.Single' or u(token) == 'Token.Literal.String.Double':
self._process_literal_string(token, content)
def _process_name(self, token, content):
if content.lower() in KEYWORDS:
self.append(content.lower())
def _process_literal_string(self, token, content):
if 'famous/core/' in content.strip('"').strip("'"):
self.append('famous')
class HtmlDjangoParser(TokenParser):
tags = []
getting_attrs = False
current_attr = None
current_attr_value = None
def parse(self):
for index, token, content in self.tokens:
self._process_token(token, content)
return self.dependencies
def _process_token(self, token, content):
if u(token) == 'Token.Name.Tag':
self._process_tag(token, content)
elif u(token) == 'Token.Literal.String':
self._process_string(token, content)
elif u(token) == 'Token.Name.Attribute':
self._process_attribute(token, content)
@property
def current_tag(self):
return None if len(self.tags) == 0 else self.tags[0]
def _process_tag(self, token, content):
if content.startswith('</') or content.startswith('/'):
try:
self.tags.pop(0)
except IndexError:
# ignore errors from malformed markup
pass
self.getting_attrs = False
elif content.startswith('<'):
self.tags.insert(0, content.replace('<', '', 1).strip().lower())
self.getting_attrs = True
elif content.startswith('>'):
self.getting_attrs = False
self.current_attr = None
def _process_attribute(self, token, content):
if self.getting_attrs:
self.current_attr = content.lower().strip('=')
else:
self.current_attr = None
self.current_attr_value = None
def _process_string(self, token, content):
if self.getting_attrs and self.current_attr is not None:
if content.endswith('"') or content.endswith("'"):
if self.current_attr_value is not None:
self.current_attr_value += content
if self.current_tag == 'script' and self.current_attr == 'src':
self.append(self.current_attr_value)
self.current_attr = None
self.current_attr_value = None
else:
if len(content) == 1:
self.current_attr_value = content
else:
if self.current_tag == 'script' and self.current_attr == 'src':
self.append(content)
self.current_attr = None
self.current_attr_value = None
elif content.startswith('"') or content.startswith("'"):
if self.current_attr_value is None:
self.current_attr_value = content
else:
self.current_attr_value += content
class VelocityHtmlParser(HtmlDjangoParser):
pass
class MyghtyHtmlParser(HtmlDjangoParser):
pass
class MasonParser(HtmlDjangoParser):
pass
class MakoHtmlParser(HtmlDjangoParser):
pass
class CheetahHtmlParser(HtmlDjangoParser):
pass
class HtmlGenshiParser(HtmlDjangoParser):
pass
class RhtmlParser(HtmlDjangoParser):
pass
class HtmlPhpParser(HtmlDjangoParser):
pass
class HtmlSmartyParser(HtmlDjangoParser):
pass
class EvoqueHtmlParser(HtmlDjangoParser):
pass
class ColdfusionHtmlParser(HtmlDjangoParser):
pass
class LassoHtmlParser(HtmlDjangoParser):
pass
class HandlebarsHtmlParser(HtmlDjangoParser):
pass
class YamlJinjaParser(HtmlDjangoParser):
pass
class TwigHtmlParser(HtmlDjangoParser):
pass
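
A sketch outside the diff: HtmlDjangoParser only records the src attribute of <script> tags, stitching quoted string fragments back together as it goes. This is the token stream it walks for one such tag:

from pygments.lexers import HtmlLexer

html = '<script src="vendor/jquery.js"></script>'
for index, token, content in HtmlLexer().get_tokens_unprocessed(html):
    print(token, repr(content))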


@@ -1,33 +0,0 @@
# -*- coding: utf-8 -*-
"""
wakatime.languages.unknown
~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from files of unknown language.
:copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import os
from . import TokenParser
FILES = {
'bower': {'exact': False, 'dependency': 'bower'},
'grunt': {'exact': False, 'dependency': 'grunt'},
}
class UnknownParser(TokenParser):
def parse(self):
self._process_file_name(os.path.basename(self.source_file))
return self.dependencies
def _process_file_name(self, file_name):
for key, value in FILES.items():
found = (key == file_name) if value.get('exact') else (key.lower() in file_name.lower())
if found:
self.append(value['dependency'])
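
For illustration only, not in the diff: with 'exact' set to False for both entries, matching is a case-insensitive substring test on the file name, so for example a Gruntfile still registers the grunt dependency:

file_name = 'Gruntfile.js'
print('grunt' in file_name.lower())  # True -> appends 'grunt'
print('bower' in file_name.lower())  # False -> no match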


@@ -1,14 +0,0 @@
# -*- coding: utf-8 -*-
"""
wakatime.exceptions
~~~~~~~~~~~~~~~~~~~
Custom exceptions.
:copyright: (c) 2015 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
class NotYetImplemented(Exception):
"""This method needs to be implemented."""


@@ -1,80 +0,0 @@
{
"ActionScript": "ActionScript",
"ApacheConf": "ApacheConf",
"AppleScript": "AppleScript",
"ASP": "ASP",
"Assembly": "Assembly",
"Awk": "Awk",
"Bash": "Bash",
"Basic": "Basic",
"BrightScript": "BrightScript",
"C": "C",
"C#": "C#",
"C++": "C++",
"Clojure": "Clojure",
"Cocoa": "Cocoa",
"CoffeeScript": "CoffeeScript",
"ColdFusion": "ColdFusion",
"Common Lisp": "Common Lisp",
"CSHTML": "CSHTML",
"CSS": "CSS",
"Dart": "Dart",
"Delphi": "Delphi",
"Elixir": "Elixir",
"Elm": "Elm",
"Emacs Lisp": "Emacs Lisp",
"Erlang": "Erlang",
"F#": "F#",
"Fortran": "Fortran",
"Go": "Go",
"Gous": "Gosu",
"Groovy": "Groovy",
"Haml": "Haml",
"HaXe": "HaXe",
"Haskell": "Haskell",
"HTML": "HTML",
"INI": "INI",
"Jade": "Jade",
"Java": "Java",
"JavaScript": "JavaScript",
"JSON": "JSON",
"JSX": "JSX",
"Kotlin": "Kotlin",
"LESS": "LESS",
"Lua": "Lua",
"Markdown": "Markdown",
"Matlab": "Matlab",
"Mustache": "Mustache",
"OCaml": "OCaml",
"Objective-C": "Objective-C",
"Objective-C++": "Objective-C++",
"Objective-J": "Objective-J",
"Perl": "Perl",
"PHP": "PHP",
"PowerShell": "PowerShell",
"Prolog": "Prolog",
"Puppet": "Puppet",
"Python": "Python",
"R": "R",
"reStructuredText": "reStructuredText",
"Ruby": "Ruby",
"Rust": "Rust",
"Sass": "Sass",
"Scala": "Scala",
"Scheme": "Scheme",
"SCSS": "SCSS",
"Shell": "Shell",
"Slim": "Slim",
"Smalltalk": "Smalltalk",
"SQL": "SQL",
"Swift": "Swift",
"Text": "Text",
"Turing": "Turing",
"Twig": "Twig",
"TypeScript": "TypeScript",
"VB.net": "VB.net",
"VimL": "VimL",
"XAML": "XAML",
"XML": "XML",
"YAML": "YAML"
}


@@ -1,531 +0,0 @@
{
"a2ps": null,
"a65": "Assembly",
"aap": null,
"abap": null,
"abaqus": null,
"abc": null,
"abel": null,
"acedb": null,
"ada": null,
"aflex": null,
"ahdl": null,
"alsaconf": null,
"amiga": null,
"aml": null,
"ampl": null,
"ant": null,
"antlr": null,
"apache": null,
"apachestyle": null,
"arch": null,
"art": null,
"asm": "Assembly",
"asm68k": "Assembly",
"asmh8300": "Assembly",
"asn": null,
"aspperl": null,
"aspvbs": null,
"asterisk": null,
"asteriskvm": null,
"atlas": null,
"autohotkey": null,
"autoit": null,
"automake": null,
"ave": null,
"awk": null,
"ayacc": null,
"b": null,
"baan": null,
"basic": "Basic",
"bc": null,
"bdf": null,
"bib": null,
"bindzone": null,
"blank": null,
"bst": null,
"btm": null,
"bzr": null,
"c": "C",
"cabal": null,
"calendar": null,
"catalog": null,
"cdl": null,
"cdrdaoconf": null,
"cdrtoc": null,
"cf": null,
"cfg": null,
"ch": null,
"chaiscript": null,
"change": null,
"changelog": null,
"chaskell": null,
"cheetah": null,
"chill": null,
"chordpro": null,
"cl": null,
"clean": null,
"clipper": null,
"cmake": null,
"cmusrc": null,
"cobol": null,
"coco": null,
"conaryrecipe": null,
"conf": null,
"config": null,
"context": null,
"cpp": "C++",
"crm": null,
"crontab": "Crontab",
"cs": "C#",
"csc": null,
"csh": null,
"csp": null,
"css": null,
"cterm": null,
"ctrlh": null,
"cucumber": null,
"cuda": null,
"cupl": null,
"cuplsim": null,
"cvs": null,
"cvsrc": null,
"cweb": null,
"cynlib": null,
"cynpp": null,
"d": null,
"datascript": null,
"dcd": null,
"dcl": null,
"debchangelog": null,
"debcontrol": null,
"debsources": null,
"def": null,
"denyhosts": null,
"desc": null,
"desktop": null,
"dictconf": null,
"dictdconf": null,
"diff": null,
"dircolors": null,
"diva": null,
"django": null,
"dns": null,
"docbk": null,
"docbksgml": null,
"docbkxml": null,
"dosbatch": null,
"dosini": null,
"dot": null,
"doxygen": null,
"dracula": null,
"dsl": null,
"dtd": null,
"dtml": null,
"dtrace": null,
"dylan": null,
"dylanintr": null,
"dylanlid": null,
"ecd": null,
"edif": null,
"eiffel": null,
"elf": null,
"elinks": null,
"elmfilt": null,
"erlang": null,
"eruby": null,
"esmtprc": null,
"esqlc": null,
"esterel": null,
"eterm": null,
"eviews": null,
"exim": null,
"expect": null,
"exports": null,
"fan": null,
"fasm": null,
"fdcc": null,
"fetchmail": null,
"fgl": null,
"flexwiki": null,
"focexec": null,
"form": null,
"forth": null,
"fortran": null,
"foxpro": null,
"framescript": null,
"freebasic": null,
"fstab": null,
"fvwm": null,
"fvwm2m4": null,
"gdb": null,
"gdmo": null,
"gedcom": null,
"git": null,
"gitcommit": null,
"gitconfig": null,
"gitrebase": null,
"gitsendemail": null,
"gkrellmrc": null,
"gnuplot": null,
"gp": null,
"gpg": null,
"grads": null,
"gretl": null,
"groff": null,
"groovy": null,
"group": null,
"grub": null,
"gsp": null,
"gtkrc": null,
"haml": "Haml",
"hamster": null,
"haskell": "Haskell",
"haste": null,
"hastepreproc": null,
"hb": null,
"help": null,
"hercules": null,
"hex": null,
"hog": null,
"hostconf": null,
"html": "HTML",
"htmlcheetah": "HTML",
"htmldjango": "HTML",
"htmlm4": "HTML",
"htmlos": null,
"ia64": null,
"ibasic": null,
"icemenu": null,
"icon": null,
"idl": null,
"idlang": null,
"indent": null,
"inform": null,
"initex": null,
"initng": null,
"inittab": null,
"ipfilter": null,
"ishd": null,
"iss": null,
"ist": null,
"jal": null,
"jam": null,
"jargon": null,
"java": "Java",
"javacc": null,
"javascript": "JavaScript",
"jess": null,
"jgraph": null,
"jproperties": null,
"jsp": null,
"kconfig": null,
"kix": null,
"kscript": null,
"kwt": null,
"lace": null,
"latte": null,
"ld": null,
"ldapconf": null,
"ldif": null,
"lex": null,
"lftp": null,
"lhaskell": "Haskell",
"libao": null,
"lifelines": null,
"lilo": null,
"limits": null,
"liquid": null,
"lisp": null,
"lite": null,
"litestep": null,
"loginaccess": null,
"logindefs": null,
"logtalk": null,
"lotos": null,
"lout": null,
"lpc": null,
"lprolog": null,
"lscript": null,
"lsl": null,
"lss": null,
"lua": null,
"lynx": null,
"m4": null,
"mail": null,
"mailaliases": null,
"mailcap": null,
"make": null,
"man": null,
"manconf": null,
"manual": null,
"maple": null,
"markdown": "Markdown",
"masm": null,
"mason": null,
"master": null,
"matlab": null,
"maxima": null,
"mel": null,
"messages": null,
"mf": null,
"mgl": null,
"mgp": null,
"mib": null,
"mma": null,
"mmix": null,
"mmp": null,
"modconf": null,
"model": null,
"modsim3": null,
"modula2": null,
"modula3": null,
"monk": null,
"moo": null,
"mp": null,
"mplayerconf": null,
"mrxvtrc": null,
"msidl": null,
"msmessages": null,
"msql": null,
"mupad": null,
"mush": null,
"muttrc": null,
"mysql": null,
"named": null,
"nanorc": null,
"nasm": null,
"nastran": null,
"natural": null,
"ncf": null,
"netrc": null,
"netrw": null,
"nosyntax": null,
"nqc": null,
"nroff": null,
"nsis": null,
"obj": null,
"objc": "Objective-C",
"objcpp": "Objective-C++",
"ocaml": "OCaml",
"occam": null,
"omnimark": null,
"openroad": null,
"opl": null,
"ora": null,
"pamconf": null,
"papp": null,
"pascal": null,
"passwd": null,
"pcap": null,
"pccts": null,
"pdf": null,
"perl": "Perl",
"perl6": "Perl",
"pf": null,
"pfmain": null,
"php": "PHP",
"phtml": "PHP",
"pic": null,
"pike": null,
"pilrc": null,
"pine": null,
"pinfo": null,
"plaintex": null,
"plm": null,
"plp": null,
"plsql": null,
"po": null,
"pod": null,
"postscr": null,
"pov": null,
"povini": null,
"ppd": null,
"ppwiz": null,
"prescribe": null,
"privoxy": null,
"procmail": null,
"progress": null,
"prolog": "Prolog",
"promela": null,
"protocols": null,
"psf": null,
"ptcap": null,
"purifylog": null,
"pyrex": null,
"python": "Python",
"qf": null,
"quake": null,
"r": "R",
"racc": null,
"radiance": null,
"ratpoison": null,
"rc": null,
"rcs": null,
"rcslog": null,
"readline": null,
"rebol": null,
"registry": null,
"remind": null,
"resolv": null,
"reva": null,
"rexx": null,
"rhelp": null,
"rib": null,
"rnc": null,
"rnoweb": null,
"robots": null,
"rpcgen": null,
"rpl": null,
"rst": null,
"rtf": null,
"ruby": "Ruby",
"samba": null,
"sas": null,
"sass": "Sass",
"sather": null,
"scheme": "Scheme",
"scilab": null,
"screen": null,
"scss": "SCSS",
"sd": null,
"sdc": null,
"sdl": null,
"sed": null,
"sendpr": null,
"sensors": null,
"services": null,
"setserial": null,
"sgml": null,
"sgmldecl": null,
"sgmllnx": null,
"sh": null,
"sicad": null,
"sieve": null,
"simula": null,
"sinda": null,
"sindacmp": null,
"sindaout": null,
"sisu": null,
"skill": "SKILL",
"sl": null,
"slang": null,
"slice": null,
"slpconf": null,
"slpreg": null,
"slpspi": null,
"slrnrc": null,
"slrnsc": null,
"sm": null,
"smarty": null,
"smcl": null,
"smil": null,
"smith": null,
"sml": null,
"snnsnet": null,
"snnspat": null,
"snnsres": null,
"snobol4": null,
"spec": null,
"specman": null,
"spice": null,
"splint": null,
"spup": null,
"spyce": null,
"sql": null,
"sqlanywhere": null,
"sqlforms": null,
"sqlinformix": null,
"sqlj": null,
"sqloracle": null,
"sqr": null,
"squid": null,
"sshconfig": null,
"sshdconfig": null,
"st": null,
"stata": null,
"stp": null,
"strace": null,
"sudoers": null,
"svg": null,
"svn": null,
"syncolor": null,
"synload": null,
"syntax": null,
"sysctl": null,
"tads": null,
"tags": null,
"tak": null,
"takcmp": null,
"takout": null,
"tar": null,
"taskdata": null,
"taskedit": null,
"tasm": null,
"tcl": null,
"tcsh": null,
"terminfo": null,
"tex": null,
"texinfo": null,
"texmf": null,
"tf": null,
"tidy": null,
"tilde": null,
"tli": null,
"tpp": null,
"trasys": null,
"trustees": null,
"tsalt": null,
"tsscl": null,
"tssgm": null,
"tssop": null,
"uc": null,
"udevconf": null,
"udevperm": null,
"udevrules": null,
"uil": null,
"updatedb": null,
"valgrind": null,
"vb": "VB.net",
"vera": null,
"verilog": null,
"verilogams": null,
"vgrindefs": null,
"vhdl": null,
"vim": "VimL",
"viminfo": null,
"virata": null,
"vmasm": null,
"voscm": null,
"vrml": null,
"vsejcl": null,
"wdiff": null,
"web": null,
"webmacro": null,
"wget": null,
"winbatch": null,
"wml": null,
"wsh": null,
"wsml": null,
"wvdial": null,
"xbl": null,
"xdefaults": null,
"xf86conf": null,
"xhtml": "HTML",
"xinetd": null,
"xkb": null,
"xmath": null,
"xml": "XML",
"xmodmap": null,
"xpm": null,
"xpm2": null,
"xquery": null,
"xs": null,
"xsd": null,
"xslt": null,
"xxd": null,
"yacc": null,
"yaml": "YAML",
"z8a": null,
"zsh": null
}
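
Not part of the diff: a null value above means the Vim filetype has no corresponding WakaTime language. A lookup sketch; the JSON file name here is assumed for illustration:

import json

with open('vim_language_map.json') as fh:  # hypothetical file name
    mapping = json.load(fh)

print(mapping.get('cpp'))   # "C++"
print(mapping.get('a2ps'))  # None: filetype has no language mapping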


@@ -1,136 +0,0 @@
# -*- coding: utf-8 -*-
"""
wakatime.logger
~~~~~~~~~~~~~~~
Provides the configured logger for writing JSON to the log file.
:copyright: (c) 2013 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import logging
import os
import traceback
from .compat import u
from .packages.requests.packages import urllib3
try:
from collections import OrderedDict # pragma: nocover
except ImportError: # pragma: nocover
from .packages.ordereddict import OrderedDict
try:
from .packages import simplejson as json # pragma: nocover
except (ImportError, SyntaxError): # pragma: nocover
import json
class CustomEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, bytes): # pragma: nocover
obj = u(obj)
return json.dumps(obj)
try: # pragma: nocover
encoded = super(CustomEncoder, self).default(obj)
except UnicodeDecodeError: # pragma: nocover
obj = u(obj)
encoded = super(CustomEncoder, self).default(obj)
return encoded
class JsonFormatter(logging.Formatter):
def setup(self, timestamp, is_write, entity, version, plugin, verbose,
warnings=False):
self.timestamp = timestamp
self.is_write = is_write
self.entity = entity
self.version = version
self.plugin = plugin
self.verbose = verbose
self.warnings = warnings
def format(self, record, *args):
data = OrderedDict([
('now', self.formatTime(record, self.datefmt)),
])
data['version'] = self.version
data['plugin'] = self.plugin
data['time'] = self.timestamp
if self.verbose:
data['caller'] = record.pathname
data['lineno'] = record.lineno
data['is_write'] = self.is_write
data['file'] = self.entity
if not self.is_write:
del data['is_write']
data['level'] = record.levelname
data['message'] = record.getMessage() if self.warnings else record.msg
if not self.plugin:
del data['plugin']
return CustomEncoder().encode(data)
def traceback_formatter(*args, **kwargs):
if 'level' in kwargs and (kwargs['level'].lower() == 'warn' or kwargs['level'].lower() == 'warning'):
logging.getLogger('WakaTime').warning(traceback.format_exc())
elif 'level' in kwargs and kwargs['level'].lower() == 'info':
logging.getLogger('WakaTime').info(traceback.format_exc())
elif 'level' in kwargs and kwargs['level'].lower() == 'debug':
logging.getLogger('WakaTime').debug(traceback.format_exc())
else:
logging.getLogger('WakaTime').error(traceback.format_exc())
def set_log_level(logger, args):
level = logging.WARN
if args.verbose:
level = logging.DEBUG
logger.setLevel(level)
def setup_logging(args, version):
urllib3.disable_warnings()
logger = logging.getLogger('WakaTime')
for handler in logger.handlers:
logger.removeHandler(handler)
set_log_level(logger, args)
logfile = args.logfile
if not logfile:
logfile = '~/.wakatime.log'
handler = logging.FileHandler(os.path.expanduser(logfile))
formatter = JsonFormatter(datefmt='%Y/%m/%d %H:%M:%S %z')
formatter.setup(
timestamp=args.timestamp,
is_write=args.is_write,
entity=args.entity,
version=version,
plugin=args.plugin,
verbose=args.verbose,
)
handler.setFormatter(formatter)
logger.addHandler(handler)
# add custom traceback logging method
logger.traceback = traceback_formatter
warnings_formatter = JsonFormatter(datefmt='%Y/%m/%d %H:%M:%S %z')
warnings_formatter.setup(
timestamp=args.timestamp,
is_write=args.is_write,
entity=args.entity,
version=version,
plugin=args.plugin,
verbose=args.verbose,
warnings=True,
)
warnings_handler = logging.FileHandler(os.path.expanduser(logfile))
warnings_handler.setFormatter(warnings_formatter)
logging.getLogger('py.warnings').addHandler(warnings_handler)
try:
logging.captureWarnings(True)
except AttributeError: # pragma: nocover
pass # Python >= 2.7 is needed to capture warnings
return logger
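
An aside, not in the diff: a stdlib-only analogue of the JsonFormatter above, showing the one-JSON-object-per-line log format it writes:

import json
import logging

class TinyJsonFormatter(logging.Formatter):
    def format(self, record):
        return json.dumps({
            'now': self.formatTime(record, '%Y/%m/%d %H:%M:%S %z'),
            'level': record.levelname,
            'message': record.getMessage(),
        })

handler = logging.StreamHandler()
handler.setFormatter(TinyJsonFormatter())
log = logging.getLogger('demo')
log.addHandler(handler)
log.warning('could not reach api')  # -> {"now": "...", "level": "WARNING", ...}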


@@ -1,533 +0,0 @@
# -*- coding: utf-8 -*-
"""
wakatime.main
~~~~~~~~~~~~~
wakatime module entry point.
:copyright: (c) 2013 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import base64
import logging
import os
import platform
import re
import sys
import time
import traceback
import socket
try:
import ConfigParser as configparser
except ImportError: # pragma: nocover
import configparser
pwd = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(pwd))
sys.path.insert(0, os.path.join(pwd, 'packages'))
from .__about__ import __version__
from .compat import u, open, is_py3
from .constants import (
API_ERROR,
AUTH_ERROR,
CONFIG_FILE_PARSE_ERROR,
SUCCESS,
UNKNOWN_ERROR,
MALFORMED_HEARTBEAT_ERROR,
)
from .logger import setup_logging
from .offlinequeue import Queue
from .packages import argparse
from .packages import requests
from .packages.requests.exceptions import RequestException
from .project import get_project_info
from .session_cache import SessionCache
from .stats import get_file_stats
try:
from .packages import simplejson as json # pragma: nocover
except (ImportError, SyntaxError): # pragma: nocover
import json
from .packages import tzlocal
log = logging.getLogger('WakaTime')
class FileAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
try:
if os.path.isfile(values):
values = os.path.realpath(values)
except: # pragma: nocover
pass
setattr(namespace, self.dest, values)
def parseConfigFile(configFile=None):
"""Returns a configparser.SafeConfigParser instance with configs
read from the config file. Default location of the config file is
at ~/.wakatime.cfg.
"""
if not configFile:
configFile = os.path.join(os.path.expanduser('~'), '.wakatime.cfg')
configs = configparser.SafeConfigParser()
try:
with open(configFile, 'r', encoding='utf-8') as fh:
try:
configs.readfp(fh)
except configparser.Error:
print(traceback.format_exc())
return None
except IOError:
print(u('Error: Could not read from config file {0}').format(u(configFile)))
return configs
def parseArguments():
"""Parse command line arguments and configs from ~/.wakatime.cfg.
Command line arguments take precedence over config file settings.
Returns instances of ArgumentParser and SafeConfigParser.
"""
# define supported command line arguments
parser = argparse.ArgumentParser(
description='Common interface for the WakaTime api.')
parser.add_argument('--entity', dest='entity', metavar='FILE',
action=FileAction,
help='absolute path to file for the heartbeat; can also be a '+
'url, domain, or app when --entity-type is not file')
parser.add_argument('--file', dest='file', action=FileAction,
help=argparse.SUPPRESS)
parser.add_argument('--key', dest='key',
help='your wakatime api key; uses api_key from '+
'~/.wakatime.cfg by default')
parser.add_argument('--write', dest='is_write',
action='store_true',
help='when set, tells api this heartbeat was triggered from '+
'writing to a file')
parser.add_argument('--plugin', dest='plugin',
help='optional text editor plugin name and version '+
'for User-Agent header')
parser.add_argument('--time', dest='timestamp', metavar='time',
type=float,
help='optional floating-point unix epoch timestamp; '+
'uses current time by default')
parser.add_argument('--lineno', dest='lineno',
help='optional line number; current line being edited')
parser.add_argument('--cursorpos', dest='cursorpos',
help='optional cursor position in the current file')
parser.add_argument('--entity-type', dest='entity_type',
help='entity type for this heartbeat. can be one of "file", '+
'"domain", or "app"; defaults to file.')
parser.add_argument('--proxy', dest='proxy',
help='optional https proxy url; for example: '+
'https://user:pass@localhost:8080')
parser.add_argument('--project', dest='project',
help='optional project name')
parser.add_argument('--alternate-project', dest='alternate_project',
help='optional alternate project name; auto-discovered project '+
'takes priority')
parser.add_argument('--alternate-language', dest='alternate_language',
help='optional alternate language name; auto-detected language '+
'takes priority')
parser.add_argument('--hostname', dest='hostname', help='hostname of '+
'current machine.')
parser.add_argument('--disableoffline', dest='offline',
action='store_false',
help='disables offline time logging instead of queuing logged time')
parser.add_argument('--hidefilenames', dest='hidefilenames',
action='store_true',
help='obfuscate file names; will not send file names to api')
parser.add_argument('--exclude', dest='exclude', action='append',
help='filename patterns to exclude from logging; POSIX regex '+
'syntax; can be used more than once')
parser.add_argument('--include', dest='include', action='append',
help='filename patterns to log; when used in combination with '+
'--exclude, files matching include will still be logged; '+
'POSIX regex syntax; can be used more than once')
parser.add_argument('--ignore', dest='ignore', action='append',
help=argparse.SUPPRESS)
parser.add_argument('--extra-heartbeats', dest='extra_heartbeats',
action='store_true',
help='reads extra heartbeats from STDIN as a JSON array until EOF')
parser.add_argument('--logfile', dest='logfile',
help='defaults to ~/.wakatime.log')
parser.add_argument('--apiurl', dest='api_url',
help='heartbeats api url; for debugging with a local server')
parser.add_argument('--timeout', dest='timeout', type=int,
help='number of seconds to wait when sending heartbeats to api; '+
'defaults to 60 seconds')
parser.add_argument('--config', dest='config',
help='defaults to ~/.wakatime.cfg')
parser.add_argument('--verbose', dest='verbose', action='store_true',
help='turns on debug messages in log file')
parser.add_argument('--version', action='version', version=__version__)
# parse command line arguments
args = parser.parse_args()
# use current unix epoch timestamp by default
if not args.timestamp:
args.timestamp = time.time()
# parse ~/.wakatime.cfg file
configs = parseConfigFile(args.config)
if configs is None:
return args, configs
# update args from configs
if not args.key:
default_key = None
if configs.has_option('settings', 'api_key'):
default_key = configs.get('settings', 'api_key')
elif configs.has_option('settings', 'apikey'):
default_key = configs.get('settings', 'apikey')
if default_key:
args.key = default_key
else:
parser.error('Missing api key')
if not args.entity:
if args.file:
args.entity = args.file
else:
parser.error('argument --entity is required')
if not args.exclude:
args.exclude = []
if configs.has_option('settings', 'ignore'):
try:
for pattern in configs.get('settings', 'ignore').split("\n"):
if pattern.strip() != '':
args.exclude.append(pattern)
except TypeError: # pragma: nocover
pass
if configs.has_option('settings', 'exclude'):
try:
for pattern in configs.get('settings', 'exclude').split("\n"):
if pattern.strip() != '':
args.exclude.append(pattern)
except TypeError: # pragma: nocover
pass
if not args.include:
args.include = []
if configs.has_option('settings', 'include'):
try:
for pattern in configs.get('settings', 'include').split("\n"):
if pattern.strip() != '':
args.include.append(pattern)
except TypeError: # pragma: nocover
pass
if args.offline and configs.has_option('settings', 'offline'):
args.offline = configs.getboolean('settings', 'offline')
if not args.hidefilenames and configs.has_option('settings', 'hidefilenames'):
args.hidefilenames = configs.getboolean('settings', 'hidefilenames')
if not args.proxy and configs.has_option('settings', 'proxy'):
args.proxy = configs.get('settings', 'proxy')
if not args.verbose and configs.has_option('settings', 'verbose'):
args.verbose = configs.getboolean('settings', 'verbose')
if not args.verbose and configs.has_option('settings', 'debug'):
args.verbose = configs.getboolean('settings', 'debug')
if not args.logfile and configs.has_option('settings', 'logfile'):
args.logfile = configs.get('settings', 'logfile')
if not args.api_url and configs.has_option('settings', 'api_url'):
args.api_url = configs.get('settings', 'api_url')
if not args.timeout and configs.has_option('settings', 'timeout'):
try:
args.timeout = int(configs.get('settings', 'timeout'))
except ValueError:
print(traceback.format_exc())
return args, configs
def should_exclude(entity, include, exclude):
if entity is not None and entity.strip() != '':
try:
for pattern in include:
try:
compiled = re.compile(pattern, re.IGNORECASE)
if compiled.search(entity):
return False
except re.error as ex:
log.warning(u('Regex error ({msg}) for include pattern: {pattern}').format(
msg=u(ex),
pattern=u(pattern),
))
except TypeError: # pragma: nocover
pass
try:
for pattern in exclude:
try:
compiled = re.compile(pattern, re.IGNORECASE)
if compiled.search(entity):
return pattern
except re.error as ex:
log.warning(u('Regex error ({msg}) for exclude pattern: {pattern}').format(
msg=u(ex),
pattern=u(pattern),
))
except TypeError: # pragma: nocover
pass
return False
def get_user_agent(plugin):
ver = sys.version_info
python_version = '%d.%d.%d.%s.%d' % (ver[0], ver[1], ver[2], ver[3], ver[4])
user_agent = u('wakatime/{ver} ({platform}) Python{py_ver}').format(
ver=u(__version__),
platform=u(platform.platform()),
py_ver=python_version,
)
if plugin:
user_agent = u('{user_agent} {plugin}').format(
user_agent=user_agent,
plugin=u(plugin),
)
else:
user_agent = u('{user_agent} Unknown/0').format(
user_agent=user_agent,
)
return user_agent
def send_heartbeat(project=None, branch=None, hostname=None, stats={}, key=None,
entity=None, timestamp=None, is_write=None, plugin=None,
offline=None, entity_type='file', hidefilenames=None,
proxy=None, api_url=None, timeout=None, **kwargs):
"""Sends heartbeat as POST request to WakaTime api server.
Returns `SUCCESS` when heartbeat was sent, otherwise returns an
error code constant.
"""
if not api_url:
api_url = 'https://api.wakatime.com/api/v1/heartbeats'
if not timeout:
timeout = 60
log.debug('Sending heartbeat to api at %s' % api_url)
data = {
'time': timestamp,
'entity': entity,
'type': entity_type,
}
if hidefilenames and entity is not None and entity_type == 'file':
extension = u(os.path.splitext(data['entity'])[1])
data['entity'] = u('HIDDEN{0}').format(extension)
if stats.get('lines'):
data['lines'] = stats['lines']
if stats.get('language'):
data['language'] = stats['language']
if stats.get('dependencies'):
data['dependencies'] = stats['dependencies']
if stats.get('lineno'):
data['lineno'] = stats['lineno']
if stats.get('cursorpos'):
data['cursorpos'] = stats['cursorpos']
if is_write:
data['is_write'] = is_write
if project:
data['project'] = project
if branch:
data['branch'] = branch
log.debug(data)
# setup api request
request_body = json.dumps(data)
api_key = u(base64.b64encode(str.encode(key) if is_py3 else key))
auth = u('Basic {api_key}').format(api_key=api_key)
headers = {
'User-Agent': get_user_agent(plugin),
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': auth,
}
if hostname:
headers['X-Machine-Name'] = u(hostname).encode('utf-8')
proxies = {}
if proxy:
proxies['https'] = proxy
# add Olson timezone to request
try:
tz = tzlocal.get_localzone()
except:
tz = None
if tz:
headers['TimeZone'] = u(tz.zone).encode('utf-8')
session_cache = SessionCache()
session = session_cache.get()
# log time to api
response = None
try:
response = session.post(api_url, data=request_body, headers=headers,
proxies=proxies, timeout=timeout)
except RequestException:
exception_data = {
sys.exc_info()[0].__name__: u(sys.exc_info()[1]),
}
if log.isEnabledFor(logging.DEBUG):
exception_data['traceback'] = traceback.format_exc()
if offline:
queue = Queue()
queue.push(data, json.dumps(stats), plugin)
if log.isEnabledFor(logging.DEBUG):
log.warn(exception_data)
else:
log.error(exception_data)
else:
code = response.status_code if response is not None else None
content = response.text if response is not None else None
if code == requests.codes.created or code == requests.codes.accepted:
log.debug({
'response_code': code,
})
session_cache.save(session)
return SUCCESS
if offline:
if code != 400:
queue = Queue()
queue.push(data, json.dumps(stats), plugin)
if code == 401:
log.error({
'response_code': code,
'response_content': content,
})
session_cache.delete()
return AUTH_ERROR
elif log.isEnabledFor(logging.DEBUG):
log.warn({
'response_code': code,
'response_content': content,
})
else:
log.error({
'response_code': code,
'response_content': content,
})
else:
log.error({
'response_code': code,
'response_content': content,
})
session_cache.delete()
return API_ERROR
def sync_offline_heartbeats(args, hostname):
"""Sends all heartbeats which were cached in the offline Queue."""
queue = Queue()
while True:
heartbeat = queue.pop()
if heartbeat is None:
break
status = send_heartbeat(
project=heartbeat['project'],
entity=heartbeat['entity'],
timestamp=heartbeat['time'],
branch=heartbeat['branch'],
hostname=hostname,
stats=json.loads(heartbeat['stats']),
key=args.key,
is_write=heartbeat['is_write'],
plugin=heartbeat['plugin'],
offline=args.offline,
hidefilenames=args.hidefilenames,
entity_type=heartbeat['type'],
proxy=args.proxy,
api_url=args.api_url,
timeout=args.timeout,
)
if status != SUCCESS:
if status == AUTH_ERROR:
return AUTH_ERROR
break
return SUCCESS
def process_heartbeat(args, configs, hostname, heartbeat):
exclude = should_exclude(heartbeat['entity'], args.include, args.exclude)
if exclude is not False:
log.debug(u('Skipping because matches exclude pattern: {pattern}').format(
pattern=u(exclude),
))
return SUCCESS
if heartbeat.get('entity_type') not in ['file', 'domain', 'app']:
heartbeat['entity_type'] = 'file'
if heartbeat['entity_type'] != 'file' or os.path.isfile(heartbeat['entity']):
stats = get_file_stats(heartbeat['entity'],
entity_type=heartbeat['entity_type'],
lineno=heartbeat.get('lineno'),
cursorpos=heartbeat.get('cursorpos'),
plugin=args.plugin,
alternate_language=heartbeat.get('alternate_language'))
project = heartbeat.get('project') or heartbeat.get('alternate_project')
branch = None
if heartbeat['entity_type'] == 'file':
project, branch = get_project_info(configs, heartbeat)
heartbeat['project'] = project
heartbeat['branch'] = branch
heartbeat['stats'] = stats
heartbeat['hostname'] = hostname
heartbeat['timeout'] = args.timeout
heartbeat['key'] = args.key
heartbeat['plugin'] = args.plugin
heartbeat['offline'] = args.offline
heartbeat['hidefilenames'] = args.hidefilenames
heartbeat['proxy'] = args.proxy
heartbeat['api_url'] = args.api_url
return send_heartbeat(**heartbeat)
else:
log.debug('File does not exist; ignoring this heartbeat.')
return SUCCESS
def execute(argv=None):
if argv:
sys.argv = ['wakatime'] + argv
args, configs = parseArguments()
if configs is None:
return CONFIG_FILE_PARSE_ERROR
setup_logging(args, __version__)
try:
hostname = args.hostname or socket.gethostname()
heartbeat = vars(args)
retval = process_heartbeat(args, configs, hostname, heartbeat)
if args.extra_heartbeats:
try:
for heartbeat in json.loads(sys.stdin.readline()):
retval = process_heartbeat(args, configs, hostname, heartbeat)
except json.JSONDecodeError:
retval = MALFORMED_HEARTBEAT_ERROR
if retval == SUCCESS:
retval = sync_offline_heartbeats(args, hostname)
return retval
except:
log.traceback()
print(traceback.format_exc())
return UNKNOWN_ERROR
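
Not part of the diff: the request send_heartbeat() assembles, reduced to a standalone sketch. The api key, entity, timestamp, and User-Agent below are placeholders:

import base64
import json
import requests

key = '00000000-0000-4000-8000-000000000000'  # placeholder api key
headers = {
    'User-Agent': 'wakatime/4.1.0 (Linux) Python3.8.0 Unknown/0',
    'Content-Type': 'application/json',
    'Accept': 'application/json',
    'Authorization': 'Basic ' + base64.b64encode(key.encode()).decode(),
}
body = json.dumps({'time': 1514764800.0, 'entity': '/tmp/example.py', 'type': 'file'})
response = requests.post('https://api.wakatime.com/api/v1/heartbeats',
                         data=body, headers=headers, timeout=60)
print(response.status_code)  # 201 or 202 counts as SUCCESS above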


@@ -1,129 +0,0 @@
# -*- coding: utf-8 -*-
"""
wakatime.offlinequeue
~~~~~~~~~~~~~~~~~~~~~
Queue for saving heartbeats while offline.
:copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import logging
import os
import traceback
from time import sleep
try:
import sqlite3
HAS_SQL = True
except ImportError: # pragma: nocover
HAS_SQL = False
from .compat import u
log = logging.getLogger('WakaTime')
class Queue(object):
db_file = os.path.join(os.path.expanduser('~'), '.wakatime.db')
table_name = 'heartbeat_1'
def get_db_file(self):
return self.db_file
def connect(self):
conn = sqlite3.connect(self.get_db_file())
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS {0} (
entity text,
type text,
time real,
project text,
branch text,
is_write integer,
stats text,
misc text,
plugin text)
'''.format(self.table_name))
return (conn, c)
def push(self, data, stats, plugin, misc=None):
if not HAS_SQL: # pragma: nocover
return
try:
conn, c = self.connect()
heartbeat = {
'entity': u(data.get('entity')),
'type': u(data.get('type')),
'time': data.get('time'),
'project': u(data.get('project')),
'branch': u(data.get('branch')),
'is_write': 1 if data.get('is_write') else 0,
'stats': u(stats),
'misc': u(misc),
'plugin': u(plugin),
}
c.execute('INSERT INTO {0} VALUES (:entity,:type,:time,:project,:branch,:is_write,:stats,:misc,:plugin)'.format(self.table_name), heartbeat)
conn.commit()
conn.close()
except sqlite3.Error:
log.error(traceback.format_exc())
def pop(self):
if not HAS_SQL: # pragma: nocover
return None
tries = 3
wait = 0.1
heartbeat = None
try:
conn, c = self.connect()
except sqlite3.Error:
log.debug(traceback.format_exc())
return None
loop = True
while loop and tries > -1:
try:
c.execute('BEGIN IMMEDIATE')
c.execute('SELECT * FROM {0} LIMIT 1'.format(self.table_name))
row = c.fetchone()
if row is not None:
values = []
clauses = []
index = 0
for row_name in ['entity', 'type', 'time', 'project', 'branch', 'is_write']:
if row[index] is not None:
clauses.append('{0}=?'.format(row_name))
values.append(row[index])
else: # pragma: nocover
clauses.append('{0} IS NULL'.format(row_name))
index += 1
if len(values) > 0:
c.execute('DELETE FROM {0} WHERE {1}'.format(self.table_name, ' AND '.join(clauses)), values)
else: # pragma: nocover
c.execute('DELETE FROM {0} WHERE {1}'.format(self.table_name, ' AND '.join(clauses)))
conn.commit()
if row is not None:
heartbeat = {
'entity': row[0],
'type': row[1],
'time': row[2],
'project': row[3],
'branch': row[4],
'is_write': row[5] == 1,
'stats': row[6],
'misc': row[7],
'plugin': row[8],
}
loop = False
except sqlite3.Error: # pragma: nocover
log.debug(traceback.format_exc())
sleep(wait)
tries -= 1
try:
conn.close()
except sqlite3.Error: # pragma: nocover
log.debug(traceback.format_exc())
return heartbeat
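
For illustration, not in the diff: a hypothetical round trip through the queue above, assuming the module is importable as wakatime.offlinequeue (it creates ~/.wakatime.db on first use):

from wakatime.offlinequeue import Queue

q = Queue()
q.push({'entity': '/tmp/example.py', 'type': 'file', 'time': 1514764800.0},
       '{}', 'sublime-wakatime/11.1.0')
print(q.pop())  # the oldest heartbeat dict, or None when the queue is empty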


@@ -1,14 +0,0 @@
import os
import sys
from ..compat import is_py2
if is_py2:
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'py2'))
else:
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'py3'))
import tzlocal
from pygments.lexers import get_lexer_by_name, guess_lexer_for_filename
from pygments.modeline import get_filetype_from_buffer
from pygments.util import ClassNotFound

File diff suppressed because it is too large.


@@ -1,127 +0,0 @@
# Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from UserDict import DictMixin
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
if len(self) != len(other):
return False
for p, q in zip(self.items(), other.items()):
if p != q:
return False
return True
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
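
An aside, not in the diff: this backport mirrors collections.OrderedDict, which the logger above prefers whenever the import succeeds (Python 2.7+). The behavior it preserves:

from collections import OrderedDict

d = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
print(list(d))                # ['a', 'b', 'c']: insertion order kept
print(d.popitem(last=False))  # ('a', 1): pops from the front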


@@ -1,91 +0,0 @@
# -*- coding: utf-8 -*-
"""
Pygments
~~~~~~~~
Pygments is a syntax highlighting package written in Python.
It is a generic syntax highlighter for general use in all kinds of software
such as forum systems, wikis or other applications that need to prettify
source code. Highlights are:
* a wide range of common languages and markup formats is supported
* special attention is paid to details, increasing quality by a fair amount
* support for new languages and formats is added easily
* a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
formats that PIL supports, and ANSI sequences
* it is usable as a command-line tool and as a library
* ... and it highlights even Brainfuck!
The `Pygments tip`_ is installable with ``easy_install Pygments==dev``.
.. _Pygments tip:
http://bitbucket.org/birkenfeld/pygments-main/get/tip.zip#egg=Pygments-dev
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
__version__ = '2.0.1'
__docformat__ = 'restructuredtext'
__all__ = ['lex', 'format', 'highlight']
import sys
from pygments.util import StringIO, BytesIO
def lex(code, lexer):
"""
Lex ``code`` with ``lexer`` and return an iterable of tokens.
"""
try:
return lexer.get_tokens(code)
except TypeError as err:
if isinstance(err.args[0], str) and \
'unbound method get_tokens' in err.args[0]:
raise TypeError('lex() argument must be a lexer instance, '
'not a class')
raise
def format(tokens, formatter, outfile=None):
"""
Format a tokenlist ``tokens`` with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
try:
if not outfile:
#print formatter, 'using', formatter.encoding
realoutfile = formatter.encoding and BytesIO() or StringIO()
formatter.format(tokens, realoutfile)
return realoutfile.getvalue()
else:
formatter.format(tokens, outfile)
except TypeError as err:
if isinstance(err.args[0], str) and \
'unbound method format' in err.args[0]:
raise TypeError('format() argument must be a formatter instance, '
'not a class')
raise
def highlight(code, lexer, formatter, outfile=None):
"""
Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
return format(lex(code, lexer), formatter, outfile)
if __name__ == '__main__':
from pygments.cmdline import main
sys.exit(main(sys.argv))
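
Not part of the diff: the three helpers above compose exactly as documented; a minimal use of highlight():

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter

print(highlight('print("hello")', PythonLexer(), TerminalFormatter()))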


@@ -1,509 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.cmdline
~~~~~~~~~~~~~~~~
Command line interface.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import sys
import getopt
from textwrap import dedent
from pygments import __version__, highlight
from pygments.util import ClassNotFound, OptionError, docstring_headline, \
guess_decode, guess_decode_from_terminal, terminal_encoding
from pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \
get_lexer_for_filename, find_lexer_class, TextLexer
from pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter
from pygments.formatters import get_all_formatters, get_formatter_by_name, \
get_formatter_for_filename, find_formatter_class, \
TerminalFormatter # pylint:disable-msg=E0611
from pygments.filters import get_all_filters, find_filter_class
from pygments.styles import get_all_styles, get_style_by_name
USAGE = """\
Usage: %s [-l <lexer> | -g] [-F <filter>[:<options>]] [-f <formatter>]
[-O <options>] [-P <option=value>] [-s] [-o <outfile>] [<infile>]
%s -S <style> -f <formatter> [-a <arg>] [-O <options>] [-P <option=value>]
%s -L [<which> ...]
%s -N <filename>
%s -H <type> <name>
%s -h | -V
Highlight the input file and write the result to <outfile>.
If no input file is given, use stdin, if -o is not given, use stdout.
If -s is passed, lexing will be done in "streaming" mode, reading and
highlighting one line at a time. This will only work properly with
lexers that have no constructs spanning multiple lines!
<lexer> is a lexer name (query all lexer names with -L). If -l is not
given, the lexer is guessed from the extension of the input file name
(this obviously doesn't work if the input is stdin). If -g is passed,
attempt to guess the lexer from the file contents, or pass through as
plain text if this fails (this can work for stdin).
Likewise, <formatter> is a formatter name, and will be guessed from
the extension of the output file name. If no output file is given,
the terminal formatter will be used by default.
With the -O option, you can give the lexer and formatter a comma-
separated list of options, e.g. ``-O bg=light,python=cool``.
The -P option adds lexer and formatter options like the -O option, but
you can only give one option per -P. That way, the option value may
contain commas and equals signs, which it can't with -O, e.g.
``-P "heading=Pygments, the Python highlighter"``.
With the -F option, you can add filters to the token stream, you can
give options in the same way as for -O after a colon (note: there must
not be spaces around the colon).
The -O, -P and -F options can be given multiple times.
With the -S option, print out style definitions for style <style>
for formatter <formatter>. The argument given by -a is formatter
dependent.
The -L option lists lexers, formatters, styles or filters -- set
`which` to the thing you want to list (e.g. "styles"), or omit it to
list everything.
The -N option guesses and prints out a lexer name based solely on
the given filename. It does not take input or highlight anything.
If no specific lexer can be determined "text" is returned.
The -H option prints detailed help for the object <name> of type <type>,
where <type> is one of "lexer", "formatter" or "filter".
The -s option processes lines one at a time until EOF, rather than
waiting to process the entire file. This only works for stdin, and
is intended for streaming input such as you get from 'tail -f'.
Example usage: "tail -f sql.log | pygmentize -s -l sql"
The -h option prints this help.
The -V option prints the package version.
"""
def _parse_options(o_strs):
opts = {}
if not o_strs:
return opts
for o_str in o_strs:
if not o_str:
continue
o_args = o_str.split(',')
for o_arg in o_args:
o_arg = o_arg.strip()
try:
o_key, o_val = o_arg.split('=', 1)
o_key = o_key.strip()
o_val = o_val.strip()
except ValueError:
opts[o_arg] = True
else:
opts[o_key] = o_val
return opts
def _parse_filters(f_strs):
filters = []
if not f_strs:
return filters
for f_str in f_strs:
if ':' in f_str:
fname, fopts = f_str.split(':', 1)
filters.append((fname, _parse_options([fopts])))
else:
filters.append((f_str, {}))
return filters
def _print_help(what, name):
try:
if what == 'lexer':
cls = find_lexer_class(name)
print("Help on the %s lexer:" % cls.name)
print(dedent(cls.__doc__))
elif what == 'formatter':
cls = find_formatter_class(name)
print("Help on the %s formatter:" % cls.name)
print(dedent(cls.__doc__))
elif what == 'filter':
cls = find_filter_class(name)
print("Help on the %s filter:" % name)
print(dedent(cls.__doc__))
except AttributeError:
print("%s not found!" % what, file=sys.stderr)
def _print_list(what):
if what == 'lexer':
print()
print("Lexers:")
print("~~~~~~~")
info = []
for fullname, names, exts, _ in get_all_lexers():
tup = (', '.join(names)+':', fullname,
exts and '(filenames ' + ', '.join(exts) + ')' or '')
info.append(tup)
info.sort()
for i in info:
print(('* %s\n %s %s') % i)
elif what == 'formatter':
print()
print("Formatters:")
print("~~~~~~~~~~~")
info = []
for cls in get_all_formatters():
doc = docstring_headline(cls)
tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and
'(filenames ' + ', '.join(cls.filenames) + ')' or '')
info.append(tup)
info.sort()
for i in info:
print(('* %s\n %s %s') % i)
elif what == 'filter':
print()
print("Filters:")
print("~~~~~~~~")
for name in get_all_filters():
cls = find_filter_class(name)
print("* " + name + ':')
print(" %s" % docstring_headline(cls))
elif what == 'style':
print()
print("Styles:")
print("~~~~~~~")
for name in get_all_styles():
cls = get_style_by_name(name)
print("* " + name + ':')
print(" %s" % docstring_headline(cls))
def main(args=sys.argv):
"""
Main command line entry point.
"""
# pylint: disable-msg=R0911,R0912,R0915
usage = USAGE % ((args[0],) * 6)
try:
popts, args = getopt.getopt(args[1:], "l:f:F:o:O:P:LS:a:N:hVHgs")
except getopt.GetoptError:
print(usage, file=sys.stderr)
return 2
opts = {}
O_opts = []
P_opts = []
F_opts = []
for opt, arg in popts:
if opt == '-O':
O_opts.append(arg)
elif opt == '-P':
P_opts.append(arg)
elif opt == '-F':
F_opts.append(arg)
opts[opt] = arg
if opts.pop('-h', None) is not None:
print(usage)
return 0
if opts.pop('-V', None) is not None:
print('Pygments version %s, (c) 2006-2014 by Georg Brandl.' % __version__)
return 0
# handle ``pygmentize -L``
L_opt = opts.pop('-L', None)
if L_opt is not None:
if opts:
print(usage, file=sys.stderr)
return 2
# print version
main(['', '-V'])
if not args:
args = ['lexer', 'formatter', 'filter', 'style']
for arg in args:
_print_list(arg.rstrip('s'))
return 0
# handle ``pygmentize -H``
H_opt = opts.pop('-H', None)
if H_opt is not None:
if opts or len(args) != 2:
print(usage, file=sys.stderr)
return 2
what, name = args
if what not in ('lexer', 'formatter', 'filter'):
print(usage, file=sys.stderr)
return 2
_print_help(what, name)
return 0
# parse -O options
parsed_opts = _parse_options(O_opts)
opts.pop('-O', None)
# parse -P options
for p_opt in P_opts:
try:
name, value = p_opt.split('=', 1)
except ValueError:
parsed_opts[p_opt] = True
else:
parsed_opts[name] = value
opts.pop('-P', None)
# encodings
inencoding = parsed_opts.get('inencoding', parsed_opts.get('encoding'))
outencoding = parsed_opts.get('outencoding', parsed_opts.get('encoding'))
# handle ``pygmentize -N``
infn = opts.pop('-N', None)
if infn is not None:
try:
lexer = get_lexer_for_filename(infn, **parsed_opts)
except ClassNotFound as err:
lexer = TextLexer()
except OptionError as err:
print('Error:', err, file=sys.stderr)
return 1
print(lexer.aliases[0])
return 0
# handle ``pygmentize -S``
S_opt = opts.pop('-S', None)
a_opt = opts.pop('-a', None)
if S_opt is not None:
f_opt = opts.pop('-f', None)
if not f_opt:
print(usage, file=sys.stderr)
return 2
if opts or args:
print(usage, file=sys.stderr)
return 2
try:
parsed_opts['style'] = S_opt
fmter = get_formatter_by_name(f_opt, **parsed_opts)
except ClassNotFound as err:
print(err, file=sys.stderr)
return 1
arg = a_opt or ''
try:
print(fmter.get_style_defs(arg))
except Exception as err:
print('Error:', err, file=sys.stderr)
return 1
return 0
# if no -S is given, -a is not allowed
if a_opt is not None:
print(usage, file=sys.stderr)
return 2
# parse -F options
F_opts = _parse_filters(F_opts)
opts.pop('-F', None)
# select lexer
lexer = opts.pop('-l', None)
if lexer:
try:
lexer = get_lexer_by_name(lexer, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
# read input code
code = None
if args:
if len(args) > 1:
print(usage, file=sys.stderr)
return 2
if '-s' in opts:
print('Error: -s option not usable when input file specified',
file=sys.stderr)
return 1
infn = args[0]
try:
with open(infn, 'rb') as infp:
code = infp.read()
except Exception as err:
print('Error: cannot read infile:', err, file=sys.stderr)
return 1
if not inencoding:
code, inencoding = guess_decode(code)
# do we have to guess the lexer?
if not lexer:
try:
lexer = get_lexer_for_filename(infn, code, **parsed_opts)
except ClassNotFound as err:
if '-g' in opts:
try:
lexer = guess_lexer(code, **parsed_opts)
except ClassNotFound:
lexer = TextLexer(**parsed_opts)
else:
print('Error:', err, file=sys.stderr)
return 1
except OptionError as err:
print('Error:', err, file=sys.stderr)
return 1
elif '-s' not in opts: # treat stdin as full file (-s support is later)
# read code from terminal, always in binary mode since we want to
# decode ourselves and be tolerant with it
if sys.version_info > (3,):
# Python 3: we have to use .buffer to get a binary stream
code = sys.stdin.buffer.read()
else:
code = sys.stdin.read()
if not inencoding:
code, inencoding = guess_decode_from_terminal(code, sys.stdin)
# else the lexer will do the decoding
if not lexer:
try:
lexer = guess_lexer(code, **parsed_opts)
except ClassNotFound:
lexer = TextLexer(**parsed_opts)
# select formatter
outfn = opts.pop('-o', None)
fmter = opts.pop('-f', None)
if fmter:
try:
fmter = get_formatter_by_name(fmter, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
if outfn:
if not fmter:
try:
fmter = get_formatter_for_filename(outfn, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
try:
outfile = open(outfn, 'wb')
except Exception as err:
print('Error: cannot open outfile:', err, file=sys.stderr)
return 1
else:
if not fmter:
fmter = TerminalFormatter(**parsed_opts)
if sys.version_info > (3,):
# Python 3: we have to use .buffer to get a binary stream
outfile = sys.stdout.buffer
else:
outfile = sys.stdout
# determine output encoding if not explicitly selected
if not outencoding:
if outfn:
# output file? use lexer encoding for now (can still be None)
fmter.encoding = inencoding
else:
# else use terminal encoding
fmter.encoding = terminal_encoding(sys.stdout)
# provide coloring under Windows, if possible
if not outfn and sys.platform in ('win32', 'cygwin') and \
fmter.name in ('Terminal', 'Terminal256'):
# unfortunately colorama doesn't support binary streams on Py3
if sys.version_info > (3,):
import io
outfile = io.TextIOWrapper(outfile, encoding=fmter.encoding)
fmter.encoding = None
try:
import colorama.initialise
except ImportError:
pass
else:
outfile = colorama.initialise.wrap_stream(
outfile, convert=None, strip=None, autoreset=False, wrap=True)
# When using the LaTeX formatter and the option `escapeinside` is
# specified, we need a special lexer which collects escaped text
# before running the chosen language lexer.
escapeinside = parsed_opts.get('escapeinside', '')
if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter):
left = escapeinside[0]
right = escapeinside[1]
lexer = LatexEmbeddedLexer(left, right, lexer)
# ... and do it!
try:
# process filters
for fname, fopts in F_opts:
lexer.add_filter(fname, **fopts)
if '-s' not in opts:
# process whole input as per normal...
highlight(code, lexer, fmter, outfile)
else:
if not lexer:
print('Error: when using -s a lexer has to be selected with -l',
file=sys.stderr)
return 1
# line by line processing of stdin (eg: for 'tail -f')...
try:
while 1:
if sys.version_info > (3,):
# Python 3: we have to use .buffer to get a binary stream
line = sys.stdin.buffer.readline()
else:
line = sys.stdin.readline()
if not line:
break
if not inencoding:
line = guess_decode_from_terminal(line, sys.stdin)[0]
highlight(line, lexer, fmter, outfile)
if hasattr(outfile, 'flush'):
outfile.flush()
except KeyboardInterrupt:
return 0
except Exception:
import traceback
info = traceback.format_exception(*sys.exc_info())
msg = info[-1].strip()
if len(info) >= 3:
# extract relevant file and position info
msg += '\n (f%s)' % info[-2].split('\n')[0].strip()[1:]
print(file=sys.stderr)
print('*** Error while highlighting:', file=sys.stderr)
print(msg, file=sys.stderr)
return 1
return 0
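
An editor's aside, not in the diff: main() above is the pygmentize entry point and can be driven directly. This is equivalent to running pygmentize -l python -f terminal example.py; the input file name is assumed to exist:

from pygments.cmdline import main

exit_code = main(['pygmentize', '-l', 'python', '-f', 'terminal', 'example.py'])
print(exit_code)  # 0 on success, 1 on error, 2 on bad usage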


@@ -1,74 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.console
~~~~~~~~~~~~~~~~
Format colored console output.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
esc = "\x1b["
codes = {}
codes[""] = ""
codes["reset"] = esc + "39;49;00m"
codes["bold"] = esc + "01m"
codes["faint"] = esc + "02m"
codes["standout"] = esc + "03m"
codes["underline"] = esc + "04m"
codes["blink"] = esc + "05m"
codes["overline"] = esc + "06m"
dark_colors = ["black", "darkred", "darkgreen", "brown", "darkblue",
"purple", "teal", "lightgray"]
light_colors = ["darkgray", "red", "green", "yellow", "blue",
"fuchsia", "turquoise", "white"]
x = 30
for d, l in zip(dark_colors, light_colors):
codes[d] = esc + "%im" % x
codes[l] = esc + "%i;01m" % x
x += 1
del d, l, x
codes["darkteal"] = codes["turquoise"]
codes["darkyellow"] = codes["brown"]
codes["fuscia"] = codes["fuchsia"]
codes["white"] = codes["bold"]
def reset_color():
return codes["reset"]
def colorize(color_key, text):
return codes[color_key] + text + codes["reset"]
def ansiformat(attr, text):
"""
Format ``text`` with a color and/or some attributes::
color normal color
*color* bold color
_color_ underlined color
+color+ blinking color
"""
result = []
if attr[:1] == attr[-1:] == '+':
result.append(codes['blink'])
attr = attr[1:-1]
if attr[:1] == attr[-1:] == '*':
result.append(codes['bold'])
attr = attr[1:-1]
if attr[:1] == attr[-1:] == '_':
result.append(codes['underline'])
attr = attr[1:-1]
result.append(codes[attr])
result.append(text)
result.append(codes['reset'])
return ''.join(result)
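
A quick sketch of how these helpers are used (assumes an ANSI-capable terminal; the color keys come from the `codes` table above):

    from pygments.console import ansiformat, colorize

    print(colorize('darkgreen', 'plain dark green'))
    print(ansiformat('*red*', 'bold red'))          # *color* -> bold
    print(ansiformat('_blue_', 'underlined blue'))  # _color_ -> underline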

View File

@@ -1,74 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.filter
~~~~~~~~~~~~~~~
Module that implements the default filter.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
def apply_filters(stream, filters, lexer=None):
"""
Use this method to apply an iterable of filters to
a stream. If lexer is given it's forwarded to the
filter, otherwise the filter receives `None`.
"""
def _apply(filter_, stream):
for token in filter_.filter(lexer, stream):
yield token
for filter_ in filters:
stream = _apply(filter_, stream)
return stream
def simplefilter(f):
"""
Decorator that converts a function into a filter::
@simplefilter
def lowercase(self, lexer, stream, options):
for ttype, value in stream:
yield ttype, value.lower()
"""
return type(f.__name__, (FunctionFilter,), {
'function': f,
'__module__': getattr(f, '__module__'),
'__doc__': f.__doc__
})
class Filter(object):
"""
Default filter. Subclass this class or use the `simplefilter`
decorator to create own filters.
"""
def __init__(self, **options):
self.options = options
def filter(self, lexer, stream):
raise NotImplementedError()
class FunctionFilter(Filter):
"""
Abstract class used by `simplefilter` to create simple
function filters on the fly. The `simplefilter` decorator
automatically creates subclasses of this class for
functions passed to it.
"""
function = None
def __init__(self, **options):
if not hasattr(self, 'function'):
raise TypeError('%r used without bound function' %
self.__class__.__name__)
Filter.__init__(self, **options)
def filter(self, lexer, stream):
# pylint: disable-msg=E1102
for ttype, value in self.function(lexer, stream, self.options):
yield ttype, value
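
A minimal sketch of the two ways to build a filter with this module. Note that a `simplefilter` function takes `self` first, because it becomes a method of the generated FunctionFilter subclass (lexer choice here is illustrative):

    from pygments.filter import Filter, apply_filters, simplefilter
    from pygments.lexers import PythonLexer

    class Shout(Filter):
        # Subclass style: uppercase every token value.
        def filter(self, lexer, stream):
            for ttype, value in stream:
                yield ttype, value.upper()

    @simplefilter
    def whisper(self, lexer, stream, options):
        # Decorator style: lowercase every token value.
        for ttype, value in stream:
            yield ttype, value.lower()

    tokens = PythonLexer().get_tokens('DEF foo(): PASS')
    for ttype, value in apply_filters(tokens, [whisper()], None):
        print(ttype, repr(value))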

View File

@@ -1,350 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.filters
~~~~~~~~~~~~~~~~
Module containing filter lookup functions and default
filters.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
string_to_tokentype
from pygments.filter import Filter
from pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
get_choice_opt, ClassNotFound, OptionError, text_type, string_types
from pygments.plugin import find_plugin_filters
def find_filter_class(filtername):
"""Lookup a filter by name. Return None if not found."""
if filtername in FILTERS:
return FILTERS[filtername]
for name, cls in find_plugin_filters():
if name == filtername:
return cls
return None
def get_filter_by_name(filtername, **options):
"""Return an instantiated filter.
Options are passed to the filter initializer if wanted.
Raise a ClassNotFound if not found.
"""
cls = find_filter_class(filtername)
if cls:
return cls(**options)
else:
raise ClassNotFound('filter %r not found' % filtername)
def get_all_filters():
"""Return a generator of all filter names."""
for name in FILTERS:
yield name
for name, _ in find_plugin_filters():
yield name
def _replace_special(ttype, value, regex, specialttype,
replacefunc=lambda x: x):
last = 0
for match in regex.finditer(value):
start, end = match.start(), match.end()
if start != last:
yield ttype, value[last:start]
yield specialttype, replacefunc(value[start:end])
last = end
if last != len(value):
yield ttype, value[last:]
class CodeTagFilter(Filter):
"""Highlight special code tags in comments and docstrings.
Options accepted:
`codetags` : list of strings
A list of strings that are flagged as code tags. The default is to
highlight ``XXX``, ``TODO``, ``BUG`` and ``NOTE``.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
tags = get_list_opt(options, 'codetags',
['XXX', 'TODO', 'BUG', 'NOTE'])
self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([
re.escape(tag) for tag in tags if tag
]))
def filter(self, lexer, stream):
regex = self.tag_re
for ttype, value in stream:
if ttype in String.Doc or \
ttype in Comment and \
ttype not in Comment.Preproc:
for sttype, svalue in _replace_special(ttype, value, regex,
Comment.Special):
yield sttype, svalue
else:
yield ttype, value
class KeywordCaseFilter(Filter):
"""Convert keywords to lowercase or uppercase or capitalize them, which
means first letter uppercase, rest lowercase.
This can be useful e.g. if you highlight Pascal code and want to adapt the
code to your styleguide.
Options accepted:
`case` : string
The casing to convert keywords to. Must be one of ``'lower'``,
``'upper'`` or ``'capitalize'``. The default is ``'lower'``.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
case = get_choice_opt(options, 'case',
['lower', 'upper', 'capitalize'], 'lower')
self.convert = getattr(text_type, case)
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype in Keyword:
yield ttype, self.convert(value)
else:
yield ttype, value
class NameHighlightFilter(Filter):
"""Highlight a normal Name (and Name.*) token with a different token type.
Example::
filter = NameHighlightFilter(
names=['foo', 'bar', 'baz'],
tokentype=Name.Function,
)
This would highlight the names "foo", "bar" and "baz"
as functions. `Name.Function` is the default token type.
Options accepted:
`names` : list of strings
A list of names that should be given the different token type.
There is no default.
`tokentype` : TokenType or string
A token type or a string containing a token type name that is
used for highlighting the strings in `names`. The default is
`Name.Function`.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.names = set(get_list_opt(options, 'names', []))
tokentype = options.get('tokentype')
if tokentype:
self.tokentype = string_to_tokentype(tokentype)
else:
self.tokentype = Name.Function
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype in Name and value in self.names:
yield self.tokentype, value
else:
yield ttype, value
class ErrorToken(Exception):
pass
class RaiseOnErrorTokenFilter(Filter):
"""Raise an exception when the lexer generates an error token.
Options accepted:
`excclass` : Exception class
The exception class to raise.
The default is `pygments.filters.ErrorToken`.
.. versionadded:: 0.8
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.exception = options.get('excclass', ErrorToken)
try:
# issubclass() will raise TypeError if first argument is not a class
if not issubclass(self.exception, Exception):
raise TypeError
except TypeError:
raise OptionError('excclass option is not an exception class')
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype is Error:
raise self.exception(value)
yield ttype, value
class VisibleWhitespaceFilter(Filter):
"""Convert tabs, newlines and/or spaces to visible characters.
Options accepted:
`spaces` : string or bool
If this is a one-character string, spaces will be replaced by this string.
If it is another true value, spaces will be replaced by ``·`` (unicode
MIDDLE DOT). If it is a false value, spaces will not be replaced. The
default is ``False``.
`tabs` : string or bool
The same as for `spaces`, but the default replacement character is ``»``
(unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value
is ``False``. Note: this will not work if the `tabsize` option for the
lexer is nonzero, as tabs will already have been expanded then.
`tabsize` : int
If tabs are to be replaced by this filter (see the `tabs` option), this
is the total number of characters that a tab should be expanded to.
The default is ``8``.
`newlines` : string or bool
The same as for `spaces`, but the default replacement character is ``¶``
(unicode PILCROW SIGN). The default value is ``False``.
`wstokentype` : bool
If true, give whitespace the special `Whitespace` token type. This allows
styling the visible whitespace differently (e.g. greyed out), but it can
disrupt background colors. The default is ``True``.
.. versionadded:: 0.8
"""
def __init__(self, **options):
Filter.__init__(self, **options)
for name, default in [('spaces', u'·'),
('tabs', u'»'),
('newlines', u'')]:
opt = options.get(name, False)
if isinstance(opt, string_types) and len(opt) == 1:
setattr(self, name, opt)
else:
setattr(self, name, (opt and default or ''))
tabsize = get_int_opt(options, 'tabsize', 8)
if self.tabs:
self.tabs += ' ' * (tabsize - 1)
if self.newlines:
self.newlines += '\n'
self.wstt = get_bool_opt(options, 'wstokentype', True)
def filter(self, lexer, stream):
if self.wstt:
spaces = self.spaces or u' '
tabs = self.tabs or u'\t'
newlines = self.newlines or u'\n'
regex = re.compile(r'\s')
def replacefunc(wschar):
if wschar == ' ':
return spaces
elif wschar == '\t':
return tabs
elif wschar == '\n':
return newlines
return wschar
for ttype, value in stream:
for sttype, svalue in _replace_special(ttype, value, regex,
Whitespace, replacefunc):
yield sttype, svalue
else:
spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
# simpler processing
for ttype, value in stream:
if spaces:
value = value.replace(' ', spaces)
if tabs:
value = value.replace('\t', tabs)
if newlines:
value = value.replace('\n', newlines)
yield ttype, value
class GobbleFilter(Filter):
"""Gobbles source code lines (eats initial characters).
This filter drops the first ``n`` characters off every line of code. This
may be useful when the source code fed to the lexer is indented by a fixed
amount of space that isn't desired in the output.
Options accepted:
`n` : int
The number of characters to gobble.
.. versionadded:: 1.2
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.n = get_int_opt(options, 'n', 0)
def gobble(self, value, left):
if left < len(value):
return value[left:], 0
else:
return u'', left - len(value)
def filter(self, lexer, stream):
n = self.n
left = n # How many characters left to gobble.
for ttype, value in stream:
# Remove ``left`` tokens from first line, ``n`` from all others.
parts = value.split('\n')
(parts[0], left) = self.gobble(parts[0], left)
for i in range(1, len(parts)):
(parts[i], left) = self.gobble(parts[i], n)
value = u'\n'.join(parts)
if value != '':
yield ttype, value
class TokenMergeFilter(Filter):
"""Merges consecutive tokens with the same token type in the output
stream of a lexer.
.. versionadded:: 1.2
"""
def __init__(self, **options):
Filter.__init__(self, **options)
def filter(self, lexer, stream):
current_type = None
current_value = None
for ttype, value in stream:
if ttype is current_type:
current_value += value
else:
if current_type is not None:
yield current_type, current_value
current_type = ttype
current_value = value
if current_type is not None:
yield current_type, current_value
FILTERS = {
'codetagify': CodeTagFilter,
'keywordcase': KeywordCaseFilter,
'highlight': NameHighlightFilter,
'raiseonerror': RaiseOnErrorTokenFilter,
'whitespace': VisibleWhitespaceFilter,
'gobble': GobbleFilter,
'tokenmerge': TokenMergeFilter,
}
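
In practice these filters are attached to a lexer by name, with options forwarded to the filter's initializer via the FILTERS lookup above. A hedged sketch (lexer and formatter choices are illustrative):

    from pygments import highlight
    from pygments.lexers import get_lexer_by_name
    from pygments.formatters import NullFormatter

    lexer = get_lexer_by_name('pascal')
    lexer.add_filter('keywordcase', case='upper')    # resolved through FILTERS
    lexer.add_filter('codetagify', codetags=['TODO'])
    print(highlight('begin { TODO: refactor } end.', lexer, NullFormatter()))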

View File

@@ -1,95 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.formatter
~~~~~~~~~~~~~~~~~~
Base formatter class.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import codecs
from pygments.util import get_bool_opt, string_types
from pygments.styles import get_style_by_name
__all__ = ['Formatter']
def _lookup_style(style):
if isinstance(style, string_types):
return get_style_by_name(style)
return style
class Formatter(object):
"""
Converts a token stream to text.
Options accepted:
``style``
The style to use, can be a string or a Style subclass
(default: "default"). Not used by e.g. the
TerminalFormatter.
``full``
Tells the formatter to output a "full" document, i.e.
a complete self-contained document. This doesn't have
any effect for some formatters (default: false).
``title``
If ``full`` is true, the title that should be used to
caption the document (default: '').
``encoding``
If given, must be an encoding name. This will be used to
convert the Unicode token strings to byte strings in the
output. If it is "" or None, Unicode strings will be written
to the output file, which most file-like objects do not
support (default: None).
``outencoding``
Overrides ``encoding`` if given.
"""
#: Name of the formatter
name = None
#: Shortcuts for the formatter
aliases = []
#: fn match rules
filenames = []
#: If True, this formatter outputs Unicode strings when no encoding
#: option is given.
unicodeoutput = True
def __init__(self, **options):
self.style = _lookup_style(options.get('style', 'default'))
self.full = get_bool_opt(options, 'full', False)
self.title = options.get('title', '')
self.encoding = options.get('encoding', None) or None
if self.encoding in ('guess', 'chardet'):
# can happen for e.g. pygmentize -O encoding=guess
self.encoding = 'utf-8'
self.encoding = options.get('outencoding') or self.encoding
self.options = options
def get_style_defs(self, arg=''):
"""
Return the style definitions for the current style as a string.
``arg`` is an additional argument whose meaning depends on the
formatter used. Note that ``arg`` can also be a list or tuple
for some formatters like the html formatter.
"""
return ''
def format(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
"""
if self.encoding:
# wrap the outfile in a StreamWriter
outfile = codecs.lookup(self.encoding)[3](outfile)
return self.format_unencoded(tokensource, outfile)
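
Subclasses only need to implement `format_unencoded()`; the base `format()` above handles the optional encoding wrap. A toy sketch of a custom formatter (the class is hypothetical, for illustration only):

    from pygments.formatter import Formatter

    class TypedTextFormatter(Formatter):
        name = 'Typed text'
        aliases = ['typedtext']

        def format_unencoded(self, tokensource, outfile):
            # Emit each token value prefixed with its token type.
            for ttype, value in tokensource:
                outfile.write('[%s]%s' % (ttype, value))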

View File

@@ -1,118 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.formatters
~~~~~~~~~~~~~~~~~~~
Pygments formatters.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import types
import fnmatch
from os.path import basename
from pygments.formatters._mapping import FORMATTERS
from pygments.plugin import find_plugin_formatters
from pygments.util import ClassNotFound, itervalues
__all__ = ['get_formatter_by_name', 'get_formatter_for_filename',
'get_all_formatters'] + list(FORMATTERS)
_formatter_cache = {} # classes by name
_pattern_cache = {}
def _fn_matches(fn, glob):
"""Return whether the supplied file name fn matches pattern filename."""
if glob not in _pattern_cache:
pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
return pattern.match(fn)
return _pattern_cache[glob].match(fn)
def _load_formatters(module_name):
"""Load a formatter (and all others in the module too)."""
mod = __import__(module_name, None, None, ['__all__'])
for formatter_name in mod.__all__:
cls = getattr(mod, formatter_name)
_formatter_cache[cls.name] = cls
def get_all_formatters():
"""Return a generator for all formatter classes."""
# NB: this returns formatter classes, not info like get_all_lexers().
for info in itervalues(FORMATTERS):
if info[1] not in _formatter_cache:
_load_formatters(info[0])
yield _formatter_cache[info[1]]
for _, formatter in find_plugin_formatters():
yield formatter
def find_formatter_class(alias):
"""Lookup a formatter by alias.
Returns None if not found.
"""
for module_name, name, aliases, _, _ in itervalues(FORMATTERS):
if alias in aliases:
if name not in _formatter_cache:
_load_formatters(module_name)
return _formatter_cache[name]
for _, cls in find_plugin_formatters():
if alias in cls.aliases:
return cls
def get_formatter_by_name(_alias, **options):
"""Lookup and instantiate a formatter by alias.
Raises ClassNotFound if not found.
"""
cls = find_formatter_class(_alias)
if cls is None:
raise ClassNotFound("No formatter found for name %r" % _alias)
return cls(**options)
def get_formatter_for_filename(fn, **options):
"""Lookup and instantiate a formatter by filename pattern.
Raises ClassNotFound if not found.
"""
fn = basename(fn)
for modname, name, _, filenames, _ in itervalues(FORMATTERS):
for filename in filenames:
if _fn_matches(fn, filename):
if name not in _formatter_cache:
_load_formatters(modname)
return _formatter_cache[name](**options)
for cls in find_plugin_formatters():
for filename in cls.filenames:
if _fn_matches(fn, filename):
return cls(**options)
raise ClassNotFound("No formatter found for file name %r" % fn)
class _automodule(types.ModuleType):
"""Automatically import formatters."""
def __getattr__(self, name):
info = FORMATTERS.get(name)
if info:
_load_formatters(info[0])
cls = _formatter_cache[info[1]]
setattr(self, name, cls)
return cls
raise AttributeError(name)
oldmod = sys.modules[__name__]
newmod = _automodule(__name__)
newmod.__dict__.update(oldmod.__dict__)
sys.modules[__name__] = newmod
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
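
Callers normally go through the lookup helpers rather than importing formatter classes directly; both raise ClassNotFound on a miss. A short sketch:

    from pygments.formatters import (get_formatter_by_name,
                                     get_formatter_for_filename)
    from pygments.util import ClassNotFound

    fmter = get_formatter_by_name('html', linenos=True)  # by alias
    fmter = get_formatter_for_filename('out.tex')        # via *.tex pattern
    try:
        get_formatter_by_name('no-such-alias')
    except ClassNotFound as err:
        print(err)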

View File

@@ -1,76 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.formatters._mapping
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter mapping definitions. This file is generated by itself. Every time
you change something on a builtin formatter definition, run this script from
the formatters folder to update it.
Do not alter the FORMATTERS dictionary by hand.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
FORMATTERS = {
'BBCodeFormatter': ('pygments.formatters.bbcode', 'BBCode', ('bbcode', 'bb'), (), 'Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there.'),
'BmpImageFormatter': ('pygments.formatters.img', 'img_bmp', ('bmp', 'bitmap'), ('*.bmp',), 'Create a bitmap image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
'GifImageFormatter': ('pygments.formatters.img', 'img_gif', ('gif',), ('*.gif',), 'Create a GIF image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
'HtmlFormatter': ('pygments.formatters.html', 'HTML', ('html',), ('*.html', '*.htm'), "Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass` option."),
'ImageFormatter': ('pygments.formatters.img', 'img', ('img', 'IMG', 'png'), ('*.png',), 'Create a PNG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
'JpgImageFormatter': ('pygments.formatters.img', 'img_jpg', ('jpg', 'jpeg'), ('*.jpg',), 'Create a JPEG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
'LatexFormatter': ('pygments.formatters.latex', 'LaTeX', ('latex', 'tex'), ('*.tex',), 'Format tokens as LaTeX code. This needs the `fancyvrb` and `color` standard packages.'),
'NullFormatter': ('pygments.formatters.other', 'Text only', ('text', 'null'), ('*.txt',), 'Output the text unchanged without any formatting.'),
'RawTokenFormatter': ('pygments.formatters.other', 'Raw tokens', ('raw', 'tokens'), ('*.raw',), 'Format tokens as a raw representation for storing token streams.'),
'RtfFormatter': ('pygments.formatters.rtf', 'RTF', ('rtf',), ('*.rtf',), 'Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft(R) Word(R) documents.'),
'SvgFormatter': ('pygments.formatters.svg', 'SVG', ('svg',), ('*.svg',), 'Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a ``<text>`` element with explicit ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the individual token styles.'),
'Terminal256Formatter': ('pygments.formatters.terminal256', 'Terminal256', ('terminal256', 'console256', '256'), (), 'Format tokens with ANSI color sequences, for output in a 256-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
'TerminalFormatter': ('pygments.formatters.terminal', 'Terminal', ('terminal', 'console'), (), 'Format tokens with ANSI color sequences, for output in a text console. Color sequences are terminated at newlines, so that paging the output works correctly.'),
'TestcaseFormatter': ('pygments.formatters.other', 'Testcase', ('testcase',), (), 'Format tokens as appropriate for a new testcase.')
}
if __name__ == '__main__':
import sys
import os
# lookup formatters
found_formatters = []
imports = []
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from pygments.util import docstring_headline
for root, dirs, files in os.walk('.'):
for filename in files:
if filename.endswith('.py') and not filename.startswith('_'):
module_name = 'pygments.formatters%s.%s' % (
root[1:].replace('/', '.'), filename[:-3])
print(module_name)
module = __import__(module_name, None, None, [''])
for formatter_name in module.__all__:
formatter = getattr(module, formatter_name)
found_formatters.append(
'%r: %r' % (formatter_name,
(module_name,
formatter.name,
tuple(formatter.aliases),
tuple(formatter.filenames),
docstring_headline(formatter))))
# sort them to make the diff minimal
found_formatters.sort()
# extract useful sourcecode from this file
with open(__file__) as fp:
content = fp.read()
header = content[:content.find('FORMATTERS = {')]
footer = content[content.find("if __name__ == '__main__':"):]
# write new file
with open(__file__, 'w') as fp:
fp.write(header)
fp.write('FORMATTERS = {\n %s\n}\n\n' % ',\n '.join(found_formatters))
fp.write(footer)
print('=== %d formatters processed.' % len(found_formatters))
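
Each FORMATTERS entry is a 5-tuple of (module name, formatter name, aliases, filename patterns, docstring headline), keyed by class name. A small sketch that walks the table:

    from pygments.formatters._mapping import FORMATTERS

    for clsname, (mod, name, aliases, patterns, _doc) in FORMATTERS.items():
        print('%s (%s): aliases=%r patterns=%r' % (name, clsname,
                                                   aliases, patterns))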

View File

@@ -1,109 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.formatters.bbcode
~~~~~~~~~~~~~~~~~~~~~~~~~~
BBcode formatter.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_bool_opt
__all__ = ['BBCodeFormatter']
class BBCodeFormatter(Formatter):
"""
Format tokens with BBcodes. These formatting codes are used by many
bulletin boards, so you can highlight your sourcecode with pygments before
posting it there.
This formatter has no support for background colors and borders, as there
are no common BBcode tags for that.
Some board systems (e.g. phpBB) don't support colors in their [code] tag,
so you can't use the highlighting together with that tag.
Text in a [code] tag usually is shown with a monospace font (which this
formatter can do with the ``monofont`` option) and no spaces (which you
need for indentation) are removed.
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`codetag`
If set to true, put the output into ``[code]`` tags (default:
``false``)
`monofont`
If set to true, add a tag to show the code with a monospace font
(default: ``false``).
"""
name = 'BBCode'
aliases = ['bbcode', 'bb']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self._code = get_bool_opt(options, 'codetag', False)
self._mono = get_bool_opt(options, 'monofont', False)
self.styles = {}
self._make_styles()
def _make_styles(self):
for ttype, ndef in self.style:
start = end = ''
if ndef['color']:
start += '[color=#%s]' % ndef['color']
end = '[/color]' + end
if ndef['bold']:
start += '[b]'
end = '[/b]' + end
if ndef['italic']:
start += '[i]'
end = '[/i]' + end
if ndef['underline']:
start += '[u]'
end = '[/u]' + end
# there are no common BBcodes for background-color and border
self.styles[ttype] = start, end
def format_unencoded(self, tokensource, outfile):
if self._code:
outfile.write('[code]')
if self._mono:
outfile.write('[font=monospace]')
lastval = ''
lasttype = None
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
if ttype == lasttype:
lastval += value
else:
if lastval:
start, end = self.styles[lasttype]
outfile.write(''.join((start, lastval, end)))
lastval = value
lasttype = ttype
if lastval:
start, end = self.styles[lasttype]
outfile.write(''.join((start, lastval, end)))
if self._mono:
outfile.write('[/font]')
if self._code:
outfile.write('[/code]')
if self._code or self._mono:
outfile.write('\n')
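
A usage sketch; the output is BBCode markup, suitable for boards that allow [color] tags:

    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import BBCodeFormatter

    fmter = BBCodeFormatter(codetag=True, monofont=True)
    print(highlight('def foo():\n    pass\n', PythonLexer(), fmter))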

View File

@@ -1,839 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.formatters.html
~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for HTML output.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import os
import sys
import os.path
from pygments.formatter import Formatter
from pygments.token import Token, Text, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
StringIO, string_types, iteritems
try:
import ctags
except ImportError:
ctags = None
__all__ = ['HtmlFormatter']
_escape_html_table = {
ord('&'): u'&amp;',
ord('<'): u'&lt;',
ord('>'): u'&gt;',
ord('"'): u'&quot;',
ord("'"): u'&#39;',
}
def escape_html(text, table=_escape_html_table):
"""Escape &, <, > as well as single and double quotes for HTML."""
return text.translate(table)
def get_random_id():
"""Return a random id for javascript fields."""
from random import random
from time import time
try:
from hashlib import sha1 as sha
except ImportError:
import sha
sha = sha.new
return sha(('%s|%s' % (random(), time())).encode('utf-8')).hexdigest()
def _get_ttype_class(ttype):
fname = STANDARD_TYPES.get(ttype)
if fname:
return fname
aname = ''
while fname is None:
aname = '-' + ttype[-1] + aname
ttype = ttype.parent
fname = STANDARD_TYPES.get(ttype)
return fname + aname
CSSFILE_TEMPLATE = '''\
td.linenos { background-color: #f0f0f0; padding-right: 10px; }
span.lineno { background-color: #f0f0f0; padding: 0 5px 0 5px; }
pre { line-height: 125%%; }
%(styledefs)s
'''
DOC_HEADER = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
<style type="text/css">
''' + CSSFILE_TEMPLATE + '''
</style>
</head>
<body>
<h2>%(title)s</h2>
'''
DOC_HEADER_EXTERNALCSS = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
<link rel="stylesheet" href="%(cssfile)s" type="text/css">
</head>
<body>
<h2>%(title)s</h2>
'''
DOC_FOOTER = '''\
</body>
</html>
'''
class HtmlFormatter(Formatter):
r"""
Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped
in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass`
option.
If the `linenos` option is set to ``"table"``, the ``<pre>`` is
additionally wrapped inside a ``<table>`` which has one row and two
cells: one containing the line numbers and one containing the code.
Example:
.. sourcecode:: html
<div class="highlight" >
<table><tr>
<td class="linenos" title="click to toggle"
onclick="with (this.firstChild.style)
{ display = (display == '') ? 'none' : '' }">
<pre>1
2</pre>
</td>
<td class="code">
<pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar):
<span class="Ke">pass</span>
</pre>
</td>
</tr></table></div>
(whitespace added to improve clarity).
Wrapping can be disabled using the `nowrap` option.
A list of lines can be specified using the `hl_lines` option to make these
lines highlighted (as of Pygments 0.11).
With the `full` option, a complete HTML 4 document is output, including
the style definitions inside a ``<style>`` tag, or in a separate file if
the `cssfile` option is given.
When `tagsfile` is set to the path of a ctags index file, it is used to
generate hyperlinks from names to their definition. You must enable
`anchorlines` and run ctags with the `-n` option for this to work. The
`python-ctags` module from PyPI must be installed to use this feature;
otherwise a `RuntimeError` will be raised.
The `get_style_defs(arg='')` method of a `HtmlFormatter` returns a string
containing CSS rules for the CSS classes used by the formatter. The
argument `arg` can be used to specify additional CSS selectors that
are prepended to the classes. A call `fmter.get_style_defs('td .code')`
would result in the following CSS classes:
.. sourcecode:: css
td .code .kw { font-weight: bold; color: #00FF00 }
td .code .cm { color: #999999 }
...
If you have Pygments 0.6 or higher, you can also pass a list or tuple to the
`get_style_defs()` method to request multiple prefixes for the tokens:
.. sourcecode:: python
formatter.get_style_defs(['div.syntax pre', 'pre.syntax'])
The output would then look like this:
.. sourcecode:: css
div.syntax pre .kw,
pre.syntax .kw { font-weight: bold; color: #00FF00 }
div.syntax pre .cm,
pre.syntax .cm { color: #999999 }
...
Additional options accepted:
`nowrap`
If set to ``True``, don't wrap the tokens at all, not even inside a ``<pre>``
tag. This disables most other options (default: ``False``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``). This option has no effect if the `cssfile`
and `noclobber_cssfile` option are given and the file specified in
`cssfile` exists.
`noclasses`
If set to true, token ``<span>`` tags will not use CSS classes, but
inline styles. This is not recommended for larger pieces of code since
it increases output size by quite a bit (default: ``False``).
`classprefix`
Since the token types use relatively short class names, they may clash
with some of your own class names. In this case you can use the
`classprefix` option to give a string to prepend to all Pygments-generated
CSS class names for token types.
Note that this option also affects the output of `get_style_defs()`.
`cssclass`
CSS class for the wrapping ``<div>`` tag (default: ``'highlight'``).
If you set this option, the default selector for `get_style_defs()`
will be this class.
.. versionadded:: 0.9
If you select the ``'table'`` line numbers, the wrapping table will
have a CSS class of this string plus ``'table'``, the default is
accordingly ``'highlighttable'``.
`cssstyles`
Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``).
`prestyles`
Inline CSS styles for the ``<pre>`` tag (default: ``''``).
.. versionadded:: 0.11
`cssfile`
If the `full` option is true and this option is given, it must be the
name of an external file. If the filename does not include an absolute
path, the file's path will be assumed to be relative to the main output
file's path, if the latter can be found. The stylesheet is then written
to this file instead of the HTML file.
.. versionadded:: 0.6
`noclobber_cssfile`
If `cssfile` is given and the specified file exists, the css file will
not be overwritten. This allows the use of the `full` option in
combination with a user specified css file. Default is ``False``.
.. versionadded:: 1.1
`linenos`
If set to ``'table'``, output line numbers as a table with two cells,
one containing the line numbers, the other the whole code. This is
copy-and-paste-friendly, but may cause alignment problems with some
browsers or fonts. If set to ``'inline'``, the line numbers will be
integrated in the ``<pre>`` tag that contains the code (that setting
is *new in Pygments 0.8*).
For compatibility with Pygments 0.7 and earlier, every true value
except ``'inline'`` means the same as ``'table'`` (in particular, that
means also ``True``).
The default value is ``False``, which means no line numbers at all.
**Note:** with the default ("table") line number mechanism, the line
numbers and code can have different line heights in Internet Explorer
unless you give the enclosing ``<pre>`` tags an explicit ``line-height``
CSS property (you get the default line spacing with ``line-height:
125%``).
`hl_lines`
Specify a list of lines to be highlighted.
.. versionadded:: 0.11
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`linenospecial`
If set to a number n > 0, every nth line number is given the CSS
class ``"special"`` (default: ``0``).
`nobackground`
If set to ``True``, the formatter won't output the background color
for the wrapping element (this automatically defaults to ``False``
when there is no wrapping element [eg: no argument for the
`get_syntax_defs` method given]) (default: ``False``).
.. versionadded:: 0.6
`lineseparator`
This string is output between lines of code. It defaults to ``"\n"``,
which is enough to break a line inside ``<pre>`` tags, but you can
e.g. set it to ``"<br>"`` to get HTML line breaks.
.. versionadded:: 0.7
`lineanchors`
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
output line in an anchor tag with a ``name`` of ``foo-linenumber``.
This allows easy linking to certain lines.
.. versionadded:: 0.9
`linespans`
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
output line in a span tag with an ``id`` of ``foo-linenumber``.
This allows easy access to lines via javascript.
.. versionadded:: 1.6
`anchorlinenos`
If set to `True`, will wrap line numbers in <a> tags. Used in
combination with `linenos` and `lineanchors`.
`tagsfile`
If set to the path of a ctags file, wrap names in anchor tags that
link to their definitions. `lineanchors` should be used, and the
tags file should specify line numbers (see the `-n` option to ctags).
.. versionadded:: 1.6
`tagurlformat`
A string formatting pattern used to generate links to ctags definitions.
Available variables are `%(path)s`, `%(fname)s` and `%(fext)s`.
Defaults to an empty string, resulting in just `#prefix-number` links.
.. versionadded:: 1.6
**Subclassing the HTML formatter**
.. versionadded:: 0.7
The HTML formatter is now built in a way that allows easy subclassing, thus
customizing the output HTML code. The `format()` method calls
`self._format_lines()` which returns a generator that yields tuples of ``(1,
line)``, where the ``1`` indicates that the ``line`` is a line of the
formatted source code.
If the `nowrap` option is set, the generator is then iterated over and the
resulting HTML is output.
Otherwise, `format()` calls `self.wrap()`, which wraps the generator with
other generators. These may add some HTML code to the one generated by
`_format_lines()`, either by modifying the lines generated by the latter,
then yielding them again with ``(1, line)``, and/or by yielding other HTML
code before or after the lines, with ``(0, html)``. The distinction between
source lines and other code makes it possible to wrap the generator multiple
times.
The default `wrap()` implementation adds a ``<div>`` and a ``<pre>`` tag.
A custom `HtmlFormatter` subclass could look like this:
.. sourcecode:: python
class CodeHtmlFormatter(HtmlFormatter):
def wrap(self, source, outfile):
return self._wrap_code(source)
def _wrap_code(self, source):
yield 0, '<code>'
for i, t in source:
if i == 1:
# it's a line of formatted code
t += '<br>'
yield i, t
yield 0, '</code>'
This results in wrapping the formatted lines with a ``<code>`` tag, where the
source lines are broken using ``<br>`` tags.
After calling `wrap()`, the `format()` method also adds the "line numbers"
and/or "full document" wrappers if the respective options are set. Then, all
HTML yielded by the wrapped generator is output.
"""
name = 'HTML'
aliases = ['html']
filenames = ['*.html', '*.htm']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.title = self._decodeifneeded(self.title)
self.nowrap = get_bool_opt(options, 'nowrap', False)
self.noclasses = get_bool_opt(options, 'noclasses', False)
self.classprefix = options.get('classprefix', '')
self.cssclass = self._decodeifneeded(options.get('cssclass', 'highlight'))
self.cssstyles = self._decodeifneeded(options.get('cssstyles', ''))
self.prestyles = self._decodeifneeded(options.get('prestyles', ''))
self.cssfile = self._decodeifneeded(options.get('cssfile', ''))
self.noclobber_cssfile = get_bool_opt(options, 'noclobber_cssfile', False)
self.tagsfile = self._decodeifneeded(options.get('tagsfile', ''))
self.tagurlformat = self._decodeifneeded(options.get('tagurlformat', ''))
if self.tagsfile:
if not ctags:
raise RuntimeError('The "ctags" package must be installed '
'to be able to use the "tagsfile" feature.')
self._ctags = ctags.CTags(self.tagsfile)
linenos = options.get('linenos', False)
if linenos == 'inline':
self.linenos = 2
elif linenos:
# compatibility with <= 0.7
self.linenos = 1
else:
self.linenos = 0
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
self.linenospecial = abs(get_int_opt(options, 'linenospecial', 0))
self.nobackground = get_bool_opt(options, 'nobackground', False)
self.lineseparator = options.get('lineseparator', '\n')
self.lineanchors = options.get('lineanchors', '')
self.linespans = options.get('linespans', '')
self.anchorlinenos = options.get('anchorlinenos', False)
self.hl_lines = set()
for lineno in get_list_opt(options, 'hl_lines', []):
try:
self.hl_lines.add(int(lineno))
except ValueError:
pass
self._create_stylesheet()
def _get_css_class(self, ttype):
"""Return the css class of this token type prefixed with
the classprefix option."""
ttypeclass = _get_ttype_class(ttype)
if ttypeclass:
return self.classprefix + ttypeclass
return ''
def _create_stylesheet(self):
t2c = self.ttype2class = {Token: ''}
c2s = self.class2style = {}
for ttype, ndef in self.style:
name = self._get_css_class(ttype)
style = ''
if ndef['color']:
style += 'color: #%s; ' % ndef['color']
if ndef['bold']:
style += 'font-weight: bold; '
if ndef['italic']:
style += 'font-style: italic; '
if ndef['underline']:
style += 'text-decoration: underline; '
if ndef['bgcolor']:
style += 'background-color: #%s; ' % ndef['bgcolor']
if ndef['border']:
style += 'border: 1px solid #%s; ' % ndef['border']
if style:
t2c[ttype] = name
# save len(ttype) to enable ordering the styles by
# hierarchy (necessary for CSS cascading rules!)
c2s[name] = (style[:-2], ttype, len(ttype))
def get_style_defs(self, arg=None):
"""
Return CSS style definitions for the classes produced by the current
highlighting style. ``arg`` can be a string or list of selectors to
insert before the token type classes.
"""
if arg is None:
arg = ('cssclass' in self.options and '.'+self.cssclass or '')
if isinstance(arg, string_types):
args = [arg]
else:
args = list(arg)
def prefix(cls):
if cls:
cls = '.' + cls
tmp = []
for arg in args:
tmp.append((arg and arg + ' ' or '') + cls)
return ', '.join(tmp)
styles = [(level, ttype, cls, style)
for cls, (style, ttype, level) in iteritems(self.class2style)
if cls and style]
styles.sort()
lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
for (level, ttype, cls, style) in styles]
if arg and not self.nobackground and \
self.style.background_color is not None:
text_style = ''
if Text in self.ttype2class:
text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
lines.insert(0, '%s { background: %s;%s }' %
(prefix(''), self.style.background_color, text_style))
if self.style.highlight_color is not None:
lines.insert(0, '%s.hll { background-color: %s }' %
(prefix(''), self.style.highlight_color))
return '\n'.join(lines)
def _decodeifneeded(self, value):
if isinstance(value, bytes):
if self.encoding:
return value.decode(self.encoding)
return value.decode()
return value
def _wrap_full(self, inner, outfile):
if self.cssfile:
if os.path.isabs(self.cssfile):
# it's an absolute filename
cssfilename = self.cssfile
else:
try:
filename = outfile.name
if not filename or filename[0] == '<':
# pseudo files, e.g. name == '<fdopen>'
raise AttributeError
cssfilename = os.path.join(os.path.dirname(filename),
self.cssfile)
except AttributeError:
print('Note: Cannot determine output file name, ' \
'using current directory as base for the CSS file name',
file=sys.stderr)
cssfilename = self.cssfile
# write CSS file only if noclobber_cssfile isn't given as an option.
try:
if not os.path.exists(cssfilename) or not self.noclobber_cssfile:
cf = open(cssfilename, "w")
cf.write(CSSFILE_TEMPLATE %
{'styledefs': self.get_style_defs('body')})
cf.close()
except IOError as err:
err.strerror = 'Error writing CSS file: ' + err.strerror
raise
yield 0, (DOC_HEADER_EXTERNALCSS %
dict(title = self.title,
cssfile = self.cssfile,
encoding = self.encoding))
else:
yield 0, (DOC_HEADER %
dict(title = self.title,
styledefs = self.get_style_defs('body'),
encoding = self.encoding))
for t, line in inner:
yield t, line
yield 0, DOC_FOOTER
def _wrap_tablelinenos(self, inner):
dummyoutfile = StringIO()
lncount = 0
for t, line in inner:
if t:
lncount += 1
dummyoutfile.write(line)
fl = self.linenostart
mw = len(str(lncount + fl - 1))
sp = self.linenospecial
st = self.linenostep
la = self.lineanchors
aln = self.anchorlinenos
nocls = self.noclasses
if sp:
lines = []
for i in range(fl, fl+lncount):
if i % st == 0:
if i % sp == 0:
if aln:
lines.append('<a href="#%s-%d" class="special">%*d</a>' %
(la, i, mw, i))
else:
lines.append('<span class="special">%*d</span>' % (mw, i))
else:
if aln:
lines.append('<a href="#%s-%d">%*d</a>' % (la, i, mw, i))
else:
lines.append('%*d' % (mw, i))
else:
lines.append('')
ls = '\n'.join(lines)
else:
lines = []
for i in range(fl, fl+lncount):
if i % st == 0:
if aln:
lines.append('<a href="#%s-%d">%*d</a>' % (la, i, mw, i))
else:
lines.append('%*d' % (mw, i))
else:
lines.append('')
ls = '\n'.join(lines)
# in case you wonder about the seemingly redundant <div> here: since the
# content in the other cell also is wrapped in a div, some browsers in
# some configurations seem to mess up the formatting...
if nocls:
yield 0, ('<table class="%stable">' % self.cssclass +
'<tr><td><div class="linenodiv" '
'style="background-color: #f0f0f0; padding-right: 10px">'
'<pre style="line-height: 125%">' +
ls + '</pre></div></td><td class="code">')
else:
yield 0, ('<table class="%stable">' % self.cssclass +
'<tr><td class="linenos"><div class="linenodiv"><pre>' +
ls + '</pre></div></td><td class="code">')
yield 0, dummyoutfile.getvalue()
yield 0, '</td></tr></table>'
def _wrap_inlinelinenos(self, inner):
# need a list of lines since we need the width of a single number :(
lines = list(inner)
sp = self.linenospecial
st = self.linenostep
num = self.linenostart
mw = len(str(len(lines) + num - 1))
if self.noclasses:
if sp:
for t, line in lines:
if num%sp == 0:
style = 'background-color: #ffffc0; padding: 0 5px 0 5px'
else:
style = 'background-color: #f0f0f0; padding: 0 5px 0 5px'
yield 1, '<span style="%s">%*s </span>' % (
style, mw, (num%st and ' ' or num)) + line
num += 1
else:
for t, line in lines:
yield 1, ('<span style="background-color: #f0f0f0; '
'padding: 0 5px 0 5px">%*s </span>' % (
mw, (num%st and ' ' or num)) + line)
num += 1
elif sp:
for t, line in lines:
yield 1, '<span class="lineno%s">%*s </span>' % (
num%sp == 0 and ' special' or '', mw,
(num%st and ' ' or num)) + line
num += 1
else:
for t, line in lines:
yield 1, '<span class="lineno">%*s </span>' % (
mw, (num%st and ' ' or num)) + line
num += 1
def _wrap_lineanchors(self, inner):
s = self.lineanchors
i = self.linenostart - 1 # subtract 1 since we have to increment i
# *before* yielding
for t, line in inner:
if t:
i += 1
yield 1, '<a name="%s-%d"></a>' % (s, i) + line
else:
yield 0, line
def _wrap_linespans(self, inner):
s = self.linespans
i = self.linenostart - 1
for t, line in inner:
if t:
i += 1
yield 1, '<span id="%s-%d">%s</span>' % (s, i, line)
else:
yield 0, line
def _wrap_div(self, inner):
style = []
if (self.noclasses and not self.nobackground and
self.style.background_color is not None):
style.append('background: %s' % (self.style.background_color,))
if self.cssstyles:
style.append(self.cssstyles)
style = '; '.join(style)
yield 0, ('<div' + (self.cssclass and ' class="%s"' % self.cssclass)
+ (style and (' style="%s"' % style)) + '>')
for tup in inner:
yield tup
yield 0, '</div>\n'
def _wrap_pre(self, inner):
style = []
if self.prestyles:
style.append(self.prestyles)
if self.noclasses:
style.append('line-height: 125%')
style = '; '.join(style)
yield 0, ('<pre' + (style and ' style="%s"' % style) + '>')
for tup in inner:
yield tup
yield 0, '</pre>'
def _format_lines(self, tokensource):
"""
Just format the tokens, without any wrapping tags.
Yield individual lines.
"""
nocls = self.noclasses
lsep = self.lineseparator
# for <span style=""> lookup only
getcls = self.ttype2class.get
c2s = self.class2style
escape_table = _escape_html_table
tagsfile = self.tagsfile
lspan = ''
line = ''
for ttype, value in tokensource:
if nocls:
cclass = getcls(ttype)
while cclass is None:
ttype = ttype.parent
cclass = getcls(ttype)
cspan = cclass and '<span style="%s">' % c2s[cclass][0] or ''
else:
cls = self._get_css_class(ttype)
cspan = cls and '<span class="%s">' % cls or ''
parts = value.translate(escape_table).split('\n')
if tagsfile and ttype in Token.Name:
filename, linenumber = self._lookup_ctag(value)
if linenumber:
base, filename = os.path.split(filename)
if base:
base += '/'
filename, extension = os.path.splitext(filename)
url = self.tagurlformat % {'path': base, 'fname': filename,
'fext': extension}
parts[0] = "<a href=\"%s#%s-%d\">%s" % \
(url, self.lineanchors, linenumber, parts[0])
parts[-1] = parts[-1] + "</a>"
# for all but the last line
for part in parts[:-1]:
if line:
if lspan != cspan:
line += (lspan and '</span>') + cspan + part + \
(cspan and '</span>') + lsep
else: # both are the same
line += part + (lspan and '</span>') + lsep
yield 1, line
line = ''
elif part:
yield 1, cspan + part + (cspan and '</span>') + lsep
else:
yield 1, lsep
# for the last line
if line and parts[-1]:
if lspan != cspan:
line += (lspan and '</span>') + cspan + parts[-1]
lspan = cspan
else:
line += parts[-1]
elif parts[-1]:
line = cspan + parts[-1]
lspan = cspan
# else we neither have to open a new span nor set lspan
if line:
yield 1, line + (lspan and '</span>') + lsep
def _lookup_ctag(self, token):
entry = ctags.TagEntry()
if self._ctags.find(entry, token, 0):
return entry['file'], entry['lineNumber']
else:
return None, None
def _highlight_lines(self, tokensource):
"""
Highlight the lines specified in the `hl_lines` option by
post-processing the token stream coming from `_format_lines`.
"""
hls = self.hl_lines
for i, (t, value) in enumerate(tokensource):
if t != 1:
yield t, value
if i + 1 in hls: # i + 1 because Python indexes start at 0
if self.noclasses:
style = ''
if self.style.highlight_color is not None:
style = (' style="background-color: %s"' %
(self.style.highlight_color,))
yield 1, '<span%s>%s</span>' % (style, value)
else:
yield 1, '<span class="hll">%s</span>' % value
else:
yield 1, value
def wrap(self, source, outfile):
"""
Wrap the ``source``, which is a generator yielding
individual lines, in custom generators. See docstring
for `format`. Can be overridden.
"""
return self._wrap_div(self._wrap_pre(source))
def format_unencoded(self, tokensource, outfile):
"""
The formatting process uses several nested generators; which of
them are used is determined by the user's options.
Each generator should take at least one argument, ``inner``,
and wrap the pieces of text generated by this.
Always yield 2-tuples: (code, text). If "code" is 1, the text
is part of the original tokensource being highlighted, if it's
0, the text is some piece of wrapping. This makes it possible to
use several different wrappers that process the original source
linewise, e.g. line number generators.
"""
source = self._format_lines(tokensource)
if self.hl_lines:
source = self._highlight_lines(source)
if not self.nowrap:
if self.linenos == 2:
source = self._wrap_inlinelinenos(source)
if self.lineanchors:
source = self._wrap_lineanchors(source)
if self.linespans:
source = self._wrap_linespans(source)
source = self.wrap(source, outfile)
if self.linenos == 1:
source = self._wrap_tablelinenos(source)
if self.full:
source = self._wrap_full(source, outfile)
for t, piece in source:
outfile.write(piece)
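
A compact sketch tying the main options together; `get_style_defs()` supplies the CSS that the generated classes rely on, with the selector prefix behavior described in the docstring above:

    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter

    fmter = HtmlFormatter(linenos='table', hl_lines=[2], cssclass='src')
    html = highlight('def foo():\n    return 1\n', PythonLexer(), fmter)
    css = fmter.get_style_defs('.src')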

View File

@@ -1,560 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.formatters.img
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for Pixmap output.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
get_choice_opt, xrange
# Import this carefully
try:
from PIL import Image, ImageDraw, ImageFont
pil_available = True
except ImportError:
pil_available = False
try:
import _winreg
except ImportError:
try:
import winreg as _winreg
except ImportError:
_winreg = None
__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
'BmpImageFormatter']
# For some unknown reason every font calls it something different
STYLES = {
'NORMAL': ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'],
'ITALIC': ['Oblique', 'Italic'],
'BOLD': ['Bold'],
'BOLDITALIC': ['Bold Oblique', 'Bold Italic'],
}
# A sane default for modern systems
DEFAULT_FONT_NAME_NIX = 'Bitstream Vera Sans Mono'
DEFAULT_FONT_NAME_WIN = 'Courier New'
class PilNotAvailable(ImportError):
"""When Python imaging library is not available"""
class FontNotFound(Exception):
"""When there are no usable fonts specified"""
class FontManager(object):
"""
Manages a set of fonts: normal, italic, bold, etc...
"""
def __init__(self, font_name, font_size=14):
self.font_name = font_name
self.font_size = font_size
self.fonts = {}
self.encoding = None
if sys.platform.startswith('win'):
if not font_name:
self.font_name = DEFAULT_FONT_NAME_WIN
self._create_win()
else:
if not font_name:
self.font_name = DEFAULT_FONT_NAME_NIX
self._create_nix()
def _get_nix_font_path(self, name, style):
try:
from commands import getstatusoutput
except ImportError:
from subprocess import getstatusoutput
exit, out = getstatusoutput('fc-list "%s:style=%s" file' %
(name, style))
if not exit:
lines = out.splitlines()
if lines:
path = lines[0].strip().strip(':')
return path
def _create_nix(self):
for name in STYLES['NORMAL']:
path = self._get_nix_font_path(self.font_name, name)
if path is not None:
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
break
else:
raise FontNotFound('No usable fonts named: "%s"' %
self.font_name)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
for stylename in STYLES[style]:
path = self._get_nix_font_path(self.font_name, stylename)
if path is not None:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
break
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
def _lookup_win(self, key, basename, styles, fail=False):
for suffix in ('', ' (TrueType)'):
for style in styles:
try:
valname = '%s%s%s' % (basename, style and ' '+style, suffix)
val, _ = _winreg.QueryValueEx(key, valname)
return val
except EnvironmentError:
continue
else:
if fail:
raise FontNotFound('Font %s (%s) not found in registry' %
(basename, styles[0]))
return None
def _create_win(self):
try:
key = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
r'Software\Microsoft\Windows NT\CurrentVersion\Fonts')
except EnvironmentError:
try:
key = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
r'Software\Microsoft\Windows\CurrentVersion\Fonts')
except EnvironmentError:
raise FontNotFound('Can\'t open Windows font registry key')
try:
path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
path = self._lookup_win(key, self.font_name, STYLES[style])
if path:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
finally:
_winreg.CloseKey(key)
def get_char_size(self):
"""
Get the character size.
"""
return self.fonts['NORMAL'].getsize('M')
def get_font(self, bold, oblique):
"""
Get the font based on bold and italic flags.
"""
if bold and oblique:
return self.fonts['BOLDITALIC']
elif bold:
return self.fonts['BOLD']
elif oblique:
return self.fonts['ITALIC']
else:
return self.fonts['NORMAL']
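# --- A hedged aside, not part of the original img.py: FontManager also works
# on its own. Assumes a default monospace font (e.g. Bitstream Vera Sans
# Mono) is installed and discoverable via fc-list.
#
#     fm = FontManager('', font_size=14)
#     char_w, char_h = fm.get_char_size()
#     bold_italic = fm.get_font(True, True)   # bold + oblique variant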
class ImageFormatter(Formatter):
"""
Create a PNG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 0.10
Additional options accepted:
`image_format`
An image format to output to that is recognised by PIL, these include:
* "PNG" (default)
* "JPEG"
* "BMP"
* "GIF"
`line_pad`
The extra spacing (in pixels) between each line of text.
Default: 2
`font_name`
The font name to be used as the base font from which others, such as
bold and italic fonts will be generated. This really should be a
monospace font to look sane.
Default: "Bitstream Vera Sans Mono"
`font_size`
The font size in points to be used.
Default: 14
`image_pad`
The padding, in pixels to be used at each edge of the resulting image.
Default: 10
`line_numbers`
Whether line numbers should be shown: True/False
Default: True
`line_number_start`
The line number of the first line.
Default: 1
`line_number_step`
The step used when printing line numbers.
Default: 1
`line_number_bg`
The background colour (in "#123456" format) of the line number bar, or
None to use the style background color.
Default: "#eed"
`line_number_fg`
The text color of the line numbers (in "#123456"-like format).
Default: "#886"
`line_number_chars`
The number of columns of line numbers allowable in the line number
margin.
Default: 2
`line_number_bold`
Whether line numbers will be bold: True/False
Default: False
`line_number_italic`
Whether line numbers will be italicized: True/False
Default: False
`line_number_separator`
Whether a line will be drawn between the line number area and the
source code area: True/False
Default: True
`line_number_pad`
The horizontal padding (in pixels) between the line number margin, and
the source code area.
Default: 6
`hl_lines`
Specify a list of lines to be highlighted.
.. versionadded:: 1.2
Default: empty list
`hl_color`
Specify the color for highlighting lines.
.. versionadded:: 1.2
Default: highlight color of the selected style
"""
# Required by the pygments mapper
name = 'img'
aliases = ['img', 'IMG', 'png']
filenames = ['*.png']
unicodeoutput = False
default_image_format = 'png'
def __init__(self, **options):
"""
See the class docstring for explanation of options.
"""
if not pil_available:
raise PilNotAvailable(
'Python Imaging Library is required for this formatter')
Formatter.__init__(self, **options)
self.encoding = 'latin1' # let pygments.format() do the right thing
# Read the style
self.styles = dict(self.style)
if self.style.background_color is None:
self.background_color = '#fff'
else:
self.background_color = self.style.background_color
# Image options
self.image_format = get_choice_opt(
options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'],
self.default_image_format, normcase=True)
self.image_pad = get_int_opt(options, 'image_pad', 10)
self.line_pad = get_int_opt(options, 'line_pad', 2)
# The fonts
fontsize = get_int_opt(options, 'font_size', 14)
self.fonts = FontManager(options.get('font_name', ''), fontsize)
self.fontw, self.fonth = self.fonts.get_char_size()
# Line number options
self.line_number_fg = options.get('line_number_fg', '#886')
self.line_number_bg = options.get('line_number_bg', '#eed')
self.line_number_chars = get_int_opt(options,
'line_number_chars', 2)
self.line_number_bold = get_bool_opt(options,
'line_number_bold', False)
self.line_number_italic = get_bool_opt(options,
'line_number_italic', False)
self.line_number_pad = get_int_opt(options, 'line_number_pad', 6)
self.line_numbers = get_bool_opt(options, 'line_numbers', True)
self.line_number_separator = get_bool_opt(options,
'line_number_separator', True)
self.line_number_step = get_int_opt(options, 'line_number_step', 1)
self.line_number_start = get_int_opt(options, 'line_number_start', 1)
if self.line_numbers:
self.line_number_width = (self.fontw * self.line_number_chars +
self.line_number_pad * 2)
else:
self.line_number_width = 0
self.hl_lines = []
hl_lines_str = get_list_opt(options, 'hl_lines', [])
for line in hl_lines_str:
try:
self.hl_lines.append(int(line))
except ValueError:
pass
self.hl_color = options.get('hl_color',
self.style.highlight_color) or '#f90'
self.drawables = []
def get_style_defs(self, arg=''):
raise NotImplementedError('The -S option is meaningless for the image '
'formatter. Use -O style=<stylename> instead.')
def _get_line_height(self):
"""
Get the height of a line.
"""
return self.fonth + self.line_pad
def _get_line_y(self, lineno):
"""
Get the Y coordinate of a line number.
"""
return lineno * self._get_line_height() + self.image_pad
def _get_char_width(self):
"""
Get the width of a character.
"""
return self.fontw
def _get_char_x(self, charno):
"""
Get the X coordinate of a character position.
"""
return charno * self.fontw + self.image_pad + self.line_number_width
def _get_text_pos(self, charno, lineno):
"""
Get the actual position for a character and line position.
"""
return self._get_char_x(charno), self._get_line_y(lineno)
def _get_linenumber_pos(self, lineno):
"""
Get the actual position for the start of a line number.
"""
return (self.image_pad, self._get_line_y(lineno))
def _get_text_color(self, style):
"""
Get the correct color for the token from the style.
"""
if style['color'] is not None:
fill = '#' + style['color']
else:
fill = '#000'
return fill
def _get_style_font(self, style):
"""
Get the correct font for the style.
"""
return self.fonts.get_font(style['bold'], style['italic'])
def _get_image_size(self, maxcharno, maxlineno):
"""
Get the required image size.
"""
return (self._get_char_x(maxcharno) + self.image_pad,
self._get_line_y(maxlineno) + self.image_pad)
def _draw_linenumber(self, posno, lineno):
"""
Remember a line number drawable to paint later.
"""
self._draw_text(
self._get_linenumber_pos(posno),
str(lineno).rjust(self.line_number_chars),
font=self.fonts.get_font(self.line_number_bold,
self.line_number_italic),
fill=self.line_number_fg,
)
def _draw_text(self, pos, text, font, **kw):
"""
Remember a single drawable tuple to paint later.
"""
self.drawables.append((pos, text, font, kw))
def _create_drawables(self, tokensource):
"""
Create drawables for the token content.
"""
lineno = charno = maxcharno = 0
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
style = self.styles[ttype]
# TODO: make sure tab expansion happens earlier in the chain. It
# really ought to be done on the input, as to do it right here is
# quite complex.
value = value.expandtabs(4)
lines = value.splitlines(True)
# print lines
for i, line in enumerate(lines):
temp = line.rstrip('\n')
if temp:
self._draw_text(
self._get_text_pos(charno, lineno),
temp,
font = self._get_style_font(style),
fill = self._get_text_color(style)
)
charno += len(temp)
maxcharno = max(maxcharno, charno)
if line.endswith('\n'):
# add a line for each extra line in the value
charno = 0
lineno += 1
self.maxcharno = maxcharno
self.maxlineno = lineno
def _draw_line_numbers(self):
"""
Create drawables for the line numbers.
"""
if not self.line_numbers:
return
for p in xrange(self.maxlineno):
n = p + self.line_number_start
if (n % self.line_number_step) == 0:
self._draw_linenumber(p, n)
def _paint_line_number_bg(self, im):
"""
Paint the line number background on the image.
"""
if not self.line_numbers:
return
if self.line_number_fg is None:
return
draw = ImageDraw.Draw(im)
recth = im.size[-1]
rectw = self.image_pad + self.line_number_width - self.line_number_pad
draw.rectangle([(0, 0), (rectw, recth)],
fill=self.line_number_bg)
draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
del draw
def format(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
This implementation calculates where it should draw each token on the
pixmap, then calculates the required pixmap size and draws the items.
"""
self._create_drawables(tokensource)
self._draw_line_numbers()
im = Image.new(
'RGB',
self._get_image_size(self.maxcharno, self.maxlineno),
self.background_color
)
self._paint_line_number_bg(im)
draw = ImageDraw.Draw(im)
# Highlight
if self.hl_lines:
x = self.image_pad + self.line_number_width - self.line_number_pad + 1
recth = self._get_line_height()
rectw = im.size[0] - x
for linenumber in self.hl_lines:
y = self._get_line_y(linenumber - 1)
draw.rectangle([(x, y), (x + rectw, y + recth)],
fill=self.hl_color)
for pos, value, font, kw in self.drawables:
draw.text(pos, value, font=font, **kw)
im.save(outfile, self.image_format.upper())
# Add one formatter per format, so that the "-f gif" option gives the correct result
# when used in pygmentize.
class GifImageFormatter(ImageFormatter):
"""
Create a GIF image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_gif'
aliases = ['gif']
filenames = ['*.gif']
default_image_format = 'gif'
class JpgImageFormatter(ImageFormatter):
"""
Create a JPEG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_jpg'
aliases = ['jpg', 'jpeg']
filenames = ['*.jpg']
default_image_format = 'jpeg'
class BmpImageFormatter(ImageFormatter):
"""
Create a bitmap image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_bmp'
aliases = ['bmp', 'bitmap']
filenames = ['*.bmp']
default_image_format = 'bmp'
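# Minimal usage sketch for the image formatters above. It assumes Pillow/PIL
# is installed and a monospace font resolvable by FontManager is available;
# the option values are illustrative, not required.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import PythonLexer

    formatter = ImageFormatter(font_size=14, line_numbers=True,
                               hl_lines=[2], hl_color='#ffffcc')
    # The image formatters write binary data, so open the file in 'wb' mode.
    with open('example.png', 'wb') as fp:
        highlight('def foo(bar):\n    pass\n', PythonLexer(), formatter, fp)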

View File

@@ -1,476 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.formatters.latex
~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for LaTeX fancyvrb output.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division
from pygments.formatter import Formatter
from pygments.lexer import Lexer
from pygments.token import Token, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, StringIO, xrange, \
iteritems
__all__ = ['LatexFormatter']
def escape_tex(text, commandprefix):
return text.replace('\\', '\x00'). \
replace('{', '\x01'). \
replace('}', '\x02'). \
replace('\x00', r'\%sZbs{}' % commandprefix). \
replace('\x01', r'\%sZob{}' % commandprefix). \
replace('\x02', r'\%sZcb{}' % commandprefix). \
replace('^', r'\%sZca{}' % commandprefix). \
replace('_', r'\%sZus{}' % commandprefix). \
replace('&', r'\%sZam{}' % commandprefix). \
replace('<', r'\%sZlt{}' % commandprefix). \
replace('>', r'\%sZgt{}' % commandprefix). \
replace('#', r'\%sZsh{}' % commandprefix). \
replace('%', r'\%sZpc{}' % commandprefix). \
replace('$', r'\%sZdl{}' % commandprefix). \
replace('-', r'\%sZhy{}' % commandprefix). \
replace("'", r'\%sZsq{}' % commandprefix). \
replace('"', r'\%sZdq{}' % commandprefix). \
replace('~', r'\%sZti{}' % commandprefix)
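# Worked example: the placeholder bytes (\x00-\x02) keep the later passes
# from re-escaping the braces introduced by the replacement commands
# themselves. With commandprefix 'PY':
#   escape_tex(r'\emph{50%}', 'PY')
#   == r'\PYZbs{}emph\PYZob{}50\PYZpc{}\PYZcb{}'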
DOC_TEMPLATE = r'''
\documentclass{%(docclass)s}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[%(encoding)s]{inputenc}
%(preamble)s
%(styledefs)s
\begin{document}
\section*{%(title)s}
%(code)s
\end{document}
'''
## Small explanation of the mess below :)
#
# The previous version of the LaTeX formatter just assigned a command to
# each token type defined in the current style. That obviously is
# problematic if the highlighted code is produced for a different style
# than the style commands themselves.
#
# This version works much like the HTML formatter which assigns multiple
# CSS classes to each <span> tag, from the most specific to the least
# specific token type, thus falling back to the parent token type if one
# is not defined. Here, the classes are there too and use the same short
# forms given in token.STANDARD_TYPES.
#
# Highlighted code now only uses one custom command, which by default is
# \PY and selectable by the commandprefix option (and in addition the
# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
# backwards compatibility purposes).
#
# \PY has two arguments: the classes, separated by +, and the text to
# render in that style. The classes are resolved into the respective
# style commands by magic, which serves to ignore unknown classes.
#
# The magic macros are:
# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
# to render in \PY@do. Their definition determines the style.
# * \PY@reset resets \PY@it etc. to do nothing.
# * \PY@toks parses the list of classes, using magic inspired by the
# keyval package (but modified to use plusses instead of commas
# because fancyvrb redefines commas inside its environments).
# * \PY@tok processes one class, calling the \PY@tok@classname command
# if it exists.
# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
# for its class.
# * \PY resets the style, parses the classnames and then calls \PY@do.
#
# Tip: to read this code, print it out in substituted form using e.g.
# >>> print STYLE_TEMPLATE % {'cp': 'PY'}
STYLE_TEMPLATE = r'''
\makeatletter
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
\let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
\let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
\%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
\%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}
%(styles)s
\def\%(cp)sZbs{\char`\\}
\def\%(cp)sZus{\char`\_}
\def\%(cp)sZob{\char`\{}
\def\%(cp)sZcb{\char`\}}
\def\%(cp)sZca{\char`\^}
\def\%(cp)sZam{\char`\&}
\def\%(cp)sZlt{\char`\<}
\def\%(cp)sZgt{\char`\>}
\def\%(cp)sZsh{\char`\#}
\def\%(cp)sZpc{\char`\%%}
\def\%(cp)sZdl{\char`\$}
\def\%(cp)sZhy{\char`\-}
\def\%(cp)sZsq{\char`\'}
\def\%(cp)sZdq{\char`\"}
\def\%(cp)sZti{\char`\~}
%% for compatibility with earlier versions
\def\%(cp)sZat{@}
\def\%(cp)sZlb{[}
\def\%(cp)sZrb{]}
\makeatother
'''
def _get_ttype_name(ttype):
fname = STANDARD_TYPES.get(ttype)
if fname:
return fname
aname = ''
while fname is None:
aname = ttype[-1] + aname
ttype = ttype.parent
fname = STANDARD_TYPES.get(ttype)
return fname + aname
class LatexFormatter(Formatter):
r"""
Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
standard packages.
Without the `full` option, code is formatted as one ``Verbatim``
environment, like this:
.. sourcecode:: latex
\begin{Verbatim}[commandchars=\\\{\}]
\PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
\PY{k}{pass}
\end{Verbatim}
The special command used here (``\PY``) and all the other macros it needs
are output by the `get_style_defs` method.
With the `full` option, a complete LaTeX document is output, including
the command definitions in the preamble.
The `get_style_defs()` method of a `LatexFormatter` returns a string
containing ``\def`` commands defining the macros needed inside the
``Verbatim`` environments.
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`docclass`
If the `full` option is enabled, this is the document class to use
(default: ``'article'``).
`preamble`
If the `full` option is enabled, this can be further preamble commands,
e.g. ``\usepackage`` (default: ``''``).
`linenos`
If set to ``True``, output line numbers (default: ``False``).
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`verboptions`
Additional options given to the Verbatim environment (see the *fancyvrb*
docs for possible values) (default: ``''``).
`commandprefix`
The LaTeX commands used to produce colored output are constructed
using this prefix and some letters (default: ``'PY'``).
.. versionadded:: 0.7
.. versionchanged:: 0.10
The default is now ``'PY'`` instead of ``'C'``.
`texcomments`
If set to ``True``, enables LaTeX comment lines. That is, LaTex markup
in comment tokens is not escaped so that LaTeX can render it (default:
``False``).
.. versionadded:: 1.2
`mathescape`
If set to ``True``, enables LaTeX math mode escape in comments. That
is, ``'$...$'`` inside a comment will trigger math mode (default:
``False``).
.. versionadded:: 1.2
`escapeinside`
If set to a string of length 2, enables escaping to LaTeX. Text
delimited by these 2 characters is read as LaTeX code and
typeset accordingly. It has no effect in string literals. It has
no effect in comments if `texcomments` or `mathescape` is
set. (default: ``''``).
.. versionadded:: 2.0
`envname`
Allows you to pick an alternative environment name replacing Verbatim.
The alternate environment still has to support Verbatim's option syntax.
(default: ``'Verbatim'``).
.. versionadded:: 2.0
"""
name = 'LaTeX'
aliases = ['latex', 'tex']
filenames = ['*.tex']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.docclass = options.get('docclass', 'article')
self.preamble = options.get('preamble', '')
self.linenos = get_bool_opt(options, 'linenos', False)
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
self.verboptions = options.get('verboptions', '')
self.nobackground = get_bool_opt(options, 'nobackground', False)
self.commandprefix = options.get('commandprefix', 'PY')
self.texcomments = get_bool_opt(options, 'texcomments', False)
self.mathescape = get_bool_opt(options, 'mathescape', False)
self.escapeinside = options.get('escapeinside', '')
if len(self.escapeinside) == 2:
self.left = self.escapeinside[0]
self.right = self.escapeinside[1]
else:
self.escapeinside = ''
self.envname = options.get('envname', u'Verbatim')
self._create_stylesheet()
def _create_stylesheet(self):
t2n = self.ttype2name = {Token: ''}
c2d = self.cmd2def = {}
cp = self.commandprefix
def rgbcolor(col):
if col:
return ','.join(['%.2f' % (int(col[i] + col[i + 1], 16) / 255.0)
for i in (0, 2, 4)])
else:
return '1,1,1'
for ttype, ndef in self.style:
name = _get_ttype_name(ttype)
cmndef = ''
if ndef['bold']:
cmndef += r'\let\$$@bf=\textbf'
if ndef['italic']:
cmndef += r'\let\$$@it=\textit'
if ndef['underline']:
cmndef += r'\let\$$@ul=\underline'
if ndef['roman']:
cmndef += r'\let\$$@ff=\textrm'
if ndef['sans']:
cmndef += r'\let\$$@ff=\textsf'
if ndef['mono']:
cmndef += r'\let\$$@ff=\textsf'
if ndef['color']:
cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
rgbcolor(ndef['color']))
if ndef['border']:
cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}' %
(rgbcolor(ndef['border']),
rgbcolor(ndef['bgcolor'])))
elif ndef['bgcolor']:
cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
r'\colorbox[rgb]{%s}{\strut ##1}}' %
rgbcolor(ndef['bgcolor']))
if cmndef == '':
continue
cmndef = cmndef.replace('$$', cp)
t2n[ttype] = name
c2d[name] = cmndef
def get_style_defs(self, arg=''):
"""
Return the command sequences needed to define the commands
used to format text in the verbatim environment. ``arg`` is ignored.
"""
cp = self.commandprefix
styles = []
for name, definition in iteritems(self.cmd2def):
styles.append(r'\expandafter\def\csname %s@tok@%s\endcsname{%s}' %
(cp, name, definition))
return STYLE_TEMPLATE % {'cp': self.commandprefix,
'styles': '\n'.join(styles)}
def format_unencoded(self, tokensource, outfile):
# TODO: add support for background colors
t2n = self.ttype2name
cp = self.commandprefix
if self.full:
realoutfile = outfile
outfile = StringIO()
outfile.write(u'\\begin{' + self.envname + u'}[commandchars=\\\\\\{\\}')
if self.linenos:
start, step = self.linenostart, self.linenostep
outfile.write(u',numbers=left' +
(start and u',firstnumber=%d' % start or u'') +
(step and u',stepnumber=%d' % step or u''))
if self.mathescape or self.texcomments or self.escapeinside:
outfile.write(u',codes={\\catcode`\\$=3\\catcode`\\^=7\\catcode`\\_=8}')
if self.verboptions:
outfile.write(u',' + self.verboptions)
outfile.write(u']\n')
for ttype, value in tokensource:
if ttype in Token.Comment:
if self.texcomments:
# Try to guess comment starting lexeme and escape it ...
start = value[0:1]
for i in xrange(1, len(value)):
if start[0] != value[i]:
break
start += value[i]
value = value[len(start):]
start = escape_tex(start, self.commandprefix)
# ... but do not escape inside comment.
value = start + value
elif self.mathescape:
# Only escape parts not inside a math environment.
parts = value.split('$')
in_math = False
for i, part in enumerate(parts):
if not in_math:
parts[i] = escape_tex(part, self.commandprefix)
in_math = not in_math
value = '$'.join(parts)
elif self.escapeinside:
text = value
value = ''
while len(text) > 0:
a, sep1, text = text.partition(self.left)
if len(sep1) > 0:
b, sep2, text = text.partition(self.right)
if len(sep2) > 0:
value += escape_tex(a, self.commandprefix) + b
else:
value += escape_tex(a + sep1 + b, self.commandprefix)
else:
value = value + escape_tex(a, self.commandprefix)
else:
value = escape_tex(value, self.commandprefix)
elif ttype not in Token.Escape:
value = escape_tex(value, self.commandprefix)
styles = []
while ttype is not Token:
try:
styles.append(t2n[ttype])
except KeyError:
# not in current style
styles.append(_get_ttype_name(ttype))
ttype = ttype.parent
styleval = '+'.join(reversed(styles))
if styleval:
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
outfile.write('\n')
if spl[-1]:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
else:
outfile.write(value)
outfile.write(u'\\end{' + self.envname + u'}\n')
if self.full:
realoutfile.write(DOC_TEMPLATE %
dict(docclass = self.docclass,
preamble = self.preamble,
title = self.title,
encoding = self.encoding or 'utf8',
styledefs = self.get_style_defs(),
code = outfile.getvalue()))
class LatexEmbeddedLexer(Lexer):
r"""
This lexer takes one lexer as argument, the lexer for the language
being formatted, and the left and right delimiters for escaped text.
First everything is scanned using the language lexer to obtain
strings and comments. All other consecutive tokens are merged and
the resulting text is scanned for escaped segments, which are given
the Token.Escape type. Finally text that is not escaped is scanned
again with the language lexer.
"""
def __init__(self, left, right, lang, **options):
self.left = left
self.right = right
self.lang = lang
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
buf = ''
idx = 0
for i, t, v in self.lang.get_tokens_unprocessed(text):
if t in Token.Comment or t in Token.String:
if buf:
for x in self.get_tokens_aux(idx, buf):
yield x
buf = ''
yield i, t, v
else:
if not buf:
idx = i
buf += v
if buf:
for x in self.get_tokens_aux(idx, buf):
yield x
def get_tokens_aux(self, index, text):
while text:
a, sep1, text = text.partition(self.left)
if a:
for i, t, v in self.lang.get_tokens_unprocessed(a):
yield index + i, t, v
index += len(a)
if sep1:
b, sep2, text = text.partition(self.right)
if sep2:
yield index + len(sep1), Token.Escape, b
index += len(sep1) + len(b) + len(sep2)
else:
yield index, Token.Error, sep1
index += len(sep1)
text = b
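# Minimal usage sketch for LatexFormatter; with full=True the emitted
# document needs the fancyvrb and color LaTeX packages, as noted above.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import PythonLexer

    tex = highlight('def foo(bar):\n    pass\n', PythonLexer(),
                    LatexFormatter(full=True, linenos=True))
    # Without full=True, put LatexFormatter().get_style_defs() in the
    # preamble and paste the returned Verbatim snippet into the document.
    print(tex)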

View File

@@ -1,160 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.formatters.other
~~~~~~~~~~~~~~~~~~~~~~~~~
Other formatters: NullFormatter, RawTokenFormatter.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import OptionError, get_choice_opt
from pygments.token import Token
from pygments.console import colorize
__all__ = ['NullFormatter', 'RawTokenFormatter', 'TestcaseFormatter']
class NullFormatter(Formatter):
"""
Output the text unchanged without any formatting.
"""
name = 'Text only'
aliases = ['text', 'null']
filenames = ['*.txt']
def format(self, tokensource, outfile):
enc = self.encoding
for ttype, value in tokensource:
if enc:
outfile.write(value.encode(enc))
else:
outfile.write(value)
class RawTokenFormatter(Formatter):
r"""
Format tokens as a raw representation for storing token streams.
The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
be converted to a token stream with the `RawTokenLexer`, described in the
:doc:`lexer list <lexers>`.
Only two options are accepted:
`compress`
If set to ``'gz'`` or ``'bz2'``, compress the output with the given
compression algorithm after encoding (default: ``''``).
`error_color`
If set to a color name, highlight error tokens using that color. If
set but with no value, defaults to ``'red'``.
.. versionadded:: 0.11
"""
name = 'Raw tokens'
aliases = ['raw', 'tokens']
filenames = ['*.raw']
unicodeoutput = False
def __init__(self, **options):
Formatter.__init__(self, **options)
# We ignore self.encoding if it is set, since it gets set for lexer
# and formatter if given with -Oencoding on the command line.
# The RawTokenFormatter outputs only ASCII. Override here.
self.encoding = 'ascii' # let pygments.format() do the right thing
self.compress = get_choice_opt(options, 'compress',
['', 'none', 'gz', 'bz2'], '')
self.error_color = options.get('error_color', None)
if self.error_color is True:
self.error_color = 'red'
if self.error_color is not None:
try:
colorize(self.error_color, '')
except KeyError:
raise ValueError("Invalid color %r specified" %
self.error_color)
def format(self, tokensource, outfile):
try:
outfile.write(b'')
except TypeError:
raise TypeError('The raw tokens formatter needs a binary '
'output file')
if self.compress == 'gz':
import gzip
outfile = gzip.GzipFile('', 'wb', 9, outfile)
def write(text):
outfile.write(text.encode())
flush = outfile.flush
elif self.compress == 'bz2':
import bz2
compressor = bz2.BZ2Compressor(9)
def write(text):
outfile.write(compressor.compress(text.encode()))
def flush():
outfile.write(compressor.flush())
outfile.flush()
else:
def write(text):
outfile.write(text.encode())
flush = outfile.flush
if self.error_color:
for ttype, value in tokensource:
line = "%s\t%r\n" % (ttype, value)
if ttype is Token.Error:
write(colorize(self.error_color, line))
else:
write(line)
else:
for ttype, value in tokensource:
write("%s\t%r\n" % (ttype, value))
flush()
TESTCASE_BEFORE = u'''\
def testNeedsName(self):
fragment = %r
tokens = [
'''
TESTCASE_AFTER = u'''\
]
self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
'''
class TestcaseFormatter(Formatter):
"""
Format tokens as appropriate for a new testcase.
.. versionadded:: 2.0
"""
name = 'Testcase'
aliases = ['testcase']
def __init__(self, **options):
Formatter.__init__(self, **options)
if self.encoding is not None and self.encoding != 'utf-8':
raise ValueError("Only None and utf-8 are allowed encodings.")
def format(self, tokensource, outfile):
indentation = ' ' * 12
rawbuf = []
outbuf = []
for ttype, value in tokensource:
rawbuf.append(value)
outbuf.append('%s(%s, %r),\n' % (indentation, ttype, value))
before = TESTCASE_BEFORE % (u''.join(rawbuf),)
during = u''.join(outbuf)
after = TESTCASE_AFTER
if self.encoding is None:
outfile.write(before + during + after)
else:
outfile.write(before.encode('utf-8'))
outfile.write(during.encode('utf-8'))
outfile.write(after.encode('utf-8'))
outfile.flush()
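# Round-trip sketch for RawTokenFormatter: the output stream must be binary,
# as the write check in format() above enforces, and each emitted line is
# "tokentype<TAB>repr(value)", which the RawTokenLexer (alias 'raw') can
# read back into a token stream.
if __name__ == '__main__':
    from io import BytesIO
    from pygments import highlight
    from pygments.lexers import PythonLexer, get_lexer_by_name

    buf = BytesIO()
    highlight('x = 1\n', PythonLexer(), RawTokenFormatter(), buf)
    raw = buf.getvalue()
    for ttype, value in get_lexer_by_name('raw').get_tokens(raw.decode('ascii')):
        print((ttype, value))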

View File

@@ -1,147 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.formatters.rtf
~~~~~~~~~~~~~~~~~~~~~~~
A formatter that generates RTF files.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_int_opt, _surrogatepair
__all__ = ['RtfFormatter']
class RtfFormatter(Formatter):
"""
Format tokens as RTF markup. This formatter automatically outputs full RTF
documents with color information and other useful stuff. Perfect for Copy and
Paste into Microsoft(R) Word(R) documents.
Please note that ``encoding`` and ``outencoding`` options are ignored.
The RTF format is ASCII natively, but handles unicode characters correctly
thanks to escape sequences.
.. versionadded:: 0.6
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`fontface`
The font family to use, for example ``Bitstream Vera Sans``. Defaults to
some generic font which is supposed to have fixed width.
`fontsize`
Size of the font used. Size is specified in half points. The
default is 24 half-points, giving a size 12 font.
.. versionadded:: 2.0
"""
name = 'RTF'
aliases = ['rtf']
filenames = ['*.rtf']
def __init__(self, **options):
r"""
Additional options accepted:
``fontface``
Name of the font used. Could for example be ``'Courier New'``
to further specify the default, which is ``'\fmodern'``. The RTF
specification claims that ``\fmodern`` fonts are "Fixed-pitch serif
and sans serif fonts". Hope every RTF implementation thinks
the same about modern...
"""
Formatter.__init__(self, **options)
self.fontface = options.get('fontface') or ''
self.fontsize = get_int_opt(options, 'fontsize', 0)
def _escape(self, text):
return text.replace(u'\\', u'\\\\') \
.replace(u'{', u'\\{') \
.replace(u'}', u'\\}')
def _escape_text(self, text):
# skip empty strings; this should give a small performance improvement
if not text:
return u''
# escape text
text = self._escape(text)
buf = []
for c in text:
cn = ord(c)
if cn < (2**7):
# ASCII character
buf.append(str(c))
elif (2**7) <= cn < (2**16):
# single unicode escape sequence
buf.append(u'{\\u%d}' % cn)
elif (2**16) <= cn:
# RTF limits unicode to 16 bits.
# Force surrogate pairs
buf.append(u'{\\u%d}{\\u%d}' % _surrogatepair(cn))
return u''.join(buf).replace(u'\n', u'\\par\n')
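# Worked example for the escaping above: u'é' (U+00E9, code point 233)
# becomes '{\u233}', while an astral character such as U+1F600 (code
# point 128512) exceeds RTF's 16-bit \u limit and is forced into the
# surrogate pair '{\u55357}{\u56832}'.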
def format_unencoded(self, tokensource, outfile):
# rtf 1.8 header
outfile.write(u'{\\rtf1\\ansi\\uc0\\deff0'
u'{\\fonttbl{\\f0\\fmodern\\fprq1\\fcharset0%s;}}'
u'{\\colortbl;' % (self.fontface and
u' ' + self._escape(self.fontface) or
u''))
# convert colors and save them in a mapping to access them later.
color_mapping = {}
offset = 1
for _, style in self.style:
for color in style['color'], style['bgcolor'], style['border']:
if color and color not in color_mapping:
color_mapping[color] = offset
outfile.write(u'\\red%d\\green%d\\blue%d;' % (
int(color[0:2], 16),
int(color[2:4], 16),
int(color[4:6], 16)
))
offset += 1
outfile.write(u'}\\f0 ')
if self.fontsize:
outfile.write(u'\\fs%d' % (self.fontsize))
# highlight stream
for ttype, value in tokensource:
while not self.style.styles_token(ttype) and ttype.parent:
ttype = ttype.parent
style = self.style.style_for_token(ttype)
buf = []
if style['bgcolor']:
buf.append(u'\\cb%d' % color_mapping[style['bgcolor']])
if style['color']:
buf.append(u'\\cf%d' % color_mapping[style['color']])
if style['bold']:
buf.append(u'\\b')
if style['italic']:
buf.append(u'\\i')
if style['underline']:
buf.append(u'\\ul')
if style['border']:
buf.append(u'\\chbrdr\\chcfpat%d' %
color_mapping[style['border']])
start = u''.join(buf)
if start:
outfile.write(u'{%s ' % start)
outfile.write(self._escape_text(value))
if start:
outfile.write(u'}')
outfile.write(u'}')

View File

@@ -1,153 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.formatters.svg
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for SVG output.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt
__all__ = ['SvgFormatter']
def escape_html(text):
"""Escape &, <, > as well as single and double quotes for HTML."""
return text.replace('&', '&amp;'). \
replace('<', '&lt;'). \
replace('>', '&gt;'). \
replace('"', '&quot;'). \
replace("'", '&#39;')
class2style = {}
class SvgFormatter(Formatter):
"""
Format tokens as an SVG graphics file. This formatter is still experimental.
Each line of code is a ``<text>`` element with explicit ``x`` and ``y``
coordinates containing ``<tspan>`` elements with the individual token styles.
By default, this formatter outputs a full SVG document including doctype
declaration and the ``<svg>`` root element.
.. versionadded:: 0.9
Additional options accepted:
`nowrap`
Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and
don't add an XML declaration and a doctype. If true, the `fontfamily`
and `fontsize` options are ignored. Defaults to ``False``.
`fontfamily`
The value to give the wrapping ``<g>`` element's ``font-family``
attribute, defaults to ``"monospace"``.
`fontsize`
The value to give the wrapping ``<g>`` element's ``font-size``
attribute, defaults to ``"14px"``.
`xoffset`
Starting offset in X direction, defaults to ``0``.
`yoffset`
Starting offset in Y direction, defaults to the font size if it is given
in pixels, and ``20`` otherwise. (This is necessary since text coordinates
refer to the text baseline, not the top edge.)
`ystep`
Offset to add to the Y coordinate for each subsequent line. This should
roughly be the text size plus 5. It defaults to that value if the text
size is given in pixels, and ``25`` otherwise.
`spacehack`
Convert spaces in the source to ``&#160;``, which are non-breaking
spaces. SVG provides the ``xml:space`` attribute to control how
whitespace inside tags is handled; in theory, the ``preserve`` value
could be used to keep all whitespace as-is. However, many current SVG
viewers don't obey that rule, so this option is provided as a workaround
and defaults to ``True``.
"""
name = 'SVG'
aliases = ['svg']
filenames = ['*.svg']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.nowrap = get_bool_opt(options, 'nowrap', False)
self.fontfamily = options.get('fontfamily', 'monospace')
self.fontsize = options.get('fontsize', '14px')
self.xoffset = get_int_opt(options, 'xoffset', 0)
fs = self.fontsize.strip()
if fs.endswith('px'): fs = fs[:-2].strip()
try:
int_fs = int(fs)
except ValueError:
int_fs = 20
self.yoffset = get_int_opt(options, 'yoffset', int_fs)
self.ystep = get_int_opt(options, 'ystep', int_fs + 5)
self.spacehack = get_bool_opt(options, 'spacehack', True)
self._stylecache = {}
def format_unencoded(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
For our implementation we put all lines in their own 'line group'.
"""
x = self.xoffset
y = self.yoffset
if not self.nowrap:
if self.encoding:
outfile.write('<?xml version="1.0" encoding="%s"?>\n' %
self.encoding)
else:
outfile.write('<?xml version="1.0"?>\n')
outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" '
'"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/'
'svg10.dtd">\n')
outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
outfile.write('<g font-family="%s" font-size="%s">\n' %
(self.fontfamily, self.fontsize))
outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (x, y))
for ttype, value in tokensource:
style = self._get_style(ttype)
tspan = style and '<tspan' + style + '>' or ''
tspanend = tspan and '</tspan>' or ''
value = escape_html(value)
if self.spacehack:
value = value.expandtabs().replace(' ', '&#160;')
parts = value.split('\n')
for part in parts[:-1]:
outfile.write(tspan + part + tspanend)
y += self.ystep
outfile.write('</text>\n<text x="%s" y="%s" '
'xml:space="preserve">' % (x, y))
outfile.write(tspan + parts[-1] + tspanend)
outfile.write('</text>')
if not self.nowrap:
outfile.write('</g></svg>\n')
def _get_style(self, tokentype):
if tokentype in self._stylecache:
return self._stylecache[tokentype]
otokentype = tokentype
while not self.style.styles_token(tokentype):
tokentype = tokentype.parent
value = self.style.style_for_token(tokentype)
result = ''
if value['color']:
result = ' fill="#' + value['color'] + '"'
if value['bold']:
result += ' font-weight="bold"'
if value['italic']:
result += ' font-style="italic"'
self._stylecache[otokentype] = result
return result
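# Minimal usage sketch for SvgFormatter:
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import PythonLexer

    svg = highlight('def foo(bar):\n    pass\n', PythonLexer(),
                    SvgFormatter(fontsize='14px'))
    # svg is a complete document; pass nowrap=True to get bare <text>
    # elements instead.
    print(svg)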

View File

@@ -1,151 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.formatters.terminal
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for terminal output with ANSI sequences.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from pygments.formatter import Formatter
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace
from pygments.console import ansiformat
from pygments.util import get_choice_opt
__all__ = ['TerminalFormatter']
#: Map token types to a tuple of color values for light and dark
#: backgrounds.
TERMINAL_COLORS = {
Token: ('', ''),
Whitespace: ('lightgray', 'darkgray'),
Comment: ('lightgray', 'darkgray'),
Comment.Preproc: ('teal', 'turquoise'),
Keyword: ('darkblue', 'blue'),
Keyword.Type: ('teal', 'turquoise'),
Operator.Word: ('purple', 'fuchsia'),
Name.Builtin: ('teal', 'turquoise'),
Name.Function: ('darkgreen', 'green'),
Name.Namespace: ('_teal_', '_turquoise_'),
Name.Class: ('_darkgreen_', '_green_'),
Name.Exception: ('teal', 'turquoise'),
Name.Decorator: ('darkgray', 'lightgray'),
Name.Variable: ('darkred', 'red'),
Name.Constant: ('darkred', 'red'),
Name.Attribute: ('teal', 'turquoise'),
Name.Tag: ('blue', 'blue'),
String: ('brown', 'brown'),
Number: ('darkblue', 'blue'),
Generic.Deleted: ('red', 'red'),
Generic.Inserted: ('darkgreen', 'green'),
Generic.Heading: ('**', '**'),
Generic.Subheading: ('*purple*', '*fuchsia*'),
Generic.Error: ('red', 'red'),
Error: ('_red_', '_red_'),
}
class TerminalFormatter(Formatter):
r"""
Format tokens with ANSI color sequences, for output in a text console.
Color sequences are terminated at newlines, so that paging the output
works correctly.
The `get_style_defs()` method doesn't do anything special since there is
no support for common styles.
Options accepted:
`bg`
Set to ``"light"`` or ``"dark"`` depending on the terminal's background
(default: ``"light"``).
`colorscheme`
A dictionary mapping token types to (lightbg, darkbg) color names or
``None`` (default: ``None`` = use builtin colorscheme).
`linenos`
Set to ``True`` to have line numbers on the terminal output as well
(default: ``False`` = no line numbers).
"""
name = 'Terminal'
aliases = ['terminal', 'console']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self.darkbg = get_choice_opt(options, 'bg',
['light', 'dark'], 'light') == 'dark'
self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS
self.linenos = options.get('linenos', False)
self._lineno = 0
def format(self, tokensource, outfile):
# hack: if the output is a terminal and has an encoding set,
# use that to avoid unicode encode problems
if not self.encoding and hasattr(outfile, "encoding") and \
hasattr(outfile, "isatty") and outfile.isatty() and \
sys.version_info < (3,):
self.encoding = outfile.encoding
return Formatter.format(self, tokensource, outfile)
def _write_lineno(self, outfile):
self._lineno += 1
outfile.write("\n%04d: " % self._lineno)
def _format_unencoded_with_lineno(self, tokensource, outfile):
self._write_lineno(outfile)
for ttype, value in tokensource:
if value.endswith("\n"):
self._write_lineno(outfile)
value = value[:-1]
color = self.colorscheme.get(ttype)
while color is None:
ttype = ttype[:-1]
color = self.colorscheme.get(ttype)
if color:
color = color[self.darkbg]
spl = value.split('\n')
for line in spl[:-1]:
self._write_lineno(outfile)
if line:
outfile.write(ansiformat(color, line[:-1]))
if spl[-1]:
outfile.write(ansiformat(color, spl[-1]))
else:
outfile.write(value)
outfile.write("\n")
def format_unencoded(self, tokensource, outfile):
if self.linenos:
self._format_unencoded_with_lineno(tokensource, outfile)
return
for ttype, value in tokensource:
color = self.colorscheme.get(ttype)
while color is None:
ttype = ttype[:-1]
color = self.colorscheme.get(ttype)
if color:
color = color[self.darkbg]
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write(ansiformat(color, line))
outfile.write('\n')
if spl[-1]:
outfile.write(ansiformat(color, spl[-1]))
else:
outfile.write(value)
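# Minimal usage sketch for TerminalFormatter (writes ANSI sequences
# directly to stdout):
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import PythonLexer

    highlight('def foo(bar):\n    pass\n', PythonLexer(),
              TerminalFormatter(bg='dark', linenos=True), sys.stdout)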

View File

@@ -1,223 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.formatters.terminal256
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for 256-color terminal output with ANSI sequences.
RGB-to-XTERM color conversion routines adapted from xterm256-conv
tool (http://frexx.de/xterm-256-notes/data/xterm256-conv2.tar.bz2)
by Wolfgang Frisch.
Formatter version 1.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# TODO:
# - Options to map style's bold/underline/italic/border attributes
# to some ANSI attributes (something like 'italic=underline')
# - An option to output "style RGB to xterm RGB/index" conversion table
# - An option to indicate that we are running in "reverse background"
# xterm. This means that default colors are white-on-black, not
# black-on-white, so colors like "white background" need to be converted
# to "white background, black foreground", etc...
import sys
from pygments.formatter import Formatter
__all__ = ['Terminal256Formatter']
class EscapeSequence:
def __init__(self, fg=None, bg=None, bold=False, underline=False):
self.fg = fg
self.bg = bg
self.bold = bold
self.underline = underline
def escape(self, attrs):
if len(attrs):
return "\x1b[" + ";".join(attrs) + "m"
return ""
def color_string(self):
attrs = []
if self.fg is not None:
attrs.extend(("38", "5", "%i" % self.fg))
if self.bg is not None:
attrs.extend(("48", "5", "%i" % self.bg))
if self.bold:
attrs.append("01")
if self.underline:
attrs.append("04")
return self.escape(attrs)
def reset_string(self):
attrs = []
if self.fg is not None:
attrs.append("39")
if self.bg is not None:
attrs.append("49")
if self.bold or self.underline:
attrs.append("00")
return self.escape(attrs)
class Terminal256Formatter(Formatter):
r"""
Format tokens with ANSI color sequences, for output in a 256-color
terminal or console. As in `TerminalFormatter`, color sequences
are terminated at newlines, so that paging the output works correctly.
The formatter takes colors from a style defined by the `style` option
and converts them to the nearest ANSI 256-color escape sequences. Bold and
underline attributes from the style are preserved (and displayed).
.. versionadded:: 0.9
Options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
"""
name = 'Terminal256'
aliases = ['terminal256', 'console256', '256']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self.xterm_colors = []
self.best_match = {}
self.style_string = {}
self.usebold = 'nobold' not in options
self.useunderline = 'nounderline' not in options
self._build_color_table() # build an RGB-to-256 color conversion table
self._setup_styles() # convert selected style's colors to term. colors
def _build_color_table(self):
# colors 0..15: 16 basic colors
self.xterm_colors.append((0x00, 0x00, 0x00)) # 0
self.xterm_colors.append((0xcd, 0x00, 0x00)) # 1
self.xterm_colors.append((0x00, 0xcd, 0x00)) # 2
self.xterm_colors.append((0xcd, 0xcd, 0x00)) # 3
self.xterm_colors.append((0x00, 0x00, 0xee)) # 4
self.xterm_colors.append((0xcd, 0x00, 0xcd)) # 5
self.xterm_colors.append((0x00, 0xcd, 0xcd)) # 6
self.xterm_colors.append((0xe5, 0xe5, 0xe5)) # 7
self.xterm_colors.append((0x7f, 0x7f, 0x7f)) # 8
self.xterm_colors.append((0xff, 0x00, 0x00)) # 9
self.xterm_colors.append((0x00, 0xff, 0x00)) # 10
self.xterm_colors.append((0xff, 0xff, 0x00)) # 11
self.xterm_colors.append((0x5c, 0x5c, 0xff)) # 12
self.xterm_colors.append((0xff, 0x00, 0xff)) # 13
self.xterm_colors.append((0x00, 0xff, 0xff)) # 14
self.xterm_colors.append((0xff, 0xff, 0xff)) # 15
# colors 16..232: the 6x6x6 color cube
valuerange = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)
for i in range(217):
r = valuerange[(i // 36) % 6]
g = valuerange[(i // 6) % 6]
b = valuerange[i % 6]
self.xterm_colors.append((r, g, b))
# colors 233..253: grayscale
for i in range(1, 22):
v = 8 + i * 10
self.xterm_colors.append((v, v, v))
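# Index arithmetic for the cube above: levels (r, g, b), each in 0..5,
# map to xterm index 16 + 36*r + 6*g + b. For example full-intensity
# red, levels (5, 0, 0), is xterm color 16 + 180 = 196.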
def _closest_color(self, r, g, b):
distance = 257*257*3 # "infinity" (>distance from #000000 to #ffffff)
match = 0
for i in range(0, 254):
values = self.xterm_colors[i]
rd = r - values[0]
gd = g - values[1]
bd = b - values[2]
d = rd*rd + gd*gd + bd*bd
if d < distance:
match = i
distance = d
return match
def _color_index(self, color):
index = self.best_match.get(color, None)
if index is None:
try:
rgb = int(str(color), 16)
except ValueError:
rgb = 0
r = (rgb >> 16) & 0xff
g = (rgb >> 8) & 0xff
b = rgb & 0xff
index = self._closest_color(r, g, b)
self.best_match[color] = index
return index
def _setup_styles(self):
for ttype, ndef in self.style:
escape = EscapeSequence()
if ndef['color']:
escape.fg = self._color_index(ndef['color'])
if ndef['bgcolor']:
escape.bg = self._color_index(ndef['bgcolor'])
if self.usebold and ndef['bold']:
escape.bold = True
if self.useunderline and ndef['underline']:
escape.underline = True
self.style_string[str(ttype)] = (escape.color_string(),
escape.reset_string())
def format(self, tokensource, outfile):
# hack: if the output is a terminal and has an encoding set,
# use that to avoid unicode encode problems
if not self.encoding and hasattr(outfile, "encoding") and \
hasattr(outfile, "isatty") and outfile.isatty() and \
sys.version_info < (3,):
self.encoding = outfile.encoding
return Formatter.format(self, tokensource, outfile)
def format_unencoded(self, tokensource, outfile):
for ttype, value in tokensource:
not_found = True
while ttype and not_found:
try:
# outfile.write( "<" + str(ttype) + ">" )
on, off = self.style_string[str(ttype)]
# Like TerminalFormatter, add "reset colors" escape sequence
# on newline.
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write(on + line + off)
outfile.write('\n')
if spl[-1]:
outfile.write(on + spl[-1] + off)
not_found = False
# outfile.write( '#' + str(ttype) + '#' )
except KeyError:
# ottype = ttype
ttype = ttype[:-1]
# outfile.write( '!' + str(ottype) + '->' + str(ttype) + '!' )
if not_found:
outfile.write(value)
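# Minimal usage sketch for Terminal256Formatter; the style name is
# illustrative (any installed Pygments style works):
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import PythonLexer

    highlight('def foo(bar):\n    pass\n', PythonLexer(),
              Terminal256Formatter(style='monokai'), sys.stdout)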

View File

@@ -1,870 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexer
~~~~~~~~~~~~~~
Base lexer classes.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import re
import sys
import time
import itertools
from pygments.filter import apply_filters, Filter
from pygments.filters import get_filter_by_name
from pygments.token import Error, Text, Other, _TokenType
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
make_analysator, text_type, add_metaclass, iteritems, Future, guess_decode
from pygments.regexopt import regex_opt
__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this',
'default', 'words']
_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
(b'\xff\xfe\0\0', 'utf-32'),
(b'\0\0\xfe\xff', 'utf-32be'),
(b'\xff\xfe', 'utf-16'),
(b'\xfe\xff', 'utf-16be')]
_default_analyse = staticmethod(lambda x: 0.0)
class LexerMeta(type):
"""
This metaclass automagically converts ``analyse_text`` methods into
static methods which always return float values.
"""
def __new__(cls, name, bases, d):
if 'analyse_text' in d:
d['analyse_text'] = make_analysator(d['analyse_text'])
return type.__new__(cls, name, bases, d)
@add_metaclass(LexerMeta)
class Lexer(object):
"""
Lexer for a specific language.
Basic options recognized:
``stripnl``
Strip leading and trailing newlines from the input (default: True).
``stripall``
Strip all leading and trailing whitespace from the input
(default: False).
``ensurenl``
Make sure that the input ends with a newline (default: True). This
is required for some lexers that consume input linewise.
.. versionadded:: 1.3
``tabsize``
If given and greater than 0, expand tabs in the input (default: 0).
``encoding``
If given, must be an encoding name. This encoding will be used to
convert the input string to Unicode, if it is not already a Unicode
string (default: ``'guess'``, which uses a simple UTF-8 / locale /
Latin1 detection). Can also be ``'chardet'`` to use the chardet
library, if it is installed.
``inencoding``
Overrides the ``encoding`` if given.
"""
#: Name of the lexer
name = None
#: Shortcuts for the lexer
aliases = []
#: File name globs
filenames = []
#: Secondary file name globs
alias_filenames = []
#: MIME types
mimetypes = []
#: Priority, should multiple lexers match and no content is provided
priority = 0
def __init__(self, **options):
self.options = options
self.stripnl = get_bool_opt(options, 'stripnl', True)
self.stripall = get_bool_opt(options, 'stripall', False)
self.ensurenl = get_bool_opt(options, 'ensurenl', True)
self.tabsize = get_int_opt(options, 'tabsize', 0)
self.encoding = options.get('encoding', 'guess')
self.encoding = options.get('inencoding') or self.encoding
self.filters = []
for filter_ in get_list_opt(options, 'filters', ()):
self.add_filter(filter_)
def __repr__(self):
if self.options:
return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
self.options)
else:
return '<pygments.lexers.%s>' % self.__class__.__name__
def add_filter(self, filter_, **options):
"""
Add a new stream filter to this lexer.
"""
if not isinstance(filter_, Filter):
filter_ = get_filter_by_name(filter_, **options)
self.filters.append(filter_)
def analyse_text(text):
"""
Has to return a float between ``0`` and ``1`` that indicates
if a lexer wants to highlight this text. Used by ``guess_lexer``.
If this method returns ``0`` it won't highlight it in any case; if
it returns ``1``, highlighting with this lexer is guaranteed.
The `LexerMeta` metaclass automatically wraps this function so
that it works like a static method (no ``self`` or ``cls``
parameter) and the return value is automatically converted to
`float`. If the return value is an object that is boolean `False`
it's the same as if the return value was ``0.0``.
"""
def get_tokens(self, text, unfiltered=False):
"""
Return an iterable of (tokentype, value) pairs generated from
`text`. If `unfiltered` is set to `True`, the filtering mechanism
is bypassed even if filters are defined.
Also preprocesses the text: expands tabs, strips it if wanted, and
applies registered filters.
"""
if not isinstance(text, text_type):
if self.encoding == 'guess':
text, _ = guess_decode(text)
elif self.encoding == 'chardet':
try:
import chardet
except ImportError:
raise ImportError('To enable chardet encoding guessing, '
'please install the chardet library '
'from http://chardet.feedparser.org/')
# check for BOM first
decoded = None
for bom, encoding in _encoding_map:
if text.startswith(bom):
decoded = text[len(bom):].decode(encoding, 'replace')
break
# no BOM found, so use chardet
if decoded is None:
enc = chardet.detect(text[:1024]) # Guess using first 1KB
decoded = text.decode(enc.get('encoding') or 'utf-8',
'replace')
text = decoded
else:
text = text.decode(self.encoding)
if text.startswith(u'\ufeff'):
text = text[len(u'\ufeff'):]
else:
if text.startswith(u'\ufeff'):
text = text[len(u'\ufeff'):]
# text now *is* a unicode string
text = text.replace('\r\n', '\n')
text = text.replace('\r', '\n')
if self.stripall:
text = text.strip()
elif self.stripnl:
text = text.strip('\n')
if self.tabsize > 0:
text = text.expandtabs(self.tabsize)
if self.ensurenl and not text.endswith('\n'):
text += '\n'
def streamer():
for i, t, v in self.get_tokens_unprocessed(text):
yield t, v
stream = streamer()
if not unfiltered:
stream = apply_filters(stream, self.filters, self)
return stream
def get_tokens_unprocessed(self, text):
"""
Return an iterable of (index, tokentype, value) pairs where "index"
is the starting position of the token within the input text.
In subclasses, implement this method as a generator to
maximize effectiveness.
"""
raise NotImplementedError
class DelegatingLexer(Lexer):
"""
This lexer takes two lexers as arguments: a root lexer and
a language lexer. First everything is scanned using the language
lexer, afterwards all ``Other`` tokens are lexed using the root
lexer.
The lexers from the ``template`` lexer package use this base lexer.
"""
def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
self.root_lexer = _root_lexer(**options)
self.language_lexer = _language_lexer(**options)
self.needle = _needle
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
buffered = ''
insertions = []
lng_buffer = []
for i, t, v in self.language_lexer.get_tokens_unprocessed(text):
if t is self.needle:
if lng_buffer:
insertions.append((len(buffered), lng_buffer))
lng_buffer = []
buffered += v
else:
lng_buffer.append((i, t, v))
if lng_buffer:
insertions.append((len(buffered), lng_buffer))
return do_insertions(insertions,
self.root_lexer.get_tokens_unprocessed(buffered))
# ------------------------------------------------------------------------------
# RegexLexer and ExtendedRegexLexer
#
class include(str):
"""
Indicates that a state should include rules from another state.
"""
pass
class _inherit(object):
"""
Indicates that a state should inherit from its superclass.
"""
def __repr__(self):
return 'inherit'
inherit = _inherit()
class combined(tuple):
"""
Indicates a state combined from multiple states.
"""
def __new__(cls, *args):
return tuple.__new__(cls, args)
def __init__(self, *args):
# tuple.__init__ doesn't do anything
pass
class _PseudoMatch(object):
"""
A pseudo match object constructed from a string.
"""
def __init__(self, start, text):
self._text = text
self._start = start
def start(self, arg=None):
return self._start
def end(self, arg=None):
return self._start + len(self._text)
def group(self, arg=None):
if arg:
raise IndexError('No such group')
return self._text
def groups(self):
return (self._text,)
def groupdict(self):
return {}
def bygroups(*args):
"""
Callback that yields multiple actions for each group in the match.
"""
def callback(lexer, match, ctx=None):
for i, action in enumerate(args):
if action is None:
continue
elif type(action) is _TokenType:
data = match.group(i + 1)
if data:
yield match.start(i + 1), action, data
else:
data = match.group(i + 1)
if data is not None:
if ctx:
ctx.pos = match.start(i + 1)
for item in action(lexer, _PseudoMatch(match.start(i + 1),
data), ctx):
if item:
yield item
if ctx:
ctx.pos = match.end()
return callback
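# Example rule using bygroups (token types come from pygments.token): each
# regex group gets its own token type, so a state entry such as
#   (r'(def)(\s+)([a-zA-Z_]\w*)', bygroups(Keyword, Text, Name.Function))
# yields Keyword for 'def', Text for the whitespace, and Name.Function for
# the function name, each at its correct start offset.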
class _This(object):
"""
Special singleton used for indicating the caller class.
Used by ``using``.
"""
this = _This()
def using(_other, **kwargs):
"""
Callback that processes the match with a different lexer.
The keyword arguments are forwarded to the lexer, except `state` which
is handled separately.
`state` specifies the state that the new lexer will start in, and can
be an enumerable such as ('root', 'inline', 'string') or a simple
string which is assumed to be on top of the root state.
Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
"""
gt_kwargs = {}
if 'state' in kwargs:
s = kwargs.pop('state')
if isinstance(s, (list, tuple)):
gt_kwargs['stack'] = s
else:
gt_kwargs['stack'] = ('root', s)
if _other is this:
def callback(lexer, match, ctx=None):
# if keyword arguments are given the callback
# function has to create a new lexer instance
if kwargs:
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = lexer.__class__(**kwargs)
else:
lx = lexer
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
else:
def callback(lexer, match, ctx=None):
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = _other(**kwargs)
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
return callback
class default:
"""
Indicates a state or state action (e.g. ``#pop``) to apply.
For example, ``default('#pop')`` is equivalent to ``('', Token, '#pop')``.
Note that state tuples may be used as well.
.. versionadded:: 2.0
"""
def __init__(self, state):
self.state = state
class words(Future):
"""
Indicates a list of literal words that is transformed into an optimized
regex that matches any of the words.
.. versionadded:: 2.0
"""
def __init__(self, words, prefix='', suffix=''):
self.words = words
self.prefix = prefix
self.suffix = suffix
def get(self):
return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix)
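# Example: a rule like (words(('if', 'elif', 'else'), suffix=r'\b'), Keyword)
# defers regex construction; get() is called when the lexer is first
# instantiated and returns a single optimized alternation matching any of
# the listed words.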
class RegexLexerMeta(LexerMeta):
"""
Metaclass for RegexLexer, creates the self._tokens attribute from
self.tokens on the first instantiation.
"""
def _process_regex(cls, regex, rflags, state):
"""Preprocess the regular expression component of a token definition."""
if isinstance(regex, Future):
regex = regex.get()
return re.compile(regex, rflags).match
def _process_token(cls, token):
"""Preprocess the token component of a token definition."""
assert type(token) is _TokenType or callable(token), \
'token type must be simple type or callable, not %r' % (token,)
return token
def _process_new_state(cls, new_state, unprocessed, processed):
"""Preprocess the state transition action of a token definition."""
if isinstance(new_state, str):
# an existing state
if new_state == '#pop':
return -1
elif new_state in unprocessed:
return (new_state,)
elif new_state == '#push':
return new_state
elif new_state[:5] == '#pop:':
return -int(new_state[5:])
else:
assert False, 'unknown new state %r' % new_state
elif isinstance(new_state, combined):
# combine a new state from existing ones
tmp_state = '_tmp_%d' % cls._tmpname
cls._tmpname += 1
itokens = []
for istate in new_state:
assert istate != new_state, 'circular state ref %r' % istate
itokens.extend(cls._process_state(unprocessed,
processed, istate))
processed[tmp_state] = itokens
return (tmp_state,)
elif isinstance(new_state, tuple):
# push more than one state
for istate in new_state:
assert (istate in unprocessed or
istate in ('#pop', '#push')), \
'unknown new state ' + istate
return new_state
else:
assert False, 'unknown new state def %r' % new_state
def _process_state(cls, unprocessed, processed, state):
"""Preprocess a single state definition."""
assert type(state) is str, "wrong state name %r" % state
assert state[0] != '#', "invalid state name %r" % state
if state in processed:
return processed[state]
tokens = processed[state] = []
rflags = cls.flags
for tdef in unprocessed[state]:
if isinstance(tdef, include):
# it's a state reference
assert tdef != state, "circular state reference %r" % state
tokens.extend(cls._process_state(unprocessed, processed,
str(tdef)))
continue
if isinstance(tdef, _inherit):
# should be processed already, but may not in the case of:
# 1. the state has no counterpart in any parent
# 2. the state includes more than one 'inherit'
continue
if isinstance(tdef, default):
new_state = cls._process_new_state(tdef.state, unprocessed, processed)
tokens.append((re.compile('').match, None, new_state))
continue
assert type(tdef) is tuple, "wrong rule def %r" % tdef
try:
rex = cls._process_regex(tdef[0], rflags, state)
except Exception as err:
raise ValueError("uncompilable regex %r in state %r of %r: %s" %
(tdef[0], state, cls, err))
token = cls._process_token(tdef[1])
if len(tdef) == 2:
new_state = None
else:
new_state = cls._process_new_state(tdef[2],
unprocessed, processed)
tokens.append((rex, token, new_state))
return tokens
def process_tokendef(cls, name, tokendefs=None):
"""Preprocess a dictionary of token definitions."""
processed = cls._all_tokens[name] = {}
tokendefs = tokendefs or cls.tokens[name]
for state in list(tokendefs):
cls._process_state(tokendefs, processed, state)
return processed
def get_tokendefs(cls):
"""
Merge tokens from superclasses in MRO order, returning a single tokendef
dictionary.
Any state that is not defined by a subclass will be inherited
automatically. States that *are* defined by subclasses will, by
default, override that state in the superclass. If a subclass wishes to
inherit definitions from a superclass, it can use the special value
"inherit", which will cause the superclass' state definition to be
included at that point in the state.
"""
tokens = {}
inheritable = {}
for c in cls.__mro__:
toks = c.__dict__.get('tokens', {})
for state, items in iteritems(toks):
curitems = tokens.get(state)
if curitems is None:
# N.b. because this is assigned by reference, sufficiently
# deep hierarchies are processed incrementally (e.g. for
# A(B), B(C), C(RegexLexer), B will be premodified so X(B)
# will not see any inherits in B).
tokens[state] = items
try:
inherit_ndx = items.index(inherit)
except ValueError:
continue
inheritable[state] = inherit_ndx
continue
inherit_ndx = inheritable.pop(state, None)
if inherit_ndx is None:
continue
# Replace the "inherit" value with the items
curitems[inherit_ndx:inherit_ndx+1] = items
try:
# N.b. this is the index in items (that is, the superclass
# copy), so offset required when storing below.
new_inh_ndx = items.index(inherit)
except ValueError:
pass
else:
inheritable[state] = inherit_ndx + new_inh_ndx
return tokens
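# Illustrative sketch (not part of the original source), assuming Number and
# Name token types from pygments.token: given
#     class Base(RegexLexer):
#         tokens = {'root': [(r'\d+', Number)]}
#     class Child(Base):
#         tokens = {'root': [(r'[a-z]+', Name), inherit]}
# get_tokendefs merges Child's 'root' into
#     [(r'[a-z]+', Name), (r'\d+', Number)]
# i.e. the superclass rules are spliced in at the position of ``inherit``.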
def __call__(cls, *args, **kwds):
"""Instantiate cls after preprocessing its token definitions."""
if '_tokens' not in cls.__dict__:
cls._all_tokens = {}
cls._tmpname = 0
if hasattr(cls, 'token_variants') and cls.token_variants:
# don't process yet
pass
else:
cls._tokens = cls.process_tokendef('', cls.get_tokendefs())
return type.__call__(cls, *args, **kwds)
@add_metaclass(RegexLexerMeta)
class RegexLexer(Lexer):
"""
Base for simple stateful regular expression-based lexers.
Simplifies the lexing process so that you need only
provide a list of states and regular expressions.
"""
#: Flags for compiling the regular expressions.
#: Defaults to MULTILINE.
flags = re.MULTILINE
#: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
#:
#: The initial state is 'root'.
#: ``new_state`` can be omitted to signify no state transition.
#: If it is a string, the state is pushed on the stack and changed.
#: If it is a tuple of strings, all states are pushed on the stack and
#: the current state will be the topmost.
#: It can also be ``combined('state1', 'state2', ...)``
#: to signify a new, anonymous state combined from the rules of two
#: or more existing ones.
#: Furthermore, it can be '#pop' to signify going back one step in
#: the state stack, or '#push' to push the current state on the stack
#: again.
#:
#: The tuple can also be replaced with ``include('state')``, in which
#: case the rules from the state named by the string are included in the
#: current one.
tokens = {}
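# Illustrative sketch (not part of the original source): a minimal subclass
# showing the tokens format described above, using String and Text from
# pygments.token:
#     class StringLexer(RegexLexer):
#         tokens = {
#             'root': [
#                 (r'"', String, 'string'),   # push the 'string' state
#                 (r'\s+', Text),             # no state transition
#             ],
#             'string': [
#                 (r'[^"]+', String),
#                 (r'"', String, '#pop'),     # pop back to 'root'
#             ],
#         }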
def get_tokens_unprocessed(self, text, stack=('root',)):
"""
Split ``text`` into (tokentype, text) pairs.
``stack`` is the initial stack (default: ``['root']``)
"""
pos = 0
tokendefs = self._tokens
statestack = list(stack)
statetokens = tokendefs[statestack[-1]]
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, pos)
if m:
if action is not None:
if type(action) is _TokenType:
yield pos, action, m.group()
else:
for item in action(self, m):
yield item
pos = m.end()
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
statestack.pop()
elif state == '#push':
statestack.append(statestack[-1])
else:
statestack.append(state)
elif isinstance(new_state, int):
# pop
del statestack[new_state:]
elif new_state == '#push':
statestack.append(statestack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[statestack[-1]]
break
else:
try:
if text[pos] == '\n':
# at EOL, reset state to "root"
statestack = ['root']
statetokens = tokendefs['root']
yield pos, Text, u'\n'
pos += 1
continue
yield pos, Error, text[pos]
pos += 1
except IndexError:
break
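# Illustrative usage (not part of the original source), assuming MyLexer is a
# RegexLexer subclass: the generator yields (index, tokentype, value) triples:
#     for index, tokentype, value in MyLexer().get_tokens_unprocessed(code):
#         print(index, tokentype, repr(value))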
class LexerContext(object):
"""
A helper object that holds lexer position data.
"""
def __init__(self, text, pos, stack=None, end=None):
self.text = text
self.pos = pos
self.end = end or len(text) # end=0 not supported ;-)
self.stack = stack or ['root']
def __repr__(self):
return 'LexerContext(%r, %r, %r)' % (
self.text, self.pos, self.stack)
class ExtendedRegexLexer(RegexLexer):
"""
A RegexLexer that uses a context object to store its state.
"""
def get_tokens_unprocessed(self, text=None, context=None):
"""
Split ``text`` into (tokentype, text) pairs.
If ``context`` is given, use this lexer context instead.
"""
tokendefs = self._tokens
if not context:
ctx = LexerContext(text, 0)
statetokens = tokendefs['root']
else:
ctx = context
statetokens = tokendefs[ctx.stack[-1]]
text = ctx.text
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, ctx.pos, ctx.end)
if m:
if action is not None:
if type(action) is _TokenType:
yield ctx.pos, action, m.group()
ctx.pos = m.end()
else:
for item in action(self, m, ctx):
yield item
if not new_state:
# altered the state stack?
statetokens = tokendefs[ctx.stack[-1]]
# CAUTION: callback must set ctx.pos!
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
ctx.stack.pop()
elif state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
ctx.stack.append(state)
elif isinstance(new_state, int):
# pop
del ctx.stack[new_state:]
elif new_state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[ctx.stack[-1]]
break
else:
try:
if ctx.pos >= ctx.end:
break
if text[ctx.pos] == '\n':
# at EOL, reset state to "root"
ctx.stack = ['root']
statetokens = tokendefs['root']
yield ctx.pos, Text, u'\n'
ctx.pos += 1
continue
yield ctx.pos, Error, text[ctx.pos]
ctx.pos += 1
except IndexError:
break
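# Illustrative sketch (not part of the original source): a callback rule used
# with ExtendedRegexLexer receives the context as a third argument and, per
# the CAUTION above, must advance ctx.pos itself:
#     def my_callback(lexer, match, ctx):
#         yield match.start(), String, match.group()
#         ctx.pos = match.end()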
def do_insertions(insertions, tokens):
"""
Helper for lexers which must combine the results of several
sublexers.
``insertions`` is a list of ``(index, itokens)`` pairs.
Each ``itokens`` iterable should be inserted at position
``index`` into the token stream given by the ``tokens``
argument.
The result is a combined token stream.
TODO: clean up the code here.
"""
insertions = iter(insertions)
try:
index, itokens = next(insertions)
except StopIteration:
# no insertions
for item in tokens:
yield item
return
realpos = None
insleft = True
# iterate over the token stream where we want to insert
# the tokens from the insertion list.
for i, t, v in tokens:
# first iteration: store the position of the first item
if realpos is None:
realpos = i
oldi = 0
while insleft and i + len(v) >= index:
tmpval = v[oldi:index - i]
yield realpos, t, tmpval
realpos += len(tmpval)
for it_index, it_token, it_value in itokens:
yield realpos, it_token, it_value
realpos += len(it_value)
oldi = index - i
try:
index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
yield realpos, t, v[oldi:]
realpos += len(v) - oldi
# leftover tokens
while insleft:
# no normal tokens, set realpos to zero
realpos = realpos or 0
for p, t, v in itokens:
yield realpos, t, v
realpos += len(v)
try:
index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
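# Illustrative usage (not part of the original source), assuming Generic is
# imported from pygments.token and ``sub`` is another lexer: splice a prompt
# token in front of the sublexer's stream at offset 0:
#     stream = do_insertions(
#         [(0, [(0, Generic.Prompt, u'>>> ')])],
#         sub.get_tokens_unprocessed(u'1 + 1'))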
class ProfilingRegexLexerMeta(RegexLexerMeta):
"""Metaclass for ProfilingRegexLexer, collects regex timing info."""
def _process_regex(cls, regex, rflags, state):
if isinstance(regex, words):
rex = regex_opt(regex.words, prefix=regex.prefix,
suffix=regex.suffix)
else:
rex = regex
compiled = re.compile(rex, rflags)
def match_func(text, pos, endpos=sys.maxsize):
info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0])
t0 = time.time()
res = compiled.match(text, pos, endpos)
t1 = time.time()
info[0] += 1
info[1] += t1 - t0
return res
return match_func
@add_metaclass(ProfilingRegexLexerMeta)
class ProfilingRegexLexer(RegexLexer):
"""Drop-in replacement for RegexLexer that does profiling of its regexes."""
_prof_data = []
_prof_sort_index = 4 # defaults to time per call
def get_tokens_unprocessed(self, text, stack=('root',)):
# this needs to be a stack, since using(this) will produce nested calls
self.__class__._prof_data.append({})
for tok in RegexLexer.get_tokens_unprocessed(self, text, stack):
yield tok
rawdata = self.__class__._prof_data.pop()
data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
n, 1000 * t, 1000 * t / n)
for ((s, r), (n, t)) in rawdata.items()),
key=lambda x: x[self._prof_sort_index],
reverse=True)
sum_total = sum(x[3] for x in data)
print()
print('Profiling result for %s lexing %d chars in %.3f ms' %
(self.__class__.__name__, len(text), sum_total))
print('=' * 110)
print('%-20s %-64s ncalls tottime percall' % ('state', 'regex'))
print('-' * 110)
for d in data:
print('%-20s %-65s %5d %8.4f %8.4f' % d)
print('=' * 110)
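# Illustrative usage (not part of the original source), assuming PythonLexer
# is a RegexLexer subclass: mix ProfilingRegexLexer in ahead of it, then
# exhaust the token generator, which prints the timing table at the end:
#     class ProfPythonLexer(ProfilingRegexLexer, PythonLexer):
#         pass
#     list(ProfPythonLexer().get_tokens_unprocessed(code))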


@@ -1,273 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers
~~~~~~~~~~~~~~~
Pygments lexers.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import types
import fnmatch
from os.path import basename
from pygments.lexers._mapping import LEXERS
from pygments.modeline import get_filetype_from_buffer
from pygments.plugin import find_plugin_lexers
from pygments.util import ClassNotFound, itervalues, guess_decode
__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
'guess_lexer'] + list(LEXERS)
_lexer_cache = {}
_pattern_cache = {}
def _fn_matches(fn, glob):
"""Return whether the supplied file name fn matches pattern filename."""
if glob not in _pattern_cache:
pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
return pattern.match(fn)
return _pattern_cache[glob].match(fn)
def _load_lexers(module_name):
"""Load a lexer (and all others in the module too)."""
mod = __import__(module_name, None, None, ['__all__'])
for lexer_name in mod.__all__:
cls = getattr(mod, lexer_name)
_lexer_cache[cls.name] = cls
def get_all_lexers():
"""Return a generator of tuples in the form ``(name, aliases,
filenames, mimetypes)`` of all known lexers.
"""
for item in itervalues(LEXERS):
yield item[1:]
for lexer in find_plugin_lexers():
yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
def find_lexer_class(name):
"""Lookup a lexer class by name.
Return None if not found.
"""
if name in _lexer_cache:
return _lexer_cache[name]
# lookup builtin lexers
for module_name, lname, aliases, _, _ in itervalues(LEXERS):
if name == lname:
_load_lexers(module_name)
return _lexer_cache[name]
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if cls.name == name:
return cls
def get_lexer_by_name(_alias, **options):
"""Get a lexer by an alias.
Raises ClassNotFound if not found.
"""
if not _alias:
raise ClassNotFound('no lexer for alias %r found' % _alias)
# lookup builtin lexers
for module_name, name, aliases, _, _ in itervalues(LEXERS):
if _alias.lower() in aliases:
if name not in _lexer_cache:
_load_lexers(module_name)
return _lexer_cache[name](**options)
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if _alias in cls.aliases:
return cls(**options)
raise ClassNotFound('no lexer for alias %r found' % _alias)
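# Illustrative usage (not part of the original source); ``stripall`` is a
# standard lexer option passed through **options:
#     lexer = get_lexer_by_name('python', stripall=True)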
def find_lexer_class_for_filename(_fn, code=None):
"""Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Returns None if not found.
"""
matches = []
fn = basename(_fn)
for modname, name, _, filenames, _ in itervalues(LEXERS):
for filename in filenames:
if _fn_matches(fn, filename):
if name not in _lexer_cache:
_load_lexers(modname)
matches.append((_lexer_cache[name], filename))
for cls in find_plugin_lexers():
for filename in cls.filenames:
if _fn_matches(fn, filename):
matches.append((cls, filename))
if sys.version_info > (3,) and isinstance(code, bytes):
# decode it, since all analyse_text functions expect unicode
code = guess_decode(code)
def get_rating(info):
cls, filename = info
# explicit patterns get a bonus
bonus = '*' not in filename and 0.5 or 0
# The class _always_ defines analyse_text because it's included in
# the Lexer class. The default implementation returns None which
# gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
# to find lexers which need it overridden.
if code:
return cls.analyse_text(code) + bonus
return cls.priority + bonus
if matches:
matches.sort(key=get_rating)
# print "Possible lexers, after sort:", matches
return matches[-1][0]
def get_lexer_for_filename(_fn, code=None, **options):
"""Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Raises ClassNotFound if not found.
"""
res = find_lexer_class_for_filename(_fn, code)
if not res:
raise ClassNotFound('no lexer for filename %r found' % _fn)
return res(**options)
def get_lexer_for_mimetype(_mime, **options):
"""Get a lexer for a mimetype.
Raises ClassNotFound if not found.
"""
for modname, name, _, _, mimetypes in itervalues(LEXERS):
if _mime in mimetypes:
if name not in _lexer_cache:
_load_lexers(modname)
return _lexer_cache[name](**options)
for cls in find_plugin_lexers():
if _mime in cls.mimetypes:
return cls(**options)
raise ClassNotFound('no lexer for mimetype %r found' % _mime)
def _iter_lexerclasses(plugins=True):
"""Return an iterator over all lexer classes."""
for key in sorted(LEXERS):
module_name, name = LEXERS[key][:2]
if name not in _lexer_cache:
_load_lexers(module_name)
yield _lexer_cache[name]
if plugins:
for lexer in find_plugin_lexers():
yield lexer
def guess_lexer_for_filename(_fn, _text, **options):
"""
Look up all lexers that handle the given filename as a primary
(``filenames``) or secondary (``alias_filenames``) pattern. Then run a
text analysis on those lexers and choose the best result.
usage::
>>> from pygments.lexers import guess_lexer_for_filename
>>> guess_lexer_for_filename('hello.html', '<%= @foo %>')
<pygments.lexers.templates.RhtmlLexer object at 0xb7d2f32c>
>>> guess_lexer_for_filename('hello.html', '<h1>{{ title|e }}</h1>')
<pygments.lexers.templates.HtmlDjangoLexer object at 0xb7d2f2ac>
>>> guess_lexer_for_filename('style.css', 'a { color: <?= $link ?> }')
<pygments.lexers.templates.CssPhpLexer object at 0xb7ba518c>
"""
fn = basename(_fn)
primary = {}
matching_lexers = set()
for lexer in _iter_lexerclasses():
for filename in lexer.filenames:
if _fn_matches(fn, filename):
matching_lexers.add(lexer)
primary[lexer] = True
for filename in lexer.alias_filenames:
if _fn_matches(fn, filename):
matching_lexers.add(lexer)
primary[lexer] = False
if not matching_lexers:
raise ClassNotFound('no lexer for filename %r found' % fn)
if len(matching_lexers) == 1:
return matching_lexers.pop()(**options)
result = []
for lexer in matching_lexers:
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
result.append((rv, lexer))
def type_sort(t):
# sort by:
# - analyse score
# - is primary filename pattern?
# - priority
# - last resort: class name
return (t[0], primary[t[1]], t[1].priority, t[1].__name__)
result.sort(key=type_sort)
return result[-1][1](**options)
def guess_lexer(_text, **options):
"""Guess a lexer by strong distinctions in the text (eg, shebang)."""
# try to get a vim modeline first
ft = get_filetype_from_buffer(_text)
if ft is not None:
try:
return get_lexer_by_name(ft, **options)
except ClassNotFound:
pass
best_lexer = [0.0, None]
for lexer in _iter_lexerclasses():
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
if rv > best_lexer[0]:
best_lexer[:] = (rv, lexer)
if not best_lexer[0] or best_lexer[1] is None:
raise ClassNotFound('no lexer matching the text found')
return best_lexer[1](**options)
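# Illustrative usage (not part of the original source): a shebang is a strong
# signal, so this typically returns a Python lexer:
#     guess_lexer('#!/usr/bin/env python\nprint(42)\n')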
class _automodule(types.ModuleType):
"""Automatically import lexers."""
def __getattr__(self, name):
info = LEXERS.get(name)
if info:
_load_lexers(info[0])
cls = _lexer_cache[info[1]]
setattr(self, name, cls)
return cls
raise AttributeError(name)
oldmod = sys.modules[__name__]
newmod = _automodule(__name__)
newmod.__dict__.update(oldmod.__dict__)
sys.modules[__name__] = newmod
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types

File diff suppressed because it is too large


@@ -1,232 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers._cl_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ANSI Common Lisp builtins.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
BUILTIN_FUNCTIONS = set(( # 638 functions
'<', '<=', '=', '>', '>=', '-', '/', '/=', '*', '+', '1-', '1+',
'abort', 'abs', 'acons', 'acos', 'acosh', 'add-method', 'adjoin',
'adjustable-array-p', 'adjust-array', 'allocate-instance',
'alpha-char-p', 'alphanumericp', 'append', 'apply', 'apropos',
'apropos-list', 'aref', 'arithmetic-error-operands',
'arithmetic-error-operation', 'array-dimension', 'array-dimensions',
'array-displacement', 'array-element-type', 'array-has-fill-pointer-p',
'array-in-bounds-p', 'arrayp', 'array-rank', 'array-row-major-index',
'array-total-size', 'ash', 'asin', 'asinh', 'assoc', 'assoc-if',
'assoc-if-not', 'atan', 'atanh', 'atom', 'bit', 'bit-and', 'bit-andc1',
'bit-andc2', 'bit-eqv', 'bit-ior', 'bit-nand', 'bit-nor', 'bit-not',
'bit-orc1', 'bit-orc2', 'bit-vector-p', 'bit-xor', 'boole',
'both-case-p', 'boundp', 'break', 'broadcast-stream-streams',
'butlast', 'byte', 'byte-position', 'byte-size', 'caaaar', 'caaadr',
'caaar', 'caadar', 'caaddr', 'caadr', 'caar', 'cadaar', 'cadadr',
'cadar', 'caddar', 'cadddr', 'caddr', 'cadr', 'call-next-method', 'car',
'cdaaar', 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar',
'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr', 'cdr',
'ceiling', 'cell-error-name', 'cerror', 'change-class', 'char', 'char<',
'char<=', 'char=', 'char>', 'char>=', 'char/=', 'character',
'characterp', 'char-code', 'char-downcase', 'char-equal',
'char-greaterp', 'char-int', 'char-lessp', 'char-name',
'char-not-equal', 'char-not-greaterp', 'char-not-lessp', 'char-upcase',
'cis', 'class-name', 'class-of', 'clear-input', 'clear-output',
'close', 'clrhash', 'code-char', 'coerce', 'compile',
'compiled-function-p', 'compile-file', 'compile-file-pathname',
'compiler-macro-function', 'complement', 'complex', 'complexp',
'compute-applicable-methods', 'compute-restarts', 'concatenate',
'concatenated-stream-streams', 'conjugate', 'cons', 'consp',
'constantly', 'constantp', 'continue', 'copy-alist', 'copy-list',
'copy-pprint-dispatch', 'copy-readtable', 'copy-seq', 'copy-structure',
'copy-symbol', 'copy-tree', 'cos', 'cosh', 'count', 'count-if',
'count-if-not', 'decode-float', 'decode-universal-time', 'delete',
'delete-duplicates', 'delete-file', 'delete-if', 'delete-if-not',
'delete-package', 'denominator', 'deposit-field', 'describe',
'describe-object', 'digit-char', 'digit-char-p', 'directory',
'directory-namestring', 'disassemble', 'documentation', 'dpb',
'dribble', 'echo-stream-input-stream', 'echo-stream-output-stream',
'ed', 'eighth', 'elt', 'encode-universal-time', 'endp',
'enough-namestring', 'ensure-directories-exist',
'ensure-generic-function', 'eq', 'eql', 'equal', 'equalp', 'error',
'eval', 'evenp', 'every', 'exp', 'export', 'expt', 'fboundp',
'fceiling', 'fdefinition', 'ffloor', 'fifth', 'file-author',
'file-error-pathname', 'file-length', 'file-namestring',
'file-position', 'file-string-length', 'file-write-date',
'fill', 'fill-pointer', 'find', 'find-all-symbols', 'find-class',
'find-if', 'find-if-not', 'find-method', 'find-package', 'find-restart',
'find-symbol', 'finish-output', 'first', 'float', 'float-digits',
'floatp', 'float-precision', 'float-radix', 'float-sign', 'floor',
'fmakunbound', 'force-output', 'format', 'fourth', 'fresh-line',
'fround', 'ftruncate', 'funcall', 'function-keywords',
'function-lambda-expression', 'functionp', 'gcd', 'gensym', 'gentemp',
'get', 'get-decoded-time', 'get-dispatch-macro-character', 'getf',
'gethash', 'get-internal-real-time', 'get-internal-run-time',
'get-macro-character', 'get-output-stream-string', 'get-properties',
'get-setf-expansion', 'get-universal-time', 'graphic-char-p',
'hash-table-count', 'hash-table-p', 'hash-table-rehash-size',
'hash-table-rehash-threshold', 'hash-table-size', 'hash-table-test',
'host-namestring', 'identity', 'imagpart', 'import',
'initialize-instance', 'input-stream-p', 'inspect',
'integer-decode-float', 'integer-length', 'integerp',
'interactive-stream-p', 'intern', 'intersection',
'invalid-method-error', 'invoke-debugger', 'invoke-restart',
'invoke-restart-interactively', 'isqrt', 'keywordp', 'last', 'lcm',
'ldb', 'ldb-test', 'ldiff', 'length', 'lisp-implementation-type',
'lisp-implementation-version', 'list', 'list*', 'list-all-packages',
'listen', 'list-length', 'listp', 'load',
'load-logical-pathname-translations', 'log', 'logand', 'logandc1',
'logandc2', 'logbitp', 'logcount', 'logeqv', 'logical-pathname',
'logical-pathname-translations', 'logior', 'lognand', 'lognor',
'lognot', 'logorc1', 'logorc2', 'logtest', 'logxor', 'long-site-name',
'lower-case-p', 'machine-instance', 'machine-type', 'machine-version',
'macroexpand', 'macroexpand-1', 'macro-function', 'make-array',
'make-broadcast-stream', 'make-concatenated-stream', 'make-condition',
'make-dispatch-macro-character', 'make-echo-stream', 'make-hash-table',
'make-instance', 'make-instances-obsolete', 'make-list',
'make-load-form', 'make-load-form-saving-slots', 'make-package',
'make-pathname', 'make-random-state', 'make-sequence', 'make-string',
'make-string-input-stream', 'make-string-output-stream', 'make-symbol',
'make-synonym-stream', 'make-two-way-stream', 'makunbound', 'map',
'mapc', 'mapcan', 'mapcar', 'mapcon', 'maphash', 'map-into', 'mapl',
'maplist', 'mask-field', 'max', 'member', 'member-if', 'member-if-not',
'merge', 'merge-pathnames', 'method-combination-error',
'method-qualifiers', 'min', 'minusp', 'mismatch', 'mod',
'muffle-warning', 'name-char', 'namestring', 'nbutlast', 'nconc',
'next-method-p', 'nintersection', 'ninth', 'no-applicable-method',
'no-next-method', 'not', 'notany', 'notevery', 'nreconc', 'nreverse',
'nset-difference', 'nset-exclusive-or', 'nstring-capitalize',
'nstring-downcase', 'nstring-upcase', 'nsublis', 'nsubst', 'nsubst-if',
'nsubst-if-not', 'nsubstitute', 'nsubstitute-if', 'nsubstitute-if-not',
'nth', 'nthcdr', 'null', 'numberp', 'numerator', 'nunion', 'oddp',
'open', 'open-stream-p', 'output-stream-p', 'package-error-package',
'package-name', 'package-nicknames', 'packagep',
'package-shadowing-symbols', 'package-used-by-list', 'package-use-list',
'pairlis', 'parse-integer', 'parse-namestring', 'pathname',
'pathname-device', 'pathname-directory', 'pathname-host',
'pathname-match-p', 'pathname-name', 'pathnamep', 'pathname-type',
'pathname-version', 'peek-char', 'phase', 'plusp', 'position',
'position-if', 'position-if-not', 'pprint', 'pprint-dispatch',
'pprint-fill', 'pprint-indent', 'pprint-linear', 'pprint-newline',
'pprint-tab', 'pprint-tabular', 'prin1', 'prin1-to-string', 'princ',
'princ-to-string', 'print', 'print-object', 'probe-file', 'proclaim',
'provide', 'random', 'random-state-p', 'rassoc', 'rassoc-if',
'rassoc-if-not', 'rational', 'rationalize', 'rationalp', 'read',
'read-byte', 'read-char', 'read-char-no-hang', 'read-delimited-list',
'read-from-string', 'read-line', 'read-preserving-whitespace',
'read-sequence', 'readtable-case', 'readtablep', 'realp', 'realpart',
'reduce', 'reinitialize-instance', 'rem', 'remhash', 'remove',
'remove-duplicates', 'remove-if', 'remove-if-not', 'remove-method',
'remprop', 'rename-file', 'rename-package', 'replace', 'require',
'rest', 'restart-name', 'revappend', 'reverse', 'room', 'round',
'row-major-aref', 'rplaca', 'rplacd', 'sbit', 'scale-float', 'schar',
'search', 'second', 'set', 'set-difference',
'set-dispatch-macro-character', 'set-exclusive-or',
'set-macro-character', 'set-pprint-dispatch', 'set-syntax-from-char',
'seventh', 'shadow', 'shadowing-import', 'shared-initialize',
'short-site-name', 'signal', 'signum', 'simple-bit-vector-p',
'simple-condition-format-arguments', 'simple-condition-format-control',
'simple-string-p', 'simple-vector-p', 'sin', 'sinh', 'sixth', 'sleep',
'slot-boundp', 'slot-exists-p', 'slot-makunbound', 'slot-missing',
'slot-unbound', 'slot-value', 'software-type', 'software-version',
'some', 'sort', 'special-operator-p', 'sqrt', 'stable-sort',
'standard-char-p', 'store-value', 'stream-element-type',
'stream-error-stream', 'stream-external-format', 'streamp', 'string',
'string<', 'string<=', 'string=', 'string>', 'string>=', 'string/=',
'string-capitalize', 'string-downcase', 'string-equal',
'string-greaterp', 'string-left-trim', 'string-lessp',
'string-not-equal', 'string-not-greaterp', 'string-not-lessp',
'stringp', 'string-right-trim', 'string-trim', 'string-upcase',
'sublis', 'subseq', 'subsetp', 'subst', 'subst-if', 'subst-if-not',
'substitute', 'substitute-if', 'substitute-if-not', 'subtypep', 'svref',
'sxhash', 'symbol-function', 'symbol-name', 'symbolp', 'symbol-package',
'symbol-plist', 'symbol-value', 'synonym-stream-symbol', 'syntax:',
'tailp', 'tan', 'tanh', 'tenth', 'terpri', 'third',
'translate-logical-pathname', 'translate-pathname', 'tree-equal',
'truename', 'truncate', 'two-way-stream-input-stream',
'two-way-stream-output-stream', 'type-error-datum',
'type-error-expected-type', 'type-of', 'typep', 'unbound-slot-instance',
'unexport', 'unintern', 'union', 'unread-char', 'unuse-package',
'update-instance-for-different-class',
'update-instance-for-redefined-class', 'upgraded-array-element-type',
'upgraded-complex-part-type', 'upper-case-p', 'use-package',
'user-homedir-pathname', 'use-value', 'values', 'values-list', 'vector',
'vectorp', 'vector-pop', 'vector-push', 'vector-push-extend', 'warn',
'wild-pathname-p', 'write', 'write-byte', 'write-char', 'write-line',
'write-sequence', 'write-string', 'write-to-string', 'yes-or-no-p',
'y-or-n-p', 'zerop',
))
SPECIAL_FORMS = set((
'block', 'catch', 'declare', 'eval-when', 'flet', 'function', 'go', 'if',
'labels', 'lambda', 'let', 'let*', 'load-time-value', 'locally', 'macrolet',
'multiple-value-call', 'multiple-value-prog1', 'progn', 'progv', 'quote',
'return-from', 'setq', 'symbol-macrolet', 'tagbody', 'the', 'throw',
'unwind-protect',
))
MACROS = set((
'and', 'assert', 'call-method', 'case', 'ccase', 'check-type', 'cond',
'ctypecase', 'decf', 'declaim', 'defclass', 'defconstant', 'defgeneric',
'define-compiler-macro', 'define-condition', 'define-method-combination',
'define-modify-macro', 'define-setf-expander', 'define-symbol-macro',
'defmacro', 'defmethod', 'defpackage', 'defparameter', 'defsetf',
'defstruct', 'deftype', 'defun', 'defvar', 'destructuring-bind', 'do',
'do*', 'do-all-symbols', 'do-external-symbols', 'dolist', 'do-symbols',
'dotimes', 'ecase', 'etypecase', 'formatter', 'handler-bind',
'handler-case', 'ignore-errors', 'incf', 'in-package', 'lambda', 'loop',
'loop-finish', 'make-method', 'multiple-value-bind', 'multiple-value-list',
'multiple-value-setq', 'nth-value', 'or', 'pop',
'pprint-exit-if-list-exhausted', 'pprint-logical-block', 'pprint-pop',
'print-unreadable-object', 'prog', 'prog*', 'prog1', 'prog2', 'psetf',
'psetq', 'push', 'pushnew', 'remf', 'restart-bind', 'restart-case',
'return', 'rotatef', 'setf', 'shiftf', 'step', 'time', 'trace', 'typecase',
'unless', 'untrace', 'when', 'with-accessors', 'with-compilation-unit',
'with-condition-restarts', 'with-hash-table-iterator',
'with-input-from-string', 'with-open-file', 'with-open-stream',
'with-output-to-string', 'with-package-iterator', 'with-simple-restart',
'with-slots', 'with-standard-io-syntax',
))
LAMBDA_LIST_KEYWORDS = set((
'&allow-other-keys', '&aux', '&body', '&environment', '&key', '&optional',
'&rest', '&whole',
))
DECLARATIONS = set((
'dynamic-extent', 'ignore', 'optimize', 'ftype', 'inline', 'special',
'ignorable', 'notinline', 'type',
))
BUILTIN_TYPES = set((
'atom', 'boolean', 'base-char', 'base-string', 'bignum', 'bit',
'compiled-function', 'extended-char', 'fixnum', 'keyword', 'nil',
'signed-byte', 'short-float', 'single-float', 'double-float', 'long-float',
'simple-array', 'simple-base-string', 'simple-bit-vector', 'simple-string',
'simple-vector', 'standard-char', 'unsigned-byte',
# Condition Types
'arithmetic-error', 'cell-error', 'condition', 'control-error',
'division-by-zero', 'end-of-file', 'error', 'file-error',
'floating-point-inexact', 'floating-point-overflow',
'floating-point-underflow', 'floating-point-invalid-operation',
'parse-error', 'package-error', 'print-not-readable', 'program-error',
'reader-error', 'serious-condition', 'simple-condition', 'simple-error',
'simple-type-error', 'simple-warning', 'stream-error', 'storage-condition',
'style-warning', 'type-error', 'unbound-variable', 'unbound-slot',
'undefined-function', 'warning',
))
BUILTIN_CLASSES = set((
'array', 'broadcast-stream', 'bit-vector', 'built-in-class', 'character',
'class', 'complex', 'concatenated-stream', 'cons', 'echo-stream',
'file-stream', 'float', 'function', 'generic-function', 'hash-table',
'integer', 'list', 'logical-pathname', 'method-combination', 'method',
'null', 'number', 'package', 'pathname', 'ratio', 'rational', 'readtable',
'real', 'random-state', 'restart', 'sequence', 'standard-class',
'standard-generic-function', 'standard-method', 'standard-object',
'string-stream', 'stream', 'string', 'structure-class', 'structure-object',
'symbol', 'synonym-stream', 't', 'two-way-stream', 'vector',
))

File diff suppressed because one or more lines are too long


@@ -1,250 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers._lua_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names and modules of Lua functions.
It is able to regenerate itself, but for adding new functions you
probably have to add some callbacks (see function module_callbacks).
Do not edit the MODULES dict by hand.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
MODULES = {'basic': ('_G',
'_VERSION',
'assert',
'collectgarbage',
'dofile',
'error',
'getfenv',
'getmetatable',
'ipairs',
'load',
'loadfile',
'loadstring',
'next',
'pairs',
'pcall',
'print',
'rawequal',
'rawget',
'rawset',
'select',
'setfenv',
'setmetatable',
'tonumber',
'tostring',
'type',
'unpack',
'xpcall'),
'coroutine': ('coroutine.create',
'coroutine.resume',
'coroutine.running',
'coroutine.status',
'coroutine.wrap',
'coroutine.yield'),
'debug': ('debug.debug',
'debug.getfenv',
'debug.gethook',
'debug.getinfo',
'debug.getlocal',
'debug.getmetatable',
'debug.getregistry',
'debug.getupvalue',
'debug.setfenv',
'debug.sethook',
'debug.setlocal',
'debug.setmetatable',
'debug.setupvalue',
'debug.traceback'),
'io': ('io.close',
'io.flush',
'io.input',
'io.lines',
'io.open',
'io.output',
'io.popen',
'io.read',
'io.tmpfile',
'io.type',
'io.write'),
'math': ('math.abs',
'math.acos',
'math.asin',
'math.atan2',
'math.atan',
'math.ceil',
'math.cosh',
'math.cos',
'math.deg',
'math.exp',
'math.floor',
'math.fmod',
'math.frexp',
'math.huge',
'math.ldexp',
'math.log10',
'math.log',
'math.max',
'math.min',
'math.modf',
'math.pi',
'math.pow',
'math.rad',
'math.random',
'math.randomseed',
'math.sinh',
'math.sin',
'math.sqrt',
'math.tanh',
'math.tan'),
'modules': ('module',
'require',
'package.cpath',
'package.loaded',
'package.loadlib',
'package.path',
'package.preload',
'package.seeall'),
'os': ('os.clock',
'os.date',
'os.difftime',
'os.execute',
'os.exit',
'os.getenv',
'os.remove',
'os.rename',
'os.setlocale',
'os.time',
'os.tmpname'),
'string': ('string.byte',
'string.char',
'string.dump',
'string.find',
'string.format',
'string.gmatch',
'string.gsub',
'string.len',
'string.lower',
'string.match',
'string.rep',
'string.reverse',
'string.sub',
'string.upper'),
'table': ('table.concat',
'table.insert',
'table.maxn',
'table.remove',
'table.sort')}
if __name__ == '__main__':
import re
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
import pprint
# You can't generally find out what module a function belongs to if you
# have only its name. Because of this, here are some callback functions
# that recognize whether a given function belongs to a specific module.
def module_callbacks():
def is_in_coroutine_module(name):
return name.startswith('coroutine.')
def is_in_modules_module(name):
if name in ['require', 'module'] or name.startswith('package'):
return True
else:
return False
def is_in_string_module(name):
return name.startswith('string.')
def is_in_table_module(name):
return name.startswith('table.')
def is_in_math_module(name):
return name.startswith('math')
def is_in_io_module(name):
return name.startswith('io.')
def is_in_os_module(name):
return name.startswith('os.')
def is_in_debug_module(name):
return name.startswith('debug.')
return {'coroutine': is_in_coroutine_module,
'modules': is_in_modules_module,
'string': is_in_string_module,
'table': is_in_table_module,
'math': is_in_math_module,
'io': is_in_io_module,
'os': is_in_os_module,
'debug': is_in_debug_module}
def get_newest_version():
f = urlopen('http://www.lua.org/manual/')
r = re.compile(r'^<A HREF="(\d\.\d)/">Lua \1</A>')
for line in f:
m = r.match(line)
if m is not None:
return m.groups()[0]
def get_lua_functions(version):
f = urlopen('http://www.lua.org/manual/%s/' % version)
r = re.compile(r'^<A HREF="manual.html#pdf-(.+)">\1</A>')
functions = []
for line in f:
m = r.match(line)
if m is not None:
functions.append(m.groups()[0])
return functions
def get_function_module(name):
for mod, cb in module_callbacks().items():
if cb(name):
return mod
if '.' in name:
return name.split('.')[0]
else:
return 'basic'
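# Illustrative examples (not part of the original source):
#     get_function_module('string.len')  # -> 'string' (via callback)
#     get_function_module('assert')      # -> 'basic'  (no dot, no callback)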
def regenerate(filename, modules):
with open(filename) as fp:
content = fp.read()
header = content[:content.find('MODULES = {')]
footer = content[content.find("if __name__ == '__main__':"):]
with open(filename, 'w') as fp:
fp.write(header)
fp.write('MODULES = %s\n\n' % pprint.pformat(modules))
fp.write(footer)
def run():
version = get_newest_version()
print('> Downloading function index for Lua %s' % version)
functions = get_lua_functions(version)
print('> %d functions found:' % len(functions))
modules = {}
for full_function_name in functions:
print('>> %s' % full_function_name)
m = get_function_module(full_function_name)
modules.setdefault(m, []).append(full_function_name)
regenerate(__file__, modules)
run()


@@ -1,413 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers._mapping
~~~~~~~~~~~~~~~~~~~~~~~~
Lexer mapping definitions. This file is generated by itself. Every time
you change something in a builtin lexer definition, run this script from
the lexers folder to update it.
Do not alter the LEXERS dictionary by hand.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
LEXERS = {
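# (Descriptive note, not part of the original source.) Each value is a
# 5-tuple: (module name, display name, aliases, filename globs, mimetypes),
# matching the unpacking done in pygments/lexers/__init__.py above.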
'ABAPLexer': ('pygments.lexers.business', 'ABAP', ('abap',), ('*.abap',), ('text/x-abap',)),
'APLLexer': ('pygments.lexers.apl', 'APL', ('apl',), ('*.apl',), ()),
'ActionScript3Lexer': ('pygments.lexers.actionscript', 'ActionScript 3', ('as3', 'actionscript3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
'ActionScriptLexer': ('pygments.lexers.actionscript', 'ActionScript', ('as', 'actionscript'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
'AdaLexer': ('pygments.lexers.pascal', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
'AgdaLexer': ('pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
'AlloyLexer': ('pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)),
'AmbientTalkLexer': ('pygments.lexers.ambient', 'AmbientTalk', ('at', 'ambienttalk', 'ambienttalk/2'), ('*.at',), ('text/x-ambienttalk',)),
'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-as', 'antlr-actionscript'), ('*.G', '*.g'), ()),
'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
'AntlrJavaLexer': ('pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
'AntlrLexer': ('pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
'AntlrObjectiveCLexer': ('pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
'AntlrPerlLexer': ('pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
'AntlrPythonLexer': ('pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
'AntlrRubyLexer': ('pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
'ApacheConfLexer': ('pygments.lexers.configs', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
'AppleScriptLexer': ('pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()),
'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
'AsymptoteLexer': ('pygments.lexers.graphics', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
'AutoItLexer': ('pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
'AutohotkeyLexer': ('pygments.lexers.automation', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
'AwkLexer': ('pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
'BBCodeLexer': ('pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
'BaseMakefileLexer': ('pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()),
'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'shell'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript')),
'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console',), ('*.sh-session',), ('application/x-shell-session',)),
'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('bat', 'batch', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
'BefungeLexer': ('pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
'BlitzBasicLexer': ('pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
'BlitzMaxLexer': ('pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
'BrainfuckLexer': ('pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
'BroLexer': ('pygments.lexers.dsls', 'Bro', ('bro',), ('*.bro',), ()),
'BugsLexer': ('pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
'CLexer': ('pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc'), ('text/x-chdr', 'text/x-csrc')),
'CMakeLexer': ('pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
'CObjdumpLexer': ('pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
'CSharpAspxLexer': ('pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'CSharpLexer': ('pygments.lexers.dotnet', 'C#', ('csharp', 'c#'), ('*.cs',), ('text/x-csharp',)),
'Ca65Lexer': ('pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()),
'CbmBasicV2Lexer': ('pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
'Cfengine3Lexer': ('pygments.lexers.configs', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
'ChaiscriptLexer': ('pygments.lexers.scripting', 'ChaiScript', ('chai', 'chaiscript'), ('*.chai',), ('text/x-chaiscript', 'application/x-chaiscript')),
'ChapelLexer': ('pygments.lexers.chapel', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()),
'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('js+cheetah', 'javascript+cheetah', 'js+spitfire', 'javascript+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
'CirruLexer': ('pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru', '*.cr'), ('text/x-cirru',)),
'ClayLexer': ('pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
'ClojureScriptLexer': ('pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
'CobolFreeformatLexer': ('pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
'CobolLexer': ('pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
'CoffeeScriptLexer': ('pygments.lexers.javascript', 'CoffeeScript', ('coffee-script', 'coffeescript', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
'ColdfusionCFCLexer': ('pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()),
'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)),
'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
'CommonLispLexer': ('pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp', 'elisp', 'emacs', 'emacs-lisp'), ('*.cl', '*.lisp', '*.el'), ('text/x-common-lisp',)),
'CoqLexer': ('pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
'CppLexer': ('pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')),
'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
'CrocLexer': ('pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
'CryptolLexer': ('pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)),
'CssDjangoLexer': ('pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), (), ('text/css+django', 'text/css+jinja')),
'CssErbLexer': ('pygments.lexers.templates', 'CSS+Ruby', ('css+erb', 'css+ruby'), (), ('text/css+ruby',)),
'CssGenshiLexer': ('pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
'CssLexer': ('pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)),
'CssPhpLexer': ('pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
'CssSmartyLexer': ('pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
'CudaLexer': ('pygments.lexers.c_like', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
'CypherLexer': ('pygments.lexers.graph', 'Cypher', ('cypher',), ('*.cyp', '*.cypher'), ()),
'CythonLexer': ('pygments.lexers.python', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
'DLexer': ('pygments.lexers.d', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
'DarcsPatchLexer': ('pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
'DartLexer': ('pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
'DebianControlLexer': ('pygments.lexers.installers', 'Debian Control file', ('control', 'debcontrol'), ('control',), ()),
'DelphiLexer': ('pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas',), ('text/x-pascal',)),
'DgLexer': ('pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
'DiffLexer': ('pygments.lexers.diff', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
'DjangoLexer': ('pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
'DockerLexer': ('pygments.lexers.configs', 'Docker', ('docker', 'dockerfile'), ('Dockerfile', '*.docker'), ('text/x-dockerfile-config',)),
'DtdLexer': ('pygments.lexers.html', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
'DuelLexer': ('pygments.lexers.webmisc', 'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
'DylanConsoleLexer': ('pygments.lexers.dylan', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
'DylanLexer': ('pygments.lexers.dylan', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
'DylanLidLexer': ('pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
'ECLLexer': ('pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
'ECLexer': ('pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
'EbnfLexer': ('pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
'EiffelLexer': ('pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)),
'ElixirConsoleLexer': ('pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
'ElixirLexer': ('pygments.lexers.erlang', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.exs'), ('text/x-elixir',)),
'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
'ErlangLexer': ('pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
'ErlangShellLexer': ('pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
'EvoqueHtmlLexer': ('pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
'FSharpLexer': ('pygments.lexers.dotnet', 'FSharp', ('fsharp',), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
'FactorLexer': ('pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
'FancyLexer': ('pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
'FantomLexer': ('pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
'FelixLexer': ('pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
'FortranLexer': ('pygments.lexers.fortran', 'Fortran', ('fortran',), ('*.f', '*.f90', '*.F', '*.F90'), ('text/x-fortran',)),
'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()),
'GAPLexer': ('pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()),
'GLShaderLexer': ('pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
'GenshiTextLexer': ('pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
'GettextLexer': ('pygments.lexers.textfmts', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
'GherkinLexer': ('pygments.lexers.testing', 'Gherkin', ('cucumber', 'gherkin'), ('*.feature',), ('text/x-gherkin',)),
'GnuplotLexer': ('pygments.lexers.graphics', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
'GoLexer': ('pygments.lexers.go', 'Go', ('go',), ('*.go',), ('text/x-gosrc',)),
'GoloLexer': ('pygments.lexers.jvm', 'Golo', ('golo',), ('*.golo',), ()),
'GoodDataCLLexer': ('pygments.lexers.business', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
'GosuLexer': ('pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
'GosuTemplateLexer': ('pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
'GroffLexer': ('pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1234567]', '*.man'), ('application/x-troff', 'text/troff')),
'GroovyLexer': ('pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy',), ('text/x-groovy',)),
'HamlLexer': ('pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)),
'HandlebarsHtmlLexer': ('pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')),
'HandlebarsLexer': ('pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()),
'HaskellLexer': ('pygments.lexers.haskell', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
'HaxeLexer': ('pygments.lexers.haxe', 'Haxe', ('hx', 'haxe', 'hxsl'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), (), ('text/html+django', 'text/html+jinja')),
'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
'HtmlLexer': ('pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
'HtmlPhpLexer': ('pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
'HtmlSmartyLexer': ('pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
'HttpLexer': ('pygments.lexers.textfmts', 'HTTP', ('http',), (), ()),
'HxmlLexer': ('pygments.lexers.haxe', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
'HyLexer': ('pygments.lexers.lisp', 'Hy', ('hylang',), ('*.hy',), ('text/x-hy', 'application/x-hy')),
'HybrisLexer': ('pygments.lexers.scripting', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
'IDLLexer': ('pygments.lexers.idl', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
'IdrisLexer': ('pygments.lexers.haskell', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)),
'IgorLexer': ('pygments.lexers.igor', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
'Inform6Lexer': ('pygments.lexers.int_fiction', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()),
'Inform6TemplateLexer': ('pygments.lexers.int_fiction', 'Inform 6 template', ('i6t',), ('*.i6t',), ()),
'Inform7Lexer': ('pygments.lexers.int_fiction', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()),
'IniLexer': ('pygments.lexers.configs', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg'), ('text/x-ini',)),
'IoLexer': ('pygments.lexers.iolang', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
'IrcLogsLexer': ('pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
'IsabelleLexer': ('pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)),
'JadeLexer': ('pygments.lexers.html', 'Jade', ('jade',), ('*.jade',), ('text/x-jade',)),
'JagsLexer': ('pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
'JasminLexer': ('pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()),
'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
'JavascriptDjangoLexer': ('pygments.lexers.templates', 'JavaScript+Django/Jinja', ('js+django', 'javascript+django', 'js+jinja', 'javascript+jinja'), (), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
'JavascriptErbLexer': ('pygments.lexers.templates', 'JavaScript+Ruby', ('js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
'JavascriptGenshiLexer': ('pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
'JavascriptLexer': ('pygments.lexers.javascript', 'JavaScript', ('js', 'javascript'), ('*.js',), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
'JavascriptPhpLexer': ('pygments.lexers.templates', 'JavaScript+PHP', ('js+php', 'javascript+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('js+smarty', 'javascript+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
'JsonLdLexer': ('pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)),
'JsonLexer': ('pygments.lexers.data', 'JSON', ('json',), ('*.json',), ('application/json',)),
'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
'JuliaConsoleLexer': ('pygments.lexers.julia', 'Julia console', ('jlcon',), (), ()),
'JuliaLexer': ('pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
'KalLexer': ('pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')),
'KconfigLexer': ('pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
'KokaLexer': ('pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
'KotlinLexer': ('pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt',), ('text/x-kotlin',)),
'LSLLexer': ('pygments.lexers.scripting', 'LSL', ('lsl',), ('*.lsl',), ('text/x-lsl',)),
'LassoCssLexer': ('pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
'LassoHtmlLexer': ('pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
'LassoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Lasso', ('js+lasso', 'javascript+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
'LassoLexer': ('pygments.lexers.javascript', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
'LassoXmlLexer': ('pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
'LeanLexer': ('pygments.lexers.theorem', 'Lean', ('lean',), ('*.lean',), ('text/x-lean',)),
'LighttpdConfLexer': ('pygments.lexers.configs', 'Lighttpd configuration file', ('lighty', 'lighttpd'), (), ('text/x-lighttpd-conf',)),
'LimboLexer': ('pygments.lexers.inferno', 'Limbo', ('limbo',), ('*.b',), ('text/limbo',)),
'LiquidLexer': ('pygments.lexers.templates', 'liquid', ('liquid',), ('*.liquid',), ()),
'LiterateAgdaLexer': ('pygments.lexers.haskell', 'Literate Agda', ('lagda', 'literate-agda'), ('*.lagda',), ('text/x-literate-agda',)),
'LiterateCryptolLexer': ('pygments.lexers.haskell', 'Literate Cryptol', ('lcry', 'literate-cryptol', 'lcryptol'), ('*.lcry',), ('text/x-literate-cryptol',)),
'LiterateHaskellLexer': ('pygments.lexers.haskell', 'Literate Haskell', ('lhs', 'literate-haskell', 'lhaskell'), ('*.lhs',), ('text/x-literate-haskell',)),
'LiterateIdrisLexer': ('pygments.lexers.haskell', 'Literate Idris', ('lidr', 'literate-idris', 'lidris'), ('*.lidr',), ('text/x-literate-idris',)),
'LiveScriptLexer': ('pygments.lexers.javascript', 'LiveScript', ('live-script', 'livescript'), ('*.ls',), ('text/livescript',)),
'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
'LogosLexer': ('pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
'LogtalkLexer': ('pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)),
'LuaLexer': ('pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
'MOOCodeLexer': ('pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
'MakefileLexer': ('pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
'MakoCssLexer': ('pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
'MakoHtmlLexer': ('pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
'MakoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Mako', ('js+mako', 'javascript+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
'MaqlLexer': ('pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
'MaskLexer': ('pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)),
'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
'MathematicaLexer': ('pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')),
'MatlabLexer': ('pygments.lexers.matlab', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
'MatlabSessionLexer': ('pygments.lexers.matlab', 'Matlab session', ('matlabsession',), (), ()),
'MiniDLexer': ('pygments.lexers.d', 'MiniD', ('minid',), (), ('text/x-minidsrc',)),
'ModelicaLexer': ('pygments.lexers.modeling', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
'Modula2Lexer': ('pygments.lexers.pascal', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
'MoinWikiLexer': ('pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
'MonkeyLexer': ('pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
'MoonScriptLexer': ('pygments.lexers.scripting', 'MoonScript', ('moon', 'moonscript'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
'MozPreprocCssLexer': ('pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()),
'MozPreprocHashLexer': ('pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()),
'MozPreprocJavascriptLexer': ('pygments.lexers.markup', 'Javascript+mozpreproc', ('javascript+mozpreproc',), ('*.js.in',), ()),
'MozPreprocPercentLexer': ('pygments.lexers.markup', 'mozpercentpreproc', ('mozpercentpreproc',), (), ()),
'MozPreprocXulLexer': ('pygments.lexers.markup', 'XUL+mozpreproc', ('xul+mozpreproc',), ('*.xul.in',), ()),
'MqlLexer': ('pygments.lexers.c_like', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)),
'MscgenLexer': ('pygments.lexers.dsls', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
'MuPADLexer': ('pygments.lexers.algebra', 'MuPAD', ('mupad',), ('*.mu',), ()),
'MxmlLexer': ('pygments.lexers.actionscript', 'MXML', ('mxml',), ('*.mxml',), ()),
'MySqlLexer': ('pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
'MyghtyCssLexer': ('pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
'MyghtyHtmlLexer': ('pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
'MyghtyJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Myghty', ('js+myghty', 'javascript+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+myghty')),
'MyghtyLexer': ('pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
'NSISLexer': ('pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
'NasmObjdumpLexer': ('pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)),
'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
'NesCLexer': ('pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
'NewLispLexer': ('pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl'), ('text/x-newlisp', 'application/x-newlisp')),
'NewspeakLexer': ('pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
'NginxConfLexer': ('pygments.lexers.configs', 'Nginx configuration file', ('nginx',), (), ('text/x-nginx-conf',)),
'NimrodLexer': ('pygments.lexers.nimrod', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nimrod',)),
'NitLexer': ('pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()),
'NixLexer': ('pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)),
'NumPyLexer': ('pygments.lexers.python', 'NumPy', ('numpy',), (), ()),
'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
'ObjectiveCLexer': ('pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
'ObjectiveCppLexer': ('pygments.lexers.objective', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
'ObjectiveJLexer': ('pygments.lexers.javascript', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
'OcamlLexer': ('pygments.lexers.ml', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
'OctaveLexer': ('pygments.lexers.matlab', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
'OocLexer': ('pygments.lexers.ooc', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
'OpaLexer': ('pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
'OpenEdgeLexer': ('pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
'PanLexer': ('pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()),
'PawnLexer': ('pygments.lexers.pawn', 'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)),
'Perl6Lexer': ('pygments.lexers.perl', 'Perl6', ('perl6', 'pl6'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t'), ('text/x-perl6', 'application/x-perl6')),
'PerlLexer': ('pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t'), ('text/x-perl', 'application/x-perl')),
'PhpLexer': ('pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
'PigLexer': ('pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)),
'PikeLexer': ('pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)),
'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
'PostScriptLexer': ('pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
'PovrayLexer': ('pygments.lexers.graphics', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
'PrologLexer': ('pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
'PropertiesLexer': ('pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
'ProtoBufLexer': ('pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
'PuppetLexer': ('pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()),
'PyPyLogLexer': ('pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
'Python3Lexer': ('pygments.lexers.python', 'Python 3', ('python3', 'py3'), (), ('text/x-python3', 'application/x-python3')),
'Python3TracebackLexer': ('pygments.lexers.python', 'Python 3.0 Traceback', ('py3tb',), ('*.py3tb',), ('text/x-python3-traceback',)),
'PythonConsoleLexer': ('pygments.lexers.python', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
'PythonLexer': ('pygments.lexers.python', 'Python', ('python', 'py', 'sage'), ('*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac', '*.sage'), ('text/x-python', 'application/x-python')),
'PythonTracebackLexer': ('pygments.lexers.python', 'Python Traceback', ('pytb',), ('*.pytb',), ('text/x-python-traceback',)),
'QBasicLexer': ('pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)),
'QmlLexer': ('pygments.lexers.webmisc', 'QML', ('qml',), ('*.qml',), ('application/x-qml',)),
'RConsoleLexer': ('pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
'RPMSpecLexer': ('pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
'RacketLexer': ('pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')),
'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
'RagelCppLexer': ('pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
'RagelDLexer': ('pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
'RagelEmbeddedLexer': ('pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
'RagelJavaLexer': ('pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
'RagelLexer': ('pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
'RagelObjectiveCLexer': ('pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
'RagelRubyLexer': ('pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
'RawTokenLexer': ('pygments.lexers.special', 'Raw token data', ('raw',), (), ('application/x-pygments-tokens',)),
'RdLexer': ('pygments.lexers.r', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
'RebolLexer': ('pygments.lexers.rebol', 'REBOL', ('rebol',), ('*.r', '*.r3', '*.reb'), ('text/x-rebol',)),
'RedLexer': ('pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')),
'RedcodeLexer': ('pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()),
'RegeditLexer': ('pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
'ResourceLexer': ('pygments.lexers.resource', 'ResourceBundle', ('resource', 'resourcebundle'), ('*.txt',), ()),
'RexxLexer': ('pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
'RobotFrameworkLexer': ('pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.txt', '*.robot'), ('text/x-robotframework',)),
'RqlLexer': ('pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)),
'RslLexer': ('pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)),
'RstLexer': ('pygments.lexers.markup', 'reStructuredText', ('rst', 'rest', 'restructuredtext'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
'RubyConsoleLexer': ('pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
'RubyLexer': ('pygments.lexers.ruby', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby'), ('text/x-ruby', 'application/x-ruby')),
'RustLexer': ('pygments.lexers.rust', 'Rust', ('rust',), ('*.rs',), ('text/x-rustsrc',)),
'SLexer': ('pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
'SMLLexer': ('pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
'SassLexer': ('pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)),
'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
'ScamlLexer': ('pygments.lexers.html', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)),
'SchemeLexer': ('pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
'ScilabLexer': ('pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
'ScssLexer': ('pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
'ShellSessionLexer': ('pygments.lexers.shell', 'Shell Session', ('shell-session',), ('*.shell-session',), ('application/x-sh-session',)),
'SlimLexer': ('pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)),
'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
'SmalltalkLexer': ('pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
'SnobolLexer': ('pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
'SourcePawnLexer': ('pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
'SourcesListLexer': ('pygments.lexers.installers', 'Debian Sourcelist', ('sourceslist', 'sources.list', 'debsources'), ('sources.list',), ()),
'SparqlLexer': ('pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)),
'SqlLexer': ('pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
'SqliteConsoleLexer': ('pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
'SquidConfLexer': ('pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
'StanLexer': ('pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()),
'SwiftLexer': ('pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)),
'SwigLexer': ('pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)),
'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
'Tads3Lexer': ('pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()),
'TclLexer': ('pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
'TexLexer': ('pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
'TodotxtLexer': ('pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)),
'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
'TwigHtmlLexer': ('pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)),
'TwigLexer': ('pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)),
'TypeScriptLexer': ('pygments.lexers.javascript', 'TypeScript', ('ts',), ('*.ts',), ('text/x-typescript',)),
'UrbiscriptLexer': ('pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
'VCTreeStatusLexer': ('pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()),
'VGLLexer': ('pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()),
'ValaLexer': ('pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
'VbNetAspxLexer': ('pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'VbNetLexer': ('pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
'VelocityHtmlLexer': ('pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
'VelocityLexer': ('pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
'VelocityXmlLexer': ('pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
'VerilogLexer': ('pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
'VhdlLexer': ('pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
'VimLexer': ('pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
'XQueryLexer': ('pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')),
'XmlErbLexer': ('pygments.lexers.templates', 'XML+Ruby', ('xml+erb', 'xml+ruby'), (), ('application/xml+ruby',)),
'XmlLexer': ('pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
'XmlPhpLexer': ('pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
'XmlSmartyLexer': ('pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
'XsltLexer': ('pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
'XtendLexer': ('pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
'YamlJinjaLexer': ('pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls',), ('text/x-yaml+jinja', 'text/x-sls')),
'YamlLexer': ('pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
'ZephirLexer': ('pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()),
}
if __name__ == '__main__':
    import sys
    import os

    # lookup lexers
    found_lexers = []
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
    for root, dirs, files in os.walk('.'):
        for filename in files:
            if filename.endswith('.py') and not filename.startswith('_'):
                module_name = 'pygments.lexers%s.%s' % (
                    root[1:].replace('/', '.'), filename[:-3])
                print(module_name)
                module = __import__(module_name, None, None, [''])
                for lexer_name in module.__all__:
                    lexer = getattr(module, lexer_name)
                    found_lexers.append(
                        '%r: %r' % (lexer_name,
                                    (module_name,
                                     lexer.name,
                                     tuple(lexer.aliases),
                                     tuple(lexer.filenames),
                                     tuple(lexer.mimetypes))))
    # sort them to make the diff minimal
    found_lexers.sort()

    # extract useful sourcecode from this file
    with open(__file__) as fp:
        content = fp.read()
    header = content[:content.find('LEXERS = {')]
    footer = content[content.find("if __name__ == '__main__':"):]

    # write new file
    with open(__file__, 'w') as fp:
        fp.write(header)
        fp.write('LEXERS = {\n    %s,\n}\n\n' % ',\n    '.join(found_lexers))
        fp.write(footer)
    print('=== %d lexers processed.' % len(found_lexers))
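
Each value in the LEXERS mapping above is a (module path, display name, aliases, filenames, mimetypes) tuple, which lets Pygments defer importing a lexer module until one of its aliases is actually requested. A minimal sketch of that lookup, assuming only the LEXERS dict above (find_lexer_class_by_alias is a hypothetical helper; the real public entry point is pygments.lexers.get_lexer_by_name):

import importlib

def find_lexer_class_by_alias(alias, lexers=LEXERS):
    # Scan the mapping tuples; import the owning module only on a hit.
    for class_name, (module_name, _name, aliases, _files, _mimes) in lexers.items():
        if alias in aliases:
            module = importlib.import_module(module_name)
            return getattr(module, class_name)
    raise ValueError('no lexer found for alias %r' % alias)

# e.g. find_lexer_class_by_alias('py3') returns the Python3Lexer class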

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,620 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers._postgres_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Self-updating data files for PostgreSQL lexer.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Autogenerated: please edit them if you like wasting your time.
KEYWORDS = (
'ABORT',
'ABSOLUTE',
'ACCESS',
'ACTION',
'ADD',
'ADMIN',
'AFTER',
'AGGREGATE',
'ALL',
'ALSO',
'ALTER',
'ALWAYS',
'ANALYSE',
'ANALYZE',
'AND',
'ANY',
'ARRAY',
'AS',
'ASC',
'ASSERTION',
'ASSIGNMENT',
'ASYMMETRIC',
'AT',
'ATTRIBUTE',
'AUTHORIZATION',
'BACKWARD',
'BEFORE',
'BEGIN',
'BETWEEN',
'BIGINT',
'BINARY',
'BIT',
'BOOLEAN',
'BOTH',
'BY',
'CACHE',
'CALLED',
'CASCADE',
'CASCADED',
'CASE',
'CAST',
'CATALOG',
'CHAIN',
'CHAR',
'CHARACTER',
'CHARACTERISTICS',
'CHECK',
'CHECKPOINT',
'CLASS',
'CLOSE',
'CLUSTER',
'COALESCE',
'COLLATE',
'COLLATION',
'COLUMN',
'COMMENT',
'COMMENTS',
'COMMIT',
'COMMITTED',
'CONCURRENTLY',
'CONFIGURATION',
'CONNECTION',
'CONSTRAINT',
'CONSTRAINTS',
'CONTENT',
'CONTINUE',
'CONVERSION',
'COPY',
'COST',
'CREATE',
'CROSS',
'CSV',
'CURRENT',
'CURRENT_CATALOG',
'CURRENT_DATE',
'CURRENT_ROLE',
'CURRENT_SCHEMA',
'CURRENT_TIME',
'CURRENT_TIMESTAMP',
'CURRENT_USER',
'CURSOR',
'CYCLE',
'DATA',
'DATABASE',
'DAY',
'DEALLOCATE',
'DEC',
'DECIMAL',
'DECLARE',
'DEFAULT',
'DEFAULTS',
'DEFERRABLE',
'DEFERRED',
'DEFINER',
'DELETE',
'DELIMITER',
'DELIMITERS',
'DESC',
'DICTIONARY',
'DISABLE',
'DISCARD',
'DISTINCT',
'DO',
'DOCUMENT',
'DOMAIN',
'DOUBLE',
'DROP',
'EACH',
'ELSE',
'ENABLE',
'ENCODING',
'ENCRYPTED',
'END',
'ENUM',
'ESCAPE',
'EVENT',
'EXCEPT',
'EXCLUDE',
'EXCLUDING',
'EXCLUSIVE',
'EXECUTE',
'EXISTS',
'EXPLAIN',
'EXTENSION',
'EXTERNAL',
'EXTRACT',
'FALSE',
'FAMILY',
'FETCH',
'FILTER',
'FIRST',
'FLOAT',
'FOLLOWING',
'FOR',
'FORCE',
'FOREIGN',
'FORWARD',
'FREEZE',
'FROM',
'FULL',
'FUNCTION',
'FUNCTIONS',
'GLOBAL',
'GRANT',
'GRANTED',
'GREATEST',
'GROUP',
'HANDLER',
'HAVING',
'HEADER',
'HOLD',
'HOUR',
'IDENTITY',
'IF',
'ILIKE',
'IMMEDIATE',
'IMMUTABLE',
'IMPLICIT',
'IN',
'INCLUDING',
'INCREMENT',
'INDEX',
'INDEXES',
'INHERIT',
'INHERITS',
'INITIALLY',
'INLINE',
'INNER',
'INOUT',
'INPUT',
'INSENSITIVE',
'INSERT',
'INSTEAD',
'INT',
'INTEGER',
'INTERSECT',
'INTERVAL',
'INTO',
'INVOKER',
'IS',
'ISNULL',
'ISOLATION',
'JOIN',
'KEY',
'LABEL',
'LANGUAGE',
'LARGE',
'LAST',
'LATERAL',
'LC_COLLATE',
'LC_CTYPE',
'LEADING',
'LEAKPROOF',
'LEAST',
'LEFT',
'LEVEL',
'LIKE',
'LIMIT',
'LISTEN',
'LOAD',
'LOCAL',
'LOCALTIME',
'LOCALTIMESTAMP',
'LOCATION',
'LOCK',
'MAPPING',
'MATCH',
'MATERIALIZED',
'MAXVALUE',
'MINUTE',
'MINVALUE',
'MODE',
'MONTH',
'MOVE',
'NAME',
'NAMES',
'NATIONAL',
'NATURAL',
'NCHAR',
'NEXT',
'NO',
'NONE',
'NOT',
'NOTHING',
'NOTIFY',
'NOTNULL',
'NOWAIT',
'NULL',
'NULLIF',
'NULLS',
'NUMERIC',
'OBJECT',
'OF',
'OFF',
'OFFSET',
'OIDS',
'ON',
'ONLY',
'OPERATOR',
'OPTION',
'OPTIONS',
'OR',
'ORDER',
'ORDINALITY',
'OUT',
'OUTER',
'OVER',
'OVERLAPS',
'OVERLAY',
'OWNED',
'OWNER',
'PARSER',
'PARTIAL',
'PARTITION',
'PASSING',
'PASSWORD',
'PLACING',
'PLANS',
'POLICY',
'POSITION',
'PRECEDING',
'PRECISION',
'PREPARE',
'PREPARED',
'PRESERVE',
'PRIMARY',
'PRIOR',
'PRIVILEGES',
'PROCEDURAL',
'PROCEDURE',
'PROGRAM',
'QUOTE',
'RANGE',
'READ',
'REAL',
'REASSIGN',
'RECHECK',
'RECURSIVE',
'REF',
'REFERENCES',
'REFRESH',
'REINDEX',
'RELATIVE',
'RELEASE',
'RENAME',
'REPEATABLE',
'REPLACE',
'REPLICA',
'RESET',
'RESTART',
'RESTRICT',
'RETURNING',
'RETURNS',
'REVOKE',
'RIGHT',
'ROLE',
'ROLLBACK',
'ROW',
'ROWS',
'RULE',
'SAVEPOINT',
'SCHEMA',
'SCROLL',
'SEARCH',
'SECOND',
'SECURITY',
'SELECT',
'SEQUENCE',
'SEQUENCES',
'SERIALIZABLE',
'SERVER',
'SESSION',
'SESSION_USER',
'SET',
'SETOF',
'SHARE',
'SHOW',
'SIMILAR',
'SIMPLE',
'SMALLINT',
'SNAPSHOT',
'SOME',
'STABLE',
'STANDALONE',
'START',
'STATEMENT',
'STATISTICS',
'STDIN',
'STDOUT',
'STORAGE',
'STRICT',
'STRIP',
'SUBSTRING',
'SYMMETRIC',
'SYSID',
'SYSTEM',
'TABLE',
'TABLES',
'TABLESPACE',
'TEMP',
'TEMPLATE',
'TEMPORARY',
'TEXT',
'THEN',
'TIME',
'TIMESTAMP',
'TO',
'TRAILING',
'TRANSACTION',
'TREAT',
'TRIGGER',
'TRIM',
'TRUE',
'TRUNCATE',
'TRUSTED',
'TYPE',
'TYPES',
'UNBOUNDED',
'UNCOMMITTED',
'UNENCRYPTED',
'UNION',
'UNIQUE',
'UNKNOWN',
'UNLISTEN',
'UNLOGGED',
'UNTIL',
'UPDATE',
'USER',
'USING',
'VACUUM',
'VALID',
'VALIDATE',
'VALIDATOR',
'VALUE',
'VALUES',
'VARCHAR',
'VARIADIC',
'VARYING',
'VERBOSE',
'VERSION',
'VIEW',
'VIEWS',
'VOLATILE',
'WHEN',
'WHERE',
'WHITESPACE',
'WINDOW',
'WITH',
'WITHIN',
'WITHOUT',
'WORK',
'WRAPPER',
'WRITE',
'XML',
'XMLATTRIBUTES',
'XMLCONCAT',
'XMLELEMENT',
'XMLEXISTS',
'XMLFOREST',
'XMLPARSE',
'XMLPI',
'XMLROOT',
'XMLSERIALIZE',
'YEAR',
'YES',
'ZONE',
)
DATATYPES = (
'bigint',
'bigserial',
'bit',
'bit varying',
'bool',
'boolean',
'box',
'bytea',
'char',
'character',
'character varying',
'cidr',
'circle',
'date',
'decimal',
'double precision',
'float4',
'float8',
'inet',
'int',
'int2',
'int4',
'int8',
'integer',
'interval',
'json',
'jsonb',
'line',
'lseg',
'macaddr',
'money',
'numeric',
'path',
'pg_lsn',
'point',
'polygon',
'real',
'serial',
'serial2',
'serial4',
'serial8',
'smallint',
'smallserial',
'text',
'time',
'timestamp',
'timestamptz',
'timetz',
'tsquery',
'tsvector',
'txid_snapshot',
'uuid',
'varbit',
'varchar',
'with time zone',
'without time zone',
'xml',
)
PSEUDO_TYPES = (
'any',
'anyelement',
'anyarray',
'anynonarray',
'anyenum',
'anyrange',
'cstring',
'internal',
'language_handler',
'fdw_handler',
'record',
'trigger',
'void',
'opaque',
)
# Remove pseudo types that also appear in KEYWORDS (e.g. 'trigger', since TRIGGER is a keyword)
PSEUDO_TYPES = tuple(sorted(set(PSEUDO_TYPES) - set(map(str.lower, KEYWORDS))))
PLPGSQL_KEYWORDS = (
'ALIAS', 'CONSTANT', 'DIAGNOSTICS', 'ELSIF', 'EXCEPTION', 'EXIT',
'FOREACH', 'GET', 'LOOP', 'NOTICE', 'OPEN', 'PERFORM', 'QUERY', 'RAISE',
'RETURN', 'REVERSE', 'SQLSTATE', 'WHILE',
)
if __name__ == '__main__':
    import re
    try:
        from urllib import urlopen
    except ImportError:
        from urllib.request import urlopen
    from pygments.util import format_lines

    # One man's constant is another man's variable.
    SOURCE_URL = 'https://github.com/postgres/postgres/raw/master'
    KEYWORDS_URL = SOURCE_URL + '/doc/src/sgml/keywords.sgml'
    DATATYPES_URL = SOURCE_URL + '/doc/src/sgml/datatype.sgml'

    def update_myself():
        data_file = list(urlopen(DATATYPES_URL))
        datatypes = parse_datatypes(data_file)
        pseudos = parse_pseudos(data_file)
        keywords = parse_keywords(urlopen(KEYWORDS_URL))

        update_consts(__file__, 'DATATYPES', datatypes)
        update_consts(__file__, 'PSEUDO_TYPES', pseudos)
        update_consts(__file__, 'KEYWORDS', keywords)

    def parse_keywords(f):
        kw = []
        for m in re.finditer(
                r'\s*<entry><token>([^<]+)</token></entry>\s*'
                r'<entry>([^<]+)</entry>', f.read()):
            kw.append(m.group(1))

        if not kw:
            raise ValueError('no keyword found')

        kw.sort()
        return kw

    def parse_datatypes(f):
        dt = set()
        for line in f:
            if '<sect1' in line:
                break
            if '<entry><type>' not in line:
                continue

            # Parse a string such as
            #   time [ (<replaceable>p</replaceable>) ] [ without time zone ]
            # into types "time" and "without time zone"

            # remove all the tags
            line = re.sub("<replaceable>[^<]+</replaceable>", "", line)
            line = re.sub("<[^>]+>", "", line)

            # Drop the parts containing braces
            for tmp in [t for tmp in line.split('[')
                        for t in tmp.split(']') if "(" not in t]:
                for t in tmp.split(','):
                    t = t.strip()
                    if not t: continue
                    dt.add(" ".join(t.split()))

        dt = list(dt)
        dt.sort()
        return dt

    def parse_pseudos(f):
        dt = []
        re_start = re.compile(r'\s*<table id="datatype-pseudotypes-table">')
        re_entry = re.compile(r'\s*<entry><type>([^<]+)</></entry>')
        re_end = re.compile(r'\s*</table>')

        f = iter(f)
        for line in f:
            if re_start.match(line) is not None:
                break
        else:
            raise ValueError('pseudo datatypes table not found')

        for line in f:
            m = re_entry.match(line)
            if m is not None:
                dt.append(m.group(1))

            if re_end.match(line) is not None:
                break
        else:
            raise ValueError('end of pseudo datatypes table not found')

        if not dt:
            raise ValueError('pseudo datatypes not found')

        return dt

    def update_consts(filename, constname, content):
        with open(filename) as f:
            data = f.read()

        # Line to start/end inserting
        re_match = re.compile(r'^%s\s*=\s*\($.*?^\s*\)$' % constname, re.M | re.S)
        m = re_match.search(data)
        if not m:
            raise ValueError('Could not find existing definition for %s' %
                             (constname,))

        new_block = format_lines(constname, content)
        data = data[:m.start()] + new_block + data[m.end():]

        with open(filename, 'w') as f:
            f.write(data)

    update_myself()
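
One detail worth calling out in update_consts() above: with re.M | re.S, the '^' and '$' anchors still bind to inner line boundaries while '.*?' is free to cross them, so the pattern captures exactly one module-level tuple block. An illustrative self-check (not part of the original file):

import re

sample = "KEYWORDS = (\n    'ABORT',\n    'ZONE',\n)\n"
block = re.compile(r'^%s\s*=\s*\($.*?^\s*\)$' % 'KEYWORDS', re.M | re.S)
m = block.search(sample)
assert m is not None
assert m.group(0).startswith('KEYWORDS = (') and m.group(0).endswith(')')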


@@ -1,498 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers._stan_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names of functions for Stan used by
``pygments.lexers.math.StanLexer``. This is for Stan language version 2.4.0.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
KEYWORDS = (
'else',
'for',
'if',
'in',
'increment_log_prob',
'lp__',
'print',
'return',
'while',
)
TYPES = (
'cholesky_factor_corr',
'cholesky_factor_cov',
'corr_matrix',
'cov_matrix',
'int',
'matrix',
'ordered',
'positive_ordered',
'real',
'row_vector',
'simplex',
'unit_vector',
'vector',
'void',
)
FUNCTIONS = (
'Phi',
'Phi_approx',
'abs',
'acos',
'acosh',
'asin',
'asinh',
'atan',
'atan2',
'atanh',
'bernoulli_ccdf_log',
'bernoulli_cdf',
'bernoulli_cdf_log',
'bernoulli_log',
'bernoulli_logit_log',
'bernoulli_rng',
'bessel_first_kind',
'bessel_second_kind',
'beta_binomial_ccdf_log',
'beta_binomial_cdf',
'beta_binomial_cdf_log',
'beta_binomial_log',
'beta_binomial_rng',
'beta_ccdf_log',
'beta_cdf',
'beta_cdf_log',
'beta_log',
'beta_rng',
'binary_log_loss',
'binomial_ccdf_log',
'binomial_cdf',
'binomial_cdf_log',
'binomial_coefficient_log',
'binomial_log',
'binomial_logit_log',
'binomial_rng',
'block',
'categorical_log',
'categorical_logit_log',
'categorical_rng',
'cauchy_ccdf_log',
'cauchy_cdf',
'cauchy_cdf_log',
'cauchy_log',
'cauchy_rng',
'cbrt',
'ceil',
'chi_square_ccdf_log',
'chi_square_cdf',
'chi_square_cdf_log',
'chi_square_log',
'chi_square_rng',
'cholesky_decompose',
'col',
'cols',
'columns_dot_product',
'columns_dot_self',
'cos',
'cosh',
'crossprod',
'cumulative_sum',
'determinant',
'diag_matrix',
'diag_post_multiply',
'diag_pre_multiply',
'diagonal',
'digamma',
'dims',
'dirichlet_log',
'dirichlet_rng',
'distance',
'dot_product',
'dot_self',
'double_exponential_ccdf_log',
'double_exponential_cdf',
'double_exponential_cdf_log',
'double_exponential_log',
'double_exponential_rng',
'e',
'eigenvalues_sym',
'eigenvectors_sym',
'erf',
'erfc',
'exp',
'exp2',
'exp_mod_normal_ccdf_log',
'exp_mod_normal_cdf',
'exp_mod_normal_cdf_log',
'exp_mod_normal_log',
'exp_mod_normal_rng',
'expm1',
'exponential_ccdf_log',
'exponential_cdf',
'exponential_cdf_log',
'exponential_log',
'exponential_rng',
'fabs',
'falling_factorial',
'fdim',
'floor',
'fma',
'fmax',
'fmin',
'fmod',
'gamma_ccdf_log',
'gamma_cdf',
'gamma_cdf_log',
'gamma_log',
'gamma_p',
'gamma_q',
'gamma_rng',
'gaussian_dlm_obs_log',
'gumbel_ccdf_log',
'gumbel_cdf',
'gumbel_cdf_log',
'gumbel_log',
'gumbel_rng',
'head',
'hypergeometric_log',
'hypergeometric_rng',
'hypot',
'if_else',
'int_step',
'inv',
'inv_chi_square_ccdf_log',
'inv_chi_square_cdf',
'inv_chi_square_cdf_log',
'inv_chi_square_log',
'inv_chi_square_rng',
'inv_cloglog',
'inv_gamma_ccdf_log',
'inv_gamma_cdf',
'inv_gamma_cdf_log',
'inv_gamma_log',
'inv_gamma_rng',
'inv_logit',
'inv_sqrt',
'inv_square',
'inv_wishart_log',
'inv_wishart_rng',
'inverse',
'inverse_spd',
'lbeta',
'lgamma',
'lkj_corr_cholesky_log',
'lkj_corr_cholesky_rng',
'lkj_corr_log',
'lkj_corr_rng',
'lkj_cov_log',
'lmgamma',
'log',
'log10',
'log1m',
'log1m_exp',
'log1m_inv_logit',
'log1p',
'log1p_exp',
'log2',
'log_determinant',
'log_diff_exp',
'log_falling_factorial',
'log_inv_logit',
'log_rising_factorial',
'log_softmax',
'log_sum_exp',
'logistic_ccdf_log',
'logistic_cdf',
'logistic_cdf_log',
'logistic_log',
'logistic_rng',
'logit',
'lognormal_ccdf_log',
'lognormal_cdf',
'lognormal_cdf_log',
'lognormal_log',
'lognormal_rng',
'machine_precision',
'max',
'mdivide_left_tri_low',
'mdivide_right_tri_low',
'mean',
'min',
'modified_bessel_first_kind',
'modified_bessel_second_kind',
'multi_gp_log',
'multi_normal_cholesky_log',
'multi_normal_cholesky_rng',
'multi_normal_log',
'multi_normal_prec_log',
'multi_normal_rng',
'multi_student_t_log',
'multi_student_t_rng',
'multinomial_log',
'multinomial_rng',
'multiply_log',
'multiply_lower_tri_self_transpose',
'neg_binomial_2_log',
'neg_binomial_2_log_log',
'neg_binomial_2_log_rng',
'neg_binomial_2_rng',
'neg_binomial_ccdf_log',
'neg_binomial_cdf',
'neg_binomial_cdf_log',
'neg_binomial_log',
'neg_binomial_rng',
'negative_infinity',
'normal_ccdf_log',
'normal_cdf',
'normal_cdf_log',
'normal_log',
'normal_rng',
'not_a_number',
'ordered_logistic_log',
'ordered_logistic_rng',
'owens_t',
'pareto_ccdf_log',
'pareto_cdf',
'pareto_cdf_log',
'pareto_log',
'pareto_rng',
'pi',
'poisson_ccdf_log',
'poisson_cdf',
'poisson_cdf_log',
'poisson_log',
'poisson_log_log',
'poisson_rng',
'positive_infinity',
'pow',
'prod',
'qr_Q',
'qr_R',
'quad_form',
'quad_form_diag',
'quad_form_sym',
'rank',
'rayleigh_ccdf_log',
'rayleigh_cdf',
'rayleigh_cdf_log',
'rayleigh_log',
'rayleigh_rng',
'rep_array',
'rep_matrix',
'rep_row_vector',
'rep_vector',
'rising_factorial',
'round',
'row',
'rows',
'rows_dot_product',
'rows_dot_self',
'scaled_inv_chi_square_ccdf_log',
'scaled_inv_chi_square_cdf',
'scaled_inv_chi_square_cdf_log',
'scaled_inv_chi_square_log',
'scaled_inv_chi_square_rng',
'sd',
'segment',
'sin',
'singular_values',
'sinh',
'size',
'skew_normal_ccdf_log',
'skew_normal_cdf',
'skew_normal_cdf_log',
'skew_normal_log',
'skew_normal_rng',
'softmax',
'sort_asc',
'sort_desc',
'sort_indices_asc',
'sort_indices_desc',
'sqrt',
'sqrt2',
'square',
'squared_distance',
'step',
'student_t_ccdf_log',
'student_t_cdf',
'student_t_cdf_log',
'student_t_log',
'student_t_rng',
'sub_col',
'sub_row',
'sum',
'tail',
'tan',
'tanh',
'tcrossprod',
'tgamma',
'to_array_1d',
'to_array_2d',
'to_matrix',
'to_row_vector',
'to_vector',
'trace',
'trace_gen_quad_form',
'trace_quad_form',
'trigamma',
'trunc',
'uniform_ccdf_log',
'uniform_cdf',
'uniform_cdf_log',
'uniform_log',
'uniform_rng',
'variance',
'von_mises_log',
'von_mises_rng',
'weibull_ccdf_log',
'weibull_cdf',
'weibull_cdf_log',
'weibull_log',
'weibull_rng',
'wishart_log',
'wishart_rng',
)
DISTRIBUTIONS = (
'bernoulli',
'bernoulli_logit',
'beta',
'beta_binomial',
'binomial',
'binomial_logit',
'categorical',
'categorical_logit',
'cauchy',
'chi_square',
'dirichlet',
'double_exponential',
'exp_mod_normal',
'exponential',
'gamma',
'gaussian_dlm_obs',
'gumbel',
'hypergeometric',
'inv_chi_square',
'inv_gamma',
'inv_wishart',
'lkj_corr',
'lkj_corr_cholesky',
'lkj_cov',
'logistic',
'lognormal',
'multi_gp',
'multi_normal',
'multi_normal_cholesky',
'multi_normal_prec',
'multi_student_t',
'multinomial',
'neg_binomial',
'neg_binomial_2',
'neg_binomial_2_log',
'normal',
'ordered_logistic',
'pareto',
'poisson',
'poisson_log',
'rayleigh',
'scaled_inv_chi_square',
'skew_normal',
'student_t',
'uniform',
'von_mises',
'weibull',
'wishart',
)
RESERVED = (
'alignas',
'alignof',
'and',
'and_eq',
'asm',
'auto',
'bitand',
'bitor',
'bool',
'break',
'case',
'catch',
'char',
'char16_t',
'char32_t',
'class',
'compl',
'const',
'const_cast',
'constexpr',
'continue',
'decltype',
'default',
'delete',
'do',
'double',
'dynamic_cast',
'enum',
'explicit',
'export',
'extern',
'false',
'float',
'friend',
'fvar',
'goto',
'inline',
'int',
'long',
'mutable',
'namespace',
'new',
'noexcept',
'not',
'not_eq',
'nullptr',
'operator',
'or',
'or_eq',
'private',
'protected',
'public',
'register',
'reinterpret_cast',
'repeat',
'short',
'signed',
'sizeof',
'static',
'static_assert',
'static_cast',
'struct',
'switch',
'template',
'then',
'this',
'thread_local',
'throw',
'true',
'try',
'typedef',
'typeid',
'typename',
'union',
'unsigned',
'until',
'using',
'var',
'virtual',
'void',
'volatile',
'wchar_t',
'xor',
'xor_eq',
)
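
These tuples are meant to be folded into token rules by the Stan lexer. A minimal sketch of that wiring, assuming the names defined above; it mirrors the general words()-based pattern used throughout these lexer files rather than copying StanLexer itself:

from pygments.lexer import words
from pygments.token import Keyword, Name

# Each rule pairs a word-boundary-delimited alternation with a token type.
STAN_RULES = [
    (words(KEYWORDS, prefix=r'\b', suffix=r'\b'), Keyword),
    (words(TYPES, prefix=r'\b', suffix=r'\b'), Keyword.Type),
    (words(FUNCTIONS, prefix=r'\b', suffix=r'\b'), Name.Builtin),
]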

File diff suppressed because it is too large


@@ -1,240 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.actionscript
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for ActionScript and MXML.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, using, this, words, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['ActionScriptLexer', 'ActionScript3Lexer', 'MxmlLexer']
class ActionScriptLexer(RegexLexer):
"""
For ActionScript source code.
.. versionadded:: 0.9
"""
name = 'ActionScript'
aliases = ['as', 'actionscript']
filenames = ['*.as']
mimetypes = ['application/x-actionscript', 'text/x-actionscript',
'text/actionscript']
flags = re.DOTALL
tokens = {
'root': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'/(\\\\|\\/|[^/\n])*/[gim]*', String.Regex),
(r'[~^*!%&<>|+=:;,/?\\-]+', Operator),
(r'[{}\[\]();.]+', Punctuation),
(words((
'case', 'default', 'for', 'each', 'in', 'while', 'do', 'break',
'return', 'continue', 'if', 'else', 'throw', 'try', 'catch',
'var', 'with', 'new', 'typeof', 'arguments', 'instanceof', 'this',
'switch'), suffix=r'\b'),
Keyword),
(words((
'class', 'public', 'final', 'internal', 'native', 'override', 'private',
'protected', 'static', 'import', 'extends', 'implements', 'interface',
'intrinsic', 'return', 'super', 'dynamic', 'function', 'const', 'get',
'namespace', 'package', 'set'), suffix=r'\b'),
Keyword.Declaration),
(r'(true|false|null|NaN|Infinity|-Infinity|undefined|Void)\b',
Keyword.Constant),
(words((
'Accessibility', 'AccessibilityProperties', 'ActionScriptVersion',
'ActivityEvent', 'AntiAliasType', 'ApplicationDomain', 'AsBroadcaster', 'Array',
'AsyncErrorEvent', 'AVM1Movie', 'BevelFilter', 'Bitmap', 'BitmapData',
'BitmapDataChannel', 'BitmapFilter', 'BitmapFilterQuality', 'BitmapFilterType',
'BlendMode', 'BlurFilter', 'Boolean', 'ByteArray', 'Camera', 'Capabilities', 'CapsStyle',
'Class', 'Color', 'ColorMatrixFilter', 'ColorTransform', 'ContextMenu',
'ContextMenuBuiltInItems', 'ContextMenuEvent', 'ContextMenuItem',
'ConvultionFilter', 'CSMSettings', 'DataEvent', 'Date', 'DefinitionError',
'DeleteObjectSample', 'Dictionary', 'DisplacmentMapFilter', 'DisplayObject',
'DisplacmentMapFilterMode', 'DisplayObjectContainer', 'DropShadowFilter',
'Endian', 'EOFError', 'Error', 'ErrorEvent', 'EvalError', 'Event', 'EventDispatcher',
'EventPhase', 'ExternalInterface', 'FileFilter', 'FileReference',
'FileReferenceList', 'FocusDirection', 'FocusEvent', 'Font', 'FontStyle', 'FontType',
'FrameLabel', 'FullScreenEvent', 'Function', 'GlowFilter', 'GradientBevelFilter',
'GradientGlowFilter', 'GradientType', 'Graphics', 'GridFitType', 'HTTPStatusEvent',
'IBitmapDrawable', 'ID3Info', 'IDataInput', 'IDataOutput', 'IDynamicPropertyOutput',
'IDynamicPropertyWriter', 'IEventDispatcher', 'IExternalizable',
'IllegalOperationError', 'IME', 'IMEConversionMode', 'IMEEvent', 'int',
'InteractiveObject', 'InterpolationMethod', 'InvalidSWFError', 'InvokeEvent',
'IOError', 'IOErrorEvent', 'JointStyle', 'Key', 'Keyboard', 'KeyboardEvent', 'KeyLocation',
'LineScaleMode', 'Loader', 'LoaderContext', 'LoaderInfo', 'LoadVars', 'LocalConnection',
'Locale', 'Math', 'Matrix', 'MemoryError', 'Microphone', 'MorphShape', 'Mouse', 'MouseEvent',
'MovieClip', 'MovieClipLoader', 'Namespace', 'NetConnection', 'NetStatusEvent',
'NetStream', 'NewObjectSample', 'Number', 'Object', 'ObjectEncoding', 'PixelSnapping',
'Point', 'PrintJob', 'PrintJobOptions', 'PrintJobOrientation', 'ProgressEvent', 'Proxy',
'QName', 'RangeError', 'Rectangle', 'ReferenceError', 'RegExp', 'Responder', 'Sample',
'Scene', 'ScriptTimeoutError', 'Security', 'SecurityDomain', 'SecurityError',
'SecurityErrorEvent', 'SecurityPanel', 'Selection', 'Shape', 'SharedObject',
'SharedObjectFlushStatus', 'SimpleButton', 'Socket', 'Sound', 'SoundChannel',
'SoundLoaderContext', 'SoundMixer', 'SoundTransform', 'SpreadMethod', 'Sprite',
'StackFrame', 'StackOverflowError', 'Stage', 'StageAlign', 'StageDisplayState',
'StageQuality', 'StageScaleMode', 'StaticText', 'StatusEvent', 'String', 'StyleSheet',
'SWFVersion', 'SyncEvent', 'SyntaxError', 'System', 'TextColorType', 'TextField',
'TextFieldAutoSize', 'TextFieldType', 'TextFormat', 'TextFormatAlign',
'TextLineMetrics', 'TextRenderer', 'TextSnapshot', 'Timer', 'TimerEvent', 'Transform',
'TypeError', 'uint', 'URIError', 'URLLoader', 'URLLoaderDataFormat', 'URLRequest',
'URLRequestHeader', 'URLRequestMethod', 'URLStream', 'URLVariabeles', 'VerifyError',
'Video', 'XML', 'XMLDocument', 'XMLList', 'XMLNode', 'XMLNodeType', 'XMLSocket',
'XMLUI'), suffix=r'\b'),
Name.Builtin),
(words((
'decodeURI', 'decodeURIComponent', 'encodeURI', 'escape', 'eval', 'isFinite', 'isNaN',
'isXMLName', 'clearInterval', 'fscommand', 'getTimer', 'getURL', 'getVersion',
'parseFloat', 'parseInt', 'setInterval', 'trace', 'updateAfterEvent',
'unescape'), suffix=r'\b'),
Name.Function),
(r'[$a-zA-Z_]\w*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
class ActionScript3Lexer(RegexLexer):
"""
For ActionScript 3 source code.
.. versionadded:: 0.11
"""
name = 'ActionScript 3'
aliases = ['as3', 'actionscript3']
filenames = ['*.as']
mimetypes = ['application/x-actionscript3', 'text/x-actionscript3',
'text/actionscript3']
identifier = r'[$a-zA-Z_]\w*'
typeidentifier = identifier + '(?:\.<\w+>)?'
flags = re.DOTALL | re.MULTILINE
tokens = {
'root': [
(r'\s+', Text),
(r'(function\s+)(' + identifier + r')(\s*)(\()',
bygroups(Keyword.Declaration, Name.Function, Text, Operator),
'funcparams'),
(r'(var|const)(\s+)(' + identifier + r')(\s*)(:)(\s*)(' +
typeidentifier + r')',
bygroups(Keyword.Declaration, Text, Name, Text, Punctuation, Text,
Keyword.Type)),
(r'(import|package)(\s+)((?:' + identifier + r'|\.)+)(\s*)',
bygroups(Keyword, Text, Name.Namespace, Text)),
(r'(new)(\s+)(' + typeidentifier + r')(\s*)(\()',
bygroups(Keyword, Text, Keyword.Type, Text, Operator)),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'/(\\\\|\\/|[^\n])*/[gisx]*', String.Regex),
(r'(\.)(' + identifier + r')', bygroups(Operator, Name.Attribute)),
(r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
r'throw|try|catch|with|new|typeof|arguments|instanceof|this|'
r'switch|import|include|as|is)\b',
Keyword),
(r'(class|public|final|internal|native|override|private|protected|'
r'static|import|extends|implements|interface|intrinsic|return|super|'
r'dynamic|function|const|get|namespace|package|set)\b',
Keyword.Declaration),
(r'(true|false|null|NaN|Infinity|-Infinity|undefined|void)\b',
Keyword.Constant),
(r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
r'isFinite|parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
r'unescape)\b', Name.Function),
(identifier, Name),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'[~^*!%&<>|+=:;,/?\\{}\[\]().-]+', Operator),
],
'funcparams': [
(r'\s+', Text),
(r'(\s*)(\.\.\.)?(' + identifier + r')(\s*)(:)(\s*)(' +
typeidentifier + r'|\*)(\s*)',
bygroups(Text, Punctuation, Name, Text, Operator, Text,
Keyword.Type, Text), 'defval'),
(r'\)', Operator, 'type')
],
'type': [
(r'(\s*)(:)(\s*)(' + typeidentifier + r'|\*)',
bygroups(Text, Operator, Text, Keyword.Type), '#pop:2'),
(r'\s+', Text, '#pop:2'),
default('#pop:2')
],
'defval': [
(r'(=)(\s*)([^(),]+)(\s*)(,?)',
bygroups(Operator, Text, using(this), Text, Operator), '#pop'),
(r',', Operator, '#pop'),
default('#pop')
]
}
def analyse_text(text):
    if re.match(r'\w+\s*:\s*\w', text):
        return 0.3
    return 0
class MxmlLexer(RegexLexer):
"""
For MXML markup.
Nested AS3 in <script> tags is highlighted by the appropriate lexer.
.. versionadded:: 1.1
"""
flags = re.MULTILINE | re.DOTALL
name = 'MXML'
aliases = ['mxml']
filenames = ['*.mxml']
mimetypes = ['text/xml', 'application/xml']
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'(\<\!\[CDATA\[)(.*?)(\]\]\>)',
bygroups(String, using(ActionScript3Lexer), String)),
('<!--', Comment, 'comment'),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*[\w:.-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'tag': [
(r'\s+', Text),
(r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
('\s+', Text),
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
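
A usage sketch (illustrative, not from the original file): the CDATA rule in 'root' above delegates the embedded script body to ActionScript3Lexer via using(), so a whole MXML document highlights in one call:

from pygments import highlight
from pygments.formatters import TerminalFormatter

mxml_source = '<mx:Script><![CDATA[ var n:int = 1; ]]></mx:Script>'
print(highlight(mxml_source, MxmlLexer(), TerminalFormatter()))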


@@ -1,24 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.agile
~~~~~~~~~~~~~~~~~~~~~
Just export lexer classes previously contained in this module.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexers.lisp import SchemeLexer
from pygments.lexers.jvm import IokeLexer, ClojureLexer
from pygments.lexers.python import PythonLexer, PythonConsoleLexer, \
PythonTracebackLexer, Python3Lexer, Python3TracebackLexer, DgLexer
from pygments.lexers.ruby import RubyLexer, RubyConsoleLexer, FancyLexer
from pygments.lexers.perl import PerlLexer, Perl6Lexer
from pygments.lexers.d import CrocLexer, MiniDLexer
from pygments.lexers.iolang import IoLexer
from pygments.lexers.tcl import TclLexer
from pygments.lexers.factor import FactorLexer
from pygments.lexers.scripting import LuaLexer, MoonScriptLexer
__all__ = []
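
Because this shim only re-exports, import paths from before the module split keep resolving to the same class objects — for example (illustrative):

from pygments.lexers.agile import PythonLexer as old_path
from pygments.lexers.python import PythonLexer as new_path
assert old_path is new_path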


@@ -1,187 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.algebra
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for computer algebra systems.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['GAPLexer', 'MathematicaLexer', 'MuPADLexer']
class GAPLexer(RegexLexer):
"""
For `GAP <http://www.gap-system.org>`_ source code.
.. versionadded:: 2.0
"""
name = 'GAP'
aliases = ['gap']
filenames = ['*.g', '*.gd', '*.gi', '*.gap']
tokens = {
'root': [
(r'#.*$', Comment.Single),
(r'"(?:[^"\\]|\\.)*"', String),
(r'\(|\)|\[|\]|\{|\}', Punctuation),
(r'''(?x)\b(?:
if|then|elif|else|fi|
for|while|do|od|
repeat|until|
break|continue|
function|local|return|end|
rec|
quit|QUIT|
IsBound|Unbind|
TryNextMethod|
Info|Assert
)\b''', Keyword),
(r'''(?x)\b(?:
true|false|fail|infinity
)\b''',
Name.Constant),
(r'''(?x)\b(?:
(Declare|Install)([A-Z][A-Za-z]+)|
BindGlobal|BIND_GLOBAL
)\b''',
Name.Builtin),
(r'\.|,|:=|;|=|\+|-|\*|/|\^|>|<', Operator),
(r'''(?x)\b(?:
and|or|not|mod|in
)\b''',
Operator.Word),
(r'''(?x)
(?:\w+|`[^`]*`)
(?:::\w+|`[^`]*`)*''', Name.Variable),
(r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
(r'\.[0-9]+(?:e[0-9]+)?', Number),
(r'.', Text)
]
}
class MathematicaLexer(RegexLexer):
"""
Lexer for `Mathematica <http://www.wolfram.com/mathematica/>`_ source code.
.. versionadded:: 2.0
"""
name = 'Mathematica'
aliases = ['mathematica', 'mma', 'nb']
filenames = ['*.nb', '*.cdf', '*.nbp', '*.ma']
mimetypes = ['application/mathematica',
'application/vnd.wolfram.mathematica',
'application/vnd.wolfram.mathematica.package',
'application/vnd.wolfram.cdf']
# http://reference.wolfram.com/mathematica/guide/Syntax.html
operators = (
";;", "=", "=.", "!=" "==", ":=", "->", ":>", "/.", "+", "-", "*", "/",
"^", "&&", "||", "!", "<>", "|", "/;", "?", "@", "//", "/@", "@@",
"@@@", "~~", "===", "&", "<", ">", "<=", ">=",
)
punctuation = (",", ";", "(", ")", "[", "]", "{", "}")
def _multi_escape(entries):
    return '(%s)' % ('|'.join(re.escape(entry) for entry in entries))
tokens = {
'root': [
(r'(?s)\(\*.*?\*\)', Comment),
(r'([a-zA-Z]+[A-Za-z0-9]*`)', Name.Namespace),
(r'([A-Za-z0-9]*_+[A-Za-z0-9]*)', Name.Variable),
(r'#\d*', Name.Variable),
(r'([a-zA-Z]+[a-zA-Z0-9]*)', Name),
(r'-?[0-9]+\.[0-9]*', Number.Float),
(r'-?[0-9]*\.[0-9]+', Number.Float),
(r'-?[0-9]+', Number.Integer),
(words(operators), Operator),
(words(punctuation), Punctuation),
(r'".*?"', String),
(r'\s+', Text.Whitespace),
],
}
class MuPADLexer(RegexLexer):
"""
A `MuPAD <http://www.mupad.com>`_ lexer.
Contributed by Christopher Creutzig <christopher@creutzig.de>.
.. versionadded:: 0.8
"""
name = 'MuPAD'
aliases = ['mupad']
filenames = ['*.mu']
tokens = {
'root': [
(r'//.*?$', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
(r'"(?:[^"\\]|\\.)*"', String),
(r'\(|\)|\[|\]|\{|\}', Punctuation),
(r'''(?x)\b(?:
next|break|end|
axiom|end_axiom|category|end_category|domain|end_domain|inherits|
if|%if|then|elif|else|end_if|
case|of|do|otherwise|end_case|
while|end_while|
repeat|until|end_repeat|
for|from|to|downto|step|end_for|
proc|local|option|save|begin|end_proc|
delete|frame
)\b''', Keyword),
(r'''(?x)\b(?:
DOM_ARRAY|DOM_BOOL|DOM_COMPLEX|DOM_DOMAIN|DOM_EXEC|DOM_EXPR|
DOM_FAIL|DOM_FLOAT|DOM_FRAME|DOM_FUNC_ENV|DOM_HFARRAY|DOM_IDENT|
DOM_INT|DOM_INTERVAL|DOM_LIST|DOM_NIL|DOM_NULL|DOM_POLY|DOM_PROC|
DOM_PROC_ENV|DOM_RAT|DOM_SET|DOM_STRING|DOM_TABLE|DOM_VAR
)\b''', Name.Class),
(r'''(?x)\b(?:
PI|EULER|E|CATALAN|
NIL|FAIL|undefined|infinity|
TRUE|FALSE|UNKNOWN
)\b''',
Name.Constant),
(r'\b(?:dom|procname)\b', Name.Builtin.Pseudo),
(r'\.|,|:|;|=|\+|-|\*|/|\^|@|>|<|\$|\||!|\'|%|~=', Operator),
(r'''(?x)\b(?:
and|or|not|xor|
assuming|
div|mod|
union|minus|intersect|in|subset
)\b''',
Operator.Word),
(r'\b(?:I|RDN_INF|RD_NINF|RD_NAN)\b', Number),
# (r'\b(?:adt|linalg|newDomain|hold)\b', Name.Builtin),
(r'''(?x)
((?:[a-zA-Z_#][\w#]*|`[^`]*`)
(?:::[a-zA-Z_#][\w#]*|`[^`]*`)*)(\s*)([(])''',
bygroups(Name.Function, Text, Punctuation)),
(r'''(?x)
(?:[a-zA-Z_#][\w#]*|`[^`]*`)
(?:::[a-zA-Z_#][\w#]*|`[^`]*`)*''', Name.Variable),
(r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
(r'\.[0-9]+(?:e[0-9]+)?', Number),
(r'.', Text)
],
'comment': [
(r'[^*/]', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
]
}
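
The '#push'/'#pop' pair in the 'comment' state above is what makes MuPAD block comments nest: each inner /* pushes another copy of the state, so the first */ closes only the innermost level. An illustrative check (not part of the original file):

if __name__ == '__main__':
    # All of the comment text, including the inner /* inner */, comes back
    # tagged Comment.Multiline; only the second '*/' ends the comment.
    text = '/* outer /* inner */ still outer */'
    for token, value in MuPADLexer().get_tokens(text):
        print(token, repr(value))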


@@ -1,76 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.ambient
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for AmbientTalk language.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['AmbientTalkLexer']
class AmbientTalkLexer(RegexLexer):
"""
Lexer for `AmbientTalk <https://code.google.com/p/ambienttalk>`_ source code.
.. versionadded:: 2.0
"""
name = 'AmbientTalk'
filenames = ['*.at']
aliases = ['at', 'ambienttalk', 'ambienttalk/2']
mimetypes = ['text/x-ambienttalk']
flags = re.MULTILINE | re.DOTALL
builtin = words(('if:', 'then:', 'else:', 'when:', 'whenever:', 'discovered:',
'disconnected:', 'reconnected:', 'takenOffline:', 'becomes:',
'export:', 'as:', 'object:', 'actor:', 'mirror:', 'taggedAs:',
'mirroredBy:', 'is:'))
tokens = {
'root': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'(def|deftype|import|alias|exclude)\b', Keyword),
(builtin, Name.Builtin),
(r'(true|false|nil)\b', Keyword.Constant),
(r'(~|lobby|jlobby|/)\.', Keyword.Constant, 'namespace'),
(r'"(\\\\|\\"|[^"])*"', String),
(r'\|', Punctuation, 'arglist'),
(r'<:|[*^!%&<>+=,./?-]|:=', Operator),
(r"`[a-zA-Z_]\w*", String.Symbol),
(r"[a-zA-Z_]\w*:", Name.Function),
(r"[{}()\[\];`]", Punctuation),
(r'(self|super)\b', Name.Variable.Instance),
(r"[a-zA-Z_]\w*", Name.Variable),
(r"@[a-zA-Z_]\w*", Name.Class),
(r"@\[", Name.Class, 'annotations'),
include('numbers'),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+', Number.Integer)
],
'namespace': [
(r'[a-zA-Z_]\w*\.', Name.Namespace),
(r'[a-zA-Z_]\w*:', Name.Function, '#pop'),
(r'[a-zA-Z_]\w*(?!\.)', Name.Function, '#pop')
],
'annotations': [
(r"(.*?)\]", Name.Class, '#pop')
],
'arglist': [
(r'\|', Punctuation, '#pop'),
(r'\s*(,)\s*', Punctuation),
(r'[a-zA-Z_]\w*', Name.Variable),
],
}


@@ -1,101 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.apl
~~~~~~~~~~~~~~~~~~~
Lexers for APL.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['APLLexer']
class APLLexer(RegexLexer):
"""
A simple APL lexer.
.. versionadded:: 2.0
"""
name = 'APL'
aliases = ['apl']
filenames = ['*.apl']
tokens = {
'root': [
# Whitespace
# ==========
(r'\s+', Text),
#
# Comment
# =======
# '⍝' is traditional; '#' is supported by GNU APL and NGN (but not Dyalog)
(u'[⍝#].*$', Comment.Single),
#
# Strings
# =======
(r'\'((\'\')|[^\'])*\'', String.Single),
(r'"(("")|[^"])*"', String.Double), # supported by NGN APL
#
# Punctuation
# ===========
# This token type is used for diamond and parenthesis
# but not for bracket and ; (see below)
(u'[⋄◇()]', Punctuation),
#
# Array indexing
# ==============
# Since this token type is very important in APL, it is not included in
# the punctuation token type but rather in the following one
(r'[\[\];]', String.Regex),
#
# Distinguished names
# ===================
# following IBM APL2 standard
(u'⎕[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*', Name.Function),
#
# Labels
# ======
# following IBM APL2 standard
# (u'[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*:', Name.Label),
#
# Variables
# =========
# following IBM APL2 standard
(u'[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*', Name.Variable),
#
# Numbers
# =======
(u'¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞)'
u'([Jj]¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞))?',
Number),
#
# Operators
# ==========
(u'[\.\\\/⌿⍀¨⍣⍨⍠⍤∘]', Name.Attribute), # closest token type
(u'[+\-×÷⌈⌊∣|?*⍟○!⌹<≤=>≥≠≡≢∊⍷∪∩~∨∧⍱⍲⍴,⍪⌽⊖⍉↑↓⊂⊃⌷⍋⍒⊤⊥⍕⍎⊣⊢⍁⍂≈⌸⍯↗]',
Operator),
#
# Constant
# ========
(u'⍬', Name.Constant),
#
# Quad symbol
# ===========
(u'[⎕⍞]', Name.Variable.Global),
#
# Arrows left/right
# =================
(u'[←→]', Keyword.Declaration),
#
# D-Fn
# ====
(u'[⍺⍵⍶⍹∇:]', Name.Builtin.Pseudo),
(r'[{}]', Keyword.Type),
],
}


@@ -1,435 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.asm
~~~~~~~~~~~~~~~~~~~
Lexers for assembly languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, DelegatingLexer
from pygments.lexers.c_cpp import CppLexer, CLexer
from pygments.lexers.d import DLexer
from pygments.token import Text, Name, Number, String, Comment, Punctuation, \
Other, Keyword, Operator
__all__ = ['GasLexer', 'ObjdumpLexer', 'DObjdumpLexer', 'CppObjdumpLexer',
'CObjdumpLexer', 'LlvmLexer', 'NasmLexer', 'NasmObjdumpLexer',
'Ca65Lexer']
class GasLexer(RegexLexer):
"""
For Gas (AT&T) assembly code.
"""
name = 'GAS'
aliases = ['gas', 'asm']
filenames = ['*.s', '*.S']
mimetypes = ['text/x-gas']
#: optional Comment or Whitespace
string = r'"(\\"|[^"])*"'
char = r'[\w$.@-]'
identifier = r'(?:[a-zA-Z$_]' + char + '*|\.' + char + '+)'
number = r'(?:0[xX][a-zA-Z0-9]+|\d+)'
tokens = {
'root': [
include('whitespace'),
(identifier + ':', Name.Label),
(r'\.' + identifier, Name.Attribute, 'directive-args'),
(r'lock|rep(n?z)?|data\d+', Name.Attribute),
(identifier, Name.Function, 'instruction-args'),
(r'[\r\n]+', Text)
],
'directive-args': [
(identifier, Name.Constant),
(string, String),
('@' + identifier, Name.Attribute),
(number, Number.Integer),
(r'[\r\n]+', Text, '#pop'),
(r'#.*?$', Comment, '#pop'),
include('punctuation'),
include('whitespace')
],
'instruction-args': [
# For objdump-disassembled code, shouldn't occur in
# actual assembler input
('([a-z0-9]+)( )(<)('+identifier+')(>)',
bygroups(Number.Hex, Text, Punctuation, Name.Constant,
Punctuation)),
('([a-z0-9]+)( )(<)('+identifier+')([-+])('+number+')(>)',
bygroups(Number.Hex, Text, Punctuation, Name.Constant,
Punctuation, Number.Integer, Punctuation)),
# Address constants
(identifier, Name.Constant),
(number, Number.Integer),
# Registers
('%' + identifier, Name.Variable),
# Numeric constants
('$'+number, Number.Integer),
(r"$'(.|\\')'", String.Char),
(r'[\r\n]+', Text, '#pop'),
(r'#.*?$', Comment, '#pop'),
include('punctuation'),
include('whitespace')
],
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
(r'#.*?\n', Comment)
],
'punctuation': [
(r'[-*,.():]+', Punctuation)
]
}
def analyse_text(text):
if re.match(r'^\.(text|data|section)', text, re.M):
return True
elif re.match(r'^\.\w+', text, re.M):
return 0.1
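# Note: analyse_text feeds pygments.lexers.guess_lexer, which ranks lexers by
# the returned score (a bare True counts as 1.0). A minimal sketch:
#
#     from pygments.lexers import guess_lexer
#     guess_lexer('.text\nmovl %eax, %ebx\n')  # GasLexer should score 1.0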
def _objdump_lexer_tokens(asm_lexer):
"""
Common objdump lexer tokens to wrap an ASM lexer.
"""
hex_re = r'[0-9A-Za-z]'
return {
'root': [
# File name & format:
('(.*?)(:)( +file format )(.*?)$',
bygroups(Name.Label, Punctuation, Text, String)),
# Section header
('(Disassembly of section )(.*?)(:)$',
bygroups(Text, Name.Label, Punctuation)),
# Function labels
# (With offset)
('('+hex_re+'+)( )(<)(.*?)([-+])(0[xX][A-Za-z0-9]+)(>:)$',
bygroups(Number.Hex, Text, Punctuation, Name.Function,
Punctuation, Number.Hex, Punctuation)),
# (Without offset)
('('+hex_re+'+)( )(<)(.*?)(>:)$',
bygroups(Number.Hex, Text, Punctuation, Name.Function,
Punctuation)),
# Code line with disassembled instructions
('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)( *\t)([a-zA-Z].*?)$',
bygroups(Text, Name.Label, Text, Number.Hex, Text,
using(asm_lexer))),
# Code line with ascii
('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)( *)(.*?)$',
bygroups(Text, Name.Label, Text, Number.Hex, Text, String)),
# Continued code line, only raw opcodes without disassembled
# instruction
('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)$',
bygroups(Text, Name.Label, Text, Number.Hex)),
# Skipped a few bytes
(r'\t\.\.\.$', Text),
# Relocation line
# (With offset)
(r'(\t\t\t)('+hex_re+r'+:)( )([^\t]+)(\t)(.*?)([-+])(0x'+hex_re+'+)$',
bygroups(Text, Name.Label, Text, Name.Property, Text,
Name.Constant, Punctuation, Number.Hex)),
# (Without offset)
(r'(\t\t\t)('+hex_re+r'+:)( )([^\t]+)(\t)(.*?)$',
bygroups(Text, Name.Label, Text, Name.Property, Text,
Name.Constant)),
(r'[^\n]+\n', Other)
]
}
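# The using(asm_lexer) callback above re-lexes each matched instruction span
# with the wrapped lexer, so this single token table serves every objdump
# wrapper below; only the inner assembly lexer differs per subclass.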
class ObjdumpLexer(RegexLexer):
"""
For the output of 'objdump -dr'
"""
name = 'objdump'
aliases = ['objdump']
filenames = ['*.objdump']
mimetypes = ['text/x-objdump']
tokens = _objdump_lexer_tokens(GasLexer)
class DObjdumpLexer(DelegatingLexer):
"""
For the output of 'objdump -Sr' on compiled D files
"""
name = 'd-objdump'
aliases = ['d-objdump']
filenames = ['*.d-objdump']
mimetypes = ['text/x-d-objdump']
def __init__(self, **options):
super(DObjdumpLexer, self).__init__(DLexer, ObjdumpLexer, **options)
class CppObjdumpLexer(DelegatingLexer):
"""
For the output of 'objdump -Sr' on compiled C++ files
"""
name = 'cpp-objdump'
aliases = ['cpp-objdump', 'c++-objdumb', 'cxx-objdump']
filenames = ['*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump']
mimetypes = ['text/x-cpp-objdump']
def __init__(self, **options):
super(CppObjdumpLexer, self).__init__(CppLexer, ObjdumpLexer, **options)
class CObjdumpLexer(DelegatingLexer):
"""
For the output of 'objdump -Sr' on compiled C files
"""
name = 'c-objdump'
aliases = ['c-objdump']
filenames = ['*.c-objdump']
mimetypes = ['text/x-c-objdump']
def __init__(self, **options):
super(CObjdumpLexer, self).__init__(CLexer, ObjdumpLexer, **options)
class LlvmLexer(RegexLexer):
"""
For LLVM assembly code.
"""
name = 'LLVM'
aliases = ['llvm']
filenames = ['*.ll']
mimetypes = ['text/x-llvm']
#: optional Comment or Whitespace
string = r'"[^"]*?"'
identifier = r'([-a-zA-Z$._][\w\-$.]*|' + string + ')'
tokens = {
'root': [
include('whitespace'),
# Before keywords, because keywords are valid label names :(...
(identifier + '\s*:', Name.Label),
include('keyword'),
(r'%' + identifier, Name.Variable),
(r'@' + identifier, Name.Variable.Global),
(r'%\d+', Name.Variable.Anonymous),
(r'@\d+', Name.Variable.Global),
(r'#\d+', Name.Variable.Global),
(r'!' + identifier, Name.Variable),
(r'!\d+', Name.Variable.Anonymous),
(r'c?' + string, String),
(r'0[xX][a-fA-F0-9]+', Number),
(r'-?\d+(?:[.]\d+)?(?:[eE][-+]?\d+(?:[.]\d+)?)?', Number),
(r'[=<>{}\[\]()*.,!]|x\b', Punctuation)
],
'whitespace': [
(r'(\n|\s)+', Text),
(r';.*?\n', Comment)
],
'keyword': [
# Regular keywords
(r'(begin|end'
r'|true|false'
r'|declare|define'
r'|global|constant'
r'|private|linker_private|internal|available_externally|linkonce'
r'|linkonce_odr|weak|weak_odr|appending|dllimport|dllexport'
r'|common|default|hidden|protected|extern_weak|external'
r'|thread_local|zeroinitializer|undef|null|to|tail|target|triple'
r'|datalayout|volatile|nuw|nsw|nnan|ninf|nsz|arcp|fast|exact|inbounds'
r'|align|addrspace|section|alias|module|asm|sideeffect|gc|dbg'
r'|linker_private_weak'
r'|attributes|blockaddress|initialexec|localdynamic|localexec'
r'|prefix|unnamed_addr'
r'|ccc|fastcc|coldcc|x86_stdcallcc|x86_fastcallcc|arm_apcscc'
r'|arm_aapcscc|arm_aapcs_vfpcc|ptx_device|ptx_kernel'
r'|intel_ocl_bicc|msp430_intrcc|spir_func|spir_kernel'
r'|x86_64_sysvcc|x86_64_win64cc|x86_thiscallcc'
r'|cc|c'
r'|signext|zeroext|inreg|sret|nounwind|noreturn|noalias|nocapture'
r'|byval|nest|readnone|readonly'
r'|inlinehint|noinline|alwaysinline|optsize|ssp|sspreq|noredzone'
r'|noimplicitfloat|naked'
r'|builtin|cold|nobuiltin|noduplicate|nonlazybind|optnone'
r'|returns_twice|sanitize_address|sanitize_memory|sanitize_thread'
r'|sspstrong|uwtable|returned'
r'|type|opaque'
r'|eq|ne|slt|sgt|sle'
r'|sge|ult|ugt|ule|uge'
r'|oeq|one|olt|ogt|ole'
r'|oge|ord|uno|ueq|une'
r'|x'
r'|acq_rel|acquire|alignstack|atomic|catch|cleanup|filter'
r'|inteldialect|max|min|monotonic|nand|personality|release'
r'|seq_cst|singlethread|umax|umin|unordered|xchg'
# instructions
r'|add|fadd|sub|fsub|mul|fmul|udiv|sdiv|fdiv|urem|srem|frem|shl'
r'|lshr|ashr|and|or|xor|icmp|fcmp'
r'|phi|call|trunc|zext|sext|fptrunc|fpext|uitofp|sitofp|fptoui'
r'|fptosi|inttoptr|ptrtoint|bitcast|select|va_arg|ret|br|switch'
r'|invoke|unwind|unreachable'
r'|indirectbr|landingpad|resume'
r'|malloc|alloca|free|load|store|getelementptr'
r'|extractelement|insertelement|shufflevector|getresult'
r'|extractvalue|insertvalue'
r'|atomicrmw|cmpxchg|fence'
r')\b', Keyword),
# Types
(r'void|half|float|double|x86_fp80|fp128|ppc_fp128|label|metadata',
Keyword.Type),
# Integer types
(r'i[1-9]\d*', Keyword)
]
}
class NasmLexer(RegexLexer):
"""
For Nasm (Intel) assembly code.
"""
name = 'NASM'
aliases = ['nasm']
filenames = ['*.asm', '*.ASM']
mimetypes = ['text/x-nasm']
identifier = r'[a-z$._?][\w$.?#@~]*'
hexn = r'(?:0x[0-9a-f]+|\$0[0-9a-f]*|[0-9]+[0-9a-f]*h)'
octn = r'[0-7]+q'
binn = r'[01]+b'
decn = r'[0-9]+'
floatn = decn + r'\.e?' + decn
string = r'"(\\"|[^"\n])*"|' + r"'(\\'|[^'\n])*'|" + r"`(\\`|[^`\n])*`"
declkw = r'(?:res|d)[bwdqt]|times'
register = (r'r[0-9][0-5]?[bwd]|'
r'[a-d][lh]|[er]?[a-d]x|[er]?[sb]p|[er]?[sd]i|[c-gs]s|st[0-7]|'
r'mm[0-7]|cr[0-4]|dr[0-367]|tr[3-7]')
wordop = r'seg|wrt|strict'
type = r'byte|[dq]?word'
directives = (r'BITS|USE16|USE32|SECTION|SEGMENT|ABSOLUTE|EXTERN|GLOBAL|'
r'ORG|ALIGN|STRUC|ENDSTRUC|COMMON|CPU|GROUP|UPPERCASE|IMPORT|'
r'EXPORT|LIBRARY|MODULE')
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r'^\s*%', Comment.Preproc, 'preproc'),
include('whitespace'),
(identifier + ':', Name.Label),
(r'(%s)(\s+)(equ)' % identifier,
bygroups(Name.Constant, Keyword.Declaration, Keyword.Declaration),
'instruction-args'),
(directives, Keyword, 'instruction-args'),
(declkw, Keyword.Declaration, 'instruction-args'),
(identifier, Name.Function, 'instruction-args'),
(r'[\r\n]+', Text)
],
'instruction-args': [
(string, String),
(hexn, Number.Hex),
(octn, Number.Oct),
(binn, Number.Bin),
(floatn, Number.Float),
(decn, Number.Integer),
include('punctuation'),
(register, Name.Builtin),
(identifier, Name.Variable),
(r'[\r\n]+', Text, '#pop'),
include('whitespace')
],
'preproc': [
(r'[^;\n]+', Comment.Preproc),
(r';.*?\n', Comment.Single, '#pop'),
(r'\n', Comment.Preproc, '#pop'),
],
'whitespace': [
(r'\n', Text),
(r'[ \t]+', Text),
(r';.*', Comment.Single)
],
'punctuation': [
(r'[,():\[\]]+', Punctuation),
(r'[&|^<>+*/%~-]+', Operator),
(r'[$]+', Keyword.Constant),
(wordop, Operator.Word),
(type, Keyword.Type)
],
}
class NasmObjdumpLexer(ObjdumpLexer):
"""
For the output of 'objdump -d -M intel'.
.. versionadded:: 2.0
"""
name = 'objdump-nasm'
aliases = ['objdump-nasm']
filenames = ['*.objdump-intel']
mimetypes = ['text/x-nasm-objdump']
tokens = _objdump_lexer_tokens(NasmLexer)
class Ca65Lexer(RegexLexer):
"""
For ca65 assembler sources.
.. versionadded:: 1.6
"""
name = 'ca65 assembler'
aliases = ['ca65']
filenames = ['*.s']
flags = re.IGNORECASE
tokens = {
'root': [
(r';.*', Comment.Single),
(r'\s+', Text),
(r'[a-z_.@$][\w.@$]*:', Name.Label),
(r'((ld|st)[axy]|(in|de)[cxy]|asl|lsr|ro[lr]|adc|sbc|cmp|cp[xy]'
r'|cl[cvdi]|se[cdi]|jmp|jsr|bne|beq|bpl|bmi|bvc|bvs|bcc|bcs'
r'|p[lh][ap]|rt[is]|brk|nop|ta[xy]|t[xy]a|txs|tsx|and|ora|eor'
r'|bit)\b', Keyword),
(r'\.\w+', Keyword.Pseudo),
(r'[-+~*/^&|!<>=]', Operator),
(r'"[^"\n]*.', String),
(r"'[^'\n]*.", String.Char),
(r'\$[0-9a-f]+|[0-9a-f]+h\b', Number.Hex),
(r'\d+', Number.Integer),
(r'%[01]+', Number.Bin),
(r'[#,.:()=\[\]]', Punctuation),
(r'[a-z_.@$][\w.@$]*', Name),
]
}
def analyse_text(text):
# ca65 comments start with ";" (GAS uses "#"), so a leading ";" suggests ca65
if re.match(r'^\s*;', text, re.MULTILINE):
return 0.9
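# A minimal sketch of driving one of the DelegatingLexer pairings above:
# ObjdumpLexer tags lines it cannot parse as Other, and DelegatingLexer
# re-lexes those spans with the root lexer (here CLexer).
from pygments import highlight
from pygments.formatters import NullFormatter
from pygments.lexers.asm import CObjdumpLexer

print(highlight('a.out:     file format elf64-x86-64\n',
                CObjdumpLexer(), NullFormatter()))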


@@ -1,373 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.automation
~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for automation scripting languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups, combined
from pygments.token import Text, Comment, Operator, Name, String, \
Number, Punctuation, Generic
__all__ = ['AutohotkeyLexer', 'AutoItLexer']
class AutohotkeyLexer(RegexLexer):
"""
For `autohotkey <http://www.autohotkey.com/>`_ source code.
.. versionadded:: 1.4
"""
name = 'autohotkey'
aliases = ['ahk', 'autohotkey']
filenames = ['*.ahk', '*.ahkl']
mimetypes = ['text/x-autohotkey']
tokens = {
'root': [
(r'^(\s*)(/\*)', bygroups(Text, Comment.Multiline), 'incomment'),
(r'^(\s*)(\()', bygroups(Text, Generic), 'incontinuation'),
(r'\s+;.*?$', Comment.Singleline),
(r'^;.*?$', Comment.Singleline),
(r'[]{}(),;[]', Punctuation),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'\%[a-zA-Z_#@$][\w#@$]*\%', Name.Variable),
(r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator),
include('commands'),
include('labels'),
include('builtInFunctions'),
include('builtInVariables'),
(r'"', String, combined('stringescape', 'dqs')),
include('numbers'),
(r'[a-zA-Z_#@$][\w#@$]*', Name),
(r'\\|\'', Text),
(r'\`([,%`abfnrtv\-+;])', String.Escape),
include('garbage'),
],
'incomment': [
(r'^\s*\*/', Comment.Multiline, '#pop'),
(r'[^*/]', Comment.Multiline),
(r'[*/]', Comment.Multiline)
],
'incontinuation': [
(r'^\s*\)', Generic, '#pop'),
(r'[^)]', Generic),
(r'[)]', Generic),
],
'commands': [
(r'(?i)^(\s*)(global|local|static|'
r'#AllowSameLineComments|#ClipboardTimeout|#CommentFlag|'
r'#ErrorStdOut|#EscapeChar|#HotkeyInterval|#HotkeyModifierTimeout|'
r'#Hotstring|#IfWinActive|#IfWinExist|#IfWinNotActive|'
r'#IfWinNotExist|#IncludeAgain|#Include|#InstallKeybdHook|'
r'#InstallMouseHook|#KeyHistory|#LTrim|#MaxHotkeysPerInterval|'
r'#MaxMem|#MaxThreads|#MaxThreadsBuffer|#MaxThreadsPerHotkey|'
r'#NoEnv|#NoTrayIcon|#Persistent|#SingleInstance|#UseHook|'
r'#WinActivateForce|AutoTrim|BlockInput|Break|Click|ClipWait|'
r'Continue|Control|ControlClick|ControlFocus|ControlGetFocus|'
r'ControlGetPos|ControlGetText|ControlGet|ControlMove|ControlSend|'
r'ControlSendRaw|ControlSetText|CoordMode|Critical|'
r'DetectHiddenText|DetectHiddenWindows|Drive|DriveGet|'
r'DriveSpaceFree|Edit|Else|EnvAdd|EnvDiv|EnvGet|EnvMult|EnvSet|'
r'EnvSub|EnvUpdate|Exit|ExitApp|FileAppend|'
r'FileCopy|FileCopyDir|FileCreateDir|FileCreateShortcut|'
r'FileDelete|FileGetAttrib|FileGetShortcut|FileGetSize|'
r'FileGetTime|FileGetVersion|FileInstall|FileMove|FileMoveDir|'
r'FileRead|FileReadLine|FileRecycle|FileRecycleEmpty|'
r'FileRemoveDir|FileSelectFile|FileSelectFolder|FileSetAttrib|'
r'FileSetTime|FormatTime|GetKeyState|Gosub|Goto|GroupActivate|'
r'GroupAdd|GroupClose|GroupDeactivate|Gui|GuiControl|'
r'GuiControlGet|Hotkey|IfEqual|IfExist|IfGreaterOrEqual|IfGreater|'
r'IfInString|IfLess|IfLessOrEqual|IfMsgBox|IfNotEqual|IfNotExist|'
r'IfNotInString|IfWinActive|IfWinExist|IfWinNotActive|'
r'IfWinNotExist|If |ImageSearch|IniDelete|IniRead|IniWrite|'
r'InputBox|Input|KeyHistory|KeyWait|ListHotkeys|ListLines|'
r'ListVars|Loop|Menu|MouseClickDrag|MouseClick|MouseGetPos|'
r'MouseMove|MsgBox|OnExit|OutputDebug|Pause|PixelGetColor|'
r'PixelSearch|PostMessage|Process|Progress|Random|RegDelete|'
r'RegRead|RegWrite|Reload|Repeat|Return|RunAs|RunWait|Run|'
r'SendEvent|SendInput|SendMessage|SendMode|SendPlay|SendRaw|Send|'
r'SetBatchLines|SetCapslockState|SetControlDelay|'
r'SetDefaultMouseSpeed|SetEnv|SetFormat|SetKeyDelay|'
r'SetMouseDelay|SetNumlockState|SetScrollLockState|'
r'SetStoreCapslockMode|SetTimer|SetTitleMatchMode|'
r'SetWinDelay|SetWorkingDir|Shutdown|Sleep|Sort|SoundBeep|'
r'SoundGet|SoundGetWaveVolume|SoundPlay|SoundSet|'
r'SoundSetWaveVolume|SplashImage|SplashTextOff|SplashTextOn|'
r'SplitPath|StatusBarGetText|StatusBarWait|StringCaseSense|'
r'StringGetPos|StringLeft|StringLen|StringLower|StringMid|'
r'StringReplace|StringRight|StringSplit|StringTrimLeft|'
r'StringTrimRight|StringUpper|Suspend|SysGet|Thread|ToolTip|'
r'Transform|TrayTip|URLDownloadToFile|While|WinActivate|'
r'WinActivateBottom|WinClose|WinGetActiveStats|WinGetActiveTitle|'
r'WinGetClass|WinGetPos|WinGetText|WinGetTitle|WinGet|WinHide|'
r'WinKill|WinMaximize|WinMenuSelectItem|WinMinimizeAllUndo|'
r'WinMinimizeAll|WinMinimize|WinMove|WinRestore|WinSetTitle|'
r'WinSet|WinShow|WinWaitActive|WinWaitClose|WinWaitNotActive|'
r'WinWait)\b', bygroups(Text, Name.Builtin)),
],
'builtInFunctions': [
(r'(?i)(Abs|ACos|Asc|ASin|ATan|Ceil|Chr|Cos|DllCall|Exp|FileExist|'
r'Floor|GetKeyState|IL_Add|IL_Create|IL_Destroy|InStr|IsFunc|'
r'IsLabel|Ln|Log|LV_Add|LV_Delete|LV_DeleteCol|LV_GetCount|'
r'LV_GetNext|LV_GetText|LV_Insert|LV_InsertCol|LV_Modify|'
r'LV_ModifyCol|LV_SetImageList|Mod|NumGet|NumPut|OnMessage|'
r'RegExMatch|RegExReplace|RegisterCallback|Round|SB_SetIcon|'
r'SB_SetParts|SB_SetText|Sin|Sqrt|StrLen|SubStr|Tan|TV_Add|'
r'TV_Delete|TV_GetChild|TV_GetCount|TV_GetNext|TV_Get|'
r'TV_GetParent|TV_GetPrev|TV_GetSelection|TV_GetText|TV_Modify|'
r'VarSetCapacity|WinActive|WinExist|Object|ComObjActive|'
r'ComObjArray|ComObjEnwrap|ComObjUnwrap|ComObjParameter|'
r'ComObjType|ComObjConnect|ComObjCreate|ComObjGet|ComObjError|'
r'ComObjValue|Insert|MinIndex|MaxIndex|Remove|SetCapacity|'
r'GetCapacity|GetAddress|_NewEnum|FileOpen|Read|Write|ReadLine|'
r'WriteLine|ReadNumType|WriteNumType|RawRead|RawWrite|Seek|Tell|'
r'Close|Next|IsObject|StrPut|StrGet|Trim|LTrim|RTrim)\b',
Name.Function),
],
'builtInVariables': [
(r'(?i)(A_AhkPath|A_AhkVersion|A_AppData|A_AppDataCommon|'
r'A_AutoTrim|A_BatchLines|A_CaretX|A_CaretY|A_ComputerName|'
r'A_ControlDelay|A_Cursor|A_DDDD|A_DDD|A_DD|A_DefaultMouseSpeed|'
r'A_Desktop|A_DesktopCommon|A_DetectHiddenText|'
r'A_DetectHiddenWindows|A_EndChar|A_EventInfo|A_ExitReason|'
r'A_FormatFloat|A_FormatInteger|A_Gui|A_GuiEvent|A_GuiControl|'
r'A_GuiControlEvent|A_GuiHeight|A_GuiWidth|A_GuiX|A_GuiY|A_Hour|'
r'A_IconFile|A_IconHidden|A_IconNumber|A_IconTip|A_Index|'
r'A_IPAddress1|A_IPAddress2|A_IPAddress3|A_IPAddress4|A_ISAdmin|'
r'A_IsCompiled|A_IsCritical|A_IsPaused|A_IsSuspended|A_KeyDelay|'
r'A_Language|A_LastError|A_LineFile|A_LineNumber|A_LoopField|'
r'A_LoopFileAttrib|A_LoopFileDir|A_LoopFileExt|A_LoopFileFullPath|'
r'A_LoopFileLongPath|A_LoopFileName|A_LoopFileShortName|'
r'A_LoopFileShortPath|A_LoopFileSize|A_LoopFileSizeKB|'
r'A_LoopFileSizeMB|A_LoopFileTimeAccessed|A_LoopFileTimeCreated|'
r'A_LoopFileTimeModified|A_LoopReadLine|A_LoopRegKey|'
r'A_LoopRegName|A_LoopRegSubkey|A_LoopRegTimeModified|'
r'A_LoopRegType|A_MDAY|A_Min|A_MM|A_MMM|A_MMMM|A_Mon|A_MouseDelay|'
r'A_MSec|A_MyDocuments|A_Now|A_NowUTC|A_NumBatchLines|A_OSType|'
r'A_OSVersion|A_PriorHotkey|A_ProgramFiles|A_Programs|'
r'A_ProgramsCommon|A_ScreenHeight|A_ScreenWidth|A_ScriptDir|'
r'A_ScriptFullPath|A_ScriptName|A_Sec|A_Space|A_StartMenu|'
r'A_StartMenuCommon|A_Startup|A_StartupCommon|A_StringCaseSense|'
r'A_Tab|A_Temp|A_ThisFunc|A_ThisHotkey|A_ThisLabel|A_ThisMenu|'
r'A_ThisMenuItem|A_ThisMenuItemPos|A_TickCount|A_TimeIdle|'
r'A_TimeIdlePhysical|A_TimeSincePriorHotkey|A_TimeSinceThisHotkey|'
r'A_TitleMatchMode|A_TitleMatchModeSpeed|A_UserName|A_WDay|'
r'A_WinDelay|A_WinDir|A_WorkingDir|A_YDay|A_YEAR|A_YWeek|A_YYYY|'
r'Clipboard|ClipboardAll|ComSpec|ErrorLevel|ProgramFiles|True|'
r'False|A_IsUnicode|A_FileEncoding|A_OSVersion|A_PtrSize)\b',
Name.Variable),
],
'labels': [
# hotkeys and labels
# technically, hotkey names are limited to named keys and buttons
(r'(^\s*)([^:\s("]+?:{1,2})', bygroups(Text, Name.Label)),
(r'(^\s*)(::[^:\s]+?::)', bygroups(Text, Name.Label)),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+[eE][+-]?[0-9]+', Number.Float),
(r'0\d+', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer)
],
'stringescape': [
(r'\"\"|\`([,%`abfnrtv])', String.Escape),
],
'strings': [
(r'[^"\n]+', String),
],
'dqs': [
(r'"', String, '#pop'),
include('strings')
],
'garbage': [
(r'[^\S\n]', Text),
# (r'.', Text), # no cheating
],
}
class AutoItLexer(RegexLexer):
"""
For `AutoIt <http://www.autoitscript.com/site/autoit/>`_ files.
AutoIt is a freeware BASIC-like scripting language
designed for automating the Windows GUI and general scripting.
.. versionadded:: 1.6
"""
name = 'AutoIt'
aliases = ['autoit']
filenames = ['*.au3']
mimetypes = ['text/x-autoit']
# Keywords, functions, macros from au3.keywords.properties
# which can be found in AutoIt installed directory, e.g.
# c:\Program Files (x86)\AutoIt3\SciTE\au3.keywords.properties
keywords = """\
#include-once #include #endregion #forcedef #forceref #region
and byref case continueloop dim do else elseif endfunc endif
endselect exit exitloop for func global
if local next not or return select step
then to until wend while exit""".split()
functions = """\
abs acos adlibregister adlibunregister asc ascw asin assign atan
autoitsetoption autoitwingettitle autoitwinsettitle beep binary binarylen
binarymid binarytostring bitand bitnot bitor bitrotate bitshift bitxor
blockinput break call cdtray ceiling chr chrw clipget clipput consoleread
consolewrite consolewriteerror controlclick controlcommand controldisable
controlenable controlfocus controlgetfocus controlgethandle controlgetpos
controlgettext controlhide controllistview controlmove controlsend
controlsettext controlshow controltreeview cos dec dircopy dircreate
dirgetsize dirmove dirremove dllcall dllcalladdress dllcallbackfree
dllcallbackgetptr dllcallbackregister dllclose dllopen dllstructcreate
dllstructgetdata dllstructgetptr dllstructgetsize dllstructsetdata
drivegetdrive drivegetfilesystem drivegetlabel drivegetserial drivegettype
drivemapadd drivemapdel drivemapget drivesetlabel drivespacefree
drivespacetotal drivestatus envget envset envupdate eval execute exp
filechangedir fileclose filecopy filecreatentfslink filecreateshortcut
filedelete fileexists filefindfirstfile filefindnextfile fileflush
filegetattrib filegetencoding filegetlongname filegetpos filegetshortcut
filegetshortname filegetsize filegettime filegetversion fileinstall filemove
fileopen fileopendialog fileread filereadline filerecycle filerecycleempty
filesavedialog fileselectfolder filesetattrib filesetpos filesettime
filewrite filewriteline floor ftpsetproxy guicreate guictrlcreateavi
guictrlcreatebutton guictrlcreatecheckbox guictrlcreatecombo
guictrlcreatecontextmenu guictrlcreatedate guictrlcreatedummy
guictrlcreateedit guictrlcreategraphic guictrlcreategroup guictrlcreateicon
guictrlcreateinput guictrlcreatelabel guictrlcreatelist
guictrlcreatelistview guictrlcreatelistviewitem guictrlcreatemenu
guictrlcreatemenuitem guictrlcreatemonthcal guictrlcreateobj
guictrlcreatepic guictrlcreateprogress guictrlcreateradio
guictrlcreateslider guictrlcreatetab guictrlcreatetabitem
guictrlcreatetreeview guictrlcreatetreeviewitem guictrlcreateupdown
guictrldelete guictrlgethandle guictrlgetstate guictrlread guictrlrecvmsg
guictrlregisterlistviewsort guictrlsendmsg guictrlsendtodummy
guictrlsetbkcolor guictrlsetcolor guictrlsetcursor guictrlsetdata
guictrlsetdefbkcolor guictrlsetdefcolor guictrlsetfont guictrlsetgraphic
guictrlsetimage guictrlsetlimit guictrlsetonevent guictrlsetpos
guictrlsetresizing guictrlsetstate guictrlsetstyle guictrlsettip guidelete
guigetcursorinfo guigetmsg guigetstyle guiregistermsg guisetaccelerators
guisetbkcolor guisetcoord guisetcursor guisetfont guisethelp guiseticon
guisetonevent guisetstate guisetstyle guistartgroup guiswitch hex hotkeyset
httpsetproxy httpsetuseragent hwnd inetclose inetget inetgetinfo inetgetsize
inetread inidelete iniread inireadsection inireadsectionnames
inirenamesection iniwrite iniwritesection inputbox int isadmin isarray
isbinary isbool isdeclared isdllstruct isfloat ishwnd isint iskeyword
isnumber isobj isptr isstring log memgetstats mod mouseclick mouseclickdrag
mousedown mousegetcursor mousegetpos mousemove mouseup mousewheel msgbox
number objcreate objcreateinterface objevent objget objname
onautoitexitregister onautoitexitunregister opt ping pixelchecksum
pixelgetcolor pixelsearch pluginclose pluginopen processclose processexists
processgetstats processlist processsetpriority processwait processwaitclose
progressoff progresson progressset ptr random regdelete regenumkey
regenumval regread regwrite round run runas runaswait runwait send
sendkeepactive seterror setextended shellexecute shellexecutewait shutdown
sin sleep soundplay soundsetwavevolume splashimageon splashoff splashtexton
sqrt srandom statusbargettext stderrread stdinwrite stdioclose stdoutread
string stringaddcr stringcompare stringformat stringfromasciiarray
stringinstr stringisalnum stringisalpha stringisascii stringisdigit
stringisfloat stringisint stringislower stringisspace stringisupper
stringisxdigit stringleft stringlen stringlower stringmid stringregexp
stringregexpreplace stringreplace stringright stringsplit stringstripcr
stringstripws stringtoasciiarray stringtobinary stringtrimleft
stringtrimright stringupper tan tcpaccept tcpclosesocket tcpconnect
tcplisten tcpnametoip tcprecv tcpsend tcpshutdown tcpstartup timerdiff
timerinit tooltip traycreateitem traycreatemenu traygetmsg trayitemdelete
trayitemgethandle trayitemgetstate trayitemgettext trayitemsetonevent
trayitemsetstate trayitemsettext traysetclick trayseticon traysetonevent
traysetpauseicon traysetstate traysettooltip traytip ubound udpbind
udpclosesocket udpopen udprecv udpsend udpshutdown udpstartup vargettype
winactivate winactive winclose winexists winflash wingetcaretpos
wingetclasslist wingetclientsize wingethandle wingetpos wingetprocess
wingetstate wingettext wingettitle winkill winlist winmenuselectitem
winminimizeall winminimizeallundo winmove winsetontop winsetstate
winsettitle winsettrans winwait winwaitactive winwaitclose
winwaitnotactive""".split()
macros = """\
@appdatacommondir @appdatadir @autoitexe @autoitpid @autoitversion
@autoitx64 @com_eventobj @commonfilesdir @compiled @computername @comspec
@cpuarch @cr @crlf @desktopcommondir @desktopdepth @desktopdir
@desktopheight @desktoprefresh @desktopwidth @documentscommondir @error
@exitcode @exitmethod @extended @favoritescommondir @favoritesdir
@gui_ctrlhandle @gui_ctrlid @gui_dragfile @gui_dragid @gui_dropid
@gui_winhandle @homedrive @homepath @homeshare @hotkeypressed @hour
@ipaddress1 @ipaddress2 @ipaddress3 @ipaddress4 @kblayout @lf
@logondnsdomain @logondomain @logonserver @mday @min @mon @msec @muilang
@mydocumentsdir @numparams @osarch @osbuild @oslang @osservicepack @ostype
@osversion @programfilesdir @programscommondir @programsdir @scriptdir
@scriptfullpath @scriptlinenumber @scriptname @sec @startmenucommondir
@startmenudir @startupcommondir @startupdir @sw_disable @sw_enable @sw_hide
@sw_lock @sw_maximize @sw_minimize @sw_restore @sw_show @sw_showdefault
@sw_showmaximized @sw_showminimized @sw_showminnoactive @sw_showna
@sw_shownoactivate @sw_shownormal @sw_unlock @systemdir @tab @tempdir
@tray_id @trayiconflashing @trayiconvisible @username @userprofiledir @wday
@windowsdir @workingdir @yday @year""".split()
tokens = {
'root': [
(r';.*\n', Comment.Single),
(r'(#comments-start|#cs).*?(#comments-end|#ce)', Comment.Multiline),
(r'[\[\]{}(),;]', Punctuation),
(r'(and|or|not)\b', Operator.Word),
(r'[$|@][a-zA-Z_]\w*', Name.Variable),
(r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator),
include('commands'),
include('labels'),
include('builtInFunctions'),
include('builtInMacros'),
(r'"', String, combined('stringescape', 'dqs')),
include('numbers'),
(r'[a-zA-Z_#@$][\w#@$]*', Name),
(r'\\|\'', Text),
(r'\`([,%`abfnrtv\-+;])', String.Escape),
(r'_\n', Text), # Line continuation
include('garbage'),
],
'commands': [
(r'(?i)(\s*)(%s)\b' % '|'.join(keywords),
bygroups(Text, Name.Builtin)),
],
'builtInFunctions': [
(r'(?i)(%s)\b' % '|'.join(functions),
Name.Function),
],
'builtInMacros': [
(r'(?i)(%s)\b' % '|'.join(macros),
Name.Variable.Global),
],
'labels': [
# sendkeys
(r'(^\s*)(\{\S+?\})', bygroups(Text, Name.Label)),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+[eE][+-]?[0-9]+', Number.Float),
(r'0\d+', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer)
],
'stringescape': [
(r'\"\"|\`([,%`abfnrtv])', String.Escape),
],
'strings': [
(r'[^"\n]+', String),
],
'dqs': [
(r'"', String, '#pop'),
include('strings')
],
'garbage': [
(r'[^\S\n]', Text),
],
}
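# A pattern worth noting in this file: keyword, function, and macro lists are
# kept as plain whitespace-separated strings and folded into one
# case-insensitive alternation at class-definition time. A standalone sketch
# of the same technique (illustrative names):
import re

functions = 'abs acos adlibregister asc'.split()
keyword_re = re.compile(r'(?i)(%s)\b' % '|'.join(functions))
assert keyword_re.match('ACos(0.5)').group(1) == 'ACos'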


@@ -1,500 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.basic
~~~~~~~~~~~~~~~~~~~~~
Lexers for BASIC like languages (other than VB.net).
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, default, words, include
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['BlitzBasicLexer', 'BlitzMaxLexer', 'MonkeyLexer', 'CbmBasicV2Lexer',
'QBasicLexer']
class BlitzMaxLexer(RegexLexer):
"""
For `BlitzMax <http://blitzbasic.com>`_ source code.
.. versionadded:: 1.4
"""
name = 'BlitzMax'
aliases = ['blitzmax', 'bmax']
filenames = ['*.bmx']
mimetypes = ['text/x-bmx']
bmax_vopwords = r'\b(Shl|Shr|Sar|Mod)\b'
bmax_sktypes = r'@{1,2}|[!#$%]'
bmax_lktypes = r'\b(Int|Byte|Short|Float|Double|Long)\b'
bmax_name = r'[a-z_]\w*'
bmax_var = (r'(%s)(?:(?:([ \t]*)(%s)|([ \t]*:[ \t]*\b(?:Shl|Shr|Sar|Mod)\b)'
r'|([ \t]*)(:)([ \t]*)(?:%s|(%s)))(?:([ \t]*)(Ptr))?)') % \
(bmax_name, bmax_sktypes, bmax_lktypes, bmax_name)
bmax_func = bmax_var + r'?((?:[ \t]|\.\.\n)*)([(])'
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
# Text
(r'[ \t]+', Text),
(r'\.\.\n', Text), # Line continuation
# Comments
(r"'.*?\n", Comment.Single),
(r'([ \t]*)\bRem\n(\n|.)*?\s*\bEnd([ \t]*)Rem', Comment.Multiline),
# Data types
('"', String.Double, 'string'),
# Numbers
(r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
(r'\.[0-9]*(?!\.)', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\$[0-9a-f]+', Number.Hex),
(r'\%[10]+', Number.Bin),
# Other
(r'(?:(?:(:)?([ \t]*)(:?%s|([+\-*/&|~]))|Or|And|Not|[=<>^]))' %
(bmax_vopwords), Operator),
(r'[(),.:\[\]]', Punctuation),
(r'(?:#[\w \t]*)', Name.Label),
(r'(?:\?[\w \t]*)', Comment.Preproc),
# Identifiers
(r'\b(New)\b([ \t]?)([(]?)(%s)' % (bmax_name),
bygroups(Keyword.Reserved, Text, Punctuation, Name.Class)),
(r'\b(Import|Framework|Module)([ \t]+)(%s\.%s)' %
(bmax_name, bmax_name),
bygroups(Keyword.Reserved, Text, Keyword.Namespace)),
(bmax_func, bygroups(Name.Function, Text, Keyword.Type,
Operator, Text, Punctuation, Text,
Keyword.Type, Name.Class, Text,
Keyword.Type, Text, Punctuation)),
(bmax_var, bygroups(Name.Variable, Text, Keyword.Type, Operator,
Text, Punctuation, Text, Keyword.Type,
Name.Class, Text, Keyword.Type)),
(r'\b(Type|Extends)([ \t]+)(%s)' % (bmax_name),
bygroups(Keyword.Reserved, Text, Name.Class)),
# Keywords
(r'\b(Ptr)\b', Keyword.Type),
(r'\b(Pi|True|False|Null|Self|Super)\b', Keyword.Constant),
(r'\b(Local|Global|Const|Field)\b', Keyword.Declaration),
(words((
'TNullMethodException', 'TNullFunctionException',
'TNullObjectException', 'TArrayBoundsException',
'TRuntimeException'), prefix=r'\b', suffix=r'\b'), Name.Exception),
(words((
'Strict', 'SuperStrict', 'Module', 'ModuleInfo',
'End', 'Return', 'Continue', 'Exit', 'Public', 'Private',
'Var', 'VarPtr', 'Chr', 'Len', 'Asc', 'SizeOf', 'Sgn', 'Abs', 'Min', 'Max',
'New', 'Release', 'Delete', 'Incbin', 'IncbinPtr', 'IncbinLen',
'Framework', 'Include', 'Import', 'Extern', 'EndExtern',
'Function', 'EndFunction', 'Type', 'EndType', 'Extends', 'Method', 'EndMethod',
'Abstract', 'Final', 'If', 'Then', 'Else', 'ElseIf', 'EndIf',
'For', 'To', 'Next', 'Step', 'EachIn', 'While', 'Wend', 'EndWhile',
'Repeat', 'Until', 'Forever', 'Select', 'Case', 'Default', 'EndSelect',
'Try', 'Catch', 'EndTry', 'Throw', 'Assert', 'Goto', 'DefData', 'ReadData',
'RestoreData'), prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
# Final resolve (for variable names and such)
(r'(%s)' % (bmax_name), Name.Variable),
],
'string': [
(r'""', String.Double),
(r'"C?', String.Double, '#pop'),
(r'[^"]+', String.Double),
],
}
class BlitzBasicLexer(RegexLexer):
"""
For `BlitzBasic <http://blitzbasic.com>`_ source code.
.. versionadded:: 2.0
"""
name = 'BlitzBasic'
aliases = ['blitzbasic', 'b3d', 'bplus']
filenames = ['*.bb', '*.decls']
mimetypes = ['text/x-bb']
bb_sktypes = r'@{1,2}|[#$%]'
bb_name = r'[a-z]\w*'
bb_var = (r'(%s)(?:([ \t]*)(%s)|([ \t]*)([.])([ \t]*)(?:(%s)))?') % \
(bb_name, bb_sktypes, bb_name)
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
# Text
(r'[ \t]+', Text),
# Comments
(r";.*?\n", Comment.Single),
# Data types
('"', String.Double, 'string'),
# Numbers
(r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
(r'\.[0-9]+(?!\.)', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\$[0-9a-f]+', Number.Hex),
(r'\%[10]+', Number.Bin),
# Other
(words(('Shl', 'Shr', 'Sar', 'Mod', 'Or', 'And', 'Not',
'Abs', 'Sgn', 'Handle', 'Int', 'Float', 'Str',
'First', 'Last', 'Before', 'After'),
prefix=r'\b', suffix=r'\b'),
Operator),
(r'([+\-*/~=<>^])', Operator),
(r'[(),:\[\]\\]', Punctuation),
(r'\.([ \t]*)(%s)' % bb_name, Name.Label),
# Identifiers
(r'\b(New)\b([ \t]+)(%s)' % (bb_name),
bygroups(Keyword.Reserved, Text, Name.Class)),
(r'\b(Gosub|Goto)\b([ \t]+)(%s)' % (bb_name),
bygroups(Keyword.Reserved, Text, Name.Label)),
(r'\b(Object)\b([ \t]*)([.])([ \t]*)(%s)\b' % (bb_name),
bygroups(Operator, Text, Punctuation, Text, Name.Class)),
(r'\b%s\b([ \t]*)(\()' % bb_var,
bygroups(Name.Function, Text, Keyword.Type, Text, Punctuation,
Text, Name.Class, Text, Punctuation)),
(r'\b(Function)\b([ \t]+)%s' % bb_var,
bygroups(Keyword.Reserved, Text, Name.Function, Text, Keyword.Type,
Text, Punctuation, Text, Name.Class)),
(r'\b(Type)([ \t]+)(%s)' % (bb_name),
bygroups(Keyword.Reserved, Text, Name.Class)),
# Keywords
(r'\b(Pi|True|False|Null)\b', Keyword.Constant),
(r'\b(Local|Global|Const|Field|Dim)\b', Keyword.Declaration),
(words((
'End', 'Return', 'Exit', 'Chr', 'Len', 'Asc', 'New', 'Delete', 'Insert',
'Include', 'Function', 'Type', 'If', 'Then', 'Else', 'ElseIf', 'EndIf',
'For', 'To', 'Next', 'Step', 'Each', 'While', 'Wend',
'Repeat', 'Until', 'Forever', 'Select', 'Case', 'Default',
'Goto', 'Gosub', 'Data', 'Read', 'Restore'), prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
# Final resolve (for variable names and such)
# (r'(%s)' % (bb_name), Name.Variable),
(bb_var, bygroups(Name.Variable, Text, Keyword.Type,
Text, Punctuation, Text, Name.Class)),
],
'string': [
(r'""', String.Double),
(r'"C?', String.Double, '#pop'),
(r'[^"]+', String.Double),
],
}
class MonkeyLexer(RegexLexer):
"""
For
`Monkey <https://en.wikipedia.org/wiki/Monkey_(programming_language)>`_
source code.
.. versionadded:: 1.6
"""
name = 'Monkey'
aliases = ['monkey']
filenames = ['*.monkey']
mimetypes = ['text/x-monkey']
name_variable = r'[a-z_]\w*'
name_function = r'[A-Z]\w*'
name_constant = r'[A-Z_][A-Z0-9_]*'
name_class = r'[A-Z]\w*'
name_module = r'[a-z0-9_]*'
keyword_type = r'(?:Int|Float|String|Bool|Object|Array|Void)'
# ? == Bool // % == Int // # == Float // $ == String
keyword_type_special = r'[?%#$]'
flags = re.MULTILINE
tokens = {
'root': [
# Text
(r'\s+', Text),
# Comments
(r"'.*", Comment),
(r'(?i)^#rem\b', Comment.Multiline, 'comment'),
# preprocessor directives
(r'(?i)^(?:#If|#ElseIf|#Else|#EndIf|#End|#Print|#Error)\b', Comment.Preproc),
# preprocessor variable (any line starting with '#' that is not a directive)
(r'^#', Comment.Preproc, 'variables'),
# String
('"', String.Double, 'string'),
# Numbers
(r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
(r'\.[0-9]+(?!\.)', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\$[0-9a-fA-F]+', Number.Hex),
(r'\%[10]+', Number.Bin),
# Native data types
(r'\b%s\b' % keyword_type, Keyword.Type),
# Exception handling
(r'(?i)\b(?:Try|Catch|Throw)\b', Keyword.Reserved),
(r'Throwable', Name.Exception),
# Builtins
(r'(?i)\b(?:Null|True|False)\b', Name.Builtin),
(r'(?i)\b(?:Self|Super)\b', Name.Builtin.Pseudo),
(r'\b(?:HOST|LANG|TARGET|CONFIG)\b', Name.Constant),
# Keywords
(r'(?i)^(Import)(\s+)(.*)(\n)',
bygroups(Keyword.Namespace, Text, Name.Namespace, Text)),
(r'(?i)^Strict\b.*\n', Keyword.Reserved),
(r'(?i)(Const|Local|Global|Field)(\s+)',
bygroups(Keyword.Declaration, Text), 'variables'),
(r'(?i)(New|Class|Interface|Extends|Implements)(\s+)',
bygroups(Keyword.Reserved, Text), 'classname'),
(r'(?i)(Function|Method)(\s+)',
bygroups(Keyword.Reserved, Text), 'funcname'),
(r'(?i)(?:End|Return|Public|Private|Extern|Property|'
r'Final|Abstract)\b', Keyword.Reserved),
# Flow Control stuff
(r'(?i)(?:If|Then|Else|ElseIf|EndIf|'
r'Select|Case|Default|'
r'While|Wend|'
r'Repeat|Until|Forever|'
r'For|To|Until|Step|EachIn|Next|'
r'Exit|Continue)\s+', Keyword.Reserved),
# not used yet
(r'(?i)\b(?:Module|Inline)\b', Keyword.Reserved),
# Array
(r'[\[\]]', Punctuation),
# Other
(r'<=|>=|<>|\*=|/=|\+=|-=|&=|~=|\|=|[-&*/^+=<>|~]', Operator),
(r'(?i)(?:Not|Mod|Shl|Shr|And|Or)', Operator.Word),
(r'[(){}!#,.:]', Punctuation),
# catch the rest
(r'%s\b' % name_constant, Name.Constant),
(r'%s\b' % name_function, Name.Function),
(r'%s\b' % name_variable, Name.Variable),
],
'funcname': [
(r'(?i)%s\b' % name_function, Name.Function),
(r':', Punctuation, 'classname'),
(r'\s+', Text),
(r'\(', Punctuation, 'variables'),
(r'\)', Punctuation, '#pop')
],
'classname': [
(r'%s\.' % name_module, Name.Namespace),
(r'%s\b' % keyword_type, Keyword.Type),
(r'%s\b' % name_class, Name.Class),
# array (of given size)
(r'(\[)(\s*)(\d*)(\s*)(\])',
bygroups(Punctuation, Text, Number.Integer, Text, Punctuation)),
# generics
(r'\s+(?!<)', Text, '#pop'),
(r'<', Punctuation, '#push'),
(r'>', Punctuation, '#pop'),
(r'\n', Text, '#pop'),
default('#pop')
],
'variables': [
(r'%s\b' % name_constant, Name.Constant),
(r'%s\b' % name_variable, Name.Variable),
(r'%s' % keyword_type_special, Keyword.Type),
(r'\s+', Text),
(r':', Punctuation, 'classname'),
(r',', Punctuation, '#push'),
default('#pop')
],
'string': [
(r'[^"~]+', String.Double),
(r'~q|~n|~r|~t|~z|~~', String.Escape),
(r'"', String.Double, '#pop'),
],
'comment': [
(r'(?i)^#rem.*?', Comment.Multiline, "#push"),
(r'(?i)^#end.*?', Comment.Multiline, "#pop"),
(r'\n', Comment.Multiline),
(r'.+', Comment.Multiline),
],
}
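# Monkey's #rem blocks nest: the 'comment' state pushes itself on each inner
# '#rem' and pops on '#end', so the state stack depth mirrors the nesting
# depth of the remark blocks.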
class CbmBasicV2Lexer(RegexLexer):
"""
For CBM BASIC V2 sources.
.. versionadded:: 1.6
"""
name = 'CBM BASIC V2'
aliases = ['cbmbas']
filenames = ['*.bas']
flags = re.IGNORECASE
tokens = {
'root': [
(r'rem.*\n', Comment.Single),
(r'\s+', Text),
(r'new|run|end|for|to|next|step|go(to|sub)?|on|return|stop|cont'
r'|if|then|input#?|read|wait|load|save|verify|poke|sys|print#?'
r'|list|clr|cmd|open|close|get#?', Keyword.Reserved),
(r'data|restore|dim|let|def|fn', Keyword.Declaration),
(r'tab|spc|sgn|int|abs|usr|fre|pos|sqr|rnd|log|exp|cos|sin|tan|atn'
r'|peek|len|val|asc|(str|chr|left|right|mid)\$', Name.Builtin),
(r'[-+*/^<>=]', Operator),
(r'not|and|or', Operator.Word),
(r'"[^"\n]*.', String),
(r'\d+|[-+]?\d*\.\d*(e[-+]?\d+)?', Number.Float),
(r'[(),:;]', Punctuation),
(r'\w+[$%]?', Name),
]
}
def analyse_text(text):
# if it starts with a line number, it shouldn't be a "modern" Basic
# like VB.net
if re.match(r'\d+', text):
return 0.2
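# The deliberately low 0.2 keeps CBM BASIC from shadowing stronger matches:
# guess_lexer picks the highest analyse_text score, and a leading line number
# alone is only weak evidence of this dialect.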
class QBasicLexer(RegexLexer):
"""
For
`QBasic <http://en.wikipedia.org/wiki/QBasic>`_
source code.
.. versionadded:: 2.0
"""
name = 'QBasic'
aliases = ['qbasic', 'basic']
filenames = ['*.BAS', '*.bas']
mimetypes = ['text/basic']
declarations = ('DATA', 'LET')
functions = (
'ABS', 'ASC', 'ATN', 'CDBL', 'CHR$', 'CINT', 'CLNG',
'COMMAND$', 'COS', 'CSNG', 'CSRLIN', 'CVD', 'CVDMBF', 'CVI',
'CVL', 'CVS', 'CVSMBF', 'DATE$', 'ENVIRON$', 'EOF', 'ERDEV',
'ERDEV$', 'ERL', 'ERR', 'EXP', 'FILEATTR', 'FIX', 'FRE',
'FREEFILE', 'HEX$', 'INKEY$', 'INP', 'INPUT$', 'INSTR', 'INT',
'IOCTL$', 'LBOUND', 'LCASE$', 'LEFT$', 'LEN', 'LOC', 'LOF',
'LOG', 'LPOS', 'LTRIM$', 'MID$', 'MKD$', 'MKDMBF$', 'MKI$',
'MKL$', 'MKS$', 'MKSMBF$', 'OCT$', 'PEEK', 'PEN', 'PLAY',
'PMAP', 'POINT', 'POS', 'RIGHT$', 'RND', 'RTRIM$', 'SADD',
'SCREEN', 'SEEK', 'SETMEM', 'SGN', 'SIN', 'SPACE$', 'SPC',
'SQR', 'STICK', 'STR$', 'STRIG', 'STRING$', 'TAB', 'TAN',
'TIME$', 'TIMER', 'UBOUND', 'UCASE$', 'VAL', 'VARPTR',
'VARPTR$', 'VARSEG'
)
metacommands = ('$DYNAMIC', '$INCLUDE', '$STATIC')
operators = ('AND', 'EQV', 'IMP', 'NOT', 'OR', 'XOR')
statements = (
'BEEP', 'BLOAD', 'BSAVE', 'CALL', 'CALL ABSOLUTE',
'CALL INTERRUPT', 'CALLS', 'CHAIN', 'CHDIR', 'CIRCLE', 'CLEAR',
'CLOSE', 'CLS', 'COLOR', 'COM', 'COMMON', 'CONST', 'DATA',
'DATE$', 'DECLARE', 'DEF FN', 'DEF SEG', 'DEFDBL', 'DEFINT',
'DEFLNG', 'DEFSNG', 'DEFSTR', 'DEF', 'DIM', 'DO', 'LOOP',
'DRAW', 'END', 'ENVIRON', 'ERASE', 'ERROR', 'EXIT', 'FIELD',
'FILES', 'FOR', 'NEXT', 'FUNCTION', 'GET', 'GOSUB', 'GOTO',
'IF', 'THEN', 'INPUT', 'INPUT #', 'IOCTL', 'KEY', 'KEY',
'KILL', 'LET', 'LINE', 'LINE INPUT', 'LINE INPUT #', 'LOCATE',
'LOCK', 'UNLOCK', 'LPRINT', 'LSET', 'MID$', 'MKDIR', 'NAME',
'ON COM', 'ON ERROR', 'ON KEY', 'ON PEN', 'ON PLAY',
'ON STRIG', 'ON TIMER', 'ON UEVENT', 'ON', 'OPEN', 'OPEN COM',
'OPTION BASE', 'OUT', 'PAINT', 'PALETTE', 'PCOPY', 'PEN',
'PLAY', 'POKE', 'PRESET', 'PRINT', 'PRINT #', 'PRINT USING',
'PSET', 'PUT', 'PUT', 'RANDOMIZE', 'READ', 'REDIM', 'REM',
'RESET', 'RESTORE', 'RESUME', 'RETURN', 'RMDIR', 'RSET', 'RUN',
'SCREEN', 'SEEK', 'SELECT CASE', 'SHARED', 'SHELL', 'SLEEP',
'SOUND', 'STATIC', 'STOP', 'STRIG', 'SUB', 'SWAP', 'SYSTEM',
'TIME$', 'TIMER', 'TROFF', 'TRON', 'TYPE', 'UEVENT', 'UNLOCK',
'VIEW', 'WAIT', 'WHILE', 'WEND', 'WIDTH', 'WINDOW', 'WRITE'
)
keywords = (
'ACCESS', 'ALIAS', 'ANY', 'APPEND', 'AS', 'BASE', 'BINARY',
'BYVAL', 'CASE', 'CDECL', 'DOUBLE', 'ELSE', 'ELSEIF', 'ENDIF',
'INTEGER', 'IS', 'LIST', 'LOCAL', 'LONG', 'LOOP', 'MOD',
'NEXT', 'OFF', 'ON', 'OUTPUT', 'RANDOM', 'SIGNAL', 'SINGLE',
'STEP', 'STRING', 'THEN', 'TO', 'UNTIL', 'USING', 'WEND'
)
tokens = {
'root': [
(r'\n+', Text),
(r'\s+', Text.Whitespace),
(r'^(\s*)(\d*)(\s*)(REM .*)$',
bygroups(Text.Whitespace, Name.Label, Text.Whitespace,
Comment.Single)),
(r'^(\s*)(\d+)(\s*)',
bygroups(Text.Whitespace, Name.Label, Text.Whitespace)),
(r'(?=[\s]*)(\w+)(?=[\s]*=)', Name.Variable.Global),
(r'(?=[^"]*)\'.*$', Comment.Single),
(r'"[^\n"]*"', String.Double),
(r'(END)(\s+)(FUNCTION|IF|SELECT|SUB)',
bygroups(Keyword.Reserved, Text.Whitespace, Keyword.Reserved)),
(r'(DECLARE)(\s+)([A-Z]+)(\s+)(\S+)',
bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable,
Text.Whitespace, Name)),
(r'(DIM)(\s+)(SHARED)(\s+)([^\s(]+)',
bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable,
Text.Whitespace, Name.Variable.Global)),
(r'(DIM)(\s+)([^\s(]+)',
bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable.Global)),
(r'^(\s*)([a-zA-Z_]+)(\s*)(\=)',
bygroups(Text.Whitespace, Name.Variable.Global, Text.Whitespace,
Operator)),
(r'(GOTO|GOSUB)(\s+)(\w+\:?)',
bygroups(Keyword.Reserved, Text.Whitespace, Name.Label)),
(r'(SUB)(\s+)(\w+\:?)',
bygroups(Keyword.Reserved, Text.Whitespace, Name.Label)),
include('declarations'),
include('functions'),
include('metacommands'),
include('operators'),
include('statements'),
include('keywords'),
(r'[a-zA-Z_]\w*[$@#&!]', Name.Variable.Global),
(r'[a-zA-Z_]\w*\:', Name.Label),
(r'\-?\d*\.\d+[@|#]?', Number.Float),
(r'\-?\d+[@|#]', Number.Float),
(r'\-?\d+#?', Number.Integer.Long),
(r'\-?\d+#?', Number.Integer),
(r'!=|==|:=|\.=|<<|>>|[-~+/\\*%=<>&^|?:!.]', Operator),
(r'[\[\]{}(),;]', Punctuation),
(r'[\w]+', Name.Variable.Global),
],
# can't use regular \b because of X$()
# XXX: use words() here
'declarations': [
(r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, declarations)),
Keyword.Declaration),
],
'functions': [
(r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, functions)),
Keyword.Reserved),
],
'metacommands': [
(r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, metacommands)),
Keyword.Constant),
],
'operators': [
(r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, operators)), Operator.Word),
],
'statements': [
(r'\b(%s)\b' % '|'.join(map(re.escape, statements)),
Keyword.Reserved),
],
'keywords': [
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
],
}
def analyse_text(text):
if '$DYNAMIC' in text or '$STATIC' in text:
return 0.9
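# The helper states above escape each name before joining because builtins
# like CHR$ contain regex metacharacters. A standalone sketch of the same
# technique (illustrative subset):
import re

functions = ('ABS', 'CHR$', 'LEFT$')
pattern = re.compile(r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, functions)))
assert pattern.search('PRINT CHR$(65)').group(1) == 'CHR$'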


@@ -1,592 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.business
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for "business-oriented" languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, words, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
from pygments.lexers._openedge_builtins import OPENEDGEKEYWORDS
__all__ = ['CobolLexer', 'CobolFreeformatLexer', 'ABAPLexer', 'OpenEdgeLexer',
'GoodDataCLLexer', 'MaqlLexer']
class CobolLexer(RegexLexer):
"""
Lexer for OpenCOBOL code.
.. versionadded:: 1.6
"""
name = 'COBOL'
aliases = ['cobol']
filenames = ['*.cob', '*.COB', '*.cpy', '*.CPY']
mimetypes = ['text/x-cobol']
flags = re.IGNORECASE | re.MULTILINE
# Data Types: by PICTURE and USAGE
# Operators: **, *, +, -, /, <, >, <=, >=, =, <>
# Logical (?): NOT, AND, OR
# Reserved words:
# http://opencobol.add1tocobol.com/#reserved-words
# Intrinsics:
# http://opencobol.add1tocobol.com/#does-opencobol-implement-any-intrinsic-functions
tokens = {
'root': [
include('comment'),
include('strings'),
include('core'),
include('nums'),
(r'[a-z0-9]([\w\-]*[a-z0-9]+)?', Name.Variable),
# (r'[\s]+', Text),
(r'[ \t]+', Text),
],
'comment': [
(r'(^.{6}[*/].*\n|^.{6}|\*>.*\n)', Comment),
],
'core': [
# Figurative constants
(r'(^|(?<=[^0-9a-z_\-]))(ALL\s+)?'
r'((ZEROES)|(HIGH-VALUE|LOW-VALUE|QUOTE|SPACE|ZERO)(S)?)'
r'\s*($|(?=[^0-9a-z_\-]))',
Name.Constant),
# Reserved words STATEMENTS and other bolds
(words((
'ACCEPT', 'ADD', 'ALLOCATE', 'CALL', 'CANCEL', 'CLOSE', 'COMPUTE',
'CONFIGURATION', 'CONTINUE', 'DATA', 'DELETE', 'DISPLAY', 'DIVIDE',
'DIVISION', 'ELSE', 'END', 'END-ACCEPT',
'END-ADD', 'END-CALL', 'END-COMPUTE', 'END-DELETE', 'END-DISPLAY',
'END-DIVIDE', 'END-EVALUATE', 'END-IF', 'END-MULTIPLY', 'END-OF-PAGE',
'END-PERFORM', 'END-READ', 'END-RETURN', 'END-REWRITE', 'END-SEARCH',
'END-START', 'END-STRING', 'END-SUBTRACT', 'END-UNSTRING', 'END-WRITE',
'ENVIRONMENT', 'EVALUATE', 'EXIT', 'FD', 'FILE', 'FILE-CONTROL', 'FOREVER',
'FREE', 'GENERATE', 'GO', 'GOBACK', 'IDENTIFICATION', 'IF', 'INITIALIZE',
'INITIATE', 'INPUT-OUTPUT', 'INSPECT', 'INVOKE', 'I-O-CONTROL', 'LINKAGE',
'LOCAL-STORAGE', 'MERGE', 'MOVE', 'MULTIPLY', 'OPEN', 'PERFORM',
'PROCEDURE', 'PROGRAM-ID', 'RAISE', 'READ', 'RELEASE', 'RESUME',
'RETURN', 'REWRITE', 'SCREEN', 'SD', 'SEARCH', 'SECTION', 'SET',
'SORT', 'START', 'STOP', 'STRING', 'SUBTRACT', 'SUPPRESS',
'TERMINATE', 'THEN', 'UNLOCK', 'UNSTRING', 'USE', 'VALIDATE',
'WORKING-STORAGE', 'WRITE'), prefix=r'(^|(?<=[^0-9a-z_\-]))',
suffix=r'\s*($|(?=[^0-9a-z_\-]))'),
Keyword.Reserved),
# Reserved words
(words((
'ACCESS', 'ADDRESS', 'ADVANCING', 'AFTER', 'ALL',
'ALPHABET', 'ALPHABETIC', 'ALPHABETIC-LOWER', 'ALPHABETIC-UPPER',
'ALPHANUMERIC', 'ALPHANUMERIC-EDITED', 'ALSO', 'ALTER', 'ALTERNATE',
'ANY', 'ARE', 'AREA', 'AREAS', 'ARGUMENT-NUMBER', 'ARGUMENT-VALUE', 'AS',
'ASCENDING', 'ASSIGN', 'AT', 'AUTO', 'AUTO-SKIP', 'AUTOMATIC', 'AUTOTERMINATE',
'BACKGROUND-COLOR', 'BASED', 'BEEP', 'BEFORE', 'BELL',
'BLANK', 'BLINK', 'BLOCK', 'BOTTOM', 'BY', 'BYTE-LENGTH', 'CHAINING',
'CHARACTER', 'CHARACTERS', 'CLASS', 'CODE', 'CODE-SET', 'COL', 'COLLATING',
'COLS', 'COLUMN', 'COLUMNS', 'COMMA', 'COMMAND-LINE', 'COMMIT', 'COMMON',
'CONSTANT', 'CONTAINS', 'CONTENT', 'CONTROL',
'CONTROLS', 'CONVERTING', 'COPY', 'CORR', 'CORRESPONDING', 'COUNT', 'CRT',
'CURRENCY', 'CURSOR', 'CYCLE', 'DATE', 'DAY', 'DAY-OF-WEEK', 'DE', 'DEBUGGING',
'DECIMAL-POINT', 'DECLARATIVES', 'DEFAULT', 'DELIMITED',
'DELIMITER', 'DEPENDING', 'DESCENDING', 'DETAIL', 'DISK',
'DOWN', 'DUPLICATES', 'DYNAMIC', 'EBCDIC',
'ENTRY', 'ENVIRONMENT-NAME', 'ENVIRONMENT-VALUE', 'EOL', 'EOP',
'EOS', 'ERASE', 'ERROR', 'ESCAPE', 'EXCEPTION',
'EXCLUSIVE', 'EXTEND', 'EXTERNAL',
'FILE-ID', 'FILLER', 'FINAL', 'FIRST', 'FIXED', 'FLOAT-LONG', 'FLOAT-SHORT',
'FOOTING', 'FOR', 'FOREGROUND-COLOR', 'FORMAT', 'FROM', 'FULL', 'FUNCTION',
'FUNCTION-ID', 'GIVING', 'GLOBAL', 'GROUP',
'HEADING', 'HIGHLIGHT', 'I-O', 'ID',
'IGNORE', 'IGNORING', 'IN', 'INDEX', 'INDEXED', 'INDICATE',
'INITIAL', 'INITIALIZED', 'INPUT',
'INTO', 'INTRINSIC', 'INVALID', 'IS', 'JUST', 'JUSTIFIED', 'KEY', 'LABEL',
'LAST', 'LEADING', 'LEFT', 'LENGTH', 'LIMIT', 'LIMITS', 'LINAGE',
'LINAGE-COUNTER', 'LINE', 'LINES', 'LOCALE', 'LOCK',
'LOWLIGHT', 'MANUAL', 'MEMORY', 'MINUS', 'MODE',
'MULTIPLE', 'NATIONAL', 'NATIONAL-EDITED', 'NATIVE',
'NEGATIVE', 'NEXT', 'NO', 'NULL', 'NULLS', 'NUMBER', 'NUMBERS', 'NUMERIC',
'NUMERIC-EDITED', 'OBJECT-COMPUTER', 'OCCURS', 'OF', 'OFF', 'OMITTED', 'ON', 'ONLY',
'OPTIONAL', 'ORDER', 'ORGANIZATION', 'OTHER', 'OUTPUT', 'OVERFLOW',
'OVERLINE', 'PACKED-DECIMAL', 'PADDING', 'PAGE', 'PARAGRAPH',
'PLUS', 'POINTER', 'POSITION', 'POSITIVE', 'PRESENT', 'PREVIOUS',
'PRINTER', 'PRINTING', 'PROCEDURE-POINTER', 'PROCEDURES',
'PROCEED', 'PROGRAM', 'PROGRAM-POINTER', 'PROMPT', 'QUOTE',
'QUOTES', 'RANDOM', 'RD', 'RECORD', 'RECORDING', 'RECORDS', 'RECURSIVE',
'REDEFINES', 'REEL', 'REFERENCE', 'RELATIVE', 'REMAINDER', 'REMOVAL',
'RENAMES', 'REPLACING', 'REPORT', 'REPORTING', 'REPORTS', 'REPOSITORY',
'REQUIRED', 'RESERVE', 'RETURNING', 'REVERSE-VIDEO', 'REWIND',
'RIGHT', 'ROLLBACK', 'ROUNDED', 'RUN', 'SAME', 'SCROLL',
'SECURE', 'SEGMENT-LIMIT', 'SELECT', 'SENTENCE', 'SEPARATE',
'SEQUENCE', 'SEQUENTIAL', 'SHARING', 'SIGN', 'SIGNED', 'SIGNED-INT',
'SIGNED-LONG', 'SIGNED-SHORT', 'SIZE', 'SORT-MERGE', 'SOURCE',
'SOURCE-COMPUTER', 'SPECIAL-NAMES', 'STANDARD',
'STANDARD-1', 'STANDARD-2', 'STATUS', 'SUM',
'SYMBOLIC', 'SYNC', 'SYNCHRONIZED', 'TALLYING', 'TAPE',
'TEST', 'THROUGH', 'THRU', 'TIME', 'TIMES', 'TO', 'TOP', 'TRAILING',
'TRANSFORM', 'TYPE', 'UNDERLINE', 'UNIT', 'UNSIGNED',
'UNSIGNED-INT', 'UNSIGNED-LONG', 'UNSIGNED-SHORT', 'UNTIL', 'UP',
'UPDATE', 'UPON', 'USAGE', 'USING', 'VALUE', 'VALUES', 'VARYING',
'WAIT', 'WHEN', 'WITH', 'WORDS', 'YYYYDDD', 'YYYYMMDD'),
prefix=r'(^|(?<=[^0-9a-z_\-]))', suffix=r'\s*($|(?=[^0-9a-z_\-]))'),
Keyword.Pseudo),
# inactive reserved words
(words((
'ACTIVE-CLASS', 'ALIGNED', 'ANYCASE', 'ARITHMETIC', 'ATTRIBUTE', 'B-AND',
'B-NOT', 'B-OR', 'B-XOR', 'BIT', 'BOOLEAN', 'CD', 'CENTER', 'CF', 'CH', 'CHAIN', 'CLASS-ID',
'CLASSIFICATION', 'COMMUNICATION', 'CONDITION', 'DATA-POINTER',
'DESTINATION', 'DISABLE', 'EC', 'EGI', 'EMI', 'ENABLE', 'END-RECEIVE',
'ENTRY-CONVENTION', 'EO', 'ESI', 'EXCEPTION-OBJECT', 'EXPANDS', 'FACTORY',
'FLOAT-BINARY-16', 'FLOAT-BINARY-34', 'FLOAT-BINARY-7',
'FLOAT-DECIMAL-16', 'FLOAT-DECIMAL-34', 'FLOAT-EXTENDED', 'FORMAT',
'FUNCTION-POINTER', 'GET', 'GROUP-USAGE', 'IMPLEMENTS', 'INFINITY',
'INHERITS', 'INTERFACE', 'INTERFACE-ID', 'INVOKE', 'LC_ALL', 'LC_COLLATE',
'LC_CTYPE', 'LC_MESSAGES', 'LC_MONETARY', 'LC_NUMERIC', 'LC_TIME',
'LINE-COUNTER', 'MESSAGE', 'METHOD', 'METHOD-ID', 'NESTED', 'NONE', 'NORMAL',
'OBJECT', 'OBJECT-REFERENCE', 'OPTIONS', 'OVERRIDE', 'PAGE-COUNTER', 'PF', 'PH',
'PROPERTY', 'PROTOTYPE', 'PURGE', 'QUEUE', 'RAISE', 'RAISING', 'RECEIVE',
'RELATION', 'REPLACE', 'REPRESENTS-NOT-A-NUMBER', 'RESET', 'RESUME', 'RETRY',
'RF', 'RH', 'SECONDS', 'SEGMENT', 'SELF', 'SEND', 'SOURCES', 'STATEMENT', 'STEP',
'STRONG', 'SUB-QUEUE-1', 'SUB-QUEUE-2', 'SUB-QUEUE-3', 'SUPER', 'SYMBOL',
'SYSTEM-DEFAULT', 'TABLE', 'TERMINAL', 'TEXT', 'TYPEDEF', 'UCS-4', 'UNIVERSAL',
'USER-DEFAULT', 'UTF-16', 'UTF-8', 'VAL-STATUS', 'VALID', 'VALIDATE',
'VALIDATE-STATUS'),
prefix=r'(^|(?<=[^0-9a-z_\-]))', suffix=r'\s*($|(?=[^0-9a-z_\-]))'),
Error),
# Data Types
(r'(^|(?<=[^0-9a-z_\-]))'
r'(PIC\s+.+?(?=(\s|\.\s))|PICTURE\s+.+?(?=(\s|\.\s))|'
r'(COMPUTATIONAL)(-[1-5X])?|(COMP)(-[1-5X])?|'
r'BINARY-C-LONG|'
r'BINARY-CHAR|BINARY-DOUBLE|BINARY-LONG|BINARY-SHORT|'
r'BINARY)\s*($|(?=[^0-9a-z_\-]))', Keyword.Type),
# Operators
(r'(\*\*|\*|\+|-|/|<=|>=|<|>|==|/=|=)', Operator),
# (r'(::)', Keyword.Declaration),
(r'([(),;:&%.])', Punctuation),
# Intrinsics
(r'(^|(?<=[^0-9a-z_\-]))(ABS|ACOS|ANNUITY|ASIN|ATAN|BYTE-LENGTH|'
r'CHAR|COMBINED-DATETIME|CONCATENATE|COS|CURRENT-DATE|'
r'DATE-OF-INTEGER|DATE-TO-YYYYMMDD|DAY-OF-INTEGER|DAY-TO-YYYYDDD|'
r'EXCEPTION-(?:FILE|LOCATION|STATEMENT|STATUS)|EXP10|EXP|E|'
r'FACTORIAL|FRACTION-PART|INTEGER-OF-(?:DATE|DAY|PART)|INTEGER|'
r'LENGTH|LOCALE-(?:DATE|TIME(?:-FROM-SECONDS)?)|LOG(?:10)?|'
r'LOWER-CASE|MAX|MEAN|MEDIAN|MIDRANGE|MIN|MOD|NUMVAL(?:-C)?|'
r'ORD(?:-MAX|-MIN)?|PI|PRESENT-VALUE|RANDOM|RANGE|REM|REVERSE|'
r'SECONDS-FROM-FORMATTED-TIME|SECONDS-PAST-MIDNIGHT|SIGN|SIN|SQRT|'
r'STANDARD-DEVIATION|STORED-CHAR-LENGTH|SUBSTITUTE(?:-CASE)?|'
r'SUM|TAN|TEST-DATE-YYYYMMDD|TEST-DAY-YYYYDDD|TRIM|'
r'UPPER-CASE|VARIANCE|WHEN-COMPILED|YEAR-TO-YYYY)\s*'
r'($|(?=[^0-9a-z_\-]))', Name.Function),
# Booleans
(r'(^|(?<=[^0-9a-z_\-]))(true|false)\s*($|(?=[^0-9a-z_\-]))', Name.Builtin),
# Comparing Operators
(r'(^|(?<=[^0-9a-z_\-]))(equal|equals|ne|lt|le|gt|ge|'
r'greater|less|than|not|and|or)\s*($|(?=[^0-9a-z_\-]))', Operator.Word),
],
# \"[^\"\n]*\"|\'[^\'\n]*\'
'strings': [
# apparently strings can be delimited by EOL if they are continued
# in the next line
(r'"[^"\n]*("|\n)', String.Double),
(r"'[^'\n]*('|\n)", String.Single),
],
'nums': [
(r'\d+(\s*|\.$|$)', Number.Integer),
(r'[+-]?\d*\.\d+(E[-+]?\d+)?', Number.Float),
(r'[+-]?\d+\.\d*(E[-+]?\d+)?', Number.Float),
],
}
class CobolFreeformatLexer(CobolLexer):
"""
Lexer for Free format OpenCOBOL code.
.. versionadded:: 1.6
"""
name = 'COBOLFree'
aliases = ['cobolfree']
filenames = ['*.cbl', '*.CBL']
mimetypes = []
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'comment': [
(r'(\*>.*\n|^\w*\*.*$)', Comment),
],
}
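# Only the 'comment' state is redefined here; RegexLexer's metaclass walks the
# class MRO when building token tables, so 'root', 'strings', 'core', and
# 'nums' are inherited from CobolLexer unchanged.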
class ABAPLexer(RegexLexer):
"""
Lexer for ABAP, SAP's integrated language.
.. versionadded:: 1.1
"""
name = 'ABAP'
aliases = ['abap']
filenames = ['*.abap']
mimetypes = ['text/x-abap']
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'common': [
(r'\s+', Text),
(r'^\*.*$', Comment.Single),
(r'\".*?\n', Comment.Single),
],
'variable-names': [
(r'<\S+>', Name.Variable),
(r'\w[\w~]*(?:(\[\])|->\*)?', Name.Variable),
],
'root': [
include('common'),
# function calls
(r'(CALL\s+(?:BADI|CUSTOMER-FUNCTION|FUNCTION))(\s+)(\'?\S+\'?)',
bygroups(Keyword, Text, Name.Function)),
(r'(CALL\s+(?:DIALOG|SCREEN|SUBSCREEN|SELECTION-SCREEN|'
r'TRANSACTION|TRANSFORMATION))\b',
Keyword),
(r'(FORM|PERFORM)(\s+)(\w+)',
bygroups(Keyword, Text, Name.Function)),
(r'(PERFORM)(\s+)(\()(\w+)(\))',
bygroups(Keyword, Text, Punctuation, Name.Variable, Punctuation)),
(r'(MODULE)(\s+)(\S+)(\s+)(INPUT|OUTPUT)',
bygroups(Keyword, Text, Name.Function, Text, Keyword)),
# method implementation
(r'(METHOD)(\s+)([\w~]+)',
bygroups(Keyword, Text, Name.Function)),
# method calls
(r'(\s+)([\w\-]+)([=\-]>)([\w\-~]+)',
bygroups(Text, Name.Variable, Operator, Name.Function)),
# call methodnames returning style
(r'(?<=(=|-)>)([\w\-~]+)(?=\()', Name.Function),
# keywords with dashes in them.
# these need to be first, because for instance the -ID part
# of MESSAGE-ID wouldn't get highlighted if MESSAGE was
# first in the list of keywords.
(r'(ADD-CORRESPONDING|AUTHORITY-CHECK|'
r'CLASS-DATA|CLASS-EVENTS|CLASS-METHODS|CLASS-POOL|'
r'DELETE-ADJACENT|DIVIDE-CORRESPONDING|'
r'EDITOR-CALL|ENHANCEMENT-POINT|ENHANCEMENT-SECTION|EXIT-COMMAND|'
r'FIELD-GROUPS|FIELD-SYMBOLS|FUNCTION-POOL|'
r'INTERFACE-POOL|INVERTED-DATE|'
r'LOAD-OF-PROGRAM|LOG-POINT|'
r'MESSAGE-ID|MOVE-CORRESPONDING|MULTIPLY-CORRESPONDING|'
r'NEW-LINE|NEW-PAGE|NEW-SECTION|NO-EXTENSION|'
r'OUTPUT-LENGTH|PRINT-CONTROL|'
r'SELECT-OPTIONS|START-OF-SELECTION|SUBTRACT-CORRESPONDING|'
r'SYNTAX-CHECK|SYSTEM-EXCEPTIONS|'
r'TYPE-POOL|TYPE-POOLS'
r')\b', Keyword),
# keyword combinations
(r'CREATE\s+(PUBLIC|PRIVATE|DATA|OBJECT)|'
r'((PUBLIC|PRIVATE|PROTECTED)\s+SECTION|'
r'(TYPE|LIKE)(\s+(LINE\s+OF|REF\s+TO|'
r'(SORTED|STANDARD|HASHED)\s+TABLE\s+OF))?|'
r'FROM\s+(DATABASE|MEMORY)|CALL\s+METHOD|'
r'(GROUP|ORDER) BY|HAVING|SEPARATED BY|'
r'GET\s+(BADI|BIT|CURSOR|DATASET|LOCALE|PARAMETER|'
r'PF-STATUS|(PROPERTY|REFERENCE)\s+OF|'
r'RUN\s+TIME|TIME\s+(STAMP)?)?|'
r'SET\s+(BIT|BLANK\s+LINES|COUNTRY|CURSOR|DATASET|EXTENDED\s+CHECK|'
r'HANDLER|HOLD\s+DATA|LANGUAGE|LEFT\s+SCROLL-BOUNDARY|'
r'LOCALE|MARGIN|PARAMETER|PF-STATUS|PROPERTY\s+OF|'
r'RUN\s+TIME\s+(ANALYZER|CLOCK\s+RESOLUTION)|SCREEN|'
r'TITLEBAR|UPDATE\s+TASK\s+LOCAL|USER-COMMAND)|'
r'CONVERT\s+((INVERTED-)?DATE|TIME|TIME\s+STAMP|TEXT)|'
r'(CLOSE|OPEN)\s+(DATASET|CURSOR)|'
r'(TO|FROM)\s+(DATA BUFFER|INTERNAL TABLE|MEMORY ID|'
r'DATABASE|SHARED\s+(MEMORY|BUFFER))|'
r'DESCRIBE\s+(DISTANCE\s+BETWEEN|FIELD|LIST|TABLE)|'
r'FREE\s(MEMORY|OBJECT)?|'
r'PROCESS\s+(BEFORE\s+OUTPUT|AFTER\s+INPUT|'
r'ON\s+(VALUE-REQUEST|HELP-REQUEST))|'
r'AT\s+(LINE-SELECTION|USER-COMMAND|END\s+OF|NEW)|'
r'AT\s+SELECTION-SCREEN(\s+(ON(\s+(BLOCK|(HELP|VALUE)-REQUEST\s+FOR|'
r'END\s+OF|RADIOBUTTON\s+GROUP))?|OUTPUT))?|'
r'SELECTION-SCREEN:?\s+((BEGIN|END)\s+OF\s+((TABBED\s+)?BLOCK|LINE|'
r'SCREEN)|COMMENT|FUNCTION\s+KEY|'
r'INCLUDE\s+BLOCKS|POSITION|PUSHBUTTON|'
r'SKIP|ULINE)|'
r'LEAVE\s+(LIST-PROCESSING|PROGRAM|SCREEN|'
r'TO LIST-PROCESSING|TO TRANSACTION)|'
r'(ENDING|STARTING)\s+AT|'
r'FORMAT\s+(COLOR|INTENSIFIED|INVERSE|HOTSPOT|INPUT|FRAMES|RESET)|'
r'AS\s+(CHECKBOX|SUBSCREEN|WINDOW)|'
r'WITH\s+(((NON-)?UNIQUE)?\s+KEY|FRAME)|'
r'(BEGIN|END)\s+OF|'
r'DELETE(\s+ADJACENT\s+DUPLICATES\sFROM)?|'
r'COMPARING(\s+ALL\s+FIELDS)?|'
r'INSERT(\s+INITIAL\s+LINE\s+INTO|\s+LINES\s+OF)?|'
r'IN\s+((BYTE|CHARACTER)\s+MODE|PROGRAM)|'
r'END-OF-(DEFINITION|PAGE|SELECTION)|'
r'WITH\s+FRAME(\s+TITLE)|'
# simple combinations
r'AND\s+(MARK|RETURN)|CLIENT\s+SPECIFIED|CORRESPONDING\s+FIELDS\s+OF|'
r'IF\s+FOUND|FOR\s+EVENT|INHERITING\s+FROM|LEAVE\s+TO\s+SCREEN|'
r'LOOP\s+AT\s+(SCREEN)?|LOWER\s+CASE|MATCHCODE\s+OBJECT|MODIF\s+ID|'
r'MODIFY\s+SCREEN|NESTING\s+LEVEL|NO\s+INTERVALS|OF\s+STRUCTURE|'
r'RADIOBUTTON\s+GROUP|RANGE\s+OF|REF\s+TO|SUPPRESS DIALOG|'
r'TABLE\s+OF|UPPER\s+CASE|TRANSPORTING\s+NO\s+FIELDS|'
r'VALUE\s+CHECK|VISIBLE\s+LENGTH|HEADER\s+LINE)\b', Keyword),
# single word keywords.
(r'(^|(?<=(\s|\.)))(ABBREVIATED|ADD|ALIASES|APPEND|ASSERT|'
r'ASSIGN(ING)?|AT(\s+FIRST)?|'
r'BACK|BLOCK|BREAK-POINT|'
r'CASE|CATCH|CHANGING|CHECK|CLASS|CLEAR|COLLECT|COLOR|COMMIT|'
r'CREATE|COMMUNICATION|COMPONENTS?|COMPUTE|CONCATENATE|CONDENSE|'
r'CONSTANTS|CONTEXTS|CONTINUE|CONTROLS|'
r'DATA|DECIMALS|DEFAULT|DEFINE|DEFINITION|DEFERRED|DEMAND|'
r'DETAIL|DIRECTORY|DIVIDE|DO|'
r'ELSE(IF)?|ENDAT|ENDCASE|ENDCLASS|ENDDO|ENDFORM|ENDFUNCTION|'
r'ENDIF|ENDLOOP|ENDMETHOD|ENDMODULE|ENDSELECT|ENDTRY|'
r'ENHANCEMENT|EVENTS|EXCEPTIONS|EXIT|EXPORT|EXPORTING|EXTRACT|'
r'FETCH|FIELDS?|FIND|FOR|FORM|FORMAT|FREE|FROM|'
r'HIDE|'
r'ID|IF|IMPORT|IMPLEMENTATION|IMPORTING|IN|INCLUDE|INCLUDING|'
r'INDEX|INFOTYPES|INITIALIZATION|INTERFACE|INTERFACES|INTO|'
r'LENGTH|LINES|LOAD|LOCAL|'
r'JOIN|'
r'KEY|'
r'MAXIMUM|MESSAGE|METHOD[S]?|MINIMUM|MODULE|MODIFY|MOVE|MULTIPLY|'
r'NODES|'
r'OBLIGATORY|OF|OFF|ON|OVERLAY|'
r'PACK|PARAMETERS|PERCENTAGE|POSITION|PROGRAM|PROVIDE|PUBLIC|PUT|'
r'RAISE|RAISING|RANGES|READ|RECEIVE|REFRESH|REJECT|REPORT|RESERVE|'
r'RESUME|RETRY|RETURN|RETURNING|RIGHT|ROLLBACK|'
r'SCROLL|SEARCH|SELECT|SHIFT|SINGLE|SKIP|SORT|SPLIT|STATICS|STOP|'
r'SUBMIT|SUBTRACT|SUM|SUMMARY|SUMMING|SUPPLY|'
r'TABLE|TABLES|TIMES|TITLE|TO|TOP-OF-PAGE|TRANSFER|TRANSLATE|TRY|TYPES|'
r'ULINE|UNDER|UNPACK|UPDATE|USING|'
r'VALUE|VALUES|VIA|'
r'WAIT|WHEN|WHERE|WHILE|WITH|WINDOW|WRITE)\b', Keyword),
# builtins
(r'(abs|acos|asin|atan|'
r'boolc|boolx|bit_set|'
r'char_off|charlen|ceil|cmax|cmin|condense|contains|'
r'contains_any_of|contains_any_not_of|concat_lines_of|cos|cosh|'
r'count|count_any_of|count_any_not_of|'
r'dbmaxlen|distance|'
r'escape|exp|'
r'find|find_end|find_any_of|find_any_not_of|floor|frac|from_mixed|'
r'insert|'
r'lines|log|log10|'
r'match|matches|'
r'nmax|nmin|numofchar|'
r'repeat|replace|rescale|reverse|round|'
r'segment|shift_left|shift_right|sign|sin|sinh|sqrt|strlen|'
r'substring|substring_after|substring_from|substring_before|substring_to|'
r'tan|tanh|to_upper|to_lower|to_mixed|translate|trunc|'
r'xstrlen)(\()\b', bygroups(Name.Builtin, Punctuation)),
(r'&[0-9]', Name),
(r'[0-9]+', Number.Integer),
# operators which look like variable names, matched
# before variable names are parsed.
(r'(?<=(\s|.))(AND|EQ|NE|GT|LT|GE|LE|CO|CN|CA|NA|CS|NOT|NS|CP|NP|'
r'BYTE-CO|BYTE-CN|BYTE-CA|BYTE-NA|BYTE-CS|BYTE-NS|'
r'IS\s+(NOT\s+)?(INITIAL|ASSIGNED|REQUESTED|BOUND))\b', Operator),
include('variable-names'),
# standard operators after variable names,
# because < and > are part of field symbols.
(r'[?*<>=\-+]', Operator),
(r"'(''|[^'])*'", String.Single),
(r"`([^`])*`", String.Single),
(r'[/;:()\[\],.]', Punctuation)
],
}
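# A minimal usage sketch for the rules above (assuming the enclosing class is
# Pygments' ABAPLexer, whose ``class`` statement lies before this excerpt):
#
#     from pygments.lexers import ABAPLexer
#     for index, token, value in ABAPLexer().get_tokens_unprocessed('WRITE strlen(text).'):
#         print(index, token, value)
#
# The builtin rule uses bygroups() to split ``strlen(`` into
# (Name.Builtin, 'strlen') and (Punctuation, '('), WRITE is caught by the
# single-word keyword rule, and the trailing period by the punctuation rule.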
class OpenEdgeLexer(RegexLexer):
"""
Lexer for `OpenEdge ABL (formerly Progress)
<http://web.progress.com/en/openedge/abl.html>`_ source code.
.. versionadded:: 1.5
"""
name = 'OpenEdge ABL'
aliases = ['openedge', 'abl', 'progress']
filenames = ['*.p', '*.cls']
mimetypes = ['text/x-openedge', 'application/x-openedge']
types = (r'(?i)(^|(?<=[^0-9a-z_\-]))(CHARACTER|CHAR|CHARA|CHARAC|CHARACT|CHARACTE|'
r'COM-HANDLE|DATE|DATETIME|DATETIME-TZ|'
r'DECIMAL|DEC|DECI|DECIM|DECIMA|HANDLE|'
r'INT64|INTEGER|INT|INTE|INTEG|INTEGE|'
r'LOGICAL|LONGCHAR|MEMPTR|RAW|RECID|ROWID)\s*($|(?=[^0-9a-z_\-]))')
keywords = words(OPENEDGEKEYWORDS,
prefix=r'(?i)(^|(?<=[^0-9a-z_\-]))',
suffix=r'\s*($|(?=[^0-9a-z_\-]))')
tokens = {
'root': [
(r'/\*', Comment.Multiline, 'comment'),
(r'\{', Comment.Preproc, 'preprocessor'),
(r'\s*&.*', Comment.Preproc),
(r'0[xX][0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'(?i)(DEFINE|DEF|DEFI|DEFIN)\b', Keyword.Declaration),
(types, Keyword.Type),
(keywords, Name.Builtin),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\s+', Text),
(r'[+*/=-]', Operator),
(r'[.:()]', Punctuation),
(r'.', Name.Variable), # Lazy catch-all
],
'comment': [
(r'[^*/]', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
'preprocessor': [
(r'[^{}]', Comment.Preproc),
(r'\{', Comment.Preproc, '#push'),
(r'\}', Comment.Preproc, '#pop'),
],
}
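# A small check of the nested-comment handling above (a sketch; OpenEdgeLexer
# is the class defined directly above):
#
#     from pygments.token import Comment
#     tokens = list(OpenEdgeLexer().get_tokens('/* outer /* inner */ outer */'))
#     # '/*' pushes the 'comment' state and '*/' pops it, so the inner pair
#     # closes only its own nesting level:
#     assert all(tok is Comment.Multiline for tok, text in tokens if text.strip())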
class GoodDataCLLexer(RegexLexer):
"""
Lexer for `GoodData-CL
<http://github.com/gooddata/GoodData-CL/raw/master/cli/src/main/resources/\
com/gooddata/processor/COMMANDS.txt>`_
script files.
.. versionadded:: 1.4
"""
name = 'GoodData-CL'
aliases = ['gooddata-cl']
filenames = ['*.gdc']
mimetypes = ['text/x-gooddata-cl']
flags = re.IGNORECASE
tokens = {
'root': [
# Comments
(r'#.*', Comment.Single),
# Function call
(r'[a-z]\w*', Name.Function),
# Argument list
(r'\(', Punctuation, 'args-list'),
# Punctuation
(r';', Punctuation),
# Space is not significant
(r'\s+', Text)
],
'args-list': [
(r'\)', Punctuation, '#pop'),
(r',', Punctuation),
(r'[a-z]\w*', Name.Variable),
(r'=', Operator),
(r'"', String, 'string-literal'),
(r'[0-9]+(?:\.[0-9]+)?(?:e[+-]?[0-9]{1,3})?', Number),
# Space is not significant
(r'\s', Text)
],
'string-literal': [
(r'\\[tnrfbae"\\]', String.Escape),
(r'"', String, '#pop'),
(r'[^\\"]+', String)
]
}
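# State walk-through for a typical line (a sketch; the command and argument
# names are hypothetical):
#
#     CreateProject(name="quotes", token="abc123");
#
# 'root' tags CreateProject as Name.Function and '(' enters 'args-list';
# there name/token become Name.Variable, '=' Operator, and each opening '"'
# enters 'string-literal' until the closing quote pops back. ')' then pops
# to 'root', where ';' is Punctuation.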
class MaqlLexer(RegexLexer):
"""
Lexer for `GoodData MAQL
<https://secure.gooddata.com/docs/html/advanced.metric.tutorial.html>`_
scripts.
.. versionadded:: 1.4
"""
name = 'MAQL'
aliases = ['maql']
filenames = ['*.maql']
mimetypes = ['text/x-gooddata-maql', 'application/x-gooddata-maql']
flags = re.IGNORECASE
tokens = {
'root': [
# IDENTITY
(r'IDENTIFIER\b', Name.Builtin),
# IDENTIFIER
(r'\{[^}]+\}', Name.Variable),
# NUMBER
(r'[0-9]+(?:\.[0-9]+)?(?:e[+-]?[0-9]{1,3})?', Number),
# STRING
(r'"', String, 'string-literal'),
# RELATION
(r'\<\>|\!\=', Operator),
(r'\=|\>\=|\>|\<\=|\<', Operator),
# :=
(r'\:\=', Operator),
# OBJECT
(r'\[[^]]+\]', Name.Variable.Class),
# keywords
(words((
'DIMENSION', 'DIMENSIONS', 'BOTTOM', 'METRIC', 'COUNT', 'OTHER',
'FACT', 'WITH', 'TOP', 'OR', 'ATTRIBUTE', 'CREATE', 'PARENT',
'FALSE', 'ROW', 'ROWS', 'FROM', 'ALL', 'AS', 'PF', 'COLUMN',
'COLUMNS', 'DEFINE', 'REPORT', 'LIMIT', 'TABLE', 'LIKE', 'AND',
'BY', 'BETWEEN', 'EXCEPT', 'SELECT', 'MATCH', 'WHERE', 'TRUE',
'FOR', 'IN', 'WITHOUT', 'FILTER', 'ALIAS', 'WHEN', 'NOT', 'ON',
'KEYS', 'KEY', 'FULLSET', 'PRIMARY', 'LABELS', 'LABEL',
'VISUAL', 'TITLE', 'DESCRIPTION', 'FOLDER', 'ALTER', 'DROP',
'ADD', 'DATASET', 'DATATYPE', 'INT', 'BIGINT', 'DOUBLE', 'DATE',
'VARCHAR', 'DECIMAL', 'SYNCHRONIZE', 'TYPE', 'DEFAULT', 'ORDER',
'ASC', 'DESC', 'HYPERLINK', 'INCLUDE', 'TEMPLATE', 'MODIFY'),
suffix=r'\b'),
Keyword),
# FUNCNAME
(r'[a-z]\w*\b', Name.Function),
# Comments
(r'#.*', Comment.Single),
# Punctuation
(r'[,;()]', Punctuation),
# Space is not significant
(r'\s+', Text)
],
'string-literal': [
(r'\\[tnrfbae"\\]', String.Escape),
(r'"', String, '#pop'),
(r'[^\\"]+', String)
],
}
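# How the rules above carve up a small MAQL statement (a sketch; the
# bracketed object names are invented):
#
#     SELECT SUM([fact.amount]) WHERE [attr.year] = 2014;
#
# SELECT and WHERE hit the keyword list, SUM falls through to the FUNCNAME
# rule as Name.Function, the bracketed objects match the OBJECT rule as
# Name.Variable.Class, '=' is a RELATION operator and 2014 a NUMBER.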


@@ -1,233 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.c_cpp
~~~~~~~~~~~~~~~~~~~~~
Lexers for C/C++ languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, \
this, inherit, default, words
from pygments.util import get_bool_opt
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['CLexer', 'CppLexer']
class CFamilyLexer(RegexLexer):
"""
For C family source code. This is used as a base class to avoid repetitious
definitions.
"""
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
#: only one /* */ style comment
_ws1 = r'\s*(?:/[*].*?[*]/\s*)*'
tokens = {
'whitespace': [
# preprocessor directives: without whitespace
(r'^#if\s+0', Comment.Preproc, 'if0'),
('^#', Comment.Preproc, 'macro'),
# or with whitespace
('^(' + _ws1 + r')(#if\s+0)',
bygroups(using(this), Comment.Preproc), 'if0'),
('^(' + _ws1 + ')(#)',
bygroups(using(this), Comment.Preproc), 'macro'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
],
'statements': [
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'0[0-7]+[LlUu]*', Number.Oct),
(r'\d+[LlUu]*', Number.Integer),
(r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.]', Punctuation),
(words(('auto', 'break', 'case', 'const', 'continue', 'default', 'do',
'else', 'enum', 'extern', 'for', 'goto', 'if', 'register',
'restricted', 'return', 'sizeof', 'static', 'struct',
'switch', 'typedef', 'union', 'volatile', 'while'),
suffix=r'\b'), Keyword),
(r'(bool|int|long|float|short|double|char|unsigned|signed|void|'
r'[a-z_][a-z0-9_]*_t)\b',
Keyword.Type),
(words(('inline', '_inline', '__inline', 'naked', 'restrict',
'thread', 'typename'), suffix=r'\b'), Keyword.Reserved),
# Vector intrinsics
(r'(__m(128i|128d|128|64))\b', Keyword.Reserved),
# Microsoft-isms
(words((
'asm', 'int8', 'based', 'except', 'int16', 'stdcall', 'cdecl',
'fastcall', 'int32', 'declspec', 'finally', 'int64', 'try',
'leave', 'wchar_t', 'w64', 'unaligned', 'raise', 'noop',
'identifier', 'forceinline', 'assume'),
prefix=r'__', suffix=r'\b'), Keyword.Reserved),
(r'(true|false|NULL)\b', Name.Builtin),
(r'([a-zA-Z_]\w*)(\s*)(:)(?!:)', bygroups(Name.Label, Text, Punctuation)),
(r'[a-zA-Z_]\w*', Name),
],
'root': [
include('whitespace'),
# functions
(r'((?:[\w*\s])+?(?:\s|[*]))' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')?(\{)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation),
'function'),
# function declarations
(r'((?:[\w*\s])+?(?:\s|[*]))' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')?(;)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation)),
default('statement'),
],
'statement': [
include('whitespace'),
include('statements'),
('[{}]', Punctuation),
(';', Punctuation, '#pop'),
],
'function': [
include('whitespace'),
include('statements'),
(';', Punctuation),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
]
}
stdlib_types = ['size_t', 'ssize_t', 'off_t', 'wchar_t', 'ptrdiff_t',
'sig_atomic_t', 'fpos_t', 'clock_t', 'time_t', 'va_list',
'jmp_buf', 'FILE', 'DIR', 'div_t', 'ldiv_t', 'mbstate_t',
'wctrans_t', 'wint_t', 'wctype_t']
c99_types = ['_Bool', '_Complex', 'int8_t', 'int16_t', 'int32_t', 'int64_t',
'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t', 'int_least8_t',
'int_least16_t', 'int_least32_t', 'int_least64_t',
'uint_least8_t', 'uint_least16_t', 'uint_least32_t',
'uint_least64_t', 'int_fast8_t', 'int_fast16_t', 'int_fast32_t',
'int_fast64_t', 'uint_fast8_t', 'uint_fast16_t', 'uint_fast32_t',
'uint_fast64_t', 'intptr_t', 'uintptr_t', 'intmax_t',
'uintmax_t']
def __init__(self, **options):
self.stdlibhighlighting = get_bool_opt(options, 'stdlibhighlighting', True)
self.c99highlighting = get_bool_opt(options, 'c99highlighting', True)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name:
if self.stdlibhighlighting and value in self.stdlib_types:
token = Keyword.Type
elif self.c99highlighting and value in self.c99_types:
token = Keyword.Type
yield index, token, value
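# The override above post-processes the regex stream: a plain Name token whose
# text is a known stdlib or C99 type is re-tagged as Keyword.Type (both option
# flags default to True). A sketch of the effect, using the CLexer subclass
# defined below:
#
#     from pygments.token import Keyword
#     toks = list(CLexer().get_tokens('FILE *fp;'))
#     assert toks[0] == (Keyword.Type, 'FILE')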
class CLexer(CFamilyLexer):
"""
For C source code with preprocessor directives.
"""
name = 'C'
aliases = ['c']
filenames = ['*.c', '*.h', '*.idc']
mimetypes = ['text/x-chdr', 'text/x-csrc']
priority = 0.1
def analyse_text(text):
if re.search(r'^\s*#include [<"]', text, re.MULTILINE):
return 0.1
if re.search(r'^\s*#ifdef ', text, re.MULTILINE):
return 0.1
class CppLexer(CFamilyLexer):
"""
For C++ source code with preprocessor directives.
"""
name = 'C++'
aliases = ['cpp', 'c++']
filenames = ['*.cpp', '*.hpp', '*.c++', '*.h++',
'*.cc', '*.hh', '*.cxx', '*.hxx',
'*.C', '*.H', '*.cp', '*.CPP']
mimetypes = ['text/x-c++hdr', 'text/x-c++src']
priority = 0.1
tokens = {
'statements': [
(words((
'asm', 'catch', 'const_cast', 'delete', 'dynamic_cast', 'explicit',
'export', 'friend', 'mutable', 'namespace', 'new', 'operator',
'private', 'protected', 'public', 'reinterpret_cast',
'restrict', 'static_cast', 'template', 'this', 'throw', 'throws',
'typeid', 'typename', 'using', 'virtual',
'constexpr', 'nullptr', 'decltype', 'thread_local',
'alignas', 'alignof', 'static_assert', 'noexcept', 'override',
'final'), suffix=r'\b'), Keyword),
(r'char(16_t|32_t)\b', Keyword.Type),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
inherit,
],
'root': [
inherit,
# C++ Microsoft-isms
(words(('virtual_inheritance', 'uuidof', 'super', 'single_inheritance',
'multiple_inheritance', 'interface', 'event'),
prefix=r'__', suffix=r'\b'), Keyword.Reserved),
# Offload C++ extensions, http://offload.codeplay.com/
(r'__(offload|blockingoffload|outer)\b', Keyword.Pseudo),
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
# template specification
(r'\s*(?=>)', Text, '#pop'),
],
}
def analyse_text(text):
if re.search('#include <[a-z]+>', text):
return 0.2
if re.search('using namespace ', text):
return 0.4


@@ -1,413 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.c_like
~~~~~~~~~~~~~~~~~~~~~~
Lexers for other C-like languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, inherit, words, \
default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
from pygments.lexers.c_cpp import CLexer, CppLexer
from pygments.lexers import _mql_builtins
__all__ = ['PikeLexer', 'NesCLexer', 'ClayLexer', 'ECLexer', 'ValaLexer',
'CudaLexer', 'SwigLexer', 'MqlLexer']
class PikeLexer(CppLexer):
"""
For `Pike <http://pike.lysator.liu.se/>`_ source code.
.. versionadded:: 2.0
"""
name = 'Pike'
aliases = ['pike']
filenames = ['*.pike', '*.pmod']
mimetypes = ['text/x-pike']
tokens = {
'statements': [
(words((
'catch', 'new', 'private', 'protected', 'public', 'gauge',
'throw', 'throws', 'class', 'interface', 'implement', 'abstract', 'extends', 'from',
'this', 'super', 'constant', 'final', 'static', 'import', 'use', 'extern',
'inline', 'proto', 'break', 'continue', 'if', 'else', 'for',
'while', 'do', 'switch', 'case', 'as', 'in', 'version', 'return', 'true', 'false', 'null',
'__VERSION__', '__MAJOR__', '__MINOR__', '__BUILD__', '__REAL_VERSION__',
'__REAL_MAJOR__', '__REAL_MINOR__', '__REAL_BUILD__', '__DATE__', '__TIME__',
'__FILE__', '__DIR__', '__LINE__', '__AUTO_BIGNUM__', '__NT__', '__PIKE__',
'__amigaos__', '_Pragma', 'static_assert', 'defined', 'sscanf'), suffix=r'\b'),
Keyword),
(r'(bool|int|long|float|short|double|char|string|object|void|mapping|'
r'array|multiset|program|function|lambda|mixed|'
r'[a-z_][a-z0-9_]*_t)\b',
Keyword.Type),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'[~!%^&*+=|?:<>/@-]', Operator),
inherit,
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
# template specification
(r'\s*(?=>)', Text, '#pop'),
],
}
class NesCLexer(CLexer):
"""
For `nesC <https://github.com/tinyos/nesc>`_ source code with preprocessor
directives.
.. versionadded:: 2.0
"""
name = 'nesC'
aliases = ['nesc']
filenames = ['*.nc']
mimetypes = ['text/x-nescsrc']
tokens = {
'statements': [
(words((
'abstract', 'as', 'async', 'atomic', 'call', 'command', 'component',
'components', 'configuration', 'event', 'extends', 'generic',
'implementation', 'includes', 'interface', 'module', 'new', 'norace',
'post', 'provides', 'signal', 'task', 'uses'), suffix=r'\b'),
Keyword),
(words(('nx_struct', 'nx_union', 'nx_int8_t', 'nx_int16_t', 'nx_int32_t',
'nx_int64_t', 'nx_uint8_t', 'nx_uint16_t', 'nx_uint32_t',
'nx_uint64_t'), suffix=r'\b'),
Keyword.Type),
inherit,
],
}
class ClayLexer(RegexLexer):
"""
For `Clay <http://claylabs.com/clay/>`_ source.
.. versionadded:: 2.0
"""
name = 'Clay'
filenames = ['*.clay']
aliases = ['clay']
mimetypes = ['text/x-clay']
tokens = {
'root': [
(r'\s', Text),
(r'//.*?$', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'\b(public|private|import|as|record|variant|instance'
r'|define|overload|default|external|alias'
r'|rvalue|ref|forward|inline|noinline|forceinline'
r'|enum|var|and|or|not|if|else|goto|return|while'
r'|switch|case|break|continue|for|in|true|false|try|catch|throw'
r'|finally|onerror|staticassert|eval|when|newtype'
r'|__FILE__|__LINE__|__COLUMN__|__ARG__'
r')\b', Keyword),
(r'[~!%^&*+=|:<>/-]', Operator),
(r'[#(){}\[\],;.]', Punctuation),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'\d+[LlUu]*', Number.Integer),
(r'\b(true|false)\b', Name.Builtin),
(r'(?i)[a-z_?][\w?]*', Name),
(r'"""', String, 'tdqs'),
(r'"', String, 'dqs'),
],
'strings': [
(r'(?i)\\(x[0-9a-f]{2}|.)', String.Escape),
(r'.', String),
],
'nl': [
(r'\n', String),
],
'dqs': [
(r'"', String, '#pop'),
include('strings'),
],
'tdqs': [
(r'"""', String, '#pop'),
include('strings'),
include('nl'),
],
}
class ECLexer(CLexer):
"""
For eC source code with preprocessor directives.
.. versionadded:: 1.5
"""
name = 'eC'
aliases = ['ec']
filenames = ['*.ec', '*.eh']
mimetypes = ['text/x-echdr', 'text/x-ecsrc']
tokens = {
'statements': [
(words((
'virtual', 'class', 'private', 'public', 'property', 'import',
'delete', 'new', 'new0', 'renew', 'renew0', 'define', 'get',
'set', 'remote', 'dllexport', 'dllimport', 'stdcall', 'subclass',
'__on_register_module', 'namespace', 'using', 'typed_object',
'any_object', 'incref', 'register', 'watch', 'stopwatching', 'firewatchers',
'watchable', 'class_designer', 'class_fixed', 'class_no_expansion', 'isset',
'class_default_property', 'property_category', 'class_data',
'class_property', 'thisclass', 'dbtable', 'dbindex',
'database_open', 'dbfield'), suffix=r'\b'), Keyword),
(words(('uint', 'uint16', 'uint32', 'uint64', 'bool', 'byte',
'unichar', 'int64'), suffix=r'\b'),
Keyword.Type),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(null|value|this)\b', Name.Builtin),
inherit,
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
# template specification
(r'\s*(?=>)', Text, '#pop'),
],
}
class ValaLexer(RegexLexer):
"""
For Vala source code with preprocessor directives.
.. versionadded:: 1.1
"""
name = 'Vala'
aliases = ['vala', 'vapi']
filenames = ['*.vala', '*.vapi']
mimetypes = ['text/x-vala']
tokens = {
'whitespace': [
(r'^\s*#if\s+0', Comment.Preproc, 'if0'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
],
'statements': [
(r'[L@]?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
String.Char),
(r'(?s)""".*?"""', String), # verbatim strings
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'(\[)(Compact|Immutable|(?:Boolean|Simple)Type)(\])',
bygroups(Punctuation, Name.Decorator, Punctuation)),
# TODO: "correctly" parse complex code attributes
(r'(\[)(CCode|(?:Integer|Floating)Type)',
bygroups(Punctuation, Name.Decorator)),
(r'[()\[\],.]', Punctuation),
(words((
'as', 'base', 'break', 'case', 'catch', 'construct', 'continue',
'default', 'delete', 'do', 'else', 'enum', 'finally', 'for',
'foreach', 'get', 'if', 'in', 'is', 'lock', 'new', 'out', 'params',
'return', 'set', 'sizeof', 'switch', 'this', 'throw', 'try',
'typeof', 'while', 'yield'), suffix=r'\b'),
Keyword),
(words((
'abstract', 'const', 'delegate', 'dynamic', 'ensures', 'extern',
'inline', 'internal', 'override', 'owned', 'private', 'protected',
'public', 'ref', 'requires', 'signal', 'static', 'throws', 'unowned',
'var', 'virtual', 'volatile', 'weak', 'yields'), suffix=r'\b'),
Keyword.Declaration),
(r'(namespace|using)(\s+)', bygroups(Keyword.Namespace, Text),
'namespace'),
(r'(class|errordomain|interface|struct)(\s+)',
bygroups(Keyword.Declaration, Text), 'class'),
(r'(\.)([a-zA-Z_]\w*)',
bygroups(Operator, Name.Attribute)),
# void is an actual keyword, others are in glib-2.0.vapi
(words((
'void', 'bool', 'char', 'double', 'float', 'int', 'int8', 'int16',
'int32', 'int64', 'long', 'short', 'size_t', 'ssize_t', 'string',
'time_t', 'uchar', 'uint', 'uint8', 'uint16', 'uint32', 'uint64',
'ulong', 'unichar', 'ushort'), suffix=r'\b'),
Keyword.Type),
(r'(true|false|null)\b', Name.Builtin),
(r'[a-zA-Z_]\w*', Name),
],
'root': [
include('whitespace'),
default('statement'),
],
'statement': [
include('whitespace'),
include('statements'),
('[{}]', Punctuation),
(';', Punctuation, '#pop'),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
],
'class': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'namespace': [
(r'[a-zA-Z_][\w.]*', Name.Namespace, '#pop')
],
}
class CudaLexer(CLexer):
"""
For NVIDIA `CUDA™ <http://developer.nvidia.com/category/zone/cuda-zone>`_
source.
.. versionadded:: 1.6
"""
name = 'CUDA'
filenames = ['*.cu', '*.cuh']
aliases = ['cuda', 'cu']
mimetypes = ['text/x-cuda']
function_qualifiers = set(('__device__', '__global__', '__host__',
'__noinline__', '__forceinline__'))
variable_qualifiers = set(('__device__', '__constant__', '__shared__',
'__restrict__'))
vector_types = set(('char1', 'uchar1', 'char2', 'uchar2', 'char3', 'uchar3',
'char4', 'uchar4', 'short1', 'ushort1', 'short2', 'ushort2',
'short3', 'ushort3', 'short4', 'ushort4', 'int1', 'uint1',
'int2', 'uint2', 'int3', 'uint3', 'int4', 'uint4', 'long1',
'ulong1', 'long2', 'ulong2', 'long3', 'ulong3', 'long4',
'ulong4', 'longlong1', 'ulonglong1', 'longlong2',
'ulonglong2', 'float1', 'float2', 'float3', 'float4',
'double1', 'double2', 'dim3'))
variables = set(('gridDim', 'blockIdx', 'blockDim', 'threadIdx', 'warpSize'))
functions = set(('__threadfence_block', '__threadfence', '__threadfence_system',
'__syncthreads', '__syncthreads_count', '__syncthreads_and',
'__syncthreads_or'))
execution_confs = set(('<<<', '>>>'))
def get_tokens_unprocessed(self, text):
for index, token, value in CLexer.get_tokens_unprocessed(self, text):
if token is Name:
if value in self.variable_qualifiers:
token = Keyword.Type
elif value in self.vector_types:
token = Keyword.Type
elif value in self.variables:
token = Name.Builtin
elif value in self.execution_confs:
token = Keyword.Pseudo
elif value in self.function_qualifiers:
token = Keyword.Reserved
elif value in self.functions:
token = Name.Function
yield index, token, value
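# Same remapping idea as in CFamilyLexer above: Name tokens produced by the
# inherited C rules are re-tagged by set membership. A sketch:
#
#     from pygments.token import Name
#     toks = list(CudaLexer().get_tokens('int i = threadIdx.x;'))
#     assert (Name.Builtin, 'threadIdx') in toks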
class SwigLexer(CppLexer):
"""
For `SWIG <http://www.swig.org/>`_ source code.
.. versionadded:: 2.0
"""
name = 'SWIG'
aliases = ['swig']
filenames = ['*.swg', '*.i']
mimetypes = ['text/swig']
priority = 0.04 # Lower than C/C++ and Objective C/C++
tokens = {
'statements': [
# SWIG directives
(r'(%[a-z_][a-z0-9_]*)', Name.Function),
# Special variables
(r'\$\**\&?\w+', Name),
# Stringification / additional preprocessor directives
(r'##*[a-zA-Z_]\w*', Comment.Preproc),
inherit,
],
}
# This is a far from complete set of SWIG directives
swig_directives = set((
# Most common directives
'%apply', '%define', '%director', '%enddef', '%exception', '%extend',
'%feature', '%fragment', '%ignore', '%immutable', '%import', '%include',
'%inline', '%insert', '%module', '%newobject', '%nspace', '%pragma',
'%rename', '%shared_ptr', '%template', '%typecheck', '%typemap',
# Less common directives
'%arg', '%attribute', '%bang', '%begin', '%callback', '%catches', '%clear',
'%constant', '%copyctor', '%csconst', '%csconstvalue', '%csenum',
'%csmethodmodifiers', '%csnothrowexception', '%default', '%defaultctor',
'%defaultdtor', '%defined', '%delete', '%delobject', '%descriptor',
'%exceptionclass', '%exceptionvar', '%extend_smart_pointer', '%fragments',
'%header', '%ifcplusplus', '%ignorewarn', '%implicit', '%implicitconv',
'%init', '%javaconst', '%javaconstvalue', '%javaenum', '%javaexception',
'%javamethodmodifiers', '%kwargs', '%luacode', '%mutable', '%naturalvar',
'%nestedworkaround', '%perlcode', '%pythonabc', '%pythonappend',
'%pythoncallback', '%pythoncode', '%pythondynamic', '%pythonmaybecall',
'%pythonnondynamic', '%pythonprepend', '%refobject', '%shadow', '%sizeof',
'%trackobjects', '%types', '%unrefobject', '%varargs', '%warn',
'%warnfilter'))
def analyse_text(text):
rv = 0
# Search for SWIG directives, which are conventionally at the beginning of
# a line. The probability of them being within a line is low, so let another
# lexer win in this case.
matches = re.findall(r'^\s*(%[a-z_][a-z0-9_]*)', text, re.M)
for m in matches:
if m in SwigLexer.swig_directives:
rv = 0.98
break
else:
rv = 0.91 # Fraction higher than MatlabLexer
return rv
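# Scoring sketch for the heuristic above: a line-initial directive found in
# swig_directives (e.g. '%module') yields 0.98, while a line-initial '%'-word
# outside the set still scores 0.91, a fraction above MatlabLexer.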
class MqlLexer(CppLexer):
"""
For `MQL4 <http://docs.mql4.com/>`_ and
`MQL5 <http://www.mql5.com/en/docs>`_ source code.
.. versionadded:: 2.0
"""
name = 'MQL'
aliases = ['mql', 'mq4', 'mq5', 'mql4', 'mql5']
filenames = ['*.mq4', '*.mq5', '*.mqh']
mimetypes = ['text/x-mql']
tokens = {
'statements': [
(words(_mql_builtins.keywords, suffix=r'\b'), Keyword),
(words(_mql_builtins.c_types, suffix=r'\b'), Keyword.Type),
(words(_mql_builtins.types, suffix=r'\b'), Name.Function),
(words(_mql_builtins.constants, suffix=r'\b'), Name.Constant),
(words(_mql_builtins.colors, prefix='(clr)?', suffix=r'\b'),
Name.Constant),
inherit,
],
}


@@ -1,98 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.chapel
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Chapel language.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['ChapelLexer']
class ChapelLexer(RegexLexer):
"""
For `Chapel <http://chapel.cray.com/>`_ source.
.. versionadded:: 2.0
"""
name = 'Chapel'
filenames = ['*.chpl']
aliases = ['chapel', 'chpl']
# mimetypes = ['text/x-chapel']
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text),
(r'//(.*?)\n', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'(config|const|in|inout|out|param|ref|type|var)\b',
Keyword.Declaration),
(r'(false|nil|true)\b', Keyword.Constant),
(r'(bool|complex|imag|int|opaque|range|real|string|uint)\b',
Keyword.Type),
(words((
'align', 'atomic', 'begin', 'break', 'by', 'cobegin', 'coforall',
'continue', 'delete', 'dmapped', 'do', 'domain', 'else', 'enum',
'export', 'extern', 'for', 'forall', 'if', 'index', 'inline',
'iter', 'label', 'lambda', 'let', 'local', 'new', 'noinit', 'on',
'otherwise', 'pragma', 'reduce', 'return', 'scan', 'select',
'serial', 'single', 'sparse', 'subdomain', 'sync', 'then', 'use',
'when', 'where', 'while', 'with', 'yield', 'zip'), suffix=r'\b'),
Keyword),
(r'(proc)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'procname'),
(r'(class|module|record|union)(\s+)', bygroups(Keyword, Text),
'classname'),
# imaginary integers
(r'\d+i', Number),
(r'\d+\.\d*([Ee][-+]\d+)?i', Number),
(r'\.\d+([Ee][-+]\d+)?i', Number),
(r'\d+[Ee][-+]\d+i', Number),
# reals cannot end with a period due to lexical ambiguity with
# .. operator. See reference for rationale.
(r'(\d*\.\d+)([eE][+-]?[0-9]+)?i?', Number.Float),
(r'\d+[eE][+-]?[0-9]+i?', Number.Float),
# integer literals
# -- binary
(r'0[bB][01]+', Number.Bin),
# -- hex
(r'0[xX][0-9a-fA-F]+', Number.Hex),
# -- octal
(r'0[oO][0-7]+', Number.Oct),
# -- decimal
(r'[0-9]+', Number.Integer),
# strings
(r'["\'](\\\\|\\"|[^"\'])*["\']', String),
# tokens
(r'(=|\+=|-=|\*=|/=|\*\*=|%=|&=|\|=|\^=|&&=|\|\|=|<<=|>>=|'
r'<=>|<~>|\.\.|by|#|\.\.\.|'
r'&&|\|\||!|&|\||\^|~|<<|>>|'
r'==|!=|<=|>=|<|>|'
r'[+\-*/%]|\*\*)', Operator),
(r'[:;,.?()\[\]{}]', Punctuation),
# identifiers
(r'[a-zA-Z_][\w$]*', Name.Other),
],
'classname': [
(r'[a-zA-Z_][\w$]*', Name.Class, '#pop'),
],
'procname': [
(r'[a-zA-Z_][\w$]*', Name.Function, '#pop'),
],
}
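# On the lexical-ambiguity comment above: in '1..10' no float rule can match
# (each requires a digit after the dot), so it lexes as integer 1, the '..'
# operator, then integer 10; writing 1.0 keeps the literal a Number.Float.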


@@ -1,33 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.compiled
~~~~~~~~~~~~~~~~~~~~~~~~
Just export lexer classes previously contained in this module.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexers.jvm import JavaLexer, ScalaLexer
from pygments.lexers.c_cpp import CLexer, CppLexer
from pygments.lexers.d import DLexer
from pygments.lexers.objective import ObjectiveCLexer, \
ObjectiveCppLexer, LogosLexer
from pygments.lexers.go import GoLexer
from pygments.lexers.rust import RustLexer
from pygments.lexers.c_like import ECLexer, ValaLexer, CudaLexer
from pygments.lexers.pascal import DelphiLexer, Modula2Lexer, AdaLexer
from pygments.lexers.business import CobolLexer, CobolFreeformatLexer
from pygments.lexers.fortran import FortranLexer
from pygments.lexers.prolog import PrologLexer
from pygments.lexers.python import CythonLexer
from pygments.lexers.graphics import GLShaderLexer
from pygments.lexers.ml import OcamlLexer
from pygments.lexers.basic import BlitzBasicLexer, BlitzMaxLexer, MonkeyLexer
from pygments.lexers.dylan import DylanLexer, DylanLidLexer, DylanConsoleLexer
from pygments.lexers.ooc import OocLexer
from pygments.lexers.felix import FelixLexer
from pygments.lexers.nimrod import NimrodLexer
__all__ = []


@@ -1,546 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.configs
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for configuration file formats.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, default, words, bygroups, include, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
from pygments.lexers.shell import BashLexer
__all__ = ['IniLexer', 'RegeditLexer', 'PropertiesLexer', 'KconfigLexer',
'Cfengine3Lexer', 'ApacheConfLexer', 'SquidConfLexer',
'NginxConfLexer', 'LighttpdConfLexer', 'DockerLexer']
class IniLexer(RegexLexer):
"""
Lexer for configuration files in INI style.
"""
name = 'INI'
aliases = ['ini', 'cfg', 'dosini']
filenames = ['*.ini', '*.cfg']
mimetypes = ['text/x-ini']
tokens = {
'root': [
(r'\s+', Text),
(r'[;#].*', Comment.Single),
(r'\[.*?\]$', Keyword),
(r'(.*?)([ \t]*)(=)([ \t]*)(.*(?:\n[ \t].+)*)',
bygroups(Name.Attribute, Text, Operator, Text, String))
]
}
def analyse_text(text):
npos = text.find('\n')
if npos < 3:
return False
return text[0] == '[' and text[npos-1] == ']'
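# Worked example of the guess above: for '[section]\nkey = value' the first
# newline sits at index 9, text[0] is '[' and text[8] is ']', so the guess
# succeeds; the npos < 3 check rejects anything shorter than a '[x]' header.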
class RegeditLexer(RegexLexer):
"""
Lexer for `Windows Registry
<http://en.wikipedia.org/wiki/Windows_Registry#.REG_files>`_ files produced
by regedit.
.. versionadded:: 1.6
"""
name = 'reg'
aliases = ['registry']
filenames = ['*.reg']
mimetypes = ['text/x-windows-registry']
tokens = {
'root': [
(r'Windows Registry Editor.*', Text),
(r'\s+', Text),
(r'[;#].*', Comment.Single),
(r'(\[)(-?)(HKEY_[A-Z_]+)(.*?\])$',
bygroups(Keyword, Operator, Name.Builtin, Keyword)),
# String keys, which obey somewhat normal escaping
(r'("(?:\\"|\\\\|[^"])+")([ \t]*)(=)([ \t]*)',
bygroups(Name.Attribute, Text, Operator, Text),
'value'),
# Bare keys (includes @)
(r'(.*?)([ \t]*)(=)([ \t]*)',
bygroups(Name.Attribute, Text, Operator, Text),
'value'),
],
'value': [
(r'-', Operator, '#pop'), # delete value
(r'(dword|hex(?:\([0-9a-fA-F]\))?)(:)([0-9a-fA-F,]+)',
bygroups(Name.Variable, Punctuation, Number), '#pop'),
# As far as I know, .reg files do not support line continuation.
(r'.+', String, '#pop'),
default('#pop'),
]
}
def analyse_text(text):
return text.startswith('Windows Registry Editor')
class PropertiesLexer(RegexLexer):
"""
Lexer for configuration files in Java's properties format.
.. versionadded:: 1.4
"""
name = 'Properties'
aliases = ['properties', 'jproperties']
filenames = ['*.properties']
mimetypes = ['text/x-java-properties']
tokens = {
'root': [
(r'\s+', Text),
(r'(?:[;#]|//).*$', Comment),
(r'(.*?)([ \t]*)([=:])([ \t]*)(.*(?:(?<=\\)\n.*)*)',
bygroups(Name.Attribute, Text, Operator, Text, String)),
],
}
def _rx_indent(level):
# Kconfig *always* interprets a tab as 8 spaces, so this is the default.
# Edit this if you are in an environment where KconfigLexer gets expanded
# input (tabs expanded to spaces) and the expansion tab width is != 8,
# e.g. in connection with Trac (trac.ini, [mimeviewer], tab_width).
# Value range here is 2 <= {tab_width} <= 8.
tab_width = 8
# Regex matching a given indentation {level}, assuming that indentation is
# a multiple of {tab_width}. In other cases there might be problems.
if tab_width == 2:
space_repeat = '+'
else:
space_repeat = '{1,%d}' % (tab_width - 1)
if level == 1:
level_repeat = ''
else:
level_repeat = '{%s}' % level
return r'(?:\t| %s\t| {%s})%s.*\n' % (space_repeat, tab_width, level_repeat)
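# Worked example: with the default tab_width of 8, _rx_indent(2) produces
#
#     (?:\t| {1,7}\t| {8}){2}.*\n
#
# i.e. two repetitions of "a tab, or one to seven spaces followed by a tab,
# or exactly eight spaces", then the rest of the line.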
class KconfigLexer(RegexLexer):
"""
For Linux-style Kconfig files.
.. versionadded:: 1.6
"""
name = 'Kconfig'
aliases = ['kconfig', 'menuconfig', 'linux-config', 'kernel-config']
# Adjust this if new kconfig file names appear in your environment
filenames = ['Kconfig', '*Config.in*', 'external.in*',
'standard-modules.in']
mimetypes = ['text/x-kconfig']
# No re.MULTILINE, indentation-aware help text needs line-by-line handling
flags = 0
def call_indent(level):
# If indentation >= {level} is detected, enter state 'indent{level}'
return (_rx_indent(level), String.Doc, 'indent%s' % level)
def do_indent(level):
# Print paragraphs of indentation level >= {level} as String.Doc,
# ignoring blank lines. Then return to 'root' state.
return [
(_rx_indent(level), String.Doc),
(r'\s*\n', Text),
default('#pop:2')
]
tokens = {
'root': [
(r'\s+', Text),
(r'#.*?\n', Comment.Single),
(words((
'mainmenu', 'config', 'menuconfig', 'choice', 'endchoice',
'comment', 'menu', 'endmenu', 'visible if', 'if', 'endif',
'source', 'prompt', 'select', 'depends on', 'default',
'range', 'option'), suffix=r'\b'),
Keyword),
(r'(---help---|help)[\t ]*\n', Keyword, 'help'),
(r'(bool|tristate|string|hex|int|defconfig_list|modules|env)\b',
Name.Builtin),
(r'[!=&|]', Operator),
(r'[()]', Punctuation),
(r'[0-9]+', Number.Integer),
(r"'(''|[^'])*'", String.Single),
(r'"(""|[^"])*"', String.Double),
(r'\S+', Text),
],
# Help text is indented, multi-line and ends when a lower indentation
# level is detected.
'help': [
# Skip blank lines after help token, if any
(r'\s*\n', Text),
# Determine the first help line's indentation level heuristically(!).
# Attention: this is not perfect, but works for 99% of "normal"
# indentation schemes up to a max. indentation level of 7.
call_indent(7),
call_indent(6),
call_indent(5),
call_indent(4),
call_indent(3),
call_indent(2),
call_indent(1),
default('#pop'), # for incomplete help sections without text
],
# Handle text for indentation levels 7 to 1
'indent7': do_indent(7),
'indent6': do_indent(6),
'indent5': do_indent(5),
'indent4': do_indent(4),
'indent3': do_indent(3),
'indent2': do_indent(2),
'indent1': do_indent(1),
}
class Cfengine3Lexer(RegexLexer):
"""
Lexer for `CFEngine3 <http://cfengine.org>`_ policy files.
.. versionadded:: 1.5
"""
name = 'CFEngine3'
aliases = ['cfengine3', 'cf3']
filenames = ['*.cf']
mimetypes = []
tokens = {
'root': [
(r'#.*?\n', Comment),
(r'(body)(\s+)(\S+)(\s+)(control)',
bygroups(Keyword, Text, Keyword, Text, Keyword)),
(r'(body|bundle)(\s+)(\S+)(\s+)(\w+)(\()',
bygroups(Keyword, Text, Keyword, Text, Name.Function, Punctuation),
'arglist'),
(r'(body|bundle)(\s+)(\S+)(\s+)(\w+)',
bygroups(Keyword, Text, Keyword, Text, Name.Function)),
(r'(")([^"]+)(")(\s+)(string|slist|int|real)(\s*)(=>)(\s*)',
bygroups(Punctuation, Name.Variable, Punctuation,
Text, Keyword.Type, Text, Operator, Text)),
(r'(\S+)(\s*)(=>)(\s*)',
bygroups(Keyword.Reserved, Text, Operator, Text)),
(r'"', String, 'string'),
(r'(\w+)(\()', bygroups(Name.Function, Punctuation)),
(r'([\w.!&|()]+)(::)', bygroups(Name.Class, Punctuation)),
(r'(\w+)(:)', bygroups(Keyword.Declaration, Punctuation)),
(r'@[{(][^)}]+[})]', Name.Variable),
(r'[(){},;]', Punctuation),
(r'=>', Operator),
(r'->', Operator),
(r'\d+\.\d+', Number.Float),
(r'\d+', Number.Integer),
(r'\w+', Name.Function),
(r'\s+', Text),
],
'string': [
(r'\$[{(]', String.Interpol, 'interpol'),
(r'\\.', String.Escape),
(r'"', String, '#pop'),
(r'\n', String),
(r'.', String),
],
'interpol': [
(r'\$[{(]', String.Interpol, '#push'),
(r'[})]', String.Interpol, '#pop'),
(r'[^${()}]+', String.Interpol),
],
'arglist': [
(r'\)', Punctuation, '#pop'),
(r',', Punctuation),
(r'\w+', Name.Variable),
(r'\s+', Text),
],
}
class ApacheConfLexer(RegexLexer):
"""
Lexer for configuration files following the Apache config file
format.
.. versionadded:: 0.6
"""
name = 'ApacheConf'
aliases = ['apacheconf', 'aconf', 'apache']
filenames = ['.htaccess', 'apache.conf', 'apache2.conf']
mimetypes = ['text/x-apacheconf']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'(#.*?)$', Comment),
(r'(<[^\s>]+)(?:(\s+)(.*?))?(>)',
bygroups(Name.Tag, Text, String, Name.Tag)),
(r'([a-z]\w*)(\s+)',
bygroups(Name.Builtin, Text), 'value'),
(r'\.+', Text),
],
'value': [
(r'\\\n', Text),
(r'$', Text, '#pop'),
(r'\\', Text),
(r'[^\S\n]+', Text),
(r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
(r'\d+', Number),
(r'/([a-z0-9][\w./-]+)', String.Other),
(r'(on|off|none|any|all|double|email|dns|min|minimal|'
r'os|productonly|full|emerg|alert|crit|error|warn|'
r'notice|info|debug|registry|script|inetd|standalone|'
r'user|group)\b', Keyword),
(r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
(r'[^\s"\\]+', Text)
],
}
class SquidConfLexer(RegexLexer):
"""
Lexer for `squid <http://www.squid-cache.org/>`_ configuration files.
.. versionadded:: 0.9
"""
name = 'SquidConf'
aliases = ['squidconf', 'squid.conf', 'squid']
filenames = ['squid.conf']
mimetypes = ['text/x-squidconf']
flags = re.IGNORECASE
keywords = (
"access_log", "acl", "always_direct", "announce_host",
"announce_period", "announce_port", "announce_to", "anonymize_headers",
"append_domain", "as_whois_server", "auth_param_basic",
"authenticate_children", "authenticate_program", "authenticate_ttl",
"broken_posts", "buffered_logs", "cache_access_log", "cache_announce",
"cache_dir", "cache_dns_program", "cache_effective_group",
"cache_effective_user", "cache_host", "cache_host_acl",
"cache_host_domain", "cache_log", "cache_mem", "cache_mem_high",
"cache_mem_low", "cache_mgr", "cachemgr_passwd", "cache_peer",
"cache_peer_access", "cahce_replacement_policy", "cache_stoplist",
"cache_stoplist_pattern", "cache_store_log", "cache_swap",
"cache_swap_high", "cache_swap_log", "cache_swap_low", "client_db",
"client_lifetime", "client_netmask", "connect_timeout", "coredump_dir",
"dead_peer_timeout", "debug_options", "delay_access", "delay_class",
"delay_initial_bucket_level", "delay_parameters", "delay_pools",
"deny_info", "dns_children", "dns_defnames", "dns_nameservers",
"dns_testnames", "emulate_httpd_log", "err_html_text",
"fake_user_agent", "firewall_ip", "forwarded_for", "forward_snmpd_port",
"fqdncache_size", "ftpget_options", "ftpget_program", "ftp_list_width",
"ftp_passive", "ftp_user", "half_closed_clients", "header_access",
"header_replace", "hierarchy_stoplist", "high_response_time_warning",
"high_page_fault_warning", "hosts_file", "htcp_port", "http_access",
"http_anonymizer", "httpd_accel", "httpd_accel_host",
"httpd_accel_port", "httpd_accel_uses_host_header",
"httpd_accel_with_proxy", "http_port", "http_reply_access",
"icp_access", "icp_hit_stale", "icp_port", "icp_query_timeout",
"ident_lookup", "ident_lookup_access", "ident_timeout",
"incoming_http_average", "incoming_icp_average", "inside_firewall",
"ipcache_high", "ipcache_low", "ipcache_size", "local_domain",
"local_ip", "logfile_rotate", "log_fqdn", "log_icp_queries",
"log_mime_hdrs", "maximum_object_size", "maximum_single_addr_tries",
"mcast_groups", "mcast_icp_query_timeout", "mcast_miss_addr",
"mcast_miss_encode_key", "mcast_miss_port", "memory_pools",
"memory_pools_limit", "memory_replacement_policy", "mime_table",
"min_http_poll_cnt", "min_icp_poll_cnt", "minimum_direct_hops",
"minimum_object_size", "minimum_retry_timeout", "miss_access",
"negative_dns_ttl", "negative_ttl", "neighbor_timeout",
"neighbor_type_domain", "netdb_high", "netdb_low", "netdb_ping_period",
"netdb_ping_rate", "never_direct", "no_cache", "passthrough_proxy",
"pconn_timeout", "pid_filename", "pinger_program", "positive_dns_ttl",
"prefer_direct", "proxy_auth", "proxy_auth_realm", "query_icmp",
"quick_abort", "quick_abort_max", "quick_abort_min",
"quick_abort_pct", "range_offset_limit", "read_timeout",
"redirect_children", "redirect_program",
"redirect_rewrites_host_header", "reference_age",
"refresh_pattern", "reload_into_ims", "request_body_max_size",
"request_size", "request_timeout", "shutdown_lifetime",
"single_parent_bypass", "siteselect_timeout", "snmp_access",
"snmp_incoming_address", "snmp_port", "source_ping", "ssl_proxy",
"store_avg_object_size", "store_objects_per_bucket",
"strip_query_terms", "swap_level1_dirs", "swap_level2_dirs",
"tcp_incoming_address", "tcp_outgoing_address", "tcp_recv_bufsize",
"test_reachability", "udp_hit_obj", "udp_hit_obj_size",
"udp_incoming_address", "udp_outgoing_address", "unique_hostname",
"unlinkd_program", "uri_whitespace", "useragent_log",
"visible_hostname", "wais_relay", "wais_relay_host", "wais_relay_port",
)
opts = (
"proxy-only", "weight", "ttl", "no-query", "default", "round-robin",
"multicast-responder", "on", "off", "all", "deny", "allow", "via",
"parent", "no-digest", "heap", "lru", "realm", "children", "q1", "q2",
"credentialsttl", "none", "disable", "offline_toggle", "diskd",
)
actions = (
"shutdown", "info", "parameter", "server_list", "client_list",
r'squid.conf',
)
actions_stats = (
"objects", "vm_objects", "utilization", "ipcache", "fqdncache", "dns",
"redirector", "io", "reply_headers", "filedescriptors", "netdb",
)
actions_log = ("status", "enable", "disable", "clear")
acls = (
"url_regex", "urlpath_regex", "referer_regex", "port", "proto",
"req_mime_type", "rep_mime_type", "method", "browser", "user", "src",
"dst", "time", "dstdomain", "ident", "snmp_community",
)
ip_re = (
r'(?:(?:(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|0x0*[0-9a-f]{1,2}|'
r'0+[1-3]?[0-7]{0,2})(?:\.(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|'
r'0x0*[0-9a-f]{1,2}|0+[1-3]?[0-7]{0,2})){3})|(?!.*::.*::)(?:(?!:)|'
r':(?=:))(?:[0-9a-f]{0,4}(?:(?<=::)|(?<!::):)){6}(?:[0-9a-f]{0,4}'
r'(?:(?<=::)|(?<!::):)[0-9a-f]{0,4}(?:(?<=::)|(?<!:)|(?<=:)(?<!::):)|'
r'(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-4]|2[0-4]\d|1\d\d|'
r'[1-9]?\d)){3}))'
)
tokens = {
'root': [
(r'\s+', Whitespace),
(r'#', Comment, 'comment'),
(words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
(words(opts, prefix=r'\b', suffix=r'\b'), Name.Constant),
# Actions
(words(actions, prefix=r'\b', suffix=r'\b'), String),
(words(actions_stats, prefix=r'stats/', suffix=r'\b'), String),
(words(actions_log, prefix=r'log/', suffix=r'='), String),
(words(acls, prefix=r'\b', suffix=r'\b'), Keyword),
(ip_re + r'(?:/(?:' + ip_re + r'|\b\d+\b))?', Number.Float),
(r'(?:\b\d+\b(?:-\b\d+|%)?)', Number),
(r'\S+', Text),
],
'comment': [
(r'\s*TAG:.*', String.Escape, '#pop'),
(r'.+', Comment, '#pop'),
default('#pop'),
],
}
class NginxConfLexer(RegexLexer):
"""
Lexer for `Nginx <http://nginx.net/>`_ configuration files.
.. versionadded:: 0.11
"""
name = 'Nginx configuration file'
aliases = ['nginx']
filenames = []
mimetypes = ['text/x-nginx-conf']
tokens = {
'root': [
(r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Text, Name)),
(r'[^\s;#]+', Keyword, 'stmt'),
include('base'),
],
'block': [
(r'\}', Punctuation, '#pop:2'),
(r'[^\s;#]+', Keyword.Namespace, 'stmt'),
include('base'),
],
'stmt': [
(r'\{', Punctuation, 'block'),
(r';', Punctuation, '#pop'),
include('base'),
],
'base': [
(r'#.*\n', Comment.Single),
(r'on|off', Name.Constant),
(r'\$[^\s;#()]+', Name.Variable),
(r'([a-z0-9.-]+)(:)([0-9]+)',
bygroups(Name, Punctuation, Number.Integer)),
(r'[a-z-]+/[a-z-+]+', String), # mimetype
# (r'[a-zA-Z._-]+', Keyword),
(r'[0-9]+[km]?\b', Number.Integer),
(r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Text, String.Regex)),
(r'[:=~]', Punctuation),
(r'[^\s;#{}$]+', String), # catch all
(r'/[^\s;#]*', Name), # pathname
(r'\s+', Text),
(r'[$;]', Text), # leftover characters
],
}
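# State flow for the three states above (a sketch): a directive name in
# 'root' enters 'stmt'; '{' then nests into 'block' and ';' pops back to the
# enclosing state. Inside 'block' each directive again enters 'stmt', and the
# closing '}' uses '#pop:2' to unwind both the 'stmt' and 'block' frames at
# once, so for
#
#     server {
#         listen 80;
#     }
#
# the final '}' returns the lexer straight to 'root'.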
class LighttpdConfLexer(RegexLexer):
"""
Lexer for `Lighttpd <http://lighttpd.net/>`_ configuration files.
.. versionadded:: 0.11
"""
name = 'Lighttpd configuration file'
aliases = ['lighty', 'lighttpd']
filenames = []
mimetypes = ['text/x-lighttpd-conf']
tokens = {
'root': [
(r'#.*\n', Comment.Single),
(r'/\S*', Name), # pathname
(r'[a-zA-Z._-]+', Keyword),
(r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
(r'[0-9]+', Number),
(r'=>|=~|\+=|==|=|\+', Operator),
(r'\$[A-Z]+', Name.Builtin),
(r'[(){}\[\],]', Punctuation),
(r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
(r'\s+', Text),
],
}
class DockerLexer(RegexLexer):
"""
Lexer for `Docker <http://docker.io>`_ configuration files.
.. versionadded:: 2.0
"""
name = 'Docker'
aliases = ['docker', 'dockerfile']
filenames = ['Dockerfile', '*.docker']
mimetypes = ['text/x-dockerfile-config']
_keywords = (r'(?:FROM|MAINTAINER|CMD|EXPOSE|ENV|ADD|ENTRYPOINT|'
r'VOLUME|WORKDIR)')
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r'^(ONBUILD)(\s+)(%s)\b' % (_keywords,),
bygroups(Name.Keyword, Whitespace, Keyword)),
(r'^(%s)\b(.*)' % (_keywords,), bygroups(Keyword, String)),
(r'#.*', Comment),
(r'RUN', Keyword), # Rest of line falls through
(r'(.*\\\n)*.+', using(BashLexer)),
],
}
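# A sketch of the rules above on two hypothetical Dockerfile lines:
#
#     ONBUILD ADD . /app/src
#     RUN make /app
#
# bygroups() splits the first line into (Name.Keyword, 'ONBUILD'), whitespace
# and (Keyword, 'ADD'), with the arguments handled by the fall-through rule;
# on the second line RUN is emitted as Keyword and the rest of the line is
# delegated to the embedded BashLexer.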


@@ -1,114 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.console
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for misc console output.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Generic, Comment, String, Text, Keyword, Name, \
Punctuation, Number
__all__ = ['VCTreeStatusLexer', 'PyPyLogLexer']
class VCTreeStatusLexer(RegexLexer):
"""
For colorizing output of version control status commands, like "hg
status" or "svn status".
.. versionadded:: 2.0
"""
name = 'VCTreeStatus'
aliases = ['vctreestatus']
filenames = []
mimetypes = []
tokens = {
'root': [
(r'^A \+ C\s+', Generic.Error),
(r'^A\s+\+?\s+', String),
(r'^M\s+', Generic.Inserted),
(r'^C\s+', Generic.Error),
(r'^D\s+', Generic.Deleted),
(r'^[?!]\s+', Comment.Preproc),
(r' >\s+.*\n', Comment.Preproc),
(r'.*\n', Text)
]
}
class PyPyLogLexer(RegexLexer):
"""
Lexer for PyPy log files.
.. versionadded:: 1.5
"""
name = "PyPy Log"
aliases = ["pypylog", "pypy"]
filenames = ["*.pypylog"]
mimetypes = ['application/x-pypylog']
tokens = {
"root": [
(r"\[\w+\] \{jit-log-.*?$", Keyword, "jit-log"),
(r"\[\w+\] \{jit-backend-counts$", Keyword, "jit-backend-counts"),
include("extra-stuff"),
],
"jit-log": [
(r"\[\w+\] jit-log-.*?}$", Keyword, "#pop"),
(r"^\+\d+: ", Comment),
(r"--end of the loop--", Comment),
(r"[ifp]\d+", Name),
(r"ptr\d+", Name),
(r"(\()(\w+(?:\.\w+)?)(\))",
bygroups(Punctuation, Name.Builtin, Punctuation)),
(r"[\[\]=,()]", Punctuation),
(r"(\d+\.\d+|inf|-inf)", Number.Float),
(r"-?\d+", Number.Integer),
(r"'.*'", String),
(r"(None|descr|ConstClass|ConstPtr|TargetToken)", Name),
(r"<.*?>+", Name.Builtin),
(r"(label|debug_merge_point|jump|finish)", Name.Class),
(r"(int_add_ovf|int_add|int_sub_ovf|int_sub|int_mul_ovf|int_mul|"
r"int_floordiv|int_mod|int_lshift|int_rshift|int_and|int_or|"
r"int_xor|int_eq|int_ne|int_ge|int_gt|int_le|int_lt|int_is_zero|"
r"int_is_true|"
r"uint_floordiv|uint_ge|uint_lt|"
r"float_add|float_sub|float_mul|float_truediv|float_neg|"
r"float_eq|float_ne|float_ge|float_gt|float_le|float_lt|float_abs|"
r"ptr_eq|ptr_ne|instance_ptr_eq|instance_ptr_ne|"
r"cast_int_to_float|cast_float_to_int|"
r"force_token|quasiimmut_field|same_as|virtual_ref_finish|"
r"virtual_ref|mark_opaque_ptr|"
r"call_may_force|call_assembler|call_loopinvariant|"
r"call_release_gil|call_pure|call|"
r"new_with_vtable|new_array|newstr|newunicode|new|"
r"arraylen_gc|"
r"getarrayitem_gc_pure|getarrayitem_gc|setarrayitem_gc|"
r"getarrayitem_raw|setarrayitem_raw|getfield_gc_pure|"
r"getfield_gc|getinteriorfield_gc|setinteriorfield_gc|"
r"getfield_raw|setfield_gc|setfield_raw|"
r"strgetitem|strsetitem|strlen|copystrcontent|"
r"unicodegetitem|unicodesetitem|unicodelen|"
r"guard_true|guard_false|guard_value|guard_isnull|"
r"guard_nonnull_class|guard_nonnull|guard_class|guard_no_overflow|"
r"guard_not_forced|guard_no_exception|guard_not_invalidated)",
Name.Builtin),
include("extra-stuff"),
],
"jit-backend-counts": [
(r"\[\w+\] jit-backend-counts}$", Keyword, "#pop"),
(r":", Punctuation),
(r"\d+", Number),
include("extra-stuff"),
],
"extra-stuff": [
(r"\s+", Text),
(r"#.*?$", Comment),
],
}


@@ -1,498 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.css
~~~~~~~~~~~~~~~~~~~
Lexers for CSS and related stylesheet formats.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import copy
from pygments.lexer import ExtendedRegexLexer, RegexLexer, include, bygroups, \
default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
from pygments.util import iteritems
__all__ = ['CssLexer', 'SassLexer', 'ScssLexer']
class CssLexer(RegexLexer):
"""
For CSS (Cascading Style Sheets).
"""
name = 'CSS'
aliases = ['css']
filenames = ['*.css']
mimetypes = ['text/css']
tokens = {
'root': [
include('basics'),
],
'basics': [
(r'\s+', Text),
(r'/\*(?:.|\n)*?\*/', Comment),
(r'\{', Punctuation, 'content'),
(r'\:[\w-]+', Name.Decorator),
(r'\.[\w-]+', Name.Class),
(r'\#[\w-]+', Name.Function),
(r'@[\w-]+', Keyword, 'atrule'),
(r'[\w-]+', Name.Tag),
(r'[~^*!%&$\[\]()<>|+=@:;,./?-]', Operator),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single)
],
'atrule': [
(r'\{', Punctuation, 'atcontent'),
(r';', Punctuation, '#pop'),
include('basics'),
],
'atcontent': [
include('basics'),
(r'\}', Punctuation, '#pop:2'),
],
'content': [
(r'\s+', Text),
(r'\}', Punctuation, '#pop'),
(r'url\(.*?\)', String.Other),
(r'^@.*?$', Comment.Preproc),
(words((
'azimuth', 'background-attachment', 'background-color',
'background-image', 'background-position', 'background-repeat',
'background', 'border-bottom-color', 'border-bottom-style',
'border-bottom-width', 'border-left-color', 'border-left-style',
'border-left-width', 'border-right', 'border-right-color',
'border-right-style', 'border-right-width', 'border-top-color',
'border-top-style', 'border-top-width', 'border-bottom',
'border-collapse', 'border-left', 'border-width', 'border-color',
'border-spacing', 'border-style', 'border-top', 'border', 'caption-side',
'clear', 'clip', 'color', 'content', 'counter-increment', 'counter-reset',
'cue-after', 'cue-before', 'cue', 'cursor', 'direction', 'display',
'elevation', 'empty-cells', 'float', 'font-family', 'font-size',
'font-size-adjust', 'font-stretch', 'font-style', 'font-variant',
'font-weight', 'font', 'height', 'letter-spacing', 'line-height',
'list-style-type', 'list-style-image', 'list-style-position',
'list-style', 'margin-bottom', 'margin-left', 'margin-right',
'margin-top', 'margin', 'marker-offset', 'marks', 'max-height', 'max-width',
'min-height', 'min-width', 'opacity', 'orphans', 'outline-color',
'outline-style', 'outline-width', 'outline', 'overflow', 'overflow-x',
'overflow-y', 'padding-bottom', 'padding-left', 'padding-right', 'padding-top',
'padding', 'page', 'page-break-after', 'page-break-before', 'page-break-inside',
'pause-after', 'pause-before', 'pause', 'pitch-range', 'pitch',
'play-during', 'position', 'quotes', 'richness', 'right', 'size',
'speak-header', 'speak-numeral', 'speak-punctuation', 'speak',
'speech-rate', 'stress', 'table-layout', 'text-align', 'text-decoration',
'text-indent', 'text-shadow', 'text-transform', 'top', 'unicode-bidi',
'vertical-align', 'visibility', 'voice-family', 'volume', 'white-space',
'widows', 'width', 'word-spacing', 'z-index', 'bottom',
'above', 'absolute', 'always', 'armenian', 'aural', 'auto', 'avoid', 'baseline',
'behind', 'below', 'bidi-override', 'blink', 'block', 'bolder', 'bold', 'both',
'capitalize', 'center-left', 'center-right', 'center', 'circle',
'cjk-ideographic', 'close-quote', 'collapse', 'condensed', 'continuous',
'crop', 'crosshair', 'cross', 'cursive', 'dashed', 'decimal-leading-zero',
'decimal', 'default', 'digits', 'disc', 'dotted', 'double', 'e-resize', 'embed',
'extra-condensed', 'extra-expanded', 'expanded', 'fantasy', 'far-left',
'far-right', 'faster', 'fast', 'fixed', 'georgian', 'groove', 'hebrew', 'help',
'hidden', 'hide', 'higher', 'high', 'hiragana-iroha', 'hiragana', 'icon',
'inherit', 'inline-table', 'inline', 'inset', 'inside', 'invert', 'italic',
'justify', 'katakana-iroha', 'katakana', 'landscape', 'larger', 'large',
'left-side', 'leftwards', 'left', 'level', 'lighter', 'line-through', 'list-item',
'loud', 'lower-alpha', 'lower-greek', 'lower-roman', 'lowercase', 'ltr',
'lower', 'low', 'medium', 'message-box', 'middle', 'mix', 'monospace',
'n-resize', 'narrower', 'ne-resize', 'no-close-quote', 'no-open-quote',
'no-repeat', 'none', 'normal', 'nowrap', 'nw-resize', 'oblique', 'once',
'open-quote', 'outset', 'outside', 'overline', 'pointer', 'portrait', 'px',
'relative', 'repeat-x', 'repeat-y', 'repeat', 'rgb', 'ridge', 'right-side',
'rightwards', 's-resize', 'sans-serif', 'scroll', 'se-resize',
'semi-condensed', 'semi-expanded', 'separate', 'serif', 'show', 'silent',
'slower', 'slow', 'small-caps', 'small-caption', 'smaller', 'soft', 'solid',
'spell-out', 'square', 'static', 'status-bar', 'super', 'sw-resize',
'table-caption', 'table-cell', 'table-column', 'table-column-group',
'table-footer-group', 'table-header-group', 'table-row',
'table-row-group', 'text-bottom', 'text-top', 'text', 'thick', 'thin',
'transparent', 'ultra-condensed', 'ultra-expanded', 'underline',
'upper-alpha', 'upper-latin', 'upper-roman', 'uppercase', 'url',
'visible', 'w-resize', 'wait', 'wider', 'x-fast', 'x-high', 'x-large', 'x-loud',
'x-low', 'x-small', 'x-soft', 'xx-large', 'xx-small', 'yes'), suffix=r'\b'),
Keyword),
(words((
'indigo', 'gold', 'firebrick', 'indianred', 'yellow', 'darkolivegreen',
'darkseagreen', 'mediumvioletred', 'mediumorchid', 'chartreuse',
'mediumslateblue', 'black', 'springgreen', 'crimson', 'lightsalmon', 'brown',
'turquoise', 'olivedrab', 'cyan', 'silver', 'skyblue', 'gray', 'darkturquoise',
'goldenrod', 'darkgreen', 'darkviolet', 'darkgray', 'lightpink', 'teal',
'darkmagenta', 'lightgoldenrodyellow', 'lavender', 'yellowgreen', 'thistle',
'violet', 'navy', 'orchid', 'blue', 'ghostwhite', 'honeydew', 'cornflowerblue',
'darkblue', 'darkkhaki', 'mediumpurple', 'cornsilk', 'red', 'bisque', 'slategray',
'darkcyan', 'khaki', 'wheat', 'deepskyblue', 'darkred', 'steelblue', 'aliceblue',
'gainsboro', 'mediumturquoise', 'floralwhite', 'coral', 'purple', 'lightgrey',
'lightcyan', 'darksalmon', 'beige', 'azure', 'lightsteelblue', 'oldlace',
'greenyellow', 'royalblue', 'lightseagreen', 'mistyrose', 'sienna',
'lightcoral', 'orangered', 'navajowhite', 'lime', 'palegreen', 'burlywood',
'seashell', 'mediumspringgreen', 'fuchsia', 'papayawhip', 'blanchedalmond',
'peru', 'aquamarine', 'white', 'darkslategray', 'ivory', 'dodgerblue',
'lemonchiffon', 'chocolate', 'orange', 'forestgreen', 'slateblue', 'olive',
'mintcream', 'antiquewhite', 'darkorange', 'cadetblue', 'moccasin',
'limegreen', 'saddlebrown', 'darkslateblue', 'lightskyblue', 'deeppink',
'plum', 'aqua', 'darkgoldenrod', 'maroon', 'sandybrown', 'magenta', 'tan',
'rosybrown', 'pink', 'lightblue', 'palevioletred', 'mediumseagreen',
'dimgray', 'powderblue', 'seagreen', 'snow', 'mediumblue', 'midnightblue',
'paleturquoise', 'palegoldenrod', 'whitesmoke', 'darkorchid', 'salmon',
'lightslategray', 'lawngreen', 'lightgreen', 'tomato', 'hotpink',
'lightyellow', 'lavenderblush', 'linen', 'mediumaquamarine', 'green',
'blueviolet', 'peachpuff'), suffix=r'\b'),
Name.Builtin),
(r'\!important', Comment.Preproc),
(r'/\*(?:.|\n)*?\*/', Comment),
(r'\#[a-zA-Z0-9]{1,6}', Number),
(r'[.-]?[0-9]*[.]?[0-9]+(em|px|pt|pc|in|mm|cm|ex|s)\b', Number),
# Separate regex for percentages, as word boundaries can't be used with %
(r'[.-]?[0-9]*[.]?[0-9]+%', Number),
(r'-?[0-9]+', Number),
(r'[~^*!%&<>|+=@:,./?-]+', Operator),
(r'[\[\]();]+', Punctuation),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'[a-zA-Z_]\w*', Name)
]
}
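# State sketch for at-rules above: '@media' matches the '@[\w-]+' rule and
# enters 'atrule'; a following '{' enters 'atcontent', whose closing '}'
# uses '#pop:2' to drop both states at once, while a ';' form such as
# '@import url(x.css);' pops 'atrule' directly.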
common_sass_tokens = {
'value': [
(r'[ \t]+', Text),
(r'[!$][\w-]+', Name.Variable),
(r'url\(', String.Other, 'string-url'),
(r'[a-z_-][\w-]*(?=\()', Name.Function),
(words((
'azimuth', 'background-attachment', 'background-color',
'background-image', 'background-position', 'background-repeat',
'background', 'border-bottom-color', 'border-bottom-style',
'border-bottom-width', 'border-left-color', 'border-left-style',
'border-left-width', 'border-right', 'border-right-color',
'border-right-style', 'border-right-width', 'border-top-color',
'border-top-style', 'border-top-width', 'border-bottom',
'border-collapse', 'border-left', 'border-width', 'border-color',
'border-spacing', 'border-style', 'border-top', 'border', 'caption-side',
'clear', 'clip', 'color', 'content', 'counter-increment', 'counter-reset',
'cue-after', 'cue-before', 'cue', 'cursor', 'direction', 'display',
'elevation', 'empty-cells', 'float', 'font-family', 'font-size',
'font-size-adjust', 'font-stretch', 'font-style', 'font-variant',
'font-weight', 'font', 'height', 'letter-spacing', 'line-height',
'list-style-type', 'list-style-image', 'list-style-position',
'list-style', 'margin-bottom', 'margin-left', 'margin-right',
'margin-top', 'margin', 'marker-offset', 'marks', 'max-height', 'max-width',
'min-height', 'min-width', 'opacity', 'orphans', 'outline', 'outline-color',
'outline-style', 'outline-width', 'overflow', 'padding-bottom',
'padding-left', 'padding-right', 'padding-top', 'padding', 'page',
'page-break-after', 'page-break-before', 'page-break-inside',
'pause-after', 'pause-before', 'pause', 'pitch', 'pitch-range',
'play-during', 'position', 'quotes', 'richness', 'right', 'size',
'speak-header', 'speak-numeral', 'speak-punctuation', 'speak',
'speech-rate', 'stress', 'table-layout', 'text-align', 'text-decoration',
'text-indent', 'text-shadow', 'text-transform', 'top', 'unicode-bidi',
'vertical-align', 'visibility', 'voice-family', 'volume', 'white-space',
'widows', 'width', 'word-spacing', 'z-index', 'bottom', 'left',
'above', 'absolute', 'always', 'armenian', 'aural', 'auto', 'avoid', 'baseline',
'behind', 'below', 'bidi-override', 'blink', 'block', 'bold', 'bolder', 'both',
'capitalize', 'center-left', 'center-right', 'center', 'circle',
'cjk-ideographic', 'close-quote', 'collapse', 'condensed', 'continuous',
'crop', 'crosshair', 'cross', 'cursive', 'dashed', 'decimal-leading-zero',
'decimal', 'default', 'digits', 'disc', 'dotted', 'double', 'e-resize', 'embed',
'extra-condensed', 'extra-expanded', 'expanded', 'fantasy', 'far-left',
'far-right', 'faster', 'fast', 'fixed', 'georgian', 'groove', 'hebrew', 'help',
'hidden', 'hide', 'higher', 'high', 'hiragana-iroha', 'hiragana', 'icon',
'inherit', 'inline-table', 'inline', 'inset', 'inside', 'invert', 'italic',
'justify', 'katakana-iroha', 'katakana', 'landscape', 'larger', 'large',
'left-side', 'leftwards', 'level', 'lighter', 'line-through', 'list-item',
'loud', 'lower-alpha', 'lower-greek', 'lower-roman', 'lowercase', 'ltr',
'lower', 'low', 'medium', 'message-box', 'middle', 'mix', 'monospace',
'n-resize', 'narrower', 'ne-resize', 'no-close-quote', 'no-open-quote',
'no-repeat', 'none', 'normal', 'nowrap', 'nw-resize', 'oblique', 'once',
'open-quote', 'outset', 'outside', 'overline', 'pointer', 'portrait', 'px',
'relative', 'repeat-x', 'repeat-y', 'repeat', 'rgb', 'ridge', 'right-side',
'rightwards', 's-resize', 'sans-serif', 'scroll', 'se-resize',
'semi-condensed', 'semi-expanded', 'separate', 'serif', 'show', 'silent',
'slow', 'slower', 'small-caps', 'small-caption', 'smaller', 'soft', 'solid',
'spell-out', 'square', 'static', 'status-bar', 'super', 'sw-resize',
'table-caption', 'table-cell', 'table-column', 'table-column-group',
'table-footer-group', 'table-header-group', 'table-row',
'table-row-group', 'text', 'text-bottom', 'text-top', 'thick', 'thin',
'transparent', 'ultra-condensed', 'ultra-expanded', 'underline',
'upper-alpha', 'upper-latin', 'upper-roman', 'uppercase', 'url',
'visible', 'w-resize', 'wait', 'wider', 'x-fast', 'x-high', 'x-large', 'x-loud',
'x-low', 'x-small', 'x-soft', 'xx-large', 'xx-small', 'yes'), suffix=r'\b'),
Name.Constant),
(words((
'indigo', 'gold', 'firebrick', 'indianred', 'darkolivegreen',
'darkseagreen', 'mediumvioletred', 'mediumorchid', 'chartreuse',
'mediumslateblue', 'springgreen', 'crimson', 'lightsalmon', 'brown',
'turquoise', 'olivedrab', 'cyan', 'skyblue', 'darkturquoise',
'goldenrod', 'darkgreen', 'darkviolet', 'darkgray', 'lightpink',
'darkmagenta', 'lightgoldenrodyellow', 'lavender', 'yellowgreen', 'thistle',
'violet', 'orchid', 'ghostwhite', 'honeydew', 'cornflowerblue',
'darkblue', 'darkkhaki', 'mediumpurple', 'cornsilk', 'bisque', 'slategray',
'darkcyan', 'khaki', 'wheat', 'deepskyblue', 'darkred', 'steelblue', 'aliceblue',
'gainsboro', 'mediumturquoise', 'floralwhite', 'coral', 'lightgrey',
'lightcyan', 'darksalmon', 'beige', 'azure', 'lightsteelblue', 'oldlace',
'greenyellow', 'royalblue', 'lightseagreen', 'mistyrose', 'sienna',
'lightcoral', 'orangered', 'navajowhite', 'palegreen', 'burlywood',
'seashell', 'mediumspringgreen', 'papayawhip', 'blanchedalmond',
'peru', 'aquamarine', 'darkslategray', 'ivory', 'dodgerblue',
'lemonchiffon', 'chocolate', 'orange', 'forestgreen', 'slateblue',
'mintcream', 'antiquewhite', 'darkorange', 'cadetblue', 'moccasin',
'limegreen', 'saddlebrown', 'darkslateblue', 'lightskyblue', 'deeppink',
'plum', 'darkgoldenrod', 'sandybrown', 'magenta', 'tan',
'rosybrown', 'pink', 'lightblue', 'palevioletred', 'mediumseagreen',
'dimgray', 'powderblue', 'seagreen', 'snow', 'mediumblue', 'midnightblue',
'paleturquoise', 'palegoldenrod', 'whitesmoke', 'darkorchid', 'salmon',
'lightslategray', 'lawngreen', 'lightgreen', 'tomato', 'hotpink',
'lightyellow', 'lavenderblush', 'linen', 'mediumaquamarine',
'blueviolet', 'peachpuff'), suffix=r'\b'),
Name.Entity),
(words((
'black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green',
'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua'), suffix=r'\b'),
Name.Builtin),
(r'\!(important|default)', Name.Exception),
(r'(true|false)', Name.Pseudo),
(r'(and|or|not)', Operator.Word),
(r'/\*', Comment.Multiline, 'inline-comment'),
(r'//[^\n]*', Comment.Single),
(r'\#[a-z0-9]{1,6}', Number.Hex),
(r'(-?\d+)(\%|[a-z]+)?', bygroups(Number.Integer, Keyword.Type)),
(r'(-?\d*\.\d+)(\%|[a-z]+)?', bygroups(Number.Float, Keyword.Type)),
(r'#\{', String.Interpol, 'interpolation'),
(r'[~^*!&%<>|+=@:,./?-]+', Operator),
(r'[\[\]()]+', Punctuation),
(r'"', String.Double, 'string-double'),
(r"'", String.Single, 'string-single'),
(r'[a-z_-][\w-]*', Name),
],
'interpolation': [
(r'\}', String.Interpol, '#pop'),
include('value'),
],
'selector': [
(r'[ \t]+', Text),
(r'\:', Name.Decorator, 'pseudo-class'),
(r'\.', Name.Class, 'class'),
(r'\#', Name.Namespace, 'id'),
(r'[\w-]+', Name.Tag),
(r'#\{', String.Interpol, 'interpolation'),
(r'&', Keyword),
(r'[~^*!&\[\]()<>|+=@:;,./?-]', Operator),
(r'"', String.Double, 'string-double'),
(r"'", String.Single, 'string-single'),
],
'string-double': [
(r'(\\.|#(?=[^\n{])|[^\n"#])+', String.Double),
(r'#\{', String.Interpol, 'interpolation'),
(r'"', String.Double, '#pop'),
],
'string-single': [
(r"(\\.|#(?=[^\n{])|[^\n'#])+", String.Double),
(r'#\{', String.Interpol, 'interpolation'),
(r"'", String.Double, '#pop'),
],
'string-url': [
(r'(\\#|#(?=[^\n{])|[^\n#)])+', String.Other),
(r'#\{', String.Interpol, 'interpolation'),
(r'\)', String.Other, '#pop'),
],
'pseudo-class': [
(r'[\w-]+', Name.Decorator),
(r'#\{', String.Interpol, 'interpolation'),
default('#pop'),
],
'class': [
(r'[\w-]+', Name.Class),
(r'#\{', String.Interpol, 'interpolation'),
default('#pop'),
],
'id': [
(r'[\w-]+', Name.Namespace),
(r'#\{', String.Interpol, 'interpolation'),
default('#pop'),
],
'for': [
(r'(from|to|through)', Operator.Word),
include('value'),
],
}
def _indentation(lexer, match, ctx):
indentation = match.group(0)
yield match.start(), Text, indentation
ctx.last_indentation = indentation
ctx.pos = match.end()
if hasattr(ctx, 'block_state') and ctx.block_state and \
indentation.startswith(ctx.block_indentation) and \
indentation != ctx.block_indentation:
ctx.stack.append(ctx.block_state)
else:
ctx.block_state = None
ctx.block_indentation = None
ctx.stack.append('content')
def _starts_block(token, state):
def callback(lexer, match, ctx):
yield match.start(), token, match.group(0)
if hasattr(ctx, 'last_indentation'):
ctx.block_indentation = ctx.last_indentation
else:
ctx.block_indentation = ''
ctx.block_state = state
ctx.pos = match.end()
return callback
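# A minimal sketch (not from the original file) of the ExtendedRegexLexer
# callback contract the two helpers above rely on: a callback receives the
# lexer, the regex match, and the context, yields (position, token, text)
# tuples, and must advance ctx.pos itself. DemoLexer is a hypothetical name.
from pygments.lexer import ExtendedRegexLexer
from pygments.token import Name, Text as _Text

def emit_name(lexer, match, ctx):
    # Emit the matched word as a Name token and step the context past it.
    yield match.start(), Name, match.group(0)
    ctx.pos = match.end()

class DemoLexer(ExtendedRegexLexer):
    tokens = {
        'root': [
            (r'[a-z]+', emit_name),
            (r'\s+', _Text),
        ],
    }
# list(DemoLexer().get_tokens('foo bar')) shows the callback-produced tokens.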
class SassLexer(ExtendedRegexLexer):
"""
For Sass stylesheets.
.. versionadded:: 1.3
"""
name = 'Sass'
aliases = ['sass']
filenames = ['*.sass']
mimetypes = ['text/x-sass']
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'content': [
(r'//[^\n]*', _starts_block(Comment.Single, 'single-comment'),
'root'),
(r'/\*[^\n]*', _starts_block(Comment.Multiline, 'multi-comment'),
'root'),
(r'@import', Keyword, 'import'),
(r'@for', Keyword, 'for'),
(r'@(debug|warn|if|while)', Keyword, 'value'),
(r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
(r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
(r'@extend', Keyword, 'selector'),
(r'@[\w-]+', Keyword, 'selector'),
(r'=[\w-]+', Name.Function, 'value'),
(r'\+[\w-]+', Name.Decorator, 'value'),
(r'([!$][\w-]\w*)([ \t]*(?:(?:\|\|)?=|:))',
bygroups(Name.Variable, Operator), 'value'),
(r':', Name.Attribute, 'old-style-attr'),
(r'(?=.+?[=:]([^a-z]|$))', Name.Attribute, 'new-style-attr'),
default('selector'),
],
'single-comment': [
(r'.+', Comment.Single),
(r'\n', Text, 'root'),
],
'multi-comment': [
(r'.+', Comment.Multiline),
(r'\n', Text, 'root'),
],
'import': [
(r'[ \t]+', Text),
(r'\S+', String),
(r'\n', Text, 'root'),
],
'old-style-attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#\{', String.Interpol, 'interpolation'),
(r'[ \t]*=', Operator, 'value'),
default('value'),
],
'new-style-attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#\{', String.Interpol, 'interpolation'),
(r'[ \t]*[=:]', Operator, 'value'),
],
'inline-comment': [
(r"(\\#|#(?=[^\n{])|\*(?=[^\n/])|[^\n#*])+", Comment.Multiline),
(r'#\{', String.Interpol, 'interpolation'),
(r"\*/", Comment, '#pop'),
],
}
for group, common in iteritems(common_sass_tokens):
tokens[group] = copy.copy(common)
tokens['value'].append((r'\n', Text, 'root'))
tokens['selector'].append((r'\n', Text, 'root'))
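# Usage sketch (assumed, not part of the original module): rendering a small
# indentation-based Sass snippet with the lexer defined above.
from pygments import highlight
from pygments.formatters import TerminalFormatter

sass_source = "$blue: #3bbfce\n.content\n  color: $blue\n"
print(highlight(sass_source, SassLexer(), TerminalFormatter()))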
class ScssLexer(RegexLexer):
"""
For SCSS stylesheets.
"""
name = 'SCSS'
aliases = ['scss']
filenames = ['*.scss']
mimetypes = ['text/x-scss']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'root': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@import', Keyword, 'value'),
(r'@for', Keyword, 'for'),
(r'@(debug|warn|if|while)', Keyword, 'value'),
(r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
(r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
(r'@extend', Keyword, 'selector'),
(r'(@media)(\s+)', bygroups(Keyword, Text), 'value'),
(r'@[\w-]+', Keyword, 'selector'),
(r'(\$[\w-]*\w)([ \t]*:)', bygroups(Name.Variable, Operator), 'value'),
(r'(?=[^;{}][;}])', Name.Attribute, 'attr'),
(r'(?=[^;{}:]+:[^a-z])', Name.Attribute, 'attr'),
default('selector'),
],
'attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#\{', String.Interpol, 'interpolation'),
(r'[ \t]*:', Operator, 'value'),
],
'inline-comment': [
(r"(\\#|#(?=[^{])|\*(?=[^/])|[^#*])+", Comment.Multiline),
(r'#\{', String.Interpol, 'interpolation'),
(r"\*/", Comment, '#pop'),
],
}
for group, common in iteritems(common_sass_tokens):
tokens[group] = copy.copy(common)
tokens['value'].extend([(r'\n', Text), (r'[;{}]', Punctuation, '#pop')])
tokens['selector'].extend([(r'\n', Text), (r'[;{}]', Punctuation, '#pop')])
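# Sketch (assumed usage): SCSS shares the common token table above, so the
# raw token stream can be inspected the same way as for Sass.
scss_source = "$blue: #3bbfce; .content { color: $blue; }"
for token_type, value in ScssLexer().get_tokens(scss_source):
    print(token_type, repr(value))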


@@ -1,251 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.d
~~~~~~~~~~~~~~~~~
Lexers for D languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, words
from pygments.token import Text, Comment, Keyword, Name, String, \
Number, Punctuation
__all__ = ['DLexer', 'CrocLexer', 'MiniDLexer']
class DLexer(RegexLexer):
"""
For D source.
.. versionadded:: 1.2
"""
name = 'D'
filenames = ['*.d', '*.di']
aliases = ['d']
mimetypes = ['text/x-dsrc']
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
# (r'\\\n', Text), # line continuations
# Comments
(r'//(.*?)\n', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'/\+', Comment.Multiline, 'nested_comment'),
# Keywords
(words((
'abstract', 'alias', 'align', 'asm', 'assert', 'auto', 'body',
'break', 'case', 'cast', 'catch', 'class', 'const', 'continue',
'debug', 'default', 'delegate', 'delete', 'deprecated', 'do', 'else',
'enum', 'export', 'extern', 'finally', 'final', 'foreach_reverse',
'foreach', 'for', 'function', 'goto', 'if', 'immutable', 'import',
'interface', 'invariant', 'inout', 'in', 'is', 'lazy', 'mixin',
'module', 'new', 'nothrow', 'out', 'override', 'package', 'pragma',
'private', 'protected', 'public', 'pure', 'ref', 'return', 'scope',
'shared', 'static', 'struct', 'super', 'switch', 'synchronized',
'template', 'this', 'throw', 'try', 'typedef', 'typeid', 'typeof',
'union', 'unittest', 'version', 'volatile', 'while', 'with',
'__gshared', '__traits', '__vector', '__parameters'),
suffix=r'\b'),
Keyword),
(words((
'bool', 'byte', 'cdouble', 'cent', 'cfloat', 'char', 'creal',
'dchar', 'double', 'float', 'idouble', 'ifloat', 'int', 'ireal',
'long', 'real', 'short', 'ubyte', 'ucent', 'uint', 'ulong',
'ushort', 'void', 'wchar'), suffix=r'\b'),
Keyword.Type),
(r'(false|true|null)\b', Keyword.Constant),
(words((
'__FILE__', '__MODULE__', '__LINE__', '__FUNCTION__', '__PRETTY_FUNCTION__',
'__DATE__', '__EOF__', '__TIME__', '__TIMESTAMP__', '__VENDOR__',
'__VERSION__'), suffix=r'\b'),
Keyword.Pseudo),
(r'macro\b', Keyword.Reserved),
(r'(string|wstring|dstring|size_t|ptrdiff_t)\b', Name.Builtin),
# FloatLiteral
# -- HexFloat
(r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)'
r'[pP][+\-]?[0-9_]+[fFL]?[i]?', Number.Float),
# -- DecimalFloat
(r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|'
r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[fFL]?[i]?', Number.Float),
(r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[fFL]?[i]?', Number.Float),
# IntegerLiteral
# -- Binary
(r'0[Bb][01_]+', Number.Bin),
# -- Octal
(r'0[0-7_]+', Number.Oct),
# -- Hexadecimal
(r'0[xX][0-9a-fA-F_]+', Number.Hex),
# -- Decimal
(r'(0|[1-9][0-9_]*)([LUu]|Lu|LU|uL|UL)?', Number.Integer),
# CharacterLiteral
(r"""'(\\['"?\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\&\w+;|.)'""",
String.Char),
# StringLiteral
# -- WysiwygString
(r'r"[^"]*"[cwd]?', String),
# -- AlternateWysiwygString
(r'`[^`]*`[cwd]?', String),
# -- DoubleQuotedString
(r'"(\\\\|\\"|[^"])*"[cwd]?', String),
# -- EscapeSequence
(r"\\(['\"?\\abfnrtv]|x[0-9a-fA-F]{2}|[0-7]{1,3}"
r"|u[0-9a-fA-F]{4}|U[0-9a-fA-F]{8}|&\w+;)",
String),
# -- HexString
(r'x"[0-9a-fA-F_\s]*"[cwd]?', String),
# -- DelimitedString
(r'q"\[', String, 'delimited_bracket'),
(r'q"\(', String, 'delimited_parenthesis'),
(r'q"<', String, 'delimited_angle'),
(r'q"\{', String, 'delimited_curly'),
(r'q"([a-zA-Z_]\w*)\n.*?\n\1"', String),
(r'q"(.).*?\1"', String),
# -- TokenString
(r'q\{', String, 'token_string'),
# Attributes
(r'@([a-zA-Z_]\w*)?', Name.Decorator),
# Tokens
(r'(~=|\^=|%=|\*=|==|!>=|!<=|!<>=|!<>|!<|!>|!=|>>>=|>>>|>>=|>>|>='
r'|<>=|<>|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.\.|\.\.|/=)'
r'|[/.&|\-+<>!()\[\]{}?,;:$=*%^~]', Punctuation),
# Identifier
(r'[a-zA-Z_]\w*', Name),
# Line
(r'#line\s.*\n', Comment.Special),
],
'nested_comment': [
(r'[^+/]+', Comment.Multiline),
(r'/\+', Comment.Multiline, '#push'),
(r'\+/', Comment.Multiline, '#pop'),
(r'[+/]', Comment.Multiline),
],
'token_string': [
(r'\{', Punctuation, 'token_string_nest'),
(r'\}', String, '#pop'),
include('root'),
],
'token_string_nest': [
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
include('root'),
],
'delimited_bracket': [
(r'[^\[\]]+', String),
(r'\[', String, 'delimited_inside_bracket'),
(r'\]"', String, '#pop'),
],
'delimited_inside_bracket': [
(r'[^\[\]]+', String),
(r'\[', String, '#push'),
(r'\]', String, '#pop'),
],
'delimited_parenthesis': [
(r'[^()]+', String),
(r'\(', String, 'delimited_inside_parenthesis'),
(r'\)"', String, '#pop'),
],
'delimited_inside_parenthesis': [
(r'[^()]+', String),
(r'\(', String, '#push'),
(r'\)', String, '#pop'),
],
'delimited_angle': [
(r'[^<>]+', String),
(r'<', String, 'delimited_inside_angle'),
(r'>"', String, '#pop'),
],
'delimited_inside_angle': [
(r'[^<>]+', String),
(r'<', String, '#push'),
(r'>', String, '#pop'),
],
'delimited_curly': [
(r'[^{}]+', String),
(r'\{', String, 'delimited_inside_curly'),
(r'\}"', String, '#pop'),
],
'delimited_inside_curly': [
(r'[^{}]+', String),
(r'\{', String, '#push'),
(r'\}', String, '#pop'),
],
}
class CrocLexer(RegexLexer):
"""
For `Croc <http://jfbillingsley.com/croc>`_ source.
"""
name = 'Croc'
filenames = ['*.croc']
aliases = ['croc']
mimetypes = ['text/x-crocsrc']
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
# Comments
(r'//(.*?)\n', Comment.Single),
(r'/\*', Comment.Multiline, 'nestedcomment'),
# Keywords
(words((
'as', 'assert', 'break', 'case', 'catch', 'class', 'continue',
'default', 'do', 'else', 'finally', 'for', 'foreach', 'function',
'global', 'namespace', 'if', 'import', 'in', 'is', 'local',
'module', 'return', 'scope', 'super', 'switch', 'this', 'throw',
'try', 'vararg', 'while', 'with', 'yield'), suffix=r'\b'),
Keyword),
(r'(false|true|null)\b', Keyword.Constant),
# FloatLiteral
(r'([0-9][0-9_]*)(?=[.eE])(\.[0-9][0-9_]*)?([eE][+\-]?[0-9_]+)?',
Number.Float),
# IntegerLiteral
# -- Binary
(r'0[bB][01][01_]*', Number.Bin),
# -- Hexadecimal
(r'0[xX][0-9a-fA-F][0-9a-fA-F_]*', Number.Hex),
# -- Decimal
(r'([0-9][0-9_]*)(?![.eE])', Number.Integer),
# CharacterLiteral
(r"""'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-9]{1,3}"""
r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|.)'""",
String.Char),
# StringLiteral
# -- WysiwygString
(r'@"(""|[^"])*"', String),
(r'@`(``|[^`])*`', String),
(r"@'(''|[^'])*'", String),
# -- DoubleQuotedString
(r'"(\\\\|\\"|[^"])*"', String),
# Tokens
(r'(~=|\^=|%=|\*=|==|!=|>>>=|>>>|>>=|>>|>=|<=>|\?=|-\>'
r'|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.|/=)'
r'|[-/.&$@|\+<>!()\[\]{}?,;:=*%^~#\\]', Punctuation),
# Identifier
(r'[a-zA-Z_]\w*', Name),
],
'nestedcomment': [
(r'[^*/]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
}
class MiniDLexer(CrocLexer):
"""
For MiniD source. MiniD is now known as Croc.
"""
name = 'MiniD'
filenames = [] # don't lex .md as MiniD, reserve for Markdown
aliases = ['minid']
mimetypes = ['text/x-minidsrc']
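# Sketch (assumed, not in the original file): MiniDLexer changes only the
# metadata, so both lexers produce identical token streams for the same input.
_src = 'function main() { return 42 }\n'
assert list(MiniDLexer().get_tokens(_src)) == list(CrocLexer().get_tokens(_src))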


@@ -1,125 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.dalvik
~~~~~~~~~~~~~~~~~~~~~~
Pygments lexers for Dalvik VM-related languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Keyword, Text, Comment, Name, String, Number, \
Punctuation
__all__ = ['SmaliLexer']
class SmaliLexer(RegexLexer):
"""
For `Smali <http://code.google.com/p/smali/>`_ (Android/Dalvik) assembly
code.
.. versionadded:: 1.6
"""
name = 'Smali'
aliases = ['smali']
filenames = ['*.smali']
mimetypes = ['text/smali']
tokens = {
'root': [
include('comment'),
include('label'),
include('field'),
include('method'),
include('class'),
include('directive'),
include('access-modifier'),
include('instruction'),
include('literal'),
include('punctuation'),
include('type'),
include('whitespace')
],
'directive': [
(r'^[ \t]*\.(class|super|implements|field|subannotation|annotation|'
r'enum|method|registers|locals|array-data|packed-switch|'
r'sparse-switch|catchall|catch|line|parameter|local|prologue|'
r'epilogue|source)', Keyword),
(r'^[ \t]*\.end (field|subannotation|annotation|method|array-data|'
'packed-switch|sparse-switch|parameter|local)', Keyword),
(r'^[ \t]*\.restart local', Keyword),
],
'access-modifier': [
(r'(public|private|protected|static|final|synchronized|bridge|'
r'varargs|native|abstract|strictfp|synthetic|constructor|'
r'declared-synchronized|interface|enum|annotation|volatile|'
r'transient)', Keyword),
],
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
],
'instruction': [
(r'\b[vp]\d+\b', Name.Builtin), # registers
(r'\b[a-z][A-Za-z0-9/-]+\s+', Text), # instructions
],
'literal': [
(r'".*"', String),
(r'0x[0-9A-Fa-f]+t?', Number.Hex),
(r'[0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'[0-9]+L?', Number.Integer),
],
'field': [
(r'(\$?\b)([\w$]*)(:)',
bygroups(Punctuation, Name.Variable, Punctuation)),
],
'method': [
(r'<(?:cl)?init>', Name.Function), # <init> constructor / <clinit> static initializer
(r'(\$?\b)([\w$]*)(\()',
bygroups(Punctuation, Name.Function, Punctuation)),
],
'label': [
(r':\w+', Name.Label),
],
'class': [
# class names in the form Lcom/namespace/ClassName;
# I only want to color the ClassName part, so the namespace part is
# treated as 'Text'
(r'(L)((?:[\w$]+/)*)([\w$]+)(;)',
bygroups(Keyword.Type, Text, Name.Class, Text)),
],
'punctuation': [
(r'->', Punctuation),
(r'[{},():=.-]', Punctuation),
],
'type': [
(r'[ZBSCIJFDV\[]+', Keyword.Type),
],
'comment': [
(r'#.*?\n', Comment),
],
}
def analyse_text(text):
score = 0
if re.search(r'^\s*\.class\s', text, re.MULTILINE):
score += 0.5
if re.search(r'\b((check-cast|instance-of|throw-verification-error'
r')\b|(-to|add|[ais]get|[ais]put|and|cmpl|const|div|'
r'if|invoke|move|mul|neg|not|or|rem|return|rsub|shl|'
r'shr|sub|ushr)[-/])|{|}', text, re.MULTILINE):
score += 0.3
if re.search(r'(\.(catchall|epilogue|restart local|prologue)|'
r'\b(array-data|class-change-error|declared-synchronized|'
r'(field|inline|vtable)@0x[0-9a-fA-F]|generic-error|'
r'illegal-class-access|illegal-field-access|'
r'illegal-method-access|instantiation-error|no-error|'
r'no-such-class|no-such-field|no-such-method|'
r'packed-switch|sparse-switch))\b', text, re.MULTILINE):
score += 0.6
return score
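# Sketch of how analyse_text is consumed (assumed usage, not original code):
# guess_lexer scores every registered lexer, and the directives matched above
# are expected to make the Smali lexer win for a typical .smali snippet.
from pygments.lexers import guess_lexer

snippet = '.class public Lcom/example/Foo;\n.super Ljava/lang/Object;\n'
print(guess_lexer(snippet).name)  # expected: 'Smali'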


@@ -1,530 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.data
~~~~~~~~~~~~~~~~~~~~
Lexers for data file format.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, LexerContext, \
include, bygroups, inherit
from pygments.token import Text, Comment, Keyword, Name, String, Number, \
Punctuation, Literal
__all__ = ['YamlLexer', 'JsonLexer', 'JsonLdLexer']
class YamlLexerContext(LexerContext):
"""Indentation context for the YAML lexer."""
def __init__(self, *args, **kwds):
super(YamlLexerContext, self).__init__(*args, **kwds)
self.indent_stack = []
self.indent = -1
self.next_indent = 0
self.block_scalar_indent = None
class YamlLexer(ExtendedRegexLexer):
"""
Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization
language.
.. versionadded:: 0.11
"""
name = 'YAML'
aliases = ['yaml']
filenames = ['*.yaml', '*.yml']
mimetypes = ['text/x-yaml']
def something(token_class):
"""Do not produce empty tokens."""
def callback(lexer, match, context):
text = match.group()
if not text:
return
yield match.start(), token_class, text
context.pos = match.end()
return callback
def reset_indent(token_class):
"""Reset the indentation levels."""
def callback(lexer, match, context):
text = match.group()
context.indent_stack = []
context.indent = -1
context.next_indent = 0
context.block_scalar_indent = None
yield match.start(), token_class, text
context.pos = match.end()
return callback
def save_indent(token_class, start=False):
"""Save a possible indentation level."""
def callback(lexer, match, context):
text = match.group()
extra = ''
if start:
context.next_indent = len(text)
if context.next_indent < context.indent:
while context.next_indent < context.indent:
context.indent = context.indent_stack.pop()
if context.next_indent > context.indent:
extra = text[context.indent:]
text = text[:context.indent]
else:
context.next_indent += len(text)
if text:
yield match.start(), token_class, text
if extra:
yield match.start()+len(text), token_class.Error, extra
context.pos = match.end()
return callback
def set_indent(token_class, implicit=False):
"""Set the previously saved indentation level."""
def callback(lexer, match, context):
text = match.group()
if context.indent < context.next_indent:
context.indent_stack.append(context.indent)
context.indent = context.next_indent
if not implicit:
context.next_indent += len(text)
yield match.start(), token_class, text
context.pos = match.end()
return callback
def set_block_scalar_indent(token_class):
"""Set an explicit indentation level for a block scalar."""
def callback(lexer, match, context):
text = match.group()
context.block_scalar_indent = None
if not text:
return
increment = match.group(1)
if increment:
current_indent = max(context.indent, 0)
increment = int(increment)
context.block_scalar_indent = current_indent + increment
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_block_scalar_empty_line(indent_token_class, content_token_class):
"""Process an empty line in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if (context.block_scalar_indent is None or
len(text) <= context.block_scalar_indent):
if text:
yield match.start(), indent_token_class, text
else:
indentation = text[:context.block_scalar_indent]
content = text[context.block_scalar_indent:]
yield match.start(), indent_token_class, indentation
yield (match.start()+context.block_scalar_indent,
content_token_class, content)
context.pos = match.end()
return callback
def parse_block_scalar_indent(token_class):
"""Process indentation spaces in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if context.block_scalar_indent is None:
if len(text) <= max(context.indent, 0):
context.stack.pop()
context.stack.pop()
return
context.block_scalar_indent = len(text)
else:
if len(text) < context.block_scalar_indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_plain_scalar_indent(token_class):
"""Process indentation spaces in a plain scalar."""
def callback(lexer, match, context):
text = match.group()
if len(text) <= context.indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
tokens = {
# the root rules
'root': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text),
# line breaks
(r'\n+', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# the '%YAML' directive
(r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'),
# the %TAG directive
(r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'),
# document start and document end indicators
(r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace),
'block-line'),
# indentation spaces
(r'[ ]*(?!\s|$)', save_indent(Text, start=True),
('block-line', 'indentation')),
],
# trailing whitespaces after directives or a block scalar indicator
'ignored-line': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# line break
(r'\n', Text, '#pop:2'),
],
# the %YAML directive
'yaml-directive': [
# the version number
(r'([ ]+)([0-9]+\.[0-9]+)',
bygroups(Text, Number), 'ignored-line'),
],
# the %TAG directive
'tag-directive': [
# a tag handle and the corresponding prefix
(r'([ ]+)(!|![\w-]*!)'
r'([ ]+)(!|!?[\w;/?:@&=+$,.!~*\'()\[\]%-]+)',
bygroups(Text, Keyword.Type, Text, Keyword.Type),
'ignored-line'),
],
# block scalar indicators and indentation spaces
'indentation': [
# trailing whitespaces are ignored
(r'[ ]*$', something(Text), '#pop:2'),
# whitespaces preceding block collection indicators
(r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)),
# block collection indicators
(r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),
# the beginning of a block line
(r'[ ]*', save_indent(Text), '#pop'),
],
# an indented line in the block context
'block-line': [
# the line end
(r'[ ]*(?=#|$)', something(Text), '#pop'),
# whitespaces separating tokens
(r'[ ]+', Text),
# tags, anchors and aliases,
include('descriptors'),
# block collections and scalars
include('block-nodes'),
# flow collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`-]|[?:-]\S)',
something(Name.Variable),
'plain-scalar-in-block-context'),
],
# tags, anchors, aliases
'descriptors': [
# a full-form tag
(r'!<[\w;/?:@&=+$,.!~*\'()\[\]%-]+>', Keyword.Type),
# a tag in the form '!', '!suffix' or '!handle!suffix'
(r'!(?:[\w-]+)?'
r'(?:![\w;/?:@&=+$,.!~*\'()\[\]%-]+)?', Keyword.Type),
# an anchor
(r'&[\w-]+', Name.Label),
# an alias
(r'\*[\w-]+', Name.Variable),
],
# block collections and scalars
'block-nodes': [
# implicit key
(r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)),
# literal and folded scalars
(r'[|>]', Punctuation.Indicator,
('block-scalar-content', 'block-scalar-header')),
],
# flow collections and quoted scalars
'flow-nodes': [
# a flow sequence
(r'\[', Punctuation.Indicator, 'flow-sequence'),
# a flow mapping
(r'\{', Punctuation.Indicator, 'flow-mapping'),
# a single-quoted scalar
(r'\'', String, 'single-quoted-scalar'),
# a double-quoted scalar
(r'\"', String, 'double-quoted-scalar'),
],
# the content of a flow collection
'flow-collection': [
# whitespaces
(r'[ ]+', Text),
# line breaks
(r'\n+', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# simple indicators
(r'[?:,]', Punctuation.Indicator),
# tags, anchors and aliases
include('descriptors'),
# nested collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`])',
something(Name.Variable),
'plain-scalar-in-flow-context'),
],
# a flow sequence indicated by '[' and ']'
'flow-sequence': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\]', Punctuation.Indicator, '#pop'),
],
# a flow mapping indicated by '{' and '}'
'flow-mapping': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\}', Punctuation.Indicator, '#pop'),
],
# block scalar lines
'block-scalar-content': [
# line break
(r'\n', Text),
# empty line
(r'^[ ]+$',
parse_block_scalar_empty_line(Text, Name.Constant)),
# indentation spaces (we may leave the state here)
(r'^[ ]*', parse_block_scalar_indent(Text)),
# line content
(r'[\S\t ]+', Name.Constant),
],
# the content of a literal or folded scalar
'block-scalar-header': [
# indentation indicator followed by chomping flag
(r'([1-9])?[+-]?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
# chomping flag followed by indentation indicator
(r'[+-]?([1-9])?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
],
# ignored and regular whitespaces in quoted scalars
'quoted-scalar-whitespaces': [
# leading and trailing whitespaces are ignored
(r'^[ ]+', Text),
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text),
# other whitespaces are a part of the value
(r'[ ]+', Name.Variable),
],
# single-quoted scalars
'single-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of the quote character
(r'\'\'', String.Escape),
# regular non-whitespace characters
(r'[^\s\']+', String),
# the closing quote
(r'\'', String, '#pop'),
],
# double-quoted scalars
'double-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of special characters
(r'\\[0abt\tn\nvfre "\\N_LP]', String),
# escape codes
(r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
String.Escape),
# regular non-whitespace characters
(r'[^\s"\\]+', String),
# the closing quote
(r'"', String, '#pop'),
],
# the beginning of a new line while scanning a plain scalar
'plain-scalar-in-block-context-new-line': [
# empty lines
(r'^[ ]+$', Text),
# line breaks
(r'\n+', Text),
# document start and document end indicators
(r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'),
# indentation spaces (we may leave the block line state here)
(r'^[ ]*', parse_plain_scalar_indent(Text), '#pop'),
],
# a plain scalar in the block context
'plain-scalar-in-block-context': [
# the scalar ends with the ':' indicator
(r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'),
# the scalar ends with whitespaces followed by a comment
(r'[ ]+(?=#)', Text, '#pop'),
# trailing whitespaces are ignored
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text, 'plain-scalar-in-block-context-new-line'),
# other whitespaces are a part of the value
(r'[ ]+', Literal.Scalar.Plain),
# regular non-whitespace characters
(r'(?::(?!\s)|[^\s:])+', Literal.Scalar.Plain),
],
# a plain scalar in the flow context
'plain-scalar-in-flow-context': [
# the scalar ends with an indicator character
(r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'),
# the scalar ends with a comment
(r'[ ]+(?=#)', Text, '#pop'),
# leading and trailing whitespaces are ignored
(r'^[ ]+', Text),
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text),
# other whitespaces are a part of the value
(r'[ ]+', Name.Variable),
# regular non-whitespace characters
(r'[^\s,:?\[\]{}]+', Name.Variable),
],
}
def get_tokens_unprocessed(self, text=None, context=None):
if context is None:
context = YamlLexerContext(text, 0)
return super(YamlLexer, self).get_tokens_unprocessed(text, context)
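# Usage sketch (assumed): callers never build YamlLexerContext themselves;
# get_tokens_unprocessed above creates one, so the normal API just works.
yaml_text = 'name: demo\nitems:\n  - one\n  - two\n'
for token_type, value in YamlLexer().get_tokens(yaml_text):
    print(token_type, repr(value))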
class JsonLexer(RegexLexer):
"""
For JSON data structures.
.. versionadded:: 1.5
"""
name = 'JSON'
aliases = ['json']
filenames = ['*.json']
mimetypes = ['application/json']
flags = re.DOTALL
# integer part of a number
int_part = r'-?(0|[1-9]\d*)'
# fractional part of a number
frac_part = r'\.\d+'
# exponential part of a number
exp_part = r'[eE](\+|-)?\d+'
tokens = {
'whitespace': [
(r'\s+', Text),
],
# represents a simple terminal value
'simplevalue': [
(r'(true|false|null)\b', Keyword.Constant),
(('%(int_part)s(%(frac_part)s%(exp_part)s|'
'%(exp_part)s|%(frac_part)s)') % vars(),
Number.Float),
(int_part, Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
],
# the right hand side of an object, after the attribute name
'objectattribute': [
include('value'),
(r':', Punctuation),
# comma terminates the attribute but expects more
(r',', Punctuation, '#pop'),
# a closing brace terminates the entire object, so pop twice
(r'\}', Punctuation, ('#pop', '#pop')),
],
# a json object - { attr, attr, ... }
'objectvalue': [
include('whitespace'),
(r'"(\\\\|\\"|[^"])*"', Name.Tag, 'objectattribute'),
(r'\}', Punctuation, '#pop'),
],
# json array - [ value, value, ... ]
'arrayvalue': [
include('whitespace'),
include('value'),
(r',', Punctuation),
(r'\]', Punctuation, '#pop'),
],
# a json value - either a simple value or a complex value (object or array)
'value': [
include('whitespace'),
include('simplevalue'),
(r'\{', Punctuation, 'objectvalue'),
(r'\[', Punctuation, 'arrayvalue'),
],
# the root of a json document should be a value
'root': [
include('value'),
],
}
class JsonLdLexer(JsonLexer):
"""
For `JSON-LD <http://json-ld.org/>`_ linked data.
.. versionadded:: 2.0
"""
name = 'JSON-LD'
aliases = ['jsonld', 'json-ld']
filenames = ['*.jsonld']
mimetypes = ['application/ld+json']
tokens = {
'objectvalue': [
(r'"@(context|id|value|language|type|container|list|set|'
r'reverse|index|base|vocab|graph)"', Name.Decorator,
'objectattribute'),
inherit,
],
}
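# Sketch (assumed usage): `inherit` splices the parent JsonLexer rules in
# after the JSON-LD keyword rule, so '@context' lexes as Name.Decorator
# while ordinary keys still lex as Name.Tag.
doc = '{"@context": "http://schema.org/", "name": "demo"}'
for token_type, value in JsonLdLexer().get_tokens(doc):
    print(token_type, repr(value))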


@@ -1,106 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.diff
~~~~~~~~~~~~~~~~~~~~
Lexers for diff/patch formats.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, Generic, \
Literal
__all__ = ['DiffLexer', 'DarcsPatchLexer']
class DiffLexer(RegexLexer):
"""
Lexer for unified or context-style diffs or patches.
"""
name = 'Diff'
aliases = ['diff', 'udiff']
filenames = ['*.diff', '*.patch']
mimetypes = ['text/x-diff', 'text/x-patch']
tokens = {
'root': [
(r' .*\n', Text),
(r'\+.*\n', Generic.Inserted),
(r'-.*\n', Generic.Deleted),
(r'!.*\n', Generic.Strong),
(r'@.*\n', Generic.Subheading),
(r'([Ii]ndex|diff).*\n', Generic.Heading),
(r'=.*\n', Generic.Heading),
(r'.*\n', Text),
]
}
def analyse_text(text):
if text[:7] == 'Index: ':
return True
if text[:5] == 'diff ':
return True
if text[:4] == '--- ':
return 0.9
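# Sketch (assumed usage): analyse_text above returns True for 'Index: ' and
# 'diff ' prefixes, so guess_lexer is expected to pick DiffLexer for patches.
from pygments.lexers import guess_lexer

patch = 'diff --git a/f b/f\n--- a/f\n+++ b/f\n@@ -1 +1 @@\n-old\n+new\n'
print(guess_lexer(patch).name)  # expected: 'Diff'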
class DarcsPatchLexer(RegexLexer):
"""
DarcsPatchLexer is a lexer for the various versions of the darcs patch
format. Examples of this format are produced by commands such as
``darcs annotate --patch`` and ``darcs send``.
.. versionadded:: 0.10
"""
name = 'Darcs Patch'
aliases = ['dpatch']
filenames = ['*.dpatch', '*.darcspatch']
DPATCH_KEYWORDS = ('hunk', 'addfile', 'adddir', 'rmfile', 'rmdir', 'move',
'replace')
tokens = {
'root': [
(r'<', Operator),
(r'>', Operator),
(r'\{', Operator),
(r'\}', Operator),
(r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)(\])',
bygroups(Operator, Keyword, Name, Text, Name, Operator,
Literal.Date, Text, Operator)),
(r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)',
bygroups(Operator, Keyword, Name, Text, Name, Operator,
Literal.Date, Text), 'comment'),
(r'New patches:', Generic.Heading),
(r'Context:', Generic.Heading),
(r'Patch bundle hash:', Generic.Heading),
(r'(\s*)(%s)(.*\n)' % '|'.join(DPATCH_KEYWORDS),
bygroups(Text, Keyword, Text)),
(r'\+', Generic.Inserted, "insert"),
(r'-', Generic.Deleted, "delete"),
(r'.*\n', Text),
],
'comment': [
(r'[^\]].*\n', Comment),
(r'\]', Operator, "#pop"),
],
'specialText': [ # darcs add [_CODE_] special operators for clarity
(r'\n', Text, "#pop"), # line-based
(r'\[_[^_]*_]', Operator),
],
'insert': [
include('specialText'),
(r'\[', Generic.Inserted),
(r'[^\n\[]+', Generic.Inserted),
],
'delete': [
include('specialText'),
(r'\[', Generic.Deleted),
(r'[^\n\[]+', Generic.Deleted),
],
}


@@ -1,689 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.dotnet
~~~~~~~~~~~~~~~~~~~~~~
Lexers for .net languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, include, \
using, this, default
from pygments.token import Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Literal, Other
from pygments.util import get_choice_opt, iteritems
from pygments import unistring as uni
from pygments.lexers.html import XmlLexer
__all__ = ['CSharpLexer', 'NemerleLexer', 'BooLexer', 'VbNetLexer',
'CSharpAspxLexer', 'VbNetAspxLexer', 'FSharpLexer']
class CSharpLexer(RegexLexer):
"""
For `C# <http://msdn2.microsoft.com/en-us/vcsharp/default.aspx>`_
source code.
Additional options accepted:
`unicodelevel`
Determines which Unicode characters this lexer allows for identifiers.
The possible values are:
* ``none`` -- only the ASCII letters and numbers are allowed. This
is the fastest selection.
* ``basic`` -- all Unicode characters from the specification except
category ``Lo`` are allowed.
* ``full`` -- all Unicode characters as specified in the C# specs
are allowed. Note that this means a considerable slowdown since the
``Lo`` category has more than 40,000 characters in it!
The default value is ``basic``.
.. versionadded:: 0.8
"""
name = 'C#'
aliases = ['csharp', 'c#']
filenames = ['*.cs']
mimetypes = ['text/x-csharp'] # inferred
flags = re.MULTILINE | re.DOTALL | re.UNICODE
# for the range of allowed unicode characters in identifiers, see
# http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
levels = {
'none': '@?[_a-zA-Z]\w*',
'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
'[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
'Cf', 'Mn', 'Mc') + ']*'),
'full': ('@?(?:_|[^' +
uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])'
+ '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
}
tokens = {}
token_variants = True
for levelname, cs_ident in iteritems(levels):
tokens[levelname] = {
'root': [
# method names
(r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type
r'(' + cs_ident + ')' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Punctuation)),
(r'^\s*\[.*?\]', Name.Attribute),
(r'[^\S\n]+', Text),
(r'\\\n', Text), # line continuation
(r'//.*?\n', Comment.Single),
(r'/[*].*?[*]/', Comment.Multiline),
(r'\n', Text),
(r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
(r'[{}]', Punctuation),
(r'@"(""|[^"])*"', String),
(r'"(\\\\|\\"|[^"\n])*["\n]', String),
(r"'\\.'|'[^\\]'", String.Char),
(r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?"
r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number),
(r'#[ \t]*(if|endif|else|elif|define|undef|'
r'line|error|warning|region|endregion|pragma)\b.*?\n',
Comment.Preproc),
(r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
Keyword)),
(r'(abstract|as|async|await|base|break|case|catch|'
r'checked|const|continue|default|delegate|'
r'do|else|enum|event|explicit|extern|false|finally|'
r'fixed|for|foreach|goto|if|implicit|in|interface|'
r'internal|is|lock|new|null|operator|'
r'out|override|params|private|protected|public|readonly|'
r'ref|return|sealed|sizeof|stackalloc|static|'
r'switch|this|throw|true|try|typeof|'
r'unchecked|unsafe|virtual|void|while|'
r'get|set|new|partial|yield|add|remove|value|alias|ascending|'
r'descending|from|group|into|orderby|select|where|'
r'join|equals)\b', Keyword),
(r'(global)(::)', bygroups(Keyword, Punctuation)),
(r'(bool|byte|char|decimal|double|dynamic|float|int|long|object|'
r'sbyte|short|string|uint|ulong|ushort|var)\b\??', Keyword.Type),
(r'(class|struct)(\s+)', bygroups(Keyword, Text), 'class'),
(r'(namespace|using)(\s+)', bygroups(Keyword, Text), 'namespace'),
(cs_ident, Name),
],
'class': [
(cs_ident, Name.Class, '#pop'),
default('#pop'),
],
'namespace': [
(r'(?=\()', Text, '#pop'), # using (resource)
('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop'),
]
}
def __init__(self, **options):
level = get_choice_opt(options, 'unicodelevel', list(self.tokens), 'basic')
if level not in self._all_tokens:
# compile the regexes now
self._tokens = self.__class__.process_tokendef(level)
else:
self._tokens = self._all_tokens[level]
RegexLexer.__init__(self, **options)
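# Sketch of the `unicodelevel` option documented in the docstring above
# (assumed usage): 'none' is fastest, 'full' allows the whole C# identifier
# range at a noticeable speed cost.
ascii_lexer = CSharpLexer(unicodelevel='none')
unicode_lexer = CSharpLexer(unicodelevel='full')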
class NemerleLexer(RegexLexer):
"""
For `Nemerle <http://nemerle.org>`_ source code.
Additional options accepted:
`unicodelevel`
Determines which Unicode characters this lexer allows for identifiers.
The possible values are:
* ``none`` -- only the ASCII letters and numbers are allowed. This
is the fastest selection.
* ``basic`` -- all Unicode characters from the specification except
category ``Lo`` are allowed.
* ``full`` -- all Unicode characters as specified in the C# specs
are allowed. Note that this means a considerable slowdown since the
``Lo`` category has more than 40,000 characters in it!
The default value is ``basic``.
.. versionadded:: 1.5
"""
name = 'Nemerle'
aliases = ['nemerle']
filenames = ['*.n']
mimetypes = ['text/x-nemerle'] # inferred
flags = re.MULTILINE | re.DOTALL | re.UNICODE
# for the range of allowed unicode characters in identifiers, see
# http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
levels = {
'none': '@?[_a-zA-Z]\w*',
'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
'[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
'Cf', 'Mn', 'Mc') + ']*'),
'full': ('@?(?:_|[^' +
uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])'
+ '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
}
tokens = {}
token_variants = True
for levelname, cs_ident in iteritems(levels):
tokens[levelname] = {
'root': [
# method names
(r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type
r'(' + cs_ident + ')' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Punctuation)),
(r'^\s*\[.*?\]', Name.Attribute),
(r'[^\S\n]+', Text),
(r'\\\n', Text), # line continuation
(r'//.*?\n', Comment.Single),
(r'/[*].*?[*]/', Comment.Multiline),
(r'\n', Text),
(r'\$\s*"', String, 'splice-string'),
(r'\$\s*<#', String, 'splice-string2'),
(r'<#', String, 'recursive-string'),
(r'(<\[)\s*(' + cs_ident + ':)?', Keyword),
(r'\]\>', Keyword),
# quasiquotation only
(r'\$' + cs_ident, Name),
(r'(\$)(\()', bygroups(Name, Punctuation),
'splice-string-content'),
(r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
(r'[{}]', Punctuation),
(r'@"(""|[^"])*"', String),
(r'"(\\\\|\\"|[^"\n])*["\n]', String),
(r"'\\.'|'[^\\]'", String.Char),
(r"0[xX][0-9a-fA-F]+[Ll]?", Number),
(r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFLdD]?", Number),
(r'#[ \t]*(if|endif|else|elif|define|undef|'
r'line|error|warning|region|endregion|pragma)\b.*?\n',
Comment.Preproc),
(r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
Keyword)),
(r'(abstract|and|as|base|catch|def|delegate|'
r'enum|event|extern|false|finally|'
r'fun|implements|interface|internal|'
r'is|macro|match|matches|module|mutable|new|'
r'null|out|override|params|partial|private|'
r'protected|public|ref|sealed|static|'
r'syntax|this|throw|true|try|type|typeof|'
r'virtual|volatile|when|where|with|'
r'assert|assert2|async|break|checked|continue|do|else|'
r'ensures|for|foreach|if|late|lock|new|nolate|'
r'otherwise|regexp|repeat|requires|return|surroundwith|'
r'unchecked|unless|using|while|yield)\b', Keyword),
(r'(global)(::)', bygroups(Keyword, Punctuation)),
(r'(bool|byte|char|decimal|double|float|int|long|object|sbyte|'
r'short|string|uint|ulong|ushort|void|array|list)\b\??',
Keyword.Type),
(r'(:>?)\s*(' + cs_ident + r'\??)',
bygroups(Punctuation, Keyword.Type)),
(r'(class|struct|variant|module)(\s+)',
bygroups(Keyword, Text), 'class'),
(r'(namespace|using)(\s+)', bygroups(Keyword, Text),
'namespace'),
(cs_ident, Name),
],
'class': [
(cs_ident, Name.Class, '#pop')
],
'namespace': [
(r'(?=\()', Text, '#pop'), # using (resource)
('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
],
'splice-string': [
(r'[^"$]', String),
(r'\$' + cs_ident, Name),
(r'(\$)(\()', bygroups(Name, Punctuation),
'splice-string-content'),
(r'\\"', String),
(r'"', String, '#pop')
],
'splice-string2': [
(r'[^#<>$]', String),
(r'\$' + cs_ident, Name),
(r'(\$)(\()', bygroups(Name, Punctuation),
'splice-string-content'),
(r'<#', String, '#push'),
(r'#>', String, '#pop')
],
'recursive-string': [
(r'[^#<>]', String),
(r'<#', String, '#push'),
(r'#>', String, '#pop')
],
'splice-string-content': [
(r'if|match', Keyword),
(r'[~!%^&*+=|\[\]:;,.<>/?\\"$ -]', Punctuation),
(cs_ident, Name),
(r'\d+', Number),
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop')
]
}
def __init__(self, **options):
level = get_choice_opt(options, 'unicodelevel', list(self.tokens),
'basic')
if level not in self._all_tokens:
# compile the regexes now
self._tokens = self.__class__.process_tokendef(level)
else:
self._tokens = self._all_tokens[level]
RegexLexer.__init__(self, **options)
class BooLexer(RegexLexer):
"""
For `Boo <http://boo.codehaus.org/>`_ source code.
"""
name = 'Boo'
aliases = ['boo']
filenames = ['*.boo']
mimetypes = ['text/x-boo']
tokens = {
'root': [
(r'\s+', Text),
(r'(#|//).*$', Comment.Single),
(r'/[*]', Comment.Multiline, 'comment'),
(r'[]{}:(),.;[]', Punctuation),
(r'\\\n', Text),
(r'\\', Text),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'/(\\\\|\\/|[^/\s])/', String.Regex),
(r'@/(\\\\|\\/|[^/])*/', String.Regex),
(r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator),
(r'(as|abstract|callable|constructor|destructor|do|import|'
r'enum|event|final|get|interface|internal|of|override|'
r'partial|private|protected|public|return|set|static|'
r'struct|transient|virtual|yield|super|and|break|cast|'
r'continue|elif|else|ensure|except|for|given|goto|if|in|'
r'is|isa|not|or|otherwise|pass|raise|ref|try|unless|when|'
r'while|from|as)\b', Keyword),
(r'def(?=\s+\(.*?\))', Keyword),
(r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(namespace)(\s+)', bygroups(Keyword, Text), 'namespace'),
(r'(?<!\.)(true|false|null|self|__eval__|__switch__|array|'
r'assert|checked|enumerate|filter|getter|len|lock|map|'
r'matrix|max|min|normalArrayIndexing|print|property|range|'
r'rawArrayIndexing|required|typeof|unchecked|using|'
r'yieldAll|zip)\b', Name.Builtin),
(r'"""(\\\\|\\"|.*?)"""', String.Double),
(r'"(\\\\|\\"|[^"]*?)"', String.Double),
(r"'(\\\\|\\'|[^']*?)'", String.Single),
(r'[a-zA-Z_]\w*', Name),
(r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float),
(r'[0-9][0-9.]*(ms?|d|h|s)', Number),
(r'0\d+', Number.Oct),
(r'0x[a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer),
],
'comment': [
('/[*]', Comment.Multiline, '#push'),
('[*]/', Comment.Multiline, '#pop'),
('[^/*]', Comment.Multiline),
('[*/]', Comment.Multiline)
],
'funcname': [
('[a-zA-Z_]\w*', Name.Function, '#pop')
],
'classname': [
('[a-zA-Z_]\w*', Name.Class, '#pop')
],
'namespace': [
('[a-zA-Z_][\w.]*', Name.Namespace, '#pop')
]
}
class VbNetLexer(RegexLexer):
"""
For
`Visual Basic.NET <http://msdn2.microsoft.com/en-us/vbasic/default.aspx>`_
source code.
"""
name = 'VB.net'
aliases = ['vb.net', 'vbnet']
filenames = ['*.vb', '*.bas']
mimetypes = ['text/x-vbnet', 'text/x-vba'] # (?)
uni_name = '[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' + \
'[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
'Cf', 'Mn', 'Mc') + ']*'
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'^\s*<.*?>', Name.Attribute),
(r'\s+', Text),
(r'\n', Text),
(r'rem\b.*?\n', Comment),
(r"'.*?\n", Comment),
(r'#If\s.*?\sThen|#ElseIf\s.*?\sThen|#Else|#End\s+If|#Const|'
r'#ExternalSource.*?\n|#End\s+ExternalSource|'
r'#Region.*?\n|#End\s+Region|#ExternalChecksum',
Comment.Preproc),
(r'[(){}!#,.:]', Punctuation),
(r'Option\s+(Strict|Explicit|Compare)\s+'
r'(On|Off|Binary|Text)', Keyword.Declaration),
(r'(?<!\.)(AddHandler|Alias|'
r'ByRef|ByVal|Call|Case|Catch|CBool|CByte|CChar|CDate|'
r'CDec|CDbl|CInt|CLng|CObj|Continue|CSByte|CShort|'
r'CSng|CStr|CType|CUInt|CULng|CUShort|Declare|'
r'Default|Delegate|DirectCast|Do|Each|Else|ElseIf|'
r'EndIf|Erase|Error|Event|Exit|False|Finally|For|'
r'Friend|Get|Global|GoSub|GoTo|Handles|If|'
r'Implements|Inherits|Interface|'
r'Let|Lib|Loop|Me|MustInherit|'
r'MustOverride|MyBase|MyClass|Narrowing|New|Next|'
r'Not|Nothing|NotInheritable|NotOverridable|Of|On|'
r'Operator|Option|Optional|Overloads|Overridable|'
r'Overrides|ParamArray|Partial|Private|Protected|'
r'Public|RaiseEvent|ReadOnly|ReDim|RemoveHandler|Resume|'
r'Return|Select|Set|Shadows|Shared|Single|'
r'Static|Step|Stop|SyncLock|Then|'
r'Throw|To|True|Try|TryCast|Wend|'
r'Using|When|While|Widening|With|WithEvents|'
r'WriteOnly)\b', Keyword),
(r'(?<!\.)End\b', Keyword, 'end'),
(r'(?<!\.)(Dim|Const)\b', Keyword, 'dim'),
(r'(?<!\.)(Function|Sub|Property)(\s+)',
bygroups(Keyword, Text), 'funcname'),
(r'(?<!\.)(Class|Structure|Enum)(\s+)',
bygroups(Keyword, Text), 'classname'),
(r'(?<!\.)(Module|Namespace|Imports)(\s+)',
bygroups(Keyword, Text), 'namespace'),
(r'(?<!\.)(Boolean|Byte|Char|Date|Decimal|Double|Integer|Long|'
r'Object|SByte|Short|Single|String|Variant|UInteger|ULong|'
r'UShort)\b', Keyword.Type),
(r'(?<!\.)(AddressOf|And|AndAlso|As|GetType|In|Is|IsNot|Like|Mod|'
r'Or|OrElse|TypeOf|Xor)\b', Operator.Word),
(r'&=|[*]=|/=|\\=|\^=|\+=|-=|<<=|>>=|<<|>>|:=|'
r'<=|>=|<>|[-&*/\\^+=<>\[\]]',
Operator),
('"', String, 'string'),
(r'_\n', Text), # Line continuation (must be before Name)
(uni_name + '[%&@!#$]?', Name),
('#.*?#', Literal.Date),
(r'(\d+\.\d*|\d*\.\d+)(F[+-]?[0-9]+)?', Number.Float),
(r'\d+([SILDFR]|US|UI|UL)?', Number.Integer),
(r'&H[0-9a-f]+([SILDFR]|US|UI|UL)?', Number.Integer),
(r'&O[0-7]+([SILDFR]|US|UI|UL)?', Number.Integer),
],
'string': [
(r'""', String),
(r'"C?', String, '#pop'),
(r'[^"]+', String),
],
'dim': [
(uni_name, Name.Variable, '#pop'),
default('#pop'), # any other syntax
],
'funcname': [
(uni_name, Name.Function, '#pop'),
],
'classname': [
(uni_name, Name.Class, '#pop'),
],
'namespace': [
(uni_name, Name.Namespace),
(r'\.', Name.Namespace),
default('#pop'),
],
'end': [
(r'\s+', Text),
(r'(Function|Sub|Property|Class|Structure|Enum|Module|Namespace)\b',
Keyword, '#pop'),
default('#pop'),
]
}
def analyse_text(text):
if re.search(r'^\s*(#If|Module|Namespace)', text, re.MULTILINE):
return 0.5
class GenericAspxLexer(RegexLexer):
"""
Lexer for ASP.NET pages.
"""
name = 'aspx-gen'
filenames = []
mimetypes = []
flags = re.DOTALL
tokens = {
'root': [
(r'(<%[@=#]?)(.*?)(%>)', bygroups(Name.Tag, Other, Name.Tag)),
(r'(<script.*?>)(.*?)(</script>)', bygroups(using(XmlLexer),
Other,
using(XmlLexer))),
(r'(.+?)(?=<)', using(XmlLexer)),
(r'.+', using(XmlLexer)),
],
}
# TODO support multiple languages within the same source file
class CSharpAspxLexer(DelegatingLexer):
"""
Lexer for highlighting C# within ASP.NET pages.
"""
name = 'aspx-cs'
aliases = ['aspx-cs']
filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
mimetypes = []
def __init__(self, **options):
super(CSharpAspxLexer, self).__init__(CSharpLexer, GenericAspxLexer,
**options)
def analyse_text(text):
if re.search(r'Page\s*Language="C#"', text, re.I) is not None:
return 0.2
elif re.search(r'script[^>]+language=["\']C#', text, re.I) is not None:
return 0.15
class VbNetAspxLexer(DelegatingLexer):
"""
Lexer for highlighting Visual Basic.net within ASP.NET pages.
"""
name = 'aspx-vb'
aliases = ['aspx-vb']
filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
mimetypes = []
def __init__(self, **options):
super(VbNetAspxLexer, self).__init__(VbNetLexer, GenericAspxLexer,
**options)
def analyse_text(text):
if re.search(r'Page\s*Language="Vb"', text, re.I) is not None:
return 0.2
elif re.search(r'script[^>]+language=["\']vb', text, re.I) is not None:
return 0.15
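# Sketch (assumed usage) of the DelegatingLexer split: GenericAspxLexer scans
# the page and marks inline code as Other; those chunks are then re-lexed
# with CSharpLexer (or VbNetLexer for aspx-vb).
page = '<%@ Page Language="C#" %>\n<p><% int x = 1; %></p>\n'
for token_type, value in CSharpAspxLexer().get_tokens(page):
    print(token_type, repr(value))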
# Very close to functional.OcamlLexer
class FSharpLexer(RegexLexer):
"""
For the F# language (version 3.0).
String handling is the messy part of the grammar; see the spec:
http://research.microsoft.com/en-us/um/cambridge/projects/fsharp/manual/spec.html#_Toc335818775
.. versionadded:: 1.5
"""
name = 'FSharp'
aliases = ['fsharp']
filenames = ['*.fs', '*.fsi']
mimetypes = ['text/x-fsharp']
keywords = [
'abstract', 'as', 'assert', 'base', 'begin', 'class', 'default',
'delegate', 'do!', 'do', 'done', 'downcast', 'downto', 'elif', 'else',
'end', 'exception', 'extern', 'false', 'finally', 'for', 'function',
'fun', 'global', 'if', 'inherit', 'inline', 'interface', 'internal',
'in', 'lazy', 'let!', 'let', 'match', 'member', 'module', 'mutable',
'namespace', 'new', 'null', 'of', 'open', 'override', 'private', 'public',
'rec', 'return!', 'return', 'select', 'static', 'struct', 'then', 'to',
'true', 'try', 'type', 'upcast', 'use!', 'use', 'val', 'void', 'when',
'while', 'with', 'yield!', 'yield',
]
# Reserved words; cannot hurt to color them as keywords too.
keywords += [
'atomic', 'break', 'checked', 'component', 'const', 'constraint',
'constructor', 'continue', 'eager', 'event', 'external', 'fixed',
'functor', 'include', 'method', 'mixin', 'object', 'parallel',
'process', 'protected', 'pure', 'sealed', 'tailcall', 'trait',
'virtual', 'volatile',
]
keyopts = [
'!=', '#', '&&', '&', '\(', '\)', '\*', '\+', ',', '-\.',
'->', '-', '\.\.', '\.', '::', ':=', ':>', ':', ';;', ';', '<-',
'<\]', '<', '>\]', '>', '\?\?', '\?', '\[<', '\[\|', '\[', '\]',
'_', '`', '\{', '\|\]', '\|', '\}', '~', '<@@', '<@', '=', '@>', '@@>',
]
operators = r'[!$%&*+\./:<=>?@^|~-]'
word_operators = ['and', 'or', 'not']
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
primitives = [
'sbyte', 'byte', 'char', 'nativeint', 'unativeint', 'float32', 'single',
'float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32',
'uint32', 'int64', 'uint64', 'decimal', 'unit', 'bool', 'string',
'list', 'exn', 'obj', 'enum',
]
# See http://msdn.microsoft.com/en-us/library/dd233181.aspx and/or
# http://fsharp.org/about/files/spec.pdf for reference. Good luck.
tokens = {
'escape-sequence': [
(r'\\[\\"\'ntbrafv]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\u[0-9a-fA-F]{4}', String.Escape),
(r'\\U[0-9a-fA-F]{8}', String.Escape),
],
'root': [
(r'\s+', Text),
(r'\(\)|\[\]', Name.Builtin.Pseudo),
(r'\b(?<!\.)([A-Z][\w\']*)(?=\s*\.)',
Name.Namespace, 'dotted'),
(r'\b([A-Z][\w\']*)', Name),
(r'///.*?\n', String.Doc),
(r'//.*?\n', Comment.Single),
(r'\(\*(?!\))', Comment, 'comment'),
(r'@"', String, 'lstring'),
(r'"""', String, 'tqs'),
(r'"', String, 'string'),
(r'\b(open|module)(\s+)([\w.]+)',
bygroups(Keyword, Text, Name.Namespace)),
(r'\b(let!?)(\s+)(\w+)',
bygroups(Keyword, Text, Name.Variable)),
(r'\b(type)(\s+)(\w+)',
bygroups(Keyword, Text, Name.Class)),
(r'\b(member|override)(\s+)(\w+)(\.)(\w+)',
bygroups(Keyword, Text, Name, Punctuation, Name.Function)),
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
(r'(%s)' % '|'.join(keyopts), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
(r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
(r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
(r'#[ \t]*(if|endif|else|line|nowarn|light|\d+)\b.*?\n',
Comment.Preproc),
(r"[^\W\d][\w']*", Name),
(r'\d[\d_]*[uU]?[yslLnQRZINGmM]?', Number.Integer),
(r'0[xX][\da-fA-F][\da-fA-F_]*[uU]?[yslLn]?[fF]?', Number.Hex),
(r'0[oO][0-7][0-7_]*[uU]?[yslLn]?', Number.Oct),
(r'0[bB][01][01_]*[uU]?[yslLn]?', Number.Bin),
(r'-?\d[\d_]*(\.[\d_]*)?([eE][+\-]?\d[\d_]*)[fFmM]?',
Number.Float),
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'B?",
String.Char),
(r"'.'", String.Char),
(r"'", Keyword), # a stray quote is another syntax element
(r'@?"', String.Double, 'string'),
(r'[~?][a-z][\w\']*:', Name.Variable),
],
'dotted': [
(r'\s+', Text),
(r'\.', Punctuation),
(r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
(r'[A-Z][\w\']*', Name, '#pop'),
(r'[a-z_][\w\']*', Name, '#pop'),
# e.g. dictionary index access
default('#pop'),
],
'comment': [
(r'[^(*)@"]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
# comments cannot be closed within strings in comments
(r'@"', String, 'lstring'),
(r'"""', String, 'tqs'),
(r'"', String, 'string'),
(r'[(*)@]', Comment),
],
'string': [
(r'[^\\"]+', String),
include('escape-sequence'),
(r'\\\n', String),
(r'\n', String), # newlines are allowed in any string
(r'"B?', String, '#pop'),
],
'lstring': [
(r'[^"]+', String),
(r'\n', String),
(r'""', String),
(r'"B?', String, '#pop'),
],
'tqs': [
(r'[^"]+', String),
(r'\n', String),
(r'"""B?', String, '#pop'),
(r'"', String),
],
}
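# --- Usage sketch (not part of the original file) -------------------------
# How the table above is consumed: any RegexLexer subclass exposes
# get_tokens(), which starts in the 'root' state and follows the push/pop
# transitions declared in `tokens`.  Assumes the enclosing class is the F#
# lexer (registered upstream as FSharpLexer in pygments.lexers.dotnet).
if __name__ == '__main__':  # demo only
    from pygments.lexers.dotnet import FSharpLexer
    for ttype, value in FSharpLexer().get_tokens('let answer = 42'):
        print(ttype, repr(value))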


@@ -1,514 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.dsls
~~~~~~~~~~~~~~~~~~~~
Lexers for various domain-specific languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, words, include, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Literal
__all__ = ['ProtoBufLexer', 'BroLexer', 'PuppetLexer', 'RslLexer',
'MscgenLexer', 'VGLLexer', 'AlloyLexer', 'PanLexer']
class ProtoBufLexer(RegexLexer):
"""
Lexer for `Protocol Buffer <http://code.google.com/p/protobuf/>`_
definition files.
.. versionadded:: 1.4
"""
name = 'Protocol Buffer'
aliases = ['protobuf', 'proto']
filenames = ['*.proto']
tokens = {
'root': [
(r'[ \t]+', Text),
(r'[,;{}\[\]()]', Punctuation),
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
(words((
'import', 'option', 'optional', 'required', 'repeated', 'default',
'packed', 'ctype', 'extensions', 'to', 'max', 'rpc', 'returns',
'oneof'), prefix=r'\b', suffix=r'\b'),
Keyword),
(words((
'int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64',
'fixed32', 'fixed64', 'sfixed32', 'sfixed64',
'float', 'double', 'bool', 'string', 'bytes'), suffix=r'\b'),
Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text), 'package'),
(r'(message|extend)(\s+)',
bygroups(Keyword.Declaration, Text), 'message'),
(r'(enum|group|service)(\s+)',
bygroups(Keyword.Declaration, Text), 'type'),
(r'\".*?\"', String),
(r'\'.*?\'', String),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'(\-?(inf|nan))\b', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'0[0-7]+[LlUu]*', Number.Oct),
(r'\d+[LlUu]*', Number.Integer),
(r'[+-=]', Operator),
(r'([a-zA-Z_][\w.]*)([ \t]*)(=)',
bygroups(Name.Attribute, Text, Operator)),
(r'[a-zA-Z_][\w.]*', Name),
],
'package': [
(r'[a-zA-Z_]\w*', Name.Namespace, '#pop'),
default('#pop'),
],
'message': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
default('#pop'),
],
'type': [
(r'[a-zA-Z_]\w*', Name, '#pop'),
default('#pop'),
],
}
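# --- Usage sketch (not part of the original file) -------------------------
# Exercising the state transitions above: 'message' pushes the 'message'
# state, so the identifier that follows is tagged Name.Class before the
# state pops back to 'root'.
if __name__ == '__main__':  # demo only
    for ttype, value in ProtoBufLexer().get_tokens('message Point { }'):
        print(ttype, repr(value))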
class BroLexer(RegexLexer):
"""
For `Bro <http://bro-ids.org/>`_ scripts.
.. versionadded:: 1.5
"""
name = 'Bro'
aliases = ['bro']
filenames = ['*.bro']
_hex = r'[0-9a-fA-F_]'
_float = r'((\d*\.?\d+)|(\d+\.?\d*))([eE][-+]?\d+)?'
_h = r'[A-Za-z0-9][-A-Za-z0-9]*'
tokens = {
'root': [
# Whitespace
(r'^@.*?\n', Comment.Preproc),
(r'#.*?\n', Comment.Single),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text),
# Keywords
(r'(add|alarm|break|case|const|continue|delete|do|else|enum|event'
r'|export|for|function|if|global|hook|local|module|next'
r'|of|print|redef|return|schedule|switch|type|when|while)\b', Keyword),
(r'(addr|any|bool|count|counter|double|file|int|interval|net'
r'|pattern|port|record|set|string|subnet|table|time|timer'
r'|vector)\b', Keyword.Type),
(r'(T|F)\b', Keyword.Constant),
(r'(&)((?:add|delete|expire)_func|attr|(?:create|read|write)_expire'
r'|default|disable_print_hook|raw_output|encrypt|group|log'
r'|mergeable|optional|persistent|priority|redef'
r'|rotate_(?:interval|size)|synchronized)\b',
bygroups(Punctuation, Keyword)),
(r'\s+module\b', Keyword.Namespace),
# Addresses, ports and networks
(r'\d+/(tcp|udp|icmp|unknown)\b', Number),
(r'(\d+\.){3}\d+', Number),
(r'(' + _hex + r'){7}' + _hex, Number),
(r'0x' + _hex + r'(' + _hex + r'|:)*::(' + _hex + r'|:)*', Number),
(r'((\d+|:)(' + _hex + r'|:)*)?::(' + _hex + r'|:)*', Number),
(r'(\d+\.\d+\.|(\d+\.){2}\d+)', Number),
# Hostnames
(_h + r'(\.' + _h + r')+', String),
# Numeric
(_float + r'\s+(day|hr|min|sec|msec|usec)s?\b', Literal.Date),
(r'0[xX]' + _hex, Number.Hex),
(_float, Number.Float),
(r'\d+', Number.Integer),
(r'/', String.Regex, 'regex'),
(r'"', String, 'string'),
# Operators
(r'[!%*/+:<=>?~|-]', Operator),
(r'([-+=&|]{2}|[+=!><-]=)', Operator),
(r'(in|match)\b', Operator.Word),
(r'[{}()\[\]$.,;]', Punctuation),
# Identifiers
(r'([_a-zA-Z]\w*)(::)', bygroups(Name, Name.Namespace)),
(r'[a-zA-Z_]\w*', Name)
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String),
(r'\\\n', String),
(r'\\', String)
],
'regex': [
(r'/', String.Regex, '#pop'),
(r'\\[\\nt/]', String.Regex), # String.Escape is too intense here.
(r'[^\\/\n]+', String.Regex),
(r'\\\n', String.Regex),
(r'\\', String.Regex)
]
}
class PuppetLexer(RegexLexer):
"""
For `Puppet <http://puppetlabs.com/>`__ configuration DSL.
.. versionadded:: 1.6
"""
name = 'Puppet'
aliases = ['puppet']
filenames = ['*.pp']
tokens = {
'root': [
include('comments'),
include('keywords'),
include('names'),
include('numbers'),
include('operators'),
include('strings'),
(r'[]{}:(),;[]', Punctuation),
(r'[^\S\n]+', Text),
],
'comments': [
(r'\s*#.*$', Comment),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
],
'operators': [
(r'(=>|\?|<|>|=|\+|-|/|\*|~|!|\|)', Operator),
(r'(in|and|or|not)\b', Operator.Word),
],
'names': [
(r'[a-zA-Z_]\w*', Name.Attribute),
(r'(\$\S+)(\[)(\S+)(\])', bygroups(Name.Variable, Punctuation,
String, Punctuation)),
(r'\$\S+', Name.Variable),
],
'numbers': [
# Copypasta from the Python lexer
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
(r'\d+[eE][+-]?[0-9]+j?', Number.Float),
(r'0[0-7]+j?', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+j?', Number.Integer)
],
'keywords': [
# Left out 'group' and 'require' since they're often used as attributes
(words((
'absent', 'alert', 'alias', 'audit', 'augeas', 'before', 'case',
'check', 'class', 'computer', 'configured', 'contained',
'create_resources', 'crit', 'cron', 'debug', 'default',
'define', 'defined', 'directory', 'else', 'elsif', 'emerg',
'err', 'exec', 'extlookup', 'fail', 'false', 'file',
'filebucket', 'fqdn_rand', 'generate', 'host', 'if', 'import',
'include', 'info', 'inherits', 'inline_template', 'installed',
'interface', 'k5login', 'latest', 'link', 'loglevel',
'macauthorization', 'mailalias', 'maillist', 'mcx', 'md5',
'mount', 'mounted', 'nagios_command', 'nagios_contact',
'nagios_contactgroup', 'nagios_host', 'nagios_hostdependency',
'nagios_hostescalation', 'nagios_hostextinfo', 'nagios_hostgroup',
'nagios_service', 'nagios_servicedependency', 'nagios_serviceescalation',
'nagios_serviceextinfo', 'nagios_servicegroup', 'nagios_timeperiod',
'node', 'noop', 'notice', 'notify', 'package', 'present', 'purged',
'realize', 'regsubst', 'resources', 'role', 'router', 'running',
'schedule', 'scheduled_task', 'search', 'selboolean', 'selmodule',
'service', 'sha1', 'shellquote', 'split', 'sprintf',
'ssh_authorized_key', 'sshkey', 'stage', 'stopped', 'subscribe',
'tag', 'tagged', 'template', 'tidy', 'true', 'undef', 'unmounted',
'user', 'versioncmp', 'vlan', 'warning', 'yumrepo', 'zfs', 'zone',
'zpool'), prefix='(?i)', suffix=r'\b'),
Keyword),
],
'strings': [
(r'"([^"])*"', String),
(r"'(\\'|[^'])*'", String),
],
}
class RslLexer(RegexLexer):
"""
`RSL <http://en.wikipedia.org/wiki/RAISE>`_ is the formal specification
language used in the RAISE (Rigorous Approach to Industrial Software
Engineering) method.
.. versionadded:: 2.0
"""
name = 'RSL'
aliases = ['rsl']
filenames = ['*.rsl']
mimetypes = ['text/rsl']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
(words((
'Bool', 'Char', 'Int', 'Nat', 'Real', 'Text', 'Unit', 'abs',
'all', 'always', 'any', 'as', 'axiom', 'card', 'case', 'channel',
'chaos', 'class', 'devt_relation', 'dom', 'elems', 'else', 'elif',
'end', 'exists', 'extend', 'false', 'for', 'hd', 'hide', 'if',
'in', 'is', 'inds', 'initialise', 'int', 'inter', 'isin', 'len',
'let', 'local', 'ltl_assertion', 'object', 'of', 'out', 'post',
'pre', 'read', 'real', 'rng', 'scheme', 'skip', 'stop', 'swap',
'then', 'theory', 'test_case', 'tl', 'transition_system', 'true',
'type', 'union', 'until', 'use', 'value', 'variable', 'while',
'with', 'write', '~isin', '-inflist', '-infset', '-list',
'-set'), prefix=r'\b', suffix=r'\b'),
Keyword),
(r'(variable|value)\b', Keyword.Declaration),
(r'--.*?\n', Comment),
(r'<:.*?:>', Comment),
(r'\{!.*?!\}', Comment),
(r'/\*.*?\*/', Comment),
(r'^[ \t]*([\w]+)[ \t]*:[^:]', Name.Function),
(r'(^[ \t]*)([\w]+)([ \t]*\([\w\s,]*\)[ \t]*)(is|as)',
bygroups(Text, Name.Function, Text, Keyword)),
(r'\b[A-Z]\w*\b', Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
(r'".*"', String),
(r'\'.\'', String.Char),
(r'(><|->|-m->|/\\|<=|<<=|<\.|\|\||\|\^\||-~->|-~m->|\\/|>=|>>|'
r'\.>|\+\+|-\\|<->|=>|:-|~=|\*\*|<<|>>=|\+>|!!|\|=\||#)',
Operator),
(r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'.', Text),
],
}
def analyse_text(text):
"""
Check for the most common text at the beginning of an RSL file.
"""
if re.search(r'scheme\s*.*?=\s*class\s*type', text, re.I) is not None:
return 1.0
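# --- Usage sketch (not part of the original file) -------------------------
# analyse_text() feeds Pygments' lexer guessing (guess_lexer): a return of
# 1.0 claims a certain match, while falling through scores as 0.0.
if __name__ == '__main__':  # demo only
    print(RslLexer.analyse_text('scheme S = class type T = Int end'))  # 1.0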
class MscgenLexer(RegexLexer):
"""
For `Mscgen <http://www.mcternan.me.uk/mscgen/>`_ files.
.. versionadded:: 1.6
"""
name = 'Mscgen'
aliases = ['mscgen', 'msc']
filenames = ['*.msc']
_var = r'(\w+|"(?:\\"|[^"])*")'
tokens = {
'root': [
(r'msc\b', Keyword.Type),
# Options
(r'(hscale|HSCALE|width|WIDTH|wordwraparcs|WORDWRAPARCS'
r'|arcgradient|ARCGRADIENT)\b', Name.Property),
# Operators
(r'(abox|ABOX|rbox|RBOX|box|BOX|note|NOTE)\b', Operator.Word),
(r'(\.|-|\|){3}', Keyword),
(r'(?:-|=|\.|:){2}'
r'|<<=>>|<->|<=>|<<>>|<:>'
r'|->|=>>|>>|=>|:>|-x|-X'
r'|<-|<<=|<<|<=|<:|x-|X-|=', Operator),
# Names
(r'\*', Name.Builtin),
(_var, Name.Variable),
# Other
(r'\[', Punctuation, 'attrs'),
(r'\{|\}|,|;', Punctuation),
include('comments')
],
'attrs': [
(r'\]', Punctuation, '#pop'),
(_var + r'(\s*)(=)(\s*)' + _var,
bygroups(Name.Attribute, Text.Whitespace, Operator, Text.Whitespace,
String)),
(r',', Punctuation),
include('comments')
],
'comments': [
(r'(?://|#).*?\n', Comment.Single),
(r'/\*(?:.|\n)*?\*/', Comment.Multiline),
(r'[ \t\r\n]+', Text.Whitespace)
]
}
class VGLLexer(RegexLexer):
"""
For `SampleManager VGL <http://www.thermoscientific.com/samplemanager>`_
source code.
.. versionadded:: 1.6
"""
name = 'VGL'
aliases = ['vgl']
filenames = ['*.rpf']
flags = re.MULTILINE | re.DOTALL | re.IGNORECASE
tokens = {
'root': [
(r'\{[^}]*\}', Comment.Multiline),
(r'declare', Keyword.Constant),
(r'(if|then|else|endif|while|do|endwhile|and|or|prompt|object'
r'|create|on|line|with|global|routine|value|endroutine|constant'
r'|global|set|join|library|compile_option|file|exists|create|copy'
r'|delete|enable|windows|name|notprotected)(?! *[=<>.,()])',
Keyword),
(r'(true|false|null|empty|error|locked)', Keyword.Constant),
(r'[~^*#!%&\[\]()<>|+=:;,./?-]', Operator),
(r'"[^"]*"', String),
(r'(\.)([a-z_$][\w$]*)', bygroups(Operator, Name.Attribute)),
(r'[0-9][0-9]*(\.[0-9]+(e[+\-]?[0-9]+)?)?', Number),
(r'[a-z_$][\w$]*', Name),
(r'[\r\n]+', Text),
(r'\s+', Text)
]
}
class AlloyLexer(RegexLexer):
"""
For `Alloy <http://alloy.mit.edu>`_ source code.
.. versionadded:: 2.0
"""
name = 'Alloy'
aliases = ['alloy']
filenames = ['*.als']
mimetypes = ['text/x-alloy']
flags = re.MULTILINE | re.DOTALL
iden_rex = r'[a-zA-Z_][\w\']*'
text_tuple = (r'[^\S\n]+', Text)
tokens = {
'sig': [
(r'(extends)\b', Keyword, '#pop'),
(iden_rex, Name),
text_tuple,
(r',', Punctuation),
(r'\{', Operator, '#pop'),
],
'module': [
text_tuple,
(iden_rex, Name, '#pop'),
],
'fun': [
text_tuple,
(r'\{', Operator, '#pop'),
(iden_rex, Name, '#pop'),
],
'root': [
(r'--.*?$', Comment.Single),
(r'//.*?$', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
text_tuple,
(r'(module|open)(\s+)', bygroups(Keyword.Namespace, Text),
'module'),
(r'(sig|enum)(\s+)', bygroups(Keyword.Declaration, Text), 'sig'),
(r'(iden|univ|none)\b', Keyword.Constant),
(r'(int|Int)\b', Keyword.Type),
(r'(this|abstract|extends|set|seq|one|lone|let)\b', Keyword),
(r'(all|some|no|sum|disj|when|else)\b', Keyword),
(r'(run|check|for|but|exactly|expect|as)\b', Keyword),
(r'(and|or|implies|iff|in)\b', Operator.Word),
(r'(fun|pred|fact|assert)(\s+)', bygroups(Keyword, Text), 'fun'),
(r'!|#|&&|\+\+|<<|>>|>=|<=>|<=|\.|->', Operator),
(r'[-+/*%=<>&!^|~{}\[\]().]', Operator),
(iden_rex, Name),
(r'[:,]', Punctuation),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String),
(r'\n', Text),
]
}
class PanLexer(RegexLexer):
"""
Lexer for `pan <http://github.com/quattor/pan/>`_ source files.
Based on tcsh lexer.
.. versionadded:: 2.0
"""
name = 'Pan'
aliases = ['pan']
filenames = ['*.pan']
tokens = {
'root': [
include('basic'),
(r'\(', Keyword, 'paren'),
(r'\{', Keyword, 'curly'),
include('data'),
],
'basic': [
(words((
'if', 'for', 'with', 'else', 'type', 'bind', 'while', 'valid', 'final', 'prefix',
'unique', 'object', 'foreach', 'include', 'template', 'function', 'variable',
'structure', 'extensible', 'declaration'), prefix=r'\b', suffix=r'\s*\b'),
Keyword),
(words((
'file_contents', 'format', 'index', 'length', 'match', 'matches', 'replace',
'splice', 'split', 'substr', 'to_lowercase', 'to_uppercase', 'debug', 'error',
'traceback', 'deprecated', 'base64_decode', 'base64_encode', 'digest', 'escape',
'unescape', 'append', 'create', 'first', 'nlist', 'key', 'list', 'merge', 'next',
'prepend', 'is_boolean', 'is_defined', 'is_double', 'is_list', 'is_long',
'is_nlist', 'is_null', 'is_number', 'is_property', 'is_resource', 'is_string',
'to_boolean', 'to_double', 'to_long', 'to_string', 'clone', 'delete', 'exists',
'path_exists', 'if_exists', 'return', 'value'), prefix=r'\b', suffix=r'\s*\b'),
Name.Builtin),
(r'#.*', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
(r'[\[\]{}()=]+', Operator),
(r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
(r';', Punctuation),
],
'data': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r'\s+', Text),
(r'[^=\s\[\]{}()$"\'`\\;#]+', Text),
(r'\d+(?= |\Z)', Number),
],
'curly': [
(r'\}', Keyword, '#pop'),
(r':-', Keyword),
(r'\w+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
}


@@ -1,289 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.dylan
~~~~~~~~~~~~~~~~~~~~~
Lexers for the Dylan language.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Literal
__all__ = ['DylanLexer', 'DylanConsoleLexer', 'DylanLidLexer']
class DylanLexer(RegexLexer):
"""
For the `Dylan <http://www.opendylan.org/>`_ language.
.. versionadded:: 0.7
"""
name = 'Dylan'
aliases = ['dylan']
filenames = ['*.dylan', '*.dyl', '*.intr']
mimetypes = ['text/x-dylan']
flags = re.IGNORECASE
builtins = set((
'subclass', 'abstract', 'block', 'concrete', 'constant', 'class',
'compiler-open', 'compiler-sideways', 'domain', 'dynamic',
'each-subclass', 'exception', 'exclude', 'function', 'generic',
'handler', 'inherited', 'inline', 'inline-only', 'instance',
'interface', 'import', 'keyword', 'library', 'macro', 'method',
'module', 'open', 'primary', 'required', 'sealed', 'sideways',
'singleton', 'slot', 'thread', 'variable', 'virtual'))
keywords = set((
'above', 'afterwards', 'begin', 'below', 'by', 'case', 'cleanup',
'create', 'define', 'else', 'elseif', 'end', 'export', 'finally',
'for', 'from', 'if', 'in', 'let', 'local', 'otherwise', 'rename',
'select', 'signal', 'then', 'to', 'unless', 'until', 'use', 'when',
'while'))
operators = set((
'~', '+', '-', '*', '|', '^', '=', '==', '~=', '~==', '<', '<=',
'>', '>=', '&', '|'))
functions = set((
'abort', 'abs', 'add', 'add!', 'add-method', 'add-new', 'add-new!',
'all-superclasses', 'always', 'any?', 'applicable-method?', 'apply',
'aref', 'aref-setter', 'as', 'as-lowercase', 'as-lowercase!',
'as-uppercase', 'as-uppercase!', 'ash', 'backward-iteration-protocol',
'break', 'ceiling', 'ceiling/', 'cerror', 'check-type', 'choose',
'choose-by', 'complement', 'compose', 'concatenate', 'concatenate-as',
'condition-format-arguments', 'condition-format-string', 'conjoin',
'copy-sequence', 'curry', 'default-handler', 'dimension', 'dimensions',
'direct-subclasses', 'direct-superclasses', 'disjoin', 'do',
'do-handlers', 'element', 'element-setter', 'empty?', 'error', 'even?',
'every?', 'false-or', 'fill!', 'find-key', 'find-method', 'first',
'first-setter', 'floor', 'floor/', 'forward-iteration-protocol',
'function-arguments', 'function-return-values',
'function-specializers', 'gcd', 'generic-function-mandatory-keywords',
'generic-function-methods', 'head', 'head-setter', 'identity',
'initialize', 'instance?', 'integral?', 'intersection',
'key-sequence', 'key-test', 'last', 'last-setter', 'lcm', 'limited',
'list', 'logand', 'logbit?', 'logior', 'lognot', 'logxor', 'make',
'map', 'map-as', 'map-into', 'max', 'member?', 'merge-hash-codes',
'min', 'modulo', 'negative', 'negative?', 'next-method',
'object-class', 'object-hash', 'odd?', 'one-of', 'pair', 'pop',
'pop-last', 'positive?', 'push', 'push-last', 'range', 'rank',
'rcurry', 'reduce', 'reduce1', 'remainder', 'remove', 'remove!',
'remove-duplicates', 'remove-duplicates!', 'remove-key!',
'remove-method', 'replace-elements!', 'replace-subsequence!',
'restart-query', 'return-allowed?', 'return-description',
'return-query', 'reverse', 'reverse!', 'round', 'round/',
'row-major-index', 'second', 'second-setter', 'shallow-copy',
'signal', 'singleton', 'size', 'size-setter', 'slot-initialized?',
'sort', 'sort!', 'sorted-applicable-methods', 'subsequence-position',
'subtype?', 'table-protocol', 'tail', 'tail-setter', 'third',
'third-setter', 'truncate', 'truncate/', 'type-error-expected-type',
'type-error-value', 'type-for-copy', 'type-union', 'union', 'values',
'vector', 'zero?'))
valid_name = r'\\?[\w!&*<>|^$%@\-+~?/=]+'
def get_tokens_unprocessed(self, text):
for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
if token is Name:
lowercase_value = value.lower()
if lowercase_value in self.builtins:
yield index, Name.Builtin, value
continue
if lowercase_value in self.keywords:
yield index, Keyword, value
continue
if lowercase_value in self.functions:
yield index, Name.Builtin, value
continue
if lowercase_value in self.operators:
yield index, Operator, value
continue
yield index, token, value
tokens = {
'root': [
# Whitespace
(r'\s+', Text),
# single line comment
(r'//.*?\n', Comment.Single),
# lid header
(r'([a-z0-9-]+)(:)([ \t]*)(.*(?:\n[ \t].+)*)',
bygroups(Name.Attribute, Operator, Text, String)),
default('code') # no header match, switch to code
],
'code': [
# Whitespace
(r'\s+', Text),
# single line comment
(r'//.*?\n', Comment.Single),
# multi-line comment
(r'/\*', Comment.Multiline, 'comment'),
# strings and characters
(r'"', String, 'string'),
(r"'(\\.|\\[0-7]{1,3}|\\x[a-f0-9]{1,2}|[^\\\'\n])'", String.Char),
# binary integer
(r'#b[01]+', Number.Bin),
# octal integer
(r'#o[0-7]+', Number.Oct),
# floating point
(r'[-+]?(\d*\.\d+(e[-+]?\d+)?|\d+(\.\d*)?e[-+]?\d+)', Number.Float),
# decimal integer
(r'[-+]?\d+', Number.Integer),
# hex integer
(r'#x[0-9a-f]+', Number.Hex),
# Macro parameters
(r'(\?' + valid_name + ')(:)'
r'(token|name|variable|expression|body|case-body|\*)',
bygroups(Name.Tag, Operator, Name.Builtin)),
(r'(\?)(:)(token|name|variable|expression|body|case-body|\*)',
bygroups(Name.Tag, Operator, Name.Builtin)),
(r'\?' + valid_name, Name.Tag),
# Punctuation
(r'(=>|::|#\(|#\[|##|\?\?|\?=|\?|[(){}\[\],.;])', Punctuation),
# Most operators are picked up as names and then re-flagged.
# This one isn't valid in a name though, so we pick it up now.
(r':=', Operator),
# Pick up #t / #f before we match other stuff with #.
(r'#[tf]', Literal),
# #"foo" style keywords
(r'#"', String.Symbol, 'keyword'),
# #rest, #key, #all-keys, etc.
(r'#[a-z0-9-]+', Keyword),
# required-init-keyword: style keywords.
(valid_name + ':', Keyword),
# class names
(r'<' + valid_name + '>', Name.Class),
# define variable forms.
(r'\*' + valid_name + r'\*', Name.Variable.Global),
# define constant forms.
(r'\$' + valid_name, Name.Constant),
# everything else. We re-flag some of these in the method above.
(valid_name, Name),
],
'comment': [
(r'[^*/]', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
'keyword': [
(r'"', String.Symbol, '#pop'),
(r'[^\\"]+', String.Symbol), # all other characters
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-f0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
]
}
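# --- Usage sketch (not part of the original file) -------------------------
# The get_tokens_unprocessed() override above re-flags plain Name tokens by
# set membership, so 'define' comes back as Keyword and 'constant' as
# Name.Builtin even though the regex table only produced Name.
if __name__ == '__main__':  # demo only
    for ttype, value in DylanLexer().get_tokens('define constant $pi = 3.14159'):
        print(ttype, repr(value))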
class DylanLidLexer(RegexLexer):
"""
For Dylan LID (Library Interchange Definition) files.
.. versionadded:: 1.6
"""
name = 'DylanLID'
aliases = ['dylan-lid', 'lid']
filenames = ['*.lid', '*.hdp']
mimetypes = ['text/x-dylan-lid']
flags = re.IGNORECASE
tokens = {
'root': [
# Whitespace
(r'\s+', Text),
# single line comment
(r'//.*?\n', Comment.Single),
# lid header
(r'(.*?)(:)([ \t]*)(.*(?:\n[ \t].+)*)',
bygroups(Name.Attribute, Operator, Text, String)),
]
}
class DylanConsoleLexer(Lexer):
"""
For Dylan interactive console output like:
.. sourcecode:: dylan-console
? let a = 1;
=> 1
? a
=> 1
This is based on a copy of the RubyConsoleLexer.
.. versionadded:: 1.6
"""
name = 'Dylan session'
aliases = ['dylan-console', 'dylan-repl']
filenames = ['*.dylan-console']
mimetypes = ['text/x-dylan-console']
_line_re = re.compile('.*?\n')
_prompt_re = re.compile(r'\?| ')
def get_tokens_unprocessed(self, text):
dylexer = DylanLexer(**self.options)
curcode = ''
insertions = []
for match in self._line_re.finditer(text):
line = match.group()
m = self._prompt_re.match(line)
if m is not None:
end = m.end()
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:end])]))
curcode += line[end:]
else:
if curcode:
for item in do_insertions(insertions,
dylexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
dylexer.get_tokens_unprocessed(curcode)):
yield item
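# --- Usage sketch (not part of the original file) -------------------------
# do_insertions() splices the Generic.Prompt tokens collected above back
# into the stream DylanLexer produces for the accumulated code, so prompts
# and code keep their original offsets.
if __name__ == '__main__':  # demo only
    for ttype, value in DylanConsoleLexer().get_tokens('? let a = 1;\n=> 1\n'):
        print(ttype, repr(value))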


@@ -1,125 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.ecl
~~~~~~~~~~~~~~~~~~~
Lexers for the ECL language.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['ECLLexer']
class ECLLexer(RegexLexer):
"""
Lexer for the declarative big-data `ECL
<http://hpccsystems.com/community/docs/ecl-language-reference/html>`_
language.
.. versionadded:: 1.5
"""
name = 'ECL'
aliases = ['ecl']
filenames = ['*.ecl']
mimetypes = ['application/x-ecl']
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
include('whitespace'),
include('statements'),
],
'whitespace': [
(r'\s+', Text),
(r'\/\/.*', Comment.Single),
(r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
],
'statements': [
include('types'),
include('keywords'),
include('functions'),
include('hash'),
(r'"', String, 'string'),
(r'\'', String, 'string'),
(r'(\d+\.\d*|\.\d+|\d+)e[+-]?\d+[lu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+f)f?', Number.Float),
(r'0x[0-9a-f]+[lu]*', Number.Hex),
(r'0[0-7]+[lu]*', Number.Oct),
(r'\d+[lu]*', Number.Integer),
(r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]+', Operator),
(r'[{}()\[\],.;]', Punctuation),
(r'[a-z_]\w*', Name),
],
'hash': [
(r'^#.*$', Comment.Preproc),
],
'types': [
(r'(RECORD|END)\D', Keyword.Declaration),
(r'((?:ASCII|BIG_ENDIAN|BOOLEAN|DATA|DECIMAL|EBCDIC|INTEGER|PATTERN|'
r'QSTRING|REAL|RECORD|RULE|SET OF|STRING|TOKEN|UDECIMAL|UNICODE|'
r'UNSIGNED|VARSTRING|VARUNICODE)\d*)(\s+)',
bygroups(Keyword.Type, Text)),
],
'keywords': [
(words((
'APPLY', 'ASSERT', 'BUILD', 'BUILDINDEX', 'EVALUATE', 'FAIL',
'KEYDIFF', 'KEYPATCH', 'LOADXML', 'NOTHOR', 'NOTIFY', 'OUTPUT',
'PARALLEL', 'SEQUENTIAL', 'SOAPCALL', 'CHECKPOINT', 'DEPRECATED',
'FAILCODE', 'FAILMESSAGE', 'FAILURE', 'GLOBAL', 'INDEPENDENT',
'ONWARNING', 'PERSIST', 'PRIORITY', 'RECOVERY', 'STORED', 'SUCCESS',
'WAIT', 'WHEN'), suffix=r'\b'),
Keyword.Reserved),
# These are classed differently, check later
(words((
'ALL', 'AND', 'ANY', 'AS', 'ATMOST', 'BEFORE', 'BEGINC++', 'BEST', 'BETWEEN', 'CASE',
'CONST', 'COUNTER', 'CSV', 'DESCEND', 'ENCRYPT', 'ENDC++', 'ENDMACRO', 'EXCEPT',
'EXCLUSIVE', 'EXPIRE', 'EXPORT', 'EXTEND', 'FALSE', 'FEW', 'FIRST', 'FLAT', 'FULL',
'FUNCTION', 'GROUP', 'HEADER', 'HEADING', 'HOLE', 'IFBLOCK', 'IMPORT', 'IN', 'JOINED',
'KEEP', 'KEYED', 'LAST', 'LEFT', 'LIMIT', 'LOAD', 'LOCAL', 'LOCALE', 'LOOKUP', 'MACRO',
'MANY', 'MAXCOUNT', 'MAXLENGTH', 'MIN SKEW', 'MODULE', 'INTERFACE', 'NAMED', 'NOCASE',
'NOROOT', 'NOSCAN', 'NOSORT', 'NOT', 'OF', 'ONLY', 'OPT', 'OR', 'OUTER', 'OVERWRITE',
'PACKED', 'PARTITION', 'PENALTY', 'PHYSICALLENGTH', 'PIPE', 'QUOTE', 'RELATIONSHIP',
'REPEAT', 'RETURN', 'RIGHT', 'SCAN', 'SELF', 'SEPARATOR', 'SERVICE', 'SHARED', 'SKEW',
'SKIP', 'SQL', 'STORE', 'TERMINATOR', 'THOR', 'THRESHOLD', 'TOKEN', 'TRANSFORM', 'TRIM',
'TRUE', 'TYPE', 'UNICODEORDER', 'UNSORTED', 'VALIDATE', 'VIRTUAL', 'WHOLE', 'WILD',
'WITHIN', 'XML', 'XPATH', '__COMPRESSED__'), suffix=r'\b'),
Keyword.Reserved),
],
'functions': [
(words((
'ABS', 'ACOS', 'ALLNODES', 'ASCII', 'ASIN', 'ASSTRING', 'ATAN', 'ATAN2', 'AVE', 'CASE',
'CHOOSE', 'CHOOSEN', 'CHOOSESETS', 'CLUSTERSIZE', 'COMBINE', 'CORRELATION', 'COS',
'COSH', 'COUNT', 'COVARIANCE', 'CRON', 'DATASET', 'DEDUP', 'DEFINE', 'DENORMALIZE',
'DISTRIBUTE', 'DISTRIBUTED', 'DISTRIBUTION', 'EBCDIC', 'ENTH', 'ERROR', 'EVALUATE',
'EVENT', 'EVENTEXTRA', 'EVENTNAME', 'EXISTS', 'EXP', 'FAILCODE', 'FAILMESSAGE',
'FETCH', 'FROMUNICODE', 'GETISVALID', 'GLOBAL', 'GRAPH', 'GROUP', 'HASH', 'HASH32',
'HASH64', 'HASHCRC', 'HASHMD5', 'HAVING', 'IF', 'INDEX', 'INTFORMAT', 'ISVALID',
'ITERATE', 'JOIN', 'KEYUNICODE', 'LENGTH', 'LIBRARY', 'LIMIT', 'LN', 'LOCAL', 'LOG', 'LOOP',
'MAP', 'MATCHED', 'MATCHLENGTH', 'MATCHPOSITION', 'MATCHTEXT', 'MATCHUNICODE',
'MAX', 'MERGE', 'MERGEJOIN', 'MIN', 'NOLOCAL', 'NONEMPTY', 'NORMALIZE', 'PARSE', 'PIPE',
'POWER', 'PRELOAD', 'PROCESS', 'PROJECT', 'PULL', 'RANDOM', 'RANGE', 'RANK', 'RANKED',
'REALFORMAT', 'RECORDOF', 'REGEXFIND', 'REGEXREPLACE', 'REGROUP', 'REJECTED',
'ROLLUP', 'ROUND', 'ROUNDUP', 'ROW', 'ROWDIFF', 'SAMPLE', 'SET', 'SIN', 'SINH', 'SIZEOF',
'SOAPCALL', 'SORT', 'SORTED', 'SQRT', 'STEPPED', 'STORED', 'SUM', 'TABLE', 'TAN', 'TANH',
'THISNODE', 'TOPN', 'TOUNICODE', 'TRANSFER', 'TRIM', 'TRUNCATE', 'TYPEOF', 'UNGROUP',
'UNICODEORDER', 'VARIANCE', 'WHICH', 'WORKUNIT', 'XMLDECODE', 'XMLENCODE',
'XMLTEXT', 'XMLUNICODE'), suffix=r'\b'),
Name.Function),
],
'string': [
(r'"', String, '#pop'),
(r'\'', String, '#pop'),
(r'[^"\']+', String),
],
}


@@ -1,65 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.eiffel
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Eiffel language.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['EiffelLexer']
class EiffelLexer(RegexLexer):
"""
For `Eiffel <http://www.eiffel.com>`_ source code.
.. versionadded:: 2.0
"""
name = 'Eiffel'
aliases = ['eiffel']
filenames = ['*.e']
mimetypes = ['text/x-eiffel']
tokens = {
'root': [
(r'[^\S\n]+', Text),
(r'--.*?\n', Comment.Single),
# Note that keywords and operators are case-insensitive.
(r'(?i)(true|false|void|current|result|precursor)\b', Keyword.Constant),
(r'(?i)(and(\s+then)?|not|xor|implies|or(\s+else)?)\b', Operator.Word),
(words((
'across', 'agent', 'alias', 'all', 'as', 'assign', 'attached',
'attribute', 'check', 'class', 'convert', 'create', 'debug',
'deferred', 'detachable', 'do', 'else', 'elseif', 'end', 'ensure',
'expanded', 'export', 'external', 'feature', 'from', 'frozen', 'if',
'inherit', 'inspect', 'invariant', 'like', 'local', 'loop', 'none',
'note', 'obsolete', 'old', 'once', 'only', 'redefine', 'rename',
'require', 'rescue', 'retry', 'select', 'separate', 'then',
'undefine', 'until', 'variant', 'when'), prefix=r'(?i)\b', suffix=r'\b'),
Keyword.Reserved),
(r'"\[(([^\]%]|\n)|%(.|\n)|\][^"])*?\]"', String),
(r'"([^"%\n]|%.)*?"', String),
include('numbers'),
(r"'([^'%]|%'|%%)'", String.Char),
(r"(//|\\\\|>=|<=|:=|/=|~|/~|[\\?!#%&@|+/\-=>*$<^\[\]])", Operator),
(r"([{}():;,.])", Punctuation),
(r'([a-z]\w*)|([A-Z][A-Z0-9_]*[a-z]\w*)', Name),
(r'([A-Z][A-Z0-9_]*)', Name.Class),
(r'\n+', Text),
],
'numbers': [
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'0[bB][01]+', Number.Bin),
(r'0[cC][0-7]+', Number.Oct),
(r'([0-9]+\.[0-9]*)|([0-9]*\.[0-9]+)', Number.Float),
(r'[0-9]+', Number.Integer),
],
}


@@ -1,511 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.erlang
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Erlang.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, words, do_insertions, \
include, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
__all__ = ['ErlangLexer', 'ErlangShellLexer', 'ElixirConsoleLexer',
'ElixirLexer']
line_re = re.compile('.*?\n')
class ErlangLexer(RegexLexer):
"""
For the Erlang functional programming language.
Blame Jeremy Thurgood (http://jerith.za.net/).
.. versionadded:: 0.9
"""
name = 'Erlang'
aliases = ['erlang']
filenames = ['*.erl', '*.hrl', '*.es', '*.escript']
mimetypes = ['text/x-erlang']
keywords = (
'after', 'begin', 'case', 'catch', 'cond', 'end', 'fun', 'if',
'let', 'of', 'query', 'receive', 'try', 'when',
)
builtins = ( # See erlang(3) man page
'abs', 'append_element', 'apply', 'atom_to_list', 'binary_to_list',
'bitstring_to_list', 'binary_to_term', 'bit_size', 'bump_reductions',
'byte_size', 'cancel_timer', 'check_process_code', 'delete_module',
'demonitor', 'disconnect_node', 'display', 'element', 'erase', 'exit',
'float', 'float_to_list', 'fun_info', 'fun_to_list',
'function_exported', 'garbage_collect', 'get', 'get_keys',
'group_leader', 'hash', 'hd', 'integer_to_list', 'iolist_to_binary',
'iolist_size', 'is_atom', 'is_binary', 'is_bitstring', 'is_boolean',
'is_builtin', 'is_float', 'is_function', 'is_integer', 'is_list',
'is_number', 'is_pid', 'is_port', 'is_process_alive', 'is_record',
'is_reference', 'is_tuple', 'length', 'link', 'list_to_atom',
'list_to_binary', 'list_to_bitstring', 'list_to_existing_atom',
'list_to_float', 'list_to_integer', 'list_to_pid', 'list_to_tuple',
'load_module', 'localtime_to_universaltime', 'make_tuple', 'md5',
'md5_final', 'md5_update', 'memory', 'module_loaded', 'monitor',
'monitor_node', 'node', 'nodes', 'open_port', 'phash', 'phash2',
'pid_to_list', 'port_close', 'port_command', 'port_connect',
'port_control', 'port_call', 'port_info', 'port_to_list',
'process_display', 'process_flag', 'process_info', 'purge_module',
'put', 'read_timer', 'ref_to_list', 'register', 'resume_process',
'round', 'send', 'send_after', 'send_nosuspend', 'set_cookie',
'setelement', 'size', 'spawn', 'spawn_link', 'spawn_monitor',
'spawn_opt', 'split_binary', 'start_timer', 'statistics',
'suspend_process', 'system_flag', 'system_info', 'system_monitor',
'system_profile', 'term_to_binary', 'tl', 'trace', 'trace_delivered',
'trace_info', 'trace_pattern', 'trunc', 'tuple_size', 'tuple_to_list',
'universaltime_to_localtime', 'unlink', 'unregister', 'whereis'
)
operators = r'(\+\+?|--?|\*|/|<|>|/=|=:=|=/=|=<|>=|==?|<-|!|\?)'
word_operators = (
'and', 'andalso', 'band', 'bnot', 'bor', 'bsl', 'bsr', 'bxor',
'div', 'not', 'or', 'orelse', 'rem', 'xor'
)
atom_re = r"(?:[a-z]\w*|'[^\n']*[^\\]')"
variable_re = r'(?:[A-Z_]\w*)'
escape_re = r'(?:\\(?:[bdefnrstv\'"\\/]|[0-7][0-7]?[0-7]?|\^[a-zA-Z]))'
macro_re = r'(?:'+variable_re+r'|'+atom_re+r')'
base_re = r'(?:[2-9]|[12][0-9]|3[0-6])'
tokens = {
'root': [
(r'\s+', Text),
(r'%.*\n', Comment),
(words(keywords, suffix=r'\b'), Keyword),
(words(builtins, suffix=r'\b'), Name.Builtin),
(words(word_operators, suffix=r'\b'), Operator.Word),
(r'^-', Punctuation, 'directive'),
(operators, Operator),
(r'"', String, 'string'),
(r'<<', Name.Label),
(r'>>', Name.Label),
('(' + atom_re + ')(:)', bygroups(Name.Namespace, Punctuation)),
('(?:^|(?<=:))(' + atom_re + r')(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[+-]?' + base_re + r'#[0-9a-zA-Z]+', Number.Integer),
(r'[+-]?\d+', Number.Integer),
(r'[+-]?\d+\.\d+', Number.Float),
(r'[]\[:_@\".{}()|;,]', Punctuation),
(variable_re, Name.Variable),
(atom_re, Name),
(r'\?'+macro_re, Name.Constant),
(r'\$(?:'+escape_re+r'|\\[ %]|[^\\])', String.Char),
(r'#'+atom_re+r'(:?\.'+atom_re+r')?', Name.Label),
],
'string': [
(escape_re, String.Escape),
(r'"', String, '#pop'),
(r'~[0-9.*]*[~#+bBcdefginpPswWxX]', String.Interpol),
(r'[^"\\~]+', String),
(r'~', String),
],
'directive': [
(r'(define)(\s*)(\()('+macro_re+r')',
bygroups(Name.Entity, Text, Punctuation, Name.Constant), '#pop'),
(r'(record)(\s*)(\()('+macro_re+r')',
bygroups(Name.Entity, Text, Punctuation, Name.Label), '#pop'),
(atom_re, Name.Entity, '#pop'),
],
}
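# --- Usage sketch (not part of the original file) -------------------------
# The '^-' rule above pushes the 'directive' state, so attributes such as
# -module(...) get Name.Entity while ordinary atoms stay Name.
if __name__ == '__main__':  # demo only
    for ttype, value in ErlangLexer().get_tokens('-module(demo).\nstart() -> ok.\n'):
        print(ttype, repr(value))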
class ErlangShellLexer(Lexer):
"""
Shell sessions in erl (for Erlang code).
.. versionadded:: 1.1
"""
name = 'Erlang erl session'
aliases = ['erl']
filenames = ['*.erl-sh']
mimetypes = ['text/x-erl-shellsession']
_prompt_re = re.compile(r'\d+>(?=\s|\Z)')
def get_tokens_unprocessed(self, text):
erlexer = ErlangLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = self._prompt_re.match(line)
if m is not None:
end = m.end()
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:end])]))
curcode += line[end:]
else:
if curcode:
for item in do_insertions(insertions,
erlexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
if line.startswith('*'):
yield match.start(), Generic.Traceback, line
else:
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
erlexer.get_tokens_unprocessed(curcode)):
yield item
def gen_elixir_string_rules(name, symbol, token):
states = {}
states['string_' + name] = [
(r'[^#%s\\]+' % (symbol,), token),
include('escapes'),
(r'\\.', token),
(r'(%s)' % (symbol,), bygroups(token), "#pop"),
include('interpol')
]
return states
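# --- Usage sketch (not part of the original file) -------------------------
# What the helper generates: a single-state dict such as {'string_demo':
# [...]} whose rules consume text up to an unescaped terminator and defer
# to the shared 'escapes' and 'interpol' states for \x.. escapes and #{...}.
if __name__ == '__main__':  # demo only
    from pprint import pprint
    pprint(gen_elixir_string_rules('demo', '"', String.Double))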
def gen_elixir_sigstr_rules(term, token, interpol=True):
if interpol:
return [
(r'[^#%s\\]+' % (term,), token),
include('escapes'),
(r'\\.', token),
(r'%s[a-zA-Z]*' % (term,), token, '#pop'),
include('interpol')
]
else:
return [
(r'[^%s\\]+' % (term,), token),
(r'\\.', token),
(r'%s[a-zA-Z]*' % (term,), token, '#pop'),
]
class ElixirLexer(RegexLexer):
"""
For the `Elixir language <http://elixir-lang.org>`_.
.. versionadded:: 1.5
"""
name = 'Elixir'
aliases = ['elixir', 'ex', 'exs']
filenames = ['*.ex', '*.exs']
mimetypes = ['text/x-elixir']
KEYWORD = ('fn', 'do', 'end', 'after', 'else', 'rescue', 'catch')
KEYWORD_OPERATOR = ('not', 'and', 'or', 'when', 'in')
BUILTIN = (
'case', 'cond', 'for', 'if', 'unless', 'try', 'receive', 'raise',
'quote', 'unquote', 'unquote_splicing', 'throw', 'super'
)
BUILTIN_DECLARATION = (
'def', 'defp', 'defmodule', 'defprotocol', 'defmacro', 'defmacrop',
'defdelegate', 'defexception', 'defstruct', 'defimpl', 'defcallback'
)
BUILTIN_NAMESPACE = ('import', 'require', 'use', 'alias')
CONSTANT = ('nil', 'true', 'false')
PSEUDO_VAR = ('_', '__MODULE__', '__DIR__', '__ENV__', '__CALLER__')
OPERATORS3 = (
'<<<', '>>>', '|||', '&&&', '^^^', '~~~', '===', '!==',
'~>>', '<~>', '|~>', '<|>',
)
OPERATORS2 = (
'==', '!=', '<=', '>=', '&&', '||', '<>', '++', '--', '|>', '=~',
'->', '<-', '|', '.', '=', '~>', '<~',
)
OPERATORS1 = ('<', '>', '+', '-', '*', '/', '!', '^', '&')
PUNCTUATION = (
'\\\\', '<<', '>>', '=>', '(', ')', ':', ';', ',', '[', ']'
)
def get_tokens_unprocessed(self, text):
for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
if token is Name:
if value in self.KEYWORD:
yield index, Keyword, value
elif value in self.KEYWORD_OPERATOR:
yield index, Operator.Word, value
elif value in self.BUILTIN:
yield index, Keyword, value
elif value in self.BUILTIN_DECLARATION:
yield index, Keyword.Declaration, value
elif value in self.BUILTIN_NAMESPACE:
yield index, Keyword.Namespace, value
elif value in self.CONSTANT:
yield index, Name.Constant, value
elif value in self.PSEUDO_VAR:
yield index, Name.Builtin.Pseudo, value
else:
yield index, token, value
else:
yield index, token, value
def gen_elixir_sigil_rules():
# all valid sigil terminators (excluding heredocs)
terminators = [
(r'\{', r'\}', 'cb'),
(r'\[', r'\]', 'sb'),
(r'\(', r'\)', 'pa'),
(r'<', r'>', 'ab'),
(r'/', r'/', 'slas'),
(r'\|', r'\|', 'pipe'),
('"', '"', 'quot'),
("'", "'", 'apos'),
]
# heredocs have slightly different rules
triquotes = [(r'"""', 'triquot'), (r"'''", 'triapos')]
token = String.Other
states = {'sigils': []}
for term, name in triquotes:
states['sigils'] += [
(r'(~[a-z])(%s)' % (term,), bygroups(token, String.Heredoc),
(name + '-end', name + '-intp')),
(r'(~[A-Z])(%s)' % (term,), bygroups(token, String.Heredoc),
(name + '-end', name + '-no-intp')),
]
states[name + '-end'] = [
(r'[a-zA-Z]+', token, '#pop'),
default('#pop'),
]
states[name + '-intp'] = [
(r'^\s*' + term, String.Heredoc, '#pop'),
include('heredoc_interpol'),
]
states[name + '-no-intp'] = [
(r'^\s*' + term, String.Heredoc, '#pop'),
include('heredoc_no_interpol'),
]
for lterm, rterm, name in terminators:
states['sigils'] += [
(r'~[a-z]' + lterm, token, name + '-intp'),
(r'~[A-Z]' + lterm, token, name + '-no-intp'),
]
states[name + '-intp'] = gen_elixir_sigstr_rules(rterm, token)
states[name + '-no-intp'] = \
gen_elixir_sigstr_rules(rterm, token, interpol=False)
return states
op3_re = "|".join(re.escape(s) for s in OPERATORS3)
op2_re = "|".join(re.escape(s) for s in OPERATORS2)
op1_re = "|".join(re.escape(s) for s in OPERATORS1)
ops_re = r'(?:%s|%s|%s)' % (op3_re, op2_re, op1_re)
punctuation_re = "|".join(re.escape(s) for s in PUNCTUATION)
alnum = r'\w'
name_re = r'(?:\.\.\.|[a-z_]%s*[!?]?)' % alnum
modname_re = r'[A-Z]%(alnum)s*(?:\.[A-Z]%(alnum)s*)*' % {'alnum': alnum}
complex_name_re = r'(?:%s|%s|%s)' % (name_re, modname_re, ops_re)
special_atom_re = r'(?:\.\.\.|<<>>|%\{\}|%|\{\})'
long_hex_char_re = r'(\\x\{)([\da-fA-F]+)(\})'
hex_char_re = r'(\\x[\da-fA-F]{1,2})'
escape_char_re = r'(\\[abdefnrstv])'
tokens = {
'root': [
(r'\s+', Text),
(r'#.*$', Comment.Single),
# Various kinds of characters
(r'(\?)' + long_hex_char_re,
bygroups(String.Char,
String.Escape, Number.Hex, String.Escape)),
(r'(\?)' + hex_char_re,
bygroups(String.Char, String.Escape)),
(r'(\?)' + escape_char_re,
bygroups(String.Char, String.Escape)),
(r'\?\\?.', String.Char),
# '::' has to go before atoms
(r':::', String.Symbol),
(r'::', Operator),
# atoms
(r':' + special_atom_re, String.Symbol),
(r':' + complex_name_re, String.Symbol),
(r':"', String.Symbol, 'string_double_atom'),
(r":'", String.Symbol, 'string_single_atom'),
# [keywords: ...]
(r'(%s|%s)(:)(?=\s|\n)' % (special_atom_re, complex_name_re),
bygroups(String.Symbol, Punctuation)),
# @attributes
(r'@' + name_re, Name.Attribute),
# identifiers
(name_re, Name),
(r'(%%?)(%s)' % (modname_re,), bygroups(Punctuation, Name.Class)),
# operators and punctuation
(op3_re, Operator),
(op2_re, Operator),
(punctuation_re, Punctuation),
(r'&\d', Name.Entity), # anon func arguments
(op1_re, Operator),
# numbers
(r'0b[01]+', Number.Bin),
(r'0o[0-7]+', Number.Oct),
(r'0x[\da-fA-F]+', Number.Hex),
(r'\d(_?\d)*\.\d(_?\d)*([eE][-+]?\d(_?\d)*)?', Number.Float),
(r'\d(_?\d)*', Number.Integer),
# strings and heredocs
(r'"""\s*', String.Heredoc, 'heredoc_double'),
(r"'''\s*$", String.Heredoc, 'heredoc_single'),
(r'"', String.Double, 'string_double'),
(r"'", String.Single, 'string_single'),
include('sigils'),
(r'%\{', Punctuation, 'map_key'),
(r'\{', Punctuation, 'tuple'),
],
'heredoc_double': [
(r'^\s*"""', String.Heredoc, '#pop'),
include('heredoc_interpol'),
],
'heredoc_single': [
(r"^\s*'''", String.Heredoc, '#pop'),
include('heredoc_interpol'),
],
'heredoc_interpol': [
(r'[^#\\\n]+', String.Heredoc),
include('escapes'),
(r'\\.', String.Heredoc),
(r'\n+', String.Heredoc),
include('interpol'),
],
'heredoc_no_interpol': [
(r'[^\\\n]+', String.Heredoc),
(r'\\.', String.Heredoc),
(r'\n+', String.Heredoc),
],
'escapes': [
(long_hex_char_re,
bygroups(String.Escape, Number.Hex, String.Escape)),
(hex_char_re, String.Escape),
(escape_char_re, String.Escape),
],
'interpol': [
(r'#\{', String.Interpol, 'interpol_string'),
],
'interpol_string': [
(r'\}', String.Interpol, "#pop"),
include('root')
],
'map_key': [
include('root'),
(r':', Punctuation, 'map_val'),
(r'=>', Punctuation, 'map_val'),
(r'\}', Punctuation, '#pop'),
],
'map_val': [
include('root'),
(r',', Punctuation, '#pop'),
(r'(?=\})', Punctuation, '#pop'),
],
'tuple': [
include('root'),
(r'\}', Punctuation, '#pop'),
],
}
tokens.update(gen_elixir_string_rules('double', '"', String.Double))
tokens.update(gen_elixir_string_rules('single', "'", String.Single))
tokens.update(gen_elixir_string_rules('double_atom', '"', String.Symbol))
tokens.update(gen_elixir_string_rules('single_atom', "'", String.Symbol))
tokens.update(gen_elixir_sigil_rules())
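# --- Usage sketch (not part of the original file) -------------------------
# The generated sigil states give ~r/.../ and friends String.Other tokens,
# while plain double-quoted strings run through 'string_double' with escapes
# and interpolation handled by the shared states.
if __name__ == '__main__':  # demo only
    for ttype, value in ElixirLexer().get_tokens('x = ~r/ab+c/'):
        print(ttype, repr(value))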
class ElixirConsoleLexer(Lexer):
"""
For Elixir interactive console (iex) output like:
.. sourcecode:: iex
iex> [head | tail] = [1,2,3]
[1,2,3]
iex> head
1
iex> tail
[2,3]
iex> [head | tail]
[1,2,3]
iex> length [head | tail]
3
.. versionadded:: 1.5
"""
name = 'Elixir iex session'
aliases = ['iex']
mimetypes = ['text/x-elixir-shellsession']
_prompt_re = re.compile(r'(iex|\.{3})(\(\d+\))?> ')
def get_tokens_unprocessed(self, text):
exlexer = ElixirLexer(**self.options)
curcode = ''
in_error = False
insertions = []
for match in line_re.finditer(text):
line = match.group()
if line.startswith(u'** '):
in_error = True
insertions.append((len(curcode),
[(0, Generic.Error, line[:-1])]))
curcode += line[-1:]
else:
m = self._prompt_re.match(line)
if m is not None:
in_error = False
end = m.end()
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:end])]))
curcode += line[end:]
else:
if curcode:
for item in do_insertions(
insertions, exlexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
token = Generic.Error if in_error else Generic.Output
yield match.start(), token, line
if curcode:
for item in do_insertions(
insertions, exlexer.get_tokens_unprocessed(curcode)):
yield item


@@ -1,114 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.esoteric
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for esoteric languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['BrainfuckLexer', 'BefungeLexer', 'RedcodeLexer']
class BrainfuckLexer(RegexLexer):
"""
Lexer for the esoteric `BrainFuck <http://www.muppetlabs.com/~breadbox/bf/>`_
language.
"""
name = 'Brainfuck'
aliases = ['brainfuck', 'bf']
filenames = ['*.bf', '*.b']
mimetypes = ['application/x-brainfuck']
tokens = {
'common': [
# use different colors for different instruction types
(r'[.,]+', Name.Tag),
(r'[+-]+', Name.Builtin),
(r'[<>]+', Name.Variable),
(r'[^.,+\-<>\[\]]+', Comment),
],
'root': [
(r'\[', Keyword, 'loop'),
(r'\]', Error),
include('common'),
],
'loop': [
(r'\[', Keyword, '#push'),
(r'\]', Keyword, '#pop'),
include('common'),
]
}
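# --- Usage sketch (not part of the original file) -------------------------
# '[' pushes the 'loop' state (so nesting is tracked via #push/#pop) and a
# ']' seen in 'root' with no open loop is flagged as Error.
if __name__ == '__main__':  # demo only
    for ttype, value in BrainfuckLexer().get_tokens('+[->+<]'):
        print(ttype, repr(value))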
class BefungeLexer(RegexLexer):
"""
Lexer for the esoteric `Befunge <http://en.wikipedia.org/wiki/Befunge>`_
language.
.. versionadded:: 0.7
"""
name = 'Befunge'
aliases = ['befunge']
filenames = ['*.befunge']
mimetypes = ['application/x-befunge']
tokens = {
'root': [
(r'[0-9a-f]', Number),
(r'[+*/%!`-]', Operator), # Traditional math
(r'[<>^v?\[\]rxjk]', Name.Variable), # Move, imperatives
(r'[:\\$.,n]', Name.Builtin), # Stack ops, imperatives
(r'[|_mw]', Keyword),
(r'[{}]', Name.Tag), # Befunge-98 stack ops
(r'".*?"', String.Double), # Strings don't appear to allow escapes
(r'\'.', String.Single), # Single character
(r'[#;]', Comment), # Trampoline... depends on direction hit
(r'[pg&~=@iotsy]', Keyword), # Misc
(r'[()A-Z]', Comment), # Fingerprints
(r'\s+', Text), # Whitespace doesn't matter
],
}
class RedcodeLexer(RegexLexer):
"""
A simple Redcode lexer based on ICWS'94.
Contributed by Adam Blinkinsop <blinks@acm.org>.
.. versionadded:: 0.8
"""
name = 'Redcode'
aliases = ['redcode']
filenames = ['*.cw']
opcodes = ('DAT', 'MOV', 'ADD', 'SUB', 'MUL', 'DIV', 'MOD',
'JMP', 'JMZ', 'JMN', 'DJN', 'CMP', 'SLT', 'SPL',
'ORG', 'EQU', 'END')
modifiers = ('A', 'B', 'AB', 'BA', 'F', 'X', 'I')
tokens = {
'root': [
# Whitespace:
(r'\s+', Text),
(r';.*$', Comment.Single),
# Lexemes:
# Identifiers
(r'\b(%s)\b' % '|'.join(opcodes), Name.Function),
(r'\b(%s)\b' % '|'.join(modifiers), Name.Decorator),
(r'[A-Za-z_]\w*', Name),
# Operators
(r'[-+*/%]', Operator),
(r'[#$@<>]', Operator), # mode
(r'[.,]', Punctuation), # mode
# Numbers
(r'[-+]?\d+', Number.Integer),
],
}


@@ -1,344 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.factor
~~~~~~~~~~~~~~~~~~~~~~
Lexers for the Factor language.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, default, words
from pygments.token import Text, Comment, Keyword, Name, String, Number
__all__ = ['FactorLexer']
class FactorLexer(RegexLexer):
"""
Lexer for the `Factor <http://factorcode.org>`_ language.
.. versionadded:: 1.4
"""
name = 'Factor'
aliases = ['factor']
filenames = ['*.factor']
mimetypes = ['text/x-factor']
flags = re.MULTILINE | re.UNICODE
builtin_kernel = words((
'-rot', '2bi', '2bi@', '2bi*', '2curry', '2dip', '2drop', '2dup', '2keep', '2nip',
'2over', '2tri', '2tri@', '2tri*', '3bi', '3curry', '3dip', '3drop', '3dup', '3keep',
'3tri', '4dip', '4drop', '4dup', '4keep', '<wrapper>', '=', '>boolean', 'clone',
'?', '?execute', '?if', 'and', 'assert', 'assert=', 'assert?', 'bi', 'bi-curry',
'bi-curry@', 'bi-curry*', 'bi@', 'bi*', 'boa', 'boolean', 'boolean?', 'both?',
'build', 'call', 'callstack', 'callstack>array', 'callstack?', 'clear', '(clone)',
'compose', 'compose?', 'curry', 'curry?', 'datastack', 'die', 'dip', 'do', 'drop',
'dup', 'dupd', 'either?', 'eq?', 'equal?', 'execute', 'hashcode', 'hashcode*',
'identity-hashcode', 'identity-tuple', 'identity-tuple?', 'if', 'if*',
'keep', 'loop', 'most', 'new', 'nip', 'not', 'null', 'object', 'or', 'over',
'pick', 'prepose', 'retainstack', 'rot', 'same?', 'swap', 'swapd', 'throw',
'tri', 'tri-curry', 'tri-curry@', 'tri-curry*', 'tri@', 'tri*', 'tuple',
'tuple?', 'unless', 'unless*', 'until', 'when', 'when*', 'while', 'with',
'wrapper', 'wrapper?', 'xor'), suffix=r'\s')
builtin_assocs = words((
'2cache', '<enum>', '>alist', '?at', '?of', 'assoc', 'assoc-all?',
'assoc-any?', 'assoc-clone-like', 'assoc-combine', 'assoc-diff',
'assoc-diff!', 'assoc-differ', 'assoc-each', 'assoc-empty?',
'assoc-filter', 'assoc-filter!', 'assoc-filter-as', 'assoc-find',
'assoc-hashcode', 'assoc-intersect', 'assoc-like', 'assoc-map',
'assoc-map-as', 'assoc-partition', 'assoc-refine', 'assoc-size',
'assoc-stack', 'assoc-subset?', 'assoc-union', 'assoc-union!',
'assoc=', 'assoc>map', 'assoc?', 'at', 'at+', 'at*', 'cache', 'change-at',
'clear-assoc', 'delete-at', 'delete-at*', 'enum', 'enum?', 'extract-keys',
'inc-at', 'key?', 'keys', 'map>assoc', 'maybe-set-at', 'new-assoc', 'of',
'push-at', 'rename-at', 'set-at', 'sift-keys', 'sift-values', 'substitute',
'unzip', 'value-at', 'value-at*', 'value?', 'values', 'zip'), suffix=r'\s')
builtin_combinators = words((
'2cleave', '2cleave>quot', '3cleave', '3cleave>quot', '4cleave',
'4cleave>quot', 'alist>quot', 'call-effect', 'case', 'case-find',
'case>quot', 'cleave', 'cleave>quot', 'cond', 'cond>quot', 'deep-spread>quot',
'execute-effect', 'linear-case-quot', 'no-case', 'no-case?', 'no-cond',
'no-cond?', 'recursive-hashcode', 'shallow-spread>quot', 'spread',
'to-fixed-point', 'wrong-values', 'wrong-values?'), suffix=r'\s')
builtin_math = words((
'-', '/', '/f', '/i', '/mod', '2/', '2^', '<', '<=', '<fp-nan>', '>',
'>=', '>bignum', '>fixnum', '>float', '>integer', '(all-integers?)',
'(each-integer)', '(find-integer)', '*', '+', '?1+',
'abs', 'align', 'all-integers?', 'bignum', 'bignum?', 'bit?', 'bitand',
'bitnot', 'bitor', 'bits>double', 'bits>float', 'bitxor', 'complex',
'complex?', 'denominator', 'double>bits', 'each-integer', 'even?',
'find-integer', 'find-last-integer', 'fixnum', 'fixnum?', 'float',
'float>bits', 'float?', 'fp-bitwise=', 'fp-infinity?', 'fp-nan-payload',
'fp-nan?', 'fp-qnan?', 'fp-sign', 'fp-snan?', 'fp-special?',
'if-zero', 'imaginary-part', 'integer', 'integer>fixnum',
'integer>fixnum-strict', 'integer?', 'log2', 'log2-expects-positive',
'log2-expects-positive?', 'mod', 'neg', 'neg?', 'next-float',
'next-power-of-2', 'number', 'number=', 'number?', 'numerator', 'odd?',
'out-of-fixnum-range', 'out-of-fixnum-range?', 'power-of-2?',
'prev-float', 'ratio', 'ratio?', 'rational', 'rational?', 'real',
'real-part', 'real?', 'recip', 'rem', 'sgn', 'shift', 'sq', 'times',
'u<', 'u<=', 'u>', 'u>=', 'unless-zero', 'unordered?', 'when-zero',
'zero?'), suffix=r'\s')
builtin_sequences = words((
'1sequence', '2all?', '2each', '2map', '2map-as', '2map-reduce', '2reduce',
'2selector', '2sequence', '3append', '3append-as', '3each', '3map', '3map-as',
'3sequence', '4sequence', '<repetition>', '<reversed>', '<slice>', '?first',
'?last', '?nth', '?second', '?set-nth', 'accumulate', 'accumulate!',
'accumulate-as', 'all?', 'any?', 'append', 'append!', 'append-as',
'assert-sequence', 'assert-sequence=', 'assert-sequence?',
'binary-reduce', 'bounds-check', 'bounds-check?', 'bounds-error',
'bounds-error?', 'but-last', 'but-last-slice', 'cartesian-each',
'cartesian-map', 'cartesian-product', 'change-nth', 'check-slice',
'check-slice-error', 'clone-like', 'collapse-slice', 'collector',
'collector-for', 'concat', 'concat-as', 'copy', 'count', 'cut', 'cut-slice',
'cut*', 'delete-all', 'delete-slice', 'drop-prefix', 'each', 'each-from',
'each-index', 'empty?', 'exchange', 'filter', 'filter!', 'filter-as', 'find',
'find-from', 'find-index', 'find-index-from', 'find-last', 'find-last-from',
'first', 'first2', 'first3', 'first4', 'flip', 'follow', 'fourth', 'glue', 'halves',
'harvest', 'head', 'head-slice', 'head-slice*', 'head*', 'head?',
'if-empty', 'immutable', 'immutable-sequence', 'immutable-sequence?',
'immutable?', 'index', 'index-from', 'indices', 'infimum', 'infimum-by',
'insert-nth', 'interleave', 'iota', 'iota-tuple', 'iota-tuple?', 'join',
'join-as', 'last', 'last-index', 'last-index-from', 'length', 'lengthen',
'like', 'longer', 'longer?', 'longest', 'map', 'map!', 'map-as', 'map-find',
'map-find-last', 'map-index', 'map-integers', 'map-reduce', 'map-sum',
'max-length', 'member-eq?', 'member?', 'midpoint@', 'min-length',
'mismatch', 'move', 'new-like', 'new-resizable', 'new-sequence',
'non-negative-integer-expected', 'non-negative-integer-expected?',
'nth', 'nths', 'pad-head', 'pad-tail', 'padding', 'partition', 'pop', 'pop*',
'prefix', 'prepend', 'prepend-as', 'produce', 'produce-as', 'product', 'push',
'push-all', 'push-either', 'push-if', 'reduce', 'reduce-index', 'remove',
'remove!', 'remove-eq', 'remove-eq!', 'remove-nth', 'remove-nth!', 'repetition',
'repetition?', 'replace-slice', 'replicate', 'replicate-as', 'rest',
'rest-slice', 'reverse', 'reverse!', 'reversed', 'reversed?', 'second',
'selector', 'selector-for', 'sequence', 'sequence-hashcode', 'sequence=',
'sequence?', 'set-first', 'set-fourth', 'set-last', 'set-length', 'set-nth',
'set-second', 'set-third', 'short', 'shorten', 'shorter', 'shorter?',
'shortest', 'sift', 'slice', 'slice-error', 'slice-error?', 'slice?',
'snip', 'snip-slice', 'start', 'start*', 'subseq', 'subseq?', 'suffix',
'suffix!', 'sum', 'sum-lengths', 'supremum', 'supremum-by', 'surround', 'tail',
'tail-slice', 'tail-slice*', 'tail*', 'tail?', 'third', 'trim',
'trim-head', 'trim-head-slice', 'trim-slice', 'trim-tail', 'trim-tail-slice',
'unclip', 'unclip-last', 'unclip-last-slice', 'unclip-slice', 'unless-empty',
'virtual-exemplar', 'virtual-sequence', 'virtual-sequence?', 'virtual@',
'when-empty'), suffix=r'\s')
builtin_namespaces = words((
'+@', 'change', 'change-global', 'counter', 'dec', 'get', 'get-global',
'global', 'inc', 'init-namespaces', 'initialize', 'is-global', 'make-assoc',
'namespace', 'namestack', 'off', 'on', 'set', 'set-global', 'set-namestack',
'toggle', 'with-global', 'with-scope', 'with-variable', 'with-variables'),
suffix=r'\s')
builtin_arrays = words((
'1array', '2array', '3array', '4array', '<array>', '>array', 'array',
'array?', 'pair', 'pair?', 'resize-array'), suffix=r'\s')
builtin_io = words((
'(each-stream-block-slice)', '(each-stream-block)',
'(stream-contents-by-block)', '(stream-contents-by-element)',
'(stream-contents-by-length-or-block)',
'(stream-contents-by-length)', '+byte+', '+character+',
'bad-seek-type', 'bad-seek-type?', 'bl', 'contents', 'each-block',
'each-block-size', 'each-block-slice', 'each-line', 'each-morsel',
'each-stream-block', 'each-stream-block-slice', 'each-stream-line',
'error-stream', 'flush', 'input-stream', 'input-stream?',
'invalid-read-buffer', 'invalid-read-buffer?', 'lines', 'nl',
'output-stream', 'output-stream?', 'print', 'read', 'read-into',
'read-partial', 'read-partial-into', 'read-until', 'read1', 'readln',
'seek-absolute', 'seek-absolute?', 'seek-end', 'seek-end?',
'seek-input', 'seek-output', 'seek-relative', 'seek-relative?',
'stream-bl', 'stream-contents', 'stream-contents*', 'stream-copy',
'stream-copy*', 'stream-element-type', 'stream-flush',
'stream-length', 'stream-lines', 'stream-nl', 'stream-print',
'stream-read', 'stream-read-into', 'stream-read-partial',
'stream-read-partial-into', 'stream-read-partial-unsafe',
'stream-read-unsafe', 'stream-read-until', 'stream-read1',
'stream-readln', 'stream-seek', 'stream-seekable?', 'stream-tell',
'stream-write', 'stream-write1', 'tell-input', 'tell-output',
'with-error-stream', 'with-error-stream*', 'with-error>output',
'with-input-output+error-streams',
'with-input-output+error-streams*', 'with-input-stream',
'with-input-stream*', 'with-output-stream', 'with-output-stream*',
'with-output>error', 'with-output+error-stream',
'with-output+error-stream*', 'with-streams', 'with-streams*',
'write', 'write1'), suffix=r'\s')
builtin_strings = words((
'1string', '<string>', '>string', 'resize-string', 'string',
'string?'), suffix=r'\s')
builtin_vectors = words((
'1vector', '<vector>', '>vector', '?push', 'vector', 'vector?'),
suffix=r'\s')
builtin_continuations = words((
'<condition>', '<continuation>', '<restart>', 'attempt-all',
'attempt-all-error', 'attempt-all-error?', 'callback-error-hook',
'callcc0', 'callcc1', 'cleanup', 'compute-restarts', 'condition',
'condition?', 'continuation', 'continuation?', 'continue',
'continue-restart', 'continue-with', 'current-continuation',
'error', 'error-continuation', 'error-in-thread', 'error-thread',
'ifcc', 'ignore-errors', 'in-callback?', 'original-error', 'recover',
'restart', 'restart?', 'restarts', 'rethrow', 'rethrow-restarts',
'return', 'return-continuation', 'thread-error-hook', 'throw-continue',
'throw-restarts', 'with-datastack', 'with-return'), suffix=r'\s')
tokens = {
'root': [
# factor allows a file to start with a shebang
(r'#!.*$', Comment.Preproc),
default('base'),
],
'base': [
(r'\s+', Text),
# defining words
(r'((?:MACRO|MEMO|TYPED)?:[:]?)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function)),
(r'(M:[:]?)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Class, Text, Name.Function)),
(r'(C:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function, Text, Name.Class)),
(r'(GENERIC:)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function)),
(r'(HOOK:|GENERIC#)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function, Text, Name.Function)),
(r'\(\s', Name.Function, 'stackeffect'),
(r';\s', Keyword),
# imports and namespaces
(r'(USING:)(\s+)',
bygroups(Keyword.Namespace, Text), 'vocabs'),
(r'(USE:|UNUSE:|IN:|QUALIFIED:)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Namespace)),
(r'(QUALIFIED-WITH:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Namespace, Text, Name.Namespace)),
(r'(FROM:|EXCLUDE:)(\s+)(\S+)(\s+=>\s)',
bygroups(Keyword.Namespace, Text, Name.Namespace, Text), 'words'),
(r'(RENAME:)(\s+)(\S+)(\s+)(\S+)(\s+=>\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Function, Text, Name.Namespace, Text, Name.Function)),
(r'(ALIAS:|TYPEDEF:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Function, Text, Name.Function)),
(r'(DEFER:|FORGET:|POSTPONE:)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Function)),
# tuples and classes
(r'(TUPLE:|ERROR:)(\s+)(\S+)(\s+<\s+)(\S+)',
bygroups(Keyword, Text, Name.Class, Text, Name.Class), 'slots'),
(r'(TUPLE:|ERROR:|BUILTIN:)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Class), 'slots'),
(r'(MIXIN:|UNION:|INTERSECTION:)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Class)),
(r'(PREDICATE:)(\s+)(\S+)(\s+<\s+)(\S+)',
bygroups(Keyword, Text, Name.Class, Text, Name.Class)),
(r'(C:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function, Text, Name.Class)),
(r'(INSTANCE:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Class, Text, Name.Class)),
(r'(SLOT:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Function)),
(r'(SINGLETON:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Class)),
(r'SINGLETONS:', Keyword, 'classes'),
# other syntax
(r'(CONSTANT:|SYMBOL:|MAIN:|HELP:)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function)),
(r'SYMBOLS:\s', Keyword, 'words'),
(r'SYNTAX:\s', Keyword),
(r'ALIEN:\s', Keyword),
(r'(STRUCT:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Class)),
(r'(FUNCTION:)(\s+\S+\s+)(\S+)(\s+\(\s+[^)]+\)\s)',
bygroups(Keyword.Namespace, Text, Name.Function, Text)),
(r'(FUNCTION-ALIAS:)(\s+)(\S+)(\s+\S+\s+)(\S+)(\s+\(\s+[^)]+\)\s)',
bygroups(Keyword.Namespace, Text, Name.Function, Text, Name.Function, Text)),
# vocab.private
(r'(?:<PRIVATE|PRIVATE>)\s', Keyword.Namespace),
# strings
(r'"""\s+(?:.|\n)*?\s+"""', String),
(r'"(?:\\\\|\\"|[^"])*"', String),
(r'\S+"\s+(?:\\\\|\\"|[^"])*"', String),
(r'CHAR:\s+(?:\\[\\abfnrstv]|[^\\]\S*)\s', String.Char),
# comments
(r'!\s+.*$', Comment),
(r'#!\s+.*$', Comment),
(r'/\*\s+(?:.|\n)*?\s\*/\s', Comment),
# boolean constants
(r'[tf]\s', Name.Constant),
# symbols and literals
(r'[\\$]\s+\S+', Name.Constant),
(r'M\\\s+\S+\s+\S+', Name.Constant),
# numbers
(r'[+-]?(?:[\d,]*\d)?\.(?:\d([\d,]*\d)?)?(?:[eE][+-]?\d+)?\s', Number),
(r'[+-]?\d(?:[\d,]*\d)?(?:[eE][+-]?\d+)?\s', Number),
(r'0x[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number),
(r'NAN:\s+[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number),
(r'0b[01]+\s', Number.Bin),
(r'0o[0-7]+\s', Number.Oct),
(r'(?:\d([\d,]*\d)?)?\+\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number),
(r'(?:\-\d([\d,]*\d)?)?\-\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number),
# keywords
(r'(?:deprecated|final|foldable|flushable|inline|recursive)\s',
Keyword),
# builtins
(builtin_kernel, Name.Builtin),
(builtin_assocs, Name.Builtin),
(builtin_combinators, Name.Builtin),
(builtin_math, Name.Builtin),
(builtin_sequences, Name.Builtin),
(builtin_namespaces, Name.Builtin),
(builtin_arrays, Name.Builtin),
(builtin_io, Name.Builtin),
(builtin_strings, Name.Builtin),
(builtin_vectors, Name.Builtin),
(builtin_continuations, Name.Builtin),
# everything else is text
(r'\S+', Text),
],
'stackeffect': [
(r'\s+', Text),
(r'\(\s+', Name.Function, 'stackeffect'),
(r'\)\s', Name.Function, '#pop'),
(r'--\s', Name.Function),
(r'\S+', Name.Variable),
],
'slots': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
(r'(\{\s+)(\S+)(\s+[^}]+\s+\}\s)',
bygroups(Text, Name.Variable, Text)),
(r'\S+', Name.Variable),
],
'vocabs': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
(r'\S+', Name.Namespace),
],
'classes': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
(r'\S+', Name.Class),
],
'words': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
(r'\S+', Name.Function),
],
}
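The builtin word lists above go through Pygments' words() helper, which compiles each tuple into one optimized alternation via regex_opt(); the suffix=r'\s' matters because Factor words are whitespace-delimited, so a builtin only matches when whitespace follows. A minimal sketch of that equivalence, assuming a Pygments 2.x install:

import re
from pygments.lexer import words
from pygments.regexopt import regex_opt

# words(...) is a lazy Future; .get() resolves it to the final regex string
w = words(('dup', 'drop', 'swap'), suffix=r'\s')
assert w.get() == regex_opt(('dup', 'drop', 'swap'), suffix=r'\s')
assert re.match(w.get(), 'dup ')           # whitespace-terminated word
assert not re.match(w.get(), 'duplicate')  # no partial matches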


@@ -1,250 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.fantom
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Fantom language.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from string import Template
from pygments.lexer import RegexLexer, include, bygroups, using, \
this, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Literal
__all__ = ['FantomLexer']
class FantomLexer(RegexLexer):
"""
For Fantom source code.
.. versionadded:: 1.5
"""
name = 'Fantom'
aliases = ['fan']
filenames = ['*.fan']
mimetypes = ['application/x-fantom']
# often used regexes
def s(str):
return Template(str).substitute(
dict(
pod=r'[\"\w\.]+',
eos=r'\n|;',
id=r'[a-zA-Z_]\w*',
# all chars which can be part of type definition. Starts with
# either letter, or [ (maps), or | (funcs)
type=r'(?:\[|[a-zA-Z_]|\|)[:\w\[\]|\->?]*?',
)
)
tokens = {
'comments': [
(r'(?s)/\*.*?\*/', Comment.Multiline), # Multiline
(r'//.*?\n', Comment.Single), # Single line
# TODO: highlight references in fandocs
(r'\*\*.*?\n', Comment.Special), # Fandoc
(r'#.*\n', Comment.Single) # Shell-style
],
'literals': [
(r'\b-?[\d_]+(ns|ms|sec|min|hr|day)', Number), # Duration
(r'\b-?[\d_]*\.[\d_]+(ns|ms|sec|min|hr|day)', Number), # Duration with dot
(r'\b-?(\d+)?\.\d+(f|F|d|D)?', Number.Float), # Float/Decimal
(r'\b-?0x[0-9a-fA-F_]+', Number.Hex), # Hex
(r'\b-?[\d_]+', Number.Integer), # Int
(r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char), # Char
(r'"', Punctuation, 'insideStr'), # Opening quote
(r'`', Punctuation, 'insideUri'), # Opening accent
(r'\b(true|false|null)\b', Keyword.Constant), # Bool & null
(r'(?:(\w+)(::))?(\w+)(<\|)(.*?)(\|>)', # DSL
bygroups(Name.Namespace, Punctuation, Name.Class,
Punctuation, String, Punctuation)),
(r'(?:(\w+)(::))?(\w+)?(#)(\w+)?', # Type/slot literal
bygroups(Name.Namespace, Punctuation, Name.Class,
Punctuation, Name.Function)),
(r'\[,\]', Literal), # Empty list
(s(r'($type)(\[,\])'), # Typed empty list
bygroups(using(this, state='inType'), Literal)),
(r'\[:\]', Literal), # Empty Map
(s(r'($type)(\[:\])'),
bygroups(using(this, state='inType'), Literal)),
],
'insideStr': [
(r'\\\\', String.Escape), # Escaped backslash
(r'\\"', String.Escape), # Escaped "
(r'\\`', String.Escape), # Escaped `
(r'\$\w+', String.Interpol), # Subst var
(r'\$\{.*?\}', String.Interpol), # Subst expr
(r'"', Punctuation, '#pop'), # Closing quot
(r'.', String) # String content
],
'insideUri': [ # TODO: remove copy/paste str/uri
(r'\\\\', String.Escape), # Escaped backslash
(r'\\"', String.Escape), # Escaped "
(r'\\`', String.Escape), # Escaped `
(r'\$\w+', String.Interpol), # Subst var
(r'\$\{.*?\}', String.Interpol), # Subst expr
(r'`', Punctuation, '#pop'), # Closing tick
(r'.', String.Backtick) # URI content
],
'protectionKeywords': [
(r'\b(public|protected|private|internal)\b', Keyword),
],
'typeKeywords': [
(r'\b(abstract|final|const|native|facet|enum)\b', Keyword),
],
'methodKeywords': [
(r'\b(abstract|native|once|override|static|virtual|final)\b',
Keyword),
],
'fieldKeywords': [
(r'\b(abstract|const|final|native|override|static|virtual|'
r'readonly)\b', Keyword)
],
'otherKeywords': [
(words((
'try', 'catch', 'throw', 'finally', 'for', 'if', 'else', 'while',
'as', 'is', 'isnot', 'switch', 'case', 'default', 'continue',
'break', 'do', 'return', 'get', 'set'), prefix=r'\b', suffix=r'\b'),
Keyword),
(r'\b(it|this|super)\b', Name.Builtin.Pseudo),
],
'operators': [
(r'\+\+|\-\-|\+|\-|\*|/|\|\||&&|<=>|<=|<|>=|>|=|!|\[|\]', Operator)
],
'inType': [
(r'[\[\]|\->:?]', Punctuation),
(s(r'$id'), Name.Class),
default('#pop'),
],
'root': [
include('comments'),
include('protectionKeywords'),
include('typeKeywords'),
include('methodKeywords'),
include('fieldKeywords'),
include('literals'),
include('otherKeywords'),
include('operators'),
(r'using\b', Keyword.Namespace, 'using'), # Using stmt
(r'@\w+', Name.Decorator, 'facet'), # Symbol
(r'(class|mixin)(\s+)(\w+)', bygroups(Keyword, Text, Name.Class),
'inheritance'), # Inheritance list
# Type var := val
(s(r'($type)([ \t]+)($id)(\s*)(:=)'),
bygroups(using(this, state='inType'), Text,
Name.Variable, Text, Operator)),
# var := val
(s(r'($id)(\s*)(:=)'),
bygroups(Name.Variable, Text, Operator)),
# .someId( or ->someId( ###
(s(r'(\.|(?:\->))($id)(\s*)(\()'),
bygroups(Operator, Name.Function, Text, Punctuation),
'insideParen'),
# .someId or ->someId
(s(r'(\.|(?:\->))($id)'),
bygroups(Operator, Name.Function)),
# new makeXXX (
(r'(new)(\s+)(make\w*)(\s*)(\()',
bygroups(Keyword, Text, Name.Function, Text, Punctuation),
'insideMethodDeclArgs'),
# Type name (
(s(r'($type)([ \t]+)' # Return type and whitespace
r'($id)(\s*)(\()'), # method name + open brace
bygroups(using(this, state='inType'), Text,
Name.Function, Text, Punctuation),
'insideMethodDeclArgs'),
# ArgType argName,
(s(r'($type)(\s+)($id)(\s*)(,)'),
bygroups(using(this, state='inType'), Text, Name.Variable,
Text, Punctuation)),
# ArgType argName)
# Covered in 'insideParen' state
# ArgType argName -> ArgType|
(s(r'($type)(\s+)($id)(\s*)(\->)(\s*)($type)(\|)'),
bygroups(using(this, state='inType'), Text, Name.Variable,
Text, Punctuation, Text, using(this, state='inType'),
Punctuation)),
# ArgType argName|
(s(r'($type)(\s+)($id)(\s*)(\|)'),
bygroups(using(this, state='inType'), Text, Name.Variable,
Text, Punctuation)),
# Type var
(s(r'($type)([ \t]+)($id)'),
bygroups(using(this, state='inType'), Text,
Name.Variable)),
(r'\(', Punctuation, 'insideParen'),
(r'\{', Punctuation, 'insideBrace'),
(r'.', Text)
],
'insideParen': [
(r'\)', Punctuation, '#pop'),
include('root'),
],
'insideMethodDeclArgs': [
(r'\)', Punctuation, '#pop'),
(s(r'($type)(\s+)($id)(\s*)(\))'),
bygroups(using(this, state='inType'), Text, Name.Variable,
Text, Punctuation), '#pop'),
include('root'),
],
'insideBrace': [
(r'\}', Punctuation, '#pop'),
include('root'),
],
'inheritance': [
(r'\s+', Text), # Whitespace
(r':|,', Punctuation),
(r'(?:(\w+)(::))?(\w+)',
bygroups(Name.Namespace, Punctuation, Name.Class)),
(r'\{', Punctuation, '#pop')
],
'using': [
(r'[ \t]+', Text), # consume whitespaces
(r'(\[)(\w+)(\])',
bygroups(Punctuation, Comment.Special, Punctuation)), # ffi
(r'(\")?([\w.]+)(\")?',
bygroups(Punctuation, Name.Namespace, Punctuation)), # podname
(r'::', Punctuation, 'usingClass'),
default('#pop')
],
'usingClass': [
(r'[ \t]+', Text), # consume whitespaces
(r'(as)(\s+)(\w+)',
bygroups(Keyword.Declaration, Text, Name.Class), '#pop:2'),
(r'[\w$]+', Name.Class),
default('#pop:2') # jump out to root state
],
'facet': [
(r'\s+', Text),
(r'\{', Punctuation, 'facetFields'),
default('#pop')
],
'facetFields': [
include('comments'),
include('literals'),
include('operators'),
(r'\s+', Text),
(r'(\s*)(\w+)(\s*)(=)', bygroups(Text, Name, Text, Operator)),
(r'\}', Punctuation, '#pop'),
(r'.', Text)
],
}
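The s() helper above is a tiny preprocessor: it expands the $pod/$id/$type placeholders into the shared regex fragments before the token table is built, so every s(...) rule is an ordinary regex by the time RegexLexer compiles it. A sketch of one expansion, reusing the same substitutions:

from string import Template

fragments = dict(
    id=r'[a-zA-Z_]\w*',
    type=r'(?:\[|[a-zA-Z_]|\|)[:\w\[\]|\->?]*?',
)
# the "Type var := val" rule from 'root', fully expanded
rule = Template(r'($type)([ \t]+)($id)(\s*)(:=)').substitute(fragments)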


@@ -1,273 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.felix
~~~~~~~~~~~~~~~~~~~~~
Lexer for the Felix language.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups, default, words, \
combined
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['FelixLexer']
class FelixLexer(RegexLexer):
"""
For `Felix <http://www.felix-lang.org>`_ source code.
.. versionadded:: 1.2
"""
name = 'Felix'
aliases = ['felix', 'flx']
filenames = ['*.flx', '*.flxh']
mimetypes = ['text/x-felix']
preproc = (
'elif', 'else', 'endif', 'if', 'ifdef', 'ifndef',
)
keywords = (
'_', '_deref', 'all', 'as',
'assert', 'attempt', 'call', 'callback', 'case', 'caseno', 'cclass',
'code', 'compound', 'ctypes', 'do', 'done', 'downto', 'elif', 'else',
'endattempt', 'endcase', 'endif', 'endmatch', 'enum', 'except',
'exceptions', 'expect', 'finally', 'for', 'forall', 'forget', 'fork',
'functor', 'goto', 'ident', 'if', 'incomplete', 'inherit', 'instance',
'interface', 'jump', 'lambda', 'loop', 'match', 'module', 'namespace',
'new', 'noexpand', 'nonterm', 'obj', 'of', 'open', 'parse', 'raise',
'regexp', 'reglex', 'regmatch', 'rename', 'return', 'the', 'then',
'to', 'type', 'typecase', 'typedef', 'typematch', 'typeof', 'upto',
'when', 'whilst', 'with', 'yield',
)
keyword_directives = (
'_gc_pointer', '_gc_type', 'body', 'comment', 'const', 'export',
'header', 'inline', 'lval', 'macro', 'noinline', 'noreturn',
'package', 'private', 'pod', 'property', 'public', 'publish',
'requires', 'todo', 'virtual', 'use',
)
keyword_declarations = (
'def', 'let', 'ref', 'val', 'var',
)
keyword_types = (
'unit', 'void', 'any', 'bool',
'byte', 'offset',
'address', 'caddress', 'cvaddress', 'vaddress',
'tiny', 'short', 'int', 'long', 'vlong',
'utiny', 'ushort', 'vshort', 'uint', 'ulong', 'uvlong',
'int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float', 'double', 'ldouble',
'complex', 'dcomplex', 'lcomplex',
'imaginary', 'dimaginary', 'limaginary',
'char', 'wchar', 'uchar',
'charp', 'charcp', 'ucharp', 'ucharcp',
'string', 'wstring', 'ustring',
'cont',
'array', 'varray', 'list',
'lvalue', 'opt', 'slice',
)
keyword_constants = (
'false', 'true',
)
operator_words = (
'and', 'not', 'in', 'is', 'isin', 'or', 'xor',
)
name_builtins = (
'_svc', 'while',
)
name_pseudo = (
'root', 'self', 'this',
)
decimal_suffixes = '([tTsSiIlLvV]|ll|LL|([iIuU])(8|16|32|64))?'
tokens = {
'root': [
include('whitespace'),
# Keywords
(words(('axiom', 'ctor', 'fun', 'gen', 'proc', 'reduce',
'union'), suffix=r'\b'),
Keyword, 'funcname'),
(words(('class', 'cclass', 'cstruct', 'obj', 'struct'), suffix=r'\b'),
Keyword, 'classname'),
(r'(instance|module|typeclass)\b', Keyword, 'modulename'),
(words(keywords, suffix=r'\b'), Keyword),
(words(keyword_directives, suffix=r'\b'), Name.Decorator),
(words(keyword_declarations, suffix=r'\b'), Keyword.Declaration),
(words(keyword_types, suffix=r'\b'), Keyword.Type),
(words(keyword_constants, suffix=r'\b'), Keyword.Constant),
# Operators
include('operators'),
# Float Literal
# -- Hex Float
(r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)'
r'[pP][+\-]?[0-9_]+[lLfFdD]?', Number.Float),
# -- DecimalFloat
(r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|'
r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[lLfFdD]?', Number.Float),
(r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[lLfFdD]?',
Number.Float),
# IntegerLiteral
# -- Binary
(r'0[Bb][01_]+%s' % decimal_suffixes, Number.Bin),
# -- Octal
(r'0[0-7_]+%s' % decimal_suffixes, Number.Oct),
# -- Hexadecimal
(r'0[xX][0-9a-fA-F_]+%s' % decimal_suffixes, Number.Hex),
# -- Decimal
(r'(0|[1-9][0-9_]*)%s' % decimal_suffixes, Number.Integer),
# Strings
('([rR][cC]?|[cC][rR])"""', String, 'tdqs'),
("([rR][cC]?|[cC][rR])'''", String, 'tsqs'),
('([rR][cC]?|[cC][rR])"', String, 'dqs'),
("([rR][cC]?|[cC][rR])'", String, 'sqs'),
('[cCfFqQwWuU]?"""', String, combined('stringescape', 'tdqs')),
("[cCfFqQwWuU]?'''", String, combined('stringescape', 'tsqs')),
('[cCfFqQwWuU]?"', String, combined('stringescape', 'dqs')),
("[cCfFqQwWuU]?'", String, combined('stringescape', 'sqs')),
# Punctuation
(r'[\[\]{}:(),;?]', Punctuation),
# Labels
(r'[a-zA-Z_]\w*:>', Name.Label),
# Identifiers
(r'(%s)\b' % '|'.join(name_builtins), Name.Builtin),
(r'(%s)\b' % '|'.join(name_pseudo), Name.Builtin.Pseudo),
(r'[a-zA-Z_]\w*', Name),
],
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
include('comment'),
# Preprocessor
(r'#\s*if\s+0', Comment.Preproc, 'if0'),
(r'#', Comment.Preproc, 'macro'),
],
'operators': [
(r'(%s)\b' % '|'.join(operator_words), Operator.Word),
(r'!=|==|<<|>>|\|\||&&|[-~+/*%=<>&^|.$]', Operator),
],
'comment': [
(r'//(.*?)\n', Comment.Single),
(r'/[*]', Comment.Multiline, 'comment2'),
],
'comment2': [
(r'[^/*]', Comment.Multiline),
(r'/[*]', Comment.Multiline, '#push'),
(r'[*]/', Comment.Multiline, '#pop'),
(r'[/*]', Comment.Multiline),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment, '#push'),
(r'^\s*#endif.*?(?<!\\)\n', Comment, '#pop'),
(r'.*?\n', Comment),
],
'macro': [
include('comment'),
(r'(import|include)(\s+)(<[^>]*?>)',
bygroups(Comment.Preproc, Text, String), '#pop'),
(r'(import|include)(\s+)("[^"]*?")',
bygroups(Comment.Preproc, Text, String), '#pop'),
(r"(import|include)(\s+)('[^']*?')",
bygroups(Comment.Preproc, Text, String), '#pop'),
(r'[^/\n]+', Comment.Preproc),
# (r'/[*](.|\n)*?[*]/', Comment),
# (r'//.*?\n', Comment, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'funcname': [
include('whitespace'),
(r'[a-zA-Z_]\w*', Name.Function, '#pop'),
# anonymous functions
(r'(?=\()', Text, '#pop'),
],
'classname': [
include('whitespace'),
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
# anonymous classes
(r'(?=\{)', Text, '#pop'),
],
'modulename': [
include('whitespace'),
(r'\[', Punctuation, ('modulename2', 'tvarlist')),
default('modulename2'),
],
'modulename2': [
include('whitespace'),
(r'([a-zA-Z_]\w*)', Name.Namespace, '#pop:2'),
],
'tvarlist': [
include('whitespace'),
include('operators'),
(r'\[', Punctuation, '#push'),
(r'\]', Punctuation, '#pop'),
(r',', Punctuation),
(r'(with|where)\b', Keyword),
(r'[a-zA-Z_]\w*', Name),
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'strings': [
(r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'%', String)
# newlines are an error (use "nl" state)
],
'nl': [
(r'\n', String)
],
'dqs': [
(r'"', String, '#pop'),
# included here again for raw strings
(r'\\\\|\\"|\\\n', String.Escape),
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
# included here again for raw strings
(r"\\\\|\\'|\\\n", String.Escape),
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
include('strings'),
include('nl')
],
'tsqs': [
(r"'''", String, '#pop'),
include('strings'),
include('nl')
],
}
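The integer rules above splice decimal_suffixes into each literal pattern with %-interpolation, and the trailing ? makes the suffix optional. A standalone check of the resulting hexadecimal rule, using only the re module:

import re

decimal_suffixes = '([tTsSiIlLvV]|ll|LL|([iIuU])(8|16|32|64))?'
hex_lit = re.compile(r'0[xX][0-9a-fA-F_]+%s$' % decimal_suffixes)
assert hex_lit.match('0xDEAD_BEEFu64')  # suffixed literal
assert hex_lit.match('0x1f')            # suffix is optional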


@@ -1,161 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.fortran
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Fortran languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['FortranLexer']
class FortranLexer(RegexLexer):
"""
Lexer for FORTRAN 90 code.
.. versionadded:: 0.10
"""
name = 'Fortran'
aliases = ['fortran']
filenames = ['*.f', '*.f90', '*.F', '*.F90']
mimetypes = ['text/x-fortran']
flags = re.IGNORECASE | re.MULTILINE
# Data Types: INTEGER, REAL, COMPLEX, LOGICAL, CHARACTER and DOUBLE PRECISION
# Operators: **, *, +, -, /, <, >, <=, >=, ==, /=
# Logical (?): NOT, AND, OR, EQV, NEQV
# Builtins:
# http://gcc.gnu.org/onlinedocs/gcc-3.4.6/g77/Table-of-Intrinsic-Functions.html
tokens = {
'root': [
(r'^#.*\n', Comment.Preproc),
(r'!.*\n', Comment),
include('strings'),
include('core'),
(r'[a-z][\w$]*', Name),
include('nums'),
(r'[\s]+', Text),
],
'core': [
# Statements
(words((
'ABSTRACT', 'ACCEPT', 'ALL', 'ALLSTOP', 'ALLOCATABLE', 'ALLOCATE',
'ARRAY', 'ASSIGN', 'ASSOCIATE', 'ASYNCHRONOUS', 'BACKSPACE', 'BIND',
'BLOCK', 'BLOCKDATA', 'BYTE', 'CALL', 'CASE', 'CLASS', 'CLOSE',
'CODIMENSION', 'COMMON', 'CONCURRENT', 'CONTIGUOUS', 'CONTAINS',
'CONTINUE', 'CRITICAL', 'CYCLE', 'DATA', 'DEALLOCATE', 'DECODE',
'DEFERRED', 'DIMENSION', 'DO', 'ELEMENTAL', 'ELSE', 'ENCODE', 'END',
'ENTRY', 'ENUM', 'ENUMERATOR', 'EQUIVALENCE', 'EXIT', 'EXTENDS',
'EXTERNAL', 'EXTRINSIC', 'FILE', 'FINAL', 'FORALL', 'FORMAT',
'FUNCTION', 'GENERIC', 'GOTO', 'IF', 'IMAGES', 'IMPLICIT',
'IMPORT', 'IMPURE', 'INCLUDE', 'INQUIRE', 'INTENT', 'INTERFACE',
'INTRINSIC', 'IS', 'LOCK', 'MEMORY', 'MODULE', 'NAMELIST', 'NULLIFY',
'NONE', 'NON_INTRINSIC', 'NON_OVERRIDABLE', 'NOPASS', 'OPEN', 'OPTIONAL',
'OPTIONS', 'PARAMETER', 'PASS', 'PAUSE', 'POINTER', 'PRINT', 'PRIVATE',
'PROGRAM', 'PROCEDURE', 'PROTECTED', 'PUBLIC', 'PURE', 'READ',
'RECURSIVE', 'RESULT', 'RETURN', 'REWIND', 'SAVE', 'SELECT', 'SEQUENCE',
'STOP', 'SUBMODULE', 'SUBROUTINE', 'SYNC', 'SYNCALL', 'SYNCIMAGES',
'SYNCMEMORY', 'TARGET', 'THEN', 'TYPE', 'UNLOCK', 'USE', 'VALUE',
'VOLATILE', 'WHERE', 'WRITE', 'WHILE'), prefix=r'\b', suffix=r'\s*\b'),
Keyword),
# Data Types
(words((
'CHARACTER', 'COMPLEX', 'DOUBLE PRECISION', 'DOUBLE COMPLEX', 'INTEGER',
'LOGICAL', 'REAL', 'C_INT', 'C_SHORT', 'C_LONG', 'C_LONG_LONG', 'C_SIGNED_CHAR',
'C_SIZE_T', 'C_INT8_T', 'C_INT16_T', 'C_INT32_T', 'C_INT64_T', 'C_INT_LEAST8_T',
'C_INT_LEAST16_T', 'C_INT_LEAST32_T', 'C_INT_LEAST64_T', 'C_INT_FAST8_T',
'C_INT_FAST16_T', 'C_INT_FAST32_T', 'C_INT_FAST64_T', 'C_INTMAX_T',
'C_INTPTR_T', 'C_FLOAT', 'C_DOUBLE', 'C_LONG_DOUBLE', 'C_FLOAT_COMPLEX',
'C_DOUBLE_COMPLEX', 'C_LONG_DOUBLE_COMPLEX', 'C_BOOL', 'C_CHAR', 'C_PTR',
'C_FUNPTR'), prefix=r'\b', suffix=r'\s*\b'),
Keyword.Type),
# Operators
(r'(\*\*|\*|\+|-|\/|<|>|<=|>=|==|\/=|=)', Operator),
(r'(::)', Keyword.Declaration),
(r'[()\[\],:&%;.]', Punctuation),
# Intrinsics
(words((
'Abort', 'Abs', 'Access', 'AChar', 'ACos', 'ACosH', 'AdjustL',
'AdjustR', 'AImag', 'AInt', 'Alarm', 'All', 'Allocated', 'ALog',
'AMax', 'AMin', 'AMod', 'And', 'ANInt', 'Any', 'ASin', 'ASinH',
'Associated', 'ATan', 'ATanH', 'Atomic_Define', 'Atomic_Ref',
'BesJ', 'BesJN', 'Bessel_J0', 'Bessel_J1', 'Bessel_JN', 'Bessel_Y0',
'Bessel_Y1', 'Bessel_YN', 'BesY', 'BesYN', 'BGE', 'BGT', 'BLE',
'BLT', 'Bit_Size', 'BTest', 'CAbs', 'CCos', 'Ceiling', 'CExp',
'Char', 'ChDir', 'ChMod', 'CLog', 'Cmplx', 'Command_Argument_Count',
'Complex', 'Conjg', 'Cos', 'CosH', 'Count', 'CPU_Time', 'CShift',
'CSin', 'CSqRt', 'CTime', 'C_Loc', 'C_Associated',
'C_Null_Ptr', 'C_Null_Funptr', 'C_F_Pointer', 'C_F_ProcPointer',
'C_Null_Char', 'C_Alert', 'C_Backspace', 'C_Form_Feed', 'C_FunLoc',
'C_Sizeof', 'C_New_Line', 'C_Carriage_Return',
'C_Horizontal_Tab', 'C_Vertical_Tab', 'DAbs', 'DACos', 'DASin',
'DATan', 'Date_and_Time', 'DbesJ', 'DbesJN', 'DbesY',
'DbesYN', 'Dble', 'DCos', 'DCosH', 'DDiM', 'DErF',
'DErFC', 'DExp', 'Digits', 'DiM', 'DInt', 'DLog', 'DMax',
'DMin', 'DMod', 'DNInt', 'Dot_Product', 'DProd', 'DSign', 'DSinH',
'DShiftL', 'DShiftR', 'DSin', 'DSqRt', 'DTanH', 'DTan', 'DTime',
'EOShift', 'Epsilon', 'ErF', 'ErFC', 'ErFC_Scaled', 'ETime',
'Execute_Command_Line', 'Exit', 'Exp', 'Exponent', 'Extends_Type_Of',
'FDate', 'FGet', 'FGetC', 'FindLoc', 'Float', 'Floor', 'Flush',
'FNum', 'FPutC', 'FPut', 'Fraction', 'FSeek', 'FStat', 'FTell',
'Gamma', 'GError', 'GetArg', 'Get_Command', 'Get_Command_Argument',
'Get_Environment_Variable', 'GetCWD', 'GetEnv', 'GetGId', 'GetLog',
'GetPId', 'GetUId', 'GMTime', 'HostNm', 'Huge', 'Hypot', 'IAbs',
'IAChar', 'IAll', 'IAnd', 'IAny', 'IArgC', 'IBClr', 'IBits',
'IBSet', 'IChar', 'IDate', 'IDiM', 'IDInt', 'IDNInt', 'IEOr',
'IErrNo', 'IFix', 'Imag', 'ImagPart', 'Image_Index', 'Index',
'Int', 'IOr', 'IParity', 'IRand', 'IsaTty', 'IShft', 'IShftC',
'ISign', 'Iso_C_Binding', 'Is_Contiguous', 'Is_Iostat_End',
'Is_Iostat_Eor', 'ITime', 'Kill', 'Kind', 'LBound', 'LCoBound',
'Len', 'Len_Trim', 'LGe', 'LGt', 'Link', 'LLe', 'LLt', 'LnBlnk',
'Loc', 'Log', 'Log_Gamma', 'Logical', 'Long', 'LShift', 'LStat',
'LTime', 'MaskL', 'MaskR', 'MatMul', 'Max', 'MaxExponent',
'MaxLoc', 'MaxVal', 'MClock', 'Merge', 'Merge_Bits', 'Move_Alloc',
'Min', 'MinExponent', 'MinLoc', 'MinVal', 'Mod', 'Modulo', 'MvBits',
'Nearest', 'New_Line', 'NInt', 'Norm2', 'Not', 'Null', 'Num_Images',
'Or', 'Pack', 'Parity', 'PError', 'Precision', 'Present', 'Product',
'Radix', 'Rand', 'Random_Number', 'Random_Seed', 'Range', 'Real',
'RealPart', 'Rename', 'Repeat', 'Reshape', 'RRSpacing', 'RShift',
'Same_Type_As', 'Scale', 'Scan', 'Second', 'Selected_Char_Kind',
'Selected_Int_Kind', 'Selected_Real_Kind', 'Set_Exponent', 'Shape',
'ShiftA', 'ShiftL', 'ShiftR', 'Short', 'Sign', 'Signal', 'SinH',
'Sin', 'Sleep', 'Sngl', 'Spacing', 'Spread', 'SqRt', 'SRand',
'Stat', 'Storage_Size', 'Sum', 'SymLnk', 'System', 'System_Clock',
'Tan', 'TanH', 'Time', 'This_Image', 'Tiny', 'TrailZ', 'Transfer',
'Transpose', 'Trim', 'TtyNam', 'UBound', 'UCoBound', 'UMask',
'Unlink', 'Unpack', 'Verify', 'XOr', 'ZAbs', 'ZCos', 'ZExp',
'ZLog', 'ZSin', 'ZSqRt'), prefix=r'\b', suffix=r'\s*\b'),
Name.Builtin),
# Booleans
(r'\.(true|false)\.', Name.Builtin),
# Comparing Operators
(r'\.(eq|ne|lt|le|gt|ge|not|and|or|eqv|neqv)\.', Operator.Word),
],
'strings': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
],
'nums': [
(r'\d+(?![.e])(_[a-z]\w+)?', Number.Integer),
(r'[+-]?\d*\.\d+(e[-+]?\d+)?(_[a-z]\w+)?', Number.Float),
(r'[+-]?\d+\.\d*(e[-+]?\d+)?(_[a-z]\w+)?', Number.Float),
],
}
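The 'nums' rules accept Fortran kind parameters through the optional (_[a-z]\w+)? group, and the lexer's re.IGNORECASE flag extends this to upper-case exponents and kind names. A standalone check under the same flag:

import re

float_kind = re.compile(r'[+-]?\d*\.\d+(e[-+]?\d+)?(_[a-z]\w+)?', re.IGNORECASE)
assert float_kind.fullmatch('1.0E-3_dp')  # kind-suffixed real
assert float_kind.fullmatch('.5')         # bare real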


@@ -1,428 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.foxpro
~~~~~~~~~~~~~~~~~~~~~~
Simple lexer for Microsoft Visual FoxPro source code.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer
from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
Name, String
__all__ = ['FoxProLexer']
class FoxProLexer(RegexLexer):
"""Lexer for Microsoft Visual FoxPro language.
FoxPro syntax allows all keywords and function names to be shortened
to 4 characters. Shortened forms are not recognized by this lexer.
.. versionadded:: 1.6
"""
name = 'FoxPro'
aliases = ['foxpro', 'vfp', 'clipper', 'xbase']
filenames = ['*.PRG', '*.prg']
mimetypes = []
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r';\s*\n', Punctuation), # consume newline
(r'(^|\n)\s*', Text, 'newline'),
# Square brackets may be used for array indices
# and for string literal. Look for arrays
# before matching string literals.
(r'(?<=\w)\[[0-9, ]+\]', Text),
(r'\'[^\'\n]*\'|"[^"\n]*"|\[[^\]\n]*\]', String),
(r'(^\s*\*|&&).*?\n', Comment.Single),
(r'(ABS|ACLASS|ACOPY|ACOS|ADATABASES|ADBOBJECTS|ADDBS|'
r'ADDPROPERTY|ADEL|ADIR|ADLLS|ADOCKSTATE|AELEMENT|AERROR|'
r'AEVENTS|AFIELDS|AFONT|AGETCLASS|AGETFILEVERSION|AINS|'
r'AINSTANCE|ALANGUAGE|ALEN|ALIAS|ALINES|ALLTRIM|'
r'AMEMBERS|AMOUSEOBJ|ANETRESOURCES|APRINTERS|APROCINFO|'
r'ASC|ASCAN|ASELOBJ|ASESSIONS|ASIN|ASORT|ASQLHANDLES|'
r'ASTACKINFO|ASUBSCRIPT|AT|AT_C|ATAGINFO|ATAN|ATC|ATCC|'
r'ATCLINE|ATLINE|ATN2|AUSED|AVCXCLASSES|BAR|BARCOUNT|'
r'BARPROMPT|BETWEEN|BINDEVENT|BINTOC|BITAND|BITCLEAR|'
r'BITLSHIFT|BITNOT|BITOR|BITRSHIFT|BITSET|BITTEST|BITXOR|'
r'BOF|CANDIDATE|CAPSLOCK|CAST|CDOW|CDX|CEILING|CHR|CHRSAW|'
r'CHRTRAN|CHRTRANC|CLEARRESULTSET|CMONTH|CNTBAR|CNTPAD|COL|'
r'COM|COMARRAY|COMCLASSINFO|COMPOBJ|COMPROP|'
r'COMRETURNERROR|COS|CPCONVERT|CPCURRENT|CPDBF|CREATEBINARY|'
r'CREATEOBJECT|CREATEOBJECTEX|CREATEOFFLINE|CTOBIN|CTOD|'
r'CTOT|CURDIR|CURSORGETPROP|CURSORSETPROP|CURSORTOXML|'
r'CURVAL|DATE|DATETIME|DAY|DBC|DBF|DBGETPROP|DBSETPROP|'
r'DBUSED|DDEAbortTrans|DDEAdvise|DDEEnabled|DDEExecute|'
r'DDEInitiate|DDELastError|DDEPoke|DDERequest|DDESetOption|'
r'DDESetService|DDESetTopic|DDETerminate|DEFAULTEXT|'
r'DELETED|DESCENDING|DIFFERENCE|DIRECTORY|DISKSPACE|'
r'DisplayPath|DMY|DODEFAULT|DOW|DRIVETYPE|DROPOFFLINE|'
r'DTOC|DTOR|DTOS|DTOT|EDITSOURCE|EMPTY|EOF|ERROR|EVAL(UATE)?|'
r'EVENTHANDLER|EVL|EXECSCRIPT|EXP|FCHSIZE|FCLOSE|FCOUNT|'
r'FCREATE|FDATE|FEOF|FERROR|FFLUSH|FGETS|FIELD|FILE|'
r'FILETOSTR|FILTER|FKLABEL|FKMAX|FLDLIST|FLOCK|FLOOR|'
r'FONTMETRIC|FOPEN|FOR|FORCEEXT|FORCEPATH|FOUND|FPUTS|'
r'FREAD|FSEEK|FSIZE|FTIME|FULLPATH|FV|FWRITE|'
r'GETAUTOINCVALUE|GETBAR|GETCOLOR|GETCP|GETDIR|GETENV|'
r'GETFILE|GETFLDSTATE|GETFONT|GETINTERFACE|'
r'GETNEXTMODIFIED|GETOBJECT|GETPAD|GETPEM|GETPICT|'
r'GETPRINTER|GETRESULTSET|GETWORDCOUNT|GETWORDNUM|'
r'GETCURSORADAPTER|GOMONTH|HEADER|HOME|HOUR|ICASE|'
r'IDXCOLLATE|IIF|IMESTATUS|INDBC|INDEXSEEK|INKEY|INLIST|'
r'INPUTBOX|INSMODE|INT|ISALPHA|ISBLANK|ISCOLOR|ISDIGIT|'
r'ISEXCLUSIVE|ISFLOCKED|ISLEADBYTE|ISLOWER|ISMEMOFETCHED|'
r'ISMOUSE|ISNULL|ISPEN|ISREADONLY|ISRLOCKED|'
r'ISTRANSACTABLE|ISUPPER|JUSTDRIVE|JUSTEXT|JUSTFNAME|'
r'JUSTPATH|JUSTSTEM|KEY|KEYMATCH|LASTKEY|LEFT|LEFTC|LEN|'
r'LENC|LIKE|LIKEC|LINENO|LOADPICTURE|LOCFILE|LOCK|LOG|'
r'LOG10|LOOKUP|LOWER|LTRIM|LUPDATE|MAKETRANSACTABLE|MAX|'
r'MCOL|MDOWN|MDX|MDY|MEMLINES|MEMORY|MENU|MESSAGE|'
r'MESSAGEBOX|MIN|MINUTE|MLINE|MOD|MONTH|MRKBAR|MRKPAD|'
r'MROW|MTON|MWINDOW|NDX|NEWOBJECT|NORMALIZE|NTOM|NUMLOCK|'
r'NVL|OBJNUM|OBJTOCLIENT|OBJVAR|OCCURS|OEMTOANSI|OLDVAL|'
r'ON|ORDER|OS|PAD|PADL|PARAMETERS|PAYMENT|PCOL|PCOUNT|'
r'PEMSTATUS|PI|POPUP|PRIMARY|PRINTSTATUS|PRMBAR|PRMPAD|'
r'PROGRAM|PROMPT|PROPER|PROW|PRTINFO|PUTFILE|PV|QUARTER|'
r'RAISEEVENT|RAND|RAT|RATC|RATLINE|RDLEVEL|READKEY|RECCOUNT|'
r'RECNO|RECSIZE|REFRESH|RELATION|REPLICATE|REQUERY|RGB|'
r'RGBSCHEME|RIGHT|RIGHTC|RLOCK|ROUND|ROW|RTOD|RTRIM|'
r'SAVEPICTURE|SCHEME|SCOLS|SEC|SECONDS|SEEK|SELECT|SET|'
r'SETFLDSTATE|SETRESULTSET|SIGN|SIN|SKPBAR|SKPPAD|SOUNDEX|'
r'SPACE|SQLCANCEL|SQLCOLUMNS|SQLCOMMIT|SQLCONNECT|'
r'SQLDISCONNECT|SQLEXEC|SQLGETPROP|SQLIDLEDISCONNECT|'
r'SQLMORERESULTS|SQLPREPARE|SQLROLLBACK|SQLSETPROP|'
r'SQLSTRINGCONNECT|SQLTABLES|SQRT|SROWS|STR|STRCONV|'
r'STREXTRACT|STRTOFILE|STRTRAN|STUFF|STUFFC|SUBSTR|'
r'SUBSTRC|SYS|SYSMETRIC|TABLEREVERT|TABLEUPDATE|TAG|'
r'TAGCOUNT|TAGNO|TAN|TARGET|TEXTMERGE|TIME|TRANSFORM|'
r'TRIM|TTOC|TTOD|TXNLEVEL|TXTWIDTH|TYPE|UNBINDEVENTS|'
r'UNIQUE|UPDATED|UPPER|USED|VAL|VARREAD|VARTYPE|VERSION|'
r'WBORDER|WCHILD|WCOLS|WDOCKABLE|WEEK|WEXIST|WFONT|WLAST|'
r'WLCOL|WLROW|WMAXIMUM|WMINIMUM|WONTOP|WOUTPUT|WPARENT|'
r'WREAD|WROWS|WTITLE|WVISIBLE|XMLTOCURSOR|XMLUPDATEGRAM|'
r'YEAR)(?=\s*\()', Name.Function),
(r'_ALIGNMENT|_ASCIICOLS|_ASCIIROWS|_ASSIST|_BEAUTIFY|_BOX|'
r'_BROWSER|_BUILDER|_CALCMEM|_CALCVALUE|_CLIPTEXT|_CONVERTER|'
r'_COVERAGE|_CUROBJ|_DBLCLICK|_DIARYDATE|_DOS|_FOXDOC|_FOXREF|'
r'_GALLERY|_GENGRAPH|_GENHTML|_GENMENU|_GENPD|_GENSCRN|'
r'_GENXTAB|_GETEXPR|_INCLUDE|_INCSEEK|_INDENT|_LMARGIN|_MAC|'
r'_MENUDESIGNER|_MLINE|_PADVANCE|_PAGENO|_PAGETOTAL|_PBPAGE|'
r'_PCOLNO|_PCOPIES|_PDRIVER|_PDSETUP|_PECODE|_PEJECT|_PEPAGE|'
r'_PLENGTH|_PLINENO|_PLOFFSET|_PPITCH|_PQUALITY|_PRETEXT|'
r'_PSCODE|_PSPACING|_PWAIT|_RMARGIN|_REPORTBUILDER|'
r'_REPORTOUTPUT|_REPORTPREVIEW|_SAMPLES|_SCCTEXT|_SCREEN|'
r'_SHELL|_SPELLCHK|_STARTUP|_TABS|_TALLY|_TASKPANE|_TEXT|'
r'_THROTTLE|_TOOLBOX|_TOOLTIPTIMEOUT|_TRANSPORT|_TRIGGERLEVEL|'
r'_UNIX|_VFP|_WINDOWS|_WIZARD|_WRAP', Keyword.Pseudo),
(r'THISFORMSET|THISFORM|THIS', Name.Builtin),
(r'Application|CheckBox|Collection|Column|ComboBox|'
r'CommandButton|CommandGroup|Container|Control|CursorAdapter|'
r'Cursor|Custom|DataEnvironment|DataObject|EditBox|'
r'Empty|Exception|Fields|Files|File|FormSet|Form|FoxCode|'
r'Grid|Header|Hyperlink|Image|Label|Line|ListBox|Objects|'
r'OptionButton|OptionGroup|PageFrame|Page|ProjectHook|Projects|'
r'Project|Relation|ReportListener|Separator|Servers|Server|'
r'Session|Shape|Spinner|Tables|TextBox|Timer|ToolBar|'
r'XMLAdapter|XMLField|XMLTable', Name.Class),
(r'm\.[a-z_]\w*', Name.Variable),
(r'\.(F|T|AND|OR|NOT|NULL)\.|\b(AND|OR|NOT|NULL)\b', Operator.Word),
(r'\.(ActiveColumn|ActiveControl|ActiveForm|ActivePage|'
r'ActiveProject|ActiveRow|AddLineFeeds|ADOCodePage|Alias|'
r'Alignment|Align|AllowAddNew|AllowAutoColumnFit|'
r'AllowCellSelection|AllowDelete|AllowHeaderSizing|'
r'AllowInsert|AllowModalMessages|AllowOutput|AllowRowSizing|'
r'AllowSimultaneousFetch|AllowTabs|AllowUpdate|'
r'AlwaysOnBottom|AlwaysOnTop|Anchor|Application|'
r'AutoActivate|AutoCenter|AutoCloseTables|AutoComplete|'
r'AutoCompSource|AutoCompTable|AutoHideScrollBar|'
r'AutoIncrement|AutoOpenTables|AutoRelease|AutoSize|'
r'AutoVerbMenu|AutoYield|BackColor|ForeColor|BackStyle|'
r'BaseClass|BatchUpdateCount|BindControls|BorderColor|'
r'BorderStyle|BorderWidth|BoundColumn|BoundTo|Bound|'
r'BreakOnError|BufferModeOverride|BufferMode|'
r'BuildDateTime|ButtonCount|Buttons|Cancel|Caption|'
r'Centered|Century|ChildAlias|ChildOrder|ChildTable|'
r'ClassLibrary|Class|ClipControls|Closable|CLSID|CodePage|'
r'ColorScheme|ColorSource|ColumnCount|ColumnLines|'
r'ColumnOrder|Columns|ColumnWidths|CommandClauses|'
r'Comment|CompareMemo|ConflictCheckCmd|ConflictCheckType|'
r'ContinuousScroll|ControlBox|ControlCount|Controls|'
r'ControlSource|ConversionFunc|Count|CurrentControl|'
r'CurrentDataSession|CurrentPass|CurrentX|CurrentY|'
r'CursorSchema|CursorSource|CursorStatus|Curvature|'
r'Database|DataSessionID|DataSession|DataSourceType|'
r'DataSource|DataType|DateFormat|DateMark|Debug|'
r'DeclareXMLPrefix|DEClassLibrary|DEClass|DefaultFilePath|'
r'Default|DefOLELCID|DeleteCmdDataSourceType|DeleteCmdDataSource|'
r'DeleteCmd|DeleteMark|Description|Desktop|'
r'Details|DisabledBackColor|DisabledForeColor|'
r'DisabledItemBackColor|DisabledItemForeColor|'
r'DisabledPicture|DisableEncode|DisplayCount|'
r'DisplayValue|Dockable|Docked|DockPosition|'
r'DocumentFile|DownPicture|DragIcon|DragMode|DrawMode|'
r'DrawStyle|DrawWidth|DynamicAlignment|DynamicBackColor|'
r'DynamicForeColor|DynamicCurrentControl|DynamicFontBold|'
r'DynamicFontItalic|DynamicFontStrikethru|'
r'DynamicFontUnderline|DynamicFontName|DynamicFontOutline|'
r'DynamicFontShadow|DynamicFontSize|DynamicInputMask|'
r'DynamicLineHeight|EditorOptions|Enabled|'
r'EnableHyperlinks|Encrypted|ErrorNo|Exclude|Exclusive|'
r'FetchAsNeeded|FetchMemoCmdList|FetchMemoDataSourceType|'
r'FetchMemoDataSource|FetchMemo|FetchSize|'
r'FileClassLibrary|FileClass|FillColor|FillStyle|Filter|'
r'FirstElement|FirstNestedTable|Flags|FontBold|FontItalic|'
r'FontStrikethru|FontUnderline|FontCharSet|FontCondense|'
r'FontExtend|FontName|FontOutline|FontShadow|FontSize|'
r'ForceCloseTag|Format|FormCount|FormattedOutput|Forms|'
r'FractionDigits|FRXDataSession|FullName|GDIPlusGraphics|'
r'GridLineColor|GridLines|GridLineWidth|HalfHeightCaption|'
r'HeaderClassLibrary|HeaderClass|HeaderHeight|Height|'
r'HelpContextID|HideSelection|HighlightBackColor|'
r'HighlightForeColor|HighlightStyle|HighlightRowLineWidth|'
r'HighlightRow|Highlight|HomeDir|Hours|HostName|'
r'HScrollSmallChange|hWnd|Icon|IncrementalSearch|Increment|'
r'InitialSelectedAlias|InputMask|InsertCmdDataSourceType|'
r'InsertCmdDataSource|InsertCmdRefreshCmd|'
r'InsertCmdRefreshFieldList|InsertCmdRefreshKeyFieldList|'
r'InsertCmd|Instancing|IntegralHeight|'
r'Interval|IMEMode|IsAttribute|IsBase64|IsBinary|IsNull|'
r'IsDiffGram|IsLoaded|ItemBackColor|ItemData|ItemIDData|'
r'ItemTips|IXMLDOMElement|KeyboardHighValue|KeyboardLowValue|'
r'Keyfield|KeyFieldList|KeyPreview|KeySort|LanguageOptions|'
r'LeftColumn|Left|LineContents|LineNo|LineSlant|LinkMaster|'
r'ListCount|ListenerType|ListIndex|ListItemID|ListItem|'
r'List|LockColumnsLeft|LockColumns|LockScreen|MacDesktop|'
r'MainFile|MapN19_4ToCurrency|MapBinary|MapVarchar|Margin|'
r'MaxButton|MaxHeight|MaxLeft|MaxLength|MaxRecords|MaxTop|'
r'MaxWidth|MDIForm|MemberClassLibrary|MemberClass|'
r'MemoWindow|Message|MinButton|MinHeight|MinWidth|'
r'MouseIcon|MousePointer|Movable|MoverBars|MultiSelect|'
r'Name|NestedInto|NewIndex|NewItemID|NextSiblingTable|'
r'NoCpTrans|NoDataOnLoad|NoData|NullDisplay|'
r'NumberOfElements|Object|OLEClass|OLEDragMode|'
r'OLEDragPicture|OLEDropEffects|OLEDropHasData|'
r'OLEDropMode|OLEDropTextInsertion|OLELCID|'
r'OLERequestPendingTimeout|OLEServerBusyRaiseError|'
r'OLEServerBusyTimeout|OLETypeAllowed|OneToMany|'
r'OpenViews|OpenWindow|Optimize|OrderDirection|Order|'
r'OutputPageCount|OutputType|PageCount|PageHeight|'
r'PageNo|PageOrder|Pages|PageTotal|PageWidth|'
r'PanelLink|Panel|ParentAlias|ParentClass|ParentTable|'
r'Parent|Partition|PasswordChar|PictureMargin|'
r'PicturePosition|PictureSpacing|PictureSelectionDisplay|'
r'PictureVal|Picture|Prepared|'
r'PolyPoints|PreserveWhiteSpace|PreviewContainer|'
r'PrintJobName|Procedure|PROCESSID|ProgID|ProjectHookClass|'
r'ProjectHookLibrary|ProjectHook|QuietMode|'
r'ReadCycle|ReadLock|ReadMouse|ReadObject|ReadOnly|'
r'ReadSave|ReadTimeout|RecordMark|RecordSourceType|'
r'RecordSource|RefreshAlias|'
r'RefreshCmdDataSourceType|RefreshCmdDataSource|RefreshCmd|'
r'RefreshIgnoreFieldList|RefreshTimeStamp|RelationalExpr|'
r'RelativeColumn|RelativeRow|ReleaseType|Resizable|'
r'RespectCursorCP|RespectNesting|RightToLeft|RotateFlip|'
r'Rotation|RowColChange|RowHeight|RowSourceType|'
r'RowSource|ScaleMode|SCCProvider|SCCStatus|ScrollBars|'
r'Seconds|SelectCmd|SelectedID|'
r'SelectedItemBackColor|SelectedItemForeColor|Selected|'
r'SelectionNamespaces|SelectOnEntry|SelLength|SelStart|'
r'SelText|SendGDIPlusImage|SendUpdates|ServerClassLibrary|'
r'ServerClass|ServerHelpFile|ServerName|'
r'ServerProject|ShowTips|ShowInTaskbar|ShowWindow|'
r'Sizable|SizeBox|SOM|Sorted|Sparse|SpecialEffect|'
r'SpinnerHighValue|SpinnerLowValue|SplitBar|StackLevel|'
r'StartMode|StatusBarText|StatusBar|Stretch|StrictDateEntry|'
r'Style|TabIndex|Tables|TabOrientation|Tabs|TabStop|'
r'TabStretch|TabStyle|Tag|TerminateRead|Text|Themes|'
r'ThreadID|TimestampFieldList|TitleBar|ToolTipText|'
r'TopIndex|TopItemID|Top|TwoPassProcess|TypeLibCLSID|'
r'TypeLibDesc|TypeLibName|Type|Unicode|UpdatableFieldList|'
r'UpdateCmdDataSourceType|UpdateCmdDataSource|'
r'UpdateCmdRefreshCmd|UpdateCmdRefreshFieldList|'
r'UpdateCmdRefreshKeyFieldList|UpdateCmd|'
r'UpdateGramSchemaLocation|UpdateGram|UpdateNameList|UpdateType|'
r'UseCodePage|UseCursorSchema|UseDeDataSource|UseMemoSize|'
r'UserValue|UseTransactions|UTF8Encoded|Value|VersionComments|'
r'VersionCompany|VersionCopyright|VersionDescription|'
r'VersionNumber|VersionProduct|VersionTrademarks|Version|'
r'VFPXMLProgID|ViewPortHeight|ViewPortLeft|'
r'ViewPortTop|ViewPortWidth|VScrollSmallChange|View|Visible|'
r'VisualEffect|WhatsThisButton|WhatsThisHelpID|WhatsThisHelp|'
r'WhereType|Width|WindowList|WindowState|WindowType|WordWrap|'
r'WrapCharInCDATA|WrapInCDATA|WrapMemoInCDATA|XMLAdapter|'
r'XMLConstraints|XMLNameIsXPath|XMLNamespace|XMLName|'
r'XMLPrefix|XMLSchemaLocation|XMLTable|XMLType|'
r'XSDfractionDigits|XSDmaxLength|XSDtotalDigits|'
r'XSDtype|ZoomBox)', Name.Attribute),
(r'\.(ActivateCell|AddColumn|AddItem|AddListItem|AddObject|'
r'AddProperty|AddTableSchema|AddToSCC|Add|'
r'ApplyDiffgram|Attach|AutoFit|AutoOpen|Box|Build|'
r'CancelReport|ChangesToCursor|CheckIn|CheckOut|Circle|'
r'CleanUp|ClearData|ClearStatus|Clear|CloneObject|CloseTables|'
r'Close|Cls|CursorAttach|CursorDetach|CursorFill|'
r'CursorRefresh|DataToClip|DelayedMemoFetch|DeleteColumn|'
r'Dock|DoMessage|DoScroll|DoStatus|DoVerb|Drag|Draw|Eval|'
r'GetData|GetDockState|GetFormat|GetKey|GetLatestVersion|'
r'GetPageHeight|GetPageWidth|Help|Hide|IncludePageInOutput|'
r'IndexToItemID|ItemIDToIndex|Item|LoadXML|Line|Modify|'
r'MoveItem|Move|Nest|OLEDrag|OnPreviewClose|OutputPage|'
r'Point|Print|PSet|Quit|ReadExpression|ReadMethod|'
r'RecordRefresh|Refresh|ReleaseXML|Release|RemoveFromSCC|'
r'RemoveItem|RemoveListItem|RemoveObject|Remove|'
r'Render|Requery|RequestData|ResetToDefault|Reset|Run|'
r'SaveAsClass|SaveAs|SetAll|SetData|SetFocus|SetFormat|'
r'SetMain|SetVar|SetViewPort|ShowWhatsThis|Show|'
r'SupportsListenerType|TextHeight|TextWidth|ToCursor|'
r'ToXML|UndoCheckOut|Unnest|UpdateStatus|WhatsThisMode|'
r'WriteExpression|WriteMethod|ZOrder)', Name.Function),
(r'\.(Activate|AdjustObjectSize|AfterBand|AfterBuild|'
r'AfterCloseTables|AfterCursorAttach|AfterCursorClose|'
r'AfterCursorDetach|AfterCursorFill|AfterCursorRefresh|'
r'AfterCursorUpdate|AfterDelete|AfterInsert|'
r'AfterRecordRefresh|AfterUpdate|AfterDock|AfterReport|'
r'AfterRowColChange|BeforeBand|BeforeCursorAttach|'
r'BeforeCursorClose|BeforeCursorDetach|BeforeCursorFill|'
r'BeforeCursorRefresh|BeforeCursorUpdate|BeforeDelete|'
r'BeforeInsert|BeforeDock|BeforeOpenTables|'
r'BeforeRecordRefresh|BeforeReport|BeforeRowColChange|'
r'BeforeUpdate|Click|dbc_Activate|dbc_AfterAddTable|'
r'dbc_AfterAppendProc|dbc_AfterCloseTable|dbc_AfterCopyProc|'
r'dbc_AfterCreateConnection|dbc_AfterCreateOffline|'
r'dbc_AfterCreateTable|dbc_AfterCreateView|dbc_AfterDBGetProp|'
r'dbc_AfterDBSetProp|dbc_AfterDeleteConnection|'
r'dbc_AfterDropOffline|dbc_AfterDropTable|'
r'dbc_AfterModifyConnection|dbc_AfterModifyProc|'
r'dbc_AfterModifyTable|dbc_AfterModifyView|dbc_AfterOpenTable|'
r'dbc_AfterRemoveTable|dbc_AfterRenameConnection|'
r'dbc_AfterRenameTable|dbc_AfterRenameView|'
r'dbc_AfterValidateData|dbc_BeforeAddTable|'
r'dbc_BeforeAppendProc|dbc_BeforeCloseTable|'
r'dbc_BeforeCopyProc|dbc_BeforeCreateConnection|'
r'dbc_BeforeCreateOffline|dbc_BeforeCreateTable|'
r'dbc_BeforeCreateView|dbc_BeforeDBGetProp|'
r'dbc_BeforeDBSetProp|dbc_BeforeDeleteConnection|'
r'dbc_BeforeDropOffline|dbc_BeforeDropTable|'
r'dbc_BeforeModifyConnection|dbc_BeforeModifyProc|'
r'dbc_BeforeModifyTable|dbc_BeforeModifyView|'
r'dbc_BeforeOpenTable|dbc_BeforeRemoveTable|'
r'dbc_BeforeRenameConnection|dbc_BeforeRenameTable|'
r'dbc_BeforeRenameView|dbc_BeforeValidateData|'
r'dbc_CloseData|dbc_Deactivate|dbc_ModifyData|dbc_OpenData|'
r'dbc_PackData|DblClick|Deactivate|Deleted|Destroy|DoCmd|'
r'DownClick|DragDrop|DragOver|DropDown|ErrorMessage|Error|'
r'EvaluateContents|GotFocus|Init|InteractiveChange|KeyPress|'
r'LoadReport|Load|LostFocus|Message|MiddleClick|MouseDown|'
r'MouseEnter|MouseLeave|MouseMove|MouseUp|MouseWheel|Moved|'
r'OLECompleteDrag|OLEDragOver|OLEGiveFeedback|OLESetData|'
r'OLEStartDrag|OnMoveItem|Paint|ProgrammaticChange|'
r'QueryAddFile|QueryModifyFile|QueryNewFile|QueryRemoveFile|'
r'QueryRunFile|QueryUnload|RangeHigh|RangeLow|ReadActivate|'
r'ReadDeactivate|ReadShow|ReadValid|ReadWhen|Resize|'
r'RightClick|SCCInit|SCCDestroy|Scrolled|Timer|UIEnable|'
r'UnDock|UnloadReport|Unload|UpClick|Valid|When)', Name.Function),
(r'\s+', Text),
# everything else is not colored
(r'.', Text),
],
'newline': [
(r'\*.*?$', Comment.Single, '#pop'),
(r'(ACCEPT|ACTIVATE\s*MENU|ACTIVATE\s*POPUP|ACTIVATE\s*SCREEN|'
r'ACTIVATE\s*WINDOW|APPEND|APPEND\s*FROM|APPEND\s*FROM\s*ARRAY|'
r'APPEND\s*GENERAL|APPEND\s*MEMO|ASSIST|AVERAGE|BLANK|BROWSE|'
r'BUILD\s*APP|BUILD\s*EXE|BUILD\s*PROJECT|CALCULATE|CALL|'
r'CANCEL|CHANGE|CLEAR|CLOSE|CLOSE\s*MEMO|COMPILE|CONTINUE|'
r'COPY\s*FILE|COPY\s*INDEXES|COPY\s*MEMO|COPY\s*STRUCTURE|'
r'COPY\s*STRUCTURE\s*EXTENDED|COPY\s*TAG|COPY\s*TO|'
r'COPY\s*TO\s*ARRAY|COUNT|CREATE|CREATE\s*COLOR\s*SET|'
r'CREATE\s*CURSOR|CREATE\s*FROM|CREATE\s*LABEL|CREATE\s*MENU|'
r'CREATE\s*PROJECT|CREATE\s*QUERY|CREATE\s*REPORT|'
r'CREATE\s*SCREEN|CREATE\s*TABLE|CREATE\s*VIEW|DDE|'
r'DEACTIVATE\s*MENU|DEACTIVATE\s*POPUP|DEACTIVATE\s*WINDOW|'
r'DECLARE|DEFINE\s*BAR|DEFINE\s*BOX|DEFINE\s*MENU|'
r'DEFINE\s*PAD|DEFINE\s*POPUP|DEFINE\s*WINDOW|DELETE|'
r'DELETE\s*FILE|DELETE\s*TAG|DIMENSION|DIRECTORY|DISPLAY|'
r'DISPLAY\s*FILES|DISPLAY\s*MEMORY|DISPLAY\s*STATUS|'
r'DISPLAY\s*STRUCTURE|DO|EDIT|EJECT|EJECT\s*PAGE|ERASE|'
r'EXIT|EXPORT|EXTERNAL|FILER|FIND|FLUSH|FUNCTION|GATHER|'
r'GETEXPR|GO|GOTO|HELP|HIDE\s*MENU|HIDE\s*POPUP|'
r'HIDE\s*WINDOW|IMPORT|INDEX|INPUT|INSERT|JOIN|KEYBOARD|'
r'LABEL|LIST|LOAD|LOCATE|LOOP|MENU|MENU\s*TO|MODIFY\s*COMMAND|'
r'MODIFY\s*FILE|MODIFY\s*GENERAL|MODIFY\s*LABEL|MODIFY\s*MEMO|'
r'MODIFY\s*MENU|MODIFY\s*PROJECT|MODIFY\s*QUERY|'
r'MODIFY\s*REPORT|MODIFY\s*SCREEN|MODIFY\s*STRUCTURE|'
r'MODIFY\s*WINDOW|MOVE\s*POPUP|MOVE\s*WINDOW|NOTE|'
r'ON\s*APLABOUT|ON\s*BAR|ON\s*ERROR|ON\s*ESCAPE|'
r'ON\s*EXIT\s*BAR|ON\s*EXIT\s*MENU|ON\s*EXIT\s*PAD|'
r'ON\s*EXIT\s*POPUP|ON\s*KEY|ON\s*KEY\s*=|ON\s*KEY\s*LABEL|'
r'ON\s*MACHELP|ON\s*PAD|ON\s*PAGE|ON\s*READERROR|'
r'ON\s*SELECTION\s*BAR|ON\s*SELECTION\s*MENU|'
r'ON\s*SELECTION\s*PAD|ON\s*SELECTION\s*POPUP|ON\s*SHUTDOWN|'
r'PACK|PARAMETERS|PLAY\s*MACRO|POP\s*KEY|POP\s*MENU|'
r'POP\s*POPUP|PRIVATE|PROCEDURE|PUBLIC|PUSH\s*KEY|'
r'PUSH\s*MENU|PUSH\s*POPUP|QUIT|READ|READ\s*MENU|RECALL|'
r'REINDEX|RELEASE|RELEASE\s*MODULE|RENAME|REPLACE|'
r'REPLACE\s*FROM\s*ARRAY|REPORT|RESTORE\s*FROM|'
r'RESTORE\s*MACROS|RESTORE\s*SCREEN|RESTORE\s*WINDOW|'
r'RESUME|RETRY|RETURN|RUN|RUN\s*\/N"|RUNSCRIPT|'
r'SAVE\s*MACROS|SAVE\s*SCREEN|SAVE\s*TO|SAVE\s*WINDOWS|'
r'SCATTER|SCROLL|SEEK|SELECT|SET|SET\s*ALTERNATE|'
r'SET\s*ANSI|SET\s*APLABOUT|SET\s*AUTOSAVE|SET\s*BELL|'
r'SET\s*BLINK|SET\s*BLOCKSIZE|SET\s*BORDER|SET\s*BRSTATUS|'
r'SET\s*CARRY|SET\s*CENTURY|SET\s*CLEAR|SET\s*CLOCK|'
r'SET\s*COLLATE|SET\s*COLOR\s*OF|SET\s*COLOR\s*OF\s*SCHEME|'
r'SET\s*COLOR\s*SET|SET\s*COLOR\s*TO|SET\s*COMPATIBLE|'
r'SET\s*CONFIRM|SET\s*CONSOLE|SET\s*CURRENCY|SET\s*CURSOR|'
r'SET\s*DATE|SET\s*DEBUG|SET\s*DECIMALS|SET\s*DEFAULT|'
r'SET\s*DELETED|SET\s*DELIMITERS|SET\s*DEVELOPMENT|'
r'SET\s*DEVICE|SET\s*DISPLAY|SET\s*DOHISTORY|SET\s*ECHO|'
r'SET\s*ESCAPE|SET\s*EXACT|SET\s*EXCLUSIVE|SET\s*FIELDS|'
r'SET\s*FILTER|SET\s*FIXED|SET\s*FORMAT|SET\s*FULLPATH|'
r'SET\s*FUNCTION|SET\s*HEADINGS|SET\s*HELP|SET\s*HELPFILTER|'
r'SET\s*HOURS|SET\s*INDEX|SET\s*INTENSITY|SET\s*KEY|'
r'SET\s*KEYCOMP|SET\s*LIBRARY|SET\s*LOCK|SET\s*LOGERRORS|'
r'SET\s*MACDESKTOP|SET\s*MACHELP|SET\s*MACKEY|SET\s*MARGIN|'
r'SET\s*MARK\s*OF|SET\s*MARK\s*TO|SET\s*MEMOWIDTH|'
r'SET\s*MESSAGE|SET\s*MOUSE|SET\s*MULTILOCKS|SET\s*NEAR|'
r'SET\s*NOCPTRANS|SET\s*NOTIFY|SET\s*ODOMETER|SET\s*OPTIMIZE|'
r'SET\s*ORDER|SET\s*PALETTE|SET\s*PATH|SET\s*PDSETUP|'
r'SET\s*POINT|SET\s*PRINTER|SET\s*PROCEDURE|SET\s*READBORDER|'
r'SET\s*REFRESH|SET\s*RELATION|SET\s*RELATION\s*OFF|'
r'SET\s*REPROCESS|SET\s*RESOURCE|SET\s*SAFETY|SET\s*SCOREBOARD|'
r'SET\s*SEPARATOR|SET\s*SHADOWS|SET\s*SKIP|SET\s*SKIP\s*OF|'
r'SET\s*SPACE|SET\s*STATUS|SET\s*STATUS\s*BAR|SET\s*STEP|'
r'SET\s*STICKY|SET\s*SYSMENU|SET\s*TALK|SET\s*TEXTMERGE|'
r'SET\s*TEXTMERGE\s*DELIMITERS|SET\s*TOPIC|SET\s*TRBETWEEN|'
r'SET\s*TYPEAHEAD|SET\s*UDFPARMS|SET\s*UNIQUE|SET\s*VIEW|'
r'SET\s*VOLUME|SET\s*WINDOW\s*OF\s*MEMO|SET\s*XCMDFILE|'
r'SHOW\s*GET|SHOW\s*GETS|SHOW\s*MENU|SHOW\s*OBJECT|'
r'SHOW\s*POPUP|SHOW\s*WINDOW|SIZE\s*POPUP|SKIP|SORT|'
r'STORE|SUM|SUSPEND|TOTAL|TYPE|UNLOCK|UPDATE|USE|WAIT|'
r'ZAP|ZOOM\s*WINDOW|DO\s*CASE|CASE|OTHERWISE|ENDCASE|'
r'DO\s*WHILE|ENDDO|FOR|ENDFOR|NEXT|IF|ELSE|ENDIF|PRINTJOB|'
r'ENDPRINTJOB|SCAN|ENDSCAN|TEXT|ENDTEXT|=)',
Keyword.Reserved, '#pop'),
(r'#\s*(IF|ELIF|ELSE|ENDIF|DEFINE|IFDEF|IFNDEF|INCLUDE)',
Comment.Preproc, '#pop'),
(r'(m\.)?[a-z_]\w*', Name.Variable, '#pop'),
(r'.', Text, '#pop'),
],
}
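The 'root'/'newline' pair above encodes FoxPro's line-oriented grammar: (^|\n)\s* pushes the 'newline' state, command keywords match only there, and every 'newline' rule pops straight back to 'root'. One way to observe the effect, assuming a Pygments install that ships this lexer:

from pygments.lexers import FoxProLexer
from pygments.token import Keyword

# LIST is recognized as a command only in line-leading position
toks = list(FoxProLexer().get_tokens('LIST\n'))
assert (Keyword.Reserved, 'LIST') in toks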


@@ -1,21 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.functional
~~~~~~~~~~~~~~~~~~~~~~~~~~
Just export lexer classes previously contained in this module.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexers.lisp import SchemeLexer, CommonLispLexer, RacketLexer, \
NewLispLexer
from pygments.lexers.haskell import HaskellLexer, LiterateHaskellLexer, \
KokaLexer
from pygments.lexers.theorem import CoqLexer
from pygments.lexers.erlang import ErlangLexer, ErlangShellLexer, \
ElixirConsoleLexer, ElixirLexer
from pygments.lexers.ml import SMLLexer, OcamlLexer, OpaLexer
__all__ = []


@@ -1,101 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.go
~~~~~~~~~~~~~~~~~~
Lexers for the Google Go language.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['GoLexer']
class GoLexer(RegexLexer):
"""
For `Go <http://golang.org>`_ source.
.. versionadded:: 1.2
"""
name = 'Go'
filenames = ['*.go']
aliases = ['go']
mimetypes = ['text/x-gosrc']
flags = re.MULTILINE | re.UNICODE
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuations
(r'//(.*?)\n', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'(import|package)\b', Keyword.Namespace),
(r'(var|func|struct|map|chan|type|interface|const)\b',
Keyword.Declaration),
(words((
'break', 'default', 'select', 'case', 'defer', 'go',
'else', 'goto', 'switch', 'fallthrough', 'if', 'range',
'continue', 'for', 'return'), suffix=r'\b'),
Keyword),
(r'(true|false|iota|nil)\b', Keyword.Constant),
# It seems the builtin types aren't actually keywords, but
# can be used as functions. So we need two declarations.
(words((
'uint', 'uint8', 'uint16', 'uint32', 'uint64',
'int', 'int8', 'int16', 'int32', 'int64',
'float', 'float32', 'float64',
'complex64', 'complex128', 'byte', 'rune',
'string', 'bool', 'error', 'uintptr',
'print', 'println', 'panic', 'recover', 'close', 'complex',
'real', 'imag', 'len', 'cap', 'append', 'copy', 'delete',
'new', 'make'), suffix=r'\b(\()'),
bygroups(Name.Builtin, Punctuation)),
(words((
'uint', 'uint8', 'uint16', 'uint32', 'uint64',
'int', 'int8', 'int16', 'int32', 'int64',
'float', 'float32', 'float64',
'complex64', 'complex128', 'byte', 'rune',
'string', 'bool', 'error', 'uintptr'), suffix=r'\b'),
Keyword.Type),
# imaginary_lit
(r'\d+i', Number),
(r'\d+\.\d*([Ee][-+]\d+)?i', Number),
(r'\.\d+([Ee][-+]\d+)?i', Number),
(r'\d+[Ee][-+]\d+i', Number),
# float_lit
(r'\d+(\.\d+[eE][+\-]?\d+|'
r'\.\d*|[eE][+\-]?\d+)', Number.Float),
(r'\.\d+([eE][+\-]?\d+)?', Number.Float),
# int_lit
# -- octal_lit
(r'0[0-7]+', Number.Oct),
# -- hex_lit
(r'0[xX][0-9a-fA-F]+', Number.Hex),
# -- decimal_lit
(r'(0|[1-9][0-9]*)', Number.Integer),
# char_lit
(r"""'(\\['"\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|[^\\])'""",
String.Char),
# StringLiteral
# -- raw_string_lit
(r'`[^`]*`', String),
# -- interpreted_string_lit
(r'"(\\\\|\\"|[^"])*"', String),
# Tokens
(r'(<<=|>>=|<<|>>|<=|>=|&\^=|&\^|\+=|-=|\*=|/=|%=|&=|\|=|&&|\|\|'
r'|<-|\+\+|--|==|!=|:=|\.\.\.|[+\-*/%&])', Operator),
(r'[|^<>=!()\[\]{}.,;:]', Punctuation),
# identifier
(r'[^\W\d]\w*', Name.Other),
]
}
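The two near-identical words() declarations follow from the comment in 'root': a builtin name directly followed by '(' lexes as a function, a bare one as a type keyword. A quick check, assuming a Pygments install that ships GoLexer:

from pygments.lexers import GoLexer
from pygments.token import Keyword, Name

toks = list(GoLexer().get_tokens('var n int = len(s)\n'))
assert (Keyword.Type, 'int') in toks   # bare builtin name: a type
assert (Name.Builtin, 'len') in toks   # builtin followed by '(': a function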


@@ -1,79 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.graph
~~~~~~~~~~~~~~~~~~~~~
Lexers for graph query languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, this
from pygments.token import Keyword, Punctuation, Comment, Operator, Name,\
String, Number, Whitespace
__all__ = ['CypherLexer']
class CypherLexer(RegexLexer):
"""
For `Cypher Query Language
<http://docs.neo4j.org/chunked/milestone/cypher-query-lang.html>`_
For the Cypher version in Neo4J 2.0
.. versionadded:: 2.0
"""
name = 'Cypher'
aliases = ['cypher']
filenames = ['*.cyp', '*.cypher']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
include('comment'),
include('keywords'),
include('clauses'),
include('relations'),
include('strings'),
include('whitespace'),
include('barewords'),
],
'comment': [
(r'^.*//.*\n', Comment.Single),
],
'keywords': [
(r'(create|order|match|limit|set|skip|start|return|with|where|'
r'delete|foreach|not|by)\b', Keyword),
],
'clauses': [
# TODO: many missing ones, see http://docs.neo4j.org/refcard/2.0/
(r'(all|any|as|asc|create|create\s+unique|delete|'
r'desc|distinct|foreach|in|is\s+null|limit|match|none|'
r'order\s+by|return|set|skip|single|start|union|where|with)\b',
Keyword),
],
'relations': [
(r'(-\[)(.*?)(\]->)', bygroups(Operator, using(this), Operator)),
(r'(<-\[)(.*?)(\]-)', bygroups(Operator, using(this), Operator)),
(r'-->|<--|\[|\]', Operator),
(r'<|>|<>|=|<=|=>|\(|\)|\||:|,|;', Punctuation),
(r'[.*{}]', Punctuation),
],
'strings': [
(r'"(?:\\[tbnrf\'"\\]|[^\\"])*"', String),
(r'`(?:``|[^`])+`', Name.Variable),
],
'whitespace': [
(r'\s+', Whitespace),
],
'barewords': [
(r'[a-z]\w*', Name),
(r'\d+', Number),
],
}
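The 'relations' rules hand the text between relationship brackets back to the same lexer via using(this), so the contents of -[r:KNOWS]-> are tokenized by the ordinary rules rather than swallowed by the arrow match. For example, assuming a Pygments install with CypherLexer:

from pygments.lexers import CypherLexer

for tok, val in CypherLexer().get_tokens('MATCH (a)-[r:KNOWS]->(b) RETURN b\n'):
    print(tok, repr(val))
# '-[' and ']->' come out as Operator; r, : and KNOWS are re-lexed recursively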


@@ -1,553 +0,0 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.graphics
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for computer graphics and plotting related languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words, include, bygroups, using, \
this, default
from pygments.token import Text, Comment, Operator, Keyword, Name, \
Number, Punctuation, String
__all__ = ['GLShaderLexer', 'PostScriptLexer', 'AsymptoteLexer', 'GnuplotLexer',
'PovrayLexer']
class GLShaderLexer(RegexLexer):
"""
GLSL (OpenGL Shader) lexer.
.. versionadded:: 1.1
"""
name = 'GLSL'
aliases = ['glsl']
filenames = ['*.vert', '*.frag', '*.geo']
mimetypes = ['text/x-glslsrc']
tokens = {
'root': [
(r'^#.*', Comment.Preproc),
(r'//.*', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'\+|-|~|!=?|\*|/|%|<<|>>|<=?|>=?|==?|&&?|\^|\|\|?',
Operator),
(r'[?:]', Operator), # quick hack for ternary
(r'\bdefined\b', Operator),
(r'[;{}(),\[\]]', Punctuation),
# FIXME when e is present, no decimal point needed
(r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float),
(r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float),
(r'0[xX][0-9a-fA-F]*', Number.Hex),
(r'0[0-7]*', Number.Oct),
(r'[1-9][0-9]*', Number.Integer),
(words((
'attribute', 'const', 'uniform', 'varying', 'centroid', 'break',
'continue', 'do', 'for', 'while', 'if', 'else', 'in', 'out',
'inout', 'float', 'int', 'void', 'bool', 'true', 'false',
'invariant', 'discard', 'return', 'mat2', 'mat3', 'mat4',
'mat2x2', 'mat3x2', 'mat4x2', 'mat2x3', 'mat3x3', 'mat4x3',
'mat2x4', 'mat3x4', 'mat4x4', 'vec2', 'vec3', 'vec4',
'ivec2', 'ivec3', 'ivec4', 'bvec2', 'bvec3', 'bvec4',
'sampler1D', 'sampler2D', 'sampler3D', 'samplerCube',
'sampler1DShadow', 'sampler2DShadow', 'struct'),
prefix=r'\b', suffix=r'\b'),
Keyword),
(words((
'asm', 'class', 'union', 'enum', 'typedef', 'template', 'this',
'packed', 'goto', 'switch', 'default', 'inline', 'noinline',
'volatile', 'public', 'static', 'extern', 'external', 'interface',
'long', 'short', 'double', 'half', 'fixed', 'unsigned', 'lowp',
'mediump', 'highp', 'precision', 'input', 'output',
'hvec2', 'hvec3', 'hvec4', 'dvec2', 'dvec3', 'dvec4',
'fvec2', 'fvec3', 'fvec4', 'sampler2DRect', 'sampler3DRect',
'sampler2DRectShadow', 'sizeof', 'cast', 'namespace', 'using'),
prefix=r'\b', suffix=r'\b'),
Keyword), # future use
(r'[a-zA-Z_]\w*', Name),
(r'\.', Punctuation),
(r'\s+', Text),
],
}
class PostScriptLexer(RegexLexer):
"""
Lexer for PostScript files.
The PostScript Language Reference published by Adobe at
<http://partners.adobe.com/public/developer/en/ps/PLRM.pdf>
is the authority for this.
.. versionadded:: 1.4
"""
name = 'PostScript'
aliases = ['postscript', 'postscr']
filenames = ['*.ps', '*.eps']
mimetypes = ['application/postscript']
delimiter = r'()<>\[\]{}/%\s'
delimiter_end = r'(?=[%s])' % delimiter
valid_name_chars = r'[^%s]' % delimiter
valid_name = r"%s+%s" % (valid_name_chars, delimiter_end)
tokens = {
'root': [
# All comment types
(r'^%!.+\n', Comment.Preproc),
(r'%%.*\n', Comment.Special),
(r'(^%.*\n){2,}', Comment.Multiline),
(r'%.*\n', Comment.Single),
# String literals are awkward; enter separate state.
(r'\(', String, 'stringliteral'),
(r'[{}<>\[\]]', Punctuation),
# Numbers
(r'<[0-9A-Fa-f]+>' + delimiter_end, Number.Hex),
# Slight abuse: use Oct to signify any explicit base system
(r'[0-9]+\#(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)'
r'((e|E)[0-9]+)?' + delimiter_end, Number.Oct),
(r'(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)((e|E)[0-9]+)?'
+ delimiter_end, Number.Float),
(r'(\-|\+)?[0-9]+' + delimiter_end, Number.Integer),
# References
(r'\/%s' % valid_name, Name.Variable),
# Names
(valid_name, Name.Function), # Anything else is executed
# These keywords taken from
# <http://www.math.ubc.ca/~cass/graphics/manual/pdf/a1.pdf>
# Is there an authoritative list anywhere that doesn't involve
# trawling documentation?
(r'(false|true)' + delimiter_end, Keyword.Constant),
# Conditionals / flow control
(r'(eq|ne|g[et]|l[et]|and|or|not|if(?:else)?|for(?:all)?)'
+ delimiter_end, Keyword.Reserved),
(words((
'abs', 'add', 'aload', 'arc', 'arcn', 'array', 'atan', 'begin',
'bind', 'ceiling', 'charpath', 'clip', 'closepath', 'concat',
'concatmatrix', 'copy', 'cos', 'currentlinewidth', 'currentmatrix',
'currentpoint', 'curveto', 'cvi', 'cvs', 'def', 'defaultmatrix',
'dict', 'dictstackoverflow', 'div', 'dtransform', 'dup', 'end',
'exch', 'exec', 'exit', 'exp', 'fill', 'findfont', 'floor', 'get',
'getinterval', 'grestore', 'gsave', 'gt', 'identmatrix', 'idiv',
'idtransform', 'index', 'invertmatrix', 'itransform', 'length',
'lineto', 'ln', 'load', 'log', 'loop', 'matrix', 'mod', 'moveto',
'mul', 'neg', 'newpath', 'pathforall', 'pathbbox', 'pop', 'print',
'pstack', 'put', 'quit', 'rand', 'rangecheck', 'rcurveto', 'repeat',
'restore', 'rlineto', 'rmoveto', 'roll', 'rotate', 'round', 'run',
'save', 'scale', 'scalefont', 'setdash', 'setfont', 'setgray',
'setlinecap', 'setlinejoin', 'setlinewidth', 'setmatrix',
'setrgbcolor', 'shfill', 'show', 'showpage', 'sin', 'sqrt',
'stack', 'stringwidth', 'stroke', 'strokepath', 'sub', 'syntaxerror',
'transform', 'translate', 'truncate', 'typecheck', 'undefined',
'undefinedfilename', 'undefinedresult'), suffix=delimiter_end),
Name.Builtin),
(r'\s+', Text),
],
'stringliteral': [
(r'[^()\\]+', String),
(r'\\', String.Escape, 'escape'),
(r'\(', String, '#push'),
(r'\)', String, '#pop'),
],
'escape': [
(r'[0-8]{3}|n|r|t|b|f|\\|\(|\)', String.Escape, '#pop'),
default('#pop'),
],
}
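PostScriptLexer derives its name pattern from the delimiter set above: valid_name is one or more non-delimiter characters followed by a delimiter lookahead, so 'moveto' matches as a whole token while '{moveto}' still splits at the braces. A standalone check with the same definitions:

import re

delimiter = r'()<>\[\]{}/%\s'
valid_name = r'[^%s]+(?=[%s])' % (delimiter, delimiter)
assert re.match(valid_name, 'moveto ').group() == 'moveto'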
class AsymptoteLexer(RegexLexer):
"""
For `Asymptote <http://asymptote.sf.net/>`_ source code.
.. versionadded:: 1.2
"""
name = 'Asymptote'
aliases = ['asy', 'asymptote']
filenames = ['*.asy']
mimetypes = ['text/x-asymptote']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/\*.*?\*/)+'
tokens = {
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment),
(r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment),
],
'statements': [
# simple string (TeX friendly)
(r'"(\\\\|\\"|[^"])*"', String),
# C style string (with character escapes)
(r"'", String, 'string'),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.]', Punctuation),
(r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)),
(r'(and|controls|tension|atleast|curl|if|else|while|for|do|'
r'return|break|continue|struct|typedef|new|access|import|'
r'unravel|from|include|quote|static|public|private|restricted|'
r'this|explicit|true|false|null|cycle|newframe|operator)\b', Keyword),
# An asy-type-name can also be an asy-function-name, so we only tag a
# Keyword.Type when the string " [a-zA-Z]" follows it (i.e. it looks
# like it introduces a declaration). This heuristic is not perfect.
(r'(Braid|FitResult|Label|Legend|TreeNode|abscissa|arc|arrowhead|'
r'binarytree|binarytreeNode|block|bool|bool3|bounds|bqe|circle|'
r'conic|coord|coordsys|cputime|ellipse|file|filltype|frame|grid3|'
r'guide|horner|hsv|hyperbola|indexedTransform|int|inversion|key|'
r'light|line|linefit|marginT|marker|mass|object|pair|parabola|path|'
r'path3|pen|picture|point|position|projection|real|revolution|'
r'scaleT|scientific|segment|side|slice|splitface|string|surface|'
r'tensionSpecifier|ticklocate|ticksgridT|tickvalues|transform|'
r'transformation|tree|triangle|trilinear|triple|vector|'
r'vertex|void)(?=\s+[a-zA-Z])', Keyword.Type),
# Type names that are not also function names (unless user-defined);
# possibly redundant with the rule above.
(r'(Braid|FitResult|TreeNode|abscissa|arrowhead|block|bool|bool3|'
r'bounds|coord|frame|guide|horner|int|linefit|marginT|pair|pen|'
r'picture|position|real|revolution|slice|splitface|ticksgridT|'
r'tickvalues|tree|triple|vertex|void)\b', Keyword.Type),
(r'[a-zA-Z_]\w*:(?!:)', Name.Label),
(r'[a-zA-Z_]\w*', Name),
],
'root': [
include('whitespace'),
# functions
(r'((?:[\w*\s])+?(?:\s|\*))' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')(\{)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation),
'function'),
# function declarations
(r'((?:[\w*\s])+?(?:\s|\*))' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')(;)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation)),
default('statement'),
],
'statement': [
include('whitespace'),
include('statements'),
('[{}]', Punctuation),
(';', Punctuation, '#pop'),
],
'function': [
include('whitespace'),
include('statements'),
(';', Punctuation),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
],
'string': [
(r"'", String, '#pop'),
(r'\\([\\abfnrtv"\'?]|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'\n', String),
(r"[^\\'\n]+", String), # all other characters
(r'\\\n', String), # line continuation
(r'\\n', String), # backslash followed by a literal 'n'
(r'\\', String), # stray backslash
],
}
def get_tokens_unprocessed(self, text):
from pygments.lexers._asy_builtins import ASYFUNCNAME, ASYVARNAME
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name and value in ASYFUNCNAME:
token = Name.Function
elif token is Name and value in ASYVARNAME:
token = Name.Variable
yield index, token, value
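
# Minimal usage sketch (editorial, not part of the module): exercise the
# get_tokens_unprocessed override above. Assuming 'draw' is listed in
# ASYFUNCNAME, it is promoted from Name to Name.Function.
def _demo_asymptote_lexer():
    code = 'draw((0,0)--(1,1));'
    for _, token, value in AsymptoteLexer().get_tokens_unprocessed(code):
        print(token, repr(value))
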
def _shortened(word):
    r"""Expand an abbreviatable keyword: 'bi$nd' -> r'bind\b|bin\b|bi\b'."""
    dpos = word.find('$')
    return '|'.join(word[:dpos] + word[dpos+1:i] + r'\b'
                    for i in range(len(word), dpos, -1))

def _shortened_many(*words):
    r"""Join the expansions of several abbreviatable keywords with '|'."""
    return '|'.join(map(_shortened, words))
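
# Minimal usage sketch (editorial): print the regex an abbreviatable Gnuplot
# keyword expands to; everything before the '$' is mandatory, the rest optional.
def _demo_shortened():
    print(_shortened('bi$nd'))                # bind\b|bin\b|bi\b
    print(_shortened_many('ex$it', 'q$uit'))  # exit\b|exi\b|ex\b|quit\b|qui\b|qu\b|q\b
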
class GnuplotLexer(RegexLexer):
"""
For `Gnuplot <http://gnuplot.info/>`_ plotting scripts.
.. versionadded:: 0.11
"""
name = 'Gnuplot'
aliases = ['gnuplot']
filenames = ['*.plot', '*.plt']
mimetypes = ['text/x-gnuplot']
tokens = {
'root': [
include('whitespace'),
(_shortened('bi$nd'), Keyword, 'bind'),
(_shortened_many('ex$it', 'q$uit'), Keyword, 'quit'),
(_shortened('f$it'), Keyword, 'fit'),
(r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation), 'if'),
(r'else\b', Keyword),
(_shortened('pa$use'), Keyword, 'pause'),
(_shortened_many('p$lot', 'rep$lot', 'sp$lot'), Keyword, 'plot'),
(_shortened('sa$ve'), Keyword, 'save'),
(_shortened('se$t'), Keyword, ('genericargs', 'optionarg')),
(_shortened_many('sh$ow', 'uns$et'),
Keyword, ('noargs', 'optionarg')),
(_shortened_many('low$er', 'ra$ise', 'ca$ll', 'cd$', 'cl$ear',
'h$elp', '\\?$', 'hi$story', 'l$oad', 'pr$int',
'pwd$', 're$read', 'res$et', 'scr$eendump',
'she$ll', 'sy$stem', 'up$date'),
Keyword, 'genericargs'),
(_shortened_many('pwd$', 're$read', 'res$et', 'scr$eendump',
'she$ll', 'test$'),
Keyword, 'noargs'),
(r'([a-zA-Z_]\w*)(\s*)(=)',
bygroups(Name.Variable, Text, Operator), 'genericargs'),
(r'([a-zA-Z_]\w*)(\s*\(.*?\)\s*)(=)',
bygroups(Name.Function, Text, Operator), 'genericargs'),
(r'@[a-zA-Z_]\w*', Name.Constant), # macros
(r';', Keyword),
],
'comment': [
(r'[^\\\n]', Comment),
(r'\\\n', Comment),
(r'\\', Comment),
# don't add the newline to the Comment token
default('#pop'),
],
'whitespace': [
('#', Comment, 'comment'),
(r'[ \t\v\f]+', Text),
],
'noargs': [
include('whitespace'),
# semicolon and newline end the argument list
(r';', Punctuation, '#pop'),
(r'\n', Text, '#pop'),
],
'dqstring': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
(r'\n', String, '#pop'), # newline ends the string too
],
'sqstring': [
(r"''", String), # escaped single quote
(r"'", String, '#pop'),
(r"[^\\'\n]+", String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # literal backslash (single-quoted strings have no escapes)
(r'\n', String, '#pop'), # newline ends the string too
],
'genericargs': [
include('noargs'),
(r'"', String, 'dqstring'),
(r"'", String, 'sqstring'),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
(r'(\d+\.\d*|\.\d+)', Number.Float),
(r'-?\d+', Number.Integer),
(r'[,.~!%^&*+=|?:<>/-]', Operator),
(r'[{}()\[\]]', Punctuation),
(r'(eq|ne)\b', Operator.Word),
(r'([a-zA-Z_]\w*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[a-zA-Z_]\w*', Name),
(r'@[a-zA-Z_]\w*', Name.Constant), # macros
(r'\\\n', Text),
],
'optionarg': [
include('whitespace'),
(_shortened_many(
"a$ll", "an$gles", "ar$row", "au$toscale", "b$ars", "bor$der",
"box$width", "cl$abel", "c$lip", "cn$trparam", "co$ntour", "da$ta",
"data$file", "dg$rid3d", "du$mmy", "enc$oding", "dec$imalsign",
"fit$", "font$path", "fo$rmat", "fu$nction", "fu$nctions", "g$rid",
"hid$den3d", "his$torysize", "is$osamples", "k$ey", "keyt$itle",
"la$bel", "li$nestyle", "ls$", "loa$dpath", "loc$ale", "log$scale",
"mac$ros", "map$ping", "map$ping3d", "mar$gin", "lmar$gin",
"rmar$gin", "tmar$gin", "bmar$gin", "mo$use", "multi$plot",
"mxt$ics", "nomxt$ics", "mx2t$ics", "nomx2t$ics", "myt$ics",
"nomyt$ics", "my2t$ics", "nomy2t$ics", "mzt$ics", "nomzt$ics",
"mcbt$ics", "nomcbt$ics", "of$fsets", "or$igin", "o$utput",
"pa$rametric", "pm$3d", "pal$ette", "colorb$ox", "p$lot",
"poi$ntsize", "pol$ar", "pr$int", "obj$ect", "sa$mples", "si$ze",
"st$yle", "su$rface", "table$", "t$erminal", "termo$ptions", "ti$cs",
"ticsc$ale", "ticsl$evel", "timef$mt", "tim$estamp", "tit$le",
"v$ariables", "ve$rsion", "vi$ew", "xyp$lane", "xda$ta", "x2da$ta",
"yda$ta", "y2da$ta", "zda$ta", "cbda$ta", "xl$abel", "x2l$abel",
"yl$abel", "y2l$abel", "zl$abel", "cbl$abel", "xti$cs", "noxti$cs",
"x2ti$cs", "nox2ti$cs", "yti$cs", "noyti$cs", "y2ti$cs", "noy2ti$cs",
"zti$cs", "nozti$cs", "cbti$cs", "nocbti$cs", "xdti$cs", "noxdti$cs",
"x2dti$cs", "nox2dti$cs", "ydti$cs", "noydti$cs", "y2dti$cs",
"noy2dti$cs", "zdti$cs", "nozdti$cs", "cbdti$cs", "nocbdti$cs",
"xmti$cs", "noxmti$cs", "x2mti$cs", "nox2mti$cs", "ymti$cs",
"noymti$cs", "y2mti$cs", "noy2mti$cs", "zmti$cs", "nozmti$cs",
"cbmti$cs", "nocbmti$cs", "xr$ange", "x2r$ange", "yr$ange",
"y2r$ange", "zr$ange", "cbr$ange", "rr$ange", "tr$ange", "ur$ange",
"vr$ange", "xzeroa$xis", "x2zeroa$xis", "yzeroa$xis", "y2zeroa$xis",
"zzeroa$xis", "zeroa$xis", "z$ero"), Name.Builtin, '#pop'),
],
'bind': [
('!', Keyword, '#pop'),
(_shortened('all$windows'), Name.Builtin),
include('genericargs'),
],
'quit': [
(r'gnuplot\b', Keyword),
include('noargs'),
],
'fit': [
(r'via\b', Name.Builtin),
include('plot'),
],
'if': [
(r'\)', Punctuation, '#pop'),
include('genericargs'),
],
'pause': [
(r'(mouse|any|button1|button2|button3)\b', Name.Builtin),
(_shortened('key$press'), Name.Builtin),
include('genericargs'),
],
'plot': [
(_shortened_many('ax$es', 'axi$s', 'bin$ary', 'ev$ery', 'i$ndex',
'mat$rix', 's$mooth', 'thru$', 't$itle',
'not$itle', 'u$sing', 'w$ith'),
Name.Builtin),
include('genericargs'),
],
'save': [
(_shortened_many('f$unctions', 's$et', 't$erminal', 'v$ariables'),
Name.Builtin),
include('genericargs'),
],
}
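
# Minimal usage sketch (editorial, not part of the module): the full 'plot'
# keyword and its documented abbreviations all lex through the same rule.
def _demo_gnuplot_lexer():
    for token, value in GnuplotLexer().get_tokens('plot sin(x)\n'):
        print(token, repr(value))
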
class PovrayLexer(RegexLexer):
"""
For `Persistence of Vision Raytracer <http://www.povray.org/>`_ files.
.. versionadded:: 0.11
"""
name = 'POVRay'
aliases = ['pov']
filenames = ['*.pov', '*.inc']
mimetypes = ['text/x-povray']
tokens = {
'root': [
(r'/\*[\w\W]*?\*/', Comment.Multiline),
(r'//.*\n', Comment.Single),
(r'(?s)"(?:\\.|[^"\\])+"', String.Double),
(words((
'break', 'case', 'debug', 'declare', 'default', 'define', 'else',
'elseif', 'end', 'error', 'fclose', 'fopen', 'for', 'if', 'ifdef',
'ifndef', 'include', 'local', 'macro', 'range', 'read', 'render',
'statistics', 'switch', 'undef', 'version', 'warning', 'while',
'write'), prefix=r'#', suffix=r'\b'),
Comment.Preproc),
(words((
'aa_level', 'aa_threshold', 'abs', 'acos', 'acosh', 'adaptive', 'adc_bailout',
'agate', 'agate_turb', 'all', 'alpha', 'ambient', 'ambient_light', 'angle',
'aperture', 'arc_angle', 'area_light', 'asc', 'asin', 'asinh', 'assumed_gamma',
'atan', 'atan2', 'atanh', 'atmosphere', 'atmospheric_attenuation',
'attenuating', 'average', 'background', 'black_hole', 'blue', 'blur_samples',
'bounded_by', 'box_mapping', 'bozo', 'break', 'brick', 'brick_size',
'brightness', 'brilliance', 'bumps', 'bumpy1', 'bumpy2', 'bumpy3', 'bump_map',
'bump_size', 'case', 'caustics', 'ceil', 'checker', 'chr', 'clipped_by', 'clock',
'color', 'color_map', 'colour', 'colour_map', 'component', 'composite', 'concat',
'confidence', 'conic_sweep', 'constant', 'control0', 'control1', 'cos', 'cosh',
'count', 'crackle', 'crand', 'cube', 'cubic_spline', 'cylindrical_mapping',
'debug', 'declare', 'default', 'degrees', 'dents', 'diffuse', 'direction',
'distance', 'distance_maximum', 'div', 'dust', 'dust_type', 'eccentricity',
'else', 'emitting', 'end', 'error', 'error_bound', 'exp', 'exponent',
'fade_distance', 'fade_power', 'falloff', 'falloff_angle', 'false',
'file_exists', 'filter', 'finish', 'fisheye', 'flatness', 'flip', 'floor',
'focal_point', 'fog', 'fog_alt', 'fog_offset', 'fog_type', 'frequency', 'gif',
'global_settings', 'glowing', 'gradient', 'granite', 'gray_threshold',
'green', 'halo', 'hexagon', 'hf_gray_16', 'hierarchy', 'hollow', 'hypercomplex',
'if', 'ifdef', 'iff', 'image_map', 'incidence', 'include', 'int', 'interpolate',
'inverse', 'ior', 'irid', 'irid_wavelength', 'jitter', 'lambda', 'leopard',
'linear', 'linear_spline', 'linear_sweep', 'location', 'log', 'looks_like',
'look_at', 'low_error_factor', 'mandel', 'map_type', 'marble', 'material_map',
'matrix', 'max', 'max_intersections', 'max_iteration', 'max_trace_level',
'max_value', 'metallic', 'min', 'minimum_reuse', 'mod', 'mortar',
'nearest_count', 'no', 'normal', 'normal_map', 'no_shadow', 'number_of_waves',
'octaves', 'off', 'offset', 'omega', 'omnimax', 'on', 'once', 'onion', 'open',
'orthographic', 'panoramic', 'pattern1', 'pattern2', 'pattern3',
'perspective', 'pgm', 'phase', 'phong', 'phong_size', 'pi', 'pigment',
'pigment_map', 'planar_mapping', 'png', 'point_at', 'pot', 'pow', 'ppm',
'precision', 'pwr', 'quadratic_spline', 'quaternion', 'quick_color',
'quick_colour', 'quilted', 'radial', 'radians', 'radiosity', 'radius', 'rainbow',
'ramp_wave', 'rand', 'range', 'reciprocal', 'recursion_limit', 'red',
'reflection', 'refraction', 'render', 'repeat', 'rgb', 'rgbf', 'rgbft', 'rgbt',
'right', 'ripples', 'rotate', 'roughness', 'samples', 'scale', 'scallop_wave',
'scattering', 'seed', 'shadowless', 'sin', 'sine_wave', 'sinh', 'sky', 'sky_sphere',
'slice', 'slope_map', 'smooth', 'specular', 'spherical_mapping', 'spiral',
'spiral1', 'spiral2', 'spotlight', 'spotted', 'sqr', 'sqrt', 'statistics', 'str',
'strcmp', 'strength', 'strlen', 'strlwr', 'strupr', 'sturm', 'substr', 'switch', 'sys',
't', 'tan', 'tanh', 'test_camera_1', 'test_camera_2', 'test_camera_3',
'test_camera_4', 'texture', 'texture_map', 'tga', 'thickness', 'threshold',
'tightness', 'tile2', 'tiles', 'track', 'transform', 'translate', 'transmit',
'triangle_wave', 'true', 'ttf', 'turbulence', 'turb_depth', 'type',
'ultra_wide_angle', 'up', 'use_color', 'use_colour', 'use_index', 'u_steps',
'val', 'variance', 'vaxis_rotate', 'vcross', 'vdot', 'version', 'vlength',
'vnormalize', 'volume_object', 'volume_rendered', 'vol_with_light',
'vrotate', 'v_steps', 'warning', 'warp', 'water_level', 'waves', 'while', 'width',
'wood', 'wrinkles', 'yes'), prefix=r'\b', suffix=r'\b'),
Keyword),
(words((
'bicubic_patch', 'blob', 'box', 'camera', 'cone', 'cubic', 'cylinder', 'difference',
'disc', 'height_field', 'intersection', 'julia_fractal', 'lathe',
'light_source', 'merge', 'mesh', 'object', 'plane', 'poly', 'polygon', 'prism',
'quadric', 'quartic', 'smooth_triangle', 'sor', 'sphere', 'superellipsoid',
'text', 'torus', 'triangle', 'union'), suffix=r'\b'),
Name.Builtin),
# TODO: <=, etc
(r'[\[\](){}<>;,]', Punctuation),
(r'[-+*/=]', Operator),
(r'\b(x|y|z|u|v)\b', Name.Builtin.Pseudo),
(r'[a-zA-Z_]\w*', Name),
(r'[0-9]+\.[0-9]*', Number.Float),
(r'\.[0-9]+', Number.Float),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String),
(r'\s+', Text),
]
}
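
# Minimal usage sketch (editorial, not part of the module): tokenize a tiny
# POV-Ray scene fragment; 'sphere' is tagged Name.Builtin, 'pigment' Keyword.
def _demo_povray_lexer():
    scene = 'sphere { <0, 1, 2>, 2 pigment { color rgb <1, 0, 0> } }'
    for token, value in PovrayLexer().get_tokens(scene):
        print(token, repr(value))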
