Compare commits

24 Commits
8.0.1 ... 8.1.0

SHA1 Message Date
5e34f3f6a7 v8.1.0 2018-04-03 23:43:43 -07:00
d4441e5575 changes for v8.1.0 2018-04-03 23:43:26 -07:00
9eac8e2bd3 prefer python3 when running wakatime-cli 2018-04-03 23:42:02 -07:00
11d8fc3a09 v8.0.8 2018-03-15 01:51:31 -07:00
d1f1f51f23 changes for v8.0.8 2018-03-15 01:51:20 -07:00
b10bb36c09 Upgrade wakatime-cli to v10.1.3 2018-03-15 01:50:36 -07:00
dc9474befa v8.0.7 2018-03-15 01:32:56 -07:00
b910807e98 changes for v8.0.7 2018-03-15 01:32:37 -07:00
bc770515f0 Upgrade wakatime-cli to v10.1.2 2018-03-15 01:31:17 -07:00
9e102d7c5c v8.0.6 2018-01-04 23:34:40 -08:00
5c1770fb48 changes for v8.0.6 2018-01-04 23:34:13 -08:00
683397534c upgrade wakatime-cli to v10.1.0 2018-01-04 23:33:07 -08:00
1c92017543 v8.0.5 2017-11-24 16:16:47 -08:00
fda1307668 changes for v8.0.5 2017-11-24 16:16:34 -08:00
1c84d457c5 upgrade wakatime-cli to v10.0.5 2017-11-24 16:16:04 -08:00
1e680ce739 v8.0.4 2017-11-23 12:49:07 -08:00
376adbb7d7 changes for v8.0.4 2017-11-23 12:48:44 -08:00
e0040e185b upgrade wakatime-cli to v10.0.4 2017-11-23 12:41:59 -08:00
c4a88541d0 v8.0.3 2017-11-22 13:12:09 -08:00
0cf621d177 changes for v8.0.3 2017-11-22 13:11:48 -08:00
db9d6cec97 upgrade wakatime-cli to v10.0.3 2017-11-22 13:09:17 -08:00
2c17f49a6b v8.0.2 2017-11-15 18:36:43 -08:00
95116d6007 changes for v8.0.2 2017-11-15 18:36:28 -08:00
8c52596f8f upgrade wakatime-cli to v10.0.2 2017-11-15 18:35:43 -08:00
32 changed files with 761 additions and 185 deletions

View File

@ -3,6 +3,71 @@ History
-------
8.1.0 (2018-04-03)
++++++++++++++++++
- Prefer Python3 over Python2 when running wakatime-cli core.
- Improve detection of Python3 on Ubuntu 17.10 platforms.
8.0.8 (2018-03-15)
++++++++++++++++++
- Upgrade wakatime-cli to v10.1.3.
- Smarter C vs C++ vs Objective-C language detection.
8.0.7 (2018-03-15)
++++++++++++++++++
- Upgrade wakatime-cli to v10.1.2.
- Detect dependencies from Swift, Objective-C, TypeScript and JavaScript files.
- Categorize .mjs files as JavaScript.
`#wakatime121 <https://github.com/wakatime/wakatime/issues/121>`_
- Detect dependencies from Elm, Haskell, Haxe, Kotlin, Rust, and Scala files.
- Improve Matlab vs Objective-C language detection.
`#wakatime129 <https://github.com/wakatime/wakatime/issues/129>`_
8.0.6 (2018-01-04)
++++++++++++++++++
- Upgrade wakatime-cli to v10.1.0.
- Ability to only track folders containing a .wakatime-project file using new
include_only_with_project_file argument and config option.
8.0.5 (2017-11-24)
++++++++++++++++++
- Upgrade wakatime-cli to v10.0.5.
- Fix bug that caused heartbeats to be cached locally instead of sent to API.
8.0.4 (2017-11-23)
++++++++++++++++++
- Upgrade wakatime-cli to v10.0.4.
- Improve Java dependency detection.
- Skip null or missing heartbeats from extra heartbeats argument.
8.0.3 (2017-11-22)
++++++++++++++++++
- Upgrade wakatime-cli to v10.0.3.
- Support saving unicode heartbeats when working offline.
`wakatime#112 <https://github.com/wakatime/wakatime/issues/112>`_
8.0.2 (2017-11-15)
++++++++++++++++++
- Upgrade wakatime-cli to v10.0.2.
- Limit bulk syncing to 5 heartbeats per request.
`wakatime#109 <https://github.com/wakatime/wakatime/issues/109>`_
8.0.1 (2017-11-09)
++++++++++++++++++

View File

@ -7,7 +7,7 @@ Website: https://wakatime.com/
==========================================================="""
__version__ = '8.0.1'
__version__ = '8.1.0'
import sublime
@ -305,13 +305,15 @@ def find_python_from_registry(location, reg=None):
return val
def find_python_in_folder(folder, headless=True):
def find_python_in_folder(folder, python3=True, headless=True):
pattern = re.compile(r'\d+\.\d+')
path = 'python'
if folder is not None:
if folder:
path = os.path.realpath(os.path.join(folder, 'python'))
if headless:
if python3:
path = u(path) + u('3')
elif headless:
path = u(path) + u('w')
log(DEBUG, u('Looking for Python at: {0}').format(u(path)))
try:
@ -325,9 +327,13 @@ def find_python_in_folder(folder, headless=True):
except:
log(DEBUG, u(sys.exc_info()[1]))
if headless:
path = find_python_in_folder(folder, headless=False)
if path is not None:
if python3:
path = find_python_in_folder(folder, python3=False, headless=headless)
if path:
return path
elif headless:
path = find_python_in_folder(folder, python3=python3, headless=False)
if path:
return path
return None
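The interleaved hunk above makes the resulting behavior easy to miss: with the new python3 argument, the plugin now looks for a python3 binary first, then falls back to the headless pythonw, then plain python. Below is a minimal sketch of that lookup order, simplified from the diff (logging and the actual version check are omitted, and the helper name is made up).

# Simplified sketch of the candidate order implied by the new
# find_python_in_folder(folder, python3=True, headless=True); the helper
# below is hypothetical and only illustrates the ordering.
import os

def candidate_python_paths(folder=None):
    base = os.path.realpath(os.path.join(folder, 'python')) if folder else 'python'
    # python3 is preferred, pythonw is the headless fallback, python is last.
    return [base + '3', base + 'w', base]

print(candidate_python_paths())            # ['python3', 'pythonw', 'python']
print(candidate_python_paths('/usr/bin'))  # ['/usr/bin/python3', '/usr/bin/pythonw', '/usr/bin/python']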

View File

@ -1,7 +1,7 @@
__title__ = 'wakatime'
__description__ = 'Common interface to the WakaTime api.'
__url__ = 'https://github.com/wakatime/wakatime'
__version_info__ = ('10', '0', '1')
__version_info__ = ('10', '1', '3')
__version__ = '.'.join(__version_info__)
__author__ = 'Alan Hamlett'
__author_email__ = 'alan@wakatime.com'

View File

@ -30,7 +30,7 @@ log = logging.getLogger('WakaTime')
try:
from .packages import requests
except ImportError:
except ImportError: # pragma: nocover
log.traceback(logging.ERROR)
print(traceback.format_exc())
log.error('Please upgrade Python to the latest version.')
@ -138,50 +138,59 @@ def send_heartbeats(heartbeats, args, configs, use_ntlm_proxy=False):
else:
code = response.status_code if response is not None else None
content = response.text if response is not None else None
try:
results = response.json() if response is not None else []
except:
if log.isEnabledFor(logging.DEBUG):
log.traceback(logging.WARNING)
results = []
if code == requests.codes.created or code == requests.codes.accepted:
log.debug({
'response_code': code,
})
for i in range(len(results)):
if len(heartbeats) <= i:
log.debug('Results from server do not match heartbeats sent.')
break
try:
c = results[i][1]
except:
c = 0
try:
text = json.dumps(results[i][0])
except:
if log.isEnabledFor(logging.DEBUG):
log.traceback(logging.WARNING)
text = ''
handle_result([heartbeats[i]], c, text, args, configs)
if _success(code):
results = _get_results(response)
_process_server_results(heartbeats, code, content, results, args, configs)
session_cache.save(session)
return SUCCESS
else:
log.debug({
'response_code': code,
'response_text': content,
})
if should_try_ntlm:
return send_heartbeats(heartbeats, args, configs, use_ntlm_proxy=True)
else:
handle_result(heartbeats, code, content, args, configs)
_handle_unsent_heartbeats(heartbeats, code, content, args, configs)
session_cache.delete()
return AUTH_ERROR if code == 401 else API_ERROR
def handle_result(h, code, content, args, configs):
if code == requests.codes.created or code == requests.codes.accepted:
return
def _process_server_results(heartbeats, code, content, results, args, configs):
log.debug({
'response_code': code,
'results': results,
})
for i in range(len(results)):
if len(heartbeats) <= i:
log.warn('Results from api not matching heartbeats sent.')
break
try:
c = results[i][1]
except:
log.traceback(logging.WARNING)
c = 0
try:
text = json.dumps(results[i][0])
except:
log.traceback(logging.WARNING)
text = ''
if not _success(c):
_handle_unsent_heartbeats([heartbeats[i]], c, text, args, configs)
leftover = len(heartbeats) - len(results)
if leftover > 0:
log.warn('Missing {0} results from api.'.format(leftover))
start = len(heartbeats) - leftover
_handle_unsent_heartbeats(heartbeats[start:], code, content, args, configs)
def _handle_unsent_heartbeats(heartbeats, code, content, args, configs):
if args.offline:
if code == 400:
log.error({
@ -195,9 +204,23 @@ def handle_result(h, code, content, args, configs):
'response_content': content,
})
queue = Queue(args, configs)
queue.push_many(h)
queue.push_many(heartbeats)
else:
log.error({
'response_code': code,
'response_content': content,
})
def _get_results(response):
results = []
if response is not None:
try:
results = response.json()['responses']
except:
log.traceback(logging.WARNING)
return results
def _success(code):
return code == requests.codes.created or code == requests.codes.accepted
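The refactor above splits the old handle_result into small helpers: _get_results pulls the per-heartbeat statuses out of the bulk response, _process_server_results pairs each status with the heartbeat that was sent, and anything unsuccessful or missing is handed to _handle_unsent_heartbeats for offline queuing. A hedged sketch of that pairing, with a made-up response payload shaped like the one _get_results expects:

# Sketch only: the payload below is invented, but its shape matches what
# _get_results reads (response.json()['responses'] as [body, status] pairs).
import json

def success(code):
    return code in (201, 202)  # requests.codes.created / requests.codes.accepted

heartbeats = ['hb1', 'hb2', 'hb3']  # heartbeats sent in one bulk request
response_json = {'responses': [
    [{'data': {}}, 201],                 # accepted
    [{'error': 'invalid entity'}, 400],  # rejected, would be re-queued
]}

results = response_json['responses']
for i, (body, code) in enumerate(results):
    if i >= len(heartbeats):
        break
    if not success(code):
        print('re-queue', heartbeats[i], json.dumps(body))

# Heartbeats with no matching result are treated as unsent as well.
leftover = len(heartbeats) - len(results)
print('missing results for', heartbeats[len(heartbeats) - leftover:])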

View File

@ -103,12 +103,17 @@ def parse_arguments():
'auto-detected language')
parser.add_argument('--hostname', dest='hostname', action=StoreWithoutQuotes, help='hostname of '+
'current machine.')
parser.add_argument('--disableoffline', dest='offline',
parser.add_argument('--disable-offline', dest='offline',
action='store_false',
help='disables offline time logging instead of queuing logged time')
parser.add_argument('--disableoffline', dest='offline_deprecated',
action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--hide-filenames', dest='hide_filenames',
action='store_true',
help='obfuscate filenames; will not send file names to api')
parser.add_argument('--hidefilenames', dest='hidefilenames',
action='store_true',
help='obfuscate file names; will not send file names to api')
help=argparse.SUPPRESS)
parser.add_argument('--exclude', dest='exclude', action='append',
help='filename patterns to exclude from logging; POSIX regex '+
'syntax; can be used more than once')
@ -116,15 +121,24 @@ def parse_arguments():
help='filename patterns to log; when used in combination with '+
'--exclude, files matching include will still be logged; '+
'POSIX regex syntax; can be used more than once')
parser.add_argument('--include-only-with-project-file',
dest='include_only_with_project_file',
action='store_true',
help='disables tracking folders unless they contain '+
'a .wakatime-project file; defaults to false')
parser.add_argument('--ignore', dest='ignore', action='append',
help=argparse.SUPPRESS)
parser.add_argument('--extra-heartbeats', dest='extra_heartbeats',
action='store_true',
help='reads extra heartbeats from STDIN as a JSON array until EOF')
parser.add_argument('--logfile', dest='logfile', action=StoreWithoutQuotes,
parser.add_argument('--log-file', dest='log_file', action=StoreWithoutQuotes,
help='defaults to ~/.wakatime.log')
parser.add_argument('--apiurl', dest='api_url', action=StoreWithoutQuotes,
parser.add_argument('--logfile', dest='logfile', action=StoreWithoutQuotes,
help=argparse.SUPPRESS)
parser.add_argument('--api-url', dest='api_url', action=StoreWithoutQuotes,
help='heartbeats api url; for debugging with a local server')
parser.add_argument('--apiurl', dest='apiurl', action=StoreWithoutQuotes,
help=argparse.SUPPRESS)
parser.add_argument('--timeout', dest='timeout', type=int, action=StoreWithoutQuotes,
help='number of seconds to wait when sending heartbeats to api; '+
'defaults to 60 seconds')
@ -194,6 +208,8 @@ def parse_arguments():
args.exclude.append(pattern)
except TypeError: # pragma: nocover
pass
if not args.include_only_with_project_file and configs.has_option('settings', 'include_only_with_project_file'):
args.include_only_with_project_file = configs.get('settings', 'include_only_with_project_file')
if not args.include:
args.include = []
if configs.has_option('settings', 'include'):
@ -203,18 +219,26 @@ def parse_arguments():
args.include.append(pattern)
except TypeError: # pragma: nocover
pass
if args.hidefilenames:
args.hidefilenames = ['.*']
if not args.hide_filenames and args.hidefilenames:
args.hide_filenames = args.hidefilenames
if args.hide_filenames:
args.hide_filenames = ['.*']
else:
args.hidefilenames = []
args.hide_filenames = []
option = None
if configs.has_option('settings', 'hidefilenames'):
option = configs.get('settings', 'hidefilenames')
if configs.has_option('settings', 'hide_filenames'):
option = configs.get('settings', 'hide_filenames')
if option is not None:
if option.strip().lower() == 'true':
args.hidefilenames = ['.*']
args.hide_filenames = ['.*']
elif option.strip().lower() != 'false':
for pattern in option.split("\n"):
if pattern.strip() != '':
args.hidefilenames.append(pattern)
args.hide_filenames.append(pattern)
if args.offline_deprecated:
args.offline = False
if args.offline and configs.has_option('settings', 'offline'):
args.offline = configs.getboolean('settings', 'offline')
if not args.proxy and configs.has_option('settings', 'proxy'):
@ -235,11 +259,15 @@ def parse_arguments():
args.verbose = configs.getboolean('settings', 'verbose')
if not args.verbose and configs.has_option('settings', 'debug'):
args.verbose = configs.getboolean('settings', 'debug')
if not args.logfile and configs.has_option('settings', 'logfile'):
args.logfile = configs.get('settings', 'logfile')
if not args.logfile and os.environ.get('WAKATIME_HOME'):
if not args.log_file and args.logfile:
args.log_file = args.logfile
if not args.log_file and configs.has_option('settings', 'log_file'):
args.log_file = configs.get('settings', 'log_file')
if not args.log_file and os.environ.get('WAKATIME_HOME'):
home = os.environ.get('WAKATIME_HOME')
args.logfile = os.path.join(os.path.expanduser(home), '.wakatime.log')
args.log_file = os.path.join(os.path.expanduser(home), '.wakatime.log')
if not args.api_url and args.apiurl:
args.api_url = args.apiurl
if not args.api_url and configs.has_option('settings', 'api_url'):
args.api_url = configs.get('settings', 'api_url')
if not args.timeout and configs.has_option('settings', 'timeout'):

View File

@ -96,5 +96,5 @@ except ImportError: # pragma: nocover
try:
from .packages import simplejson as json
except (ImportError, SyntaxError):
except (ImportError, SyntaxError): # pragma: nocover
import json

View File

@ -21,7 +21,7 @@ from .constants import CONFIG_FILE_PARSE_ERROR
try:
import configparser
except ImportError:
except ImportError: # pragma: nocover
from .packages import configparser

View File

@ -106,8 +106,8 @@ class DependencyParser(object):
self.lexer = lexer
if self.lexer:
module_name = self.lexer.__module__.rsplit('.', 1)[-1]
class_name = self.lexer.__class__.__name__.replace('Lexer', 'Parser', 1)
module_name = self.root_lexer.__module__.rsplit('.', 1)[-1]
class_name = self.root_lexer.__class__.__name__.replace('Lexer', 'Parser', 1)
else:
module_name = 'unknown'
class_name = 'UnknownParser'
@ -121,6 +121,12 @@ class DependencyParser(object):
except ImportError:
log.debug('Parsing dependencies not supported for {0}.{1}'.format(module_name, class_name))
@property
def root_lexer(self):
if hasattr(self.lexer, 'root_lexer'):
return self.lexer.root_lexer
return self.lexer
def parse(self):
if self.parser:
plugin = self.parser(self.source_file, lexer=self.lexer)

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
wakatime.languages.c_cpp
~~~~~~~~~~~~~~~~~~~~~~~~
wakatime.dependencies.c_cpp
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from C++ code.

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
wakatime.languages.data
~~~~~~~~~~~~~~~~~~~~~~~
wakatime.dependencies.data
~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from data files.

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
wakatime.languages.dotnet
~~~~~~~~~~~~~~~~~~~~~~~~~
wakatime.dependencies.dotnet
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from .NET code.

View File

@ -0,0 +1,47 @@
# -*- coding: utf-8 -*-
"""
wakatime.dependencies.elm
~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Elm code.
:copyright: (c) 2018 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
from . import TokenParser
class ElmParser(TokenParser):
state = None
def parse(self):
for index, token, content in self.tokens:
self._process_token(token, content)
return self.dependencies
def _process_token(self, token, content):
if self.partial(token) == 'Namespace':
self._process_namespace(token, content)
elif self.partial(token) == 'Text':
self._process_text(token, content)
elif self.partial(token) == 'Class':
self._process_class(token, content)
else:
self._process_other(token, content)
def _process_namespace(self, token, content):
self.state = content.strip()
def _process_class(self, token, content):
if self.state == 'import':
self.append(self._format(content))
def _process_text(self, token, content):
pass
def _process_other(self, token, content):
self.state = None
def _format(self, content):
return content.strip().split('.')[0].strip()

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
wakatime.languages.go
~~~~~~~~~~~~~~~~~~~~~
wakatime.dependencies.go
~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Go code.

View File

@ -0,0 +1,53 @@
# -*- coding: utf-8 -*-
"""
wakatime.dependencies.haskell
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Haskell code.
:copyright: (c) 2018 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
from . import TokenParser
class HaskellParser(TokenParser):
state = None
def parse(self):
for index, token, content in self.tokens:
self._process_token(token, content)
return self.dependencies
def _process_token(self, token, content):
if self.partial(token) == 'Reserved':
self._process_reserved(token, content)
elif self.partial(token) == 'Namespace':
self._process_namespace(token, content)
elif self.partial(token) == 'Keyword':
self._process_keyword(token, content)
elif self.partial(token) == 'Text':
self._process_text(token, content)
else:
self._process_other(token, content)
def _process_reserved(self, token, content):
self.state = content.strip()
def _process_namespace(self, token, content):
if self.state == 'import':
self.append(self._format(content))
def _process_keyword(self, token, content):
if self.state != 'import' or content.strip() != 'qualified':
self.state = None
def _process_text(self, token, content):
pass
def _process_other(self, token, content):
self.state = None
def _format(self, content):
return content.strip().split('.')[0].strip()

View File

@ -0,0 +1,48 @@
# -*- coding: utf-8 -*-
"""
wakatime.dependencies.haxe
~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Haxe code.
:copyright: (c) 2018 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
from . import TokenParser
class HaxeParser(TokenParser):
exclude = [
r'^haxe$',
]
state = None
def parse(self):
for index, token, content in self.tokens:
self._process_token(token, content)
return self.dependencies
def _process_token(self, token, content):
if self.partial(token) == 'Namespace':
self._process_namespace(token, content)
elif self.partial(token) == 'Text':
self._process_text(token, content)
else:
self._process_other(token, content)
def _process_namespace(self, token, content):
if self.state == 'import':
self.append(self._format(content))
self.state = None
else:
self.state = content
def _process_text(self, token, content):
pass
def _process_other(self, token, content):
self.state = None
def _format(self, content):
return content.strip()

View File

@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
"""
wakatime.languages.templates
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
wakatime.dependencies.html
~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Templates.
Parse dependencies from HTML.
:copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details.
@ -69,7 +69,7 @@ KEYWORDS = [
]
class HtmlDjangoParser(TokenParser):
class HtmlParser(TokenParser):
tags = []
opening_tag = False
getting_attrs = False
@ -141,63 +141,3 @@ class HtmlDjangoParser(TokenParser):
elif content.startswith('"') or content.startswith("'"):
if self.current_attr_value is None:
self.current_attr_value = content
class VelocityHtmlParser(HtmlDjangoParser):
pass
class MyghtyHtmlParser(HtmlDjangoParser):
pass
class MasonParser(HtmlDjangoParser):
pass
class MakoHtmlParser(HtmlDjangoParser):
pass
class CheetahHtmlParser(HtmlDjangoParser):
pass
class HtmlGenshiParser(HtmlDjangoParser):
pass
class RhtmlParser(HtmlDjangoParser):
pass
class HtmlPhpParser(HtmlDjangoParser):
pass
class HtmlSmartyParser(HtmlDjangoParser):
pass
class EvoqueHtmlParser(HtmlDjangoParser):
pass
class ColdfusionHtmlParser(HtmlDjangoParser):
pass
class LassoHtmlParser(HtmlDjangoParser):
pass
class HandlebarsHtmlParser(HtmlDjangoParser):
pass
class YamlJinjaParser(HtmlDjangoParser):
pass
class TwigHtmlParser(HtmlDjangoParser):
pass

View File

@ -0,0 +1,60 @@
# -*- coding: utf-8 -*-
"""
wakatime.dependencies.javascript
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from JavaScript code.
:copyright: (c) 2018 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import re
from . import TokenParser
class JavascriptParser(TokenParser):
state = None
extension = re.compile(r'\.\w{1,4}$')
def parse(self):
for index, token, content in self.tokens:
self._process_token(token, content)
return self.dependencies
def _process_token(self, token, content):
if self.partial(token) == 'Reserved':
self._process_reserved(token, content)
elif self.partial(token) == 'Single':
self._process_string(token, content)
elif self.partial(token) == 'Punctuation':
self._process_punctuation(token, content)
else:
self._process_other(token, content)
def _process_reserved(self, token, content):
if self.state is None:
self.state = content
def _process_string(self, token, content):
if self.state == 'import':
self.append(self._format_module(content))
self.state = None
def _process_punctuation(self, token, content):
if content == ';':
self.state = None
def _process_other(self, token, content):
pass
def _format_module(self, content):
content = content.strip().strip('"').strip("'").strip()
content = content.split('/')[-1].split('\\')[-1]
content = self.extension.sub('', content, count=1)
return content
class TypeScriptParser(JavascriptParser):
pass
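The interesting part of the new JavaScript/TypeScript parser is _format_module, which reduces an import target to a bare module name. A standalone reproduction of that helper, with made-up import strings:

# Reproduces _format_module from the class above; the sample inputs are
# hypothetical import targets.
import re

extension = re.compile(r'\.\w{1,4}$')

def format_module(content):
    content = content.strip().strip('"').strip("'").strip()
    content = content.split('/')[-1].split('\\')[-1]
    return extension.sub('', content, count=1)

print(format_module('"react"'))             # react
print(format_module("'./lib/helpers.js'"))  # helpers
print(format_module('"@angular/core"'))     # core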

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
wakatime.languages.java
~~~~~~~~~~~~~~~~~~~~~~~
wakatime.dependencies.java
~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Java code.
@ -43,7 +43,7 @@ class JavaParser(TokenParser):
self._process_other(token, content)
def _process_namespace(self, token, content):
if u(content) == u('import'):
if u(content).split() and u(content).split()[0] == u('import'):
self.state = 'import'
elif self.state == 'import':
@ -94,3 +94,89 @@ class JavaParser(TokenParser):
def _process_other(self, token, content):
pass
class KotlinParser(TokenParser):
state = None
exclude = [
r'^java\.',
]
def parse(self):
for index, token, content in self.tokens:
self._process_token(token, content)
return self.dependencies
def _process_token(self, token, content):
if self.partial(token) == 'Keyword':
self._process_keyword(token, content)
elif self.partial(token) == 'Text':
self._process_text(token, content)
elif self.partial(token) == 'Namespace':
self._process_namespace(token, content)
else:
self._process_other(token, content)
def _process_keyword(self, token, content):
self.state = content
def _process_text(self, token, content):
pass
def _process_namespace(self, token, content):
if self.state == 'import':
self.append(self._format(content))
self.state = None
def _process_other(self, token, content):
self.state = None
def _format(self, content):
content = content.split(u('.'))
if content[-1] == u('*'):
content = content[:len(content) - 1]
if len(content) == 0:
return None
if len(content) == 1:
return content[0]
return u('.').join(content[:2])
class ScalaParser(TokenParser):
state = None
def parse(self):
for index, token, content in self.tokens:
self._process_token(token, content)
return self.dependencies
def _process_token(self, token, content):
if self.partial(token) == 'Keyword':
self._process_keyword(token, content)
elif self.partial(token) == 'Text':
self._process_text(token, content)
elif self.partial(token) == 'Namespace':
self._process_namespace(token, content)
else:
self._process_other(token, content)
def _process_keyword(self, token, content):
self.state = content
def _process_text(self, token, content):
pass
def _process_namespace(self, token, content):
if self.state == 'import':
self.append(self._format(content))
self.state = None
def _process_other(self, token, content):
self.state = None
def _format(self, content):
return content.strip().lstrip('__root__').strip('_').strip('.')

View File

@ -0,0 +1,84 @@
# -*- coding: utf-8 -*-
"""
wakatime.dependencies.objective
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Objective-C and Swift code.
:copyright: (c) 2018 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import re
from . import TokenParser
class SwiftParser(TokenParser):
state = None
exclude = [
r'^foundation$',
]
def parse(self):
for index, token, content in self.tokens:
self._process_token(token, content)
return self.dependencies
def _process_token(self, token, content):
if self.partial(token) == 'Declaration':
self._process_declaration(token, content)
elif self.partial(token) == 'Class':
self._process_class(token, content)
else:
self._process_other(token, content)
def _process_declaration(self, token, content):
if self.state is None:
self.state = content
def _process_class(self, token, content):
if self.state == 'import':
self.append(content)
self.state = None
def _process_other(self, token, content):
pass
class ObjectiveCParser(TokenParser):
state = None
extension = re.compile(r'\.[mh]$')
def parse(self):
for index, token, content in self.tokens:
self._process_token(token, content)
return self.dependencies
def _process_token(self, token, content):
if self.partial(token) == 'Preproc':
self._process_preproc(token, content)
else:
self._process_other(token, content)
def _process_preproc(self, token, content):
if self.state:
self._process_import(token, content)
self.state = content
def _process_import(self, token, content):
if self.state == '#' and content.startswith('import '):
self.append(self._format(content))
self.state = None
def _process_other(self, token, content):
pass
def _format(self, content):
content = content.strip().lstrip('import ').strip()
content = content.strip('"').strip("'").strip()
content = content.strip('<').strip('>').strip()
content = content.split('/')[0]
content = self.extension.sub('', content, count=1)
return content
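For Objective-C the parser watches preprocessor tokens and then normalizes the #import target in _format. A standalone reproduction of that helper, with made-up import lines:

# Reproduces ObjectiveCParser._format from the class above; the sample
# inputs are hypothetical.
import re

extension = re.compile(r'\.[mh]$')

def format_import(content):
    content = content.strip().lstrip('import ').strip()
    content = content.strip('"').strip("'").strip()
    content = content.strip('<').strip('>').strip()
    content = content.split('/')[0]
    return extension.sub('', content, count=1)

print(format_import('import <UIKit/UIKit.h>'))  # UIKit
print(format_import('import "AppDelegate.h"'))   # AppDelegate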

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
wakatime.languages.php
~~~~~~~~~~~~~~~~~~~~~~
wakatime.dependencies.php
~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from PHP code.
@ -16,6 +16,10 @@ from ..compat import u
class PhpParser(TokenParser):
state = None
parens = 0
exclude = [
r'^app$',
r'app\.php$',
]
def parse(self):
for index, token, content in self.tokens:

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
wakatime.languages.python
~~~~~~~~~~~~~~~~~~~~~~~~~
wakatime.dependencies.python
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Python code.

View File

@ -0,0 +1,48 @@
# -*- coding: utf-8 -*-
"""
wakatime.dependencies.rust
~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Rust code.
:copyright: (c) 2018 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
from . import TokenParser
class RustParser(TokenParser):
state = None
def parse(self):
for index, token, content in self.tokens:
self._process_token(token, content)
return self.dependencies
def _process_token(self, token, content):
if self.partial(token) == 'Keyword':
self._process_keyword(token, content)
elif self.partial(token) == 'Whitespace':
self._process_whitespace(token, content)
elif self.partial(token) == 'Name':
self._process_name(token, content)
else:
self._process_other(token, content)
def _process_keyword(self, token, content):
if self.state == 'extern' and content == 'crate':
self.state = 'extern crate'
else:
self.state = content
def _process_whitespace(self, token, content):
pass
def _process_name(self, token, content):
if self.state == 'extern crate':
self.append(content)
self.state = None
def _process_other(self, token, content):
self.state = None

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
wakatime.languages.unknown
~~~~~~~~~~~~~~~~~~~~~~~~~~
wakatime.dependencies.unknown
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from files of unknown language.

View File

@ -12,3 +12,8 @@
class NotYetImplemented(Exception):
"""This method needs to be implemented."""
class SkipHeartbeat(Exception):
"""Raised to prevent the current heartbeat from being sent."""
pass

View File

@ -12,9 +12,10 @@ import logging
import re
from .compat import u, json
from .exceptions import SkipHeartbeat
from .project import get_project_info
from .stats import get_file_stats
from .utils import get_user_agent, should_exclude, format_file_path
from .utils import get_user_agent, should_exclude, format_file_path, find_project_file
log = logging.getLogger('WakaTime')
@ -41,6 +42,10 @@ class Heartbeat(object):
user_agent = None
def __init__(self, data, args, configs, _clone=None):
if not data:
self.skip = u('Skipping because heartbeat data is missing.')
return
self.args = args
self.configs = configs
@ -62,20 +67,28 @@ class Heartbeat(object):
return
if self.type == 'file':
self.entity = format_file_path(self.entity)
if self.type == 'file' and not os.path.isfile(self.entity):
self.skip = u('File does not exist; ignoring this heartbeat.')
return
if not self.entity or not os.path.isfile(self.entity):
self.skip = u('File does not exist; ignoring this heartbeat.')
return
if self._excluded_by_missing_project_file():
self.skip = u('Skipping because missing .wakatime-project file in parent path.')
return
project, branch = get_project_info(configs, self, data)
self.project = project
self.branch = branch
stats = get_file_stats(self.entity,
entity_type=self.type,
lineno=data.get('lineno'),
cursorpos=data.get('cursorpos'),
plugin=args.plugin,
language=data.get('language'))
try:
stats = get_file_stats(self.entity,
entity_type=self.type,
lineno=data.get('lineno'),
cursorpos=data.get('cursorpos'),
plugin=args.plugin,
language=data.get('language'))
except SkipHeartbeat as ex:
self.skip = u(ex) or 'Skipping'
return
else:
self.project = data.get('project')
self.branch = data.get('branch')
@ -91,7 +104,6 @@ class Heartbeat(object):
data = self.dict()
data.update(attrs)
heartbeat = Heartbeat(data, self.args, self.configs, _clone=True)
heartbeat.skip = self.skip
return heartbeat
def sanitize(self):
@ -100,7 +112,7 @@ class Heartbeat(object):
Returns a Heartbeat.
"""
if not self.args.hidefilenames:
if not self.args.hide_filenames:
return self
if self.entity is None:
@ -109,7 +121,7 @@ class Heartbeat(object):
if self.type != 'file':
return self
for pattern in self.args.hidefilenames:
for pattern in self.args.hide_filenames:
try:
compiled = re.compile(pattern, re.IGNORECASE)
if compiled.search(self.entity):
@ -141,30 +153,50 @@ class Heartbeat(object):
def dict(self):
return {
'time': self.time,
'entity': self.entity,
'entity': self._unicode(self.entity),
'type': self.type,
'is_write': self.is_write,
'project': self.project,
'branch': self.branch,
'language': self.language,
'dependencies': self.dependencies,
'project': self._unicode(self.project),
'branch': self._unicode(self.branch),
'language': self._unicode(self.language),
'dependencies': self._unicode_list(self.dependencies),
'lines': self.lines,
'lineno': self.lineno,
'cursorpos': self.cursorpos,
'user_agent': self.user_agent,
'user_agent': self._unicode(self.user_agent),
}
def items(self):
return self.dict().items()
def get_id(self):
return u('{h.time}-{h.type}-{h.project}-{h.branch}-{h.entity}-{h.is_write}').format(
h=self,
return u('{time}-{type}-{project}-{branch}-{entity}-{is_write}').format(
time=self.time,
type=self.type,
project=self._unicode(self.project),
branch=self._unicode(self.branch),
entity=self._unicode(self.entity),
is_write=self.is_write,
)
def _unicode(self, value):
if value is None:
return None
return u(value)
def _unicode_list(self, values):
if values is None:
return None
return [self._unicode(value) for value in values]
def _excluded_by_pattern(self):
return should_exclude(self.entity, self.args.include, self.args.exclude)
def _excluded_by_missing_project_file(self):
if not self.args.include_only_with_project_file:
return False
return find_project_file(self.entity) is None
def __repr__(self):
return self.json()

View File

@ -75,7 +75,7 @@ def setup_logging(args, version):
for handler in logger.handlers:
logger.removeHandler(handler)
set_log_level(logger, args)
logfile = args.logfile
logfile = args.log_file
if not logfile:
logfile = '~/.wakatime.log'
handler = logging.FileHandler(os.path.expanduser(logfile))

View File

@ -104,7 +104,7 @@ class Queue(object):
def pop_many(self, limit=None):
if limit is None:
limit = 100
limit = 5
heartbeats = []

View File

@ -37,7 +37,7 @@ class JavascriptLexer(RegexLexer):
name = 'JavaScript'
aliases = ['js', 'javascript']
filenames = ['*.js', '*.jsm']
filenames = ['*.js', '*.jsm', '*.mjs']
mimetypes = ['application/javascript', 'application/x-javascript',
'text/x-javascript', 'text/javascript']
@ -1035,7 +1035,6 @@ class CoffeeScriptLexer(RegexLexer):
filenames = ['*.coffee']
mimetypes = ['text/coffeescript']
_operator_re = (
r'\+\+|~|&&|\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|\?|:|'
r'\|\||\\(?=\n)|'
@ -1464,6 +1463,7 @@ class EarlGreyLexer(RegexLexer):
],
}
class JuttleLexer(RegexLexer):
"""
For `Juttle`_ source code.

View File

@ -123,6 +123,6 @@ class Git(BaseProject):
subpath = line[len('gitdir: '):].strip()
if os.path.isfile(os.path.join(path, subpath, 'config')) and \
os.path.isfile(os.path.join(path, subpath, 'HEAD')):
return os.path.join(path, subpath)
return os.path.realpath(os.path.join(path, subpath))
return None

View File

@ -12,11 +12,11 @@
"""
import logging
import os
import sys
from .base import BaseProject
from ..compat import u, open
from ..utils import find_project_file
log = logging.getLogger('WakaTime')
@ -25,7 +25,7 @@ log = logging.getLogger('WakaTime')
class ProjectFile(BaseProject):
def process(self):
self.config = self._find_config(self.path)
self.config = find_project_file(self.path)
self._project_name = None
self._project_branch = None
@ -33,13 +33,13 @@ class ProjectFile(BaseProject):
try:
with open(self.config, 'r', encoding='utf-8') as fh:
self._project_name = u(fh.readline().strip())
self._project_branch = u(fh.readline().strip())
self._project_name = u(fh.readline().strip()) or None
self._project_branch = u(fh.readline().strip()) or None
except UnicodeDecodeError: # pragma: nocover
try:
with open(self.config, 'r', encoding=sys.getfilesystemencoding()) as fh:
self._project_name = u(fh.readline().strip())
self._project_branch = u(fh.readline().strip())
self._project_name = u(fh.readline().strip()) or None
self._project_branch = u(fh.readline().strip()) or None
except:
log.traceback(logging.WARNING)
except IOError: # pragma: nocover
@ -53,14 +53,3 @@ class ProjectFile(BaseProject):
def branch(self):
return self._project_branch
def _find_config(self, path):
path = os.path.realpath(path)
if os.path.isfile(path):
path = os.path.split(path)[0]
if os.path.isfile(os.path.join(path, '.wakatime-project')):
return os.path.join(path, '.wakatime-project')
split_path = os.path.split(path)
if split_path[1] == '':
return None
return self._find_config(split_path[0])

View File

@ -17,6 +17,7 @@ import sys
from .compat import u, open
from .constants import MAX_FILE_SIZE_SUPPORTED
from .dependencies import DependencyParser
from .exceptions import SkipHeartbeat
from .language_priorities import LANGUAGES
from .packages.pygments.lexers import (
@ -53,6 +54,8 @@ def get_file_stats(file_name, entity_type='file', lineno=None, cursorpos=None,
if not language:
language, lexer = guess_language(file_name)
language = use_root_language(language, lexer)
parser = DependencyParser(file_name, lexer)
dependencies = parser.parse()
@ -118,6 +121,8 @@ def guess_lexer_using_filename(file_name, text):
try:
lexer = custom_pygments_guess_lexer_for_filename(file_name, text)
except SkipHeartbeat as ex:
raise SkipHeartbeat(u(ex))
except:
log.traceback(logging.DEBUG)
@ -167,17 +172,29 @@ def get_language_from_extension(file_name):
filepart, extension = os.path.splitext(file_name)
if re.match(r'\.h.*', extension, re.IGNORECASE) or re.match(r'\.c.*', extension, re.IGNORECASE):
if re.match(r'\.h.*$', extension, re.IGNORECASE) or re.match(r'\.c.*$', extension, re.IGNORECASE):
if os.path.exists(u('{0}{1}').format(u(filepart), u('.c'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.C'))):
return 'C'
if os.path.exists(u('{0}{1}').format(u(filepart), u('.m'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.M'))):
return 'Objective-C'
if os.path.exists(u('{0}{1}').format(u(filepart), u('.mm'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.MM'))):
return 'Objective-C++'
available_extensions = extensions_in_same_folder(file_name)
if '.cpp' in available_extensions:
return 'C++'
if '.c' in available_extensions:
return 'C'
if re.match(r'\.m$', extension, re.IGNORECASE) and (os.path.exists(u('{0}{1}').format(u(filepart), u('.h'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.H')))):
return 'Objective-C'
if re.match(r'\.mm$', extension, re.IGNORECASE) and (os.path.exists(u('{0}{1}').format(u(filepart), u('.h'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.H')))):
return 'Objective-C++'
return None
@ -236,6 +253,13 @@ def get_lexer(language):
return None
def use_root_language(language, lexer):
if lexer and hasattr(lexer, 'root_lexer'):
return u(lexer.root_lexer.name)
return language
def get_language_from_json(language, key):
"""Finds the given language in a json file."""
@ -299,6 +323,12 @@ def custom_pygments_guess_lexer_for_filename(_fn, _text, **options):
return lexer(**options)
result.append(customize_lexer_priority(_fn, rv, lexer))
matlab = list(filter(lambda x: x[2].name.lower() == 'matlab', result))
if len(matlab) > 0:
objc = list(filter(lambda x: x[2].name.lower() == 'objective-c', result))
if objc and objc[0][0] == matlab[0][0]:
raise SkipHeartbeat('Skipping because not enough language accuracy.')
def type_sort(t):
# sort by:
# - analyse score
@ -322,7 +352,17 @@ def customize_lexer_priority(file_name, accuracy, lexer):
elif lexer_name == 'matlab':
available_extensions = extensions_in_same_folder(file_name)
if '.mat' in available_extensions:
priority = 0.06
accuracy += 0.01
if '.h' not in available_extensions:
accuracy += 0.01
elif lexer_name == 'objective-c':
available_extensions = extensions_in_same_folder(file_name)
if '.mat' in available_extensions:
accuracy -= 0.01
else:
accuracy += 0.01
if '.h' in available_extensions:
accuracy += 0.01
return (accuracy, priority, lexer)
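The two hunks above implement the smarter C vs C++ vs Objective-C detection from the changelog: for header-like extensions the language is decided by sibling files in the same folder, and Matlab vs Objective-C ambiguity on .m files is resolved by accuracy tweaks (or the heartbeat is skipped when accuracy ties). A simplified sketch of the extension heuristic with made-up folder contents; the real code also handles uppercase extensions and uses extensions_in_same_folder:

# Simplified sibling-file heuristic; not the exact plugin code.
import os

def guess_c_family(file_name, files_in_folder):
    filepart, ext = os.path.splitext(file_name)
    siblings = {os.path.splitext(f)[1].lower() for f in files_in_folder}
    if ext.lower().startswith('.h') or ext.lower().startswith('.c'):
        if filepart + '.m' in files_in_folder:
            return 'Objective-C'
        if filepart + '.mm' in files_in_folder:
            return 'Objective-C++'
        if '.cpp' in siblings:
            return 'C++'
        if '.c' in siblings:
            return 'C'
    if ext.lower() == '.m' and filepart + '.h' in files_in_folder:
        return 'Objective-C'
    if ext.lower() == '.mm' and filepart + '.h' in files_in_folder:
        return 'Objective-C++'
    return None

print(guess_c_family('foo.h', ['foo.h', 'foo.m']))    # Objective-C
print(guess_c_family('foo.h', ['foo.h', 'bar.cpp']))  # C++
print(guess_c_family('main.c', ['main.c', 'util.c'])) # C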

View File

@ -82,3 +82,15 @@ def format_file_path(filepath):
def get_hostname(args):
return args.hostname or socket.gethostname()
def find_project_file(path):
path = os.path.realpath(path)
if os.path.isfile(path):
path = os.path.split(path)[0]
if os.path.isfile(os.path.join(path, '.wakatime-project')):
return os.path.join(path, '.wakatime-project')
split_path = os.path.split(path)
if split_path[1] == '':
return None
return find_project_file(split_path[0])
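find_project_file walks up from the entity's folder until it finds a .wakatime-project file or reaches the filesystem root; with the new include_only_with_project_file option, the Heartbeat class skips the heartbeat when this returns None. A usage sketch with a throwaway directory tree, assuming the wakatime package shown in this diff is importable:

# Usage sketch for find_project_file; the directory layout is invented.
import os
import tempfile

from wakatime.utils import find_project_file

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, 'myproject', 'src'))
with open(os.path.join(root, 'myproject', '.wakatime-project'), 'w') as fh:
    fh.write('My Project\nmaster\n')  # line 1: project name, line 2: branch

entity = os.path.join(root, 'myproject', 'src', 'main.py')
open(entity, 'w').close()

# Walking up from src/main.py finds myproject/.wakatime-project.
print(find_project_file(entity))  # .../myproject/.wakatime-project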