
compiler: implement -stats option for running a _test.v file

* Draft implementation of `v -stats file_test.v` (a usage sketch follows below).

* compiler: call the helpers in vlib/benchmark/tests/always_imported.v when doing `v -stats file_test.v`.

* Nicer-looking output from `v -stats file_test.v`.

* Tweak colors and layout of the `-stats file_test.v` output.

* Fix a hardcoded path in compiler/main.v.

* Show colorized OK/FAIL for the examples in `v test v` too.

* Add some comments about the purpose of the methods inside vlib/benchmark/tests/always_imported.v.

* When fails are 0, do not colorize their number at all.
Delyan Angelov 2019-10-07 08:51:26 +03:00 committed by Alexander Medvednikov
parent f1923d454c
commit ac5241b5bd
5 changed files with 181 additions and 20 deletions
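
For context, a minimal hypothetical usage sketch of the feature this commit adds. The file name math_test.v and its test functions are invented for illustration; the `v -stats` invocation, the always_imported.v hook file, and the g_test_oks/g_test_fails counters are from this commit.

// math_test.v -- hypothetical example; run it with: v -stats math_test.v
// With -stats, this file is compiled together with
// vlib/benchmark/tests/always_imported.v, each test_ function is wrapped
// in BenchedTests_testing_step_start()/..._end() calls, and every assert
// increments g_test_oks or g_test_fails.
fn test_addition() {
	assert 1 + 1 == 2
	assert 2 + 2 == 4
}

fn test_subtraction() {
	assert 5 - 3 == 2
}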

View File

@@ -8,6 +8,7 @@ import (
os
strings
benchmark
term
)
const (
@@ -95,6 +96,7 @@ mut:
sanitize bool // use Clang's new "-fsanitize" option
is_debuggable bool
is_debug bool // keep compiled C files
is_stats bool // `v -stats file_test.v` will produce more detailed statistics for the tests that were run
no_auto_free bool // `v -nofree` disable automatic `free()` insertion for better performance in some applications (e.g. compilers)
cflags string // Additional options which will be passed to the C compiler.
// For example, passing -cflags -Os will cause the C compiler to optimize the generated binaries for size.
@@ -108,7 +110,6 @@ mut:
// to increase compilation time.
// This is on by default, since a vast majority of users do not
// work on the builtin module itself.
}
fn main() {
@@ -181,7 +182,12 @@ fn main() {
return
}
v.compile()
mut tmark := benchmark.new_benchmark()
v.compile()
if v.pref.is_stats {
tmark.stop()
println( 'compilation took: ' + tmark.total_duration().str() + 'ms')
}
if v.pref.is_test {
v.run_compiled_executable_and_exit()
@@ -309,7 +315,8 @@ fn (v mut V) compile() {
//cgen.genln('i64 total_m = 0; // For counting total RAM allocated')
//if v.pref.is_test {
$if !js {
cgen.genln('int g_test_ok = 1; ')
cgen.genln('int g_test_oks = 0;')
cgen.genln('int g_test_fails = 0;')
}
if imports_json {
cgen.genln('
@@ -459,12 +466,18 @@ string _STR_TMP(const char *fmt, ...) {
}
// Generate a C `main`, which calls every single test function
v.gen_main_start(false)
if v.pref.is_stats { cgen.genln('BenchedTests bt = main__start_testing();') }
for _, f in v.table.fns {
if f.name.starts_with('main__test_') {
cgen.genln('$f.name();')
if v.pref.is_stats { cgen.genln('BenchedTests_testing_step_start(&bt, tos3("$f.name"));') }
cgen.genln('$f.name();')
if v.pref.is_stats { cgen.genln('BenchedTests_testing_step_end(&bt);') }
}
}
v.gen_main_end('return g_test_ok == 0')
if v.pref.is_stats { cgen.genln('BenchedTests_end_testing(&bt);') }
v.gen_main_end('return g_test_fails > 0')
}
else if v.table.main_exists() {
v.gen_main_start(true)
@@ -636,6 +649,11 @@ fn (v &V) get_user_files() []string {
// Need to store user files separately, because they have to be added after libs, but we dont know
// which libs need to be added yet
mut user_files := []string
if v.pref.is_test && v.pref.is_stats {
user_files << [v.vroot, 'vlib', 'benchmark', 'tests', 'always_imported.v'].join( os.PathSeparator )
}
// v volt/slack_test.v: compile all .v files to get the environment
// I need to implement user packages! TODO
is_test_with_imports := dir.ends_with('_test.v') &&
@@ -883,6 +901,7 @@ fn new_v(args[]string) &V {
is_verbose: '-verbose' in args || '--verbose' in args
is_debuggable: '-g' in args
is_debug: '-debug' in args || '-g' in args
is_stats: '-stats' in args
obfuscate: obfuscate
is_prof: '-prof' in args
is_live: '-live' in args
@@ -1052,6 +1071,8 @@ fn (v &V) test_v() {
mut failed := false
test_files := os.walk_ext(parent_dir, '_test.v')
ok := term.ok_message('OK')
fail := term.fail_message('FAIL')
println('Testing...')
mut tmark := benchmark.new_benchmark()
for dot_relative_file in test_files {
@@ -1066,16 +1087,16 @@ fn (v &V) test_v() {
r := os.exec(cmd) or {
tmark.fail()
failed = true
println(tmark.step_message('$relative_file FAIL'))
println(tmark.step_message('$relative_file $fail'))
continue
}
if r.exit_code != 0 {
failed = true
tmark.fail()
println(tmark.step_message('$relative_file FAIL \n`$file`\n (\n$r.output\n)'))
println(tmark.step_message('$relative_file $fail\n`$file`\n (\n$r.output\n)'))
} else {
tmark.ok()
println(tmark.step_message('$relative_file OK'))
println(tmark.step_message('$relative_file $ok'))
}
os.rm( tmpc_filepath )
}
@@ -1097,16 +1118,16 @@ fn (v &V) test_v() {
r := os.exec(cmd) or {
failed = true
bmark.fail()
println(bmark.step_message('$relative_file FAIL'))
println(bmark.step_message('$relative_file $fail'))
continue
}
if r.exit_code != 0 {
failed = true
bmark.fail()
println(bmark.step_message('$relative_file FAIL \n`$file`\n (\n$r.output\n)'))
println(bmark.step_message('$relative_file $fail \n`$file`\n (\n$r.output\n)'))
} else {
bmark.ok()
println(bmark.step_message('$relative_file OK'))
println(bmark.step_message('$relative_file $ok'))
}
os.rm(tmpc_filepath)
}

View File

@@ -3821,16 +3821,20 @@ fn (p mut Parser) assert_statement() {
p.check_types(p.bool_expression(), 'bool')
// TODO print "expected: got" for failed tests
filename := p.file_path.replace('\\', '\\\\')
p.genln(';\n
p.genln(';
\n
if (!$tmp) {
println(tos2((byte *)"\\x1B[31mFAILED: $p.cur_fn.name() in $filename:$p.scanner.line_nr\\x1B[0m"));
g_test_ok = 0 ;
// TODO
// Maybe print all vars in a test function if it fails?
g_test_fails++;
// TODO
// Maybe print all vars in a test function if it fails?
} else {
g_test_oks++;
//println(tos2((byte *)"\\x1B[32mPASSED: $p.cur_fn.name()\\x1B[0m"));
}
else {
//puts("\\x1B[32mPASSED: $p.cur_fn.name()\\x1B[0m");
}')
')
}
fn (p mut Parser) return_st() {

View File

@@ -1,6 +1,7 @@
module benchmark
import time
import term
/*
Example usage of this module:
@@ -64,18 +65,40 @@ pub fn (b mut Benchmark) ok() {
b.nok++
}
pub fn (b mut Benchmark) fail_many(n int) {
b.step_end_time = benchmark.now()
b.ntotal+=n
b.nfail+=n
}
pub fn (b mut Benchmark) ok_many(n int) {
b.step_end_time = benchmark.now()
b.ntotal+=n
b.nok+=n
}
pub fn (b mut Benchmark) neither_fail_nor_ok() {
b.step_end_time = benchmark.now()
}
pub fn (b mut Benchmark) step_message(msg string) string {
return b.tdiff_in_ms(msg, b.step_start_time, b.step_end_time)
}
pub fn (b mut Benchmark) total_message(msg string) string {
mut tmsg := '$msg : ok, fail, total = ${b.nok:5d}, ${b.nfail:5d}, ${b.ntotal:5d}'
mut tmsg := '$msg \n ok, fail, total = ' +
term.ok_message('${b.nok:5d}') + ', ' +
if b.nfail > 0 { term.fail_message('${b.nfail:5d}') } else { '${b.nfail:5d}' } + ', ' +
'${b.ntotal:5d}'
if b.verbose {
tmsg = '<=== total time spent $tmsg'
}
return b.tdiff_in_ms(tmsg, b.bench_start_time, b.bench_end_time)
}
pub fn (b mut Benchmark) total_duration() i64 {
return (b.bench_end_time - b.bench_start_time)
}
////////////////////////////////////////////////////////////////////
fn (b mut Benchmark) tdiff_in_ms(s string, sticks i64, eticks i64) string {
@@ -89,4 +112,3 @@ fn (b mut Benchmark) tdiff_in_ms(s string, sticks i64, eticks i64) string {
fn now() i64 {
return time.ticks()
}
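
A minimal sketch of using the benchmark module directly, restricted to the calls visible in this diff (new_benchmark, step, ok, fail, step_message, stop, total_message); the loop items and labels below are invented:

import benchmark

fn main() {
	mut bmark := benchmark.new_benchmark()
	for task in ['parse', 'check', 'gen'] { // hypothetical work items
		bmark.step()
		// ... the real work for `task` would go here ...
		bmark.ok() // or bmark.fail() when the step did not succeed
		println(bmark.step_message('$task done'))
	}
	bmark.stop()
	println(bmark.total_message('all tasks'))
}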

View File

@@ -0,0 +1,96 @@
module main
///////////////////////////////////////////////////////////////////////
/// This file will get compiled as a part of the same module,
/// in which a given _test.v file is, when v is given -stats argument
/// The methods defined here are called back by the test program's
/// main function, generated by compiler/main.v so that customizing the
/// look & feel of the results is easy, since it is done in normal V
/// code, instead of in embedded C ...
///////////////////////////////////////////////////////////////////////
import os
import benchmark
import term
struct BenchedTests {
mut:
oks int
fails int
test_suit_file string
step_func_name string
bench benchmark.Benchmark
}
/////////////////////////////////////////////////////////////////////
// Called at the start of the test program produced by `v -stats file_test.v`
fn start_testing() BenchedTests {
mut b := BenchedTests{ bench: benchmark.new_benchmark() }
b.test_suit_file = os.executable() + '.v'
println('running tests in: $b.test_suit_file')
return b
}
// Called before each test_ function, defined in file_test.v
fn (b mut BenchedTests) testing_step_start(stepfunc string) {
b.step_func_name = stepfunc.replace('main__','')
b.oks = C.g_test_oks
b.fails = C.g_test_fails
b.bench.step()
}
// Called after each test_ function, defined in file_test.v
fn (b mut BenchedTests) testing_step_end() {
ok_diff := C.g_test_oks - b.oks
fail_diff := C.g_test_fails - b.fails
//////////////////////////////////////////////////////////////////
if ok_diff == 0 && fail_diff == 0 {
b.bench.neither_fail_nor_ok()
println(' ' + b.bench.step_message('NO asserts | ') + b.fn_name() )
return
}
//////////////////////////////////////////////////////////////////
if ok_diff > 0 {
b.bench.ok_many(ok_diff)
}
if fail_diff > 0 {
b.bench.fail_many(fail_diff)
}
//////////////////////////////////////////////////////////////////
if ok_diff > 0 && fail_diff == 0 {
println(ok_text('OK') + b.bench.step_message(nasserts(ok_diff)) + b.fn_name() )
return
}
if fail_diff > 0 {
println(fail_text('FAIL') + b.bench.step_message(nasserts(fail_diff)) + b.fn_name() )
return
}
}
fn (b &BenchedTests) fn_name() string {
return b.step_func_name + '()'
}
// Called at the end of the test program produced by `v -stats file_test.v`
fn (b mut BenchedTests) end_testing() {
b.bench.stop()
println( ' ' + b.bench.total_message('running V tests in "' + os.filename(b.test_suit_file) + '"' ) )
}
/////////////////////////////////////////////////////////////////////
fn nasserts(n int) string {
if n==0 { return '${n:2d} asserts | ' }
if n==1 { return '${n:2d} assert | ' }
return '${n:2d} asserts | '
}
fn ok_text(s string) string {
return term.ok_message('${s:5s}')
}
fn fail_text(s string) string {
return term.fail_message('${s:5s}')
}

View File

@@ -7,3 +7,21 @@ pub fn can_show_color_on_stdout() bool {
pub fn can_show_color_on_stderr() bool {
return can_show_color_on_fd(2)
}
//////////////////////////////////////////////
pub fn ok_message(s string) string {
return if can_show_color_on_stdout() {
green( s )
}else{
s
}
}
pub fn fail_message(s string) string {
return if can_show_color_on_stdout() {
red( s )
}else{
s
}
}
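
And a minimal sketch of the new term helpers in use; the `main` wrapper here is purely illustrative:

import term

fn main() {
	// colorized only when stdout is a terminal, plain text otherwise
	println(term.ok_message('OK'))
	println(term.fail_message('FAIL'))
}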