mirror of https://github.com/vlang/v.git

tests: improved test output formatting

Delyan Angelov 2019-12-30 06:23:54 +02:00 committed by Alexander Medvednikov
parent 4f173c8900
commit a0f32f5c29
11 changed files with 144 additions and 95 deletions

View File

@@ -14,8 +14,6 @@ pub mut:
vargs string
failed bool
benchmark benchmark.Benchmark
ok string
fail string
}
pub fn new_test_session(vargs string) TestSession {
@@ -34,8 +32,6 @@ pub fn vexe_path() string {
}
pub fn (ts mut TestSession) init() {
ts.ok = term.ok_message('OK')
ts.fail = term.fail_message('FAIL')
ts.benchmark = benchmark.new_benchmark()
}
@@ -43,6 +39,7 @@ pub fn (ts mut TestSession) test() {
tmpd := os.tmpdir()
ts.init()
show_stats := '-stats' in ts.vargs.split(' ')
mut remaining_files := []string
for dot_relative_file in ts.files {
relative_file := dot_relative_file.replace('./', '')
file := os.realpath(relative_file)
@@ -66,6 +63,12 @@ pub fn (ts mut TestSession) test() {
continue
}
}
remaining_files << dot_relative_file
}
ts.benchmark.set_total_expected_steps(remaining_files.len)
for dot_relative_file in remaining_files {
relative_file := dot_relative_file.replace('./', '')
file := os.realpath(relative_file)
// Ensure that the generated binaries will be stored in the temporary folder.
// Remove them after a test passes/fails.
fname := filepath.filename(file)
@@ -97,17 +100,17 @@ pub fn (ts mut TestSession) test() {
r := os.exec(cmd) or {
ts.benchmark.fail()
ts.failed = true
eprintln(ts.benchmark.step_message('$relative_file ${ts.fail}'))
eprintln(ts.benchmark.step_message_fail(relative_file))
continue
}
if r.exit_code != 0 {
ts.benchmark.fail()
ts.failed = true
eprintln(ts.benchmark.step_message('$relative_file ${ts.fail}\n`$file`\n (\n$r.output\n)'))
eprintln(ts.benchmark.step_message_fail('${relative_file}\n`$file`\n (\n$r.output\n)'))
}
else {
ts.benchmark.ok()
eprintln(ts.benchmark.step_message('$relative_file ${ts.ok}'))
eprintln(ts.benchmark.step_message_ok(relative_file))
}
}
if os.exists(generated_binary_fpath) {
@@ -171,19 +174,17 @@ pub fn building_any_v_binaries_failed() bool {
'$vexe -o v_prod -prod v.v',
]
mut bmark := benchmark.new_benchmark()
bok := term.ok_message('OK')
bfail := term.fail_message('FAIL')
for cmd in v_build_commands {
bmark.step()
if build_v_cmd_failed(cmd) {
bmark.fail()
failed = true
eprintln(bmark.step_message('$cmd => ${bfail} . See details above ^^^^^^^'))
eprintln(bmark.step_message_fail('command: ${cmd} . See details above ^^^^^^^'))
eprintln('')
continue
}
bmark.ok()
eprintln(bmark.step_message('$cmd => ${bok}'))
eprintln(bmark.step_message_ok('command: ${cmd}'))
}
bmark.stop()
eprintln(term.h_divider())
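
For context, the runner above now collects the non-skipped files first and only then tells the benchmark its total, so per-step progress can be printed from the very first step. A minimal sketch of the same two-pass pattern (the file names and the skip rule here are hypothetical stand-ins for the real platform checks):

import benchmark

fn run_all(all_files []string) {
	// pass 1: filter out files that should be skipped (stand-in rule)
	mut remaining := []string
	for f in all_files {
		if f.contains('_windows') {
			continue
		}
		remaining << f
	}
	// pass 2: the total is known up front, so step_message_* can prefix `[i/total]`
	mut bm := benchmark.new_benchmark()
	bm.set_total_expected_steps(remaining.len)
	for f in remaining {
		bm.step()
		bm.ok()
		eprintln(bm.step_message_ok(f))
	}
	bm.stop()
}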

View File

@@ -26,6 +26,11 @@ println( bmark.total_message('remarks about the benchmark') )
*/
const (
BOK = term.ok_message('OK')
BFAIL = term.fail_message('FAIL')
)
pub struct Benchmark {
pub mut:
bench_start_time i64
@@ -36,6 +41,10 @@ pub mut:
nok int
nfail int
verbose bool
nexpected_steps int
cstep int
bok string
bfail string
}
pub fn new_benchmark() Benchmark {
@@ -45,12 +54,17 @@ pub fn new_benchmark() Benchmark {
}
}
pub fn (b mut Benchmark) set_total_expected_steps(n int) {
b.nexpected_steps = n
}
pub fn (b mut Benchmark) stop() {
b.bench_end_time = benchmark.now()
}
pub fn (b mut Benchmark) step() {
b.step_start_time = benchmark.now()
b.cstep++
}
pub fn (b mut Benchmark) fail() {
@@ -81,16 +95,45 @@ pub fn (b mut Benchmark) neither_fail_nor_ok() {
b.step_end_time = benchmark.now()
}
pub fn (b &Benchmark) step_message_with_label(label string, msg string) string {
mut timed_line := ''
if b.nexpected_steps > 0 {
mut sprogress := ''
if b.nexpected_steps < 10 {
sprogress = '${b.cstep:1d}/${b.nexpected_steps:1d}'
}
if b.nexpected_steps >= 10 && b.nexpected_steps < 100 {
sprogress = '${b.cstep:2d}/${b.nexpected_steps:2d}'
}
if b.nexpected_steps >= 100 && b.nexpected_steps < 1000 {
sprogress = '${b.cstep:3d}/${b.nexpected_steps:3d}'
}
timed_line = b.tdiff_in_ms('[${sprogress}] $msg', b.step_start_time, b.step_end_time)
}
else {
timed_line = b.tdiff_in_ms(msg, b.step_start_time, b.step_end_time)
}
return '${label:-5s}${timed_line}'
}
pub fn (b &Benchmark) step_message(msg string) string {
return b.tdiff_in_ms(msg, b.step_start_time, b.step_end_time)
return b.step_message_with_label('', msg)
}
pub fn (b &Benchmark) step_message_ok(msg string) string {
return b.step_message_with_label(BOK, msg)
}
pub fn (b &Benchmark) step_message_fail(msg string) string {
return b.step_message_with_label(BFAIL, msg)
}
pub fn (b &Benchmark) total_message(msg string) string {
mut tmsg := '$msg \n ok, fail, total = ' + term.ok_message('${b.nok:5d}') + ', ' + if b.nfail > 0 { term.fail_message('${b.nfail:5d}') } else { '${b.nfail:5d}' } + ', ' + '${b.ntotal:5d}'
mut tmsg := '${msg}\n ok, fail, total = ' + term.ok_message('${b.nok:5d}') + ', ' + if b.nfail > 0 { term.fail_message('${b.nfail:5d}') } else { '${b.nfail:5d}' } + ', ' + '${b.ntotal:5d}'
if b.verbose {
tmsg = '<=== total time spent $tmsg'
}
return b.tdiff_in_ms(tmsg, b.bench_start_time, b.bench_end_time)
return ' ' + b.tdiff_in_ms(tmsg, b.bench_start_time, b.bench_end_time)
}
pub fn (b &Benchmark) total_duration() i64 {
@@ -101,7 +144,7 @@ pub fn (b &Benchmark) total_duration() i64 {
fn (b &Benchmark) tdiff_in_ms(s string, sticks i64, eticks i64) string {
if b.verbose {
tdiff := (eticks - sticks)
return '${tdiff:6lld} ms | $s'
return '${tdiff:6lld} ms $s'
}
return s
}
@@ -109,4 +152,3 @@ fn (b &Benchmark) tdiff_in_ms(s string, sticks i64, eticks i64) string {
fn now() i64 {
return time.ticks()
}
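
Taken together, this gives the benchmark module its new surface: set_total_expected_steps() plus the cstep counter drive the `[i/total]` progress prefix, and step_message_ok()/step_message_fail() wrap step_message_with_label() with the shared BOK/BFAIL labels. A small usage sketch (the outputs in the comments are approximate, and assume a verbose benchmark so the time column is printed):

import benchmark

fn main() {
	mut bm := benchmark.new_benchmark()
	bm.verbose = true
	bm.set_total_expected_steps(2)
	bm.step()
	bm.ok()
	println(bm.step_message_ok('first_test.v')) // ~ `OK        3 ms [1/2] first_test.v`
	bm.step()
	bm.fail()
	println(bm.step_message_fail('second_test.v')) // ~ `FAIL      5 ms [2/2] second_test.v`
	bm.stop()
	println(bm.total_message('done'))
}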

View File

@@ -510,10 +510,10 @@ pub fn (v mut V) generate_main() {
cgen.genln('$cgen.fn_main;')
v.gen_main_end('return 0')
}
else if !v.pref.is_repl {
else if v.v_fmt_file=='' && !v.pref.is_repl {
verror('function `main` is not declared in the main module')
}
}
}
else if v.pref.is_test {
if v.table.main_exists() {
verror('test files cannot have function `main`')
@@ -525,13 +525,13 @@ pub fn (v mut V) generate_main() {
// Generate a C `main`, which calls every single test function
v.gen_main_start(false)
if v.pref.is_stats {
cgen.genln('BenchedTests bt = main__start_testing();')
cgen.genln('BenchedTests bt = main__start_testing(${test_fn_names.len},tos3("$v.dir"));')
}
for tfname in test_fn_names {
if v.pref.is_stats {
cgen.genln('BenchedTests_testing_step_start(&bt, tos3("$tfname"));')
}
cgen.genln('$tfname ();')
cgen.genln('${tfname}();')
if v.pref.is_stats {
cgen.genln('BenchedTests_testing_step_end(&bt);')
}

View File

@@ -4,6 +4,6 @@ import os
import time
const (
os_used = os.MAX_PATH
time_used = time.now()
os_used = os.MAX_PATH
time_used = time.now()
)

View File

@@ -4,6 +4,6 @@ import os
import time
const (
os_used = os.MAX_PATH
time_used = time.now()
os_used = os.MAX_PATH
time_used = time.now()
)

View File

@@ -17,7 +17,8 @@ fn cb_assertion_failed(filename string, line int, sourceline string, funcname string) {
false
}
else {
true}}
true}
}
final_filename := if use_relative_paths { filename } else { os.realpath(filename) }
final_funcname := funcname.replace('main__', '').replace('__', '.')
mut fail_message := 'FAILED assertion'
@@ -32,5 +33,4 @@ fn cb_assertion_failed(filename string, line int, sourceline string, funcname string) {
fn cb_assertion_ok(filename string, line int, sourceline string, funcname string) {
// do nothing for now on an OK assertion
// println('OK ${line:5d}|$sourceline ')
}
}

View File

@@ -1,72 +1,75 @@
module main
///////////////////////////////////////////////////////////////////////
/// This file will get compiled as a part of the same module,
/// in which a given _test.v file is, when v is given -stats argument
/// The methods defined here are called back by the test program's
/// main function, generated by compiler/main.v so that customizing the
/// look & feel of the results is easy, since it is done in normal V
/// code, instead of in embedded C ...
///////////////////////////////////////////////////////////////////////
// /////////////////////////////////////////////////////////////////////
// / This file will get compiled as a part of the same module,
// / in which a given _test.v file is, when v is given -stats argument
// / The methods defined here are called back by the test program's
// / main function, generated by compiler/main.v so that customizing the
// / look & feel of the results is easy, since it is done in normal V
// / code, instead of in embedded C ...
// /////////////////////////////////////////////////////////////////////
import (
os
term
filepath
benchmark
)
const (
INNER_INDENT = ' '
)
struct BenchedTests {
mut:
oks int
fails int
oks int
fails int
test_suit_file string
step_func_name string
bench benchmark.Benchmark
bench benchmark.Benchmark
}
/////////////////////////////////////////////////////////////////////
// ///////////////////////////////////////////////////////////////////
// Called at the start of the test program produced by `v -stats file_test.v`
fn start_testing() BenchedTests {
mut b := BenchedTests{ bench: benchmark.new_benchmark() }
b.test_suit_file = os.executable() + '.v'
fn start_testing(total_number_of_tests int, vfilename string) BenchedTests {
mut b := BenchedTests{
bench: benchmark.new_benchmark()
}
b.bench.set_total_expected_steps(total_number_of_tests)
b.test_suit_file = vfilename
println('running tests in: $b.test_suit_file')
return b
}
// Called before each test_ function, defined in file_test.v
fn (b mut BenchedTests) testing_step_start(stepfunc string) {
b.step_func_name = stepfunc.replace('main__','').replace('__','.')
b.oks = C.g_test_oks
b.step_func_name = stepfunc.replace('main__', '').replace('__', '.')
b.oks = C.g_test_oks
b.fails = C.g_test_fails
b.bench.step()
}
// Called after each test_ function, defined in file_test.v
fn (b mut BenchedTests) testing_step_end() {
ok_diff := C.g_test_oks - b.oks
ok_diff := C.g_test_oks - b.oks
fail_diff := C.g_test_fails - b.fails
//////////////////////////////////////////////////////////////////
// ////////////////////////////////////////////////////////////////
if ok_diff == 0 && fail_diff == 0 {
b.bench.neither_fail_nor_ok()
println(' ' + b.bench.step_message('NO asserts | ') + b.fn_name() )
println(INNER_INDENT + b.bench.step_message_ok('NO asserts | ') + b.fn_name())
return
}
//////////////////////////////////////////////////////////////////
if ok_diff > 0 {
}
// ////////////////////////////////////////////////////////////////
if ok_diff > 0 {
b.bench.ok_many(ok_diff)
}
if fail_diff > 0 {
b.bench.fail_many(fail_diff)
}
//////////////////////////////////////////////////////////////////
if ok_diff > 0 && fail_diff == 0 {
println(ok_text('OK') + b.bench.step_message(nasserts(ok_diff)) + b.fn_name() )
// ////////////////////////////////////////////////////////////////
if ok_diff > 0 && fail_diff == 0 {
println(INNER_INDENT + b.bench.step_message_ok(nasserts(ok_diff)) + b.fn_name())
return
}
if fail_diff > 0 {
println(fail_text('FAIL') + b.bench.step_message(nasserts(fail_diff)) + b.fn_name() )
if fail_diff > 0 {
println(INNER_INDENT + b.bench.step_message_fail(nasserts(fail_diff)) + b.fn_name())
return
}
}
@@ -78,22 +81,25 @@ fn (b &BenchedTests) fn_name() string {
// Called at the end of the test program produced by `v -stats file_test.v`
fn (b mut BenchedTests) end_testing() {
b.bench.stop()
println( ' ' + b.bench.total_message('running V tests in "' + filepath.filename(b.test_suit_file) + '"' ) )
println(INNER_INDENT + b.bench.total_message('running V tests in "' + filepath.filename(b.test_suit_file) + '"'))
}
/////////////////////////////////////////////////////////////////////
// ///////////////////////////////////////////////////////////////////
fn nasserts(n int) string {
if n==0 { return '${n:2d} asserts | ' }
if n==1 { return '${n:2d} assert | ' }
return '${n:2d} asserts | '
if n == 0 {
return '${n:2d} asserts | '
}
if n == 1 {
return '${n:2d} assert | '
}
if n < 10 {
return '${n:2d} asserts | '
}
if n < 100 {
return '${n:3d} asserts | '
}
if n < 1000 {
return '${n:4d} asserts | '
}
return '${n:5d} asserts | '
}
fn ok_text(s string) string {
return term.ok_message('${s:5s}')
}
fn fail_text(s string) string {
return term.fail_message('${s:5s}')
}
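
When a _test.v file is compiled with -stats, the main generated by compiler/main.v (see the generate_main hunk above) drives these callbacks. Roughly, translated from the emitted C into V for readability, and with hypothetical test function names:

fn run_generated_main() {
	// test count and the source file name are the new start_testing arguments in this commit
	mut bt := start_testing(2, 'file_test.v')
	// test_foo and test_bar stand in for the user's test functions
	bt.testing_step_start('main__test_foo')
	test_foo()
	bt.testing_step_end()
	bt.testing_step_start('main__test_bar')
	test_bar()
	bt.testing_step_end()
	bt.end_testing()
}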

View File

@@ -1,6 +1,5 @@
// Build and run files in ./prod/ folder, comparing their output to *.expected.txt files.
// (Similar to REPL tests, but in -prod mode.)
// import os
import compiler.tests.repl.runner
import benchmark
@@ -15,15 +14,15 @@ fn test_all_v_prod_files() {
bmark.step()
fres := runner.run_prod_file(options.wd, options.vexec, file) or {
bmark.fail()
eprintln( bmark.step_message(err) )
eprintln(bmark.step_message_fail(err))
assert false
continue
}
bmark.ok()
println( bmark.step_message(fres) )
println(bmark.step_message_ok(fres))
assert true
}
bmark.stop()
println( bmark.total_message('total time spent running PROD files') )
println(bmark.total_message('total time spent running PROD files'))
}
}

View File

@@ -8,15 +8,17 @@ fn test_the_v_compiler_can_be_invoked() {
vexec := runner.full_path_to_v(5)
println('vexecutable: $vexec')
assert vexec != ''
vcmd := '"$vexec" --version'
r := os.exec(vcmd) or { panic(err) }
//println('"$vcmd" exit_code: $r.exit_code | output: $r.output')
r := os.exec(vcmd) or {
panic(err)
}
// println('"$vcmd" exit_code: $r.exit_code | output: $r.output')
assert r.exit_code == 0
vcmd_error := '"$vexec" nonexisting.v'
r_error := os.exec(vcmd_error) or { panic(err) }
//println('"$vcmd_error" exit_code: $r_error.exit_code | output: $r_error.output')
r_error := os.exec(vcmd_error) or {
panic(err)
}
// println('"$vcmd_error" exit_code: $r_error.exit_code | output: $r_error.output')
assert r_error.exit_code == 1
assert r_error.output == '`nonexisting.v` does not exist'
}
@@ -28,15 +30,14 @@ fn test_all_v_repl_files() {
bmark.step()
fres := runner.run_repl_file(options.wd, options.vexec, file) or {
bmark.fail()
eprintln( bmark.step_message(err) )
eprintln(bmark.step_message_fail(err))
assert false
continue
}
bmark.ok()
println( bmark.step_message(fres) )
println(bmark.step_message_ok(fres))
assert true
}
bmark.stop()
println( bmark.total_message('total time spent running REPL files') )
println(bmark.total_message('total time spent running REPL files'))
}

View File

@@ -4,22 +4,21 @@ import compiler.tests.repl.runner
import log
import benchmark
fn main(){
fn main() {
mut logger := log.Log{}
logger.set_level(log.DEBUG)
options := runner.new_options()
mut bmark := benchmark.new_benchmark()
for file in options.files {
bmark.step()
fres := runner.run_repl_file(options.wd, options.vexec, file) or {
bmark.fail()
logger.error( bmark.step_message( err ) )
logger.error(bmark.step_message_fail(err))
continue
}
bmark.ok()
logger.info( bmark.step_message( fres ) )
logger.info(bmark.step_message_ok(fres))
}
bmark.stop()
logger.info( bmark.total_message('total time spent running REPL files') )
logger.info(bmark.total_message('total time spent running REPL files'))
}

View File

@@ -23,6 +23,7 @@ struct C.dirent {
fn C.readdir(voidptr) C.dirent
pub const (
args = []string
MAX_PATH = 4096
@@ -972,7 +973,7 @@ pub fn walk_ext(path, ext string) []string {
continue
}
p := path + separator + file
if os.is_dir(p) {
if os.is_dir(p) && !os.is_link(p) {
res << walk_ext(p, ext)
}
else if file.ends_with(ext) {
@@ -993,7 +994,7 @@ pub fn walk(path string, fnc fn(path string)) {
}
for file in files {
p := path + os.path_separator + file
if os.is_dir(p) {
if os.is_dir(p) && !os.is_link(p) {
walk(p, fnc)
}
else if os.exists(p) {
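
The `!os.is_link(p)` guard added to both walkers keeps a symlinked directory that points back at an ancestor from recursing forever. A minimal usage sketch:

import os

fn main() {
	// collect all .v files under the current directory;
	// directory symlinks are now skipped, so link cycles terminate
	for f in os.walk_ext('.', '.v') {
		println(f)
	}
}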