mirror of https://github.com/vlang/v.git, synced 2023-08-10 21:13:21 +03:00
tests: improved test output formatting
committed by Alexander Medvednikov
parent 4f173c8900
commit a0f32f5c29

@@ -510,10 +510,10 @@ pub fn (v mut V) generate_main() {
cgen.genln('$cgen.fn_main;')
v.gen_main_end('return 0')
}
else if !v.pref.is_repl {
else if v.v_fmt_file=='' && !v.pref.is_repl {
verror('function `main` is not declared in the main module')
}
}
}
else if v.pref.is_test {
if v.table.main_exists() {
verror('test files cannot have function `main`')

@@ -525,13 +525,13 @@ pub fn (v mut V) generate_main() {
// Generate a C `main`, which calls every single test function
v.gen_main_start(false)
if v.pref.is_stats {
cgen.genln('BenchedTests bt = main__start_testing();')
cgen.genln('BenchedTests bt = main__start_testing(${test_fn_names.len},tos3("$v.dir"));')
}
for tfname in test_fn_names {
if v.pref.is_stats {
cgen.genln('BenchedTests_testing_step_start(&bt, tos3("$tfname"));')
}
cgen.genln('$tfname ();')
cgen.genln('${tfname}();')
if v.pref.is_stats {
cgen.genln('BenchedTests_testing_step_end(&bt);')
}

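For context, the hunk above wires a `-stats` test binary to the `BenchedTests` hooks defined later in this commit. Expressed as an illustrative V-flavoured sketch (the real output is C emitted by cgen; `test_fn_names`, the directory string and the hook names come from the diff above, so treat this purely as a reading aid):

// Illustrative only: rough control flow of the generated test main under -stats.
fn generated_main_sketch(test_fn_names []string, dir string) {
	mut bt := start_testing(test_fn_names.len, dir) // BenchedTests bt = main__start_testing(N, tos3(dir));
	for tfname in test_fn_names {
		bt.testing_step_start(tfname) // BenchedTests_testing_step_start(&bt, tos3("$tfname"));
		// ... here the generated code calls the test function `${tfname}()` itself ...
		bt.testing_step_end() // BenchedTests_testing_step_end(&bt);
	}
	bt.end_testing() // prints the summary at the end of the run
}
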
@@ -4,6 +4,6 @@ import os
import time

const (
os_used = os.MAX_PATH
time_used = time.now()
os_used = os.MAX_PATH
time_used = time.now()
)

@@ -4,6 +4,6 @@ import os
import time

const (
os_used = os.MAX_PATH
time_used = time.now()
os_used = os.MAX_PATH
time_used = time.now()
)

@@ -17,7 +17,8 @@ fn cb_assertion_failed(filename string, line int, sourceline string, funcname st
false
}
else {
true}}
true}
}
final_filename := if use_relative_paths { filename } else { os.realpath(filename) }
final_funcname := funcname.replace('main__', '').replace('__', '.')
mut fail_message := 'FAILED assertion'

@@ -32,5 +33,4 @@ fn cb_assertion_failed(filename string, line int, sourceline string, funcname st
fn cb_assertion_ok(filename string, line int, sourceline string, funcname string) {
// do nothing for now on an OK assertion
// println('OK ${line:5d}|$sourceline ')
}

}

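The two hunks above touch the assertion callbacks of the test support code; per the `cb_assertion_ok` comment, these are ordinary V functions invoked for each `assert`. A heavily simplified, hypothetical stand-in (only the signatures match the diff; the real `cb_assertion_failed` builds the detailed, optionally relative-path message shown above):

// Hypothetical, simplified stand-ins -- not the real bodies from the diff.
fn cb_assertion_ok(filename string, line int, sourceline string, funcname string) {
	// deliberately silent on a passing assertion
}

fn cb_assertion_failed(filename string, line int, sourceline string, funcname string) {
	eprintln('FAILED assertion in $funcname ($filename:$line): $sourceline')
}
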
@@ -1,72 +1,75 @@
module main

///////////////////////////////////////////////////////////////////////
/// This file will get compiled as a part of the same module,
/// in which a given _test.v file is, when v is given -stats argument
/// The methods defined here are called back by the test program's
/// main function, generated by compiler/main.v so that customizing the
/// look & feel of the results is easy, since it is done in normal V
/// code, instead of in embedded C ...
///////////////////////////////////////////////////////////////////////

// /////////////////////////////////////////////////////////////////////
// / This file will get compiled as a part of the same module,
// / in which a given _test.v file is, when v is given -stats argument
// / The methods defined here are called back by the test program's
// / main function, generated by compiler/main.v so that customizing the
// / look & feel of the results is easy, since it is done in normal V
// / code, instead of in embedded C ...
// /////////////////////////////////////////////////////////////////////
import (
os
term
filepath
benchmark
)

const (
INNER_INDENT = ' '
)

struct BenchedTests {
mut:
oks int
fails int
oks int
fails int
test_suit_file string
step_func_name string
bench benchmark.Benchmark
bench benchmark.Benchmark
}

/////////////////////////////////////////////////////////////////////

// ///////////////////////////////////////////////////////////////////
// Called at the start of the test program produced by `v -stats file_test.v`
fn start_testing() BenchedTests {
mut b := BenchedTests{ bench: benchmark.new_benchmark() }
b.test_suit_file = os.executable() + '.v'
fn start_testing(total_number_of_tests int, vfilename string) BenchedTests {
mut b := BenchedTests{
bench: benchmark.new_benchmark()
}
b.bench.set_total_expected_steps(total_number_of_tests)
b.test_suit_file = vfilename
println('running tests in: $b.test_suit_file')
return b
}

// Called before each test_ function, defined in file_test.v
fn (b mut BenchedTests) testing_step_start(stepfunc string) {
b.step_func_name = stepfunc.replace('main__','').replace('__','.')
b.oks = C.g_test_oks
b.step_func_name = stepfunc.replace('main__', '').replace('__', '.')
b.oks = C.g_test_oks
b.fails = C.g_test_fails
b.bench.step()
}

// Called after each test_ function, defined in file_test.v
fn (b mut BenchedTests) testing_step_end() {
ok_diff := C.g_test_oks - b.oks
ok_diff := C.g_test_oks - b.oks
fail_diff := C.g_test_fails - b.fails
//////////////////////////////////////////////////////////////////
// ////////////////////////////////////////////////////////////////
if ok_diff == 0 && fail_diff == 0 {
b.bench.neither_fail_nor_ok()
println(' ' + b.bench.step_message('NO asserts | ') + b.fn_name() )
println(INNER_INDENT + b.bench.step_message_ok('NO asserts | ') + b.fn_name())
return
}
//////////////////////////////////////////////////////////////////
if ok_diff > 0 {
}
// ////////////////////////////////////////////////////////////////
if ok_diff > 0 {
b.bench.ok_many(ok_diff)
}
if fail_diff > 0 {
b.bench.fail_many(fail_diff)
}
//////////////////////////////////////////////////////////////////
if ok_diff > 0 && fail_diff == 0 {
println(ok_text('OK') + b.bench.step_message(nasserts(ok_diff)) + b.fn_name() )
// ////////////////////////////////////////////////////////////////
if ok_diff > 0 && fail_diff == 0 {
println(INNER_INDENT + b.bench.step_message_ok(nasserts(ok_diff)) + b.fn_name())
return
}
if fail_diff > 0 {
println(fail_text('FAIL') + b.bench.step_message(nasserts(fail_diff)) + b.fn_name() )
if fail_diff > 0 {
println(INNER_INDENT + b.bench.step_message_fail(nasserts(fail_diff)) + b.fn_name())
return
}
}

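The header comment above explains the mechanism: this file is compiled into the same module as a given `_test.v` file when `v` is run with `-stats`, and the generated main calls `start_testing`, `testing_step_start`/`testing_step_end` and `end_testing` around the test functions. A minimal, hypothetical test file that would exercise these hooks (the file name and its contents are invented for illustration):

// foo_test.v -- hypothetical example; run as `v -stats foo_test.v`
fn test_addition() {
	assert 1 + 1 == 2
}

fn test_string_length() {
	assert 'abc'.len == 3
}
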
@@ -78,22 +81,25 @@ fn (b &BenchedTests) fn_name() string {
// Called at the end of the test program produced by `v -stats file_test.v`
fn (b mut BenchedTests) end_testing() {
b.bench.stop()
println( ' ' + b.bench.total_message('running V tests in "' + filepath.filename(b.test_suit_file) + '"' ) )
println(INNER_INDENT + b.bench.total_message('running V tests in "' + filepath.filename(b.test_suit_file) + '"'))
}

/////////////////////////////////////////////////////////////////////

// ///////////////////////////////////////////////////////////////////
fn nasserts(n int) string {
if n==0 { return '${n:2d} asserts | ' }
if n==1 { return '${n:2d} assert | ' }
return '${n:2d} asserts | '
if n == 0 {
return '${n:2d} asserts | '
}
if n == 1 {
return '${n:2d} assert | '
}
if n < 10 {
return '${n:2d} asserts | '
}
if n < 100 {
return '${n:3d} asserts | '
}
if n < 1000 {
return '${n:4d} asserts | '
}
return '${n:5d} asserts | '
}

fn ok_text(s string) string {
return term.ok_message('${s:5s}')
}

fn fail_text(s string) string {
return term.fail_message('${s:5s}')
}

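As a worked example of the width selection in the new `nasserts` above (assuming `${n:2d}` right-aligns `n` in a field of the given width, as in C's printf):

// Worked example (not part of the diff) -- expected return values:
// nasserts(1)   == ' 1 assert | '
// nasserts(7)   == ' 7 asserts | '
// nasserts(42)  == ' 42 asserts | '
// nasserts(420) == ' 420 asserts | '
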
@@ -1,6 +1,5 @@
// Build and run files in ./prod/ folder, comparing their output to *.expected.txt files.
// (Similar to REPL tests, but in -prod mode.)

// import os
import compiler.tests.repl.runner
import benchmark

@@ -15,15 +14,15 @@ fn test_all_v_prod_files() {
bmark.step()
fres := runner.run_prod_file(options.wd, options.vexec, file) or {
bmark.fail()
eprintln( bmark.step_message(err) )
eprintln(bmark.step_message_fail(err))
assert false
continue
}
bmark.ok()
println( bmark.step_message(fres) )
println(bmark.step_message_ok(fres))
assert true
}
bmark.stop()
println( bmark.total_message('total time spent running PROD files') )
println(bmark.total_message('total time spent running PROD files'))
}
}

@@ -8,15 +8,17 @@ fn test_the_v_compiler_can_be_invoked() {
vexec := runner.full_path_to_v(5)
println('vexecutable: $vexec')
assert vexec != ''

vcmd := '"$vexec" --version'
r := os.exec(vcmd) or { panic(err) }
//println('"$vcmd" exit_code: $r.exit_code | output: $r.output')
r := os.exec(vcmd) or {
panic(err)
}
// println('"$vcmd" exit_code: $r.exit_code | output: $r.output')
assert r.exit_code == 0

vcmd_error := '"$vexec" nonexisting.v'
r_error := os.exec(vcmd_error) or { panic(err) }
//println('"$vcmd_error" exit_code: $r_error.exit_code | output: $r_error.output')
r_error := os.exec(vcmd_error) or {
panic(err)
}
// println('"$vcmd_error" exit_code: $r_error.exit_code | output: $r_error.output')
assert r_error.exit_code == 1
assert r_error.output == '`nonexisting.v` does not exist'
}

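The hunk above only reformats two `os.exec` calls; the pattern it relies on (run a command, handle failure with `or`, then check `exit_code`/`output`) can be sketched standalone as follows (the command string is just an example):

// Minimal sketch of the os.exec pattern used in the test above.
import os

fn main() {
	res := os.exec('v --version') or {
		panic(err)
	}
	println('exit_code: $res.exit_code | output: $res.output')
	if res.exit_code != 0 {
		eprintln('command failed')
	}
}
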
@@ -28,15 +30,14 @@ fn test_all_v_repl_files() {
bmark.step()
fres := runner.run_repl_file(options.wd, options.vexec, file) or {
bmark.fail()
eprintln( bmark.step_message(err) )
eprintln(bmark.step_message_fail(err))
assert false
continue
}
bmark.ok()
println( bmark.step_message(fres) )
println(bmark.step_message_ok(fres))
assert true
}
bmark.stop()
println( bmark.total_message('total time spent running REPL files') )
println(bmark.total_message('total time spent running REPL files'))
}

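The prod and REPL runners above share one accounting pattern: `step()` before each file, `fail()` or `ok()` depending on the outcome, then `stop()` and a total message. A self-contained sketch of that pattern, assuming only the `benchmark` API already used in the hunks (`new_benchmark`, `step`, `ok`, `fail`, `stop`, `step_message_ok`, `step_message_fail`, `total_message`):

// Sketch of the step/ok/fail accounting pattern; the items are placeholders.
import benchmark

fn main() {
	mut bmark := benchmark.new_benchmark()
	for item in ['a', 'b', 'c'] {
		bmark.step()
		if item == 'b' { // pretend this item fails
			bmark.fail()
			eprintln(bmark.step_message_fail('could not process $item'))
			continue
		}
		bmark.ok()
		println(bmark.step_message_ok('processed $item'))
	}
	bmark.stop()
	println(bmark.total_message('total time spent processing items'))
}
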
@@ -4,22 +4,21 @@ import compiler.tests.repl.runner
import log
import benchmark

fn main(){
fn main() {
mut logger := log.Log{}
logger.set_level(log.DEBUG)
options := runner.new_options()

mut bmark := benchmark.new_benchmark()
for file in options.files {
bmark.step()
fres := runner.run_repl_file(options.wd, options.vexec, file) or {
bmark.fail()
logger.error( bmark.step_message( err ) )
logger.error(bmark.step_message_fail(err))
continue
}
bmark.ok()
logger.info( bmark.step_message( fres ) )
logger.info(bmark.step_message_ok(fres))
}
bmark.stop()
logger.info( bmark.total_message('total time spent running REPL files') )
logger.info(bmark.total_message('total time spent running REPL files'))
}