From a45895a3af872d345c649a551e4e8fe1929107ac Mon Sep 17 00:00:00 2001
From: Delyan Angelov
Date: Mon, 16 Sep 2019 17:29:06 +0300
Subject: [PATCH] new benchmark module + make the tests use it

---
 compiler/main.v                     | 56 ++++++++++++------
 compiler/tests/repl/repl_test.v     | 21 +++----
 compiler/tests/repl/run.v           | 15 +++--
 compiler/tests/repl/runner/runner.v | 11 ----
 vlib/benchmark/benchmark.v          | 91 +++++++++++++++++++++++++++++
 5 files changed, 148 insertions(+), 46 deletions(-)
 create mode 100644 vlib/benchmark/benchmark.v

diff --git a/compiler/main.v b/compiler/main.v
index f2785a56d7..27de6fe7b6 100644
--- a/compiler/main.v
+++ b/compiler/main.v
@@ -6,8 +6,8 @@ module main
 
 import (
 	os
-	time
 	strings
+	benchmark
 )
 
 const (
@@ -132,10 +132,6 @@ fn main() {
 		create_symlink()
 		return
 	}
-	if args.join(' ').contains(' test v') {
-		test_v()
-		return
-	}
 	if 'install' in args {
 		install_v(args)
 		return
@@ -156,6 +152,10 @@ fn main() {
 	}
 	// Construct the V object from command line arguments
 	mut v := new_v(args)
+	if args.join(' ').contains(' test v') {
+		v.test_v()
+		return
+	}
 	if v.pref.is_verbose {
 		println(args)
 	}
@@ -823,7 +823,7 @@ fn new_v(args[]string) &V {
 		is_script: is_script
 		is_so: '-shared' in args
 		is_prod: '-prod' in args
-		is_verbose: '-verbose' in args
+		is_verbose: '-verbose' in args || '--verbose' in args
 		is_debuggable: '-g' in args
 		is_debug: '-debug' in args || '-g' in args
 		obfuscate: obfuscate
@@ -949,57 +949,77 @@ fn install_v(args[]string) {
 	}
 }
 
-fn test_v() {
+fn (v &V) test_v() {
 	args := env_vflags_and_os_args()
 	vexe := args[0]
 	// Emily: pass args from the invocation to the test
 	// e.g. `v -g -os msvc test v` -> `$vexe -g -os msvc $file`
-	mut joined_args := env_vflags_and_os_args().right(1).join(' ')
+	mut joined_args := args.right(1).join(' ')
 	joined_args = joined_args.left(joined_args.last_index('test'))
-	println('$joined_args')
+	// println('$joined_args')
 	mut failed := false
 	test_files := os.walk_ext('.', '_test.v')
-	for dot_relative_file in test_files {
+
+	println('Testing...')
+	mut tmark := benchmark.new_benchmark()
+	tmark.verbose = v.pref.is_verbose
+	for dot_relative_file in test_files {
 		relative_file := dot_relative_file.replace('./', '')
 		file := os.realpath( relative_file )
 		tmpcfilepath := file.replace('_test.v', '_test.tmp.c')
-		print(relative_file + ' ')
+
 		mut cmd := '"$vexe" $joined_args -debug "$file"'
 		if os.user_os() == 'windows' { cmd = '"$cmd"' }
+
+		tmark.step()
 		r := os.exec(cmd) or {
+			tmark.fail()
 			failed = true
-			println('FAIL')
+			println(tmark.step_message('$relative_file FAIL'))
 			continue
 		}
 		if r.exit_code != 0 {
-			println('FAIL `$file` (\n$r.output\n)')
 			failed = true
+			tmark.fail()
+			println(tmark.step_message('$relative_file FAIL \n`$file`\n (\n$r.output\n)'))
 		} else {
-			println('OK')
+			tmark.ok()
+			println(tmark.step_message('$relative_file OK'))
 		}
 		os.rm( tmpcfilepath )
 	}
+	tmark.stop()
+	println( tmark.total_message('running V tests') )
+
 	println('\nBuilding examples...')
 	examples := os.walk_ext('examples', '.v')
+	mut bmark := benchmark.new_benchmark()
+	bmark.verbose = v.pref.is_verbose
 	for relative_file in examples {
 		file := os.realpath( relative_file )
 		tmpcfilepath := file.replace('.v', '.tmp.c')
-		print(relative_file + ' ')
 		mut cmd := '"$vexe" $joined_args -debug "$file"'
 		if os.user_os() == 'windows' { cmd = '"$cmd"' }
+		bmark.step()
 		r := os.exec(cmd) or {
 			failed = true
-			println('FAIL')
+			bmark.fail()
+			println(bmark.step_message('$relative_file FAIL'))
 			continue
 		}
 		if r.exit_code != 0 {
-			println('FAIL `$file` (\n$r.output\n)')
 			failed = true
+			bmark.fail()
+			println(bmark.step_message('$relative_file FAIL \n`$file`\n (\n$r.output\n)'))
 		} else {
-			println('OK')
+			bmark.ok()
+			println(bmark.step_message('$relative_file OK'))
 		}
 		os.rm(tmpcfilepath)
 	}
+	bmark.stop()
+	println( bmark.total_message('building examples') )
+
 	if failed {
 		exit(1)
 	}
diff --git a/compiler/tests/repl/repl_test.v b/compiler/tests/repl/repl_test.v
index 802e3c00d4..cb9394bc61 100644
--- a/compiler/tests/repl/repl_test.v
+++ b/compiler/tests/repl/repl_test.v
@@ -1,5 +1,6 @@
 import os
 import compiler.tests.repl.runner
+import benchmark
 
 fn test_the_v_compiler_can_be_invoked() {
 	vexec := runner.full_path_to_v()
@@ -20,24 +21,20 @@ fn test_the_v_compiler_can_be_invoked() {
 
 fn test_all_v_repl_files() {
 	options := runner.new_options()
-	global_start_time := runner.now()
-	mut total_tests := 0
-	mut ok_tests := 0
-	mut failed_tests := 0
+	mut bmark := benchmark.new_benchmark()
 	for file in options.files {
-		total_tests++
-		sticks := runner.now()
+		bmark.step()
 		fres := runner.run_repl_file(options.wd, options.vexec, file) or {
-			failed_tests++
+			bmark.fail()
+			eprintln( bmark.step_message(err) )
 			assert false
-			eprintln( runner.tdiff_in_ms(err, sticks) )
 			continue
 		}
+		bmark.ok()
+		println( bmark.step_message(fres) )
 		assert true
-		println( runner.tdiff_in_ms(fres, sticks) )
-		ok_tests++
 	}
-	println( runner.tdiff_in_ms('<=== total time spent running REPL files', global_start_time) )
-	println( ' ok, failed, total : ${ok_tests:5d}, ${failed_tests:5d}, ${total_tests:5d}' )
+	bmark.stop()
+	println( bmark.total_message('total time spent running REPL files') )
 }
diff --git a/compiler/tests/repl/run.v b/compiler/tests/repl/run.v
index c71444ae48..6578e359c5 100644
--- a/compiler/tests/repl/run.v
+++ b/compiler/tests/repl/run.v
@@ -2,18 +2,23 @@ module main
 
 import compiler.tests.repl.runner
 import log
+import benchmark
 
 fn main(){
 	logger := &log.Log{log.DEBUG, 'terminal'}
 	options := runner.new_options()
-	global_start_time := runner.now()
+
+	mut bmark := benchmark.new_benchmark()
 	for file in options.files {
-		stime := runner.now()
+		bmark.step()
 		fres := runner.run_repl_file(options.wd, options.vexec, file) or {
-			logger.error( runner.tdiff_in_ms(err, stime) )
+			bmark.fail()
+			logger.error( bmark.step_message( err ) )
 			continue
 		}
-		logger.info( runner.tdiff_in_ms(fres, stime) )
+		bmark.ok()
+		logger.info( bmark.step_message( fres ) )
 	}
-	logger.info( runner.tdiff_in_ms('<=== total time spent running REPL files', global_start_time) )
+	bmark.stop()
+	logger.info( bmark.total_message('total time spent running REPL files') )
 }
diff --git a/compiler/tests/repl/runner/runner.v b/compiler/tests/repl/runner/runner.v
index 652c534111..1c325fe121 100644
--- a/compiler/tests/repl/runner/runner.v
+++ b/compiler/tests/repl/runner/runner.v
@@ -1,7 +1,6 @@
 module runner
 
 import os
-import time
 
 struct RunnerOptions {
 pub:
@@ -71,13 +70,3 @@ pub fn new_options() RunnerOptions {
 	}
 }
 
-pub fn now() i64 {
-	return time.ticks()
-}
-
-pub fn tdiff_in_ms(s string, sticks i64) string {
-	eticks := time.ticks()
-	tdiff := (eticks - sticks)
-	return '${tdiff:6d} ms | $s'
-}
-
diff --git a/vlib/benchmark/benchmark.v b/vlib/benchmark/benchmark.v
new file mode 100644
index 0000000000..33d4134537
--- /dev/null
+++ b/vlib/benchmark/benchmark.v
@@ -0,0 +1,91 @@
+module benchmark
+
+import time
+
+/*
+Example usage of this module:
+```
+import benchmark
+mut bmark := benchmark.new_benchmark()
+// by default the benchmark will be verbose, i.e. it will include timing information
+// if you want it to be silent, set bmark.verbose = false
+for {
+   bmark.step() // call this when you want to advance the benchmark.
+                // The timing info in bmark.step_message will be measured starting from the last call to bmark.step
+   ....
+
+   //bmark.fail() // call this if the step failed
+   //bmark.step_message('failed')
+
+   bmark.ok() // call this when the step succeeded
+   println( bmark.step_message('ok') )
+}
+bmark.stop() // call when you want to finalize the benchmark
+println( bmark.total_message('remarks about the benchmark') )
+```
+*/
+
+struct Benchmark{
+pub mut:
+	bench_start_time i64
+	bench_end_time i64
+	step_start_time i64
+	step_end_time i64
+	ntotal int
+	nok int
+	nfail int
+	verbose bool
+}
+
+pub fn new_benchmark() Benchmark{
+	return Benchmark{
+		bench_start_time: benchmark.now()
+		verbose: true
+	}
+}
+
+pub fn now() i64 {
+	return time.ticks()
+}
+
+pub fn (b mut Benchmark) stop() {
+	b.bench_end_time = benchmark.now()
+}
+
+pub fn (b mut Benchmark) step() {
+	b.step_start_time = benchmark.now()
+}
+
+pub fn (b mut Benchmark) fail() {
+	b.step_end_time = benchmark.now()
+	b.ntotal++
+	b.nfail++
+}
+
+pub fn (b mut Benchmark) ok() {
+	b.step_end_time = benchmark.now()
+	b.ntotal++
+	b.nok++
+}
+
+pub fn (b mut Benchmark) step_message(msg string) string {
+	return b.tdiff_in_ms(msg, b.step_start_time, b.step_end_time)
+}
+
+pub fn (b mut Benchmark) total_message(msg string) string {
+	mut tmsg := '$msg : ok, fail, total = ${b.nok:5d}, ${b.nfail:5d}, ${b.ntotal:5d}'
+	if b.verbose {
+		tmsg = '<=== total time spent $tmsg'
+	}
+	return b.tdiff_in_ms(tmsg, b.bench_start_time, b.bench_end_time)
+}
+
+////////////////////////////////////////////////////////////////////
+
+fn (b mut Benchmark) tdiff_in_ms(s string, sticks i64, eticks i64) string {
+	if b.verbose {
+		tdiff := (eticks - sticks)
+		return '${tdiff:6d} ms | $s'
+	}
+	return s
+}
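
Below is a minimal, self-contained sketch of driving the new vlib/benchmark module from a caller, using only the API introduced in this patch (new_benchmark, step, ok, fail, stop, step_message, total_message). The work items and the do_work helper are illustrative placeholders, not code from the patch:

```
import benchmark

fn do_work(item string) bool {
	// stand-in for the real per-item work being timed
	return item != 'beta'
}

fn main() {
	items := ['alpha', 'beta', 'gamma'] // placeholder work items
	mut bmark := benchmark.new_benchmark()
	for item in items {
		bmark.step() // timing for this item starts here
		if !do_work(item) {
			bmark.fail()
			println(bmark.step_message('$item FAIL'))
			continue
		}
		bmark.ok()
		println(bmark.step_message('$item OK'))
	}
	bmark.stop()
	println(bmark.total_message('processing items'))
}
```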