mirror of https://github.com/vlang/v.git synced 2023-08-10 21:13:21 +03:00

new benchmark module + make the tests use it

Delyan Angelov 2019-09-16 17:29:06 +03:00 committed by Alexander Medvednikov
parent 3325775944
commit a45895a3af
5 changed files with 148 additions and 46 deletions


@@ -6,8 +6,8 @@ module main

 import (
 	os
-	time
 	strings
+	benchmark
 )

 const (
@@ -132,10 +132,6 @@ fn main() {
 		create_symlink()
 		return
 	}
-	if args.join(' ').contains(' test v') {
-		test_v()
-		return
-	}
 	if 'install' in args {
 		install_v(args)
 		return
@@ -156,6 +152,10 @@ fn main() {
 	}
 	// Construct the V object from command line arguments
 	mut v := new_v(args)
+	if args.join(' ').contains(' test v') {
+		v.test_v()
+		return
+	}
 	if v.pref.is_verbose {
 		println(args)
 	}
@@ -823,7 +823,7 @@ fn new_v(args[]string) &V {
 		is_script: is_script
 		is_so: '-shared' in args
 		is_prod: '-prod' in args
-		is_verbose: '-verbose' in args
+		is_verbose: '-verbose' in args || '--verbose' in args
 		is_debuggable: '-g' in args
 		is_debug: '-debug' in args || '-g' in args
 		obfuscate: obfuscate
@@ -949,57 +949,77 @@ fn install_v(args[]string) {
 	}
 }

-fn test_v() {
+fn (v &V) test_v() {
 	args := env_vflags_and_os_args()
 	vexe := args[0]
 	// Emily: pass args from the invocation to the test
 	// e.g. `v -g -os msvc test v` -> `$vexe -g -os msvc $file`
-	mut joined_args := env_vflags_and_os_args().right(1).join(' ')
+	mut joined_args := args.right(1).join(' ')
 	joined_args = joined_args.left(joined_args.last_index('test'))
-	println('$joined_args')
+	// println('$joined_args')
 	mut failed := false
 	test_files := os.walk_ext('.', '_test.v')
+	println('Testing...')
+	mut tmark := benchmark.new_benchmark()
+	tmark.verbose = v.pref.is_verbose
 	for dot_relative_file in test_files {
 		relative_file := dot_relative_file.replace('./', '')
 		file := os.realpath( relative_file )
 		tmpcfilepath := file.replace('_test.v', '_test.tmp.c')
-		print(relative_file + ' ')
 		mut cmd := '"$vexe" $joined_args -debug "$file"'
 		if os.user_os() == 'windows' { cmd = '"$cmd"' }
+		tmark.step()
 		r := os.exec(cmd) or {
+			tmark.fail()
 			failed = true
-			println('FAIL')
+			println(tmark.step_message('$relative_file FAIL'))
			continue
 		}
 		if r.exit_code != 0 {
-			println('FAIL `$file` (\n$r.output\n)')
 			failed = true
+			tmark.fail()
+			println(tmark.step_message('$relative_file FAIL \n`$file`\n (\n$r.output\n)'))
 		} else {
-			println('OK')
+			tmark.ok()
+			println(tmark.step_message('$relative_file OK'))
 		}
 		os.rm( tmpcfilepath )
 	}
+	tmark.stop()
+	println( tmark.total_message('running V tests') )

 	println('\nBuilding examples...')
 	examples := os.walk_ext('examples', '.v')
+	mut bmark := benchmark.new_benchmark()
+	bmark.verbose = v.pref.is_verbose
 	for relative_file in examples {
 		file := os.realpath( relative_file )
 		tmpcfilepath := file.replace('.v', '.tmp.c')
-		print(relative_file + ' ')
 		mut cmd := '"$vexe" $joined_args -debug "$file"'
 		if os.user_os() == 'windows' { cmd = '"$cmd"' }
+		bmark.step()
 		r := os.exec(cmd) or {
 			failed = true
-			println('FAIL')
+			bmark.fail()
+			println(bmark.step_message('$relative_file FAIL'))
 			continue
 		}
 		if r.exit_code != 0 {
-			println('FAIL `$file` (\n$r.output\n)')
 			failed = true
+			bmark.fail()
+			println(bmark.step_message('$relative_file FAIL \n`$file`\n (\n$r.output\n)'))
 		} else {
-			println('OK')
+			bmark.ok()
+			println(bmark.step_message('$relative_file OK'))
 		}
 		os.rm(tmpcfilepath)
 	}
+	bmark.stop()
+	println( bmark.total_message('building examples') )
 	if failed {
 		exit(1)
 	}
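
For orientation: because of the ` test v` substring check above, this runner is reached by invocations like `v test v` (or `v -g -os msvc test v` to forward flags, per the Emily comment). With a verbose benchmark, every step goes through the '${tdiff:6d} ms | $s' format defined in the new benchmark module below, so the output should look roughly like the following sketch; the file name, timings, and counts are illustrative, not from a real run:

    Testing...
        38 ms | compiler/tests/repl/repl_test.v OK
       512 ms | <=== total time spent running V tests : ok, fail, total =     1,     0,     1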


@@ -1,5 +1,6 @@
 import os
 import compiler.tests.repl.runner
+import benchmark

 fn test_the_v_compiler_can_be_invoked() {
 	vexec := runner.full_path_to_v()
@@ -20,24 +21,20 @@ fn test_the_v_compiler_can_be_invoked() {

 fn test_all_v_repl_files() {
 	options := runner.new_options()
-	global_start_time := runner.now()
-	mut total_tests := 0
-	mut ok_tests := 0
-	mut failed_tests := 0
+	mut bmark := benchmark.new_benchmark()
 	for file in options.files {
-		total_tests++
-		sticks := runner.now()
+		bmark.step()
 		fres := runner.run_repl_file(options.wd, options.vexec, file) or {
-			failed_tests++
+			bmark.fail()
+			eprintln( bmark.step_message(err) )
 			assert false
-			eprintln( runner.tdiff_in_ms(err, sticks) )
 			continue
 		}
+		bmark.ok()
+		println( bmark.step_message(fres) )
 		assert true
-		println( runner.tdiff_in_ms(fres, sticks) )
-		ok_tests++
 	}
-	println( runner.tdiff_in_ms('<=== total time spent running REPL files', global_start_time) )
-	println( ' ok, failed, total : ${ok_tests:5d}, ${failed_tests:5d}, ${total_tests:5d}' )
+	bmark.stop()
+	println( bmark.total_message('total time spent running REPL files') )
 }


@@ -2,18 +2,23 @@ module main

 import compiler.tests.repl.runner
 import log
+import benchmark

 fn main(){
 	logger := &log.Log{log.DEBUG, 'terminal'}
 	options := runner.new_options()
-	global_start_time := runner.now()
+	mut bmark := benchmark.new_benchmark()
 	for file in options.files {
-		stime := runner.now()
+		bmark.step()
 		fres := runner.run_repl_file(options.wd, options.vexec, file) or {
-			logger.error( runner.tdiff_in_ms(err, stime) )
+			bmark.fail()
+			logger.error( bmark.step_message( err ) )
 			continue
 		}
-		logger.info( runner.tdiff_in_ms(fres, stime) )
+		bmark.ok()
+		logger.info( bmark.step_message( fres ) )
 	}
-	logger.info( runner.tdiff_in_ms('<=== total time spent running REPL files', global_start_time) )
+	bmark.stop()
+	logger.info( bmark.total_message('total time spent running REPL files') )
 }


@@ -1,7 +1,6 @@
 module runner

 import os
-import time

 struct RunnerOptions {
 pub:
@@ -71,13 +70,3 @@ pub fn new_options() RunnerOptions {
 	}
 }
-
-pub fn now() i64 {
-	return time.ticks()
-}
-
-pub fn tdiff_in_ms(s string, sticks i64) string {
-	eticks := time.ticks()
-	tdiff := (eticks - sticks)
-	return '${tdiff:6d} ms | $s'
-}


@@ -0,0 +1,91 @@
+module benchmark
+
+import time
+
+/*
+Example usage of this module:
+```
+import benchmark
+mut bmark := benchmark.new_benchmark()
+// by default the benchmark will be verbose, i.e. it will include timing information
+// if you want it to be silent, set bmark.verbose = false
+for {
+	bmark.step() // call this when you want to advance the benchmark.
+	// The timing info in bmark.step_message will be measured starting from the last call to bmark.step
+	// ... the work being measured goes here ...
+	//bmark.fail() // call this if the step failed
+	//println( bmark.step_message('failed') )
+	bmark.ok() // call this when the step succeeded
+	println( bmark.step_message('ok') )
+}
+bmark.stop() // call when you want to finalize the benchmark
+println( bmark.total_message('remarks about the benchmark') )
+```
+*/
+
+struct Benchmark {
+pub mut:
+	bench_start_time i64
+	bench_end_time   i64
+	step_start_time  i64
+	step_end_time    i64
+	ntotal           int
+	nok              int
+	nfail            int
+	verbose          bool
+}
+
+pub fn new_benchmark() Benchmark {
+	return Benchmark{
+		bench_start_time: benchmark.now()
+		verbose: true
+	}
+}
+
+pub fn now() i64 {
+	return time.ticks()
+}
+
+pub fn (b mut Benchmark) stop() {
+	b.bench_end_time = benchmark.now()
+}
+
+pub fn (b mut Benchmark) step() {
+	b.step_start_time = benchmark.now()
+}
+
+pub fn (b mut Benchmark) fail() {
+	b.step_end_time = benchmark.now()
+	b.ntotal++
+	b.nfail++
+}
+
+pub fn (b mut Benchmark) ok() {
+	b.step_end_time = benchmark.now()
+	b.ntotal++
+	b.nok++
+}
+
+pub fn (b mut Benchmark) step_message(msg string) string {
+	return b.tdiff_in_ms(msg, b.step_start_time, b.step_end_time)
+}
+
+pub fn (b mut Benchmark) total_message(msg string) string {
+	mut tmsg := '$msg : ok, fail, total = ${b.nok:5d}, ${b.nfail:5d}, ${b.ntotal:5d}'
+	if b.verbose {
+		tmsg = '<=== total time spent $tmsg'
+	}
+	return b.tdiff_in_ms(tmsg, b.bench_start_time, b.bench_end_time)
+}
+
+////////////////////////////////////////////////////////////////////
+fn (b mut Benchmark) tdiff_in_ms(s string, sticks i64, eticks i64) string {
+	if b.verbose {
+		tdiff := (eticks - sticks)
+		return '${tdiff:6d} ms | $s'
+	}
+	return s
+}