mirror of https://github.com/vlang/v.git
synced 2023-08-10 21:13:21 +03:00
ac5241b5bd
* Draft implementation of `v -stats file_test.v`.
* compiler: call stuff in vlib/benchmark/tests/always_imported.v when doing `v -stats file_test.v`.
* Nicer looking output from `v -stats file_test.v`.
* Tweak colors and layout of `-stats file_test.v`.
* Fix a hardcoded path in compiler/main.v.
* Show colorized OK/FAIL for the examples in `v test v` too.
* Add some comments about the purpose of the methods inside vlib/benchmark/tests/always_imported.v.
* When fails are 0, do not colorize their number at all.
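For context, the files this mode targets are ordinary V test files. A hypothetical example (the file name, test names and assertions are illustrative only) that could be run as `v -stats file_test.v`:
```
// hypothetical file_test.v, used only to illustrate `v -stats file_test.v`
fn test_string_concat() {
	mut s := 'hello'
	s += ' world'
	assert s == 'hello world'
}

fn test_integer_math() {
	assert 2 + 2 == 4
}
```
Per the notes above, passing -stats makes the compiler also pull in vlib/benchmark/tests/always_imported.v, which presumably builds on the benchmark module below to time and summarize the test functions.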
115 lines · 2.5 KiB · V
module benchmark

import time
import term

/*
Example usage of this module:
```
import benchmark
mut bmark := benchmark.new_benchmark()
// by default the benchmark will be verbose, i.e. it will include timing information
// if you want it to be silent, set bmark.verbose = false
for {
   bmark.step() // call this when you want to advance the benchmark.
   // The timing info in bmark.step_message will be measured starting from the last call to bmark.step
   ....

   //bmark.fail() // call this if the step failed
   //println( bmark.step_message('failed') )

   bmark.ok() // call this when the step succeeded
   println( bmark.step_message('ok') )
}
bmark.stop() // call when you want to finalize the benchmark
println( bmark.total_message('remarks about the benchmark') )
```
*/

struct Benchmark {
pub mut:
	bench_start_time i64
	bench_end_time   i64
	step_start_time  i64
	step_end_time    i64
	ntotal           int
	nok              int
	nfail            int
	verbose          bool
}

// new_benchmark returns a Benchmark with its start time recorded and verbose output enabled.
pub fn new_benchmark() Benchmark {
	return Benchmark{
		bench_start_time: benchmark.now()
		verbose: true
	}
}

// stop records the end time of the whole benchmark.
pub fn (b mut Benchmark) stop() {
	b.bench_end_time = benchmark.now()
}

// step marks the start of a new benchmark step.
pub fn (b mut Benchmark) step() {
	b.step_start_time = benchmark.now()
}

// fail ends the current step and counts it as failed.
pub fn (b mut Benchmark) fail() {
	b.step_end_time = benchmark.now()
	b.ntotal++
	b.nfail++
}

// ok ends the current step and counts it as successful.
pub fn (b mut Benchmark) ok() {
	b.step_end_time = benchmark.now()
	b.ntotal++
	b.nok++
}

// fail_many ends the current step and counts n failures.
pub fn (b mut Benchmark) fail_many(n int) {
	b.step_end_time = benchmark.now()
	b.ntotal += n
	b.nfail += n
}

// ok_many ends the current step and counts n successes.
pub fn (b mut Benchmark) ok_many(n int) {
	b.step_end_time = benchmark.now()
	b.ntotal += n
	b.nok += n
}

// neither_fail_nor_ok ends the current step without changing any of the counters.
pub fn (b mut Benchmark) neither_fail_nor_ok() {
	b.step_end_time = benchmark.now()
}

// step_message returns msg, prefixed with the duration of the last step when verbose.
pub fn (b mut Benchmark) step_message(msg string) string {
	return b.tdiff_in_ms(msg, b.step_start_time, b.step_end_time)
}

// total_message returns msg together with the ok/fail/total counters and,
// when verbose, the total time spent in the benchmark.
pub fn (b mut Benchmark) total_message(msg string) string {
	mut tmsg := '$msg \n ok, fail, total = ' +
		term.ok_message('${b.nok:5d}') + ', ' +
		if b.nfail > 0 { term.fail_message('${b.nfail:5d}') } else { '${b.nfail:5d}' } + ', ' +
		'${b.ntotal:5d}'
	if b.verbose {
		tmsg = '<=== total time spent $tmsg'
	}
	return b.tdiff_in_ms(tmsg, b.bench_start_time, b.bench_end_time)
}

// total_duration returns the elapsed time of the whole benchmark in milliseconds.
pub fn (b mut Benchmark) total_duration() i64 {
	return (b.bench_end_time - b.bench_start_time)
}

////////////////////////////////////////////////////////////////////

// tdiff_in_ms prefixes s with the elapsed time between sticks and eticks (in ms) when verbose.
fn (b mut Benchmark) tdiff_in_ms(s string, sticks i64, eticks i64) string {
	if b.verbose {
		tdiff := (eticks - sticks)
		return '${tdiff:6d} ms | $s'
	}
	return s
}

// now returns the current tick count in milliseconds.
fn now() i64 {
	return time.ticks()
}
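The doc comment at the top covers the basic step/ok/fail flow. The batch helpers ok_many/fail_many and neither_fail_nor_ok are not shown there; a minimal sketch of how they could be used (the program and its counts are illustrative, not part of the original file):
```
import benchmark

fn main() {
	mut bmark := benchmark.new_benchmark()
	bmark.step()
	// suppose this step validated 10 items at once: 9 passed, 1 failed
	bmark.ok_many(9)
	bmark.fail_many(1)
	println(bmark.step_message('validated 10 items'))
	bmark.step()
	// a setup-only step that should not change the ok/fail counters
	bmark.neither_fail_nor_ok()
	println(bmark.step_message('setup'))
	bmark.stop()
	println(bmark.total_message('illustrative run'))
}
```
With verbose left at its default, each step_message line is prefixed with the elapsed milliseconds for that step, and total_message appends the ok/fail/total counters for the whole run.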