Mirror of https://github.com/vlang/v.git, synced 2023-08-10 21:13:21 +03:00
vlib: transfer intro to readme; normalize comments
vlib/benchmark/README.md (new file, +39 lines)
@@ -0,0 +1,39 @@
Example usage of this module:
```
import benchmark

mut bmark := benchmark.new_benchmark()
// by default the benchmark will be verbose, i.e. it will include timing information
// if you want it to be silent, set bmark.verbose = false
for {
	bmark.step() // call this when you want to advance the benchmark.
	// The timing info in bmark.step_message will be measured starting from the last call to bmark.step
	....

	// bmark.fail() // call this if the step failed
	// println(bmark.step_message('failed'))

	bmark.ok() // call this when the step succeeded
	println(bmark.step_message('ok'))
}
bmark.stop() // call when you want to finalize the benchmark
println(bmark.total_message('remarks about the benchmark'))
```
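
The loop above is schematic: the `....` stands for whatever work each step performs, and the failure path is commented out. Below is a minimal, self-contained sketch of the same step/ok/fail pattern; the input data and the failure condition are made up for illustration and are not part of the module.

```v
import benchmark

fn main() {
	inputs := ['alpha', 'beta', ''] // hypothetical data to "process"
	mut bmark := benchmark.new_benchmark()
	for s in inputs {
		bmark.step()
		// the "work" of this step: reject empty strings
		if s.len == 0 {
			bmark.fail()
			println(bmark.step_message('failed'))
			continue
		}
		bmark.ok()
		println(bmark.step_message('ok'))
	}
	bmark.stop()
	println(bmark.total_message('checked all inputs'))
}
```

Each step_message() line reports the time measured since the previous step() call, and total_message() summarizes the whole run.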

benchmark.start() and b.measure() are convenience methods,
intended to be used in combination. Their goal is to make
benchmarking of small snippets of code as *short* and easy to
write as possible, and the results easy to read and analyze.

Example:
```v
import benchmark

mut b := benchmark.start()

// your code 1 ...
b.measure('code_1')

// your code 2 ...
b.measure('code_2')
```
... which will produce on stdout something like this:
SPENT 17 ms in code_1
SPENT 462 ms in code_2
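
For illustration only, here is a self-contained sketch where the two placeholder sections are replaced with some throwaway work; the labels and workloads are invented for this example and are not part of the module.

```v
import benchmark

fn main() {
	mut b := benchmark.start()

	// section 1: naive string concatenation
	mut s := ''
	for _ in 0 .. 100000 {
		s += 'x'
	}
	b.measure('string_building')

	// section 2: appending integers to an array
	mut nums := []int{}
	for i in 0 .. 100000 {
		nums << i
	}
	b.measure('array_filling')
}
```

Each measure() call is expected to print one SPENT line covering the code run since the previous measurement point.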
@@ -2,48 +2,6 @@ module benchmark

import time
import term

/*
Example usage of this module:
```
import benchmark

mut bmark := benchmark.new_benchmark()
// by default the benchmark will be verbose, i.e. it will include timing information
// if you want it to be silent, set bmark.verbose = false
for {
	bmark.step() // call this when you want to advance the benchmark.
	// The timing info in bmark.step_message will be measured starting from the last call to bmark.step
	....

	// bmark.fail() // call this if the step failed
	// println(bmark.step_message('failed'))

	bmark.ok() // call this when the step succeeded
	println(bmark.step_message('ok'))
}
bmark.stop() // call when you want to finalize the benchmark
println(bmark.total_message('remarks about the benchmark'))
```

benchmark.start() and b.measure() are convenience methods,
intended to be used in combination. Their goal is to make
benchmarking of small snippets of code as *short* and easy to
write as possible, and the results easy to read and analyze.

Example:
```v
import benchmark

mut b := benchmark.start()

// your code 1 ...
b.measure('code_1')

// your code 2 ...
b.measure('code_2')
```
... which will produce on stdout something like this:
SPENT 17 ms in code_1
SPENT 462 ms in code_2
*/

const (
	b_ok = term.ok_message('OK ')