mirror of https://github.com/vlang/v.git

testing: refactor the v test implementation to make supporting different output modes easier

Delyan Angelov 2022-12-02 13:45:33 +02:00
parent e419faf746
commit d09c8c914b
8 changed files with 399 additions and 65 deletions

View File

@@ -1,6 +1,7 @@
module testing
import os
import os.cmdline
import time
import term
import benchmark
@@ -51,25 +52,13 @@ pub mut:
benchmark benchmark.Benchmark
rm_binaries bool = true
silent_mode bool
show_stats bool
progress_mode bool
root_relative bool // used by CI runs, so that the output is stable everywhere
nmessages chan LogMessage // many publishers, single consumer/printer
nmessage_idx int // currently printed message index
nprint_ended chan int // read to block till printing ends, 1:1
failed_cmds shared []string
}
enum MessageKind {
ok
fail
skip
info
sentinel
}
struct LogMessage {
message string
kind MessageKind
reporter Reporter = Reporter(NormalReporter{})
}
pub fn (mut ts TestSession) add_failed_cmd(cmd string) {
@@ -79,20 +68,47 @@ pub fn (mut ts TestSession) add_failed_cmd(cmd string) {
}
pub fn (mut ts TestSession) show_list_of_failed_tests() {
for i, cmd in ts.failed_cmds {
eprintln(term.failed('Failed command ${i + 1}:') + ' ${cmd}')
rlock ts.failed_cmds {
ts.reporter.list_of_failed_commands(ts.failed_cmds)
}
}
pub fn (mut ts TestSession) append_message(kind MessageKind, msg string) {
struct MessageThreadContext {
mut:
file string
flow_id string
}
fn (mut ts TestSession) append_message(kind MessageKind, msg string, mtc MessageThreadContext) {
ts.nmessages <- LogMessage{
file: mtc.file
flow_id: mtc.flow_id
message: msg
kind: kind
when: time.now()
}
}
fn (mut ts TestSession) append_message_with_duration(kind MessageKind, msg string, d time.Duration, mtc MessageThreadContext) {
ts.nmessages <- LogMessage{
file: mtc.file
flow_id: mtc.flow_id
message: msg
kind: kind
when: time.now()
took: d
}
}
pub fn (mut ts TestSession) session_start(message string) {
ts.reporter.session_start(message, mut ts)
}
pub fn (mut ts TestSession) session_stop(message string) {
ts.reporter.session_stop(message, mut ts)
}
pub fn (mut ts TestSession) print_messages() {
empty := term.header(' ', ' ')
mut print_msg_time := time.new_stopwatch()
for {
// get a message from the channel of messages to be printed:
@@ -100,14 +116,15 @@ pub fn (mut ts TestSession) print_messages() {
if rmessage.kind == .sentinel {
// a sentinel for stopping the printing thread
if !ts.silent_mode && ts.progress_mode {
eprintln('')
ts.reporter.report_stop()
}
ts.nprint_ended <- 0
return
}
if rmessage.kind != .info {
ts.nmessage_idx++
}
//
ts.reporter.report(ts.nmessage_idx, rmessage)
msg := rmessage.message.replace_each([
'TMP1',
'${ts.nmessage_idx:1d}',
@@ -125,25 +142,21 @@ pub fn (mut ts TestSession) print_messages() {
// Even if OK tests are suppressed,
// show *at least* 1 result every 10 seconds,
// otherwise the CI can seem stuck ...
eprintln(msg)
ts.reporter.progress(ts.nmessage_idx, msg)
print_msg_time.restart()
continue
}
if ts.progress_mode {
// progress mode, the last line is rewritten many times:
if is_ok && !ts.silent_mode {
print('\r${empty}\r${msg}')
flush_stdout()
ts.reporter.update_last_line(ts.nmessage_idx, msg)
} else {
// the last \n is needed, so SKIP/FAIL messages
// will not get overwritten by the OK ones
eprint('\r${empty}\r${msg}\n')
ts.reporter.update_last_line_and_move_to_next(ts.nmessage_idx, msg)
}
continue
}
if !ts.silent_mode || !is_ok {
// normal expanded mode, or failures in -silent mode
eprintln(msg)
ts.reporter.message(ts.nmessage_idx, msg)
continue
}
}
@@ -228,23 +241,49 @@ pub fn new_test_session(_vargs string, will_compile bool) TestSession {
skip_files << 'examples/macos_tray/tray.v'
}
}
vargs := _vargs.replace('-progress', '').replace('-progress', '')
vargs := _vargs.replace('-progress', '')
vexe := pref.vexe_path()
vroot := os.dir(vexe)
new_vtmp_dir := setup_new_vtmp_folder()
if term.can_show_color_on_stderr() {
os.setenv('VCOLORS', 'always', true)
}
return TestSession{
mut ts := TestSession{
vexe: vexe
vroot: vroot
skip_files: skip_files
fail_fast: testing.fail_fast
show_stats: '-stats' in vargs.split(' ')
vargs: vargs
vtmp_dir: new_vtmp_dir
silent_mode: _vargs.contains('-silent')
progress_mode: _vargs.contains('-progress')
}
ts.handle_test_runner_option()
return ts
}
fn (mut ts TestSession) handle_test_runner_option() {
test_runner := cmdline.option(os.args, '-test-runner', 'normal')
if test_runner !in pref.supported_test_runners {
eprintln('v test: `-test-runner ${test_runner}` is not using one of the supported test runners: ${pref.supported_test_runners_list()}')
}
test_runner_implementation_file := os.join_path(ts.vroot, 'cmd/tools/modules/testing/output_${test_runner}.v')
if !os.exists(test_runner_implementation_file) {
eprintln('v test: using `-test-runner ${test_runner}` needs ${test_runner_implementation_file} to exist, and contain a valid testing.Reporter implementation for that runner. See `cmd/tools/modules/testing/output_dump.v` for an example.')
exit(1)
}
match test_runner {
'normal' {
// default, nothing to do
}
'dump' {
ts.reporter = DumpReporter{}
}
else {
dump('just set ts.reporter to an instance of your own struct here')
}
}
}
pub fn (mut ts TestSession) init() {
@@ -294,17 +333,20 @@ pub fn (mut ts TestSession) test() {
}
ts.benchmark.njobs = njobs
mut pool_of_test_runners := pool.new_pool_processor(callback: worker_trunner)
// for handling messages across threads
// ensure that the nmessages queue/channel has enough capacity for handling many messages across threads, without blocking
ts.nmessages = chan LogMessage{cap: 10000}
ts.nprint_ended = chan int{cap: 0}
ts.nmessage_idx = 0
spawn ts.print_messages()
printing_thread := spawn ts.print_messages()
pool_of_test_runners.set_shared_context(ts)
ts.reporter.worker_threads_start(remaining_files, mut ts)
// all the testing happens here:
pool_of_test_runners.work_on_pointers(unsafe { remaining_files.pointers() })
//
ts.benchmark.stop()
ts.append_message(.sentinel, '') // send the sentinel
_ := <-ts.nprint_ended // wait for the stop of the printing thread
eprintln(term.h_divider('-'))
ts.append_message(.sentinel, '', MessageThreadContext{ flow_id: sync.thread_id().str() }) // send the sentinel
printing_thread.wait()
ts.reporter.worker_threads_finish(mut ts)
ts.reporter.divider()
ts.show_list_of_failed_tests()
// cleanup generated .tmp.c files after successful tests:
if ts.benchmark.nfail == 0 {
@@ -326,7 +368,6 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
}
}
tmpd := ts.vtmp_dir
show_stats := '-stats' in ts.vargs.split(' ')
// tls_bench is used to format the step messages/timings
mut tls_bench := &benchmark.Benchmark(p.get_thread_context(idx))
if isnil(tls_bench) {
@@ -358,6 +399,10 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
relative_file = relative_file.replace(ts.vroot + os.path_separator, '')
}
file := os.real_path(relative_file)
mtc := MessageThreadContext{
file: file
flow_id: thread_id.str()
}
normalised_relative_file := relative_file.replace('\\', '/')
// Ensure that the generated binaries will be stored in the temporary folder.
// Remove them after a test passes/fails.
@@ -384,18 +429,20 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
ts.benchmark.skip()
tls_bench.skip()
if !testing.hide_skips {
ts.append_message(.skip, tls_bench.step_message_skip(normalised_relative_file))
ts.append_message(.skip, tls_bench.step_message_skip(normalised_relative_file),
mtc)
}
return pool.no_result
}
if show_stats {
ts.append_message(.ok, term.h_divider('-'))
if ts.show_stats {
ts.reporter.divider()
mut status := os.system(cmd)
if status != 0 {
details := get_test_details(file)
os.setenv('VTEST_RETRY_MAX', '${details.retry}', true)
for retry := 1; retry <= details.retry; retry++ {
ts.append_message(.info, ' [stats] retrying ${retry}/${details.retry} of ${relative_file} ; known flaky: ${details.flaky} ...')
ts.append_message(.info, ' [stats] retrying ${retry}/${details.retry} of ${relative_file} ; known flaky: ${details.flaky} ...',
mtc)
os.setenv('VTEST_RETRY', '${retry}', true)
status = os.system(cmd)
if status == 0 {
@@ -406,7 +453,8 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
time.sleep(500 * time.millisecond)
}
if details.flaky && !testing.fail_flaky {
ts.append_message(.info, ' *FAILURE* of the known flaky test file ${relative_file} is ignored, since VTEST_FAIL_FLAKY is 0 . Retry count: ${details.retry} .')
ts.append_message(.info, ' *FAILURE* of the known flaky test file ${relative_file} is ignored, since VTEST_FAIL_FLAKY is 0 . Retry count: ${details.retry} .',
mtc)
unsafe {
goto test_passed_system
}
@@ -422,13 +470,17 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
}
} else {
if testing.show_start {
ts.append_message(.info, ' starting ${relative_file} ...')
ts.append_message(.info, ' starting ${relative_file} ...',
mtc)
}
d_cmd := time.new_stopwatch()
mut r := os.execute(cmd)
cmd_duration := d_cmd.elapsed()
if r.exit_code < 0 {
ts.benchmark.fail()
tls_bench.fail()
ts.append_message(.fail, tls_bench.step_message_fail(normalised_relative_file))
ts.append_message_with_duration(.fail, tls_bench.step_message_fail(normalised_relative_file),
cmd_duration, mtc)
ts.add_failed_cmd(cmd)
return pool.no_result
}
@@ -436,7 +488,8 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
details := get_test_details(file)
os.setenv('VTEST_RETRY_MAX', '${details.retry}', true)
for retry := 1; retry <= details.retry; retry++ {
ts.append_message(.info, ' retrying ${retry}/${details.retry} of ${relative_file} ; known flaky: ${details.flaky} ...')
ts.append_message(.info, ' retrying ${retry}/${details.retry} of ${relative_file} ; known flaky: ${details.flaky} ...',
mtc)
os.setenv('VTEST_RETRY', '${retry}', true)
r = os.execute(cmd)
if r.exit_code == 0 {
@@ -446,7 +499,8 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
}
}
if details.flaky && !testing.fail_flaky {
ts.append_message(.info, ' *FAILURE* of the known flaky test file ${relative_file} is ignored, since VTEST_FAIL_FLAKY is 0 . Retry count: ${details.retry} .')
ts.append_message(.info, ' *FAILURE* of the known flaky test file ${relative_file} is ignored, since VTEST_FAIL_FLAKY is 0 . Retry count: ${details.retry} .',
mtc)
unsafe {
goto test_passed_execute
}
@@ -454,14 +508,16 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
ts.benchmark.fail()
tls_bench.fail()
ending_newline := if r.output.ends_with('\n') { '\n' } else { '' }
ts.append_message(.fail, tls_bench.step_message_fail('${normalised_relative_file}\n${r.output.trim_space()}${ending_newline}'))
ts.append_message_with_duration(.fail, tls_bench.step_message_fail('${normalised_relative_file}\n${r.output.trim_space()}${ending_newline}'),
cmd_duration, mtc)
ts.add_failed_cmd(cmd)
} else {
test_passed_execute:
ts.benchmark.ok()
tls_bench.ok()
if !testing.hide_oks {
ts.append_message(.ok, tls_bench.step_message_ok(normalised_relative_file))
ts.append_message_with_duration(.ok, tls_bench.step_message_ok(normalised_relative_file),
cmd_duration, mtc)
}
}
}
@@ -586,15 +642,6 @@ pub fn building_any_v_binaries_failed() bool {
return failed
}
pub fn eheader(msg string) {
eprintln(term.header_left(msg, '-'))
}
pub fn header(msg string) {
println(term.header_left(msg, '-'))
flush_stdout()
}
// setup_new_vtmp_folder creates a new nested folder inside VTMP, then resets VTMP to it,
// so that V programs/tests will write their temporary files to a new location.
// The new nested folder, and its contents, will get removed after all tests/programs succeed.
@@ -634,3 +681,12 @@ pub fn find_started_process(pname string) ?string {
}
return error('could not find process matching ${pname}')
}
pub fn eheader(msg string) {
eprintln(term.header_left(msg, '-'))
}
pub fn header(msg string) {
println(term.header_left(msg, '-'))
flush_stdout()
}
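
A note on the message flow refactored above: worker threads no longer print directly; they push LogMessage values into the buffered ts.nmessages channel, a single printing thread consumes them, and a .sentinel message tells that thread to stop. The following standalone V sketch (not part of this commit; Msg, Kind and worker are illustrative names only) shows the same producer/consumer-with-sentinel pattern in isolation:

module main

import time

enum Kind {
	ok
	sentinel
}

struct Msg {
	kind Kind
	text string
	when time.Time
}

fn worker(id int, ch chan Msg) {
	// each worker pushes its result into the shared channel
	ch <- Msg{
		kind: .ok
		text: 'worker ${id} done'
		when: time.now()
	}
}

fn main() {
	ch := chan Msg{cap: 100} // buffered, so producers rarely block
	mut workers := []thread{}
	for i in 0 .. 4 {
		workers << spawn worker(i, ch)
	}
	// a single consumer/printer thread, stopped only by the sentinel:
	printer := spawn fn (c chan Msg) {
		for {
			m := <-c
			if m.kind == .sentinel {
				return
			}
			println('${m.when.format_ss()} | ${m.text}')
		}
	}(ch)
	workers.wait()
	ch <- Msg{
		kind: .sentinel
	}
	printer.wait()
}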

View File

@@ -0,0 +1,40 @@
module testing
import time
pub enum MessageKind {
ok
fail
skip
info
sentinel
}
pub struct LogMessage {
pub:
flow_id string // all messages produced by a given thread share the same unique flow_id; messages from other threads have different flow_id values. If you use VJOBS=1 to serialise the execution, then all messages will share the same flow_id.
file string // the _test.v file that the message is about
message string // the actual message text, i.e. the result of the event that the message describes; most reporters can ignore this, since it can be reconstructed from the other fields
kind MessageKind // see the MessageKind declaration
when time.Time // when the message was sent (messages are sent by the execution threads at the *end* of each event)
took time.Duration // the duration of the event that this message describes
}
pub interface Reporter {
mut:
session_start(message string, mut ts TestSession) // called once per test session, in the main thread, suitable for setting up supporting infrastructure.
session_stop(message string, mut ts TestSession) // called once per test session, in the main thread, after everything else, suitable for summaries, creating .xml reports, uploads etc.
worker_threads_start(files []string, mut ts TestSession) // called once per test session, in the main thread, right before all the worker threads start
worker_threads_finish(mut ts TestSession) // called once per test session, in the main thread, right after all the worker threads finish
//
report(index int, log_msg LogMessage) // called once for each message that will be shown (ok/fail/skip etc.), only in the reporting thread.
report_stop() // called just once, after all messages are processed, only in the reporting thread, but before session_stop.
//
// TODO: reconsider whether the next methods should be kept for all reporters, or moved inside the normal reporter, to simplify the interface
progress(index int, message string)
update_last_line(index int, message string)
update_last_line_and_move_to_next(index int, message string)
message(index int, message string)
divider() // called to show a long ---------- horizontal line; can be safely ignored in most reporters; used in the main thread.
list_of_failed_commands(cmds []string) // called after all testing is done, to produce a small summary that only lists the failed commands, so that they can be retried manually if needed, without forcing the user to scroll and find them.
}
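
A custom reporter only needs to satisfy the interface above; most hooks can stay empty. Below is a hypothetical minimal sketch (not part of this commit) of a reporter that prints only failures. It assumes a file such as cmd/tools/modules/testing/output_quiet.v, plus adding 'quiet' to pref.supported_test_runners and a matching branch in handle_test_runner_option, in the same way the dump runner is wired up:

module testing

// QuietReporter is a hypothetical example; only the methods that matter for it do any work.
pub struct QuietReporter {}

pub fn (r QuietReporter) session_start(message string, mut ts TestSession) {}

pub fn (r QuietReporter) session_stop(message string, mut ts TestSession) {
	// a single summary line at the very end
	println(ts.benchmark.total_message(message))
}

pub fn (r QuietReporter) worker_threads_start(files []string, mut ts TestSession) {}

pub fn (r QuietReporter) worker_threads_finish(mut ts TestSession) {}

pub fn (r QuietReporter) report(index int, message LogMessage) {
	// only failures are shown while the tests are running
	if message.kind == .fail {
		eprintln('FAIL ${message.file}')
	}
}

pub fn (r QuietReporter) report_stop() {}

pub fn (r QuietReporter) progress(index int, message string) {}

pub fn (r QuietReporter) update_last_line(index int, message string) {}

pub fn (r QuietReporter) update_last_line_and_move_to_next(index int, message string) {}

pub fn (r QuietReporter) message(index int, message string) {}

pub fn (r QuietReporter) divider() {}

pub fn (r QuietReporter) list_of_failed_commands(failed_cmds []string) {
	for cmd in failed_cmds {
		eprintln('to retry: ${cmd}')
	}
}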

View File

@@ -0,0 +1,64 @@
module testing
// DumpReporter implements the interface testing.Reporter.
// It is used by `v -test-runner dump test .`
pub struct DumpReporter {
mut:
files []string
}
//
pub fn (mut r DumpReporter) worker_threads_start(files []string, mut ts TestSession) {
eprintln('> ${@METHOD} | files: ${files}')
r.files = files
}
pub fn (r DumpReporter) worker_threads_finish(mut ts TestSession) {
eprintln('> ${@METHOD}')
}
//
pub fn (r DumpReporter) session_start(message string, mut ts TestSession) {
eprintln('> ${@METHOD} | message: ${message}')
// dump(ts)
}
pub fn (r DumpReporter) session_stop(message string, mut ts TestSession) {
eprintln('> ${@METHOD} | message: ${message}')
}
//
pub fn (r DumpReporter) report(index int, message LogMessage) {
eprintln('> ${@METHOD} | index: ${index} | message: ${message}')
}
pub fn (r DumpReporter) report_stop() {
eprintln('> ${@METHOD}')
}
pub fn (r DumpReporter) progress(index int, message string) {
eprintln('> ${@METHOD} | index: ${index} | message: ${message}')
}
pub fn (r DumpReporter) update_last_line(index int, message string) {
eprintln('> ${@METHOD} | index: ${index} | message: ${message}')
}
pub fn (r DumpReporter) update_last_line_and_move_to_next(index int, message string) {
eprintln('> ${@METHOD} | index: ${index} | message: ${message}')
}
pub fn (r DumpReporter) message(index int, message string) {
eprintln('> ${@METHOD} | index: ${index} | message: ${message}')
}
pub fn (r DumpReporter) divider() {
eprintln('> ${@METHOD}')
}
pub fn (r DumpReporter) list_of_failed_commands(failed_cmds []string) {
eprintln('> ${@METHOD} | failed_cmds: ${failed_cmds}')
}

View File

@@ -0,0 +1,73 @@
module testing
import term
pub const empty = term.header(' ', ' ')
// NormalReporter implements the interface testing.Reporter.
// It is used by default by `v test .`
// It was extracted from the original, non-customisable output implementation that lived directly in cmd/tools/modules/testing/common.v
pub struct NormalReporter {
}
pub fn (r NormalReporter) session_start(message string, mut ts TestSession) {
header(message)
}
pub fn (r NormalReporter) session_stop(message string, mut ts TestSession) {
println(ts.benchmark.total_message(message))
}
// the most general form; it may be useful for other reporters
// in the normal one, it currently does nothing
pub fn (r NormalReporter) report(index int, message LogMessage) {
// eprintln('> ${@METHOD} index: $index | message: $message')
}
pub fn (r NormalReporter) report_stop() {
// eprintln('> ${@METHOD}')
eprintln('')
}
//// TODO: reconsider if these should be public:
pub fn (r NormalReporter) progress(index int, message string) {
eprintln(message)
}
// in progress mode, the last line will be rewritten many times, and does not end with \n
// the \n will be printed just once when some progress has been made.
pub fn (r NormalReporter) update_last_line(index int, message string) {
print('\r${testing.empty}\r${message}')
flush_stdout()
}
pub fn (r NormalReporter) update_last_line_and_move_to_next(index int, message string) {
// the last \n is needed, so SKIP/FAIL messages
// will not get overwritten by the OK ones
eprint('\r${testing.empty}\r${message}\n')
}
pub fn (r NormalReporter) message(index int, message string) {
eprintln(message)
}
pub fn (r NormalReporter) divider() {
eprintln(term.h_divider('-'))
}
//
pub fn (r NormalReporter) worker_threads_start(files []string, mut ts TestSession) {
// eprintln('> ${@METHOD}')
}
pub fn (r NormalReporter) worker_threads_finish(mut ts TestSession) {
// eprintln('> ${@METHOD}')
}
pub fn (r NormalReporter) list_of_failed_commands(failed_cmds []string) {
for i, cmd in failed_cmds {
eprintln(term.failed('Failed command ${i + 1}:') + ' ${cmd}')
}
}

View File

@@ -66,9 +66,9 @@ fn main() {
exit(1)
}
}
testing.header('Testing...')
ts.session_start('Testing...')
ts.test()
println(ts.benchmark.total_message('all V _test.v files'))
ts.session_stop('all V _test.v files')
if ts.failed_cmds.len > 0 {
exit(1)
}

View File

@@ -264,7 +264,7 @@ pub fn (v &Builder) get_user_files() []string {
}
if !os.is_file(v_test_runner_prelude) || !os.is_readable(v_test_runner_prelude) {
eprintln('test runner error: File ${v_test_runner_prelude} should be readable.')
verror('supported test runners are: tap, json, simple, normal')
verror('the supported test runners are: ${pref.supported_test_runners_list()}')
}
user_files << v_test_runner_prelude
}

View File

@@ -87,10 +87,10 @@ pub enum Arch {
_max
}
const (
list_of_flags_with_param = ['o', 'd', 'define', 'b', 'backend', 'cc', 'os', 'cf', 'cflags',
pub const list_of_flags_with_param = ['o', 'd', 'define', 'b', 'backend', 'cc', 'os', 'cf', 'cflags',
'path', 'arch']
)
pub const supported_test_runners = ['normal', 'simple', 'tap', 'dump']
[heap; minify]
pub struct Preferences {
@@ -1062,3 +1062,7 @@ fn (mut prefs Preferences) diagnose_deprecated_defines(define_parts []string) {
eprintln('`-d no_bounds_checking` was deprecated in 2022/10/30. Use `-no-bounds-checking` instead.')
}
}
pub fn supported_test_runners_list() string {
return pref.supported_test_runners.map('`${it}`').join(', ')
}
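
For reference, the helper's result is fully determined by the constant above: map('`${it}`') wraps each runner name in backticks and join(', ') separates them. A standalone sketch of the same computation (illustrative only, not part of this commit):

module main

const supported_test_runners = ['normal', 'simple', 'tap', 'dump']

fn supported_test_runners_list() string {
	return supported_test_runners.map('`${it}`').join(', ')
}

fn main() {
	println(supported_test_runners_list())
	// prints: `normal`, `simple`, `tap`, `dump`
}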

View File

@@ -0,0 +1,97 @@
module main
// This prelude file implements the Dump output producer for V tests.
// It is not useful on its own, but can verify that -test-runner works
// as expected. Use it with:
// `VTEST_RUNNER=dump v run file_test.v`
// or
// `v -test-runner dump run file_test.v`
fn vtest_init() {
change_test_runner(&TestRunner(DumpTestRunner{}))
}
struct DumpTestRunner {
mut:
fname string
plan_tests int
test_counter int
//
file_test_info VTestFileMetaInfo
fn_test_info VTestFnMetaInfo
fn_assert_passes u64
fn_passes u64
fn_fails u64
//
total_assert_passes u64
total_assert_fails u64
}
fn (mut runner DumpTestRunner) free() {
unsafe {
runner.fname.free()
runner.fn_test_info.free()
runner.file_test_info.free()
}
}
fn (mut runner DumpTestRunner) start(ntests int) {
runner.plan_tests = ntests
eprintln('> ${@METHOD} | ntests: ${ntests}')
}
fn (mut runner DumpTestRunner) finish() {
eprintln('> ${@METHOD} , ${runner.plan_tests} tests, ${runner.total_assert_fails +
runner.total_assert_passes} assertions, ${runner.total_assert_fails} failures')
}
fn (mut runner DumpTestRunner) exit_code() int {
eprintln('> ${@METHOD}')
if runner.fn_fails > 0 {
return 1
}
if runner.total_assert_fails > 0 {
return 2
}
return 0
}
//
fn (mut runner DumpTestRunner) fn_start() bool {
eprintln('> ${@METHOD}')
runner.fn_assert_passes = 0
runner.test_counter++
runner.fname = runner.fn_test_info.name
return true
}
fn (mut runner DumpTestRunner) fn_pass() {
runner.fn_passes++
eprintln('> ${@METHOD} OK ${runner.test_counter} - ${runner.fname}')
}
fn (mut runner DumpTestRunner) fn_fail() {
eprintln('> ${@METHOD} NOT OK ${runner.test_counter} - ${runner.fname}')
runner.fn_fails++
}
fn (mut runner DumpTestRunner) fn_error(line_nr int, file string, mod string, fn_name string, errmsg string) {
eprintln('> ${@METHOD} test function propagated error: ${runner.fname}, line_nr: ${line_nr}, file: ${file}, mod: ${mod}, fn_name: ${fn_name}, errmsg: ${errmsg}')
}
//
fn (mut runner DumpTestRunner) assert_pass(i &VAssertMetaInfo) {
eprintln('> ${@METHOD} ASSERT PASS')
runner.total_assert_passes++
runner.fn_assert_passes++
unsafe { i.free() }
}
fn (mut runner DumpTestRunner) assert_fail(i &VAssertMetaInfo) {
runner.total_assert_fails++
eprintln('> ${@METHOD} ASSERT FAIL: ${runner.fn_assert_passes + 1} in ${runner.fname}, assert was in ${i.fn_name}, line: ${
i.line_nr + 1}')
unsafe { i.free() }
}
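
To see the dump runner's callbacks fire, any small test file will do. A hypothetical hello_test.v (illustrative only, not part of this commit) is shown below; running it with `v -test-runner dump run hello_test.v` or `VTEST_RUNNER=dump v run hello_test.v` triggers start, then fn_start, two assert_pass calls and fn_pass for the test function, and finally finish with the totals:

// hello_test.v - a hypothetical minimal test file for exercising the dump runner
fn test_hello() {
	assert 'hello'.len == 5
	assert 1 + 1 == 2
}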