mirror of https://github.com/vlang/v.git (synced 2023-08-10 21:13:21 +03:00)

tests: parallelize compiler_errors_test.v using channels & threads

parent 34d03801de
commit 55b8cc1bb2
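
The change replaces the sequential check_path() calls with a task queue: every test becomes a TaskDescription, all tasks plus one sentinel per worker are pushed into a `work` channel, runtime.nr_jobs() worker threads pop tasks, compile and check them, and push them into a `results` channel, and the main thread collects exactly tasks.len results while reporting progress through the benchmark module. Below is a minimal sketch of the same sentinel-terminated worker-pool pattern, reduced to squaring integers. It uses only the calls that appear in this diff (sync.new_channel<T>, push, pop, go, runtime.nr_jobs()); the Job struct and worker() are illustrative names, and the sync.Channel API shown is the one this commit targets, which may differ in later V versions.

import runtime
import sync

struct Job {
	input int
mut:
	is_sentinel bool // a pure marker value that tells a worker to stop
	result      int
}

fn worker(mut work sync.Channel, mut results sync.Channel) {
	for {
		mut job := Job{}
		work.pop(&job)
		if job.is_sentinel {
			break
		}
		job.result = job.input * job.input // the real test would compile/run a program here
		results.push(&job)
	}
}

fn main() {
	vjobs := runtime.nr_jobs()
	mut jobs := []Job{}
	for i in 1 .. 11 {
		jobs << Job{
			input: i
		}
	}
	// capacity covers every real job plus one sentinel per worker, so push never blocks
	mut work := sync.new_channel<Job>(jobs.len + vjobs)
	mut results := sync.new_channel<Job>(jobs.len)
	for i in 0 .. jobs.len {
		work.push(&jobs[i])
	}
	sentinel := Job{
		is_sentinel: true
	}
	for _ in 0 .. vjobs {
		work.push(&sentinel)
		go worker(mut work, mut results)
	}
	// fan-in: exactly jobs.len results come back, in completion order
	for _ in 0 .. jobs.len {
		mut job := Job{}
		results.pop(&job)
		println('${job.input} squared is ${job.result}')
	}
}

The sentinels are what let the workers terminate without closing the channel (the diff itself carries a TODO about switching to channel closing later): each worker exits as soon as it pops a job marked as a sentinel, and because the real work items are pushed first, pushing exactly vjobs sentinels guarantees every worker stops once the queue is drained.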
vlib/benchmark/benchmark.v
@@ -3,7 +3,7 @@ module benchmark
 import time
 import term
 
-const (
+pub const (
 	b_ok = term.ok_message('OK ')
 	b_fail = term.fail_message('FAIL')
 	b_skip = term.warn_message('SKIP')
@@ -111,8 +111,8 @@ pub fn (mut b Benchmark) measure(label string) i64 {
 	return res
 }
 
-pub fn (b &Benchmark) step_message_with_label(label string, msg string) string {
-	timed_line := b.tdiff_in_ms(msg, b.step_timer.elapsed().microseconds())
+pub fn (b &Benchmark) step_message_with_label_and_duration(label string, msg string, sduration time.Duration) string {
+	timed_line := b.tdiff_in_ms(msg, sduration.microseconds())
 	if b.nexpected_steps > 1 {
 		mut sprogress := ''
 		if b.nexpected_steps < 10 {
@@ -137,6 +137,10 @@ pub fn (b &Benchmark) step_message_with_label(label string, msg string) string {
 	return '${label:-5s}${timed_line}'
 }
 
+pub fn (b &Benchmark) step_message_with_label(label string, msg string) string {
+	return b.step_message_with_label_and_duration(label, msg, b.step_timer.elapsed())
+}
+
 pub fn (b &Benchmark) step_message(msg string) string {
 	return b.step_message_with_label('', msg)
 }
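
On the benchmark side, the duration is factored out of step_message_with_label(): the new step_message_with_label_and_duration() takes it as an explicit time.Duration, and the old function becomes a thin wrapper that passes b.step_timer.elapsed(). Together with making the b_ok/b_fail/b_skip constants pub, this lets the test print per-task timings measured on a worker thread instead of whatever the shared benchmark's own step timer happens to say. A small usage sketch follows, with a made-up label string; the calls mirror the ones visible in this commit.

import time
import benchmark

fn main() {
	mut bench := benchmark.new_benchmark()
	bench.set_total_expected_steps(1)
	sw := time.new_stopwatch({})
	// ... the timed work would normally run on another thread ...
	took := sw.elapsed()
	bench.step()
	bench.ok()
	// benchmark.b_ok is reachable here because the constants are now pub
	println(bench.step_message_with_label_and_duration(benchmark.b_ok, 'some_test.vv', took))
	bench.stop()
	println(bench.total_message('all tests'))
}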
vlib/v/compiler_errors_test.v
@@ -2,9 +2,25 @@ import os
 import term
 import v.util
 import v.util.vtest
+import time
+import sync
+import runtime
+import benchmark
 
+struct TaskDescription {
+	vexe string
+	dir string
+	voptions string
+	result_extension string
+	path string
+mut:
+	is_error bool
+	expected string
+	found___ string
+	took time.Duration
+}
+
 fn test_all() {
-	mut total_errors := 0
 	vexe := os.getenv('VEXE')
 	vroot := os.dir(vexe)
 	os.chdir(vroot)
@@ -17,61 +33,116 @@ fn test_all() {
 	parser_dir := 'vlib/v/parser/tests'
 	parser_tests := get_tests_in_dir(parser_dir)
 	// -prod so that warns are errors
-	total_errors += check_path(vexe, classic_dir, '-prod', '.out', classic_tests)
-	total_errors += check_path(vexe, global_dir, '--enable-globals', '.out', global_tests)
-	total_errors += check_path(vexe, classic_dir, '--enable-globals run', '.run.out',
-		['globals_error.vv'])
-	total_errors += check_path(vexe, run_dir, 'run', '.run.out', run_tests)
-	total_errors += check_path(vexe, parser_dir, '-prod', '.out', parser_tests)
+	mut tasks := []TaskDescription{}
+	tasks << new_tasks(vexe, classic_dir, '-prod', '.out', classic_tests)
+	tasks << new_tasks(vexe, global_dir, '--enable-globals', '.out', global_tests)
+	tasks <<
+		new_tasks(vexe, classic_dir, '--enable-globals run', '.run.out', ['globals_error.vv'])
+	tasks << new_tasks(vexe, run_dir, 'run', '.run.out', run_tests)
+	tasks << new_tasks(vexe, parser_dir, '-prod', '.out', parser_tests)
+	tasks.run()
+	total_errors := tasks.filter(it.is_error).len
 	assert total_errors == 0
 }
 
-fn get_tests_in_dir(dir string) []string {
-	files := os.ls(dir) or {
-		panic(err)
-	}
-	mut tests := files.filter(it.ends_with('.vv'))
-	tests.sort()
-	return tests
-}
-
-fn check_path(vexe, dir, voptions, result_extension string, tests []string) int {
-	mut nb_fail := 0
+fn new_tasks(vexe, dir, voptions, result_extension string, tests []string) []TaskDescription {
 	paths := vtest.filter_vtest_only(tests, {
 		basepath: dir
 	})
+	mut res := []TaskDescription{}
 	for path in paths {
-		program := path.replace('.vv', '.v')
-		print(path + ' ')
-		os.cp(path, program) or {
-			panic(err)
-		}
-		res := os.exec('$vexe $voptions $program') or {
-			panic(err)
-		}
-		mut expected := os.read_file(program.replace('.v', '') + result_extension) or {
-			panic(err)
-		}
-		expected = clean_line_endings(expected)
-		found := clean_line_endings(res.output)
-		if expected != found {
-			println(term.red('FAIL'))
-			println('============')
-			println('expected:')
-			println(expected)
-			println('============')
-			println('found:')
-			println(found)
-			println('============\n')
-			diff_content(expected, found)
-			nb_fail++
-		} else {
-			println(term.green('OK'))
-			os.rm(program)
-		}
+		res << TaskDescription{
+			vexe: vexe
+			dir: dir
+			voptions: voptions
+			result_extension: result_extension
+			path: path
+		}
 	}
-	return nb_fail
+	return res
 }
 
+// process an array of tasks in parallel, using no more than vjobs worker threads
+fn (mut tasks []TaskDescription) run() {
+	vjobs := runtime.nr_jobs()
+	mut bench := benchmark.new_benchmark()
+	bench.set_total_expected_steps(tasks.len)
+	// TODO: close work channel instead of using sentinel items
+	task_sentinel := TaskDescription{
+		path: ''
+	}
+	mut work := sync.new_channel<TaskDescription>(tasks.len + vjobs)
+	mut results := sync.new_channel<TaskDescription>(tasks.len)
+	for i in 0 .. tasks.len {
+		work.push(&tasks[i])
+	}
+	for _ in 0 .. vjobs {
+		work.push(&task_sentinel)
+		go work_processor(mut work, mut results)
+	}
+	for _ in 0 .. tasks.len {
+		mut task := TaskDescription{}
+		results.pop(&task)
+		bench.step()
+		if task.is_error {
+			bench.fail()
+			eprintln(bench.step_message_with_label_and_duration(benchmark.b_fail, task.path,
+				task.took))
+			println('============')
+			println('expected:')
+			println(task.expected)
+			println('============')
+			println('found:')
+			println(task.found___)
+			println('============\n')
+			diff_content(task.expected, task.found___)
+		} else {
+			bench.ok()
+			eprintln(bench.step_message_with_label_and_duration(benchmark.b_ok, task.path,
+				task.took))
+		}
+	}
+	bench.stop()
+	eprintln(term.h_divider('-'))
+	eprintln(bench.total_message('all tests'))
+}
+
+// a single worker thread spends its time getting work from the `work` channel,
+// processing the task, and then putting the task in the `results` channel
+fn work_processor(mut work sync.Channel, mut results sync.Channel) {
+	for {
+		mut task := TaskDescription{}
+		work.pop(&task)
+		if task.path == '' {
+			break
+		}
+		sw := time.new_stopwatch({})
+		task.execute()
+		task.took = sw.elapsed()
+		results.push(&task)
+	}
+}
+
+// actual processing; NB: no output is done here at all
+fn (mut task TaskDescription) execute() {
+	program := task.path.replace('.vv', '.v')
+	os.cp(task.path, program) or {
+		panic(err)
+	}
+	res := os.exec('$task.vexe $task.voptions $program') or {
+		panic(err)
+	}
+	mut expected := os.read_file(program.replace('.v', '') + task.result_extension) or {
+		panic(err)
+	}
+	task.expected = clean_line_endings(expected)
+	task.found___ = clean_line_endings(res.output)
+	if task.expected != task.found___ {
+		task.is_error = true
+	} else {
+		os.rm(program)
+	}
+}
+
 fn clean_line_endings(s string) string {
 	mut res := s.trim_space()
@@ -90,3 +161,12 @@ fn diff_content(s1, s2 string) {
 	println(util.color_compare_strings(diff_cmd, s1, s2))
 	println('============\n')
 }
+
+fn get_tests_in_dir(dir string) []string {
+	files := os.ls(dir) or {
+		panic(err)
+	}
+	mut tests := files.filter(it.ends_with('.vv'))
+	tests.sort()
+	return tests
+}