mirror of
https://github.com/vlang/v.git
synced 2023-08-10 21:13:21 +03:00
sync: move sync.atomic2 to sync.stdatomic, cleanup
This commit is contained in:
71
vlib/sync/stdatomic/1.declarations.c.v
Normal file
71
vlib/sync/stdatomic/1.declarations.c.v
Normal file
@@ -0,0 +1,71 @@
|
||||
module stdatomic

// The @VEXEROOT/thirdparty/stdatomic contains compatibility headers
// for stdatomic, that supports both nix, windows and c++.
$if windows {
	#flag -I @VEXEROOT/thirdparty/stdatomic/win
} $else {
	#flag -I @VEXEROOT/thirdparty/stdatomic/nix
}

$if linux {
	$if tinyc {
		$if amd64 {
			// most Linux distributions have /usr/lib/libatomic.so,
			// but Ubuntu uses gcc version specific dir
			#flag -L/usr/lib/gcc/x86_64-linux-gnu/6
			#flag -L/usr/lib/gcc/x86_64-linux-gnu/7
			#flag -L/usr/lib/gcc/x86_64-linux-gnu/8
			#flag -L/usr/lib/gcc/x86_64-linux-gnu/9
			#flag -L/usr/lib/gcc/x86_64-linux-gnu/10
			#flag -L/usr/lib/gcc/x86_64-linux-gnu/11
			#flag -L/usr/lib/gcc/x86_64-linux-gnu/12
		} $else $if arm64 {
			#flag -L/usr/lib/gcc/aarch64-linux-gnu/6
			#flag -L/usr/lib/gcc/aarch64-linux-gnu/7
			#flag -L/usr/lib/gcc/aarch64-linux-gnu/8
			#flag -L/usr/lib/gcc/aarch64-linux-gnu/9
			#flag -L/usr/lib/gcc/aarch64-linux-gnu/10
			#flag -L/usr/lib/gcc/aarch64-linux-gnu/11
			#flag -L/usr/lib/gcc/aarch64-linux-gnu/12
		}
		// NOTE(review): -latomic is presumably required because tcc does not
		// provide the compiler atomic builtins — confirm on each target.
		#flag -latomic
	}
}

#include <atomic.h>
// The following functions are actually generic in C
fn C.atomic_load_ptr(voidptr) voidptr
fn C.atomic_store_ptr(voidptr, voidptr)
fn C.atomic_compare_exchange_weak_ptr(voidptr, voidptr, voidptr) bool
fn C.atomic_compare_exchange_strong_ptr(voidptr, voidptr, voidptr) bool
fn C.atomic_exchange_ptr(voidptr, voidptr) voidptr
fn C.atomic_fetch_add_ptr(voidptr, voidptr) voidptr
fn C.atomic_fetch_sub_ptr(voidptr, voidptr) voidptr

fn C.atomic_load_u16(voidptr) u16
fn C.atomic_store_u16(voidptr, u16)
fn C.atomic_compare_exchange_weak_u16(voidptr, voidptr, u16) bool
fn C.atomic_compare_exchange_strong_u16(voidptr, voidptr, u16) bool
fn C.atomic_exchange_u16(voidptr, u16) u16
fn C.atomic_fetch_add_u16(voidptr, u16) u16
fn C.atomic_fetch_sub_u16(voidptr, u16) u16

fn C.atomic_load_u32(voidptr) u32
fn C.atomic_store_u32(voidptr, u32)
fn C.atomic_compare_exchange_weak_u32(voidptr, voidptr, u32) bool
fn C.atomic_compare_exchange_strong_u32(voidptr, voidptr, u32) bool
fn C.atomic_exchange_u32(voidptr, u32) u32
fn C.atomic_fetch_add_u32(voidptr, u32) u32
fn C.atomic_fetch_sub_u32(voidptr, u32) u32

fn C.atomic_load_u64(voidptr) u64
fn C.atomic_store_u64(voidptr, u64)
fn C.atomic_compare_exchange_weak_u64(voidptr, voidptr, u64) bool
fn C.atomic_compare_exchange_strong_u64(voidptr, voidptr, u64) bool
fn C.atomic_exchange_u64(voidptr, u64) u64
fn C.atomic_fetch_add_u64(voidptr, u64) u64
fn C.atomic_fetch_sub_u64(voidptr, u64) u64

// NOTE(review): `used` looks like a dummy exported symbol so the module has a
// public member (e.g. to keep imports of it meaningful) — confirm intent.
pub const used = 1
|
||||
54
vlib/sync/stdatomic/atomic.c.v
Normal file
54
vlib/sync/stdatomic/atomic.c.v
Normal file
@@ -0,0 +1,54 @@
|
||||
module stdatomic
|
||||
|
||||
// Implement the atomic operations. For now TCC does not support the atomic
|
||||
// versions on nix so it uses locks to simulate the same behavior.
|
||||
//
|
||||
// On windows tcc can simulate with other atomic operations.
|
||||
//
|
||||
// NB: these implementations should be regarded as alpha stage and be tested
|
||||
// much more.
|
||||
|
||||
// add_u64 adds the provided delta to the value at `ptr` as a single atomic
// operation. It reports whether the value *before* the addition was 0
// (C11 fetch-add returns the previous value).
pub fn add_u64(ptr &u64, delta int) bool {
	return C.atomic_fetch_add_u64(voidptr(ptr), delta) == 0
}
|
||||
|
||||
// sub_u64 subtracts the provided delta from the value at `ptr` as a single
// atomic operation. It reports whether the value *before* the subtraction
// was 0 (C11 fetch-sub returns the previous value).
pub fn sub_u64(ptr &u64, delta int) bool {
	return C.atomic_fetch_sub_u64(voidptr(ptr), delta) == 0
}
|
||||
|
||||
// add_i64 adds the provided delta to the value at `ptr` as a single atomic
// operation. It reports whether the value before the addition was 0.
// The u64 intrinsic is reused for the signed case; addition is bit-identical
// on two's complement targets.
pub fn add_i64(ptr &i64, delta int) bool {
	return C.atomic_fetch_add_u64(voidptr(ptr), delta) == 0
}
|
||||
|
||||
// sub_i64 subtracts the provided delta from the value at `ptr` as a single
// atomic operation. It reports whether the value before the subtraction was 0.
// (The original comment named add_i64 by mistake.)
pub fn sub_i64(ptr &i64, delta int) bool {
	return C.atomic_fetch_sub_u64(voidptr(ptr), delta) == 0
}
|
||||
|
||||
// atomic store/load operations have to be used when there might be another concurrent access
// atomically set a value
pub fn store_u64(ptr &u64, val u64) {
	C.atomic_store_u64(voidptr(ptr), val)
}
|
||||
|
||||
// atomically get a value
pub fn load_u64(ptr &u64) u64 {
	return C.atomic_load_u64(voidptr(ptr))
}
|
||||
|
||||
// atomically set a value; the raw bits are stored via the u64 intrinsic
pub fn store_i64(ptr &i64, val i64) {
	C.atomic_store_u64(voidptr(ptr), val)
}
|
||||
|
||||
// atomically get a value; the raw bits are loaded via the u64 intrinsic
// and reinterpreted as i64
pub fn load_i64(ptr &i64) i64 {
	return i64(C.atomic_load_u64(voidptr(ptr)))
}
|
||||
103
vlib/sync/stdatomic/atomic_test.v
Normal file
103
vlib/sync/stdatomic/atomic_test.v
Normal file
@@ -0,0 +1,103 @@
|
||||
import sync
|
||||
import sync.stdatomic
|
||||
|
||||
// iterations_per_cycle is the number of increments each spawned thread performs.
const iterations_per_cycle = 100_000
|
||||
|
||||
// Counter wraps the shared u64 that the test threads increment concurrently.
struct Counter {
mut:
	counter u64
}
|
||||
|
||||
// without proper synchronization this would fail
fn test_count_10_times_1_cycle_should_result_10_cycles_with_sync() {
	desired_iterations := 10 * iterations_per_cycle
	mut wg := sync.new_waitgroup()
	mut counter := &Counter{}
	wg.add(10)
	for _ in 0 .. 10 {
		go count_one_cycle(mut counter, mut wg)
	}
	wg.wait()
	assert counter.counter == desired_iterations
	eprintln(' with synchronization the counter is: ${counter.counter:10} , expectedly == ${desired_iterations:10}')
}
|
||||
|
||||
// This test just to make sure that we have an anti-test to prove it works
fn test_count_10_times_1_cycle_should_not_be_10_cycles_without_sync() {
	desired_iterations := 10 * iterations_per_cycle
	mut wg := sync.new_waitgroup()
	mut counter := &Counter{}
	wg.add(10)
	for _ in 0 .. 10 {
		go count_one_cycle_without_sync(mut counter, mut wg)
	}
	wg.wait()
	// NB: we do not assert here, just print, because sometimes by chance counter.counter may be == desired_iterations
	eprintln('without synchronization the counter is: ${counter.counter:10} , expectedly != ${desired_iterations:10}')
}
|
||||
|
||||
// A single atomic increment of a zeroed u64 must yield exactly 1.
fn test_atomic_count_plus_one_u64() {
	mut value := u64(0)
	stdatomic.add_u64(&value, 1)
	assert stdatomic.load_u64(&value) == 1
}
|
||||
|
||||
// A single atomic increment of a zeroed i64 must yield exactly 1.
fn test_atomic_count_plus_one_i64() {
	mut value := i64(0)
	stdatomic.add_i64(&value, 1)
	assert stdatomic.load_i64(&value) == 1
}
|
||||
|
||||
// Atomically adding a delta larger than 1 to a zeroed u64.
fn test_atomic_count_plus_greater_than_one_u64() {
	mut value := u64(0)
	stdatomic.add_u64(&value, 10)
	assert stdatomic.load_u64(&value) == 10
}
|
||||
|
||||
// Atomically adding a delta larger than 1 to a zeroed i64.
fn test_atomic_count_plus_greater_than_one_i64() {
	mut value := i64(0)
	stdatomic.add_i64(&value, 10)
	assert stdatomic.load_i64(&value) == 10
}
|
||||
|
||||
// A single atomic decrement of u64(1) must yield exactly 0.
fn test_atomic_count_minus_one_u64() {
	mut value := u64(1)
	stdatomic.sub_u64(&value, 1)
	assert stdatomic.load_u64(&value) == 0
}
|
||||
|
||||
// Decrementing a zeroed i64 must go negative (signed semantics).
fn test_atomic_count_minus_one_i64() {
	mut value := i64(0)
	stdatomic.sub_i64(&value, 1)
	assert stdatomic.load_i64(&value) == -1
}
|
||||
|
||||
// Store then subtract the same delta: the u64 must return to 0.
fn test_atomic_count_minus_greater_than_one_u64() {
	mut value := u64(0)
	stdatomic.store_u64(&value, 10)
	stdatomic.sub_u64(&value, 10)
	assert stdatomic.load_u64(&value) == 0
}
|
||||
|
||||
// Subtracting past zero on an i64 must produce the signed negative result.
fn test_atomic_count_minus_greater_than_one_i64() {
	mut value := i64(0)
	stdatomic.store_i64(&value, 10)
	stdatomic.sub_i64(&value, 20)
	assert stdatomic.load_i64(&value) == -10
}
|
||||
|
||||
// count_one_cycle bumps the shared counter iterations_per_cycle times using
// the atomic add, then signals completion on the wait group.
fn count_one_cycle(mut counter Counter, mut group sync.WaitGroup) {
	for _ in 0 .. iterations_per_cycle {
		stdatomic.add_u64(&counter.counter, 1)
	}
	group.done()
}
|
||||
|
||||
// count_one_cycle_without_sync bumps the shared counter iterations_per_cycle
// times with a plain (non-atomic) increment, then signals the wait group.
// Deliberately racy: it is the anti-test counterpart of count_one_cycle.
fn count_one_cycle_without_sync(mut counter Counter, mut group sync.WaitGroup) {
	for _ in 0 .. iterations_per_cycle {
		counter.counter++
	}
	group.done()
}
|
||||
Reference in New Issue
Block a user