1
0
mirror of https://github.com/vlang/v.git synced 2023-08-10 21:13:21 +03:00
v/vlib/builtin/map.v

476 lines
13 KiB
V
Raw Normal View History

2020-02-03 07:00:36 +03:00
// Copyright (c) 2019-2020 Alexander Medvednikov. All rights reserved.
2019-06-23 05:21:30 +03:00
// Use of this source code is governed by an MIT license
// that can be found in the LICENSE file.
2019-06-22 21:20:28 +03:00
module builtin
import strings
import hash.wyhash
2020-04-08 01:02:15 +03:00
fn C.memcmp(byteptr, byteptr, int) int
2020-04-08 01:02:15 +03:00
2020-03-19 08:52:34 +03:00
/*
This is a highly optimized hashmap implementation. It has several traits
that, in combination, make it very fast and memory efficient. Here is a
short explanation of each trait. After reading this you should have a
basic understanding of how it functions:

1. Hash-function: Wyhash. Wyhash is the fastest hash-function for short
keys passing SMHasher, so it was an obvious choice.

2. Open addressing: Robin Hood Hashing. With this method, a hash-collision
is resolved by probing. As opposed to linear probing, Robin Hood hashing
has a simple but clever twist: as new keys are inserted, old keys are
shifted around in a way such that all keys stay reasonably close to the
slot they originally hash to. A new key may displace a key already
inserted if its probe count is larger than that of the key at the
current position.

3. Memory layout: key-value pairs are stored in a `DenseArray`. This is a
dynamic array with a very low volume of unused memory, at the cost of more
reallocations when inserting elements. It also preserves the order of the
key-values. This array is named `key_values`. Instead of probing a new
key-value, this map probes two 32-bit numbers collectively. The first
number has its 8 most significant bits reserved for the probe-count and
the remaining 24 bits are cached bits from the hash which are utilized for
faster re-hashing. This number is often referred to as `meta`. The other
32-bit number is the index at which the key-value was pushed to in
`key_values`. Both of these numbers are stored in a sparse array `metas`.
The `meta`s and `kv_index`s are stored at even and odd indices,
respectively:

metas = [meta, kv_index, 0, 0, meta, kv_index, 0, 0, meta, kv_index, ...]
key_values = [kv, kv, kv, ...]

4. The size of metas is a power of two. This enables the use of bitwise
AND to convert the 64-bit hash to a bucket/index that doesn't overflow
metas. If the size is a power of two you can use "hash & (SIZE - 1)"
instead of "hash % SIZE". Modulo is extremely expensive, so using '&' is
a big performance improvement. The general concern with this approach is
that you only make use of the lower bits of the hash, which can cause
more collisions. This is solved by using a well-dispersed hash-function.

5. The hashmap keeps track of the highest probe_count. The trick is to
allocate `extra_metas` > max(probe_count), so you never have to do any
bounds-checking, since the extra meta memory ensures that a meta will
never go beyond the last index.

6. Cached rehashing. When the `load_factor` of the map exceeds the
`max_load_factor`, the size of metas is doubled and all the key-values
are "rehashed" to find the index for their meta's in the new array.
Instead of rehashing completely, it simply uses the cached-hashbits
stored in the meta, resulting in much faster rehashing.
*/
2020-01-24 22:13:59 +03:00
const (
	// Number of bits from the hash stored for each entry
	hashbits = 24
	// Number of bits from the hash stored for rehashing
	max_cached_hashbits = 16
	// Initial log-number of buckets in the hashtable
	// NOTE(review): 'capicity' is a pre-existing typo; the name is kept
	// because other code in this module references it
	init_log_capicity = 5
	// Initial number of buckets in the hashtable
	init_capicity = 1 << init_log_capicity
	// Maximum load-factor (size / capacity) before the table is doubled
	max_load_factor = 0.8
	// Initial highest even index in metas (capacity - 2)
	init_cap = init_capicity - 2
	// Used for incrementing `extra_metas` when max
	// probe count is too high, to avoid overflow
	extra_metas_inc = 4
	// Bitmask to select all the hashbits (lower 24 bits of a meta)
	hash_mask = u32(0x00FFFFFF)
	// Used for incrementing the probe-count (stored in the top 8 bits)
	probe_inc = u32(0x01000000)
)
2020-05-17 14:51:18 +03:00
// Reports whether two strings hold identical bytes.
// Intended to be fast when the strings are very likely to be equal.
// TODO: add branch prediction hints
[inline]
fn fast_string_eq(a, b string) bool {
	// length check short-circuits before the byte comparison
	return a.len == b.len && C.memcmp(a.str, b.str, b.len) == 0
}
// Dynamic array with very low growth factor
struct DenseArray {
	// Byte size of one value element
	value_bytes int
mut:
	// Number of allocated key-value slots
	cap u32
	// Number of used slots (including deleted holes, see `deletes`)
	size u32
	// Number of deleted (zeroed-key) slots awaiting compaction
	deletes u32
	// Backing storage for keys
	keys &string
	// Backing storage for values (raw bytes, `value_bytes` each)
	values byteptr
}
// Creates an empty DenseArray with an initial capacity of 8 slots
// for values of `value_bytes` size each.
[inline]
[unsafe_fn]
fn new_dense_array(value_bytes int) DenseArray {
	return DenseArray{
		value_bytes: value_bytes
		cap: 8
		size: 0
		deletes: 0
		keys: &string(malloc(8 * sizeof(string)))
		values: malloc(8 * value_bytes)
	}
}
// Appends `key`/`value` to the array and returns the slot index.
// The growth-factor is roughly 1.125 `(x + (x >> 3))`
[inline]
fn (mut d DenseArray) push(key string, value voidptr) u32 {
	if d.size == d.cap {
		// grow all buffers by ~1/8th of the current capacity
		d.cap += d.cap >> 3
		d.keys = &string(C.realloc(d.keys, sizeof(string) * d.cap))
		d.values = C.realloc(d.values, d.value_bytes * d.cap)
	}
	new_index := d.size
	d.keys[new_index] = key
	// copy the raw value bytes into the slot
	C.memcpy(d.values + new_index * d.value_bytes, value, d.value_bytes)
	d.size++
	return new_index
}
2020-04-22 05:00:38 +03:00
// Private function. Used to implement array[] operator
// Returns a pointer to the key stored at index `i`.
fn (d DenseArray) get(i int) voidptr {
	$if !no_bounds_checking? {
		if i < 0 || i >= d.size {
			panic('DenseArray.get: index out of range (i == $i, d.len == $d.size)')
		}
	}
	// pointer arithmetic into the `keys` buffer
	return byteptr(d.keys) + i * sizeof(string)
}
2020-03-19 08:52:34 +03:00
// Move all zeros to the end of the array
// and resize array
2020-05-17 14:51:18 +03:00
fn (mut d DenseArray) zeros_to_end() {
mut tmp_value := malloc(d.value_bytes)
2020-03-19 08:52:34 +03:00
mut count := u32(0)
2020-03-21 15:55:07 +03:00
for i in 0 .. d.size {
if d.keys[i].str != 0 {
// swap keys
tmp_key := d.keys[count]
d.keys[count] = d.keys[i]
d.keys[i] = tmp_key
// swap values (TODO: optimize)
C.memcpy(tmp_value, d.values + count * d.value_bytes, d.value_bytes)
C.memcpy(d.values + count * d.value_bytes, d.values + i * d.value_bytes, d.value_bytes)
C.memcpy(d.values + i * d.value_bytes, tmp_value, d.value_bytes)
2020-03-19 08:52:34 +03:00
count++
}
}
free(tmp_value)
2020-03-21 15:55:07 +03:00
d.deletes = 0
2020-03-19 08:52:34 +03:00
d.size = count
d.cap = if count < 8 { u32(8) } else { count }
d.keys = &string(C.realloc(d.keys, sizeof(string) * d.cap))
d.values = C.realloc(d.values, d.value_bytes * d.cap)
2020-03-19 08:52:34 +03:00
}
pub struct map {
	// Byte size of value
	value_bytes int
mut:
	// highest even index in the hashtable
	cap u32
	// Number of cached hashbits left for rehashing
	cached_hashbits byte
	// Used for right-shifting out used hashbits
	shift byte
	// Array storing key-values (ordered)
	key_values DenseArray
	// Pointer to meta-data:
	// Odd indices store kv_index.
	// Even indices store probe_count and hashbits.
	metas &u32
	// Extra metas that allows for no ranging when incrementing
	// index in the hashmap
	extra_metas u32
pub mut:
	// Number of key-values currently in the hashmap
	size int
}
// Creates an empty map for values of `value_bytes` size.
fn new_map_1(value_bytes int) map {
	return map{
		value_bytes: value_bytes
		cap: init_cap
		cached_hashbits: max_cached_hashbits
		shift: init_log_capicity
		key_values: new_dense_array(value_bytes)
		// vcalloc zeroes the buffer, so every meta slot starts empty
		metas: &u32(vcalloc(sizeof(u32) * (init_capicity + extra_metas_inc)))
		extra_metas: extra_metas_inc
		size: 0
	}
}
2020-01-24 22:13:59 +03:00
// Creates a map pre-filled with `n` key-value pairs taken from the
// parallel `keys` / `values` buffers.
fn new_map_init(n, value_bytes int, keys &string, values voidptr) map {
	mut res := new_map_1(value_bytes)
	for i := 0; i < n; i++ {
		res.set(keys[i], byteptr(values) + i * value_bytes)
	}
	return res
}
2019-08-03 10:44:08 +03:00
2020-03-19 08:52:34 +03:00
// Maps `key` to its initial bucket index and meta word.
// The returned meta packs a probe-count of 1 in the top 8 bits
// and 24 cached hash bits in the rest (see file comment, trait 3).
[inline]
fn (m &map) key_to_index(key string) (u32,u32) {
	hash := wyhash.wyhash_c(key.str, u64(key.len), 0)
	// m.cap is `capacity - 2`: an even bitmask, so the index is
	// always an even slot in `metas`
	index := hash & m.cap
	meta := ((hash >> m.shift) & hash_mask) | probe_inc
	return u32(index),u32(meta)
}
// Advances `index`/`meta` past every stored entry with a larger meta,
// i.e. to the first slot where the probed key could live
// (metas along a probe chain are kept ordered — Robin Hood invariant).
[inline]
fn (m &map) meta_less(_index u32, _metas u32) (u32,u32) {
	mut index := _index
	mut meta := _metas
	for meta < m.metas[index] {
		index += 2
		// each step bumps the probe-count stored in the top bits
		meta += probe_inc
	}
	return index,meta
}
// Inserts `meta`/`kvi` at `index`, displacing stored entries with a
// smaller meta further down the chain (Robin Hood hashing, trait 2
// of the file comment).
[inline]
fn (mut m map) meta_greater(_index u32, _metas u32, kvi u32) {
	mut meta := _metas
	mut index := _index
	mut kv_index := kvi
	// walk until a free (zero) meta slot is found
	for m.metas[index] != 0 {
		if meta > m.metas[index] {
			// swap the entry being inserted with the stored one
			tmp_meta := m.metas[index]
			m.metas[index] = meta
			meta = tmp_meta
			tmp_index := m.metas[index + 1]
			m.metas[index + 1] = kv_index
			kv_index = tmp_index
		}
		index += 2
		meta += probe_inc
	}
	m.metas[index] = meta
	m.metas[index + 1] = kv_index
	probe_count := (meta >> hashbits) - 1
	// Grow the extra metas tail whenever the highest probe-count reaches
	// its edge, so probing never reads past the allocation (trait 5)
	if (probe_count << 1) == m.extra_metas {
		m.extra_metas += extra_metas_inc
		mem_size := (m.cap + 2 + m.extra_metas)
		m.metas = &u32(C.realloc(m.metas, sizeof(u32) * mem_size))
		// zero only the newly appended tail
		C.memset(m.metas + mem_size - extra_metas_inc, 0, sizeof(u32) * extra_metas_inc)
		// Should almost never happen
		if probe_count == 252 {
			panic('Probe overflow')
		}
	}
}
2020-05-17 14:51:18 +03:00
// Inserts `value` under `key`, overwriting any existing value.
fn (mut m map) set(key string, value voidptr) {
	// size << 1 compensates for cap being the highest even index
	load_factor := f32(m.size << 1) / f32(m.cap)
	if load_factor > max_load_factor {
		m.expand()
	}
	mut index,mut meta := m.key_to_index(key)
	index,meta = m.meta_less(index, meta)
	// While we might have a match
	for meta == m.metas[index] {
		kv_index := m.metas[index + 1]
		if fast_string_eq(key, m.key_values.keys[kv_index]) {
			// existing key: overwrite the value in place
			C.memcpy(m.key_values.values + kv_index * m.value_bytes , value, m.value_bytes)
			return
		}
		index += 2
		meta += probe_inc
	}
	// new key: append the pair and link it into the metas
	kv_index := m.key_values.push(key, value)
	m.meta_greater(index, meta, kv_index)
	m.size++
}
2020-03-19 08:52:34 +03:00
// Doubles the size of the hashmap
fn (mut m map) expand() {
	old_cap := m.cap
	// cap is `capacity - 2`: reconstruct, double, convert back
	m.cap = ((m.cap + 2) << 1) - 2
	// Check if any hashbits are left
	if m.cached_hashbits == 0 {
		// cached bits exhausted: full rehash with a wider shift
		m.shift += max_cached_hashbits
		m.cached_hashbits = max_cached_hashbits
		m.rehash()
	}
	else {
		// fast path: reuse hash bits cached in the metas (trait 6)
		m.cached_rehash(old_cap)
		m.cached_hashbits--
	}
}
2020-05-17 14:51:18 +03:00
// Rebuilds the metas array from scratch by fully rehashing every key.
fn (mut m map) rehash() {
	meta_bytes := sizeof(u32) * (m.cap + 2 + m.extra_metas)
	m.metas = &u32(C.realloc(m.metas, meta_bytes))
	C.memset(m.metas, 0, meta_bytes)
	for i := u32(0); i < m.key_values.size; i++ {
		// skip deleted (zeroed-key) slots
		if m.key_values.keys[i].str == 0 {
			continue
		}
		mut index,mut meta := m.key_to_index(m.key_values.keys[i])
		index,meta = m.meta_less(index, meta)
		m.meta_greater(index, meta, i)
	}
}
2020-05-17 14:51:18 +03:00
// Rehashes into the doubled metas array without recomputing any hash,
// using the hash bits cached inside each meta (trait 6 of the file
// comment).
fn (mut m map) cached_rehash(old_cap u32) {
	old_metas := m.metas
	m.metas = &u32(vcalloc(sizeof(u32) * (m.cap + 2 + m.extra_metas)))
	old_extra_metas := m.extra_metas
	for i := u32(0); i <= old_cap + old_extra_metas; i += 2 {
		if old_metas[i] == 0 {
			continue
		}
		old_meta := old_metas[i]
		// reconstruct the original bucket from the slot position and
		// the probe-count stored in the top bits
		old_probe_count := ((old_meta >> hashbits) - 1) << 1
		old_index := (i - old_probe_count) & (m.cap >> 1)
		// a cached hash bit decides the new bucket in the doubled table
		mut index := (old_index | (old_meta << m.shift)) & m.cap
		mut meta := (old_meta & hash_mask) | probe_inc
		index,meta = m.meta_less(index, meta)
		kv_index := old_metas[i + 1]
		m.meta_greater(index, meta, kv_index)
	}
	unsafe{
		free(old_metas)
	}
}
// Returns a pointer to the value stored under `key`, or `zero`
// when the key is not present.
fn (m map) get3(key string, zero voidptr) voidptr {
	mut index,mut meta := m.key_to_index(key)
	for {
		if meta == m.metas[index] {
			kv_index := m.metas[index + 1]
			// meta match is only a candidate: confirm with the full key
			if fast_string_eq(key, m.key_values.keys[kv_index]) {
				return voidptr(m.key_values.values + kv_index * m.value_bytes)
			}
		}
		index += 2
		meta += probe_inc
		// once our probe-count exceeds the stored one, the key cannot
		// appear further down the chain (Robin Hood ordering)
		if meta > m.metas[index] { break }
	}
	return zero
}
2020-02-20 22:04:06 +03:00
// Reports whether `key` is currently present in the map.
fn (m map) exists(key string) bool {
	mut i,mut probe := m.key_to_index(key)
	for {
		if probe == m.metas[i] {
			// meta match is only a candidate: confirm with the full key
			if fast_string_eq(key, m.key_values.keys[m.metas[i + 1]]) {
				return true
			}
		}
		i += 2
		probe += probe_inc
		// our probe-count now exceeds the stored one: key is absent
		if probe > m.metas[i] {
			break
		}
	}
	return false
}
2020-05-17 14:51:18 +03:00
// Removes `key` from the map if present; no-op when absent.
pub fn (mut m map) delete(key string) {
	mut index,mut meta := m.key_to_index(key)
	index,meta = m.meta_less(index, meta)
	// Perform backwards shifting
	for meta == m.metas[index] {
		kv_index := m.metas[index + 1]
		if fast_string_eq(key, m.key_values.keys[kv_index]) {
			// pull each following chain entry one slot back while its
			// probe-count is greater than 1
			for (m.metas[index + 2] >> hashbits) > 1 {
				m.metas[index] = m.metas[index + 2] - probe_inc
				m.metas[index + 1] = m.metas[index + 3]
				index += 2
			}
			m.size--
			m.metas[index] = 0
			m.key_values.deletes++
			// zero the key to mark the dense-array slot as deleted
			C.memset(&m.key_values.keys[kv_index], 0, sizeof(string))
			// small arrays are not worth compacting
			if m.key_values.size <= 32 {
				return
			}
			// Clean up key_values if too many have been deleted
			if m.key_values.deletes >= (m.key_values.size >> 1) {
				m.key_values.zeros_to_end()
				m.rehash()
				m.key_values.deletes = 0
			}
			return
		}
		index += 2
		meta += probe_inc
	}
}
// TODO: add optimization in case of no deletes
// Returns all keys currently stored in the map, in insertion order,
// skipping deleted (zeroed) slots.
pub fn (m &map) keys() []string {
	mut all_keys := [''].repeat(m.size)
	mut next := 0
	for i in 0 .. m.key_values.size {
		if m.key_values.keys[i].str == 0 {
			continue
		}
		all_keys[next] = m.key_values.keys[i]
		next++
	}
	return all_keys
}
2020-05-26 02:52:06 +03:00
// Returns a copy of the array with freshly allocated buffers.
// Only the string structs are memcpy'd — the key byte data is
// presumably shared with the original (TODO confirm ownership).
[unsafe_fn]
pub fn (d DenseArray) clone() DenseArray {
	res := DenseArray {
		value_bytes: d.value_bytes
		cap: d.cap
		size: d.size
		deletes: d.deletes
		keys: &string(malloc(d.cap * sizeof(string)))
		values: byteptr(malloc(d.cap * d.value_bytes))
	}
	C.memcpy(res.keys, d.keys, d.cap * sizeof(string))
	C.memcpy(res.values, d.values, d.cap * d.value_bytes)
	return res
}
// Returns a copy of the map, duplicating the key-value storage
// and the metas array.
[unsafe_fn]
pub fn (m map) clone() map {
	metas_size := sizeof(u32) * (m.cap + 2 + m.extra_metas)
	res := map{
		value_bytes: m.value_bytes
		cap: m.cap
		cached_hashbits: m.cached_hashbits
		shift: m.shift
		key_values: m.key_values.clone()
		metas: &u32(malloc(metas_size))
		extra_metas: m.extra_metas
		size: m.size
	}
	C.memcpy(res.metas, m.metas, metas_size)
	return res
}
2020-04-06 00:31:53 +03:00
// Releases all memory owned by the map: the metas array, every live
// key string, and the dense-array buffers.
[unsafe_fn]
pub fn (m &map) free() {
	free(m.metas)
	for i := u32(0); i < m.key_values.size; i++ {
		// deleted slots have a zeroed key: nothing to free
		if m.key_values.keys[i].str == 0 {
			continue
		}
		m.key_values.keys[i].free()
	}
	free(m.key_values.keys)
	free(m.key_values.values)
}
// Returns a human-readable representation of the map,
// one `"key" => "value"` pair per line.
pub fn (m map_string) str() string {
	if m.size == 0 {
		return '{}'
	}
	mut res := strings.new_builder(50)
	res.writeln('{')
	for key, val in m {
		res.writeln(' "$key" => "$val"')
	}
	res.writeln('}')
	return res.str()
}