// vlib/builtin/map.v (mirrored from https://github.com/vlang/v.git)
// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
2019-06-23 05:21:30 +03:00
// Use of this source code is governed by an MIT license
// that can be found in the LICENSE file.
2019-06-22 21:20:28 +03:00
module builtin
// import hash.wyhash as hash
2020-07-18 14:49:00 +03:00
import hash
2020-04-08 01:02:15 +03:00
2020-03-19 08:52:34 +03:00
/*
This is a highly optimized hashmap implementation. It has several traits that
in combination make it very fast and memory efficient. Here is a short
explanation of each trait. After reading this you should have a basic
understanding of how it functions:

1. Hash-function: Wyhash. Wyhash is the fastest hash-function for short keys
passing SMHasher, so it was an obvious choice.

2. Open addressing: Robin Hood Hashing. With this method, a hash-collision is
resolved by probing. As opposed to linear probing, Robin Hood hashing has a
simple but clever twist: as new keys are inserted, old keys are shifted
around in a way such that all keys stay reasonably close to the slot they
originally hash to. A new key may displace a key already inserted if its
probe count is larger than that of the key at the current position.

3. Memory layout: key-value pairs are stored in a `DenseArray`. This is a
dynamic array with a very low volume of unused memory, at the cost of more
reallocations when inserting elements. It also preserves the order of the
key-values. This array is named `key_values`. Instead of probing a new
key-value, this map probes two 32-bit numbers collectively. The first number
has its 8 most significant bits reserved for the probe-count and the
remaining 24 bits are cached bits from the hash which are utilized for faster
re-hashing. This number is often referred to as `meta`. The other 32-bit
number is the index at which the key-value was pushed to in `key_values`.
Both of these numbers are stored in a sparse array `metas`. The `meta`s and
`kv_index`s are stored at even and odd indices, respectively:
metas = [meta, kv_index, 0, 0, meta, kv_index, 0, 0, meta, kv_index, ...]
key_values = [kv, kv, kv, ...]

4. The size of metas is a power of two. This enables the use of bitwise AND
to convert the 64-bit hash to a bucket/index that doesn't overflow metas. If
the size is a power of two you can use "hash & (SIZE - 1)" instead of
"hash % SIZE". Modulo is extremely expensive, so using '&' is a big
performance improvement. The general concern with this approach is that you
only make use of the lower bits of the hash, which can cause more collisions.
This is solved by using a well-dispersed hash-function.

5. The hashmap keeps track of the highest probe_count. The trick is to
allocate `extra_metas` > max(probe_count), so you never have to do any
bounds-checking, since the extra meta memory ensures that a meta will never
go beyond the last index.

6. Cached rehashing. When the `load_factor` of the map exceeds the
`max_load_factor`, the size of metas is doubled and all the key-values are
"rehashed" to find the index for their meta's in the new array. Instead of
rehashing completely, it simply uses the cached hashbits stored in the meta,
resulting in much faster rehashing.
*/
const (
	// Number of bits from the hash stored for each entry
	hashbits = 24
	// Number of bits from the hash stored for rehashing
	max_cached_hashbits = 16
	// Initial log-number of buckets in the hashtable
	// NOTE(review): `capicity` is a typo for `capacity`; the name is kept
	// because other code in this module references it.
	init_log_capicity = 5
	// Initial number of buckets in the hashtable
	init_capicity = 1 << init_log_capicity
	// Maximum load-factor (len / capacity)
	max_load_factor = 0.8
	// Initial highest even index in metas
	init_even_index = init_capicity - 2
	// Used for incrementing `extra_metas` when max
	// probe count is too high, to avoid overflow
	extra_metas_inc = 4
	// Bitmask to select all the hashbits (lower 24 bits of a meta)
	hash_mask = u32(0x00FFFFFF)
	// Used for incrementing the probe-count (stored in the top 8 bits)
	probe_inc = u32(0x01000000)
)
// fast_string_eq is intended to be fast when
// the strings are very likely to be equal.
// TODO: add branch prediction hints
[inline]
fn fast_string_eq(a string, b string) bool {
	// `&&` short-circuits, so the memcmp only runs when the lengths
	// already match — different-length strings can never be equal.
	return a.len == b.len && unsafe { C.memcmp(a.str, b.str, b.len) == 0 }
}
// DenseArray represents a dynamic array with very low growth factor
struct DenseArray {
	key_bytes   int // size of one key slot in bytes
	value_bytes int // size of one value slot in bytes
mut:
	cap     int
	len     int
	deletes u32 // count of lazily deleted slots
	// array allocated (with `cap` bytes) on first deletion
	// has non-zero element when key deleted
	all_deleted &byte
	values      &byte // flat buffer of `cap` value slots
	keys        &byte // flat buffer of `cap` key slots
}
// new_dense_array returns an empty DenseArray for elements of the given
// key/value sizes, with room for 8 entries preallocated.
[inline]
fn new_dense_array(key_bytes int, value_bytes int) DenseArray {
	initial_cap := 8
	return DenseArray{
		key_bytes: key_bytes
		value_bytes: value_bytes
		cap: initial_cap
		len: 0
		deletes: 0
		// the deletion bitmap is only allocated on the first delete
		all_deleted: 0
		keys: unsafe { malloc(initial_cap * key_bytes) }
		values: unsafe { malloc(initial_cap * value_bytes) }
	}
}
// key returns the address of the key stored in slot `i`.
[inline]
fn (d &DenseArray) key(i int) voidptr {
	// keys is a flat byte buffer of fixed-size slots
	slot_offset := i * d.key_bytes
	return unsafe { d.keys + slot_offset }
}
// for cgen
// value returns the address of the value stored in slot `i`.
[inline]
fn (d &DenseArray) value(i int) voidptr {
	// values is a flat byte buffer of fixed-size slots
	slot_offset := i * d.value_bytes
	return unsafe { d.values + slot_offset }
}
// has_index reports whether slot `i` holds a live (not deleted) element.
[inline]
fn (d &DenseArray) has_index(i int) bool {
	if d.deletes == 0 {
		// No deletions yet: every slot below len is live and the
		// deletion bitmap may not even be allocated.
		return true
	}
	return unsafe { d.all_deleted[i] } == 0
}
// Make space to append an element and return index
// The growth-factor is roughly 1.125 `(x + (x >> 3))`
[inline]
fn (mut d DenseArray) expand() int {
	old_cap := d.cap
	old_value_size := d.value_bytes * old_cap
	old_key_size := d.key_bytes * old_cap
	if d.cap == d.len {
		// Full: grow by ~12.5% and reallocate both flat buffers.
		d.cap += d.cap >> 3
		unsafe {
			d.keys = realloc_data(d.keys, old_key_size, d.key_bytes * d.cap)
			d.values = realloc_data(d.values, old_value_size, d.value_bytes * d.cap)
			if d.deletes != 0 {
				// Keep the deletion bitmap in sync with the new capacity;
				// newly added slots start out as not-deleted (0).
				d.all_deleted = realloc_data(d.all_deleted, old_cap, d.cap)
				C.memset(d.all_deleted + d.len, 0, d.cap - d.len)
			}
		}
	}
	push_index := d.len
	unsafe {
		if d.deletes != 0 {
			// Mark the slot being handed out as live.
			d.all_deleted[push_index] = 0
		}
	}
	d.len++
	return push_index
}
// Move all zeros to the end of the array and resize array
// (compacts live elements to the front, preserving their order,
// then shrinks the buffers to fit and clears the deletion state).
fn (mut d DenseArray) zeros_to_end() {
	// TODO alloca?
	mut tmp_value := unsafe { malloc(d.value_bytes) }
	mut tmp_key := unsafe { malloc(d.key_bytes) }
	mut count := 0 // number of live elements moved to the front so far
	for i in 0 .. d.len {
		if d.has_index(i) {
			// swap (TODO: optimize)
			unsafe {
				// Swap keys
				C.memcpy(tmp_key, d.key(count), d.key_bytes)
				C.memcpy(d.key(count), d.key(i), d.key_bytes)
				C.memcpy(d.key(i), tmp_key, d.key_bytes)
				// Swap values
				C.memcpy(tmp_value, d.value(count), d.value_bytes)
				C.memcpy(d.value(count), d.value(i), d.value_bytes)
				C.memcpy(d.value(i), tmp_value, d.value_bytes)
			}
			count++
		}
	}
	unsafe {
		free(tmp_value)
		free(tmp_key)
		d.deletes = 0
		// TODO: reallocate instead as more deletes are likely
		free(d.all_deleted)
	}
	d.len = count
	old_cap := d.cap
	// Never shrink below the initial capacity of 8 slots.
	d.cap = if count < 8 { 8 } else { count }
	unsafe {
		d.values = realloc_data(d.values, d.value_bytes * old_cap, d.value_bytes * d.cap)
		d.keys = realloc_data(d.keys, d.key_bytes * old_cap, d.key_bytes * d.cap)
	}
}
// Function-pointer types used by `map` so that one generic implementation
// can serve all key types (string, integers, ...).
type MapHashFn = fn (voidptr) u64

type MapEqFn = fn (voidptr, voidptr) bool

type MapCloneFn = fn (voidptr, voidptr)

type MapFreeFn = fn (voidptr)
// map is the internal representation of a V `map` type.
pub struct map {
	// Number of bytes of a key
	key_bytes int
	// Number of bytes of a value
	value_bytes int
mut:
	// Highest even index in the hashtable
	even_index u32
	// Number of cached hashbits left for rehashing
	cached_hashbits byte
	// Used for right-shifting out used hashbits
	shift byte
	// Array storing key-values (ordered)
	key_values DenseArray
	// Pointer to meta-data:
	// - Odd indices store kv_index.
	// - Even indices store probe_count and hashbits.
	metas &u32
	// Extra metas that allows for no ranging when incrementing
	// index in the hashmap
	extra_metas     u32
	has_string_keys bool // true when keys need deep clone/free (string keys)
	hash_fn         MapHashFn // hashes a key
	key_eq_fn       MapEqFn // compares two keys for equality
	clone_fn        MapCloneFn // deep-copies a key into map-owned storage
	free_fn         MapFreeFn // releases a map-owned key
pub mut:
	// Number of key-values currently in the hashmap
	len int
}
// map_hash_string hashes the string key pointed to by `pkey` with wyhash.
fn map_hash_string(pkey voidptr) u64 {
	key := *unsafe { &string(pkey) }
	return hash.wyhash_c(key.str, u64(key.len), 0)
}
// map_hash_int_1/2/4/8 hash fixed-size integer keys
// (1, 2, 4 and 8 bytes wide respectively) with wyhash64.
fn map_hash_int_1(pkey voidptr) u64 {
	return hash.wyhash64_c(*unsafe { &byte(pkey) }, 0)
}

fn map_hash_int_2(pkey voidptr) u64 {
	return hash.wyhash64_c(*unsafe { &u16(pkey) }, 0)
}

fn map_hash_int_4(pkey voidptr) u64 {
	return hash.wyhash64_c(*unsafe { &u32(pkey) }, 0)
}

fn map_hash_int_8(pkey voidptr) u64 {
	return hash.wyhash64_c(*unsafe { &u64(pkey) }, 0)
}
// map_eq_string compares two string keys for equality.
fn map_eq_string(a voidptr, b voidptr) bool {
	return fast_string_eq(*unsafe { &string(a) }, *unsafe { &string(b) })
}
// map_eq_int_1/2/4/8 compare fixed-size integer keys
// (1, 2, 4 and 8 bytes wide respectively) for equality.
fn map_eq_int_1(a voidptr, b voidptr) bool {
	return unsafe { *&byte(a) == *&byte(b) }
}

fn map_eq_int_2(a voidptr, b voidptr) bool {
	return unsafe { *&u16(a) == *&u16(b) }
}

fn map_eq_int_4(a voidptr, b voidptr) bool {
	return unsafe { *&u32(a) == *&u32(b) }
}

fn map_eq_int_8(a voidptr, b voidptr) bool {
	return unsafe { *&u64(a) == *&u64(b) }
}
// map_clone_string deep-copies the string key at `pkey` into `dest`,
// so the map owns its own copy of the key's bytes.
fn map_clone_string(dest voidptr, pkey voidptr) {
	unsafe {
		s := *&string(pkey)
		(*&string(dest)) = s.clone()
	}
}
// map_clone_int_1/2/4/8 copy fixed-size integer keys
// (plain value copy — integer keys own no heap memory).
fn map_clone_int_1(dest voidptr, pkey voidptr) {
	unsafe {
		*&byte(dest) = *&byte(pkey)
	}
}

fn map_clone_int_2(dest voidptr, pkey voidptr) {
	unsafe {
		*&u16(dest) = *&u16(pkey)
	}
}

fn map_clone_int_4(dest voidptr, pkey voidptr) {
	unsafe {
		*&u32(dest) = *&u32(pkey)
	}
}

fn map_clone_int_8(dest voidptr, pkey voidptr) {
	unsafe {
		*&u64(dest) = *&u64(pkey)
	}
}
// map_free_string releases the map-owned heap copy of a string key.
fn map_free_string(pkey voidptr) {
	unsafe {
		(*&string(pkey)).free()
	}
}

// map_free_nop is used for value-type keys that own no heap memory.
fn map_free_nop(_ voidptr) {
}
// new_map_2 creates a new, empty map for keys/values of the given byte
// sizes, using the supplied hash/eq/clone/free callbacks for the key type.
fn new_map_2(key_bytes int, value_bytes int, hash_fn MapHashFn, key_eq_fn MapEqFn, clone_fn MapCloneFn, free_fn MapFreeFn) map {
	metasize := int(sizeof(u32) * (init_capicity + extra_metas_inc))
	// for now assume anything bigger than a pointer is a string
	has_string_keys := key_bytes > sizeof(voidptr)
	return map{
		key_bytes: key_bytes
		value_bytes: value_bytes
		even_index: init_even_index
		cached_hashbits: max_cached_hashbits
		shift: init_log_capicity
		key_values: new_dense_array(key_bytes, value_bytes)
		// vcalloc zeroes the metas, so every slot starts out empty
		metas: unsafe { &u32(vcalloc(metasize)) }
		extra_metas: extra_metas_inc
		len: 0
		has_string_keys: has_string_keys
		hash_fn: hash_fn
		key_eq_fn: key_eq_fn
		clone_fn: clone_fn
		free_fn: free_fn
	}
}
// new_map_init_2 creates a map and fills it from `n` parallel key/value
// entries located at `keys` and `values` (used for map literals).
fn new_map_init_2(hash_fn MapHashFn, key_eq_fn MapEqFn, clone_fn MapCloneFn, free_fn MapFreeFn, n int, key_bytes int, value_bytes int, keys voidptr, values voidptr) map {
	mut out := new_map_2(key_bytes, value_bytes, hash_fn, key_eq_fn, clone_fn, free_fn)
	// TODO pre-allocate n slots
	mut pkey := &byte(keys)
	mut pval := &byte(values)
	for _ in 0 .. n {
		unsafe {
			out.set_1(pkey, pval)
			// Advance to the next key/value pair in the flat input arrays.
			pkey = pkey + key_bytes
			pval = pval + value_bytes
		}
	}
	return out
}
// move returns the receiver's contents and leaves the receiver zeroed,
// transferring ownership of all heap allocations to the returned map.
pub fn (mut m map) move() map {
	r := *m
	unsafe {
		// Zero the source — presumably so the moved-from map cannot
		// free the transferred allocations; TODO confirm against callers.
		C.memset(m, 0, sizeof(map))
	}
	return r
}
// key_to_index maps a key to its starting probe slot in `metas` and the
// initial `meta` word (probe count of 1 in the top 8 bits | cached hashbits).
[inline]
fn (m &map) key_to_index(pkey voidptr) (u32, u32) {
	hash := m.hash_fn(pkey)
	// even_index is capacity-2 (even, capacity a power of two), so this
	// AND yields an even slot without an expensive modulo.
	index := hash & m.even_index
	meta := ((hash >> m.shift) & hash_mask) | probe_inc
	return u32(index), u32(meta)
}
// meta_less advances the probe from `_index` while the resident meta is
// larger than ours (Robin Hood: richer entries stay put). It returns the
// first slot whose meta is <= the probe's meta, plus the updated meta.
[inline]
fn (m &map) meta_less(_index u32, _metas u32) (u32, u32) {
	mut index := _index
	mut meta := _metas
	for meta < unsafe { m.metas[index] } {
		index += 2 // metas live at even indices, kv_index at the following odd one
		meta += probe_inc // each step increases our probe count by one
	}
	return index, meta
}
// meta_greater inserts (meta, kv_index) starting at `_index`, displacing
// any poorer resident entries along the way (Robin Hood shifting), then
// ensures enough `extra_metas` remain for the highest observed probe count.
[inline]
fn (mut m map) meta_greater(_index u32, _metas u32, kvi u32) {
	mut meta := _metas
	mut index := _index
	mut kv_index := kvi
	for unsafe { m.metas[index] } != 0 {
		if meta > unsafe { m.metas[index] } {
			// The resident entry is richer than our probe: swap, and
			// continue inserting the displaced entry further along.
			unsafe {
				tmp_meta := m.metas[index]
				m.metas[index] = meta
				meta = tmp_meta
				tmp_index := m.metas[index + 1]
				m.metas[index + 1] = kv_index
				kv_index = tmp_index
			}
		}
		index += 2
		meta += probe_inc
	}
	// Empty slot found: store the (possibly displaced) entry here.
	unsafe {
		m.metas[index] = meta
		m.metas[index + 1] = kv_index
	}
	probe_count := (meta >> hashbits) - 1
	m.ensure_extra_metas(probe_count)
}
// ensure_extra_metas grows the `extra_metas` tail of the metas array
// whenever the observed probe count reaches half of it, so probing can
// run past `even_index` without any bounds checks.
[inline]
fn (mut m map) ensure_extra_metas(probe_count u32) {
	if (probe_count << 1) == m.extra_metas {
		size_of_u32 := sizeof(u32)
		old_mem_size := (m.even_index + 2 + m.extra_metas)
		m.extra_metas += extra_metas_inc
		mem_size := (m.even_index + 2 + m.extra_metas)
		unsafe {
			x := realloc_data(&byte(m.metas), int(size_of_u32 * old_mem_size), int(size_of_u32 * mem_size))
			m.metas = &u32(x)
			// Zero only the freshly added tail (pointer arithmetic on
			// &u32 advances in u32-sized units).
			C.memset(m.metas + mem_size - extra_metas_inc, 0, int(sizeof(u32) * extra_metas_inc))
		}
		// Should almost never happen
		if probe_count == 252 {
			// 252 is the largest probe count that fits in the 8 meta bits.
			panic('Probe overflow')
		}
	}
}
// Insert new element to the map. The element is inserted if its key is
// not equivalent to the key of any other element already in the container.
// If the key already exists, its value is changed to the value of the new element.
fn (mut m map) set_1(key voidptr, value voidptr) {
	// len<<1 / even_index approximates len / capacity.
	load_factor := f32(m.len << 1) / f32(m.even_index)
	if load_factor > max_load_factor {
		m.expand()
	}
	mut index, mut meta := m.key_to_index(key)
	index, meta = m.meta_less(index, meta)
	// While we might have a match
	for meta == unsafe { m.metas[index] } {
		kv_index := int(unsafe { m.metas[index + 1] })
		pkey := unsafe { m.key_values.key(kv_index) }
		if m.key_eq_fn(key, pkey) {
			// Key already present: overwrite the value in place.
			unsafe {
				pval := m.key_values.value(kv_index)
				C.memcpy(pval, value, m.value_bytes)
			}
			return
		}
		index += 2
		meta += probe_inc
	}
	// New key: append the pair to key_values and link it into metas.
	kv_index := m.key_values.expand()
	unsafe {
		pkey := m.key_values.key(kv_index)
		pvalue := m.key_values.value(kv_index)
		m.clone_fn(pkey, key)
		C.memcpy(&byte(pvalue), value, m.value_bytes)
	}
	m.meta_greater(index, meta, u32(kv_index))
	m.len++
}
// Doubles the size of the hashmap
fn (mut m map) expand() {
	old_cap := m.even_index
	m.even_index = ((m.even_index + 2) << 1) - 2
	// Check if any hashbits are left
	if m.cached_hashbits == 0 {
		// All cached bits consumed: do a full rehash using fresh hash bits.
		m.shift += max_cached_hashbits
		m.cached_hashbits = max_cached_hashbits
		m.rehash()
	} else {
		// Fast path: reuse the hash bits cached in each meta.
		m.cached_rehash(old_cap)
		m.cached_hashbits--
	}
}
// A rehash is the reconstruction of the hash table:
// All the elements in the container are rearranged according
// to their hash value into the newly sized key-value container.
// Rehashes are performed when the load_factor is going to surpass
// the max_load_factor in an operation.
fn (mut m map) rehash() {
	meta_bytes := sizeof(u32) * (m.even_index + 2 + m.extra_metas)
	unsafe {
		// TODO: use realloc_data here too
		x := v_realloc(&byte(m.metas), int(meta_bytes))
		m.metas = &u32(x)
		// Clear all metas — every entry is re-inserted below.
		C.memset(m.metas, 0, meta_bytes)
	}
	for i := 0; i < m.key_values.len; i++ {
		if !m.key_values.has_index(i) {
			continue
		}
		pkey := unsafe { m.key_values.key(i) }
		mut index, mut meta := m.key_to_index(pkey)
		index, meta = m.meta_less(index, meta)
		m.meta_greater(index, meta, u32(i))
	}
}
// This method works like rehash. However, instead of rehashing the
// key completely, it uses the bits cached in `metas`.
fn (mut m map) cached_rehash(old_cap u32) {
	old_metas := m.metas
	metasize := int(sizeof(u32) * (m.even_index + 2 + m.extra_metas))
	m.metas = unsafe { &u32(vcalloc(metasize)) }
	old_extra_metas := m.extra_metas
	for i := u32(0); i <= old_cap + old_extra_metas; i += 2 {
		if unsafe { old_metas[i] } == 0 {
			continue
		}
		old_meta := unsafe { old_metas[i] }
		// Reconstruct the entry's original bucket from its probe count,
		// then extend the bucket with one cached hash bit for the
		// doubled table.
		old_probe_count := ((old_meta >> hashbits) - 1) << 1
		old_index := (i - old_probe_count) & (m.even_index >> 1)
		mut index := (old_index | (old_meta << m.shift)) & m.even_index
		mut meta := (old_meta & hash_mask) | probe_inc
		index, meta = m.meta_less(index, meta)
		kv_index := unsafe { old_metas[i + 1] }
		m.meta_greater(index, meta, kv_index)
	}
	unsafe { free(old_metas) }
}
// This method is used for assignment operators. If the argument-key
// does not exist in the map, it's added to the map along with the zero/default value.
// If the key exists, its respective value is returned.
fn (mut m map) get_and_set_1(key voidptr, zero voidptr) voidptr {
	for {
		mut index, mut meta := m.key_to_index(key)
		for {
			if meta == unsafe { m.metas[index] } {
				kv_index := int(unsafe { m.metas[index + 1] })
				pkey := unsafe { m.key_values.key(kv_index) }
				if m.key_eq_fn(key, pkey) {
					pval := unsafe { m.key_values.value(kv_index) }
					return unsafe { &byte(pval) }
				}
			}
			index += 2
			meta += probe_inc
			// Passed every entry with our probe count: key is absent.
			if meta > unsafe { m.metas[index] } {
				break
			}
		}
		// Key not found, insert key with zero-value
		// (the outer loop re-probes and returns the new value's address).
		m.set_1(key, zero)
	}
	assert false
	return voidptr(0)
}
// If `key` matches the key of an element in the container,
// the method returns a reference to its mapped value.
// If not, a zero/default value is returned.
fn (m &map) get_1(key voidptr, zero voidptr) voidptr {
	mut index, mut meta := m.key_to_index(key)
	for {
		if meta == unsafe { m.metas[index] } {
			kv_index := int(unsafe { m.metas[index + 1] })
			pkey := unsafe { m.key_values.key(kv_index) }
			if m.key_eq_fn(key, pkey) {
				pval := unsafe { m.key_values.value(kv_index) }
				return unsafe { &byte(pval) }
			}
		}
		index += 2
		meta += probe_inc
		// Passed every entry with our probe count: the key is absent.
		if meta > unsafe { m.metas[index] } {
			break
		}
	}
	return zero
}
// If `key` matches the key of an element in the container,
// the method returns a reference to its mapped value.
// If not, a zero pointer is returned.
// This is used in `x := m['key'] or { ... }`
fn (m &map) get_1_check(key voidptr) voidptr {
	mut index, mut meta := m.key_to_index(key)
	for {
		if meta == unsafe { m.metas[index] } {
			kv_index := int(unsafe { m.metas[index + 1] })
			pkey := unsafe { m.key_values.key(kv_index) }
			if m.key_eq_fn(key, pkey) {
				pval := unsafe { m.key_values.value(kv_index) }
				return unsafe { &byte(pval) }
			}
		}
		index += 2
		meta += probe_inc
		// Passed every entry with our probe count: the key is absent.
		if meta > unsafe { m.metas[index] } {
			break
		}
	}
	return 0
}
// Checks whether a particular key exists in the map.
fn (m &map) exists_1(key voidptr) bool {
	mut index, mut meta := m.key_to_index(key)
	for {
		if meta == unsafe { m.metas[index] } {
			kv_index := int(unsafe { m.metas[index + 1] })
			pkey := unsafe { m.key_values.key(kv_index) }
			if m.key_eq_fn(key, pkey) {
				return true
			}
		}
		index += 2
		meta += probe_inc
		// Passed every entry with our probe count: the key is absent.
		if meta > unsafe { m.metas[index] } {
			break
		}
	}
	return false
}
// delete marks the element at index `i` as deleted (lazy deletion).
// The deletion bitmap is allocated on the first delete only.
[inline]
fn (mut d DenseArray) delete(i int) {
	if d.deletes == 0 {
		d.all_deleted = vcalloc(d.cap) // sets to 0
	}
	d.deletes++
	unsafe {
		d.all_deleted[i] = 1
	}
}
// Removes the mapping of a particular key from the map.
[unsafe]
pub fn (mut m map) delete(key voidptr) {
	mut index, mut meta := m.key_to_index(key)
	index, meta = m.meta_less(index, meta)
	// Perform backwards shifting
	for meta == unsafe { m.metas[index] } {
		kv_index := int(unsafe { m.metas[index + 1] })
		pkey := unsafe { m.key_values.key(kv_index) }
		if m.key_eq_fn(key, pkey) {
			// Shift the following entries one slot back while their
			// probe count is > 1, so no tombstones are needed and
			// probe distances stay minimal.
			for (unsafe { m.metas[index + 2] } >> hashbits) > 1 {
				unsafe {
					m.metas[index] = m.metas[index + 2] - probe_inc
					m.metas[index + 1] = m.metas[index + 3]
				}
				index += 2
			}
			m.len--
			m.key_values.delete(kv_index)
			unsafe {
				m.metas[index] = 0
				m.free_fn(pkey)
				// Mark key as deleted
				C.memset(pkey, 0, m.key_bytes)
			}
			// Small maps are never compacted.
			if m.key_values.len <= 32 {
				return
			}
			// Clean up key_values if too many have been deleted
			if m.key_values.deletes >= (m.key_values.len >> 1) {
				m.key_values.zeros_to_end()
				m.rehash()
			}
			return
		}
		index += 2
		meta += probe_inc
	}
}
// bootstrap
// delete this
// NOTE(review): legacy string-keyed version kept only for the
// bootstrap compiler; keys_1 below is the generic replacement.
pub fn (m &map) keys() []string {
	mut keys := []string{len: m.len}
	mut item := unsafe { &byte(keys.data) }
	for i := 0; i < m.key_values.len; i++ {
		if !m.key_values.has_index(i) {
			continue
		}
		unsafe {
			// clone_fn deep-copies the key into the result array.
			pkey := m.key_values.key(i)
			m.clone_fn(item, pkey)
			item = item + m.key_bytes
		}
	}
	return keys
}
// Returns all keys in the map.
fn (m &map) keys_1() array {
	mut keys := __new_array(m.len, 0, m.key_bytes)
	mut item := unsafe { &byte(keys.data) }
	if m.key_values.deletes == 0 {
		// Fast path: no holes, so every stored key is live.
		for i := 0; i < m.key_values.len; i++ {
			unsafe {
				pkey := m.key_values.key(i)
				m.clone_fn(item, pkey)
				item = item + m.key_bytes
			}
		}
		return keys
	}
	// Slow path: skip slots that were lazily deleted.
	for i := 0; i < m.key_values.len; i++ {
		if !m.key_values.has_index(i) {
			continue
		}
		unsafe {
			pkey := m.key_values.key(i)
			m.clone_fn(item, pkey)
			item = item + m.key_bytes
		}
	}
	return keys
}
// clone duplicates the DenseArray buffers byte-for-byte.
// warning: only copies keys, does not clone
// (string keys are deep-cloned afterwards by map.clone).
[unsafe]
fn (d &DenseArray) clone() DenseArray {
	res := DenseArray{
		key_bytes: d.key_bytes
		value_bytes: d.value_bytes
		cap: d.cap
		len: d.len
		deletes: d.deletes
		all_deleted: 0
		values: 0
		keys: 0
	}
	unsafe {
		if d.deletes != 0 {
			// The deletion bitmap only exists once something was deleted.
			res.all_deleted = memdup(d.all_deleted, d.cap)
		}
		res.keys = memdup(d.keys, d.cap * d.key_bytes)
		res.values = memdup(d.values, d.cap * d.value_bytes)
	}
	return res
}
// clone returns a clone of the `map`.
[unsafe]
pub fn (m &map) clone() map {
	metasize := int(sizeof(u32) * (m.even_index + 2 + m.extra_metas))
	res := map{
		key_bytes: m.key_bytes
		value_bytes: m.value_bytes
		even_index: m.even_index
		cached_hashbits: m.cached_hashbits
		shift: m.shift
		key_values: unsafe { m.key_values.clone() }
		metas: unsafe { &u32(malloc(metasize)) }
		extra_metas: m.extra_metas
		len: m.len
		has_string_keys: m.has_string_keys
		hash_fn: m.hash_fn
		key_eq_fn: m.key_eq_fn
		clone_fn: m.clone_fn
		free_fn: m.free_fn
	}
	unsafe { C.memcpy(res.metas, m.metas, metasize) }
	// Value-type keys were fully copied by the byte copy above.
	if !m.has_string_keys {
		return res
	}
	// clone keys (string keys need a deep copy, since DenseArray.clone
	// only duplicated the raw bytes)
	for i in 0 .. m.key_values.len {
		if !m.key_values.has_index(i) {
			continue
		}
		m.clone_fn(res.key_values.key(i), m.key_values.key(i))
	}
	return res
}
// free releases all memory resources occupied by the `map`.
[unsafe]
pub fn (m &map) free() {
	unsafe { free(m.metas) }
	if m.key_values.deletes == 0 {
		// No holes: every stored key is live and must be freed.
		for i := 0; i < m.key_values.len; i++ {
			unsafe {
				pkey := m.key_values.key(i)
				m.free_fn(pkey)
			}
		}
	} else {
		// Skip deleted slots (their keys were freed on delete).
		for i := 0; i < m.key_values.len; i++ {
			if !m.key_values.has_index(i) {
				continue
			}
			unsafe {
				pkey := m.key_values.key(i)
				m.free_fn(pkey)
			}
		}
		unsafe { free(m.key_values.all_deleted) }
	}
	unsafe {
		free(m.key_values.keys)
		free(m.key_values.values)
	}
}