From 55ef849cd2abc9a7f208867c995adebfcc1a7ff5 Mon Sep 17 00:00:00 2001
From: Delyan Angelov
Date: Sat, 5 Jun 2021 17:29:23 +0300
Subject: [PATCH] v.markused: trim unused map fns for programs that do not use them

---
 vlib/v/ast/table.v         |   1 +
 vlib/v/gen/c/cgen.v        |   9 +-
 vlib/v/gen/c/cheaders.v    | 350 +++++++++++++++++++------------------
 vlib/v/gen/c/str.v         |   4 -
 vlib/v/markused/markused.v |  16 +-
 vlib/v/markused/walker.v   |  14 +-
 6 files changed, 208 insertions(+), 186 deletions(-)

diff --git a/vlib/v/ast/table.v b/vlib/v/ast/table.v
index 9ed4023526..f8da55a370 100644
--- a/vlib/v/ast/table.v
+++ b/vlib/v/ast/table.v
@@ -25,6 +25,7 @@ pub mut:
 	used_fns        map[string]bool // filled in by the checker, when pref.skip_unused = true;
 	used_consts     map[string]bool // filled in by the checker, when pref.skip_unused = true;
 	used_vweb_types []Type // vweb context types, filled in by checker, when pref.skip_unused = true;
+	used_maps       int // how many times maps were used, filled in by checker, when pref.skip_unused = true;
 	panic_handler   FnPanicHandler = default_table_panic_handler
 	panic_userdata  voidptr = voidptr(0) // can be used to pass arbitrary data to panic_handler;
 	panic_npanics   int
diff --git a/vlib/v/gen/c/cgen.v b/vlib/v/gen/c/cgen.v
index eb5566719d..e4b95b87b0 100644
--- a/vlib/v/gen/c/cgen.v
+++ b/vlib/v/gen/c/cgen.v
@@ -405,10 +405,13 @@ pub fn (mut g Gen) init() {
 		g.cheaders.writeln(get_guarded_include_text('<inttypes.h>', 'The C compiler can not find <inttypes.h>. Please install build-essentials')) // int64_t etc
 		g.cheaders.writeln(c_builtin_types)
 		if g.pref.is_bare {
-			g.cheaders.writeln(bare_c_headers)
+			g.cheaders.writeln(c_bare_headers)
 		} else {
 			g.cheaders.writeln(c_headers)
 		}
+		if !g.pref.skip_unused || g.table.used_maps > 0 {
+			g.cheaders.writeln(c_wyhash_headers)
+		}
 	}
 	if g.pref.os == .ios {
 		g.cheaders.writeln('#define __TARGET_IOS__ 1')
@@ -417,10 +420,6 @@ pub fn (mut g Gen) init() {
 	g.write_builtin_types()
 	g.write_typedef_types()
 	g.write_typeof_functions()
-	if g.pref.build_mode != .build_module {
-		// _STR functions should not be defined in builtin.o
-		g.write_str_fn_definitions()
-	}
 	g.write_sorted_types()
 	g.write_multi_return_types()
 	g.definitions.writeln('// end of definitions #endif')
diff --git a/vlib/v/gen/c/cheaders.v b/vlib/v/gen/c/cheaders.v
index 7e283fe2ab..bc7a19a8b4 100644
--- a/vlib/v/gen/c/cheaders.v
+++ b/vlib/v/gen/c/cheaders.v
@@ -3,20 +3,22 @@ module c
 // NB: @@@ here serve as placeholders.
 // They will be replaced with correct strings
 // for each constant, during C code generation.
-const (
-	// V_COMMIT_HASH is generated by cmd/tools/gen_vc.v .
-	c_commit_hash_default = '
+
+// V_COMMIT_HASH is generated by cmd/tools/gen_vc.v .
+const c_commit_hash_default = '
 #ifndef V_COMMIT_HASH
 #define V_COMMIT_HASH "@@@"
 #endif
 '
-	// V_CURRENT_COMMIT_HASH is updated, when V is rebuilt inside a git repo.
-	c_current_commit_hash_default = '
+
+// V_CURRENT_COMMIT_HASH is updated, when V is rebuilt inside a git repo.
+const c_current_commit_hash_default = '
 #ifndef V_CURRENT_COMMIT_HASH
 #define V_CURRENT_COMMIT_HASH "@@@"
 #endif
 '
-	c_concurrency_helpers = '
+
+const c_concurrency_helpers = '
 typedef struct __shared_map __shared_map;
 struct __shared_map { map val; sync__RwMutex mtx; };
 static inline voidptr __dup_shared_map(voidptr src, int sz) {
@@ -31,8 +33,7 @@ static inline voidptr __dup_shared_array(voidptr src, int sz) {
 	sync__RwMutex_init(&dest->mtx);
 	return dest;
 }
-static inline void __sort_ptr(uintptr_t a[], bool b[], int l)
-{
+static inline void __sort_ptr(uintptr_t a[], bool b[], int l) {
 	for (int i=1; i INT32_MAX || (int32_t)a > b; }
 static inline bool _us32_ge(uint32_t a, int32_t b) { return a >= INT32_MAX || (int32_t)a >= b; }
@@ -180,157 +189,8 @@ static inline bool _us64_ne(uint64_t a, int64_t b) { return a > INT64_MAX || (in
 static inline bool _us64_le(uint64_t a, int64_t b) { return a <= INT64_MAX && (int64_t)a <= b; }
 static inline bool _us64_lt(uint64_t a, int64_t b) { return a < INT64_MAX && (int64_t)a < b; }
 '
-	c_wyhash = '
-// ============== wyhash ==============
-#ifndef wyhash_final_version_3
-#define wyhash_final_version_3
-#ifndef WYHASH_CONDOM
-//protections that produce different results:
-//1: normal valid behavior
-//2: extra protection against entropy loss (probability=2^-63), aka. "blind multiplication"
-#define WYHASH_CONDOM 1
-#endif
-
-#ifndef WYHASH_32BIT_MUM
-//0: normal version, slow on 32 bit systems
-//1: faster on 32 bit systems but produces different results, incompatible with wy2u0k function
-#define WYHASH_32BIT_MUM 0
-#endif
-
-//includes
-#include <stdint.h>
-#if defined(_MSC_VER) && defined(_M_X64)
-	#include <intrin.h>
-	#pragma intrinsic(_umul128)
-#endif
-
-//likely and unlikely macros
-#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
-	#define _likely_(x) __builtin_expect(x,1)
-	#define _unlikely_(x) __builtin_expect(x,0)
-#else
-	#define _likely_(x) (x)
-	#define _unlikely_(x) (x)
-#endif
-
-//128bit multiply function
-static inline uint64_t _wyrot(uint64_t x) { return (x>>32)|(x<<32); }
-static inline void _wymum(uint64_t *A, uint64_t *B){
-#if(WYHASH_32BIT_MUM)
-	uint64_t hh=(*A>>32)*(*B>>32), hl=(*A>>32)*(uint32_t)*B, lh=(uint32_t)*A*(*B>>32), ll=(uint64_t)(uint32_t)*A*(uint32_t)*B;
-	#if(WYHASH_CONDOM>1)
-	*A^=_wyrot(hl)^hh; *B^=_wyrot(lh)^ll;
-	#else
-	*A=_wyrot(hl)^hh; *B=_wyrot(lh)^ll;
-	#endif
-#elif defined(__SIZEOF_INT128__)
-	__uint128_t r=*A; r*=*B;
-	#if(WYHASH_CONDOM>1)
-	*A^=(uint64_t)r; *B^=(uint64_t)(r>>64);
-	#else
-	*A=(uint64_t)r; *B=(uint64_t)(r>>64);
-	#endif
-#elif defined(_MSC_VER) && defined(_M_X64)
-	#if(WYHASH_CONDOM>1)
-	uint64_t a, b;
-	a=_umul128(*A,*B,&b);
-	*A^=a; *B^=b;
-	#else
-	*A=_umul128(*A,*B,B);
-	#endif
-#else
-	uint64_t ha=*A>>32, hb=*B>>32, la=(uint32_t)*A, lb=(uint32_t)*B, hi, lo;
-	uint64_t rh=ha*hb, rm0=ha*lb, rm1=hb*la, rl=la*lb, t=rl+(rm0<<32), c=t<rl;
-	lo=t+(rm1<<32); c+=lo<t; hi=rh+(rm0>>32)+(rm1>>32)+c;
-	#if(WYHASH_CONDOM>1)
-	*A^=lo; *B^=hi;
-	#else
-	*A=lo; *B=hi;
-	#endif
-#endif
-}
-
-//multiply and xor mix function, aka MUM
-static inline uint64_t _wymix(uint64_t A, uint64_t B){ _wymum(&A,&B); return A^B; }
-
-//endian macros
-#ifndef WYHASH_LITTLE_ENDIAN
-	#ifdef TARGET_ORDER_IS_LITTLE
-	#define WYHASH_LITTLE_ENDIAN 1
-	#else
-	#define WYHASH_LITTLE_ENDIAN 0
-	#endif
-#endif
-
-//read functions
-#if (WYHASH_LITTLE_ENDIAN)
-static inline uint64_t _wyr8(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return v;}
-static inline uint64_t _wyr4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return v;}
-#elif defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
-static inline uint64_t _wyr8(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return __builtin_bswap64(v);}
-static inline uint64_t _wyr4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return __builtin_bswap32(v);}
-#elif defined(_MSC_VER)
-static inline uint64_t _wyr8(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return _byteswap_uint64(v);}
-static inline uint64_t _wyr4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return _byteswap_ulong(v);}
-#else
-static inline uint64_t _wyr8(const uint8_t *p) {
-	uint64_t v; memcpy(&v, p, 8);
-	return (((v >> 56) & 0xff)| ((v >> 40) & 0xff00)| ((v >> 24) & 0xff0000)| ((v >> 8) & 0xff000000)| ((v << 8) & 0xff00000000)| ((v << 24) & 0xff0000000000)| ((v << 40) & 0xff000000000000)| ((v << 56) & 0xff00000000000000));
-}
-static inline uint64_t _wyr4(const uint8_t *p) {
-	uint32_t v; memcpy(&v, p, 4);
-	return (((v >> 24) & 0xff)| ((v >> 8) & 0xff00)| ((v << 8) & 0xff0000)| ((v << 24) & 0xff000000));
-}
-#endif
-static inline uint64_t _wyr3(const uint8_t *p, size_t k) { return (((uint64_t)p[0])<<16)|(((uint64_t)p[k>>1])<<8)|p[k-1];}
-//wyhash main function
-static inline uint64_t wyhash(const void *key, size_t len, uint64_t seed, const uint64_t *secret){
-	const uint8_t *p=(const uint8_t *)key; seed^=*secret; uint64_t a, b;
-	if(_likely_(len<=16)){
-		if(_likely_(len>=4)){ a=(_wyr4(p)<<32)|_wyr4(p+((len>>3)<<2)); b=(_wyr4(p+len-4)<<32)|_wyr4(p+len-4-((len>>3)<<2)); }
-		else if(_likely_(len>0)){ a=_wyr3(p,len); b=0;}
-		else a=b=0;
-	}
-	else{
-		size_t i=len;
-		if(_unlikely_(i>48)){
-			uint64_t see1=seed, see2=seed;
-			do{
-				seed=_wymix(_wyr8(p)^secret[1],_wyr8(p+8)^seed);
-				see1=_wymix(_wyr8(p+16)^secret[2],_wyr8(p+24)^see1);
-				see2=_wymix(_wyr8(p+32)^secret[3],_wyr8(p+40)^see2);
-				p+=48; i-=48;
-			}while(_likely_(i>48));
-			seed^=see1^see2;
-		}
-		while(_unlikely_(i>16)){ seed=_wymix(_wyr8(p)^secret[1],_wyr8(p+8)^seed); i-=16; p+=16; }
-		a=_wyr8(p+i-16); b=_wyr8(p+i-8);
-	}
-	return _wymix(secret[1]^len,_wymix(a^secret[1],b^seed));
-}
-//the default secret parameters
-static const uint64_t _wyp[4] = {0xa0761d6478bd642full, 0xe7037ed1a0b428dbull, 0x8ebc6af09c88c6e3ull, 0x589965cc75374cc3ull};
-
-//a useful 64bit-64bit mix function to produce deterministic pseudo random numbers that can pass BigCrush and PractRand
-static inline uint64_t wyhash64(uint64_t A, uint64_t B){ A^=0xa0761d6478bd642full; B^=0xe7037ed1a0b428dbull; _wymum(&A,&B); return _wymix(A^0xa0761d6478bd642full,B^0xe7037ed1a0b428dbull);}
-
-//The wyrand PRNG that pass BigCrush and PractRand
-static inline uint64_t wyrand(uint64_t *seed){ *seed+=0xa0761d6478bd642full; return _wymix(*seed,*seed^0xe7037ed1a0b428dbull);}
-
-//convert any 64 bit pseudo random numbers to uniform distribution [0,1). It can be combined with wyrand, wyhash64 or wyhash.
-static inline double wy2u01(uint64_t r){ const double _wynorm=1.0/(1ull<<52); return (r>>12)*_wynorm;}
-
-//convert any 64 bit pseudo random numbers to APPROXIMATE Gaussian distribution. It can be combined with wyrand, wyhash64 or wyhash.
-static inline double wy2gau(uint64_t r){ const double _wynorm=1.0/(1ull<<20); return ((r&0x1fffff)+((r>>21)&0x1fffff)+((r>>42)&0x1fffff))*_wynorm-3.0;}
-
-#if(!WYHASH_32BIT_MUM)
-//fast range integer random number generation on [0,k) credit to Daniel Lemire. May not work when WYHASH_32BIT_MUM=1. It can be combined with wyrand, wyhash64 or wyhash.
-static inline uint64_t wy2u0k(uint64_t r, uint64_t k){ _wymum(&r,&k); return k; }
-#endif
-#endif
-'
-	c_helper_macros = '//============================== HELPER C MACROS =============================*/
+const c_helper_macros = '//============================== HELPER C MACROS =============================*/
 //#define tos4(s, slen) ((string){.str=(s), .len=(slen)})
 // _SLIT0 is used as NULL string for literal arguments
 // `"" s` is used to enforce a string literal argument
@@ -342,10 +202,10 @@
 // copy something to the heap
 #define HEAP(type, expr) ((type*)memdup((void*)&((type[]){expr}[0]), sizeof(type)))
 #define _PUSH_MANY(arr, val, tmp, tmp_typ) {tmp_typ tmp = (val); array_push_many(arr, tmp.data, tmp.len);}
-#define _IN_MAP(val, m) map_exists(m, val)
 '
-	c_headers = c_helper_macros + c_unsigned_comparison_functions + c_common_macros +
-	r'
+
+const c_headers = c_helper_macros + c_unsigned_comparison_functions + c_common_macros +
+	r'
 // c_headers
 typedef int (*qsort_callback_func)(const void*, const void*);
 #include <stdio.h> // TODO remove all these includes, define all function signatures and types manually
@@ -411,7 +271,6 @@ static voidptr memfreedup(voidptr ptr, voidptr src, int sz) {
 	return memdup(src, sz);
 }
-
 #if INTPTR_MAX == INT32_MAX
 #define TARGET_IS_32BIT 1
 #elif INTPTR_MAX == INT64_MAX
 #define TARGET_IS_32BIT 0
@@ -537,9 +396,9 @@ static void* g_live_info = NULL;
 #ifdef _VFREESTANDING
 #undef _VFREESTANDING
 #endif
-' +
-	c_wyhash
-	c_builtin_types = '
+'
+
+const c_builtin_types = '
 //================================== builtin types ================================*/
 typedef int64_t i64;
 typedef int16_t i16;
@@ -573,9 +432,9 @@ typedef bool (*MapEqFn)(voidptr, voidptr);
 typedef void (*MapCloneFn)(voidptr, voidptr);
 typedef void (*MapFreeFn)(voidptr);
 '
-	bare_c_headers = c_helper_macros + c_unsigned_comparison_functions + c_common_macros +
-	'
+const c_bare_headers = c_helper_macros + c_unsigned_comparison_functions + c_common_macros +
+	'
 #define _VFREESTANDING
 typedef long unsigned int size_t;
@@ -612,6 +471,149 @@ static voidptr memfreedup(voidptr ptr, voidptr src, int sz) {
 	return memdup(src, sz);
 }
-' +
-	c_wyhash
-)
+'
+
+const c_wyhash_headers = '
+// ============== wyhash ==============
+#ifndef wyhash_final_version_3
+#define wyhash_final_version_3
+
+#ifndef WYHASH_CONDOM
+//protections that produce different results:
+//1: normal valid behavior
+//2: extra protection against entropy loss (probability=2^-63), aka. "blind multiplication"
"blind multiplication" +#define WYHASH_CONDOM 1 +#endif + +#ifndef WYHASH_32BIT_MUM +//0: normal version, slow on 32 bit systems +//1: faster on 32 bit systems but produces different results, incompatible with wy2u0k function +#define WYHASH_32BIT_MUM 0 +#endif + +//includes +#include +#if defined(_MSC_VER) && defined(_M_X64) + #include + #pragma intrinsic(_umul128) +#endif + +//128bit multiply function +static inline uint64_t _wyrot(uint64_t x) { return (x>>32)|(x<<32); } +static inline void _wymum(uint64_t *A, uint64_t *B){ +#if(WYHASH_32BIT_MUM) + uint64_t hh=(*A>>32)*(*B>>32), hl=(*A>>32)*(uint32_t)*B, lh=(uint32_t)*A*(*B>>32), ll=(uint64_t)(uint32_t)*A*(uint32_t)*B; + #if(WYHASH_CONDOM>1) + *A^=_wyrot(hl)^hh; *B^=_wyrot(lh)^ll; + #else + *A=_wyrot(hl)^hh; *B=_wyrot(lh)^ll; + #endif +#elif defined(__SIZEOF_INT128__) + __uint128_t r=*A; r*=*B; + #if(WYHASH_CONDOM>1) + *A^=(uint64_t)r; *B^=(uint64_t)(r>>64); + #else + *A=(uint64_t)r; *B=(uint64_t)(r>>64); + #endif +#elif defined(_MSC_VER) && defined(_M_X64) + #if(WYHASH_CONDOM>1) + uint64_t a, b; + a=_umul128(*A,*B,&b); + *A^=a; *B^=b; + #else + *A=_umul128(*A,*B,B); + #endif +#else + uint64_t ha=*A>>32, hb=*B>>32, la=(uint32_t)*A, lb=(uint32_t)*B, hi, lo; + uint64_t rh=ha*hb, rm0=ha*lb, rm1=hb*la, rl=la*lb, t=rl+(rm0<<32), c=t>32)+(rm1>>32)+c; + #if(WYHASH_CONDOM>1) + *A^=lo; *B^=hi; + #else + *A=lo; *B=hi; + #endif +#endif +} + +//multiply and xor mix function, aka MUM +static inline uint64_t _wymix(uint64_t A, uint64_t B){ _wymum(&A,&B); return A^B; } + +//endian macros +#ifndef WYHASH_LITTLE_ENDIAN + #ifdef TARGET_ORDER_IS_LITTLE + #define WYHASH_LITTLE_ENDIAN 1 + #else + #define WYHASH_LITTLE_ENDIAN 0 + #endif +#endif + +//read functions +#if (WYHASH_LITTLE_ENDIAN) +static inline uint64_t _wyr8(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return v;} +static inline uint64_t _wyr4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return v;} +#elif defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__) +static inline uint64_t _wyr8(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return __builtin_bswap64(v);} +static inline uint64_t _wyr4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return __builtin_bswap32(v);} +#elif defined(_MSC_VER) +static inline uint64_t _wyr8(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return _byteswap_uint64(v);} +static inline uint64_t _wyr4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return _byteswap_ulong(v);} +#else +static inline uint64_t _wyr8(const uint8_t *p) { + uint64_t v; memcpy(&v, p, 8); + return (((v >> 56) & 0xff)| ((v >> 40) & 0xff00)| ((v >> 24) & 0xff0000)| ((v >> 8) & 0xff000000)| ((v << 8) & 0xff00000000)| ((v << 24) & 0xff0000000000)| ((v << 40) & 0xff000000000000)| ((v << 56) & 0xff00000000000000)); +} +static inline uint64_t _wyr4(const uint8_t *p) { + uint32_t v; memcpy(&v, p, 4); + return (((v >> 24) & 0xff)| ((v >> 8) & 0xff00)| ((v << 8) & 0xff0000)| ((v << 24) & 0xff000000)); +} +#endif +static inline uint64_t _wyr3(const uint8_t *p, size_t k) { return (((uint64_t)p[0])<<16)|(((uint64_t)p[k>>1])<<8)|p[k-1];} +//wyhash main function +static inline uint64_t wyhash(const void *key, size_t len, uint64_t seed, const uint64_t *secret){ + const uint8_t *p=(const uint8_t *)key; seed^=*secret; uint64_t a, b; + if(_likely_(len<=16)){ + if(_likely_(len>=4)){ a=(_wyr4(p)<<32)|_wyr4(p+((len>>3)<<2)); b=(_wyr4(p+len-4)<<32)|_wyr4(p+len-4-((len>>3)<<2)); } + else if(_likely_(len>0)){ a=_wyr3(p,len); b=0;} + else a=b=0; + } + else{ + size_t i=len; + 
+			uint64_t see1=seed, see2=seed;
+			do{
+				seed=_wymix(_wyr8(p)^secret[1],_wyr8(p+8)^seed);
+				see1=_wymix(_wyr8(p+16)^secret[2],_wyr8(p+24)^see1);
+				see2=_wymix(_wyr8(p+32)^secret[3],_wyr8(p+40)^see2);
+				p+=48; i-=48;
+			}while(_likely_(i>48));
+			seed^=see1^see2;
+		}
+		while(_unlikely_(i>16)){ seed=_wymix(_wyr8(p)^secret[1],_wyr8(p+8)^seed); i-=16; p+=16; }
+		a=_wyr8(p+i-16); b=_wyr8(p+i-8);
+	}
+	return _wymix(secret[1]^len,_wymix(a^secret[1],b^seed));
+}
+//the default secret parameters
+static const uint64_t _wyp[4] = {0xa0761d6478bd642full, 0xe7037ed1a0b428dbull, 0x8ebc6af09c88c6e3ull, 0x589965cc75374cc3ull};
+
+//a useful 64bit-64bit mix function to produce deterministic pseudo random numbers that can pass BigCrush and PractRand
+static inline uint64_t wyhash64(uint64_t A, uint64_t B){ A^=0xa0761d6478bd642full; B^=0xe7037ed1a0b428dbull; _wymum(&A,&B); return _wymix(A^0xa0761d6478bd642full,B^0xe7037ed1a0b428dbull);}
+
+//The wyrand PRNG that pass BigCrush and PractRand
+static inline uint64_t wyrand(uint64_t *seed){ *seed+=0xa0761d6478bd642full; return _wymix(*seed,*seed^0xe7037ed1a0b428dbull);}
+
+//convert any 64 bit pseudo random numbers to uniform distribution [0,1). It can be combined with wyrand, wyhash64 or wyhash.
+static inline double wy2u01(uint64_t r){ const double _wynorm=1.0/(1ull<<52); return (r>>12)*_wynorm;}
+
+//convert any 64 bit pseudo random numbers to APPROXIMATE Gaussian distribution. It can be combined with wyrand, wyhash64 or wyhash.
+static inline double wy2gau(uint64_t r){ const double _wynorm=1.0/(1ull<<20); return ((r&0x1fffff)+((r>>21)&0x1fffff)+((r>>42)&0x1fffff))*_wynorm-3.0;}
+
+#if(!WYHASH_32BIT_MUM)
+//fast range integer random number generation on [0,k) credit to Daniel Lemire. May not work when WYHASH_32BIT_MUM=1. It can be combined with wyrand, wyhash64 or wyhash.
+static inline uint64_t wy2u0k(uint64_t r, uint64_t k){ _wymum(&r,&k); return k; }
+#endif
+#endif
+
+#define _IN_MAP(val, m) map_exists(m, val)
+
+'
diff --git a/vlib/v/gen/c/str.v b/vlib/v/gen/c/str.v
index 6fa3613ab1..7154858e52 100644
--- a/vlib/v/gen/c/str.v
+++ b/vlib/v/gen/c/str.v
@@ -5,10 +5,6 @@ module c
 import v.ast
 import v.util

-fn (mut g Gen) write_str_fn_definitions() {
-	g.writeln(c_str_fn_defs)
-}
-
 fn (mut g Gen) string_literal(node ast.StringLiteral) {
 	if node.is_raw {
 		escaped_val := util.smart_quote(node.val, true)
diff --git a/vlib/v/markused/markused.v b/vlib/v/markused/markused.v
index 7d75135889..12cdd6ef4f 100644
--- a/vlib/v/markused/markused.v
+++ b/vlib/v/markused/markused.v
@@ -156,6 +156,7 @@ pub fn mark_used(mut table ast.Table, pref &pref.Preferences, ast_files []&ast.F
 			all_fn_root_names << k
 			continue
 		}
+
 		// sync:
 		if k == 'sync.new_channel_st' {
 			all_fn_root_names << k
@@ -271,7 +272,7 @@ pub fn mark_used(mut table ast.Table, pref &pref.Preferences, ast_files []&ast.F
 	if walker.n_asserts > 0 {
 		walker.fn_decl(mut all_fns['__print_assert_failure'])
 	}
-	if walker.n_maps > 0 {
+	if table.used_maps > 0 {
 		for k, mut mfn in all_fns {
 			mut method_receiver_typename := ''
 			if mfn.is_method {
@@ -283,6 +284,19 @@ pub fn mark_used(mut table ast.Table, pref &pref.Preferences, ast_files []&ast.F
 				walker.fn_decl(mut mfn)
 			}
 		}
+	} else {
+		for map_fn_name in ['new_map', 'new_map_init', 'map_hash_string', 'new_dense_array'] {
+			walker.used_fns.delete(map_fn_name)
+		}
+		for k, mut mfn in all_fns {
+			if !mfn.is_method {
+				continue
+			}
+			method_receiver_typename := table.type_to_str(mfn.receiver.typ)
+			if method_receiver_typename in ['&map', '&mapnode', '&SortedMap', '&DenseArray'] {
+				walker.used_fns.delete(k)
+			}
+		}
 	}

 	$if trace_skip_unused_fn_names ? {
diff --git a/vlib/v/markused/walker.v b/vlib/v/markused/walker.v
index a42ca7ba6d..d78de8a790 100644
--- a/vlib/v/markused/walker.v
+++ b/vlib/v/markused/walker.v
@@ -11,7 +11,6 @@ pub mut:
 	table       &ast.Table
 	used_fns    map[string]bool // used_fns['println'] == true
 	used_consts map[string]bool // used_consts['os.args'] == true
-	n_maps      int
 	n_asserts   int
 mut:
 	files       []&ast.File
@@ -95,6 +94,9 @@ pub fn (mut w Walker) stmt(node ast.Stmt) {
 			w.expr(node.cond)
 			w.expr(node.high)
 			w.stmts(node.stmts)
+			if node.kind == .map {
+				w.table.used_maps++
+			}
 		}
 		ast.ForStmt {
 			w.expr(node.cond)
@@ -216,6 +218,10 @@ fn (mut w Walker) expr(node ast.Expr) {
 			w.expr(node.left)
 			w.expr(node.index)
 			w.or_block(node.or_expr)
+			sym := w.table.get_final_type_symbol(node.left_type)
+			if sym.kind == .map {
+				w.table.used_maps++
+			}
 		}
 		ast.InfixExpr {
 			w.expr(node.left)
 			w.expr(node.right)
@@ -230,6 +236,10 @@ fn (mut w Walker) expr(node ast.Expr) {
 					w.fn_decl(mut &ast.FnDecl(opmethod.source_fn))
 				}
 			}
+			right_sym := w.table.get_type_symbol(node.right_type)
+			if node.op in [.not_in, .key_in] && right_sym.kind == .map {
+				w.table.used_maps++
+			}
 		}
 		ast.IfGuardExpr {
 			w.expr(node.expr)
@@ -261,7 +271,7 @@ fn (mut w Walker) expr(node ast.Expr) {
 		ast.MapInit {
 			w.exprs(node.keys)
 			w.exprs(node.vals)
-			w.n_maps++
+			w.table.used_maps++
 		}
 		ast.MatchExpr {
 			w.expr(node.cond)