diff --git a/vlib/v/gen/c/cheaders.v b/vlib/v/gen/c/cheaders.v
index 462ae00572..1a6bd04944 100644
--- a/vlib/v/gen/c/cheaders.v
+++ b/vlib/v/gen/c/cheaders.v
@@ -63,15 +63,15 @@ const c_common_macros = '
 
 #define __V_architecture 0
 #if defined(__x86_64__)
-#define __V_amd64 1
-#undef __V_architecture
-#define __V_architecture 1
+	#define __V_amd64 1
+	#undef __V_architecture
+	#define __V_architecture 1
 #endif
 
 #if defined(__aarch64__) || defined(__arm64__)
-#define __V_arm64 1
-#undef __V_architecture
-#define __V_architecture 2
+	#define __V_arm64 1
+	#undef __V_architecture
+	#define __V_architecture 2
 #endif
 
 // Using just __GNUC__ for detecting gcc, is not reliable because other compilers define it too:
@@ -198,12 +198,10 @@ static inline bool _us64_lt(uint64_t a, int64_t b) { return a < INT64_MAX && (in
 '
 
 const c_helper_macros = '//============================== HELPER C MACROS =============================*/
-//#define tos4(s, slen) ((string){.str=(s), .len=(slen)})
 // _SLIT0 is used as NULL string for literal arguments
 // `"" s` is used to enforce a string literal argument
 #define _SLIT0 (string){.len=0}
 #define _SLIT(s) ((string){.str=(byteptr)("" s), .len=(sizeof(s)-1), .is_lit=1})
-//#define _SLIT(s) ((string){.str=(byteptr)("" s), .len=(sizeof(s)-1), .is_lit=1})
 // take the address of an rvalue
 #define ADDR(type, expr) (&((type[]){expr}[0]))
 // copy something to the heap
@@ -476,26 +474,26 @@ const c_wyhash_headers = '
 #define wyhash_final_version_3
 
 #ifndef WYHASH_CONDOM
-//protections that produce different results:
-//1: normal valid behavior
-//2: extra protection against entropy loss (probability=2^-63), aka. "blind multiplication"
+// protections that produce different results:
+// 1: normal valid behavior
+// 2: extra protection against entropy loss (probability=2^-63), aka. "blind multiplication"
"blind multiplication" #define WYHASH_CONDOM 1 #endif #ifndef WYHASH_32BIT_MUM -//0: normal version, slow on 32 bit systems -//1: faster on 32 bit systems but produces different results, incompatible with wy2u0k function +// 0: normal version, slow on 32 bit systems +// 1: faster on 32 bit systems but produces different results, incompatible with wy2u0k function #define WYHASH_32BIT_MUM 0 #endif -//includes +// includes #include #if defined(_MSC_VER) && defined(_M_X64) #include #pragma intrinsic(_umul128) #endif -//128bit multiply function +// 128bit multiply function static inline uint64_t _wyrot(uint64_t x) { return (x>>32)|(x<<32); } static inline void _wymum(uint64_t *A, uint64_t *B){ #if(WYHASH_32BIT_MUM) @@ -532,10 +530,10 @@ static inline void _wymum(uint64_t *A, uint64_t *B){ #endif } -//multiply and xor mix function, aka MUM +// multiply and xor mix function, aka MUM static inline uint64_t _wymix(uint64_t A, uint64_t B){ _wymum(&A,&B); return A^B; } -//endian macros +// endian macros #ifndef WYHASH_LITTLE_ENDIAN #ifdef TARGET_ORDER_IS_LITTLE #define WYHASH_LITTLE_ENDIAN 1 @@ -544,69 +542,68 @@ static inline uint64_t _wymix(uint64_t A, uint64_t B){ _wymum(&A,&B); return A^B #endif #endif -//read functions +// read functions #if (WYHASH_LITTLE_ENDIAN) -static inline uint64_t _wyr8(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return v;} -static inline uint64_t _wyr4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return v;} + static inline uint64_t _wyr8(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return v;} + static inline uint64_t _wyr4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return v;} #elif defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__) -static inline uint64_t _wyr8(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return __builtin_bswap64(v);} -static inline uint64_t _wyr4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return __builtin_bswap32(v);} + static inline uint64_t _wyr8(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return __builtin_bswap64(v);} + static inline uint64_t _wyr4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return __builtin_bswap32(v);} #elif defined(_MSC_VER) -static inline uint64_t _wyr8(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return _byteswap_uint64(v);} -static inline uint64_t _wyr4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return _byteswap_ulong(v);} + static inline uint64_t _wyr8(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return _byteswap_uint64(v);} + static inline uint64_t _wyr4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return _byteswap_ulong(v);} #else -static inline uint64_t _wyr8(const uint8_t *p) { - uint64_t v; memcpy(&v, p, 8); - return (((v >> 56) & 0xff)| ((v >> 40) & 0xff00)| ((v >> 24) & 0xff0000)| ((v >> 8) & 0xff000000)| ((v << 8) & 0xff00000000)| ((v << 24) & 0xff0000000000)| ((v << 40) & 0xff000000000000)| ((v << 56) & 0xff00000000000000)); -} -static inline uint64_t _wyr4(const uint8_t *p) { - uint32_t v; memcpy(&v, p, 4); - return (((v >> 24) & 0xff)| ((v >> 8) & 0xff00)| ((v << 8) & 0xff0000)| ((v << 24) & 0xff000000)); -} + static inline uint64_t _wyr8(const uint8_t *p) { + uint64_t v; memcpy(&v, p, 8); + return (((v >> 56) & 0xff)| ((v >> 40) & 0xff00)| ((v >> 24) & 0xff0000)| ((v >> 8) & 0xff000000)| ((v << 8) & 0xff00000000)| ((v << 24) & 0xff0000000000)| ((v << 40) & 0xff000000000000)| ((v << 56) & 0xff00000000000000)); + } + static inline uint64_t _wyr4(const uint8_t *p) { + uint32_t v; memcpy(&v, p, 4); + return (((v >> 24) & 0xff)| ((v 
 #endif
 static inline uint64_t _wyr3(const uint8_t *p, size_t k) { return (((uint64_t)p[0])<<16)|(((uint64_t)p[k>>1])<<8)|p[k-1];}
-//wyhash main function
+// wyhash main function
 static inline uint64_t wyhash(const void *key, size_t len, uint64_t seed, const uint64_t *secret){
-    const uint8_t *p=(const uint8_t *)key; seed^=*secret; uint64_t a, b;
-	if(_likely_(len<=16)){
-		if(_likely_(len>=4)){ a=(_wyr4(p)<<32)|_wyr4(p+((len>>3)<<2)); b=(_wyr4(p+len-4)<<32)|_wyr4(p+len-4-((len>>3)<<2)); }
-		else if(_likely_(len>0)){ a=_wyr3(p,len); b=0;}
+	const uint8_t *p=(const uint8_t *)key; seed^=*secret; uint64_t a, b;
+	if (_likely_(len<=16)) {
+		if (_likely_(len>=4)) { a=(_wyr4(p)<<32)|_wyr4(p+((len>>3)<<2)); b=(_wyr4(p+len-4)<<32)|_wyr4(p+len-4-((len>>3)<<2)); }
+		else if (_likely_(len>0)) { a=_wyr3(p,len); b=0; }
 		else a=b=0;
-	}
-	else{
+	} else {
 		size_t i=len;
-		if(_unlikely_(i>48)){
+		if (_unlikely_(i>48)) {
 			uint64_t see1=seed, see2=seed;
-			do{
+			do {
 				seed=_wymix(_wyr8(p)^secret[1],_wyr8(p+8)^seed);
 				see1=_wymix(_wyr8(p+16)^secret[2],_wyr8(p+24)^see1);
 				see2=_wymix(_wyr8(p+32)^secret[3],_wyr8(p+40)^see2);
 				p+=48; i-=48;
-			}while(_likely_(i>48));
+			} while(_likely_(i>48));
 			seed^=see1^see2;
 		}
-		while(_unlikely_(i>16)){ seed=_wymix(_wyr8(p)^secret[1],_wyr8(p+8)^seed); i-=16; p+=16; }
+		while(_unlikely_(i>16)) { seed=_wymix(_wyr8(p)^secret[1],_wyr8(p+8)^seed); i-=16; p+=16; }
 		a=_wyr8(p+i-16); b=_wyr8(p+i-8);
 	}
 	return _wymix(secret[1]^len,_wymix(a^secret[1],b^seed));
 }
 
-//the default secret parameters
+// the default secret parameters
 static const uint64_t _wyp[4] = {0xa0761d6478bd642full, 0xe7037ed1a0b428dbull, 0x8ebc6af09c88c6e3ull, 0x589965cc75374cc3ull};
 
-//a useful 64bit-64bit mix function to produce deterministic pseudo random numbers that can pass BigCrush and PractRand
+// a useful 64bit-64bit mix function to produce deterministic pseudo random numbers that can pass BigCrush and PractRand
 static inline uint64_t wyhash64(uint64_t A, uint64_t B){ A^=0xa0761d6478bd642full; B^=0xe7037ed1a0b428dbull; _wymum(&A,&B); return _wymix(A^0xa0761d6478bd642full,B^0xe7037ed1a0b428dbull);}
 
-//The wyrand PRNG that pass BigCrush and PractRand
+// the wyrand PRNG that pass BigCrush and PractRand
 static inline uint64_t wyrand(uint64_t *seed){ *seed+=0xa0761d6478bd642full; return _wymix(*seed,*seed^0xe7037ed1a0b428dbull);}
 
-//convert any 64 bit pseudo random numbers to uniform distribution [0,1). It can be combined with wyrand, wyhash64 or wyhash.
+// convert any 64 bit pseudo random numbers to uniform distribution [0,1). It can be combined with wyrand, wyhash64 or wyhash.
 static inline double wy2u01(uint64_t r){ const double _wynorm=1.0/(1ull<<52); return (r>>12)*_wynorm;}
 
-//convert any 64 bit pseudo random numbers to APPROXIMATE Gaussian distribution. It can be combined with wyrand, wyhash64 or wyhash.
+// convert any 64 bit pseudo random numbers to APPROXIMATE Gaussian distribution. It can be combined with wyrand, wyhash64 or wyhash.
 static inline double wy2gau(uint64_t r){ const double _wynorm=1.0/(1ull<<20); return ((r&0x1fffff)+((r>>21)&0x1fffff)+((r>>42)&0x1fffff))*_wynorm-3.0;}
 #if(!WYHASH_32BIT_MUM)
-//fast range integer random number generation on [0,k) credit to Daniel Lemire. May not work when WYHASH_32BIT_MUM=1. It can be combined with wyrand, wyhash64 or wyhash.
+// fast range integer random number generation on [0,k) credit to Daniel Lemire. May not work when WYHASH_32BIT_MUM=1. It can be combined with wyrand, wyhash64 or wyhash.
 static inline uint64_t wy2u0k(uint64_t r, uint64_t k){ _wymum(&r,&k); return k; }
 #endif
 #endif
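
Notes on the hunks above, with small standalone C sketches. None of this is part of the patch itself.

On the first hunk: __V_architecture is a small dispatch constant (0 = unknown, 1 = amd64, 2 = arm64) that later generated C can branch on at preprocessing time. A minimal sketch of a downstream consumer, assuming only that the macro block above is in scope (the program is illustrative, not taken from the generator):

#include <stdio.h>

/* assumes the __V_* macros from the first hunk are defined above */
int main(void) {
#if defined(__V_amd64)
	printf("compiled for amd64, __V_architecture=%d\n", __V_architecture);
#elif defined(__V_arm64)
	printf("compiled for arm64, __V_architecture=%d\n", __V_architecture);
#else
	printf("unknown target, __V_architecture=%d\n", __V_architecture);
#endif
	return 0;
}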
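The second hunk only drops two commented-out defines, but the surviving _SLIT macro is worth a note: the `"" s` juxtaposition compiles only when s is a string literal (literal concatenation is not defined for pointers), which in turn guarantees that sizeof(s)-1 measures the literal's length rather than a pointer size. A reduced sketch, with V's string struct cut down to just the fields the macro touches (an assumption for illustration, not the full runtime type):

#include <stddef.h>

typedef unsigned char* byteptr;
typedef struct { byteptr str; int len; int is_lit; } string; /* reduced sketch of V's runtime string */
#define _SLIT(s) ((string){.str=(byteptr)("" s), .len=(sizeof(s)-1), .is_lit=1})

int main(void) {
	string v = _SLIT("hello");   /* ok: len is computed at compile time (5) */
	/* const char *p = "hello";  */
	/* string bad = _SLIT(p);    rejected by the compiler: "" p is not valid C */
	return v.len == 5 ? 0 : 1;
}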
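In the read-function hunk, note that _wyr8/_wyr4 go through memcpy instead of casting p to a uint64_t pointer. That is deliberate: a direct *(const uint64_t *)p load is undefined behavior for misaligned p and violates strict aliasing, while the memcpy form is fully defined and still compiles to a single load on GCC, Clang, and MSVC. The pattern in isolation:

#include <stdint.h>
#include <string.h>

/* defined-behavior unaligned load; optimizers turn this into one mov/ldr */
static inline uint64_t load_u64(const void *p) {
	uint64_t v;
	memcpy(&v, p, sizeof v);
	return v;
	/* the tempting alternative is NOT equivalent:
	   return *(const uint64_t *)p;   // UB if p is misaligned */
}

int main(void) {
	unsigned char buf[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
	return load_u64(buf + 1) ? 0 : 1; /* buf+1 is deliberately misaligned */
}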
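For anyone wanting to sanity-check the reformatted wyhash block outside of V, here is a minimal harness. The file name wyhash_excerpt.h is hypothetical (in reality the code is emitted inline by the C backend), and the _likely_/_unlikely_ and TARGET_ORDER_IS_LITTLE definitions below are stand-in assumptions for what the surrounding generated headers normally provide:

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

#define _likely_(x)   __builtin_expect(!!(x), 1) /* assumption: mirrors cheaders.v */
#define _unlikely_(x) __builtin_expect(!!(x), 0)
#define TARGET_ORDER_IS_LITTLE                    /* assumption: little-endian build host */
#include "wyhash_excerpt.h"                       /* hypothetical extraction of the hunks above */

int main(void) {
	const char *key = "hello";
	uint64_t h = wyhash(key, strlen(key), 0, _wyp); /* default secret from the header */
	uint64_t seed = 42;
	uint64_t r = wyrand(&seed);                     /* seeded PRNG on the same mixer */
	printf("hash=%016llx rand=%016llx u01=%f\n",
		(unsigned long long)h, (unsigned long long)r, wy2u01(r));
	return 0;
}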
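Last, the wy2u0k helper at the end is Daniel Lemire's multiply-shift range reduction: for a full-range 64-bit r, the high 64 bits of the 128-bit product r*k are uniform over [0,k) up to a bias of at most k/2^64, with no modulo. With the WYHASH_CONDOM 1 default set above, _wymum(&r,&k) is exactly that 128-bit multiply (low half into r, high half into k), so the helper reduces to the following sketch, written with GCC/Clang's __uint128_t for clarity:

#include <stdint.h>

/* Lemire fast range: high 64 bits of the full 128-bit product r*k */
static inline uint64_t fast_range64(uint64_t r, uint64_t k) {
	return (uint64_t)(((__uint128_t)r * k) >> 64);
}

int main(void) {
	/* e.g. map a 64-bit random value into a bucket index in [0,10) */
	return (int)fast_range64(0x123456789abcdef0ull, 10); /* always in 0..9 */
}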