mirror of https://github.com/vlang/v.git synced 2023-08-10 21:13:21 +03:00

crypto.[sha256/sha512]: remove extraneous casts & use rotations from math.bits

Authored by joe-conigliaro on 2019-09-26 23:38:12 +10:00, committed by Alexander Medvednikov
parent c069525e8c
commit 47da52b85b
4 changed files with 14 additions and 12 deletions
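Two kinds of cleanup here: the sha512 block function swaps its hand-written rotate-right expressions for math.bits calls, and both modules drop redundant u32()/u64() casts. A standalone check that the two spellings of a right rotation agree; a minimal sketch, not code from the commit, assuming current V syntax:

```v
import math.bits

fn main() {
	x := u32(0x12345678)
	// right rotation by 17, spelled out with two shifts and an OR as the old code did
	manual := (x >> 17) | (x << (32 - 17))
	// the math.bits helper: a negative rotate-left count rotates right
	helper := bits.rotate_left_32(x, -17)
	println(manual == helper) // true
}
```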

View File

@@ -74,7 +74,7 @@ fn (d mut Digest) reset() {
d.h[7] = u32(Init7_224)
}
d.nx = 0
- d.len = u64(0)
+ d.len = 0
}
// new returns a new Digest (implementing hash.Hash) computing the SHA256 checksum.

View File

@@ -99,13 +99,13 @@ fn block_generic(dig mut Digest, p_ []byte) {
// rounds below if needed for speed.
for i := 0; i < 16; i++ {
j := i * 4
- w[i] = u32(u32(p[j])<<u32(24)) | u32(u32(p[j+1])<<u32(16)) | u32(u32(p[j+2])<<u32(8)) | u32(p[j+3])
+ w[i] = u32(p[j]<<24) | u32(p[j+1]<<16) | u32(p[j+2]<<8) | u32(p[j+3])
}
for i := 16; i < 64; i++ {
v1 := w[i-2]
- t1 := (bits.rotate_left_32(v1, -17)) ^ (bits.rotate_left_32(v1, -19)) ^ u32((v1 >> u32(10)))
+ t1 := (bits.rotate_left_32(v1, -17)) ^ (bits.rotate_left_32(v1, -19)) ^ (v1 >> 10)
v2 := w[i-15]
- t2 := (bits.rotate_left_32(v2, -7)) ^ (bits.rotate_left_32(v2, -18)) ^ u32((v2 >> u32(3)))
+ t2 := (bits.rotate_left_32(v2, -7)) ^ (bits.rotate_left_32(v2, -18)) ^ (v2 >> 3)
w[i] = t1 + w[i-7] + t2 + w[i-16]
}
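The hunk above is the SHA-256 message-schedule expansion. Pulled out of the loop, the two helpers it computes look like the sketch below; sigma0 and sigma1 are illustrative names that do not appear in the source, and the syntax assumes current V:

```v
import math.bits

// the two schedule functions from FIPS 180-4, written the way the new code
// writes them: math.bits rotations plus one plain right shift
fn sigma0(x u32) u32 {
	return bits.rotate_left_32(x, -7) ^ bits.rotate_left_32(x, -18) ^ (x >> 3)
}

fn sigma1(x u32) u32 {
	return bits.rotate_left_32(x, -17) ^ bits.rotate_left_32(x, -19) ^ (x >> 10)
}

fn main() {
	mut w := []u32{len: 64}
	for i := 0; i < 16; i++ {
		w[i] = u32(i) // placeholder input words
	}
	// extend the first 16 words into the full 64-word schedule
	for i := 16; i < 64; i++ {
		w[i] = sigma1(w[i - 2]) + w[i - 7] + sigma0(w[i - 15]) + w[i - 16]
	}
	println(w[63].hex())
}
```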
@@ -120,7 +120,6 @@ fn block_generic(dig mut Digest, p_ []byte) {
for i := 0; i < 64; i++ {
t1 := h + ((bits.rotate_left_32(e, -6)) ^ (bits.rotate_left_32(e, -11)) ^ (bits.rotate_left_32(e, -25))) + ((e & f) ^ (~e & g)) + u32(_K[i]) + w[i]
t2 := ((bits.rotate_left_32(a, -2)) ^ (bits.rotate_left_32(a, -13)) ^ (bits.rotate_left_32(a, -22))) + ((a & b) ^ (a & c) ^ (b & c))
h = g
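The truncated compression-loop hunk above inlines the remaining round functions. For reference, a sketch of what those expressions compute; the helper names are mine, not the module's, and the values in main are just the SHA-256 IV words used as demo input:

```v
import math.bits

// ch and maj are the FIPS 180-4 choose and majority functions; big_sigma0 and
// big_sigma1 are the rotation mixes applied to a and e in every round
fn ch(e u32, f u32, g u32) u32 {
	return (e & f) ^ (~e & g)
}

fn maj(a u32, b u32, c u32) u32 {
	return (a & b) ^ (a & c) ^ (b & c)
}

fn big_sigma0(a u32) u32 {
	return bits.rotate_left_32(a, -2) ^ bits.rotate_left_32(a, -13) ^ bits.rotate_left_32(a, -22)
}

fn big_sigma1(e u32) u32 {
	return bits.rotate_left_32(e, -6) ^ bits.rotate_left_32(e, -11) ^ bits.rotate_left_32(e, -25)
}

fn main() {
	a := u32(0x6a09e667)
	b := u32(0xbb67ae85)
	c := u32(0x3c6ef372)
	e := u32(0x510e527f)
	f := u32(0x9b05688c)
	g := u32(0x1f83d9ab)
	t1_part := big_sigma1(e) + ch(e, f, g)
	t2 := big_sigma0(a) + maj(a, b, c)
	sum := t1_part + t2
	println(sum.hex())
}
```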

View File

@@ -117,7 +117,7 @@ fn (d mut Digest) reset() {
d.h[7] = Init7
}
d.nx = 0
- d.len = u64(0)
+ d.len = 0
}
fn _new(hash crypto.Hash) &Digest {

View File

@@ -9,6 +9,8 @@
module sha512
+ import math.bits
const(
_K = [
0x428a2f98d728ae22,
@@ -111,14 +113,14 @@ fn block_generic(dig mut Digest, p_ []byte) {
for p.len >= Chunk {
for i := 0; i < 16; i++ {
j := i * 8
- w[i] = u64(u64(u64(p[j])<<u64(56)) | u64(u64(p[j+1])<<u64(48)) | u64(u64(p[j+2])<<u64(40)) | u64(u64(p[j+3])<<u64(32)) |
- u64(u64(p[j+4])<<u64(24)) | u64(u64(p[j+5])<<u64(16)) | u64(u64(p[j+6])<<u64(8)) | u64(p[j+7]))
+ w[i] = u64(u64(u64(p[j])<<56) | u64(u64(p[j+1])<<48) | u64(u64(p[j+2])<<40) | u64(u64(p[j+3])<<32) |
+ u64(u64(p[j+4])<<24) | u64(u64(p[j+5])<<16) | u64(u64(p[j+6])<<8) | u64(p[j+7]))
}
for i := 16; i < 80; i++ {
v1 := w[i-2]
- t1 := (u64(v1>>u64(19)) | u64(v1<<u64(64-19))) ^ u64(u64(v1>>u64(61)) | u64(v1<<u64(64-61))) ^ u64(v1 >> u64(6))
+ t1 := bits.rotate_left_64(v1, -19) ^ bits.rotate_left_64(v1, -61) ^ (v1 >> 6)
v2 := w[i-15]
- t2 := (u64(v2>>u64(1)) | u64(v2<<u64(64-1))) ^ u64(u64(v2>>u64(8)) | u64(v2<<u64(64-8))) ^ u64(v2 >> u64(7))
+ t2 := bits.rotate_left_64(v2, -1) ^ bits.rotate_left_64(v2, -8) ^ (v2 >> 7)
w[i] = t1 + w[i-7] + t2 + w[i-16]
}
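The w[i] assembly just above packs eight big-endian bytes into one 64-bit word. As a standalone helper it would look roughly like this; a sketch, not code from the repository, written with the u8 spelling current V uses for the byte type:

```v
// big-endian load of a u64 from p[j..j+8]
fn be_u64(p []u8, j int) u64 {
	return u64(p[j]) << 56 | u64(p[j + 1]) << 48 | u64(p[j + 2]) << 40 | u64(p[j + 3]) << 32 |
		u64(p[j + 4]) << 24 | u64(p[j + 5]) << 16 | u64(p[j + 6]) << 8 | u64(p[j + 7])
}

fn main() {
	p := [u8(0x01), 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]
	println(be_u64(p, 0).hex()) // 123456789abcdef
}
```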
@@ -133,8 +135,9 @@ fn block_generic(dig mut Digest, p_ []byte) {
mut h := h7
for i := 0; i < 80; i++ {
- t1 := h + (u64(u64(e>>u64(14)) | u64(e<<u64(64-14))) ^ u64(u64(e>>u64(18)) | u64(e<<u64(64-18))) ^ u64(u64(e>>u64(41)) | u64(e<<u64(64-41)))) + ((e & f) ^ (~e & g)) + _K[i] + w[i]
- t2 := (u64(u64(a>>u64(28)) | u64(a<<u64(64-28))) ^ u64(u64(a>>u64(34)) | u64(a<<u64(64-34))) ^ u64(u64(a>>u64(39)) | u64(a<<u64(64-39)))) + ((a & b) ^ (a & c) ^ (b & c))
+ t1 := h + (bits.rotate_left_64(e, -14) ^ bits.rotate_left_64(e, -18) ^ bits.rotate_left_64(e, -41)) + ((e & f) ^ (~e & g)) + _K[i] + w[i]
+ t2 := (bits.rotate_left_64(a, -28) ^ bits.rotate_left_64(a, -34) ^ bits.rotate_left_64(a, -39)) + ((a & b) ^ (a & c) ^ (b & c))
h = g
g = f
f = e