mirror of https://github.com/vlang/v.git synced 2023-08-10 21:13:21 +03:00

all: replace []byte with []u8

Alexander Medvednikov
2022-04-15 15:35:35 +03:00
parent 0527ac633e
commit fb192d949b
164 changed files with 533 additions and 533 deletions

View File

@@ -28,7 +28,7 @@ mut:
// The key argument should be the AES key,
// either 16, 24, or 32 bytes to select
// AES-128, AES-192, or AES-256.
pub fn new_cipher(key []byte) cipher.Block {
pub fn new_cipher(key []u8) cipher.Block {
k := key.len
match k {
16, 24, 32 {
@@ -52,7 +52,7 @@ pub fn (c &AesCipher) block_size() int {
// NOTE: `dst` and `src` are both mutable for performance reasons.
// NOTE: `dst` and `src` must both be pre-allocated to the correct length.
// NOTE: `dst` and `src` may be the same (overlapping entirely).
pub fn (c &AesCipher) encrypt(mut dst []byte, src []byte) {
pub fn (c &AesCipher) encrypt(mut dst []u8, src []u8) {
if src.len < aes.block_size {
panic('crypto.aes: input not full block')
}
@@ -71,7 +71,7 @@ pub fn (c &AesCipher) encrypt(mut dst []byte, src []byte) {
// NOTE: `dst` and `src` are both mutable for performance reasons.
// NOTE: `dst` and `src` must both be pre-allocated to the correct length.
// NOTE: `dst` and `src` may be the same (overlapping entirely).
pub fn (c &AesCipher) decrypt(mut dst []byte, src []byte) {
pub fn (c &AesCipher) decrypt(mut dst []u8, src []u8) {
if src.len < aes.block_size {
panic('crypto.aes: input not full block')
}
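
For context, a minimal usage sketch of the renamed API (not part of this diff; the key and buffers are placeholders, and `aes.block_size` is assumed to be the exported 16-byte block size):

import crypto.aes

fn aes_example() {
	key := []u8{len: 16, init: 0x42} // 16 bytes selects AES-128
	block := aes.new_cipher(key)
	src := []u8{len: aes.block_size, init: 0}
	mut dst := []u8{len: aes.block_size}
	block.encrypt(mut dst, src)
	// decrypting into a fresh buffer recovers the original block
	mut plain := []u8{len: aes.block_size}
	block.decrypt(mut plain, dst)
	assert plain == src
}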

View File

@@ -38,7 +38,7 @@ module aes
import encoding.binary
// Encrypt one block from src into dst, using the expanded key xk.
fn encrypt_block_generic(xk []u32, mut dst []byte, src []byte) {
fn encrypt_block_generic(xk []u32, mut dst []u8, src []u8) {
_ = src[15] // early bounds check
mut s0 := binary.big_endian_u32(src[..4])
mut s1 := binary.big_endian_u32(src[4..8])
@@ -85,7 +85,7 @@ fn encrypt_block_generic(xk []u32, mut dst []byte, src []byte) {
}
// Decrypt one block from src into dst, using the expanded key xk.
fn decrypt_block_generic(xk []u32, mut dst []byte, src []byte) {
fn decrypt_block_generic(xk []u32, mut dst []u8, src []u8) {
_ = src[15] // early bounds check
mut s0 := binary.big_endian_u32(src[0..4])
mut s1 := binary.big_endian_u32(src[4..8])
@@ -143,7 +143,7 @@ fn rotw(w u32) u32 {
// Key expansion algorithm. See FIPS-197, Figure 11.
// Their rcon[i] is our powx[i-1] << 24.
fn expand_key_generic(key []byte, mut enc []u32, mut dec []u32) {
fn expand_key_generic(key []u8, mut enc []u32, mut dec []u32) {
// Encryption key setup.
mut i := 0
nk := key.len / 4

View File

@@ -7,7 +7,7 @@ import crypto.cipher
// new_cipher_generic creates and returns a new cipher.Block
// this is the generic V version, no arch optimisations
fn new_cipher_generic(key []byte) cipher.Block {
fn new_cipher_generic(key []u8) cipher.Block {
n := key.len + 28
mut c := AesCipher{
enc: []u32{len: n}

View File

@@ -20,8 +20,8 @@ pub const (
pub struct Hashed {
mut:
hash []byte
salt []byte
hash []u8
salt []u8
cost int
major string
minor string
@@ -31,14 +31,14 @@ const magic_cipher_data = [u8(0x4f), 0x72, 0x70, 0x68, 0x65, 0x61, 0x6e, 0x42, 0
0x6c, 0x64, 0x65, 0x72, 0x53, 0x63, 0x72, 0x79, 0x44, 0x6f, 0x75, 0x62, 0x74]
// generate_from_password returns a bcrypt hash string for the given password.
pub fn generate_from_password(password []byte, cost int) ?string {
pub fn generate_from_password(password []u8, cost int) ?string {
mut p := new_from_password(password, cost) or { return error('Error: $err') }
x := p.hash_u8()
return x.bytestr()
}
// compare_hash_and_password compares a bcrypt hashed password with its possible plaintext equivalent.
pub fn compare_hash_and_password(password []byte, hashed_password []byte) ? {
pub fn compare_hash_and_password(password []u8, hashed_password []u8) ? {
mut p := new_from_hash(hashed_password) or { return error('Error: $err') }
p.salt << `=`
p.salt << `=`
@@ -64,7 +64,7 @@ pub fn generate_salt() string {
}
// new_from_password converts a password into a Hashed struct with bcrypt.
fn new_from_password(password []byte, cost int) ?&Hashed {
fn new_from_password(password []u8, cost int) ?&Hashed {
mut cost_ := cost
if cost < bcrypt.min_cost {
cost_ = bcrypt.default_cost
@@ -86,7 +86,7 @@ fn new_from_password(password []byte, cost int) ?&Hashed {
}
// new_from_hash converts hashed data into a Hashed struct.
fn new_from_hash(hashed_secret []byte) ?&Hashed {
fn new_from_hash(hashed_secret []u8) ?&Hashed {
mut tmp := hashed_secret.clone()
if tmp.len < bcrypt.min_hash_size {
return error('hash too short')
@@ -106,8 +106,8 @@ fn new_from_hash(hashed_secret []byte) ?&Hashed {
}
// bcrypt hashes the password with the given cost and salt.
fn bcrypt(password []byte, cost int, salt []byte) ?[]byte {
mut cipher_data := []byte{len: 72 - bcrypt.magic_cipher_data.len, init: 0}
fn bcrypt(password []u8, cost int, salt []u8) ?[]u8 {
mut cipher_data := []u8{len: 72 - bcrypt.magic_cipher_data.len, init: 0}
cipher_data << bcrypt.magic_cipher_data
mut bf := expensive_blowfish_setup(password, u32(cost), salt) or { return err }
@@ -123,7 +123,7 @@ fn bcrypt(password []byte, cost int, salt []byte) ?[]byte {
}
// expensive_blowfish_setup generates a Blowfish cipher, given key, cost and salt.
fn expensive_blowfish_setup(key []byte, cost u32, salt []byte) ?&blowfish.Blowfish {
fn expensive_blowfish_setup(key []u8, cost u32, salt []u8) ?&blowfish.Blowfish {
csalt := base64.decode(salt.bytestr())
mut bf := blowfish.new_salted_cipher(key, csalt) or { return err }
@@ -140,8 +140,8 @@ fn expensive_blowfish_setup(key []byte, cost u32, salt []byte) ?&blowfish.Blowfi
}
// hash_u8 converts the hash value to a byte array.
fn (mut h Hashed) hash_u8() []byte {
mut arr := []byte{len: 65, init: 0}
fn (mut h Hashed) hash_u8() []u8 {
mut arr := []u8{len: 65, init: 0}
arr[0] = `$`
arr[1] = h.major[0]
mut n := 2
@@ -164,7 +164,7 @@ fn (mut h Hashed) hash_u8() []byte {
}
// decode_version decodes the bcrypt version.
fn (mut h Hashed) decode_version(sbytes []byte) ?int {
fn (mut h Hashed) decode_version(sbytes []u8) ?int {
if sbytes[0] != `$` {
return error("bcrypt hashes must start with '$'")
}
@@ -181,7 +181,7 @@ fn (mut h Hashed) decode_version(sbytes []byte) ?int {
}
// decode_cost extracts the value of cost and returns the next index in the array.
fn (mut h Hashed) decode_cost(sbytes []byte) ?int {
fn (mut h Hashed) decode_cost(sbytes []u8) ?int {
cost := sbytes[0..2].bytestr().int()
check_cost(cost) or { return err }
h.cost = cost
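
A hedged usage sketch of the bcrypt API after the rename (not part of this diff; the password is a placeholder and `bcrypt.default_cost` is assumed to be exported):

import crypto.bcrypt

fn bcrypt_example() ? {
	password := 'secret password'.bytes() // string.bytes() now yields []u8
	hashed := bcrypt.generate_from_password(password, bcrypt.default_cost) ?
	// compare_hash_and_password returns an error if the password does not match
	bcrypt.compare_hash_and_password(password, hashed.bytes()) ?
}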

View File

@@ -1,7 +1,7 @@
module blowfish
// expand_key performs a key expansion on the given Blowfish cipher.
pub fn expand_key(key []byte, mut bf Blowfish) {
pub fn expand_key(key []u8, mut bf Blowfish) {
mut j := 0
for i := 0; i < 18; i++ {
mut d := u32(0)
@@ -41,7 +41,7 @@ pub fn expand_key(key []byte, mut bf Blowfish) {
}
// expand_key_with_salt using salt to expand the key.
pub fn expand_key_with_salt(key []byte, salt []byte, mut bf Blowfish) {
pub fn expand_key_with_salt(key []u8, salt []u8, mut bf Blowfish) {
mut j := 0
for i := 0; i < 18; i++ {
bf.p[i] ^= get_next_word(key, &j)
@@ -128,7 +128,7 @@ fn setup_tables(l u32, r u32, mut bf Blowfish) []u32 {
// get_next_word returns the next big-endian u32 value from the byte
// slice at the given position in a circular manner, updating the position.
fn get_next_word(b []byte, pos &int) u32 {
fn get_next_word(b []u8, pos &int) u32 {
mut w := u32(0)
mut j := 0
unsafe {

View File

@@ -8,7 +8,7 @@ pub mut:
// new_cipher creates and returns a new Blowfish cipher.
// The key argument should be the Blowfish key, from 1 to 56 bytes.
pub fn new_cipher(key []byte) ?Blowfish {
pub fn new_cipher(key []u8) ?Blowfish {
mut bf := Blowfish{}
unsafe { vmemcpy(&bf.p[0], &p[0], int(sizeof(bf.p))) }
unsafe { vmemcpy(&bf.s[0], &s[0], int(sizeof(bf.s))) }
@@ -21,7 +21,7 @@ pub fn new_cipher(key []byte) ?Blowfish {
}
// new_salted_cipher returns a new Blowfish cipher that folds a salt into its key schedule.
pub fn new_salted_cipher(key []byte, salt []byte) ?Blowfish {
pub fn new_salted_cipher(key []u8, salt []u8) ?Blowfish {
if salt.len == 0 {
return new_cipher(key)
}
@@ -36,7 +36,7 @@ pub fn new_salted_cipher(key []byte, salt []byte) ?Blowfish {
}
// encrypt encrypts the 8-byte buffer src using the key k and stores the result in dst.
pub fn (mut bf Blowfish) encrypt(mut dst []byte, src []byte) {
pub fn (mut bf Blowfish) encrypt(mut dst []u8, src []u8) {
l := u32(src[0]) << 24 | u32(src[1]) << 16 | u32(src[2]) << 8 | u32(src[3])
r := u32(src[4]) << 24 | u32(src[5]) << 16 | u32(src[6]) << 8 | u32(src[7])
arr := setup_tables(l, r, mut bf)
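
A minimal sketch of the Blowfish block API with the new signatures (illustrative only; Blowfish operates on 8-byte blocks and the key here is a placeholder):

import crypto.blowfish

fn blowfish_example() ? {
	mut bf := blowfish.new_cipher('my blowfish key'.bytes()) ? // 1..56 byte key
	src := []u8{len: 8, init: 0}
	mut dst := []u8{len: 8}
	bf.encrypt(mut dst, src)
}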

View File

@@ -19,13 +19,13 @@ fn test_aes_cbc() {
println('test_aes_cbc ok')
}
fn aes_cbc_en(mut src []byte, key []byte, iv []byte) {
fn aes_cbc_en(mut src []u8, key []u8, iv []u8) {
block := aes.new_cipher(key)
mut mode := cipher.new_cbc(block, iv)
mode.encrypt_blocks(mut src, src.clone())
}
fn aes_cbc_de(mut src []byte, key []byte, iv []byte) {
fn aes_cbc_de(mut src []u8, key []u8, iv []u8) {
block := aes.new_cipher(key)
mut mode := cipher.new_cbc(block, iv)
mode.decrypt_blocks(mut src, src.clone())

View File

@@ -16,13 +16,13 @@ fn test_aes_cfb() {
println('test_aes_cfb ok')
}
fn aes_cfb_en(mut src []byte, key []byte, iv []byte) {
fn aes_cfb_en(mut src []u8, key []u8, iv []u8) {
block := aes.new_cipher(key)
mut mode := cipher.new_cfb_encrypter(block, iv)
mode.xor_key_stream(mut src, src.clone())
}
fn aes_cfb_de(mut src []byte, key []byte, iv []byte) {
fn aes_cfb_de(mut src []u8, key []u8, iv []u8) {
block := aes.new_cipher(key)
mut mode := cipher.new_cfb_decrypter(block, iv)
mode.xor_key_stream(mut src, src.clone())

View File

@@ -16,13 +16,13 @@ fn test_aes_ctr() {
println('test_aes_ctr ok')
}
fn aes_ctr_en(mut src []byte, key []byte, iv []byte) {
fn aes_ctr_en(mut src []u8, key []u8, iv []u8) {
block := aes.new_cipher(key)
mode := cipher.new_ctr(block, iv)
mode.xor_key_stream(mut src, src.clone())
}
fn aes_ctr_de(mut src []byte, key []byte, iv []byte) {
fn aes_ctr_de(mut src []u8, key []u8, iv []u8) {
block := aes.new_cipher(key)
mode := cipher.new_ctr(block, iv)
mode.xor_key_stream(mut src, src.clone())

View File

@@ -18,13 +18,13 @@ fn test_aes_ofb() {
println('test_aes_ofb ok')
}
fn aes_ofb_en(mut src []byte, key []byte, iv []byte) {
fn aes_ofb_en(mut src []u8, key []u8, iv []u8) {
block := aes.new_cipher(key)
mut mode := cipher.new_ofb(block, iv)
mode.xor_key_stream(mut src, src.clone())
}
fn aes_ofb_de(mut src []byte, key []byte, iv []byte) {
fn aes_ofb_de(mut src []u8, key []u8, iv []u8) {
block := aes.new_cipher(key)
mut mode := cipher.new_ofb(block, iv)
mode.xor_key_stream(mut src, src.clone())

View File

@@ -15,24 +15,24 @@ struct Cbc {
mut:
b Block
block_size int
iv []byte
tmp []byte
iv []u8
tmp []u8
}
// internal
fn new_des_cbc(b Block, iv []byte) Cbc {
fn new_des_cbc(b Block, iv []u8) Cbc {
return Cbc{
b: b
block_size: b.block_size
iv: iv.clone()
tmp: []byte{len: b.block_size}
tmp: []u8{len: b.block_size}
}
}
// new_cbc returns a `Cbc` which encrypts in cipher block chaining
// mode, using the given Block. The length of iv must be the same as the
// Block's block size.
pub fn new_cbc(b Block, iv []byte) Cbc {
pub fn new_cbc(b Block, iv []u8) Cbc {
if iv.len != b.block_size {
panic('crypto.cipher.new_cbc_encrypter: IV length must equal block size')
}
@@ -41,7 +41,7 @@ pub fn new_cbc(b Block, iv []byte) Cbc {
// encrypt_blocks encrypts the blocks in `src_` to `dst_`.
// Please note: `dst_` is mutable for performance reasons.
pub fn (mut x Cbc) encrypt_blocks(mut dst_ []byte, src_ []byte) {
pub fn (mut x Cbc) encrypt_blocks(mut dst_ []u8, src_ []u8) {
unsafe {
mut dst := *dst_
mut src := src_
@@ -75,7 +75,7 @@ pub fn (mut x Cbc) encrypt_blocks(mut dst_ []byte, src_ []byte) {
// decrypt_blocks decrypts the blocks in `src` to `dst`.
// Please note: `dst` is mutable for performance reasons.
pub fn (mut x Cbc) decrypt_blocks(mut dst []byte, src []byte) {
pub fn (mut x Cbc) decrypt_blocks(mut dst []u8, src []u8) {
if src.len % x.block_size != 0 {
panic('crypto.cipher: input not full blocks')
}
@@ -113,7 +113,7 @@ pub fn (mut x Cbc) decrypt_blocks(mut dst []byte, src []byte) {
x.tmp = x.iv
}
fn (mut x Cbc) set_iv(iv []byte) {
fn (mut x Cbc) set_iv(iv []u8) {
if iv.len != x.iv.len {
panic('cipher: incorrect length IV')
}

View File

@@ -13,8 +13,8 @@ import crypto.internal.subtle
struct Cfb {
mut:
b Block
next []byte
out []byte
next []u8
out []u8
out_used int
decrypt bool
@@ -23,26 +23,26 @@ mut:
// new_cfb_encrypter returns a `Cfb` which encrypts with cipher feedback mode,
// using the given Block. The iv must be the same length as the Block's block
// size
pub fn new_cfb_encrypter(b Block, iv []byte) Cfb {
pub fn new_cfb_encrypter(b Block, iv []u8) Cfb {
return new_cfb(b, iv, false)
}
// new_cfb_decrypter returns a `Cfb` which decrypts with cipher feedback mode,
// using the given Block. The iv must be the same length as the Block's block
// size
pub fn new_cfb_decrypter(b Block, iv []byte) Cfb {
pub fn new_cfb_decrypter(b Block, iv []u8) Cfb {
return new_cfb(b, iv, true)
}
fn new_cfb(b Block, iv []byte, decrypt bool) Cfb {
fn new_cfb(b Block, iv []u8, decrypt bool) Cfb {
block_size := b.block_size
if iv.len != block_size {
panic('cipher.new_cfb: IV length must be equal block size')
}
mut x := Cfb{
b: b
out: []byte{len: b.block_size}
next: []byte{len: b.block_size}
out: []u8{len: b.block_size}
next: []u8{len: b.block_size}
out_used: block_size
decrypt: decrypt
}
@@ -50,7 +50,7 @@ fn new_cfb(b Block, iv []byte, decrypt bool) Cfb {
return x
}
pub fn (mut x Cfb) xor_key_stream(mut dst_ []byte, src_ []byte) {
pub fn (mut x Cfb) xor_key_stream(mut dst_ []u8, src_ []u8) {
unsafe {
mut dst := *dst_
mut src := src_

View File

@@ -8,9 +8,9 @@ module cipher
// extend that capability to streams of blocks.
interface Block {
block_size int // block_size returns the cipher's block size.
encrypt(mut dst []byte, src []byte) // Encrypt encrypts the first block in src into dst.
encrypt(mut dst []u8, src []u8) // Encrypt encrypts the first block in src into dst.
// Dst and src must overlap entirely or not at all.
decrypt(mut dst []byte, src []byte) // Decrypt decrypts the first block in src into dst.
decrypt(mut dst []u8, src []u8) // Decrypt decrypts the first block in src into dst.
// Dst and src must overlap entirely or not at all.
}
@@ -26,14 +26,14 @@ interface Stream {
// Multiple calls to xor_key_stream behave as if the concatenation of
// the src buffers was passed in a single run. That is, Stream
// maintains state and does not reset at each xor_key_stream call.
xor_key_stream(mut dst []byte, src []byte)
xor_key_stream(mut dst []u8, src []u8)
}
// A BlockMode represents a block cipher running in a block-based mode (CBC,
// ECB etc).
interface BlockMode {
block_size int // block_size returns the mode's block size.
crypt_blocks(mut dst []byte, src []byte) // crypt_blocks encrypts or decrypts a number of blocks. The length of
crypt_blocks(mut dst []u8, src []u8) // crypt_blocks encrypts or decrypts a number of blocks. The length of
// src must be a multiple of the block size. Dst and src must overlap
// entirely or not at all.
//
@@ -48,8 +48,8 @@ interface BlockMode {
// Utility routines
// fn dup(p []byte) []byte {
// q := make([]byte, p.len)
// fn dup(p []u8) []u8 {
// q := make([]u8, p.len)
// copy(mut q, p)
// return q
// }

View File

@@ -16,27 +16,27 @@ import crypto.internal.subtle
struct Ctr {
mut:
b Block
next []byte
out []byte
next []u8
out []u8
out_used int
}
// new_ctr returns a Ctr which encrypts/decrypts using the given Block in
// counter mode. The length of iv must be the same as the Block's block size.
pub fn new_ctr(b Block, iv []byte) Ctr {
pub fn new_ctr(b Block, iv []u8) Ctr {
block_size := b.block_size
if iv.len != block_size {
panic('cipher.new_ctr: IV length must equal the block size')
}
return Ctr{
b: b
out: []byte{len: b.block_size}
out: []u8{len: b.block_size}
next: iv.clone()
out_used: block_size
}
}
pub fn (x &Ctr) xor_key_stream(mut dst_ []byte, src_ []byte) {
pub fn (x &Ctr) xor_key_stream(mut dst_ []u8, src_ []u8) {
unsafe {
mut dst := *dst_
mut src := src_

View File

@@ -29,25 +29,25 @@ fn test_des_cbc() {
println('test_des_cbc ok')
}
fn des_cbc_en(mut src []byte, key []byte, iv []byte) {
fn des_cbc_en(mut src []u8, key []u8, iv []u8) {
block := des.new_cipher(key)
mut mode := cipher.new_cbc(block, iv)
mode.encrypt_blocks(mut src, src.clone())
}
fn des_cbc_de(mut src []byte, key []byte, iv []byte) {
fn des_cbc_de(mut src []u8, key []u8, iv []u8) {
block := des.new_cipher(key)
mut mode := cipher.new_cbc(block, iv)
mode.decrypt_blocks(mut src, src.clone())
}
fn triple_des_cbc_en(mut src []byte, key []byte, iv []byte) {
fn triple_des_cbc_en(mut src []u8, key []u8, iv []u8) {
block := des.new_triple_des_cipher(key)
mut mode := cipher.new_cbc(block, iv)
mode.encrypt_blocks(mut src, src.clone())
}
fn triple_des_cbc_de(mut src []byte, key []byte, iv []byte) {
fn triple_des_cbc_de(mut src []u8, key []u8, iv []u8) {
block := des.new_triple_des_cipher(key)
mut mode := cipher.new_cbc(block, iv)
mode.decrypt_blocks(mut src, src.clone())

View File

@@ -29,25 +29,25 @@ fn test_des_cfb() {
println('test_des_cfb ok')
}
fn des_cfb_en(mut src []byte, key []byte, iv []byte) {
fn des_cfb_en(mut src []u8, key []u8, iv []u8) {
block := des.new_cipher(key)
mut mode := cipher.new_cfb_encrypter(block, iv)
mode.xor_key_stream(mut src, src.clone())
}
fn des_cfb_de(mut src []byte, key []byte, iv []byte) {
fn des_cfb_de(mut src []u8, key []u8, iv []u8) {
block := des.new_cipher(key)
mut mode := cipher.new_cfb_decrypter(block, iv)
mode.xor_key_stream(mut src, src.clone())
}
fn triple_des_cfb_en(mut src []byte, key []byte, iv []byte) {
fn triple_des_cfb_en(mut src []u8, key []u8, iv []u8) {
block := des.new_triple_des_cipher(key)
mut mode := cipher.new_cfb_encrypter(block, iv)
mode.xor_key_stream(mut src, src.clone())
}
fn triple_des_cfb_de(mut src []byte, key []byte, iv []byte) {
fn triple_des_cfb_de(mut src []u8, key []u8, iv []u8) {
block := des.new_triple_des_cipher(key)
mut mode := cipher.new_cfb_decrypter(block, iv)
mode.xor_key_stream(mut src, src.clone())

View File

@@ -29,25 +29,25 @@ fn test_des_ctr() {
println('test_des_ctr ok')
}
fn des_ctr_en(mut src []byte, key []byte, iv []byte) {
fn des_ctr_en(mut src []u8, key []u8, iv []u8) {
block := des.new_cipher(key)
mode := cipher.new_ctr(block, iv)
mode.xor_key_stream(mut src, src.clone())
}
fn des_ctr_de(mut src []byte, key []byte, iv []byte) {
fn des_ctr_de(mut src []u8, key []u8, iv []u8) {
block := des.new_cipher(key)
mode := cipher.new_ctr(block, iv)
mode.xor_key_stream(mut src, src.clone())
}
fn triple_des_ctr_en(mut src []byte, key []byte, iv []byte) {
fn triple_des_ctr_en(mut src []u8, key []u8, iv []u8) {
block := des.new_triple_des_cipher(key)
mode := cipher.new_ctr(block, iv)
mode.xor_key_stream(mut src, src.clone())
}
fn triple_des_ctr_de(mut src []byte, key []byte, iv []byte) {
fn triple_des_ctr_de(mut src []u8, key []u8, iv []u8) {
block := des.new_triple_des_cipher(key)
mode := cipher.new_ctr(block, iv)
mode.xor_key_stream(mut src, src.clone())

View File

@@ -29,25 +29,25 @@ fn test_des_ofb() {
println('test_des_ofb ok')
}
fn des_ofb_en(mut src []byte, key []byte, iv []byte) {
fn des_ofb_en(mut src []u8, key []u8, iv []u8) {
block := des.new_cipher(key)
mut mode := cipher.new_ofb(block, iv)
mode.xor_key_stream(mut src, src.clone())
}
fn des_ofb_de(mut src []byte, key []byte, iv []byte) {
fn des_ofb_de(mut src []u8, key []u8, iv []u8) {
block := des.new_cipher(key)
mut mode := cipher.new_ofb(block, iv)
mode.xor_key_stream(mut src, src.clone())
}
fn triple_des_ofb_en(mut src []byte, key []byte, iv []byte) {
fn triple_des_ofb_en(mut src []u8, key []u8, iv []u8) {
block := des.new_triple_des_cipher(key)
mut mode := cipher.new_ofb(block, iv)
mode.xor_key_stream(mut src, src.clone())
}
fn triple_des_ofb_de(mut src []byte, key []byte, iv []byte) {
fn triple_des_ofb_de(mut src []u8, key []u8, iv []u8) {
block := des.new_triple_des_cipher(key)
mut mode := cipher.new_ofb(block, iv)
mode.xor_key_stream(mut src, src.clone())

View File

@@ -12,30 +12,30 @@ import crypto.internal.subtle
struct Ofb {
mut:
b Block
next []byte
out []byte
next []u8
out []u8
out_used int
}
// new_ofb returns an Ofb that encrypts or decrypts using the block cipher b
// in output feedback mode. The initialization vector iv's length must be equal
// to b's block size.
pub fn new_ofb(b Block, iv []byte) Ofb {
pub fn new_ofb(b Block, iv []u8) Ofb {
block_size := b.block_size
if iv.len != block_size {
panic('cipher.new_ofb: IV length must be equal block size')
}
mut x := Ofb{
b: b
out: []byte{len: b.block_size}
next: []byte{len: b.block_size}
out: []u8{len: b.block_size}
next: []u8{len: b.block_size}
out_used: block_size
}
copy(mut x.next, iv)
return x
}
pub fn (mut x Ofb) xor_key_stream(mut dst_ []byte, src_ []byte) {
pub fn (mut x Ofb) xor_key_stream(mut dst_ []u8, src_ []u8) {
unsafe {
mut dst := *dst_
mut src := src_

View File

@@ -6,7 +6,7 @@ module cipher
// NOTE: Implement other versions (joe-c)
// xor_bytes xors the bytes in a and b. The destination should have enough
// space, otherwise xor_bytes will panic. Returns the number of bytes xor'd.
pub fn xor_bytes(mut dst []byte, a []byte, b []byte) int {
pub fn xor_bytes(mut dst []u8, a []u8, b []u8) int {
mut n := a.len
if b.len < n {
n = b.len
@@ -20,7 +20,7 @@ pub fn xor_bytes(mut dst []byte, a []byte, b []byte) int {
// safe_xor_bytes XORs the first `n` bytes of `a` and `b` into `dst`.
// Please note: `n` must be smaller than or equal to the length of `a` and `b`.
pub fn safe_xor_bytes(mut dst []byte, a []byte, b []byte, n int) {
pub fn safe_xor_bytes(mut dst []u8, a []u8, b []u8, n int) {
for i in 0 .. n {
dst[i] = a[i] ^ b[i]
}
@@ -28,6 +28,6 @@ pub fn safe_xor_bytes(mut dst []byte, a []byte, b []byte, n int) {
// xor_words XORs multiples of 4 or 8 bytes (depending on architecture.)
// The slice arguments `a` and `b` are assumed to be of equal length.
pub fn xor_words(mut dst []byte, a []byte, b []byte) {
pub fn xor_words(mut dst []u8, a []u8, b []u8) {
safe_xor_bytes(mut dst, a, b, b.len)
}
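
A small worked example of xor_bytes with the []u8 signature (values chosen purely for illustration):

import crypto.cipher

fn xor_example() {
	a := [u8(0x0f), 0xf0, 0xaa]
	b := [u8(0xff), 0x0f, 0x55]
	mut dst := []u8{len: a.len}
	n := cipher.xor_bytes(mut dst, a, b)
	assert n == 3
	assert dst == [u8(0xf0), 0xff, 0xff] // per-byte XOR of a and b
}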

View File

@@ -23,7 +23,7 @@ fn feistel(ll u32, rr u32, k0 u64, k1 u64) (u32, u32) {
return l, r
}
fn crypt_block(subkeys []u64, mut dst []byte, src []byte, decrypt bool) {
fn crypt_block(subkeys []u64, mut dst []u8, src []u8, decrypt bool) {
mut b := binary.big_endian_u64(src)
b = permute_initial_block(b)
@@ -51,17 +51,17 @@ fn crypt_block(subkeys []u64, mut dst []byte, src []byte, decrypt bool) {
}
// Encrypt one block from src into dst, using the subkeys.
pub fn encrypt_block(subkeys []u64, mut dst []byte, src []byte) {
pub fn encrypt_block(subkeys []u64, mut dst []u8, src []u8) {
crypt_block(subkeys, mut dst, src, false)
}
// Decrypt one block from src into dst, using the subkeys.
fn decrypt_block(subkeys []u64, mut dst []byte, src []byte) {
fn decrypt_block(subkeys []u64, mut dst []u8, src []u8) {
crypt_block(subkeys, mut dst, src, true)
}
// general purpose function to perform DES block permutations
fn permute_block(src u64, permutation []byte) u64 {
fn permute_block(src u64, permutation []u8) u64 {
mut block := u64(0)
for position, n in permutation {
bit := (src >> u64(u8(n))) & 1

View File

@@ -25,7 +25,7 @@ mut:
}
// new_cipher creates and returns a new cipher.Block.
pub fn new_cipher(key []byte) cipher.Block {
pub fn new_cipher(key []u8) cipher.Block {
if key.len != 8 {
panic('crypto.aes: invalid key size')
}
@@ -36,7 +36,7 @@ pub fn new_cipher(key []byte) cipher.Block {
}
// creates 16 56-bit subkeys from the original key
fn (mut c DesCipher) generate_subkeys(key_bytes []byte) {
fn (mut c DesCipher) generate_subkeys(key_bytes []u8) {
// feistel_box_once.do(initFeistel_box)
// apply PC1 permutation to key
@@ -56,7 +56,7 @@ fn (mut c DesCipher) generate_subkeys(key_bytes []byte) {
}
}
pub fn (c &DesCipher) encrypt(mut dst []byte, src []byte) {
pub fn (c &DesCipher) encrypt(mut dst []u8, src []u8) {
if src.len < des.block_size {
panic('crypto/des: input not full block')
}
@@ -69,7 +69,7 @@ pub fn (c &DesCipher) encrypt(mut dst []byte, src []byte) {
encrypt_block(c.subkeys[..], mut dst, src)
}
pub fn (c &DesCipher) decrypt(mut dst []byte, src []byte) {
pub fn (c &DesCipher) decrypt(mut dst []u8, src []u8) {
if src.len < des.block_size {
panic('crypto/des: input not full block')
}
@@ -83,7 +83,7 @@ pub fn (c &DesCipher) decrypt(mut dst []byte, src []byte) {
}
// new_triple_des_cipher creates and returns a new cipher.Block.
pub fn new_triple_des_cipher(key []byte) cipher.Block {
pub fn new_triple_des_cipher(key []u8) cipher.Block {
if key.len != 24 {
panic('crypto.des: invalid key size')
}
@@ -94,7 +94,7 @@ pub fn new_triple_des_cipher(key []byte) cipher.Block {
return c
}
pub fn (c &TripleDesCipher) encrypt(mut dst []byte, src []byte) {
pub fn (c &TripleDesCipher) encrypt(mut dst []u8, src []u8) {
if src.len < des.block_size {
panic('crypto/des: input not full block')
}
@@ -130,7 +130,7 @@ pub fn (c &TripleDesCipher) encrypt(mut dst []byte, src []byte) {
binary.big_endian_put_u64(mut dst, permute_final_block(pre_output))
}
pub fn (c &TripleDesCipher) decrypt(mut dst []byte, src []byte) {
pub fn (c &TripleDesCipher) decrypt(mut dst []u8, src []u8) {
if src.len < des.block_size {
panic('crypto/des: input not full block')
}

View File

@@ -29,22 +29,22 @@ fn test_des() {
println('test_des ok')
}
fn des_en(mut src []byte, key []byte, iv []byte) {
fn des_en(mut src []u8, key []u8, iv []u8) {
block := des.new_cipher(key)
block.encrypt(mut src, src.clone())
}
fn des_de(mut src []byte, key []byte, iv []byte) {
fn des_de(mut src []u8, key []u8, iv []u8) {
block := des.new_cipher(key)
block.decrypt(mut src, src.clone())
}
fn triple_des_en(mut src []byte, key []byte, iv []byte) {
fn triple_des_en(mut src []u8, key []u8, iv []u8) {
block := des.new_triple_des_cipher(key)
block.encrypt(mut src, src.clone())
}
fn triple_des_de(mut src []byte, key []byte, iv []byte) {
fn triple_des_de(mut src []u8, key []u8, iv []u8) {
block := des.new_triple_des_cipher(key)
inbuf := src.clone()
block.decrypt(mut src, inbuf)

View File

@@ -18,39 +18,39 @@ pub const signature_size = 64
pub const seed_size = 32
// `PublicKey` is an Ed25519 public key.
pub type PublicKey = []byte
pub type PublicKey = []u8
// equal reports whether p and x have the same value.
pub fn (p PublicKey) equal(x []byte) bool {
pub fn (p PublicKey) equal(x []u8) bool {
return subtle.constant_time_compare(p, PublicKey(x)) == 1
}
// PrivateKey is an Ed25519 private key.
pub type PrivateKey = []byte
pub type PrivateKey = []u8
// seed returns the private key seed corresponding to priv.
// RFC 8032's private keys correspond to seeds in this module.
pub fn (priv PrivateKey) seed() []byte {
mut seed := []byte{len: ed25519.seed_size}
pub fn (priv PrivateKey) seed() []u8 {
mut seed := []u8{len: ed25519.seed_size}
copy(mut seed, priv[..32])
return seed
}
// public_key returns the []byte corresponding to priv.
// public_key returns the []u8 corresponding to priv.
pub fn (priv PrivateKey) public_key() PublicKey {
assert priv.len == ed25519.private_key_size
mut publickey := []byte{len: ed25519.public_key_size}
mut publickey := []u8{len: ed25519.public_key_size}
copy(mut publickey, priv[32..])
return PublicKey(publickey)
}
// currently x is not `crypto.PrivateKey`
pub fn (priv PrivateKey) equal(x []byte) bool {
pub fn (priv PrivateKey) equal(x []u8) bool {
return subtle.constant_time_compare(priv, PrivateKey(x)) == 1
}
// sign signs the given message with priv.
pub fn (priv PrivateKey) sign(message []byte) ?[]byte {
pub fn (priv PrivateKey) sign(message []u8) ?[]u8 {
/*
if opts.HashFunc() != crypto.Hash(0) {
return nil, errors.New("ed25519: cannot sign hashed message")
@@ -60,13 +60,13 @@ pub fn (priv PrivateKey) sign(message []byte) ?[]byte {
}
// sign signs the message with privatekey and returns a signature.
pub fn sign(privatekey PrivateKey, message []byte) ?[]byte {
mut signature := []byte{len: ed25519.signature_size}
pub fn sign(privatekey PrivateKey, message []u8) ?[]u8 {
mut signature := []u8{len: ed25519.signature_size}
sign_generic(mut signature, privatekey, message) ?
return signature
}
fn sign_generic(mut signature []byte, privatekey []byte, message []byte) ? {
fn sign_generic(mut signature []u8, privatekey []u8, message []u8) ? {
if privatekey.len != ed25519.private_key_size {
panic('ed25519: bad private key length: $privatekey.len')
}
@@ -81,7 +81,7 @@ fn sign_generic(mut signature []byte, privatekey []byte, message []byte) ? {
mh.write(prefix) ?
mh.write(message) ?
mut msg_digest := []byte{cap: sha512.size}
mut msg_digest := []u8{cap: sha512.size}
msg_digest = mh.sum(msg_digest)
mut r := edwards25519.new_scalar()
@@ -95,7 +95,7 @@ fn sign_generic(mut signature []byte, privatekey []byte, message []byte) ? {
kh.write(publickey) ?
kh.write(message) ?
mut hram_digest := []byte{cap: sha512.size}
mut hram_digest := []u8{cap: sha512.size}
hram_digest = kh.sum(hram_digest)
mut k := edwards25519.new_scalar()
k.set_uniform_bytes(hram_digest) ?
@@ -108,7 +108,7 @@ fn sign_generic(mut signature []byte, privatekey []byte, message []byte) ? {
}
// verify reports whether sig is a valid signature of message by publickey.
pub fn verify(publickey PublicKey, message []byte, sig []byte) ?bool {
pub fn verify(publickey PublicKey, message []u8, sig []u8) ?bool {
if publickey.len != ed25519.public_key_size {
return error('ed25519: bad public key length: $publickey.len')
}
@@ -125,7 +125,7 @@ pub fn verify(publickey PublicKey, message []byte, sig []byte) ?bool {
kh.write(publickey) ?
kh.write(message) ?
mut hram_digest := []byte{cap: sha512.size}
mut hram_digest := []u8{cap: sha512.size}
hram_digest = kh.sum(hram_digest)
mut k := edwards25519.new_scalar()
@@ -148,7 +148,7 @@ pub fn generate_key() ?(PublicKey, PrivateKey) {
mut seed := rand.bytes(ed25519.seed_size) ?
privatekey := new_key_from_seed(seed)
mut publickey := []byte{len: ed25519.public_key_size}
mut publickey := []u8{len: ed25519.public_key_size}
copy(mut publickey, privatekey[32..])
return publickey, privatekey
@@ -156,14 +156,14 @@ pub fn generate_key() ?(PublicKey, PrivateKey) {
// new_key_from_seed calculates a private key from a seed. private keys of RFC 8032
// correspond to seeds in this module
pub fn new_key_from_seed(seed []byte) PrivateKey {
pub fn new_key_from_seed(seed []u8) PrivateKey {
// Outline the function body so that the returned key can be stack-allocated.
mut privatekey := []byte{len: ed25519.private_key_size}
mut privatekey := []u8{len: ed25519.private_key_size}
new_key_from_seed_generic(mut privatekey, seed)
return PrivateKey(privatekey)
}
fn new_key_from_seed_generic(mut privatekey []byte, seed []byte) {
fn new_key_from_seed_generic(mut privatekey []u8, seed []u8) {
if seed.len != ed25519.seed_size {
panic('ed25519: bad seed length: $seed.len')
}
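
An end-to-end sketch of the ed25519 API with []u8 messages (not part of this diff; the message is a placeholder):

import crypto.ed25519

fn ed25519_example() ? {
	publickey, privatekey := ed25519.generate_key() ?
	message := 'hello vlang'.bytes()
	sig := ed25519.sign(privatekey, message) ?
	ok := ed25519.verify(publickey, message, sig) ?
	assert ok
}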

View File

@@ -19,7 +19,7 @@ const contents = os.read_lines(os.join_path(testdata, 'sign.input')) or { panic(
/*
struct ZeroReader {}
fn (z ZeroReader) read(mut buf []byte) ?int {
fn (z ZeroReader) read(mut buf []u8) ?int {
for i, _ in buf {
buf[i] = 0
}
@@ -96,7 +96,7 @@ fn works_check_on_sign_input_string(item string) bool {
// assert pubkey.len == public_key_size
sig = sig[..ed25519.signature_size]
mut priv := []byte{len: ed25519.private_key_size}
mut priv := []u8{len: ed25519.private_key_size}
copy(mut priv[..], privbytes)
copy(mut priv[32..], pubkey)
@@ -181,7 +181,7 @@ fn test_input_from_djb_ed25519_crypto_sign_input_without_syncpool() ? {
assert pubkey.len == public_key_size
sig = sig[..signature_size]
mut priv := []byte{len: ed25519.private_key_size}
mut priv := []u8{len: ed25519.private_key_size}
copy(mut priv[..], privbytes)
copy(mut priv[32..], pubkey)

View File

@@ -624,7 +624,7 @@ pub fn (mut v Element) set(a Element) Element {
// Consistent with RFC 7748, the most significant bit (the high bit of the
// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
// are accepted. Note that this is laxer than specified by RFC 8032.
pub fn (mut v Element) set_bytes(x []byte) ?Element {
pub fn (mut v Element) set_bytes(x []u8) ?Element {
if x.len != 32 {
return error('edwards25519: invalid edwards25519 element input size')
}
@@ -650,19 +650,19 @@ pub fn (mut v Element) set_bytes(x []byte) ?Element {
}
// bytes returns the canonical 32-byte little-endian encoding of v.
pub fn (mut v Element) bytes() []byte {
pub fn (mut v Element) bytes() []u8 {
// This function is outlined to make the allocations inline in the caller
// rather than happen on the heap.
// out := v.bytes_generic()
return v.bytes_generic()
}
fn (mut v Element) bytes_generic() []byte {
mut out := []byte{len: 32}
fn (mut v Element) bytes_generic() []u8 {
mut out := []u8{len: 32}
v = v.reduce()
mut buf := []byte{len: 8}
mut buf := []u8{len: 8}
idxs := [v.l0, v.l1, v.l2, v.l3, v.l4]
for i, l in idxs {
bits_offset := i * 51
@@ -725,7 +725,7 @@ pub fn (mut v Element) mult_32(x Element, y u32) Element {
return v
}
fn swap_endianness(mut buf []byte) []byte {
fn swap_endianness(mut buf []u8) []u8 {
for i := 0; i < buf.len / 2; i++ {
buf[i], buf[buf.len - i - 1] = buf[buf.len - i - 1], buf[i]
}

View File

@@ -230,7 +230,7 @@ fn test_set_bytes_reduced() {
struct FeRTTest {
mut:
fe Element
b []byte
b []u8
}
fn test_set_bytes_from_dalek_test_vectors() ? {
@@ -395,7 +395,7 @@ fn test_bytes_big_equivalence() ? {
assert fe == fe1
mut buf := []byte{len: 32} // pad with zeroes
mut buf := []u8{len: 32} // pad with zeroes
fedtobig := fe1.to_big_integer()
mut fedbig_bytes, _ := fedtobig.bytes()
copy(mut buf, fedbig_bytes) // does not need to do swap_endianness

View File

@@ -86,14 +86,14 @@ fn is_on_curve(x Element, y Element, z Element, t Element) bool {
// Note that bytes_montgomery only encodes the u-coordinate, so v and -v encode
// to the same value. If v is the identity point, bytes_montgomery returns 32
// zero bytes, analogously to the X25519 function.
pub fn (mut v Point) bytes_montgomery() []byte {
pub fn (mut v Point) bytes_montgomery() []u8 {
// This function is outlined to make the allocations inline in the caller
// rather than happen on the heap.
mut buf := [32]byte{}
return v.bytes_montgomery_generic(mut buf)
}
fn (mut v Point) bytes_montgomery_generic(mut buf [32]byte) []byte {
fn (mut v Point) bytes_montgomery_generic(mut buf [32]byte) []u8 {
check_initialized(v)
// RFC 7748, Section 4.1 provides the bilinear map to calculate the

View File

@@ -70,7 +70,7 @@ const (
loworder_bytes = hex.decode(loworder_string) or { panic(err) }
)
fn fn_cofactor(mut data []byte) bool {
fn fn_cofactor(mut data []u8) bool {
if data.len != 64 {
panic('data.len should be 64')
}

View File

@@ -117,7 +117,7 @@ fn (mut v ProjectiveP2) zero() ProjectiveP2 {
// Note that set_bytes accepts all non-canonical encodings of valid points.
// That is, it follows decoding rules that match most implementations in
// the ecosystem rather than RFC 8032.
pub fn (mut v Point) set_bytes(x []byte) ?Point {
pub fn (mut v Point) set_bytes(x []u8) ?Point {
// Specifically, the non-canonical encodings that are accepted are
// 1) the ones where the edwards25519 element is not reduced (see the
// (*edwards25519.Element).set_bytes docs) and
@@ -201,14 +201,14 @@ fn (mut v AffineCached) zero() AffineCached {
// bytes returns the canonical 32-byte encoding of v, according to RFC 8032,
// Section 5.1.2.
pub fn (mut v Point) bytes() []byte {
pub fn (mut v Point) bytes() []u8 {
// This function is outlined to make the allocations inline in the caller
// rather than happen on the heap.
mut buf := [32]byte{}
return v.bytes_generic(mut buf)
}
fn (mut v Point) bytes_generic(mut buf [32]byte) []byte {
fn (mut v Point) bytes_generic(mut buf [32]byte) []u8 {
check_initialized(v)
mut zinv := Element{}
@@ -226,7 +226,7 @@ fn (mut v Point) bytes_generic(mut buf [32]byte) []byte {
return out
}
fn copy_field_element(mut buf [32]byte, mut v Element) []byte {
fn copy_field_element(mut buf [32]byte, mut v Element) []u8 {
// this fails in test
/*
copy(mut buf[..], v.bytes())
@@ -234,7 +234,7 @@ fn copy_field_element(mut buf [32]byte, mut v Element) []byte {
*/
// this passes the test
mut out := []byte{len: 32}
mut out := []u8{len: 32}
for i := 0; i <= buf.len - 1; i++ {
out[i] = v.bytes()[i]
}

View File

@@ -86,11 +86,11 @@ pub fn (mut s Scalar) set(x Scalar) Scalar {
// set_uniform_bytes sets s to a uniformly distributed value given 64 uniformly
// distributed random bytes. If x is not of the right length, set_uniform_bytes
// returns an error, and the receiver is unchanged.
pub fn (mut s Scalar) set_uniform_bytes(x []byte) ?Scalar {
pub fn (mut s Scalar) set_uniform_bytes(x []u8) ?Scalar {
if x.len != 64 {
return error('edwards25519: invalid set_uniform_bytes input length')
}
mut wide_bytes := []byte{len: 64}
mut wide_bytes := []u8{len: 64}
copy(mut wide_bytes, x)
// for i, item in x {
// wide_bytes[i] = item
@@ -102,11 +102,11 @@ pub fn (mut s Scalar) set_uniform_bytes(x []byte) ?Scalar {
// set_canonical_bytes sets s = x, where x is a 32-byte little-endian encoding of
// s, and returns s. If x is not a canonical encoding of s, set_canonical_bytes
// returns an error, and the receiver is unchanged.
pub fn (mut s Scalar) set_canonical_bytes(x []byte) ?Scalar {
pub fn (mut s Scalar) set_canonical_bytes(x []u8) ?Scalar {
if x.len != 32 {
return error('invalid scalar length')
}
// mut bb := []byte{len:32}
// mut bb := []u8{len:32}
mut ss := Scalar{}
for i, item in x {
ss.s[i] = item
@@ -152,7 +152,7 @@ fn is_reduced(s Scalar) bool {
// expected as long as it is applied to points on the prime order subgroup, like
// in Ed25519. In fact, it is lost to history why RFC 8032 adopted the
// irrelevant RFC 7748 clamping, but it is now required for compatibility.
pub fn (mut s Scalar) set_bytes_with_clamping(x []byte) ?Scalar {
pub fn (mut s Scalar) set_bytes_with_clamping(x []u8) ?Scalar {
// The description above omits the purpose of the high bits of the clamping
// for brevity, but those are also lost to reductions, and are also
// irrelevant to edwards25519 as they protect against a specific
@@ -161,7 +161,7 @@ pub fn (mut s Scalar) set_bytes_with_clamping(x []byte) ?Scalar {
return error('edwards25519: invalid set_bytes_with_clamping input length')
}
mut wide_bytes := []byte{len: 64, cap: 64}
mut wide_bytes := []u8{len: 64, cap: 64}
copy(mut wide_bytes, x)
// for i, item in x {
// wide_bytes[i] = item
@@ -174,8 +174,8 @@ pub fn (mut s Scalar) set_bytes_with_clamping(x []byte) ?Scalar {
}
// bytes returns the canonical 32-byte little-endian encoding of s.
pub fn (mut s Scalar) bytes() []byte {
mut buf := []byte{len: 32}
pub fn (mut s Scalar) bytes() []u8 {
mut buf := []u8{len: 32}
copy(mut buf, s.s[..])
return buf
}
@@ -187,14 +187,14 @@ pub fn (s Scalar) equal(t Scalar) int {
// sc_mul_add and sc_reduce are ported from the public domain, “ref10”
// implementation of ed25519 from SUPERCOP.
fn load3(inp []byte) i64 {
fn load3(inp []u8) i64 {
mut r := i64(inp[0])
r |= i64(inp[1]) * 256 // << 8
r |= i64(inp[2]) * 65536 // << 16
return r
}
fn load4(inp []byte) i64 {
fn load4(inp []u8) i64 {
mut r := i64(inp[0])
r |= i64(inp[1]) * 256
r |= i64(inp[2]) * 65536
@@ -653,7 +653,7 @@ fn sc_mul_add(mut s [32]byte, a [32]byte, b [32]byte, c [32]byte) {
// Output:
// s[0]+256*s[1]+...+256^31*s[31] = s mod l
// where l = 2^252 + 27742317777372353535851937790883648493.
fn sc_reduce(mut out [32]byte, mut s []byte) {
fn sc_reduce(mut out [32]byte, mut s []u8) {
assert out.len == 32
assert s.len == 64
mut s0 := 2097151 & load3(s[..])

View File

@@ -134,7 +134,7 @@ fn test_scalar_set_uniform_bytes() ? {
assert m.abs_cmp(scbig) == 0 // NEED FIX
}
fn bigint_from_le_bytes(b []byte) big.Integer {
fn bigint_from_le_bytes(b []u8) big.Integer {
mut bc := b.clone()
buf := swap_endianness(mut bc) // WITHOUT THIS, some test would fail
bg := big.integer_from_bytes(buf)

View File

@@ -5,14 +5,14 @@ module hmac
import crypto.internal.subtle
const (
ipad = []byte{len: 256, init: 0x36} // TODO is 256 enough??
opad = []byte{len: 256, init: 0x5C}
npad = []byte{len: 256, init: 0}
ipad = []u8{len: 256, init: 0x36} // TODO is 256 enough??
opad = []u8{len: 256, init: 0x5C}
npad = []u8{len: 256, init: 0}
)
// new returns a HMAC byte array, depending on the hash algorithm used.
pub fn new(key []byte, data []byte, hash_func fn ([]byte) []byte, blocksize int) []byte {
mut b_key := []byte{}
pub fn new(key []u8, data []u8, hash_func fn ([]u8) []u8, blocksize int) []u8 {
mut b_key := []u8{}
if key.len <= blocksize {
b_key = key.clone() // TODO: remove .clone() once https://github.com/vlang/v/issues/6604 gets fixed
} else {
@@ -21,13 +21,13 @@ pub fn new(key []byte, data []byte, hash_func fn ([]byte) []byte, blocksize int)
if b_key.len < blocksize {
b_key << hmac.npad[..blocksize - b_key.len]
}
mut inner := []byte{}
mut inner := []u8{}
for i, b in hmac.ipad[..blocksize] {
inner << b_key[i] ^ b
}
inner << data
inner_hash := hash_func(inner)
mut outer := []byte{cap: b_key.len}
mut outer := []u8{cap: b_key.len}
for i, b in hmac.opad[..blocksize] {
outer << b_key[i] ^ b
}
@@ -39,6 +39,6 @@ pub fn new(key []byte, data []byte, hash_func fn ([]byte) []byte, blocksize int)
// equal compares 2 MACs for equality, without leaking timing info.
// Note: if the lengths of the 2 MACs are different, probably a completely different
// hash function was used to generate them => no useful timing information.
pub fn equal(mac1 []byte, mac2 []byte) bool {
pub fn equal(mac1 []u8, mac2 []u8) bool {
return subtle.constant_time_compare(mac1, mac2) == 1
}
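
A usage sketch of hmac.new with the []u8-based hash function type (illustrative; assumes `sha256.sum` and `sha256.block_size` are exported as in vlib):

import crypto.hmac
import crypto.sha256

fn hmac_example() {
	key := 'my secret key'.bytes()
	data := 'the message'.bytes()
	mac := hmac.new(key, data, sha256.sum, sha256.block_size)
	// recomputing with the same inputs yields an equal MAC
	assert hmac.equal(mac, hmac.new(key, data, sha256.sum, sha256.block_size))
}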

View File

@@ -8,7 +8,7 @@ module subtle
// NOTE: require unsafe in future
// any_overlap reports whether x and y share memory at any (not necessarily
// corresponding) index. The memory beyond the slice length is ignored.
pub fn any_overlap(x []byte, y []byte) bool {
pub fn any_overlap(x []u8, y []u8) bool {
// NOTE: Remember to come back to this (joe-c)
return x.len > 0 && y.len > 0 && // &x.data[0] <= &y.data[y.len-1] &&
// &y.data[0] <= &x.data[x.len-1]
@@ -21,7 +21,7 @@ pub fn any_overlap(x []byte, y []byte) bool {
//
// inexact_overlap can be used to implement the requirements of the crypto/cipher
// AEAD, Block, BlockMode and Stream interfaces.
pub fn inexact_overlap(x []byte, y []byte) bool {
pub fn inexact_overlap(x []u8, y []u8) bool {
if x.len == 0 || y.len == 0 || unsafe { &x[0] == &y[0] } {
return false
}

View File

@@ -19,7 +19,7 @@ pub fn constant_time_select(v int, x int, y int) int {
// constant_time_compare returns 1 when x and y have equal contents.
// The runtime of this function is proportional of the length of x and y.
// It is *NOT* dependent on their content.
pub fn constant_time_compare(x []byte, y []byte) int {
pub fn constant_time_compare(x []u8, y []u8) int {
if x.len != y.len {
return 0
}
@@ -33,7 +33,7 @@ pub fn constant_time_compare(x []byte, y []byte) int {
// constant_time_copy copies the contents of y into x, when v == 1.
// When v == 0, x is left unchanged. this function is undefined, when
// v takes any other value
pub fn constant_time_copy(v int, mut x []byte, y []byte) {
pub fn constant_time_copy(v int, mut x []u8, y []u8) {
if x.len != y.len {
panic('subtle: arrays have different lengths')
}

View File

@@ -28,14 +28,14 @@ const (
struct Digest {
mut:
s []u32
x []byte
x []u8
nx int
len u64
}
fn (mut d Digest) reset() {
d.s = []u32{len: (4)}
d.x = []byte{len: md5.block_size}
d.x = []u8{len: md5.block_size}
d.s[0] = u32(md5.init0)
d.s[1] = u32(md5.init1)
d.s[2] = u32(md5.init2)
@@ -52,7 +52,7 @@ pub fn new() &Digest {
}
// write writes the contents of `p_` to the internal hash representation.
pub fn (mut d Digest) write(p_ []byte) ?int {
pub fn (mut d Digest) write(p_ []u8) ?int {
unsafe {
mut p := p_
nn := p.len
@@ -87,7 +87,7 @@ pub fn (mut d Digest) write(p_ []byte) ?int {
}
// sum returns the md5 sum of the bytes in `b_in`.
pub fn (d &Digest) sum(b_in []byte) []byte {
pub fn (d &Digest) sum(b_in []u8) []u8 {
// Make a copy of d so that caller can keep writing and summing.
mut d0 := *d
hash := d0.checksum()
@@ -99,14 +99,14 @@ pub fn (d &Digest) sum(b_in []byte) []byte {
}
// checksum returns the byte checksum of the `Digest`.
pub fn (mut d Digest) checksum() []byte {
pub fn (mut d Digest) checksum() []u8 {
// Append 0x80 to the end of the message and then append zeros
// until the length is a multiple of 56 bytes. Finally append
// 8 bytes representing the message length in bits.
//
// 1 byte end marker :: 0-63 padding bytes :: 8 byte length
// tmp := [1 + 63 + 8]byte{0x80}
mut tmp := []byte{len: (1 + 63 + 8)}
mut tmp := []u8{len: (1 + 63 + 8)}
tmp[0] = 0x80
pad := ((55 - d.len) % 64) // calculate number of padding bytes
binary.little_endian_put_u64(mut tmp[1 + pad..], d.len << 3) // append length in bits
@@ -116,7 +116,7 @@ pub fn (mut d Digest) checksum() []byte {
if d.nx != 0 {
panic('d.nx != 0')
}
mut digest := []byte{len: md5.size}
mut digest := []u8{len: md5.size}
binary.little_endian_put_u32(mut digest, d.s[0])
binary.little_endian_put_u32(mut digest[4..], d.s[1])
binary.little_endian_put_u32(mut digest[8..], d.s[2])
@@ -125,13 +125,13 @@ pub fn (mut d Digest) checksum() []byte {
}
// sum returns the MD5 checksum of the data.
pub fn sum(data []byte) []byte {
pub fn sum(data []u8) []u8 {
mut d := new()
d.write(data) or { panic(err) }
return d.checksum()
}
fn block(mut dig Digest, p []byte) {
fn block(mut dig Digest, p []u8) {
// For now just use block_generic until we have specific
// architecture optimized versions
block_generic(mut dig, p)
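
For reference, the md5 convenience API after the change (input is a placeholder string):

import crypto.md5

fn md5_example() {
	digest := md5.sum('abc'.bytes()) // []u8 of 16 bytes
	assert digest.len == 16
	println(digest.hex())
}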

View File

@@ -11,7 +11,7 @@ module md5
import math.bits
import encoding.binary
fn block_generic(mut dig Digest, p []byte) {
fn block_generic(mut dig Digest, p []u8) {
// load state
mut a := dig.s[0]
mut b := dig.s[1]

View File

@@ -18,6 +18,6 @@ pub fn (err ReadError) msg() string {
// See also rand.bytes(), if you do not need really random bytes,
// but instead pseudo random ones, from a pseudo random generator
// that can be seeded, and that is usually faster.
pub fn bytes(bytes_needed int) ?[]byte {
pub fn bytes(bytes_needed int) ?[]u8 {
return read(bytes_needed)
}
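
A minimal sketch of reading secure random bytes with the new []u8 return type:

import crypto.rand

fn rand_example() ? {
	salt := rand.bytes(16) ? // 16 cryptographically secure random bytes
	assert salt.len == 16
}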

View File

@@ -11,8 +11,8 @@ module rand
fn C.SecRandomCopyBytes(rnd C.SecRandomRef, count usize, bytes voidptr) int
// read returns an array of `bytes_needed` random bytes read from the OS.
pub fn read(bytes_needed int) ?[]byte {
mut buffer := []byte{len: bytes_needed}
pub fn read(bytes_needed int) ?[]u8 {
mut buffer := []u8{len: bytes_needed}
status := C.SecRandomCopyBytes(C.SecRandomRef(0), bytes_needed, buffer.data)
if status != 0 {
return IError(&ReadError{})

View File

@@ -4,6 +4,6 @@
module rand
// read returns an array of `bytes_needed` random bytes read from the OS.
pub fn read(bytes_needed int) ?[]byte {
pub fn read(bytes_needed int) ?[]u8 {
return error('rand.read is not implemented on this platform')
}

View File

@@ -10,7 +10,7 @@ const (
)
// read returns an array of `bytes_needed` random bytes read from the OS.
pub fn read(bytes_needed int) ?[]byte {
pub fn read(bytes_needed int) ?[]u8 {
mut buffer := unsafe { vcalloc_noscan(bytes_needed) }
mut bytes_read := 0
mut remaining_bytes := bytes_needed

View File

@@ -13,7 +13,7 @@ const (
)
// read returns an array of `bytes_needed` random bytes read from the OS.
pub fn read(bytes_needed int) ?[]byte {
pub fn read(bytes_needed int) ?[]u8 {
mut buffer := unsafe { malloc_noscan(bytes_needed) }
mut bytes_read := 0
mut remaining_bytes := bytes_needed

View File

@@ -14,8 +14,8 @@ const (
)
// read returns an array of `bytes_needed` random bytes read from the OS.
pub fn read(bytes_needed int) ?[]byte {
mut buffer := []byte{len: bytes_needed}
pub fn read(bytes_needed int) ?[]u8 {
mut buffer := []u8{len: bytes_needed}
// use bcrypt_use_system_preferred_rng because we passed null as algo
status := C.BCryptGenRandom(0, buffer.data, bytes_needed, rand.bcrypt_use_system_preferred_rng)
if status != rand.status_success {

View File

@@ -35,7 +35,7 @@ pub fn int_u64(max u64) ?u64 {
return n
}
fn bytes_to_u64(b []byte) []u64 {
fn bytes_to_u64(b []u8) []u64 {
ws := 64 / 8
mut z := []u64{len: ((b.len + ws - 1) / ws)}
mut i := b.len

View File

@@ -22,7 +22,7 @@ mut:
// new_cipher creates and returns a new Cipher. The key argument should be the
// RC4 key, at least 1 byte and at most 256 bytes.
pub fn new_cipher(key []byte) ?Cipher {
pub fn new_cipher(key []u8) ?Cipher {
if key.len < 1 || key.len > 256 {
return error('crypto.rc4: invalid key size ' + key.len.str())
}
@@ -56,7 +56,7 @@ pub fn (mut c Cipher) reset() {
// xor_key_stream sets dst to the result of XORing src with the key stream.
// Dst and src must overlap entirely or not at all.
pub fn (mut c Cipher) xor_key_stream(mut dst []byte, mut src []byte) {
pub fn (mut c Cipher) xor_key_stream(mut dst []u8, mut src []u8) {
if src.len == 0 {
return
}
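
An RC4 key-stream sketch with the []u8 signatures (key and plaintext are placeholders; note that both dst and src are mutable here):

import crypto.rc4

fn rc4_example() ? {
	mut c := rc4.new_cipher('not a strong key'.bytes()) ?
	mut src := 'plaintext'.bytes()
	mut dst := []u8{len: src.len}
	c.xor_key_stream(mut dst, mut src)
}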

View File

@@ -30,13 +30,13 @@ const (
struct Digest {
mut:
h []u32
x []byte
x []u8
nx int
len u64
}
fn (mut d Digest) reset() {
d.x = []byte{len: sha1.chunk}
d.x = []u8{len: sha1.chunk}
d.h = []u32{len: (5)}
d.h[0] = u32(sha1.init0)
d.h[1] = u32(sha1.init1)
@@ -56,7 +56,7 @@ pub fn new() &Digest {
// write writes the contents of `p_` to the internal hash representation.
[manualfree]
pub fn (mut d Digest) write(p_ []byte) ?int {
pub fn (mut d Digest) write(p_ []u8) ?int {
nn := p_.len
unsafe {
mut p := p_
@@ -91,7 +91,7 @@ pub fn (mut d Digest) write(p_ []byte) ?int {
}
// sum returns a copy of the generated sum of the bytes in `b_in`.
pub fn (d &Digest) sum(b_in []byte) []byte {
pub fn (d &Digest) sum(b_in []u8) []u8 {
// Make a copy of d so that caller can keep writing and summing.
mut d0 := *d
hash := d0.checksum()
@@ -103,10 +103,10 @@ pub fn (d &Digest) sum(b_in []byte) []byte {
}
// checksum returns the current byte checksum of the `Digest`.
pub fn (mut d Digest) checksum() []byte {
pub fn (mut d Digest) checksum() []u8 {
mut len := d.len
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
mut tmp := []byte{len: (64)}
mut tmp := []u8{len: (64)}
tmp[0] = 0x80
if int(len) % 64 < 56 {
d.write(tmp[..56 - int(len) % 64]) or { panic(err) }
@@ -117,7 +117,7 @@ pub fn (mut d Digest) checksum() []byte {
len <<= 3
binary.big_endian_put_u64(mut tmp, len)
d.write(tmp[..8]) or { panic(err) }
mut digest := []byte{len: sha1.size}
mut digest := []u8{len: sha1.size}
binary.big_endian_put_u32(mut digest, d.h[0])
binary.big_endian_put_u32(mut digest[4..], d.h[1])
binary.big_endian_put_u32(mut digest[8..], d.h[2])
@@ -127,13 +127,13 @@ pub fn (mut d Digest) checksum() []byte {
}
// sum returns the SHA-1 checksum of the bytes passed in `data`.
pub fn sum(data []byte) []byte {
pub fn sum(data []u8) []u8 {
mut d := new()
d.write(data) or { panic(err) }
return d.checksum()
}
fn block(mut dig Digest, p []byte) {
fn block(mut dig Digest, p []u8) {
// For now just use block_generic until we have specific
// architecture optimized versions
block_generic(mut dig, p)

View File

@@ -15,7 +15,7 @@ const (
_k3 = 0xCA62C1D6
)
fn block_generic(mut dig Digest, p_ []byte) {
fn block_generic(mut dig Digest, p_ []u8) {
unsafe {
mut p := p_
mut w := []u32{len: (16)}

View File

@@ -42,7 +42,7 @@ const (
struct Digest {
mut:
h []u32
x []byte
x []u8
nx int
len u64
is224 bool // mark if this digest is SHA-224
@@ -50,7 +50,7 @@ mut:
fn (mut d Digest) reset() {
d.h = []u32{len: (8)}
d.x = []byte{len: sha256.chunk}
d.x = []u8{len: sha256.chunk}
if !d.is224 {
d.h[0] = u32(sha256.init0)
d.h[1] = u32(sha256.init1)
@@ -90,7 +90,7 @@ pub fn new224() &Digest {
}
// write writes the contents of `p_` to the internal hash representation.
pub fn (mut d Digest) write(p_ []byte) ?int {
pub fn (mut d Digest) write(p_ []u8) ?int {
unsafe {
mut p := p_
nn := p.len
@@ -125,7 +125,7 @@ pub fn (mut d Digest) write(p_ []byte) ?int {
}
// sum returns the SHA256 or SHA224 checksum of digest with the data.
pub fn (d &Digest) sum(b_in []byte) []byte {
pub fn (d &Digest) sum(b_in []u8) []u8 {
// Make a copy of d so that caller can keep writing and summing.
mut d0 := *d
hash := d0.checksum()
@@ -143,10 +143,10 @@ pub fn (d &Digest) sum(b_in []byte) []byte {
}
// checksum returns the current byte checksum of the Digest.
pub fn (mut d Digest) checksum() []byte {
pub fn (mut d Digest) checksum() []u8 {
mut len := d.len
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
mut tmp := []byte{len: (64)}
mut tmp := []u8{len: (64)}
tmp[0] = 0x80
if int(len) % 64 < 56 {
d.write(tmp[..56 - int(len) % 64]) or { panic(err) }
@@ -160,7 +160,7 @@ pub fn (mut d Digest) checksum() []byte {
if d.nx != 0 {
panic('d.nx != 0')
}
mut digest := []byte{len: sha256.size}
mut digest := []u8{len: sha256.size}
binary.big_endian_put_u32(mut digest, d.h[0])
binary.big_endian_put_u32(mut digest[4..], d.h[1])
binary.big_endian_put_u32(mut digest[8..], d.h[2])
@@ -176,28 +176,28 @@ pub fn (mut d Digest) checksum() []byte {
// sum returns the SHA256 checksum of the bytes in `data`.
// Example: assert sha256.sum('V'.bytes()).len > 0 == true
pub fn sum(data []byte) []byte {
pub fn sum(data []u8) []u8 {
return sum256(data)
}
// sum256 returns the SHA256 checksum of the data.
pub fn sum256(data []byte) []byte {
pub fn sum256(data []u8) []u8 {
mut d := new()
d.write(data) or { panic(err) }
return d.checksum()
}
// sum224 returns the SHA224 checksum of the data.
pub fn sum224(data []byte) []byte {
pub fn sum224(data []u8) []u8 {
mut d := new224()
d.write(data) or { panic(err) }
sum := d.checksum()
mut sum224 := []byte{len: sha256.size224}
mut sum224 := []u8{len: sha256.size224}
copy(mut sum224, sum[..sha256.size224])
return sum224
}
fn block(mut dig Digest, p []byte) {
fn block(mut dig Digest, p []u8) {
// For now just use block_generic until we have specific
// architecture optimized versions
block_generic(mut dig, p)
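
A short sketch of the sha256 convenience functions with []u8 input and output:

import crypto.sha256

fn sha256_example() {
	data := 'V'.bytes()
	assert sha256.sum(data).len == 32 // SHA-256 digest
	assert sha256.sum224(data).len == 28 // SHA-224 digest
}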

View File

@@ -78,7 +78,7 @@ const (
]
)
fn block_generic(mut dig Digest, p_ []byte) {
fn block_generic(mut dig Digest, p_ []u8) {
unsafe {
mut p := p_
mut w := []u32{len: (64)}

View File

@@ -64,7 +64,7 @@ const (
struct Digest {
mut:
h []u64
x []byte
x []u8
nx int
len u64
function crypto.Hash
@@ -72,7 +72,7 @@ mut:
fn (mut d Digest) reset() {
d.h = []u64{len: (8)}
d.x = []byte{len: sha512.chunk}
d.x = []u8{len: sha512.chunk}
match d.function {
.sha384 {
d.h[0] = sha512.init0_384
@@ -149,7 +149,7 @@ fn new384() &Digest {
}
// write writes the contents of `p_` to the internal hash representation.
pub fn (mut d Digest) write(p_ []byte) ?int {
pub fn (mut d Digest) write(p_ []u8) ?int {
unsafe {
mut p := p_
nn := p.len
@@ -184,7 +184,7 @@ pub fn (mut d Digest) write(p_ []byte) ?int {
}
// sum returns the SHA512 or SHA384 checksum of digest with the data bytes in `b_in`
pub fn (d &Digest) sum(b_in []byte) []byte {
pub fn (d &Digest) sum(b_in []u8) []u8 {
// Make a copy of d so that caller can keep writing and summing.
mut d0 := *d
hash := d0.checksum()
@@ -215,10 +215,10 @@ pub fn (d &Digest) sum(b_in []byte) []byte {
}
// checksum returns the current byte checksum of the Digest.
pub fn (mut d Digest) checksum() []byte {
pub fn (mut d Digest) checksum() []u8 {
// Padding. Add a 1 bit and 0 bits until 112 bytes mod 128.
mut len := d.len
mut tmp := []byte{len: (128)}
mut tmp := []u8{len: (128)}
tmp[0] = 0x80
if int(len) % 128 < 112 {
d.write(tmp[..112 - int(len) % 128]) or { panic(err) }
@@ -233,7 +233,7 @@ pub fn (mut d Digest) checksum() []byte {
if d.nx != 0 {
panic('d.nx != 0')
}
mut digest := []byte{len: sha512.size}
mut digest := []u8{len: sha512.size}
binary.big_endian_put_u64(mut digest, d.h[0])
binary.big_endian_put_u64(mut digest[8..], d.h[1])
binary.big_endian_put_u64(mut digest[16..], d.h[2])
@@ -248,43 +248,43 @@ pub fn (mut d Digest) checksum() []byte {
}
// sum512 returns the SHA512 checksum of the data.
pub fn sum512(data []byte) []byte {
pub fn sum512(data []u8) []u8 {
mut d := new_digest(.sha512)
d.write(data) or { panic(err) }
return d.checksum()
}
// sum384 returns the SHA384 checksum of the data.
pub fn sum384(data []byte) []byte {
pub fn sum384(data []u8) []u8 {
mut d := new_digest(.sha384)
d.write(data) or { panic(err) }
sum := d.checksum()
mut sum384 := []byte{len: sha512.size384}
mut sum384 := []u8{len: sha512.size384}
copy(mut sum384, sum[..sha512.size384])
return sum384
}
// sum512_224 returns the Sum512/224 checksum of the data.
pub fn sum512_224(data []byte) []byte {
pub fn sum512_224(data []u8) []u8 {
mut d := new_digest(.sha512_224)
d.write(data) or { panic(err) }
sum := d.checksum()
mut sum224 := []byte{len: sha512.size224}
mut sum224 := []u8{len: sha512.size224}
copy(mut sum224, sum[..sha512.size224])
return sum224
}
// sum512_256 returns the Sum512/256 checksum of the data.
pub fn sum512_256(data []byte) []byte {
pub fn sum512_256(data []u8) []u8 {
mut d := new_digest(.sha512_256)
d.write(data) or { panic(err) }
sum := d.checksum()
mut sum256 := []byte{len: sha512.size256}
mut sum256 := []u8{len: sha512.size256}
copy(mut sum256, sum[..sha512.size256])
return sum256
}
fn block(mut dig Digest, p []byte) {
fn block(mut dig Digest, p []u8) {
// For now just use block_generic until we have specific
// architecture optimized versions
block_generic(mut dig, p)
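
And the analogous sha512 family sketch:

import crypto.sha512

fn sha512_example() {
	data := 'V'.bytes()
	assert sha512.sum512(data).len == 64 // SHA-512
	assert sha512.sum384(data).len == 48 // SHA-384
	assert sha512.sum512_256(data).len == 32 // SHA-512/256
}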

View File

@@ -32,7 +32,7 @@ const (
u64(0x4cc5d4becb3e42b6), u64(0x597f299cfc657e2a), u64(0x5fcb6fab3ad6faec), u64(0x6c44198c4a475817)]
)
fn block_generic(mut dig Digest, p_ []byte) {
fn block_generic(mut dig Digest, p_ []u8) {
unsafe {
mut p := p_
mut w := []u64{len: (80)}