diff --git a/vlib/builtin/array.v b/vlib/builtin/array.v
index 665f215ada..db447248e7 100644
--- a/vlib/builtin/array.v
+++ b/vlib/builtin/array.v
@@ -898,7 +898,9 @@ pub fn copy(mut dst []u8, src []u8) int {
 // reduce executes a given reducer function on each element of the array,
 // resulting in a single output value.
 // NOTE: It exists as a method on `[]int` types only.
-// See also `arrays.fold`.
+// See also `arrays.reduce` (same name) and `arrays.fold` (same functionality).
+[deprecated: 'use arrays.fold instead; this method is less flexible than arrays.fold']
+[deprecated_after: '2022-10-11']
 pub fn (a []int) reduce(iter fn (int, int) int, accum_start int) int {
 	mut accum_ := accum_start
 	for i in a {
diff --git a/vlib/v/gen/native/amd64.v b/vlib/v/gen/native/amd64.v
index e196659102..dd5bf243ab 100644
--- a/vlib/v/gen/native/amd64.v
+++ b/vlib/v/gen/native/amd64.v
@@ -1,5 +1,6 @@
 module native
 
+import arrays
 import v.ast
 import v.token
 
@@ -1675,12 +1676,12 @@ pub fn (mut g Gen) call_fn_amd64(node ast.CallExpr) {
 		}
 		stack_args << i
 	}
-	reg_size := reg_args.map((args_size[it] + 7) / 8).reduce(fn (a int, b int) int {
-		return a + b
-	}, 0)
-	stack_size := stack_args.map((args_size[it] + 7) / 8).reduce(fn (a int, b int) int {
-		return a + b
-	}, 0)
+	reg_size := arrays.fold(reg_args.map((args_size[it] + 7) / 8), 0, fn (acc int, elem int) int {
+		return acc + elem
+	})
+	stack_size := arrays.fold(stack_args.map((args_size[it] + 7) / 8), 0, fn (acc int, elem int) int {
+		return acc + elem
+	})
 	// not aligned now XOR pushed args will be odd
 	is_16bit_aligned := if mut g.code_gen is Amd64 { g.code_gen.is_16bit_aligned } else { true } != (stack_size % 2 == 1)
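
Migration note for callers of the deprecated method: a minimal sketch, assuming the vlib `arrays` module (the `nums` array and `main` wrapper are illustrative, not part of this patch). The argument order flips: `reduce` took the accumulator start as a trailing argument, while `arrays.fold` takes the array, then the initial value, then the callback.

import arrays

fn main() {
	nums := [10, 20, 30]
	// deprecated: the []int-only method takes the accumulator start last
	sum_old := nums.reduce(fn (a int, b int) int {
		return a + b
	}, 0)
	// replacement: arrays.fold takes the array, the initial value, then the callback
	sum_new := arrays.fold(nums, 0, fn (acc int, elem int) int {
		return acc + elem
	})
	assert sum_old == sum_new // both evaluate to 60
}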
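
For context on the quantity being folded in amd64.v: `(args_size[it] + 7) / 8` is integer ceiling division, converting an argument's byte size into the number of 8-byte stack slots it occupies, so `stack_size % 2 == 1` detects a stack that ends 8-byte but not 16-byte aligned. A small worked sketch (the byte sizes are illustrative, not taken from the compiler):

fn slots(size int) int {
	// ceiling division: bytes -> number of 8-byte stack slots
	return (size + 7) / 8
}

fn main() {
	assert slots(4) == 1 // a 4-byte argument still occupies a full slot
	assert slots(8) == 1
	assert slots(12) == 2 // rounds up: 12 bytes need two slots
	// 1 + 2 = 3 slots pushed = 24 bytes: 8-byte but not 16-byte aligned
	stack_size := slots(4) + slots(12)
	assert stack_size % 2 == 1
}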