1
0
mirror of https://github.com/vlang/v.git synced 2023-08-10 21:13:21 +03:00

native: split codegen into multiple files and refactor assign statement generation (#18546)

This commit is contained in:
Spydr 2023-06-25 08:47:10 +02:00 committed by GitHub
parent e9960339f9
commit 31f68eea94
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 967 additions and 1001 deletions

View File

@ -1,3 +1,6 @@
// Copyright (c) 2019-2023 Alexander Medvednikov. All rights reserved.
// Use of this source code is governed by an MIT license
// that can be found in the LICENSE file.
module native
import arrays
@ -648,6 +651,7 @@ fn (mut c Amd64) mov_reg_to_var(var Var, r Register, config VarConfig) {
far_var_offset := if is_far_var { 0x40 } else { 0 }
match reg {
.eax, .rax, .r8 { c.g.write8(0x45 + far_var_offset) }
.rbx { c.g.write8(0x5d + far_var_offset) }
.edi, .rdi { c.g.write8(0x7d + far_var_offset) }
.rsi { c.g.write8(0x75 + far_var_offset) }
.rdx { c.g.write8(0x55 + far_var_offset) }
@ -1942,267 +1946,150 @@ fn (mut c Amd64) gen_concat_expr(node ast.ConcatExpr) {
c.lea_var_to_reg(c.main_reg(), var.offset)
}
// !!!!!
// TODO: this *must* be done better and platform independant
// !!!!!
fn (mut c Amd64) assign_right_expr(node ast.AssignStmt, i int, right ast.Expr, name string, ident ast.Ident) {
match right {
ast.IntegerLiteral {
// c.allocate_var(name, 4, right.val.int())
match node.op {
.plus_assign {
c.mov_var_to_reg(Amd64Register.rax, ident)
c.add(.rax, right.val.int())
// assign_struct_var copies a struct value into the stack slot of a local variable.
// Struct types bigger than a register are passed around as a pointer in rax, so the
// contents are dereferenced and copied chunk-wise: 8-byte steps first, then at most
// one 4-, 2- and 1-byte step for the remainder. `s` is the struct size in bytes.
fn (mut c Amd64) assign_struct_var(ident_var IdentVar, typ ast.Type, s int) {
	// struct types bigger are passed around as a pointer in rax.
	// we need to dereference and copy the contents one after the other
	if ident_var !is LocalVar {
		c.g.n_error('cannot assign struct to global var or register yet')
	}
	var := ident_var as LocalVar
	mut size := s
	mut offset := 0
	// copy whole 8-byte words while possible; rax is advanced through the source
	for size >= 8 {
		c.mov_deref(.rbx, .rax, ast.u64_type_idx)
		c.mov_reg_to_var(var, Amd64Register.rbx,
			offset: offset
			typ: ast.u64_type_idx
		)
		c.add(.rax, 8)
		size -= 8
		offset += 8
	}
	// after the loop size < 8, so each of the following steps can fire at most once
	if size >= 4 {
		c.mov_deref(.rbx, .rax, ast.u32_type_idx)
		c.mov_reg_to_var(var, Amd64Register.rbx,
			offset: offset
			typ: ast.u32_type_idx
		)
		c.add(.rax, 4)
		size -= 4
		offset += 4
	}
	if size >= 2 {
		c.mov_deref(.rbx, .rax, ast.u16_type_idx)
		c.mov_reg_to_var(var, Amd64Register.rbx,
			offset: offset
			typ: ast.u16_type_idx
		)
		c.add(.rax, 2)
		size -= 2
		offset += 2
	}
	if size == 1 {
		c.mov_deref(.rbx, .rax, ast.u8_type_idx)
		c.mov_reg_to_var(var, Amd64Register.rbx,
			offset: offset
			typ: ast.u8_type_idx
		)
		c.add(.rax, 1)
		size--
		offset++
	}
	// every byte of the struct must have been copied at this point
	assert size == 0
}
// assign_var stores the value currently held in the result register into `var`:
// xmm0 for pure float types, rax otherwise (for non-pointer structs rax holds a
// pointer to the data and the contents are copied via assign_struct_var).
fn (mut c Amd64) assign_var(var IdentVar, typ ast.Type) {
	size := c.g.get_type_size(typ)
	if typ.is_pure_float() {
		// float results live in xmm0
		match var {
			LocalVar { c.mov_ssereg_to_var(var as LocalVar, .xmm0) }
			GlobalVar { c.mov_ssereg_to_var(var as GlobalVar, .xmm0) }
			// Amd64Register { c.g.mov_ssereg(var as Amd64Register, .xmm0) }
			else {}
		}
	} else if c.g.table.sym(typ).info is ast.Struct && !typ.is_any_kind_of_pointer() {
		// struct value: rax points at the source data, copy it chunk-wise
		c.assign_struct_var(var, typ, size)
	} else if size in [1, 2, 4, 8] {
		// register-sized scalar: store straight from rax
		match var {
			LocalVar { c.mov_reg_to_var(var as LocalVar, Amd64Register.rax) }
			GlobalVar { c.mov_reg_to_var(var as GlobalVar, Amd64Register.rax) }
			Register { c.mov_reg(var as Amd64Register, Amd64Register.rax) }
		}
	} else {
		c.g.n_error('error assigning type ${typ} with size ${size}')
	}
}
fn (mut c Amd64) assign_int(node ast.AssignStmt, i int, name string, ident ast.Ident, int_lit ast.IntegerLiteral) {
match node.op {
.plus_assign {
c.mov_var_to_reg(Amd64Register.rax, ident)
c.add(.rax, int_lit.val.int())
c.mov_reg_to_var(ident, Amd64Register.rax)
}
.minus_assign {
c.mov_var_to_reg(Amd64Register.rax, ident)
c.sub(.rax, int_lit.val.int())
c.mov_reg_to_var(ident, Amd64Register.rax)
}
.mult_assign {
c.mov_var_to_reg(Amd64Register.rax, ident)
c.mov64(Amd64Register.rdx, int_lit.val.int())
c.mul_reg(.rax, .rdx)
c.mov_reg_to_var(ident, Amd64Register.rax)
}
.div_assign {
c.mov_var_to_reg(Amd64Register.rax, ident)
c.mov64(Amd64Register.rdx, int_lit.val.int())
c.div_reg(.rax, .rdx)
c.mov_reg_to_var(ident, Amd64Register.rax)
}
.decl_assign {
c.allocate_var(name, 8, int_lit.val.int())
}
.assign {
match node.left[i] {
ast.Ident {
c.mov(Amd64Register.rax, int_lit.val.int())
c.mov_reg_to_var(ident, Amd64Register.rax)
}
.minus_assign {
c.mov_var_to_reg(Amd64Register.rax, ident)
c.sub(.rax, right.val.int())
c.mov_reg_to_var(ident, Amd64Register.rax)
}
.mult_assign {
c.mov_var_to_reg(Amd64Register.rax, ident)
c.mov64(Amd64Register.rdx, right.val.int())
c.mul_reg(.rax, .rdx)
c.mov_reg_to_var(ident, Amd64Register.rax)
}
.div_assign {
c.mov_var_to_reg(Amd64Register.rax, ident)
c.mov64(Amd64Register.rdx, right.val.int())
c.div_reg(.rax, .rdx)
c.mov_reg_to_var(ident, Amd64Register.rax)
}
.decl_assign {
c.allocate_var(name, 8, right.val.int())
}
.assign {
// dump(c.g.typ(node.left_types[i]))
match node.left[i] {
ast.Ident {
// lname := '${node.left[i]}'
// c.g.expr(node.right[i])
c.mov(Amd64Register.rax, right.val.int())
c.mov_reg_to_var(ident, Amd64Register.rax)
}
else {
tn := node.left[i].type_name()
dump(node.left_types)
c.g.n_error('unhandled assign type: ${tn}')
}
}
}
else {
eprintln('ERROR 2')
dump(node)
tn := node.left[i].type_name()
dump(node.left_types)
c.g.n_error('unhandled assign type: ${tn}')
}
}
}
ast.Ident {
// eprintln('identr') dump(node) dump(right)
match node.op {
.plus_assign {
c.mov_var_to_reg(Amd64Register.rax, ident)
c.mov_var_to_reg(Amd64Register.rbx, right as ast.Ident)
c.add_reg(.rax, .rbx)
c.mov_reg_to_var(ident, Amd64Register.rax)
}
.minus_assign {
c.mov_var_to_reg(Amd64Register.rax, ident)
c.mov_var_to_reg(Amd64Register.rbx, right as ast.Ident)
c.sub_reg(.rax, .rbx)
c.mov_reg_to_var(ident, Amd64Register.rax)
}
.div_assign {
// this should be called when `a /= b` but it's not :?
c.mov_var_to_reg(Amd64Register.rax, ident)
c.mov_var_to_reg(Amd64Register.rbx, right as ast.Ident)
c.div_reg(.rax, .rbx)
c.mov_reg_to_var(ident, Amd64Register.rax)
}
.decl_assign {
typ := node.left_types[i]
if typ.is_number() || typ.is_any_kind_of_pointer() || typ.is_bool() {
c.allocate_var(name, c.g.get_type_size(typ), 0)
} else {
ts := c.g.table.sym(typ)
match ts.info {
ast.Struct {
c.g.allocate_by_type(name, typ)
}
else {}
}
}
var_ := c.g.get_var_from_ident(ident)
// TODO global var
right_var := c.g.get_var_from_ident(right) as LocalVar
match var_ {
LocalVar {
var := var_ as LocalVar
if var.typ.is_number() || var.typ.is_any_kind_of_pointer()
|| var.typ.is_bool() {
c.mov_var_to_reg(Amd64Register.rax, right as ast.Ident)
c.mov_reg_to_var(ident, Amd64Register.rax)
} else {
ts := c.g.table.sym(var.typ)
match ts.info {
ast.Struct {
size := c.g.get_type_size(var.typ)
if size >= 8 {
for offset in 0 .. size / 8 {
c.mov_var_to_reg(Amd64Register.rax, right_var,
offset: offset * 8
typ: ast.i64_type_idx
)
c.mov_reg_to_var(var, Amd64Register.rax,
offset: offset * 8
typ: ast.i64_type_idx
)
}
if size % 8 != 0 {
c.mov_var_to_reg(Amd64Register.rax, right_var,
offset: size - 8
typ: ast.i64_type_idx
)
c.mov_reg_to_var(var, Amd64Register.rax,
offset: size - 8
typ: ast.i64_type_idx
)
}
} else {
mut left_size := if size >= 4 {
c.mov_var_to_reg(Amd64Register.rax, right_var,
typ: ast.int_type_idx
)
c.mov_reg_to_var(var, Amd64Register.rax,
typ: ast.int_type_idx
)
size - 4
} else {
size
}
if left_size >= 2 {
c.mov_var_to_reg(Amd64Register.rax, right_var,
offset: size - left_size
typ: ast.i16_type_idx
)
c.mov_reg_to_var(var, Amd64Register.rax,
offset: size - left_size
typ: ast.i16_type_idx
)
left_size -= 2
}
if left_size == 1 {
c.mov_var_to_reg(Amd64Register.rax, right_var,
offset: size - left_size
typ: ast.i8_type_idx
)
c.mov_reg_to_var(var, Amd64Register.rax,
offset: size - left_size
typ: ast.i8_type_idx
)
}
}
}
else {
c.g.n_error('Unsupported variable type')
}
}
}
}
else {
c.g.n_error('Unsupported variable kind')
}
}
}
.assign {
var_ := c.g.get_var_from_ident(ident)
// TODO global var
right_var := c.g.get_var_from_ident(right) as LocalVar
match var_ {
LocalVar {
var := var_ as LocalVar
if var.typ.is_number() || var.typ.is_any_kind_of_pointer()
|| var.typ.is_bool() {
c.mov_var_to_reg(Amd64Register.rax, right as ast.Ident)
c.mov_reg_to_var(ident, Amd64Register.rax)
} else {
ts := c.g.table.sym(var.typ)
match ts.info {
ast.Struct {
size := c.g.get_type_size(var.typ)
if size >= 8 {
for offset in 0 .. size / 8 {
c.mov_var_to_reg(Amd64Register.rax, right_var,
offset: offset * 8
typ: ast.i64_type_idx
)
c.mov_reg_to_var(var, Amd64Register.rax,
offset: offset * 8
typ: ast.i64_type_idx
)
}
if size % 8 != 0 {
c.mov_var_to_reg(Amd64Register.rax, right_var,
offset: size - 8
typ: ast.i64_type_idx
)
c.mov_reg_to_var(var, Amd64Register.rax,
offset: size - 8
typ: ast.i64_type_idx
)
}
} else {
mut left_size := if size >= 4 {
c.mov_var_to_reg(Amd64Register.rax, right_var,
typ: ast.int_type_idx
)
c.mov_reg_to_var(var, Amd64Register.rax,
typ: ast.int_type_idx
)
size - 4
} else {
size
}
if left_size >= 2 {
c.mov_var_to_reg(Amd64Register.rax, right_var,
offset: size - left_size
typ: ast.i16_type_idx
)
c.mov_reg_to_var(var, Amd64Register.rax,
offset: size - left_size
typ: ast.i16_type_idx
)
left_size -= 2
}
if left_size == 1 {
c.mov_var_to_reg(Amd64Register.rax, right_var,
offset: size - left_size
typ: ast.i8_type_idx
)
c.mov_reg_to_var(var, Amd64Register.rax,
offset: size - left_size
typ: ast.i8_type_idx
)
}
}
}
else {
c.g.n_error('Unsupported variable type')
}
}
}
}
else {
c.g.n_error('Unsupported variable kind')
}
}
}
else {
eprintln('TODO: unhandled assign ident case')
dump(node)
}
}
// a += b
else {
c.g.n_error('unexpected assignment op ${node.op}')
}
}
}
fn (mut c Amd64) assign_right_expr(node ast.AssignStmt, i int, right ast.Expr, name string, ident ast.Ident) {
match right {
ast.IntegerLiteral {
c.assign_int(node, i, name, ident, right)
return
}
ast.StringLiteral {
dest := c.allocate_var(name, 8, 0)
ie := right as ast.StringLiteral
str := c.g.eval_str_lit_escape_codes(ie)
c.learel(Amd64Register.rsi, c.g.allocate_string(str, 3, .rel32))
c.mov_reg_to_var(LocalVar{dest, ast.u64_type_idx, name}, Amd64Register.rsi)
return
}
ast.StructInit {
match node.op {
@ -2214,33 +2101,95 @@ fn (mut c Amd64) assign_right_expr(node ast.AssignStmt, i int, right ast.Expr, n
c.g.n_error('Unexpected operator `${node.op}`')
}
}
return
}
ast.ArrayInit {
// check if array is empty
mut pos := c.g.allocate_array(name, 8, right.exprs.len)
// allocate array of right.exprs.len vars
for e in right.exprs {
match e {
ast.IntegerLiteral {
c.mov(Amd64Register.rax, e.val.int())
c.mov_reg_to_var(LocalVar{pos, ast.i64_type_idx, ''}, Amd64Register.rax)
pos += 8
}
ast.StringLiteral {
// TODO: use learel
str := c.g.eval_str_lit_escape_codes(e)
c.mov64(Amd64Register.rsi, c.g.allocate_string(str, 2, .abs64)) // for rsi its 2
c.mov_reg_to_var(LocalVar{pos, ast.u64_type_idx, ''}, Amd64Register.rsi)
pos += 8
}
else {
dump(e)
c.g.n_error('unhandled array init type')
}
match node.op {
.decl_assign {
c.g.allocate_by_type(name, right.typ)
c.init_array(ident, right)
}
else {
c.g.n_error('Unexpected operator `${node.op}`')
}
}
return
}
ast.IndexExpr {
ast.TypeOf {
c.g.gen_typeof_expr(right as ast.TypeOf, true)
c.mov_reg(Amd64Register.rsi, Amd64Register.rax)
return
}
ast.AtExpr {
dest := c.allocate_var(name, 8, 0)
c.learel(Amd64Register.rsi, c.g.allocate_string(c.g.comptime_at(right), 3,
.rel32))
c.mov_reg_to_var(LocalVar{dest, ast.u64_type_idx, name}, Amd64Register.rsi)
return
}
ast.IfExpr {
if right.is_comptime {
if stmts := c.g.comptime_conditional(right) {
for j, stmt in stmts {
if j + 1 != stmts.len {
c.g.stmt(stmt)
continue
}
if stmt is ast.ExprStmt {
c.assign_right_expr(node, i, stmt.expr, name, ident)
} else {
c.g.n_error('last stmt must be expr')
}
}
} else {
c.g.n_error('missing value for assignment')
}
return
}
}
else {}
}
left_type := node.left_types[i]
if node.op == .decl_assign {
c.g.allocate_by_type(name, left_type)
}
c.g.expr(right)
if node.op in [.assign, .decl_assign] {
var := c.g.get_var_from_ident(ident)
c.assign_var(var, left_type)
} else if left_type.is_pure_float() {
c.mov_var_to_ssereg(.xmm1, ident)
match node.op {
.plus_assign { c.add_sse(.xmm1, .xmm0, left_type) }
.minus_assign { c.sub_sse(.xmm1, .xmm0, left_type) }
.mult_assign { c.mul_sse(.xmm1, .xmm0, left_type) }
.div_assign { c.div_sse(.xmm1, .xmm0, left_type) }
else { c.g.n_error('unexpected assignment operator ${node.op} for fp') }
}
c.mov_ssereg_to_var(ident, .xmm1)
} else if left_type.is_int() {
c.mov_var_to_reg(Amd64Register.rbx, ident)
match node.op {
.plus_assign { c.add_reg(.rbx, .rax) }
.minus_assign { c.sub_reg(.rbx, .rax) }
.div_assign { c.div_reg(.rbx, .rax) }
.mult_assign { c.mul_reg(.rbx, .rax) }
else { c.g.n_error('unexpected assignment operator ${node.op} for int') }
}
c.mov_reg_to_var(ident, Amd64Register.rbx)
} else {
c.g.n_error('assignment arithmetic not implemented for type ${node.left_types[i]}')
}
/*
ast.IndexExpr {
// a := arr[0]
offset := c.allocate_var(name, c.g.get_sizeof_ident(ident), 0)
if c.g.pref.is_verbose {
@ -2268,73 +2217,7 @@ fn (mut c Amd64) assign_right_expr(node ast.AssignStmt, i int, right ast.Expr, n
// TODO check if out of bounds access
c.mov_reg_to_var(ident, Amd64Register.eax)
}
ast.StringLiteral {
dest := c.allocate_var(name, 8, 0)
ie := right as ast.StringLiteral
str := c.g.eval_str_lit_escape_codes(ie)
c.learel(Amd64Register.rsi, c.g.allocate_string(str, 3, .rel32))
c.mov_reg_to_var(LocalVar{dest, ast.u64_type_idx, name}, Amd64Register.rsi)
}
ast.GoExpr {
c.g.v_error('threads not implemented for the native backend', node.pos)
}
ast.TypeOf {
c.g.gen_typeof_expr(right as ast.TypeOf, true)
c.mov_reg(Amd64Register.rsi, Amd64Register.rax)
}
ast.AtExpr {
dest := c.allocate_var(name, 8, 0)
c.learel(Amd64Register.rsi, c.g.allocate_string(c.g.comptime_at(right), 3,
.rel32))
c.mov_reg_to_var(LocalVar{dest, ast.u64_type_idx, name}, Amd64Register.rsi)
}
else {
if right is ast.IfExpr && (right as ast.IfExpr).is_comptime {
if stmts := c.g.comptime_conditional(right) {
for j, stmt in stmts {
if j + 1 == stmts.len {
if stmt is ast.ExprStmt {
c.assign_right_expr(node, i, stmt.expr, name, ident)
} else {
c.g.n_error('last stmt must be expr')
}
} else {
c.g.stmt(stmt)
}
}
} else {
c.g.n_error('missing value for assignment')
}
return
}
// dump(node)
size := c.g.get_type_size(node.left_types[i])
if size !in [1, 2, 4, 8] || node.op !in [.assign, .decl_assign] {
c.g.v_error('unhandled assign_stmt expression: ${right.type_name()}',
right.pos())
}
if node.op == .decl_assign {
c.allocate_var(name, size, 0)
}
c.g.expr(right)
var := c.g.get_var_from_ident(ident)
if node.left_types[i].is_pure_float() {
match var {
LocalVar { c.mov_ssereg_to_var(var as LocalVar, .xmm0) }
GlobalVar { c.mov_ssereg_to_var(var as GlobalVar, .xmm0) }
// Amd64Register { c.g.mov_ssereg(var as Amd64Register, .xmm0) }
else {}
}
} else {
match var {
LocalVar { c.mov_reg_to_var(var as LocalVar, Amd64Register.rax) }
GlobalVar { c.mov_reg_to_var(var as GlobalVar, Amd64Register.rax) }
Register { c.mov_reg(var as Amd64Register, Amd64Register.rax) }
}
}
}
}
}*/
}
fn (mut c Amd64) gen_type_promotion(from ast.Type, to ast.Type, option Amd64RegisterOption) {
@ -3561,7 +3444,38 @@ fn (mut c Amd64) init_struct(var Var, init ast.StructInit) {
}
}
GlobalVar {
// TODO
c.g.n_error('GlobalVar not implemented for ast.StructInit')
}
}
}
// init_array generates the code that fills an array variable with the element
// values of the given ast.ArrayInit. Identifiers are first resolved to a
// concrete variable, then handled recursively.
fn (mut c Amd64) init_array(var Var, node ast.ArrayInit) {
	match var {
		ast.Ident {
			// resolve the identifier and retry with the concrete variable kind
			var_object := c.g.get_var_from_ident(var)
			match var_object {
				LocalVar {
					c.init_array(var_object as LocalVar, node)
				}
				GlobalVar {
					c.init_array(var_object as GlobalVar, node)
				}
				Register {
					// TODO
					// c.g.cmp()
				}
			}
		}
		LocalVar {
			// evaluate each element expression and store the result register
			// into consecutive 8-byte slots starting at the variable's offset
			mut offset := var.offset
			for expr in node.exprs {
				c.g.expr(expr)
				c.mov_reg_to_var(LocalVar{offset, ast.i64_type_idx, ''}, c.main_reg())
				offset += 8
			}
		}
		GlobalVar {
			c.g.n_error('GlobalVar not implemented for ast.ArrayInit')
		}
	}
}

View File

@ -1,3 +1,6 @@
// Copyright (c) 2019-2023 Alexander Medvednikov. All rights reserved.
// Use of this source code is governed by an MIT license
// that can be found in the LICENSE file.
module native
import v.ast
@ -439,6 +442,10 @@ fn (mut c Arm64) init_struct(var Var, init ast.StructInit) {
panic('Arm64.init_struct() not implemented')
}
// init_array is part of the CodeGen interface; the Arm64 backend does not
// implement array initialization yet.
fn (mut c Arm64) init_array(var Var, init ast.ArrayInit) {
	panic('Arm64.init_array() not implemented')
}
// load_fp_var is part of the CodeGen interface; the Arm64 backend does not
// implement loading floating-point variables yet.
fn (mut c Arm64) load_fp_var(var Var, config VarConfig) {
	panic('Arm64.load_fp_var() not implemented')
}

347
vlib/v/gen/native/expr.v Normal file
View File

@ -0,0 +1,347 @@
// Copyright (c) 2019-2023 Alexander Medvednikov. All rights reserved.
// Use of this source code is governed by an MIT license
// that can be found in the LICENSE file.
module native
import v.ast
// expr generates native code for a single expression node. For expressions
// that produce a value, the result is left in the backend's main register
// (or loaded via load_fp for float values).
fn (mut g Gen) expr(node ast.Expr) {
	match node {
		ast.ParExpr {
			g.expr(node.expr)
		}
		ast.ArrayInit {
			// materialize an anonymous array on the stack and leave its address
			// in the main register
			pos := g.allocate_array('_anonarray', 8, node.exprs.len)
			g.code_gen.init_array(LocalVar{ offset: pos, typ: node.typ }, node)
			g.code_gen.lea_var_to_reg(g.code_gen.main_reg(), pos)
		}
		ast.BoolLiteral {
			g.code_gen.mov64(g.code_gen.main_reg(), if node.val {
				1
			} else {
				0
			})
		}
		ast.CallExpr {
			// a few builtins get dedicated code paths instead of a regular call
			if node.name == 'C.syscall' {
				g.code_gen.gen_syscall(node)
			} else if node.name == 'exit' {
				g.code_gen.gen_exit(node.args[0].expr)
			} else if node.name in ['println', 'print', 'eprintln', 'eprint'] {
				expr := node.args[0].expr
				typ := node.args[0].typ
				g.gen_print_from_expr(expr, typ, node.name)
			} else {
				g.code_gen.call_fn(node)
			}
		}
		ast.FloatLiteral {
			// evaluate the literal at compile time, then load it as an fp value
			val := g.eval.expr(node, ast.float_literal_type_idx).float_val()
			g.code_gen.load_fp(val)
		}
		ast.Ident {
			var := g.get_var_from_ident(node)
			// XXX this is intel specific
			match var {
				LocalVar {
					if g.is_register_type(var.typ) {
						g.code_gen.mov_var_to_reg(g.code_gen.main_reg(), node as ast.Ident)
					} else if var.typ.is_pure_float() {
						g.code_gen.load_fp_var(node as ast.Ident)
					} else {
						ts := g.table.sym(var.typ)
						match ts.info {
							ast.Struct {
								// structs are passed by address
								g.code_gen.lea_var_to_reg(g.code_gen.main_reg(), g.get_var_offset(node.name))
							}
							ast.Enum {
								g.code_gen.mov_var_to_reg(g.code_gen.main_reg(), node as ast.Ident,
									typ: ast.int_type_idx
								)
							}
							else {
								g.n_error('Unsupported variable type')
							}
						}
					}
				}
				else {
					g.n_error('Unsupported variable kind')
				}
			}
		}
		ast.IfExpr {
			g.if_expr(node)
		}
		ast.InfixExpr {
			g.code_gen.infix_expr(node)
		}
		ast.IntegerLiteral {
			g.code_gen.mov64(g.code_gen.main_reg(), i64(node.val.u64()))
		}
		ast.Nil {
			g.code_gen.mov64(g.code_gen.main_reg(), 0)
		}
		ast.PostfixExpr {
			g.postfix_expr(node)
		}
		ast.PrefixExpr {
			g.code_gen.prefix_expr(node)
		}
		ast.StringLiteral {
			str := g.eval_str_lit_escape_codes(node)
			g.allocate_string(str, 3, .rel32)
		}
		ast.CharLiteral {
			// pack the (possibly multi-byte) escaped character into one rune
			// value, little-endian byte order
			bytes := g.eval_escape_codes(node.val)
				.bytes()
			mut val := rune(0)
			for i, v in bytes {
				val |= v << (i * 8)
				if i >= sizeof(rune) {
					g.n_error('runes are only 4 bytes wide')
				}
			}
			g.code_gen.movabs(g.code_gen.main_reg(), i64(val))
		}
		ast.StructInit {
			// materialize an anonymous struct on the stack and leave its
			// address in the main register
			pos := g.allocate_by_type('_anonstruct', node.typ)
			g.code_gen.init_struct(LocalVar{ offset: pos, typ: node.typ }, node)
			g.code_gen.lea_var_to_reg(g.code_gen.main_reg(), pos)
		}
		ast.GoExpr {
			g.v_error('native backend doesnt support threads yet', node.pos)
		}
		ast.MatchExpr {
			g.code_gen.gen_match_expr(node)
		}
		ast.SelectorExpr {
			g.code_gen.gen_selector_expr(node)
		}
		ast.CastExpr {
			g.code_gen.gen_cast_expr(node)
		}
		ast.EnumVal {
			type_name := g.table.get_type_name(node.typ)
			g.code_gen.mov(g.code_gen.main_reg(), g.enum_vals[type_name].fields[node.val])
		}
		ast.UnsafeExpr {
			// unsafe has no codegen effect; generate the inner expression
			g.expr(node.expr)
		}
		ast.ConcatExpr {
			g.code_gen.gen_concat_expr(node)
		}
		else {
			g.n_error('expr: unhandled node type: ${node.type_name()}')
		}
	}
}
// condition evaluates `expr`, compares the result register against zero and
// emits a conditional jump (je, or jne when `neg` is set). Returns the
// position of the emitted jump so its target can be patched later.
fn (mut g Gen) condition(expr ast.Expr, neg bool) int {
	g.expr(expr)
	result_reg := g.code_gen.main_reg()
	g.code_gen.cmp_zero(result_reg)
	jump_op := if neg { JumpOp.jne } else { JumpOp.je }
	return g.code_gen.cjmp(jump_op)
}
// if_expr generates code for an if/else-if/else chain. Each non-else branch
// evaluates its condition and conditionally jumps past its body; when there
// is more than one branch, every taken body ends with a jump to a shared
// endif label. Jump targets are recorded as label patches and resolved once
// the label addresses are known.
fn (mut g Gen) if_expr(node ast.IfExpr) {
	if node.is_comptime {
		// $if: the surviving statements (if any) are generated directly
		if stmts := g.comptime_conditional(node) {
			g.stmts(stmts)
		}
		return
	}
	if node.branches.len == 0 {
		return
	}
	mut endif_label := 0
	// a single branch needs no endif label to jump to
	has_endif := node.branches.len > 1
	if has_endif {
		endif_label = g.labels.new_label()
	}
	for idx in 0 .. node.branches.len {
		branch := node.branches[idx]
		if idx == node.branches.len - 1 && node.has_else {
			// the trailing else branch is generated unconditionally
			g.stmts(branch.stmts)
		} else {
			if branch.cond is ast.BoolLiteral {
				// constant condition: emit the body only when it is true
				if branch.cond.val {
					g.stmts(branch.stmts)
				}
				continue
			}
			expr := branch.cond
			label := g.labels.new_label()
			// jump over this branch's body when the condition is false
			cjmp_addr := g.condition(expr, false)
			g.labels.patches << LabelPatch{
				id: label
				pos: cjmp_addr
			}
			g.println('; jump to label ${label}')
			g.stmts(branch.stmts)
			if has_endif {
				// after a taken branch, skip the remaining branches
				jump_addr := g.code_gen.jmp(0)
				g.labels.patches << LabelPatch{
					id: endif_label
					pos: jump_addr
				}
				g.println('; jump to label ${endif_label}')
			}
			// println('after if g.pos=$g.pos() jneaddr=$cjmp_addr')
			g.labels.addrs[label] = g.pos()
			g.println('; label ${label}')
		}
	}
	if has_endif {
		g.labels.addrs[endif_label] = g.pos()
		g.println('; label ${endif_label}')
	}
}
// postfix_expr generates code for `ident++` / `ident--`.
// Postfix operators on anything other than a plain identifier are ignored.
fn (mut g Gen) postfix_expr(node ast.PostfixExpr) {
	if node.expr !is ast.Ident {
		return
	}
	ident := node.expr as ast.Ident
	if node.op == .inc {
		g.code_gen.inc_var(ident)
	} else if node.op == .dec {
		g.code_gen.dec_var(ident)
	}
}
// gen_typeof_expr loads the address of the static type-name string of `it`
// into the main register, with a trailing newline appended when requested.
fn (mut g Gen) gen_typeof_expr(it ast.TypeOf, newline bool) {
	mut text := g.typ(it.typ).name
	if newline {
		text += '\n'
	}
	g.code_gen.learel(g.code_gen.main_reg(), g.allocate_string(text, 3, .rel32))
}
// gen_print_from_expr generates code that prints the value of `expr`.
// `name` is the builtin being called (print/println/eprint/eprintln) and
// selects both the output file descriptor (1 or 2) and whether a trailing
// newline is emitted. Literals and comptime-known values are printed as
// precomputed strings; everything else is evaluated and stringified at
// runtime.
fn (mut g Gen) gen_print_from_expr(expr ast.Expr, typ ast.Type, name string) {
	newline := name in ['println', 'eprintln']
	fd := if name in ['eprint', 'eprintln'] { 2 } else { 1 }
	match expr {
		ast.StringLiteral {
			str := g.eval_str_lit_escape_codes(expr)
			if newline {
				g.code_gen.gen_print(str + '\n', fd)
			} else {
				g.code_gen.gen_print(str, fd)
			}
		}
		ast.Nil {
			str := '0x0'
			if newline {
				g.code_gen.gen_print(str + '\n', fd)
			} else {
				g.code_gen.gen_print(str, fd)
			}
		}
		ast.CharLiteral {
			str := g.eval_escape_codes(expr.val)
			if newline {
				g.code_gen.gen_print(str + '\n', fd)
			} else {
				g.code_gen.gen_print(str, fd)
			}
		}
		ast.Ident {
			vo := g.try_var_offset(expr.name)
			reg := g.code_gen.main_reg()
			if vo != -1 {
				// known variable: convert its value to a string, then print it
				g.gen_var_to_string(reg, expr, expr as ast.Ident)
				g.code_gen.gen_print_reg(reg, -1, fd)
				if newline {
					g.code_gen.gen_print('\n', fd)
				}
			} else {
				g.code_gen.gen_print_reg(reg, -1, fd)
			}
		}
		ast.IntegerLiteral {
			if newline {
				g.code_gen.gen_print('${expr.val}\n', fd)
			} else {
				g.code_gen.gen_print('${expr.val}', fd)
			}
		}
		ast.BoolLiteral {
			// register 'true' and 'false' strings // g.expr(expr)
			// XXX mov64 shouldn't be used for addressing
			nl := if newline { '\n' } else { '' }
			if expr.val {
				g.code_gen.gen_print('true' + nl, fd)
			} else {
				g.code_gen.gen_print('false' + nl, fd)
			}
		}
		ast.SizeOf {
			// sizeof is comptime-known, so print it as a constant string
			size := g.get_type_size(expr.typ)
			if newline {
				g.code_gen.gen_print('${size}\n', fd)
			} else {
				g.code_gen.gen_print('${size}', fd)
			}
		}
		ast.OffsetOf {
			styp := g.typ(expr.struct_type)
			field_name := expr.field
			if styp.kind == .struct_ {
				off := g.get_field_offset(expr.struct_type, field_name)
				if newline {
					g.code_gen.gen_print('${off}\n', fd)
				} else {
					g.code_gen.gen_print('${off}', fd)
				}
			} else {
				g.v_error('_offsetof expects a struct Type as first argument', expr.pos)
			}
		}
		ast.None {
			if newline {
				g.code_gen.gen_print('<none>\n', fd)
			} else {
				g.code_gen.gen_print('<none>', fd)
			}
		}
		ast.AtExpr {
			if newline {
				g.code_gen.gen_print(g.comptime_at(expr) + '\n', fd)
			} else {
				g.code_gen.gen_print(g.comptime_at(expr), fd)
			}
		}
		ast.StringInterLiteral {
			g.n_error('Interlaced string literals are not yet supported in the native backend.') // , expr.pos)
		}
		ast.IfExpr {
			if expr.is_comptime {
				// $if: print the value of the last statement of the surviving
				// branch; generate the preceding statements normally
				if stmts := g.comptime_conditional(expr) {
					for i, stmt in stmts {
						if i + 1 == stmts.len && stmt is ast.ExprStmt {
							g.gen_print_from_expr(stmt.expr, stmt.typ, name)
						} else {
							g.stmt(stmt)
						}
					}
				} else {
					g.n_error('nothing to print')
				}
			} else {
				g.n_error('non-comptime conditionals are not implemented yet.')
			}
		}
		else {
			// general case: evaluate, stringify at runtime, then print
			g.expr(expr)
			g.gen_to_string(g.code_gen.main_reg(), typ)
			g.code_gen.gen_print_reg(g.code_gen.main_reg(), -1, fd)
			if newline {
				g.code_gen.gen_print('\n', fd)
			}
		}
	}
}

View File

@ -16,66 +16,6 @@ import v.eval
import term
import strconv
interface CodeGen {
mut:
g &Gen
adr(r Arm64Register, delta int) // Note: Temporary!
allocate_var(name string, size int, initial_val int) int
apicall(call ApiCall) // winapi calls
assign_stmt(node ast.AssignStmt) // TODO: make platform-independant
builtin_decl(builtin BuiltinFn)
call_addr_at(addr int, at i64) i64
call_builtin(name Builtin) i64
call_fn(node ast.CallExpr)
call(addr int) i64
cjmp(op JumpOp) int
cmp_var_reg(var Var, reg Register, config VarConfig)
cmp_var(var Var, val int, config VarConfig)
cmp_zero(reg Register)
convert_bool_to_string(r Register)
convert_int_to_string(a Register, b Register)
convert_rune_to_string(r Register, buffer int, var Var, config VarConfig)
dec_var(var Var, config VarConfig)
fn_decl(node ast.FnDecl)
for_in_stmt(node ast.ForInStmt)
gen_asm_stmt(asm_node ast.AsmStmt)
gen_assert(assert_node ast.AssertStmt)
gen_cast_expr(expr ast.CastExpr)
gen_concat_expr(expr ast.ConcatExpr)
gen_exit(expr ast.Expr)
gen_match_expr(expr ast.MatchExpr)
gen_print_reg(r Register, n int, fd int)
gen_print(s string, fd int)
gen_selector_expr(expr ast.SelectorExpr)
gen_syscall(node ast.CallExpr)
inc_var(var Var, config VarConfig)
infix_expr(node ast.InfixExpr) // TODO: make platform-independant
infloop()
init_struct(var Var, init ast.StructInit)
jmp_back(start i64)
jmp(addr int) int
lea_var_to_reg(r Register, var_offset int)
learel(reg Register, val int)
leave()
load_fp_var(var Var, config VarConfig)
load_fp(val f64)
main_reg() Register
mov_int_to_var(var Var, integer int, config VarConfig)
mov_reg_to_var(var Var, reg Register, config VarConfig)
mov_reg(r1 Register, r2 Register)
mov_var_to_reg(reg Register, var Var, config VarConfig)
mov(r Register, val int)
mov64(r Register, val i64)
movabs(reg Register, val i64)
prefix_expr(node ast.PrefixExpr)
ret()
return_stmt(node ast.Return)
reverse_string(r Register)
svc()
syscall() // unix syscalls
trap()
}
[heap; minify]
pub struct Gen {
out_name string
@ -123,6 +63,67 @@ mut:
requires_linking bool
}
// CodeGen is the interface every target-architecture backend (Amd64, Arm64)
// implements. The platform-independent part of the native generator talks to
// a backend exclusively through these methods.
interface CodeGen {
mut:
	g &Gen
	adr(r Arm64Register, delta int) // Note: Temporary!
	allocate_var(name string, size int, initial_val int) int
	apicall(call ApiCall) // winapi calls
	assign_stmt(node ast.AssignStmt) // TODO: make platform-independant
	builtin_decl(builtin BuiltinFn)
	call_addr_at(addr int, at i64) i64
	call_builtin(name Builtin) i64
	call_fn(node ast.CallExpr)
	call(addr int) i64
	cjmp(op JumpOp) int
	cmp_var_reg(var Var, reg Register, config VarConfig)
	cmp_var(var Var, val int, config VarConfig)
	cmp_zero(reg Register)
	convert_bool_to_string(r Register)
	convert_int_to_string(a Register, b Register)
	convert_rune_to_string(r Register, buffer int, var Var, config VarConfig)
	dec_var(var Var, config VarConfig)
	fn_decl(node ast.FnDecl)
	for_in_stmt(node ast.ForInStmt)
	gen_asm_stmt(asm_node ast.AsmStmt)
	gen_assert(assert_node ast.AssertStmt)
	gen_cast_expr(expr ast.CastExpr)
	gen_concat_expr(expr ast.ConcatExpr)
	gen_exit(expr ast.Expr)
	gen_match_expr(expr ast.MatchExpr)
	gen_print_reg(r Register, n int, fd int)
	gen_print(s string, fd int)
	gen_selector_expr(expr ast.SelectorExpr)
	gen_syscall(node ast.CallExpr)
	inc_var(var Var, config VarConfig)
	infix_expr(node ast.InfixExpr) // TODO: make platform-independant
	infloop()
	init_struct(var Var, init ast.StructInit)
	init_array(var Var, init ast.ArrayInit)
	jmp_back(start i64)
	jmp(addr int) int
	lea_var_to_reg(r Register, var_offset int)
	learel(reg Register, val int)
	leave()
	load_fp_var(var Var, config VarConfig)
	load_fp(val f64)
	main_reg() Register
	mov_int_to_var(var Var, integer int, config VarConfig)
	mov_reg_to_var(var Var, reg Register, config VarConfig)
	mov_reg(r1 Register, r2 Register)
	mov_var_to_reg(reg Register, var Var, config VarConfig)
	mov(r Register, val int)
	mov64(r Register, val i64)
	movabs(reg Register, val i64)
	prefix_expr(node ast.PrefixExpr)
	ret()
	return_stmt(node ast.Return)
	reverse_string(r Register)
	svc()
	syscall() // unix syscalls
	trap()
}
// Register is the architecture-independent register handle; each backend
// supplies its own concrete register enum.
type Register = Amd64Register | Arm64Register
enum RelocType {
@ -223,6 +224,11 @@ enum JumpOp {
jnb
}
// F64I64 reinterprets the raw bits of an f64 as an i64 and vice versa
// (type punning via a union); presumably used when emitting float
// constants as integer immediates — verify at call sites.
union F64I64 {
	f f64
	i i64
}
fn (mut g Gen) get_var_from_ident(ident ast.Ident) IdentVar {
mut obj := ident.obj
if obj !in [ast.Var, ast.ConstField, ast.GlobalField, ast.AsmRegister] {
@ -495,12 +501,6 @@ pub fn (mut g Gen) calculate_enum_fields() {
}
}
// stmts generates code for every statement in the list, in order.
pub fn (mut g Gen) stmts(stmts []ast.Stmt) {
	for i in 0 .. stmts.len {
		g.stmt(stmts[i])
	}
}
// pos returns the current write position, i.e. the number of bytes
// emitted into the output buffer so far.
pub fn (g &Gen) pos() i64 {
	return g.buf.len
}
@ -773,12 +773,6 @@ fn (mut g Gen) allocate_array(name string, size int, items int) int {
return pos
}
fn (mut g Gen) gen_typeof_expr(it ast.TypeOf, newline bool) {
nl := if newline { '\n' } else { '' }
r := g.typ(it.typ).name
g.code_gen.learel(g.code_gen.main_reg(), g.allocate_string('${r}${nl}', 3, .rel32))
}
fn (mut g Gen) eval_str_lit_escape_codes(str_lit ast.StringLiteral) string {
if str_lit.is_raw {
return str_lit.val
@ -909,133 +903,6 @@ fn (mut g Gen) gen_var_to_string(reg Register, expr ast.Expr, var Var, config Va
}
}
pub fn (mut g Gen) gen_print_from_expr(expr ast.Expr, typ ast.Type, name string) {
newline := name in ['println', 'eprintln']
fd := if name in ['eprint', 'eprintln'] { 2 } else { 1 }
match expr {
ast.StringLiteral {
str := g.eval_str_lit_escape_codes(expr)
if newline {
g.code_gen.gen_print(str + '\n', fd)
} else {
g.code_gen.gen_print(str, fd)
}
}
ast.Nil {
str := '0x0'
if newline {
g.code_gen.gen_print(str + '\n', fd)
} else {
g.code_gen.gen_print(str, fd)
}
}
ast.CharLiteral {
str := g.eval_escape_codes(expr.val)
if newline {
g.code_gen.gen_print(str + '\n', fd)
} else {
g.code_gen.gen_print(str, fd)
}
}
ast.Ident {
vo := g.try_var_offset(expr.name)
reg := g.code_gen.main_reg()
if vo != -1 {
g.gen_var_to_string(reg, expr, expr as ast.Ident)
g.code_gen.gen_print_reg(reg, -1, fd)
if newline {
g.code_gen.gen_print('\n', fd)
}
} else {
g.code_gen.gen_print_reg(reg, -1, fd)
}
}
ast.IntegerLiteral {
if newline {
g.code_gen.gen_print('${expr.val}\n', fd)
} else {
g.code_gen.gen_print('${expr.val}', fd)
}
}
ast.BoolLiteral {
// register 'true' and 'false' strings // g.expr(expr)
// XXX mov64 shuoldnt be used for addressing
nl := if newline { '\n' } else { '' }
if expr.val {
g.code_gen.gen_print('true' + nl, fd)
} else {
g.code_gen.gen_print('false' + nl, fd)
}
}
ast.SizeOf {
size := g.get_type_size(expr.typ)
if newline {
g.code_gen.gen_print('${size}\n', fd)
} else {
g.code_gen.gen_print('${size}', fd)
}
}
ast.OffsetOf {
styp := g.typ(expr.struct_type)
field_name := expr.field
if styp.kind == .struct_ {
off := g.get_field_offset(expr.struct_type, field_name)
if newline {
g.code_gen.gen_print('${off}\n', fd)
} else {
g.code_gen.gen_print('${off}', fd)
}
} else {
g.v_error('_offsetof expects a struct Type as first argument', expr.pos)
}
}
ast.None {
if newline {
g.code_gen.gen_print('<none>\n', fd)
} else {
g.code_gen.gen_print('<none>', fd)
}
}
ast.AtExpr {
if newline {
g.code_gen.gen_print(g.comptime_at(expr) + '\n', fd)
} else {
g.code_gen.gen_print(g.comptime_at(expr), fd)
}
}
ast.StringInterLiteral {
g.n_error('Interlaced string literals are not yet supported in the native backend.') // , expr.pos)
}
ast.IfExpr {
if expr.is_comptime {
if stmts := g.comptime_conditional(expr) {
for i, stmt in stmts {
if i + 1 == stmts.len && stmt is ast.ExprStmt {
g.gen_print_from_expr(stmt.expr, stmt.typ, name)
} else {
g.stmt(stmt)
}
}
} else {
g.n_error('nothing to print')
}
} else {
g.n_error('non-comptime conditionals are not implemented yet.')
}
}
else {
g.expr(expr)
g.gen_to_string(g.code_gen.main_reg(), typ)
g.code_gen.gen_print_reg(g.code_gen.main_reg(), -1, fd)
if newline {
g.code_gen.gen_print('\n', fd)
}
}
}
}
fn (mut g Gen) is_used_by_main(node ast.FnDecl) bool {
mut used := true
if g.pref.skip_unused {
@ -1150,463 +1017,6 @@ fn (mut g Gen) println(comment string) {
println(final)
}
// gen_forc_stmt generates machine code for a C-style `for init; cond; inc {}` loop.
fn (mut g Gen) gen_forc_stmt(node ast.ForCStmt) {
	if node.has_init {
		g.stmts([node.init])
	}
	start := g.pos() // address of the condition check; the loop jumps back here
	start_label := g.labels.new_label()
	mut jump_addr := i64(0) // patch position of the conditional jump that exits the loop
	if node.has_cond {
		cond := node.cond
		match cond {
			ast.InfixExpr {
				match cond.left {
					ast.Ident {
						// NOTE(review): assumes the right operand is an integer
						// literal; other expression kinds would fail this cast
						lit := cond.right as ast.IntegerLiteral
						g.code_gen.cmp_var(cond.left as ast.Ident, lit.val.int())
						// jump out of the loop when the inverse condition holds
						match cond.op {
							.gt {
								jump_addr = g.code_gen.cjmp(.jle)
							}
							.lt {
								jump_addr = g.code_gen.cjmp(.jge)
							}
							else {
								g.n_error('unsupported conditional in for-c loop')
							}
						}
					}
					else {
						g.n_error('unhandled infix.left')
					}
				}
			}
			else {}
		}
		// dump(node.cond)
		g.expr(node.cond)
	}
	end_label := g.labels.new_label()
	g.labels.patches << LabelPatch{
		id: end_label
		pos: int(jump_addr)
	}
	g.println('; jump to label ${end_label}')
	// register start/end so break/continue inside the body can target this loop
	g.labels.branches << BranchLabel{
		name: node.label
		start: start_label
		end: end_label
	}
	g.stmts(node.stmts)
	// `continue` lands here, before the increment statements
	g.labels.addrs[start_label] = g.pos()
	g.println('; label ${start_label}')
	if node.has_inc {
		g.stmts([node.inc])
	}
	g.labels.branches.pop()
	g.code_gen.jmp_back(start)
	g.labels.addrs[end_label] = g.pos()
	g.println('; jump to label ${end_label}')
	// loop back
}
// for_stmt generates machine code for `for {}` and `for cond {}` loops.
fn (mut g Gen) for_stmt(node ast.ForStmt) {
	if node.is_inf {
		if node.stmts.len == 0 {
			// empty body: emit a tight endless jump
			g.code_gen.infloop()
			return
		}
		// infinite loop
		start := g.pos()
		start_label := g.labels.new_label()
		g.labels.addrs[start_label] = start
		g.println('; label ${start_label}')
		end_label := g.labels.new_label()
		// register the labels so break/continue in the body can use them
		g.labels.branches << BranchLabel{
			name: node.label
			start: start_label
			end: end_label
		}
		g.stmts(node.stmts)
		g.labels.branches.pop()
		g.code_gen.jmp_back(start)
		g.println('jmp after infinite for')
		g.labels.addrs[end_label] = g.pos()
		g.println('; label ${end_label}')
		return
	}
	// conditional loop: only `ident op ident` / `ident op int` conditions are supported
	infix_expr := node.cond as ast.InfixExpr
	mut jump_addr := 0 // location of `jne *00 00 00 00*`
	start := g.pos()
	start_label := g.labels.new_label()
	g.labels.addrs[start_label] = start
	g.println('; label ${start_label}')
	match infix_expr.left {
		ast.Ident {
			match infix_expr.right {
				ast.Ident {
					reg := g.code_gen.main_reg()
					g.code_gen.mov_var_to_reg(reg, infix_expr.right as ast.Ident)
					g.code_gen.cmp_var_reg(infix_expr.left as ast.Ident, reg)
				}
				ast.IntegerLiteral {
					lit := infix_expr.right as ast.IntegerLiteral
					g.code_gen.cmp_var(infix_expr.left as ast.Ident, lit.val.int())
				}
				else {
					g.n_error('unhandled expression type')
				}
			}
			// jump past the loop body when the inverse of the condition holds
			match infix_expr.left.tok_kind {
				.lt {
					jump_addr = g.code_gen.cjmp(.jge)
				}
				.gt {
					jump_addr = g.code_gen.cjmp(.jle)
				}
				.le {
					jump_addr = g.code_gen.cjmp(.jg)
				}
				.ge {
					jump_addr = g.code_gen.cjmp(.jl)
				}
				.ne {
					jump_addr = g.code_gen.cjmp(.je)
				}
				.eq {
					jump_addr = g.code_gen.cjmp(.jne)
				}
				else {
					g.n_error('unhandled infix cond token')
				}
			}
		}
		else {
			g.n_error('unhandled infix.left')
		}
	}
	end_label := g.labels.new_label()
	g.labels.patches << LabelPatch{
		id: end_label
		pos: jump_addr
	}
	g.println('; jump to label ${end_label}')
	g.labels.branches << BranchLabel{
		name: node.label
		start: start_label
		end: end_label
	}
	g.stmts(node.stmts)
	g.labels.branches.pop()
	// Go back to `cmp ...`
	g.code_gen.jmp_back(start)
	// Update the jump addr to current pos
	g.labels.addrs[end_label] = g.pos()
	g.println('; label ${end_label}')
	g.println('jmp after for')
}
// stmt generates machine code for a single statement node by
// dispatching to the matching code generation routine.
fn (mut g Gen) stmt(node ast.Stmt) {
	match node {
		ast.AssignStmt {
			g.code_gen.assign_stmt(node)
		}
		ast.Block {
			g.stmts(node.stmts)
		}
		ast.BranchStmt {
			// break/continue: search inward-out for the innermost (or named)
			// enclosing loop and jump to its end (break) or start (continue)
			label_name := node.label
			for i := g.labels.branches.len - 1; i >= 0; i-- {
				branch := g.labels.branches[i]
				if label_name == '' || label_name == branch.name {
					label := if node.kind == .key_break {
						branch.end
					} else { // continue
						branch.start
					}
					// emit a placeholder jump; patched once the label address is known
					jump_addr := g.code_gen.jmp(0)
					g.labels.patches << LabelPatch{
						id: label
						pos: jump_addr
					}
					g.println('; jump to ${label}: ${node.kind}')
					break
				}
			}
		}
		ast.ConstDecl {}
		ast.DeferStmt {
			// record that this defer was reached by setting a hidden flag variable
			name := '_defer${g.defer_stmts.len}'
			defer_var := g.get_var_offset(name)
			g.code_gen.mov_int_to_var(LocalVar{defer_var, ast.i64_type_idx, name}, 1)
			g.defer_stmts << node
			g.defer_stmts[g.defer_stmts.len - 1].idx_in_fn = g.defer_stmts.len - 1
		}
		ast.ExprStmt {
			g.expr(node.expr)
		}
		ast.FnDecl {
			g.fn_decl(node)
		}
		ast.ForCStmt {
			g.gen_forc_stmt(node)
		}
		ast.ForInStmt {
			if node.stmts.len == 0 {
				// if no statements, just dont make it
				return
			}
			g.code_gen.for_in_stmt(node)
		}
		ast.ForStmt {
			g.for_stmt(node)
		}
		ast.HashStmt {
			// hash statements inject raw opcode bytes given as hex pairs
			words := node.val.split(' ')
			for word in words {
				if word.len != 2 {
					g.n_error('opcodes format: xx xx xx xx\nhash statements are not allowed with the native backend, use the C backend for extended C interoperability.')
				}
				b := unsafe { C.strtol(&char(word.str), 0, 16) }
				// b := word.u8()
				// println('"$word" $b')
				g.write8(b)
			}
		}
		ast.Module {}
		ast.Return {
			g.code_gen.return_stmt(node)
		}
		ast.AsmStmt {
			g.code_gen.gen_asm_stmt(node)
		}
		ast.AssertStmt {
			g.code_gen.gen_assert(node)
		}
		ast.Import {} // do nothing here
		ast.StructDecl {}
		ast.EnumDecl {}
		else {
			eprintln('native.stmt(): bad node: ' + node.type_name())
		}
	}
}
fn C.strtol(str &char, endptr &&char, base int) int
// F64I64 shares storage between an f64 and an i64, allowing the raw
// bit pattern of a float to be read or written as an integer.
union F64I64 {
	f f64
	i i64
}
// expr generates machine code for an expression node. Scalar results are
// left in the backend's main register; struct values are passed by address.
fn (mut g Gen) expr(node ast.Expr) {
	match node {
		ast.ParExpr {
			g.expr(node.expr)
		}
		ast.ArrayInit {
			g.n_error('array init expr not supported yet')
		}
		ast.BoolLiteral {
			g.code_gen.mov64(g.code_gen.main_reg(), if node.val {
				1
			} else {
				0
			})
		}
		ast.CallExpr {
			// built-in calls are special-cased; everything else is a plain call
			if node.name == 'C.syscall' {
				g.code_gen.gen_syscall(node)
			} else if node.name == 'exit' {
				g.code_gen.gen_exit(node.args[0].expr)
			} else if node.name in ['println', 'print', 'eprintln', 'eprint'] {
				expr := node.args[0].expr
				typ := node.args[0].typ
				g.gen_print_from_expr(expr, typ, node.name)
			} else {
				g.code_gen.call_fn(node)
			}
		}
		ast.FloatLiteral {
			val := g.eval.expr(node, ast.float_literal_type_idx).float_val()
			g.code_gen.load_fp(val)
		}
		ast.Ident {
			var := g.get_var_from_ident(node)
			// XXX this is intel specific
			match var {
				LocalVar {
					if g.is_register_type(var.typ) {
						g.code_gen.mov_var_to_reg(g.code_gen.main_reg(), node as ast.Ident)
					} else if var.typ.is_pure_float() {
						g.code_gen.load_fp_var(node as ast.Ident)
					} else {
						ts := g.table.sym(var.typ)
						match ts.info {
							ast.Struct {
								// structs: load the address of the variable
								g.code_gen.lea_var_to_reg(g.code_gen.main_reg(), g.get_var_offset(node.name))
							}
							ast.Enum {
								g.code_gen.mov_var_to_reg(g.code_gen.main_reg(), node as ast.Ident,
									typ: ast.int_type_idx
								)
							}
							else {
								g.n_error('Unsupported variable type')
							}
						}
					}
				}
				else {
					g.n_error('Unsupported variable kind')
				}
			}
		}
		ast.IfExpr {
			g.if_expr(node)
		}
		ast.InfixExpr {
			g.code_gen.infix_expr(node)
		}
		ast.IntegerLiteral {
			g.code_gen.movabs(g.code_gen.main_reg(), i64(node.val.u64()))
		}
		ast.Nil {
			g.code_gen.movabs(g.code_gen.main_reg(), 0)
		}
		ast.PostfixExpr {
			g.postfix_expr(node)
		}
		ast.PrefixExpr {
			g.code_gen.prefix_expr(node)
		}
		ast.StringLiteral {
			str := g.eval_str_lit_escape_codes(node)
			g.allocate_string(str, 3, .rel32)
		}
		ast.CharLiteral {
			bytes := g.eval_escape_codes(node.val)
				.bytes()
			// pack the UTF-8 bytes of the rune into one integer, little-endian
			mut val := rune(0)
			for i, v in bytes {
				val |= v << (i * 8)
				if i >= sizeof(rune) {
					g.n_error('runes are only 4 bytes wide')
				}
			}
			g.code_gen.movabs(g.code_gen.main_reg(), i64(val))
		}
		ast.StructInit {
			// allocate an anonymous stack slot, initialize it and yield its address
			pos := g.allocate_by_type('_anonstruct', node.typ)
			g.code_gen.init_struct(LocalVar{ offset: pos, typ: node.typ }, node)
			g.code_gen.lea_var_to_reg(g.code_gen.main_reg(), pos)
		}
		ast.GoExpr {
			g.v_error('native backend doesnt support threads yet', node.pos)
		}
		ast.MatchExpr {
			g.code_gen.gen_match_expr(node)
		}
		ast.SelectorExpr {
			g.code_gen.gen_selector_expr(node)
		}
		ast.CastExpr {
			g.code_gen.gen_cast_expr(node)
		}
		ast.EnumVal {
			type_name := g.table.get_type_name(node.typ)
			g.code_gen.mov(g.code_gen.main_reg(), g.enum_vals[type_name].fields[node.val])
		}
		ast.UnsafeExpr {
			g.expr(node.expr)
		}
		ast.ConcatExpr {
			g.code_gen.gen_concat_expr(node)
		}
		else {
			g.n_error('expr: unhandled node type: ${node.type_name()}')
		}
	}
}
// condition evaluates `expr`, compares the result register against zero and
// emits a conditional jump, returning the patch position of the jump.
// With neg == false the jump fires when the condition is false (je),
// with neg == true when it is true (jne).
fn (mut g Gen) condition(expr ast.Expr, neg bool) int {
	g.expr(expr)
	g.code_gen.cmp_zero(g.code_gen.main_reg())
	if neg {
		return g.code_gen.cjmp(.jne)
	}
	return g.code_gen.cjmp(.je)
}
// if_expr generates machine code for an if/else-if/else chain.
// Comptime conditionals are resolved during code generation instead.
fn (mut g Gen) if_expr(node ast.IfExpr) {
	if node.is_comptime {
		if stmts := g.comptime_conditional(node) {
			g.stmts(stmts)
		}
		return
	}
	if node.branches.len == 0 {
		return
	}
	mut endif_label := 0
	// a jump to the end is only needed when there is more than one branch
	has_endif := node.branches.len > 1
	if has_endif {
		endif_label = g.labels.new_label()
	}
	for idx in 0 .. node.branches.len {
		branch := node.branches[idx]
		if idx == node.branches.len - 1 && node.has_else {
			// unconditional else branch
			g.stmts(branch.stmts)
		} else {
			if branch.cond is ast.BoolLiteral {
				// constant condition: emit the body only when it is `true`
				if branch.cond.val {
					g.stmts(branch.stmts)
				}
				continue
			}
			expr := branch.cond
			label := g.labels.new_label()
			// skip this branch's body when the condition is false
			cjmp_addr := g.condition(expr, false)
			g.labels.patches << LabelPatch{
				id: label
				pos: cjmp_addr
			}
			g.println('; jump to label ${label}')
			g.stmts(branch.stmts)
			if has_endif {
				// after a taken branch, jump over the remaining branches
				jump_addr := g.code_gen.jmp(0)
				g.labels.patches << LabelPatch{
					id: endif_label
					pos: jump_addr
				}
				g.println('; jump to label ${endif_label}')
			}
			// println('after if g.pos=$g.pos() jneaddr=$cjmp_addr')
			g.labels.addrs[label] = g.pos()
			g.println('; label ${label}')
		}
	}
	if has_endif {
		g.labels.addrs[endif_label] = g.pos()
		g.println('; label ${endif_label}')
	}
}
// postfix_expr generates code for `x++` / `x--`.
// Only plain identifiers are handled; other expressions are ignored.
fn (mut g Gen) postfix_expr(node ast.PostfixExpr) {
	if node.expr !is ast.Ident {
		return
	}
	ident := node.expr as ast.Ident
	if node.op == .inc {
		g.code_gen.inc_var(ident)
	} else if node.op == .dec {
		g.code_gen.dec_var(ident)
	}
}
[noreturn]
pub fn (mut g Gen) n_error(s string) {
util.verror('native error', s)

258
vlib/v/gen/native/stmt.v Normal file
View File

@ -0,0 +1,258 @@
// Copyright (c) 2019-2023 Alexander Medvednikov. All rights reserved.
// Use of this source code is governed by an MIT license
// that can be found in the LICENSE file.
module native
import v.ast
fn C.strtol(str &char, endptr &&char, base int) int
// stmts generates machine code for every statement in the list, in order.
pub fn (mut g Gen) stmts(stmts []ast.Stmt) {
	for s in stmts {
		g.stmt(s)
	}
}
// stmt generates machine code for a single statement node by
// dispatching to the matching code generation routine.
fn (mut g Gen) stmt(node ast.Stmt) {
	match node {
		ast.AssignStmt {
			g.code_gen.assign_stmt(node)
		}
		ast.Block {
			g.stmts(node.stmts)
		}
		ast.BranchStmt {
			// break/continue: search inward-out for the innermost (or named)
			// enclosing loop and jump to its end (break) or start (continue)
			label_name := node.label
			for i := g.labels.branches.len - 1; i >= 0; i-- {
				branch := g.labels.branches[i]
				if label_name == '' || label_name == branch.name {
					label := if node.kind == .key_break {
						branch.end
					} else { // continue
						branch.start
					}
					// emit a placeholder jump; patched once the label address is known
					jump_addr := g.code_gen.jmp(0)
					g.labels.patches << LabelPatch{
						id: label
						pos: jump_addr
					}
					g.println('; jump to ${label}: ${node.kind}')
					break
				}
			}
		}
		ast.ConstDecl {}
		ast.DeferStmt {
			// record that this defer was reached by setting a hidden flag variable
			name := '_defer${g.defer_stmts.len}'
			defer_var := g.get_var_offset(name)
			g.code_gen.mov_int_to_var(LocalVar{defer_var, ast.i64_type_idx, name}, 1)
			g.defer_stmts << node
			g.defer_stmts[g.defer_stmts.len - 1].idx_in_fn = g.defer_stmts.len - 1
		}
		ast.ExprStmt {
			g.expr(node.expr)
		}
		ast.FnDecl {
			g.fn_decl(node)
		}
		ast.ForCStmt {
			g.gen_forc_stmt(node)
		}
		ast.ForInStmt {
			if node.stmts.len == 0 {
				// if no statements, just dont make it
				return
			}
			g.code_gen.for_in_stmt(node)
		}
		ast.ForStmt {
			g.for_stmt(node)
		}
		ast.HashStmt {
			// hash statements inject raw opcode bytes given as hex pairs
			words := node.val.split(' ')
			for word in words {
				if word.len != 2 {
					g.n_error('opcodes format: xx xx xx xx\nhash statements are not allowed with the native backend, use the C backend for extended C interoperability.')
				}
				b := unsafe { C.strtol(&char(word.str), 0, 16) }
				// b := word.u8()
				// println('"$word" $b')
				g.write8(b)
			}
		}
		ast.Module {}
		ast.Return {
			g.code_gen.return_stmt(node)
		}
		ast.AsmStmt {
			g.code_gen.gen_asm_stmt(node)
		}
		ast.AssertStmt {
			g.code_gen.gen_assert(node)
		}
		ast.Import {} // do nothing here
		ast.StructDecl {}
		ast.EnumDecl {}
		else {
			eprintln('native.stmt(): bad node: ' + node.type_name())
		}
	}
}
// gen_forc_stmt generates machine code for a C-style `for init; cond; inc {}` loop.
fn (mut g Gen) gen_forc_stmt(node ast.ForCStmt) {
	if node.has_init {
		g.stmts([node.init])
	}
	start := g.pos() // address of the condition check; the loop jumps back here
	start_label := g.labels.new_label()
	mut jump_addr := i64(0) // patch position of the conditional jump that exits the loop
	if node.has_cond {
		cond := node.cond
		match cond {
			ast.InfixExpr {
				match cond.left {
					ast.Ident {
						// NOTE(review): assumes the right operand is an integer
						// literal; other expression kinds would fail this cast
						lit := cond.right as ast.IntegerLiteral
						g.code_gen.cmp_var(cond.left as ast.Ident, lit.val.int())
						// jump out of the loop when the inverse condition holds
						match cond.op {
							.gt {
								jump_addr = g.code_gen.cjmp(.jle)
							}
							.lt {
								jump_addr = g.code_gen.cjmp(.jge)
							}
							else {
								g.n_error('unsupported conditional in for-c loop')
							}
						}
					}
					else {
						g.n_error('unhandled infix.left')
					}
				}
			}
			else {}
		}
		// dump(node.cond)
		g.expr(node.cond)
	}
	end_label := g.labels.new_label()
	g.labels.patches << LabelPatch{
		id: end_label
		pos: int(jump_addr)
	}
	g.println('; jump to label ${end_label}')
	// register start/end so break/continue inside the body can target this loop
	g.labels.branches << BranchLabel{
		name: node.label
		start: start_label
		end: end_label
	}
	g.stmts(node.stmts)
	// `continue` lands here, before the increment statements
	g.labels.addrs[start_label] = g.pos()
	g.println('; label ${start_label}')
	if node.has_inc {
		g.stmts([node.inc])
	}
	g.labels.branches.pop()
	g.code_gen.jmp_back(start)
	g.labels.addrs[end_label] = g.pos()
	g.println('; jump to label ${end_label}')
	// loop back
}
// for_stmt generates machine code for `for {}` and `for cond {}` loops.
fn (mut g Gen) for_stmt(node ast.ForStmt) {
	if node.is_inf {
		if node.stmts.len == 0 {
			// empty body: emit a tight endless jump
			g.code_gen.infloop()
			return
		}
		// infinite loop
		start := g.pos()
		start_label := g.labels.new_label()
		g.labels.addrs[start_label] = start
		g.println('; label ${start_label}')
		end_label := g.labels.new_label()
		// register the labels so break/continue in the body can use them
		g.labels.branches << BranchLabel{
			name: node.label
			start: start_label
			end: end_label
		}
		g.stmts(node.stmts)
		g.labels.branches.pop()
		g.code_gen.jmp_back(start)
		g.println('jmp after infinite for')
		g.labels.addrs[end_label] = g.pos()
		g.println('; label ${end_label}')
		return
	}
	// conditional loop: only `ident op ident` / `ident op int` conditions are supported
	infix_expr := node.cond as ast.InfixExpr
	mut jump_addr := 0 // location of `jne *00 00 00 00*`
	start := g.pos()
	start_label := g.labels.new_label()
	g.labels.addrs[start_label] = start
	g.println('; label ${start_label}')
	match infix_expr.left {
		ast.Ident {
			match infix_expr.right {
				ast.Ident {
					reg := g.code_gen.main_reg()
					g.code_gen.mov_var_to_reg(reg, infix_expr.right as ast.Ident)
					g.code_gen.cmp_var_reg(infix_expr.left as ast.Ident, reg)
				}
				ast.IntegerLiteral {
					lit := infix_expr.right as ast.IntegerLiteral
					g.code_gen.cmp_var(infix_expr.left as ast.Ident, lit.val.int())
				}
				else {
					g.n_error('unhandled expression type')
				}
			}
			// jump past the loop body when the inverse of the condition holds
			match infix_expr.left.tok_kind {
				.lt {
					jump_addr = g.code_gen.cjmp(.jge)
				}
				.gt {
					jump_addr = g.code_gen.cjmp(.jle)
				}
				.le {
					jump_addr = g.code_gen.cjmp(.jg)
				}
				.ge {
					jump_addr = g.code_gen.cjmp(.jl)
				}
				.ne {
					jump_addr = g.code_gen.cjmp(.je)
				}
				.eq {
					jump_addr = g.code_gen.cjmp(.jne)
				}
				else {
					g.n_error('unhandled infix cond token')
				}
			}
		}
		else {
			g.n_error('unhandled infix.left')
		}
	}
	end_label := g.labels.new_label()
	g.labels.patches << LabelPatch{
		id: end_label
		pos: jump_addr
	}
	g.println('; jump to label ${end_label}')
	g.labels.branches << BranchLabel{
		name: node.label
		start: start_label
		end: end_label
	}
	g.stmts(node.stmts)
	g.labels.branches.pop()
	// Go back to `cmp ...`
	g.code_gen.jmp_back(start)
	// Update the jump addr to current pos
	g.labels.addrs[end_label] = g.pos()
	g.println('; label ${end_label}')
	g.println('jmp after for')
}

View File

@ -0,0 +1,28 @@
fn main() {
	// exercise compound-assignment codegen for integers and floats
	test_int()
	test_fp()
}
// test_int checks `+=` on integers, including u8 wrap-around arithmetic.
fn test_int() {
	a := 100
	mut b := a
	b += b // 200
	b += 50 // 250
	assert b == 250
	mut c := u8(b)
	d := 4
	c += u8(-d) // 250 + 252 wraps to 246 in u8 arithmetic
	println(c)
}
// test_fp checks `+=`, `*=` and `/=` on floating point values.
fn test_fp() {
	a := 1.0
	mut b := a
	b += 0.5 // 1.5
	b *= 4 // 6.0
	b /= 2 // 3.0
	println(int(b))
}

View File

@ -0,0 +1,2 @@
246
3