Mirror of https://github.com/vlang/v.git (synced 2023-08-10 21:13:21 +03:00)
fmt: comments
commit d5faf36aa9 (parent 5a5f4ce99a)
@@ -153,7 +153,7 @@ fn (foptions &FormatOptions) format_file(file string) {
 		eprintln('vfmt2 running fmt.fmt over file: $file')
 	}
 	table := table.new_table()
-	file_ast := parser.parse_file(file, table)
+	file_ast := parser.parse_file(file, table, .parse_comments)
 	formatted_content := fmt.fmt(file_ast, table)
 	file_name := filepath.filename(file)
 	vfmt_output_path := filepath.join(os.tmpdir(), 'vfmt_' + file_name)
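
The change above is the user-visible half of this commit: vfmt itself now parses with `.parse_comments`, so comments reach the AST and can be re-emitted, while every other caller in this diff passes `.skip_comments`. A minimal sketch of the two call forms (not part of the diff; it only uses names that appear elsewhere in this change):

	// vfmt keeps comments so it can print them back out:
	table := table.new_table()
	fmt_ast := parser.parse_file(file, table, .parse_comments)
	// vdoc, the fmt tests and parse_files() keep the old behaviour:
	doc_ast := parser.parse_file(file, table, .skip_comments)
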
@@ -302,7 +302,7 @@ Options:
 -diff display only diffs between the formatted source and the original source.
 -l list files whose formatting differs from vfmt.
 -w write result to (source) file(s) instead of to stdout.
 -2 Use the new V parser/vfmt. NB: this is EXPERIMENTAL for now.
 The new vfmt is much faster and more forgiving.
 It also may EAT some of your code for now.
 Please be carefull, and make frequent BACKUPS, when running with -vfmt2 .
@@ -22,9 +22,10 @@ struct Parser {
 	// the #include directives in the parsed .v file
 	file_pcguard string
 	v &V
-	pref &pref.Preferences // Preferences shared from V struct
+	pref &pref.Preferences
 mut:
 	scanner &Scanner
+	// Preferences shared from V struct
 	tokens []Token
 	token_idx int
 	prev_stuck_token_idx int
@@ -261,6 +262,7 @@ fn (p mut Parser) next() {
 	// (only when vfmt compile time flag is enabled, otherwise this function
 	// is not even generated)
 	p.fnext()
+	//
 	p.prev_tok2 = p.prev_tok
 	p.prev_tok = p.tok
 	p.scanner.prev_tok = p.tok
@@ -137,7 +137,7 @@ fn (s mut Scanner) ident_bin_number() string {
 		if !c.is_bin_digit() && c != num_sep {
 			if (!c.is_digit() && !c.is_letter()) || s.inside_string {
 				break
 			}
 			else if !has_wrong_digit {
 				has_wrong_digit = true
 				first_wrong_digit = c
@@ -166,7 +166,7 @@ fn (s mut Scanner) ident_hex_number() string {
 		if !c.is_hex_digit() && c != num_sep {
 			if !c.is_letter() || s.inside_string {
 				break
 			}
 			else if !has_wrong_digit {
 				has_wrong_digit = true
 				first_wrong_digit = c
@@ -195,7 +195,7 @@ fn (s mut Scanner) ident_oct_number() string {
 		if !c.is_oct_digit() && c != num_sep {
 			if (!c.is_digit() && !c.is_letter()) || s.inside_string {
 				break
 			}
 			else if !has_wrong_digit {
 				has_wrong_digit = true
 				first_wrong_digit = c
@@ -14,6 +14,9 @@ import (
 
 pub fn (node &FnDecl) str(t &table.Table) string {
 	mut f := strings.new_builder(30)
+	if node.is_pub {
+		f.write('pub ')
+	}
 	mut receiver := ''
 	if node.is_method {
 		sym := t.get_type_symbol(node.receiver.typ)
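
For illustration only (not part of the diff): with the `node.is_pub` branch above, stringifying a public function or method keeps its prefix, so a declaration like the hypothetical one below is rendered starting with `pub fn (c Config) str() string` instead of having the keyword dropped.

	pub fn (c Config) str() string { // `Config` is a made-up example type
		return 'Config{}'
	}
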
@@ -19,7 +19,7 @@ mut:
 	stmts []ast.Stmt // all module statements from all files
 }
 
-type FilterFn fn(node ast.FnDecl) bool
+type FilterFn fn(node ast.FnDecl)bool
 
 pub fn doc(mod string, table &table.Table) string {
 	mut d := Doc{
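
`FilterFn` is the predicate type vdoc uses to pick which declarations to print. A minimal conforming filter, as a sketch (the function name is invented; `is_pub` is the FnDecl field used elsewhere in this diff):

	fn pub_fns_only(node ast.FnDecl) bool {
		return node.is_pub
	}
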
@@ -45,7 +45,7 @@ pub fn doc(mod string, table &table.Table) string {
 		if file.ends_with('_test.v') || file.ends_with('_windows.v') || file.ends_with('_macos.v') {
 			continue
 		}
-		file_ast := parser.parse_file(filepath.join(path,file), table)
+		file_ast := parser.parse_file(filepath.join(path,file), table, .skip_comments)
 		d.stmts << file_ast.stmts
 	}
 	d.print_fns()
@@ -92,7 +92,7 @@ fn (d Doc) get_fn_signatures(filter_fn FilterFn) []string {
 			}
 			}
 			else {}
 		}
 	}
 	fn_signatures.sort()
 	return fn_signatures
@@ -195,6 +195,9 @@ fn (f mut Fmt) stmt(node ast.Stmt) {
 			f.stmts(it.stmts)
 			f.writeln('}')
 		}
+		ast.LineComment {
+			f.writeln('// $it.text')
+		}
 		ast.Return {
 			f.write('return')
 			// multiple returns
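
With the `ast.LineComment` arm above, a stand-alone comment that reaches the formatter is written back out as `// <text>` instead of silently disappearing. An illustrative input that the new vfmt path can now round-trip (a sketch, not part of the diff):

	fn main() {
		// keep me
		println('hi')
	}
	// the `// keep me` line survives formatting because the parser hands it to
	// Fmt.stmt as an ast.LineComment node and the arm above prints it again
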
@@ -244,6 +247,9 @@ fn (f mut Fmt) stmt(node ast.Stmt) {
 }
 
 fn (f mut Fmt) struct_decl(node ast.StructDecl) {
+	if node.is_pub {
+		f.write('pub ')
+	}
 	f.writeln('struct $node.name {')
 	mut max := 0
 	for field in node.fields {
@@ -15,7 +15,7 @@ const (
 
 fn test_fmt() {
 	fmt_message := 'vfmt tests'
-	eprintln(term.header(fmt_message,'-'))
+	eprintln(term.header(fmt_message, '-'))
 	vexe := os.getenv('VEXE')
 	if vexe.len == 0 || !os.exists(vexe) {
 		eprintln('VEXE must be set')
@@ -23,11 +23,13 @@ fn test_fmt() {
 	}
 	vroot := filepath.dir(vexe)
 	tmpfolder := os.tmpdir()
-	diff_cmd := find_working_diff_command() or { '' }
+	diff_cmd := find_working_diff_command() or {
+		''
+	}
 	mut fmt_bench := benchmark.new_benchmark()
 	// Lookup the existing test _input.vv files:
 	input_files := os.walk_ext('$vroot/vlib/v/fmt/tests', '_input.vv')
-	fmt_bench.set_total_expected_steps( input_files.len )
+	fmt_bench.set_total_expected_steps(input_files.len)
 	for istep, ipath in input_files {
 		fmt_bench.cstep = istep
 		fmt_bench.step()
@@ -39,12 +41,12 @@ fn test_fmt() {
 			continue
 		}
 		expected_ocontent := os.read_file(opath) or {
 			fmt_bench.fail()
 			eprintln(fmt_bench.step_message_fail('cannot read from ${opath}'))
 			continue
 		}
 		table := table.new_table()
-		file_ast := parser.parse_file(ipath, table)
+		file_ast := parser.parse_file(ipath, table, .skip_comments)
 		result_ocontent := fmt.fmt(file_ast, table)
 		if expected_ocontent != result_ocontent {
 			fmt_bench.fail()
@@ -46,11 +46,12 @@ mut:
 	expected_type table.Type
 	scope &ast.Scope
 	imports map[string]string
+	ast_imports []ast.Import
 }
 
 // for tests
 pub fn parse_stmt(text string, table &table.Table, scope &ast.Scope) ast.Stmt {
-	s := scanner.new_scanner(text)
+	s := scanner.new_scanner(text, .skip_comments)
 	mut p := Parser{
 		scanner: s
 		table: table
@@ -64,10 +65,15 @@ pub fn parse_stmt(text string, table &table.Table, scope &ast.Scope) ast.Stmt {
 	return p.stmt()
 }
 
-pub fn parse_file(path string, table &table.Table) ast.File {
+pub fn parse_file(path string, table &table.Table, comments_mode scanner.CommentsMode) ast.File {
+	// println('parse_file("$path")')
+	text := os.read_file(path) or {
+		panic(err)
+	}
 	mut stmts := []ast.Stmt
 	mut p := Parser{
-		scanner: scanner.new_scanner_file(path)
+		// scanner: scanner.new_scanner(text, comments_mode)
+		scanner: scanner.new_scanner_file(path, comments_mode)
 		table: table
 		file_name: path
 		pref: &pref.Preferences{}
@@ -75,6 +81,8 @@ pub fn parse_file(path string, table &table.Table) ast.File {
 			start_pos: 0
 			parent: 0
 		}
+		// comments_mode: comments_mode
+
 	}
 	p.read_first_token()
 	// p.scope = &ast.Scope{start_pos: p.tok.position(), parent: 0}
@@ -84,10 +92,12 @@ pub fn parse_file(path string, table &table.Table) ast.File {
 	p.mod = module_decl.name
 	p.builtin_mod = p.mod == 'builtin'
 	// imports
+	/*
 	mut imports := []ast.Import
 	for p.tok.kind == .key_import {
 		imports << p.import_stmt()
 	}
+	*/
 	// TODO: import only mode
 	for {
 		// res := s.scan()
@@ -104,7 +114,7 @@ pub fn parse_file(path string, table &table.Table) ast.File {
 	return ast.File{
 		path: path
 		mod: module_decl
-		imports: imports
+		imports: p.ast_imports
 		stmts: stmts
 		scope: p.scope
 	}
@@ -113,7 +123,7 @@ pub fn parse_file(path string, table &table.Table) ast.File {
 pub fn parse_files(paths []string, table &table.Table) []ast.File {
 	mut files := []ast.File
 	for path in paths {
-		files << parse_file(path, table)
+		files << parse_file(path, table, .skip_comments)
 	}
 	return files
 }
@@ -164,12 +174,20 @@ pub fn (p mut Parser) parse_block() []ast.Stmt {
 }
 
 fn (p mut Parser) next() {
+	// for {
 	p.tok = p.peek_tok
 	p.peek_tok = p.scanner.scan()
+	// if !(p.tok.kind in [.line_comment, .mline_comment]) {
+	// break
+	// }
+	// }
 	// println(p.tok.str())
 }
 
 fn (p mut Parser) check(expected token.Kind) {
+	// for p.tok.kind in [.line_comment, .mline_comment] {
+	// p.next()
+	// }
 	if p.tok.kind != expected {
 		s := 'syntax error: unexpected `${p.tok.kind.str()}`, expecting `${expected.str()}`'
 		p.error(s)
@@ -211,6 +229,14 @@ pub fn (p mut Parser) top_stmt() ast.Stmt {
 		.lsbr {
 			return p.attr()
 		}
+		.key_module {
+			return p.module_decl()
+		}
+		.key_import {
+			node := p.import_stmt()
+			p.ast_imports << node
+			return node[0]
+		}
 		.key_global {
 			return p.global_decl()
 		}
@@ -239,10 +265,7 @@ pub fn (p mut Parser) top_stmt() ast.Stmt {
 			return p.struct_decl()
 		}
 		.line_comment {
-			// p.next()
-			return ast.LineComment{
-				text: p.scanner.line_comment
-			}
+			return p.line_comment()
 		}
 		.mline_comment {
 			// p.next()
@@ -258,6 +281,14 @@ pub fn (p mut Parser) top_stmt() ast.Stmt {
 	}
 }
 
+pub fn (p mut Parser) line_comment() ast.LineComment {
+	text := p.tok.lit
+	p.next()
+	return ast.LineComment{
+		text: text
+	}
+}
+
 pub fn (p mut Parser) stmt() ast.Stmt {
 	match p.tok.kind {
 		.key_assert {
@@ -273,6 +304,9 @@ pub fn (p mut Parser) stmt() ast.Stmt {
 		.key_for {
 			return p.for_statement()
 		}
+		.line_comment {
+			return p.line_comment()
+		}
 		.key_return {
 			return p.return_stmt()
 		}
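
Taken together, the new `.line_comment` arms and the `line_comment()` helper mean that, when comments are parsed, a `//` token becomes an `ast.LineComment` statement instead of being discarded. A rough sketch of the resulting node for a comment such as `// TODO: tidy this up` (field value illustrative):

	node := ast.LineComment{
		text: 'TODO: tidy this up'
	}
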
@@ -553,7 +587,8 @@ pub fn (p mut Parser) name_expr() ast.Expr {
 	if p.peek_tok.kind == .dot && (is_c || p.known_import(p.tok.lit) || p.mod.all_after('.') == p.tok.lit) {
 		if is_c {
 			mod = 'C'
-		} else {
+		}
+		else {
 			// prepend the full import
 			mod = p.imports[p.tok.lit]
 		}
@@ -1472,7 +1507,8 @@ fn (p mut Parser) struct_decl() ast.StructDecl {
 	p.check(.rcbr)
 	if is_c {
 		name = 'C.$name'
-	} else {
+	}
+	else {
 		name = p.prepend_mod(name)
 	}
 	t := table.TypeSymbol{
@@ -41,9 +41,16 @@ mut:
 	is_vh bool // Keep newlines
 	is_fmt bool // Used only for skipping ${} in strings, since we need literal
 	// string values when generating formatted code.
+	comments_mode CommentsMode
 }
+
+pub enum CommentsMode {
+	skip_comments
+	parse_comments
+}
+
 // new scanner from file.
-pub fn new_scanner_file(file_path string) &Scanner {
+pub fn new_scanner_file(file_path string, comments_mode CommentsMode) &Scanner {
 	if !os.exists(file_path) {
 		verror("$file_path doesn't exist")
 	}
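
The new `CommentsMode` enum is threaded through both scanner constructors, so every caller must now say what it wants done with comments. A minimal sketch of the two entry points after this change (file name and source text are illustrative):

	mut s1 := new_scanner('a := 1 // note', .parse_comments) // comments come back as tokens
	mut s2 := new_scanner_file('main.v', .skip_comments) // comments are skipped as before
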
@@ -60,7 +67,7 @@ pub fn new_scanner_file(file_path string) &Scanner {
 			raw_text = tos(c_text[offset_from_begin], vstrlen(c_text) - offset_from_begin)
 		}
 	}
-	mut s := new_scanner(raw_text)
+	mut s := new_scanner(raw_text, comments_mode) // .skip_comments)
 	// s.init_fmt()
 	s.file_path = file_path
 	return s
@@ -70,13 +77,14 @@ const (
 	is_fmt = os.getenv('VEXE').contains('vfmt')
 )
 // new scanner from string.
-pub fn new_scanner(text string) &Scanner {
+pub fn new_scanner(text string, comments_mode CommentsMode) &Scanner {
 	return &Scanner{
 		text: text
 		print_line_on_error: true
 		print_colored_error: true
 		print_rel_paths_on_error: true
 		is_fmt: is_fmt
+		comments_mode: comments_mode
 	}
 }
 
@@ -105,7 +113,7 @@ const (
 )
 
 fn filter_num_sep(txt byteptr, start int, end int) string {
-	unsafe {
+	unsafe{
 		mut b := malloc(end - start + 1) // add a byte for the endstring 0
 		mut i := start
 		mut i1 := 0
@@ -134,7 +142,7 @@ fn (s mut Scanner) ident_bin_number() string {
 		if !c.is_bin_digit() && c != num_sep {
 			if (!c.is_digit() && !c.is_letter()) || s.inside_string {
 				break
 			}
 			else if !has_wrong_digit {
 				has_wrong_digit = true
 				first_wrong_digit = c
@@ -163,7 +171,7 @@ fn (s mut Scanner) ident_hex_number() string {
 		if !c.is_hex_digit() && c != num_sep {
 			if !c.is_letter() || s.inside_string {
 				break
 			}
 			else if !has_wrong_digit {
 				has_wrong_digit = true
 				first_wrong_digit = c
@@ -192,7 +200,7 @@ fn (s mut Scanner) ident_oct_number() string {
 		if !c.is_oct_digit() && c != num_sep {
 			if (!c.is_digit() && !c.is_letter()) || s.inside_string {
 				break
 			}
 			else if !has_wrong_digit {
 				has_wrong_digit = true
 				first_wrong_digit = c
@@ -332,6 +340,9 @@ fn (s mut Scanner) end_of_file() token.Token {
 }
 
 pub fn (s mut Scanner) scan() token.Token {
+	// if s.comments_mode == .parse_comments {
+	// println('\nscan()')
+	// }
 	// if s.line_comment != '' {
 	// s.fgenln('// LC "$s.line_comment"')
 	// s.line_comment = ''
@@ -400,17 +411,17 @@ pub fn (s mut Scanner) scan() token.Token {
 	// `123`, `.123`
 	else if c.is_digit() || (c == `.` && nextc.is_digit()) {
 		if !s.inside_string {
 			// In C ints with `0` prefix are octal (in V they're decimal), so discarding heading zeros is needed.
 			mut start_pos := s.pos
 			for start_pos < s.text.len && s.text[start_pos] == `0` {
 				start_pos++
 			}
 			mut prefix_zero_num := start_pos - s.pos // how many prefix zeros should be jumped
 			// for 0b, 0o, 0x the heading zero shouldn't be jumped
 			if start_pos == s.text.len || (c == `0` && !s.text[start_pos].is_digit()) {
 				prefix_zero_num--
 			}
 			s.pos += prefix_zero_num // jump these zeros
 		}
 		num := s.ident_number()
 		return s.scan_res(.number, num)
@@ -712,11 +723,19 @@ pub fn (s mut Scanner) scan() token.Token {
 		start := s.pos + 1
 		s.ignore_line()
 		s.line_comment = s.text[start + 1..s.pos]
-		s.line_comment = s.line_comment.trim_space()
-		if s.is_fmt {
-			s.pos-- // fix line_nr, \n was read, and the comment is marked on the next line
-			s.line_nr--
-			return s.scan_res(.line_comment, s.line_comment)
+		// if s.comments_mode == .parse_comments {
+		// println('line c $s.line_comment')
+		// }
+		comment := s.line_comment.trim_space()
+		// s.line_comment = comment
+		if s.comments_mode == .parse_comments {
+			// println('line c "$comment" z=')
+			// fix line_nr, \n was read, and the comment is marked
+			// on the next line
+			s.pos--
+			// println("'" + s.text[s.pos].str() + "'")
+			// s.line_nr--
+			return s.scan_res(.line_comment, comment)
 		}
 		// s.fgenln('// ${s.prev_tok.str()} "$s.line_comment"')
 		// Skip the comment (return the next token)
@@ -748,7 +767,8 @@ pub fn (s mut Scanner) scan() token.Token {
 		s.pos++
 		end := s.pos + 1
 		comment := s.text[start..end]
-		if s.is_fmt {
+		// if s.is_fmt {
+		if false && s.comments_mode == .parse_comments {
 			s.line_comment = comment
 			return s.scan_res(.mline_comment, s.line_comment)
 		}