replace TOKENIZER_TAG_ with TOKEN_
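
A rename of this shape is purely mechanical: only the prefix of the tokenizer's tag constants changes, nothing else. It is the kind of change that is typically scripted, e.g. something like sed -i 's/TOKENIZER_TAG_/TOKEN_/g' ast.c parser.c tokenizer.c plus whatever header defines the enum -- an assumption about how it might be reproduced, not part of the commit itself. The diff touches ast.c (2 changed lines), parser.c (244), and tokenizer.c (288).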
ast.c (2 changed lines)
@@ -31,7 +31,7 @@ Ast astParse(const char* source, const uint32_t len) {
 tokens.tags[tokens.len] = token.tag;
 tokens.starts[tokens.len] = token.loc.start;
 tokens.len++;
-if (token.tag == TOKENIZER_TAG_EOF)
+if (token.tag == TOKEN_EOF)
 break;
 }
 
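
Aside: the tokens.tags[] / tokens.starts[] pair above is a struct-of-arrays token list -- tag and start offset live in parallel arrays indexed by the same tokens.len. A minimal sketch of the layout (field names beyond those visible in the hunk are assumptions):

    typedef struct {
        TokenizerTag* tags; /* tags[i] and starts[i] describe token i */
        uint32_t* starts;
        uint32_t len;
    } TokenList;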

parser.c (244 changed lines)
@@ -74,12 +74,12 @@ static AstTokenIndex eatToken(Parser* p, TokenizerTag tag, bool* ok) {
 
 static void eatDocComments(Parser* p) {
 bool ok;
-while (eatToken(p, TOKENIZER_TAG_DOC_COMMENT, &ok), ok) { }
+while (eatToken(p, TOKEN_DOC_COMMENT, &ok), ok) { }
 }
 
 static void expectSemicolon(Parser* p) {
 bool ok;
-eatToken(p, TOKENIZER_TAG_SEMICOLON, &ok);
+eatToken(p, TOKEN_SEMICOLON, &ok);
 if (ok)
 return;
 
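
Aside: the while (eatToken(p, TOKEN_DOC_COMMENT, &ok), ok) { } line above leans on C's comma operator -- eatToken runs purely for its side effect, and only ok is tested as the loop condition. An equivalent spelled-out form (a sketch with the same behavior):

    for (;;) {
        eatToken(p, TOKEN_DOC_COMMENT, &ok); /* consume one doc comment if present */
        if (!ok)                             /* no doc comment matched: stop */
            break;
    }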
@@ -126,7 +126,7 @@ static AstNodeIndex addExtra(Parser* p, const AstNodeIndex* extra, uint32_t coun
 
 static AstNodeIndex parseByteAlign(Parser* p) {
 bool ok;
-eatToken(p, TOKENIZER_TAG_KEYWORD_ALIGN, &ok);
+eatToken(p, TOKEN_KEYWORD_ALIGN, &ok);
 if (!ok)
 return null_node;
 fprintf(stderr, "parseByteAlign cannot parse alignment\n");
@@ -136,7 +136,7 @@ static AstNodeIndex parseByteAlign(Parser* p) {
 
 static AstNodeIndex parseAddrSpace(Parser* p) {
 bool ok;
-eatToken(p, TOKENIZER_TAG_KEYWORD_ADDRSPACE, &ok);
+eatToken(p, TOKEN_KEYWORD_ADDRSPACE, &ok);
 if (!ok)
 return null_node;
 fprintf(stderr, "parseAddrSpace cannot parse addrspace\n");
@@ -146,7 +146,7 @@ static AstNodeIndex parseAddrSpace(Parser* p) {
 
 static AstNodeIndex parseLinkSection(Parser* p) {
 bool ok;
-eatToken(p, TOKENIZER_TAG_KEYWORD_LINKSECTION, &ok);
+eatToken(p, TOKEN_KEYWORD_LINKSECTION, &ok);
 if (!ok)
 return null_node;
 fprintf(stderr, "parseLinkSection cannot parse linksection\n");
@@ -156,7 +156,7 @@ static AstNodeIndex parseLinkSection(Parser* p) {
 
 static AstNodeIndex parseCallconv(Parser* p) {
 bool ok;
-eatToken(p, TOKENIZER_TAG_KEYWORD_CALLCONV, &ok);
+eatToken(p, TOKEN_KEYWORD_CALLCONV, &ok);
 if (!ok)
 return null_node;
 fprintf(stderr, "parseCallconv cannot parse callconv\n");
@@ -171,15 +171,15 @@ typedef struct {
 } NodeContainerField;
 
 static AstNodeIndex expectContainerField(Parser* p) {
-eatToken(p, TOKENIZER_TAG_KEYWORD_COMPTIME, NULL);
+eatToken(p, TOKEN_KEYWORD_COMPTIME, NULL);
 const AstTokenIndex main_token = p->tok_i;
-if (p->token_tags[p->tok_i] == TOKENIZER_TAG_IDENTIFIER && p->token_tags[p->tok_i + 1] == TOKENIZER_TAG_COLON)
+if (p->token_tags[p->tok_i] == TOKEN_IDENTIFIER && p->token_tags[p->tok_i + 1] == TOKEN_COLON)
 p->tok_i += 2;
 
 const AstNodeIndex type_expr = parseTypeExpr(p);
 const AstNodeIndex align_expr = parseByteAlign(p);
 bool ok;
-eatToken(p, TOKENIZER_TAG_EQUAL, &ok);
+eatToken(p, TOKEN_EQUAL, &ok);
 if (ok) {
 fprintf(stderr, "expectContainerField does not support expr\n");
 exit(1);
@@ -225,27 +225,27 @@ static AstNodeIndex expectContainerField(Parser* p) {
 static AstNodeIndex parsePrimaryTypeExpr(Parser* p) {
 const TokenizerTag tok = p->token_tags[p->tok_i];
 switch (tok) {
-case TOKENIZER_TAG_CHAR_LITERAL:
+case TOKEN_CHAR_LITERAL:
-case TOKENIZER_TAG_NUMBER_LITERAL:
+case TOKEN_NUMBER_LITERAL:
-case TOKENIZER_TAG_KEYWORD_UNREACHABLE:
+case TOKEN_KEYWORD_UNREACHABLE:
-case TOKENIZER_TAG_KEYWORD_ANYFRAME:
+case TOKEN_KEYWORD_ANYFRAME:
-case TOKENIZER_TAG_STRING_LITERAL:
+case TOKEN_STRING_LITERAL:
-case TOKENIZER_TAG_BUILTIN:
+case TOKEN_BUILTIN:
-case TOKENIZER_TAG_KEYWORD_FN:
+case TOKEN_KEYWORD_FN:
-case TOKENIZER_TAG_KEYWORD_IF:
+case TOKEN_KEYWORD_IF:
-case TOKENIZER_TAG_KEYWORD_SWITCH:
+case TOKEN_KEYWORD_SWITCH:
-case TOKENIZER_TAG_KEYWORD_EXTERN:
+case TOKEN_KEYWORD_EXTERN:
-case TOKENIZER_TAG_KEYWORD_PACKED:
+case TOKEN_KEYWORD_PACKED:
-case TOKENIZER_TAG_KEYWORD_STRUCT:
+case TOKEN_KEYWORD_STRUCT:
-case TOKENIZER_TAG_KEYWORD_OPAQUE:
+case TOKEN_KEYWORD_OPAQUE:
-case TOKENIZER_TAG_KEYWORD_ENUM:
+case TOKEN_KEYWORD_ENUM:
-case TOKENIZER_TAG_KEYWORD_UNION:
+case TOKEN_KEYWORD_UNION:
-case TOKENIZER_TAG_KEYWORD_COMPTIME:
+case TOKEN_KEYWORD_COMPTIME:
-case TOKENIZER_TAG_MULTILINE_STRING_LITERAL_LINE:
+case TOKEN_MULTILINE_STRING_LITERAL_LINE:
 fprintf(stderr, "parsePrimaryTypeExpr does not support %s\n", tokenizerGetTagString(tok));
 exit(1);
-case TOKENIZER_TAG_IDENTIFIER:
+case TOKEN_IDENTIFIER:
-if (p->token_tags[p->tok_i + 1] == TOKENIZER_TAG_COLON) {
+if (p->token_tags[p->tok_i + 1] == TOKEN_COLON) {
 fprintf(stderr, "parsePrimaryTypeExpr does not support identifier followed by colon\n");
 exit(1);
 }
@@ -255,12 +255,12 @@ static AstNodeIndex parsePrimaryTypeExpr(Parser* p) {
 .tag = AST_NODE_TAG_IDENTIFIER,
 .main_token = nextToken(p),
 .data = {} });
-case TOKENIZER_TAG_KEYWORD_INLINE:
+case TOKEN_KEYWORD_INLINE:
-case TOKENIZER_TAG_KEYWORD_FOR:
+case TOKEN_KEYWORD_FOR:
-case TOKENIZER_TAG_KEYWORD_WHILE:
+case TOKEN_KEYWORD_WHILE:
-case TOKENIZER_TAG_PERIOD:
+case TOKEN_PERIOD:
-case TOKENIZER_TAG_KEYWORD_ERROR:
+case TOKEN_KEYWORD_ERROR:
-case TOKENIZER_TAG_L_PAREN:
+case TOKEN_L_PAREN:
 fprintf(stderr, "parsePrimaryTypeExpr does not support %s\n", tokenizerGetTagString(tok));
 exit(1);
 default:
@@ -271,10 +271,10 @@ static AstNodeIndex parsePrimaryTypeExpr(Parser* p) {
 static AstNodeIndex parseSuffixOp(Parser* p) {
 const TokenizerTag tok = p->token_tags[p->tok_i];
 switch (tok) {
-case TOKENIZER_TAG_L_BRACKET:
+case TOKEN_L_BRACKET:
-case TOKENIZER_TAG_PERIOD_ASTERISK:
+case TOKEN_PERIOD_ASTERISK:
-case TOKENIZER_TAG_INVALID_PERIODASTERISKS:
+case TOKEN_INVALID_PERIODASTERISKS:
-case TOKENIZER_TAG_PERIOD:
+case TOKEN_PERIOD:
 fprintf(stderr, "parseSuffixOp does not support %s\n", tokenizerGetTagString(tok));
 exit(1);
 default:
@@ -284,7 +284,7 @@ static AstNodeIndex parseSuffixOp(Parser* p) {
 
 static AstNodeIndex parseSuffixExpr(Parser* p) {
 bool ok;
-eatToken(p, TOKENIZER_TAG_KEYWORD_ASYNC, &ok);
+eatToken(p, TOKEN_KEYWORD_ASYNC, &ok);
 if (ok) {
 fprintf(stderr, "async not supported\n");
 exit(1);
@@ -300,13 +300,13 @@ static AstNodeIndex parseSuffixExpr(Parser* p) {
 res = suffix_op;
 continue;
 }
-eatToken(p, TOKENIZER_TAG_L_PAREN, &ok);
+eatToken(p, TOKEN_L_PAREN, &ok);
 if (ok) {
 fprintf(stderr, "parseSuffixExpr does not support expr with parens\n");
 exit(1);
 }
 // TODO more work
-// const bool comma = p->token_tags[p->tok_i - 2] == TOKENIZER_TAG_COMMA;
+// const bool comma = p->token_tags[p->tok_i - 2] == TOKEN_COMMA;
 
 return res;
 }
@@ -329,7 +329,7 @@ static AstNodeIndex parseErrorUnionExpr(Parser* p) {
 if (suffix_expr == 0)
 return null_node;
 bool ok;
-const AstNodeIndex bang = eatToken(p, TOKENIZER_TAG_BANG, &ok);
+const AstNodeIndex bang = eatToken(p, TOKEN_BANG, &ok);
 if (!ok)
 return suffix_expr;
 return addNode(
@@ -347,11 +347,11 @@ static AstNodeIndex parseErrorUnionExpr(Parser* p) {
 static AstNodeIndex parseTypeExpr(Parser* p) {
 const AstNodeIndex tok = p->token_tags[p->tok_i];
 switch (tok) {
-case TOKENIZER_TAG_QUESTION_MARK:
+case TOKEN_QUESTION_MARK:
-case TOKENIZER_TAG_KEYWORD_ANYFRAME:
+case TOKEN_KEYWORD_ANYFRAME:
-case TOKENIZER_TAG_ASTERISK:
+case TOKEN_ASTERISK:
-case TOKENIZER_TAG_ASTERISK_ASTERISK:
+case TOKEN_ASTERISK_ASTERISK:
-case TOKENIZER_TAG_L_BRACKET:
+case TOKEN_L_BRACKET:
 fprintf(stderr, "parseTypeExpr not supported for %s\n", tokenizerGetTagString(tok));
 exit(1);
 default:
@@ -362,13 +362,13 @@ static AstNodeIndex parseTypeExpr(Parser* p) {
 static SmallSpan parseParamDeclList(Parser* p) {
 // can only parse functions with no declarations
 bool ok;
-AstTokenIndex got_token = eatToken(p, TOKENIZER_TAG_L_PAREN, &ok);
+AstTokenIndex got_token = eatToken(p, TOKEN_L_PAREN, &ok);
 if (!ok) {
 fprintf(stderr, "expected (, got %s\n", tokenizerGetTagString(got_token));
 exit(1);
 }
 
-got_token = eatToken(p, TOKENIZER_TAG_R_PAREN, &ok);
+got_token = eatToken(p, TOKEN_R_PAREN, &ok);
 if (!ok) {
 fprintf(stderr, "expected ), got %s\n", tokenizerGetTagString(got_token));
 exit(1);
@@ -387,20 +387,20 @@ static uint32_t reserveNode(Parser* p, AstNodeTag tag) {
 
 static AstNodeIndex parseFnProto(Parser* p) {
 bool ok;
-AstNodeIndex fn_token = eatToken(p, TOKENIZER_TAG_KEYWORD_FN, &ok);
+AstNodeIndex fn_token = eatToken(p, TOKEN_KEYWORD_FN, &ok);
 if (!ok)
 return null_node;
 
 AstNodeIndex fn_proto_index = reserveNode(p, AST_NODE_TAG_FN_PROTO);
 
-eatToken(p, TOKENIZER_TAG_IDENTIFIER, NULL);
+eatToken(p, TOKEN_IDENTIFIER, NULL);
 
 SmallSpan params = parseParamDeclList(p);
 const AstNodeIndex align_expr = parseByteAlign(p);
 const AstNodeIndex addrspace_expr = parseAddrSpace(p);
 const AstNodeIndex section_expr = parseLinkSection(p);
 const AstNodeIndex callconv_expr = parseCallconv(p);
-eatToken(p, TOKENIZER_TAG_BANG, NULL);
+eatToken(p, TOKEN_BANG, NULL);
 
 const AstNodeIndex return_type_expr = parseTypeExpr(p);
 
@@ -425,7 +425,7 @@ static AstNodeIndex parseFnProto(Parser* p) {
 }
 
 static AstTokenIndex parseBlockLabel(Parser* p) {
-if (p->token_tags[p->tok_i] == TOKENIZER_TAG_IDENTIFIER && p->token_tags[p->tok_i + 1] == TOKENIZER_TAG_COLON) {
+if (p->token_tags[p->tok_i] == TOKEN_IDENTIFIER && p->token_tags[p->tok_i + 1] == TOKEN_COLON) {
 const AstTokenIndex identifier = p->tok_i;
 p->tok_i += 2;
 return identifier;
@@ -435,7 +435,7 @@ static AstTokenIndex parseBlockLabel(Parser* p) {
 
 static AstNodeIndex parseForStatement(Parser* p) {
 bool ok;
-const AstNodeIndex for_token = eatToken(p, TOKENIZER_TAG_KEYWORD_FOR, &ok);
+const AstNodeIndex for_token = eatToken(p, TOKEN_KEYWORD_FOR, &ok);
 if (!ok)
 return null_node;
 
@@ -446,7 +446,7 @@ static AstNodeIndex parseForStatement(Parser* p) {
 
 static AstNodeIndex parseWhileStatement(Parser* p) {
 bool ok;
-const AstNodeIndex while_token = eatToken(p, TOKENIZER_TAG_KEYWORD_WHILE, &ok);
+const AstNodeIndex while_token = eatToken(p, TOKEN_KEYWORD_WHILE, &ok);
 if (!ok)
 return null_node;
 
@@ -457,7 +457,7 @@ static AstNodeIndex parseWhileStatement(Parser* p) {
 
 static AstNodeIndex parseLoopStatement(Parser* p) {
 bool ok_inline_token;
-eatToken(p, TOKENIZER_TAG_KEYWORD_INLINE, &ok_inline_token);
+eatToken(p, TOKEN_KEYWORD_INLINE, &ok_inline_token);
 
 const AstNodeIndex for_statement = parseForStatement(p);
 if (for_statement != 0)
@@ -492,21 +492,21 @@ static AstNodeIndex expectVarDeclExprStatement(Parser* p) {
 static AstNodeIndex parseLabeledStatement(Parser*);
 static AstNodeIndex expectStatement(Parser* p, bool allow_defer_var) {
 bool ok;
-if (eatToken(p, TOKENIZER_TAG_KEYWORD_COMPTIME, &ok), ok) {
+if (eatToken(p, TOKEN_KEYWORD_COMPTIME, &ok), ok) {
 fprintf(stderr, "expectStatement: comptime keyword not supported\n");
 exit(1);
 }
 
 const AstNodeIndex tok = p->token_tags[p->tok_i];
 switch (tok) {
-case TOKENIZER_TAG_KEYWORD_NOSUSPEND:
+case TOKEN_KEYWORD_NOSUSPEND:
-case TOKENIZER_TAG_KEYWORD_SUSPEND:
+case TOKEN_KEYWORD_SUSPEND:
-case TOKENIZER_TAG_KEYWORD_DEFER:
+case TOKEN_KEYWORD_DEFER:
-case TOKENIZER_TAG_KEYWORD_ERRDEFER:
+case TOKEN_KEYWORD_ERRDEFER:
-case TOKENIZER_TAG_KEYWORD_IF:
+case TOKEN_KEYWORD_IF:
-case TOKENIZER_TAG_KEYWORD_ENUM:
+case TOKEN_KEYWORD_ENUM:
-case TOKENIZER_TAG_KEYWORD_STRUCT:
+case TOKEN_KEYWORD_STRUCT:
-case TOKENIZER_TAG_KEYWORD_UNION:;
+case TOKEN_KEYWORD_UNION:;
 const char* tok_str = tokenizerGetTagString(tok);
 fprintf(stderr, "expectStatement does not support keyword %s\n", tok_str);
 exit(1);
@@ -532,7 +532,7 @@ static void cleanupScratch(CleanupScratch* c) { c->scratch->len = c->old_len; }
 
 static AstNodeIndex parseBlock(Parser* p) {
 bool ok;
-const AstNodeIndex lbrace = eatToken(p, TOKENIZER_TAG_L_BRACE, &ok);
+const AstNodeIndex lbrace = eatToken(p, TOKEN_L_BRACE, &ok);
 if (!ok)
 return null_node;
 
@@ -542,7 +542,7 @@ static AstNodeIndex parseBlock(Parser* p) {
 };
 
 while (1) {
-if (p->token_tags[p->tok_i] == TOKENIZER_TAG_R_BRACE)
+if (p->token_tags[p->tok_i] == TOKEN_R_BRACE)
 break;
 
 // "const AstNodeIndex statement" once tinycc supports typeof_unqual (C23)
@@ -551,8 +551,8 @@ static AstNodeIndex parseBlock(Parser* p) {
 break;
 SLICE_APPEND(AstNodeIndex, &p->scratch, statement);
 }
-expectToken(p, TOKENIZER_TAG_R_BRACE, NULL);
+expectToken(p, TOKEN_R_BRACE, NULL);
-const bool semicolon = (p->token_tags[p->tok_i] - 2 == TOKENIZER_TAG_SEMICOLON);
+const bool semicolon = (p->token_tags[p->tok_i] - 2 == TOKEN_SEMICOLON);
 
 const uint32_t statements_len = p->scratch.len - scratch_top.old_len;
 switch (statements_len) {
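
Aside: scratch_top.old_len and p->scratch.len - scratch_top.old_len above are a scratch-stack idiom: every parse function appends temporary node indices to one shared growable array, remembers the array length on entry, and cleanupScratch truncates back to that mark on exit. A minimal sketch of the idea (the struct layout is an assumption, not the repo's actual definition):

    typedef struct { AstNodeIndex* items; uint32_t len; uint32_t cap; } Scratch; /* hypothetical layout */

    const uint32_t old_len = scratch.len;         /* mark on entry */
    /* ... SLICE_APPEND(AstNodeIndex, &scratch, node) for each child ... */
    const uint32_t count = scratch.len - old_len; /* children produced by this production */
    /* copy items[old_len..len) into the AST's extra data, then: */
    scratch.len = old_len;                        /* pop them all in O(1) */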
@@ -626,9 +626,9 @@ static AstNodeIndex parseLabeledStatement(Parser* p) {
 
 static AstNodeIndex parseVarDeclProto(Parser* p) {
 bool ok;
-eatToken(p, TOKENIZER_TAG_KEYWORD_CONST, &ok);
+eatToken(p, TOKEN_KEYWORD_CONST, &ok);
 if (!ok) {
-eatToken(p, TOKENIZER_TAG_KEYWORD_VAR, &ok);
+eatToken(p, TOKEN_KEYWORD_VAR, &ok);
 if (!ok)
 return null_node;
 }
@@ -651,12 +651,12 @@ static AstNodeIndex expectTopLevelDecl(Parser* p) {
 AstTokenIndex extern_export_inline_token = nextToken(p);
 
 switch (p->token_tags[extern_export_inline_token]) {
-case TOKENIZER_TAG_KEYWORD_EXTERN:
+case TOKEN_KEYWORD_EXTERN:
-eatToken(p, TOKENIZER_TAG_STRING_LITERAL, NULL);
+eatToken(p, TOKEN_STRING_LITERAL, NULL);
 break;
-case TOKENIZER_TAG_KEYWORD_EXPORT:
+case TOKEN_KEYWORD_EXPORT:
-case TOKENIZER_TAG_KEYWORD_INLINE:
+case TOKEN_KEYWORD_INLINE:
-case TOKENIZER_TAG_KEYWORD_NOINLINE:
+case TOKEN_KEYWORD_NOINLINE:
 break;
 default:
 p->tok_i--;
@@ -665,10 +665,10 @@ static AstNodeIndex expectTopLevelDecl(Parser* p) {
 AstNodeIndex fn_proto = parseFnProto(p);
 if (fn_proto != 0) {
 switch (p->token_tags[p->tok_i]) {
-case TOKENIZER_TAG_SEMICOLON:
+case TOKEN_SEMICOLON:
 p->tok_i++;
 return fn_proto;
-case TOKENIZER_TAG_L_BRACE:;
+case TOKEN_L_BRACE:;
 AstNodeIndex fn_decl_index = reserveNode(p, AST_NODE_TAG_FN_DECL);
 AstNodeIndex body_block = parseBlock(p);
 return setNode(
@@ -684,7 +684,7 @@ static AstNodeIndex expectTopLevelDecl(Parser* p) {
 }
 }
 
-eatToken(p, TOKENIZER_TAG_KEYWORD_THREADLOCAL, NULL);
+eatToken(p, TOKEN_KEYWORD_THREADLOCAL, NULL);
 AstNodeIndex var_decl = parseGlobalVarDecl(p);
 if (var_decl != 0) {
 return var_decl;
@@ -704,46 +704,46 @@ void findNextContainerMember(Parser* p) {
 
 switch (p->token_tags[tok]) {
 // Any of these can start a new top level declaration
-case TOKENIZER_TAG_KEYWORD_TEST:
+case TOKEN_KEYWORD_TEST:
-case TOKENIZER_TAG_KEYWORD_COMPTIME:
+case TOKEN_KEYWORD_COMPTIME:
-case TOKENIZER_TAG_KEYWORD_PUB:
+case TOKEN_KEYWORD_PUB:
-case TOKENIZER_TAG_KEYWORD_EXPORT:
+case TOKEN_KEYWORD_EXPORT:
-case TOKENIZER_TAG_KEYWORD_EXTERN:
+case TOKEN_KEYWORD_EXTERN:
-case TOKENIZER_TAG_KEYWORD_INLINE:
+case TOKEN_KEYWORD_INLINE:
-case TOKENIZER_TAG_KEYWORD_NOINLINE:
+case TOKEN_KEYWORD_NOINLINE:
-case TOKENIZER_TAG_KEYWORD_USINGNAMESPACE:
+case TOKEN_KEYWORD_USINGNAMESPACE:
-case TOKENIZER_TAG_KEYWORD_THREADLOCAL:
+case TOKEN_KEYWORD_THREADLOCAL:
-case TOKENIZER_TAG_KEYWORD_CONST:
+case TOKEN_KEYWORD_CONST:
-case TOKENIZER_TAG_KEYWORD_VAR:
+case TOKEN_KEYWORD_VAR:
-case TOKENIZER_TAG_KEYWORD_FN:
+case TOKEN_KEYWORD_FN:
 if (level == 0) {
 p->tok_i--;
 return;
 }
 break;
-case TOKENIZER_TAG_IDENTIFIER:
+case TOKEN_IDENTIFIER:
-if (p->token_tags[tok + 1] == TOKENIZER_TAG_COMMA && level == 0) {
+if (p->token_tags[tok + 1] == TOKEN_COMMA && level == 0) {
 p->tok_i--;
 return;
 }
 break;
-case TOKENIZER_TAG_COMMA:
+case TOKEN_COMMA:
-case TOKENIZER_TAG_SEMICOLON:
+case TOKEN_SEMICOLON:
 // This decl was likely meant to end here
 if (level == 0)
 return;
 break;
-case TOKENIZER_TAG_L_PAREN:
+case TOKEN_L_PAREN:
-case TOKENIZER_TAG_L_BRACKET:
+case TOKEN_L_BRACKET:
-case TOKENIZER_TAG_L_BRACE:
+case TOKEN_L_BRACE:
 level++;
 break;
-case TOKENIZER_TAG_R_PAREN:
+case TOKEN_R_PAREN:
-case TOKENIZER_TAG_R_BRACKET:
+case TOKEN_R_BRACKET:
 if (level != 0)
 level--;
 break;
-case TOKENIZER_TAG_R_BRACE:
+case TOKEN_R_BRACE:
 if (level == 0) {
 // end of container, exit
 p->tok_i--;
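
Aside: findNextContainerMember above is panic-mode error recovery. It scans forward, tracking nesting depth in level -- incremented on (, [ and {, decremented on their closers -- and stops at the first token at depth 0 that can start the next container member or plausibly terminate the current one, backing tok_i up where the caller needs to re-read that token.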
@@ -751,7 +751,7 @@ void findNextContainerMember(Parser* p) {
 }
 level--;
 break;
-case TOKENIZER_TAG_EOF:
+case TOKEN_EOF:
 p->tok_i--;
 return;
 default:
@@ -766,7 +766,7 @@ static Members parseContainerMembers(Parser* p) {
 .old_len = p->scratch.len,
 };
 bool ok;
-while (eatToken(p, TOKENIZER_TAG_CONTAINER_DOC_COMMENT, &ok), ok)
+while (eatToken(p, TOKEN_CONTAINER_DOC_COMMENT, &ok), ok)
 ;
 
 FieldState field_state = { .tag = FIELD_STATE_NONE };
@@ -775,13 +775,13 @@ static Members parseContainerMembers(Parser* p) {
 while (1) {
 eatDocComments(p);
 switch (p->token_tags[p->tok_i]) {
-case TOKENIZER_TAG_KEYWORD_TEST:
+case TOKEN_KEYWORD_TEST:
-case TOKENIZER_TAG_KEYWORD_COMPTIME:
+case TOKEN_KEYWORD_COMPTIME:
-case TOKENIZER_TAG_KEYWORD_USINGNAMESPACE:;
+case TOKEN_KEYWORD_USINGNAMESPACE:;
 const char* str = tokenizerGetTagString(p->token_tags[p->tok_i]);
 fprintf(stderr, "%s not implemented in parseContainerMembers\n", str);
 exit(1);
-case TOKENIZER_TAG_KEYWORD_PUB: {
+case TOKEN_KEYWORD_PUB: {
 p->tok_i++;
 AstNodeIndex top_level_decl = expectTopLevelDecl(p);
 if (top_level_decl != 0) {
@@ -791,17 +791,17 @@ static Members parseContainerMembers(Parser* p) {
 }
 SLICE_APPEND(AstNodeIndex, &p->scratch, top_level_decl);
 }
-trailing = p->token_tags[p->tok_i - 1] == TOKENIZER_TAG_SEMICOLON;
+trailing = p->token_tags[p->tok_i - 1] == TOKEN_SEMICOLON;
 break;
 }
-case TOKENIZER_TAG_KEYWORD_CONST:
+case TOKEN_KEYWORD_CONST:
-case TOKENIZER_TAG_KEYWORD_VAR:
+case TOKEN_KEYWORD_VAR:
-case TOKENIZER_TAG_KEYWORD_THREADLOCAL:
+case TOKEN_KEYWORD_THREADLOCAL:
-case TOKENIZER_TAG_KEYWORD_EXPORT:
+case TOKEN_KEYWORD_EXPORT:
-case TOKENIZER_TAG_KEYWORD_EXTERN:
+case TOKEN_KEYWORD_EXTERN:
-case TOKENIZER_TAG_KEYWORD_INLINE:
+case TOKEN_KEYWORD_INLINE:
-case TOKENIZER_TAG_KEYWORD_NOINLINE:
+case TOKEN_KEYWORD_NOINLINE:
-case TOKENIZER_TAG_KEYWORD_FN: {
+case TOKEN_KEYWORD_FN: {
 const AstNodeIndex top_level_decl = expectTopLevelDecl(p);
 if (top_level_decl != 0) {
 if (field_state.tag == FIELD_STATE_SEEN) {
@@ -810,11 +810,11 @@ static Members parseContainerMembers(Parser* p) {
 }
 SLICE_APPEND(AstNodeIndex, &p->scratch, top_level_decl);
 }
-trailing = (p->token_tags[p->tok_i - 1] == TOKENIZER_TAG_SEMICOLON);
+trailing = (p->token_tags[p->tok_i - 1] == TOKEN_SEMICOLON);
 break;
 }
-case TOKENIZER_TAG_EOF:
+case TOKEN_EOF:
-case TOKENIZER_TAG_R_BRACE:
+case TOKEN_R_BRACE:
 goto break_loop;
 default:;
 // skip parseCStyleContainer
@@ -831,12 +831,12 @@ static Members parseContainerMembers(Parser* p) {
 }
 SLICE_APPEND(AstNodeIndex, &p->scratch, container_field);
 switch (p->token_tags[p->tok_i]) {
-case TOKENIZER_TAG_COMMA:
+case TOKEN_COMMA:
 p->tok_i++;
 trailing = true;
 continue;
-case TOKENIZER_TAG_R_BRACE:
+case TOKEN_R_BRACE:
-case TOKENIZER_TAG_EOF:
+case TOKEN_EOF:
 trailing = false;
 goto break_loop;
 default:;

tokenizer.c (288 changed lines)
@@ -19,55 +19,55 @@ const char* tokenizerGetTagString(TokenizerTag tag) {
 }
 
 const KeywordMap keywords[] = {
-{ "addrspace", TOKENIZER_TAG_KEYWORD_ADDRSPACE },
+{ "addrspace", TOKEN_KEYWORD_ADDRSPACE },
-{ "align", TOKENIZER_TAG_KEYWORD_ALIGN },
+{ "align", TOKEN_KEYWORD_ALIGN },
-{ "allowzero", TOKENIZER_TAG_KEYWORD_ALLOWZERO },
+{ "allowzero", TOKEN_KEYWORD_ALLOWZERO },
-{ "and", TOKENIZER_TAG_KEYWORD_AND },
+{ "and", TOKEN_KEYWORD_AND },
-{ "anyframe", TOKENIZER_TAG_KEYWORD_ANYFRAME },
+{ "anyframe", TOKEN_KEYWORD_ANYFRAME },
-{ "anytype", TOKENIZER_TAG_KEYWORD_ANYTYPE },
+{ "anytype", TOKEN_KEYWORD_ANYTYPE },
-{ "asm", TOKENIZER_TAG_KEYWORD_ASM },
+{ "asm", TOKEN_KEYWORD_ASM },
-{ "async", TOKENIZER_TAG_KEYWORD_ASYNC },
+{ "async", TOKEN_KEYWORD_ASYNC },
-{ "await", TOKENIZER_TAG_KEYWORD_AWAIT },
+{ "await", TOKEN_KEYWORD_AWAIT },
-{ "break", TOKENIZER_TAG_KEYWORD_BREAK },
+{ "break", TOKEN_KEYWORD_BREAK },
-{ "callconv", TOKENIZER_TAG_KEYWORD_CALLCONV },
+{ "callconv", TOKEN_KEYWORD_CALLCONV },
-{ "catch", TOKENIZER_TAG_KEYWORD_CATCH },
+{ "catch", TOKEN_KEYWORD_CATCH },
-{ "comptime", TOKENIZER_TAG_KEYWORD_COMPTIME },
+{ "comptime", TOKEN_KEYWORD_COMPTIME },
-{ "const", TOKENIZER_TAG_KEYWORD_CONST },
+{ "const", TOKEN_KEYWORD_CONST },
-{ "continue", TOKENIZER_TAG_KEYWORD_CONTINUE },
+{ "continue", TOKEN_KEYWORD_CONTINUE },
-{ "defer", TOKENIZER_TAG_KEYWORD_DEFER },
+{ "defer", TOKEN_KEYWORD_DEFER },
-{ "else", TOKENIZER_TAG_KEYWORD_ELSE },
+{ "else", TOKEN_KEYWORD_ELSE },
-{ "enum", TOKENIZER_TAG_KEYWORD_ENUM },
+{ "enum", TOKEN_KEYWORD_ENUM },
-{ "errdefer", TOKENIZER_TAG_KEYWORD_ERRDEFER },
+{ "errdefer", TOKEN_KEYWORD_ERRDEFER },
-{ "error", TOKENIZER_TAG_KEYWORD_ERROR },
+{ "error", TOKEN_KEYWORD_ERROR },
-{ "export", TOKENIZER_TAG_KEYWORD_EXPORT },
+{ "export", TOKEN_KEYWORD_EXPORT },
-{ "extern", TOKENIZER_TAG_KEYWORD_EXTERN },
+{ "extern", TOKEN_KEYWORD_EXTERN },
-{ "fn", TOKENIZER_TAG_KEYWORD_FN },
+{ "fn", TOKEN_KEYWORD_FN },
-{ "for", TOKENIZER_TAG_KEYWORD_FOR },
+{ "for", TOKEN_KEYWORD_FOR },
-{ "if", TOKENIZER_TAG_KEYWORD_IF },
+{ "if", TOKEN_KEYWORD_IF },
-{ "inline", TOKENIZER_TAG_KEYWORD_INLINE },
+{ "inline", TOKEN_KEYWORD_INLINE },
-{ "linksection", TOKENIZER_TAG_KEYWORD_LINKSECTION },
+{ "linksection", TOKEN_KEYWORD_LINKSECTION },
-{ "noalias", TOKENIZER_TAG_KEYWORD_NOALIAS },
+{ "noalias", TOKEN_KEYWORD_NOALIAS },
-{ "noinline", TOKENIZER_TAG_KEYWORD_NOINLINE },
+{ "noinline", TOKEN_KEYWORD_NOINLINE },
-{ "nosuspend", TOKENIZER_TAG_KEYWORD_NOSUSPEND },
+{ "nosuspend", TOKEN_KEYWORD_NOSUSPEND },
-{ "opaque", TOKENIZER_TAG_KEYWORD_OPAQUE },
+{ "opaque", TOKEN_KEYWORD_OPAQUE },
-{ "or", TOKENIZER_TAG_KEYWORD_OR },
+{ "or", TOKEN_KEYWORD_OR },
-{ "orelse", TOKENIZER_TAG_KEYWORD_ORELSE },
+{ "orelse", TOKEN_KEYWORD_ORELSE },
-{ "packed", TOKENIZER_TAG_KEYWORD_PACKED },
+{ "packed", TOKEN_KEYWORD_PACKED },
-{ "pub", TOKENIZER_TAG_KEYWORD_PUB },
+{ "pub", TOKEN_KEYWORD_PUB },
-{ "resume", TOKENIZER_TAG_KEYWORD_RESUME },
+{ "resume", TOKEN_KEYWORD_RESUME },
-{ "return", TOKENIZER_TAG_KEYWORD_RETURN },
+{ "return", TOKEN_KEYWORD_RETURN },
-{ "struct", TOKENIZER_TAG_KEYWORD_STRUCT },
+{ "struct", TOKEN_KEYWORD_STRUCT },
-{ "suspend", TOKENIZER_TAG_KEYWORD_SUSPEND },
+{ "suspend", TOKEN_KEYWORD_SUSPEND },
-{ "switch", TOKENIZER_TAG_KEYWORD_SWITCH },
+{ "switch", TOKEN_KEYWORD_SWITCH },
-{ "test", TOKENIZER_TAG_KEYWORD_TEST },
+{ "test", TOKEN_KEYWORD_TEST },
-{ "threadlocal", TOKENIZER_TAG_KEYWORD_THREADLOCAL },
+{ "threadlocal", TOKEN_KEYWORD_THREADLOCAL },
-{ "try", TOKENIZER_TAG_KEYWORD_TRY },
+{ "try", TOKEN_KEYWORD_TRY },
-{ "union", TOKENIZER_TAG_KEYWORD_UNION },
+{ "union", TOKEN_KEYWORD_UNION },
-{ "unreachable", TOKENIZER_TAG_KEYWORD_UNREACHABLE },
+{ "unreachable", TOKEN_KEYWORD_UNREACHABLE },
-{ "usingnamespace", TOKENIZER_TAG_KEYWORD_USINGNAMESPACE },
+{ "usingnamespace", TOKEN_KEYWORD_USINGNAMESPACE },
-{ "var", TOKENIZER_TAG_KEYWORD_VAR },
+{ "var", TOKEN_KEYWORD_VAR },
-{ "volatile", TOKENIZER_TAG_KEYWORD_VOLATILE },
+{ "volatile", TOKEN_KEYWORD_VOLATILE },
-{ "while", TOKENIZER_TAG_KEYWORD_WHILE }
+{ "while", TOKEN_KEYWORD_WHILE }
 };
 
 // TODO binary search
@@ -80,13 +80,13 @@ static TokenizerTag getKeyword(const char* bytes, const uint32_t len) {
 if (len == klen) {
 return keywords[i].tag;
 } else {
-return TOKENIZER_TAG_INVALID;
+return TOKEN_INVALID;
 }
 } else if (cmp < 0) {
-return TOKENIZER_TAG_INVALID;
+return TOKEN_INVALID;
 }
 }
-return TOKENIZER_TAG_INVALID;
+return TOKEN_INVALID;
 }
 
 Tokenizer tokenizerInit(const char* buffer, const uint32_t len) {
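
Aside: getKeyword still carries the "// TODO binary search" note above, and keywords[] must already be sorted for the cmp < 0 early exit to be valid, so the linear scan could become a binary search directly. A sketch, assuming KeywordMap exposes its string through a field named name (the real field name is not visible in this diff) and strncmp from <string.h>:

    static TokenizerTag getKeywordBsearch(const char* bytes, const uint32_t len) {
        size_t lo = 0, hi = sizeof(keywords) / sizeof(keywords[0]);
        while (lo < hi) {
            const size_t mid = lo + (hi - lo) / 2;
            const int cmp = strncmp(bytes, keywords[mid].name, len);
            if (cmp == 0 && keywords[mid].name[len] == '\0')
                return keywords[mid].tag;  /* same length, same bytes: a keyword */
            if (cmp < 0 || (cmp == 0 && keywords[mid].name[len] != '\0'))
                hi = mid;                  /* bytes sorts before keywords[mid] */
            else
                lo = mid + 1;
        }
        return TOKEN_INVALID;
    }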
@@ -99,7 +99,7 @@ Tokenizer tokenizerInit(const char* buffer, const uint32_t len) {
 
 TokenizerToken tokenizerNext(Tokenizer* self) {
 TokenizerToken result = (TokenizerToken) {
-.tag = TOKENIZER_TAG_INVALID,
+.tag = TOKEN_INVALID,
 .loc = {
 .start = 0,
 },
@@ -114,7 +114,7 @@ state:
 case 0:
 if (self->index == self->buffer_len) {
 return (TokenizerToken) {
-.tag = TOKENIZER_TAG_EOF,
+.tag = TOKEN_EOF,
 .loc = {
 .start = self->index,
 .end = self->index,
@@ -132,17 +132,17 @@ state:
 result.loc.start = self->index;
 goto state;
 case '"':
-result.tag = TOKENIZER_TAG_STRING_LITERAL;
+result.tag = TOKEN_STRING_LITERAL;
 state = TOKENIZER_STATE_STRING_LITERAL;
 goto state;
 case '\'':
-result.tag = TOKENIZER_TAG_CHAR_LITERAL;
+result.tag = TOKEN_CHAR_LITERAL;
 state = TOKENIZER_STATE_CHAR_LITERAL;
 goto state;
 case 'a' ... 'z':
 case 'A' ... 'Z':
 case '_':
-result.tag = TOKENIZER_TAG_IDENTIFIER;
+result.tag = TOKEN_IDENTIFIER;
 state = TOKENIZER_STATE_IDENTIFIER;
 goto state;
 case '@':
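
Aside: the state = ...; goto state; pairs above drive a hand-written DFA: state: is a label sitting directly on a switch (state), so assigning a new state and jumping re-dispatches without any loop construct. Schematically (a sketch of the control flow, with an assumed start-state name, not the file's literal code):

    TokenizerState state = TOKENIZER_STATE_START;
    state:
    switch (state) {
    case TOKENIZER_STATE_START:
        /* classify buffer[index]: one-char tokens set a tag and break... */
        state = TOKENIZER_STATE_IDENTIFIER;
        goto state; /* ...multi-char tokens transition and re-enter the switch */
    case TOKENIZER_STATE_IDENTIFIER:
        /* consume [A-Za-z0-9_]*, then break with result.tag already set */
        break;
    }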
@@ -158,35 +158,35 @@ state:
 state = TOKENIZER_STATE_PIPE;
 goto state;
 case '(':
-result.tag = TOKENIZER_TAG_L_PAREN;
+result.tag = TOKEN_L_PAREN;
 self->index++;
 break;
 case ')':
-result.tag = TOKENIZER_TAG_R_PAREN;
+result.tag = TOKEN_R_PAREN;
 self->index++;
 break;
 case '[':
-result.tag = TOKENIZER_TAG_L_BRACKET;
+result.tag = TOKEN_L_BRACKET;
 self->index++;
 break;
 case ']':
-result.tag = TOKENIZER_TAG_R_BRACKET;
+result.tag = TOKEN_R_BRACKET;
 self->index++;
 break;
 case ';':
-result.tag = TOKENIZER_TAG_SEMICOLON;
+result.tag = TOKEN_SEMICOLON;
 self->index++;
 break;
 case ',':
-result.tag = TOKENIZER_TAG_COMMA;
+result.tag = TOKEN_COMMA;
 self->index++;
 break;
 case '?':
-result.tag = TOKENIZER_TAG_QUESTION_MARK;
+result.tag = TOKEN_QUESTION_MARK;
 self->index++;
 break;
 case ':':
-result.tag = TOKENIZER_TAG_COLON;
+result.tag = TOKEN_COLON;
 self->index++;
 break;
 case '%':
@@ -208,19 +208,19 @@ state:
 state = TOKENIZER_STATE_CARET;
 goto state;
 case '\\':
-result.tag = TOKENIZER_TAG_MULTILINE_STRING_LITERAL_LINE;
+result.tag = TOKEN_MULTILINE_STRING_LITERAL_LINE;
 state = TOKENIZER_STATE_BACKSLASH;
 goto state;
 case '{':
-result.tag = TOKENIZER_TAG_L_BRACE;
+result.tag = TOKEN_L_BRACE;
 self->index++;
 break;
 case '}':
-result.tag = TOKENIZER_TAG_R_BRACE;
+result.tag = TOKEN_R_BRACE;
 self->index++;
 break;
 case '~':
-result.tag = TOKENIZER_TAG_TILDE;
+result.tag = TOKEN_TILDE;
 self->index++;
 break;
 case '.':
@@ -236,7 +236,7 @@ state:
 state = TOKENIZER_STATE_AMPERSAND;
 goto state;
 case '0' ... '9':
-result.tag = TOKENIZER_TAG_NUMBER_LITERAL;
+result.tag = TOKEN_NUMBER_LITERAL;
 self->index++;
 state = TOKENIZER_STATE_INT;
 goto state;
@@ -251,7 +251,7 @@ state:
 switch (self->buffer[self->index]) {
 case 0:
 if (self->index == self->buffer_len) {
-result.tag = TOKENIZER_TAG_INVALID;
+result.tag = TOKEN_INVALID;
 } else {
 state = TOKENIZER_STATE_INVALID;
 goto state;
@@ -273,14 +273,14 @@ state:
 switch (self->buffer[self->index]) {
 case 0:
 if (self->index == self->buffer_len) {
-result.tag = TOKENIZER_TAG_INVALID;
+result.tag = TOKEN_INVALID;
 } else {
 state = TOKENIZER_STATE_INVALID;
 goto state;
 }
 break;
 case '\n':
-result.tag = TOKENIZER_TAG_INVALID;
+result.tag = TOKEN_INVALID;
 break;
 default:
 state = TOKENIZER_STATE_INVALID;
@@ -293,16 +293,16 @@ state:
 switch (self->buffer[self->index]) {
 case 0:
 case '\n':
-result.tag = TOKENIZER_TAG_INVALID;
+result.tag = TOKEN_INVALID;
 break;
 case '"':
-result.tag = TOKENIZER_TAG_IDENTIFIER;
+result.tag = TOKEN_IDENTIFIER;
 state = TOKENIZER_STATE_STRING_LITERAL;
 goto state;
 case 'a' ... 'z':
 case 'A' ... 'Z':
 case '_':
-result.tag = TOKENIZER_TAG_BUILTIN;
+result.tag = TOKEN_BUILTIN;
 state = TOKENIZER_STATE_BUILTIN;
 goto state;
 default:
@@ -315,11 +315,11 @@ state:
 self->index++;
 switch (self->buffer[self->index]) {
 case '=':
-result.tag = TOKENIZER_TAG_AMPERSAND_EQUAL;
+result.tag = TOKEN_AMPERSAND_EQUAL;
 self->index++;
 break;
 default:
-result.tag = TOKENIZER_TAG_AMPERSAND;
+result.tag = TOKEN_AMPERSAND;
 break;
 }
 break;
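
Aside: every maximal-munch operator state from here on repeats the shape just shown for '&': step past the first character, switch on the next byte, take the longer token when it matches (&= over &, *= or ** over *, and so on), and fall back to the one-character tag in the default case.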
@@ -328,11 +328,11 @@ state:
 self->index++;
 switch (self->buffer[self->index]) {
 case '=':
-result.tag = TOKENIZER_TAG_ASTERISK_EQUAL;
+result.tag = TOKEN_ASTERISK_EQUAL;
 self->index++;
 break;
 case '*':
-result.tag = TOKENIZER_TAG_ASTERISK_ASTERISK;
+result.tag = TOKEN_ASTERISK_ASTERISK;
 self->index++;
 break;
 case '%':
@@ -342,7 +342,7 @@ state:
 state = TOKENIZER_STATE_ASTERISK_PIPE;
 goto state;
 default:
-result.tag = TOKENIZER_TAG_ASTERISK;
+result.tag = TOKEN_ASTERISK;
 break;
 }
 break;
@@ -351,11 +351,11 @@ state:
 self->index++;
 switch (self->buffer[self->index]) {
 case '=':
-result.tag = TOKENIZER_TAG_ASTERISK_PERCENT_EQUAL;
+result.tag = TOKEN_ASTERISK_PERCENT_EQUAL;
 self->index++;
 break;
 default:
-result.tag = TOKENIZER_TAG_ASTERISK_PERCENT;
+result.tag = TOKEN_ASTERISK_PERCENT;
 break;
 }
 break;
@@ -364,11 +364,11 @@ state:
 self->index++;
 switch (self->buffer[self->index]) {
 case '=':
-result.tag = TOKENIZER_TAG_ASTERISK_PIPE_EQUAL;
+result.tag = TOKEN_ASTERISK_PIPE_EQUAL;
 self->index++;
 break;
 default:
-result.tag = TOKENIZER_TAG_ASTERISK_PIPE;
+result.tag = TOKEN_ASTERISK_PIPE;
 break;
 }
 break;
@@ -377,11 +377,11 @@ state:
 self->index++;
 switch (self->buffer[self->index]) {
 case '=':
-result.tag = TOKENIZER_TAG_PERCENT_EQUAL;
+result.tag = TOKEN_PERCENT_EQUAL;
 self->index++;
 break;
 default:
-result.tag = TOKENIZER_TAG_PERCENT;
+result.tag = TOKEN_PERCENT;
 break;
 }
 break;
@@ -390,11 +390,11 @@ state:
 self->index++;
 switch (self->buffer[self->index]) {
 case '=':
-result.tag = TOKENIZER_TAG_PLUS_EQUAL;
+result.tag = TOKEN_PLUS_EQUAL;
 self->index++;
 break;
 case '+':
-result.tag = TOKENIZER_TAG_PLUS_PLUS;
+result.tag = TOKEN_PLUS_PLUS;
 self->index++;
 break;
 case '%':
@@ -404,7 +404,7 @@ state:
 state = TOKENIZER_STATE_PLUS_PIPE;
 goto state;
 default:
-result.tag = TOKENIZER_TAG_PLUS;
+result.tag = TOKEN_PLUS;
 break;
 }
 break;
@@ -413,11 +413,11 @@ state:
 self->index++;
 switch (self->buffer[self->index]) {
 case '=':
-result.tag = TOKENIZER_TAG_PLUS_PERCENT_EQUAL;
+result.tag = TOKEN_PLUS_PERCENT_EQUAL;
 self->index++;
 break;
 default:
-result.tag = TOKENIZER_TAG_PLUS_PERCENT;
+result.tag = TOKEN_PLUS_PERCENT;
 break;
 }
 break;
@@ -426,11 +426,11 @@ state:
 self->index++;
 switch (self->buffer[self->index]) {
 case '=':
-result.tag = TOKENIZER_TAG_PLUS_PIPE_EQUAL;
+result.tag = TOKEN_PLUS_PIPE_EQUAL;
 self->index++;
 break;
 default:
-result.tag = TOKENIZER_TAG_PLUS_PIPE;
+result.tag = TOKEN_PLUS_PIPE;
 break;
 }
 break;
@@ -439,11 +439,11 @@ state:
 self->index++;
 switch (self->buffer[self->index]) {
 case '=':
-result.tag = TOKENIZER_TAG_CARET_EQUAL;
+result.tag = TOKEN_CARET_EQUAL;
 self->index++;
 break;
 default:
-result.tag = TOKENIZER_TAG_CARET;
+result.tag = TOKEN_CARET;
 break;
 }
 break;
@@ -461,7 +461,7 @@ state:
 const char* start = self->buffer + result.loc.start;
 uint32_t len = self->index - result.loc.start;
 TokenizerTag tag = getKeyword(start, len);
-if (tag != TOKENIZER_TAG_INVALID) {
+if (tag != TOKEN_INVALID) {
 result.tag = tag;
 }
 }
@@ -484,13 +484,13 @@ state:
 self->index++;
 switch (self->buffer[self->index]) {
 case 0:
-result.tag = TOKENIZER_TAG_INVALID;
+result.tag = TOKEN_INVALID;
 break;
 case '\\':
 state = TOKENIZER_STATE_MULTILINE_STRING_LITERAL_LINE;
 goto state;
 case '\n':
-result.tag = TOKENIZER_TAG_INVALID;
+result.tag = TOKEN_INVALID;
 break;
 default:
 state = TOKENIZER_STATE_INVALID;
@@ -506,11 +506,11 @@ state:
 state = TOKENIZER_STATE_INVALID;
 goto state;
 } else {
-result.tag = TOKENIZER_TAG_INVALID;
+result.tag = TOKEN_INVALID;
 }
 break;
 case '\n':
-result.tag = TOKENIZER_TAG_INVALID;
+result.tag = TOKEN_INVALID;
 break;
 case '\\':
 state = TOKENIZER_STATE_STRING_LITERAL_BACKSLASH;
@@ -534,7 +534,7 @@ state:
 switch (self->buffer[self->index]) {
 case 0:
 case '\n':
-result.tag = TOKENIZER_TAG_INVALID;
+result.tag = TOKEN_INVALID;
 break;
 default:
 state = TOKENIZER_STATE_STRING_LITERAL;
@@ -550,11 +550,11 @@ state:
 state = TOKENIZER_STATE_INVALID;
 goto state;
 } else {
-result.tag = TOKENIZER_TAG_INVALID;
+result.tag = TOKEN_INVALID;
 }
 break;
 case '\n':
-result.tag = TOKENIZER_TAG_INVALID;
+result.tag = TOKEN_INVALID;
 break;
 case '\\':
 state = TOKENIZER_STATE_CHAR_LITERAL_BACKSLASH;
@@ -581,11 +581,11 @@ state:
 state = TOKENIZER_STATE_INVALID;
 goto state;
 } else {
-result.tag = TOKENIZER_TAG_INVALID;
+result.tag = TOKEN_INVALID;
 }
 break;
 case '\n':
-result.tag = TOKENIZER_TAG_INVALID;
+result.tag = TOKEN_INVALID;
 break;
 case 0x01 ... 0x09:
 case 0x0b ... 0x1f:
@@ -631,11 +631,11 @@ state:
|
|||||||
self->index++;
|
self->index++;
|
||||||
switch (self->buffer[self->index]) {
|
switch (self->buffer[self->index]) {
|
||||||
case '=':
|
case '=':
|
||||||
result.tag = TOKENIZER_TAG_BANG_EQUAL;
|
result.tag = TOKEN_BANG_EQUAL;
|
||||||
self->index++;
|
self->index++;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
result.tag = TOKENIZER_TAG_BANG;
|
result.tag = TOKEN_BANG;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
@@ -644,15 +644,15 @@ state:
|
|||||||
self->index++;
|
self->index++;
|
||||||
switch (self->buffer[self->index]) {
|
switch (self->buffer[self->index]) {
|
||||||
case '=':
|
case '=':
|
||||||
result.tag = TOKENIZER_TAG_PIPE_EQUAL;
|
result.tag = TOKEN_PIPE_EQUAL;
|
||||||
self->index++;
|
self->index++;
|
||||||
break;
|
break;
|
||||||
case '|':
|
case '|':
|
||||||
result.tag = TOKENIZER_TAG_PIPE_PIPE;
|
result.tag = TOKEN_PIPE_PIPE;
|
||||||
self->index++;
|
self->index++;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
result.tag = TOKENIZER_TAG_PIPE;
|
result.tag = TOKEN_PIPE;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
@@ -661,15 +661,15 @@ state:
|
|||||||
self->index++;
|
self->index++;
|
||||||
switch (self->buffer[self->index]) {
|
switch (self->buffer[self->index]) {
|
||||||
case '=':
|
case '=':
|
||||||
result.tag = TOKENIZER_TAG_EQUAL_EQUAL;
|
result.tag = TOKEN_EQUAL_EQUAL;
|
||||||
self->index++;
|
self->index++;
|
||||||
break;
|
break;
|
||||||
case '>':
|
case '>':
|
||||||
result.tag = TOKENIZER_TAG_EQUAL_ANGLE_BRACKET_RIGHT;
|
result.tag = TOKEN_EQUAL_ANGLE_BRACKET_RIGHT;
|
||||||
self->index++;
|
self->index++;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
result.tag = TOKENIZER_TAG_EQUAL;
|
result.tag = TOKEN_EQUAL;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
@@ -678,11 +678,11 @@ state:
|
|||||||
self->index++;
|
self->index++;
|
||||||
switch (self->buffer[self->index]) {
|
switch (self->buffer[self->index]) {
|
||||||
case '>':
|
case '>':
|
||||||
result.tag = TOKENIZER_TAG_ARROW;
|
result.tag = TOKEN_ARROW;
|
||||||
self->index++;
|
self->index++;
|
||||||
break;
|
break;
|
||||||
case '=':
|
case '=':
|
||||||
result.tag = TOKENIZER_TAG_MINUS_EQUAL;
|
result.tag = TOKEN_MINUS_EQUAL;
|
||||||
self->index++;
|
self->index++;
|
||||||
break;
|
break;
|
||||||
case '%':
|
case '%':
|
||||||
@@ -692,7 +692,7 @@ state:
|
|||||||
state = TOKENIZER_STATE_MINUS_PIPE;
|
state = TOKENIZER_STATE_MINUS_PIPE;
|
||||||
goto state;
|
goto state;
|
||||||
default:
|
default:
|
||||||
result.tag = TOKENIZER_TAG_MINUS;
|
result.tag = TOKEN_MINUS;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
@@ -701,11 +701,11 @@ state:
|
|||||||
self->index++;
|
self->index++;
|
||||||
switch (self->buffer[self->index]) {
|
switch (self->buffer[self->index]) {
|
||||||
case '=':
|
case '=':
|
||||||
result.tag = TOKENIZER_TAG_MINUS_PERCENT_EQUAL;
|
result.tag = TOKEN_MINUS_PERCENT_EQUAL;
|
||||||
self->index++;
|
self->index++;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
result.tag = TOKENIZER_TAG_MINUS_PERCENT;
|
result.tag = TOKEN_MINUS_PERCENT;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
@@ -714,11 +714,11 @@ state:
     self->index++;
     switch (self->buffer[self->index]) {
     case '=':
-        result.tag = TOKENIZER_TAG_MINUS_PIPE_EQUAL;
+        result.tag = TOKEN_MINUS_PIPE_EQUAL;
         self->index++;
         break;
     default:
-        result.tag = TOKENIZER_TAG_MINUS_PIPE;
+        result.tag = TOKEN_MINUS_PIPE;
         break;
     }
     break;
@@ -730,11 +730,11 @@ state:
         state = TOKENIZER_STATE_ANGLE_BRACKET_ANGLE_BRACKET_LEFT;
         goto state;
     case '=':
-        result.tag = TOKENIZER_TAG_ANGLE_BRACKET_LEFT_EQUAL;
+        result.tag = TOKEN_ANGLE_BRACKET_LEFT_EQUAL;
         self->index++;
         break;
     default:
-        result.tag = TOKENIZER_TAG_ANGLE_BRACKET_LEFT;
+        result.tag = TOKEN_ANGLE_BRACKET_LEFT;
        break;
     }
     break;
@@ -743,14 +743,14 @@ state:
     self->index++;
     switch (self->buffer[self->index]) {
     case '=':
-        result.tag = TOKENIZER_TAG_ANGLE_BRACKET_ANGLE_BRACKET_LEFT_EQUAL;
+        result.tag = TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_LEFT_EQUAL;
         self->index++;
         break;
     case '|':
         state = TOKENIZER_STATE_ANGLE_BRACKET_ANGLE_BRACKET_LEFT_PIPE;
         goto state;
     default:
-        result.tag = TOKENIZER_TAG_ANGLE_BRACKET_ANGLE_BRACKET_LEFT;
+        result.tag = TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_LEFT;
         break;
     }
     break;
@@ -759,11 +759,11 @@ state:
     self->index++;
     switch (self->buffer[self->index]) {
     case '=':
-        result.tag = TOKENIZER_TAG_ANGLE_BRACKET_ANGLE_BRACKET_LEFT_PIPE_EQUAL;
+        result.tag = TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_LEFT_PIPE_EQUAL;
         self->index++;
         break;
     default:
-        result.tag = TOKENIZER_TAG_ANGLE_BRACKET_ANGLE_BRACKET_LEFT_PIPE;
+        result.tag = TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_LEFT_PIPE;
         break;
     }
     break;
@@ -775,11 +775,11 @@ state:
         state = TOKENIZER_STATE_ANGLE_BRACKET_ANGLE_BRACKET_RIGHT;
         goto state;
     case '=':
-        result.tag = TOKENIZER_TAG_ANGLE_BRACKET_RIGHT_EQUAL;
+        result.tag = TOKEN_ANGLE_BRACKET_RIGHT_EQUAL;
         self->index++;
         break;
     default:
-        result.tag = TOKENIZER_TAG_ANGLE_BRACKET_RIGHT;
+        result.tag = TOKEN_ANGLE_BRACKET_RIGHT;
         break;
     }
     break;
@@ -788,11 +788,11 @@ state:
     self->index++;
     switch (self->buffer[self->index]) {
     case '=':
-        result.tag = TOKENIZER_TAG_ANGLE_BRACKET_ANGLE_BRACKET_RIGHT_EQUAL;
+        result.tag = TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_RIGHT_EQUAL;
         self->index++;
         break;
     default:
-        result.tag = TOKENIZER_TAG_ANGLE_BRACKET_ANGLE_BRACKET_RIGHT;
+        result.tag = TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_RIGHT;
         break;
     }
     break;
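These cases implement maximal munch for the `<` and `>` operator families: the tokenizer keeps extending the operator while the next byte still forms a longer one (`<`, `<=`, `<<`, `<<=`, `<<|`, `<<|=`), and only falls back to the shorter tag in the `default:` arm. A table-driven sketch of the same longest-match idea (hypothetical and standalone, not the project's code, which does this one byte at a time through the nested switches):

#include <stddef.h>
#include <string.h>

/* Hypothetical longest-match demo for the '<' family: candidates are ordered
 * longest-first, so the first prefix match is the maximal munch. */
static const char* lexAngleLeft(const char* src, size_t* len) {
    static const char* const ops[] = { "<<|=", "<<|", "<<=", "<<", "<=", "<" };
    for (size_t i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
        size_t n = strlen(ops[i]);
        if (strncmp(src, ops[i], n) == 0) {
            *len = n;
            return ops[i];
        }
    }
    *len = 0;
    return NULL;
}

Given "<<|= rest", this returns "<<|=" rather than stopping at "<", which is exactly the behavior the state machine above encodes incrementally.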
@@ -807,7 +807,7 @@ state:
         state = TOKENIZER_STATE_PERIOD_ASTERISK;
         goto state;
     default:
-        result.tag = TOKENIZER_TAG_PERIOD;
+        result.tag = TOKEN_PERIOD;
         break;
     }
     break;
@@ -816,11 +816,11 @@ state:
     self->index++;
     switch (self->buffer[self->index]) {
     case '.':
-        result.tag = TOKENIZER_TAG_ELLIPSIS3;
+        result.tag = TOKEN_ELLIPSIS3;
         self->index++;
         break;
     default:
-        result.tag = TOKENIZER_TAG_ELLIPSIS2;
+        result.tag = TOKEN_ELLIPSIS2;
         break;
     }
     break;
@@ -829,10 +829,10 @@ state:
     self->index++;
     switch (self->buffer[self->index]) {
     case '*':
-        result.tag = TOKENIZER_TAG_INVALID_PERIODASTERISKS;
+        result.tag = TOKEN_INVALID_PERIODASTERISKS;
         break;
     default:
-        result.tag = TOKENIZER_TAG_PERIOD_ASTERISK;
+        result.tag = TOKEN_PERIOD_ASTERISK;
         break;
     }
     break;
@@ -844,11 +844,11 @@ state:
         state = TOKENIZER_STATE_LINE_COMMENT_START;
         goto state;
     case '=':
-        result.tag = TOKENIZER_TAG_SLASH_EQUAL;
+        result.tag = TOKEN_SLASH_EQUAL;
         self->index++;
         break;
     default:
-        result.tag = TOKENIZER_TAG_SLASH;
+        result.tag = TOKEN_SLASH;
         break;
     }
     break;
@@ -862,7 +862,7 @@ state:
         goto state;
     } else {
         return (TokenizerToken) {
-            .tag = TOKENIZER_TAG_EOF,
+            .tag = TOKEN_EOF,
             .loc = {
                 .start = self->index,
                 .end = self->index }
@@ -870,7 +870,7 @@ state:
     }
     break;
 case '!':
-    result.tag = TOKENIZER_TAG_CONTAINER_DOC_COMMENT;
+    result.tag = TOKEN_CONTAINER_DOC_COMMENT;
     state = TOKENIZER_STATE_DOC_COMMENT;
     goto state;
 case '\n':
@@ -901,11 +901,11 @@ state:
     switch (self->buffer[self->index]) {
     case 0:
     case '\n':
-        result.tag = TOKENIZER_TAG_DOC_COMMENT;
+        result.tag = TOKEN_DOC_COMMENT;
         break;
     case '\r':
         if (self->buffer[self->index + 1] == '\n') {
-            result.tag = TOKENIZER_TAG_DOC_COMMENT;
+            result.tag = TOKEN_DOC_COMMENT;
         } else {
             state = TOKENIZER_STATE_INVALID;
             goto state;
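The `\r` branch above encodes a CRLF rule: a carriage return only ends a doc comment when the very next byte is `\n`; a bare `\r` routes the tokenizer to the INVALID state. A minimal sketch of that check as a hypothetical helper (it assumes the buffer is NUL-terminated, which is what makes the `buffer[index + 1]` lookahead safe and gives the `case 0:` branch its meaning):

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical helper, not part of the tokenizer: returns true when the byte
 * at `i` terminates a doc-comment line under the rules in the hunk above.
 * Assumes `buffer` is NUL-terminated, so reading buffer[i + 1] is safe. */
static bool endsDocCommentLine(const char* buffer, size_t i) {
    switch (buffer[i]) {
    case 0:    /* end of input */
    case '\n': /* LF */
        return true;
    case '\r': /* CR counts only as the first half of CRLF */
        return buffer[i + 1] == '\n';
    default:
        return false;
    }
}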
@@ -921,7 +921,7 @@ state:
             state = TOKENIZER_STATE_INVALID;
             goto state;
     default:
-        result.tag = TOKENIZER_TAG_DOC_COMMENT;
+        result.tag = TOKEN_DOC_COMMENT;
         state = TOKENIZER_STATE_DOC_COMMENT;
         goto state;
     }
@@ -936,7 +936,7 @@ state:
         goto state;
     } else {
         return (TokenizerToken) {
-            .tag = TOKENIZER_TAG_EOF,
+            .tag = TOKEN_EOF,
             .loc = {
                 .start = self->index,
                 .end = self->index }
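Every hunk above follows the same shape: the tokenizer keeps a `state` variable, re-enters a `state:` label via `goto state;` (labels and variables live in separate C namespaces, so the shared name is legal), and each case either commits a token tag or selects the next state and jumps again. A stripped-down sketch of that dispatch pattern (illustrative only; the STATE_*/TAG_* names are placeholders, not the project's enums):

#include <stddef.h>

typedef enum { STATE_START, STATE_MINUS, STATE_INVALID } State;
enum { TAG_INVALID, TAG_MINUS, TAG_MINUS_EQUAL }; /* stand-ins for the tag enum */

/* Skeleton of the label-based dispatch: each arm either finishes a token or
 * picks the next state and re-enters the switch via `goto state;`. */
static int nextTag(const char* buffer, size_t* index) {
    State state = STATE_START;
    int tag = TAG_INVALID;
state:
    switch (state) {
    case STATE_START:
        if (buffer[*index] == '-') {
            state = STATE_MINUS;
            goto state;
        }
        state = STATE_INVALID;
        goto state;
    case STATE_MINUS:
        (*index)++;
        switch (buffer[*index]) {
        case '=':            /* "-=" */
            tag = TAG_MINUS_EQUAL;
            (*index)++;
            break;
        default:             /* plain "-" */
            tag = TAG_MINUS;
            break;
        }
        break;
    case STATE_INVALID:
        tag = TAG_INVALID;
        break;
    }
    return tag;
}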
244 tokenizer.h
@@ -5,128 +5,128 @@
 #include <stdint.h>
 
 #define TOKENIZER_FOREACH_TAG_ENUM(TAG) \
-    TAG(TOKENIZER_TAG_INVALID) \
+    TAG(TOKEN_INVALID) \
-    TAG(TOKENIZER_TAG_INVALID_PERIODASTERISKS) \
+    TAG(TOKEN_INVALID_PERIODASTERISKS) \
-    TAG(TOKENIZER_TAG_IDENTIFIER) \
+    TAG(TOKEN_IDENTIFIER) \
-    TAG(TOKENIZER_TAG_STRING_LITERAL) \
+    TAG(TOKEN_STRING_LITERAL) \
-    TAG(TOKENIZER_TAG_MULTILINE_STRING_LITERAL_LINE) \
+    TAG(TOKEN_MULTILINE_STRING_LITERAL_LINE) \
-    TAG(TOKENIZER_TAG_CHAR_LITERAL) \
+    TAG(TOKEN_CHAR_LITERAL) \
-    TAG(TOKENIZER_TAG_EOF) \
+    TAG(TOKEN_EOF) \
-    TAG(TOKENIZER_TAG_BUILTIN) \
+    TAG(TOKEN_BUILTIN) \
-    TAG(TOKENIZER_TAG_BANG) \
+    TAG(TOKEN_BANG) \
-    TAG(TOKENIZER_TAG_PIPE) \
+    TAG(TOKEN_PIPE) \
-    TAG(TOKENIZER_TAG_PIPE_PIPE) \
+    TAG(TOKEN_PIPE_PIPE) \
-    TAG(TOKENIZER_TAG_PIPE_EQUAL) \
+    TAG(TOKEN_PIPE_EQUAL) \
-    TAG(TOKENIZER_TAG_EQUAL) \
+    TAG(TOKEN_EQUAL) \
-    TAG(TOKENIZER_TAG_EQUAL_EQUAL) \
+    TAG(TOKEN_EQUAL_EQUAL) \
-    TAG(TOKENIZER_TAG_EQUAL_ANGLE_BRACKET_RIGHT) \
+    TAG(TOKEN_EQUAL_ANGLE_BRACKET_RIGHT) \
-    TAG(TOKENIZER_TAG_BANG_EQUAL) \
+    TAG(TOKEN_BANG_EQUAL) \
-    TAG(TOKENIZER_TAG_L_PAREN) \
+    TAG(TOKEN_L_PAREN) \
-    TAG(TOKENIZER_TAG_R_PAREN) \
+    TAG(TOKEN_R_PAREN) \
-    TAG(TOKENIZER_TAG_SEMICOLON) \
+    TAG(TOKEN_SEMICOLON) \
-    TAG(TOKENIZER_TAG_PERCENT) \
+    TAG(TOKEN_PERCENT) \
-    TAG(TOKENIZER_TAG_PERCENT_EQUAL) \
+    TAG(TOKEN_PERCENT_EQUAL) \
-    TAG(TOKENIZER_TAG_L_BRACE) \
+    TAG(TOKEN_L_BRACE) \
-    TAG(TOKENIZER_TAG_R_BRACE) \
+    TAG(TOKEN_R_BRACE) \
-    TAG(TOKENIZER_TAG_L_BRACKET) \
+    TAG(TOKEN_L_BRACKET) \
-    TAG(TOKENIZER_TAG_R_BRACKET) \
+    TAG(TOKEN_R_BRACKET) \
-    TAG(TOKENIZER_TAG_PERIOD) \
+    TAG(TOKEN_PERIOD) \
-    TAG(TOKENIZER_TAG_PERIOD_ASTERISK) \
+    TAG(TOKEN_PERIOD_ASTERISK) \
-    TAG(TOKENIZER_TAG_ELLIPSIS2) \
+    TAG(TOKEN_ELLIPSIS2) \
-    TAG(TOKENIZER_TAG_ELLIPSIS3) \
+    TAG(TOKEN_ELLIPSIS3) \
-    TAG(TOKENIZER_TAG_CARET) \
+    TAG(TOKEN_CARET) \
-    TAG(TOKENIZER_TAG_CARET_EQUAL) \
+    TAG(TOKEN_CARET_EQUAL) \
-    TAG(TOKENIZER_TAG_PLUS) \
+    TAG(TOKEN_PLUS) \
-    TAG(TOKENIZER_TAG_PLUS_PLUS) \
+    TAG(TOKEN_PLUS_PLUS) \
-    TAG(TOKENIZER_TAG_PLUS_EQUAL) \
+    TAG(TOKEN_PLUS_EQUAL) \
-    TAG(TOKENIZER_TAG_PLUS_PERCENT) \
+    TAG(TOKEN_PLUS_PERCENT) \
-    TAG(TOKENIZER_TAG_PLUS_PERCENT_EQUAL) \
+    TAG(TOKEN_PLUS_PERCENT_EQUAL) \
-    TAG(TOKENIZER_TAG_PLUS_PIPE) \
+    TAG(TOKEN_PLUS_PIPE) \
-    TAG(TOKENIZER_TAG_PLUS_PIPE_EQUAL) \
+    TAG(TOKEN_PLUS_PIPE_EQUAL) \
-    TAG(TOKENIZER_TAG_MINUS) \
+    TAG(TOKEN_MINUS) \
-    TAG(TOKENIZER_TAG_MINUS_EQUAL) \
+    TAG(TOKEN_MINUS_EQUAL) \
-    TAG(TOKENIZER_TAG_MINUS_PERCENT) \
+    TAG(TOKEN_MINUS_PERCENT) \
-    TAG(TOKENIZER_TAG_MINUS_PERCENT_EQUAL) \
+    TAG(TOKEN_MINUS_PERCENT_EQUAL) \
-    TAG(TOKENIZER_TAG_MINUS_PIPE) \
+    TAG(TOKEN_MINUS_PIPE) \
-    TAG(TOKENIZER_TAG_MINUS_PIPE_EQUAL) \
+    TAG(TOKEN_MINUS_PIPE_EQUAL) \
-    TAG(TOKENIZER_TAG_ASTERISK) \
+    TAG(TOKEN_ASTERISK) \
-    TAG(TOKENIZER_TAG_ASTERISK_EQUAL) \
+    TAG(TOKEN_ASTERISK_EQUAL) \
-    TAG(TOKENIZER_TAG_ASTERISK_ASTERISK) \
+    TAG(TOKEN_ASTERISK_ASTERISK) \
-    TAG(TOKENIZER_TAG_ASTERISK_PERCENT) \
+    TAG(TOKEN_ASTERISK_PERCENT) \
-    TAG(TOKENIZER_TAG_ASTERISK_PERCENT_EQUAL) \
+    TAG(TOKEN_ASTERISK_PERCENT_EQUAL) \
-    TAG(TOKENIZER_TAG_ASTERISK_PIPE) \
+    TAG(TOKEN_ASTERISK_PIPE) \
-    TAG(TOKENIZER_TAG_ASTERISK_PIPE_EQUAL) \
+    TAG(TOKEN_ASTERISK_PIPE_EQUAL) \
-    TAG(TOKENIZER_TAG_ARROW) \
+    TAG(TOKEN_ARROW) \
-    TAG(TOKENIZER_TAG_COLON) \
+    TAG(TOKEN_COLON) \
-    TAG(TOKENIZER_TAG_SLASH) \
+    TAG(TOKEN_SLASH) \
-    TAG(TOKENIZER_TAG_SLASH_EQUAL) \
+    TAG(TOKEN_SLASH_EQUAL) \
-    TAG(TOKENIZER_TAG_COMMA) \
+    TAG(TOKEN_COMMA) \
-    TAG(TOKENIZER_TAG_AMPERSAND) \
+    TAG(TOKEN_AMPERSAND) \
-    TAG(TOKENIZER_TAG_AMPERSAND_EQUAL) \
+    TAG(TOKEN_AMPERSAND_EQUAL) \
-    TAG(TOKENIZER_TAG_QUESTION_MARK) \
+    TAG(TOKEN_QUESTION_MARK) \
-    TAG(TOKENIZER_TAG_ANGLE_BRACKET_LEFT) \
+    TAG(TOKEN_ANGLE_BRACKET_LEFT) \
-    TAG(TOKENIZER_TAG_ANGLE_BRACKET_LEFT_EQUAL) \
+    TAG(TOKEN_ANGLE_BRACKET_LEFT_EQUAL) \
-    TAG(TOKENIZER_TAG_ANGLE_BRACKET_ANGLE_BRACKET_LEFT) \
+    TAG(TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_LEFT) \
-    TAG(TOKENIZER_TAG_ANGLE_BRACKET_ANGLE_BRACKET_LEFT_EQUAL) \
+    TAG(TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_LEFT_EQUAL) \
-    TAG(TOKENIZER_TAG_ANGLE_BRACKET_ANGLE_BRACKET_LEFT_PIPE) \
+    TAG(TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_LEFT_PIPE) \
-    TAG(TOKENIZER_TAG_ANGLE_BRACKET_ANGLE_BRACKET_LEFT_PIPE_EQUAL) \
+    TAG(TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_LEFT_PIPE_EQUAL) \
-    TAG(TOKENIZER_TAG_ANGLE_BRACKET_RIGHT) \
+    TAG(TOKEN_ANGLE_BRACKET_RIGHT) \
-    TAG(TOKENIZER_TAG_ANGLE_BRACKET_RIGHT_EQUAL) \
+    TAG(TOKEN_ANGLE_BRACKET_RIGHT_EQUAL) \
-    TAG(TOKENIZER_TAG_ANGLE_BRACKET_ANGLE_BRACKET_RIGHT) \
+    TAG(TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_RIGHT) \
-    TAG(TOKENIZER_TAG_ANGLE_BRACKET_ANGLE_BRACKET_RIGHT_EQUAL) \
+    TAG(TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_RIGHT_EQUAL) \
-    TAG(TOKENIZER_TAG_TILDE) \
+    TAG(TOKEN_TILDE) \
-    TAG(TOKENIZER_TAG_NUMBER_LITERAL) \
+    TAG(TOKEN_NUMBER_LITERAL) \
-    TAG(TOKENIZER_TAG_DOC_COMMENT) \
+    TAG(TOKEN_DOC_COMMENT) \
-    TAG(TOKENIZER_TAG_CONTAINER_DOC_COMMENT) \
+    TAG(TOKEN_CONTAINER_DOC_COMMENT) \
-    TAG(TOKENIZER_TAG_KEYWORD_ADDRSPACE) \
+    TAG(TOKEN_KEYWORD_ADDRSPACE) \
-    TAG(TOKENIZER_TAG_KEYWORD_ALIGN) \
+    TAG(TOKEN_KEYWORD_ALIGN) \
-    TAG(TOKENIZER_TAG_KEYWORD_ALLOWZERO) \
+    TAG(TOKEN_KEYWORD_ALLOWZERO) \
-    TAG(TOKENIZER_TAG_KEYWORD_AND) \
+    TAG(TOKEN_KEYWORD_AND) \
-    TAG(TOKENIZER_TAG_KEYWORD_ANYFRAME) \
+    TAG(TOKEN_KEYWORD_ANYFRAME) \
-    TAG(TOKENIZER_TAG_KEYWORD_ANYTYPE) \
+    TAG(TOKEN_KEYWORD_ANYTYPE) \
-    TAG(TOKENIZER_TAG_KEYWORD_ASM) \
+    TAG(TOKEN_KEYWORD_ASM) \
-    TAG(TOKENIZER_TAG_KEYWORD_ASYNC) \
+    TAG(TOKEN_KEYWORD_ASYNC) \
-    TAG(TOKENIZER_TAG_KEYWORD_AWAIT) \
+    TAG(TOKEN_KEYWORD_AWAIT) \
-    TAG(TOKENIZER_TAG_KEYWORD_BREAK) \
+    TAG(TOKEN_KEYWORD_BREAK) \
-    TAG(TOKENIZER_TAG_KEYWORD_CALLCONV) \
+    TAG(TOKEN_KEYWORD_CALLCONV) \
-    TAG(TOKENIZER_TAG_KEYWORD_CATCH) \
+    TAG(TOKEN_KEYWORD_CATCH) \
-    TAG(TOKENIZER_TAG_KEYWORD_COMPTIME) \
+    TAG(TOKEN_KEYWORD_COMPTIME) \
-    TAG(TOKENIZER_TAG_KEYWORD_CONST) \
+    TAG(TOKEN_KEYWORD_CONST) \
-    TAG(TOKENIZER_TAG_KEYWORD_CONTINUE) \
+    TAG(TOKEN_KEYWORD_CONTINUE) \
-    TAG(TOKENIZER_TAG_KEYWORD_DEFER) \
+    TAG(TOKEN_KEYWORD_DEFER) \
-    TAG(TOKENIZER_TAG_KEYWORD_ELSE) \
+    TAG(TOKEN_KEYWORD_ELSE) \
-    TAG(TOKENIZER_TAG_KEYWORD_ENUM) \
+    TAG(TOKEN_KEYWORD_ENUM) \
-    TAG(TOKENIZER_TAG_KEYWORD_ERRDEFER) \
+    TAG(TOKEN_KEYWORD_ERRDEFER) \
-    TAG(TOKENIZER_TAG_KEYWORD_ERROR) \
+    TAG(TOKEN_KEYWORD_ERROR) \
-    TAG(TOKENIZER_TAG_KEYWORD_EXPORT) \
+    TAG(TOKEN_KEYWORD_EXPORT) \
-    TAG(TOKENIZER_TAG_KEYWORD_EXTERN) \
+    TAG(TOKEN_KEYWORD_EXTERN) \
-    TAG(TOKENIZER_TAG_KEYWORD_FN) \
+    TAG(TOKEN_KEYWORD_FN) \
-    TAG(TOKENIZER_TAG_KEYWORD_FOR) \
+    TAG(TOKEN_KEYWORD_FOR) \
-    TAG(TOKENIZER_TAG_KEYWORD_IF) \
+    TAG(TOKEN_KEYWORD_IF) \
-    TAG(TOKENIZER_TAG_KEYWORD_INLINE) \
+    TAG(TOKEN_KEYWORD_INLINE) \
-    TAG(TOKENIZER_TAG_KEYWORD_NOALIAS) \
+    TAG(TOKEN_KEYWORD_NOALIAS) \
-    TAG(TOKENIZER_TAG_KEYWORD_NOINLINE) \
+    TAG(TOKEN_KEYWORD_NOINLINE) \
-    TAG(TOKENIZER_TAG_KEYWORD_NOSUSPEND) \
+    TAG(TOKEN_KEYWORD_NOSUSPEND) \
-    TAG(TOKENIZER_TAG_KEYWORD_OPAQUE) \
+    TAG(TOKEN_KEYWORD_OPAQUE) \
-    TAG(TOKENIZER_TAG_KEYWORD_OR) \
+    TAG(TOKEN_KEYWORD_OR) \
-    TAG(TOKENIZER_TAG_KEYWORD_ORELSE) \
+    TAG(TOKEN_KEYWORD_ORELSE) \
-    TAG(TOKENIZER_TAG_KEYWORD_PACKED) \
+    TAG(TOKEN_KEYWORD_PACKED) \
-    TAG(TOKENIZER_TAG_KEYWORD_PUB) \
+    TAG(TOKEN_KEYWORD_PUB) \
-    TAG(TOKENIZER_TAG_KEYWORD_RESUME) \
+    TAG(TOKEN_KEYWORD_RESUME) \
-    TAG(TOKENIZER_TAG_KEYWORD_RETURN) \
+    TAG(TOKEN_KEYWORD_RETURN) \
-    TAG(TOKENIZER_TAG_KEYWORD_LINKSECTION) \
+    TAG(TOKEN_KEYWORD_LINKSECTION) \
-    TAG(TOKENIZER_TAG_KEYWORD_STRUCT) \
+    TAG(TOKEN_KEYWORD_STRUCT) \
-    TAG(TOKENIZER_TAG_KEYWORD_SUSPEND) \
+    TAG(TOKEN_KEYWORD_SUSPEND) \
-    TAG(TOKENIZER_TAG_KEYWORD_SWITCH) \
+    TAG(TOKEN_KEYWORD_SWITCH) \
-    TAG(TOKENIZER_TAG_KEYWORD_TEST) \
+    TAG(TOKEN_KEYWORD_TEST) \
-    TAG(TOKENIZER_TAG_KEYWORD_THREADLOCAL) \
+    TAG(TOKEN_KEYWORD_THREADLOCAL) \
-    TAG(TOKENIZER_TAG_KEYWORD_TRY) \
+    TAG(TOKEN_KEYWORD_TRY) \
-    TAG(TOKENIZER_TAG_KEYWORD_UNION) \
+    TAG(TOKEN_KEYWORD_UNION) \
-    TAG(TOKENIZER_TAG_KEYWORD_UNREACHABLE) \
+    TAG(TOKEN_KEYWORD_UNREACHABLE) \
-    TAG(TOKENIZER_TAG_KEYWORD_USINGNAMESPACE) \
+    TAG(TOKEN_KEYWORD_USINGNAMESPACE) \
-    TAG(TOKENIZER_TAG_KEYWORD_VAR) \
+    TAG(TOKEN_KEYWORD_VAR) \
-    TAG(TOKENIZER_TAG_KEYWORD_VOLATILE) \
+    TAG(TOKEN_KEYWORD_VOLATILE) \
-    TAG(TOKENIZER_TAG_KEYWORD_WHILE)
+    TAG(TOKEN_KEYWORD_WHILE)
 
 #define TOKENIZER_GENERATE_ENUM(ENUM) ENUM,
 #define TOKENIZER_GENERATE_CASE(ENUM) \
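tokenizer.h uses the X-macro idiom: TOKENIZER_FOREACH_TAG_ENUM lists every tag exactly once, and small generator macros stamp that list out wherever it is needed. The hunk cuts off the body of TOKENIZER_GENERATE_CASE, so the expansion sketch below is an assumption about how such generators are typically applied; only the TokenizerTag type name is confirmed by the parser code, and tokenizerTagName is illustrative.

/* Expanding the list with the enum generator yields the tag enum itself. */
typedef enum {
    TOKENIZER_FOREACH_TAG_ENUM(TOKENIZER_GENERATE_ENUM)
} TokenizerTag;

/* A case generator like this one (hypothetical body; the real one is cut off
 * in the hunk above) turns the same list into a switch, e.g. for debugging. */
#define TOKENIZER_GENERATE_CASE(ENUM) \
    case ENUM:                        \
        return #ENUM;

static const char* tokenizerTagName(TokenizerTag tag) {
    switch (tag) {
        TOKENIZER_FOREACH_TAG_ENUM(TOKENIZER_GENERATE_CASE)
    }
    return "unknown";
}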
@@ -10,128 +10,128 @@ const c = @cImport({
 
 fn zigToken(token: c_uint) Token.Tag {
     return switch (token) {
-        c.TOKENIZER_TAG_INVALID => .invalid,
+        c.TOKEN_INVALID => .invalid,
-        c.TOKENIZER_TAG_INVALID_PERIODASTERISKS => .invalid_periodasterisks,
+        c.TOKEN_INVALID_PERIODASTERISKS => .invalid_periodasterisks,
-        c.TOKENIZER_TAG_IDENTIFIER => .identifier,
+        c.TOKEN_IDENTIFIER => .identifier,
-        c.TOKENIZER_TAG_STRING_LITERAL => .string_literal,
+        c.TOKEN_STRING_LITERAL => .string_literal,
-        c.TOKENIZER_TAG_MULTILINE_STRING_LITERAL_LINE => .multiline_string_literal_line,
+        c.TOKEN_MULTILINE_STRING_LITERAL_LINE => .multiline_string_literal_line,
-        c.TOKENIZER_TAG_CHAR_LITERAL => .char_literal,
+        c.TOKEN_CHAR_LITERAL => .char_literal,
-        c.TOKENIZER_TAG_EOF => .eof,
+        c.TOKEN_EOF => .eof,
-        c.TOKENIZER_TAG_BUILTIN => .builtin,
+        c.TOKEN_BUILTIN => .builtin,
-        c.TOKENIZER_TAG_BANG => .bang,
+        c.TOKEN_BANG => .bang,
-        c.TOKENIZER_TAG_PIPE => .pipe,
+        c.TOKEN_PIPE => .pipe,
-        c.TOKENIZER_TAG_PIPE_PIPE => .pipe_pipe,
+        c.TOKEN_PIPE_PIPE => .pipe_pipe,
-        c.TOKENIZER_TAG_PIPE_EQUAL => .pipe_equal,
+        c.TOKEN_PIPE_EQUAL => .pipe_equal,
-        c.TOKENIZER_TAG_EQUAL => .equal,
+        c.TOKEN_EQUAL => .equal,
-        c.TOKENIZER_TAG_EQUAL_EQUAL => .equal_equal,
+        c.TOKEN_EQUAL_EQUAL => .equal_equal,
-        c.TOKENIZER_TAG_EQUAL_ANGLE_BRACKET_RIGHT => .equal_angle_bracket_right,
+        c.TOKEN_EQUAL_ANGLE_BRACKET_RIGHT => .equal_angle_bracket_right,
-        c.TOKENIZER_TAG_BANG_EQUAL => .bang_equal,
+        c.TOKEN_BANG_EQUAL => .bang_equal,
-        c.TOKENIZER_TAG_L_PAREN => .l_paren,
+        c.TOKEN_L_PAREN => .l_paren,
-        c.TOKENIZER_TAG_R_PAREN => .r_paren,
+        c.TOKEN_R_PAREN => .r_paren,
-        c.TOKENIZER_TAG_SEMICOLON => .semicolon,
+        c.TOKEN_SEMICOLON => .semicolon,
-        c.TOKENIZER_TAG_PERCENT => .percent,
+        c.TOKEN_PERCENT => .percent,
-        c.TOKENIZER_TAG_PERCENT_EQUAL => .percent_equal,
+        c.TOKEN_PERCENT_EQUAL => .percent_equal,
-        c.TOKENIZER_TAG_L_BRACE => .l_brace,
+        c.TOKEN_L_BRACE => .l_brace,
-        c.TOKENIZER_TAG_R_BRACE => .r_brace,
+        c.TOKEN_R_BRACE => .r_brace,
-        c.TOKENIZER_TAG_L_BRACKET => .l_bracket,
+        c.TOKEN_L_BRACKET => .l_bracket,
-        c.TOKENIZER_TAG_R_BRACKET => .r_bracket,
+        c.TOKEN_R_BRACKET => .r_bracket,
-        c.TOKENIZER_TAG_PERIOD => .period,
+        c.TOKEN_PERIOD => .period,
-        c.TOKENIZER_TAG_PERIOD_ASTERISK => .period_asterisk,
+        c.TOKEN_PERIOD_ASTERISK => .period_asterisk,
-        c.TOKENIZER_TAG_ELLIPSIS2 => .ellipsis2,
+        c.TOKEN_ELLIPSIS2 => .ellipsis2,
-        c.TOKENIZER_TAG_ELLIPSIS3 => .ellipsis3,
+        c.TOKEN_ELLIPSIS3 => .ellipsis3,
-        c.TOKENIZER_TAG_CARET => .caret,
+        c.TOKEN_CARET => .caret,
-        c.TOKENIZER_TAG_CARET_EQUAL => .caret_equal,
+        c.TOKEN_CARET_EQUAL => .caret_equal,
-        c.TOKENIZER_TAG_PLUS => .plus,
+        c.TOKEN_PLUS => .plus,
-        c.TOKENIZER_TAG_PLUS_PLUS => .plus_plus,
+        c.TOKEN_PLUS_PLUS => .plus_plus,
-        c.TOKENIZER_TAG_PLUS_EQUAL => .plus_equal,
+        c.TOKEN_PLUS_EQUAL => .plus_equal,
-        c.TOKENIZER_TAG_PLUS_PERCENT => .plus_percent,
+        c.TOKEN_PLUS_PERCENT => .plus_percent,
-        c.TOKENIZER_TAG_PLUS_PERCENT_EQUAL => .plus_percent_equal,
+        c.TOKEN_PLUS_PERCENT_EQUAL => .plus_percent_equal,
-        c.TOKENIZER_TAG_PLUS_PIPE => .plus_pipe,
+        c.TOKEN_PLUS_PIPE => .plus_pipe,
-        c.TOKENIZER_TAG_PLUS_PIPE_EQUAL => .plus_pipe_equal,
+        c.TOKEN_PLUS_PIPE_EQUAL => .plus_pipe_equal,
-        c.TOKENIZER_TAG_MINUS => .minus,
+        c.TOKEN_MINUS => .minus,
-        c.TOKENIZER_TAG_MINUS_EQUAL => .minus_equal,
+        c.TOKEN_MINUS_EQUAL => .minus_equal,
-        c.TOKENIZER_TAG_MINUS_PERCENT => .minus_percent,
+        c.TOKEN_MINUS_PERCENT => .minus_percent,
-        c.TOKENIZER_TAG_MINUS_PERCENT_EQUAL => .minus_percent_equal,
+        c.TOKEN_MINUS_PERCENT_EQUAL => .minus_percent_equal,
-        c.TOKENIZER_TAG_MINUS_PIPE => .minus_pipe,
+        c.TOKEN_MINUS_PIPE => .minus_pipe,
-        c.TOKENIZER_TAG_MINUS_PIPE_EQUAL => .minus_pipe_equal,
+        c.TOKEN_MINUS_PIPE_EQUAL => .minus_pipe_equal,
-        c.TOKENIZER_TAG_ASTERISK => .asterisk,
+        c.TOKEN_ASTERISK => .asterisk,
-        c.TOKENIZER_TAG_ASTERISK_EQUAL => .asterisk_equal,
+        c.TOKEN_ASTERISK_EQUAL => .asterisk_equal,
-        c.TOKENIZER_TAG_ASTERISK_ASTERISK => .asterisk_asterisk,
+        c.TOKEN_ASTERISK_ASTERISK => .asterisk_asterisk,
-        c.TOKENIZER_TAG_ASTERISK_PERCENT => .asterisk_percent,
+        c.TOKEN_ASTERISK_PERCENT => .asterisk_percent,
-        c.TOKENIZER_TAG_ASTERISK_PERCENT_EQUAL => .asterisk_percent_equal,
+        c.TOKEN_ASTERISK_PERCENT_EQUAL => .asterisk_percent_equal,
-        c.TOKENIZER_TAG_ASTERISK_PIPE => .asterisk_pipe,
+        c.TOKEN_ASTERISK_PIPE => .asterisk_pipe,
-        c.TOKENIZER_TAG_ASTERISK_PIPE_EQUAL => .asterisk_pipe_equal,
+        c.TOKEN_ASTERISK_PIPE_EQUAL => .asterisk_pipe_equal,
-        c.TOKENIZER_TAG_ARROW => .arrow,
+        c.TOKEN_ARROW => .arrow,
-        c.TOKENIZER_TAG_COLON => .colon,
+        c.TOKEN_COLON => .colon,
-        c.TOKENIZER_TAG_SLASH => .slash,
+        c.TOKEN_SLASH => .slash,
-        c.TOKENIZER_TAG_SLASH_EQUAL => .slash_equal,
+        c.TOKEN_SLASH_EQUAL => .slash_equal,
-        c.TOKENIZER_TAG_COMMA => .comma,
+        c.TOKEN_COMMA => .comma,
-        c.TOKENIZER_TAG_AMPERSAND => .ampersand,
+        c.TOKEN_AMPERSAND => .ampersand,
-        c.TOKENIZER_TAG_AMPERSAND_EQUAL => .ampersand_equal,
+        c.TOKEN_AMPERSAND_EQUAL => .ampersand_equal,
-        c.TOKENIZER_TAG_QUESTION_MARK => .question_mark,
+        c.TOKEN_QUESTION_MARK => .question_mark,
-        c.TOKENIZER_TAG_ANGLE_BRACKET_LEFT => .angle_bracket_left,
+        c.TOKEN_ANGLE_BRACKET_LEFT => .angle_bracket_left,
-        c.TOKENIZER_TAG_ANGLE_BRACKET_LEFT_EQUAL => .angle_bracket_left_equal,
+        c.TOKEN_ANGLE_BRACKET_LEFT_EQUAL => .angle_bracket_left_equal,
-        c.TOKENIZER_TAG_ANGLE_BRACKET_ANGLE_BRACKET_LEFT => .angle_bracket_angle_bracket_left,
+        c.TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_LEFT => .angle_bracket_angle_bracket_left,
-        c.TOKENIZER_TAG_ANGLE_BRACKET_ANGLE_BRACKET_LEFT_EQUAL => .angle_bracket_angle_bracket_left_equal,
+        c.TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_LEFT_EQUAL => .angle_bracket_angle_bracket_left_equal,
-        c.TOKENIZER_TAG_ANGLE_BRACKET_ANGLE_BRACKET_LEFT_PIPE => .angle_bracket_angle_bracket_left_pipe,
+        c.TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_LEFT_PIPE => .angle_bracket_angle_bracket_left_pipe,
-        c.TOKENIZER_TAG_ANGLE_BRACKET_ANGLE_BRACKET_LEFT_PIPE_EQUAL => .angle_bracket_angle_bracket_left_pipe_equal,
+        c.TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_LEFT_PIPE_EQUAL => .angle_bracket_angle_bracket_left_pipe_equal,
-        c.TOKENIZER_TAG_ANGLE_BRACKET_RIGHT => .angle_bracket_right,
+        c.TOKEN_ANGLE_BRACKET_RIGHT => .angle_bracket_right,
-        c.TOKENIZER_TAG_ANGLE_BRACKET_RIGHT_EQUAL => .angle_bracket_right_equal,
+        c.TOKEN_ANGLE_BRACKET_RIGHT_EQUAL => .angle_bracket_right_equal,
-        c.TOKENIZER_TAG_ANGLE_BRACKET_ANGLE_BRACKET_RIGHT => .angle_bracket_angle_bracket_right,
+        c.TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_RIGHT => .angle_bracket_angle_bracket_right,
-        c.TOKENIZER_TAG_ANGLE_BRACKET_ANGLE_BRACKET_RIGHT_EQUAL => .angle_bracket_angle_bracket_right_equal,
+        c.TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_RIGHT_EQUAL => .angle_bracket_angle_bracket_right_equal,
-        c.TOKENIZER_TAG_TILDE => .tilde,
+        c.TOKEN_TILDE => .tilde,
-        c.TOKENIZER_TAG_NUMBER_LITERAL => .number_literal,
+        c.TOKEN_NUMBER_LITERAL => .number_literal,
-        c.TOKENIZER_TAG_DOC_COMMENT => .doc_comment,
+        c.TOKEN_DOC_COMMENT => .doc_comment,
-        c.TOKENIZER_TAG_CONTAINER_DOC_COMMENT => .container_doc_comment,
+        c.TOKEN_CONTAINER_DOC_COMMENT => .container_doc_comment,
-        c.TOKENIZER_TAG_KEYWORD_ADDRSPACE => .keyword_addrspace,
+        c.TOKEN_KEYWORD_ADDRSPACE => .keyword_addrspace,
-        c.TOKENIZER_TAG_KEYWORD_ALIGN => .keyword_align,
+        c.TOKEN_KEYWORD_ALIGN => .keyword_align,
-        c.TOKENIZER_TAG_KEYWORD_ALLOWZERO => .keyword_allowzero,
+        c.TOKEN_KEYWORD_ALLOWZERO => .keyword_allowzero,
-        c.TOKENIZER_TAG_KEYWORD_AND => .keyword_and,
+        c.TOKEN_KEYWORD_AND => .keyword_and,
-        c.TOKENIZER_TAG_KEYWORD_ANYFRAME => .keyword_anyframe,
+        c.TOKEN_KEYWORD_ANYFRAME => .keyword_anyframe,
-        c.TOKENIZER_TAG_KEYWORD_ANYTYPE => .keyword_anytype,
+        c.TOKEN_KEYWORD_ANYTYPE => .keyword_anytype,
-        c.TOKENIZER_TAG_KEYWORD_ASM => .keyword_asm,
+        c.TOKEN_KEYWORD_ASM => .keyword_asm,
-        c.TOKENIZER_TAG_KEYWORD_ASYNC => .keyword_async,
+        c.TOKEN_KEYWORD_ASYNC => .keyword_async,
-        c.TOKENIZER_TAG_KEYWORD_AWAIT => .keyword_await,
+        c.TOKEN_KEYWORD_AWAIT => .keyword_await,
-        c.TOKENIZER_TAG_KEYWORD_BREAK => .keyword_break,
+        c.TOKEN_KEYWORD_BREAK => .keyword_break,
-        c.TOKENIZER_TAG_KEYWORD_CALLCONV => .keyword_callconv,
+        c.TOKEN_KEYWORD_CALLCONV => .keyword_callconv,
-        c.TOKENIZER_TAG_KEYWORD_CATCH => .keyword_catch,
+        c.TOKEN_KEYWORD_CATCH => .keyword_catch,
-        c.TOKENIZER_TAG_KEYWORD_COMPTIME => .keyword_comptime,
+        c.TOKEN_KEYWORD_COMPTIME => .keyword_comptime,
-        c.TOKENIZER_TAG_KEYWORD_CONST => .keyword_const,
+        c.TOKEN_KEYWORD_CONST => .keyword_const,
-        c.TOKENIZER_TAG_KEYWORD_CONTINUE => .keyword_continue,
+        c.TOKEN_KEYWORD_CONTINUE => .keyword_continue,
-        c.TOKENIZER_TAG_KEYWORD_DEFER => .keyword_defer,
+        c.TOKEN_KEYWORD_DEFER => .keyword_defer,
-        c.TOKENIZER_TAG_KEYWORD_ELSE => .keyword_else,
+        c.TOKEN_KEYWORD_ELSE => .keyword_else,
-        c.TOKENIZER_TAG_KEYWORD_ENUM => .keyword_enum,
+        c.TOKEN_KEYWORD_ENUM => .keyword_enum,
-        c.TOKENIZER_TAG_KEYWORD_ERRDEFER => .keyword_errdefer,
+        c.TOKEN_KEYWORD_ERRDEFER => .keyword_errdefer,
-        c.TOKENIZER_TAG_KEYWORD_ERROR => .keyword_error,
+        c.TOKEN_KEYWORD_ERROR => .keyword_error,
-        c.TOKENIZER_TAG_KEYWORD_EXPORT => .keyword_export,
+        c.TOKEN_KEYWORD_EXPORT => .keyword_export,
-        c.TOKENIZER_TAG_KEYWORD_EXTERN => .keyword_extern,
+        c.TOKEN_KEYWORD_EXTERN => .keyword_extern,
-        c.TOKENIZER_TAG_KEYWORD_FN => .keyword_fn,
+        c.TOKEN_KEYWORD_FN => .keyword_fn,
-        c.TOKENIZER_TAG_KEYWORD_FOR => .keyword_for,
+        c.TOKEN_KEYWORD_FOR => .keyword_for,
-        c.TOKENIZER_TAG_KEYWORD_IF => .keyword_if,
+        c.TOKEN_KEYWORD_IF => .keyword_if,
-        c.TOKENIZER_TAG_KEYWORD_INLINE => .keyword_inline,
+        c.TOKEN_KEYWORD_INLINE => .keyword_inline,
-        c.TOKENIZER_TAG_KEYWORD_NOALIAS => .keyword_noalias,
+        c.TOKEN_KEYWORD_NOALIAS => .keyword_noalias,
-        c.TOKENIZER_TAG_KEYWORD_NOINLINE => .keyword_noinline,
+        c.TOKEN_KEYWORD_NOINLINE => .keyword_noinline,
-        c.TOKENIZER_TAG_KEYWORD_NOSUSPEND => .keyword_nosuspend,
+        c.TOKEN_KEYWORD_NOSUSPEND => .keyword_nosuspend,
-        c.TOKENIZER_TAG_KEYWORD_OPAQUE => .keyword_opaque,
+        c.TOKEN_KEYWORD_OPAQUE => .keyword_opaque,
-        c.TOKENIZER_TAG_KEYWORD_OR => .keyword_or,
+        c.TOKEN_KEYWORD_OR => .keyword_or,
-        c.TOKENIZER_TAG_KEYWORD_ORELSE => .keyword_orelse,
+        c.TOKEN_KEYWORD_ORELSE => .keyword_orelse,
-        c.TOKENIZER_TAG_KEYWORD_PACKED => .keyword_packed,
+        c.TOKEN_KEYWORD_PACKED => .keyword_packed,
-        c.TOKENIZER_TAG_KEYWORD_PUB => .keyword_pub,
+        c.TOKEN_KEYWORD_PUB => .keyword_pub,
-        c.TOKENIZER_TAG_KEYWORD_RESUME => .keyword_resume,
+        c.TOKEN_KEYWORD_RESUME => .keyword_resume,
-        c.TOKENIZER_TAG_KEYWORD_RETURN => .keyword_return,
+        c.TOKEN_KEYWORD_RETURN => .keyword_return,
-        c.TOKENIZER_TAG_KEYWORD_LINKSECTION => .keyword_linksection,
+        c.TOKEN_KEYWORD_LINKSECTION => .keyword_linksection,
-        c.TOKENIZER_TAG_KEYWORD_STRUCT => .keyword_struct,
+        c.TOKEN_KEYWORD_STRUCT => .keyword_struct,
-        c.TOKENIZER_TAG_KEYWORD_SUSPEND => .keyword_suspend,
+        c.TOKEN_KEYWORD_SUSPEND => .keyword_suspend,
-        c.TOKENIZER_TAG_KEYWORD_SWITCH => .keyword_switch,
+        c.TOKEN_KEYWORD_SWITCH => .keyword_switch,
-        c.TOKENIZER_TAG_KEYWORD_TEST => .keyword_test,
+        c.TOKEN_KEYWORD_TEST => .keyword_test,
-        c.TOKENIZER_TAG_KEYWORD_THREADLOCAL => .keyword_threadlocal,
+        c.TOKEN_KEYWORD_THREADLOCAL => .keyword_threadlocal,
-        c.TOKENIZER_TAG_KEYWORD_TRY => .keyword_try,
+        c.TOKEN_KEYWORD_TRY => .keyword_try,
-        c.TOKENIZER_TAG_KEYWORD_UNION => .keyword_union,
+        c.TOKEN_KEYWORD_UNION => .keyword_union,
-        c.TOKENIZER_TAG_KEYWORD_UNREACHABLE => .keyword_unreachable,
+        c.TOKEN_KEYWORD_UNREACHABLE => .keyword_unreachable,
-        c.TOKENIZER_TAG_KEYWORD_USINGNAMESPACE => .keyword_usingnamespace,
+        c.TOKEN_KEYWORD_USINGNAMESPACE => .keyword_usingnamespace,
-        c.TOKENIZER_TAG_KEYWORD_VAR => .keyword_var,
+        c.TOKEN_KEYWORD_VAR => .keyword_var,
-        c.TOKENIZER_TAG_KEYWORD_VOLATILE => .keyword_volatile,
+        c.TOKEN_KEYWORD_VOLATILE => .keyword_volatile,
-        c.TOKENIZER_TAG_KEYWORD_WHILE => .keyword_while,
+        c.TOKEN_KEYWORD_WHILE => .keyword_while,
         else => undefined,
     };
 }
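Since zigToken spells out every mapping by hand, the C and Zig enums can silently drift apart. On the C side, the same X-macro list can at least pin down the tag count; a hypothetical guard like the following (not in the source) could be exported and compared against the number of arms zigToken handles:

/* Hypothetical sync check: each list entry expands to "+1", so TOKEN_COUNT
 * evaluates to the number of tags in the X-macro list. */
#define TOKENIZER_COUNT_ONE(ENUM) +1
enum { TOKEN_COUNT = 0 TOKENIZER_FOREACH_TAG_ENUM(TOKENIZER_COUNT_ONE) };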
|
|||||||
Reference in New Issue
Block a user