const std = @import("std");
const testing = std.testing;

const Token = std.zig.Token;
const Tokenizer = std.zig.Tokenizer;

const c = @cImport({
    @cInclude("tokenizer.h");
});
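
// Note: tokenizer.h is not shown in this file; the shapes below are inferred
// from the call sites further down and are assumptions, not the authoritative
// C API:
//   - c.tokenizerInit(ptr, len) builds a tokenizer state over `len` bytes,
//   - c.tokenizerNext(&state) yields a token whose `.tag` is a TOKEN_* value
//     and whose `.loc.start`/`.loc.end` are byte offsets into the source.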

pub fn zigToken(token: c_uint) Token.Tag {
    return switch (token) {
        c.TOKEN_INVALID => .invalid,
        c.TOKEN_INVALID_PERIODASTERISKS => .invalid_periodasterisks,
        c.TOKEN_IDENTIFIER => .identifier,
        c.TOKEN_STRING_LITERAL => .string_literal,
        c.TOKEN_MULTILINE_STRING_LITERAL_LINE => .multiline_string_literal_line,
        c.TOKEN_CHAR_LITERAL => .char_literal,
        c.TOKEN_EOF => .eof,
        c.TOKEN_BUILTIN => .builtin,
        c.TOKEN_BANG => .bang,
        c.TOKEN_PIPE => .pipe,
        c.TOKEN_PIPE_PIPE => .pipe_pipe,
        c.TOKEN_PIPE_EQUAL => .pipe_equal,
        c.TOKEN_EQUAL => .equal,
        c.TOKEN_EQUAL_EQUAL => .equal_equal,
        c.TOKEN_EQUAL_ANGLE_BRACKET_RIGHT => .equal_angle_bracket_right,
        c.TOKEN_BANG_EQUAL => .bang_equal,
        c.TOKEN_L_PAREN => .l_paren,
        c.TOKEN_R_PAREN => .r_paren,
        c.TOKEN_SEMICOLON => .semicolon,
        c.TOKEN_PERCENT => .percent,
        c.TOKEN_PERCENT_EQUAL => .percent_equal,
        c.TOKEN_L_BRACE => .l_brace,
        c.TOKEN_R_BRACE => .r_brace,
        c.TOKEN_L_BRACKET => .l_bracket,
        c.TOKEN_R_BRACKET => .r_bracket,
        c.TOKEN_PERIOD => .period,
        c.TOKEN_PERIOD_ASTERISK => .period_asterisk,
        c.TOKEN_ELLIPSIS2 => .ellipsis2,
        c.TOKEN_ELLIPSIS3 => .ellipsis3,
        c.TOKEN_CARET => .caret,
        c.TOKEN_CARET_EQUAL => .caret_equal,
        c.TOKEN_PLUS => .plus,
        c.TOKEN_PLUS_PLUS => .plus_plus,
        c.TOKEN_PLUS_EQUAL => .plus_equal,
        c.TOKEN_PLUS_PERCENT => .plus_percent,
        c.TOKEN_PLUS_PERCENT_EQUAL => .plus_percent_equal,
        c.TOKEN_PLUS_PIPE => .plus_pipe,
        c.TOKEN_PLUS_PIPE_EQUAL => .plus_pipe_equal,
        c.TOKEN_MINUS => .minus,
        c.TOKEN_MINUS_EQUAL => .minus_equal,
        c.TOKEN_MINUS_PERCENT => .minus_percent,
        c.TOKEN_MINUS_PERCENT_EQUAL => .minus_percent_equal,
        c.TOKEN_MINUS_PIPE => .minus_pipe,
        c.TOKEN_MINUS_PIPE_EQUAL => .minus_pipe_equal,
        c.TOKEN_ASTERISK => .asterisk,
        c.TOKEN_ASTERISK_EQUAL => .asterisk_equal,
        c.TOKEN_ASTERISK_ASTERISK => .asterisk_asterisk,
        c.TOKEN_ASTERISK_PERCENT => .asterisk_percent,
        c.TOKEN_ASTERISK_PERCENT_EQUAL => .asterisk_percent_equal,
        c.TOKEN_ASTERISK_PIPE => .asterisk_pipe,
        c.TOKEN_ASTERISK_PIPE_EQUAL => .asterisk_pipe_equal,
        c.TOKEN_ARROW => .arrow,
        c.TOKEN_COLON => .colon,
        c.TOKEN_SLASH => .slash,
        c.TOKEN_SLASH_EQUAL => .slash_equal,
        c.TOKEN_COMMA => .comma,
        c.TOKEN_AMPERSAND => .ampersand,
        c.TOKEN_AMPERSAND_EQUAL => .ampersand_equal,
        c.TOKEN_QUESTION_MARK => .question_mark,
        c.TOKEN_ANGLE_BRACKET_LEFT => .angle_bracket_left,
        c.TOKEN_ANGLE_BRACKET_LEFT_EQUAL => .angle_bracket_left_equal,
        c.TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_LEFT => .angle_bracket_angle_bracket_left,
        c.TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_LEFT_EQUAL => .angle_bracket_angle_bracket_left_equal,
        c.TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_LEFT_PIPE => .angle_bracket_angle_bracket_left_pipe,
        c.TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_LEFT_PIPE_EQUAL => .angle_bracket_angle_bracket_left_pipe_equal,
        c.TOKEN_ANGLE_BRACKET_RIGHT => .angle_bracket_right,
        c.TOKEN_ANGLE_BRACKET_RIGHT_EQUAL => .angle_bracket_right_equal,
        c.TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_RIGHT => .angle_bracket_angle_bracket_right,
        c.TOKEN_ANGLE_BRACKET_ANGLE_BRACKET_RIGHT_EQUAL => .angle_bracket_angle_bracket_right_equal,
        c.TOKEN_TILDE => .tilde,
        c.TOKEN_NUMBER_LITERAL => .number_literal,
        c.TOKEN_DOC_COMMENT => .doc_comment,
        c.TOKEN_CONTAINER_DOC_COMMENT => .container_doc_comment,
        c.TOKEN_KEYWORD_ADDRSPACE => .keyword_addrspace,
        c.TOKEN_KEYWORD_ALIGN => .keyword_align,
        c.TOKEN_KEYWORD_ALLOWZERO => .keyword_allowzero,
        c.TOKEN_KEYWORD_AND => .keyword_and,
        c.TOKEN_KEYWORD_ANYFRAME => .keyword_anyframe,
        c.TOKEN_KEYWORD_ANYTYPE => .keyword_anytype,
        c.TOKEN_KEYWORD_ASM => .keyword_asm,
        c.TOKEN_KEYWORD_ASYNC => .keyword_async,
        c.TOKEN_KEYWORD_AWAIT => .keyword_await,
        c.TOKEN_KEYWORD_BREAK => .keyword_break,
        c.TOKEN_KEYWORD_CALLCONV => .keyword_callconv,
        c.TOKEN_KEYWORD_CATCH => .keyword_catch,
        c.TOKEN_KEYWORD_COMPTIME => .keyword_comptime,
        c.TOKEN_KEYWORD_CONST => .keyword_const,
        c.TOKEN_KEYWORD_CONTINUE => .keyword_continue,
        c.TOKEN_KEYWORD_DEFER => .keyword_defer,
        c.TOKEN_KEYWORD_ELSE => .keyword_else,
        c.TOKEN_KEYWORD_ENUM => .keyword_enum,
        c.TOKEN_KEYWORD_ERRDEFER => .keyword_errdefer,
        c.TOKEN_KEYWORD_ERROR => .keyword_error,
        c.TOKEN_KEYWORD_EXPORT => .keyword_export,
        c.TOKEN_KEYWORD_EXTERN => .keyword_extern,
        c.TOKEN_KEYWORD_FN => .keyword_fn,
        c.TOKEN_KEYWORD_FOR => .keyword_for,
        c.TOKEN_KEYWORD_IF => .keyword_if,
        c.TOKEN_KEYWORD_INLINE => .keyword_inline,
        c.TOKEN_KEYWORD_NOALIAS => .keyword_noalias,
        c.TOKEN_KEYWORD_NOINLINE => .keyword_noinline,
        c.TOKEN_KEYWORD_NOSUSPEND => .keyword_nosuspend,
        c.TOKEN_KEYWORD_OPAQUE => .keyword_opaque,
        c.TOKEN_KEYWORD_OR => .keyword_or,
        c.TOKEN_KEYWORD_ORELSE => .keyword_orelse,
        c.TOKEN_KEYWORD_PACKED => .keyword_packed,
        c.TOKEN_KEYWORD_PUB => .keyword_pub,
        c.TOKEN_KEYWORD_RESUME => .keyword_resume,
        c.TOKEN_KEYWORD_RETURN => .keyword_return,
        c.TOKEN_KEYWORD_LINKSECTION => .keyword_linksection,
        c.TOKEN_KEYWORD_STRUCT => .keyword_struct,
        c.TOKEN_KEYWORD_SUSPEND => .keyword_suspend,
        c.TOKEN_KEYWORD_SWITCH => .keyword_switch,
        c.TOKEN_KEYWORD_TEST => .keyword_test,
        c.TOKEN_KEYWORD_THREADLOCAL => .keyword_threadlocal,
        c.TOKEN_KEYWORD_TRY => .keyword_try,
        c.TOKEN_KEYWORD_UNION => .keyword_union,
        c.TOKEN_KEYWORD_UNREACHABLE => .keyword_unreachable,
        c.TOKEN_KEYWORD_USINGNAMESPACE => .keyword_usingnamespace,
        c.TOKEN_KEYWORD_VAR => .keyword_var,
        c.TOKEN_KEYWORD_VOLATILE => .keyword_volatile,
        c.TOKEN_KEYWORD_WHILE => .keyword_while,
        // An unmapped tag is a bug in this mapping; fail loudly rather than
        // returning an undefined Token.Tag that would later be compared.
        else => unreachable,
    };
}
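
// Minimal sketch of a direct check on the mapping itself (it assumes the
// TOKEN_* constants coerce to c_uint the same way they do in the switch above).
test "zigToken mapping" {
    try testing.expectEqual(Token.Tag.keyword_fn, zigToken(c.TOKEN_KEYWORD_FN));
    try testing.expectEqual(Token.Tag.eof, zigToken(c.TOKEN_EOF));
}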

// Copy-pasted from lib/std/zig/tokenizer.zig
fn testTokenize(source: [:0]const u8, expected_token_tags: []const Token.Tag) !void {
    // Run the C tokenizer and compare each tag against the expected sequence.
    var ctokenizer = c.tokenizerInit(source.ptr, @intCast(source.len));
    for (expected_token_tags) |expected_token_tag| {
        const token = c.tokenizerNext(&ctokenizer);
        try std.testing.expectEqual(expected_token_tag, zigToken(token.tag));
    }
    const last_token = c.tokenizerNext(&ctokenizer);
    try std.testing.expectEqual(Token.Tag.eof, zigToken(last_token.tag));

    // Uncomment when the Zig source and compiler are back in sync (e.g. with 0.14):
    //var tokenizer = Tokenizer.init(source);
    //for (expected_token_tags) |expected_token_tag| {
    //    const token = tokenizer.next();
    //    try std.testing.expectEqual(expected_token_tag, token.tag);
    //}
    //// Last token should always be eof, even when the last token was invalid,
    //// in which case the tokenizer is in an invalid state, which can only be
    //// recovered by opinionated means outside the scope of this implementation.
    //const last_token = tokenizer.next();
    //try std.testing.expectEqual(Token.Tag.eof, last_token.tag);

    // The eof token must be a zero-length span at the very end of the source.
    try std.testing.expectEqual(source.len, last_token.loc.start);
    try std.testing.expectEqual(source.len, last_token.loc.end);
}
test "keywords" {
|
|
try testTokenize("test const else", &.{ .keyword_test, .keyword_const, .keyword_else });
|
|
}
|
|
|
|
test "parser first test" {
|
|
try testTokenize(
|
|
\\
|
|
\\
|
|
\\// hello
|
|
\\
|
|
\\
|
|
,
|
|
&.{},
|
|
);
|
|
}
|
|
|
|
test "line comment followed by top-level comptime" {
|
|
try testTokenize(
|
|
\\// line comment
|
|
\\comptime {}
|
|
\\
|
|
, &.{
|
|
.keyword_comptime,
|
|
.l_brace,
|
|
.r_brace,
|
|
});
|
|
}
|
|
|
|
test "unknown length pointer and then c pointer" {
|
|
try testTokenize(
|
|
\\[*]u8
|
|
\\[*c]u8
|
|
, &.{
|
|
.l_bracket,
|
|
.asterisk,
|
|
.r_bracket,
|
|
.identifier,
|
|
.l_bracket,
|
|
.asterisk,
|
|
.identifier,
|
|
.r_bracket,
|
|
.identifier,
|
|
});
|
|
}
|
|
|
|
test "code point literal with hex escape" {
|
|
try testTokenize(
|
|
\\'\x1b'
|
|
, &.{.char_literal});
|
|
try testTokenize(
|
|
\\'\x1'
|
|
, &.{.char_literal});
|
|
}
|
|
|
|
test "newline in char literal" {
|
|
try testTokenize(
|
|
\\'
|
|
\\'
|
|
, &.{ .invalid, .invalid });
|
|
}
|
|
|
|
test "newline in string literal" {
|
|
try testTokenize(
|
|
\\"
|
|
\\"
|
|
, &.{ .invalid, .invalid });
|
|
}
|
|
|
|
test "code point literal with unicode escapes" {
|
|
// Valid unicode escapes
|
|
try testTokenize(
|
|
\\'\u{3}'
|
|
, &.{.char_literal});
|
|
try testTokenize(
|
|
\\'\u{01}'
|
|
, &.{.char_literal});
|
|
try testTokenize(
|
|
\\'\u{2a}'
|
|
, &.{.char_literal});
|
|
try testTokenize(
|
|
\\'\u{3f9}'
|
|
, &.{.char_literal});
|
|
try testTokenize(
|
|
\\'\u{6E09aBc1523}'
|
|
, &.{.char_literal});
|
|
try testTokenize(
|
|
\\"\u{440}"
|
|
, &.{.string_literal});
|
|
|
|
// Invalid unicode escapes
|
|
try testTokenize(
|
|
\\'\u'
|
|
, &.{.char_literal});
|
|
try testTokenize(
|
|
\\'\u{{'
|
|
, &.{.char_literal});
|
|
try testTokenize(
|
|
\\'\u{}'
|
|
, &.{.char_literal});
|
|
try testTokenize(
|
|
\\'\u{s}'
|
|
, &.{.char_literal});
|
|
try testTokenize(
|
|
\\'\u{2z}'
|
|
, &.{.char_literal});
|
|
try testTokenize(
|
|
\\'\u{4a'
|
|
, &.{.char_literal});
|
|
|
|
// Test old-style unicode literals
|
|
try testTokenize(
|
|
\\'\u0333'
|
|
, &.{.char_literal});
|
|
try testTokenize(
|
|
\\'\U0333'
|
|
, &.{.char_literal});
|
|
}
|
|
|
|
test "code point literal with unicode code point" {
|
|
try testTokenize(
|
|
\\'💩'
|
|
, &.{.char_literal});
|
|
}
|
|
|
|
test "float literal e exponent" {
|
|
try testTokenize("a = 4.94065645841246544177e-324;\n", &.{
|
|
.identifier,
|
|
.equal,
|
|
.number_literal,
|
|
.semicolon,
|
|
});
|
|
}
|
|
|
|
test "float literal p exponent" {
|
|
try testTokenize("a = 0x1.a827999fcef32p+1022;\n", &.{
|
|
.identifier,
|
|
.equal,
|
|
.number_literal,
|
|
.semicolon,
|
|
});
|
|
}
|
|
|
|
test "chars" {
|
|
try testTokenize("'c'", &.{.char_literal});
|
|
}
|
|
|
|
test "invalid token characters" {
|
|
try testTokenize("#", &.{.invalid});
|
|
try testTokenize("`", &.{.invalid});
|
|
try testTokenize("'c", &.{.invalid});
|
|
try testTokenize("'", &.{.invalid});
|
|
try testTokenize("''", &.{.char_literal});
|
|
try testTokenize("'\n'", &.{ .invalid, .invalid });
|
|
}
|
|
|
|
test "invalid literal/comment characters" {
|
|
try testTokenize("\"\x00\"", &.{.invalid});
|
|
try testTokenize("`\x00`", &.{.invalid});
|
|
try testTokenize("//\x00", &.{.invalid});
|
|
try testTokenize("//\x1f", &.{.invalid});
|
|
try testTokenize("//\x7f", &.{.invalid});
|
|
}
|
|
|
|
test "utf8" {
|
|
try testTokenize("//\xc2\x80", &.{});
|
|
try testTokenize("//\xf4\x8f\xbf\xbf", &.{});
|
|
}
|
|
|
|
test "invalid utf8" {
|
|
try testTokenize("//\x80", &.{});
|
|
try testTokenize("//\xbf", &.{});
|
|
try testTokenize("//\xf8", &.{});
|
|
try testTokenize("//\xff", &.{});
|
|
try testTokenize("//\xc2\xc0", &.{});
|
|
try testTokenize("//\xe0", &.{});
|
|
try testTokenize("//\xf0", &.{});
|
|
try testTokenize("//\xf0\x90\x80\xc0", &.{});
|
|
}
|
|
|
|
test "illegal unicode codepoints" {
|
|
// unicode newline characters.U+0085, U+2028, U+2029
|
|
try testTokenize("//\xc2\x84", &.{});
|
|
try testTokenize("//\xc2\x85", &.{});
|
|
try testTokenize("//\xc2\x86", &.{});
|
|
try testTokenize("//\xe2\x80\xa7", &.{});
|
|
try testTokenize("//\xe2\x80\xa8", &.{});
|
|
try testTokenize("//\xe2\x80\xa9", &.{});
|
|
try testTokenize("//\xe2\x80\xaa", &.{});
|
|
}
|
|
|
|
test "string identifier and builtin fns" {
|
|
try testTokenize(
|
|
\\const @"if" = @import("std");
|
|
, &.{
|
|
.keyword_const,
|
|
.identifier,
|
|
.equal,
|
|
.builtin,
|
|
.l_paren,
|
|
.string_literal,
|
|
.r_paren,
|
|
.semicolon,
|
|
});
|
|
}
|
|
|
|
test "pipe and then invalid" {
|
|
try testTokenize("||=", &.{
|
|
.pipe_pipe,
|
|
.equal,
|
|
});
|
|
}
|
|
|
|
test "line comment and doc comment" {
|
|
try testTokenize("//", &.{});
|
|
try testTokenize("// a / b", &.{});
|
|
try testTokenize("// /", &.{});
|
|
try testTokenize("/// a", &.{.doc_comment});
|
|
try testTokenize("///", &.{.doc_comment});
|
|
try testTokenize("////", &.{});
|
|
try testTokenize("//!", &.{.container_doc_comment});
|
|
try testTokenize("//!!", &.{.container_doc_comment});
|
|
}
|
|
|
|
test "line comment followed by identifier" {
|
|
try testTokenize(
|
|
\\ Unexpected,
|
|
\\ // another
|
|
\\ Another,
|
|
, &.{
|
|
.identifier,
|
|
.comma,
|
|
.identifier,
|
|
.comma,
|
|
});
|
|
}
|
|
|
|
test "UTF-8 BOM is recognized and skipped" {
|
|
try testTokenize("\xEF\xBB\xBFa;\n", &.{
|
|
.identifier,
|
|
.semicolon,
|
|
});
|
|
}
|
|
|
|
test "correctly parse pointer assignment" {
|
|
try testTokenize("b.*=3;\n", &.{
|
|
.identifier,
|
|
.period_asterisk,
|
|
.equal,
|
|
.number_literal,
|
|
.semicolon,
|
|
});
|
|
}
|
|
|
|
test "correctly parse pointer dereference followed by asterisk" {
|
|
try testTokenize("\"b\".* ** 10", &.{
|
|
.string_literal,
|
|
.period_asterisk,
|
|
.asterisk_asterisk,
|
|
.number_literal,
|
|
});
|
|
|
|
try testTokenize("(\"b\".*)** 10", &.{
|
|
.l_paren,
|
|
.string_literal,
|
|
.period_asterisk,
|
|
.r_paren,
|
|
.asterisk_asterisk,
|
|
.number_literal,
|
|
});
|
|
|
|
try testTokenize("\"b\".*** 10", &.{
|
|
.string_literal,
|
|
.invalid_periodasterisks,
|
|
.asterisk_asterisk,
|
|
.number_literal,
|
|
});
|
|
}
|
|
|
|
test "range literals" {
|
|
try testTokenize("0...9", &.{ .number_literal, .ellipsis3, .number_literal });
|
|
try testTokenize("'0'...'9'", &.{ .char_literal, .ellipsis3, .char_literal });
|
|
try testTokenize("0x00...0x09", &.{ .number_literal, .ellipsis3, .number_literal });
|
|
try testTokenize("0b00...0b11", &.{ .number_literal, .ellipsis3, .number_literal });
|
|
try testTokenize("0o00...0o11", &.{ .number_literal, .ellipsis3, .number_literal });
|
|
}
|
|
|
|
test "number literals decimal" {
|
|
try testTokenize("0", &.{.number_literal});
|
|
try testTokenize("1", &.{.number_literal});
|
|
try testTokenize("2", &.{.number_literal});
|
|
try testTokenize("3", &.{.number_literal});
|
|
try testTokenize("4", &.{.number_literal});
|
|
try testTokenize("5", &.{.number_literal});
|
|
try testTokenize("6", &.{.number_literal});
|
|
try testTokenize("7", &.{.number_literal});
|
|
try testTokenize("8", &.{.number_literal});
|
|
try testTokenize("9", &.{.number_literal});
|
|
try testTokenize("1..", &.{ .number_literal, .ellipsis2 });
|
|
try testTokenize("0a", &.{.number_literal});
|
|
try testTokenize("9b", &.{.number_literal});
|
|
try testTokenize("1z", &.{.number_literal});
|
|
try testTokenize("1z_1", &.{.number_literal});
|
|
try testTokenize("9z3", &.{.number_literal});
|
|
|
|
try testTokenize("0_0", &.{.number_literal});
|
|
try testTokenize("0001", &.{.number_literal});
|
|
try testTokenize("01234567890", &.{.number_literal});
|
|
try testTokenize("012_345_6789_0", &.{.number_literal});
|
|
try testTokenize("0_1_2_3_4_5_6_7_8_9_0", &.{.number_literal});
|
|
|
|
try testTokenize("00_", &.{.number_literal});
|
|
try testTokenize("0_0_", &.{.number_literal});
|
|
try testTokenize("0__0", &.{.number_literal});
|
|
try testTokenize("0_0f", &.{.number_literal});
|
|
try testTokenize("0_0_f", &.{.number_literal});
|
|
try testTokenize("0_0_f_00", &.{.number_literal});
|
|
try testTokenize("1_,", &.{ .number_literal, .comma });
|
|
|
|
try testTokenize("0.0", &.{.number_literal});
|
|
try testTokenize("1.0", &.{.number_literal});
|
|
try testTokenize("10.0", &.{.number_literal});
|
|
try testTokenize("0e0", &.{.number_literal});
|
|
try testTokenize("1e0", &.{.number_literal});
|
|
try testTokenize("1e100", &.{.number_literal});
|
|
try testTokenize("1.0e100", &.{.number_literal});
|
|
try testTokenize("1.0e+100", &.{.number_literal});
|
|
try testTokenize("1.0e-100", &.{.number_literal});
|
|
try testTokenize("1_0_0_0.0_0_0_0_0_1e1_0_0_0", &.{.number_literal});
|
|
|
|
try testTokenize("1.", &.{ .number_literal, .period });
|
|
try testTokenize("1e", &.{.number_literal});
|
|
try testTokenize("1.e100", &.{.number_literal});
|
|
try testTokenize("1.0e1f0", &.{.number_literal});
|
|
try testTokenize("1.0p100", &.{.number_literal});
|
|
try testTokenize("1.0p-100", &.{.number_literal});
|
|
try testTokenize("1.0p1f0", &.{.number_literal});
|
|
try testTokenize("1.0_,", &.{ .number_literal, .comma });
|
|
try testTokenize("1_.0", &.{.number_literal});
|
|
try testTokenize("1._", &.{.number_literal});
|
|
try testTokenize("1.a", &.{.number_literal});
|
|
try testTokenize("1.z", &.{.number_literal});
|
|
try testTokenize("1._0", &.{.number_literal});
|
|
try testTokenize("1.+", &.{ .number_literal, .period, .plus });
|
|
try testTokenize("1._+", &.{ .number_literal, .plus });
|
|
try testTokenize("1._e", &.{.number_literal});
|
|
try testTokenize("1.0e", &.{.number_literal});
|
|
try testTokenize("1.0e,", &.{ .number_literal, .comma });
|
|
try testTokenize("1.0e_", &.{.number_literal});
|
|
try testTokenize("1.0e+_", &.{.number_literal});
|
|
try testTokenize("1.0e-_", &.{.number_literal});
|
|
try testTokenize("1.0e0_+", &.{ .number_literal, .plus });
|
|
}
|
|
|
|
test "number literals binary" {
|
|
try testTokenize("0b0", &.{.number_literal});
|
|
try testTokenize("0b1", &.{.number_literal});
|
|
try testTokenize("0b2", &.{.number_literal});
|
|
try testTokenize("0b3", &.{.number_literal});
|
|
try testTokenize("0b4", &.{.number_literal});
|
|
try testTokenize("0b5", &.{.number_literal});
|
|
try testTokenize("0b6", &.{.number_literal});
|
|
try testTokenize("0b7", &.{.number_literal});
|
|
try testTokenize("0b8", &.{.number_literal});
|
|
try testTokenize("0b9", &.{.number_literal});
|
|
try testTokenize("0ba", &.{.number_literal});
|
|
try testTokenize("0bb", &.{.number_literal});
|
|
try testTokenize("0bc", &.{.number_literal});
|
|
try testTokenize("0bd", &.{.number_literal});
|
|
try testTokenize("0be", &.{.number_literal});
|
|
try testTokenize("0bf", &.{.number_literal});
|
|
try testTokenize("0bz", &.{.number_literal});
|
|
|
|
try testTokenize("0b0000_0000", &.{.number_literal});
|
|
try testTokenize("0b1111_1111", &.{.number_literal});
|
|
try testTokenize("0b10_10_10_10", &.{.number_literal});
|
|
try testTokenize("0b0_1_0_1_0_1_0_1", &.{.number_literal});
|
|
try testTokenize("0b1.", &.{ .number_literal, .period });
|
|
try testTokenize("0b1.0", &.{.number_literal});
|
|
|
|
try testTokenize("0B0", &.{.number_literal});
|
|
try testTokenize("0b_", &.{.number_literal});
|
|
try testTokenize("0b_0", &.{.number_literal});
|
|
try testTokenize("0b1_", &.{.number_literal});
|
|
try testTokenize("0b0__1", &.{.number_literal});
|
|
try testTokenize("0b0_1_", &.{.number_literal});
|
|
try testTokenize("0b1e", &.{.number_literal});
|
|
try testTokenize("0b1p", &.{.number_literal});
|
|
try testTokenize("0b1e0", &.{.number_literal});
|
|
try testTokenize("0b1p0", &.{.number_literal});
|
|
try testTokenize("0b1_,", &.{ .number_literal, .comma });
|
|
}
|
|
|
|
test "number literals octal" {
|
|
try testTokenize("0o0", &.{.number_literal});
|
|
try testTokenize("0o1", &.{.number_literal});
|
|
try testTokenize("0o2", &.{.number_literal});
|
|
try testTokenize("0o3", &.{.number_literal});
|
|
try testTokenize("0o4", &.{.number_literal});
|
|
try testTokenize("0o5", &.{.number_literal});
|
|
try testTokenize("0o6", &.{.number_literal});
|
|
try testTokenize("0o7", &.{.number_literal});
|
|
try testTokenize("0o8", &.{.number_literal});
|
|
try testTokenize("0o9", &.{.number_literal});
|
|
try testTokenize("0oa", &.{.number_literal});
|
|
try testTokenize("0ob", &.{.number_literal});
|
|
try testTokenize("0oc", &.{.number_literal});
|
|
try testTokenize("0od", &.{.number_literal});
|
|
try testTokenize("0oe", &.{.number_literal});
|
|
try testTokenize("0of", &.{.number_literal});
|
|
try testTokenize("0oz", &.{.number_literal});
|
|
|
|
try testTokenize("0o01234567", &.{.number_literal});
|
|
try testTokenize("0o0123_4567", &.{.number_literal});
|
|
try testTokenize("0o01_23_45_67", &.{.number_literal});
|
|
try testTokenize("0o0_1_2_3_4_5_6_7", &.{.number_literal});
|
|
try testTokenize("0o7.", &.{ .number_literal, .period });
|
|
try testTokenize("0o7.0", &.{.number_literal});
|
|
|
|
try testTokenize("0O0", &.{.number_literal});
|
|
try testTokenize("0o_", &.{.number_literal});
|
|
try testTokenize("0o_0", &.{.number_literal});
|
|
try testTokenize("0o1_", &.{.number_literal});
|
|
try testTokenize("0o0__1", &.{.number_literal});
|
|
try testTokenize("0o0_1_", &.{.number_literal});
|
|
try testTokenize("0o1e", &.{.number_literal});
|
|
try testTokenize("0o1p", &.{.number_literal});
|
|
try testTokenize("0o1e0", &.{.number_literal});
|
|
try testTokenize("0o1p0", &.{.number_literal});
|
|
try testTokenize("0o_,", &.{ .number_literal, .comma });
|
|
}
|
|
|
|
test "number literals hexadecimal" {
|
|
try testTokenize("0x0", &.{.number_literal});
|
|
try testTokenize("0x1", &.{.number_literal});
|
|
try testTokenize("0x2", &.{.number_literal});
|
|
try testTokenize("0x3", &.{.number_literal});
|
|
try testTokenize("0x4", &.{.number_literal});
|
|
try testTokenize("0x5", &.{.number_literal});
|
|
try testTokenize("0x6", &.{.number_literal});
|
|
try testTokenize("0x7", &.{.number_literal});
|
|
try testTokenize("0x8", &.{.number_literal});
|
|
try testTokenize("0x9", &.{.number_literal});
|
|
try testTokenize("0xa", &.{.number_literal});
|
|
try testTokenize("0xb", &.{.number_literal});
|
|
try testTokenize("0xc", &.{.number_literal});
|
|
try testTokenize("0xd", &.{.number_literal});
|
|
try testTokenize("0xe", &.{.number_literal});
|
|
try testTokenize("0xf", &.{.number_literal});
|
|
try testTokenize("0xA", &.{.number_literal});
|
|
try testTokenize("0xB", &.{.number_literal});
|
|
try testTokenize("0xC", &.{.number_literal});
|
|
try testTokenize("0xD", &.{.number_literal});
|
|
try testTokenize("0xE", &.{.number_literal});
|
|
try testTokenize("0xF", &.{.number_literal});
|
|
try testTokenize("0x0z", &.{.number_literal});
|
|
try testTokenize("0xz", &.{.number_literal});
|
|
|
|
try testTokenize("0x0123456789ABCDEF", &.{.number_literal});
|
|
try testTokenize("0x0123_4567_89AB_CDEF", &.{.number_literal});
|
|
try testTokenize("0x01_23_45_67_89AB_CDE_F", &.{.number_literal});
|
|
try testTokenize("0x0_1_2_3_4_5_6_7_8_9_A_B_C_D_E_F", &.{.number_literal});
|
|
|
|
try testTokenize("0X0", &.{.number_literal});
|
|
try testTokenize("0x_", &.{.number_literal});
|
|
try testTokenize("0x_1", &.{.number_literal});
|
|
try testTokenize("0x1_", &.{.number_literal});
|
|
try testTokenize("0x0__1", &.{.number_literal});
|
|
try testTokenize("0x0_1_", &.{.number_literal});
|
|
try testTokenize("0x_,", &.{ .number_literal, .comma });
|
|
|
|
try testTokenize("0x1.0", &.{.number_literal});
|
|
try testTokenize("0xF.0", &.{.number_literal});
|
|
try testTokenize("0xF.F", &.{.number_literal});
|
|
try testTokenize("0xF.Fp0", &.{.number_literal});
|
|
try testTokenize("0xF.FP0", &.{.number_literal});
|
|
try testTokenize("0x1p0", &.{.number_literal});
|
|
try testTokenize("0xfp0", &.{.number_literal});
|
|
try testTokenize("0x1.0+0xF.0", &.{ .number_literal, .plus, .number_literal });
|
|
|
|
try testTokenize("0x1.", &.{ .number_literal, .period });
|
|
try testTokenize("0xF.", &.{ .number_literal, .period });
|
|
try testTokenize("0x1.+0xF.", &.{ .number_literal, .period, .plus, .number_literal, .period });
|
|
try testTokenize("0xff.p10", &.{.number_literal});
|
|
|
|
try testTokenize("0x0123456.789ABCDEF", &.{.number_literal});
|
|
try testTokenize("0x0_123_456.789_ABC_DEF", &.{.number_literal});
|
|
try testTokenize("0x0_1_2_3_4_5_6.7_8_9_A_B_C_D_E_F", &.{.number_literal});
|
|
try testTokenize("0x0p0", &.{.number_literal});
|
|
try testTokenize("0x0.0p0", &.{.number_literal});
|
|
try testTokenize("0xff.ffp10", &.{.number_literal});
|
|
try testTokenize("0xff.ffP10", &.{.number_literal});
|
|
try testTokenize("0xffp10", &.{.number_literal});
|
|
try testTokenize("0xff_ff.ff_ffp1_0_0_0", &.{.number_literal});
|
|
try testTokenize("0xf_f_f_f.f_f_f_fp+1_000", &.{.number_literal});
|
|
try testTokenize("0xf_f_f_f.f_f_f_fp-1_00_0", &.{.number_literal});
|
|
|
|
try testTokenize("0x1e", &.{.number_literal});
|
|
try testTokenize("0x1e0", &.{.number_literal});
|
|
try testTokenize("0x1p", &.{.number_literal});
|
|
try testTokenize("0xfp0z1", &.{.number_literal});
|
|
try testTokenize("0xff.ffpff", &.{.number_literal});
|
|
try testTokenize("0x0.p", &.{.number_literal});
|
|
try testTokenize("0x0.z", &.{.number_literal});
|
|
try testTokenize("0x0._", &.{.number_literal});
|
|
try testTokenize("0x0_.0", &.{.number_literal});
|
|
try testTokenize("0x0_.0.0", &.{ .number_literal, .period, .number_literal });
|
|
try testTokenize("0x0._0", &.{.number_literal});
|
|
try testTokenize("0x0.0_", &.{.number_literal});
|
|
try testTokenize("0x0_p0", &.{.number_literal});
|
|
try testTokenize("0x0_.p0", &.{.number_literal});
|
|
try testTokenize("0x0._p0", &.{.number_literal});
|
|
try testTokenize("0x0.0_p0", &.{.number_literal});
|
|
try testTokenize("0x0._0p0", &.{.number_literal});
|
|
try testTokenize("0x0.0p_0", &.{.number_literal});
|
|
try testTokenize("0x0.0p+_0", &.{.number_literal});
|
|
try testTokenize("0x0.0p-_0", &.{.number_literal});
|
|
try testTokenize("0x0.0p0_", &.{.number_literal});
|
|
}
|
|
|
|
test "multi line string literal with only 1 backslash" {
|
|
try testTokenize("x \\\n;", &.{ .identifier, .invalid, .semicolon });
|
|
}
|
|
|
|
test "invalid builtin identifiers" {
|
|
try testTokenize("@()", &.{.invalid});
|
|
try testTokenize("@0()", &.{.invalid});
|
|
}
|
|
|
|
test "invalid token with unfinished escape right before eof" {
|
|
try testTokenize("\"\\", &.{.invalid});
|
|
try testTokenize("'\\", &.{.invalid});
|
|
try testTokenize("'\\u", &.{.invalid});
|
|
}
|
|
|
|
test "saturating operators" {
|
|
try testTokenize("<<", &.{.angle_bracket_angle_bracket_left});
|
|
try testTokenize("<<|", &.{.angle_bracket_angle_bracket_left_pipe});
|
|
try testTokenize("<<|=", &.{.angle_bracket_angle_bracket_left_pipe_equal});
|
|
|
|
try testTokenize("*", &.{.asterisk});
|
|
try testTokenize("*|", &.{.asterisk_pipe});
|
|
try testTokenize("*|=", &.{.asterisk_pipe_equal});
|
|
|
|
try testTokenize("+", &.{.plus});
|
|
try testTokenize("+|", &.{.plus_pipe});
|
|
try testTokenize("+|=", &.{.plus_pipe_equal});
|
|
|
|
try testTokenize("-", &.{.minus});
|
|
try testTokenize("-|", &.{.minus_pipe});
|
|
try testTokenize("-|=", &.{.minus_pipe_equal});
|
|
}
|
|
|
|
test "null byte before eof" {
|
|
try testTokenize("123 \x00 456", &.{ .number_literal, .invalid });
|
|
try testTokenize("//\x00", &.{.invalid});
|
|
try testTokenize("\\\\\x00", &.{.invalid});
|
|
try testTokenize("\x00", &.{.invalid});
|
|
try testTokenize("// NUL\x00\n", &.{.invalid});
|
|
try testTokenize("///\x00\n", &.{ .doc_comment, .invalid });
|
|
try testTokenize("/// NUL\x00\n", &.{ .doc_comment, .invalid });
|
|
}
|
|
|
|
test "invalid tabs and carriage returns" {
|
|
// "Inside Line Comments and Documentation Comments, Any TAB is rejected by
|
|
// the grammar since it is ambiguous how it should be rendered."
|
|
// https://github.com/ziglang/zig-spec/issues/38
|
|
try testTokenize("//\t", &.{.invalid});
|
|
try testTokenize("// \t", &.{.invalid});
|
|
try testTokenize("///\t", &.{.invalid});
|
|
try testTokenize("/// \t", &.{.invalid});
|
|
try testTokenize("//!\t", &.{.invalid});
|
|
try testTokenize("//! \t", &.{.invalid});
|
|
|
|
// "Inside Line Comments and Documentation Comments, CR directly preceding
|
|
// NL is unambiguously part of the newline sequence. It is accepted by the
|
|
// grammar and removed by zig fmt, leaving only NL. CR anywhere else is
|
|
// rejected by the grammar."
|
|
// https://github.com/ziglang/zig-spec/issues/38
|
|
try testTokenize("//\r", &.{.invalid});
|
|
try testTokenize("// \r", &.{.invalid});
|
|
try testTokenize("///\r", &.{.invalid});
|
|
try testTokenize("/// \r", &.{.invalid});
|
|
try testTokenize("//\r ", &.{.invalid});
|
|
try testTokenize("// \r ", &.{.invalid});
|
|
try testTokenize("///\r ", &.{.invalid});
|
|
try testTokenize("/// \r ", &.{.invalid});
|
|
try testTokenize("//\r\n", &.{});
|
|
try testTokenize("// \r\n", &.{});
|
|
try testTokenize("///\r\n", &.{.doc_comment});
|
|
try testTokenize("/// \r\n", &.{.doc_comment});
|
|
try testTokenize("//!\r", &.{.invalid});
|
|
try testTokenize("//! \r", &.{.invalid});
|
|
try testTokenize("//!\r ", &.{.invalid});
|
|
try testTokenize("//! \r ", &.{.invalid});
|
|
try testTokenize("//!\r\n", &.{.container_doc_comment});
|
|
try testTokenize("//! \r\n", &.{.container_doc_comment});
|
|
|
|
// The control characters TAB and CR are rejected by the grammar inside multi-line string literals,
|
|
// except if CR is directly before NL.
|
|
// https://github.com/ziglang/zig-spec/issues/38
|
|
try testTokenize("\\\\\r", &.{.invalid});
|
|
try testTokenize("\\\\\r ", &.{.invalid});
|
|
try testTokenize("\\\\ \r", &.{.invalid});
|
|
try testTokenize("\\\\\t", &.{.invalid});
|
|
try testTokenize("\\\\\t ", &.{.invalid});
|
|
try testTokenize("\\\\ \t", &.{.invalid});
|
|
try testTokenize("\\\\\r\n", &.{.multiline_string_literal_line});
|
|
|
|
// "TAB used as whitespace is...accepted by the grammar. CR used as
|
|
// whitespace, whether directly preceding NL or stray, is...accepted by the
|
|
// grammar."
|
|
// https://github.com/ziglang/zig-spec/issues/38
|
|
try testTokenize("\tpub\tswitch\t", &.{ .keyword_pub, .keyword_switch });
|
|
try testTokenize("\rpub\rswitch\r", &.{ .keyword_pub, .keyword_switch });
|
|
}
|