Merge pull request #8449 from ziglang/stage2-enums

stage2: implement simple enums
Andrew Kelley
2021-04-07 22:29:28 -07:00
committed by GitHub
11 changed files with 1843 additions and 2538 deletions
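As context for the diffs below, a hedged sketch of what "simple" means here: an enum whose fields are auto-numbered and which contains no member declarations, so the type can be built directly during AstGen. Enums with explicitly provided tag values, declarations, or an explicit integer tag type appear to still fall through to a TODO error in this commit. The type names are illustrative, not from the commit:

// Built directly by the new AstGen path (Module.EnumSimple): auto-numbered
// fields, no declarations, no explicit tag type.
const Color = enum {
    red,
    green,
    blue,
};

// Not yet handled by this commit: explicit values, an explicit tag type,
// or declarations inside the enum.
const Flag = enum(u8) {
    off = 0,
    on = 1,

    pub fn toggle(f: Flag) Flag {
        return if (f == .off) .on else .off;
    }
};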

View File

@@ -9,6 +9,7 @@ const warn = std.debug.warn;
const Tokenizer = std.zig.Tokenizer;
const Parser = std.zig.Parser;
const io = std.io;
const fmtIntSizeBin = std.fmt.fmtIntSizeBin;
const source = @embedFile("../os.zig");
var fixed_buffer_mem: [10 * 1024 * 1024]u8 = undefined;
@@ -25,12 +26,15 @@ pub fn main() !void {
const end = timer.read();
memory_used /= iterations;
const elapsed_s = @intToFloat(f64, end - start) / std.time.ns_per_s;
const bytes_per_sec = @intToFloat(f64, source.len * iterations) / elapsed_s;
const mb_per_sec = bytes_per_sec / (1024 * 1024);
const bytes_per_sec_float = @intToFloat(f64, source.len * iterations) / elapsed_s;
const bytes_per_sec = @floatToInt(u64, @floor(bytes_per_sec_float));
var stdout_file = std.io.getStdOut();
const stdout = stdout_file.writer();
try stdout.print("{:.3} MiB/s, {} KiB used \n", .{ mb_per_sec, memory_used / 1024 });
try stdout.print("parsing speed: {:.2}/s, {:.2} used \n", .{
fmtIntSizeBin(bytes_per_sec),
fmtIntSizeBin(memory_used),
});
}
fn testOnce() usize {
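As an aside on the helper newly imported above (a sketch, not part of the diff): std.fmt.fmtIntSizeBin wraps a byte count in a formatter that prints it with binary, 1024-based units, which is why the manual MiB division disappears from the benchmark output.

const std = @import("std");

pub fn main() !void {
    const stdout = std.io.getStdOut().writer();
    // Prints something like "1.50MiB"; the precision comes from the format
    // string, matching the "{:.2}" specifiers used in the benchmark above.
    try stdout.print("{:.2}\n", .{std.fmt.fmtIntSizeBin(1572864)});
}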

View File

@@ -28,8 +28,6 @@ const BuiltinFn = @import("BuiltinFn.zig");
instructions: std.MultiArrayList(zir.Inst) = .{},
string_bytes: ArrayListUnmanaged(u8) = .{},
extra: ArrayListUnmanaged(u32) = .{},
decl_map: std.StringArrayHashMapUnmanaged(void) = .{},
decls: ArrayListUnmanaged(*Decl) = .{},
/// The end of special indexes. `zir.Inst.Ref` subtracts against this number to convert
/// to `zir.Inst.Index`. The default here is correct if there are 0 parameters.
ref_start_index: u32 = zir.Inst.Ref.typed_value_map.len,
@@ -110,8 +108,6 @@ pub fn deinit(astgen: *AstGen) void {
astgen.instructions.deinit(gpa);
astgen.extra.deinit(gpa);
astgen.string_bytes.deinit(gpa);
astgen.decl_map.deinit(gpa);
astgen.decls.deinit(gpa);
}
pub const ResultLoc = union(enum) {
@@ -1183,13 +1179,6 @@ fn blockExprStmts(
// in the above while loop.
const zir_tags = gz.astgen.instructions.items(.tag);
switch (zir_tags[inst]) {
.@"const" => {
const tv = gz.astgen.instructions.items(.data)[inst].@"const";
break :b switch (tv.ty.zigTypeTag()) {
.NoReturn, .Void => true,
else => false,
};
},
// For some instructions, swap in a slightly different ZIR tag
// so we can avoid a separate ensure_result_used instruction.
.call_none_chkused => unreachable,
@@ -1257,6 +1246,8 @@ fn blockExprStmts(
.fn_type_cc,
.fn_type_cc_var_args,
.int,
.float,
.float128,
.intcast,
.int_type,
.is_non_null,
@@ -1334,7 +1325,10 @@ fn blockExprStmts(
.struct_decl_extern,
.union_decl,
.enum_decl,
.enum_decl_nonexhaustive,
.opaque_decl,
.int_to_enum,
.enum_to_int,
=> break :b false,
// ZIR instructions that are always either `noreturn` or `void`.
@@ -1490,7 +1484,7 @@ fn varDecl(
init_scope.rl_ptr = try init_scope.addUnNode(.alloc, type_inst, node);
init_scope.rl_ty_inst = type_inst;
} else {
const alloc = try init_scope.addUnNode(.alloc_inferred, undefined, node);
const alloc = try init_scope.addNode(.alloc_inferred, node);
resolve_inferred_alloc = alloc;
init_scope.rl_ptr = alloc;
}
@@ -1565,7 +1559,7 @@ fn varDecl(
const alloc = try gz.addUnNode(.alloc_mut, type_inst, node);
break :a .{ .alloc = alloc, .result_loc = .{ .ptr = alloc } };
} else a: {
const alloc = try gz.addUnNode(.alloc_inferred_mut, undefined, node);
const alloc = try gz.addNode(.alloc_inferred_mut, node);
resolve_inferred_alloc = alloc;
break :a .{ .alloc = alloc, .result_loc = .{ .inferred_ptr = alloc } };
};
@@ -1823,15 +1817,18 @@ fn containerDecl(
defer bit_bag.deinit(gpa);
var cur_bit_bag: u32 = 0;
var member_index: usize = 0;
while (true) {
const member_node = container_decl.ast.members[member_index];
var field_index: usize = 0;
for (container_decl.ast.members) |member_node| {
const member = switch (node_tags[member_node]) {
.container_field_init => tree.containerFieldInit(member_node),
.container_field_align => tree.containerFieldAlign(member_node),
.container_field => tree.containerField(member_node),
else => unreachable,
else => continue,
};
if (field_index % 16 == 0 and field_index != 0) {
try bit_bag.append(gpa, cur_bit_bag);
cur_bit_bag = 0;
}
if (member.comptime_token) |comptime_token| {
return mod.failTok(scope, comptime_token, "TODO implement comptime struct fields", .{});
}
@@ -1858,17 +1855,9 @@ fn containerDecl(
fields_data.appendAssumeCapacity(@enumToInt(default_inst));
}
member_index += 1;
if (member_index < container_decl.ast.members.len) {
if (member_index % 16 == 0) {
try bit_bag.append(gpa, cur_bit_bag);
cur_bit_bag = 0;
}
} else {
break;
}
field_index += 1;
}
const empty_slot_count = 16 - ((member_index - 1) % 16);
const empty_slot_count = 16 - (field_index % 16);
cur_bit_bag >>= @intCast(u5, empty_slot_count * 2);
const result = try gz.addPlNode(tag, node, zir.Inst.StructDecl{
@@ -1885,7 +1874,172 @@ fn containerDecl(
return mod.failTok(scope, container_decl.ast.main_token, "TODO AstGen for union decl", .{});
},
.keyword_enum => {
return mod.failTok(scope, container_decl.ast.main_token, "TODO AstGen for enum decl", .{});
if (container_decl.layout_token) |t| {
return mod.failTok(scope, t, "enums do not support 'packed' or 'extern'; instead provide an explicit integer tag type", .{});
}
// Count total fields as well as how many have explicitly provided tag values.
const counts = blk: {
var values: usize = 0;
var total_fields: usize = 0;
var decls: usize = 0;
var nonexhaustive_node: ast.Node.Index = 0;
for (container_decl.ast.members) |member_node| {
const member = switch (node_tags[member_node]) {
.container_field_init => tree.containerFieldInit(member_node),
.container_field_align => tree.containerFieldAlign(member_node),
.container_field => tree.containerField(member_node),
else => {
decls += 1;
continue;
},
};
if (member.comptime_token) |comptime_token| {
return mod.failTok(scope, comptime_token, "enum fields cannot be marked comptime", .{});
}
if (member.ast.type_expr != 0) {
return mod.failNode(scope, member.ast.type_expr, "enum fields do not have types", .{});
}
// Alignment expressions in enums are caught by the parser.
assert(member.ast.align_expr == 0);
const name_token = member.ast.name_token;
if (mem.eql(u8, tree.tokenSlice(name_token), "_")) {
if (nonexhaustive_node != 0) {
const msg = msg: {
const msg = try mod.errMsg(
scope,
gz.nodeSrcLoc(member_node),
"redundant non-exhaustive enum mark",
.{},
);
errdefer msg.destroy(gpa);
const other_src = gz.nodeSrcLoc(nonexhaustive_node);
try mod.errNote(scope, other_src, msg, "other mark here", .{});
break :msg msg;
};
return mod.failWithOwnedErrorMsg(scope, msg);
}
nonexhaustive_node = member_node;
if (member.ast.value_expr != 0) {
return mod.failNode(scope, member.ast.value_expr, "'_' is used to mark an enum as non-exhaustive and cannot be assigned a value", .{});
}
continue;
}
total_fields += 1;
if (member.ast.value_expr != 0) {
values += 1;
}
}
break :blk .{
.total_fields = total_fields,
.values = values,
.decls = decls,
.nonexhaustive_node = nonexhaustive_node,
};
};
if (counts.total_fields == 0) {
// One can construct an enum with no tags, and it functions the same as `noreturn`. But
// this is only useful for generic code; when explicitly using `enum {}` syntax, there
// must be at least one tag.
return mod.failNode(scope, node, "enum declarations must have at least one tag", .{});
}
if (counts.nonexhaustive_node != 0 and arg_inst == .none) {
const msg = msg: {
const msg = try mod.errMsg(
scope,
gz.nodeSrcLoc(node),
"non-exhaustive enum missing integer tag type",
.{},
);
errdefer msg.destroy(gpa);
const other_src = gz.nodeSrcLoc(counts.nonexhaustive_node);
try mod.errNote(scope, other_src, msg, "marked non-exhaustive here", .{});
break :msg msg;
};
return mod.failWithOwnedErrorMsg(scope, msg);
}
if (counts.values == 0 and counts.decls == 0 and arg_inst == .none) {
// No explicitly provided tag values and no top level declarations! In this case,
// we can construct the enum type in AstGen and it will be correctly shared by all
// generic function instantiations and comptime function calls.
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
const arena = &new_decl_arena.allocator;
var fields_map: std.StringArrayHashMapUnmanaged(void) = .{};
try fields_map.ensureCapacity(arena, counts.total_fields);
for (container_decl.ast.members) |member_node| {
if (member_node == counts.nonexhaustive_node)
continue;
const member = switch (node_tags[member_node]) {
.container_field_init => tree.containerFieldInit(member_node),
.container_field_align => tree.containerFieldAlign(member_node),
.container_field => tree.containerField(member_node),
else => unreachable, // We checked earlier.
};
const name_token = member.ast.name_token;
const tag_name = try mod.identifierTokenStringTreeArena(
scope,
name_token,
tree,
arena,
);
const gop = fields_map.getOrPutAssumeCapacity(tag_name);
if (gop.found_existing) {
const msg = msg: {
const msg = try mod.errMsg(
scope,
gz.tokSrcLoc(name_token),
"duplicate enum tag",
.{},
);
errdefer msg.destroy(gpa);
// Iterate to find the other tag. We don't eagerly store it in a hash
// map because in the hot path there will be no compile error and we
// don't need to waste time with a hash map.
const bad_node = for (container_decl.ast.members) |other_member_node| {
const other_member = switch (node_tags[other_member_node]) {
.container_field_init => tree.containerFieldInit(other_member_node),
.container_field_align => tree.containerFieldAlign(other_member_node),
.container_field => tree.containerField(other_member_node),
else => unreachable, // We checked earlier.
};
const other_tag_name = try mod.identifierTokenStringTreeArena(
scope,
other_member.ast.name_token,
tree,
arena,
);
if (mem.eql(u8, tag_name, other_tag_name))
break other_member_node;
} else unreachable;
const other_src = gz.nodeSrcLoc(bad_node);
try mod.errNote(scope, other_src, msg, "other tag here", .{});
break :msg msg;
};
return mod.failWithOwnedErrorMsg(scope, msg);
}
}
const enum_simple = try arena.create(Module.EnumSimple);
enum_simple.* = .{
.owner_decl = astgen.decl,
.node_offset = astgen.decl.nodeIndexToRelative(node),
.fields = fields_map,
};
const enum_ty = try Type.Tag.enum_simple.create(arena, enum_simple);
const enum_val = try Value.Tag.ty.create(arena, enum_ty);
const new_decl = try mod.createAnonymousDecl(scope, &new_decl_arena, .{
.ty = Type.initTag(.type),
.val = enum_val,
});
const decl_index = try mod.declareDeclDependency(astgen.decl, new_decl);
const result = try gz.addDecl(.decl_val, decl_index, node);
return rvalue(gz, scope, rl, result, node);
}
// In this case we must generate ZIR code for the tag values, similar to
// how structs are handled above. The new anonymous Decl will be created in
// Sema, not AstGen.
return mod.failNode(scope, node, "TODO AstGen for enum decl with decls or explicitly provided field values", .{});
},
.keyword_opaque => {
const result = try gz.addNode(.opaque_decl, node);
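To make the checks in the .keyword_enum branch above concrete, here is a hedged sketch of declarations and the diagnostics the new code would report for them; the messages are quoted from the hunk, the type names are invented:

const A = packed enum { x };     // "enums do not support 'packed' or 'extern'; instead provide an explicit integer tag type"
const B = enum {};               // "enum declarations must have at least one tag"
const C = enum { x, _ };         // "non-exhaustive enum missing integer tag type"
const D = enum(u8) { x, _, _ };  // "redundant non-exhaustive enum mark"
const E = enum(u8) { x, _ = 3 }; // "'_' is used to mark an enum as non-exhaustive and cannot be assigned a value"
const F = enum { x, x };         // "duplicate enum tag"
const G = enum(u8) { x, y, _ };  // accepted by these checks, but still ends in the TODO error above in this commit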
@@ -1901,11 +2055,11 @@ fn errorSetDecl(
rl: ResultLoc,
node: ast.Node.Index,
) InnerError!zir.Inst.Ref {
const mod = gz.astgen.mod;
const astgen = gz.astgen;
const mod = astgen.mod;
const tree = gz.tree();
const main_tokens = tree.nodes.items(.main_token);
const token_tags = tree.tokens.items(.tag);
const arena = gz.astgen.arena;
// Count how many fields there are.
const error_token = main_tokens[node];
@@ -1922,6 +2076,11 @@ fn errorSetDecl(
} else unreachable; // TODO should not need else unreachable here
};
const gpa = mod.gpa;
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
const arena = &new_decl_arena.allocator;
const fields = try arena.alloc([]const u8, count);
{
var tok_i = error_token + 2;
@@ -1930,7 +2089,7 @@ fn errorSetDecl(
switch (token_tags[tok_i]) {
.doc_comment, .comma => {},
.identifier => {
fields[field_i] = try mod.identifierTokenString(scope, tok_i);
fields[field_i] = try mod.identifierTokenStringTreeArena(scope, tok_i, tree, arena);
field_i += 1;
},
.r_brace => break,
@@ -1940,18 +2099,19 @@ fn errorSetDecl(
}
const error_set = try arena.create(Module.ErrorSet);
error_set.* = .{
.owner_decl = gz.astgen.decl,
.node_offset = gz.astgen.decl.nodeIndexToRelative(node),
.owner_decl = astgen.decl,
.node_offset = astgen.decl.nodeIndexToRelative(node),
.names_ptr = fields.ptr,
.names_len = @intCast(u32, fields.len),
};
const error_set_ty = try Type.Tag.error_set.create(arena, error_set);
const typed_value = try arena.create(TypedValue);
typed_value.* = .{
const error_set_val = try Value.Tag.ty.create(arena, error_set_ty);
const new_decl = try mod.createAnonymousDecl(scope, &new_decl_arena, .{
.ty = Type.initTag(.type),
.val = try Value.Tag.ty.create(arena, error_set_ty),
};
const result = try gz.addConst(typed_value);
.val = error_set_val,
});
const decl_index = try mod.declareDeclDependency(astgen.decl, new_decl);
const result = try gz.addDecl(.decl_val, decl_index, node);
return rvalue(gz, scope, rl, result, node);
}
@@ -3196,8 +3356,13 @@ fn switchExpr(
switch (strat.tag) {
.break_operand => {
// Switch expressions return `true` for `nodeMayNeedMemoryLocation` thus
// this is always true.
assert(strat.elide_store_to_block_ptr_instructions);
// `elide_store_to_block_ptr_instructions` will either be true,
// or all prongs are noreturn.
if (!strat.elide_store_to_block_ptr_instructions) {
astgen.extra.appendSliceAssumeCapacity(scalar_cases_payload.items);
astgen.extra.appendSliceAssumeCapacity(multi_cases_payload.items);
return astgen.indexToRef(switch_block);
}
// There will necessarily be a store_to_block_ptr for
// all prongs, except for prongs that ended with a noreturn instruction.
@@ -3426,7 +3591,8 @@ fn identifier(
const tracy = trace(@src());
defer tracy.end();
const mod = gz.astgen.mod;
const astgen = gz.astgen;
const mod = astgen.mod;
const tree = gz.tree();
const main_tokens = tree.nodes.items(.main_token);
@@ -3459,7 +3625,7 @@ fn identifier(
const result = try gz.add(.{
.tag = .int_type,
.data = .{ .int_type = .{
.src_node = gz.astgen.decl.nodeIndexToRelative(ident),
.src_node = astgen.decl.nodeIndexToRelative(ident),
.signedness = signedness,
.bit_count = bit_count,
} },
@@ -3497,13 +3663,13 @@ fn identifier(
};
}
const gop = try gz.astgen.decl_map.getOrPut(mod.gpa, ident_name);
if (!gop.found_existing) {
const decl = mod.lookupDeclName(scope, ident_name) orelse
return mod.failNode(scope, ident, "use of undeclared identifier '{s}'", .{ident_name});
try gz.astgen.decls.append(mod.gpa, decl);
}
const decl_index = @intCast(u32, gop.index);
const decl = mod.lookupDeclName(scope, ident_name) orelse {
// TODO insert a "dependency on the non-existence of a decl" here to make this
// compile error go away when the decl is introduced. This data should be in a global
// sparse map since it is only relevant when a compile error occurs.
return mod.failNode(scope, ident, "use of undeclared identifier '{s}'", .{ident_name});
};
const decl_index = try mod.declareDeclDependency(astgen.decl, decl);
switch (rl) {
.ref, .none_or_ref => return gz.addDecl(.decl_ref, decl_index, ident),
else => return rvalue(gz, scope, rl, try gz.addDecl(.decl_val, decl_index, ident), ident),
@@ -3638,12 +3804,23 @@ fn floatLiteral(
const float_number = std.fmt.parseFloat(f128, bytes) catch |e| switch (e) {
error.InvalidCharacter => unreachable, // validated by tokenizer
};
const typed_value = try arena.create(TypedValue);
typed_value.* = .{
.ty = Type.initTag(.comptime_float),
.val = try Value.Tag.float_128.create(arena, float_number),
};
const result = try gz.addConst(typed_value);
// If the value fits into a f32 without losing any precision, store it that way.
@setFloatMode(.Strict);
const smaller_float = @floatCast(f32, float_number);
const bigger_again: f128 = smaller_float;
if (bigger_again == float_number) {
const result = try gz.addFloat(smaller_float, node);
return rvalue(gz, scope, rl, result, node);
}
// We need to use 128 bits. Break the float into 4 u32 values so we can
// put it into the `extra` array.
const int_bits = @bitCast(u128, float_number);
const result = try gz.addPlNode(.float128, node, zir.Inst.Float128{
.piece0 = @truncate(u32, int_bits),
.piece1 = @truncate(u32, int_bits >> 32),
.piece2 = @truncate(u32, int_bits >> 64),
.piece3 = @truncate(u32, int_bits >> 96),
});
return rvalue(gz, scope, rl, result, node);
}
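The float-literal change above stores the value with the new compact .float instruction only when narrowing to f32 and widening back reproduces the original f128 exactly; otherwise the bits are split into four u32 pieces for the .float128 payload. A rough sketch of that round-trip check with assumed example values:

const std = @import("std");
const assert = std.debug.assert;

test "which float literals fit in f32" {
    // 1.5 is exactly representable in f32, so the round trip is lossless
    // and the literal can use the compact `.float` form.
    const a: f128 = 1.5;
    assert(@as(f128, @floatCast(f32, a)) == a);

    // 0.1 is not exactly representable; widening the narrowed value gives a
    // different f128, so the literal needs the full `.float128` payload.
    const b: f128 = 0.1;
    assert(@as(f128, @floatCast(f32, b)) != b);
}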
@@ -3955,6 +4132,20 @@ fn builtinCall(
.bit_cast => return bitCast(gz, scope, rl, node, params[0], params[1]),
.TypeOf => return typeOf(gz, scope, rl, node, params),
.int_to_enum => {
const result = try gz.addPlNode(.int_to_enum, node, zir.Inst.Bin{
.lhs = try typeExpr(gz, scope, params[0]),
.rhs = try expr(gz, scope, .none, params[1]),
});
return rvalue(gz, scope, rl, result, node);
},
.enum_to_int => {
const operand = try expr(gz, scope, .none, params[0]);
const result = try gz.addUnNode(.enum_to_int, operand, node);
return rvalue(gz, scope, rl, result, node);
},
.add_with_overflow,
.align_cast,
.align_of,
@@ -3981,7 +4172,6 @@ fn builtinCall(
.div_floor,
.div_trunc,
.embed_file,
.enum_to_int,
.error_name,
.error_return_trace,
.err_set_cast,
@@ -3991,7 +4181,6 @@ fn builtinCall(
.float_to_int,
.has_decl,
.has_field,
.int_to_enum,
.int_to_float,
.int_to_ptr,
.memcpy,

View File

@@ -484,7 +484,7 @@ pub const list = list: {
"@intToEnum",
.{
.tag = .int_to_enum,
.param_count = 1,
.param_count = 2,
},
},
.{
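The param_count fix above brings the table in line with how the builtin is written in source: @intToEnum takes the destination enum type and an integer, while its counterpart @enumToInt (also wired up in this pull request) takes a single enum value. A small hedged usage sketch:

const std = @import("std");
const assert = std.debug.assert;

const Small = enum { one, two, three };

test "enum <-> int builtins" {
    const e = @intToEnum(Small, 1); // two parameters: the destination type and the integer
    assert(e == .two);
    assert(@enumToInt(e) == 1); // one parameter: the enum value
}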

View File

@@ -1346,9 +1346,9 @@ pub fn update(self: *Compilation) !void {
module.generation += 1;
// TODO Detect which source files changed.
// Until then we simulate a full cache miss. Source files could have been loaded for any reason;
// to force a refresh we unload now.
module.root_scope.unload(module.gpa);
// Until then we simulate a full cache miss. Source files could have been loaded
// for any reason; to force a refresh we unload now.
module.unloadFile(module.root_scope);
module.failed_root_src_file = null;
module.analyzeContainer(&module.root_scope.root_container) catch |err| switch (err) {
error.AnalysisFail => {
@@ -1362,7 +1362,7 @@ pub fn update(self: *Compilation) !void {
// TODO only analyze imports if they are still referenced
for (module.import_table.items()) |entry| {
entry.value.unload(module.gpa);
module.unloadFile(entry.value);
module.analyzeContainer(&entry.value.root_container) catch |err| switch (err) {
error.AnalysisFail => {
assert(self.totalErrorCount() != 0);
@@ -1377,14 +1377,17 @@ pub fn update(self: *Compilation) !void {
if (!use_stage1) {
if (self.bin_file.options.module) |module| {
// Process the deletion set.
while (module.deletion_set.popOrNull()) |decl| {
if (decl.dependants.items().len != 0) {
decl.deletion_flag = false;
continue;
}
try module.deleteDecl(decl);
// Process the deletion set. We use a while loop here because the
// deletion set may grow as we call `deleteDecl` within this loop,
// and more unreferenced Decls are revealed.
var entry_i: usize = 0;
while (entry_i < module.deletion_set.entries.items.len) : (entry_i += 1) {
const decl = module.deletion_set.entries.items[entry_i].key;
assert(decl.deletion_flag);
assert(decl.dependants.items().len == 0);
try module.deleteDecl(decl, null);
}
module.deletion_set.shrinkRetainingCapacity(0);
}
}
@@ -1429,11 +1432,25 @@ pub fn totalErrorCount(self: *Compilation) usize {
var total: usize = self.failed_c_objects.items().len;
if (self.bin_file.options.module) |module| {
total += module.failed_decls.count() +
module.emit_h_failed_decls.count() +
module.failed_exports.items().len +
total += module.failed_exports.items().len +
module.failed_files.items().len +
@boolToInt(module.failed_root_src_file != null);
// Skip errors for Decls within files that failed parsing.
// When a parse error is introduced, we keep all the semantic analysis for
// the previous parse success, including compile errors, but we cannot
// emit them until the file succeeds parsing.
for (module.failed_decls.items()) |entry| {
if (entry.key.container.file_scope.status == .unloaded_parse_failure) {
continue;
}
total += 1;
}
for (module.emit_h_failed_decls.items()) |entry| {
if (entry.key.container.file_scope.status == .unloaded_parse_failure) {
continue;
}
total += 1;
}
}
// The "no entry point found" error only counts if there are no other errors.
@@ -1480,9 +1497,19 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
try AllErrors.add(module, &arena, &errors, entry.value.*);
}
for (module.failed_decls.items()) |entry| {
if (entry.key.container.file_scope.status == .unloaded_parse_failure) {
// Skip errors for Decls within files that had a parse failure.
// We'll try again once parsing succeeds.
continue;
}
try AllErrors.add(module, &arena, &errors, entry.value.*);
}
for (module.emit_h_failed_decls.items()) |entry| {
if (entry.key.container.file_scope.status == .unloaded_parse_failure) {
// Skip errors for Decls within files that had a parse failure.
// We'll try again once parsing succeeds.
continue;
}
try AllErrors.add(module, &arena, &errors, entry.value.*);
}
for (module.failed_exports.items()) |entry| {

View File

@@ -65,8 +65,8 @@ emit_h_failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *ErrorMsg) = .{},
/// Keep track of one `@compileLog` callsite per owner Decl.
compile_log_decls: std.AutoArrayHashMapUnmanaged(*Decl, SrcLoc) = .{},
/// Using a map here for consistency with the other fields here.
/// The ErrorMsg memory is owned by the `Scope`, using Module's general purpose allocator.
failed_files: std.AutoArrayHashMapUnmanaged(*Scope, *ErrorMsg) = .{},
/// The ErrorMsg memory is owned by the `Scope.File`, using Module's general purpose allocator.
failed_files: std.AutoArrayHashMapUnmanaged(*Scope.File, *ErrorMsg) = .{},
/// Using a map here for consistency with the other fields here.
/// The ErrorMsg memory is owned by the `Export`, using Module's general purpose allocator.
failed_exports: std.AutoArrayHashMapUnmanaged(*Export, *ErrorMsg) = .{},
@@ -75,7 +75,7 @@ next_anon_name_index: usize = 0,
/// Candidates for deletion. After a semantic analysis update completes, this list
/// contains Decls that need to be deleted if they end up having no references to them.
deletion_set: ArrayListUnmanaged(*Decl) = .{},
deletion_set: std.AutoArrayHashMapUnmanaged(*Decl, void) = .{},
/// Error tags and their values, tag names are duped with mod.gpa.
/// Corresponds with `error_name_list`.
@@ -192,7 +192,7 @@ pub const Decl = struct {
/// to require re-analysis.
outdated,
},
/// This flag is set when this Decl is added to a check_for_deletion set, and cleared
/// This flag is set when this Decl is added to `Module.deletion_set`, and cleared
/// when removed.
deletion_flag: bool,
/// Whether the corresponding AST decl has a `pub` keyword.
@@ -290,6 +290,18 @@ pub const Decl = struct {
return decl.container.fullyQualifiedNameHash(mem.spanZ(decl.name));
}
pub fn renderFullyQualifiedName(decl: Decl, writer: anytype) !void {
const unqualified_name = mem.spanZ(decl.name);
return decl.container.renderFullyQualifiedName(unqualified_name, writer);
}
pub fn getFullyQualifiedName(decl: Decl, gpa: *Allocator) ![]u8 {
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
try decl.renderFullyQualifiedName(buffer.writer());
return buffer.toOwnedSlice();
}
pub fn typedValue(decl: *Decl) error{AnalysisFail}!TypedValue {
const tvm = decl.typedValueManaged() orelse return error.AnalysisFail;
return tvm.typed_value;
@@ -354,6 +366,13 @@ pub const ErrorSet = struct {
/// The string bytes are stored in the owner Decl arena.
/// They are in the same order they appear in the AST.
names_ptr: [*]const []const u8,
pub fn srcLoc(self: ErrorSet) SrcLoc {
return .{
.container = .{ .decl = self.owner_decl },
.lazy = .{ .node_offset = self.node_offset },
};
}
};
/// Represents the data that a struct declaration provides.
@@ -375,8 +394,7 @@ pub const Struct = struct {
};
pub fn getFullyQualifiedName(s: *Struct, gpa: *Allocator) ![]u8 {
// TODO this should return e.g. "std.fs.Dir.OpenOptions"
return gpa.dupe(u8, mem.spanZ(s.owner_decl.name));
return s.owner_decl.getFullyQualifiedName(gpa);
}
pub fn srcLoc(s: Struct) SrcLoc {
@@ -387,6 +405,53 @@ pub const Struct = struct {
}
};
/// Represents the data that an enum declaration provides, when the fields
/// are auto-numbered, and there are no declarations. The integer tag type
/// is inferred to be the smallest power of two unsigned int that fits
/// the number of fields.
pub const EnumSimple = struct {
owner_decl: *Decl,
/// Set of field names in declaration order.
fields: std.StringArrayHashMapUnmanaged(void),
/// Offset from `owner_decl`, points to the enum decl AST node.
node_offset: i32,
pub fn srcLoc(self: EnumSimple) SrcLoc {
return .{
.container = .{ .decl = self.owner_decl },
.lazy = .{ .node_offset = self.node_offset },
};
}
};
/// Represents the data that an enum declaration provides, when there is
/// at least one tag value explicitly specified, or at least one declaration.
pub const EnumFull = struct {
owner_decl: *Decl,
/// An integer type which is used for the numerical value of the enum.
/// Whether zig chooses this type or the user specifies it, it is stored here.
tag_ty: Type,
/// Set of field names in declaration order.
fields: std.StringArrayHashMapUnmanaged(void),
/// Maps integer tag value to field index.
/// Entries are in declaration order, same as `fields`.
/// If this hash map is empty, it means the enum tags are auto-numbered.
values: ValueMap,
/// Represents the declarations inside this enum.
container: Scope.Container,
/// Offset from `owner_decl`, points to the enum decl AST node.
node_offset: i32,
pub const ValueMap = std.ArrayHashMapUnmanaged(Value, void, Value.hash_u32, Value.eql, false);
pub fn srcLoc(self: EnumFull) SrcLoc {
return .{
.container = .{ .decl = self.owner_decl },
.lazy = .{ .node_offset = self.node_offset },
};
}
};
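Per the doc comments above, the split between the two representations is roughly the following; which one a declaration gets depends on whether it carries explicit tag values, member declarations, or (per the AstGen code) an explicit tag type. Illustrative names only:

// EnumSimple: auto-numbered fields and no declarations. The tag type is
// inferred as the smallest power-of-two unsigned integer that fits the
// field count, here u2 for three fields.
const Direction = enum { north, south, east };

// EnumFull: stores an explicit `tag_ty`, a `values` map for explicit tag
// values, and a `container` scope for member declarations.
const Errno = enum(u16) {
    success = 0,
    perm = 1,

    pub fn isError(e: Errno) bool {
        return e != .success;
    }
};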
/// Some Fn struct memory is owned by the Decl's TypedValue.Managed arena allocator.
/// Extern functions do not have this data structure; they are represented by
/// the `Decl` only, with a `Value` tag of `extern_fn`.
@@ -634,6 +699,11 @@ pub const Scope = struct {
// TODO container scope qualified names.
return std.zig.hashSrc(name);
}
pub fn renderFullyQualifiedName(cont: Container, name: []const u8, writer: anytype) !void {
// TODO this should render e.g. "std.fs.Dir.OpenOptions"
return writer.writeAll(name);
}
};
pub const File = struct {
@@ -662,10 +732,12 @@ pub const Scope = struct {
pub fn unload(file: *File, gpa: *Allocator) void {
switch (file.status) {
.never_loaded,
.unloaded_parse_failure,
.never_loaded,
.unloaded_success,
=> {},
=> {
file.status = .unloaded_success;
},
.loaded_success => {
file.tree.deinit(gpa);
@@ -1030,7 +1102,6 @@ pub const Scope = struct {
.instructions = gz.astgen.instructions.toOwnedSlice(),
.string_bytes = gz.astgen.string_bytes.toOwnedSlice(gpa),
.extra = gz.astgen.extra.toOwnedSlice(gpa),
.decls = gz.astgen.decls.toOwnedSlice(gpa),
};
}
@@ -1242,6 +1313,16 @@ pub const Scope = struct {
});
}
pub fn addFloat(gz: *GenZir, number: f32, src_node: ast.Node.Index) !zir.Inst.Ref {
return gz.add(.{
.tag = .float,
.data = .{ .float = .{
.src_node = gz.astgen.decl.nodeIndexToRelative(src_node),
.number = number,
} },
});
}
pub fn addUnNode(
gz: *GenZir,
tag: zir.Inst.Tag,
@@ -1450,13 +1531,6 @@ pub const Scope = struct {
return new_index;
}
pub fn addConst(gz: *GenZir, typed_value: *TypedValue) !zir.Inst.Ref {
return gz.add(.{
.tag = .@"const",
.data = .{ .@"const" = typed_value },
});
}
pub fn add(gz: *GenZir, inst: zir.Inst) !zir.Inst.Ref {
return gz.astgen.indexToRef(try gz.addAsIndex(inst));
}
@@ -2321,7 +2395,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) InnerError!void {
// We don't perform a deletion here, because this Decl or another one
// may end up referencing it before the update is complete.
dep.deletion_flag = true;
try mod.deletion_set.append(mod.gpa, dep);
try mod.deletion_set.put(mod.gpa, dep, {});
}
}
decl.dependencies.clearRetainingCapacity();
@@ -3120,12 +3194,19 @@ fn astgenAndSemaVarDecl(
return type_changed;
}
pub fn declareDeclDependency(mod: *Module, depender: *Decl, dependee: *Decl) !void {
try depender.dependencies.ensureCapacity(mod.gpa, depender.dependencies.items().len + 1);
try dependee.dependants.ensureCapacity(mod.gpa, dependee.dependants.items().len + 1);
/// Returns the depender's index of the dependee.
pub fn declareDeclDependency(mod: *Module, depender: *Decl, dependee: *Decl) !u32 {
try depender.dependencies.ensureCapacity(mod.gpa, depender.dependencies.count() + 1);
try dependee.dependants.ensureCapacity(mod.gpa, dependee.dependants.count() + 1);
if (dependee.deletion_flag) {
dependee.deletion_flag = false;
mod.deletion_set.removeAssertDiscard(dependee);
}
depender.dependencies.putAssumeCapacity(dependee, {});
dependee.dependants.putAssumeCapacity(depender, {});
const gop = depender.dependencies.getOrPutAssumeCapacity(dependee);
return @intCast(u32, gop.index);
}
pub fn getAstTree(mod: *Module, root_scope: *Scope.File) !*const ast.Tree {
@@ -3150,17 +3231,19 @@ pub fn getAstTree(mod: *Module, root_scope: *Scope.File) !*const ast.Tree {
var msg = std.ArrayList(u8).init(mod.gpa);
defer msg.deinit();
const token_starts = tree.tokens.items(.start);
try tree.renderError(parse_err, msg.writer());
const err_msg = try mod.gpa.create(ErrorMsg);
err_msg.* = .{
.src_loc = .{
.container = .{ .file_scope = root_scope },
.lazy = .{ .token_abs = parse_err.token },
.lazy = .{ .byte_abs = token_starts[parse_err.token] },
},
.msg = msg.toOwnedSlice(),
};
mod.failed_files.putAssumeCapacityNoClobber(&root_scope.base, err_msg);
mod.failed_files.putAssumeCapacityNoClobber(root_scope, err_msg);
root_scope.status = .unloaded_parse_failure;
return error.AnalysisFail;
}
@@ -3200,6 +3283,14 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
deleted_decls.putAssumeCapacityNoClobber(entry.key, {});
}
// Keep track of decls that are invalidated from the update. Ultimately,
// the goal is to queue up `analyze_decl` tasks in the work queue for
// the outdated decls, but we cannot queue up the tasks until after
// we find out which ones have been deleted, otherwise there would be
// deleted Decl pointers in the work queue.
var outdated_decls = std.AutoArrayHashMap(*Decl, void).init(mod.gpa);
defer outdated_decls.deinit();
for (decls) |decl_node, decl_i| switch (node_tags[decl_node]) {
.fn_decl => {
const fn_proto = node_datas[decl_node].lhs;
@@ -3210,6 +3301,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
try mod.semaContainerFn(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@@ -3220,6 +3312,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
.fn_proto_multi => try mod.semaContainerFn(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@@ -3231,6 +3324,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
try mod.semaContainerFn(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@@ -3241,6 +3335,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
.fn_proto => try mod.semaContainerFn(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@@ -3255,6 +3350,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
try mod.semaContainerFn(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@@ -3265,6 +3361,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
.fn_proto_multi => try mod.semaContainerFn(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@@ -3276,6 +3373,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
try mod.semaContainerFn(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@@ -3286,6 +3384,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
.fn_proto => try mod.semaContainerFn(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@@ -3296,6 +3395,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
.global_var_decl => try mod.semaContainerVar(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@@ -3304,6 +3404,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
.local_var_decl => try mod.semaContainerVar(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@@ -3312,6 +3413,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
.simple_var_decl => try mod.semaContainerVar(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@@ -3320,6 +3422,7 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
.aligned_var_decl => try mod.semaContainerVar(
container_scope,
&deleted_decls,
&outdated_decls,
decl_node,
decl_i,
tree.*,
@@ -3372,11 +3475,27 @@ pub fn analyzeContainer(mod: *Module, container_scope: *Scope.Container) !void {
},
else => unreachable,
};
// Handle explicitly deleted decls from the source code. Not to be confused
// with when we delete decls because they are no longer referenced.
// Handle explicitly deleted decls from the source code. This is one of two
// places that Decl deletions happen. The other is in `Compilation`, after
// `performAllTheWork`, where we iterate over `Module.deletion_set` and
// delete Decls which are no longer referenced.
// If a Decl is explicitly deleted from source, and also no longer referenced,
// it may be both in this `deleted_decls` set, as well as in the
// `Module.deletion_set`. To avoid deleting it twice, we remove it from the
// deletion set at this time.
for (deleted_decls.items()) |entry| {
log.debug("noticed '{s}' deleted from source", .{entry.key.name});
try mod.deleteDecl(entry.key);
const decl = entry.key;
log.debug("'{s}' deleted from source", .{decl.name});
if (decl.deletion_flag) {
log.debug("'{s}' redundantly in deletion set; removing", .{decl.name});
mod.deletion_set.removeAssertDiscard(decl);
}
try mod.deleteDecl(decl, &outdated_decls);
}
// Finally we can queue up re-analysis tasks after we have processed
// the deleted decls.
for (outdated_decls.items()) |entry| {
try mod.markOutdatedDecl(entry.key);
}
}
@@ -3384,6 +3503,7 @@ fn semaContainerFn(
mod: *Module,
container_scope: *Scope.Container,
deleted_decls: *std.AutoArrayHashMap(*Decl, void),
outdated_decls: *std.AutoArrayHashMap(*Decl, void),
decl_node: ast.Node.Index,
decl_i: usize,
tree: ast.Tree,
@@ -3415,7 +3535,7 @@ fn semaContainerFn(
try mod.failed_decls.putNoClobber(mod.gpa, decl, msg);
} else {
if (!srcHashEql(decl.contents_hash, contents_hash)) {
try mod.markOutdatedDecl(decl);
try outdated_decls.put(decl, {});
decl.contents_hash = contents_hash;
} else switch (mod.comp.bin_file.tag) {
.coff => {
@@ -3450,6 +3570,7 @@ fn semaContainerVar(
mod: *Module,
container_scope: *Scope.Container,
deleted_decls: *std.AutoArrayHashMap(*Decl, void),
outdated_decls: *std.AutoArrayHashMap(*Decl, void),
decl_node: ast.Node.Index,
decl_i: usize,
tree: ast.Tree,
@@ -3475,7 +3596,7 @@ fn semaContainerVar(
errdefer err_msg.destroy(mod.gpa);
try mod.failed_decls.putNoClobber(mod.gpa, decl, err_msg);
} else if (!srcHashEql(decl.contents_hash, contents_hash)) {
try mod.markOutdatedDecl(decl);
try outdated_decls.put(decl, {});
decl.contents_hash = contents_hash;
}
} else {
@@ -3505,17 +3626,27 @@ fn semaContainerField(
log.err("TODO: analyze container field", .{});
}
pub fn deleteDecl(mod: *Module, decl: *Decl) !void {
pub fn deleteDecl(
mod: *Module,
decl: *Decl,
outdated_decls: ?*std.AutoArrayHashMap(*Decl, void),
) !void {
const tracy = trace(@src());
defer tracy.end();
try mod.deletion_set.ensureCapacity(mod.gpa, mod.deletion_set.items.len + decl.dependencies.items().len);
log.debug("deleting decl '{s}'", .{decl.name});
if (outdated_decls) |map| {
_ = map.swapRemove(decl);
try map.ensureCapacity(map.count() + decl.dependants.count());
}
try mod.deletion_set.ensureCapacity(mod.gpa, mod.deletion_set.count() +
decl.dependencies.count());
// Remove from the namespace it resides in. In the case of an anonymous Decl it will
// not be present in the set, and this does nothing.
decl.container.removeDecl(decl);
log.debug("deleting decl '{s}'", .{decl.name});
const name_hash = decl.fullyQualifiedNameHash();
mod.decl_table.removeAssertDiscard(name_hash);
// Remove itself from its dependencies, because we are about to destroy the decl pointer.
@@ -3526,16 +3657,22 @@ pub fn deleteDecl(mod: *Module, decl: *Decl) !void {
// We don't recursively perform a deletion here, because during the update,
// another reference to it may turn up.
dep.deletion_flag = true;
mod.deletion_set.appendAssumeCapacity(dep);
mod.deletion_set.putAssumeCapacity(dep, {});
}
}
// Anything that depends on this deleted decl certainly needs to be re-analyzed.
// Anything that depends on this deleted decl needs to be re-analyzed.
for (decl.dependants.items()) |entry| {
const dep = entry.key;
dep.removeDependency(decl);
if (dep.analysis != .outdated) {
// TODO Move this failure possibility to the top of the function.
try mod.markOutdatedDecl(dep);
if (outdated_decls) |map| {
map.putAssumeCapacity(dep, {});
} else if (std.debug.runtime_safety) {
// If `outdated_decls` is `null`, it means we're being called from
// `Compilation` after `performAllTheWork` and we cannot queue up any
// more work. `dep` must necessarily be another Decl that is no longer
// being referenced, and will be in the `deletion_set`. Otherwise,
// something has gone wrong.
assert(mod.deletion_set.contains(dep));
}
}
if (mod.failed_decls.swapRemove(decl)) |entry| {
@@ -4455,7 +4592,29 @@ pub fn identifierTokenString(mod: *Module, scope: *Scope, token: ast.TokenIndex)
var buf: ArrayListUnmanaged(u8) = .{};
defer buf.deinit(mod.gpa);
try parseStrLit(mod, scope, token, &buf, ident_name, 1);
return buf.toOwnedSlice(mod.gpa);
const duped = try scope.arena().dupe(u8, buf.items);
return duped;
}
/// `scope` is only used for error reporting.
/// The string is stored in `arena` regardless of whether it uses @"" syntax.
pub fn identifierTokenStringTreeArena(
mod: *Module,
scope: *Scope,
token: ast.TokenIndex,
tree: *const ast.Tree,
arena: *Allocator,
) InnerError![]u8 {
const token_tags = tree.tokens.items(.tag);
assert(token_tags[token] == .identifier);
const ident_name = tree.tokenSlice(token);
if (!mem.startsWith(u8, ident_name, "@")) {
return arena.dupe(u8, ident_name);
}
var buf: ArrayListUnmanaged(u8) = .{};
defer buf.deinit(mod.gpa);
try parseStrLit(mod, scope, token, &buf, ident_name, 1);
return arena.dupe(u8, buf.items);
}
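identifierTokenStringTreeArena exists so field names can be copied into the new Decl's arena whether or not they use the @"..." quoted-identifier syntax; for example (hedged, not from the commit):

// Both field names are stored as plain strings ("error" and "ok") in the
// enum's fields map; the quoting only matters at the token level.
const Result = enum {
    @"error",
    ok,
};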
/// Given an identifier token, obtain the string for it (possibly parsing as a string
@@ -4545,3 +4704,10 @@ pub fn parseStrLit(
},
}
}
pub fn unloadFile(mod: *Module, file_scope: *Scope.File) void {
if (file_scope.status == .unloaded_parse_failure) {
mod.failed_files.swapRemove(file_scope).?.value.destroy(mod.gpa);
}
file_scope.unload(mod.gpa);
}

View File

@@ -168,7 +168,6 @@ pub fn analyzeBody(
.cmp_lte => try sema.zirCmp(block, inst, .lte),
.cmp_neq => try sema.zirCmp(block, inst, .neq),
.coerce_result_ptr => try sema.zirCoerceResultPtr(block, inst),
.@"const" => try sema.zirConst(block, inst),
.decl_ref => try sema.zirDeclRef(block, inst),
.decl_val => try sema.zirDeclVal(block, inst),
.load => try sema.zirLoad(block, inst),
@@ -179,6 +178,8 @@ pub fn analyzeBody(
.elem_val_node => try sema.zirElemValNode(block, inst),
.enum_literal => try sema.zirEnumLiteral(block, inst),
.enum_literal_small => try sema.zirEnumLiteralSmall(block, inst),
.enum_to_int => try sema.zirEnumToInt(block, inst),
.int_to_enum => try sema.zirIntToEnum(block, inst),
.err_union_code => try sema.zirErrUnionCode(block, inst),
.err_union_code_ptr => try sema.zirErrUnionCodePtr(block, inst),
.err_union_payload_safe => try sema.zirErrUnionPayload(block, inst, true),
@@ -201,6 +202,8 @@ pub fn analyzeBody(
.import => try sema.zirImport(block, inst),
.indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst),
.int => try sema.zirInt(block, inst),
.float => try sema.zirFloat(block, inst),
.float128 => try sema.zirFloat128(block, inst),
.int_type => try sema.zirIntType(block, inst),
.intcast => try sema.zirIntcast(block, inst),
.is_err => try sema.zirIsErr(block, inst),
@@ -264,7 +267,8 @@ pub fn analyzeBody(
.struct_decl => try sema.zirStructDecl(block, inst, .Auto),
.struct_decl_packed => try sema.zirStructDecl(block, inst, .Packed),
.struct_decl_extern => try sema.zirStructDecl(block, inst, .Extern),
.enum_decl => try sema.zirEnumDecl(block, inst),
.enum_decl => try sema.zirEnumDecl(block, inst, false),
.enum_decl_nonexhaustive => try sema.zirEnumDecl(block, inst, true),
.union_decl => try sema.zirUnionDecl(block, inst),
.opaque_decl => try sema.zirOpaqueDecl(block, inst),
@@ -498,18 +502,6 @@ fn resolveInstConst(
};
}
fn zirConst(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const tv_ptr = sema.code.instructions.items(.data)[inst].@"const";
// Move the TypedValue from old memory to new memory. This allows freeing the ZIR instructions
// after analysis. This happens, for example, with variable declaration initialization
// expressions.
const typed_value_copy = try tv_ptr.copy(sema.arena);
return sema.mod.constInst(sema.arena, .unneeded, typed_value_copy);
}
fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -617,7 +609,12 @@ fn zirStructDecl(
return sema.analyzeDeclVal(block, src, new_decl);
}
fn zirEnumDecl(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
fn zirEnumDecl(
sema: *Sema,
block: *Scope.Block,
inst: zir.Inst.Index,
nonexhaustive: bool,
) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -788,8 +785,8 @@ fn zirAllocInferred(
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const src_node = sema.code.instructions.items(.data)[inst].node;
const src: LazySrcLoc = .{ .node_offset = src_node };
const val_payload = try sema.arena.create(Value.Payload.InferredAlloc);
val_payload.* = .{
@@ -900,7 +897,7 @@ fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Ind
try mod.errNoteNonLazy(
struct_obj.srcLoc(),
msg,
"'{s}' declared here",
"struct '{s}' declared here",
.{fqn},
);
return mod.failWithOwnedErrorMsg(&block.base, msg);
@@ -928,7 +925,7 @@ fn failWithBadFieldAccess(
.{ field_name, fqn },
);
errdefer msg.destroy(gpa);
try mod.errNoteNonLazy(struct_obj.srcLoc(), msg, "'{s}' declared here", .{fqn});
try mod.errNoteNonLazy(struct_obj.srcLoc(), msg, "struct declared here", .{});
break :msg msg;
};
return mod.failWithOwnedErrorMsg(&block.base, msg);
@@ -1070,6 +1067,31 @@ fn zirInt(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*In
return sema.mod.constIntUnsigned(sema.arena, .unneeded, Type.initTag(.comptime_int), int);
}
fn zirFloat(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
const arena = sema.arena;
const inst_data = sema.code.instructions.items(.data)[inst].float;
const src = inst_data.src();
const number = inst_data.number;
return sema.mod.constInst(arena, src, .{
.ty = Type.initTag(.comptime_float),
.val = try Value.Tag.float_32.create(arena, number),
});
}
fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
const arena = sema.arena;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(zir.Inst.Float128, inst_data.payload_index).data;
const src = inst_data.src();
const number = extra.get();
return sema.mod.constInst(arena, src, .{
.ty = Type.initTag(.comptime_float),
.val = try Value.Tag.float_128.create(arena, number),
});
}
fn zirCompileError(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!zir.Inst.Index {
const tracy = trace(@src());
defer tracy.end();
@@ -1385,7 +1407,7 @@ fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const decl = sema.code.decls[inst_data.payload_index];
const decl = sema.owner_decl.dependencies.entries.items[inst_data.payload_index].key;
return sema.analyzeDeclRef(block, src, decl);
}
@@ -1395,7 +1417,7 @@ fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const decl = sema.code.decls[inst_data.payload_index];
const decl = sema.owner_decl.dependencies.entries.items[inst_data.payload_index].key;
return sema.analyzeDeclVal(block, src, decl);
}
@@ -1852,6 +1874,143 @@ fn zirEnumLiteralSmall(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) I
});
}
fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
const mod = sema.mod;
const arena = sema.arena;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand = try sema.resolveInst(inst_data.operand);
const enum_tag: *Inst = switch (operand.ty.zigTypeTag()) {
.Enum => operand,
.Union => {
//if (!operand.ty.unionHasTag()) {
// return mod.fail(
// &block.base,
// operand_src,
// "untagged union '{}' cannot be converted to integer",
// .{dest_ty_src},
// );
//}
return mod.fail(&block.base, operand_src, "TODO zirEnumToInt for tagged unions", .{});
},
else => {
return mod.fail(&block.base, operand_src, "expected enum or tagged union, found {}", .{
operand.ty,
});
},
};
var int_tag_type_buffer: Type.Payload.Bits = undefined;
const int_tag_ty = try enum_tag.ty.intTagType(&int_tag_type_buffer).copy(arena);
if (enum_tag.ty.onePossibleValue()) |opv| {
return mod.constInst(arena, src, .{
.ty = int_tag_ty,
.val = opv,
});
}
if (enum_tag.value()) |enum_tag_val| {
if (enum_tag_val.castTag(.enum_field_index)) |enum_field_payload| {
const field_index = enum_field_payload.data;
switch (enum_tag.ty.tag()) {
.enum_full => {
const enum_full = enum_tag.ty.castTag(.enum_full).?.data;
if (enum_full.values.count() != 0) {
const val = enum_full.values.entries.items[field_index].key;
return mod.constInst(arena, src, .{
.ty = int_tag_ty,
.val = val,
});
} else {
// Field index and integer values are the same.
const val = try Value.Tag.int_u64.create(arena, field_index);
return mod.constInst(arena, src, .{
.ty = int_tag_ty,
.val = val,
});
}
},
.enum_simple => {
// Field index and integer values are the same.
const val = try Value.Tag.int_u64.create(arena, field_index);
return mod.constInst(arena, src, .{
.ty = int_tag_ty,
.val = val,
});
},
else => unreachable,
}
} else {
// Assume it is already an integer and return it directly.
return mod.constInst(arena, src, .{
.ty = int_tag_ty,
.val = enum_tag_val,
});
}
}
try sema.requireRuntimeBlock(block, src);
return block.addUnOp(src, int_tag_ty, .bitcast, enum_tag);
}
fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
const mod = sema.mod;
const target = mod.getTarget();
const arena = sema.arena;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(zir.Inst.Bin, inst_data.payload_index).data;
const src = inst_data.src();
const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
const operand = try sema.resolveInst(extra.rhs);
if (dest_ty.zigTypeTag() != .Enum) {
return mod.fail(&block.base, dest_ty_src, "expected enum, found {}", .{dest_ty});
}
if (dest_ty.isNonexhaustiveEnum()) {
if (operand.value()) |int_val| {
return mod.constInst(arena, src, .{
.ty = dest_ty,
.val = int_val,
});
}
}
if (try sema.resolveDefinedValue(block, operand_src, operand)) |int_val| {
if (!dest_ty.enumHasInt(int_val, target)) {
const msg = msg: {
const msg = try mod.errMsg(
&block.base,
src,
"enum '{}' has no tag with value {}",
.{ dest_ty, int_val },
);
errdefer msg.destroy(sema.gpa);
try mod.errNoteNonLazy(
dest_ty.declSrcLoc(),
msg,
"enum declared here",
.{},
);
break :msg msg;
};
return mod.failWithOwnedErrorMsg(&block.base, msg);
}
return mod.constInst(arena, src, .{
.ty = dest_ty,
.val = int_val,
});
}
try sema.requireRuntimeBlock(block, src);
return block.addUnOp(src, dest_ty, .bitcast, operand);
}
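zirIntToEnum above gives @intToEnum different comptime behavior depending on exhaustiveness: a non-exhaustive enum keeps any comptime-known integer as its value, while an exhaustive enum reports "enum '...' has no tag with value ..." for integers that match no field. Roughly (assumed example):

const Exhaustive = enum(u8) { a, b };
const Open = enum(u8) { a, b, _ };

comptime {
    _ = @intToEnum(Exhaustive, 1); // ok: value 1 is field `b`
    _ = @intToEnum(Open, 200);     // ok: non-exhaustive, the integer is kept as-is
    // _ = @intToEnum(Exhaustive, 200); // error: enum 'Exhaustive' has no tag with value 200
}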
/// Pointer in, pointer out.
fn zirOptionalPayloadPtr(
sema: *Sema,
@@ -2584,6 +2743,8 @@ fn analyzeSwitch(
src_node_offset: i32,
) InnerError!*Inst {
const gpa = sema.gpa;
const mod = sema.mod;
const special: struct { body: []const zir.Inst.Index, end: usize } = switch (special_prong) {
.none => .{ .body = &.{}, .end = extra_end },
.under, .@"else" => blk: {
@@ -2601,16 +2762,16 @@ fn analyzeSwitch(
const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = src_node_offset };
// Validate usage of '_' prongs.
if (special_prong == .under and !operand.ty.isExhaustiveEnum()) {
if (special_prong == .under and !operand.ty.isNonexhaustiveEnum()) {
const msg = msg: {
const msg = try sema.mod.errMsg(
const msg = try mod.errMsg(
&block.base,
src,
"'_' prong only allowed when switching on non-exhaustive enums",
.{},
);
errdefer msg.destroy(gpa);
try sema.mod.errNote(
try mod.errNote(
&block.base,
special_prong_src,
msg,
@@ -2619,14 +2780,123 @@ fn analyzeSwitch(
);
break :msg msg;
};
return sema.mod.failWithOwnedErrorMsg(&block.base, msg);
return mod.failWithOwnedErrorMsg(&block.base, msg);
}
// Validate for duplicate items, missing else prong, and invalid range.
switch (operand.ty.zigTypeTag()) {
.Enum => return sema.mod.fail(&block.base, src, "TODO validate switch .Enum", .{}),
.ErrorSet => return sema.mod.fail(&block.base, src, "TODO validate switch .ErrorSet", .{}),
.Union => return sema.mod.fail(&block.base, src, "TODO validate switch .Union", .{}),
.Enum => {
var seen_fields = try gpa.alloc(?AstGen.SwitchProngSrc, operand.ty.enumFieldCount());
defer gpa.free(seen_fields);
mem.set(?AstGen.SwitchProngSrc, seen_fields, null);
var extra_index: usize = special.end;
{
var scalar_i: u32 = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
const item_ref = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const body_len = sema.code.extra[extra_index];
extra_index += 1;
const body = sema.code.extra[extra_index..][0..body_len];
extra_index += body_len;
try sema.validateSwitchItemEnum(
block,
seen_fields,
item_ref,
src_node_offset,
.{ .scalar = scalar_i },
);
}
}
{
var multi_i: u32 = 0;
while (multi_i < multi_cases_len) : (multi_i += 1) {
const items_len = sema.code.extra[extra_index];
extra_index += 1;
const ranges_len = sema.code.extra[extra_index];
extra_index += 1;
const body_len = sema.code.extra[extra_index];
extra_index += 1;
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len + body_len;
for (items) |item_ref, item_i| {
try sema.validateSwitchItemEnum(
block,
seen_fields,
item_ref,
src_node_offset,
.{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } },
);
}
try sema.validateSwitchNoRange(block, ranges_len, operand.ty, src_node_offset);
}
}
const all_tags_handled = for (seen_fields) |seen_src| {
if (seen_src == null) break false;
} else true;
switch (special_prong) {
.none => {
if (!all_tags_handled) {
const msg = msg: {
const msg = try mod.errMsg(
&block.base,
src,
"switch must handle all possibilities",
.{},
);
errdefer msg.destroy(sema.gpa);
for (seen_fields) |seen_src, i| {
if (seen_src != null) continue;
const field_name = operand.ty.enumFieldName(i);
// TODO have this point to the tag decl instead of here
try mod.errNote(
&block.base,
src,
msg,
"unhandled enumeration value: '{s}'",
.{field_name},
);
}
try mod.errNoteNonLazy(
operand.ty.declSrcLoc(),
msg,
"enum '{}' declared here",
.{operand.ty},
);
break :msg msg;
};
return mod.failWithOwnedErrorMsg(&block.base, msg);
}
},
.under => {
if (all_tags_handled) return mod.fail(
&block.base,
special_prong_src,
"unreachable '_' prong; all cases already handled",
.{},
);
},
.@"else" => {
if (all_tags_handled) return mod.fail(
&block.base,
special_prong_src,
"unreachable else prong; all cases already handled",
.{},
);
},
}
},
.ErrorSet => return mod.fail(&block.base, src, "TODO validate switch .ErrorSet", .{}),
.Union => return mod.fail(&block.base, src, "TODO validate switch .Union", .{}),
.Int, .ComptimeInt => {
var range_set = RangeSet.init(gpa);
defer range_set.deinit();
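The new .Enum validation above enforces the rules its error strings describe: every tag must be covered, each item must name an existing tag, and an else or '_' prong is rejected when it can never be reached. A hedged illustration with invented names:

const Mode = enum { read, write, append };

fn missing(m: Mode) u8 {
    // error: switch must handle all possibilities
    // note: unhandled enumeration value: 'append'
    return switch (m) {
        .read => 0,
        .write => 1,
    };
}

fn redundant(m: Mode) u8 {
    // error: unreachable else prong; all cases already handled
    return switch (m) {
        .read => 0,
        .write => 1,
        .append => 2,
        else => 3,
    };
}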
@@ -2699,11 +2969,11 @@ fn analyzeSwitch(
var arena = std.heap.ArenaAllocator.init(gpa);
defer arena.deinit();
const min_int = try operand.ty.minInt(&arena, sema.mod.getTarget());
const max_int = try operand.ty.maxInt(&arena, sema.mod.getTarget());
const min_int = try operand.ty.minInt(&arena, mod.getTarget());
const max_int = try operand.ty.maxInt(&arena, mod.getTarget());
if (try range_set.spans(min_int, max_int)) {
if (special_prong == .@"else") {
return sema.mod.fail(
return mod.fail(
&block.base,
special_prong_src,
"unreachable else prong; all cases already handled",
@@ -2714,7 +2984,7 @@ fn analyzeSwitch(
}
}
if (special_prong != .@"else") {
return sema.mod.fail(
return mod.fail(
&block.base,
src,
"switch must handle all possibilities",
@@ -2777,7 +3047,7 @@ fn analyzeSwitch(
switch (special_prong) {
.@"else" => {
if (true_count + false_count == 2) {
return sema.mod.fail(
return mod.fail(
&block.base,
src,
"unreachable else prong; all cases already handled",
@@ -2787,7 +3057,7 @@ fn analyzeSwitch(
},
.under, .none => {
if (true_count + false_count < 2) {
return sema.mod.fail(
return mod.fail(
&block.base,
src,
"switch must handle all possibilities",
@@ -2799,7 +3069,7 @@ fn analyzeSwitch(
},
.EnumLiteral, .Void, .Fn, .Pointer, .Type => {
if (special_prong != .@"else") {
return sema.mod.fail(
return mod.fail(
&block.base,
src,
"else prong required when switching on type '{}'",
@@ -2871,7 +3141,7 @@ fn analyzeSwitch(
.AnyFrame,
.ComptimeFloat,
.Float,
=> return sema.mod.fail(&block.base, operand_src, "invalid switch operand type '{}'", .{
=> return mod.fail(&block.base, operand_src, "invalid switch operand type '{}'", .{
operand.ty,
}),
}
@@ -3146,7 +3416,7 @@ fn resolveSwitchItemVal(
switch_node_offset: i32,
switch_prong_src: AstGen.SwitchProngSrc,
range_expand: AstGen.SwitchProngSrc.RangeExpand,
) InnerError!Value {
) InnerError!TypedValue {
const item = try sema.resolveInst(item_ref);
// We have to avoid the other helper functions here because we cannot construct a LazySrcLoc
// because we only have the switch AST node. Only if we know for sure we need to report
@@ -3156,7 +3426,7 @@ fn resolveSwitchItemVal(
const src = switch_prong_src.resolve(block.src_decl, switch_node_offset, range_expand);
return sema.failWithUseOfUndef(block, src);
}
return val;
return TypedValue{ .ty = item.ty, .val = val };
}
const src = switch_prong_src.resolve(block.src_decl, switch_node_offset, range_expand);
return sema.failWithNeededComptime(block, src);
@@ -3171,8 +3441,8 @@ fn validateSwitchRange(
src_node_offset: i32,
switch_prong_src: AstGen.SwitchProngSrc,
) InnerError!void {
const first_val = try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first);
const last_val = try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last);
const first_val = (try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first)).val;
const last_val = (try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last)).val;
const maybe_prev_src = try range_set.add(first_val, last_val, switch_prong_src);
return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
}
@@ -3185,11 +3455,46 @@ fn validateSwitchItem(
src_node_offset: i32,
switch_prong_src: AstGen.SwitchProngSrc,
) InnerError!void {
const item_val = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val;
const maybe_prev_src = try range_set.add(item_val, item_val, switch_prong_src);
return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
}
fn validateSwitchItemEnum(
sema: *Sema,
block: *Scope.Block,
seen_fields: []?AstGen.SwitchProngSrc,
item_ref: zir.Inst.Ref,
src_node_offset: i32,
switch_prong_src: AstGen.SwitchProngSrc,
) InnerError!void {
const mod = sema.mod;
const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
const field_index = item_tv.ty.enumTagFieldIndex(item_tv.val) orelse {
const msg = msg: {
const src = switch_prong_src.resolve(block.src_decl, src_node_offset, .none);
const msg = try mod.errMsg(
&block.base,
src,
"enum '{}' has no tag with value '{}'",
.{ item_tv.ty, item_tv.val },
);
errdefer msg.destroy(sema.gpa);
try mod.errNoteNonLazy(
item_tv.ty.declSrcLoc(),
msg,
"enum declared here",
.{},
);
break :msg msg;
};
return mod.failWithOwnedErrorMsg(&block.base, msg);
};
const maybe_prev_src = seen_fields[field_index];
seen_fields[field_index] = switch_prong_src;
return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
}
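
As a standalone illustration of the seen_fields bookkeeping above (one slot per enum field, filled as each prong item is validated, then scanned for gaps when the switch has no else prong), here is a minimal sketch; the enum, the covered prongs, and the printed message are illustrative only:

const std = @import("std");

const E = enum { a, b, c };

pub fn main() void {
    // One "seen" slot per enum field, mirroring the seen_fields array.
    var seen = [_]bool{false} ** @typeInfo(E).Enum.fields.len;
    // Pretend the switch prongs covered .a and .c only.
    seen[@enumToInt(E.a)] = true;
    seen[@enumToInt(E.c)] = true;
    inline for (@typeInfo(E).Enum.fields) |field, i| {
        if (!seen[i]) {
            std.debug.print("unhandled enumeration value: '{s}'\n", .{field.name});
        }
    }
}
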
fn validateSwitchDupe(
sema: *Sema,
block: *Scope.Block,
@@ -3198,17 +3503,18 @@ fn validateSwitchDupe(
src_node_offset: i32,
) InnerError!void {
const prev_prong_src = maybe_prev_src orelse return;
const mod = sema.mod;
const src = switch_prong_src.resolve(block.src_decl, src_node_offset, .none);
const prev_src = prev_prong_src.resolve(block.src_decl, src_node_offset, .none);
const msg = msg: {
const msg = try sema.mod.errMsg(
const msg = try mod.errMsg(
&block.base,
src,
"duplicate switch value",
.{},
);
errdefer msg.destroy(sema.gpa);
try sema.mod.errNote(
try mod.errNote(
&block.base,
prev_src,
msg,
@@ -3217,7 +3523,7 @@ fn validateSwitchDupe(
);
break :msg msg;
};
return sema.mod.failWithOwnedErrorMsg(&block.base, msg);
return mod.failWithOwnedErrorMsg(&block.base, msg);
}
fn validateSwitchItemBool(
@@ -3229,7 +3535,7 @@ fn validateSwitchItemBool(
src_node_offset: i32,
switch_prong_src: AstGen.SwitchProngSrc,
) InnerError!void {
const item_val = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val;
if (item_val.toBool()) {
true_count.* += 1;
} else {
@@ -3251,7 +3557,7 @@ fn validateSwitchItemSparse(
src_node_offset: i32,
switch_prong_src: AstGen.SwitchProngSrc,
) InnerError!void {
const item_val = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val;
const entry = (try seen_values.fetchPut(item_val, switch_prong_src)) orelse return;
return sema.validateSwitchDupe(block, entry.value, switch_prong_src, src_node_offset);
}
@@ -3631,9 +3937,13 @@ fn zirCmp(
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(zir.Inst.Bin, inst_data.payload_index).data;
const src: LazySrcLoc = inst_data.src();
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
@@ -3645,7 +3955,7 @@ fn zirCmp(
const rhs_ty_tag = rhs.ty.zigTypeTag();
if (is_equality_cmp and lhs_ty_tag == .Null and rhs_ty_tag == .Null) {
// null == null, null != null
return sema.mod.constBool(sema.arena, src, op == .eq);
return mod.constBool(sema.arena, src, op == .eq);
} else if (is_equality_cmp and
((lhs_ty_tag == .Null and rhs_ty_tag == .Optional) or
rhs_ty_tag == .Null and lhs_ty_tag == .Optional))
@@ -3656,23 +3966,23 @@ fn zirCmp(
} else if (is_equality_cmp and
((lhs_ty_tag == .Null and rhs.ty.isCPtr()) or (rhs_ty_tag == .Null and lhs.ty.isCPtr())))
{
return sema.mod.fail(&block.base, src, "TODO implement C pointer cmp", .{});
return mod.fail(&block.base, src, "TODO implement C pointer cmp", .{});
} else if (lhs_ty_tag == .Null or rhs_ty_tag == .Null) {
const non_null_type = if (lhs_ty_tag == .Null) rhs.ty else lhs.ty;
return sema.mod.fail(&block.base, src, "comparison of '{}' with null", .{non_null_type});
return mod.fail(&block.base, src, "comparison of '{}' with null", .{non_null_type});
} else if (is_equality_cmp and
((lhs_ty_tag == .EnumLiteral and rhs_ty_tag == .Union) or
(rhs_ty_tag == .EnumLiteral and lhs_ty_tag == .Union)))
{
return sema.mod.fail(&block.base, src, "TODO implement equality comparison between a union's tag value and an enum literal", .{});
return mod.fail(&block.base, src, "TODO implement equality comparison between a union's tag value and an enum literal", .{});
} else if (lhs_ty_tag == .ErrorSet and rhs_ty_tag == .ErrorSet) {
if (!is_equality_cmp) {
return sema.mod.fail(&block.base, src, "{s} operator not allowed for errors", .{@tagName(op)});
return mod.fail(&block.base, src, "{s} operator not allowed for errors", .{@tagName(op)});
}
if (rhs.value()) |rval| {
if (lhs.value()) |lval| {
                // TODO optimization opportunity: evaluate whether std.mem.eql on the names is faster, or whether calling Module.getErrorValue to get the values and then comparing them is faster
return sema.mod.constBool(sema.arena, src, std.mem.eql(u8, lval.castTag(.@"error").?.data.name, rval.castTag(.@"error").?.data.name) == (op == .eq));
return mod.constBool(sema.arena, src, std.mem.eql(u8, lval.castTag(.@"error").?.data.name, rval.castTag(.@"error").?.data.name) == (op == .eq));
}
}
try sema.requireRuntimeBlock(block, src);
@@ -3684,11 +3994,30 @@ fn zirCmp(
return sema.cmpNumeric(block, src, lhs, rhs, op);
} else if (lhs_ty_tag == .Type and rhs_ty_tag == .Type) {
if (!is_equality_cmp) {
return sema.mod.fail(&block.base, src, "{s} operator not allowed for types", .{@tagName(op)});
return mod.fail(&block.base, src, "{s} operator not allowed for types", .{@tagName(op)});
}
return sema.mod.constBool(sema.arena, src, lhs.value().?.eql(rhs.value().?) == (op == .eq));
return mod.constBool(sema.arena, src, lhs.value().?.eql(rhs.value().?) == (op == .eq));
}
return sema.mod.fail(&block.base, src, "TODO implement more cmp analysis", .{});
const instructions = &[_]*Inst{ lhs, rhs };
const resolved_type = try sema.resolvePeerTypes(block, src, instructions);
if (!resolved_type.isSelfComparable(is_equality_cmp)) {
return mod.fail(&block.base, src, "operator not allowed for type '{}'", .{resolved_type});
}
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
try sema.requireRuntimeBlock(block, src); // TODO try to do it at comptime
const bool_type = Type.initTag(.bool); // TODO handle vectors
const tag: Inst.Tag = switch (op) {
.lt => .cmp_lt,
.lte => .cmp_lte,
.eq => .cmp_eq,
.gte => .cmp_gte,
.gt => .cmp_gt,
.neq => .cmp_neq,
};
return block.addBinOp(src, bool_type, tag, casted_lhs, casted_rhs);
}
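
To show the kind of user code this new fallback handles (type and function names here are illustrative), both operands below are runtime-known, so after resolvePeerTypes picks the enum type the comparison lowers to a runtime cmp_eq:

const Number = enum { one, two, three };

fn sameNumber(a: Number, b: Number) bool {
    // Neither operand has a comptime-known value, so Sema reaches the
    // requireRuntimeBlock path above and emits cmp_eq on the coerced operands.
    return a == b;
}
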
fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
@@ -4215,22 +4544,25 @@ fn namedFieldPtr(
field_name: []const u8,
field_name_src: LazySrcLoc,
) InnerError!*Inst {
const mod = sema.mod;
const arena = sema.arena;
const elem_ty = switch (object_ptr.ty.zigTypeTag()) {
.Pointer => object_ptr.ty.elemType(),
else => return sema.mod.fail(&block.base, object_ptr.src, "expected pointer, found '{}'", .{object_ptr.ty}),
else => return mod.fail(&block.base, object_ptr.src, "expected pointer, found '{}'", .{object_ptr.ty}),
};
switch (elem_ty.zigTypeTag()) {
.Array => {
if (mem.eql(u8, field_name, "len")) {
return sema.mod.constInst(sema.arena, src, .{
return mod.constInst(arena, src, .{
.ty = Type.initTag(.single_const_pointer_to_comptime_int),
.val = try Value.Tag.ref_val.create(
sema.arena,
try Value.Tag.int_u64.create(sema.arena, elem_ty.arrayLen()),
arena,
try Value.Tag.int_u64.create(arena, elem_ty.arrayLen()),
),
});
} else {
return sema.mod.fail(
return mod.fail(
&block.base,
field_name_src,
"no member named '{s}' in '{}'",
@@ -4243,15 +4575,15 @@ fn namedFieldPtr(
switch (ptr_child.zigTypeTag()) {
.Array => {
if (mem.eql(u8, field_name, "len")) {
return sema.mod.constInst(sema.arena, src, .{
return mod.constInst(arena, src, .{
.ty = Type.initTag(.single_const_pointer_to_comptime_int),
.val = try Value.Tag.ref_val.create(
sema.arena,
try Value.Tag.int_u64.create(sema.arena, ptr_child.arrayLen()),
arena,
try Value.Tag.int_u64.create(arena, ptr_child.arrayLen()),
),
});
} else {
return sema.mod.fail(
return mod.fail(
&block.base,
field_name_src,
"no member named '{s}' in '{}'",
@@ -4266,7 +4598,7 @@ fn namedFieldPtr(
_ = try sema.resolveConstValue(block, object_ptr.src, object_ptr);
const result = try sema.analyzeLoad(block, src, object_ptr, object_ptr.src);
const val = result.value().?;
const child_type = try val.toType(sema.arena);
const child_type = try val.toType(arena);
switch (child_type.zigTypeTag()) {
.ErrorSet => {
// TODO resolve inferred error sets
@@ -4280,42 +4612,90 @@ fn namedFieldPtr(
break :blk name;
}
}
return sema.mod.fail(&block.base, src, "no error named '{s}' in '{}'", .{
return mod.fail(&block.base, src, "no error named '{s}' in '{}'", .{
field_name,
child_type,
});
} else (try sema.mod.getErrorValue(field_name)).key;
} else (try mod.getErrorValue(field_name)).key;
return sema.mod.constInst(sema.arena, src, .{
.ty = try sema.mod.simplePtrType(sema.arena, child_type, false, .One),
return mod.constInst(arena, src, .{
.ty = try mod.simplePtrType(arena, child_type, false, .One),
.val = try Value.Tag.ref_val.create(
sema.arena,
try Value.Tag.@"error".create(sema.arena, .{
arena,
try Value.Tag.@"error".create(arena, .{
.name = name,
}),
),
});
},
.Struct => {
const container_scope = child_type.getContainerScope();
if (sema.mod.lookupDeclName(&container_scope.base, field_name)) |decl| {
// TODO if !decl.is_pub and inDifferentFiles() "{} is private"
return sema.analyzeDeclRef(block, src, decl);
}
.Struct, .Opaque, .Union => {
if (child_type.getContainerScope()) |container_scope| {
if (mod.lookupDeclName(&container_scope.base, field_name)) |decl| {
// TODO if !decl.is_pub and inDifferentFiles() "{} is private"
return sema.analyzeDeclRef(block, src, decl);
}
if (container_scope.file_scope == sema.mod.root_scope) {
return sema.mod.fail(&block.base, src, "root source file has no member called '{s}'", .{field_name});
} else {
return sema.mod.fail(&block.base, src, "container '{}' has no member called '{s}'", .{ child_type, field_name });
// TODO this will give false positives for structs inside the root file
if (container_scope.file_scope == mod.root_scope) {
return mod.fail(
&block.base,
src,
"root source file has no member named '{s}'",
.{field_name},
);
}
}
// TODO add note: declared here
const kw_name = switch (child_type.zigTypeTag()) {
.Struct => "struct",
.Opaque => "opaque",
.Union => "union",
else => unreachable,
};
return mod.fail(&block.base, src, "{s} '{}' has no member named '{s}'", .{
kw_name, child_type, field_name,
});
},
else => return sema.mod.fail(&block.base, src, "type '{}' does not support field access", .{child_type}),
.Enum => {
if (child_type.getContainerScope()) |container_scope| {
if (mod.lookupDeclName(&container_scope.base, field_name)) |decl| {
// TODO if !decl.is_pub and inDifferentFiles() "{} is private"
return sema.analyzeDeclRef(block, src, decl);
}
}
const field_index = child_type.enumFieldIndex(field_name) orelse {
const msg = msg: {
const msg = try mod.errMsg(
&block.base,
src,
"enum '{}' has no member named '{s}'",
.{ child_type, field_name },
);
errdefer msg.destroy(sema.gpa);
try mod.errNoteNonLazy(
child_type.declSrcLoc(),
msg,
"enum declared here",
.{},
);
break :msg msg;
};
return mod.failWithOwnedErrorMsg(&block.base, msg);
};
const field_index_u32 = @intCast(u32, field_index);
const enum_val = try Value.Tag.enum_field_index.create(arena, field_index_u32);
return mod.constInst(arena, src, .{
.ty = try mod.simplePtrType(arena, child_type, false, .One),
.val = try Value.Tag.ref_val.create(arena, enum_val),
});
},
else => return mod.fail(&block.base, src, "type '{}' has no members", .{child_type}),
}
},
.Struct => return sema.analyzeStructFieldPtr(block, src, object_ptr, field_name, field_name_src, elem_ty),
else => {},
}
return sema.mod.fail(&block.base, src, "type '{}' does not support field access", .{elem_ty});
return mod.fail(&block.base, src, "type '{}' does not support field access", .{elem_ty});
}
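
A small user-level example of the two lookup paths the .Enum branch above distinguishes (the names are illustrative; no claim is made about how far the backends take this at this commit):

const std = @import("std");

const Color = enum {
    red,
    green,

    pub fn isRed(c: Color) bool {
        return c == .red;
    }
};

comptime {
    // `Color.isRed` is found first by lookupDeclName and resolves to the declaration;
    // `Color.red` falls through to enumFieldIndex and becomes an enum_field_index value.
    std.debug.assert(Color.isRed(Color.red));
}
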
fn analyzeStructFieldPtr(
@@ -4400,10 +4780,13 @@ fn coerce(
return sema.bitcast(block, dest_type, inst);
}
const mod = sema.mod;
const arena = sema.arena;
// undefined to anything
if (inst.value()) |val| {
if (val.isUndef() or inst.ty.zigTypeTag() == .Undefined) {
return sema.mod.constInst(sema.arena, inst_src, .{ .ty = dest_type, .val = val });
return mod.constInst(arena, inst_src, .{ .ty = dest_type, .val = val });
}
}
assert(inst.ty.zigTypeTag() != .Undefined);
@@ -4417,13 +4800,13 @@ fn coerce(
if (try sema.coerceNum(block, dest_type, inst)) |some|
return some;
const target = sema.mod.getTarget();
const target = mod.getTarget();
switch (dest_type.zigTypeTag()) {
.Optional => {
// null to ?T
if (inst.ty.zigTypeTag() == .Null) {
return sema.mod.constInst(sema.arena, inst_src, .{ .ty = dest_type, .val = Value.initTag(.null_value) });
return mod.constInst(arena, inst_src, .{ .ty = dest_type, .val = Value.initTag(.null_value) });
}
// T to ?T
@@ -4509,10 +4892,40 @@ fn coerce(
}
}
},
.Enum => {
// enum literal to enum
if (inst.ty.zigTypeTag() == .EnumLiteral) {
const val = try sema.resolveConstValue(block, inst_src, inst);
const bytes = val.castTag(.enum_literal).?.data;
const field_index = dest_type.enumFieldIndex(bytes) orelse {
const msg = msg: {
const msg = try mod.errMsg(
&block.base,
inst_src,
"enum '{}' has no field named '{s}'",
.{ dest_type, bytes },
);
errdefer msg.destroy(sema.gpa);
try mod.errNoteNonLazy(
dest_type.declSrcLoc(),
msg,
"enum declared here",
.{},
);
break :msg msg;
};
return mod.failWithOwnedErrorMsg(&block.base, msg);
};
return mod.constInst(arena, inst_src, .{
.ty = dest_type,
.val = try Value.Tag.enum_field_index.create(arena, @intCast(u32, field_index)),
});
}
},
else => {},
}
return sema.mod.fail(&block.base, inst_src, "expected {}, found {}", .{ dest_type, inst.ty });
return mod.fail(&block.base, inst_src, "expected {}, found {}", .{ dest_type, inst.ty });
}
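
For reference, the user-level shape of the enum-literal coercion handled above (names illustrative):

const Mode = enum { read, write };

fn defaultMode() Mode {
    // `.read` is an enum literal with no type of its own; coerce() resolves the
    // name against Mode via enumFieldIndex and builds an enum_field_index value.
    return .read;
}
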
const InMemoryCoercionResult = enum {
@@ -4630,7 +5043,7 @@ fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl
}
fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!*Inst {
try sema.mod.declareDeclDependency(sema.owner_decl, decl);
_ = try sema.mod.declareDeclDependency(sema.owner_decl, decl);
sema.mod.ensureDeclAnalyzed(decl) catch |err| {
if (sema.func) |func| {
func.state = .dependency_failure;

View File

@@ -172,7 +172,10 @@ pub const DeclGen = struct {
val: Value,
) error{ OutOfMemory, AnalysisFail }!void {
if (val.isUndef()) {
return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: properly handle undefined in all cases (with debug safety?)", .{});
// This should lower to 0xaa bytes in safe modes, and for unsafe modes should
// lower to leaving variables uninitialized (that might need to be implemented
// outside of this function).
return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement renderValue undef", .{});
}
switch (t.zigTypeTag()) {
.Int => {
@@ -288,6 +291,31 @@ pub const DeclGen = struct {
try writer.writeAll(", .error = 0 }");
}
},
.Enum => {
switch (val.tag()) {
.enum_field_index => {
const field_index = val.castTag(.enum_field_index).?.data;
switch (t.tag()) {
.enum_simple => return writer.print("{d}", .{field_index}),
.enum_full, .enum_nonexhaustive => {
const enum_full = t.cast(Type.Payload.EnumFull).?.data;
if (enum_full.values.count() != 0) {
const tag_val = enum_full.values.entries.items[field_index].key;
return dg.renderValue(writer, enum_full.tag_ty, tag_val);
} else {
return writer.print("{d}", .{field_index});
}
},
else => unreachable,
}
},
else => {
var int_tag_ty_buffer: Type.Payload.Bits = undefined;
const int_tag_ty = t.intTagType(&int_tag_ty_buffer);
return dg.renderValue(writer, int_tag_ty, val);
},
}
},
else => |e| return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement value {s}", .{
@tagName(e),
}),
@@ -368,6 +396,9 @@ pub const DeclGen = struct {
else => unreachable,
}
},
.Float => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Float", .{}),
.Pointer => {
if (t.isSlice()) {
return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement slices", .{});
@@ -472,10 +503,29 @@ pub const DeclGen = struct {
try w.writeAll(name);
dg.typedefs.putAssumeCapacityNoClobber(t, .{ .name = name, .rendered = rendered });
},
.Null, .Undefined => unreachable, // must be const or comptime
else => |e| return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type {s}", .{
@tagName(e),
}),
.Enum => {
// For enums, we simply use the integer tag type.
var int_tag_ty_buffer: Type.Payload.Bits = undefined;
const int_tag_ty = t.intTagType(&int_tag_ty_buffer);
try dg.renderType(w, int_tag_ty);
},
.Union => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Union", .{}),
.Fn => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Fn", .{}),
.Opaque => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Opaque", .{}),
.Frame => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Frame", .{}),
.AnyFrame => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type AnyFrame", .{}),
.Vector => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Vector", .{}),
.Null,
.Undefined,
.EnumLiteral,
.ComptimeFloat,
.ComptimeInt,
.Type,
=> unreachable, // must be const or comptime
.BoundFn => unreachable, // this type will be deleted from the language
}
}
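
To make the new .Enum mapping concrete: an explicitly tagged enum is emitted as its integer tag type, not as a C enum. A sketch under that assumption (the exact C spelling, e.g. uint8_t for u8, follows the backend's existing integer rendering and is not quoted verbatim):

const Small = enum(u8) { a, b };

// renderType sees .Enum, asks intTagType() for u8, and renders that integer
// type, so this global's C declaration uses the u8 C type rather than a C enum.
var global_small: Small = .a;
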

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -37,8 +37,6 @@ pub const Code = struct {
string_bytes: []u8,
/// The meaning of this data is determined by `Inst.Tag` value.
extra: []u32,
/// Used for decl_val and decl_ref instructions.
decls: []*Module.Decl,
/// Returns the requested data, as well as the new index which is at the start of the
/// trailers for the object.
@@ -78,7 +76,6 @@ pub const Code = struct {
code.instructions.deinit(gpa);
gpa.free(code.string_bytes);
gpa.free(code.extra);
gpa.free(code.decls);
code.* = undefined;
}
@@ -133,7 +130,7 @@ pub const Inst = struct {
/// Same as `alloc` except mutable.
alloc_mut,
/// Same as `alloc` except the type is inferred.
/// The operand is unused.
/// Uses the `node` union field.
alloc_inferred,
/// Same as `alloc_inferred` except mutable.
alloc_inferred_mut,
@@ -267,9 +264,6 @@ pub const Inst = struct {
/// only the taken branch is analyzed. The then block and else block must
/// terminate with an "inline" variant of a noreturn instruction.
condbr_inline,
/// A comptime known value.
/// Uses the `const` union field.
@"const",
/// A struct type definition. Contains references to ZIR instructions for
/// the field types, defaults, and alignments.
/// Uses the `pl_node` union field. Payload is `StructDecl`.
@@ -286,6 +280,8 @@ pub const Inst = struct {
/// the field value expressions and optional type tag expression.
/// Uses the `pl_node` union field. Payload is `EnumDecl`.
enum_decl,
/// Same as `enum_decl`, except the enum is non-exhaustive.
enum_decl_nonexhaustive,
/// An opaque type definition. Provides an AST node only.
/// Uses the `node` union field.
opaque_decl,
@@ -369,6 +365,11 @@ pub const Inst = struct {
import,
/// Integer literal that fits in a u64. Uses the int union value.
int,
    /// A float literal that fits in an f32. Uses the `float` union value.
float,
    /// A float literal that fits in an f128. Uses the `pl_node` union value.
/// Payload is `Float128`.
float128,
/// Convert an integer value to another integer type, asserting that the destination type
/// can hold the same mathematical value.
/// Uses the `pl_node` field. AST is the `@intCast` syntax.
@@ -667,6 +668,12 @@ pub const Inst = struct {
/// A struct literal with a specified type, with no fields.
/// Uses the `un_node` field.
struct_init_empty,
/// Converts an integer into an enum value.
/// Uses `pl_node` with payload `Bin`. `lhs` is enum type, `rhs` is operand.
int_to_enum,
/// Converts an enum value into an integer. Resulting type will be the tag type
/// of the enum. Uses `un_node`.
enum_to_int,
/// Returns whether the instruction is one of the control flow "noreturn" types.
/// Function calls do not count.
@@ -712,12 +719,12 @@ pub const Inst = struct {
.cmp_gt,
.cmp_neq,
.coerce_result_ptr,
.@"const",
.struct_decl,
.struct_decl_packed,
.struct_decl_extern,
.union_decl,
.enum_decl,
.enum_decl_nonexhaustive,
.opaque_decl,
.dbg_stmt_node,
.decl_ref,
@@ -740,6 +747,8 @@ pub const Inst = struct {
.fn_type_cc,
.fn_type_cc_var_args,
.int,
.float,
.float128,
.intcast,
.int_type,
.is_non_null,
@@ -822,6 +831,8 @@ pub const Inst = struct {
.switch_block_ref_under_multi,
.validate_struct_init_ptr,
.struct_init_empty,
.int_to_enum,
.enum_to_int,
=> false,
.@"break",
@@ -1184,7 +1195,6 @@ pub const Inst = struct {
}
},
bin: Bin,
@"const": *TypedValue,
/// For strings which may contain null bytes.
str: struct {
/// Offset into `string_bytes`.
@@ -1226,6 +1236,16 @@ pub const Inst = struct {
/// Offset from Decl AST node index.
node: i32,
int: u64,
float: struct {
/// Offset from Decl AST node index.
/// `Tag` determines which kind of AST node this points to.
src_node: i32,
number: f32,
pub fn src(self: @This()) LazySrcLoc {
return .{ .node_offset = self.src_node };
}
},
array_type_sentinel: struct {
len: Ref,
/// index into extra, points to an `ArrayTypeSentinel`
@@ -1507,6 +1527,22 @@ pub const Inst = struct {
tag_type: Ref,
fields_len: u32,
};
    /// An f128 value, broken up into 4 u32 parts.
pub const Float128 = struct {
piece0: u32,
piece1: u32,
piece2: u32,
piece3: u32,
pub fn get(self: Float128) f128 {
const int_bits = @as(u128, self.piece0) |
(@as(u128, self.piece1) << 32) |
(@as(u128, self.piece2) << 64) |
(@as(u128, self.piece3) << 96);
return @bitCast(f128, int_bits);
}
};
};
pub const SpecialProng = enum { none, @"else", under };
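
For reference, a minimal sketch of the encode side (the inverse of Float128.get), assuming the emitter splits the value with @bitCast and @truncate; the helper name is hypothetical and the real AstGen code is not shown in this hunk:

fn packFloat128(x: f128) Inst.Float128 {
    const int_bits = @bitCast(u128, x);
    return .{
        .piece0 = @truncate(u32, int_bits),
        .piece1 = @truncate(u32, int_bits >> 32),
        .piece2 = @truncate(u32, int_bits >> 64),
        .piece3 = @truncate(u32, int_bits >> 96),
    };
}
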
@@ -1536,12 +1572,11 @@ const Writer = struct {
.intcast,
.store,
.store_to_block_ptr,
.store_to_inferred_ptr,
=> try self.writeBin(stream, inst),
.alloc,
.alloc_mut,
.alloc_inferred,
.alloc_inferred_mut,
.indexable_ptr_len,
.bit_not,
.bool_not,
@@ -1581,6 +1616,7 @@ const Writer = struct {
.typeof,
.typeof_elem,
.struct_init_empty,
.enum_to_int,
=> try self.writeUnNode(stream, inst),
.ref,
@@ -1594,11 +1630,12 @@ const Writer = struct {
=> try self.writeBoolBr(stream, inst),
.array_type_sentinel => try self.writeArrayTypeSentinel(stream, inst),
.@"const" => try self.writeConst(stream, inst),
.param_type => try self.writeParamType(stream, inst),
.ptr_type_simple => try self.writePtrTypeSimple(stream, inst),
.ptr_type => try self.writePtrType(stream, inst),
.int => try self.writeInt(stream, inst),
.float => try self.writeFloat(stream, inst),
.float128 => try self.writeFloat128(stream, inst),
.str => try self.writeStr(stream, inst),
.elided => try stream.writeAll(")"),
.int_type => try self.writeIntType(stream, inst),
@@ -1619,6 +1656,7 @@ const Writer = struct {
.slice_sentinel,
.union_decl,
.enum_decl,
.enum_decl_nonexhaustive,
=> try self.writePlNode(stream, inst),
.add,
@@ -1647,6 +1685,7 @@ const Writer = struct {
.merge_error_sets,
.bit_and,
.bit_or,
.int_to_enum,
=> try self.writePlNodeBin(stream, inst),
.call,
@@ -1704,6 +1743,8 @@ const Writer = struct {
.ret_type,
.repeat,
.repeat_inline,
.alloc_inferred,
.alloc_inferred_mut,
=> try self.writeNode(stream, inst),
.error_value,
@@ -1729,7 +1770,6 @@ const Writer = struct {
.bitcast,
.bitcast_result_ptr,
.store_to_inferred_ptr,
=> try stream.writeAll("TODO)"),
}
}
@@ -1773,15 +1813,6 @@ const Writer = struct {
try stream.writeAll("TODO)");
}
fn writeConst(
self: *Writer,
stream: anytype,
inst: Inst.Index,
) (@TypeOf(stream).Error || error{OutOfMemory})!void {
const inst_data = self.code.instructions.items(.data)[inst].@"const";
try stream.writeAll("TODO)");
}
fn writeParamType(
self: *Writer,
stream: anytype,
@@ -1819,6 +1850,23 @@ const Writer = struct {
try stream.print("{d})", .{inst_data});
}
fn writeFloat(self: *Writer, stream: anytype, inst: Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].float;
const src = inst_data.src();
try stream.print("{d}) ", .{inst_data.number});
try self.writeSrc(stream, src);
}
fn writeFloat128(self: *Writer, stream: anytype, inst: Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
const extra = self.code.extraData(Inst.Float128, inst_data.payload_index).data;
const src = inst_data.src();
const number = extra.get();
// TODO improve std.format to be able to print f128 values
try stream.print("{d}) ", .{@floatCast(f64, number)});
try self.writeSrc(stream, src);
}
fn writeStr(
self: *Writer,
stream: anytype,
@@ -2136,7 +2184,8 @@ const Writer = struct {
fn writePlNodeDecl(self: *Writer, stream: anytype, inst: Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
const decl = self.code.decls[inst_data.payload_index];
const owner_decl = self.scope.ownerDecl().?;
const decl = owner_decl.dependencies.entries.items[inst_data.payload_index].key;
try stream.print("{s}) ", .{decl.name});
try self.writeSrc(stream, inst_data.src());
}

View File

@@ -517,7 +517,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\}
, &.{
":3:21: error: mising struct field: x",
":1:15: note: 'Point' declared here",
":1:15: note: struct 'Point' declared here",
});
case.addError(
\\const Point = struct { x: i32, y: i32 };
@@ -531,7 +531,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\}
, &.{
":6:10: error: no field named 'z' in struct 'Point'",
":1:15: note: 'Point' declared here",
":1:15: note: struct declared here",
});
case.addCompareOutput(
\\const Point = struct { x: i32, y: i32 };
@@ -545,6 +545,255 @@ pub fn addCases(ctx: *TestContext) !void {
, "");
}
{
var case = ctx.exeFromCompiledC("enums", .{});
case.addError(
\\const E1 = packed enum { a, b, c };
\\const E2 = extern enum { a, b, c };
\\export fn foo() void {
\\ const x = E1.a;
\\}
\\export fn bar() void {
\\ const x = E2.a;
\\}
, &.{
":1:12: error: enums do not support 'packed' or 'extern'; instead provide an explicit integer tag type",
":2:12: error: enums do not support 'packed' or 'extern'; instead provide an explicit integer tag type",
});
        // comptime fields and explicit field types are caught in AstGen.
case.addError(
\\const E1 = enum {
\\ a,
\\ comptime b,
\\ c,
\\};
\\const E2 = enum {
\\ a,
\\ b: i32,
\\ c,
\\};
\\export fn foo() void {
\\ const x = E1.a;
\\}
\\export fn bar() void {
\\ const x = E2.a;
\\}
, &.{
":3:5: error: enum fields cannot be marked comptime",
":8:8: error: enum fields do not have types",
});
        // Tests @enumToInt, @intToEnum, enum literal coercion, field access syntax, comparison, and switch.
case.addCompareOutput(
\\const Number = enum { One, Two, Three };
\\
\\export fn main() c_int {
\\ var number1 = Number.One;
\\ var number2: Number = .Two;
\\ const number3 = @intToEnum(Number, 2);
\\ if (number1 == number2) return 1;
\\ if (number2 == number3) return 1;
\\ if (@enumToInt(number1) != 0) return 1;
\\ if (@enumToInt(number2) != 1) return 1;
\\ if (@enumToInt(number3) != 2) return 1;
\\ var x: Number = .Two;
\\ if (number2 != x) return 1;
\\ switch (x) {
\\ .One => return 1,
\\ .Two => return 0,
\\ number3 => return 2,
\\ }
\\}
, "");
// Specifying alignment is a parse error.
// This also tests going from a successful build to a parse error.
case.addError(
\\const E1 = enum {
\\ a,
\\ b align(4),
\\ c,
\\};
\\export fn foo() void {
\\ const x = E1.a;
\\}
, &.{
":3:7: error: expected ',', found 'align'",
});
// Redundant non-exhaustive enum mark.
// This also tests going from a parse error to an AstGen error.
case.addError(
\\const E1 = enum {
\\ a,
\\ _,
\\ b,
\\ c,
\\ _,
\\};
\\export fn foo() void {
\\ const x = E1.a;
\\}
, &.{
":6:5: error: redundant non-exhaustive enum mark",
":3:5: note: other mark here",
});
case.addError(
\\const E1 = enum {
\\ a,
\\ b,
\\ c,
\\ _ = 10,
\\};
\\export fn foo() void {
\\ const x = E1.a;
\\}
, &.{
":5:9: error: '_' is used to mark an enum as non-exhaustive and cannot be assigned a value",
});
case.addError(
\\const E1 = enum {};
\\export fn foo() void {
\\ const x = E1.a;
\\}
, &.{
":1:12: error: enum declarations must have at least one tag",
});
case.addError(
\\const E1 = enum { a, b, _ };
\\export fn foo() void {
\\ const x = E1.a;
\\}
, &.{
":1:12: error: non-exhaustive enum missing integer tag type",
":1:25: note: marked non-exhaustive here",
});
case.addError(
\\const E1 = enum { a, b, c, b, d };
\\export fn foo() void {
\\ const x = E1.a;
\\}
, &.{
":1:28: error: duplicate enum tag",
":1:22: note: other tag here",
});
case.addError(
\\export fn foo() void {
\\ const a = true;
\\ const b = @enumToInt(a);
\\}
, &.{
":3:26: error: expected enum or tagged union, found bool",
});
case.addError(
\\export fn foo() void {
\\ const a = 1;
\\ const b = @intToEnum(bool, a);
\\}
, &.{
":3:26: error: expected enum, found bool",
});
case.addError(
\\const E = enum { a, b, c };
\\export fn foo() void {
\\ const b = @intToEnum(E, 3);
\\}
, &.{
":3:15: error: enum 'E' has no tag with value 3",
":1:11: note: enum declared here",
});
case.addError(
\\const E = enum { a, b, c };
\\export fn foo() void {
\\ var x: E = .a;
\\ switch (x) {
\\ .a => {},
\\ .c => {},
\\ }
\\}
, &.{
":4:5: error: switch must handle all possibilities",
":4:5: note: unhandled enumeration value: 'b'",
":1:11: note: enum 'E' declared here",
});
case.addError(
\\const E = enum { a, b, c };
\\export fn foo() void {
\\ var x: E = .a;
\\ switch (x) {
\\ .a => {},
\\ .b => {},
\\ .b => {},
\\ .c => {},
\\ }
\\}
, &.{
":7:10: error: duplicate switch value",
":6:10: note: previous value here",
});
case.addError(
\\const E = enum { a, b, c };
\\export fn foo() void {
\\ var x: E = .a;
\\ switch (x) {
\\ .a => {},
\\ .b => {},
\\ .c => {},
\\ else => {},
\\ }
\\}
, &.{
":8:14: error: unreachable else prong; all cases already handled",
});
case.addError(
\\const E = enum { a, b, c };
\\export fn foo() void {
\\ var x: E = .a;
\\ switch (x) {
\\ .a => {},
\\ .b => {},
\\ _ => {},
\\ }
\\}
, &.{
":4:5: error: '_' prong only allowed when switching on non-exhaustive enums",
":7:11: note: '_' prong here",
});
case.addError(
\\const E = enum { a, b, c };
\\export fn foo() void {
\\ var x = E.d;
\\}
, &.{
":3:14: error: enum 'E' has no member named 'd'",
":1:11: note: enum declared here",
});
case.addError(
\\const E = enum { a, b, c };
\\export fn foo() void {
\\ var x: E = .d;
\\}
, &.{
":3:17: error: enum 'E' has no field named 'd'",
":1:11: note: enum declared here",
});
}
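
For contrast with the error cases above, the accepted form of a non-exhaustive enum pairs the '_' mark with an explicit integer tag type (matching the "non-exhaustive enum missing integer tag type" diagnostic); a minimal declaration, with no claim about how much of it stage2 executes at this commit:

const Status = enum(u8) {
    ok = 0,
    not_found = 4,
    // '_' marks the enum non-exhaustive: any other u8 value is representable.
    _,
};
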
ctx.c("empty start function", linux_x64,
\\export fn _start() noreturn {
\\ unreachable;