Merge pull request #18814 from mlugg/incremental-dependencies
Begin re-implementing incremental compilation
This commit is contained in:
@@ -27,11 +27,12 @@ pub const parseNumberLiteral = number_literal.parseNumberLiteral;
|
||||
pub const c_builtins = @import("zig/c_builtins.zig");
|
||||
pub const c_translation = @import("zig/c_translation.zig");
|
||||
|
||||
pub const SrcHasher = std.crypto.hash.Blake3;
|
||||
pub const SrcHash = [16]u8;
|
||||
|
||||
pub fn hashSrc(src: []const u8) SrcHash {
|
||||
var out: SrcHash = undefined;
|
||||
std.crypto.hash.Blake3.hash(src, &out, .{});
|
||||
SrcHasher.hash(src, &out, .{});
|
||||
return out;
|
||||
}
|
||||
|
||||
@@ -41,7 +42,7 @@ pub fn srcHashEql(a: SrcHash, b: SrcHash) bool {
|
||||
|
||||
pub fn hashName(parent_hash: SrcHash, sep: []const u8, name: []const u8) SrcHash {
|
||||
var out: SrcHash = undefined;
|
||||
var hasher = std.crypto.hash.Blake3.init(.{});
|
||||
var hasher = SrcHasher.init(.{});
|
||||
hasher.update(&parent_hash);
|
||||
hasher.update(sep);
|
||||
hasher.update(name);
|
||||
|
||||
109
src/AstGen.zig
109
src/AstGen.zig
@@ -4815,6 +4815,7 @@ fn structDeclInner(
|
||||
.any_comptime_fields = false,
|
||||
.any_default_inits = false,
|
||||
.any_aligned_fields = false,
|
||||
.fields_hash = std.zig.hashSrc(@tagName(layout)),
|
||||
});
|
||||
return decl_inst.toRef();
|
||||
}
|
||||
@@ -4936,6 +4937,12 @@ fn structDeclInner(
|
||||
}
|
||||
};
|
||||
|
||||
var fields_hasher = std.zig.SrcHasher.init(.{});
|
||||
fields_hasher.update(@tagName(layout));
|
||||
if (backing_int_node != 0) {
|
||||
fields_hasher.update(tree.getNodeSource(backing_int_node));
|
||||
}
|
||||
|
||||
var sfba = std.heap.stackFallback(256, astgen.arena);
|
||||
const sfba_allocator = sfba.get();
|
||||
|
||||
@@ -4956,6 +4963,8 @@ fn structDeclInner(
|
||||
.field => |field| field,
|
||||
};
|
||||
|
||||
fields_hasher.update(tree.getNodeSource(member_node));
|
||||
|
||||
if (!is_tuple) {
|
||||
const field_name = try astgen.identAsString(member.ast.main_token);
|
||||
|
||||
@@ -5083,6 +5092,9 @@ fn structDeclInner(
|
||||
return error.AnalysisFail;
|
||||
}
|
||||
|
||||
var fields_hash: std.zig.SrcHash = undefined;
|
||||
fields_hasher.final(&fields_hash);
|
||||
|
||||
try gz.setStruct(decl_inst, .{
|
||||
.src_node = node,
|
||||
.layout = layout,
|
||||
@@ -5096,6 +5108,7 @@ fn structDeclInner(
|
||||
.any_comptime_fields = any_comptime_fields,
|
||||
.any_default_inits = any_default_inits,
|
||||
.any_aligned_fields = any_aligned_fields,
|
||||
.fields_hash = fields_hash,
|
||||
});
|
||||
|
||||
wip_members.finishBits(bits_per_field);
|
||||
@@ -5174,6 +5187,13 @@ fn unionDeclInner(
|
||||
var wip_members = try WipMembers.init(gpa, &astgen.scratch, decl_count, field_count, bits_per_field, max_field_size);
|
||||
defer wip_members.deinit();
|
||||
|
||||
var fields_hasher = std.zig.SrcHasher.init(.{});
|
||||
fields_hasher.update(@tagName(layout));
|
||||
fields_hasher.update(&.{@intFromBool(auto_enum_tok != null)});
|
||||
if (arg_node != 0) {
|
||||
fields_hasher.update(astgen.tree.getNodeSource(arg_node));
|
||||
}
|
||||
|
||||
var sfba = std.heap.stackFallback(256, astgen.arena);
|
||||
const sfba_allocator = sfba.get();
|
||||
|
||||
@@ -5188,6 +5208,7 @@ fn unionDeclInner(
|
||||
.decl => continue,
|
||||
.field => |field| field,
|
||||
};
|
||||
fields_hasher.update(astgen.tree.getNodeSource(member_node));
|
||||
member.convertToNonTupleLike(astgen.tree.nodes);
|
||||
if (member.ast.tuple_like) {
|
||||
return astgen.failTok(member.ast.main_token, "union field missing name", .{});
|
||||
@@ -5289,6 +5310,9 @@ fn unionDeclInner(
|
||||
return error.AnalysisFail;
|
||||
}
|
||||
|
||||
var fields_hash: std.zig.SrcHash = undefined;
|
||||
fields_hasher.final(&fields_hash);
|
||||
|
||||
if (!block_scope.isEmpty()) {
|
||||
_ = try block_scope.addBreak(.break_inline, decl_inst, .void_value);
|
||||
}
|
||||
@@ -5305,6 +5329,7 @@ fn unionDeclInner(
|
||||
.decls_len = decl_count,
|
||||
.auto_enum_tag = auto_enum_tok != null,
|
||||
.any_aligned_fields = any_aligned_fields,
|
||||
.fields_hash = fields_hash,
|
||||
});
|
||||
|
||||
wip_members.finishBits(bits_per_field);
|
||||
@@ -5498,6 +5523,12 @@ fn containerDecl(
|
||||
var wip_members = try WipMembers.init(gpa, &astgen.scratch, @intCast(counts.decls), @intCast(counts.total_fields), bits_per_field, max_field_size);
|
||||
defer wip_members.deinit();
|
||||
|
||||
var fields_hasher = std.zig.SrcHasher.init(.{});
|
||||
if (container_decl.ast.arg != 0) {
|
||||
fields_hasher.update(tree.getNodeSource(container_decl.ast.arg));
|
||||
}
|
||||
fields_hasher.update(&.{@intFromBool(nonexhaustive)});
|
||||
|
||||
var sfba = std.heap.stackFallback(256, astgen.arena);
|
||||
const sfba_allocator = sfba.get();
|
||||
|
||||
@@ -5510,6 +5541,7 @@ fn containerDecl(
|
||||
for (container_decl.ast.members) |member_node| {
|
||||
if (member_node == counts.nonexhaustive_node)
|
||||
continue;
|
||||
fields_hasher.update(tree.getNodeSource(member_node));
|
||||
namespace.base.tag = .namespace;
|
||||
var member = switch (try containerMember(&block_scope, &namespace.base, &wip_members, member_node)) {
|
||||
.decl => continue,
|
||||
@@ -5590,6 +5622,9 @@ fn containerDecl(
|
||||
_ = try block_scope.addBreak(.break_inline, decl_inst, .void_value);
|
||||
}
|
||||
|
||||
var fields_hash: std.zig.SrcHash = undefined;
|
||||
fields_hasher.final(&fields_hash);
|
||||
|
||||
const body = block_scope.instructionsSlice();
|
||||
const body_len = astgen.countBodyLenAfterFixups(body);
|
||||
|
||||
@@ -5600,6 +5635,7 @@ fn containerDecl(
|
||||
.body_len = body_len,
|
||||
.fields_len = @intCast(counts.total_fields),
|
||||
.decls_len = @intCast(counts.decls),
|
||||
.fields_hash = fields_hash,
|
||||
});
|
||||
|
||||
wip_members.finishBits(bits_per_field);
|
||||
@@ -11900,8 +11936,8 @@ const GenZir = struct {
|
||||
|
||||
var body: []Zir.Inst.Index = &[0]Zir.Inst.Index{};
|
||||
var ret_body: []Zir.Inst.Index = &[0]Zir.Inst.Index{};
|
||||
var src_locs_buffer: [3]u32 = undefined;
|
||||
var src_locs: []u32 = src_locs_buffer[0..0];
|
||||
var src_locs_and_hash_buffer: [7]u32 = undefined;
|
||||
var src_locs_and_hash: []u32 = src_locs_and_hash_buffer[0..0];
|
||||
if (args.body_gz) |body_gz| {
|
||||
const tree = astgen.tree;
|
||||
const node_tags = tree.nodes.items(.tag);
|
||||
@@ -11916,10 +11952,27 @@ const GenZir = struct {
|
||||
const rbrace_column: u32 = @intCast(astgen.source_column);
|
||||
|
||||
const columns = args.lbrace_column | (rbrace_column << 16);
|
||||
src_locs_buffer[0] = args.lbrace_line;
|
||||
src_locs_buffer[1] = rbrace_line;
|
||||
src_locs_buffer[2] = columns;
|
||||
src_locs = &src_locs_buffer;
|
||||
|
||||
const proto_hash: std.zig.SrcHash = switch (node_tags[fn_decl]) {
|
||||
.fn_decl => sig_hash: {
|
||||
const proto_node = node_datas[fn_decl].lhs;
|
||||
break :sig_hash std.zig.hashSrc(tree.getNodeSource(proto_node));
|
||||
},
|
||||
.test_decl => std.zig.hashSrc(""), // tests don't have a prototype
|
||||
else => unreachable,
|
||||
};
|
||||
const proto_hash_arr: [4]u32 = @bitCast(proto_hash);
|
||||
|
||||
src_locs_and_hash_buffer = .{
|
||||
args.lbrace_line,
|
||||
rbrace_line,
|
||||
columns,
|
||||
proto_hash_arr[0],
|
||||
proto_hash_arr[1],
|
||||
proto_hash_arr[2],
|
||||
proto_hash_arr[3],
|
||||
};
|
||||
src_locs_and_hash = &src_locs_and_hash_buffer;
|
||||
|
||||
body = body_gz.instructionsSlice();
|
||||
if (args.ret_gz) |ret_gz|
|
||||
@@ -11953,7 +12006,7 @@ const GenZir = struct {
|
||||
fancyFnExprExtraLen(astgen, section_body, args.section_ref) +
|
||||
fancyFnExprExtraLen(astgen, cc_body, args.cc_ref) +
|
||||
fancyFnExprExtraLen(astgen, ret_body, ret_ref) +
|
||||
body_len + src_locs.len +
|
||||
body_len + src_locs_and_hash.len +
|
||||
@intFromBool(args.lib_name != .empty) +
|
||||
@intFromBool(args.noalias_bits != 0),
|
||||
);
|
||||
@@ -12040,7 +12093,7 @@ const GenZir = struct {
|
||||
}
|
||||
|
||||
astgen.appendBodyWithFixups(body);
|
||||
astgen.extra.appendSliceAssumeCapacity(src_locs);
|
||||
astgen.extra.appendSliceAssumeCapacity(src_locs_and_hash);
|
||||
|
||||
// Order is important when unstacking.
|
||||
if (args.body_gz) |body_gz| body_gz.unstack();
|
||||
@@ -12068,7 +12121,7 @@ const GenZir = struct {
|
||||
gpa,
|
||||
@typeInfo(Zir.Inst.Func).Struct.fields.len + 1 +
|
||||
fancyFnExprExtraLen(astgen, ret_body, ret_ref) +
|
||||
body_len + src_locs.len,
|
||||
body_len + src_locs_and_hash.len,
|
||||
);
|
||||
|
||||
const ret_body_len = if (ret_body.len != 0)
|
||||
@@ -12092,7 +12145,7 @@ const GenZir = struct {
|
||||
astgen.extra.appendAssumeCapacity(@intFromEnum(ret_ref));
|
||||
}
|
||||
astgen.appendBodyWithFixups(body);
|
||||
astgen.extra.appendSliceAssumeCapacity(src_locs);
|
||||
astgen.extra.appendSliceAssumeCapacity(src_locs_and_hash);
|
||||
|
||||
// Order is important when unstacking.
|
||||
if (args.body_gz) |body_gz| body_gz.unstack();
|
||||
@@ -12853,12 +12906,20 @@ const GenZir = struct {
|
||||
any_comptime_fields: bool,
|
||||
any_default_inits: bool,
|
||||
any_aligned_fields: bool,
|
||||
fields_hash: std.zig.SrcHash,
|
||||
}) !void {
|
||||
const astgen = gz.astgen;
|
||||
const gpa = astgen.gpa;
|
||||
|
||||
try astgen.extra.ensureUnusedCapacity(gpa, 6);
|
||||
const payload_index: u32 = @intCast(astgen.extra.items.len);
|
||||
const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash);
|
||||
|
||||
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.StructDecl).Struct.fields.len + 6);
|
||||
const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.StructDecl{
|
||||
.fields_hash_0 = fields_hash_arr[0],
|
||||
.fields_hash_1 = fields_hash_arr[1],
|
||||
.fields_hash_2 = fields_hash_arr[2],
|
||||
.fields_hash_3 = fields_hash_arr[3],
|
||||
});
|
||||
|
||||
if (args.src_node != 0) {
|
||||
const node_offset = gz.nodeIndexToRelative(args.src_node);
|
||||
@@ -12908,12 +12969,20 @@ const GenZir = struct {
|
||||
layout: std.builtin.Type.ContainerLayout,
|
||||
auto_enum_tag: bool,
|
||||
any_aligned_fields: bool,
|
||||
fields_hash: std.zig.SrcHash,
|
||||
}) !void {
|
||||
const astgen = gz.astgen;
|
||||
const gpa = astgen.gpa;
|
||||
|
||||
try astgen.extra.ensureUnusedCapacity(gpa, 5);
|
||||
const payload_index: u32 = @intCast(astgen.extra.items.len);
|
||||
const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash);
|
||||
|
||||
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len + 5);
|
||||
const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.UnionDecl{
|
||||
.fields_hash_0 = fields_hash_arr[0],
|
||||
.fields_hash_1 = fields_hash_arr[1],
|
||||
.fields_hash_2 = fields_hash_arr[2],
|
||||
.fields_hash_3 = fields_hash_arr[3],
|
||||
});
|
||||
|
||||
if (args.src_node != 0) {
|
||||
const node_offset = gz.nodeIndexToRelative(args.src_node);
|
||||
@@ -12958,12 +13027,20 @@ const GenZir = struct {
|
||||
fields_len: u32,
|
||||
decls_len: u32,
|
||||
nonexhaustive: bool,
|
||||
fields_hash: std.zig.SrcHash,
|
||||
}) !void {
|
||||
const astgen = gz.astgen;
|
||||
const gpa = astgen.gpa;
|
||||
|
||||
try astgen.extra.ensureUnusedCapacity(gpa, 5);
|
||||
const payload_index: u32 = @intCast(astgen.extra.items.len);
|
||||
const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash);
|
||||
|
||||
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.EnumDecl).Struct.fields.len + 5);
|
||||
const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.EnumDecl{
|
||||
.fields_hash_0 = fields_hash_arr[0],
|
||||
.fields_hash_1 = fields_hash_arr[1],
|
||||
.fields_hash_2 = fields_hash_arr[2],
|
||||
.fields_hash_3 = fields_hash_arr[3],
|
||||
});
|
||||
|
||||
if (args.src_node != 0) {
|
||||
const node_offset = gz.nodeIndexToRelative(args.src_node);
|
||||
|
||||
@@ -3497,7 +3497,7 @@ fn walkInstruction(
|
||||
};
|
||||
|
||||
const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small));
|
||||
var extra_index: usize = extended.operand;
|
||||
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len;
|
||||
|
||||
const src_node: ?i32 = if (small.has_src_node) blk: {
|
||||
const src_node = @as(i32, @bitCast(file.zir.extra[extra_index]));
|
||||
@@ -3627,7 +3627,7 @@ fn walkInstruction(
|
||||
};
|
||||
|
||||
const small = @as(Zir.Inst.EnumDecl.Small, @bitCast(extended.small));
|
||||
var extra_index: usize = extended.operand;
|
||||
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.EnumDecl).Struct.fields.len;
|
||||
|
||||
const src_node: ?i32 = if (small.has_src_node) blk: {
|
||||
const src_node = @as(i32, @bitCast(file.zir.extra[extra_index]));
|
||||
@@ -3778,7 +3778,7 @@ fn walkInstruction(
|
||||
};
|
||||
|
||||
const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small));
|
||||
var extra_index: usize = extended.operand;
|
||||
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
|
||||
|
||||
const src_node: ?i32 = if (small.has_src_node) blk: {
|
||||
const src_node = @as(i32, @bitCast(file.zir.extra[extra_index]));
|
||||
|
||||
@@ -156,6 +156,7 @@ time_report: bool,
|
||||
stack_report: bool,
|
||||
debug_compiler_runtime_libs: bool,
|
||||
debug_compile_errors: bool,
|
||||
debug_incremental: bool,
|
||||
job_queued_compiler_rt_lib: bool = false,
|
||||
job_queued_compiler_rt_obj: bool = false,
|
||||
job_queued_update_builtin_zig: bool,
|
||||
@@ -1079,6 +1080,7 @@ pub const CreateOptions = struct {
|
||||
verbose_llvm_cpu_features: bool = false,
|
||||
debug_compiler_runtime_libs: bool = false,
|
||||
debug_compile_errors: bool = false,
|
||||
debug_incremental: bool = false,
|
||||
/// Normally when you create a `Compilation`, Zig will automatically build
|
||||
/// and link in required dependencies, such as compiler-rt and libc. When
|
||||
/// building such dependencies themselves, this flag must be set to avoid
|
||||
@@ -1508,6 +1510,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
|
||||
.test_name_prefix = options.test_name_prefix,
|
||||
.debug_compiler_runtime_libs = options.debug_compiler_runtime_libs,
|
||||
.debug_compile_errors = options.debug_compile_errors,
|
||||
.debug_incremental = options.debug_incremental,
|
||||
.libcxx_abi_version = options.libcxx_abi_version,
|
||||
.root_name = root_name,
|
||||
.sysroot = sysroot,
|
||||
@@ -2141,7 +2144,6 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void
|
||||
|
||||
if (comp.module) |module| {
|
||||
module.compile_log_text.shrinkAndFree(gpa, 0);
|
||||
module.generation += 1;
|
||||
|
||||
// Make sure std.zig is inside the import_table. We unconditionally need
|
||||
// it for start.zig.
|
||||
@@ -2807,6 +2809,13 @@ const Header = extern struct {
|
||||
limbs_len: u32,
|
||||
string_bytes_len: u32,
|
||||
tracked_insts_len: u32,
|
||||
src_hash_deps_len: u32,
|
||||
decl_val_deps_len: u32,
|
||||
namespace_deps_len: u32,
|
||||
namespace_name_deps_len: u32,
|
||||
first_dependency_len: u32,
|
||||
dep_entries_len: u32,
|
||||
free_dep_entries_len: u32,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -2814,7 +2823,7 @@ const Header = extern struct {
|
||||
/// saved, such as the target and most CLI flags. A cache hit will only occur
|
||||
/// when subsequent compiler invocations use the same set of flags.
|
||||
pub fn saveState(comp: *Compilation) !void {
|
||||
var bufs_list: [7]std.os.iovec_const = undefined;
|
||||
var bufs_list: [19]std.os.iovec_const = undefined;
|
||||
var bufs_len: usize = 0;
|
||||
|
||||
const lf = comp.bin_file orelse return;
|
||||
@@ -2828,6 +2837,13 @@ pub fn saveState(comp: *Compilation) !void {
|
||||
.limbs_len = @intCast(ip.limbs.items.len),
|
||||
.string_bytes_len = @intCast(ip.string_bytes.items.len),
|
||||
.tracked_insts_len = @intCast(ip.tracked_insts.count()),
|
||||
.src_hash_deps_len = @intCast(ip.src_hash_deps.count()),
|
||||
.decl_val_deps_len = @intCast(ip.decl_val_deps.count()),
|
||||
.namespace_deps_len = @intCast(ip.namespace_deps.count()),
|
||||
.namespace_name_deps_len = @intCast(ip.namespace_name_deps.count()),
|
||||
.first_dependency_len = @intCast(ip.first_dependency.count()),
|
||||
.dep_entries_len = @intCast(ip.dep_entries.items.len),
|
||||
.free_dep_entries_len = @intCast(ip.free_dep_entries.items.len),
|
||||
},
|
||||
};
|
||||
addBuf(&bufs_list, &bufs_len, mem.asBytes(&header));
|
||||
@@ -2838,6 +2854,20 @@ pub fn saveState(comp: *Compilation) !void {
|
||||
addBuf(&bufs_list, &bufs_len, ip.string_bytes.items);
|
||||
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.tracked_insts.keys()));
|
||||
|
||||
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.src_hash_deps.keys()));
|
||||
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.src_hash_deps.values()));
|
||||
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.decl_val_deps.keys()));
|
||||
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.decl_val_deps.values()));
|
||||
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.namespace_deps.keys()));
|
||||
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.namespace_deps.values()));
|
||||
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.namespace_name_deps.keys()));
|
||||
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.namespace_name_deps.values()));
|
||||
|
||||
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.first_dependency.keys()));
|
||||
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.first_dependency.values()));
|
||||
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.dep_entries.items));
|
||||
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.free_dep_entries.items));
|
||||
|
||||
// TODO: compilation errors
|
||||
// TODO: files
|
||||
// TODO: namespaces
|
||||
@@ -3463,9 +3493,7 @@ pub fn performAllTheWork(
|
||||
|
||||
if (comp.module) |mod| {
|
||||
try reportMultiModuleErrors(mod);
|
||||
}
|
||||
|
||||
if (comp.module) |mod| {
|
||||
try mod.flushRetryableFailures();
|
||||
mod.sema_prog_node = main_progress_node.start("Semantic Analysis", 0);
|
||||
mod.sema_prog_node.activate();
|
||||
}
|
||||
@@ -3486,6 +3514,17 @@ pub fn performAllTheWork(
|
||||
try processOneJob(comp, work_item, main_progress_node);
|
||||
continue;
|
||||
}
|
||||
if (comp.module) |zcu| {
|
||||
// If there's no work queued, check if there's anything outdated
|
||||
// which we need to work on, and queue it if so.
|
||||
if (try zcu.findOutdatedToAnalyze()) |outdated| {
|
||||
switch (outdated.unwrap()) {
|
||||
.decl => |decl| try comp.work_queue.writeItem(.{ .analyze_decl = decl }),
|
||||
.func => |func| try comp.work_queue.writeItem(.{ .codegen_func = func }),
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -3509,17 +3548,14 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v
|
||||
switch (decl.analysis) {
|
||||
.unreferenced => unreachable,
|
||||
.in_progress => unreachable,
|
||||
.outdated => unreachable,
|
||||
|
||||
.file_failure,
|
||||
.sema_failure,
|
||||
.liveness_failure,
|
||||
.codegen_failure,
|
||||
.dependency_failure,
|
||||
.sema_failure_retryable,
|
||||
=> return,
|
||||
|
||||
.complete, .codegen_failure_retryable => {
|
||||
.complete => {
|
||||
const named_frame = tracy.namedFrame("codegen_decl");
|
||||
defer named_frame.end();
|
||||
|
||||
@@ -3554,17 +3590,15 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v
|
||||
switch (decl.analysis) {
|
||||
.unreferenced => unreachable,
|
||||
.in_progress => unreachable,
|
||||
.outdated => unreachable,
|
||||
|
||||
.file_failure,
|
||||
.sema_failure,
|
||||
.dependency_failure,
|
||||
.sema_failure_retryable,
|
||||
=> return,
|
||||
|
||||
// emit-h only requires semantic analysis of the Decl to be complete,
|
||||
// it does not depend on machine code generation to succeed.
|
||||
.liveness_failure, .codegen_failure, .codegen_failure_retryable, .complete => {
|
||||
.codegen_failure, .complete => {
|
||||
const named_frame = tracy.namedFrame("emit_h_decl");
|
||||
defer named_frame.end();
|
||||
|
||||
@@ -3636,7 +3670,8 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v
|
||||
"unable to update line number: {s}",
|
||||
.{@errorName(err)},
|
||||
));
|
||||
decl.analysis = .codegen_failure_retryable;
|
||||
decl.analysis = .codegen_failure;
|
||||
try module.retryable_failures.append(gpa, InternPool.Depender.wrap(.{ .decl = decl_index }));
|
||||
};
|
||||
},
|
||||
.analyze_mod => |pkg| {
|
||||
|
||||
@@ -58,6 +58,38 @@ string_table: std.HashMapUnmanaged(
|
||||
/// persists across incremental updates.
|
||||
tracked_insts: std.AutoArrayHashMapUnmanaged(TrackedInst, void) = .{},
|
||||
|
||||
/// Dependencies on the source code hash associated with a ZIR instruction.
|
||||
/// * For a `declaration`, this is the entire declaration body.
|
||||
/// * For a `struct_decl`, `union_decl`, etc, this is the source of the fields (but not declarations).
|
||||
/// * For a `func`, this is the source of the full function signature.
|
||||
/// These are also invalidated if tracking fails for this instruction.
|
||||
/// Value is index into `dep_entries` of the first dependency on this hash.
|
||||
src_hash_deps: std.AutoArrayHashMapUnmanaged(TrackedInst.Index, DepEntry.Index) = .{},
|
||||
/// Dependencies on the value of a Decl.
|
||||
/// Value is index into `dep_entries` of the first dependency on this Decl value.
|
||||
decl_val_deps: std.AutoArrayHashMapUnmanaged(DeclIndex, DepEntry.Index) = .{},
|
||||
/// Dependencies on the full set of names in a ZIR namespace.
|
||||
/// Key refers to a `struct_decl`, `union_decl`, etc.
|
||||
/// Value is index into `dep_entries` of the first dependency on this namespace.
|
||||
namespace_deps: std.AutoArrayHashMapUnmanaged(TrackedInst.Index, DepEntry.Index) = .{},
|
||||
/// Dependencies on the (non-)existence of some name in a namespace.
|
||||
/// Value is index into `dep_entries` of the first dependency on this name.
|
||||
namespace_name_deps: std.AutoArrayHashMapUnmanaged(NamespaceNameKey, DepEntry.Index) = .{},
|
||||
|
||||
/// Given a `Depender`, points to an entry in `dep_entries` whose `depender`
|
||||
/// matches. The `next_dependee` field can be used to iterate all such entries
|
||||
/// and remove them from the corresponding lists.
|
||||
first_dependency: std.AutoArrayHashMapUnmanaged(Depender, DepEntry.Index) = .{},
|
||||
|
||||
/// Stores dependency information. The hashmaps declared above are used to look
|
||||
/// up entries in this list as required. This is not stored in `extra` so that
|
||||
/// we can use `free_dep_entries` to track free indices, since dependencies are
|
||||
/// removed frequently.
|
||||
dep_entries: std.ArrayListUnmanaged(DepEntry) = .{},
|
||||
/// Stores unused indices in `dep_entries` which can be reused without a full
|
||||
/// garbage collection pass.
|
||||
free_dep_entries: std.ArrayListUnmanaged(DepEntry.Index) = .{},
|
||||
|
||||
pub const TrackedInst = extern struct {
|
||||
path_digest: Cache.BinDigest,
|
||||
inst: Zir.Inst.Index,
|
||||
@@ -70,6 +102,19 @@ pub const TrackedInst = extern struct {
|
||||
pub fn resolve(i: TrackedInst.Index, ip: *const InternPool) Zir.Inst.Index {
|
||||
return ip.tracked_insts.keys()[@intFromEnum(i)].inst;
|
||||
}
|
||||
pub fn toOptional(i: TrackedInst.Index) Optional {
|
||||
return @enumFromInt(@intFromEnum(i));
|
||||
}
|
||||
pub const Optional = enum(u32) {
|
||||
none = std.math.maxInt(u32),
|
||||
_,
|
||||
pub fn unwrap(opt: Optional) ?TrackedInst.Index {
|
||||
return switch (opt) {
|
||||
.none => null,
|
||||
_ => @enumFromInt(@intFromEnum(opt)),
|
||||
};
|
||||
}
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
@@ -82,6 +127,202 @@ pub fn trackZir(ip: *InternPool, gpa: Allocator, file: *Module.File, inst: Zir.I
|
||||
return @enumFromInt(gop.index);
|
||||
}
|
||||
|
||||
/// Reperesents the "source" of a dependency edge, i.e. either a Decl or a
|
||||
/// runtime function (represented as an InternPool index).
|
||||
/// MSB is 0 for a Decl, 1 for a function.
|
||||
pub const Depender = enum(u32) {
|
||||
_,
|
||||
pub const Unwrapped = union(enum) {
|
||||
decl: DeclIndex,
|
||||
func: InternPool.Index,
|
||||
};
|
||||
pub fn unwrap(dep: Depender) Unwrapped {
|
||||
const tag: u1 = @truncate(@intFromEnum(dep) >> 31);
|
||||
const val: u31 = @truncate(@intFromEnum(dep));
|
||||
return switch (tag) {
|
||||
0 => .{ .decl = @enumFromInt(val) },
|
||||
1 => .{ .func = @enumFromInt(val) },
|
||||
};
|
||||
}
|
||||
pub fn wrap(raw: Unwrapped) Depender {
|
||||
return @enumFromInt(switch (raw) {
|
||||
.decl => |decl| @intFromEnum(decl),
|
||||
.func => |func| (1 << 31) | @intFromEnum(func),
|
||||
});
|
||||
}
|
||||
pub fn toOptional(dep: Depender) Optional {
|
||||
return @enumFromInt(@intFromEnum(dep));
|
||||
}
|
||||
pub const Optional = enum(u32) {
|
||||
none = std.math.maxInt(u32),
|
||||
_,
|
||||
pub fn unwrap(opt: Optional) ?Depender {
|
||||
return switch (opt) {
|
||||
.none => null,
|
||||
_ => @enumFromInt(@intFromEnum(opt)),
|
||||
};
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
pub const Dependee = union(enum) {
|
||||
src_hash: TrackedInst.Index,
|
||||
decl_val: DeclIndex,
|
||||
namespace: TrackedInst.Index,
|
||||
namespace_name: NamespaceNameKey,
|
||||
};
|
||||
|
||||
pub fn removeDependenciesForDepender(ip: *InternPool, gpa: Allocator, depender: Depender) void {
|
||||
var opt_idx = (ip.first_dependency.fetchSwapRemove(depender) orelse return).value.toOptional();
|
||||
|
||||
while (opt_idx.unwrap()) |idx| {
|
||||
const dep = ip.dep_entries.items[@intFromEnum(idx)];
|
||||
opt_idx = dep.next_dependee;
|
||||
|
||||
const prev_idx = dep.prev.unwrap() orelse {
|
||||
// This entry is the start of a list in some `*_deps`.
|
||||
// We cannot easily remove this mapping, so this must remain as a dummy entry.
|
||||
ip.dep_entries.items[@intFromEnum(idx)].depender = .none;
|
||||
continue;
|
||||
};
|
||||
|
||||
ip.dep_entries.items[@intFromEnum(prev_idx)].next = dep.next;
|
||||
if (dep.next.unwrap()) |next_idx| {
|
||||
ip.dep_entries.items[@intFromEnum(next_idx)].prev = dep.prev;
|
||||
}
|
||||
|
||||
ip.free_dep_entries.append(gpa, idx) catch {
|
||||
// This memory will be reclaimed on the next garbage collection.
|
||||
// Thus, we do not need to propagate this error.
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
pub const DependencyIterator = struct {
|
||||
ip: *const InternPool,
|
||||
next_entry: DepEntry.Index.Optional,
|
||||
pub fn next(it: *DependencyIterator) ?Depender {
|
||||
const idx = it.next_entry.unwrap() orelse return null;
|
||||
const entry = it.ip.dep_entries.items[@intFromEnum(idx)];
|
||||
it.next_entry = entry.next;
|
||||
return entry.depender.unwrap().?;
|
||||
}
|
||||
};
|
||||
|
||||
pub fn dependencyIterator(ip: *const InternPool, dependee: Dependee) DependencyIterator {
|
||||
const first_entry = switch (dependee) {
|
||||
.src_hash => |x| ip.src_hash_deps.get(x),
|
||||
.decl_val => |x| ip.decl_val_deps.get(x),
|
||||
.namespace => |x| ip.namespace_deps.get(x),
|
||||
.namespace_name => |x| ip.namespace_name_deps.get(x),
|
||||
} orelse return .{
|
||||
.ip = ip,
|
||||
.next_entry = .none,
|
||||
};
|
||||
if (ip.dep_entries.items[@intFromEnum(first_entry)].depender == .none) return .{
|
||||
.ip = ip,
|
||||
.next_entry = .none,
|
||||
};
|
||||
return .{
|
||||
.ip = ip,
|
||||
.next_entry = first_entry.toOptional(),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: Depender, dependee: Dependee) Allocator.Error!void {
|
||||
const first_depender_dep: DepEntry.Index.Optional = if (ip.first_dependency.get(depender)) |idx| dep: {
|
||||
// The entry already exists, so there is capacity to overwrite it later.
|
||||
break :dep idx.toOptional();
|
||||
} else none: {
|
||||
// Ensure there is capacity available to add this dependency later.
|
||||
try ip.first_dependency.ensureUnusedCapacity(gpa, 1);
|
||||
break :none .none;
|
||||
};
|
||||
|
||||
// We're very likely to need space for a new entry - reserve it now to avoid
|
||||
// the need for error cleanup logic.
|
||||
if (ip.free_dep_entries.items.len == 0) {
|
||||
try ip.dep_entries.ensureUnusedCapacity(gpa, 1);
|
||||
}
|
||||
|
||||
// This block should allocate an entry and prepend it to the relevant `*_deps` list.
|
||||
// The `next` field should be correctly initialized; all other fields may be undefined.
|
||||
const new_index: DepEntry.Index = switch (dependee) {
|
||||
inline else => |dependee_payload, tag| new_index: {
|
||||
const gop = try switch (tag) {
|
||||
.src_hash => ip.src_hash_deps,
|
||||
.decl_val => ip.decl_val_deps,
|
||||
.namespace => ip.namespace_deps,
|
||||
.namespace_name => ip.namespace_name_deps,
|
||||
}.getOrPut(gpa, dependee_payload);
|
||||
|
||||
if (gop.found_existing and ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].depender == .none) {
|
||||
// Dummy entry, so we can reuse it rather than allocating a new one!
|
||||
ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].next = .none;
|
||||
break :new_index gop.value_ptr.*;
|
||||
}
|
||||
|
||||
// Prepend a new dependency.
|
||||
const new_index: DepEntry.Index, const ptr = if (ip.free_dep_entries.popOrNull()) |new_index| new: {
|
||||
break :new .{ new_index, &ip.dep_entries.items[@intFromEnum(new_index)] };
|
||||
} else .{ @enumFromInt(ip.dep_entries.items.len), ip.dep_entries.addOneAssumeCapacity() };
|
||||
ptr.next = if (gop.found_existing) gop.value_ptr.*.toOptional() else .none;
|
||||
gop.value_ptr.* = new_index;
|
||||
break :new_index new_index;
|
||||
},
|
||||
};
|
||||
|
||||
ip.dep_entries.items[@intFromEnum(new_index)].depender = depender.toOptional();
|
||||
ip.dep_entries.items[@intFromEnum(new_index)].prev = .none;
|
||||
ip.dep_entries.items[@intFromEnum(new_index)].next_dependee = first_depender_dep;
|
||||
ip.first_dependency.putAssumeCapacity(depender, new_index);
|
||||
}
|
||||
|
||||
/// String is the name whose existence the dependency is on.
|
||||
/// DepEntry.Index refers to the first such dependency.
|
||||
pub const NamespaceNameKey = struct {
|
||||
/// The instruction (`struct_decl` etc) which owns the namespace in question.
|
||||
namespace: TrackedInst.Index,
|
||||
/// The name whose existence the dependency is on.
|
||||
name: NullTerminatedString,
|
||||
};
|
||||
|
||||
pub const DepEntry = extern struct {
|
||||
/// If null, this is a dummy entry - all other fields are `undefined`. It is
|
||||
/// the first and only entry in one of `intern_pool.*_deps`, and does not
|
||||
/// appear in any list by `first_dependency`, but is not in
|
||||
/// `free_dep_entries` since `*_deps` stores a reference to it.
|
||||
depender: Depender.Optional,
|
||||
/// Index into `dep_entries` forming a doubly linked list of all dependencies on this dependee.
|
||||
/// Used to iterate all dependers for a given dependee during an update.
|
||||
/// null if this is the end of the list.
|
||||
next: DepEntry.Index.Optional,
|
||||
/// The other link for `next`.
|
||||
/// null if this is the start of the list.
|
||||
prev: DepEntry.Index.Optional,
|
||||
/// Index into `dep_entries` forming a singly linked list of dependencies *of* `depender`.
|
||||
/// Used to efficiently remove all `DepEntry`s for a single `depender` when it is re-analyzed.
|
||||
/// null if this is the end of the list.
|
||||
next_dependee: DepEntry.Index.Optional,
|
||||
|
||||
pub const Index = enum(u32) {
|
||||
_,
|
||||
pub fn toOptional(dep: DepEntry.Index) Optional {
|
||||
return @enumFromInt(@intFromEnum(dep));
|
||||
}
|
||||
pub const Optional = enum(u32) {
|
||||
none = std.math.maxInt(u32),
|
||||
_,
|
||||
pub fn unwrap(opt: Optional) ?DepEntry.Index {
|
||||
return switch (opt) {
|
||||
.none => null,
|
||||
_ => @enumFromInt(@intFromEnum(opt)),
|
||||
};
|
||||
}
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
const FieldMap = std.ArrayHashMapUnmanaged(void, void, std.array_hash_map.AutoContext(void), false);
|
||||
|
||||
const builtin = @import("builtin");
|
||||
@@ -428,6 +669,7 @@ pub const Key = union(enum) {
|
||||
decl: DeclIndex,
|
||||
/// Represents the declarations inside this opaque.
|
||||
namespace: NamespaceIndex,
|
||||
zir_index: TrackedInst.Index.Optional,
|
||||
};
|
||||
|
||||
/// Although packed structs and non-packed structs are encoded differently,
|
||||
@@ -440,7 +682,7 @@ pub const Key = union(enum) {
|
||||
/// `none` when the struct has no declarations.
|
||||
namespace: OptionalNamespaceIndex,
|
||||
/// Index of the struct_decl ZIR instruction.
|
||||
zir_index: TrackedInst.Index,
|
||||
zir_index: TrackedInst.Index.Optional,
|
||||
layout: std.builtin.Type.ContainerLayout,
|
||||
field_names: NullTerminatedString.Slice,
|
||||
field_types: Index.Slice,
|
||||
@@ -684,7 +926,7 @@ pub const Key = union(enum) {
|
||||
}
|
||||
|
||||
/// Asserts the struct is not packed.
|
||||
pub fn setZirIndex(s: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index) void {
|
||||
pub fn setZirIndex(s: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
|
||||
assert(s.layout != .Packed);
|
||||
const field_index = std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?;
|
||||
ip.extra.items[s.extra_index + field_index] = @intFromEnum(new_zir_index);
|
||||
@@ -800,7 +1042,7 @@ pub const Key = union(enum) {
|
||||
flags: Tag.TypeUnion.Flags,
|
||||
/// The enum that provides the list of field names and values.
|
||||
enum_tag_ty: Index,
|
||||
zir_index: TrackedInst.Index,
|
||||
zir_index: TrackedInst.Index.Optional,
|
||||
|
||||
/// The returned pointer expires with any addition to the `InternPool`.
|
||||
pub fn flagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeUnion.Flags {
|
||||
@@ -889,6 +1131,7 @@ pub const Key = union(enum) {
|
||||
/// This is ignored by `get` but will be provided by `indexToKey` when
|
||||
/// a value map exists.
|
||||
values_map: OptionalMapIndex = .none,
|
||||
zir_index: TrackedInst.Index.Optional,
|
||||
|
||||
pub const TagMode = enum {
|
||||
/// The integer tag type was auto-numbered by zig.
|
||||
@@ -953,6 +1196,7 @@ pub const Key = union(enum) {
|
||||
tag_mode: EnumType.TagMode,
|
||||
/// This may be updated via `setTagType` later.
|
||||
tag_ty: Index = .none,
|
||||
zir_index: TrackedInst.Index.Optional,
|
||||
|
||||
pub fn toEnumType(self: @This()) EnumType {
|
||||
return .{
|
||||
@@ -962,6 +1206,7 @@ pub const Key = union(enum) {
|
||||
.tag_mode = self.tag_mode,
|
||||
.names = .{ .start = 0, .len = 0 },
|
||||
.values = .{ .start = 0, .len = 0 },
|
||||
.zir_index = self.zir_index,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1909,7 +2154,7 @@ pub const UnionType = struct {
|
||||
/// If this slice has length 0 it means all elements are `none`.
|
||||
field_aligns: Alignment.Slice,
|
||||
/// Index of the union_decl ZIR instruction.
|
||||
zir_index: TrackedInst.Index,
|
||||
zir_index: TrackedInst.Index.Optional,
|
||||
/// Index into extra array of the `flags` field.
|
||||
flags_index: u32,
|
||||
/// Copied from `enum_tag_ty`.
|
||||
@@ -2003,10 +2248,10 @@ pub const UnionType = struct {
|
||||
}
|
||||
|
||||
/// This does not mutate the field of UnionType.
|
||||
pub fn setZirIndex(self: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index) void {
|
||||
pub fn setZirIndex(self: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
|
||||
const flags_field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?;
|
||||
const zir_index_field_index = std.meta.fieldIndex(Tag.TypeUnion, "zir_index").?;
|
||||
const ptr: *TrackedInst.Index =
|
||||
const ptr: *TrackedInst.Index.Optional =
|
||||
@ptrCast(&ip.extra.items[self.flags_index - flags_field_index + zir_index_field_index]);
|
||||
ptr.* = new_zir_index;
|
||||
}
|
||||
@@ -3099,7 +3344,7 @@ pub const Tag = enum(u8) {
|
||||
namespace: NamespaceIndex,
|
||||
/// The enum that provides the list of field names and values.
|
||||
tag_ty: Index,
|
||||
zir_index: TrackedInst.Index,
|
||||
zir_index: TrackedInst.Index.Optional,
|
||||
|
||||
pub const Flags = packed struct(u32) {
|
||||
runtime_tag: UnionType.RuntimeTag,
|
||||
@@ -3121,7 +3366,7 @@ pub const Tag = enum(u8) {
|
||||
/// 2. init: Index for each fields_len // if tag is type_struct_packed_inits
|
||||
pub const TypeStructPacked = struct {
|
||||
decl: DeclIndex,
|
||||
zir_index: TrackedInst.Index,
|
||||
zir_index: TrackedInst.Index.Optional,
|
||||
fields_len: u32,
|
||||
namespace: OptionalNamespaceIndex,
|
||||
backing_int_ty: Index,
|
||||
@@ -3168,7 +3413,7 @@ pub const Tag = enum(u8) {
|
||||
/// 7. field_offset: u32 // for each field in declared order, undef until layout_resolved
|
||||
pub const TypeStruct = struct {
|
||||
decl: DeclIndex,
|
||||
zir_index: TrackedInst.Index,
|
||||
zir_index: TrackedInst.Index.Optional,
|
||||
fields_len: u32,
|
||||
flags: Flags,
|
||||
size: u32,
|
||||
@@ -3238,6 +3483,11 @@ pub const FuncAnalysis = packed struct(u32) {
|
||||
/// This function might be OK but it depends on another Decl which did not
|
||||
/// successfully complete semantic analysis.
|
||||
dependency_failure,
|
||||
/// There will be a corresponding ErrorMsg in Module.failed_decls.
|
||||
/// Indicates that semantic analysis succeeded, but code generation for
|
||||
/// this function failed.
|
||||
codegen_failure,
|
||||
/// Semantic analysis and code generation of this function succeeded.
|
||||
success,
|
||||
};
|
||||
};
|
||||
@@ -3523,6 +3773,7 @@ pub const EnumExplicit = struct {
|
||||
/// If this is `none`, it means the trailing tag values are absent because
|
||||
/// they are auto-numbered.
|
||||
values_map: OptionalMapIndex,
|
||||
zir_index: TrackedInst.Index.Optional,
|
||||
};
|
||||
|
||||
/// Trailing:
|
||||
@@ -3538,6 +3789,7 @@ pub const EnumAuto = struct {
|
||||
fields_len: u32,
|
||||
/// Maps field names to declaration index.
|
||||
names_map: MapIndex,
|
||||
zir_index: TrackedInst.Index.Optional,
|
||||
};
|
||||
|
||||
pub const PackedU64 = packed struct(u64) {
|
||||
@@ -3759,6 +4011,16 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void {
|
||||
|
||||
ip.tracked_insts.deinit(gpa);
|
||||
|
||||
ip.src_hash_deps.deinit(gpa);
|
||||
ip.decl_val_deps.deinit(gpa);
|
||||
ip.namespace_deps.deinit(gpa);
|
||||
ip.namespace_name_deps.deinit(gpa);
|
||||
|
||||
ip.first_dependency.deinit(gpa);
|
||||
|
||||
ip.dep_entries.deinit(gpa);
|
||||
ip.free_dep_entries.deinit(gpa);
|
||||
|
||||
ip.* = undefined;
|
||||
}
|
||||
|
||||
@@ -3885,6 +4147,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
|
||||
.tag_mode = .auto,
|
||||
.names_map = enum_auto.data.names_map.toOptional(),
|
||||
.values_map = .none,
|
||||
.zir_index = enum_auto.data.zir_index,
|
||||
} };
|
||||
},
|
||||
.type_enum_explicit => ip.indexToKeyEnum(data, .explicit),
|
||||
@@ -4493,6 +4756,7 @@ fn indexToKeyEnum(ip: *const InternPool, data: u32, tag_mode: Key.EnumType.TagMo
|
||||
.tag_mode = tag_mode,
|
||||
.names_map = enum_explicit.data.names_map.toOptional(),
|
||||
.values_map = enum_explicit.data.values_map,
|
||||
.zir_index = enum_explicit.data.zir_index,
|
||||
} };
|
||||
}
|
||||
|
||||
@@ -5329,7 +5593,7 @@ pub const UnionTypeInit = struct {
|
||||
flags: Tag.TypeUnion.Flags,
|
||||
decl: DeclIndex,
|
||||
namespace: NamespaceIndex,
|
||||
zir_index: TrackedInst.Index,
|
||||
zir_index: TrackedInst.Index.Optional,
|
||||
fields_len: u32,
|
||||
enum_tag_ty: Index,
|
||||
/// May have length 0 which leaves the values unset until later.
|
||||
@@ -5401,7 +5665,7 @@ pub const StructTypeInit = struct {
|
||||
decl: DeclIndex,
|
||||
namespace: OptionalNamespaceIndex,
|
||||
layout: std.builtin.Type.ContainerLayout,
|
||||
zir_index: TrackedInst.Index,
|
||||
zir_index: TrackedInst.Index.Optional,
|
||||
fields_len: u32,
|
||||
known_non_opv: bool,
|
||||
requires_comptime: RequiresComptime,
|
||||
@@ -5923,7 +6187,6 @@ pub const GetFuncInstanceKey = struct {
|
||||
is_noinline: bool,
|
||||
generic_owner: Index,
|
||||
inferred_error_set: bool,
|
||||
generation: u32,
|
||||
};
|
||||
|
||||
pub fn getFuncInstance(ip: *InternPool, gpa: Allocator, arg: GetFuncInstanceKey) Allocator.Error!Index {
|
||||
@@ -5990,7 +6253,6 @@ pub fn getFuncInstance(ip: *InternPool, gpa: Allocator, arg: GetFuncInstanceKey)
|
||||
generic_owner,
|
||||
func_index,
|
||||
func_extra_index,
|
||||
arg.generation,
|
||||
func_ty,
|
||||
arg.section,
|
||||
);
|
||||
@@ -6122,7 +6384,6 @@ pub fn getFuncInstanceIes(
|
||||
generic_owner,
|
||||
func_index,
|
||||
func_extra_index,
|
||||
arg.generation,
|
||||
func_ty,
|
||||
arg.section,
|
||||
);
|
||||
@@ -6134,7 +6395,6 @@ fn finishFuncInstance(
|
||||
generic_owner: Index,
|
||||
func_index: Index,
|
||||
func_extra_index: u32,
|
||||
generation: u32,
|
||||
func_ty: Index,
|
||||
section: OptionalNullTerminatedString,
|
||||
) Allocator.Error!Index {
|
||||
@@ -6154,7 +6414,6 @@ fn finishFuncInstance(
|
||||
.analysis = .complete,
|
||||
.zir_decl_index = fn_owner_decl.zir_decl_index,
|
||||
.src_scope = fn_owner_decl.src_scope,
|
||||
.generation = generation,
|
||||
.is_pub = fn_owner_decl.is_pub,
|
||||
.is_exported = fn_owner_decl.is_exported,
|
||||
.alive = true,
|
||||
@@ -6264,6 +6523,7 @@ fn getIncompleteEnumAuto(
|
||||
.int_tag_type = int_tag_type,
|
||||
.names_map = names_map,
|
||||
.fields_len = enum_type.fields_len,
|
||||
.zir_index = enum_type.zir_index,
|
||||
});
|
||||
|
||||
ip.items.appendAssumeCapacity(.{
|
||||
@@ -6314,6 +6574,7 @@ fn getIncompleteEnumExplicit(
|
||||
.fields_len = enum_type.fields_len,
|
||||
.names_map = names_map,
|
||||
.values_map = values_map,
|
||||
.zir_index = enum_type.zir_index,
|
||||
});
|
||||
|
||||
ip.items.appendAssumeCapacity(.{
|
||||
@@ -6339,6 +6600,7 @@ pub const GetEnumInit = struct {
|
||||
names: []const NullTerminatedString,
|
||||
values: []const Index,
|
||||
tag_mode: Key.EnumType.TagMode,
|
||||
zir_index: TrackedInst.Index.Optional,
|
||||
};
|
||||
|
||||
pub fn getEnum(ip: *InternPool, gpa: Allocator, ini: GetEnumInit) Allocator.Error!Index {
|
||||
@@ -6355,6 +6617,7 @@ pub fn getEnum(ip: *InternPool, gpa: Allocator, ini: GetEnumInit) Allocator.Erro
|
||||
.tag_mode = undefined,
|
||||
.names_map = undefined,
|
||||
.values_map = undefined,
|
||||
.zir_index = undefined,
|
||||
},
|
||||
}, adapter);
|
||||
if (gop.found_existing) return @enumFromInt(gop.index);
|
||||
@@ -6380,6 +6643,7 @@ pub fn getEnum(ip: *InternPool, gpa: Allocator, ini: GetEnumInit) Allocator.Erro
|
||||
.int_tag_type = ini.tag_ty,
|
||||
.names_map = names_map,
|
||||
.fields_len = fields_len,
|
||||
.zir_index = ini.zir_index,
|
||||
}),
|
||||
});
|
||||
ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.names));
|
||||
@@ -6416,6 +6680,7 @@ pub fn finishGetEnum(
|
||||
.fields_len = fields_len,
|
||||
.names_map = names_map,
|
||||
.values_map = values_map,
|
||||
.zir_index = ini.zir_index,
|
||||
}),
|
||||
});
|
||||
ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.names));
|
||||
@@ -6507,6 +6772,7 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
|
||||
OptionalNullTerminatedString,
|
||||
Tag.TypePointer.VectorIndex,
|
||||
TrackedInst.Index,
|
||||
TrackedInst.Index.Optional,
|
||||
=> @intFromEnum(@field(extra, field.name)),
|
||||
|
||||
u32,
|
||||
@@ -6583,6 +6849,7 @@ fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct
|
||||
OptionalNullTerminatedString,
|
||||
Tag.TypePointer.VectorIndex,
|
||||
TrackedInst.Index,
|
||||
TrackedInst.Index.Optional,
|
||||
=> @enumFromInt(int32),
|
||||
|
||||
u32,
|
||||
|
||||
846
src/Module.zig
846
src/Module.zig
File diff suppressed because it is too large
Load Diff
116
src/Sema.zig
116
src/Sema.zig
@@ -2583,7 +2583,6 @@ fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.ErrorMsg)
|
||||
ip.funcAnalysis(sema.owner_func_index).state = .sema_failure;
|
||||
} else {
|
||||
sema.owner_decl.analysis = .sema_failure;
|
||||
sema.owner_decl.generation = mod.generation;
|
||||
}
|
||||
if (sema.func_index != .none) {
|
||||
ip.funcAnalysis(sema.func_index).state = .sema_failure;
|
||||
@@ -2718,7 +2717,7 @@ pub fn getStructType(
|
||||
assert(extended.opcode == .struct_decl);
|
||||
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
|
||||
|
||||
var extra_index: usize = extended.operand;
|
||||
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
|
||||
extra_index += @intFromBool(small.has_src_node);
|
||||
const fields_len = if (small.has_fields_len) blk: {
|
||||
const fields_len = sema.code.extra[extra_index];
|
||||
@@ -2748,7 +2747,7 @@ pub fn getStructType(
|
||||
const ty = try ip.getStructType(gpa, .{
|
||||
.decl = decl,
|
||||
.namespace = namespace.toOptional(),
|
||||
.zir_index = tracked_inst,
|
||||
.zir_index = tracked_inst.toOptional(),
|
||||
.layout = small.layout,
|
||||
.known_non_opv = small.known_non_opv,
|
||||
.is_tuple = small.is_tuple,
|
||||
@@ -2773,7 +2772,7 @@ fn zirStructDecl(
|
||||
const ip = &mod.intern_pool;
|
||||
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
|
||||
const src: LazySrcLoc = if (small.has_src_node) blk: {
|
||||
const node_offset: i32 = @bitCast(sema.code.extra[extended.operand]);
|
||||
const node_offset: i32 = @bitCast(sema.code.extra[extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len]);
|
||||
break :blk LazySrcLoc.nodeOffset(node_offset);
|
||||
} else sema.src;
|
||||
|
||||
@@ -2789,6 +2788,14 @@ fn zirStructDecl(
|
||||
new_decl.owns_tv = true;
|
||||
errdefer mod.abortAnonDecl(new_decl_index);
|
||||
|
||||
if (sema.mod.comp.debug_incremental) {
|
||||
try ip.addDependency(
|
||||
sema.gpa,
|
||||
InternPool.Depender.wrap(.{ .decl = new_decl_index }),
|
||||
.{ .src_hash = try ip.trackZir(sema.gpa, block.getFileScope(mod), inst) },
|
||||
);
|
||||
}
|
||||
|
||||
const new_namespace_index = try mod.createNamespace(.{
|
||||
.parent = block.namespace.toOptional(),
|
||||
.ty = undefined,
|
||||
@@ -2927,7 +2934,7 @@ fn zirEnumDecl(
|
||||
const mod = sema.mod;
|
||||
const gpa = sema.gpa;
|
||||
const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small);
|
||||
var extra_index: usize = extended.operand;
|
||||
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.EnumDecl).Struct.fields.len;
|
||||
|
||||
const src: LazySrcLoc = if (small.has_src_node) blk: {
|
||||
const node_offset: i32 = @bitCast(sema.code.extra[extra_index]);
|
||||
@@ -2973,6 +2980,14 @@ fn zirEnumDecl(
|
||||
new_decl.owns_tv = true;
|
||||
errdefer if (!done) mod.abortAnonDecl(new_decl_index);
|
||||
|
||||
if (sema.mod.comp.debug_incremental) {
|
||||
try mod.intern_pool.addDependency(
|
||||
sema.gpa,
|
||||
InternPool.Depender.wrap(.{ .decl = new_decl_index }),
|
||||
.{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) },
|
||||
);
|
||||
}
|
||||
|
||||
const new_namespace_index = try mod.createNamespace(.{
|
||||
.parent = block.namespace.toOptional(),
|
||||
.ty = undefined,
|
||||
@@ -3008,6 +3023,7 @@ fn zirEnumDecl(
|
||||
.auto
|
||||
else
|
||||
.explicit,
|
||||
.zir_index = (try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst)).toOptional(),
|
||||
});
|
||||
if (sema.builtin_type_target_index != .none) {
|
||||
mod.intern_pool.resolveBuiltinType(sema.builtin_type_target_index, incomplete_enum.index);
|
||||
@@ -3191,7 +3207,7 @@ fn zirUnionDecl(
|
||||
const mod = sema.mod;
|
||||
const gpa = sema.gpa;
|
||||
const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small);
|
||||
var extra_index: usize = extended.operand;
|
||||
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len;
|
||||
|
||||
const src: LazySrcLoc = if (small.has_src_node) blk: {
|
||||
const node_offset: i32 = @bitCast(sema.code.extra[extra_index]);
|
||||
@@ -3225,6 +3241,14 @@ fn zirUnionDecl(
|
||||
new_decl.owns_tv = true;
|
||||
errdefer mod.abortAnonDecl(new_decl_index);
|
||||
|
||||
if (sema.mod.comp.debug_incremental) {
|
||||
try mod.intern_pool.addDependency(
|
||||
sema.gpa,
|
||||
InternPool.Depender.wrap(.{ .decl = new_decl_index }),
|
||||
.{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) },
|
||||
);
|
||||
}
|
||||
|
||||
const new_namespace_index = try mod.createNamespace(.{
|
||||
.parent = block.namespace.toOptional(),
|
||||
.ty = undefined,
|
||||
@@ -3254,7 +3278,7 @@ fn zirUnionDecl(
|
||||
},
|
||||
.decl = new_decl_index,
|
||||
.namespace = new_namespace_index,
|
||||
.zir_index = try mod.intern_pool.trackZir(gpa, block.getFileScope(mod), inst),
|
||||
.zir_index = (try mod.intern_pool.trackZir(gpa, block.getFileScope(mod), inst)).toOptional(),
|
||||
.fields_len = fields_len,
|
||||
.enum_tag_ty = .none,
|
||||
.field_types = &.{},
|
||||
@@ -3318,6 +3342,14 @@ fn zirOpaqueDecl(
|
||||
new_decl.owns_tv = true;
|
||||
errdefer mod.abortAnonDecl(new_decl_index);
|
||||
|
||||
if (sema.mod.comp.debug_incremental) {
|
||||
try mod.intern_pool.addDependency(
|
||||
sema.gpa,
|
||||
InternPool.Depender.wrap(.{ .decl = new_decl_index }),
|
||||
.{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) },
|
||||
);
|
||||
}
|
||||
|
||||
const new_namespace_index = try mod.createNamespace(.{
|
||||
.parent = block.namespace.toOptional(),
|
||||
.ty = undefined,
|
||||
@@ -3329,6 +3361,7 @@ fn zirOpaqueDecl(
|
||||
const opaque_ty = try mod.intern(.{ .opaque_type = .{
|
||||
.decl = new_decl_index,
|
||||
.namespace = new_namespace_index,
|
||||
.zir_index = (try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst)).toOptional(),
|
||||
} });
|
||||
// TODO: figure out InternPool removals for incremental compilation
|
||||
//errdefer mod.intern_pool.remove(opaque_ty);
|
||||
@@ -7890,6 +7923,8 @@ fn instantiateGenericCall(
|
||||
const generic_owner_func = mod.intern_pool.indexToKey(generic_owner).func;
|
||||
const generic_owner_ty_info = mod.typeToFunc(Type.fromInterned(generic_owner_func.ty)).?;
|
||||
|
||||
try sema.declareDependency(.{ .src_hash = generic_owner_func.zir_body_inst });
|
||||
|
||||
// Even though there may already be a generic instantiation corresponding
|
||||
// to this callsite, we must evaluate the expressions of the generic
|
||||
// function signature with the values of the callsite plugged in.
|
||||
@@ -9440,7 +9475,6 @@ fn funcCommon(
|
||||
.inferred_error_set = inferred_error_set,
|
||||
.generic_owner = sema.generic_owner,
|
||||
.comptime_args = sema.comptime_args,
|
||||
.generation = mod.generation,
|
||||
});
|
||||
return finishFunc(
|
||||
sema,
|
||||
@@ -13598,6 +13632,12 @@ fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
|
||||
});
|
||||
|
||||
try sema.checkNamespaceType(block, lhs_src, container_type);
|
||||
if (container_type.typeDeclInst(mod)) |type_decl_inst| {
|
||||
try sema.declareDependency(.{ .namespace_name = .{
|
||||
.namespace = type_decl_inst,
|
||||
.name = decl_name,
|
||||
} });
|
||||
}
|
||||
|
||||
const namespace = container_type.getNamespaceIndex(mod).unwrap() orelse
|
||||
return .bool_false;
|
||||
@@ -17451,6 +17491,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
|
||||
const type_info_ty = try sema.getBuiltinType("Type");
|
||||
const type_info_tag_ty = type_info_ty.unionTagType(mod).?;
|
||||
|
||||
if (ty.typeDeclInst(mod)) |type_decl_inst| {
|
||||
try sema.declareDependency(.{ .namespace = type_decl_inst });
|
||||
}
|
||||
|
||||
switch (ty.zigTypeTag(mod)) {
|
||||
.Type,
|
||||
.Void,
|
||||
@@ -21318,6 +21362,7 @@ fn zirReify(
|
||||
else
|
||||
.explicit,
|
||||
.tag_ty = int_tag_ty.toIntern(),
|
||||
.zir_index = .none,
|
||||
});
|
||||
// TODO: figure out InternPool removals for incremental compilation
|
||||
//errdefer ip.remove(incomplete_enum.index);
|
||||
@@ -21415,6 +21460,7 @@ fn zirReify(
|
||||
const opaque_ty = try mod.intern(.{ .opaque_type = .{
|
||||
.decl = new_decl_index,
|
||||
.namespace = new_namespace_index,
|
||||
.zir_index = .none,
|
||||
} });
|
||||
// TODO: figure out InternPool removals for incremental compilation
|
||||
//errdefer ip.remove(opaque_ty);
|
||||
@@ -21633,7 +21679,7 @@ fn zirReify(
|
||||
.namespace = new_namespace_index,
|
||||
.enum_tag_ty = enum_tag_ty,
|
||||
.fields_len = fields_len,
|
||||
.zir_index = try ip.trackZir(gpa, block.getFileScope(mod), inst), // TODO: should reified types be handled differently?
|
||||
.zir_index = .none,
|
||||
.flags = .{
|
||||
.layout = layout,
|
||||
.status = .have_field_types,
|
||||
@@ -21801,7 +21847,7 @@ fn reifyStruct(
|
||||
const ty = try ip.getStructType(gpa, .{
|
||||
.decl = new_decl_index,
|
||||
.namespace = .none,
|
||||
.zir_index = try mod.intern_pool.trackZir(gpa, block.getFileScope(mod), inst), // TODO: should reified types be handled differently?
|
||||
.zir_index = .none,
|
||||
.layout = layout,
|
||||
.known_non_opv = false,
|
||||
.fields_len = fields_len,
|
||||
@@ -25922,7 +25968,6 @@ fn zirBuiltinExtern(
|
||||
new_decl.has_tv = true;
|
||||
new_decl.owns_tv = true;
|
||||
new_decl.analysis = .complete;
|
||||
new_decl.generation = mod.generation;
|
||||
|
||||
try sema.ensureDeclAnalyzed(new_decl_index);
|
||||
|
||||
@@ -26421,6 +26466,7 @@ fn prepareSimplePanic(sema: *Sema, block: *Block) !void {
|
||||
// owns the function.
|
||||
try sema.ensureDeclAnalyzed(decl_index);
|
||||
const tv = try mod.declPtr(decl_index).typedValue();
|
||||
try sema.declareDependency(.{ .decl_val = decl_index });
|
||||
assert(tv.ty.zigTypeTag(mod) == .Fn);
|
||||
assert(try sema.fnHasRuntimeBits(tv.ty));
|
||||
const func_index = tv.val.toIntern();
|
||||
@@ -26842,6 +26888,13 @@ fn fieldVal(
|
||||
const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?;
|
||||
const child_type = val.toType();
|
||||
|
||||
if (child_type.typeDeclInst(mod)) |type_decl_inst| {
|
||||
try sema.declareDependency(.{ .namespace_name = .{
|
||||
.namespace = type_decl_inst,
|
||||
.name = field_name,
|
||||
} });
|
||||
}
|
||||
|
||||
switch (try child_type.zigTypeTagOrPoison(mod)) {
|
||||
.ErrorSet => {
|
||||
switch (ip.indexToKey(child_type.toIntern())) {
|
||||
@@ -27065,6 +27118,13 @@ fn fieldPtr(
|
||||
const val = (sema.resolveDefinedValue(block, src, inner) catch unreachable).?;
|
||||
const child_type = val.toType();
|
||||
|
||||
if (child_type.typeDeclInst(mod)) |type_decl_inst| {
|
||||
try sema.declareDependency(.{ .namespace_name = .{
|
||||
.namespace = type_decl_inst,
|
||||
.name = field_name,
|
||||
} });
|
||||
}
|
||||
|
||||
switch (child_type.zigTypeTag(mod)) {
|
||||
.ErrorSet => {
|
||||
switch (ip.indexToKey(child_type.toIntern())) {
|
||||
@@ -31134,6 +31194,7 @@ fn beginComptimePtrLoad(
|
||||
const is_mutable = ptr.addr == .mut_decl;
|
||||
const decl = mod.declPtr(decl_index);
|
||||
const decl_tv = try decl.typedValue();
|
||||
try sema.declareDependency(.{ .decl_val = decl_index });
|
||||
if (decl.val.getVariable(mod) != null) return error.RuntimeLoad;
|
||||
|
||||
const layout_defined = decl.ty.hasWellDefinedLayout(mod);
|
||||
@@ -32387,6 +32448,8 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: InternPool.DeclIndex, analyze_fn
|
||||
|
||||
const decl = mod.declPtr(decl_index);
|
||||
const decl_tv = try decl.typedValue();
|
||||
// TODO: if this is a `decl_ref` of a non-variable decl, only depend on decl type
|
||||
try sema.declareDependency(.{ .decl_val = decl_index });
|
||||
const ptr_ty = try sema.ptrType(.{
|
||||
.child = decl_tv.ty.toIntern(),
|
||||
.flags = .{
|
||||
@@ -35683,13 +35746,13 @@ fn semaBackingIntType(mod: *Module, struct_type: InternPool.Key.StructType) Comp
|
||||
break :blk accumulator;
|
||||
};
|
||||
|
||||
const zir_index = struct_type.zir_index.resolve(ip);
|
||||
const zir_index = struct_type.zir_index.unwrap().?.resolve(ip);
|
||||
const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended;
|
||||
assert(extended.opcode == .struct_decl);
|
||||
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
|
||||
|
||||
if (small.has_backing_int) {
|
||||
var extra_index: usize = extended.operand;
|
||||
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
|
||||
extra_index += @intFromBool(small.has_src_node);
|
||||
extra_index += @intFromBool(small.has_fields_len);
|
||||
extra_index += @intFromBool(small.has_decls_len);
|
||||
@@ -36162,10 +36225,8 @@ pub fn resolveTypeFieldsStruct(
.file_failure,
.dependency_failure,
.sema_failure,
.sema_failure_retryable,
=> {
    sema.owner_decl.analysis = .dependency_failure;
    sema.owner_decl.generation = mod.generation;
    return error.AnalysisFail;
},
else => {},
@@ -36221,10 +36282,8 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Key.
.file_failure,
.dependency_failure,
.sema_failure,
.sema_failure_retryable,
=> {
    sema.owner_decl.analysis = .dependency_failure;
    sema.owner_decl.generation = mod.generation;
    return error.AnalysisFail;
},
else => {},
@@ -36404,7 +36463,7 @@ fn structZirInfo(zir: Zir, zir_index: Zir.Inst.Index) struct {
const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended;
assert(extended.opcode == .struct_decl);
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
var extra_index: usize = extended.operand;
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;

extra_index += @intFromBool(small.has_src_node);
@@ -36448,7 +36507,7 @@ fn semaStructFields(
const decl = mod.declPtr(decl_index);
const namespace_index = struct_type.namespace.unwrap() orelse decl.src_namespace;
const zir = mod.namespacePtr(namespace_index).file_scope.zir;
const zir_index = struct_type.zir_index.resolve(ip);
const zir_index = struct_type.zir_index.unwrap().?.resolve(ip);

const fields_len, const small, var extra_index = structZirInfo(zir, zir_index);
@@ -36719,7 +36778,7 @@ fn semaStructFieldInits(
const decl = mod.declPtr(decl_index);
const namespace_index = struct_type.namespace.unwrap() orelse decl.src_namespace;
const zir = mod.namespacePtr(namespace_index).file_scope.zir;
const zir_index = struct_type.zir_index.resolve(ip);
const zir_index = struct_type.zir_index.unwrap().?.resolve(ip);
const fields_len, const small, var extra_index = structZirInfo(zir, zir_index);

var comptime_mutable_decls = std.ArrayList(InternPool.DeclIndex).init(gpa);
@@ -36868,11 +36927,11 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.Un
const ip = &mod.intern_pool;
const decl_index = union_type.decl;
const zir = mod.namespacePtr(union_type.namespace).file_scope.zir;
const zir_index = union_type.zir_index.resolve(ip);
const zir_index = union_type.zir_index.unwrap().?.resolve(ip);
const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended;
assert(extended.opcode == .union_decl);
const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small);
var extra_index: usize = extended.operand;
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len;

const src = LazySrcLoc.nodeOffset(0);
extra_index += @intFromBool(small.has_src_node);
@@ -37312,6 +37371,7 @@ fn generateUnionTagTypeNumbered(
.names = enum_field_names,
.values = enum_field_vals,
.tag_mode = .explicit,
.zir_index = .none,
});

new_decl.ty = Type.type;
@@ -37362,6 +37422,7 @@ fn generateUnionTagTypeSimple(
.names = enum_field_names,
.values = &.{},
.tag_mode = .auto,
.zir_index = .none,
});

const new_decl = mod.declPtr(new_decl_index);
@@ -38876,3 +38937,14 @@ fn ptrType(sema: *Sema, info: InternPool.Key.PtrType) CompileError!Type {
}
return sema.mod.ptrType(info);
}

pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void {
    if (!sema.mod.comp.debug_incremental) return;
    const depender = InternPool.Depender.wrap(
        if (sema.owner_func_index != .none)
            .{ .func = sema.owner_func_index }
        else
            .{ .decl = sema.owner_decl_index },
    );
    try sema.mod.intern_pool.addDependency(sema.gpa, depender, dependee);
}
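For orientation, here is a small self-contained model (toy types, not the compiler's real `InternPool` API) of the edge recorded by `declareDependency`: the current analysis unit, either a decl or a function body, depends either on another decl's value or on a name lookup in some namespace, matching the two dependee shapes used in the hunks above.

const std = @import("std");

// Toy stand-ins for InternPool.Dependee; field types are simplified on purpose.
const Dependee = union(enum) {
    decl_val: u32, // "this analysis depends on the value of decl #n"
    namespace_name: struct { namespace: u32, name: []const u8 },
};

test "an analysis unit records the dependees it touched" {
    var edges = std.ArrayList(Dependee).init(std.testing.allocator);
    defer edges.deinit();
    try edges.append(.{ .decl_val = 42 });
    try edges.append(.{ .namespace_name = .{ .namespace = 7, .name = "field_name" } });
    try std.testing.expectEqual(@as(usize, 2), edges.items.len);
}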
134
src/Zir.zig
@@ -2497,6 +2497,7 @@ pub const Inst = struct {
/// }
/// 2. body: Index // for each body_len
/// 3. src_locs: SrcLocs // if body_len != 0
/// 4. proto_hash: std.zig.SrcHash // if body_len != 0; hash of function prototype
pub const Func = struct {
    /// If this is 0 it means a void return type.
    /// If this is 1 it means return_type is a simple Ref
@@ -2558,6 +2559,7 @@ pub const Inst = struct {
/// - each bit starting with LSB corresponds to parameter indexes
/// 17. body: Index // for each body_len
/// 18. src_locs: Func.SrcLocs // if body_len != 0
/// 19. proto_hash: std.zig.SrcHash // if body_len != 0; hash of function prototype
pub const FuncFancy = struct {
    /// Points to the block that contains the param instructions for this function.
    /// If this is a `declaration`, it refers to the declaration's value body.
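The `proto_hash` trailer documented in both hunks is a source hash covering the function prototype, so presumably editing only a function's body leaves it unchanged while editing the signature changes it. A hedged sketch of that behaviour using the `std.zig` hash helpers; the prototype strings are made up:

const std = @import("std");

test "a prototype hash distinguishes signature changes" {
    const proto_a = "fn add(a: u32, b: u32) u32"; // example prototype text
    const proto_b = "fn add(a: u64, b: u64) u64"; // changed signature
    const h_a = std.zig.hashSrc(proto_a);
    try std.testing.expect(std.zig.srcHashEql(h_a, std.zig.hashSrc(proto_a)));
    try std.testing.expect(!std.zig.srcHashEql(h_a, std.zig.hashSrc(proto_b)));
}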
@@ -3040,6 +3042,12 @@ pub const Inst = struct {
/// init_body_inst: Inst, // for each init_body_len
/// }
pub const StructDecl = struct {
    // These fields should be concatenated and reinterpreted as a `std.zig.SrcHash`.
    // This hash contains the source of all fields, and any specified attributes (`extern`, backing type, etc).
    fields_hash_0: u32,
    fields_hash_1: u32,
    fields_hash_2: u32,
    fields_hash_3: u32,
    pub const Small = packed struct {
        has_src_node: bool,
        has_fields_len: bool,
@@ -3102,6 +3110,12 @@ pub const Inst = struct {
/// value: Ref, // if corresponding bit is set
/// }
pub const EnumDecl = struct {
    // These fields should be concatenated and reinterpreted as a `std.zig.SrcHash`.
    // This hash contains the source of all fields, and the backing type if specified.
    fields_hash_0: u32,
    fields_hash_1: u32,
    fields_hash_2: u32,
    fields_hash_3: u32,
    pub const Small = packed struct {
        has_src_node: bool,
        has_tag_type: bool,
@@ -3137,6 +3151,12 @@ pub const Inst = struct {
/// tag_value: Ref, // if corresponding bit is set
/// }
pub const UnionDecl = struct {
    // These fields should be concatenated and reinterpreted as a `std.zig.SrcHash`.
    // This hash contains the source of all fields, and any specified attributes (`extern` etc).
    fields_hash_0: u32,
    fields_hash_1: u32,
    fields_hash_2: u32,
    fields_hash_3: u32,
    pub const Small = packed struct {
        has_src_node: bool,
        has_tag_type: bool,
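All three payloads store the 16-byte `std.zig.SrcHash` as four `u32` words; `getAssociatedSrcHash` further down in this diff reassembles them with a `@bitCast`. A small self-contained check of that round trip (the struct source string is arbitrary):

const std = @import("std");

test "SrcHash round-trips through four u32 words" {
    const hash = std.zig.hashSrc("const Foo = struct { x: u32 };");
    const words: [4]u32 = @bitCast(hash); // how the hash would be stored in ZIR extra
    const restored: std.zig.SrcHash = @bitCast(words); // how a reader reassembles it
    try std.testing.expect(std.zig.srcHashEql(hash, restored));
}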
@@ -3455,7 +3475,7 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
switch (extended.opcode) {
    .struct_decl => {
        const small: Inst.StructDecl.Small = @bitCast(extended.small);
        var extra_index: u32 = extended.operand;
        var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.StructDecl).Struct.fields.len);
        extra_index += @intFromBool(small.has_src_node);
        extra_index += @intFromBool(small.has_fields_len);
        const decls_len = if (small.has_decls_len) decls_len: {
@@ -3482,7 +3502,7 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
},
.enum_decl => {
    const small: Inst.EnumDecl.Small = @bitCast(extended.small);
    var extra_index: u32 = extended.operand;
    var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.EnumDecl).Struct.fields.len);
    extra_index += @intFromBool(small.has_src_node);
    extra_index += @intFromBool(small.has_tag_type);
    extra_index += @intFromBool(small.has_body_len);
@@ -3501,7 +3521,7 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
},
.union_decl => {
    const small: Inst.UnionDecl.Small = @bitCast(extended.small);
    var extra_index: u32 = extended.operand;
    var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.UnionDecl).Struct.fields.len);
    extra_index += @intFromBool(small.has_src_node);
    extra_index += @intFromBool(small.has_tag_type);
    extra_index += @intFromBool(small.has_body_len);
@@ -3938,3 +3958,111 @@ pub fn getDeclaration(zir: Zir, inst: Zir.Inst.Index) struct { Inst.Declaration,
        @intCast(extra.end),
    };
}

pub fn getAssociatedSrcHash(zir: Zir, inst: Zir.Inst.Index) ?std.zig.SrcHash {
    const tag = zir.instructions.items(.tag);
    const data = zir.instructions.items(.data);
    switch (tag[@intFromEnum(inst)]) {
        .declaration => {
            const pl_node = data[@intFromEnum(inst)].pl_node;
            const extra = zir.extraData(Inst.Declaration, pl_node.payload_index);
            return @bitCast([4]u32{
                extra.data.src_hash_0,
                extra.data.src_hash_1,
                extra.data.src_hash_2,
                extra.data.src_hash_3,
            });
        },
        .func, .func_inferred => {
            const pl_node = data[@intFromEnum(inst)].pl_node;
            const extra = zir.extraData(Inst.Func, pl_node.payload_index);
            if (extra.data.body_len == 0) {
                // Function type or extern fn - no associated hash
                return null;
            }
            const extra_index = extra.end +
                1 +
                extra.data.body_len +
                @typeInfo(Inst.Func.SrcLocs).Struct.fields.len;
            return @bitCast([4]u32{
                zir.extra[extra_index + 0],
                zir.extra[extra_index + 1],
                zir.extra[extra_index + 2],
                zir.extra[extra_index + 3],
            });
        },
        .func_fancy => {
            const pl_node = data[@intFromEnum(inst)].pl_node;
            const extra = zir.extraData(Inst.FuncFancy, pl_node.payload_index);
            if (extra.data.body_len == 0) {
                // Function type or extern fn - no associated hash
                return null;
            }
            const bits = extra.data.bits;
            var extra_index = extra.end;
            extra_index += @intFromBool(bits.has_lib_name);
            if (bits.has_align_body) {
                const body_len = zir.extra[extra_index];
                extra_index += 1 + body_len;
            } else extra_index += @intFromBool(bits.has_align_ref);
            if (bits.has_addrspace_body) {
                const body_len = zir.extra[extra_index];
                extra_index += 1 + body_len;
            } else extra_index += @intFromBool(bits.has_addrspace_ref);
            if (bits.has_section_body) {
                const body_len = zir.extra[extra_index];
                extra_index += 1 + body_len;
            } else extra_index += @intFromBool(bits.has_section_ref);
            if (bits.has_cc_body) {
                const body_len = zir.extra[extra_index];
                extra_index += 1 + body_len;
            } else extra_index += @intFromBool(bits.has_cc_ref);
            if (bits.has_ret_ty_body) {
                const body_len = zir.extra[extra_index];
                extra_index += 1 + body_len;
            } else extra_index += @intFromBool(bits.has_ret_ty_ref);
            extra_index += @intFromBool(bits.has_any_noalias);
            extra_index += extra.data.body_len;
            extra_index += @typeInfo(Zir.Inst.Func.SrcLocs).Struct.fields.len;
            return @bitCast([4]u32{
                zir.extra[extra_index + 0],
                zir.extra[extra_index + 1],
                zir.extra[extra_index + 2],
                zir.extra[extra_index + 3],
            });
        },
        .extended => {},
        else => return null,
    }
    const extended = data[@intFromEnum(inst)].extended;
    switch (extended.opcode) {
        .struct_decl => {
            const extra = zir.extraData(Inst.StructDecl, extended.operand).data;
            return @bitCast([4]u32{
                extra.fields_hash_0,
                extra.fields_hash_1,
                extra.fields_hash_2,
                extra.fields_hash_3,
            });
        },
        .union_decl => {
            const extra = zir.extraData(Inst.UnionDecl, extended.operand).data;
            return @bitCast([4]u32{
                extra.fields_hash_0,
                extra.fields_hash_1,
                extra.fields_hash_2,
                extra.fields_hash_3,
            });
        },
        .enum_decl => {
            const extra = zir.extraData(Inst.EnumDecl, extended.operand).data;
            return @bitCast([4]u32{
                extra.fields_hash_0,
                extra.fields_hash_1,
                extra.fields_hash_2,
                extra.fields_hash_3,
            });
        },
        else => return null,
    }
}
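How the hash returned by `getAssociatedSrcHash` is consumed is not shown in this hunk. A plausible, hedged sketch is that an incremental update compares the hash associated with a tracked instruction before and after a ZIR update and only invalidates dependents when it changed, or when either side has no hash at all (`needsInvalidation` below is a hypothetical helper, not a function from this PR):

const std = @import("std");

fn needsInvalidation(old: ?std.zig.SrcHash, new: ?std.zig.SrcHash) bool {
    const o = old orelse return true;
    const n = new orelse return true;
    return !std.zig.srcHashEql(o, n);
}

test "unchanged source hash means dependents can be kept" {
    const h = std.zig.hashSrc("pub fn f() void {}");
    try std.testing.expect(!needsInvalidation(h, h));
    try std.testing.expect(needsInvalidation(h, std.zig.hashSrc("pub fn f() u8 { return 0; }")));
    try std.testing.expect(needsInvalidation(null, h));
}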
@@ -3255,6 +3255,7 @@ fn buildOutputType(
.cache_mode = cache_mode,
.subsystem = subsystem,
.debug_compile_errors = debug_compile_errors,
.debug_incremental = debug_incremental,
.enable_link_snapshots = enable_link_snapshots,
.install_name = install_name,
.entitlements = entitlements,
@@ -1401,7 +1401,17 @@ const Writer = struct {
fn writeStructDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
    const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small));

    var extra_index: usize = extended.operand;
    const extra = self.code.extraData(Zir.Inst.StructDecl, extended.operand);
    const fields_hash: std.zig.SrcHash = @bitCast([4]u32{
        extra.data.fields_hash_0,
        extra.data.fields_hash_1,
        extra.data.fields_hash_2,
        extra.data.fields_hash_3,
    });

    try stream.print("hash({}) ", .{std.fmt.fmtSliceHexLower(&fields_hash)});

    var extra_index: usize = extra.end;

    const src_node: ?i32 = if (small.has_src_node) blk: {
        const src_node = @as(i32, @bitCast(self.code.extra[extra_index]));
@@ -1591,7 +1601,17 @@ const Writer = struct {
fn writeUnionDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
    const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small));

    var extra_index: usize = extended.operand;
    const extra = self.code.extraData(Zir.Inst.UnionDecl, extended.operand);
    const fields_hash: std.zig.SrcHash = @bitCast([4]u32{
        extra.data.fields_hash_0,
        extra.data.fields_hash_1,
        extra.data.fields_hash_2,
        extra.data.fields_hash_3,
    });

    try stream.print("hash({}) ", .{std.fmt.fmtSliceHexLower(&fields_hash)});

    var extra_index: usize = extra.end;

    const src_node: ?i32 = if (small.has_src_node) blk: {
        const src_node = @as(i32, @bitCast(self.code.extra[extra_index]));
@@ -1733,7 +1753,18 @@ const Writer = struct {

fn writeEnumDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
    const small = @as(Zir.Inst.EnumDecl.Small, @bitCast(extended.small));
    var extra_index: usize = extended.operand;

    const extra = self.code.extraData(Zir.Inst.EnumDecl, extended.operand);
    const fields_hash: std.zig.SrcHash = @bitCast([4]u32{
        extra.data.fields_hash_0,
        extra.data.fields_hash_1,
        extra.data.fields_hash_2,
        extra.data.fields_hash_3,
    });

    try stream.print("hash({}) ", .{std.fmt.fmtSliceHexLower(&fields_hash)});

    var extra_index: usize = extra.end;

    const src_node: ?i32 = if (small.has_src_node) blk: {
        const src_node = @as(i32, @bitCast(self.code.extra[extra_index]));
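The `hash({})` prints added to these three writers format the 16 hash bytes with `std.fmt.fmtSliceHexLower`, i.e. 32 lowercase hex digits in the ZIR dump. A self-contained example of the resulting text (the byte values are made up):

const std = @import("std");

test "fields_hash prints as 32 lowercase hex digits" {
    var hash: std.zig.SrcHash = undefined;
    for (&hash, 0..) |*byte, i| byte.* = @intCast(i); // arbitrary example bytes
    var buf: [64]u8 = undefined;
    const out = try std.fmt.bufPrint(&buf, "hash({}) ", .{std.fmt.fmtSliceHexLower(&hash)});
    try std.testing.expectEqualStrings("hash(000102030405060708090a0b0c0d0e0f) ", out);
}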
12
src/type.zig
@@ -4,6 +4,7 @@ const Value = @import("Value.zig");
const assert = std.debug.assert;
const Target = std.Target;
const Module = @import("Module.zig");
const Zcu = Module;
const log = std.log.scoped(.Type);
const target_util = @import("target.zig");
const TypedValue = @import("TypedValue.zig");
@@ -3228,6 +3229,17 @@ pub const Type = struct {
    };
}

pub fn typeDeclInst(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index {
    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
        inline .struct_type,
        .union_type,
        .enum_type,
        .opaque_type,
        => |info| info.zir_index.unwrap(),
        else => null,
    };
}

pub const @"u1": Type = .{ .ip_index = .u1_type };
pub const @"u8": Type = .{ .ip_index = .u8_type };
pub const @"u16": Type = .{ .ip_index = .u16_type };