From 895267c916f874593b0788b198b7de140b6b335b Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 11 Aug 2024 23:16:06 +0100 Subject: [PATCH 01/25] frontend: incremental progress This commit makes more progress towards incremental compilation, fixing some crashes in the frontend. Notably, it fixes the regressions introduced by #20964. It also cleans up the "outdated file root" mechanism, by virtue of deleting it: we now detect outdated file roots just after updating ZIR refs, and re-scan their namespaces. --- src/Compilation.zig | 40 +++-- src/InternPool.zig | 152 ++++++++++++++++-- src/Sema.zig | 29 ++-- src/Type.zig | 2 +- src/Zcu.zig | 119 +++++--------- src/Zcu/PerThread.zig | 355 ++++++++++++++++++++++-------------------- src/codegen.zig | 2 +- src/codegen/c.zig | 2 +- src/codegen/llvm.zig | 6 +- 9 files changed, 406 insertions(+), 301 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 1c469a95c7..fd55db0139 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3081,7 +3081,7 @@ pub fn totalErrorCount(comp: *Compilation) u32 { for (zcu.failed_analysis.keys()) |anal_unit| { const file_index = switch (anal_unit.unwrap()) { .cau => |cau| zcu.namespacePtr(ip.getCau(cau).namespace).file_scope, - .func => |ip_index| zcu.funcInfo(ip_index).zir_body_inst.resolveFull(ip).file, + .func => |ip_index| (zcu.funcInfo(ip_index).zir_body_inst.resolveFull(ip) orelse continue).file, }; if (zcu.fileByIndex(file_index).okToReportErrors()) { total += 1; @@ -3091,11 +3091,13 @@ pub fn totalErrorCount(comp: *Compilation) u32 { } } - if (zcu.intern_pool.global_error_set.getNamesFromMainThread().len > zcu.error_limit) { - total += 1; + for (zcu.failed_codegen.keys()) |nav| { + if (zcu.navFileScope(nav).okToReportErrors()) { + total += 1; + } } - for (zcu.failed_codegen.keys()) |_| { + if (zcu.intern_pool.global_error_set.getNamesFromMainThread().len > zcu.error_limit) { total += 1; } } @@ -3114,7 +3116,13 @@ pub fn totalErrorCount(comp: *Compilation) u32 { 
} } - return @as(u32, @intCast(total)); + if (comp.module) |zcu| { + if (total == 0 and zcu.transitive_failed_analysis.count() > 0) { + @panic("Transitive analysis errors, but none actually emitted"); + } + } + + return @intCast(total); } /// This function is temporally single-threaded. @@ -3214,7 +3222,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { for (zcu.failed_analysis.keys(), zcu.failed_analysis.values()) |anal_unit, error_msg| { const file_index = switch (anal_unit.unwrap()) { .cau => |cau| zcu.namespacePtr(ip.getCau(cau).namespace).file_scope, - .func => |ip_index| zcu.funcInfo(ip_index).zir_body_inst.resolveFull(ip).file, + .func => |ip_index| (zcu.funcInfo(ip_index).zir_body_inst.resolveFull(ip) orelse continue).file, }; // Skip errors for AnalUnits within files that had a parse failure. @@ -3243,7 +3251,8 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { } } } - for (zcu.failed_codegen.values()) |error_msg| { + for (zcu.failed_codegen.keys(), zcu.failed_codegen.values()) |nav, error_msg| { + if (!zcu.navFileScope(nav).okToReportErrors()) continue; try addModuleErrorMsg(zcu, &bundle, error_msg.*, &all_references); } for (zcu.failed_exports.values()) |value| { @@ -3608,10 +3617,9 @@ fn performAllTheWorkInner( // Pre-load these things from our single-threaded context since they // will be needed by the worker threads. 
const path_digest = zcu.filePathDigest(file_index); - const old_root_type = zcu.fileRootType(file_index); const file = zcu.fileByIndex(file_index); comp.thread_pool.spawnWgId(&astgen_wait_group, workerAstGenFile, .{ - comp, file, file_index, path_digest, old_root_type, zir_prog_node, &astgen_wait_group, .root, + comp, file, file_index, path_digest, zir_prog_node, &astgen_wait_group, .root, }); } } @@ -3649,6 +3657,7 @@ fn performAllTheWorkInner( } try reportMultiModuleErrors(pt); try zcu.flushRetryableFailures(); + zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0); } @@ -4283,7 +4292,6 @@ fn workerAstGenFile( file: *Zcu.File, file_index: Zcu.File.Index, path_digest: Cache.BinDigest, - old_root_type: InternPool.Index, prog_node: std.Progress.Node, wg: *WaitGroup, src: Zcu.AstGenSrc, @@ -4292,7 +4300,7 @@ fn workerAstGenFile( defer child_prog_node.end(); const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; - pt.astGenFile(file, path_digest, old_root_type) catch |err| switch (err) { + pt.astGenFile(file, path_digest) catch |err| switch (err) { error.AnalysisFail => return, else => { file.status = .retryable_failure; @@ -4323,7 +4331,7 @@ fn workerAstGenFile( // `@import("builtin")` is handled specially. 
if (mem.eql(u8, import_path, "builtin")) continue; - const import_result, const imported_path_digest, const imported_root_type = blk: { + const import_result, const imported_path_digest = blk: { comp.mutex.lock(); defer comp.mutex.unlock(); @@ -4338,8 +4346,7 @@ fn workerAstGenFile( comp.appendFileSystemInput(fsi, res.file.mod.root, res.file.sub_file_path) catch continue; }; const imported_path_digest = pt.zcu.filePathDigest(res.file_index); - const imported_root_type = pt.zcu.fileRootType(res.file_index); - break :blk .{ res, imported_path_digest, imported_root_type }; + break :blk .{ res, imported_path_digest }; }; if (import_result.is_new) { log.debug("AstGen of {s} has import '{s}'; queuing AstGen of {s}", .{ @@ -4350,7 +4357,7 @@ fn workerAstGenFile( .import_tok = item.data.token, } }; comp.thread_pool.spawnWgId(wg, workerAstGenFile, .{ - comp, import_result.file, import_result.file_index, imported_path_digest, imported_root_type, prog_node, wg, sub_src, + comp, import_result.file, import_result.file_index, imported_path_digest, prog_node, wg, sub_src, }); } } @@ -6443,7 +6450,8 @@ fn buildOutputFromZig( try comp.updateSubCompilation(sub_compilation, misc_task_tag, prog_node); - assert(out.* == null); + // Under incremental compilation, `out` may already be populated from a prior update. + assert(out.* == null or comp.incremental); out.* = try sub_compilation.toCrtFile(); } diff --git a/src/InternPool.zig b/src/InternPool.zig index 571668772b..91a58e10e7 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -65,19 +65,49 @@ pub const single_threaded = builtin.single_threaded or !want_multi_threaded; pub const TrackedInst = extern struct { file: FileIndex, inst: Zir.Inst.Index, - comptime { - // The fields should be tightly packed. See also serialiation logic in `Compilation.saveState`. 
- assert(@sizeOf(@This()) == @sizeOf(FileIndex) + @sizeOf(Zir.Inst.Index)); - } + + pub const MaybeLost = extern struct { + file: FileIndex, + inst: ZirIndex, + pub const ZirIndex = enum(u32) { + /// Tracking failed for this ZIR instruction. Uses of it should fail. + lost = std.math.maxInt(u32), + _, + pub fn unwrap(inst: ZirIndex) ?Zir.Inst.Index { + return switch (inst) { + .lost => null, + _ => @enumFromInt(@intFromEnum(inst)), + }; + } + pub fn wrap(inst: Zir.Inst.Index) ZirIndex { + return @enumFromInt(@intFromEnum(inst)); + } + }; + comptime { + // The fields should be tightly packed. See also serialiation logic in `Compilation.saveState`. + assert(@sizeOf(@This()) == @sizeOf(FileIndex) + @sizeOf(ZirIndex)); + } + }; + pub const Index = enum(u32) { _, - pub fn resolveFull(tracked_inst_index: TrackedInst.Index, ip: *const InternPool) TrackedInst { + pub fn resolveFull(tracked_inst_index: TrackedInst.Index, ip: *const InternPool) ?TrackedInst { const tracked_inst_unwrapped = tracked_inst_index.unwrap(ip); const tracked_insts = ip.getLocalShared(tracked_inst_unwrapped.tid).tracked_insts.acquire(); - return tracked_insts.view().items(.@"0")[tracked_inst_unwrapped.index]; + const maybe_lost = tracked_insts.view().items(.@"0")[tracked_inst_unwrapped.index]; + return .{ + .file = maybe_lost.file, + .inst = maybe_lost.inst.unwrap() orelse return null, + }; } - pub fn resolve(i: TrackedInst.Index, ip: *const InternPool) Zir.Inst.Index { - return i.resolveFull(ip).inst; + pub fn resolveFile(tracked_inst_index: TrackedInst.Index, ip: *const InternPool) FileIndex { + const tracked_inst_unwrapped = tracked_inst_index.unwrap(ip); + const tracked_insts = ip.getLocalShared(tracked_inst_unwrapped.tid).tracked_insts.acquire(); + const maybe_lost = tracked_insts.view().items(.@"0")[tracked_inst_unwrapped.index]; + return maybe_lost.file; + } + pub fn resolve(i: TrackedInst.Index, ip: *const InternPool) ?Zir.Inst.Index { + return (i.resolveFull(ip) orelse return null).inst; } 
pub fn toOptional(i: TrackedInst.Index) Optional { @@ -120,7 +150,11 @@ pub fn trackZir( tid: Zcu.PerThread.Id, key: TrackedInst, ) Allocator.Error!TrackedInst.Index { - const full_hash = Hash.hash(0, std.mem.asBytes(&key)); + const maybe_lost_key: TrackedInst.MaybeLost = .{ + .file = key.file, + .inst = TrackedInst.MaybeLost.ZirIndex.wrap(key.inst), + }; + const full_hash = Hash.hash(0, std.mem.asBytes(&maybe_lost_key)); const hash: u32 = @truncate(full_hash >> 32); const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))]; var map = shard.shared.tracked_inst_map.acquire(); @@ -132,12 +166,11 @@ pub fn trackZir( const entry = &map.entries[map_index]; const index = entry.acquire().unwrap() orelse break; if (entry.hash != hash) continue; - if (std.meta.eql(index.resolveFull(ip), key)) return index; + if (std.meta.eql(index.resolveFull(ip) orelse continue, key)) return index; } shard.mutate.tracked_inst_map.mutex.lock(); defer shard.mutate.tracked_inst_map.mutex.unlock(); if (map.entries != shard.shared.tracked_inst_map.entries) { - shard.mutate.tracked_inst_map.len += 1; map = shard.shared.tracked_inst_map; map_mask = map.header().mask(); map_index = hash; @@ -147,7 +180,7 @@ pub fn trackZir( const entry = &map.entries[map_index]; const index = entry.acquire().unwrap() orelse break; if (entry.hash != hash) continue; - if (std.meta.eql(index.resolveFull(ip), key)) return index; + if (std.meta.eql(index.resolveFull(ip) orelse continue, key)) return index; } defer shard.mutate.tracked_inst_map.len += 1; const local = ip.getLocal(tid); @@ -161,7 +194,7 @@ pub fn trackZir( .tid = tid, .index = list.mutate.len, }).wrap(ip); - list.appendAssumeCapacity(.{key}); + list.appendAssumeCapacity(.{maybe_lost_key}); entry.release(index.toOptional()); return index; } @@ -205,12 +238,91 @@ pub fn trackZir( .tid = tid, .index = list.mutate.len, }).wrap(ip); - list.appendAssumeCapacity(.{key}); + list.appendAssumeCapacity(.{maybe_lost_key}); map.entries[map_index] = .{ 
.value = index.toOptional(), .hash = hash }; shard.shared.tracked_inst_map.release(new_map); return index; } +pub fn rehashTrackedInsts( + ip: *InternPool, + gpa: Allocator, + /// TODO: maybe don't take this? it doesn't actually matter, only one thread is running at this point + tid: Zcu.PerThread.Id, +) Allocator.Error!void { + // TODO: this function doesn't handle OOM well. What should it do? + // Indeed, what should anyone do when they run out of memory? + + // We don't lock anything, as this function assumes that no other thread is + // accessing `tracked_insts`. This is necessary because we're going to be + // iterating the `TrackedInst`s in each `Local`, so we have to know that + // none will be added as we work. + + // Figure out how big each shard need to be and store it in its mutate `len`. + for (ip.shards) |*shard| shard.mutate.tracked_inst_map.len = 0; + for (ip.locals) |*local| { + // `getMutableTrackedInsts` is okay only because no other thread is currently active. + // We need the `mutate` for the len. + for (local.getMutableTrackedInsts(gpa).viewAllowEmpty().items(.@"0")) |tracked_inst| { + if (tracked_inst.inst == .lost) continue; // we can ignore this one! + const full_hash = Hash.hash(0, std.mem.asBytes(&tracked_inst)); + const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))]; + shard.mutate.tracked_inst_map.len += 1; + } + } + + const Map = Shard.Map(TrackedInst.Index.Optional); + + const arena_state = &ip.getLocal(tid).mutate.arena; + + // We know how big each shard must be, so ensure we have the capacity we need. 
+ for (ip.shards) |*shard| { + const want_capacity = std.math.ceilPowerOfTwo(u32, shard.mutate.tracked_inst_map.len * 5 / 3) catch unreachable; + const have_capacity = shard.shared.tracked_inst_map.header().capacity; // no acquire because we hold the mutex + if (have_capacity >= want_capacity) { + @memset(shard.shared.tracked_inst_map.entries[0..have_capacity], .{ .value = .none, .hash = undefined }); + continue; + } + var arena = arena_state.promote(gpa); + defer arena_state.* = arena.state; + const new_map_buf = try arena.allocator().alignedAlloc( + u8, + Map.alignment, + Map.entries_offset + want_capacity * @sizeOf(Map.Entry), + ); + const new_map: Map = .{ .entries = @ptrCast(new_map_buf[Map.entries_offset..].ptr) }; + new_map.header().* = .{ .capacity = want_capacity }; + @memset(new_map.entries[0..want_capacity], .{ .value = .none, .hash = undefined }); + shard.shared.tracked_inst_map.release(new_map); + } + + // Now, actually insert the items. + for (ip.locals, 0..) |*local, local_tid| { + // `getMutableTrackedInsts` is okay only because no other thread is currently active. + // We need the `mutate` for the len. + for (local.getMutableTrackedInsts(gpa).viewAllowEmpty().items(.@"0"), 0..) |tracked_inst, local_inst_index| { + if (tracked_inst.inst == .lost) continue; // we can ignore this one! 
+ const full_hash = Hash.hash(0, std.mem.asBytes(&tracked_inst)); + const hash: u32 = @truncate(full_hash >> 32); + const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))]; + const map = shard.shared.tracked_inst_map; // no acquire because we hold the mutex + const map_mask = map.header().mask(); + var map_index = hash; + const entry = while (true) : (map_index += 1) { + map_index &= map_mask; + const entry = &map.entries[map_index]; + if (entry.acquire() == .none) break entry; + }; + const index = TrackedInst.Index.Unwrapped.wrap(.{ + .tid = @enumFromInt(local_tid), + .index = @intCast(local_inst_index), + }, ip); + entry.hash = hash; + entry.release(index.toOptional()); + } + } +} + /// Analysis Unit. Represents a single entity which undergoes semantic analysis. /// This is either a `Cau` or a runtime function. /// The LSB is used as a tag bit. @@ -728,7 +840,7 @@ const Local = struct { else => @compileError("unsupported host"), }; const Strings = List(struct { u8 }); - const TrackedInsts = List(struct { TrackedInst }); + const TrackedInsts = List(struct { TrackedInst.MaybeLost }); const Maps = List(struct { FieldMap }); const Caus = List(struct { Cau }); const Navs = List(Nav.Repr); @@ -959,6 +1071,14 @@ const Local = struct { mutable.list.release(new_list); } + pub fn viewAllowEmpty(mutable: Mutable) View { + const capacity = mutable.list.header().capacity; + return .{ + .bytes = mutable.list.bytes, + .len = mutable.mutate.len, + .capacity = capacity, + }; + } pub fn view(mutable: Mutable) View { const capacity = mutable.list.header().capacity; assert(capacity > 0); // optimizes `MultiArrayList.Slice.items` @@ -996,7 +1116,6 @@ const Local = struct { fn header(list: ListSelf) *Header { return @ptrFromInt(@intFromPtr(list.bytes) - bytes_offset); } - pub fn view(list: ListSelf) View { const capacity = list.header().capacity; assert(capacity > 0); // optimizes `MultiArrayList.Slice.items` @@ -11000,7 +11119,6 @@ pub fn getOrPutTrailingString( 
shard.mutate.string_map.mutex.lock(); defer shard.mutate.string_map.mutex.unlock(); if (map.entries != shard.shared.string_map.entries) { - shard.mutate.string_map.len += 1; map = shard.shared.string_map; map_mask = map.header().mask(); map_index = hash; diff --git a/src/Sema.zig b/src/Sema.zig index d891995fd0..84999c8a86 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -999,7 +999,7 @@ fn analyzeBodyInner( // The hashmap lookup in here is a little expensive, and LLVM fails to optimize it away. if (build_options.enable_logging) { std.log.scoped(.sema_zir).debug("sema ZIR {s} %{d}", .{ sub_file_path: { - const file_index = block.src_base_inst.resolveFull(&zcu.intern_pool).file; + const file_index = block.src_base_inst.resolveFile(&zcu.intern_pool); const file = zcu.fileByIndex(file_index); break :sub_file_path file.sub_file_path; }, inst }); @@ -2873,7 +2873,7 @@ fn createTypeName( .anon => {}, // handled after switch .parent => return block.type_name_ctx, .func => func_strat: { - const fn_info = sema.code.getFnInfo(ip.funcZirBodyInst(sema.func_index).resolve(ip)); + const fn_info = sema.code.getFnInfo(ip.funcZirBodyInst(sema.func_index).resolve(ip) orelse return error.AnalysisFail); const zir_tags = sema.code.instructions.items(.tag); var buf: std.ArrayListUnmanaged(u8) = .{}; @@ -5487,7 +5487,7 @@ fn failWithBadMemberAccess( .Enum => "enum", else => unreachable, }; - if (agg_ty.typeDeclInst(zcu)) |inst| if (inst.resolve(ip) == .main_struct_inst) { + if (agg_ty.typeDeclInst(zcu)) |inst| if ((inst.resolve(ip) orelse return error.AnalysisFail) == .main_struct_inst) { return sema.fail(block, field_src, "root struct of file '{}' has no member named '{}'", .{ agg_ty.fmt(pt), field_name.fmt(ip), }); @@ -6041,8 +6041,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); const path_digest = zcu.filePathDigest(result.file_index); - const old_root_type = 
zcu.fileRootType(result.file_index); - pt.astGenFile(result.file, path_digest, old_root_type) catch |err| + pt.astGenFile(result.file, path_digest) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); // TODO: register some kind of dependency on the file. @@ -7778,7 +7777,7 @@ fn analyzeCall( // the AIR instructions of the callsite. The callee could be a generic function // which means its parameter type expressions must be resolved in order and used // to successively coerce the arguments. - const fn_info = ics.callee().code.getFnInfo(module_fn.zir_body_inst.resolve(ip)); + const fn_info = ics.callee().code.getFnInfo(module_fn.zir_body_inst.resolve(ip) orelse return error.AnalysisFail); try ics.callee().inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body); var arg_i: u32 = 0; @@ -7823,7 +7822,7 @@ fn analyzeCall( // each of the parameters, resolving the return type and providing it to the child // `Sema` so that it can be used for the `ret_ptr` instruction. 
const ret_ty_inst = if (fn_info.ret_ty_body.len != 0) - try sema.resolveInlineBody(&child_block, fn_info.ret_ty_body, module_fn.zir_body_inst.resolve(ip)) + try sema.resolveInlineBody(&child_block, fn_info.ret_ty_body, module_fn.zir_body_inst.resolve(ip) orelse return error.AnalysisFail) else try sema.resolveInst(fn_info.ret_ty_ref); const ret_ty_src: LazySrcLoc = .{ .base_node_inst = module_fn.zir_body_inst, .offset = .{ .node_offset_fn_type_ret_ty = 0 } }; @@ -8210,7 +8209,7 @@ fn instantiateGenericCall( const fn_nav = ip.getNav(generic_owner_func.owner_nav); const fn_cau = ip.getCau(fn_nav.analysis_owner.unwrap().?); const fn_zir = zcu.namespacePtr(fn_cau.namespace).fileScope(zcu).zir; - const fn_info = fn_zir.getFnInfo(generic_owner_func.zir_body_inst.resolve(ip)); + const fn_info = fn_zir.getFnInfo(generic_owner_func.zir_body_inst.resolve(ip) orelse return error.AnalysisFail); const comptime_args = try sema.arena.alloc(InternPool.Index, args_info.count()); @memset(comptime_args, .none); @@ -9416,7 +9415,7 @@ fn zirFunc( break :cau generic_owner_nav.analysis_owner.unwrap().?; } else sema.owner.unwrap().cau; const fn_is_exported = exported: { - const decl_inst = ip.getCau(func_decl_cau).zir_index.resolve(ip); + const decl_inst = ip.getCau(func_decl_cau).zir_index.resolve(ip) orelse return error.AnalysisFail; const zir_decl = sema.code.getDeclaration(decl_inst)[0]; break :exported zir_decl.flags.is_export; }; @@ -26125,7 +26124,7 @@ fn zirVarExtended( const addrspace_src = block.src(.{ .node_offset_var_decl_addrspace = 0 }); const decl_inst, const decl_bodies = decl: { - const decl_inst = sema.getOwnerCauDeclInst().resolve(ip); + const decl_inst = sema.getOwnerCauDeclInst().resolve(ip) orelse return error.AnalysisFail; const zir_decl, const extra_end = sema.code.getDeclaration(decl_inst); break :decl .{ decl_inst, zir_decl.getBodies(extra_end, sema.code) }; }; @@ -26354,7 +26353,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A 
break :decl_inst cau.zir_index; } else sema.getOwnerCauDeclInst(); // not an instantiation so we're analyzing a function declaration Cau - const zir_decl = sema.code.getDeclaration(decl_inst.resolve(&mod.intern_pool))[0]; + const zir_decl = sema.code.getDeclaration(decl_inst.resolve(&mod.intern_pool) orelse return error.AnalysisFail)[0]; if (zir_decl.flags.is_export) { break :cc .C; } @@ -35505,7 +35504,7 @@ fn semaBackingIntType(pt: Zcu.PerThread, struct_type: InternPool.LoadedStructTyp break :blk accumulator; }; - const zir_index = struct_type.zir_index.unwrap().?.resolve(ip); + const zir_index = struct_type.zir_index.unwrap().?.resolve(ip) orelse return error.AnalysisFail; const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended; assert(extended.opcode == .struct_decl); const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); @@ -36120,7 +36119,7 @@ fn semaStructFields( const cau_index = struct_type.cau.unwrap().?; const namespace_index = ip.getCau(cau_index).namespace; const zir = zcu.namespacePtr(namespace_index).fileScope(zcu).zir; - const zir_index = struct_type.zir_index.unwrap().?.resolve(ip); + const zir_index = struct_type.zir_index.unwrap().?.resolve(ip) orelse return error.AnalysisFail; const fields_len, const small, var extra_index = structZirInfo(zir, zir_index); @@ -36343,7 +36342,7 @@ fn semaStructFieldInits( const cau_index = struct_type.cau.unwrap().?; const namespace_index = ip.getCau(cau_index).namespace; const zir = zcu.namespacePtr(namespace_index).fileScope(zcu).zir; - const zir_index = struct_type.zir_index.unwrap().?.resolve(ip); + const zir_index = struct_type.zir_index.unwrap().?.resolve(ip) orelse return error.AnalysisFail; const fields_len, const small, var extra_index = structZirInfo(zir, zir_index); var comptime_err_ret_trace = std.ArrayList(LazySrcLoc).init(gpa); @@ -36477,7 +36476,7 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_ty: InternPool.Ind const ip = &zcu.intern_pool; 
const cau_index = union_type.cau; const zir = zcu.namespacePtr(union_type.namespace).fileScope(zcu).zir; - const zir_index = union_type.zir_index.resolve(ip); + const zir_index = union_type.zir_index.resolve(ip) orelse return error.AnalysisFail; const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended; assert(extended.opcode == .union_decl); const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small); diff --git a/src/Type.zig b/src/Type.zig index 2e3a493f18..c113e0734e 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -3437,7 +3437,7 @@ pub fn typeDeclSrcLine(ty: Type, zcu: *Zcu) ?u32 { }, else => return null, }; - const info = tracked.resolveFull(&zcu.intern_pool); + const info = tracked.resolveFull(&zcu.intern_pool) orelse return null; const file = zcu.fileByIndex(info.file); assert(file.zir_loaded); const zir = file.zir; diff --git a/src/Zcu.zig b/src/Zcu.zig index da78e20cf3..d0889ea62c 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -162,12 +162,6 @@ outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{}, /// Such `AnalUnit`s are ready for immediate re-analysis. /// See `findOutdatedToAnalyze` for details. outdated_ready: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{}, -/// This contains a set of struct types whose corresponding `Cau` may not be in -/// `outdated`, but are the root types of files which have updated source and -/// thus must be re-analyzed. If such a type is only in this set, the struct type -/// index may be preserved (only the namespace might change). If its owned `Cau` -/// is also outdated, the struct type index must be recreated. -outdated_file_root: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}, /// This contains a list of AnalUnit whose analysis or codegen failed, but the /// failure was something like running out of disk space, and trying again may /// succeed. 
On the next update, we will flush this list, marking all members of @@ -2025,7 +2019,7 @@ pub const LazySrcLoc = struct { pub fn resolveBaseNode(base_node_inst: InternPool.TrackedInst.Index, zcu: *Zcu) struct { *File, Ast.Node.Index } { const ip = &zcu.intern_pool; const file_index, const zir_inst = inst: { - const info = base_node_inst.resolveFull(ip); + const info = base_node_inst.resolveFull(ip) orelse @panic("TODO: resolve source location relative to lost inst"); break :inst .{ info.file, info.inst }; }; const file = zcu.fileByIndex(file_index); @@ -2148,7 +2142,6 @@ pub fn deinit(zcu: *Zcu) void { zcu.potentially_outdated.deinit(gpa); zcu.outdated.deinit(gpa); zcu.outdated_ready.deinit(gpa); - zcu.outdated_file_root.deinit(gpa); zcu.retryable_failures.deinit(gpa); zcu.test_functions.deinit(gpa); @@ -2355,8 +2348,6 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit { if (!zcu.comp.incremental) return null; - if (true) @panic("TODO: findOutdatedToAnalyze"); - if (zcu.outdated.count() == 0 and zcu.potentially_outdated.count() == 0) { log.debug("findOutdatedToAnalyze: no outdated depender", .{}); return null; @@ -2381,87 +2372,57 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit { return zcu.outdated_ready.keys()[0]; } - // Next, we will see if there is any outdated file root which was not in - // `outdated`. This set will be small (number of files changed in this - // update), so it's alright for us to just iterate here. - for (zcu.outdated_file_root.keys()) |file_decl| { - const decl_depender = AnalUnit.wrap(.{ .decl = file_decl }); - if (zcu.outdated.contains(decl_depender)) { - // Since we didn't hit this in the first loop, this Decl must have - // pending dependencies, so is ineligible. 
- continue; - } - if (zcu.potentially_outdated.contains(decl_depender)) { - // This Decl's struct may or may not need to be recreated depending - // on whether it is outdated. If we analyzed it now, we would have - // to assume it was outdated and recreate it! - continue; - } - log.debug("findOutdatedToAnalyze: outdated file root decl '{d}'", .{file_decl}); - return decl_depender; - } + // There is no single AnalUnit which is ready for re-analysis. Instead, we must assume that some + // Cau with PO dependencies is outdated -- e.g. in the above example we arbitrarily pick one of + // A or B. We should select a Cau, since a Cau is definitely responsible for the loop in the + // dependency graph (since IES dependencies can't have loops). We should also, of course, not + // select a Cau owned by a `comptime` declaration, since you can't depend on those! - // There is no single AnalUnit which is ready for re-analysis. Instead, we - // must assume that some Decl with PO dependencies is outdated - e.g. in the - // above example we arbitrarily pick one of A or B. We should select a Decl, - // since a Decl is definitely responsible for the loop in the dependency - // graph (since you can't depend on a runtime function analysis!). - - // The choice of this Decl could have a big impact on how much total - // analysis we perform, since if analysis concludes its tyval is unchanged, - // then other PO AnalUnit may be resolved as up-to-date. To hopefully avoid - // doing too much work, let's find a Decl which the most things depend on - - // the idea is that this will resolve a lot of loops (but this is only a - // heuristic). + // The choice of this Cau could have a big impact on how much total analysis we perform, since + // if analysis concludes any dependencies on its result are up-to-date, then other PO AnalUnit + // may be resolved as up-to-date. 
To hopefully avoid doing too much work, let's find a Decl + // which the most things depend on - the idea is that this will resolve a lot of loops (but this + // is only a heuristic). log.debug("findOutdatedToAnalyze: no trivial ready, using heuristic; {d} outdated, {d} PO", .{ zcu.outdated.count(), zcu.potentially_outdated.count(), }); - const Decl = {}; + const ip = &zcu.intern_pool; - var chosen_decl_idx: ?Decl.Index = null; - var chosen_decl_dependers: u32 = undefined; + var chosen_cau: ?InternPool.Cau.Index = null; + var chosen_cau_dependers: u32 = undefined; - for (zcu.outdated.keys()) |depender| { - const decl_index = switch (depender.unwrap()) { - .decl => |d| d, - .func => continue, - }; + inline for (.{ zcu.outdated.keys(), zcu.potentially_outdated.keys() }) |outdated_units| { + for (outdated_units) |unit| { + const cau = switch (unit.unwrap()) { + .cau => |cau| cau, + .func => continue, // a `func` definitely can't be causing the loop so it is a bad choice + }; + const cau_owner = ip.getCau(cau).owner; - var n: u32 = 0; - var it = zcu.intern_pool.dependencyIterator(.{ .decl_val = decl_index }); - while (it.next()) |_| n += 1; + var n: u32 = 0; + var it = ip.dependencyIterator(switch (cau_owner.unwrap()) { + .none => continue, // there can be no dependencies on this `Cau` so it is a terrible choice + .type => |ty| .{ .interned = ty }, + .nav => |nav| .{ .nav_val = nav }, + }); + while (it.next()) |_| n += 1; - if (chosen_decl_idx == null or n > chosen_decl_dependers) { - chosen_decl_idx = decl_index; - chosen_decl_dependers = n; + if (chosen_cau == null or n > chosen_cau_dependers) { + chosen_cau = cau; + chosen_cau_dependers = n; + } } } - for (zcu.potentially_outdated.keys()) |depender| { - const decl_index = switch (depender.unwrap()) { - .decl => |d| d, - .func => continue, - }; - - var n: u32 = 0; - var it = zcu.intern_pool.dependencyIterator(.{ .decl_val = decl_index }); - while (it.next()) |_| n += 1; - - if (chosen_decl_idx == null or n > 
chosen_decl_dependers) { - chosen_decl_idx = decl_index; - chosen_decl_dependers = n; - } - } - - log.debug("findOutdatedToAnalyze: heuristic returned Decl {d} ({d} dependers)", .{ - chosen_decl_idx.?, - chosen_decl_dependers, + log.debug("findOutdatedToAnalyze: heuristic returned Cau {d} ({d} dependers)", .{ + @intFromEnum(chosen_cau.?), + chosen_cau_dependers, }); - return AnalUnit.wrap(.{ .decl = chosen_decl_idx.? }); + return AnalUnit.wrap(.{ .cau = chosen_cau.? }); } /// During an incremental update, before semantic analysis, call this to flush all values from @@ -2583,7 +2544,7 @@ pub fn mapOldZirToNew( break :inst unnamed_tests.items[unnamed_test_idx]; }, _ => inst: { - const name_nts = new_decl.name.toString(old_zir).?; + const name_nts = new_decl.name.toString(new_zir).?; const name = new_zir.nullTerminatedString(name_nts); if (new_decl.name.isNamedTest(new_zir)) { break :inst named_tests.get(name) orelse continue; @@ -3093,7 +3054,7 @@ pub fn navSrcLoc(zcu: *const Zcu, nav_index: InternPool.Nav.Index) LazySrcLoc { pub fn navSrcLine(zcu: *Zcu, nav_index: InternPool.Nav.Index) u32 { const ip = &zcu.intern_pool; - const inst_info = ip.getNav(nav_index).srcInst(ip).resolveFull(ip); + const inst_info = ip.getNav(nav_index).srcInst(ip).resolveFull(ip).?; const zir = zcu.fileByIndex(inst_info.file).zir; const inst = zir.instructions.get(@intFromEnum(inst_info.inst)); assert(inst.tag == .declaration); @@ -3106,7 +3067,7 @@ pub fn navValue(zcu: *const Zcu, nav_index: InternPool.Nav.Index) Value { pub fn navFileScopeIndex(zcu: *Zcu, nav: InternPool.Nav.Index) File.Index { const ip = &zcu.intern_pool; - return ip.getNav(nav).srcInst(ip).resolveFull(ip).file; + return ip.getNav(nav).srcInst(ip).resolveFile(ip); } pub fn navFileScope(zcu: *Zcu, nav: InternPool.Nav.Index) *File { @@ -3115,6 +3076,6 @@ pub fn navFileScope(zcu: *Zcu, nav: InternPool.Nav.Index) *File { pub fn cauFileScope(zcu: *Zcu, cau: InternPool.Cau.Index) *File { const ip = &zcu.intern_pool; - const 
file_index = ip.getCau(cau).zir_index.resolveFull(ip).file; + const file_index = ip.getCau(cau).zir_index.resolveFile(ip); return zcu.fileByIndex(file_index); } diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index b353331d95..37a3aced09 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -39,7 +39,6 @@ pub fn astGenFile( pt: Zcu.PerThread, file: *Zcu.File, path_digest: Cache.BinDigest, - old_root_type: InternPool.Index, ) !void { dev.check(.ast_gen); assert(!file.mod.isBuiltin()); @@ -299,25 +298,15 @@ pub fn astGenFile( file.status = .astgen_failure; return error.AnalysisFail; } - - if (old_root_type != .none) { - // The root of this file must be re-analyzed, since the file has changed. - comp.mutex.lock(); - defer comp.mutex.unlock(); - - log.debug("outdated file root type: {}", .{old_root_type}); - try zcu.outdated_file_root.put(gpa, old_root_type, {}); - } } const UpdatedFile = struct { - file_index: Zcu.File.Index, file: *Zcu.File, inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index), }; -fn cleanupUpdatedFiles(gpa: Allocator, updated_files: *std.ArrayListUnmanaged(UpdatedFile)) void { - for (updated_files.items) |*elem| elem.inst_map.deinit(gpa); +fn cleanupUpdatedFiles(gpa: Allocator, updated_files: *std.AutoArrayHashMapUnmanaged(Zcu.File.Index, UpdatedFile)) void { + for (updated_files.values()) |*elem| elem.inst_map.deinit(gpa); updated_files.deinit(gpa); } @@ -328,143 +317,166 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { const gpa = zcu.gpa; // We need to visit every updated File for every TrackedInst in InternPool. 
- var updated_files: std.ArrayListUnmanaged(UpdatedFile) = .{}; + var updated_files: std.AutoArrayHashMapUnmanaged(Zcu.File.Index, UpdatedFile) = .{}; defer cleanupUpdatedFiles(gpa, &updated_files); for (zcu.import_table.values()) |file_index| { const file = zcu.fileByIndex(file_index); const old_zir = file.prev_zir orelse continue; const new_zir = file.zir; - try updated_files.append(gpa, .{ - .file_index = file_index, + const gop = try updated_files.getOrPut(gpa, file_index); + assert(!gop.found_existing); + gop.value_ptr.* = .{ .file = file, .inst_map = .{}, - }); - const inst_map = &updated_files.items[updated_files.items.len - 1].inst_map; - try Zcu.mapOldZirToNew(gpa, old_zir.*, new_zir, inst_map); + }; + if (!new_zir.hasCompileErrors()) { + try Zcu.mapOldZirToNew(gpa, old_zir.*, file.zir, &gop.value_ptr.inst_map); + } } - if (updated_files.items.len == 0) + if (updated_files.count() == 0) return; for (ip.locals, 0..) |*local, tid| { const tracked_insts_list = local.getMutableTrackedInsts(gpa); - for (tracked_insts_list.view().items(.@"0"), 0..) |*tracked_inst, tracked_inst_unwrapped_index| { - for (updated_files.items) |updated_file| { - const file_index = updated_file.file_index; - if (tracked_inst.file != file_index) continue; + for (tracked_insts_list.viewAllowEmpty().items(.@"0"), 0..) 
|*tracked_inst, tracked_inst_unwrapped_index| { + const file_index = tracked_inst.file; + const updated_file = updated_files.get(file_index) orelse continue; - const file = updated_file.file; - const old_zir = file.prev_zir.?.*; - const new_zir = file.zir; - const old_tag = old_zir.instructions.items(.tag); - const old_data = old_zir.instructions.items(.data); - const inst_map = &updated_file.inst_map; + const file = updated_file.file; - const old_inst = tracked_inst.inst; - const tracked_inst_index = (InternPool.TrackedInst.Index.Unwrapped{ - .tid = @enumFromInt(tid), - .index = @intCast(tracked_inst_unwrapped_index), - }).wrap(ip); - tracked_inst.inst = inst_map.get(old_inst) orelse { - // Tracking failed for this instruction. Invalidate associated `src_hash` deps. - log.debug("tracking failed for %{d}", .{old_inst}); - try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index }); - continue; - }; + if (file.zir.hasCompileErrors()) { + // If we mark this as outdated now, users of this inst will just get a transitive analysis failure. + // Ultimately, they would end up throwing out potentially useful analysis results. + // So, do nothing. We already have the file failure -- that's sufficient for now! + continue; + } + const old_inst = tracked_inst.inst.unwrap() orelse continue; // we can't continue tracking lost insts + const tracked_inst_index = (InternPool.TrackedInst.Index.Unwrapped{ + .tid = @enumFromInt(tid), + .index = @intCast(tracked_inst_unwrapped_index), + }).wrap(ip); + const new_inst = updated_file.inst_map.get(old_inst) orelse { + // Tracking failed for this instruction. Invalidate associated `src_hash` deps. 
+ log.debug("tracking failed for %{d}", .{old_inst}); + tracked_inst.inst = .lost; + try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index }); + continue; + }; + tracked_inst.inst = InternPool.TrackedInst.MaybeLost.ZirIndex.wrap(new_inst); - if (old_zir.getAssociatedSrcHash(old_inst)) |old_hash| hash_changed: { - if (new_zir.getAssociatedSrcHash(tracked_inst.inst)) |new_hash| { - if (std.zig.srcHashEql(old_hash, new_hash)) { - break :hash_changed; - } - log.debug("hash for (%{d} -> %{d}) changed: {} -> {}", .{ - old_inst, - tracked_inst.inst, - std.fmt.fmtSliceHexLower(&old_hash), - std.fmt.fmtSliceHexLower(&new_hash), - }); + const old_zir = file.prev_zir.?.*; + const new_zir = file.zir; + const old_tag = old_zir.instructions.items(.tag); + const old_data = old_zir.instructions.items(.data); + + if (old_zir.getAssociatedSrcHash(old_inst)) |old_hash| hash_changed: { + if (new_zir.getAssociatedSrcHash(new_inst)) |new_hash| { + if (std.zig.srcHashEql(old_hash, new_hash)) { + break :hash_changed; } - // The source hash associated with this instruction changed - invalidate relevant dependencies. - try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index }); + log.debug("hash for (%{d} -> %{d}) changed: {} -> {}", .{ + old_inst, + new_inst, + std.fmt.fmtSliceHexLower(&old_hash), + std.fmt.fmtSliceHexLower(&new_hash), + }); } + // The source hash associated with this instruction changed - invalidate relevant dependencies. + try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index }); + } - // If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies. - const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) { - .extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) { - .struct_decl, .union_decl, .opaque_decl, .enum_decl => true, - else => false, - }, + // If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies. 
+ const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) { + .extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) { + .struct_decl, .union_decl, .opaque_decl, .enum_decl => true, else => false, - }; - if (!has_namespace) continue; + }, + else => false, + }; + if (!has_namespace) continue; - var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; - defer old_names.deinit(zcu.gpa); - { - var it = old_zir.declIterator(old_inst); - while (it.next()) |decl_inst| { - const decl_name = old_zir.getDeclaration(decl_inst)[0].name; - switch (decl_name) { - .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, - _ => if (decl_name.isNamedTest(old_zir)) continue, - } - const name_zir = decl_name.toString(old_zir).?; - const name_ip = try zcu.intern_pool.getOrPutString( - zcu.gpa, - pt.tid, - old_zir.nullTerminatedString(name_zir), - .no_embedded_nulls, - ); - try old_names.put(zcu.gpa, name_ip, {}); + var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; + defer old_names.deinit(zcu.gpa); + { + var it = old_zir.declIterator(old_inst); + while (it.next()) |decl_inst| { + const decl_name = old_zir.getDeclaration(decl_inst)[0].name; + switch (decl_name) { + .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, + _ => if (decl_name.isNamedTest(old_zir)) continue, } + const name_zir = decl_name.toString(old_zir).?; + const name_ip = try zcu.intern_pool.getOrPutString( + zcu.gpa, + pt.tid, + old_zir.nullTerminatedString(name_zir), + .no_embedded_nulls, + ); + try old_names.put(zcu.gpa, name_ip, {}); } - var any_change = false; - { - var it = new_zir.declIterator(tracked_inst.inst); - while (it.next()) |decl_inst| { - const decl_name = new_zir.getDeclaration(decl_inst)[0].name; - switch (decl_name) { - .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, - _ => if (decl_name.isNamedTest(new_zir)) continue, - } - const name_zir = 
decl_name.toString(new_zir).?; - const name_ip = try zcu.intern_pool.getOrPutString( - zcu.gpa, - pt.tid, - new_zir.nullTerminatedString(name_zir), - .no_embedded_nulls, - ); - if (!old_names.swapRemove(name_ip)) continue; - // Name added - any_change = true; - try zcu.markDependeeOutdated(.{ .namespace_name = .{ - .namespace = tracked_inst_index, - .name = name_ip, - } }); + } + var any_change = false; + { + var it = new_zir.declIterator(new_inst); + while (it.next()) |decl_inst| { + const decl_name = new_zir.getDeclaration(decl_inst)[0].name; + switch (decl_name) { + .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, + _ => if (decl_name.isNamedTest(new_zir)) continue, } - } - // The only elements remaining in `old_names` now are any names which were removed. - for (old_names.keys()) |name_ip| { + const name_zir = decl_name.toString(new_zir).?; + const name_ip = try zcu.intern_pool.getOrPutString( + zcu.gpa, + pt.tid, + new_zir.nullTerminatedString(name_zir), + .no_embedded_nulls, + ); + if (!old_names.swapRemove(name_ip)) continue; + // Name added any_change = true; try zcu.markDependeeOutdated(.{ .namespace_name = .{ .namespace = tracked_inst_index, .name = name_ip, } }); } + } + // The only elements remaining in `old_names` now are any names which were removed. 
+ for (old_names.keys()) |name_ip| { + any_change = true; + try zcu.markDependeeOutdated(.{ .namespace_name = .{ + .namespace = tracked_inst_index, + .name = name_ip, + } }); + } - if (any_change) { - try zcu.markDependeeOutdated(.{ .namespace = tracked_inst_index }); - } + if (any_change) { + try zcu.markDependeeOutdated(.{ .namespace = tracked_inst_index }); } } } - for (updated_files.items) |updated_file| { + try ip.rehashTrackedInsts(gpa, pt.tid); + + for (updated_files.keys(), updated_files.values()) |file_index, updated_file| { const file = updated_file.file; - const prev_zir = file.prev_zir.?; - file.prev_zir = null; - prev_zir.deinit(gpa); - gpa.destroy(prev_zir); + if (file.zir.hasCompileErrors()) { + // Keep `prev_zir` around: it's the last non-error ZIR. + // Don't update the namespace, as we have no new data to update *to*. + } else { + const prev_zir = file.prev_zir.?; + file.prev_zir = null; + prev_zir.deinit(gpa); + gpa.destroy(prev_zir); + + // For every file which has changed, re-scan the namespace of the file's root struct type. + // These types are special-cased because they don't have an enclosing declaration which will + // be re-analyzed (causing the struct's namespace to be re-scanned). It's fine to do this + // now because this work is fast (no actual Sema work is happening, we're just updating the + // namespace contents). We must do this after updating ZIR refs above, since `scanNamespace` + // will track some instructions. + try pt.updateFileNamespace(file_index); + } } } @@ -473,6 +485,8 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { pub fn ensureFileAnalyzed(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { const file_root_type = pt.zcu.fileRootType(file_index); if (file_root_type != .none) { + // The namespace is already up-to-date thanks to the `updateFileNamespace` calls at the + // start of this update. We just have to check whether the type itself is okay! 
const file_root_type_cau = pt.zcu.intern_pool.loadStructType(file_root_type).cau.unwrap().?; return pt.ensureCauAnalyzed(file_root_type_cau); } else { @@ -493,7 +507,6 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index }); const cau = ip.getCau(cau_index); - const inst_info = cau.zir_index.resolveFull(ip); log.debug("ensureCauAnalyzed {d}", .{@intFromEnum(cau_index)}); @@ -516,12 +529,9 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu _ = zcu.outdated_ready.swapRemove(anal_unit); } - // TODO: this only works if namespace lookups in Sema trigger `ensureCauAnalyzed`, because - // `outdated_file_root` information is not "viral", so we need that a namespace lookup first - // handles the case where the file root is not an outdated *type* but does have an outdated - // *namespace*. A more logically simple alternative may be for a file's root struct to register - // a dependency on the file's entire source code (hash). Alternatively, we could make sure that - // these are always handled first in an update. Actually, that's probably the best option. + const inst_info = cau.zir_index.resolveFull(ip) orelse return error.AnalysisFail; + + // TODO: document this elsewhere mlugg! // For my own benefit, here's how a namespace update for a normal (non-file-root) type works: // `const S = struct { ... };` // We are adding or removing a declaration within this `struct`. @@ -535,16 +545,12 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu // * we basically do `scanDecls`, updating the namespace as needed // * TODO: optimize this to make sure we only do it once a generation i guess? 
// * so everyone lived happily ever after - const file_root_outdated = switch (cau.owner.unwrap()) { - .type => |ty| zcu.outdated_file_root.swapRemove(ty), - .nav, .none => false, - }; if (zcu.fileByIndex(inst_info.file).status != .success_zir) { return error.AnalysisFail; } - if (!cau_outdated and !file_root_outdated) { + if (!cau_outdated) { // We can trust the current information about this `Cau`. if (zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit)) { return error.AnalysisFail; @@ -571,10 +577,13 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu const sema_result: SemaCauResult = res: { if (inst_info.inst == .main_struct_inst) { - const changed = try pt.semaFileUpdate(inst_info.file, cau_outdated); + // Note that this is definitely a *recreation* due to outdated, because + // this instruction indicates that `cau.owner` is a `type`, which only + // reaches here if `cau_outdated`. + try pt.recreateFileRoot(inst_info.file); break :res .{ - .invalidate_decl_val = changed, - .invalidate_decl_ref = changed, + .invalidate_decl_val = true, + .invalidate_decl_ref = true, }; } @@ -690,8 +699,8 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter zcu.potentially_outdated.swapRemove(anal_unit); if (func_outdated) { - dev.check(.incremental); _ = zcu.outdated_ready.swapRemove(anal_unit); + dev.check(.incremental); zcu.deleteUnitExports(anal_unit); zcu.deleteUnitReferences(anal_unit); } @@ -920,12 +929,9 @@ fn createFileRootStruct( return wip_ty.finish(ip, new_cau_index.toOptional(), namespace_index); } -/// Re-analyze the root type of a file on an incremental update. -/// If `type_outdated`, the struct type itself is considered outdated and is -/// reconstructed at a new InternPool index. Otherwise, the namespace is just -/// re-analyzed. Returns whether the decl's tyval was invalidated. -/// Returns `error.AnalysisFail` if the file has an error. 
-fn semaFileUpdate(pt: Zcu.PerThread, file_index: Zcu.File.Index, type_outdated: bool) Zcu.SemaError!bool { +/// Recreate the root type of a file after it becomes outdated. A new struct type +/// is constructed at a new InternPool index, reusing the namespace for efficiency. +fn recreateFileRoot(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { const zcu = pt.zcu; const ip = &zcu.intern_pool; const file = zcu.fileByIndex(file_index); @@ -934,48 +940,58 @@ fn semaFileUpdate(pt: Zcu.PerThread, file_index: Zcu.File.Index, type_outdated: assert(file_root_type != .none); - log.debug("semaFileUpdate mod={s} sub_file_path={s} type_outdated={}", .{ + log.debug("recreateFileRoot mod={s} sub_file_path={s}", .{ file.mod.fully_qualified_name, file.sub_file_path, - type_outdated, }); if (file.status != .success_zir) { return error.AnalysisFail; } - if (type_outdated) { - // Invalidate the existing type, reusing its namespace. - const file_root_type_cau = ip.loadStructType(file_root_type).cau.unwrap().?; - ip.removeDependenciesForDepender( - zcu.gpa, - InternPool.AnalUnit.wrap(.{ .cau = file_root_type_cau }), - ); - ip.remove(pt.tid, file_root_type); - _ = try pt.createFileRootStruct(file_index, namespace_index); - return true; - } + // Invalidate the existing type, reusing its namespace. + const file_root_type_cau = ip.loadStructType(file_root_type).cau.unwrap().?; + ip.removeDependenciesForDepender( + zcu.gpa, + InternPool.AnalUnit.wrap(.{ .cau = file_root_type_cau }), + ); + ip.remove(pt.tid, file_root_type); + _ = try pt.createFileRootStruct(file_index, namespace_index); +} - // Only the struct's namespace is outdated. - // Preserve the type - just scan the namespace again. +/// Re-scan the namespace of a file's root struct type on an incremental update. +/// The file must have successfully populated ZIR. +/// If the file's root struct type is not populated (the file is unreferenced), nothing is done. 
+/// This is called by `updateZirRefs` for all updated files before the main work loop. +/// This function does not perform any semantic analysis. +fn updateFileNamespace(pt: Zcu.PerThread, file_index: Zcu.File.Index) Allocator.Error!void { + const zcu = pt.zcu; - const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended; - const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); + const file = zcu.fileByIndex(file_index); + assert(file.status == .success_zir); + const file_root_type = zcu.fileRootType(file_index); + if (file_root_type == .none) return; - var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len; - extra_index += @intFromBool(small.has_fields_len); - const decls_len = if (small.has_decls_len) blk: { - const decls_len = file.zir.extra[extra_index]; - extra_index += 1; - break :blk decls_len; - } else 0; - const decls = file.zir.bodySlice(extra_index, decls_len); + log.debug("updateFileNamespace mod={s} sub_file_path={s}", .{ + file.mod.fully_qualified_name, + file.sub_file_path, + }); - if (!type_outdated) { - try pt.scanNamespace(namespace_index, decls); - } + const namespace_index = Type.fromInterned(file_root_type).getNamespaceIndex(zcu); + const decls = decls: { + const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended; + const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); - return false; + var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len; + extra_index += @intFromBool(small.has_fields_len); + const decls_len = if (small.has_decls_len) blk: { + const decls_len = file.zir.extra[extra_index]; + extra_index += 1; + break :blk decls_len; + } else 0; + break :decls file.zir.bodySlice(extra_index, decls_len); + }; + try pt.scanNamespace(namespace_index, decls); } /// Regardless of the file status, will create a `Decl` if none exists so that we can 
track @@ -1052,7 +1068,7 @@ fn semaCau(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) !SemaCauResult { const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index }); const cau = ip.getCau(cau_index); - const inst_info = cau.zir_index.resolveFull(ip); + const inst_info = cau.zir_index.resolveFull(ip) orelse return error.AnalysisFail; const file = zcu.fileByIndex(inst_info.file); const zir = file.zir; @@ -1944,6 +1960,9 @@ const ScanDeclIter = struct { const cau, const nav = if (existing_cau) |cau_index| cau_nav: { const nav_index = ip.getCau(cau_index).owner.unwrap().nav; const nav = ip.getNav(nav_index); + if (nav.name != name) { + std.debug.panic("'{}' vs '{}'", .{ nav.name.fmt(ip), name.fmt(ip) }); + } assert(nav.name == name); assert(nav.fqn == fqn); break :cau_nav .{ cau_index, nav_index }; @@ -2011,7 +2030,7 @@ fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaError! const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index }); const func = zcu.funcInfo(func_index); - const inst_info = func.zir_body_inst.resolveFull(ip); + const inst_info = func.zir_body_inst.resolveFull(ip) orelse return error.AnalysisFail; const file = zcu.fileByIndex(inst_info.file); const zir = file.zir; @@ -2097,7 +2116,7 @@ fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaError! }; defer inner_block.instructions.deinit(gpa); - const fn_info = sema.code.getFnInfo(func.zirBodyInstUnordered(ip).resolve(ip)); + const fn_info = sema.code.getFnInfo(func.zirBodyInstUnordered(ip).resolve(ip) orelse return error.AnalysisFail); // Here we are performing "runtime semantic analysis" for a function body, which means // we must map the parameter ZIR instructions to `arg` AIR instructions. 
diff --git a/src/codegen.zig b/src/codegen.zig index af77dcc50c..0c592c6f19 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -98,7 +98,7 @@ pub fn generateLazyFunction( debug_output: DebugInfoOutput, ) CodeGenError!Result { const zcu = pt.zcu; - const file = Type.fromInterned(lazy_sym.ty).typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(&zcu.intern_pool).file; + const file = Type.fromInterned(lazy_sym.ty).typeDeclInstAllowGeneratedTag(zcu).?.resolveFile(&zcu.intern_pool); const target = zcu.fileByIndex(file).mod.resolved_target.result; switch (target_util.zigBackend(target, false)) { else => unreachable, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 03a1ea3746..7c35a178a0 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -2585,7 +2585,7 @@ pub fn genTypeDecl( const ty = Type.fromInterned(index); _ = try renderTypePrefix(.flush, global_ctype_pool, zcu, writer, global_ctype, .suffix, .{}); try writer.writeByte(';'); - const file_scope = ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file; + const file_scope = ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFile(ip); if (!zcu.fileByIndex(file_scope).mod.strip) try writer.print(" /* {} */", .{ ty.containerTypeName(ip).fmt(ip), }); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index fd95082428..acceea175a 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1959,7 +1959,7 @@ pub const Object = struct { ); } - const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file); + const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFile(ip)); const scope = if (ty.getParentNamespace(zcu).unwrap()) |parent_namespace| try o.namespaceToDebugScope(parent_namespace) else @@ -2137,7 +2137,7 @@ pub const Object = struct { const name = try o.allocTypeName(ty); defer gpa.free(name); - const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file); + const file = try 
o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFile(ip)); const scope = if (ty.getParentNamespace(zcu).unwrap()) |parent_namespace| try o.namespaceToDebugScope(parent_namespace) else @@ -2772,7 +2772,7 @@ pub const Object = struct { fn makeEmptyNamespaceDebugType(o: *Object, ty: Type) !Builder.Metadata { const zcu = o.pt.zcu; const ip = &zcu.intern_pool; - const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file); + const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFile(ip)); const scope = if (ty.getParentNamespace(zcu).unwrap()) |parent_namespace| try o.namespaceToDebugScope(parent_namespace) else From b65865b027f5531408654eae82cec05468b2c082 Mon Sep 17 00:00:00 2001 From: mlugg Date: Mon, 12 Aug 2024 02:03:34 +0100 Subject: [PATCH 02/25] tools: improve incr-check And add a new incremental test to match! --- test/incremental/add_decl | 59 +++++++++++++++++++++++++++++++++ tools/incr-check.zig | 68 ++++++++++++++++++++++++++++++++++----- 2 files changed, 119 insertions(+), 8 deletions(-) create mode 100644 test/incremental/add_decl diff --git a/test/incremental/add_decl b/test/incremental/add_decl new file mode 100644 index 0000000000..376a725efc --- /dev/null +++ b/test/incremental/add_decl @@ -0,0 +1,59 @@ +#target=x86_64-linux +#update=initial version +#file=main.zig +const std = @import("std"); +pub fn main() !void { + try std.io.getStdOut().writeAll(foo); +} +const foo = "good morning\n"; +#expect_stdout="good morning\n" + +#update=add new declaration +#file=main.zig +const std = @import("std"); +pub fn main() !void { + try std.io.getStdOut().writeAll(foo); +} +const foo = "good morning\n"; +const bar = "good evening\n"; +#expect_stdout="good morning\n" + +#update=reference new declaration +#file=main.zig +const std = @import("std"); +pub fn main() !void { + try std.io.getStdOut().writeAll(bar); +} +const foo = "good morning\n"; +const bar = "good evening\n"; +#expect_stdout="good 
evening\n" + +#update=reference missing declaration +#file=main.zig +const std = @import("std"); +pub fn main() !void { + try std.io.getStdOut().writeAll(qux); +} +const foo = "good morning\n"; +const bar = "good evening\n"; +#expect_error=ignored + +#update=add missing declaration +#file=main.zig +const std = @import("std"); +pub fn main() !void { + try std.io.getStdOut().writeAll(qux); +} +const foo = "good morning\n"; +const bar = "good evening\n"; +const qux = "good night\n"; +#expect_stdout="good night\n" + +#update=remove unused declarations +#file=main.zig +const std = @import("std"); +pub fn main() !void { + try std.io.getStdOut().writeAll(qux); +} +const qux = "good night\n"; +#expect_stdout="good night\n" diff --git a/tools/incr-check.zig b/tools/incr-check.zig index 56858b88a3..803daa05f4 100644 --- a/tools/incr-check.zig +++ b/tools/incr-check.zig @@ -2,14 +2,41 @@ const std = @import("std"); const fatal = std.process.fatal; const Allocator = std.mem.Allocator; +const usage = "usage: incr-check [-fno-emit-bin] [--zig-lib-dir lib]"; + pub fn main() !void { var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena_instance.deinit(); const arena = arena_instance.allocator(); - const args = try std.process.argsAlloc(arena); - const zig_exe = args[1]; - const input_file_name = args[2]; + var opt_zig_exe: ?[]const u8 = null; + var opt_input_file_name: ?[]const u8 = null; + var opt_lib_dir: ?[]const u8 = null; + var no_bin = false; + + var arg_it = try std.process.argsWithAllocator(arena); + _ = arg_it.skip(); + while (arg_it.next()) |arg| { + if (arg.len > 0 and arg[0] == '-') { + if (std.mem.eql(u8, arg, "-fno-emit-bin")) { + no_bin = true; + } else if (std.mem.eql(u8, arg, "--zig-lib-dir")) { + opt_lib_dir = arg_it.next() orelse fatal("expected arg after '--zig-lib-dir'\n{s}", .{usage}); + } else { + fatal("unknown option '{s}'\n{s}", .{ arg, usage }); + } + continue; + } + if (opt_zig_exe == null) { + opt_zig_exe = arg; + } 
else if (opt_input_file_name == null) { + opt_input_file_name = arg; + } else { + fatal("unknown argument '{s}'\n{s}", .{ arg, usage }); + } + } + const zig_exe = opt_zig_exe orelse fatal("missing path to zig\n{s}", .{usage}); + const input_file_name = opt_input_file_name orelse fatal("missing input file\n{s}", .{usage}); const input_file_bytes = try std.fs.cwd().readFileAlloc(arena, input_file_name, std.math.maxInt(u32)); const case = try Case.parse(arena, input_file_bytes); @@ -24,13 +51,12 @@ pub fn main() !void { const child_prog_node = prog_node.start("zig build-exe", 0); defer child_prog_node.end(); - var child = std.process.Child.init(&.{ + var child_args: std.ArrayListUnmanaged([]const u8) = .{}; + try child_args.appendSlice(arena, &.{ // Convert incr-check-relative path to subprocess-relative path. try std.fs.path.relative(arena, tmp_dir_path, zig_exe), "build-exe", case.root_source_file, - "-fno-llvm", - "-fno-lld", "-fincremental", "-target", case.target_query, @@ -39,8 +65,17 @@ pub fn main() !void { "--global-cache-dir", ".global_cache", "--listen=-", - }, arena); + }); + if (opt_lib_dir) |lib_dir| { + try child_args.appendSlice(arena, &.{ "--zig-lib-dir", lib_dir }); + } + if (no_bin) { + try child_args.append(arena, "-fno-emit-bin"); + } else { + try child_args.appendSlice(arena, &.{ "-fno-llvm", "-fno-lld" }); + } + var child = std.process.Child.init(child_args.items, arena); child.stdin_behavior = .Pipe; child.stdout_behavior = .Pipe; child.stderr_behavior = .Pipe; @@ -65,6 +100,8 @@ pub fn main() !void { defer poller.deinit(); for (case.updates) |update| { + var update_node = prog_node.start(update.name, 0); + defer update_node.end(); eval.write(update); try eval.requestUpdate(); try eval.check(&poller, update); @@ -138,7 +175,17 @@ const Eval = struct { const stderr_data = try stderr.toOwnedSlice(); fatal("error_bundle included unexpected stderr:\n{s}", .{stderr_data}); } - try eval.checkErrorOutcome(update, result_error_bundle); + if 
(result_error_bundle.errorMessageCount() == 0) { + // Empty bundle indicates successful update in a `-fno-emit-bin` build. + // We can't do a full success check since we don't have a binary, but let's + // at least check that no errors were expected. + switch (update.outcome) { + .unknown, .stdout, .exit_code => {}, + .compile_errors => fatal("expected compile errors but compilation incorrectly succeeded", .{}), + } + } else { + try eval.checkErrorOutcome(update, result_error_bundle); + } // This message indicates the end of the update. stdout.discard(body.len); return; @@ -357,6 +404,11 @@ const Case = struct { fatal("line {d}: bad string literal: {s}", .{ line_n, @errorName(err) }); }, }; + } else if (std.mem.eql(u8, key, "expect_error")) { + if (updates.items.len == 0) fatal("line {d}: expect directive before update", .{line_n}); + const last_update = &updates.items[updates.items.len - 1]; + if (last_update.outcome != .unknown) fatal("line {d}: conflicting expect directive", .{line_n}); + last_update.outcome = .{ .compile_errors = &.{} }; } else { fatal("line {d}: unrecognized key '{s}'", .{ line_n, key }); } From 6faa4cc7e60c2ecd26759878a6f9e277d69a4968 Mon Sep 17 00:00:00 2001 From: mlugg Date: Mon, 12 Aug 2024 11:02:18 +0100 Subject: [PATCH 03/25] Zcu: construct full reference graph This commit updates `Zcu.resolveReferences` to traverse the graph of `AnalUnit` references (starting from the 1-3 roots of analysis) in order to determine which `AnalUnit`s are referenced in an update. Errors for unreferenced entities are omitted from the error bundle. However, note that unreferenced `Nav`s are not removed from the binary. 
--- src/Compilation.zig | 44 +++++--- src/Sema.zig | 76 ++++++++++--- src/Zcu.zig | 260 ++++++++++++++++++++++++++++++++++++++++---- 3 files changed, 330 insertions(+), 50 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index fd55db0139..f2d61db0fd 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2264,13 +2264,19 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { } } + zcu.analysis_roots.resize(0) catch unreachable; + try comp.queueJob(.{ .analyze_mod = std_mod }); + zcu.analysis_roots.appendAssumeCapacity(std_mod); + if (comp.config.is_test) { try comp.queueJob(.{ .analyze_mod = zcu.main_mod }); + zcu.analysis_roots.appendAssumeCapacity(zcu.main_mod); } if (zcu.root_mod.deps.get("compiler_rt")) |compiler_rt_mod| { try comp.queueJob(.{ .analyze_mod = compiler_rt_mod }); + zcu.analysis_roots.appendAssumeCapacity(compiler_rt_mod); } } @@ -3059,6 +3065,9 @@ pub fn totalErrorCount(comp: *Compilation) u32 { if (comp.module) |zcu| { const ip = &zcu.intern_pool; + var all_references = try zcu.resolveReferences(); + defer all_references.deinit(zcu.gpa); + total += zcu.failed_exports.count(); total += zcu.failed_embed_files.count(); @@ -3079,6 +3088,7 @@ pub fn totalErrorCount(comp: *Compilation) u32 { // the previous parse success, including compile errors, but we cannot // emit them until the file succeeds parsing. 
for (zcu.failed_analysis.keys()) |anal_unit| { + if (!all_references.contains(anal_unit)) continue; const file_index = switch (anal_unit.unwrap()) { .cau => |cau| zcu.namespacePtr(ip.getCau(cau).namespace).file_scope, .func => |ip_index| (zcu.funcInfo(ip_index).zir_body_inst.resolveFull(ip) orelse continue).file, @@ -3116,12 +3126,6 @@ pub fn totalErrorCount(comp: *Compilation) u32 { } } - if (comp.module) |zcu| { - if (total == 0 and zcu.transitive_failed_analysis.count() > 0) { - @panic("Transitive analysis errors, but none actually emitted"); - } - } - return @intCast(total); } @@ -3167,12 +3171,13 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { .msg = try bundle.addString("memory allocation failure"), }); } + + var all_references = if (comp.module) |zcu| try zcu.resolveReferences() else undefined; + defer if (comp.module != null) all_references.deinit(gpa); + if (comp.module) |zcu| { const ip = &zcu.intern_pool; - var all_references = try zcu.resolveReferences(); - defer all_references.deinit(gpa); - for (zcu.failed_files.keys(), zcu.failed_files.values()) |file, error_msg| { if (error_msg) |msg| { try addModuleErrorMsg(zcu, &bundle, msg.*, &all_references); @@ -3220,6 +3225,8 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { if (err) |e| return e; } for (zcu.failed_analysis.keys(), zcu.failed_analysis.values()) |anal_unit, error_msg| { + if (!all_references.contains(anal_unit)) continue; + const file_index = switch (anal_unit.unwrap()) { .cau => |cau| zcu.namespacePtr(ip.getCau(cau).namespace).file_scope, .func => |ip_index| (zcu.funcInfo(ip_index).zir_body_inst.resolveFull(ip) orelse continue).file, @@ -3313,9 +3320,6 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { if (comp.module) |zcu| { if (bundle.root_list.items.len == 0 and zcu.compile_log_sources.count() != 0) { - var all_references = try zcu.resolveReferences(); - defer all_references.deinit(gpa); - const values = zcu.compile_log_sources.values(); // First one 
will be the error; subsequent ones will be notes. const src_loc = values[0].src(); @@ -3339,6 +3343,17 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { assert(comp.totalErrorCount() == bundle.root_list.items.len); + if (comp.module) |zcu| { + if (bundle.root_list.items.len == 0) { + const should_have_error = for (zcu.transitive_failed_analysis.keys()) |failed_unit| { + if (all_references.contains(failed_unit)) break true; + } else false; + if (should_have_error) { + @panic("referenced transitive analysis errors, but none actually emitted"); + } + } + } + const compile_log_text = if (comp.module) |m| m.compile_log_text.items else ""; return bundle.toOwnedBundle(compile_log_text); } @@ -3393,7 +3408,7 @@ pub fn addModuleErrorMsg( mod: *Zcu, eb: *ErrorBundle.Wip, module_err_msg: Zcu.ErrorMsg, - all_references: *const std.AutoHashMapUnmanaged(InternPool.AnalUnit, Zcu.ResolvedReference), + all_references: *const std.AutoHashMapUnmanaged(InternPool.AnalUnit, ?Zcu.ResolvedReference), ) !void { const gpa = eb.gpa; const ip = &mod.intern_pool; @@ -3423,7 +3438,8 @@ pub fn addModuleErrorMsg( const max_references = mod.comp.reference_trace orelse Sema.default_reference_trace_len; var referenced_by = rt_root; - while (all_references.get(referenced_by)) |ref| { + while (all_references.get(referenced_by)) |maybe_ref| { + const ref = maybe_ref orelse break; const gop = try seen.getOrPut(gpa, ref.referencer); if (gop.found_existing) break; if (ref_traces.items.len < max_references) { diff --git a/src/Sema.zig b/src/Sema.zig index 84999c8a86..1a74b88e05 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -110,6 +110,7 @@ exports: std.ArrayListUnmanaged(Zcu.Export) = .{}, /// of data stored in `Zcu.all_references`. It exists to avoid adding references to /// a given `AnalUnit` multiple times. 
references: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{}, +type_references: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}, const MaybeComptimeAlloc = struct { /// The runtime index of the `alloc` instruction. @@ -877,6 +878,7 @@ pub fn deinit(sema: *Sema) void { sema.comptime_allocs.deinit(gpa); sema.exports.deinit(gpa); sema.references.deinit(gpa); + sema.type_references.deinit(gpa); sema.* = undefined; } @@ -2809,7 +2811,11 @@ fn zirStructDecl( }; const wip_ty = sema.wrapWipTy(switch (try ip.getStructType(gpa, pt.tid, struct_init)) { .existing => |ty| wip: { - if (!try sema.maybeRemoveOutdatedType(ty)) return Air.internedToRef(ty); + if (!try sema.maybeRemoveOutdatedType(ty)) { + try sema.declareDependency(.{ .interned = ty }); + try sema.addTypeReferenceEntry(src, ty); + return Air.internedToRef(ty); + } break :wip (try ip.getStructType(gpa, pt.tid, struct_init)).wip; }, .wip => |wip| wip, @@ -2850,8 +2856,8 @@ fn zirStructDecl( if (block.ownerModule().strip) break :codegen_type; try mod.comp.queueJob(.{ .codegen_type = wip_ty.index }); } - try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = new_cau_index })); try sema.declareDependency(.{ .interned = wip_ty.index }); + try sema.addTypeReferenceEntry(src, wip_ty.index); return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index)); } @@ -3031,7 +3037,11 @@ fn zirEnumDecl( }; const wip_ty = sema.wrapWipTy(switch (try ip.getEnumType(gpa, pt.tid, enum_init)) { .existing => |ty| wip: { - if (!try sema.maybeRemoveOutdatedType(ty)) return Air.internedToRef(ty); + if (!try sema.maybeRemoveOutdatedType(ty)) { + try sema.declareDependency(.{ .interned = ty }); + try sema.addTypeReferenceEntry(src, ty); + return Air.internedToRef(ty); + } break :wip (try ip.getEnumType(gpa, pt.tid, enum_init)).wip; }, .wip => |wip| wip, @@ -3071,8 +3081,8 @@ fn zirEnumDecl( try pt.scanNamespace(new_namespace_index, decls); - try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = 
new_cau_index })); try sema.declareDependency(.{ .interned = wip_ty.index }); + try sema.addTypeReferenceEntry(src, wip_ty.index); // We've finished the initial construction of this type, and are about to perform analysis. // Set the Cau and namespace appropriately, and don't destroy anything on failure. @@ -3297,7 +3307,11 @@ fn zirUnionDecl( }; const wip_ty = sema.wrapWipTy(switch (try ip.getUnionType(gpa, pt.tid, union_init)) { .existing => |ty| wip: { - if (!try sema.maybeRemoveOutdatedType(ty)) return Air.internedToRef(ty); + if (!try sema.maybeRemoveOutdatedType(ty)) { + try sema.declareDependency(.{ .interned = ty }); + try sema.addTypeReferenceEntry(src, ty); + return Air.internedToRef(ty); + } break :wip (try ip.getUnionType(gpa, pt.tid, union_init)).wip; }, .wip => |wip| wip, @@ -3338,8 +3352,8 @@ fn zirUnionDecl( if (block.ownerModule().strip) break :codegen_type; try mod.comp.queueJob(.{ .codegen_type = wip_ty.index }); } - try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = new_cau_index })); try sema.declareDependency(.{ .interned = wip_ty.index }); + try sema.addTypeReferenceEntry(src, wip_ty.index); return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index)); } @@ -3388,7 +3402,10 @@ fn zirOpaqueDecl( // No `wrapWipTy` needed as no std.builtin types are opaque. const wip_ty = switch (try ip.getOpaqueType(gpa, pt.tid, opaque_init)) { // No `maybeRemoveOutdatedType` as opaque types are never outdated. 
- .existing => |ty| return Air.internedToRef(ty), + .existing => |ty| { + try sema.addTypeReferenceEntry(src, ty); + return Air.internedToRef(ty); + }, .wip => |wip| wip, }; errdefer wip_ty.cancel(ip, pt.tid); @@ -3416,6 +3433,7 @@ fn zirOpaqueDecl( if (block.ownerModule().strip) break :codegen_type; try mod.comp.queueJob(.{ .codegen_type = wip_ty.index }); } + try sema.addTypeReferenceEntry(src, wip_ty.index); return Air.internedToRef(wip_ty.finish(ip, .none, new_namespace_index)); } @@ -21820,7 +21838,10 @@ fn zirReify( .zir_index = try block.trackZir(inst), } }, })) { - .existing => |ty| return Air.internedToRef(ty), + .existing => |ty| { + try sema.addTypeReferenceEntry(src, ty); + return Air.internedToRef(ty); + }, .wip => |wip| wip, }; errdefer wip_ty.cancel(ip, pt.tid); @@ -21839,6 +21860,7 @@ fn zirReify( .file_scope = block.getFileScopeIndex(mod), }); + try sema.addTypeReferenceEntry(src, wip_ty.index); return Air.internedToRef(wip_ty.finish(ip, .none, new_namespace_index)); }, .Union => { @@ -22020,7 +22042,11 @@ fn reifyEnum( } }, })) { .wip => |wip| wip, - .existing => |ty| return Air.internedToRef(ty), + .existing => |ty| { + try sema.declareDependency(.{ .interned = ty }); + try sema.addTypeReferenceEntry(src, ty); + return Air.internedToRef(ty); + }, }; errdefer wip_ty.cancel(ip, pt.tid); @@ -22044,6 +22070,8 @@ fn reifyEnum( const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index); + try sema.declareDependency(.{ .interned = wip_ty.index }); + try sema.addTypeReferenceEntry(src, wip_ty.index); wip_ty.prepare(ip, new_cau_index, new_namespace_index); wip_ty.setTagTy(ip, tag_ty.toIntern()); @@ -22182,7 +22210,11 @@ fn reifyUnion( } }, })) { .wip => |wip| wip, - .existing => |ty| return Air.internedToRef(ty), + .existing => |ty| { + try sema.declareDependency(.{ .interned = ty }); + try sema.addTypeReferenceEntry(src, ty); + return Air.internedToRef(ty); + }, }; errdefer wip_ty.cancel(ip, pt.tid); @@ 
-22347,7 +22379,8 @@ fn reifyUnion( if (block.ownerModule().strip) break :codegen_type; try mod.comp.queueJob(.{ .codegen_type = wip_ty.index }); } - try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = new_cau_index })); + try sema.declareDependency(.{ .interned = wip_ty.index }); + try sema.addTypeReferenceEntry(src, wip_ty.index); return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index)); } @@ -22447,7 +22480,11 @@ fn reifyStruct( } }, })) { .wip => |wip| wip, - .existing => |ty| return Air.internedToRef(ty), + .existing => |ty| { + try sema.declareDependency(.{ .interned = ty }); + try sema.addTypeReferenceEntry(src, ty); + return Air.internedToRef(ty); + }, }; errdefer wip_ty.cancel(ip, pt.tid); @@ -22625,7 +22662,8 @@ fn reifyStruct( if (block.ownerModule().strip) break :codegen_type; try mod.comp.queueJob(.{ .codegen_type = wip_ty.index }); } - try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = new_cau_index })); + try sema.declareDependency(.{ .interned = wip_ty.index }); + try sema.addTypeReferenceEntry(src, wip_ty.index); return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index)); } @@ -32231,6 +32269,18 @@ fn addReferenceEntry( try zcu.addUnitReference(sema.owner, referenced_unit, src); } +fn addTypeReferenceEntry( + sema: *Sema, + src: LazySrcLoc, + referenced_type: InternPool.Index, +) !void { + const zcu = sema.pt.zcu; + if (zcu.comp.reference_trace == 0) return; + const gop = try sema.type_references.getOrPut(sema.gpa, referenced_type); + if (gop.found_existing) return; + try zcu.addTypeReference(sema.owner, referenced_type, src); +} + pub fn ensureNavResolved(sema: *Sema, src: LazySrcLoc, nav_index: InternPool.Nav.Index) CompileError!void { const pt = sema.pt; const zcu = pt.zcu; diff --git a/src/Zcu.zig b/src/Zcu.zig index d0889ea62c..2ce001b92a 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -168,6 +168,10 @@ outdated_ready: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = 
.{}, /// it as outdated. retryable_failures: std.ArrayListUnmanaged(AnalUnit) = .{}, +/// These are the modules which we initially queue for analysis in `Compilation.update`. +/// `resolveReferences` will use these as the root of its reachability traversal. +analysis_roots: std.BoundedArray(*Package.Module, 3) = .{}, + stage1_flags: packed struct { have_winmain: bool = false, have_wwinmain: bool = false, @@ -186,7 +190,7 @@ global_assembly: std.AutoArrayHashMapUnmanaged(InternPool.Cau.Index, []u8) = .{} /// Key is the `AnalUnit` *performing* the reference. This representation allows /// incremental updates to quickly delete references caused by a specific `AnalUnit`. -/// Value is index into `all_reference` of the first reference triggered by the unit. +/// Value is index into `all_references` of the first reference triggered by the unit. /// The `next` field on the `Reference` forms a linked list of all references /// triggered by the key `AnalUnit`. reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{}, @@ -194,6 +198,16 @@ all_references: std.ArrayListUnmanaged(Reference) = .{}, /// Freelist of indices in `all_references`. free_references: std.ArrayListUnmanaged(u32) = .{}, +/// Key is the `AnalUnit` *performing* the reference. This representation allows +/// incremental updates to quickly delete references caused by a specific `AnalUnit`. +/// Value is index into `all_type_references` of the first reference triggered by the unit. +/// The `next` field on the `TypeReference` forms a linked list of all type references +/// triggered by the key `AnalUnit`. +type_reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{}, +all_type_references: std.ArrayListUnmanaged(TypeReference) = .{}, +/// Freelist of indices in `all_type_references`. +free_type_references: std.ArrayListUnmanaged(u32) = .{}, + panic_messages: [PanicId.len]InternPool.Nav.Index.Optional = .{.none} ** PanicId.len, /// The panic function body.
panic_func_index: InternPool.Index = .none, @@ -302,6 +316,16 @@ pub const Reference = struct { src: LazySrcLoc, }; +pub const TypeReference = struct { + /// The container type which was referenced. + referenced: InternPool.Index, + /// Index into `all_type_references` of the next `TypeReference` triggered by the same `AnalUnit`. + /// `std.math.maxInt(u32)` is the sentinel. + next: u32, + /// The source location of the reference. + src: LazySrcLoc, +}; + /// The container that structs, enums, unions, and opaques have. pub const Namespace = struct { parent: OptionalIndex, @@ -2155,6 +2179,10 @@ pub fn deinit(zcu: *Zcu) void { zcu.all_references.deinit(gpa); zcu.free_references.deinit(gpa); + zcu.type_reference_table.deinit(gpa); + zcu.all_type_references.deinit(gpa); + zcu.free_type_references.deinit(gpa); + zcu.intern_pool.deinit(gpa); } @@ -2660,16 +2688,32 @@ pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void { pub fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void { const gpa = zcu.gpa; - const kv = zcu.reference_table.fetchSwapRemove(anal_unit) orelse return; - var idx = kv.value; + unit_refs: { + const kv = zcu.reference_table.fetchSwapRemove(anal_unit) orelse return; + var idx = kv.value; - while (idx != std.math.maxInt(u32)) { - zcu.free_references.append(gpa, idx) catch { - // This space will be reused eventually, so we need not propagate this error. - // Just leak it for now, and let GC reclaim it later on. - return; - }; - idx = zcu.all_references.items[idx].next; + while (idx != std.math.maxInt(u32)) { + zcu.free_references.append(gpa, idx) catch { + // This space will be reused eventually, so we need not propagate this error. + // Just leak it for now, and let GC reclaim it later on. 
+ break :unit_refs; + }; + idx = zcu.all_references.items[idx].next; + } + } + + type_refs: { + const kv = zcu.type_reference_table.fetchSwapRemove(anal_unit) orelse return; + var idx = kv.value; + + while (idx != std.math.maxInt(u32)) { + zcu.free_type_references.append(gpa, idx) catch { + // This space will be reused eventually, so we need not propagate this error. + // Just leak it for now, and let GC reclaim it later on. + break :type_refs; + }; + idx = zcu.all_type_references.items[idx].next; + } } } @@ -2696,6 +2740,29 @@ pub fn addUnitReference(zcu: *Zcu, src_unit: AnalUnit, referenced_unit: AnalUnit gop.value_ptr.* = @intCast(ref_idx); } +pub fn addTypeReference(zcu: *Zcu, src_unit: AnalUnit, referenced_type: InternPool.Index, ref_src: LazySrcLoc) Allocator.Error!void { + const gpa = zcu.gpa; + + try zcu.type_reference_table.ensureUnusedCapacity(gpa, 1); + + const ref_idx = zcu.free_type_references.popOrNull() orelse idx: { + _ = try zcu.all_type_references.addOne(gpa); + break :idx zcu.all_type_references.items.len - 1; + }; + + errdefer comptime unreachable; + + const gop = zcu.type_reference_table.getOrPutAssumeCapacity(src_unit); + + zcu.all_type_references.items[ref_idx] = .{ + .referenced = referenced_type, + .next = if (gop.found_existing) gop.value_ptr.* else std.math.maxInt(u32), + .src = ref_src, + }; + + gop.value_ptr.* = @intCast(ref_idx); +} + pub fn errorSetBits(mod: *Zcu) u16 { if (mod.error_limit == 0) return 0; return @as(u16, std.math.log2_int(ErrorInt, mod.error_limit)) + 1; @@ -2990,28 +3057,175 @@ pub const ResolvedReference = struct { }; /// Returns a mapping from an `AnalUnit` to where it is referenced. -/// TODO: in future, this must be adapted to traverse from roots of analysis. That way, we can -/// use the returned map to determine which units have become unreferenced in an incremental update. 
-pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ResolvedReference) { +/// If the value is `null`, the `AnalUnit` is a root of analysis. +/// If an `AnalUnit` is not in the returned map, it is unreferenced. +pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) { const gpa = zcu.gpa; + const comp = zcu.comp; + const ip = &zcu.intern_pool; - var result: std.AutoHashMapUnmanaged(AnalUnit, ResolvedReference) = .{}; + var result: std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) = .{}; errdefer result.deinit(gpa); + var checked_types: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}; + var type_queue: std.AutoArrayHashMapUnmanaged(InternPool.Index, ?ResolvedReference) = .{}; + var unit_queue: std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) = .{}; + defer { + checked_types.deinit(gpa); + type_queue.deinit(gpa); + unit_queue.deinit(gpa); + } + // This is not a sufficient size, but a lower bound. try result.ensureTotalCapacity(gpa, @intCast(zcu.reference_table.count())); - for (zcu.reference_table.keys(), zcu.reference_table.values()) |referencer, first_ref_idx| { - assert(first_ref_idx != std.math.maxInt(u32)); - var ref_idx = first_ref_idx; - while (ref_idx != std.math.maxInt(u32)) { - const ref = zcu.all_references.items[ref_idx]; - const gop = try result.getOrPut(gpa, ref.referenced); - if (!gop.found_existing) { - gop.value_ptr.* = .{ .referencer = referencer, .src = ref.src }; + try type_queue.ensureTotalCapacity(gpa, zcu.analysis_roots.len); + for (zcu.analysis_roots.slice()) |mod| { + // Logic ripped from `Zcu.PerThread.importPkg`. + // TODO: this is silly, `Module` should just store a reference to its root `File`. 
+ const resolved_path = try std.fs.path.resolve(gpa, &.{ + mod.root.root_dir.path orelse ".", + mod.root.sub_path, + mod.root_src_path, + }); + defer gpa.free(resolved_path); + const file = zcu.import_table.get(resolved_path).?; + if (zcu.fileByIndex(file).status != .success_zir) continue; + const root_ty = zcu.fileRootType(file); + if (root_ty == .none) continue; + type_queue.putAssumeCapacityNoClobber(root_ty, null); + } + + while (true) { + if (type_queue.popOrNull()) |kv| { + const ty = kv.key; + const referencer = kv.value; + try checked_types.putNoClobber(gpa, ty, {}); + + // If this type has a `Cau` for resolution, it's automatically referenced. + const resolution_cau: InternPool.Cau.Index.Optional = switch (ip.indexToKey(ty)) { + .struct_type => ip.loadStructType(ty).cau, + .union_type => ip.loadUnionType(ty).cau.toOptional(), + .enum_type => ip.loadEnumType(ty).cau, + .opaque_type => .none, + else => unreachable, + }; + if (resolution_cau.unwrap()) |cau| { + // this should only be referenced by the type + const unit = AnalUnit.wrap(.{ .cau = cau }); + assert(!result.contains(unit)); + try unit_queue.putNoClobber(gpa, unit, referencer); } - ref_idx = ref.next; + + // If this is a union with a generated tag, its tag type is automatically referenced. + // We don't add this reference for non-generated tags, as those will already be referenced via the union's `Cau`, with a better source location. + if (zcu.typeToUnion(Type.fromInterned(ty))) |union_obj| { + const tag_ty = union_obj.enum_tag_ty; + if (tag_ty != .none) { + if (ip.indexToKey(tag_ty).enum_type == .generated_tag) { + if (!checked_types.contains(tag_ty)) { + try type_queue.put(gpa, tag_ty, referencer); + } + } + } + } + + // Queue any decls within this type which would be automatically analyzed. + // Keep in sync with analysis queueing logic in `Zcu.PerThread.ScanDeclIter.scanDecl`. 
+ const ns = Type.fromInterned(ty).getNamespace(zcu).unwrap() orelse continue; + for (zcu.namespacePtr(ns).other_decls.items) |cau| { + // These are `comptime` and `test` declarations. + // `comptime` decls are always analyzed; `test` declarations are analyzed depending on the test filter. + const inst_info = ip.getCau(cau).zir_index.resolveFull(ip) orelse continue; + const file = zcu.fileByIndex(inst_info.file); + const zir = file.zir; + const declaration = zir.getDeclaration(inst_info.inst)[0]; + const want_analysis = switch (declaration.name) { + .@"usingnamespace" => unreachable, + .@"comptime" => true, + else => a: { + if (!comp.config.is_test) break :a false; + if (file.mod != zcu.main_mod) break :a false; + if (declaration.name.isNamedTest(zir) or declaration.name == .decltest) { + const nav = ip.getCau(cau).owner.unwrap().nav; + const fqn_slice = ip.getNav(nav).fqn.toSlice(ip); + for (comp.test_filters) |test_filter| { + if (std.mem.indexOf(u8, fqn_slice, test_filter) != null) break; + } else break :a false; + } + break :a true; + }, + }; + if (want_analysis) { + const unit = AnalUnit.wrap(.{ .cau = cau }); + if (!result.contains(unit)) try unit_queue.put(gpa, unit, referencer); + } + } + for (zcu.namespacePtr(ns).pub_decls.keys()) |nav| { + // These are named declarations. They are analyzed only if marked `export`. + const cau = ip.getNav(nav).analysis_owner.unwrap().?; + const inst_info = ip.getCau(cau).zir_index.resolveFull(ip) orelse continue; + const declaration = zcu.fileByIndex(inst_info.file).zir.getDeclaration(inst_info.inst)[0]; + if (declaration.flags.is_export) { + const unit = AnalUnit.wrap(.{ .cau = cau }); + if (!result.contains(unit)) try unit_queue.put(gpa, unit, referencer); + } + } + for (zcu.namespacePtr(ns).priv_decls.keys()) |nav| { + // These are named declarations. They are analyzed only if marked `export`. 
+ const cau = ip.getNav(nav).analysis_owner.unwrap().?; + const inst_info = ip.getCau(cau).zir_index.resolveFull(ip) orelse continue; + const declaration = zcu.fileByIndex(inst_info.file).zir.getDeclaration(inst_info.inst)[0]; + if (declaration.flags.is_export) { + const unit = AnalUnit.wrap(.{ .cau = cau }); + if (!result.contains(unit)) try unit_queue.put(gpa, unit, referencer); + } + } + // Incremental compilation does not support `usingnamespace`. + // These are only included to keep good reference traces in non-incremental updates. + for (zcu.namespacePtr(ns).pub_usingnamespace.items) |nav| { + const cau = ip.getNav(nav).analysis_owner.unwrap().?; + const unit = AnalUnit.wrap(.{ .cau = cau }); + if (!result.contains(unit)) try unit_queue.put(gpa, unit, referencer); + } + for (zcu.namespacePtr(ns).priv_usingnamespace.items) |nav| { + const cau = ip.getNav(nav).analysis_owner.unwrap().?; + const unit = AnalUnit.wrap(.{ .cau = cau }); + if (!result.contains(unit)) try unit_queue.put(gpa, unit, referencer); + } + continue; } + if (unit_queue.popOrNull()) |kv| { + const unit = kv.key; + try result.putNoClobber(gpa, unit, kv.value); + + if (zcu.reference_table.get(unit)) |first_ref_idx| { + assert(first_ref_idx != std.math.maxInt(u32)); + var ref_idx = first_ref_idx; + while (ref_idx != std.math.maxInt(u32)) { + const ref = zcu.all_references.items[ref_idx]; + if (!result.contains(ref.referenced)) try unit_queue.put(gpa, ref.referenced, .{ + .referencer = unit, + .src = ref.src, + }); + ref_idx = ref.next; + } + } + if (zcu.type_reference_table.get(unit)) |first_ref_idx| { + assert(first_ref_idx != std.math.maxInt(u32)); + var ref_idx = first_ref_idx; + while (ref_idx != std.math.maxInt(u32)) { + const ref = zcu.all_type_references.items[ref_idx]; + if (!checked_types.contains(ref.referenced)) try type_queue.put(gpa, ref.referenced, .{ + .referencer = unit, + .src = ref.src, + }); + ref_idx = ref.next; + } + } + continue; + } + break; } return result; From 
aa6c1c40ec29d581844ebb5db09a33453c76d4ba Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 13 Aug 2024 03:05:19 +0100 Subject: [PATCH 04/25] frontend: yet more incremental work --- src/Compilation.zig | 19 +-- src/Sema.zig | 50 +++---- src/Zcu.zig | 241 +++++++++++++++++++++++++++------ src/Zcu/PerThread.zig | 300 ++++++++++++++++++++++++++---------------- 4 files changed, 418 insertions(+), 192 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index f2d61db0fd..61f07d3e3b 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2300,7 +2300,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { zcu.intern_pool.dumpGenericInstances(gpa); } - if (comp.config.is_test and comp.totalErrorCount() == 0) { + if (comp.config.is_test and try comp.totalErrorCount() == 0) { // The `test_functions` decl has been intentionally postponed until now, // at which point we must populate it with the list of test functions that // have been discovered and not filtered out. @@ -2310,7 +2310,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { try pt.processExports(); } - if (comp.totalErrorCount() != 0) { + if (try comp.totalErrorCount() != 0) { // Skip flushing and keep source files loaded for error reporting. comp.link_error_flags = .{}; return; @@ -2394,7 +2394,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { } try flush(comp, arena, .main, main_progress_node); - if (comp.totalErrorCount() != 0) return; + if (try comp.totalErrorCount() != 0) return; // Failure here only means an unnecessary cache miss. 
man.writeManifest() catch |err| { @@ -2411,7 +2411,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { }, .incremental => { try flush(comp, arena, .main, main_progress_node); - if (comp.totalErrorCount() != 0) return; + if (try comp.totalErrorCount() != 0) return; }, } } @@ -3048,7 +3048,7 @@ fn addBuf(list: *std.ArrayList(std.posix.iovec_const), buf: []const u8) void { } /// This function is temporally single-threaded. -pub fn totalErrorCount(comp: *Compilation) u32 { +pub fn totalErrorCount(comp: *Compilation) Allocator.Error!u32 { var total: usize = comp.misc_failures.count() + @intFromBool(comp.alloc_failure_occurred) + @@ -3088,7 +3088,7 @@ pub fn totalErrorCount(comp: *Compilation) u32 { // the previous parse success, including compile errors, but we cannot // emit them until the file succeeds parsing. for (zcu.failed_analysis.keys()) |anal_unit| { - if (!all_references.contains(anal_unit)) continue; + if (comp.incremental and !all_references.contains(anal_unit)) continue; const file_index = switch (anal_unit.unwrap()) { .cau => |cau| zcu.namespacePtr(ip.getCau(cau).namespace).file_scope, .func => |ip_index| (zcu.funcInfo(ip_index).zir_body_inst.resolveFull(ip) orelse continue).file, @@ -3225,7 +3225,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { if (err) |e| return e; } for (zcu.failed_analysis.keys(), zcu.failed_analysis.values()) |anal_unit, error_msg| { - if (!all_references.contains(anal_unit)) continue; + if (comp.incremental and !all_references.contains(anal_unit)) continue; const file_index = switch (anal_unit.unwrap()) { .cau => |cau| zcu.namespacePtr(ip.getCau(cau).namespace).file_scope, @@ -3341,10 +3341,10 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { } } - assert(comp.totalErrorCount() == bundle.root_list.items.len); + assert(try comp.totalErrorCount() == bundle.root_list.items.len); if (comp.module) |zcu| { - if (bundle.root_list.items.len == 0) { + if (comp.incremental and 
bundle.root_list.items.len == 0) { const should_have_error = for (zcu.transitive_failed_analysis.keys()) |failed_unit| { if (all_references.contains(failed_unit)) break true; } else false; @@ -3448,6 +3448,7 @@ pub fn addModuleErrorMsg( const span = try src.span(gpa); const loc = std.zig.findLineColumn(source.bytes, span.main); const rt_file_path = try src.file_scope.fullPath(gpa); + defer gpa.free(rt_file_path); const name = switch (ref.referencer.unwrap()) { .cau => |cau| switch (ip.getCau(cau).owner.unwrap()) { .nav => |nav| ip.getNav(nav).name.toSlice(ip), diff --git a/src/Sema.zig b/src/Sema.zig index 1a74b88e05..2e970c43f4 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -112,6 +112,11 @@ exports: std.ArrayListUnmanaged(Zcu.Export) = .{}, references: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{}, type_references: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}, +/// All dependencies registered so far by this `Sema`. This is a temporary duplicate +/// of the main dependency data. It exists to avoid adding dependencies to a given +/// `AnalUnit` multiple times. +dependencies: std.AutoArrayHashMapUnmanaged(InternPool.Dependee, void) = .{}, + const MaybeComptimeAlloc = struct { /// The runtime index of the `alloc` instruction. 
runtime_index: Value.RuntimeIndex, @@ -879,6 +884,7 @@ pub fn deinit(sema: *Sema) void { sema.exports.deinit(gpa); sema.references.deinit(gpa); sema.type_references.deinit(gpa); + sema.dependencies.deinit(gpa); sema.* = undefined; } @@ -2740,7 +2746,7 @@ fn maybeRemoveOutdatedType(sema: *Sema, ty: InternPool.Index) !bool { _ = zcu.outdated_ready.swapRemove(cau_unit); zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, cau_unit); zcu.intern_pool.remove(pt.tid, ty); - try zcu.markDependeeOutdated(.{ .interned = ty }); + try zcu.markDependeeOutdated(.marked_po, .{ .interned = ty }); return true; } @@ -6066,7 +6072,9 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr // That way, if this returns `error.AnalysisFail`, we have the dependency banked ready to // trigger re-analysis later. try pt.ensureFileAnalyzed(result.file_index); - return Air.internedToRef(zcu.fileRootType(result.file_index)); + const ty = zcu.fileRootType(result.file_index); + try sema.addTypeReferenceEntry(src, ty); + return Air.internedToRef(ty); } fn zirSuspendBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -6820,6 +6828,13 @@ fn lookupInNamespace( const src_file = zcu.namespacePtr(block.namespace).file_scope; + if (Type.fromInterned(namespace.owner_type).typeDeclInst(zcu)) |type_decl_inst| { + try sema.declareDependency(.{ .namespace_name = .{ + .namespace = type_decl_inst, + .name = ident_name, + } }); + } + if (observe_usingnamespace and (namespace.pub_usingnamespace.items.len != 0 or namespace.priv_usingnamespace.items.len != 0)) { const gpa = sema.gpa; var checked_namespaces: std.AutoArrayHashMapUnmanaged(*Namespace, void) = .{}; @@ -13981,12 +13996,6 @@ fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }); try sema.checkNamespaceType(block, lhs_src, container_type); - if (container_type.typeDeclInst(mod)) |type_decl_inst| { - try sema.declareDependency(.{ .namespace_name = .{ - 
.namespace = type_decl_inst, - .name = decl_name, - } }); - } const namespace = container_type.getNamespace(mod).unwrap() orelse return .bool_false; if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |lookup| { @@ -14026,7 +14035,9 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. // That way, if this returns `error.AnalysisFail`, we have the dependency banked ready to // trigger re-analysis later. try pt.ensureFileAnalyzed(result.file_index); - return Air.internedToRef(zcu.fileRootType(result.file_index)); + const ty = zcu.fileRootType(result.file_index); + try sema.addTypeReferenceEntry(operand_src, ty); + return Air.internedToRef(ty); } fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -27696,13 +27707,6 @@ fn fieldVal( const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?; const child_type = val.toType(); - if (child_type.typeDeclInst(mod)) |type_decl_inst| { - try sema.declareDependency(.{ .namespace_name = .{ - .namespace = type_decl_inst, - .name = field_name, - } }); - } - switch (try child_type.zigTypeTagOrPoison(mod)) { .ErrorSet => { switch (ip.indexToKey(child_type.toIntern())) { @@ -27934,13 +27938,6 @@ fn fieldPtr( const val = (sema.resolveDefinedValue(block, src, inner) catch unreachable).?; const child_type = val.toType(); - if (child_type.typeDeclInst(mod)) |type_decl_inst| { - try sema.declareDependency(.{ .namespace_name = .{ - .namespace = type_decl_inst, - .name = field_name, - } }); - } - switch (child_type.zigTypeTag(mod)) { .ErrorSet => { switch (ip.indexToKey(child_type.toIntern())) { @@ -32260,7 +32257,7 @@ fn addReferenceEntry( referenced_unit: AnalUnit, ) !void { const zcu = sema.pt.zcu; - if (zcu.comp.reference_trace == 0) return; + if (!zcu.comp.incremental and zcu.comp.reference_trace == 0) return; const gop = try sema.references.getOrPut(sema.gpa, referenced_unit); if (gop.found_existing) return; // TODO: we 
need to figure out how to model inline calls here. @@ -32275,7 +32272,7 @@ fn addTypeReferenceEntry( referenced_type: InternPool.Index, ) !void { const zcu = sema.pt.zcu; - if (zcu.comp.reference_trace == 0) return; + if (!zcu.comp.incremental and zcu.comp.reference_trace == 0) return; const gop = try sema.type_references.getOrPut(sema.gpa, referenced_type); if (gop.found_existing) return; try zcu.addTypeReference(sema.owner, referenced_type, src); @@ -38272,6 +38269,9 @@ pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void { const zcu = sema.pt.zcu; if (!zcu.comp.incremental) return; + const gop = try sema.dependencies.getOrPut(sema.gpa, dependee); + if (gop.found_existing) return; + // Avoid creating dependencies on ourselves. This situation can arise when we analyze the fields // of a type and they use `@This()`. This dependency would be unnecessary, and in fact would // just result in over-analysis since `Zcu.findOutdatedToAnalyze` would never be able to resolve diff --git a/src/Zcu.zig b/src/Zcu.zig index 2ce001b92a..c78abb69bf 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -10,7 +10,7 @@ const builtin = @import("builtin"); const mem = std.mem; const Allocator = std.mem.Allocator; const assert = std.debug.assert; -const log = std.log.scoped(.module); +const log = std.log.scoped(.zcu); const BigIntConst = std.math.big.int.Const; const BigIntMutable = std.math.big.int.Mutable; const Target = std.Target; @@ -153,9 +153,11 @@ cimport_errors: std.AutoArrayHashMapUnmanaged(AnalUnit, std.zig.ErrorBundle) = . /// Maximum amount of distinct error values, set by --error-limit error_limit: ErrorInt, -/// Value is the number of PO or outdated Decls which this AnalUnit depends on. +/// Value is the number of PO dependencies of this AnalUnit. +/// This value will decrease as we perform semantic analysis to learn what is outdated. +/// If any of these PO deps is outdated, this value will be moved to `outdated`. 
potentially_outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{}, -/// Value is the number of PO or outdated Decls which this AnalUnit depends on. +/// Value is the number of PO dependencies of this AnalUnit. /// Once this value drops to 0, the AnalUnit is a candidate for re-analysis. outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{}, /// This contains all `AnalUnit`s in `outdated` whose PO dependency count is 0. @@ -2276,55 +2278,90 @@ pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.F return zir; } -pub fn markDependeeOutdated(zcu: *Zcu, dependee: InternPool.Dependee) !void { - log.debug("outdated dependee: {}", .{dependee}); +pub fn markDependeeOutdated( + zcu: *Zcu, + /// When we are diffing ZIR and marking things as outdated, we won't yet have marked the dependencies as PO. + /// However, when we discover during analysis that something was outdated, the `Dependee` was already + /// marked as PO, so we need to decrement the PO dep count for each depender. + marked_po: enum { not_marked_po, marked_po }, + dependee: InternPool.Dependee, +) !void { + log.debug("outdated dependee: {}", .{fmtDependee(dependee, zcu)}); var it = zcu.intern_pool.dependencyIterator(dependee); while (it.next()) |depender| { - if (zcu.outdated.contains(depender)) { - // We do not need to increment the PO dep count, as if the outdated - // dependee is a Decl, we had already marked this as PO. 
+ if (zcu.outdated.getPtr(depender)) |po_dep_count| { + switch (marked_po) { + .not_marked_po => {}, + .marked_po => { + po_dep_count.* -= 1; + log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), po_dep_count.* }); + if (po_dep_count.* == 0) { + log.debug("outdated ready: {}", .{fmtAnalUnit(depender, zcu)}); + try zcu.outdated_ready.put(zcu.gpa, depender, {}); + } + }, + } continue; } const opt_po_entry = zcu.potentially_outdated.fetchSwapRemove(depender); + const new_po_dep_count = switch (marked_po) { + .not_marked_po => if (opt_po_entry) |e| e.value else 0, + .marked_po => if (opt_po_entry) |e| e.value - 1 else { + // This dependency has been registered during in-progress analysis, but the unit is + // not in `potentially_outdated` because analysis is in-progress. Nothing to do. + continue; + }, + }; + log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), new_po_dep_count }); try zcu.outdated.putNoClobber( zcu.gpa, depender, - // We do not need to increment this count for the same reason as above. - if (opt_po_entry) |e| e.value else 0, + new_po_dep_count, ); - log.debug("outdated: {}", .{depender}); - if (opt_po_entry == null) { - // This is a new entry with no PO dependencies. + log.debug("outdated: {}", .{fmtAnalUnit(depender, zcu)}); + if (new_po_dep_count == 0) { + log.debug("outdated ready: {}", .{fmtAnalUnit(depender, zcu)}); try zcu.outdated_ready.put(zcu.gpa, depender, {}); } // If this is a Decl and was not previously PO, we must recursively // mark dependencies on its tyval as PO. 
if (opt_po_entry == null) { + assert(marked_po == .not_marked_po); try zcu.markTransitiveDependersPotentiallyOutdated(depender); } } } pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void { + log.debug("up-to-date dependee: {}", .{fmtDependee(dependee, zcu)}); var it = zcu.intern_pool.dependencyIterator(dependee); while (it.next()) |depender| { if (zcu.outdated.getPtr(depender)) |po_dep_count| { // This depender is already outdated, but it now has one // less PO dependency! po_dep_count.* -= 1; + log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), po_dep_count.* }); if (po_dep_count.* == 0) { + log.debug("outdated ready: {}", .{fmtAnalUnit(depender, zcu)}); try zcu.outdated_ready.put(zcu.gpa, depender, {}); } continue; } // This depender is definitely at least PO, because this Decl was just analyzed // due to being outdated. - const ptr = zcu.potentially_outdated.getPtr(depender).?; + const ptr = zcu.potentially_outdated.getPtr(depender) orelse { + // This dependency has been registered during in-progress analysis, but the unit is + // not in `potentially_outdated` because analysis is in-progress. Nothing to do. + continue; + }; if (ptr.* > 1) { ptr.* -= 1; + log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), ptr.* }); continue; } + log.debug("up-to-date (po deps = 0): {}", .{fmtAnalUnit(depender, zcu)}); + // This dependency is no longer PO, i.e. is known to be up-to-date. assert(zcu.potentially_outdated.swapRemove(depender)); // If this is a Decl, we must recursively mark dependencies on its tyval @@ -2344,14 +2381,16 @@ pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void { /// in turn be PO, due to a dependency on the original AnalUnit's tyval or IES. 
fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUnit) !void { const ip = &zcu.intern_pool; - var it = ip.dependencyIterator(switch (maybe_outdated.unwrap()) { + const dependee: InternPool.Dependee = switch (maybe_outdated.unwrap()) { .cau => |cau| switch (ip.getCau(cau).owner.unwrap()) { .nav => |nav| .{ .nav_val = nav }, // TODO: also `nav_ref` deps when introduced - .none, .type => return, // analysis of this `Cau` can't outdate any dependencies + .type => |ty| .{ .interned = ty }, + .none => return, // analysis of this `Cau` can't outdate any dependencies }, .func => |func_index| .{ .interned = func_index }, // IES - }); - + }; + log.debug("marking dependee po: {}", .{fmtDependee(dependee, zcu)}); + var it = ip.dependencyIterator(dependee); while (it.next()) |po| { if (zcu.outdated.getPtr(po)) |po_dep_count| { // This dependency is already outdated, but it now has one more PO @@ -2360,14 +2399,17 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni _ = zcu.outdated_ready.swapRemove(po); } po_dep_count.* += 1; + log.debug("po dep count: {} = {}", .{ fmtAnalUnit(po, zcu), po_dep_count.* }); continue; } if (zcu.potentially_outdated.getPtr(po)) |n| { // There is now one more PO dependency. n.* += 1; + log.debug("po dep count: {} = {}", .{ fmtAnalUnit(po, zcu), n.* }); continue; } try zcu.potentially_outdated.putNoClobber(zcu.gpa, po, 1); + log.debug("po dep count: {} = {}", .{ fmtAnalUnit(po, zcu), 1 }); // This AnalUnit was not already PO, so we must recursively mark its dependers as also PO. try zcu.markTransitiveDependersPotentiallyOutdated(po); } @@ -2391,13 +2433,9 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit { // In this case, we must defer to more complex logic below. 
if (zcu.outdated_ready.count() > 0) { - log.debug("findOutdatedToAnalyze: trivial '{s} {d}'", .{ - @tagName(zcu.outdated_ready.keys()[0].unwrap()), - switch (zcu.outdated_ready.keys()[0].unwrap()) { - inline else => |x| @intFromEnum(x), - }, - }); - return zcu.outdated_ready.keys()[0]; + const unit = zcu.outdated_ready.keys()[0]; + log.debug("findOutdatedToAnalyze: trivial {}", .{fmtAnalUnit(unit, zcu)}); + return unit; } // There is no single AnalUnit which is ready for re-analysis. Instead, we must assume that some @@ -2445,8 +2483,16 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit { } } - log.debug("findOutdatedToAnalyze: heuristic returned Cau {d} ({d} dependers)", .{ - @intFromEnum(chosen_cau.?), + if (chosen_cau == null) { + for (zcu.outdated.keys(), zcu.outdated.values()) |o, opod| { + const func = o.unwrap().func; + const nav = zcu.funcInfo(func).owner_nav; + std.io.getStdErr().writer().print("outdated: func {}, nav {}, name '{}', [p]o deps {}\n", .{ func, nav, ip.getNav(nav).fqn.fmt(ip), opod }) catch {}; + } + } + + log.debug("findOutdatedToAnalyze: heuristic returned '{}' ({d} dependers)", .{ + fmtAnalUnit(AnalUnit.wrap(.{ .cau = chosen_cau.? }), zcu), chosen_cau_dependers, }); @@ -3090,7 +3136,6 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve }); defer gpa.free(resolved_path); const file = zcu.import_table.get(resolved_path).?; - if (zcu.fileByIndex(file).status != .success_zir) continue; const root_ty = zcu.fileRootType(file); if (root_ty == .none) continue; type_queue.putAssumeCapacityNoClobber(root_ty, null); @@ -3102,6 +3147,8 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve const referencer = kv.value; try checked_types.putNoClobber(gpa, ty, {}); + log.debug("handle type '{}'", .{Type.fromInterned(ty).containerTypeName(ip).fmt(ip)}); + // If this type has a `Cau` for resolution, it's automatically referenced. 
const resolution_cau: InternPool.Cau.Index.Optional = switch (ip.indexToKey(ty)) { .struct_type => ip.loadStructType(ty).cau, @@ -3132,13 +3179,14 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve // Queue any decls within this type which would be automatically analyzed. // Keep in sync with analysis queueing logic in `Zcu.PerThread.ScanDeclIter.scanDecl`. - const ns = Type.fromInterned(ty).getNamespace(zcu).unwrap() orelse continue; + const ns = Type.fromInterned(ty).getNamespace(zcu).unwrap().?; for (zcu.namespacePtr(ns).other_decls.items) |cau| { // These are `comptime` and `test` declarations. // `comptime` decls are always analyzed; `test` declarations are analyzed depending on the test filter. const inst_info = ip.getCau(cau).zir_index.resolveFull(ip) orelse continue; const file = zcu.fileByIndex(inst_info.file); - const zir = file.zir; + // If the file failed AstGen, the TrackedInst refers to the old ZIR. + const zir = if (file.status == .success_zir) file.zir else file.prev_zir.?.*; const declaration = zir.getDeclaration(inst_info.inst)[0]; const want_analysis = switch (declaration.name) { .@"usingnamespace" => unreachable, @@ -3158,27 +3206,51 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve }; if (want_analysis) { const unit = AnalUnit.wrap(.{ .cau = cau }); - if (!result.contains(unit)) try unit_queue.put(gpa, unit, referencer); + if (!result.contains(unit)) { + log.debug("type '{}': ref cau %{}", .{ + Type.fromInterned(ty).containerTypeName(ip).fmt(ip), + @intFromEnum(inst_info.inst), + }); + try unit_queue.put(gpa, unit, referencer); + } } } for (zcu.namespacePtr(ns).pub_decls.keys()) |nav| { // These are named declarations. They are analyzed only if marked `export`. 
const cau = ip.getNav(nav).analysis_owner.unwrap().?; const inst_info = ip.getCau(cau).zir_index.resolveFull(ip) orelse continue; - const declaration = zcu.fileByIndex(inst_info.file).zir.getDeclaration(inst_info.inst)[0]; + const file = zcu.fileByIndex(inst_info.file); + // If the file failed AstGen, the TrackedInst refers to the old ZIR. + const zir = if (file.status == .success_zir) file.zir else file.prev_zir.?.*; + const declaration = zir.getDeclaration(inst_info.inst)[0]; if (declaration.flags.is_export) { const unit = AnalUnit.wrap(.{ .cau = cau }); - if (!result.contains(unit)) try unit_queue.put(gpa, unit, referencer); + if (!result.contains(unit)) { + log.debug("type '{}': ref cau %{}", .{ + Type.fromInterned(ty).containerTypeName(ip).fmt(ip), + @intFromEnum(inst_info.inst), + }); + try unit_queue.put(gpa, unit, referencer); + } } } for (zcu.namespacePtr(ns).priv_decls.keys()) |nav| { // These are named declarations. They are analyzed only if marked `export`. const cau = ip.getNav(nav).analysis_owner.unwrap().?; const inst_info = ip.getCau(cau).zir_index.resolveFull(ip) orelse continue; - const declaration = zcu.fileByIndex(inst_info.file).zir.getDeclaration(inst_info.inst)[0]; + const file = zcu.fileByIndex(inst_info.file); + // If the file failed AstGen, the TrackedInst refers to the old ZIR. + const zir = if (file.status == .success_zir) file.zir else file.prev_zir.?.*; + const declaration = zir.getDeclaration(inst_info.inst)[0]; if (declaration.flags.is_export) { const unit = AnalUnit.wrap(.{ .cau = cau }); - if (!result.contains(unit)) try unit_queue.put(gpa, unit, referencer); + if (!result.contains(unit)) { + log.debug("type '{}': ref cau %{}", .{ + Type.fromInterned(ty).containerTypeName(ip).fmt(ip), + @intFromEnum(inst_info.inst), + }); + try unit_queue.put(gpa, unit, referencer); + } } } // Incremental compilation does not support `usingnamespace`. 
@@ -3199,15 +3271,23 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve const unit = kv.key; try result.putNoClobber(gpa, unit, kv.value); + log.debug("handle unit '{}'", .{fmtAnalUnit(unit, zcu)}); + if (zcu.reference_table.get(unit)) |first_ref_idx| { assert(first_ref_idx != std.math.maxInt(u32)); var ref_idx = first_ref_idx; while (ref_idx != std.math.maxInt(u32)) { const ref = zcu.all_references.items[ref_idx]; - if (!result.contains(ref.referenced)) try unit_queue.put(gpa, ref.referenced, .{ - .referencer = unit, - .src = ref.src, - }); + if (!result.contains(ref.referenced)) { + log.debug("unit '{}': ref unit '{}'", .{ + fmtAnalUnit(unit, zcu), + fmtAnalUnit(ref.referenced, zcu), + }); + try unit_queue.put(gpa, ref.referenced, .{ + .referencer = unit, + .src = ref.src, + }); + } ref_idx = ref.next; } } @@ -3216,10 +3296,16 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve var ref_idx = first_ref_idx; while (ref_idx != std.math.maxInt(u32)) { const ref = zcu.all_type_references.items[ref_idx]; - if (!checked_types.contains(ref.referenced)) try type_queue.put(gpa, ref.referenced, .{ - .referencer = unit, - .src = ref.src, - }); + if (!checked_types.contains(ref.referenced)) { + log.debug("unit '{}': ref type '{}'", .{ + fmtAnalUnit(unit, zcu), + Type.fromInterned(ref.referenced).containerTypeName(ip).fmt(ip), + }); + try type_queue.put(gpa, ref.referenced, .{ + .referencer = unit, + .src = ref.src, + }); + } ref_idx = ref.next; } } @@ -3293,3 +3379,72 @@ pub fn cauFileScope(zcu: *Zcu, cau: InternPool.Cau.Index) *File { const file_index = ip.getCau(cau).zir_index.resolveFile(ip); return zcu.fileByIndex(file_index); } + +fn fmtAnalUnit(unit: AnalUnit, zcu: *Zcu) std.fmt.Formatter(formatAnalUnit) { + return .{ .data = .{ .unit = unit, .zcu = zcu } }; +} +fn fmtDependee(d: InternPool.Dependee, zcu: *Zcu) std.fmt.Formatter(formatDependee) { + return .{ .data = .{ .dependee = d, .zcu = zcu } }; +} + 
+fn formatAnalUnit(data: struct { unit: AnalUnit, zcu: *Zcu }, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { + _ = .{ fmt, options }; + const zcu = data.zcu; + const ip = &zcu.intern_pool; + switch (data.unit.unwrap()) { + .cau => |cau_index| { + const cau = ip.getCau(cau_index); + switch (cau.owner.unwrap()) { + .nav => |nav| return writer.print("cau(decl='{}')", .{ip.getNav(nav).fqn.fmt(ip)}), + .type => |ty| return writer.print("cau(ty='{}')", .{Type.fromInterned(ty).containerTypeName(ip).fmt(ip)}), + .none => if (cau.zir_index.resolveFull(ip)) |resolved| { + const file_path = zcu.fileByIndex(resolved.file).sub_file_path; + return writer.print("cau(inst=('{s}', %{}))", .{ file_path, @intFromEnum(resolved.inst) }); + } else { + return writer.writeAll("cau(inst=)"); + }, + } + }, + .func => |func| { + const nav = zcu.funcInfo(func).owner_nav; + return writer.print("func('{}')", .{ip.getNav(nav).fqn.fmt(ip)}); + }, + } +} +fn formatDependee(data: struct { dependee: InternPool.Dependee, zcu: *Zcu }, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { + _ = .{ fmt, options }; + const zcu = data.zcu; + const ip = &zcu.intern_pool; + switch (data.dependee) { + .src_hash => |ti| { + const info = ti.resolveFull(ip) orelse { + return writer.writeAll("inst()"); + }; + const file_path = zcu.fileByIndex(info.file).sub_file_path; + return writer.print("inst('{s}', %{d})", .{ file_path, @intFromEnum(info.inst) }); + }, + .nav_val => |nav| { + const fqn = ip.getNav(nav).fqn; + return writer.print("nav('{}')", .{fqn.fmt(ip)}); + }, + .interned => |ip_index| switch (ip.indexToKey(ip_index)) { + .struct_type, .union_type, .enum_type => return writer.print("type('{}')", .{Type.fromInterned(ip_index).containerTypeName(ip).fmt(ip)}), + .func => |f| return writer.print("ies('{}')", .{ip.getNav(f.owner_nav).fqn.fmt(ip)}), + else => unreachable, + }, + .namespace => |ti| { + const info = ti.resolveFull(ip) orelse { + 
return writer.writeAll("namespace()"); + }; + const file_path = zcu.fileByIndex(info.file).sub_file_path; + return writer.print("namespace('{s}', %{d})", .{ file_path, @intFromEnum(info.inst) }); + }, + .namespace_name => |k| { + const info = k.namespace.resolveFull(ip) orelse { + return writer.print("namespace(, '{}')", .{k.name.fmt(ip)}); + }; + const file_path = zcu.fileByIndex(info.file).sub_file_path; + return writer.print("namespace('{s}', %{d}, '{}')", .{ file_path, @intFromEnum(info.inst), k.name.fmt(ip) }); + }, + } +} diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 37a3aced09..3c22abb4b8 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -360,7 +360,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { // Tracking failed for this instruction. Invalidate associated `src_hash` deps. log.debug("tracking failed for %{d}", .{old_inst}); tracked_inst.inst = .lost; - try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index }); + try zcu.markDependeeOutdated(.not_marked_po, .{ .src_hash = tracked_inst_index }); continue; }; tracked_inst.inst = InternPool.TrackedInst.MaybeLost.ZirIndex.wrap(new_inst); @@ -383,7 +383,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { }); } // The source hash associated with this instruction changed - invalidate relevant dependencies. - try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index }); + try zcu.markDependeeOutdated(.not_marked_po, .{ .src_hash = tracked_inst_index }); } // If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies. 
@@ -435,7 +435,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { if (!old_names.swapRemove(name_ip)) continue; // Name added any_change = true; - try zcu.markDependeeOutdated(.{ .namespace_name = .{ + try zcu.markDependeeOutdated(.not_marked_po, .{ .namespace_name = .{ .namespace = tracked_inst_index, .name = name_ip, } }); @@ -444,14 +444,14 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { // The only elements remaining in `old_names` now are any names which were removed. for (old_names.keys()) |name_ip| { any_change = true; - try zcu.markDependeeOutdated(.{ .namespace_name = .{ + try zcu.markDependeeOutdated(.not_marked_po, .{ .namespace_name = .{ .namespace = tracked_inst_index, .name = name_ip, } }); } if (any_change) { - try zcu.markDependeeOutdated(.{ .namespace = tracked_inst_index }); + try zcu.markDependeeOutdated(.not_marked_po, .{ .namespace = tracked_inst_index }); } } } @@ -508,7 +508,7 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index }); const cau = ip.getCau(cau_index); - log.debug("ensureCauAnalyzed {d}", .{@intFromEnum(cau_index)}); + //log.debug("ensureCauAnalyzed {d}", .{@intFromEnum(cau_index)}); assert(!zcu.analysis_in_progress.contains(anal_unit)); @@ -527,8 +527,91 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu if (cau_outdated) { _ = zcu.outdated_ready.swapRemove(anal_unit); + } else { + // We can trust the current information about this `Cau`. + if (zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit)) { + return error.AnalysisFail; + } + // If it wasn't failed and wasn't marked outdated, then either... + // * it is a type and is up-to-date, or + // * it is a `comptime` decl and is up-to-date, or + // * it is another decl and is EITHER up-to-date OR never-referenced (so unresolved) + // We just need to check for that last case. 
+ switch (cau.owner.unwrap()) { + .type, .none => return, + .nav => |nav| if (ip.getNav(nav).status == .resolved) return, + } } + const sema_result: SemaCauResult, const analysis_fail = if (pt.ensureCauAnalyzedInner(cau_index, cau_outdated)) |result| + .{ result, false } + else |err| switch (err) { + error.AnalysisFail => res: { + if (!zcu.failed_analysis.contains(anal_unit)) { + // If this `Cau` caused the error, it would have an entry in `failed_analysis`. + // Since it does not, this must be a transitive failure. + try zcu.transitive_failed_analysis.put(gpa, anal_unit, {}); + } + // We treat errors as up-to-date, since those uses would just trigger a transitive error + break :res .{ .{ + .invalidate_decl_val = false, + .invalidate_decl_ref = false, + }, true }; + }, + error.OutOfMemory => res: { + try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); + try zcu.retryable_failures.ensureUnusedCapacity(gpa, 1); + const msg = try Zcu.ErrorMsg.create( + gpa, + .{ .base_node_inst = cau.zir_index, .offset = Zcu.LazySrcLoc.Offset.nodeOffset(0) }, + "unable to analyze: OutOfMemory", + .{}, + ); + zcu.retryable_failures.appendAssumeCapacity(anal_unit); + zcu.failed_analysis.putAssumeCapacityNoClobber(anal_unit, msg); + // We treat errors as up-to-date, since those uses would just trigger a transitive error + break :res .{ .{ + .invalidate_decl_val = false, + .invalidate_decl_ref = false, + }, true }; + }, + }; + + if (cau_outdated) { + // TODO: we do not yet have separate dependencies for decl values vs types. + const invalidate = sema_result.invalidate_decl_val or sema_result.invalidate_decl_ref; + const dependee: InternPool.Dependee = switch (cau.owner.unwrap()) { + .none => return, // there are no dependencies on a `comptime` decl! 
+ .nav => |nav_index| .{ .nav_val = nav_index }, + .type => |ty| .{ .interned = ty }, + }; + + if (invalidate) { + // This dependency was marked as PO, meaning dependees were waiting + // on its analysis result, and it has turned out to be outdated. + // Update dependees accordingly. + try zcu.markDependeeOutdated(.marked_po, dependee); + } else { + // This dependency was previously PO, but turned out to be up-to-date. + // We do not need to queue successive analysis. + try zcu.markPoDependeeUpToDate(dependee); + } + } + + if (analysis_fail) return error.AnalysisFail; +} + +fn ensureCauAnalyzedInner( + pt: Zcu.PerThread, + cau_index: InternPool.Cau.Index, + cau_outdated: bool, +) Zcu.SemaError!SemaCauResult { + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + + const cau = ip.getCau(cau_index); + const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index }); + const inst_info = cau.zir_index.resolveFull(ip) orelse return error.AnalysisFail; // TODO: document this elsewhere mlugg! @@ -550,22 +633,6 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu return error.AnalysisFail; } - if (!cau_outdated) { - // We can trust the current information about this `Cau`. - if (zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit)) { - return error.AnalysisFail; - } - // If it wasn't failed and wasn't marked outdated, then either... - // * it is a type and is up-to-date, or - // * it is a `comptime` decl and is up-to-date, or - // * it is another decl and is EITHER up-to-date OR never-referenced (so unresolved) - // We just need to check for that last case. - switch (cau.owner.unwrap()) { - .type, .none => return, - .nav => |nav| if (ip.getNav(nav).status == .resolved) return, - } - } - // `cau_outdated` can be true in the initial update for `comptime` declarations, // so this isn't a `dev.check`. 
if (cau_outdated and dev.env.supports(.incremental)) { @@ -573,76 +640,34 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu // prior to re-analysis. zcu.deleteUnitExports(anal_unit); zcu.deleteUnitReferences(anal_unit); - } - - const sema_result: SemaCauResult = res: { - if (inst_info.inst == .main_struct_inst) { - // Note that this is definitely a *recreation* due to outdated, because - // this instruction indicates that `cau.owner` is a `type`, which only - // reaches here if `cau_outdated`. - try pt.recreateFileRoot(inst_info.file); - break :res .{ - .invalidate_decl_val = true, - .invalidate_decl_ref = true, - }; + if (zcu.failed_analysis.fetchSwapRemove(anal_unit)) |kv| { + kv.value.destroy(zcu.gpa); } + _ = zcu.transitive_failed_analysis.swapRemove(anal_unit); + } - const decl_prog_node = zcu.sema_prog_node.start(switch (cau.owner.unwrap()) { - .nav => |nav| ip.getNav(nav).fqn.toSlice(ip), - .type => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip), - .none => "comptime", - }, 0); - defer decl_prog_node.end(); - - break :res pt.semaCau(cau_index) catch |err| switch (err) { - error.AnalysisFail => { - if (!zcu.failed_analysis.contains(anal_unit)) { - // If this `Cau` caused the error, it would have an entry in `failed_analysis`. - // Since it does not, this must be a transitive failure. 
- try zcu.transitive_failed_analysis.put(gpa, anal_unit, {}); - } - return error.AnalysisFail; - }, - error.GenericPoison => unreachable, - error.ComptimeBreak => unreachable, - error.ComptimeReturn => unreachable, - error.OutOfMemory => { - try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); - try zcu.retryable_failures.append(gpa, anal_unit); - zcu.failed_analysis.putAssumeCapacityNoClobber(anal_unit, try Zcu.ErrorMsg.create( - gpa, - .{ .base_node_inst = cau.zir_index, .offset = Zcu.LazySrcLoc.Offset.nodeOffset(0) }, - "unable to analyze: OutOfMemory", - .{}, - )); - return error.AnalysisFail; - }, + if (inst_info.inst == .main_struct_inst) { + // Note that this is definitely a *recreation* due to outdated, because + // this instruction indicates that `cau.owner` is a `type`, which only + // reaches here if `cau_outdated`. + try pt.recreateFileRoot(inst_info.file); + return .{ + .invalidate_decl_val = true, + .invalidate_decl_ref = true, }; - }; - - if (!cau_outdated) { - // We definitely don't need to do any dependency tracking, so our work is done. - return; } - // TODO: we do not yet have separate dependencies for decl values vs types. - const invalidate = sema_result.invalidate_decl_val or sema_result.invalidate_decl_ref; - const dependee: InternPool.Dependee = switch (cau.owner.unwrap()) { - .none => return, // there are no dependencies on a `comptime` decl! - .nav => |nav_index| .{ .nav_val = nav_index }, - .type => |ty| .{ .interned = ty }, - }; + const decl_prog_node = zcu.sema_prog_node.start(switch (cau.owner.unwrap()) { + .nav => |nav| ip.getNav(nav).fqn.toSlice(ip), + .type => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip), + .none => "comptime", + }, 0); + defer decl_prog_node.end(); - if (invalidate) { - // This dependency was marked as PO, meaning dependees were waiting - // on its analysis result, and it has turned out to be outdated. - // Update dependees accordingly. 
- try zcu.markDependeeOutdated(dependee); - } else { - // This dependency was previously PO, but turned out to be up-to-date. - // We do not need to queue successive analysis. - try zcu.markPoDependeeUpToDate(dependee); - } + return pt.semaCau(cau_index) catch |err| switch (err) { + error.GenericPoison, error.ComptimeBreak, error.ComptimeReturn => unreachable, + error.AnalysisFail, error.OutOfMemory => |e| return e, + }; } pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: InternPool.Index) Zcu.SemaError!void { @@ -660,7 +685,64 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter const func = zcu.funcInfo(maybe_coerced_func_index); - log.debug("ensureFuncBodyAnalyzed {d}", .{@intFromEnum(func_index)}); + //log.debug("ensureFuncBodyAnalyzed {d}", .{@intFromEnum(func_index)}); + + const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index }); + const func_outdated = zcu.outdated.swapRemove(anal_unit) or + zcu.potentially_outdated.swapRemove(anal_unit); + + if (func_outdated) { + _ = zcu.outdated_ready.swapRemove(anal_unit); + } else { + // We can trust the current information about this function. + if (zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit)) { + return error.AnalysisFail; + } + switch (func.analysisUnordered(ip).state) { + .unreferenced => {}, // this is the first reference + .queued => {}, // we're waiting on first-time analysis + .analyzed => return, // up-to-date + } + } + + const ies_outdated, const analysis_fail = if (pt.ensureFuncBodyAnalyzedInner(func_index, func_outdated)) |result| + .{ result.ies_outdated, false } + else |err| switch (err) { + error.AnalysisFail => res: { + if (!zcu.failed_analysis.contains(anal_unit)) { + // If this function caused the error, it would have an entry in `failed_analysis`. + // Since it does not, this must be a transitive failure. 
+ try zcu.transitive_failed_analysis.put(gpa, anal_unit, {}); + } + break :res .{ false, true }; // we treat errors as up-to-date IES, since those uses would just trigger a transitive error + }, + error.OutOfMemory => return error.OutOfMemory, // TODO: graceful handling like `ensureCauAnalyzed` + }; + + if (func_outdated) { + if (ies_outdated) { + log.debug("func IES invalidated ('{d}')", .{@intFromEnum(func_index)}); + try zcu.markDependeeOutdated(.marked_po, .{ .interned = func_index }); + } else { + log.debug("func IES up-to-date ('{d}')", .{@intFromEnum(func_index)}); + try zcu.markPoDependeeUpToDate(.{ .interned = func_index }); + } + } + + if (analysis_fail) return error.AnalysisFail; +} + +fn ensureFuncBodyAnalyzedInner( + pt: Zcu.PerThread, + func_index: InternPool.Index, + func_outdated: bool, +) Zcu.SemaError!struct { ies_outdated: bool } { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + + const func = zcu.funcInfo(func_index); + const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index }); // Here's an interesting question: is this function actually valid? 
// Maybe the signature changed, so we'll end up creating a whole different `func` @@ -681,7 +763,9 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter }); if (ip.isRemoved(func_index) or (func.generic_owner != .none and ip.isRemoved(func.generic_owner))) { - try zcu.markDependeeOutdated(.{ .interned = func_index }); // IES + if (func_outdated) { + try zcu.markDependeeOutdated(.marked_po, .{ .interned = func_index }); // IES + } ip.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index })); ip.remove(pt.tid, func_index); @panic("TODO: remove orphaned function from binary"); @@ -694,15 +778,14 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter else .none; - const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index }); - const func_outdated = zcu.outdated.swapRemove(anal_unit) or - zcu.potentially_outdated.swapRemove(anal_unit); - if (func_outdated) { - _ = zcu.outdated_ready.swapRemove(anal_unit); dev.check(.incremental); zcu.deleteUnitExports(anal_unit); zcu.deleteUnitReferences(anal_unit); + if (zcu.failed_analysis.fetchSwapRemove(anal_unit)) |kv| { + kv.value.destroy(gpa); + } + _ = zcu.transitive_failed_analysis.swapRemove(anal_unit); } if (!func_outdated) { @@ -713,7 +796,7 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter switch (func.analysisUnordered(ip).state) { .unreferenced => {}, // this is the first reference .queued => {}, // we're waiting on first-time analysis - .analyzed => return, // up-to-date + .analyzed => return .{ .ies_outdated = false }, // up-to-date } } @@ -722,28 +805,11 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter if (func_outdated) "outdated" else "never analyzed", }); - var air = pt.analyzeFnBody(func_index) catch |err| switch (err) { - error.AnalysisFail => { - if (!zcu.failed_analysis.contains(anal_unit)) { - // If this function caused the error, it would have an 
entry in `failed_analysis`. - // Since it does not, this must be a transitive failure. - try zcu.transitive_failed_analysis.put(gpa, anal_unit, {}); - } - return error.AnalysisFail; - }, - error.OutOfMemory => return error.OutOfMemory, - }; + var air = try pt.analyzeFnBody(func_index); errdefer air.deinit(gpa); - if (func_outdated) { - if (!func.analysisUnordered(ip).inferred_error_set or func.resolvedErrorSetUnordered(ip) != old_resolved_ies) { - log.debug("func IES invalidated ('{d}')", .{@intFromEnum(func_index)}); - try zcu.markDependeeOutdated(.{ .interned = func_index }); - } else { - log.debug("func IES up-to-date ('{d}')", .{@intFromEnum(func_index)}); - try zcu.markPoDependeeUpToDate(.{ .interned = func_index }); - } - } + const ies_outdated = func_outdated and + (!func.analysisUnordered(ip).inferred_error_set or func.resolvedErrorSetUnordered(ip) != old_resolved_ies); const comp = zcu.comp; @@ -752,13 +818,15 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter if (comp.bin_file == null and zcu.llvm_object == null and !dump_air and !dump_llvm_ir) { air.deinit(gpa); - return; + return .{ .ies_outdated = ies_outdated }; } try comp.queueJob(.{ .codegen_func = .{ .func = func_index, .air = air, } }); + + return .{ .ies_outdated = ies_outdated }; } /// Takes ownership of `air`, even on error. @@ -1935,6 +2003,8 @@ const ScanDeclIter = struct { .@"comptime" => cau: { const cau = existing_cau orelse try ip.createComptimeCau(gpa, pt.tid, tracked_inst, namespace_index); + try namespace.other_decls.append(gpa, cau); + // For a `comptime` declaration, whether to re-analyze is based solely on whether the // `Cau` is outdated. So, add this one to `outdated` and `outdated_ready` if not already. 
const unit = InternPool.AnalUnit.wrap(.{ .cau = cau }); From 936a79f428241f25468f5e54bd24bb6e9a78adbd Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 13 Aug 2024 03:05:41 +0100 Subject: [PATCH 05/25] tools,test: improve incr-check and add new incremental tests --- test/incremental/add_decl_namespaced | 59 ++++++++++++++++++++++++++++ test/incremental/unreferenced_error | 38 ++++++++++++++++++ tools/incr-check.zig | 40 +++++++++++++++++-- 3 files changed, 133 insertions(+), 4 deletions(-) create mode 100644 test/incremental/add_decl_namespaced create mode 100644 test/incremental/unreferenced_error diff --git a/test/incremental/add_decl_namespaced b/test/incremental/add_decl_namespaced new file mode 100644 index 0000000000..43123e0d0c --- /dev/null +++ b/test/incremental/add_decl_namespaced @@ -0,0 +1,59 @@ +#target=x86_64-linux +#update=initial version +#file=main.zig +const std = @import("std"); +pub fn main() !void { + try std.io.getStdOut().writeAll(@This().foo); +} +const foo = "good morning\n"; +#expect_stdout="good morning\n" + +#update=add new declaration +#file=main.zig +const std = @import("std"); +pub fn main() !void { + try std.io.getStdOut().writeAll(@This().foo); +} +const foo = "good morning\n"; +const bar = "good evening\n"; +#expect_stdout="good morning\n" + +#update=reference new declaration +#file=main.zig +const std = @import("std"); +pub fn main() !void { + try std.io.getStdOut().writeAll(@This().bar); +} +const foo = "good morning\n"; +const bar = "good evening\n"; +#expect_stdout="good evening\n" + +#update=reference missing declaration +#file=main.zig +const std = @import("std"); +pub fn main() !void { + try std.io.getStdOut().writeAll(@This().qux); +} +const foo = "good morning\n"; +const bar = "good evening\n"; +#expect_error=ignored + +#update=add missing declaration +#file=main.zig +const std = @import("std"); +pub fn main() !void { + try std.io.getStdOut().writeAll(@This().qux); +} +const foo = "good morning\n"; +const bar = "good 
evening\n"; +const qux = "good night\n"; +#expect_stdout="good night\n" + +#update=remove unused declarations +#file=main.zig +const std = @import("std"); +pub fn main() !void { + try std.io.getStdOut().writeAll(@This().qux); +} +const qux = "good night\n"; +#expect_stdout="good night\n" diff --git a/test/incremental/unreferenced_error b/test/incremental/unreferenced_error new file mode 100644 index 0000000000..e3d67d6ad8 --- /dev/null +++ b/test/incremental/unreferenced_error @@ -0,0 +1,38 @@ +#target=x86_64-linux +#update=initial version +#file=main.zig +const std = @import("std"); +pub fn main() !void { + try std.io.getStdOut().writeAll(a); +} +const a = "Hello, World!\n"; +#expect_stdout="Hello, World!\n" + +#update=introduce compile error +#file=main.zig +const std = @import("std"); +pub fn main() !void { + try std.io.getStdOut().writeAll(a); +} +const a = @compileError("bad a"); +#expect_error=ignored + +#update=remove error reference +#file=main.zig +const std = @import("std"); +pub fn main() !void { + try std.io.getStdOut().writeAll(b); +} +const a = @compileError("bad a"); +const b = "Hi there!\n"; +#expect_stdout="Hi there!\n" + +#update=introduce and remove reference to error +#file=main.zig +const std = @import("std"); +pub fn main() !void { + try std.io.getStdOut().writeAll(a); +} +const a = "Back to a\n"; +const b = @compileError("bad b"); +#expect_stdout="Back to a\n" diff --git a/tools/incr-check.zig b/tools/incr-check.zig index 803daa05f4..6bf2de921a 100644 --- a/tools/incr-check.zig +++ b/tools/incr-check.zig @@ -2,7 +2,7 @@ const std = @import("std"); const fatal = std.process.fatal; const Allocator = std.mem.Allocator; -const usage = "usage: incr-check [-fno-emit-bin] [--zig-lib-dir lib]"; +const usage = "usage: incr-check [-fno-emit-bin] [--zig-lib-dir lib] [--debug-zcu]"; pub fn main() !void { var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator); @@ -13,6 +13,7 @@ pub fn main() !void { var opt_input_file_name: ?[]const u8 
= null; var opt_lib_dir: ?[]const u8 = null; var no_bin = false; + var debug_zcu = false; var arg_it = try std.process.argsWithAllocator(arena); _ = arg_it.skip(); @@ -20,6 +21,8 @@ pub fn main() !void { if (arg.len > 0 and arg[0] == '-') { if (std.mem.eql(u8, arg, "-fno-emit-bin")) { no_bin = true; + } else if (std.mem.eql(u8, arg, "--debug-zcu")) { + debug_zcu = true; } else if (std.mem.eql(u8, arg, "--zig-lib-dir")) { opt_lib_dir = arg_it.next() orelse fatal("expected arg after '--zig-lib-dir'\n{s}", .{usage}); } else { @@ -48,6 +51,13 @@ pub fn main() !void { const tmp_dir_path = "tmp_" ++ std.fmt.hex(rand_int); const tmp_dir = try std.fs.cwd().makeOpenPath(tmp_dir_path, .{}); + if (opt_lib_dir) |lib_dir| { + if (!std.fs.path.isAbsolute(lib_dir)) { + // The cwd of the subprocess is within the tmp dir, so prepend `..` to the path. + opt_lib_dir = try std.fs.path.join(arena, &.{ "..", lib_dir }); + } + } + const child_prog_node = prog_node.start("zig build-exe", 0); defer child_prog_node.end(); @@ -74,6 +84,9 @@ pub fn main() !void { } else { try child_args.appendSlice(arena, &.{ "-fno-llvm", "-fno-lld" }); } + if (debug_zcu) { + try child_args.appendSlice(arena, &.{ "--debug-log", "zcu" }); + } var child = std.process.Child.init(child_args.items, arena); child.stdin_behavior = .Pipe; @@ -89,6 +102,7 @@ pub fn main() !void { .tmp_dir = tmp_dir, .tmp_dir_path = tmp_dir_path, .child = &child, + .allow_stderr = debug_zcu, }; try child.spawn(); @@ -102,6 +116,11 @@ pub fn main() !void { for (case.updates) |update| { var update_node = prog_node.start(update.name, 0); defer update_node.end(); + + if (debug_zcu) { + std.log.info("=== START UPDATE '{s}' ===", .{update.name}); + } + eval.write(update); try eval.requestUpdate(); try eval.check(&poller, update); @@ -118,6 +137,7 @@ const Eval = struct { tmp_dir: std.fs.Dir, tmp_dir_path: []const u8, child: *std.process.Child, + allow_stderr: bool, const StreamEnum = enum { stdout, stderr }; const Poller = 
std.io.Poller(StreamEnum); @@ -173,7 +193,11 @@ const Eval = struct { }; if (stderr.readableLength() > 0) { const stderr_data = try stderr.toOwnedSlice(); - fatal("error_bundle included unexpected stderr:\n{s}", .{stderr_data}); + if (eval.allow_stderr) { + std.log.info("error_bundle included stderr:\n{s}", .{stderr_data}); + } else { + fatal("error_bundle included unexpected stderr:\n{s}", .{stderr_data}); + } } if (result_error_bundle.errorMessageCount() == 0) { // Empty bundle indicates successful update in a `-fno-emit-bin` build. @@ -197,7 +221,11 @@ const Eval = struct { const result_binary = try arena.dupe(u8, body[@sizeOf(EbpHdr)..]); if (stderr.readableLength() > 0) { const stderr_data = try stderr.toOwnedSlice(); - fatal("emit_bin_path included unexpected stderr:\n{s}", .{stderr_data}); + if (eval.allow_stderr) { + std.log.info("emit_bin_path included stderr:\n{s}", .{stderr_data}); + } else { + fatal("emit_bin_path included unexpected stderr:\n{s}", .{stderr_data}); + } } try eval.checkSuccessOutcome(update, result_binary); // This message indicates the end of the update. 
@@ -213,7 +241,11 @@ const Eval = struct { if (stderr.readableLength() > 0) { const stderr_data = try stderr.toOwnedSlice(); - fatal("update '{s}' failed:\n{s}", .{ update.name, stderr_data }); + if (eval.allow_stderr) { + std.log.info("update '{s}' included stderr:\n{s}", .{ update.name, stderr_data }); + } else { + fatal("update '{s}' failed:\n{s}", .{ update.name, stderr_data }); + } } waitChild(eval.child); From 434ad906101a72c3c94f6a0fec1aa11d36b46ebb Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 13 Aug 2024 03:08:55 +0100 Subject: [PATCH 06/25] Sema: disable comptime call memoization under -fincremental --- src/Sema.zig | 39 +++++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 2e970c43f4..9b6ff7cd8e 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -7560,14 +7560,14 @@ fn analyzeCall( operation: CallOperation, ) CompileError!Air.Inst.Ref { const pt = sema.pt; - const mod = pt.zcu; - const ip = &mod.intern_pool; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; const callee_ty = sema.typeOf(func); - const func_ty_info = mod.typeToFunc(func_ty).?; + const func_ty_info = zcu.typeToFunc(func_ty).?; const cc = func_ty_info.cc; if (try sema.resolveValue(func)) |func_val| - if (func_val.isUndef(mod)) + if (func_val.isUndef(zcu)) return sema.failWithUseOfUndef(block, call_src); if (cc == .Naked) { const maybe_func_inst = try sema.funcDeclSrcInst(func); @@ -7679,7 +7679,7 @@ fn analyzeCall( .needed_comptime_reason = "function being called at comptime must be comptime-known", .block_comptime_reason = comptime_reason, }); - const module_fn_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) { + const module_fn_index = switch (zcu.intern_pool.indexToKey(func_val.toIntern())) { .@"extern" => return sema.fail(block, call_src, "{s} call of extern function", .{ @as([]const u8, if (is_comptime_call) "comptime" else "inline"), }), @@ -7696,7 +7696,7 @@ fn analyzeCall( }, else => 
{}, } - assert(callee_ty.isPtrAtRuntime(mod)); + assert(callee_ty.isPtrAtRuntime(zcu)); return sema.fail(block, call_src, "{s} call of function pointer", .{ if (is_comptime_call) "comptime" else "inline", }); @@ -7736,7 +7736,7 @@ fn analyzeCall( }, }; - const module_fn = mod.funcInfo(module_fn_index); + const module_fn = zcu.funcInfo(module_fn_index); // This is not a function instance, so the function's `Nav` has a // `Cau` -- we don't need to check `generic_owner`. @@ -7750,7 +7750,7 @@ fn analyzeCall( // whenever performing an operation where the difference matters. var ics = InlineCallSema.init( sema, - mod.cauFileScope(fn_cau_index).zir, + zcu.cauFileScope(fn_cau_index).zir, module_fn_index, block.error_return_trace_index, ); @@ -7784,13 +7784,16 @@ fn analyzeCall( // Whether this call should be memoized, set to false if the call can // mutate comptime state. - var should_memoize = true; + // TODO: comptime call memoization is currently not supported under incremental compilation + // since dependencies are not marked on callers. If we want to keep this around (we should + // check that it's worthwhile first!), each memoized call needs a `Cau`. + var should_memoize = !zcu.comp.incremental; // If it's a comptime function call, we need to memoize it as long as no external // comptime memory is mutated. const memoized_arg_values = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len); - const owner_info = mod.typeToFunc(Type.fromInterned(module_fn.ty)).?; + const owner_info = zcu.typeToFunc(Type.fromInterned(module_fn.ty)).?; const new_param_types = try sema.arena.alloc(InternPool.Index, owner_info.param_types.len); var new_fn_info: InternPool.GetFuncTypeKey = .{ .param_types = new_param_types, @@ -7875,12 +7878,12 @@ fn analyzeCall( // bug generating invalid LLVM IR. 
const res2: Air.Inst.Ref = res2: { if (should_memoize and is_comptime_call) { - if (mod.intern_pool.getIfExists(.{ .memoized_call = .{ + if (zcu.intern_pool.getIfExists(.{ .memoized_call = .{ .func = module_fn_index, .arg_values = memoized_arg_values, .result = .none, } })) |memoized_call_index| { - const memoized_call = mod.intern_pool.indexToKey(memoized_call_index).memoized_call; + const memoized_call = zcu.intern_pool.indexToKey(memoized_call_index).memoized_call; break :res2 Air.internedToRef(memoized_call.result); } } @@ -7939,7 +7942,7 @@ fn analyzeCall( // a reference to `comptime_allocs` so is not stable across instances of `Sema`. // TODO: check whether any external comptime memory was mutated by the // comptime function call. If so, then do not memoize the call here. - if (should_memoize and !Value.fromInterned(result_interned).canMutateComptimeVarState(mod)) { + if (should_memoize and !Value.fromInterned(result_interned).canMutateComptimeVarState(zcu)) { _ = try pt.intern(.{ .memoized_call = .{ .func = module_fn_index, .arg_values = memoized_arg_values, @@ -7978,7 +7981,7 @@ fn analyzeCall( if (param_ty) |t| assert(!t.isGenericPoison()); arg_out.* = try args_info.analyzeArg(sema, block, arg_idx, param_ty, func_ty_info, func); try sema.validateRuntimeValue(block, args_info.argSrc(block, arg_idx), arg_out.*); - if (sema.typeOf(arg_out.*).zigTypeTag(mod) == .NoReturn) { + if (sema.typeOf(arg_out.*).zigTypeTag(zcu) == .NoReturn) { return arg_out.*; } } @@ -7987,15 +7990,15 @@ fn analyzeCall( switch (sema.owner.unwrap()) { .cau => {}, - .func => |owner_func| if (Type.fromInterned(func_ty_info.return_type).isError(mod)) { + .func => |owner_func| if (Type.fromInterned(func_ty_info.return_type).isError(zcu)) { ip.funcSetCallsOrAwaitsErrorableFn(owner_func); }, } if (try sema.resolveValue(func)) |func_val| { - if (mod.intern_pool.isFuncBody(func_val.toIntern())) { + if (zcu.intern_pool.isFuncBody(func_val.toIntern())) { try sema.addReferenceEntry(call_src, 
AnalUnit.wrap(.{ .func = func_val.toIntern() })); - try mod.ensureFuncBodyAnalysisQueued(func_val.toIntern()); + try zcu.ensureFuncBodyAnalysisQueued(func_val.toIntern()); } } @@ -8022,7 +8025,7 @@ fn analyzeCall( // Function pointers and extern functions aren't guaranteed to // actually be noreturn so we add a safety check for them. if (try sema.resolveValue(func)) |func_val| { - switch (mod.intern_pool.indexToKey(func_val.toIntern())) { + switch (zcu.intern_pool.indexToKey(func_val.toIntern())) { .func => break :skip_safety, .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) { .nav => |nav| if (!ip.getNav(nav).isExtern(ip)) break :skip_safety, From 93f2d9a77f659a344fc0c003ce149885fc7df99a Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 13 Aug 2024 04:02:31 +0100 Subject: [PATCH 07/25] Zcu: typo We were accidentally over-reporting most `namespace_name` deps and *not* reporting some actually outdated ones! --- src/Zcu/PerThread.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 3c22abb4b8..5f1856f6eb 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -432,7 +432,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { new_zir.nullTerminatedString(name_zir), .no_embedded_nulls, ); - if (!old_names.swapRemove(name_ip)) continue; + if (old_names.swapRemove(name_ip)) continue; // Name added any_change = true; try zcu.markDependeeOutdated(.not_marked_po, .{ .namespace_name = .{ From 8f8fe892761c9c5c9f7b89d8c53ac287d02b1474 Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 13 Aug 2024 04:06:11 +0100 Subject: [PATCH 08/25] Zcu: panic on usingnamespace with -fincremental --- src/Zcu/PerThread.zig | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 5f1856f6eb..2720edd2f2 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -2040,6 +2040,9 @@ const ScanDeclIter = struct { const want_analysis = switch 
(kind) { .@"comptime" => unreachable, .@"usingnamespace" => a: { + if (comp.incremental) { + @panic("'usingnamespace' is not supported by incremental compilation"); + } if (declaration.flags.is_pub) { try namespace.pub_usingnamespace.append(gpa, nav); } else { From 4e5834a9f247c60fcc1d1da5f3b2c00efdb8f4e4 Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 13 Aug 2024 19:47:12 +0100 Subject: [PATCH 09/25] Compilation: don't queue std analysis twice when testing std --- src/Compilation.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 61f07d3e3b..5c49e96db2 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2269,7 +2269,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { try comp.queueJob(.{ .analyze_mod = std_mod }); zcu.analysis_roots.appendAssumeCapacity(std_mod); - if (comp.config.is_test) { + if (comp.config.is_test and zcu.main_mod != std_mod) { try comp.queueJob(.{ .analyze_mod = zcu.main_mod }); zcu.analysis_roots.appendAssumeCapacity(zcu.main_mod); } From 50960fac80b1d04f7858215d963fa64a7583210b Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 13 Aug 2024 20:20:38 +0100 Subject: [PATCH 10/25] compiler: be more cautious about source locations Two fixes here. * Prevent a crash when sorting the list of analysis errors when some errors refer to lost source locations. These errors can be sorted anywhere in the list, because they are (in theory) guaranteed to never be emitted by the `resolveReferences` logic. This case occurs, for instance, when a declaration has compile errors in the initial update and is deleted in the second update. * Prevent a crash when resolving the source location for `entire_file` errors for a non-existent file. This is the bug underlying #20954. Resolves: #20954. 
--- src/Compilation.zig | 10 ++++++++-- src/Sema.zig | 4 ++-- src/Zcu.zig | 15 ++++++++++++--- src/crash_report.zig | 16 ++++++++++++++-- 4 files changed, 36 insertions(+), 9 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 5c49e96db2..8787d679e6 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3203,8 +3203,14 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { pub fn lessThan(ctx: @This(), lhs_index: usize, rhs_index: usize) bool { if (ctx.err.*) |_| return lhs_index < rhs_index; const errors = ctx.zcu.failed_analysis.values(); - const lhs_src_loc = errors[lhs_index].src_loc.upgrade(ctx.zcu); - const rhs_src_loc = errors[rhs_index].src_loc.upgrade(ctx.zcu); + const lhs_src_loc = errors[lhs_index].src_loc.upgradeOrLost(ctx.zcu) orelse { + // LHS source location lost, so should never be referenced. Just sort it to the end. + return false; + }; + const rhs_src_loc = errors[rhs_index].src_loc.upgradeOrLost(ctx.zcu) orelse { + // RHS source location lost, so should never be referenced. Just sort it to the end. + return true; + }; return if (lhs_src_loc.file_scope != rhs_src_loc.file_scope) std.mem.order( u8, lhs_src_loc.file_scope.sub_file_path, diff --git a/src/Sema.zig b/src/Sema.zig index 9b6ff7cd8e..a679f69a9c 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -17729,7 +17729,7 @@ fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat const msg = msg: { const name = name: { // TODO: we should probably store this name in the ZIR to avoid this complexity. - const file, const src_base_node = Module.LazySrcLoc.resolveBaseNode(block.src_base_inst, mod); + const file, const src_base_node = Module.LazySrcLoc.resolveBaseNode(block.src_base_inst, mod).?; const tree = file.getTree(sema.gpa) catch |err| { // In this case we emit a warning + a less precise source location. 
log.warn("unable to load {s}: {s}", .{ @@ -17757,7 +17757,7 @@ fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat if (!block.is_typeof and !block.is_comptime and sema.func_index != .none) { const msg = msg: { const name = name: { - const file, const src_base_node = Module.LazySrcLoc.resolveBaseNode(block.src_base_inst, mod); + const file, const src_base_node = Module.LazySrcLoc.resolveBaseNode(block.src_base_inst, mod).?; const tree = file.getTree(sema.gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ diff --git a/src/Zcu.zig b/src/Zcu.zig index c78abb69bf..8626a147b6 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -2042,10 +2042,11 @@ pub const LazySrcLoc = struct { .offset = .unneeded, }; - pub fn resolveBaseNode(base_node_inst: InternPool.TrackedInst.Index, zcu: *Zcu) struct { *File, Ast.Node.Index } { + /// Returns `null` if the ZIR instruction has been lost across incremental updates. + pub fn resolveBaseNode(base_node_inst: InternPool.TrackedInst.Index, zcu: *Zcu) ?struct { *File, Ast.Node.Index } { const ip = &zcu.intern_pool; const file_index, const zir_inst = inst: { - const info = base_node_inst.resolveFull(ip) orelse @panic("TODO: resolve source location relative to lost inst"); + const info = base_node_inst.resolveFull(ip) orelse return null; break :inst .{ info.file, info.inst }; }; const file = zcu.fileByIndex(file_index); @@ -2071,7 +2072,15 @@ pub const LazySrcLoc = struct { /// Resolve the file and AST node of `base_node_inst` to get a resolved `SrcLoc`. /// The resulting `SrcLoc` should only be used ephemerally, as it is not correct across incremental updates. pub fn upgrade(lazy: LazySrcLoc, zcu: *Zcu) SrcLoc { - const file, const base_node = resolveBaseNode(lazy.base_node_inst, zcu); + return lazy.upgradeOrLost(zcu).?; + } + + /// Like `upgrade`, but returns `null` if the source location has been lost across incremental updates. 
+ pub fn upgradeOrLost(lazy: LazySrcLoc, zcu: *Zcu) ?SrcLoc { + const file, const base_node: Ast.Node.Index = if (lazy.offset == .entire_file) .{ + zcu.fileByIndex(lazy.base_node_inst.resolveFile(&zcu.intern_pool)), + 0, + } else resolveBaseNode(lazy.base_node_inst, zcu) orelse return null; return .{ .file_scope = file, .base_node = base_node, diff --git a/src/crash_report.zig b/src/crash_report.zig index d4fe72a8e8..67ec0e0eb0 100644 --- a/src/crash_report.zig +++ b/src/crash_report.zig @@ -78,7 +78,13 @@ fn dumpStatusReport() !void { const block: *Sema.Block = anal.block; const zcu = anal.sema.pt.zcu; - const file, const src_base_node = Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, zcu); + const file, const src_base_node = Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, zcu) orelse { + const file = zcu.fileByIndex(block.src_base_inst.resolveFile(&zcu.intern_pool)); + try stderr.writeAll("Analyzing lost instruction in file '"); + try writeFilePath(file, stderr); + try stderr.writeAll("'. 
This should not happen!\n\n"); + return; + }; try stderr.writeAll("Analyzing "); try writeFilePath(file, stderr); @@ -104,7 +110,13 @@ fn dumpStatusReport() !void { while (parent) |curr| { fba.reset(); try stderr.writeAll(" in "); - const cur_block_file, const cur_block_src_base_node = Zcu.LazySrcLoc.resolveBaseNode(curr.block.src_base_inst, zcu); + const cur_block_file, const cur_block_src_base_node = Zcu.LazySrcLoc.resolveBaseNode(curr.block.src_base_inst, zcu) orelse { + const cur_block_file = zcu.fileByIndex(curr.block.src_base_inst.resolveFile(&zcu.intern_pool)); + try writeFilePath(cur_block_file, stderr); + try stderr.writeAll("\n > [lost instruction; this should not happen]\n"); + parent = curr.parent; + continue; + }; try writeFilePath(cur_block_file, stderr); try stderr.writeAll("\n > "); print_zir.renderSingleInstruction( From 3fb5cad07dd3b10ad32a116cbd7195218b0a93fe Mon Sep 17 00:00:00 2001 From: mlugg Date: Wed, 14 Aug 2024 00:08:38 +0100 Subject: [PATCH 11/25] Sema: don't delete reified enum type with error in field An enum type is kind of like a struct or union type, in that field errors are happening during type resolution. The only difference is that type resolution happens at the time the type is created. So, errors in fields should not cause the type to be deleted: we've already added a reference entry, and incremental dependencies which must be invalidated if the compile error is fixed. Once we call `WipEnumType.prepare`, we should never call `WipEnumType.cancel`. This is analogous to logic for enum declarations in `Sema.zirEnumDecl`. 
--- src/Sema.zig | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Sema.zig b/src/Sema.zig index a679f69a9c..1aedd745ea 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -22062,7 +22062,8 @@ fn reifyEnum( return Air.internedToRef(ty); }, }; - errdefer wip_ty.cancel(ip, pt.tid); + var done = false; + errdefer if (!done) wip_ty.cancel(ip, pt.tid); if (tag_ty.zigTypeTag(mod) != .Int) { return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{}); @@ -22088,6 +22089,7 @@ fn reifyEnum( try sema.addTypeReferenceEntry(src, wip_ty.index); wip_ty.prepare(ip, new_cau_index, new_namespace_index); wip_ty.setTagTy(ip, tag_ty.toIntern()); + done = true; for (0..fields_len) |field_idx| { const field_info = try fields_val.elemValue(pt, field_idx); From 1ccbc6ca2066bec7a8b723598a9a17d70da66bc7 Mon Sep 17 00:00:00 2001 From: mlugg Date: Wed, 14 Aug 2024 00:18:58 +0100 Subject: [PATCH 12/25] test: add new incremental test This case is adapted from #11344, and passes with `-fno-emit-bin`. 
Resolves: #11344 --- test/incremental/delete_comptime_decls | 38 ++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 test/incremental/delete_comptime_decls diff --git a/test/incremental/delete_comptime_decls b/test/incremental/delete_comptime_decls new file mode 100644 index 0000000000..424dc37ea8 --- /dev/null +++ b/test/incremental/delete_comptime_decls @@ -0,0 +1,38 @@ +#target=x86_64-linux +#update=initial version +#file=main.zig +pub fn main() void {} +comptime { + var array = [_:0]u8{ 1, 2, 3, 4 }; + const src_slice: [:0]u8 = &array; + const slice = src_slice[2..6]; + _ = slice; +} +comptime { + var array = [_:0]u8{ 1, 2, 3, 4 }; + const slice = array[2..6]; + _ = slice; +} +comptime { + var array = [_]u8{ 1, 2, 3, 4 }; + const slice = array[2..5]; + _ = slice; +} +comptime { + var array = [_:0]u8{ 1, 2, 3, 4 }; + const slice = array[3..2]; + _ = slice; +} +#expect_error=ignored + +#update=delete and modify comptime decls +#file=main.zig +pub fn main() void {} +comptime { + const x: [*c]u8 = null; + var runtime_len: usize = undefined; + runtime_len = 0; + const y = x[0..runtime_len]; + _ = y; +} +#expect_error=ignored From 978fe68a65be2b5a1551ab5eafdcdbfa467ba891 Mon Sep 17 00:00:00 2001 From: mlugg Date: Wed, 14 Aug 2024 02:44:05 +0100 Subject: [PATCH 13/25] Compilation: actually do codegen on non-initial updates --- src/Compilation.zig | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 8787d679e6..af98fc6f6e 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3685,7 +3685,10 @@ fn performAllTheWorkInner( zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0); } - if (!InternPool.single_threaded) comp.thread_pool.spawnWgId(&work_queue_wait_group, codegenThread, .{comp}); + if (!InternPool.single_threaded) { + comp.codegen_work.done = false; // may be `true` from a prior update + comp.thread_pool.spawnWgId(&work_queue_wait_group, 
codegenThread, .{comp}); + } defer if (!InternPool.single_threaded) { { comp.codegen_work.mutex.lock(); From 46388d338a93a35d139866411f80115a03b30a6a Mon Sep 17 00:00:00 2001 From: mlugg Date: Wed, 14 Aug 2024 08:10:49 +0100 Subject: [PATCH 14/25] InternPool: don't remove outdated types When a type becomes outdated, there will still be lingering references to the old index -- for instance, any declaration whose value was that type holds a reference to that index. These references may live for an arbitrarily long time in some cases. So, we can't just remove the type from the pool -- the old `Index` must remain valid! Instead, we want to preserve the old `Index`, but avoid it from ever appearing in lookups. (It's okay if analysis of something referencing the old `Index` does weird stuff -- such analysis are guaranteed by the incremental compilation model to always be unreferenced.) So, we use the new `InternPool.putKeyReplace` to replace the shard entry for this index with the newly-created index. --- src/InternPool.zig | 71 +++++++++++++++++++++++++++++++++++++++---- src/Sema.zig | 33 ++++++++++---------- src/Zcu/PerThread.zig | 15 ++++----- 3 files changed, 87 insertions(+), 32 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 91a58e10e7..7c1b37d3d4 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -7077,6 +7077,7 @@ fn getOrPutKeyEnsuringAdditionalCapacity( const index = entry.acquire(); if (index == .none) break; if (entry.hash != hash) continue; + if (ip.isRemoved(index)) continue; if (ip.indexToKey(index).eql(key, ip)) return .{ .existing = index }; } shard.mutate.map.mutex.lock(); @@ -7151,6 +7152,43 @@ fn getOrPutKeyEnsuringAdditionalCapacity( .map_index = map_index, } }; } +/// Like `getOrPutKey`, but asserts that the key already exists, and prepares to replace +/// its shard entry with a new `Index` anyway. 
After finalizing this, the old index remains +/// valid (in that `indexToKey` and similar queries will behave as before), but it will +/// never be returned from a lookup (`getOrPutKey` etc). +/// This is used by incremental compilation when an existing container type is outdated. In +/// this case, the type must be recreated at a new `InternPool.Index`, but the old index must +/// remain valid since now-unreferenced `AnalUnit`s may retain references to it. The old index +/// will be cleaned up when the `Zcu` undergoes garbage collection. +fn putKeyReplace( + ip: *InternPool, + tid: Zcu.PerThread.Id, + key: Key, +) GetOrPutKey { + const full_hash = key.hash64(ip); + const hash: u32 = @truncate(full_hash >> 32); + const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))]; + shard.mutate.map.mutex.lock(); + errdefer shard.mutate.map.mutex.unlock(); + const map = shard.shared.map; + const map_mask = map.header().mask(); + var map_index = hash; + while (true) : (map_index += 1) { + map_index &= map_mask; + const entry = &map.entries[map_index]; + const index = entry.value; + assert(index != .none); // key not present + if (entry.hash == hash and ip.indexToKey(index).eql(key, ip)) { + break; // we found the entry to replace + } + } + return .{ .new = .{ + .ip = ip, + .tid = tid, + .shard = shard, + .map_index = map_index, + } }; +} pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) Allocator.Error!Index { var gop = try ip.getOrPutKey(gpa, tid, key); @@ -7990,8 +8028,11 @@ pub fn getUnionType( gpa: Allocator, tid: Zcu.PerThread.Id, ini: UnionTypeInit, + /// If it is known that there is an existing type with this key which is outdated, + /// this is passed as `true`, and the type is replaced with one at a fresh index. 
+ replace_existing: bool, ) Allocator.Error!WipNamespaceType.Result { - var gop = try ip.getOrPutKey(gpa, tid, .{ .union_type = switch (ini.key) { + const key: Key = .{ .union_type = switch (ini.key) { .declared => |d| .{ .declared = .{ .zir_index = d.zir_index, .captures = .{ .external = d.captures }, @@ -8000,7 +8041,11 @@ pub fn getUnionType( .zir_index = r.zir_index, .type_hash = r.type_hash, } }, - } }); + } }; + var gop = if (replace_existing) + ip.putKeyReplace(tid, key) + else + try ip.getOrPutKey(gpa, tid, key); defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; @@ -8166,8 +8211,11 @@ pub fn getStructType( gpa: Allocator, tid: Zcu.PerThread.Id, ini: StructTypeInit, + /// If it is known that there is an existing type with this key which is outdated, + /// this is passed as `true`, and the type is replaced with one at a fresh index. + replace_existing: bool, ) Allocator.Error!WipNamespaceType.Result { - var gop = try ip.getOrPutKey(gpa, tid, .{ .struct_type = switch (ini.key) { + const key: Key = .{ .struct_type = switch (ini.key) { .declared => |d| .{ .declared = .{ .zir_index = d.zir_index, .captures = .{ .external = d.captures }, @@ -8176,7 +8224,11 @@ pub fn getStructType( .zir_index = r.zir_index, .type_hash = r.type_hash, } }, - } }); + } }; + var gop = if (replace_existing) + ip.putKeyReplace(tid, key) + else + try ip.getOrPutKey(gpa, tid, key); defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; @@ -9200,8 +9252,11 @@ pub fn getEnumType( gpa: Allocator, tid: Zcu.PerThread.Id, ini: EnumTypeInit, + /// If it is known that there is an existing type with this key which is outdated, + /// this is passed as `true`, and the type is replaced with one at a fresh index. 
+ replace_existing: bool, ) Allocator.Error!WipEnumType.Result { - var gop = try ip.getOrPutKey(gpa, tid, .{ .enum_type = switch (ini.key) { + const key: Key = .{ .enum_type = switch (ini.key) { .declared => |d| .{ .declared = .{ .zir_index = d.zir_index, .captures = .{ .external = d.captures }, @@ -9210,7 +9265,11 @@ pub fn getEnumType( .zir_index = r.zir_index, .type_hash = r.type_hash, } }, - } }); + } }; + var gop = if (replace_existing) + ip.putKeyReplace(tid, key) + else + try ip.getOrPutKey(gpa, tid, key); defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; diff --git a/src/Sema.zig b/src/Sema.zig index 1aedd745ea..d760927c4d 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2724,9 +2724,9 @@ fn wrapWipTy(sema: *Sema, wip_ty: anytype) @TypeOf(wip_ty) { } /// Given a type just looked up in the `InternPool`, check whether it is -/// considered outdated on this update. If so, remove it from the pool -/// and return `true`. -fn maybeRemoveOutdatedType(sema: *Sema, ty: InternPool.Index) !bool { +/// considered outdated on this update. If so, returns `true`, and the +/// caller must replace the outdated type with a fresh one. 
+fn checkOutdatedType(sema: *Sema, ty: InternPool.Index) !bool { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; @@ -2745,7 +2745,6 @@ fn maybeRemoveOutdatedType(sema: *Sema, ty: InternPool.Index) !bool { if (!was_outdated) return false; _ = zcu.outdated_ready.swapRemove(cau_unit); zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, cau_unit); - zcu.intern_pool.remove(pt.tid, ty); try zcu.markDependeeOutdated(.marked_po, .{ .interned = ty }); return true; } @@ -2815,14 +2814,14 @@ fn zirStructDecl( .captures = captures, } }, }; - const wip_ty = sema.wrapWipTy(switch (try ip.getStructType(gpa, pt.tid, struct_init)) { + const wip_ty = sema.wrapWipTy(switch (try ip.getStructType(gpa, pt.tid, struct_init, false)) { .existing => |ty| wip: { - if (!try sema.maybeRemoveOutdatedType(ty)) { + if (!try sema.checkOutdatedType(ty)) { try sema.declareDependency(.{ .interned = ty }); try sema.addTypeReferenceEntry(src, ty); return Air.internedToRef(ty); } - break :wip (try ip.getStructType(gpa, pt.tid, struct_init)).wip; + break :wip (try ip.getStructType(gpa, pt.tid, struct_init, true)).wip; }, .wip => |wip| wip, }); @@ -3041,14 +3040,14 @@ fn zirEnumDecl( .captures = captures, } }, }; - const wip_ty = sema.wrapWipTy(switch (try ip.getEnumType(gpa, pt.tid, enum_init)) { + const wip_ty = sema.wrapWipTy(switch (try ip.getEnumType(gpa, pt.tid, enum_init, false)) { .existing => |ty| wip: { - if (!try sema.maybeRemoveOutdatedType(ty)) { + if (!try sema.checkOutdatedType(ty)) { try sema.declareDependency(.{ .interned = ty }); try sema.addTypeReferenceEntry(src, ty); return Air.internedToRef(ty); } - break :wip (try ip.getEnumType(gpa, pt.tid, enum_init)).wip; + break :wip (try ip.getEnumType(gpa, pt.tid, enum_init, true)).wip; }, .wip => |wip| wip, }); @@ -3311,14 +3310,14 @@ fn zirUnionDecl( .captures = captures, } }, }; - const wip_ty = sema.wrapWipTy(switch (try ip.getUnionType(gpa, pt.tid, union_init)) { + const wip_ty = sema.wrapWipTy(switch (try 
ip.getUnionType(gpa, pt.tid, union_init, false)) { .existing => |ty| wip: { - if (!try sema.maybeRemoveOutdatedType(ty)) { + if (!try sema.checkOutdatedType(ty)) { try sema.declareDependency(.{ .interned = ty }); try sema.addTypeReferenceEntry(src, ty); return Air.internedToRef(ty); } - break :wip (try ip.getUnionType(gpa, pt.tid, union_init)).wip; + break :wip (try ip.getUnionType(gpa, pt.tid, union_init, true)).wip; }, .wip => |wip| wip, }); @@ -3407,7 +3406,7 @@ fn zirOpaqueDecl( }; // No `wrapWipTy` needed as no std.builtin types are opaque. const wip_ty = switch (try ip.getOpaqueType(gpa, pt.tid, opaque_init)) { - // No `maybeRemoveOutdatedType` as opaque types are never outdated. + // No `checkOutdatedType` as opaque types are never outdated. .existing => |ty| { try sema.addTypeReferenceEntry(src, ty); return Air.internedToRef(ty); @@ -22054,7 +22053,7 @@ fn reifyEnum( .zir_index = tracked_inst, .type_hash = hasher.final(), } }, - })) { + }, false)) { .wip => |wip| wip, .existing => |ty| { try sema.declareDependency(.{ .interned = ty }); @@ -22224,7 +22223,7 @@ fn reifyUnion( .zir_index = tracked_inst, .type_hash = hasher.final(), } }, - })) { + }, false)) { .wip => |wip| wip, .existing => |ty| { try sema.declareDependency(.{ .interned = ty }); @@ -22494,7 +22493,7 @@ fn reifyStruct( .zir_index = tracked_inst, .type_hash = hasher.final(), } }, - })) { + }, false)) { .wip => |wip| wip, .existing => |ty| { try sema.declareDependency(.{ .interned = ty }); diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 2720edd2f2..83a7dce4fc 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -925,6 +925,7 @@ fn createFileRootStruct( pt: Zcu.PerThread, file_index: Zcu.File.Index, namespace_index: Zcu.Namespace.Index, + replace_existing: bool, ) Allocator.Error!InternPool.Index { const zcu = pt.zcu; const gpa = zcu.gpa; @@ -968,7 +969,7 @@ fn createFileRootStruct( .zir_index = tracked_inst, .captures = &.{}, } }, - })) { + }, replace_existing)) { 
.existing => unreachable, // we wouldn't be analysing the file root if this type existed .wip => |wip| wip, }; @@ -1023,8 +1024,7 @@ fn recreateFileRoot(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError zcu.gpa, InternPool.AnalUnit.wrap(.{ .cau = file_root_type_cau }), ); - ip.remove(pt.tid, file_root_type); - _ = try pt.createFileRootStruct(file_index, namespace_index); + _ = try pt.createFileRootStruct(file_index, namespace_index, true); } /// Re-scan the namespace of a file's root struct type on an incremental update. @@ -1062,8 +1062,6 @@ fn updateFileNamespace(pt: Zcu.PerThread, file_index: Zcu.File.Index) Allocator. try pt.scanNamespace(namespace_index, decls); } -/// Regardless of the file status, will create a `Decl` if none exists so that we can track -/// dependencies and re-analyze when the file becomes outdated. fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1083,7 +1081,7 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { .owner_type = undefined, // set in `createFileRootStruct` .file_scope = file_index, }); - const struct_ty = try pt.createFileRootStruct(file_index, new_namespace_index); + const struct_ty = try pt.createFileRootStruct(file_index, new_namespace_index, false); errdefer zcu.intern_pool.remove(pt.tid, struct_ty); switch (zcu.comp.cache_use) { @@ -1153,11 +1151,10 @@ fn semaCau(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) !SemaCauResult { // This declaration has no value so is definitely not a std.builtin type. break :ip_index .none; }, - .type => |ty| { + .type => { // This is an incremental update, and this type is being re-analyzed because it is outdated. // The type must be recreated at a new `InternPool.Index`. - // Remove it from the InternPool and mark it outdated so that creation sites are re-analyzed. - ip.remove(pt.tid, ty); + // Mark it outdated so that creation sites are re-analyzed. 
return .{ .invalidate_decl_val = true, .invalidate_decl_ref = true, From 5a8780838fb3fc18f7c7ebfbf164b37032c2f829 Mon Sep 17 00:00:00 2001 From: mlugg Date: Wed, 14 Aug 2024 08:48:31 +0100 Subject: [PATCH 15/25] Sema: don't set union tag type if it's not an enum --- src/Sema.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Sema.zig b/src/Sema.zig index d760927c4d..dab4262bdd 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -36641,11 +36641,11 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_ty: InternPool.Ind } } else { // The provided type is the enum tag type. - union_type.setTagType(ip, provided_ty.toIntern()); const enum_type = switch (ip.indexToKey(provided_ty.toIntern())) { .enum_type => ip.loadEnumType(provided_ty.toIntern()), else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{provided_ty.fmt(pt)}), }; + union_type.setTagType(ip, provided_ty.toIntern()); // The fields of the union must match the enum exactly. // A flag per field is used to check for missing and extraneous fields. 
explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len); From 65cbdefe4d923efd8fd2cfb555cc02a52c5635fc Mon Sep 17 00:00:00 2001 From: mlugg Date: Fri, 16 Aug 2024 12:46:24 +0100 Subject: [PATCH 16/25] tools: add CBE option to incr-check --- tools/incr-check.zig | 151 ++++++++++++++++++++++++++++++++++--------- 1 file changed, 120 insertions(+), 31 deletions(-) diff --git a/tools/incr-check.zig b/tools/incr-check.zig index 6bf2de921a..0386f1a12d 100644 --- a/tools/incr-check.zig +++ b/tools/incr-check.zig @@ -2,7 +2,13 @@ const std = @import("std"); const fatal = std.process.fatal; const Allocator = std.mem.Allocator; -const usage = "usage: incr-check [-fno-emit-bin] [--zig-lib-dir lib] [--debug-zcu]"; +const usage = "usage: incr-check [--zig-lib-dir lib] [--debug-zcu] [--emit none|bin|c] [--zig-cc-binary /path/to/zig]"; + +const EmitMode = enum { + none, + bin, + c, +}; pub fn main() !void { var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator); @@ -12,19 +18,24 @@ pub fn main() !void { var opt_zig_exe: ?[]const u8 = null; var opt_input_file_name: ?[]const u8 = null; var opt_lib_dir: ?[]const u8 = null; - var no_bin = false; + var opt_cc_zig: ?[]const u8 = null; + var emit: EmitMode = .bin; var debug_zcu = false; var arg_it = try std.process.argsWithAllocator(arena); _ = arg_it.skip(); while (arg_it.next()) |arg| { if (arg.len > 0 and arg[0] == '-') { - if (std.mem.eql(u8, arg, "-fno-emit-bin")) { - no_bin = true; - } else if (std.mem.eql(u8, arg, "--debug-zcu")) { - debug_zcu = true; + if (std.mem.eql(u8, arg, "--emit")) { + const emit_str = arg_it.next() orelse fatal("expected arg after '--emit'\n{s}", .{usage}); + emit = std.meta.stringToEnum(EmitMode, emit_str) orelse + fatal("invalid emit mode '{s}'\n{s}", .{ emit_str, usage }); } else if (std.mem.eql(u8, arg, "--zig-lib-dir")) { opt_lib_dir = arg_it.next() orelse fatal("expected arg after '--zig-lib-dir'\n{s}", .{usage}); + } else if (std.mem.eql(u8, arg, "--debug-zcu")) { + 
debug_zcu = true; + } else if (std.mem.eql(u8, arg, "--zig-cc-binary")) { + opt_cc_zig = arg_it.next() orelse fatal("expect arg after '--zig-cc-binary'\n{s}", .{usage}); } else { fatal("unknown option '{s}'\n{s}", .{ arg, usage }); } @@ -51,20 +62,19 @@ pub fn main() !void { const tmp_dir_path = "tmp_" ++ std.fmt.hex(rand_int); const tmp_dir = try std.fs.cwd().makeOpenPath(tmp_dir_path, .{}); - if (opt_lib_dir) |lib_dir| { - if (!std.fs.path.isAbsolute(lib_dir)) { - // The cwd of the subprocess is within the tmp dir, so prepend `..` to the path. - opt_lib_dir = try std.fs.path.join(arena, &.{ "..", lib_dir }); - } - } - const child_prog_node = prog_node.start("zig build-exe", 0); defer child_prog_node.end(); + // Convert paths to be relative to the cwd of the subprocess. + const resolved_zig_exe = try std.fs.path.relative(arena, tmp_dir_path, zig_exe); + const opt_resolved_lib_dir = if (opt_lib_dir) |lib_dir| + try std.fs.path.relative(arena, tmp_dir_path, lib_dir) + else + null; + var child_args: std.ArrayListUnmanaged([]const u8) = .{}; try child_args.appendSlice(arena, &.{ - // Convert incr-check-relative path to subprocess-relative path. 
- try std.fs.path.relative(arena, tmp_dir_path, zig_exe), + resolved_zig_exe, "build-exe", case.root_source_file, "-fincremental", @@ -76,13 +86,13 @@ pub fn main() !void { ".global_cache", "--listen=-", }); - if (opt_lib_dir) |lib_dir| { - try child_args.appendSlice(arena, &.{ "--zig-lib-dir", lib_dir }); + if (opt_resolved_lib_dir) |resolved_lib_dir| { + try child_args.appendSlice(arena, &.{ "--zig-lib-dir", resolved_lib_dir }); } - if (no_bin) { - try child_args.append(arena, "-fno-emit-bin"); - } else { - try child_args.appendSlice(arena, &.{ "-fno-llvm", "-fno-lld" }); + switch (emit) { + .bin => try child_args.appendSlice(arena, &.{ "-fno-llvm", "-fno-lld" }), + .none => try child_args.append(arena, "-fno-emit-bin"), + .c => try child_args.appendSlice(arena, &.{ "-ofmt=c", "-lc" }), } if (debug_zcu) { try child_args.appendSlice(arena, &.{ "--debug-log", "zcu" }); @@ -96,6 +106,24 @@ pub fn main() !void { child.cwd_dir = tmp_dir; child.cwd = tmp_dir_path; + var cc_child_args: std.ArrayListUnmanaged([]const u8) = .{}; + if (emit == .c) { + const resolved_cc_zig_exe = if (opt_cc_zig) |cc_zig_exe| + try std.fs.path.relative(arena, tmp_dir_path, cc_zig_exe) + else + resolved_zig_exe; + + try cc_child_args.appendSlice(arena, &.{ + resolved_cc_zig_exe, + "cc", + "-target", + case.target_query, + "-I", + opt_resolved_lib_dir orelse fatal("'--zig-lib-dir' required when using '--emit c'", .{}), + "-o", + }); + } + var eval: Eval = .{ .arena = arena, .case = case, @@ -103,6 +131,8 @@ pub fn main() !void { .tmp_dir_path = tmp_dir_path, .child = &child, .allow_stderr = debug_zcu, + .emit = emit, + .cc_child_args = &cc_child_args, }; try child.spawn(); @@ -123,7 +153,7 @@ pub fn main() !void { eval.write(update); try eval.requestUpdate(); - try eval.check(&poller, update); + try eval.check(&poller, update, update_node); } try eval.end(&poller); @@ -138,6 +168,10 @@ const Eval = struct { tmp_dir_path: []const u8, child: *std.process.Child, allow_stderr: bool, + emit: 
EmitMode, + /// When `emit == .c`, this contains the first few arguments to `zig cc` to build the generated binary. + /// The arguments `out.c in.c` must be appended before spawning the subprocess. + cc_child_args: *std.ArrayListUnmanaged([]const u8), const StreamEnum = enum { stdout, stderr }; const Poller = std.io.Poller(StreamEnum); @@ -159,7 +193,7 @@ const Eval = struct { } } - fn check(eval: *Eval, poller: *Poller, update: Case.Update) !void { + fn check(eval: *Eval, poller: *Poller, update: Case.Update, prog_node: std.Progress.Node) !void { const arena = eval.arena; const Header = std.zig.Server.Message.Header; const stdout = poller.fifo(.stdout); @@ -201,12 +235,7 @@ const Eval = struct { } if (result_error_bundle.errorMessageCount() == 0) { // Empty bundle indicates successful update in a `-fno-emit-bin` build. - // We can't do a full success check since we don't have a binary, but let's - // at least check that no errors were expected. - switch (update.outcome) { - .unknown, .stdout, .exit_code => {}, - .compile_errors => fatal("expected compile errors but compilation incorrectly succeeded", .{}), - } + try eval.checkSuccessOutcome(update, null, prog_node); } else { try eval.checkErrorOutcome(update, result_error_bundle); } @@ -227,7 +256,7 @@ const Eval = struct { fatal("emit_bin_path included unexpected stderr:\n{s}", .{stderr_data}); } } - try eval.checkSuccessOutcome(update, result_binary); + try eval.checkSuccessOutcome(update, result_binary, prog_node); // This message indicates the end of the update. 
stdout.discard(body.len); return; @@ -270,12 +299,28 @@ const Eval = struct { } } - fn checkSuccessOutcome(eval: *Eval, update: Case.Update, binary_path: []const u8) !void { + fn checkSuccessOutcome(eval: *Eval, update: Case.Update, opt_emitted_path: ?[]const u8, prog_node: std.Progress.Node) !void { switch (update.outcome) { .unknown => return, .compile_errors => fatal("expected compile errors but compilation incorrectly succeeded", .{}), .stdout, .exit_code => {}, } + const emitted_path = opt_emitted_path orelse { + std.debug.assert(eval.emit == .none); + return; + }; + + const binary_path = switch (eval.emit) { + .none => unreachable, + .bin => emitted_path, + .c => bin: { + const rand_int = std.crypto.random.int(u64); + const out_bin_name = "./out_" ++ std.fmt.hex(rand_int); + try eval.buildCOutput(update, emitted_path, out_bin_name, prog_node); + break :bin out_bin_name; + }, + }; + const result = std.process.Child.run(.{ .allocator = eval.arena, .argv = &.{binary_path}, @@ -345,6 +390,50 @@ const Eval = struct { fatal("unexpected stderr:\n{s}", .{stderr_data}); } } + + fn buildCOutput(eval: *Eval, update: Case.Update, c_path: []const u8, out_path: []const u8, prog_node: std.Progress.Node) !void { + std.debug.assert(eval.cc_child_args.items.len > 0); + + const child_prog_node = prog_node.start("build cbe output", 0); + defer child_prog_node.end(); + + try eval.cc_child_args.appendSlice(eval.arena, &.{ out_path, c_path }); + defer eval.cc_child_args.items.len -= 2; + + const result = std.process.Child.run(.{ + .allocator = eval.arena, + .argv = eval.cc_child_args.items, + .cwd_dir = eval.tmp_dir, + .cwd = eval.tmp_dir_path, + .progress_node = child_prog_node, + }) catch |err| { + fatal("update '{s}': failed to spawn zig cc for '{s}': {s}", .{ + update.name, c_path, @errorName(err), + }); + }; + switch (result.term) { + .Exited => |code| if (code != 0) { + if (result.stderr.len != 0) { + std.log.err("update '{s}': zig cc stderr:\n{s}", .{ + update.name, 
result.stderr, + }); + } + fatal("update '{s}': zig cc for '{s}' failed with code {d}", .{ + update.name, c_path, code, + }); + }, + .Signal, .Stopped, .Unknown => { + if (result.stderr.len != 0) { + std.log.err("update '{s}': zig cc stderr:\n{s}", .{ + update.name, result.stderr, + }); + } + fatal("update '{s}': zig cc for '{s}' terminated unexpectedly", .{ + update.name, c_path, + }); + }, + } + } }; const Case = struct { From 84c2ebd6c6b16752d8d030d5904d0a525283cbf5 Mon Sep 17 00:00:00 2001 From: mlugg Date: Fri, 16 Aug 2024 12:46:52 +0100 Subject: [PATCH 17/25] frontend: incremental compilation progress Another big commit, sorry! This commit makes all fixes necessary for incremental updates of the compiler itself (specifically, adding a breakpoint to `zirCompileLog`) to succeed, at least on the frontend. The biggest change here is a reform to how types are handled. It works like this: * When a type is first created in `zirStructDecl` etc, its namespace is scanned. If the type requires resolution, an `interned` dependency is declared for the containing `AnalUnit`. * `zirThis` also declared an `interned` dependency for its `AnalUnit` on the namespace's owner type. * If the type's namespace changes, the surrounding source declaration changes hash, so `zirStructDecl` etc will be hit again. We check whether the namespace has been scanned this generation, and re-scan it if not. * Namespace lookups also check whether the namespace in question requires a re-scan based on the generation. This is because there's no guarantee that the `zirStructDecl` is re-analyzed before the namespace lookup is re-analyzed. * If a type's structure (essentially its fields) change, then the type's `Cau` is considered outdated. When the type is re-analyzed due to being outdated, or the `zirStructDecl` is re-analyzed by being transitively outdated, or a corresponding `zirThis` is re-analyzed by being transitively outdated, the struct type is recreated at a new `InternPool` index. 
The namespace's owner is updated (but not re-scanned, since that is handled by the mechanisms above), and the old type, while remaining a valid `Index`, is removed from the map metadata so it will never be found by lookups. `zirStructDecl` and `zirThis` store an `interned` dependency on the *new* type. --- src/Compilation.zig | 2 + src/InternPool.zig | 113 ++++++-- src/Sema.zig | 452 ++++++++++++++++------------- src/Zcu.zig | 61 ++-- src/Zcu/PerThread.zig | 654 ++++++++++++++++++++++++++++++++++++------ 5 files changed, 949 insertions(+), 333 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index af98fc6f6e..9c66d17507 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3569,6 +3569,8 @@ pub fn performAllTheWork( mod.sema_prog_node = std.Progress.Node.none; mod.codegen_prog_node.end(); mod.codegen_prog_node = std.Progress.Node.none; + + mod.generation += 1; }; try comp.performAllTheWorkInner(main_progress_node); if (!InternPool.single_threaded) if (comp.codegen_work.job_error) |job_error| return job_error; diff --git a/src/InternPool.zig b/src/InternPool.zig index 7c1b37d3d4..8259f94812 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -684,10 +684,6 @@ pub fn dependencyIterator(ip: *const InternPool, dependee: Dependee) DependencyI .ip = ip, .next_entry = .none, }; - if (ip.dep_entries.items[@intFromEnum(first_entry)].depender == .none) return .{ - .ip = ip, - .next_entry = .none, - }; return .{ .ip = ip, .next_entry = first_entry.toOptional(), @@ -724,7 +720,6 @@ pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalUnit, depend if (gop.found_existing and ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].depender == .none) { // Dummy entry, so we can reuse it rather than allocating a new one! 
- ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].next = .none; break :new_index gop.value_ptr.*; } @@ -732,7 +727,12 @@ pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalUnit, depend const new_index: DepEntry.Index, const ptr = if (ip.free_dep_entries.popOrNull()) |new_index| new: { break :new .{ new_index, &ip.dep_entries.items[@intFromEnum(new_index)] }; } else .{ @enumFromInt(ip.dep_entries.items.len), ip.dep_entries.addOneAssumeCapacity() }; - ptr.next = if (gop.found_existing) gop.value_ptr.*.toOptional() else .none; + if (gop.found_existing) { + ptr.next = gop.value_ptr.*.toOptional(); + ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].prev = new_index.toOptional(); + } else { + ptr.next = .none; + } gop.value_ptr.* = new_index; break :new_index new_index; }, @@ -754,10 +754,9 @@ pub const NamespaceNameKey = struct { }; pub const DepEntry = extern struct { - /// If null, this is a dummy entry - all other fields are `undefined`. It is - /// the first and only entry in one of `intern_pool.*_deps`, and does not - /// appear in any list by `first_dependency`, but is not in - /// `free_dep_entries` since `*_deps` stores a reference to it. + /// If null, this is a dummy entry. `next_dependee` is undefined. This is the first + /// entry in one of `*_deps`, and does not appear in any list by `first_dependency`, + /// but is not in `free_dep_entries` since `*_deps` stores a reference to it. depender: AnalUnit.Optional, /// Index into `dep_entries` forming a doubly linked list of all dependencies on this dependee. /// Used to iterate all dependers for a given dependee during an update. 
@@ -2689,7 +2688,12 @@ pub const Key = union(enum) { .variable => |a_info| { const b_info = b.variable; - return a_info.owner_nav == b_info.owner_nav; + return a_info.owner_nav == b_info.owner_nav and + a_info.ty == b_info.ty and + a_info.init == b_info.init and + a_info.lib_name == b_info.lib_name and + a_info.is_threadlocal == b_info.is_threadlocal and + a_info.is_weak_linkage == b_info.is_weak_linkage; }, .@"extern" => |a_info| { const b_info = b.@"extern"; @@ -8016,6 +8020,10 @@ pub const UnionTypeInit = struct { zir_index: TrackedInst.Index, captures: []const CaptureValue, }, + declared_owned_captures: struct { + zir_index: TrackedInst.Index, + captures: CaptureValue.Slice, + }, reified: struct { zir_index: TrackedInst.Index, type_hash: u64, @@ -8037,6 +8045,10 @@ pub fn getUnionType( .zir_index = d.zir_index, .captures = .{ .external = d.captures }, } }, + .declared_owned_captures => |d| .{ .declared = .{ + .zir_index = d.zir_index, + .captures = .{ .owned = d.captures }, + } }, .reified => |r| .{ .reified = .{ .zir_index = r.zir_index, .type_hash = r.type_hash, @@ -8060,7 +8072,7 @@ pub fn getUnionType( // TODO: fmt bug // zig fmt: off switch (ini.key) { - .declared => |d| @intFromBool(d.captures.len != 0) + d.captures.len, + inline .declared, .declared_owned_captures => |d| @intFromBool(d.captures.len != 0) + d.captures.len, .reified => 2, // type_hash: PackedU64 } + // zig fmt: on @@ -8069,7 +8081,10 @@ pub fn getUnionType( const extra_index = addExtraAssumeCapacity(extra, Tag.TypeUnion{ .flags = .{ - .any_captures = ini.key == .declared and ini.key.declared.captures.len != 0, + .any_captures = switch (ini.key) { + inline .declared, .declared_owned_captures => |d| d.captures.len != 0, + .reified => false, + }, .runtime_tag = ini.flags.runtime_tag, .any_aligned_fields = ini.flags.any_aligned_fields, .layout = ini.flags.layout, @@ -8078,7 +8093,10 @@ pub fn getUnionType( .assumed_runtime_bits = ini.flags.assumed_runtime_bits, .assumed_pointer_aligned = 
ini.flags.assumed_pointer_aligned, .alignment = ini.flags.alignment, - .is_reified = ini.key == .reified, + .is_reified = switch (ini.key) { + .declared, .declared_owned_captures => false, + .reified => true, + }, }, .fields_len = ini.fields_len, .size = std.math.maxInt(u32), @@ -8102,6 +8120,10 @@ pub fn getUnionType( extra.appendAssumeCapacity(.{@intCast(d.captures.len)}); extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}); }, + .declared_owned_captures => |d| if (d.captures.len != 0) { + extra.appendAssumeCapacity(.{@intCast(d.captures.len)}); + extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))}); + }, .reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)), } @@ -8199,6 +8221,10 @@ pub const StructTypeInit = struct { zir_index: TrackedInst.Index, captures: []const CaptureValue, }, + declared_owned_captures: struct { + zir_index: TrackedInst.Index, + captures: CaptureValue.Slice, + }, reified: struct { zir_index: TrackedInst.Index, type_hash: u64, @@ -8220,6 +8246,10 @@ pub fn getStructType( .zir_index = d.zir_index, .captures = .{ .external = d.captures }, } }, + .declared_owned_captures => |d| .{ .declared = .{ + .zir_index = d.zir_index, + .captures = .{ .owned = d.captures }, + } }, .reified => |r| .{ .reified = .{ .zir_index = r.zir_index, .type_hash = r.type_hash, @@ -8251,7 +8281,7 @@ pub fn getStructType( // TODO: fmt bug // zig fmt: off switch (ini.key) { - .declared => |d| @intFromBool(d.captures.len != 0) + d.captures.len, + inline .declared, .declared_owned_captures => |d| @intFromBool(d.captures.len != 0) + d.captures.len, .reified => 2, // type_hash: PackedU64 } + // zig fmt: on @@ -8267,10 +8297,16 @@ pub fn getStructType( .backing_int_ty = .none, .names_map = names_map, .flags = .{ - .any_captures = ini.key == .declared and ini.key.declared.captures.len != 0, + .any_captures = switch (ini.key) { + inline .declared, .declared_owned_captures => |d| d.captures.len != 0, + .reified => false, + }, 
.field_inits_wip = false, .inits_resolved = ini.inits_resolved, - .is_reified = ini.key == .reified, + .is_reified = switch (ini.key) { + .declared, .declared_owned_captures => false, + .reified => true, + }, }, }); try items.append(.{ @@ -8282,6 +8318,10 @@ pub fn getStructType( extra.appendAssumeCapacity(.{@intCast(d.captures.len)}); extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}); }, + .declared_owned_captures => |d| if (d.captures.len != 0) { + extra.appendAssumeCapacity(.{@intCast(d.captures.len)}); + extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))}); + }, .reified => |r| { _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)); }, @@ -8309,7 +8349,7 @@ pub fn getStructType( // TODO: fmt bug // zig fmt: off switch (ini.key) { - .declared => |d| @intFromBool(d.captures.len != 0) + d.captures.len, + inline .declared, .declared_owned_captures => |d| @intFromBool(d.captures.len != 0) + d.captures.len, .reified => 2, // type_hash: PackedU64 } + // zig fmt: on @@ -8324,7 +8364,10 @@ pub fn getStructType( .fields_len = ini.fields_len, .size = std.math.maxInt(u32), .flags = .{ - .any_captures = ini.key == .declared and ini.key.declared.captures.len != 0, + .any_captures = switch (ini.key) { + inline .declared, .declared_owned_captures => |d| d.captures.len != 0, + .reified => false, + }, .is_extern = is_extern, .known_non_opv = ini.known_non_opv, .requires_comptime = ini.requires_comptime, @@ -8342,7 +8385,10 @@ pub fn getStructType( .field_inits_wip = false, .inits_resolved = ini.inits_resolved, .fully_resolved = false, - .is_reified = ini.key == .reified, + .is_reified = switch (ini.key) { + .declared, .declared_owned_captures => false, + .reified => true, + }, }, }); try items.append(.{ @@ -8354,6 +8400,10 @@ pub fn getStructType( extra.appendAssumeCapacity(.{@intCast(d.captures.len)}); extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}); }, + .declared_owned_captures => |d| if (d.captures.len != 0) { + 
extra.appendAssumeCapacity(.{@intCast(d.captures.len)}); + extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))}); + }, .reified => |r| { _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)); }, @@ -9157,6 +9207,10 @@ pub const EnumTypeInit = struct { zir_index: TrackedInst.Index, captures: []const CaptureValue, }, + declared_owned_captures: struct { + zir_index: TrackedInst.Index, + captures: CaptureValue.Slice, + }, reified: struct { zir_index: TrackedInst.Index, type_hash: u64, @@ -9261,6 +9315,10 @@ pub fn getEnumType( .zir_index = d.zir_index, .captures = .{ .external = d.captures }, } }, + .declared_owned_captures => |d| .{ .declared = .{ + .zir_index = d.zir_index, + .captures = .{ .owned = d.captures }, + } }, .reified => |r| .{ .reified = .{ .zir_index = r.zir_index, .type_hash = r.type_hash, @@ -9288,7 +9346,7 @@ pub fn getEnumType( // TODO: fmt bug // zig fmt: off switch (ini.key) { - .declared => |d| d.captures.len, + inline .declared, .declared_owned_captures => |d| d.captures.len, .reified => 2, // type_hash: PackedU64 } + // zig fmt: on @@ -9298,7 +9356,7 @@ pub fn getEnumType( const extra_index = addExtraAssumeCapacity(extra, EnumAuto{ .name = undefined, // set by `prepare` .captures_len = switch (ini.key) { - .declared => |d| @intCast(d.captures.len), + inline .declared, .declared_owned_captures => |d| @intCast(d.captures.len), .reified => std.math.maxInt(u32), }, .namespace = undefined, // set by `prepare` @@ -9317,6 +9375,7 @@ pub fn getEnumType( extra.appendAssumeCapacity(undefined); // `cau` will be set by `finish` switch (ini.key) { .declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}), + .declared_owned_captures => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))}), .reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)), } const names_start = extra.mutate.len; @@ -9347,7 +9406,7 @@ pub fn getEnumType( // TODO: fmt bug // zig fmt: off switch (ini.key) { - 
.declared => |d| d.captures.len, + inline .declared, .declared_owned_captures => |d| d.captures.len, .reified => 2, // type_hash: PackedU64 } + // zig fmt: on @@ -9358,7 +9417,7 @@ pub fn getEnumType( const extra_index = addExtraAssumeCapacity(extra, EnumExplicit{ .name = undefined, // set by `prepare` .captures_len = switch (ini.key) { - .declared => |d| @intCast(d.captures.len), + inline .declared, .declared_owned_captures => |d| @intCast(d.captures.len), .reified => std.math.maxInt(u32), }, .namespace = undefined, // set by `prepare` @@ -9382,6 +9441,7 @@ pub fn getEnumType( extra.appendAssumeCapacity(undefined); // `cau` will be set by `finish` switch (ini.key) { .declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}), + .declared_owned_captures => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))}), .reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)), } const names_start = extra.mutate.len; @@ -9445,10 +9505,12 @@ pub fn getGeneratedTagEnumType( .tid = tid, .index = items.mutate.len, }, ip); + const parent_namespace = ip.namespacePtr(ini.parent_namespace); const namespace = try ip.createNamespace(gpa, tid, .{ .parent = ini.parent_namespace.toOptional(), .owner_type = enum_index, - .file_scope = ip.namespacePtr(ini.parent_namespace).file_scope, + .file_scope = parent_namespace.file_scope, + .generation = parent_namespace.generation, }); errdefer ip.destroyNamespace(tid, namespace); @@ -11044,6 +11106,7 @@ pub fn destroyNamespace( .parent = undefined, .file_scope = undefined, .owner_type = undefined, + .generation = undefined, }; @field(namespace, Local.namespace_next_free_field) = @enumFromInt(local.mutate.namespaces.free_list); diff --git a/src/Sema.zig b/src/Sema.zig index dab4262bdd..c4345c4464 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2723,32 +2723,6 @@ fn wrapWipTy(sema: *Sema, wip_ty: anytype) @TypeOf(wip_ty) { return new; } -/// Given a type just looked up in the `InternPool`, check 
whether it is -/// considered outdated on this update. If so, returns `true`, and the -/// caller must replace the outdated type with a fresh one. -fn checkOutdatedType(sema: *Sema, ty: InternPool.Index) !bool { - const pt = sema.pt; - const zcu = pt.zcu; - const ip = &zcu.intern_pool; - - if (!zcu.comp.incremental) return false; - - const cau_index = switch (ip.indexToKey(ty)) { - .struct_type => ip.loadStructType(ty).cau.unwrap().?, - .union_type => ip.loadUnionType(ty).cau, - .enum_type => ip.loadEnumType(ty).cau.unwrap().?, - else => unreachable, - }; - const cau_unit = AnalUnit.wrap(.{ .cau = cau_index }); - const was_outdated = zcu.outdated.swapRemove(cau_unit) or - zcu.potentially_outdated.swapRemove(cau_unit); - if (!was_outdated) return false; - _ = zcu.outdated_ready.swapRemove(cau_unit); - zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, cau_unit); - try zcu.markDependeeOutdated(.marked_po, .{ .interned = ty }); - return true; -} - fn zirStructDecl( sema: *Sema, block: *Block, @@ -2815,13 +2789,16 @@ fn zirStructDecl( } }, }; const wip_ty = sema.wrapWipTy(switch (try ip.getStructType(gpa, pt.tid, struct_init, false)) { - .existing => |ty| wip: { - if (!try sema.checkOutdatedType(ty)) { - try sema.declareDependency(.{ .interned = ty }); - try sema.addTypeReferenceEntry(src, ty); - return Air.internedToRef(ty); - } - break :wip (try ip.getStructType(gpa, pt.tid, struct_init, true)).wip; + .existing => |ty| { + const new_ty = try pt.ensureTypeUpToDate(ty, false); + + // Make sure we update the namespace if the declaration is re-analyzed, to pick + // up on e.g. changed comptime decls. 
+ try pt.ensureNamespaceUpToDate(Type.fromInterned(new_ty).getNamespaceIndex(mod)); + + try sema.declareDependency(.{ .interned = new_ty }); + try sema.addTypeReferenceEntry(src, new_ty); + return Air.internedToRef(new_ty); }, .wip => |wip| wip, }); @@ -2839,6 +2816,7 @@ fn zirStructDecl( .parent = block.namespace.toOptional(), .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), + .generation = mod.generation, }); errdefer pt.destroyNamespace(new_namespace_index); @@ -2977,7 +2955,6 @@ fn zirEnumDecl( const tracked_inst = try block.trackZir(inst); const src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = LazySrcLoc.Offset.nodeOffset(0) }; - const tag_ty_src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = .{ .node_offset_container_tag = 0 } }; const tag_type_ref = if (small.has_tag_type) blk: { const tag_type_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); @@ -3041,13 +3018,16 @@ fn zirEnumDecl( } }, }; const wip_ty = sema.wrapWipTy(switch (try ip.getEnumType(gpa, pt.tid, enum_init, false)) { - .existing => |ty| wip: { - if (!try sema.checkOutdatedType(ty)) { - try sema.declareDependency(.{ .interned = ty }); - try sema.addTypeReferenceEntry(src, ty); - return Air.internedToRef(ty); - } - break :wip (try ip.getEnumType(gpa, pt.tid, enum_init, true)).wip; + .existing => |ty| { + const new_ty = try pt.ensureTypeUpToDate(ty, false); + + // Make sure we update the namespace if the declaration is re-analyzed, to pick + // up on e.g. changed comptime decls. 
+ try pt.ensureNamespaceUpToDate(Type.fromInterned(new_ty).getNamespaceIndex(mod)); + + try sema.declareDependency(.{ .interned = new_ty }); + try sema.addTypeReferenceEntry(src, new_ty); + return Air.internedToRef(new_ty); }, .wip => |wip| wip, }); @@ -3071,19 +3051,12 @@ fn zirEnumDecl( .parent = block.namespace.toOptional(), .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), + .generation = mod.generation, }); errdefer if (!done) pt.destroyNamespace(new_namespace_index); const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index); - if (pt.zcu.comp.incremental) { - try mod.intern_pool.addDependency( - gpa, - AnalUnit.wrap(.{ .cau = new_cau_index }), - .{ .src_hash = try block.trackZir(inst) }, - ); - } - try pt.scanNamespace(new_namespace_index, decls); try sema.declareDependency(.{ .interned = wip_ty.index }); @@ -3094,144 +3067,22 @@ fn zirEnumDecl( wip_ty.prepare(ip, new_cau_index, new_namespace_index); done = true; - const int_tag_ty = ty: { - // We create a block for the field type instructions because they - // may need to reference Decls from inside the enum namespace. - // Within the field type, default value, and alignment expressions, the owner should be the enum's `Cau`. 
- - const prev_owner = sema.owner; - sema.owner = AnalUnit.wrap(.{ .cau = new_cau_index }); - defer sema.owner = prev_owner; - - const prev_func_index = sema.func_index; - sema.func_index = .none; - defer sema.func_index = prev_func_index; - - var enum_block: Block = .{ - .parent = null, - .sema = sema, - .namespace = new_namespace_index, - .instructions = .{}, - .inlining = null, - .is_comptime = true, - .src_base_inst = tracked_inst, - .type_name_ctx = type_name, - }; - defer enum_block.instructions.deinit(sema.gpa); - - if (body.len != 0) { - _ = try sema.analyzeInlineBody(&enum_block, body, inst); - } - - if (tag_type_ref != .none) { - const ty = try sema.resolveType(&enum_block, tag_ty_src, tag_type_ref); - if (ty.zigTypeTag(mod) != .Int and ty.zigTypeTag(mod) != .ComptimeInt) { - return sema.fail(&enum_block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(pt)}); - } - break :ty ty; - } else if (fields_len == 0) { - break :ty try pt.intType(.unsigned, 0); - } else { - const bits = std.math.log2_int_ceil(usize, fields_len); - break :ty try pt.intType(.unsigned, bits); - } - }; - - wip_ty.setTagTy(ip, int_tag_ty.toIntern()); - - if (small.nonexhaustive and int_tag_ty.toIntern() != .comptime_int_type) { - if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(pt)) { - return sema.fail(block, src, "non-exhaustive enum specifies every value", .{}); - } - } - - var bit_bag_index: usize = body_end; - var cur_bit_bag: u32 = undefined; - var field_i: u32 = 0; - var last_tag_val: ?Value = null; - while (field_i < fields_len) : (field_i += 1) { - if (field_i % 32 == 0) { - cur_bit_bag = sema.code.extra[bit_bag_index]; - bit_bag_index += 1; - } - const has_tag_value = @as(u1, @truncate(cur_bit_bag)) != 0; - cur_bit_bag >>= 1; - - const field_name_index: Zir.NullTerminatedString = @enumFromInt(sema.code.extra[extra_index]); - const field_name_zir = sema.code.nullTerminatedString(field_name_index); - extra_index += 2; // field name, 
doc comment - - const field_name = try mod.intern_pool.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls); - - const value_src: LazySrcLoc = .{ - .base_node_inst = tracked_inst, - .offset = .{ .container_field_value = field_i }, - }; - - const tag_overflow = if (has_tag_value) overflow: { - const tag_val_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); - extra_index += 1; - const tag_inst = try sema.resolveInst(tag_val_ref); - last_tag_val = try sema.resolveConstDefinedValue(block, .{ - .base_node_inst = tracked_inst, - .offset = .{ .container_field_name = field_i }, - }, tag_inst, .{ - .needed_comptime_reason = "enum tag value must be comptime-known", - }); - if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) break :overflow true; - last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty); - if (wip_ty.nextField(&mod.intern_pool, field_name, last_tag_val.?.toIntern())) |conflict| { - assert(conflict.kind == .value); // AstGen validated names are unique - const other_field_src: LazySrcLoc = .{ - .base_node_inst = tracked_inst, - .offset = .{ .container_field_value = conflict.prev_field_idx }, - }; - const msg = msg: { - const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValueSema(pt, sema)}); - errdefer msg.destroy(gpa); - try sema.errNote(other_field_src, msg, "other occurrence here", .{}); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(block, msg); - } - break :overflow false; - } else if (any_values) overflow: { - var overflow: ?usize = null; - last_tag_val = if (last_tag_val) |val| - try sema.intAdd(val, try pt.intValue(int_tag_ty, 1), int_tag_ty, &overflow) - else - try pt.intValue(int_tag_ty, 0); - if (overflow != null) break :overflow true; - if (wip_ty.nextField(&mod.intern_pool, field_name, last_tag_val.?.toIntern())) |conflict| { - assert(conflict.kind == .value); // AstGen validated names are unique - const other_field_src: LazySrcLoc = .{ - 
.base_node_inst = tracked_inst, - .offset = .{ .container_field_value = conflict.prev_field_idx }, - }; - const msg = msg: { - const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValueSema(pt, sema)}); - errdefer msg.destroy(gpa); - try sema.errNote(other_field_src, msg, "other occurrence here", .{}); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(block, msg); - } - break :overflow false; - } else overflow: { - assert(wip_ty.nextField(&mod.intern_pool, field_name, .none) == null); - last_tag_val = try pt.intValue(Type.comptime_int, field_i); - if (!try sema.intFitsInType(last_tag_val.?, int_tag_ty, null)) break :overflow true; - last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty); - break :overflow false; - }; - - if (tag_overflow) { - const msg = try sema.errMsg(value_src, "enumeration value '{}' too large for type '{}'", .{ - last_tag_val.?.fmtValueSema(pt, sema), int_tag_ty.fmt(pt), - }); - return sema.failWithOwnedErrorMsg(block, msg); - } - } + try Sema.resolveDeclaredEnum( + pt, + wip_ty, + inst, + tracked_inst, + new_namespace_index, + type_name, + new_cau_index, + small, + body, + tag_type_ref, + any_values, + fields_len, + sema.code, + body_end, + ); codegen_type: { if (mod.comp.config.use_llvm) break :codegen_type; @@ -3311,13 +3162,16 @@ fn zirUnionDecl( } }, }; const wip_ty = sema.wrapWipTy(switch (try ip.getUnionType(gpa, pt.tid, union_init, false)) { - .existing => |ty| wip: { - if (!try sema.checkOutdatedType(ty)) { - try sema.declareDependency(.{ .interned = ty }); - try sema.addTypeReferenceEntry(src, ty); - return Air.internedToRef(ty); - } - break :wip (try ip.getUnionType(gpa, pt.tid, union_init, true)).wip; + .existing => |ty| { + const new_ty = try pt.ensureTypeUpToDate(ty, false); + + // Make sure we update the namespace if the declaration is re-analyzed, to pick + // up on e.g. changed comptime decls. 
+ try pt.ensureNamespaceUpToDate(Type.fromInterned(new_ty).getNamespaceIndex(mod)); + + try sema.declareDependency(.{ .interned = new_ty }); + try sema.addTypeReferenceEntry(src, new_ty); + return Air.internedToRef(new_ty); }, .wip => |wip| wip, }); @@ -3335,6 +3189,7 @@ fn zirUnionDecl( .parent = block.namespace.toOptional(), .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), + .generation = mod.generation, }); errdefer pt.destroyNamespace(new_namespace_index); @@ -3344,7 +3199,7 @@ fn zirUnionDecl( try mod.intern_pool.addDependency( gpa, AnalUnit.wrap(.{ .cau = new_cau_index }), - .{ .src_hash = try block.trackZir(inst) }, + .{ .src_hash = tracked_inst }, ); } @@ -3406,8 +3261,12 @@ fn zirOpaqueDecl( }; // No `wrapWipTy` needed as no std.builtin types are opaque. const wip_ty = switch (try ip.getOpaqueType(gpa, pt.tid, opaque_init)) { - // No `checkOutdatedType` as opaque types are never outdated. .existing => |ty| { + // Make sure we update the namespace if the declaration is re-analyzed, to pick + // up on e.g. changed comptime decls. + try pt.ensureNamespaceUpToDate(Type.fromInterned(ty).getNamespaceIndex(mod)); + + try sema.declareDependency(.{ .interned = ty }); try sema.addTypeReferenceEntry(src, ty); return Air.internedToRef(ty); }, @@ -3427,6 +3286,7 @@ fn zirOpaqueDecl( .parent = block.namespace.toOptional(), .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), + .generation = mod.generation, }); errdefer pt.destroyNamespace(new_namespace_index); @@ -6072,6 +5932,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr // trigger re-analysis later. 
try pt.ensureFileAnalyzed(result.file_index); const ty = zcu.fileRootType(result.file_index); + try sema.declareDependency(.{ .interned = ty }); try sema.addTypeReferenceEntry(src, ty); return Air.internedToRef(ty); } @@ -6821,6 +6682,8 @@ fn lookupInNamespace( const zcu = pt.zcu; const ip = &zcu.intern_pool; + try pt.ensureNamespaceUpToDate(namespace_index); + const namespace = zcu.namespacePtr(namespace_index); const adapter: Zcu.Namespace.NameAdapter = .{ .zcu = zcu }; @@ -14038,6 +13901,7 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. // trigger re-analysis later. try pt.ensureFileAnalyzed(result.file_index); const ty = zcu.fileRootType(result.file_index); + try sema.declareDependency(.{ .interned = ty }); try sema.addTypeReferenceEntry(operand_src, ty); return Air.internedToRef(ty); } @@ -17703,7 +17567,13 @@ fn zirThis( _ = extended; const pt = sema.pt; const namespace = pt.zcu.namespacePtr(block.namespace); - return Air.internedToRef(namespace.owner_type); + const new_ty = try pt.ensureTypeUpToDate(namespace.owner_type, false); + switch (pt.zcu.intern_pool.indexToKey(new_ty)) { + .struct_type, .union_type, .enum_type => try sema.declareDependency(.{ .interned = new_ty }), + .opaque_type => {}, + else => unreachable, + } + return Air.internedToRef(new_ty); } fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { @@ -19005,6 +18875,7 @@ fn typeInfoNamespaceDecls( const ip = &zcu.intern_pool; const namespace_index = opt_namespace_index.unwrap() orelse return; + try pt.ensureNamespaceUpToDate(namespace_index); const namespace = zcu.namespacePtr(namespace_index); const gop = try seen_namespaces.getOrPut(namespace); @@ -21871,6 +21742,7 @@ fn zirReify( .parent = block.namespace.toOptional(), .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), + .generation = mod.generation, }); try sema.addTypeReferenceEntry(src, wip_ty.index); @@ -22080,6 +21952,7 @@ 
fn reifyEnum( .parent = block.namespace.toOptional(), .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), + .generation = mod.generation, }); const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index); @@ -22384,6 +22257,7 @@ fn reifyUnion( .parent = block.namespace.toOptional(), .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), + .generation = mod.generation, }); const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index); @@ -22667,6 +22541,7 @@ fn reifyStruct( .parent = block.namespace.toOptional(), .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), + .generation = mod.generation, }); const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index); @@ -35373,7 +35248,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { if (struct_type.haveLayout(ip)) return; - try ty.resolveFields(pt); + try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type); if (struct_type.layout == .@"packed") { semaBackingIntType(pt, struct_type) catch |err| switch (err) { @@ -38499,6 +38374,187 @@ fn getOwnerFuncDeclInst(sema: *Sema) InternPool.TrackedInst.Index { return ip.getCau(cau).zir_index; } +/// Called as soon as a `declared` enum type is created. +/// Resolves the tag type and field inits. +/// Marks the `src_inst` dependency on the enum's declaration, so call sites need not do this. 
+pub fn resolveDeclaredEnum( + pt: Zcu.PerThread, + wip_ty: InternPool.WipEnumType, + inst: Zir.Inst.Index, + tracked_inst: InternPool.TrackedInst.Index, + namespace: InternPool.NamespaceIndex, + type_name: InternPool.NullTerminatedString, + enum_cau: InternPool.Cau.Index, + small: Zir.Inst.EnumDecl.Small, + body: []const Zir.Inst.Index, + tag_type_ref: Zir.Inst.Ref, + any_values: bool, + fields_len: u32, + zir: Zir, + body_end: usize, +) Zcu.CompileError!void { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + + const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable; + + const src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = LazySrcLoc.Offset.nodeOffset(0) }; + const tag_ty_src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = .{ .node_offset_container_tag = 0 } }; + + const anal_unit = AnalUnit.wrap(.{ .cau = enum_cau }); + + var arena = std.heap.ArenaAllocator.init(gpa); + defer arena.deinit(); + + var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa); + defer comptime_err_ret_trace.deinit(); + + var sema: Sema = .{ + .pt = pt, + .gpa = gpa, + .arena = arena.allocator(), + .code = zir, + .owner = anal_unit, + .func_index = .none, + .func_is_naked = false, + .fn_ret_ty = Type.void, + .fn_ret_ty_ies = null, + .comptime_err_ret_trace = &comptime_err_ret_trace, + }; + defer sema.deinit(); + + try sema.declareDependency(.{ .src_hash = tracked_inst }); + + var block: Block = .{ + .parent = null, + .sema = &sema, + .namespace = namespace, + .instructions = .{}, + .inlining = null, + .is_comptime = true, + .src_base_inst = tracked_inst, + .type_name_ctx = type_name, + }; + defer block.instructions.deinit(gpa); + + const int_tag_ty = ty: { + if (body.len != 0) { + _ = try sema.analyzeInlineBody(&block, body, inst); + } + + if (tag_type_ref != .none) { + const ty = try sema.resolveType(&block, tag_ty_src, tag_type_ref); + if (ty.zigTypeTag(zcu) != .Int and ty.zigTypeTag(zcu) != 
.ComptimeInt) { + return sema.fail(&block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(pt)}); + } + break :ty ty; + } else if (fields_len == 0) { + break :ty try pt.intType(.unsigned, 0); + } else { + const bits = std.math.log2_int_ceil(usize, fields_len); + break :ty try pt.intType(.unsigned, bits); + } + }; + + wip_ty.setTagTy(ip, int_tag_ty.toIntern()); + + if (small.nonexhaustive and int_tag_ty.toIntern() != .comptime_int_type) { + if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(pt)) { + return sema.fail(&block, src, "non-exhaustive enum specifies every value", .{}); + } + } + + var extra_index = body_end + bit_bags_count; + var bit_bag_index: usize = body_end; + var cur_bit_bag: u32 = undefined; + var last_tag_val: ?Value = null; + for (0..fields_len) |field_i_usize| { + const field_i: u32 = @intCast(field_i_usize); + if (field_i % 32 == 0) { + cur_bit_bag = zir.extra[bit_bag_index]; + bit_bag_index += 1; + } + const has_tag_value = @as(u1, @truncate(cur_bit_bag)) != 0; + cur_bit_bag >>= 1; + + const field_name_index: Zir.NullTerminatedString = @enumFromInt(zir.extra[extra_index]); + const field_name_zir = zir.nullTerminatedString(field_name_index); + extra_index += 2; // field name, doc comment + + const field_name = try ip.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls); + + const value_src: LazySrcLoc = .{ + .base_node_inst = tracked_inst, + .offset = .{ .container_field_value = field_i }, + }; + + const tag_overflow = if (has_tag_value) overflow: { + const tag_val_ref: Zir.Inst.Ref = @enumFromInt(zir.extra[extra_index]); + extra_index += 1; + const tag_inst = try sema.resolveInst(tag_val_ref); + last_tag_val = try sema.resolveConstDefinedValue(&block, .{ + .base_node_inst = tracked_inst, + .offset = .{ .container_field_name = field_i }, + }, tag_inst, .{ + .needed_comptime_reason = "enum tag value must be comptime-known", + }); + if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, 
null))) break :overflow true; + last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty); + if (wip_ty.nextField(ip, field_name, last_tag_val.?.toIntern())) |conflict| { + assert(conflict.kind == .value); // AstGen validated names are unique + const other_field_src: LazySrcLoc = .{ + .base_node_inst = tracked_inst, + .offset = .{ .container_field_value = conflict.prev_field_idx }, + }; + const msg = msg: { + const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValueSema(pt, &sema)}); + errdefer msg.destroy(gpa); + try sema.errNote(other_field_src, msg, "other occurrence here", .{}); + break :msg msg; + }; + return sema.failWithOwnedErrorMsg(&block, msg); + } + break :overflow false; + } else if (any_values) overflow: { + var overflow: ?usize = null; + last_tag_val = if (last_tag_val) |val| + try sema.intAdd(val, try pt.intValue(int_tag_ty, 1), int_tag_ty, &overflow) + else + try pt.intValue(int_tag_ty, 0); + if (overflow != null) break :overflow true; + if (wip_ty.nextField(ip, field_name, last_tag_val.?.toIntern())) |conflict| { + assert(conflict.kind == .value); // AstGen validated names are unique + const other_field_src: LazySrcLoc = .{ + .base_node_inst = tracked_inst, + .offset = .{ .container_field_value = conflict.prev_field_idx }, + }; + const msg = msg: { + const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValueSema(pt, &sema)}); + errdefer msg.destroy(gpa); + try sema.errNote(other_field_src, msg, "other occurrence here", .{}); + break :msg msg; + }; + return sema.failWithOwnedErrorMsg(&block, msg); + } + break :overflow false; + } else overflow: { + assert(wip_ty.nextField(ip, field_name, .none) == null); + last_tag_val = try pt.intValue(Type.comptime_int, field_i); + if (!try sema.intFitsInType(last_tag_val.?, int_tag_ty, null)) break :overflow true; + last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty); + break :overflow false; + }; + + if (tag_overflow) 
{ + const msg = try sema.errMsg(value_src, "enumeration value '{}' too large for type '{}'", .{ + last_tag_val.?.fmtValueSema(pt, &sema), int_tag_ty.fmt(pt), + }); + return sema.failWithOwnedErrorMsg(&block, msg); + } + } +} + pub const bitCastVal = @import("Sema/bitcast.zig").bitCast; pub const bitCastSpliceVal = @import("Sema/bitcast.zig").bitCastSplice; diff --git a/src/Zcu.zig b/src/Zcu.zig index 8626a147b6..63feb2d00c 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -215,6 +215,8 @@ panic_messages: [PanicId.len]InternPool.Nav.Index.Optional = .{.none} ** PanicId panic_func_index: InternPool.Index = .none, null_stack_trace: InternPool.Index = .none, +generation: u32 = 0, + pub const PerThread = @import("Zcu/PerThread.zig"); pub const PanicId = enum { @@ -332,6 +334,7 @@ pub const TypeReference = struct { pub const Namespace = struct { parent: OptionalIndex, file_scope: File.Index, + generation: u32, /// Will be a struct, enum, union, or opaque. owner_type: InternPool.Index, /// Members of the namespace which are marked `pub`. 
@@ -2295,7 +2298,7 @@ pub fn markDependeeOutdated( marked_po: enum { not_marked_po, marked_po }, dependee: InternPool.Dependee, ) !void { - log.debug("outdated dependee: {}", .{fmtDependee(dependee, zcu)}); + log.debug("outdated dependee: {}", .{zcu.fmtDependee(dependee)}); var it = zcu.intern_pool.dependencyIterator(dependee); while (it.next()) |depender| { if (zcu.outdated.getPtr(depender)) |po_dep_count| { @@ -2303,9 +2306,9 @@ pub fn markDependeeOutdated( .not_marked_po => {}, .marked_po => { po_dep_count.* -= 1; - log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), po_dep_count.* }); + log.debug("outdated {} => already outdated {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* }); if (po_dep_count.* == 0) { - log.debug("outdated ready: {}", .{fmtAnalUnit(depender, zcu)}); + log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)}); try zcu.outdated_ready.put(zcu.gpa, depender, {}); } }, @@ -2316,20 +2319,19 @@ pub fn markDependeeOutdated( const new_po_dep_count = switch (marked_po) { .not_marked_po => if (opt_po_entry) |e| e.value else 0, .marked_po => if (opt_po_entry) |e| e.value - 1 else { - // This dependency has been registered during in-progress analysis, but the unit is - // not in `potentially_outdated` because analysis is in-progress. Nothing to do. + // This `AnalUnit` has already been re-analyzed this update, and registered a dependency + // on this thing, but already has sufficiently up-to-date information. Nothing to do. 
continue; }, }; - log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), new_po_dep_count }); try zcu.outdated.putNoClobber( zcu.gpa, depender, new_po_dep_count, ); - log.debug("outdated: {}", .{fmtAnalUnit(depender, zcu)}); + log.debug("outdated {} => new outdated {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), new_po_dep_count }); if (new_po_dep_count == 0) { - log.debug("outdated ready: {}", .{fmtAnalUnit(depender, zcu)}); + log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)}); try zcu.outdated_ready.put(zcu.gpa, depender, {}); } // If this is a Decl and was not previously PO, we must recursively @@ -2342,16 +2344,16 @@ pub fn markDependeeOutdated( } pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void { - log.debug("up-to-date dependee: {}", .{fmtDependee(dependee, zcu)}); + log.debug("up-to-date dependee: {}", .{zcu.fmtDependee(dependee)}); var it = zcu.intern_pool.dependencyIterator(dependee); while (it.next()) |depender| { if (zcu.outdated.getPtr(depender)) |po_dep_count| { // This depender is already outdated, but it now has one // less PO dependency! 
po_dep_count.* -= 1; - log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), po_dep_count.* }); + log.debug("up-to-date {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* }); if (po_dep_count.* == 0) { - log.debug("outdated ready: {}", .{fmtAnalUnit(depender, zcu)}); + log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)}); try zcu.outdated_ready.put(zcu.gpa, depender, {}); } continue; @@ -2365,11 +2367,11 @@ pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void { }; if (ptr.* > 1) { ptr.* -= 1; - log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), ptr.* }); + log.debug("up-to-date {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), ptr.* }); continue; } - log.debug("up-to-date (po deps = 0): {}", .{fmtAnalUnit(depender, zcu)}); + log.debug("up-to-date {} => {} po_deps=0 (up-to-date)", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender) }); // This dependency is no longer PO, i.e. is known to be up-to-date. 
assert(zcu.potentially_outdated.swapRemove(depender)); @@ -2398,7 +2400,7 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni }, .func => |func_index| .{ .interned = func_index }, // IES }; - log.debug("marking dependee po: {}", .{fmtDependee(dependee, zcu)}); + log.debug("potentially outdated dependee: {}", .{zcu.fmtDependee(dependee)}); var it = ip.dependencyIterator(dependee); while (it.next()) |po| { if (zcu.outdated.getPtr(po)) |po_dep_count| { @@ -2408,17 +2410,17 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni _ = zcu.outdated_ready.swapRemove(po); } po_dep_count.* += 1; - log.debug("po dep count: {} = {}", .{ fmtAnalUnit(po, zcu), po_dep_count.* }); + log.debug("po {} => {} [outdated] po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), po_dep_count.* }); continue; } if (zcu.potentially_outdated.getPtr(po)) |n| { // There is now one more PO dependency. n.* += 1; - log.debug("po dep count: {} = {}", .{ fmtAnalUnit(po, zcu), n.* }); + log.debug("po {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), n.* }); continue; } try zcu.potentially_outdated.putNoClobber(zcu.gpa, po, 1); - log.debug("po dep count: {} = {}", .{ fmtAnalUnit(po, zcu), 1 }); + log.debug("po {} => {} po_deps=1", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po) }); // This AnalUnit was not already PO, so we must recursively mark its dependers as also PO. 
try zcu.markTransitiveDependersPotentiallyOutdated(po); } @@ -2443,7 +2445,7 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit { if (zcu.outdated_ready.count() > 0) { const unit = zcu.outdated_ready.keys()[0]; - log.debug("findOutdatedToAnalyze: trivial {}", .{fmtAnalUnit(unit, zcu)}); + log.debug("findOutdatedToAnalyze: trivial {}", .{zcu.fmtAnalUnit(unit)}); return unit; } @@ -2498,10 +2500,15 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit { const nav = zcu.funcInfo(func).owner_nav; std.io.getStdErr().writer().print("outdated: func {}, nav {}, name '{}', [p]o deps {}\n", .{ func, nav, ip.getNav(nav).fqn.fmt(ip), opod }) catch {}; } + for (zcu.potentially_outdated.keys(), zcu.potentially_outdated.values()) |o, opod| { + const func = o.unwrap().func; + const nav = zcu.funcInfo(func).owner_nav; + std.io.getStdErr().writer().print("po: func {}, nav {}, name '{}', [p]o deps {}\n", .{ func, nav, ip.getNav(nav).fqn.fmt(ip), opod }) catch {}; + } } log.debug("findOutdatedToAnalyze: heuristic returned '{}' ({d} dependers)", .{ - fmtAnalUnit(AnalUnit.wrap(.{ .cau = chosen_cau.? }), zcu), + zcu.fmtAnalUnit(AnalUnit.wrap(.{ .cau = chosen_cau.? 
})), chosen_cau_dependers, }); @@ -2744,7 +2751,7 @@ pub fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void { const gpa = zcu.gpa; unit_refs: { - const kv = zcu.reference_table.fetchSwapRemove(anal_unit) orelse return; + const kv = zcu.reference_table.fetchSwapRemove(anal_unit) orelse break :unit_refs; var idx = kv.value; while (idx != std.math.maxInt(u32)) { @@ -2758,7 +2765,7 @@ pub fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void { } type_refs: { - const kv = zcu.type_reference_table.fetchSwapRemove(anal_unit) orelse return; + const kv = zcu.type_reference_table.fetchSwapRemove(anal_unit) orelse break :type_refs; var idx = kv.value; while (idx != std.math.maxInt(u32)) { @@ -3280,7 +3287,7 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve const unit = kv.key; try result.putNoClobber(gpa, unit, kv.value); - log.debug("handle unit '{}'", .{fmtAnalUnit(unit, zcu)}); + log.debug("handle unit '{}'", .{zcu.fmtAnalUnit(unit)}); if (zcu.reference_table.get(unit)) |first_ref_idx| { assert(first_ref_idx != std.math.maxInt(u32)); @@ -3289,8 +3296,8 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve const ref = zcu.all_references.items[ref_idx]; if (!result.contains(ref.referenced)) { log.debug("unit '{}': ref unit '{}'", .{ - fmtAnalUnit(unit, zcu), - fmtAnalUnit(ref.referenced, zcu), + zcu.fmtAnalUnit(unit), + zcu.fmtAnalUnit(ref.referenced), }); try unit_queue.put(gpa, ref.referenced, .{ .referencer = unit, @@ -3307,7 +3314,7 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve const ref = zcu.all_type_references.items[ref_idx]; if (!checked_types.contains(ref.referenced)) { log.debug("unit '{}': ref type '{}'", .{ - fmtAnalUnit(unit, zcu), + zcu.fmtAnalUnit(unit), Type.fromInterned(ref.referenced).containerTypeName(ip).fmt(ip), }); try type_queue.put(gpa, ref.referenced, .{ @@ -3389,10 +3396,10 @@ pub fn cauFileScope(zcu: *Zcu, cau: 
InternPool.Cau.Index) *File { return zcu.fileByIndex(file_index); } -fn fmtAnalUnit(unit: AnalUnit, zcu: *Zcu) std.fmt.Formatter(formatAnalUnit) { +pub fn fmtAnalUnit(zcu: *Zcu, unit: AnalUnit) std.fmt.Formatter(formatAnalUnit) { return .{ .data = .{ .unit = unit, .zcu = zcu } }; } -fn fmtDependee(d: InternPool.Dependee, zcu: *Zcu) std.fmt.Formatter(formatDependee) { +pub fn fmtDependee(zcu: *Zcu, d: InternPool.Dependee) std.fmt.Formatter(formatDependee) { return .{ .data = .{ .dependee = d, .zcu = zcu } }; } diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 83a7dce4fc..b2f6d600e6 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -485,10 +485,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { pub fn ensureFileAnalyzed(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { const file_root_type = pt.zcu.fileRootType(file_index); if (file_root_type != .none) { - // The namespace is already up-to-date thanks to the `updateFileNamespace` calls at the - // start of this update. We just have to check whether the type itself is okay! 
- const file_root_type_cau = pt.zcu.intern_pool.loadStructType(file_root_type).cau.unwrap().?; - return pt.ensureCauAnalyzed(file_root_type_cau); + _ = try pt.ensureTypeUpToDate(file_root_type, false); } else { return pt.semaFile(file_index); } @@ -505,10 +502,10 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu const gpa = zcu.gpa; const ip = &zcu.intern_pool; - const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index }); + const anal_unit = AnalUnit.wrap(.{ .cau = cau_index }); const cau = ip.getCau(cau_index); - //log.debug("ensureCauAnalyzed {d}", .{@intFromEnum(cau_index)}); + log.debug("ensureCauAnalyzed {d}", .{@intFromEnum(cau_index)}); assert(!zcu.analysis_in_progress.contains(anal_unit)); @@ -552,10 +549,12 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu // Since it does not, this must be a transitive failure. try zcu.transitive_failed_analysis.put(gpa, anal_unit, {}); } - // We treat errors as up-to-date, since those uses would just trigger a transitive error + // We treat errors as up-to-date, since those uses would just trigger a transitive error. + // The exception is types, since type declarations may require re-analysis if the type, e.g. its captures, changed. + const outdated = cau.owner.unwrap() == .type; break :res .{ .{ - .invalidate_decl_val = false, - .invalidate_decl_ref = false, + .invalidate_decl_val = outdated, + .invalidate_decl_ref = outdated, }, true }; }, error.OutOfMemory => res: { @@ -610,7 +609,7 @@ fn ensureCauAnalyzedInner( const ip = &zcu.intern_pool; const cau = ip.getCau(cau_index); - const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index }); + const anal_unit = AnalUnit.wrap(.{ .cau = cau_index }); const inst_info = cau.zir_index.resolveFull(ip) orelse return error.AnalysisFail; @@ -626,7 +625,6 @@ fn ensureCauAnalyzedInner( // * so, it uses the same `struct` // * but this doesn't stop it from updating the namespace! 
// * we basically do `scanDecls`, updating the namespace as needed - // * TODO: optimize this to make sure we only do it once a generation i guess? // * so everyone lived happily ever after if (zcu.fileByIndex(inst_info.file).status != .success_zir) { @@ -646,17 +644,6 @@ fn ensureCauAnalyzedInner( _ = zcu.transitive_failed_analysis.swapRemove(anal_unit); } - if (inst_info.inst == .main_struct_inst) { - // Note that this is definitely a *recreation* due to outdated, because - // this instruction indicates that `cau.owner` is a `type`, which only - // reaches here if `cau_outdated`. - try pt.recreateFileRoot(inst_info.file); - return .{ - .invalidate_decl_val = true, - .invalidate_decl_ref = true, - }; - } - const decl_prog_node = zcu.sema_prog_node.start(switch (cau.owner.unwrap()) { .nav => |nav| ip.getNav(nav).fqn.toSlice(ip), .type => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip), @@ -685,9 +672,9 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter const func = zcu.funcInfo(maybe_coerced_func_index); - //log.debug("ensureFuncBodyAnalyzed {d}", .{@intFromEnum(func_index)}); + log.debug("ensureFuncBodyAnalyzed {d}", .{@intFromEnum(func_index)}); - const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index }); + const anal_unit = AnalUnit.wrap(.{ .func = func_index }); const func_outdated = zcu.outdated.swapRemove(anal_unit) or zcu.potentially_outdated.swapRemove(anal_unit); @@ -742,7 +729,7 @@ fn ensureFuncBodyAnalyzedInner( const ip = &zcu.intern_pool; const func = zcu.funcInfo(func_index); - const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index }); + const anal_unit = AnalUnit.wrap(.{ .func = func_index }); // Here's an interesting question: is this function actually valid? 
// Maybe the signature changed, so we'll end up creating a whole different `func` @@ -766,7 +753,7 @@ fn ensureFuncBodyAnalyzedInner( if (func_outdated) { try zcu.markDependeeOutdated(.marked_po, .{ .interned = func_index }); // IES } - ip.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index })); + ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .func = func_index })); ip.remove(pt.tid, func_index); @panic("TODO: remove orphaned function from binary"); } @@ -901,7 +888,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai "unable to codegen: {s}", .{@errorName(err)}, )); - try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalUnit.wrap(.{ .func = func_index })); + try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .func = func_index })); }, }; } else if (zcu.llvm_object) |llvm_object| { @@ -982,7 +969,7 @@ fn createFileRootStruct( if (zcu.comp.incremental) { try ip.addDependency( gpa, - InternPool.AnalUnit.wrap(.{ .cau = new_cau_index }), + AnalUnit.wrap(.{ .cau = new_cau_index }), .{ .src_hash = tracked_inst }, ); } @@ -998,35 +985,6 @@ fn createFileRootStruct( return wip_ty.finish(ip, new_cau_index.toOptional(), namespace_index); } -/// Recreate the root type of a file after it becomes outdated. A new struct type -/// is constructed at a new InternPool index, reusing the namespace for efficiency. 
-fn recreateFileRoot(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { - const zcu = pt.zcu; - const ip = &zcu.intern_pool; - const file = zcu.fileByIndex(file_index); - const file_root_type = zcu.fileRootType(file_index); - const namespace_index = Type.fromInterned(file_root_type).getNamespaceIndex(zcu); - - assert(file_root_type != .none); - - log.debug("recreateFileRoot mod={s} sub_file_path={s}", .{ - file.mod.fully_qualified_name, - file.sub_file_path, - }); - - if (file.status != .success_zir) { - return error.AnalysisFail; - } - - // Invalidate the existing type, reusing its namespace. - const file_root_type_cau = ip.loadStructType(file_root_type).cau.unwrap().?; - ip.removeDependenciesForDepender( - zcu.gpa, - InternPool.AnalUnit.wrap(.{ .cau = file_root_type_cau }), - ); - _ = try pt.createFileRootStruct(file_index, namespace_index, true); -} - /// Re-scan the namespace of a file's root struct type on an incremental update. /// The file must have successfully populated ZIR. /// If the file's root struct type is not populated (the file is unreferenced), nothing is done. @@ -1060,6 +1018,7 @@ fn updateFileNamespace(pt: Zcu.PerThread, file_index: Zcu.File.Index) Allocator. 
break :decls file.zir.bodySlice(extra_index, decls_len); }; try pt.scanNamespace(namespace_index, decls); + zcu.namespacePtr(namespace_index).generation = zcu.generation; } fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { @@ -1080,6 +1039,7 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { .parent = .none, .owner_type = undefined, // set in `createFileRootStruct` .file_scope = file_index, + .generation = zcu.generation, }); const struct_ty = try pt.createFileRootStruct(file_index, new_namespace_index, false); errdefer zcu.intern_pool.remove(pt.tid, struct_ty); @@ -1131,7 +1091,7 @@ fn semaCau(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) !SemaCauResult { const gpa = zcu.gpa; const ip = &zcu.intern_pool; - const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index }); + const anal_unit = AnalUnit.wrap(.{ .cau = cau_index }); const cau = ip.getCau(cau_index); const inst_info = cau.zir_index.resolveFull(ip) orelse return error.AnalysisFail; @@ -1151,10 +1111,12 @@ fn semaCau(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) !SemaCauResult { // This declaration has no value so is definitely not a std.builtin type. break :ip_index .none; }, - .type => { + .type => |ty| { // This is an incremental update, and this type is being re-analyzed because it is outdated. - // The type must be recreated at a new `InternPool.Index`. - // Mark it outdated so that creation sites are re-analyzed. + // Create a new type in its place, and mark the old one as outdated so that use sites will + // be re-analyzed and discover an up-to-date type. + const new_ty = try pt.ensureTypeUpToDate(ty, true); + assert(new_ty != ty); return .{ .invalidate_decl_val = true, .invalidate_decl_ref = true, @@ -2002,21 +1964,23 @@ const ScanDeclIter = struct { try namespace.other_decls.append(gpa, cau); - // For a `comptime` declaration, whether to re-analyze is based solely on whether the - // `Cau` is outdated. 
So, add this one to `outdated` and `outdated_ready` if not already. - const unit = InternPool.AnalUnit.wrap(.{ .cau = cau }); - if (zcu.potentially_outdated.fetchSwapRemove(unit)) |kv| { - try zcu.outdated.ensureUnusedCapacity(gpa, 1); - try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1); - zcu.outdated.putAssumeCapacityNoClobber(unit, kv.value); - if (kv.value == 0) { // no PO deps + if (existing_cau == null) { + // For a `comptime` declaration, whether to analyze is based solely on whether the + // `Cau` is outdated. So, add this one to `outdated` and `outdated_ready` if not already. + const unit = AnalUnit.wrap(.{ .cau = cau }); + if (zcu.potentially_outdated.fetchSwapRemove(unit)) |kv| { + try zcu.outdated.ensureUnusedCapacity(gpa, 1); + try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1); + zcu.outdated.putAssumeCapacityNoClobber(unit, kv.value); + if (kv.value == 0) { // no PO deps + zcu.outdated_ready.putAssumeCapacityNoClobber(unit, {}); + } + } else if (!zcu.outdated.contains(unit)) { + try zcu.outdated.ensureUnusedCapacity(gpa, 1); + try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1); + zcu.outdated.putAssumeCapacityNoClobber(unit, 0); zcu.outdated_ready.putAssumeCapacityNoClobber(unit, {}); } - } else if (!zcu.outdated.contains(unit)) { - try zcu.outdated.ensureUnusedCapacity(gpa, 1); - try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1); - zcu.outdated.putAssumeCapacityNoClobber(unit, 0); - zcu.outdated_ready.putAssumeCapacityNoClobber(unit, {}); } break :cau .{ cau, true }; @@ -2027,9 +1991,6 @@ const ScanDeclIter = struct { const cau, const nav = if (existing_cau) |cau_index| cau_nav: { const nav_index = ip.getCau(cau_index).owner.unwrap().nav; const nav = ip.getNav(nav_index); - if (nav.name != name) { - std.debug.panic("'{}' vs '{}'", .{ nav.name.fmt(ip), name.fmt(ip) }); - } assert(nav.name == name); assert(nav.fqn == fqn); break :cau_nav .{ cau_index, nav_index }; @@ -2078,7 +2039,7 @@ const ScanDeclIter = struct { }, }; - if (want_analysis or 
declaration.flags.is_export) { + if (existing_cau == null and (want_analysis or declaration.flags.is_export)) { log.debug( "scanDecl queue analyze_cau file='{s}' cau_index={d}", .{ namespace.fileScope(zcu).sub_file_path, cau }, @@ -2098,7 +2059,7 @@ fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaError! const gpa = zcu.gpa; const ip = &zcu.intern_pool; - const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index }); + const anal_unit = AnalUnit.wrap(.{ .func = func_index }); const func = zcu.funcInfo(func_index); const inst_info = func.zir_body_inst.resolveFull(ip) orelse return error.AnalysisFail; const file = zcu.fileByIndex(inst_info.file); @@ -2484,7 +2445,7 @@ fn processExportsInner( const nav = ip.getNav(nav_index); if (zcu.failed_codegen.contains(nav_index)) break :failed true; if (nav.analysis_owner.unwrap()) |cau| { - const cau_unit = InternPool.AnalUnit.wrap(.{ .cau = cau }); + const cau_unit = AnalUnit.wrap(.{ .cau = cau }); if (zcu.failed_analysis.contains(cau_unit)) break :failed true; if (zcu.transitive_failed_analysis.contains(cau_unit)) break :failed true; } @@ -2494,7 +2455,7 @@ fn processExportsInner( }; // If the value is a function, we also need to check if that function succeeded analysis. 
if (val.typeOf(zcu).zigTypeTag(zcu) == .Fn) { - const func_unit = InternPool.AnalUnit.wrap(.{ .func = val.toIntern() }); + const func_unit = AnalUnit.wrap(.{ .func = val.toIntern() }); if (zcu.failed_analysis.contains(func_unit)) break :failed true; if (zcu.transitive_failed_analysis.contains(func_unit)) break :failed true; } @@ -2669,7 +2630,7 @@ pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void .{@errorName(err)}, )); if (nav.analysis_owner.unwrap()) |cau| { - try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalUnit.wrap(.{ .cau = cau })); + try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .cau = cau })); } else { // TODO: we don't have a way to indicate that this failure is retryable! // Since these are really rare, we could as a cop-out retry the whole build next update. @@ -2782,7 +2743,7 @@ pub fn reportRetryableFileError( gop.value_ptr.* = err_msg; } -/// Shortcut for calling `intern_pool.get`. +///Shortcut for calling `intern_pool.get`. pub fn intern(pt: Zcu.PerThread, key: InternPool.Key) Allocator.Error!InternPool.Index { return pt.zcu.intern_pool.get(pt.zcu.gpa, pt.tid, key); } @@ -3367,6 +3328,532 @@ pub fn navAlignment(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) InternPo return Value.fromInterned(r.val).typeOf(zcu).abiAlignment(pt); } +/// Given a container type requiring resolution, ensures that it is up-to-date. +/// If not, the type is recreated at a new `InternPool.Index`. +/// The new index is returned. This is the same as the old index if the fields were up-to-date. +/// If `already_updating` is set, assumes the type is already outdated and undergoing re-analysis rather than checking `zcu.outdated`. 
+pub fn ensureTypeUpToDate(pt: Zcu.PerThread, ty: InternPool.Index, already_updating: bool) Zcu.SemaError!InternPool.Index { + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + switch (ip.indexToKey(ty)) { + .struct_type => |key| { + const struct_obj = ip.loadStructType(ty); + const outdated = already_updating or o: { + const anal_unit = AnalUnit.wrap(.{ .cau = struct_obj.cau.unwrap().? }); + const o = zcu.outdated.swapRemove(anal_unit) or + zcu.potentially_outdated.swapRemove(anal_unit); + if (o) { + _ = zcu.outdated_ready.swapRemove(anal_unit); + try zcu.markDependeeOutdated(.marked_po, .{ .interned = ty }); + } + break :o o; + }; + if (!outdated) return ty; + return pt.recreateStructType(ty, key, struct_obj); + }, + .union_type => |key| { + const union_obj = ip.loadUnionType(ty); + const outdated = already_updating or o: { + const anal_unit = AnalUnit.wrap(.{ .cau = union_obj.cau }); + const o = zcu.outdated.swapRemove(anal_unit) or + zcu.potentially_outdated.swapRemove(anal_unit); + if (o) { + _ = zcu.outdated_ready.swapRemove(anal_unit); + try zcu.markDependeeOutdated(.marked_po, .{ .interned = ty }); + } + break :o o; + }; + if (!outdated) return ty; + return pt.recreateUnionType(ty, key, union_obj); + }, + .enum_type => |key| { + const enum_obj = ip.loadEnumType(ty); + const outdated = already_updating or o: { + const anal_unit = AnalUnit.wrap(.{ .cau = enum_obj.cau.unwrap().? 
}); + const o = zcu.outdated.swapRemove(anal_unit) or + zcu.potentially_outdated.swapRemove(anal_unit); + if (o) { + _ = zcu.outdated_ready.swapRemove(anal_unit); + try zcu.markDependeeOutdated(.marked_po, .{ .interned = ty }); + } + break :o o; + }; + if (!outdated) return ty; + return pt.recreateEnumType(ty, key, enum_obj); + }, + .opaque_type => { + assert(!already_updating); + return ty; + }, + else => unreachable, + } +} + +fn recreateStructType( + pt: Zcu.PerThread, + ty: InternPool.Index, + full_key: InternPool.Key.NamespaceType, + struct_obj: InternPool.LoadedStructType, +) Zcu.SemaError!InternPool.Index { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + + const key = switch (full_key) { + .reified => unreachable, // never outdated + .empty_struct => unreachable, // never outdated + .generated_tag => unreachable, // not a struct + .declared => |d| d, + }; + + if (@intFromEnum(ty) <= InternPool.static_len) { + @panic("TODO: recreate resolved builtin type"); + } + + const inst_info = key.zir_index.resolveFull(ip) orelse return error.AnalysisFail; + const file = zcu.fileByIndex(inst_info.file); + if (file.status != .success_zir) return error.AnalysisFail; + const zir = file.zir; + + assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended); + const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended; + assert(extended.opcode == .struct_decl); + const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); + const extra = zir.extraData(Zir.Inst.StructDecl, extended.operand); + var extra_index = extra.end; + + const captures_len = if (small.has_captures_len) blk: { + const captures_len = zir.extra[extra_index]; + extra_index += 1; + break :blk captures_len; + } else 0; + const fields_len = if (small.has_fields_len) blk: { + const fields_len = zir.extra[extra_index]; + extra_index += 1; + break :blk fields_len; + } else 0; + + if (captures_len != key.captures.owned.len) return 
error.AnalysisFail; + if (fields_len != struct_obj.field_types.len) return error.AnalysisFail; + + // The old type will be unused, so drop its dependency information. + ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .cau = struct_obj.cau.unwrap().? })); + + const namespace_index = struct_obj.namespace.unwrap().?; + + const wip_ty = switch (try ip.getStructType(gpa, pt.tid, .{ + .layout = small.layout, + .fields_len = fields_len, + .known_non_opv = small.known_non_opv, + .requires_comptime = if (small.known_comptime_only) .yes else .unknown, + .is_tuple = small.is_tuple, + .any_comptime_fields = small.any_comptime_fields, + .any_default_inits = small.any_default_inits, + .inits_resolved = false, + .any_aligned_fields = small.any_aligned_fields, + .key = .{ .declared_owned_captures = .{ + .zir_index = key.zir_index, + .captures = key.captures.owned, + } }, + }, true)) { + .wip => |wip| wip, + .existing => unreachable, // we passed `replace_existing` + }; + errdefer wip_ty.cancel(ip, pt.tid); + + wip_ty.setName(ip, struct_obj.name); + const new_cau_index = try ip.createTypeCau(gpa, pt.tid, key.zir_index, namespace_index, wip_ty.index); + try ip.addDependency( + gpa, + AnalUnit.wrap(.{ .cau = new_cau_index }), + .{ .src_hash = key.zir_index }, + ); + zcu.namespacePtr(namespace_index).owner_type = wip_ty.index; + // No need to re-scan the namespace -- `zirStructDecl` will ultimately do that if the type is still alive. + try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index }); + + const new_ty = wip_ty.finish(ip, new_cau_index.toOptional(), namespace_index); + if (inst_info.inst == .main_struct_inst) { + // This is the root type of a file! Update the reference. 
+ zcu.setFileRootType(inst_info.file, new_ty); + } + return new_ty; +} + +fn recreateUnionType( + pt: Zcu.PerThread, + ty: InternPool.Index, + full_key: InternPool.Key.NamespaceType, + union_obj: InternPool.LoadedUnionType, +) Zcu.SemaError!InternPool.Index { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + + const key = switch (full_key) { + .reified => unreachable, // never outdated + .empty_struct => unreachable, // never outdated + .generated_tag => unreachable, // not a union + .declared => |d| d, + }; + + if (@intFromEnum(ty) <= InternPool.static_len) { + @panic("TODO: recreate resolved builtin type"); + } + + const inst_info = key.zir_index.resolveFull(ip) orelse return error.AnalysisFail; + const file = zcu.fileByIndex(inst_info.file); + if (file.status != .success_zir) return error.AnalysisFail; + const zir = file.zir; + + assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended); + const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended; + assert(extended.opcode == .union_decl); + const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small); + const extra = zir.extraData(Zir.Inst.UnionDecl, extended.operand); + var extra_index = extra.end; + + extra_index += @intFromBool(small.has_tag_type); + const captures_len = if (small.has_captures_len) blk: { + const captures_len = zir.extra[extra_index]; + extra_index += 1; + break :blk captures_len; + } else 0; + extra_index += @intFromBool(small.has_body_len); + const fields_len = if (small.has_fields_len) blk: { + const fields_len = zir.extra[extra_index]; + extra_index += 1; + break :blk fields_len; + } else 0; + + if (captures_len != key.captures.owned.len) return error.AnalysisFail; + if (fields_len != union_obj.field_types.len) return error.AnalysisFail; + + // The old type will be unused, so drop its dependency information. 
+ ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .cau = union_obj.cau })); + + const namespace_index = union_obj.namespace; + + const wip_ty = switch (try ip.getUnionType(gpa, pt.tid, .{ + .flags = .{ + .layout = small.layout, + .status = .none, + .runtime_tag = if (small.has_tag_type or small.auto_enum_tag) + .tagged + else if (small.layout != .auto) + .none + else switch (true) { // TODO + true => .safety, + false => .none, + }, + .any_aligned_fields = small.any_aligned_fields, + .requires_comptime = .unknown, + .assumed_runtime_bits = false, + .assumed_pointer_aligned = false, + .alignment = .none, + }, + .fields_len = fields_len, + .enum_tag_ty = .none, // set later + .field_types = &.{}, // set later + .field_aligns = &.{}, // set later + .key = .{ .declared_owned_captures = .{ + .zir_index = key.zir_index, + .captures = key.captures.owned, + } }, + }, true)) { + .wip => |wip| wip, + .existing => unreachable, // we passed `replace_existing` + }; + errdefer wip_ty.cancel(ip, pt.tid); + + wip_ty.setName(ip, union_obj.name); + const new_cau_index = try ip.createTypeCau(gpa, pt.tid, key.zir_index, namespace_index, wip_ty.index); + try ip.addDependency( + gpa, + AnalUnit.wrap(.{ .cau = new_cau_index }), + .{ .src_hash = key.zir_index }, + ); + zcu.namespacePtr(namespace_index).owner_type = wip_ty.index; + // No need to re-scan the namespace -- `zirUnionDecl` will ultimately do that if the type is still alive. 
+ try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index }); + return wip_ty.finish(ip, new_cau_index.toOptional(), namespace_index); +} + +fn recreateEnumType( + pt: Zcu.PerThread, + ty: InternPool.Index, + full_key: InternPool.Key.NamespaceType, + enum_obj: InternPool.LoadedEnumType, +) Zcu.SemaError!InternPool.Index { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + + const key = switch (full_key) { + .reified => unreachable, // never outdated + .empty_struct => unreachable, // never outdated + .generated_tag => unreachable, // never outdated + .declared => |d| d, + }; + + if (@intFromEnum(ty) <= InternPool.static_len) { + @panic("TODO: recreate resolved builtin type"); + } + + const inst_info = key.zir_index.resolveFull(ip) orelse return error.AnalysisFail; + const file = zcu.fileByIndex(inst_info.file); + if (file.status != .success_zir) return error.AnalysisFail; + const zir = file.zir; + + assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended); + const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended; + assert(extended.opcode == .enum_decl); + const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small); + const extra = zir.extraData(Zir.Inst.EnumDecl, extended.operand); + var extra_index = extra.end; + + const tag_type_ref = if (small.has_tag_type) blk: { + const tag_type_ref: Zir.Inst.Ref = @enumFromInt(zir.extra[extra_index]); + extra_index += 1; + break :blk tag_type_ref; + } else .none; + + const captures_len = if (small.has_captures_len) blk: { + const captures_len = zir.extra[extra_index]; + extra_index += 1; + break :blk captures_len; + } else 0; + + const body_len = if (small.has_body_len) blk: { + const body_len = zir.extra[extra_index]; + extra_index += 1; + break :blk body_len; + } else 0; + + const fields_len = if (small.has_fields_len) blk: { + const fields_len = zir.extra[extra_index]; + extra_index += 1; + break :blk fields_len; + } else 0; + + const 
decls_len = if (small.has_decls_len) blk: { + const decls_len = zir.extra[extra_index]; + extra_index += 1; + break :blk decls_len; + } else 0; + + if (captures_len != key.captures.owned.len) return error.AnalysisFail; + if (fields_len != enum_obj.names.len) return error.AnalysisFail; + + extra_index += captures_len; + extra_index += decls_len; + + const body = zir.bodySlice(extra_index, body_len); + extra_index += body.len; + + const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable; + const body_end = extra_index; + extra_index += bit_bags_count; + + const any_values = for (zir.extra[body_end..][0..bit_bags_count]) |bag| { + if (bag != 0) break true; + } else false; + + // The old type will be unused, so drop its dependency information. + ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .cau = enum_obj.cau.unwrap().? })); + + const namespace_index = enum_obj.namespace; + + const wip_ty = switch (try ip.getEnumType(gpa, pt.tid, .{ + .has_values = any_values, + .tag_mode = if (small.nonexhaustive) + .nonexhaustive + else if (tag_type_ref == .none) + .auto + else + .explicit, + .fields_len = fields_len, + .key = .{ .declared_owned_captures = .{ + .zir_index = key.zir_index, + .captures = key.captures.owned, + } }, + }, true)) { + .wip => |wip| wip, + .existing => unreachable, // we passed `replace_existing` + }; + var done = true; + errdefer if (!done) wip_ty.cancel(ip, pt.tid); + + wip_ty.setName(ip, enum_obj.name); + + const new_cau_index = try ip.createTypeCau(gpa, pt.tid, key.zir_index, namespace_index, wip_ty.index); + + zcu.namespacePtr(namespace_index).owner_type = wip_ty.index; + // No need to re-scan the namespace -- `zirEnumDecl` will ultimately do that if the type is still alive. 
+ + wip_ty.prepare(ip, new_cau_index, namespace_index); + done = true; + + Sema.resolveDeclaredEnum( + pt, + wip_ty, + inst_info.inst, + key.zir_index, + namespace_index, + enum_obj.name, + new_cau_index, + small, + body, + tag_type_ref, + any_values, + fields_len, + zir, + body_end, + ) catch |err| switch (err) { + error.GenericPoison => unreachable, + error.ComptimeBreak => unreachable, + error.ComptimeReturn => unreachable, + error.AnalysisFail, error.OutOfMemory => |e| return e, + }; + + return wip_ty.index; +} + +/// Given a namespace, re-scan its declarations from the type definition if they have not +/// yet been re-scanned on this update. +/// If the type declaration instruction has been lost, returns `error.AnalysisFail`. +/// This will effectively short-circuit the caller, which will be semantic analysis of a +/// guaranteed-unreferenced `AnalUnit`, to trigger a transitive analysis error. +pub fn ensureNamespaceUpToDate(pt: Zcu.PerThread, namespace_index: Zcu.Namespace.Index) Zcu.SemaError!void { + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + const namespace = zcu.namespacePtr(namespace_index); + + if (namespace.generation == zcu.generation) return; + + const Container = enum { @"struct", @"union", @"enum", @"opaque" }; + const container: Container, const full_key = switch (ip.indexToKey(namespace.owner_type)) { + .struct_type => |k| .{ .@"struct", k }, + .union_type => |k| .{ .@"union", k }, + .enum_type => |k| .{ .@"enum", k }, + .opaque_type => |k| .{ .@"opaque", k }, + else => unreachable, // namespaces are owned by a container type + }; + + const key = switch (full_key) { + .reified, .empty_struct, .generated_tag => { + // Namespace always empty, so up-to-date. + namespace.generation = zcu.generation; + return; + }, + .declared => |d| d, + }; + + // Namespace outdated -- re-scan the type if necessary. 
+ + const inst_info = key.zir_index.resolveFull(ip) orelse return error.AnalysisFail; + const file = zcu.fileByIndex(inst_info.file); + if (file.status != .success_zir) return error.AnalysisFail; + const zir = file.zir; + + assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended); + const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended; + + const decls = switch (container) { + .@"struct" => decls: { + assert(extended.opcode == .struct_decl); + const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); + const extra = zir.extraData(Zir.Inst.StructDecl, extended.operand); + var extra_index = extra.end; + const captures_len = if (small.has_captures_len) blk: { + const captures_len = zir.extra[extra_index]; + extra_index += 1; + break :blk captures_len; + } else 0; + extra_index += @intFromBool(small.has_fields_len); + const decls_len = if (small.has_decls_len) blk: { + const decls_len = zir.extra[extra_index]; + extra_index += 1; + break :blk decls_len; + } else 0; + extra_index += captures_len; + if (small.has_backing_int) { + const backing_int_body_len = zir.extra[extra_index]; + extra_index += 1; // backing_int_body_len + if (backing_int_body_len == 0) { + extra_index += 1; // backing_int_ref + } else { + extra_index += backing_int_body_len; // backing_int_body_inst + } + } + break :decls zir.bodySlice(extra_index, decls_len); + }, + .@"union" => decls: { + assert(extended.opcode == .union_decl); + const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small); + const extra = zir.extraData(Zir.Inst.UnionDecl, extended.operand); + var extra_index = extra.end; + extra_index += @intFromBool(small.has_tag_type); + const captures_len = if (small.has_captures_len) blk: { + const captures_len = zir.extra[extra_index]; + extra_index += 1; + break :blk captures_len; + } else 0; + extra_index += @intFromBool(small.has_body_len); + extra_index += @intFromBool(small.has_fields_len); + const decls_len = if 
(small.has_decls_len) blk: { + const decls_len = zir.extra[extra_index]; + extra_index += 1; + break :blk decls_len; + } else 0; + extra_index += captures_len; + break :decls zir.bodySlice(extra_index, decls_len); + }, + .@"enum" => decls: { + assert(extended.opcode == .enum_decl); + const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small); + const extra = zir.extraData(Zir.Inst.EnumDecl, extended.operand); + var extra_index = extra.end; + extra_index += @intFromBool(small.has_tag_type); + const captures_len = if (small.has_captures_len) blk: { + const captures_len = zir.extra[extra_index]; + extra_index += 1; + break :blk captures_len; + } else 0; + extra_index += @intFromBool(small.has_body_len); + extra_index += @intFromBool(small.has_fields_len); + const decls_len = if (small.has_decls_len) blk: { + const decls_len = zir.extra[extra_index]; + extra_index += 1; + break :blk decls_len; + } else 0; + extra_index += captures_len; + break :decls zir.bodySlice(extra_index, decls_len); + }, + .@"opaque" => decls: { + assert(extended.opcode == .opaque_decl); + const small: Zir.Inst.OpaqueDecl.Small = @bitCast(extended.small); + const extra = zir.extraData(Zir.Inst.OpaqueDecl, extended.operand); + var extra_index = extra.end; + const captures_len = if (small.has_captures_len) blk: { + const captures_len = zir.extra[extra_index]; + extra_index += 1; + break :blk captures_len; + } else 0; + const decls_len = if (small.has_decls_len) blk: { + const decls_len = zir.extra[extra_index]; + extra_index += 1; + break :blk decls_len; + } else 0; + extra_index += captures_len; + break :decls zir.bodySlice(extra_index, decls_len); + }, + }; + + try pt.scanNamespace(namespace_index, decls); + namespace.generation = zcu.generation; +} + const Air = @import("../Air.zig"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -3379,6 +3866,7 @@ const builtin = @import("builtin"); const Cache = std.Build.Cache; const dev = @import("../dev.zig"); const 
InternPool = @import("../InternPool.zig"); +const AnalUnit = InternPool.AnalUnit; const isUpDir = @import("../introspect.zig").isUpDir; const Liveness = @import("../Liveness.zig"); const log = std.log.scoped(.zcu); From 89f02d1c107a159dc6433863c7d9167872b4c0c8 Mon Sep 17 00:00:00 2001 From: mlugg Date: Fri, 16 Aug 2024 17:34:30 +0100 Subject: [PATCH 18/25] std.zig.Zir: fix declaration traversal The old logic here had bitrotted, largely because there were some incorrect `else` cases. This is now implemented correctly for all current ZIR instructions. This prevents instructions being lost in incremental updates, which is important for updates to be minimal. --- lib/std/zig/Zir.zig | 660 ++++++++++++++++++++++++++++++++++++++++---- src/Zcu.zig | 16 +- 2 files changed, 614 insertions(+), 62 deletions(-) diff --git a/lib/std/zig/Zir.zig b/lib/std/zig/Zir.zig index 051a799db6..7349677a22 100644 --- a/lib/std/zig/Zir.zig +++ b/lib/std/zig/Zir.zig @@ -603,7 +603,7 @@ pub const Inst = struct { /// Uses the `un_node` field. typeof, /// Implements `@TypeOf` for one operand. - /// Uses the `pl_node` field. + /// Uses the `pl_node` field. Payload is `Block`. typeof_builtin, /// Given a value, look at the type of it, which must be an integer type. /// Returns the integer type for the RHS of a shift operation. @@ -2727,6 +2727,9 @@ pub const Inst = struct { field_name_start: NullTerminatedString, }; + /// There is a body of instructions at `extra[body_index..][0..body_len]`. + /// Trailing: + /// 0. operand: Ref // for each `operands_len` pub const TypeOfPeer = struct { src_node: i32, body_len: u32, @@ -2844,6 +2847,40 @@ pub const Inst = struct { src_line: u32, }; + /// Trailing: + /// 0. multi_cases_len: u32 // if `has_multi_cases` + /// 1. err_capture_inst: u32 // if `any_uses_err_capture` + /// 2. non_err_body { + /// info: ProngInfo, + /// inst: Index // for every `info.body_len` + /// } + /// 3. 
else_body { // if `has_else` + /// info: ProngInfo, + /// inst: Index // for every `info.body_len` + /// } + /// 4. scalar_cases: { // for every `scalar_cases_len` + /// item: Ref, + /// info: ProngInfo, + /// inst: Index // for every `info.body_len` + /// } + /// 5. multi_cases: { // for every `multi_cases_len` + /// items_len: u32, + /// ranges_len: u32, + /// info: ProngInfo, + /// item: Ref // for every `items_len` + /// ranges: { // for every `ranges_len` + /// item_first: Ref, + /// item_last: Ref, + /// } + /// inst: Index // for every `info.body_len` + /// } + /// + /// When analyzing a case body, the switch instruction itself refers to the + /// captured error, or to the success value in `non_err_body`. Whether this + /// is captured by reference or by value depends on whether the `byref` bit + /// is set for the corresponding body. `err_capture_inst` refers to the error + /// capture outside of the `switch`, i.e. `err` in + /// `x catch |err| switch (err) { ... }`. pub const SwitchBlockErrUnion = struct { operand: Ref, bits: Bits, @@ -3153,7 +3190,7 @@ pub const Inst = struct { /// 1. captures_len: u32 // if has_captures_len /// 2. body_len: u32, // if has_body_len /// 3. fields_len: u32, // if has_fields_len - /// 4. decls_len: u37, // if has_decls_len + /// 4. decls_len: u32, // if has_decls_len /// 5. capture: Capture // for every captures_len /// 6. decl: Index, // for every decls_len; points to a `declaration` instruction /// 7. inst: Index // for every body_len @@ -3624,33 +3661,492 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator { } } -/// The iterator would have to allocate memory anyway to iterate. So here we populate -/// an ArrayList as the result. -pub fn findDecls(zir: Zir, list: *std.ArrayList(Inst.Index), decl_inst: Zir.Inst.Index) !void { +/// Find all type declarations, recursively, within a `declaration` instruction. 
Does not recurse through +/// said type declarations' declarations; to find all declarations, call this function on the declarations +/// of the discovered types recursively. +/// The iterator would have to allocate memory anyway to iterate, so an `ArrayList` is populated as the result. +pub fn findDecls(zir: Zir, gpa: Allocator, list: *std.ArrayListUnmanaged(Inst.Index), decl_inst: Zir.Inst.Index) !void { list.clearRetainingCapacity(); const declaration, const extra_end = zir.getDeclaration(decl_inst); const bodies = declaration.getBodies(extra_end, zir); - try zir.findDeclsBody(list, bodies.value_body); - if (bodies.align_body) |b| try zir.findDeclsBody(list, b); - if (bodies.linksection_body) |b| try zir.findDeclsBody(list, b); - if (bodies.addrspace_body) |b| try zir.findDeclsBody(list, b); + // `defer` instructions duplicate the same body arbitrarily many times, but we only want to traverse + // their contents once per defer. So, we store the extra index of the body here to deduplicate. + var found_defers: std.AutoHashMapUnmanaged(u32, void) = .{}; + defer found_defers.deinit(gpa); + + try zir.findDeclsBody(gpa, list, &found_defers, bodies.value_body); + if (bodies.align_body) |b| try zir.findDeclsBody(gpa, list, &found_defers, b); + if (bodies.linksection_body) |b| try zir.findDeclsBody(gpa, list, &found_defers, b); + if (bodies.addrspace_body) |b| try zir.findDeclsBody(gpa, list, &found_defers, b); } fn findDeclsInner( zir: Zir, - list: *std.ArrayList(Inst.Index), + gpa: Allocator, + list: *std.ArrayListUnmanaged(Inst.Index), + defers: *std.AutoHashMapUnmanaged(u32, void), inst: Inst.Index, ) Allocator.Error!void { const tags = zir.instructions.items(.tag); const datas = zir.instructions.items(.data); switch (tags[@intFromEnum(inst)]) { + .declaration => unreachable, + + // Boring instruction tags first. These have no body and are not declarations or type declarations. 
+ .add, + .addwrap, + .add_sat, + .add_unsafe, + .sub, + .subwrap, + .sub_sat, + .mul, + .mulwrap, + .mul_sat, + .div_exact, + .div_floor, + .div_trunc, + .mod, + .rem, + .mod_rem, + .shl, + .shl_exact, + .shl_sat, + .shr, + .shr_exact, + .param_anytype, + .param_anytype_comptime, + .array_cat, + .array_mul, + .array_type, + .array_type_sentinel, + .vector_type, + .elem_type, + .indexable_ptr_elem_type, + .vector_elem_type, + .indexable_ptr_len, + .anyframe_type, + .as_node, + .as_shift_operand, + .bit_and, + .bitcast, + .bit_not, + .bit_or, + .bool_not, + .bool_br_and, + .bool_br_or, + .@"break", + .break_inline, + .check_comptime_control_flow, + .builtin_call, + .cmp_lt, + .cmp_lte, + .cmp_eq, + .cmp_gte, + .cmp_gt, + .cmp_neq, + .error_set_decl, + .dbg_stmt, + .dbg_var_ptr, + .dbg_var_val, + .decl_ref, + .decl_val, + .load, + .div, + .elem_ptr_node, + .elem_ptr, + .elem_val_node, + .elem_val, + .elem_val_imm, + .ensure_result_used, + .ensure_result_non_error, + .ensure_err_union_payload_void, + .error_union_type, + .error_value, + .@"export", + .export_value, + .field_ptr, + .field_val, + .field_ptr_named, + .field_val_named, + .import, + .int, + .int_big, + .float, + .float128, + .int_type, + .is_non_null, + .is_non_null_ptr, + .is_non_err, + .is_non_err_ptr, + .ret_is_non_err, + .repeat, + .repeat_inline, + .for_len, + .merge_error_sets, + .ref, + .ret_node, + .ret_load, + .ret_implicit, + .ret_err_value, + .ret_err_value_code, + .ret_ptr, + .ret_type, + .ptr_type, + .slice_start, + .slice_end, + .slice_sentinel, + .slice_length, + .store_node, + .store_to_inferred_ptr, + .str, + .negate, + .negate_wrap, + .typeof, + .typeof_log2_int_type, + .@"unreachable", + .xor, + .optional_type, + .optional_payload_safe, + .optional_payload_unsafe, + .optional_payload_safe_ptr, + .optional_payload_unsafe_ptr, + .err_union_payload_unsafe, + .err_union_payload_unsafe_ptr, + .err_union_code, + .err_union_code_ptr, + .enum_literal, + .validate_deref, + .validate_destructure, 
+ .field_type_ref, + .opt_eu_base_ptr_init, + .coerce_ptr_elem_ty, + .validate_ref_ty, + .struct_init_empty, + .struct_init_empty_result, + .struct_init_empty_ref_result, + .struct_init_anon, + .struct_init, + .struct_init_ref, + .validate_struct_init_ty, + .validate_struct_init_result_ty, + .validate_ptr_struct_init, + .struct_init_field_type, + .struct_init_field_ptr, + .array_init_anon, + .array_init, + .array_init_ref, + .validate_array_init_ty, + .validate_array_init_result_ty, + .validate_array_init_ref_ty, + .validate_ptr_array_init, + .array_init_elem_type, + .array_init_elem_ptr, + .union_init, + .type_info, + .size_of, + .bit_size_of, + .int_from_ptr, + .compile_error, + .set_eval_branch_quota, + .int_from_enum, + .align_of, + .int_from_bool, + .embed_file, + .error_name, + .panic, + .trap, + .set_runtime_safety, + .sqrt, + .sin, + .cos, + .tan, + .exp, + .exp2, + .log, + .log2, + .log10, + .abs, + .floor, + .ceil, + .trunc, + .round, + .tag_name, + .type_name, + .frame_type, + .frame_size, + .int_from_float, + .float_from_int, + .ptr_from_int, + .enum_from_int, + .float_cast, + .int_cast, + .ptr_cast, + .truncate, + .has_decl, + .has_field, + .clz, + .ctz, + .pop_count, + .byte_swap, + .bit_reverse, + .bit_offset_of, + .offset_of, + .splat, + .reduce, + .shuffle, + .atomic_load, + .atomic_rmw, + .atomic_store, + .mul_add, + .memcpy, + .memset, + .min, + .max, + .alloc, + .alloc_mut, + .alloc_comptime_mut, + .alloc_inferred, + .alloc_inferred_mut, + .alloc_inferred_comptime, + .alloc_inferred_comptime_mut, + .resolve_inferred_alloc, + .make_ptr_const, + .@"resume", + .@"await", + .save_err_ret_index, + .restore_err_ret_index_unconditional, + .restore_err_ret_index_fn_entry, + => return, + + .extended => { + const extended = datas[@intFromEnum(inst)].extended; + switch (extended.opcode) { + .value_placeholder => unreachable, + + // Once again, we start with the boring tags. 
+ .variable, + .this, + .ret_addr, + .builtin_src, + .error_return_trace, + .frame, + .frame_address, + .alloc, + .builtin_extern, + .@"asm", + .asm_expr, + .compile_log, + .min_multi, + .max_multi, + .add_with_overflow, + .sub_with_overflow, + .mul_with_overflow, + .shl_with_overflow, + .c_undef, + .c_include, + .c_define, + .wasm_memory_size, + .wasm_memory_grow, + .prefetch, + .fence, + .set_float_mode, + .set_align_stack, + .set_cold, + .error_cast, + .await_nosuspend, + .breakpoint, + .disable_instrumentation, + .select, + .int_from_error, + .error_from_int, + .builtin_async_call, + .cmpxchg, + .c_va_arg, + .c_va_copy, + .c_va_end, + .c_va_start, + .ptr_cast_full, + .ptr_cast_no_dest, + .work_item_id, + .work_group_size, + .work_group_id, + .in_comptime, + .restore_err_ret_index, + .closure_get, + .field_parent_ptr, + => return, + + // `@TypeOf` has a body. + .typeof_peer => { + const extra = zir.extraData(Zir.Inst.TypeOfPeer, extended.operand); + const body = zir.bodySlice(extra.data.body_index, extra.data.body_len); + try zir.findDeclsBody(gpa, list, defers, body); + }, + + // Reifications and opaque declarations need tracking, but have no body. + .reify, .opaque_decl => return list.append(gpa, inst), + + // Struct declarations need tracking and have bodies. 
+ .struct_decl => { + try list.append(gpa, inst); + + const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); + const extra = zir.extraData(Zir.Inst.StructDecl, extended.operand); + var extra_index = extra.end; + const captures_len = if (small.has_captures_len) blk: { + const captures_len = zir.extra[extra_index]; + extra_index += 1; + break :blk captures_len; + } else 0; + const fields_len = if (small.has_fields_len) blk: { + const fields_len = zir.extra[extra_index]; + extra_index += 1; + break :blk fields_len; + } else 0; + const decls_len = if (small.has_decls_len) blk: { + const decls_len = zir.extra[extra_index]; + extra_index += 1; + break :blk decls_len; + } else 0; + extra_index += captures_len; + if (small.has_backing_int) { + const backing_int_body_len = zir.extra[extra_index]; + extra_index += 1; + if (backing_int_body_len == 0) { + extra_index += 1; // backing_int_ref + } else { + const body = zir.bodySlice(extra_index, backing_int_body_len); + extra_index += backing_int_body_len; + try zir.findDeclsBody(gpa, list, defers, body); + } + } + extra_index += decls_len; + + // This ZIR is structured in a slightly awkward way, so we have to split up the iteration. + // `extra_index` iterates `flags` (bags of bits). + // `fields_extra_index` iterates `fields`. + // We accumulate the total length of bodies into `total_bodies_len`. This is sufficient because + // the bodies are packed together in `extra` and we only need to traverse their instructions (we + // don't really care about the structure). 
+ + const bits_per_field = 4; + const fields_per_u32 = 32 / bits_per_field; + const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable; + var cur_bit_bag: u32 = undefined; + + var fields_extra_index = extra_index + bit_bags_count; + var total_bodies_len: u32 = 0; + + for (0..fields_len) |field_i| { + if (field_i % fields_per_u32 == 0) { + cur_bit_bag = zir.extra[extra_index]; + extra_index += 1; + } + + const has_align = @as(u1, @truncate(cur_bit_bag)) != 0; + cur_bit_bag >>= 1; + const has_init = @as(u1, @truncate(cur_bit_bag)) != 0; + cur_bit_bag >>= 2; // also skip `is_comptime`; we don't care + const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0; + cur_bit_bag >>= 1; + + fields_extra_index += @intFromBool(!small.is_tuple); // field_name + fields_extra_index += 1; // doc_comment + + if (has_type_body) { + const field_type_body_len = zir.extra[fields_extra_index]; + total_bodies_len += field_type_body_len; + } + fields_extra_index += 1; // field_type or field_type_body_len + + if (has_align) { + const align_body_len = zir.extra[fields_extra_index]; + fields_extra_index += 1; + total_bodies_len += align_body_len; + } + + if (has_init) { + const init_body_len = zir.extra[fields_extra_index]; + fields_extra_index += 1; + total_bodies_len += init_body_len; + } + } + + // Now, `fields_extra_index` points to `bodies`. Let's treat this as one big body. + const merged_bodies = zir.bodySlice(fields_extra_index, total_bodies_len); + try zir.findDeclsBody(gpa, list, defers, merged_bodies); + }, + + // Union declarations need tracking and have a body. 
+ .union_decl => { + try list.append(gpa, inst); + + const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small); + const extra = zir.extraData(Zir.Inst.UnionDecl, extended.operand); + var extra_index = extra.end; + extra_index += @intFromBool(small.has_tag_type); + const captures_len = if (small.has_captures_len) blk: { + const captures_len = zir.extra[extra_index]; + extra_index += 1; + break :blk captures_len; + } else 0; + const body_len = if (small.has_body_len) blk: { + const body_len = zir.extra[extra_index]; + extra_index += 1; + break :blk body_len; + } else 0; + extra_index += @intFromBool(small.has_fields_len); + const decls_len = if (small.has_decls_len) blk: { + const decls_len = zir.extra[extra_index]; + extra_index += 1; + break :blk decls_len; + } else 0; + extra_index += captures_len; + extra_index += decls_len; + const body = zir.bodySlice(extra_index, body_len); + try zir.findDeclsBody(gpa, list, defers, body); + }, + + // Enum declarations need tracking and have a body. 
+ .enum_decl => { + try list.append(gpa, inst); + + const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small); + const extra = zir.extraData(Zir.Inst.EnumDecl, extended.operand); + var extra_index = extra.end; + extra_index += @intFromBool(small.has_tag_type); + const captures_len = if (small.has_captures_len) blk: { + const captures_len = zir.extra[extra_index]; + extra_index += 1; + break :blk captures_len; + } else 0; + const body_len = if (small.has_body_len) blk: { + const body_len = zir.extra[extra_index]; + extra_index += 1; + break :blk body_len; + } else 0; + extra_index += @intFromBool(small.has_fields_len); + const decls_len = if (small.has_decls_len) blk: { + const decls_len = zir.extra[extra_index]; + extra_index += 1; + break :blk decls_len; + } else 0; + extra_index += captures_len; + extra_index += decls_len; + const body = zir.bodySlice(extra_index, body_len); + try zir.findDeclsBody(gpa, list, defers, body); + }, + } + }, + // Functions instructions are interesting and have a body. 
.func, .func_inferred, => { - try list.append(inst); + try list.append(gpa, inst); const inst_data = datas[@intFromEnum(inst)].pl_node; const extra = zir.extraData(Inst.Func, inst_data.payload_index); @@ -3661,14 +4157,14 @@ fn findDeclsInner( else => { const body = zir.bodySlice(extra_index, extra.data.ret_body_len); extra_index += body.len; - try zir.findDeclsBody(list, body); + try zir.findDeclsBody(gpa, list, defers, body); }, } const body = zir.bodySlice(extra_index, extra.data.body_len); - return zir.findDeclsBody(list, body); + return zir.findDeclsBody(gpa, list, defers, body); }, .func_fancy => { - try list.append(inst); + try list.append(gpa, inst); const inst_data = datas[@intFromEnum(inst)].pl_node; const extra = zir.extraData(Inst.FuncFancy, inst_data.payload_index); @@ -3679,7 +4175,7 @@ fn findDeclsInner( const body_len = zir.extra[extra_index]; extra_index += 1; const body = zir.bodySlice(extra_index, body_len); - try zir.findDeclsBody(list, body); + try zir.findDeclsBody(gpa, list, defers, body); extra_index += body.len; } else if (extra.data.bits.has_align_ref) { extra_index += 1; @@ -3689,7 +4185,7 @@ fn findDeclsInner( const body_len = zir.extra[extra_index]; extra_index += 1; const body = zir.bodySlice(extra_index, body_len); - try zir.findDeclsBody(list, body); + try zir.findDeclsBody(gpa, list, defers, body); extra_index += body.len; } else if (extra.data.bits.has_addrspace_ref) { extra_index += 1; @@ -3699,7 +4195,7 @@ fn findDeclsInner( const body_len = zir.extra[extra_index]; extra_index += 1; const body = zir.bodySlice(extra_index, body_len); - try zir.findDeclsBody(list, body); + try zir.findDeclsBody(gpa, list, defers, body); extra_index += body.len; } else if (extra.data.bits.has_section_ref) { extra_index += 1; @@ -3709,7 +4205,7 @@ fn findDeclsInner( const body_len = zir.extra[extra_index]; extra_index += 1; const body = zir.bodySlice(extra_index, body_len); - try zir.findDeclsBody(list, body); + try zir.findDeclsBody(gpa, list, 
defers, body); extra_index += body.len; } else if (extra.data.bits.has_cc_ref) { extra_index += 1; @@ -3719,7 +4215,7 @@ fn findDeclsInner( const body_len = zir.extra[extra_index]; extra_index += 1; const body = zir.bodySlice(extra_index, body_len); - try zir.findDeclsBody(list, body); + try zir.findDeclsBody(gpa, list, defers, body); extra_index += body.len; } else if (extra.data.bits.has_ret_ty_ref) { extra_index += 1; @@ -3728,62 +4224,99 @@ fn findDeclsInner( extra_index += @intFromBool(extra.data.bits.has_any_noalias); const body = zir.bodySlice(extra_index, extra.data.body_len); - return zir.findDeclsBody(list, body); - }, - .extended => { - const extended = datas[@intFromEnum(inst)].extended; - switch (extended.opcode) { - - // Decl instructions are interesting but have no body. - // TODO yes they do have a body actually. recurse over them just like block instructions. - .struct_decl, - .union_decl, - .enum_decl, - .opaque_decl, - .reify, - => return list.append(inst), - - else => return, - } + return zir.findDeclsBody(gpa, list, defers, body); }, // Block instructions, recurse over the bodies. 
- .block, .block_comptime, .block_inline => { + .block, + .block_comptime, + .block_inline, + .c_import, + .typeof_builtin, + .loop, + => { const inst_data = datas[@intFromEnum(inst)].pl_node; const extra = zir.extraData(Inst.Block, inst_data.payload_index); const body = zir.bodySlice(extra.end, extra.data.body_len); - return zir.findDeclsBody(list, body); + return zir.findDeclsBody(gpa, list, defers, body); }, .condbr, .condbr_inline => { const inst_data = datas[@intFromEnum(inst)].pl_node; const extra = zir.extraData(Inst.CondBr, inst_data.payload_index); const then_body = zir.bodySlice(extra.end, extra.data.then_body_len); const else_body = zir.bodySlice(extra.end + then_body.len, extra.data.else_body_len); - try zir.findDeclsBody(list, then_body); - try zir.findDeclsBody(list, else_body); + try zir.findDeclsBody(gpa, list, defers, then_body); + try zir.findDeclsBody(gpa, list, defers, else_body); }, .@"try", .try_ptr => { const inst_data = datas[@intFromEnum(inst)].pl_node; const extra = zir.extraData(Inst.Try, inst_data.payload_index); const body = zir.bodySlice(extra.end, extra.data.body_len); - try zir.findDeclsBody(list, body); + try zir.findDeclsBody(gpa, list, defers, body); }, - .switch_block => return findDeclsSwitch(zir, list, inst), + .switch_block, .switch_block_ref => return zir.findDeclsSwitch(gpa, list, defers, inst, .normal), + .switch_block_err_union => return zir.findDeclsSwitch(gpa, list, defers, inst, .err_union), .suspend_block => @panic("TODO iterate suspend block"), - else => return, // Regular instruction, not interesting. 
+ .param, .param_comptime => { + const inst_data = datas[@intFromEnum(inst)].pl_tok; + const extra = zir.extraData(Inst.Param, inst_data.payload_index); + const body = zir.bodySlice(extra.end, extra.data.body_len); + try zir.findDeclsBody(gpa, list, defers, body); + }, + + inline .call, .field_call => |tag| { + const inst_data = datas[@intFromEnum(inst)].pl_node; + const extra = zir.extraData(switch (tag) { + .call => Inst.Call, + .field_call => Inst.FieldCall, + else => unreachable, + }, inst_data.payload_index); + // It's easiest to just combine all the arg bodies into one body, like we do above for `struct_decl`. + const args_len = extra.data.flags.args_len; + if (args_len > 0) { + const first_arg_start_off = args_len; + const final_arg_end_off = zir.extra[extra.end + args_len - 1]; + const args_body = zir.bodySlice(extra.end + first_arg_start_off, final_arg_end_off - first_arg_start_off); + try zir.findDeclsBody(gpa, list, defers, args_body); + } + }, + .@"defer" => { + const inst_data = datas[@intFromEnum(inst)].@"defer"; + const gop = try defers.getOrPut(gpa, inst_data.index); + if (!gop.found_existing) { + const body = zir.bodySlice(inst_data.index, inst_data.len); + try zir.findDeclsBody(gpa, list, defers, body); + } + }, + .defer_err_code => { + const inst_data = datas[@intFromEnum(inst)].defer_err_code; + const extra = zir.extraData(Inst.DeferErrCode, inst_data.payload_index).data; + const gop = try defers.getOrPut(gpa, extra.index); + if (!gop.found_existing) { + const body = zir.bodySlice(extra.index, extra.len); + try zir.findDeclsBody(gpa, list, defers, body); + } + }, } } fn findDeclsSwitch( zir: Zir, - list: *std.ArrayList(Inst.Index), + gpa: Allocator, + list: *std.ArrayListUnmanaged(Inst.Index), + defers: *std.AutoHashMapUnmanaged(u32, void), inst: Inst.Index, + /// Distinguishes between `switch_block[_ref]` and `switch_block_err_union`. 
+ comptime kind: enum { normal, err_union }, ) Allocator.Error!void { const inst_data = zir.instructions.items(.data)[@intFromEnum(inst)].pl_node; - const extra = zir.extraData(Inst.SwitchBlock, inst_data.payload_index); + const extra = zir.extraData(switch (kind) { + .normal => Inst.SwitchBlock, + .err_union => Inst.SwitchBlockErrUnion, + }, inst_data.payload_index); var extra_index: usize = extra.end; @@ -3793,18 +4326,35 @@ fn findDeclsSwitch( break :blk multi_cases_len; } else 0; - if (extra.data.bits.any_has_tag_capture) { + if (switch (kind) { + .normal => extra.data.bits.any_has_tag_capture, + .err_union => extra.data.bits.any_uses_err_capture, + }) { extra_index += 1; } - const special_prong = extra.data.bits.specialProng(); - if (special_prong != .none) { + const has_special = switch (kind) { + .normal => extra.data.bits.specialProng() != .none, + .err_union => has_special: { + // Handle `non_err_body` first. + const prong_info: Inst.SwitchBlock.ProngInfo = @bitCast(zir.extra[extra_index]); + extra_index += 1; + const body = zir.bodySlice(extra_index, prong_info.body_len); + extra_index += body.len; + + try zir.findDeclsBody(gpa, list, defers, body); + + break :has_special extra.data.bits.has_else; + }, + }; + + if (has_special) { const prong_info: Inst.SwitchBlock.ProngInfo = @bitCast(zir.extra[extra_index]); extra_index += 1; const body = zir.bodySlice(extra_index, prong_info.body_len); extra_index += body.len; - try zir.findDeclsBody(list, body); + try zir.findDeclsBody(gpa, list, defers, body); } { @@ -3816,7 +4366,7 @@ fn findDeclsSwitch( const body = zir.bodySlice(extra_index, prong_info.body_len); extra_index += body.len; - try zir.findDeclsBody(list, body); + try zir.findDeclsBody(gpa, list, defers, body); } } { @@ -3833,18 +4383,20 @@ fn findDeclsSwitch( const body = zir.bodySlice(extra_index, prong_info.body_len); extra_index += body.len; - try zir.findDeclsBody(list, body); + try zir.findDeclsBody(gpa, list, defers, body); } } } fn 
findDeclsBody( zir: Zir, - list: *std.ArrayList(Inst.Index), + gpa: Allocator, + list: *std.ArrayListUnmanaged(Inst.Index), + defers: *std.AutoHashMapUnmanaged(u32, void), body: []const Inst.Index, ) Allocator.Error!void { for (body) |member| { - try zir.findDeclsInner(list, member); + try zir.findDeclsInner(gpa, list, defers, member); } } @@ -4042,7 +4594,7 @@ pub fn getAssociatedSrcHash(zir: Zir, inst: Zir.Inst.Index) ?std.zig.SrcHash { return null; } const extra_index = extra.end + - 1 + + extra.data.ret_body_len + extra.data.body_len + @typeInfo(Inst.Func.SrcLocs).Struct.fields.len; return @bitCast([4]u32{ diff --git a/src/Zcu.zig b/src/Zcu.zig index 63feb2d00c..f191f6a5f1 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -2557,10 +2557,10 @@ pub fn mapOldZirToNew( }); // Used as temporary buffers for namespace declaration instructions - var old_decls = std.ArrayList(Zir.Inst.Index).init(gpa); - defer old_decls.deinit(); - var new_decls = std.ArrayList(Zir.Inst.Index).init(gpa); - defer new_decls.deinit(); + var old_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .{}; + defer old_decls.deinit(gpa); + var new_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .{}; + defer new_decls.deinit(gpa); while (match_stack.popOrNull()) |match_item| { // Match the namespace declaration itself @@ -2647,11 +2647,11 @@ pub fn mapOldZirToNew( // Match the `declaration` instruction try inst_map.put(gpa, old_decl_inst, new_decl_inst); - // Find namespace declarations within this declaration - try old_zir.findDecls(&old_decls, old_decl_inst); - try new_zir.findDecls(&new_decls, new_decl_inst); + // Find container type declarations within this declaration + try old_zir.findDecls(gpa, &old_decls, old_decl_inst); + try new_zir.findDecls(gpa, &new_decls, new_decl_inst); - // We don't have any smart way of matching up these namespace declarations, so we always + // We don't have any smart way of matching up these type declarations, so we always // correlate them based on source order. 
const n = @min(old_decls.items.len, new_decls.items.len); try match_stack.ensureUnusedCapacity(gpa, n); From 90116d92b08ff882715ca94ecc79934bc0a73762 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sat, 17 Aug 2024 08:30:56 +0100 Subject: [PATCH 19/25] Compilation: don't call `resolveReferences` unnecessarily This function is slow and should only be called in compile error cases. --- src/Compilation.zig | 35 ++++++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 9c66d17507..05a9661101 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3065,8 +3065,8 @@ pub fn totalErrorCount(comp: *Compilation) Allocator.Error!u32 { if (comp.module) |zcu| { const ip = &zcu.intern_pool; - var all_references = try zcu.resolveReferences(); - defer all_references.deinit(zcu.gpa); + var all_references: ?std.AutoHashMapUnmanaged(InternPool.AnalUnit, ?Zcu.ResolvedReference) = null; + defer if (all_references) |*a| a.deinit(zcu.gpa); total += zcu.failed_exports.count(); total += zcu.failed_embed_files.count(); @@ -3088,7 +3088,12 @@ pub fn totalErrorCount(comp: *Compilation) Allocator.Error!u32 { // the previous parse success, including compile errors, but we cannot // emit them until the file succeeds parsing. 
for (zcu.failed_analysis.keys()) |anal_unit| { - if (comp.incremental and !all_references.contains(anal_unit)) continue; + if (comp.incremental) { + if (all_references == null) { + all_references = try zcu.resolveReferences(); + } + if (!all_references.?.contains(anal_unit)) continue; + } const file_index = switch (anal_unit.unwrap()) { .cau => |cau| zcu.namespacePtr(ip.getCau(cau).namespace).file_scope, .func => |ip_index| (zcu.funcInfo(ip_index).zir_body_inst.resolveFull(ip) orelse continue).file, @@ -3172,8 +3177,8 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { }); } - var all_references = if (comp.module) |zcu| try zcu.resolveReferences() else undefined; - defer if (comp.module != null) all_references.deinit(gpa); + var all_references: ?std.AutoHashMapUnmanaged(InternPool.AnalUnit, ?Zcu.ResolvedReference) = null; + defer if (all_references) |*a| a.deinit(gpa); if (comp.module) |zcu| { const ip = &zcu.intern_pool; @@ -3231,7 +3236,12 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { if (err) |e| return e; } for (zcu.failed_analysis.keys(), zcu.failed_analysis.values()) |anal_unit, error_msg| { - if (comp.incremental and !all_references.contains(anal_unit)) continue; + if (comp.incremental) { + if (all_references == null) { + all_references = try zcu.resolveReferences(); + } + if (!all_references.?.contains(anal_unit)) continue; + } const file_index = switch (anal_unit.unwrap()) { .cau => |cau| zcu.namespacePtr(ip.getCau(cau).namespace).file_scope, @@ -3352,7 +3362,10 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { if (comp.module) |zcu| { if (comp.incremental and bundle.root_list.items.len == 0) { const should_have_error = for (zcu.transitive_failed_analysis.keys()) |failed_unit| { - if (all_references.contains(failed_unit)) break true; + if (all_references == null) { + all_references = try zcu.resolveReferences(); + } + if (all_references.?.contains(failed_unit)) break true; } else false; if (should_have_error) { 
@panic("referenced transitive analysis errors, but none actually emitted"); @@ -3414,7 +3427,7 @@ pub fn addModuleErrorMsg( mod: *Zcu, eb: *ErrorBundle.Wip, module_err_msg: Zcu.ErrorMsg, - all_references: *const std.AutoHashMapUnmanaged(InternPool.AnalUnit, ?Zcu.ResolvedReference), + all_references: *?std.AutoHashMapUnmanaged(InternPool.AnalUnit, ?Zcu.ResolvedReference), ) !void { const gpa = eb.gpa; const ip = &mod.intern_pool; @@ -3438,13 +3451,17 @@ pub fn addModuleErrorMsg( defer ref_traces.deinit(gpa); if (module_err_msg.reference_trace_root.unwrap()) |rt_root| { + if (all_references.* == null) { + all_references.* = try mod.resolveReferences(); + } + var seen: std.AutoHashMapUnmanaged(InternPool.AnalUnit, void) = .{}; defer seen.deinit(gpa); const max_references = mod.comp.reference_trace orelse Sema.default_reference_trace_len; var referenced_by = rt_root; - while (all_references.get(referenced_by)) |maybe_ref| { + while (all_references.*.?.get(referenced_by)) |maybe_ref| { const ref = maybe_ref orelse break; const gop = try seen.getOrPut(gpa, ref.referencer); if (gop.found_existing) break; From 04b13547e13e3410b911fdbb06cbb27b5051cfbf Mon Sep 17 00:00:00 2001 From: mlugg Date: Sat, 17 Aug 2024 08:31:33 +0100 Subject: [PATCH 20/25] Zcu: avoid unnecessary re-analysis in some dependency loop situations I'm like 80% sure this is correct --- src/Zcu.zig | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/Zcu.zig b/src/Zcu.zig index f191f6a5f1..fc7978b15f 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -2429,7 +2429,11 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit { if (!zcu.comp.incremental) return null; - if (zcu.outdated.count() == 0 and zcu.potentially_outdated.count() == 0) { + if (zcu.outdated.count() == 0) { + // Any units in `potentially_outdated` must just be stuck in loops with one another: none of those + // units have had any 
outdated dependencies so far, and all of their remaining PO deps are triggered
+        // by other units in `potentially_outdated`. So, we can safely assume those units up-to-date.
+        zcu.potentially_outdated.clear();
         log.debug("findOutdatedToAnalyze: no outdated depender", .{});
         return null;
     }

From d63d9b99185c080c6ed7a6be3dc43c897f2aeadc Mon Sep 17 00:00:00 2001
From: mlugg
Date: Sat, 17 Aug 2024 10:18:54 +0100
Subject: [PATCH 21/25] compiler: use incremental cache mode with -fincremental

This results in correct reporting to the build system of file system
inputs with `-fincremental --watch` on a `fno-emit-bin` build.
---
 src/main.zig | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/main.zig b/src/main.zig
index 62c721f157..3429500bf8 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -3257,9 +3257,12 @@ fn buildOutputType(
         else => false,
     };

+    const incremental = opt_incremental orelse false;
+
     const disable_lld_caching = !output_to_cache;

     const cache_mode: Compilation.CacheMode = b: {
+        if (incremental) break :b .incremental;
         if (disable_lld_caching) break :b .incremental;
         if (!create_module.resolved_options.have_zcu) break :b .whole;
@@ -3272,8 +3275,6 @@ fn buildOutputType(
         break :b .incremental;
     };

-    const incremental = opt_incremental orelse false;
-
     process.raiseFileDescriptorLimit();

     var file_system_inputs: std.ArrayListUnmanaged(u8) = .{};

From 9c3324173de2d8de80e95fb6be23eb63dfbc5169 Mon Sep 17 00:00:00 2001
From: mlugg
Date: Sat, 17 Aug 2024 11:38:19 +0100
Subject: [PATCH 22/25] compiler: merge conflicts and typos

---
 src/Sema.zig       |  6 +++---
 src/Zcu.zig        |  2 +-
 src/link/Dwarf.zig | 22 +++++++++++-----------
 3 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/src/Sema.zig b/src/Sema.zig
index c4345c4464..7930c8f080 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -2504,12 +2504,12 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.Error
     const mod = sema.pt.zcu;
     if (build_options.enable_debug_extensions
and mod.comp.debug_compile_errors) {
-        var all_references = mod.resolveReferences() catch @panic("out of memory");
+        var all_references: ?std.AutoHashMapUnmanaged(AnalUnit, ?Zcu.ResolvedReference) = null;
         var wip_errors: std.zig.ErrorBundle.Wip = undefined;
         wip_errors.init(gpa) catch @panic("out of memory");
-        Compilation.addModuleErrorMsg(mod, &wip_errors, err_msg.*, &all_references) catch unreachable;
+        Compilation.addModuleErrorMsg(mod, &wip_errors, err_msg.*, &all_references) catch @panic("out of memory");
         std.debug.print("compile error during Sema:\n", .{});
-        var error_bundle = wip_errors.toOwnedBundle("") catch unreachable;
+        var error_bundle = wip_errors.toOwnedBundle("") catch @panic("out of memory");
         error_bundle.renderToStdErr(.{ .ttyconf = .no_color });
         crash_report.compilerPanic("unexpected compile error occurred", null, null);
     }
diff --git a/src/Zcu.zig b/src/Zcu.zig
index fc7978b15f..a47979bf60 100644
--- a/src/Zcu.zig
+++ b/src/Zcu.zig
@@ -2433,7 +2433,7 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit {
         // Any units in `potentially_outdated` must just be stuck in loops with one another: none of those
         // units have had any outdated dependencies so far, and all of their remaining PO deps are triggered
         // by other units in `potentially_outdated`. So, we can safely assume those units up-to-date.
- zcu.potentially_outdated.clear(); + zcu.potentially_outdated.clearRetainingCapacity(); log.debug("findOutdatedToAnalyze: no outdated depender", .{}); return null; } diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 5bc7ae5dc1..e6cf89a4ce 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -786,7 +786,7 @@ const Entry = struct { const ip = &zcu.intern_pool; for (dwarf.types.keys(), dwarf.types.values()) |ty, other_entry| { const ty_unit: Unit.Index = if (Type.fromInterned(ty).typeDeclInst(zcu)) |inst_index| - dwarf.getUnit(zcu.fileByIndex(inst_index.resolveFull(ip).file).mod) catch unreachable + dwarf.getUnit(zcu.fileByIndex(inst_index.resolveFile(ip)).mod) catch unreachable else .main; if (sec.getUnit(ty_unit) == unit and unit.getEntry(other_entry) == entry) @@ -796,7 +796,7 @@ const Entry = struct { }); } for (dwarf.navs.keys(), dwarf.navs.values()) |nav, other_entry| { - const nav_unit = dwarf.getUnit(zcu.fileByIndex(ip.getNav(nav).srcInst(ip).resolveFull(ip).file).mod) catch unreachable; + const nav_unit = dwarf.getUnit(zcu.fileByIndex(ip.getNav(nav).srcInst(ip).resolveFile(ip)).mod) catch unreachable; if (sec.getUnit(nav_unit) == unit and unit.getEntry(other_entry) == entry) log.err("missing Nav({}({d}))", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) }); } @@ -1201,7 +1201,7 @@ pub const WipNav = struct { const ip = &zcu.intern_pool; const maybe_inst_index = ty.typeDeclInst(zcu); const unit = if (maybe_inst_index) |inst_index| - try wip_nav.dwarf.getUnit(zcu.fileByIndex(inst_index.resolveFull(ip).file).mod) + try wip_nav.dwarf.getUnit(zcu.fileByIndex(inst_index.resolveFile(ip)).mod) else .main; const gop = try wip_nav.dwarf.types.getOrPut(wip_nav.dwarf.gpa, ty.toIntern()); @@ -1539,7 +1539,7 @@ pub fn initWipNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.In const nav = ip.getNav(nav_index); log.debug("initWipNav({})", .{nav.fqn.fmt(ip)}); - const inst_info = nav.srcInst(ip).resolveFull(ip); + const inst_info = 
nav.srcInst(ip).resolveFull(ip).?; const file = zcu.fileByIndex(inst_info.file); const unit = try dwarf.getUnit(file.mod); @@ -1874,7 +1874,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool const nav = ip.getNav(nav_index); log.debug("updateComptimeNav({})", .{nav.fqn.fmt(ip)}); - const inst_info = nav.srcInst(ip).resolveFull(ip); + const inst_info = nav.srcInst(ip).resolveFull(ip).?; const file = zcu.fileByIndex(inst_info.file); assert(file.zir_loaded); const decl_inst = file.zir.instructions.get(@intFromEnum(inst_info.inst)); @@ -1937,7 +1937,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool }; break :value_inst value_inst; }; - const type_inst_info = loaded_struct.zir_index.unwrap().?.resolveFull(ip); + const type_inst_info = loaded_struct.zir_index.unwrap().?.resolveFull(ip).?; if (type_inst_info.inst != value_inst) break :decl_struct; const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern()); @@ -2053,7 +2053,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool }; break :value_inst value_inst; }; - const type_inst_info = loaded_enum.zir_index.unwrap().?.resolveFull(ip); + const type_inst_info = loaded_enum.zir_index.unwrap().?.resolveFull(ip).?; if (type_inst_info.inst != value_inst) break :decl_enum; const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern()); @@ -2127,7 +2127,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool }; break :value_inst value_inst; }; - const type_inst_info = loaded_union.zir_index.resolveFull(ip); + const type_inst_info = loaded_union.zir_index.resolveFull(ip).?; if (type_inst_info.inst != value_inst) break :decl_union; const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern()); @@ -2240,7 +2240,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool }; break :value_inst value_inst; }; - const type_inst_info = 
loaded_opaque.zir_index.resolveFull(ip); + const type_inst_info = loaded_opaque.zir_index.resolveFull(ip).?; if (type_inst_info.inst != value_inst) break :decl_opaque; const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern()); @@ -2704,7 +2704,7 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP const ty = Type.fromInterned(type_index); log.debug("updateContainerType({}({d}))", .{ ty.fmt(pt), @intFromEnum(type_index) }); - const inst_info = ty.typeDeclInst(zcu).?.resolveFull(ip); + const inst_info = ty.typeDeclInst(zcu).?.resolveFull(ip).?; const file = zcu.fileByIndex(inst_info.file); if (inst_info.inst == .main_struct_inst) { const unit = try dwarf.getUnit(file.mod); @@ -2922,7 +2922,7 @@ pub fn updateNavLineNumber(dwarf: *Dwarf, zcu: *Zcu, nav_index: InternPool.Nav.I const ip = &zcu.intern_pool; const zir_index = ip.getCau(ip.getNav(nav_index).analysis_owner.unwrap() orelse return).zir_index; - const inst_info = zir_index.resolveFull(ip); + const inst_info = zir_index.resolveFull(ip).?; assert(inst_info.inst != .main_struct_inst); const file = zcu.fileByIndex(inst_info.file); From 7f2466e65fe0b7411792ce3d3f186893ed22379d Mon Sep 17 00:00:00 2001 From: mlugg Date: Sat, 17 Aug 2024 11:40:25 +0100 Subject: [PATCH 23/25] std.BoundedArray: add clear() --- lib/std/bounded_array.zig | 5 +++++ src/Compilation.zig | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/std/bounded_array.zig b/lib/std/bounded_array.zig index 59f68e8cf8..85a175003d 100644 --- a/lib/std/bounded_array.zig +++ b/lib/std/bounded_array.zig @@ -72,6 +72,11 @@ pub fn BoundedArrayAligned( self.len = @intCast(len); } + /// Remove all elements from the slice. + pub fn clear(self: *Self) void { + self.len = 0; + } + /// Copy the content of an existing slice. 
pub fn fromSlice(m: []const T) error{Overflow}!Self { var list = try init(m.len); diff --git a/src/Compilation.zig b/src/Compilation.zig index 05a9661101..86f5a6e4c4 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2264,7 +2264,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { } } - zcu.analysis_roots.resize(0) catch unreachable; + zcu.analysis_roots.clear(); try comp.queueJob(.{ .analyze_mod = std_mod }); zcu.analysis_roots.appendAssumeCapacity(std_mod); From 9e6318a4ea042e3fab7a1b2347600cde3d804e1a Mon Sep 17 00:00:00 2001 From: mlugg Date: Sat, 17 Aug 2024 12:05:12 +0100 Subject: [PATCH 24/25] compiler: add some doc comments --- src/InternPool.zig | 23 +++++++++++++++++++++-- src/Zcu/PerThread.zig | 3 +++ 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 8259f94812..d0dc16c47d 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -62,10 +62,18 @@ const want_multi_threaded = true; /// Whether a single-threaded intern pool impl is in use. pub const single_threaded = builtin.single_threaded or !want_multi_threaded; +/// A `TrackedInst.Index` provides a single, unchanging reference to a ZIR instruction across a whole +/// compilation. From this index, you can acquire a `TrackedInst`, which contains a reference to both +/// the file which the instruction lives in, and the instruction index itself, which is updated on +/// incremental updates by `Zcu.updateZirRefs`. pub const TrackedInst = extern struct { file: FileIndex, inst: Zir.Inst.Index, + /// It is possible on an incremental update that we "lose" a ZIR instruction: some tracked `%x` in + /// the old ZIR failed to map to any `%y` in the new ZIR. For this reason, we actually store values + /// of type `MaybeLost`, which uses `ZirIndex.lost` to represent this case. `Index.resolve` etc + /// return `null` when the `TrackedInst` being resolved has been lost.
pub const MaybeLost = extern struct { file: FileIndex, inst: ZirIndex, @@ -244,14 +252,17 @@ pub fn trackZir( return index; } +/// At the start of an incremental update, we update every entry in `tracked_insts` to include +/// the new ZIR index. Once this is done, we must update the hashmap metadata so that lookups +/// return correct entries where they already exist. pub fn rehashTrackedInsts( ip: *InternPool, gpa: Allocator, - /// TODO: maybe don't take this? it doesn't actually matter, only one thread is running at this point tid: Zcu.PerThread.Id, ) Allocator.Error!void { + assert(tid == .main); // we shouldn't have any other threads active right now + // TODO: this function doesn't handle OOM well. What should it do? - // Indeed, what should anyone do when they run out of memory? // We don't lock anything, as this function assumes that no other thread is // accessing `tracked_insts`. This is necessary because we're going to be @@ -795,6 +806,14 @@ const Local = struct { /// This state is fully local to the owning thread and does not require any /// atomic access. mutate: struct { + /// When we need to allocate any long-lived buffer for mutating the `InternPool`, it is + /// allocated into this `arena` (for the `Id` of the thread performing the mutation). An + /// arena is used to avoid contention on the GPA, and to ensure that any code which retains + /// references to old state remains valid. For instance, when reallocing hashmap metadata, + /// a racing lookup on another thread may still retain a handle to the old metadata pointer, + /// so it must remain valid. + /// This arena's lifetime is tied to that of `Compilation`, although it can be cleared on + /// garbage collection (currently vaporware). arena: std.heap.ArenaAllocator.State, items: ListMutate, diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index b2f6d600e6..700d8708b1 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -1,3 +1,6 @@ +//! 
This type provides a wrapper around a `*Zcu` for uses which require a thread `Id`. +//! Any operation which mutates `InternPool` state lives here rather than on `Zcu`. + zcu: *Zcu, /// Dense, per-thread unique index. From f0374fe3f04925a6e686077c2ffcb51b8eafc926 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sat, 17 Aug 2024 12:18:51 +0100 Subject: [PATCH 25/25] Compilation: simplify `totalErrorCount` This function now has to allocate anyway to resolve references, so we may as well just build the error bundle and check its length. Also remove some unnecessary calls of this function for efficiency. --- src/Compilation.zig | 99 ++++----------------------------------------- 1 file changed, 8 insertions(+), 91 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 86f5a6e4c4..fab0496b22 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2300,7 +2300,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { zcu.intern_pool.dumpGenericInstances(gpa); } - if (comp.config.is_test and try comp.totalErrorCount() == 0) { + if (comp.config.is_test) { // The `test_functions` decl has been intentionally postponed until now, // at which point we must populate it with the list of test functions that // have been discovered and not filtered out. @@ -2394,6 +2394,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { } try flush(comp, arena, .main, main_progress_node); + if (try comp.totalErrorCount() != 0) return; // Failure here only means an unnecessary cache miss. 
@@ -2411,7 +2412,6 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { }, .incremental => { try flush(comp, arena, .main, main_progress_node); - if (try comp.totalErrorCount() != 0) return; }, } } @@ -3047,93 +3047,6 @@ fn addBuf(list: *std.ArrayList(std.posix.iovec_const), buf: []const u8) void { list.appendAssumeCapacity(.{ .base = buf.ptr, .len = buf.len }); } -/// This function is temporally single-threaded. -pub fn totalErrorCount(comp: *Compilation) Allocator.Error!u32 { - var total: usize = - comp.misc_failures.count() + - @intFromBool(comp.alloc_failure_occurred) + - comp.lld_errors.items.len; - - for (comp.failed_c_objects.values()) |bundle| { - total += bundle.diags.len; - } - - for (comp.failed_win32_resources.values()) |errs| { - total += errs.errorMessageCount(); - } - - if (comp.module) |zcu| { - const ip = &zcu.intern_pool; - - var all_references: ?std.AutoHashMapUnmanaged(InternPool.AnalUnit, ?Zcu.ResolvedReference) = null; - defer if (all_references) |*a| a.deinit(zcu.gpa); - - total += zcu.failed_exports.count(); - total += zcu.failed_embed_files.count(); - - for (zcu.failed_files.keys(), zcu.failed_files.values()) |file, error_msg| { - if (error_msg) |_| { - total += 1; - } else { - assert(file.zir_loaded); - const payload_index = file.zir.extra[@intFromEnum(Zir.ExtraIndex.compile_errors)]; - assert(payload_index != 0); - const header = file.zir.extraData(Zir.Inst.CompileErrors, payload_index); - total += header.data.items_len; - } - } - - // Skip errors for Decls within files that failed parsing. - // When a parse error is introduced, we keep all the semantic analysis for - // the previous parse success, including compile errors, but we cannot - // emit them until the file succeeds parsing. 
- for (zcu.failed_analysis.keys()) |anal_unit| { - if (comp.incremental) { - if (all_references == null) { - all_references = try zcu.resolveReferences(); - } - if (!all_references.?.contains(anal_unit)) continue; - } - const file_index = switch (anal_unit.unwrap()) { - .cau => |cau| zcu.namespacePtr(ip.getCau(cau).namespace).file_scope, - .func => |ip_index| (zcu.funcInfo(ip_index).zir_body_inst.resolveFull(ip) orelse continue).file, - }; - if (zcu.fileByIndex(file_index).okToReportErrors()) { - total += 1; - if (zcu.cimport_errors.get(anal_unit)) |errors| { - total += errors.errorMessageCount(); - } - } - } - - for (zcu.failed_codegen.keys()) |nav| { - if (zcu.navFileScope(nav).okToReportErrors()) { - total += 1; - } - } - - if (zcu.intern_pool.global_error_set.getNamesFromMainThread().len > zcu.error_limit) { - total += 1; - } - } - - // The "no entry point found" error only counts if there are no semantic analysis errors. - if (total == 0) { - total += @intFromBool(comp.link_error_flags.no_entry_point_found); - } - total += @intFromBool(comp.link_error_flags.missing_libc); - total += comp.link_errors.items.len; - - // Compile log errors only count if there are no other errors. - if (total == 0) { - if (comp.module) |zcu| { - total += @intFromBool(zcu.compile_log_sources.count() != 0); - } - } - - return @intCast(total); -} - /// This function is temporally single-threaded. 
pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { const gpa = comp.gpa; @@ -3357,8 +3270,6 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { } } - assert(try comp.totalErrorCount() == bundle.root_list.items.len); - if (comp.module) |zcu| { if (comp.incremental and bundle.root_list.items.len == 0) { const should_have_error = for (zcu.transitive_failed_analysis.keys()) |failed_unit| { @@ -3377,6 +3288,12 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { return bundle.toOwnedBundle(compile_log_text); } +fn totalErrorCount(comp: *Compilation) !u32 { + var errors = try comp.getAllErrorsAlloc(); + defer errors.deinit(comp.gpa); + return errors.errorMessageCount(); +} + pub const ErrorNoteHashContext = struct { eb: *const ErrorBundle.Wip,