InternPool: add FileIndex to *File mapping

Jacob Young
2024-07-10 10:04:33 -04:00
parent f93a10f664
commit 3d2dfbe828
7 changed files with 314 additions and 251 deletions

View File

@@ -2119,12 +2119,14 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
}
if (comp.module) |zcu| {
const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = .main };
zcu.compile_log_text.shrinkAndFree(gpa, 0);
// Make sure std.zig is inside the import_table. We unconditionally need
// it for start.zig.
const std_mod = zcu.std_mod;
_ = try zcu.importPkg(std_mod);
_ = try pt.importPkg(std_mod);
// Normally we rely on importing std to in turn import the root source file
// in the start code, but when using the stage1 backend that won't happen,
@@ -2133,20 +2135,19 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
// Likewise, in the case of `zig test`, the test runner is the root source file,
// and so there is nothing to import the main file.
if (comp.config.is_test) {
_ = try zcu.importPkg(zcu.main_mod);
_ = try pt.importPkg(zcu.main_mod);
}
if (zcu.root_mod.deps.get("compiler_rt")) |compiler_rt_mod| {
_ = try zcu.importPkg(compiler_rt_mod);
_ = try pt.importPkg(compiler_rt_mod);
}
// Put a work item in for every known source file to detect if
// it changed, and, if so, re-compute ZIR and then queue the job
// to update it.
try comp.astgen_work_queue.ensureUnusedCapacity(zcu.import_table.count());
for (zcu.import_table.values(), 0..) |file, file_index_usize| {
const file_index: Zcu.File.Index = @enumFromInt(file_index_usize);
if (file.mod.isBuiltin()) continue;
for (zcu.import_table.values()) |file_index| {
if (zcu.fileByIndex(file_index).mod.isBuiltin()) continue;
comp.astgen_work_queue.writeItemAssumeCapacity(file_index);
}
@@ -2641,7 +2642,8 @@ fn resolveEmitLoc(
return slice.ptr;
}
fn reportMultiModuleErrors(zcu: *Zcu) !void {
fn reportMultiModuleErrors(pt: Zcu.PerThread) !void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
// Some cases can give you a whole bunch of multi-module errors, which it's not helpful to
@@ -2651,14 +2653,13 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void {
// Attach the "some omitted" note to the final error message
var last_err: ?*Zcu.ErrorMsg = null;
for (zcu.import_table.values(), 0..) |file, file_index_usize| {
for (zcu.import_table.values()) |file_index| {
const file = zcu.fileByIndex(file_index);
if (!file.multi_pkg) continue;
num_errors += 1;
if (num_errors > max_errors) continue;
const file_index: Zcu.File.Index = @enumFromInt(file_index_usize);
const err = err_blk: {
// Like with errors, let's cap the number of notes to prevent a huge error spew.
const max_notes = 5;
@@ -2749,8 +2750,9 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void {
// to add this flag after reporting the errors however, as otherwise
// we'd get an error for every single downstream file, which wouldn't be
// very useful.
for (zcu.import_table.values()) |file| {
if (file.multi_pkg) file.recursiveMarkMultiPkg(zcu);
for (zcu.import_table.values()) |file_index| {
const file = zcu.fileByIndex(file_index);
if (file.multi_pkg) file.recursiveMarkMultiPkg(pt);
}
}
@@ -3443,11 +3445,12 @@ fn performAllTheWorkInner(
}
}
if (comp.module) |mod| {
try reportMultiModuleErrors(mod);
try mod.flushRetryableFailures();
mod.sema_prog_node = main_progress_node.start("Semantic Analysis", 0);
mod.codegen_prog_node = main_progress_node.start("Code Generation", 0);
if (comp.module) |zcu| {
const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = .main };
try reportMultiModuleErrors(pt);
try zcu.flushRetryableFailures();
zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0);
zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0);
}
if (!InternPool.single_threaded) comp.thread_pool.spawnWgId(&comp.work_queue_wait_group, codegenThread, .{comp});
@@ -4189,9 +4192,9 @@ fn workerAstGenFile(
comp.mutex.lock();
defer comp.mutex.unlock();
const res = pt.zcu.importFile(file, import_path) catch continue;
const res = pt.importFile(file, import_path) catch continue;
if (!res.is_pkg) {
res.file.addReference(pt.zcu.*, .{ .import = .{
res.file.addReference(pt.zcu, .{ .import = .{
.file = file_index,
.token = item.data.token,
} }) catch continue;
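
The loops above now iterate import_table.values() as Zcu.File.Index handles and resolve each one through zcu.fileByIndex before use. Below is a minimal, self-contained sketch of that handle-valued pattern; the names (File, FileIndex, fileByIndex) are illustrative stand-ins, not the real Zcu/InternPool API.

// Illustrative sketch only: an import table whose values are index handles,
// resolved to pointers on use, mirroring the shape of the astgen loop above.
const std = @import("std");

const File = struct { sub_file_path: []const u8 };
const FileIndex = enum(u32) { _ };

fn fileByIndex(files: []const *File, i: FileIndex) *File {
    return files[@intFromEnum(i)];
}

test "iterate a handle-valued import table" {
    const gpa = std.testing.allocator;

    var std_file: File = .{ .sub_file_path = "std.zig" };
    var main_file: File = .{ .sub_file_path = "main.zig" };
    const files = [_]*File{ &std_file, &main_file };

    var import_table: std.StringArrayHashMapUnmanaged(FileIndex) = .{};
    defer import_table.deinit(gpa);
    try import_table.put(gpa, "/p/std.zig", @enumFromInt(0));
    try import_table.put(gpa, "/p/main.zig", @enumFromInt(1));

    // values() yields indices; each is resolved before inspecting the file.
    for (import_table.values()) |file_index| {
        const file = fileByIndex(&files, file_index);
        try std.testing.expect(file.sub_file_path.len > 0);
    }
}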

View File

@@ -1,6 +1,5 @@
//! All interned objects have both a value and a type.
//! This data structure is self-contained, with the following exceptions:
//! * Module.Namespace has a pointer to Module.File
//! This data structure is self-contained.
/// One item per thread, indexed by `tid`, which is dense and unique per thread.
locals: []Local = &.{},
@@ -79,10 +78,6 @@ const want_multi_threaded = false;
/// Whether a single-threaded intern pool impl is in use.
pub const single_threaded = builtin.single_threaded or !want_multi_threaded;
pub const FileIndex = enum(u32) {
_,
};
pub const TrackedInst = extern struct {
file: FileIndex,
inst: Zir.Inst.Index,
@@ -340,6 +335,7 @@ const Local = struct {
extra: ListMutate,
limbs: ListMutate,
strings: ListMutate,
files: ListMutate,
decls: BucketListMutate,
namespaces: BucketListMutate,
@@ -350,6 +346,7 @@ const Local = struct {
extra: Extra,
limbs: Limbs,
strings: Strings,
files: Files,
decls: Decls,
namespaces: Namespaces,
@@ -370,16 +367,17 @@ const Local = struct {
else => @compileError("unsupported host"),
};
const Strings = List(struct { u8 });
const Files = List(struct { *Zcu.File });
const decls_bucket_width = 8;
const decls_bucket_mask = (1 << decls_bucket_width) - 1;
const decl_next_free_field = "src_namespace";
const Decls = List(struct { *[1 << decls_bucket_width]Module.Decl });
const Decls = List(struct { *[1 << decls_bucket_width]Zcu.Decl });
const namespaces_bucket_width = 8;
const namespaces_bucket_mask = (1 << namespaces_bucket_width) - 1;
const namespace_next_free_field = "decl_index";
const Namespaces = List(struct { *[1 << namespaces_bucket_width]Module.Namespace });
const Namespaces = List(struct { *[1 << namespaces_bucket_width]Zcu.Namespace });
const ListMutate = struct {
len: u32,
@@ -677,6 +675,15 @@ const Local = struct {
};
}
pub fn getMutableFiles(local: *Local, gpa: std.mem.Allocator) Files.Mutable {
return .{
.gpa = gpa,
.arena = &local.mutate.arena,
.mutate = &local.mutate.files,
.list = &local.shared.files,
};
}
/// Rather than allocating Decl objects with an Allocator, we instead allocate
/// them with this BucketList. This provides four advantages:
/// * Stable memory so that one thread can access a Decl object while another
@@ -812,8 +819,6 @@ const Hash = std.hash.Wyhash;
const InternPool = @This();
const Zcu = @import("Zcu.zig");
/// Deprecated.
const Module = Zcu;
const Zir = std.zig.Zir;
/// An index into `maps` which might be `none`.
@@ -938,6 +943,28 @@ pub const OptionalNamespaceIndex = enum(u32) {
}
};
pub const FileIndex = enum(u32) {
_,
const Unwrapped = struct {
tid: Zcu.PerThread.Id,
index: u32,
fn wrap(unwrapped: Unwrapped, ip: *const InternPool) FileIndex {
assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask());
assert(unwrapped.index <= ip.getIndexMask(u32));
return @enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_32 |
unwrapped.index);
}
};
fn unwrap(file_index: FileIndex, ip: *const InternPool) Unwrapped {
return .{
.tid = @enumFromInt(@intFromEnum(file_index) >> ip.tid_shift_32 & ip.getTidMask()),
.index = @intFromEnum(file_index) & ip.getIndexMask(u32),
};
}
};
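
FileIndex packs the owning thread's id into the upper bits of a 32-bit handle via the pool's tid_shift_32 and masks. The standalone sketch below reproduces the wrap/unwrap round trip under an assumed fixed shift of 29; the real InternPool derives the shift and masks from the available thread count.

// Illustrative round trip of the tid packing above, assuming a hypothetical
// fixed tid_shift_32 of 29 (3 bits of thread id, 29 bits of per-thread index).
const std = @import("std");

const tid_shift_32: u5 = 29;
const index_mask: u32 = (@as(u32, 1) << tid_shift_32) - 1;

fn wrap(tid: u32, index: u32) u32 {
    std.debug.assert(index <= index_mask);
    return (tid << tid_shift_32) | index;
}

fn unwrap(file_index: u32) struct { tid: u32, index: u32 } {
    return .{
        .tid = file_index >> tid_shift_32,
        .index = file_index & index_mask,
    };
}

test "FileIndex wrap/unwrap round trip" {
    const u = unwrap(wrap(2, 40));
    try std.testing.expectEqual(@as(u32, 2), u.tid);
    try std.testing.expectEqual(@as(u32, 40), u.index);
}
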
/// An index into `strings`.
pub const String = enum(u32) {
/// An empty string.
@@ -4608,12 +4635,12 @@ pub const FuncAnalysis = packed struct(u32) {
/// inline, which means no runtime version of the function will be generated.
inline_only,
in_progress,
/// There will be a corresponding ErrorMsg in Module.failed_decls
/// There will be a corresponding ErrorMsg in Zcu.failed_decls
sema_failure,
/// This function might be OK but it depends on another Decl which did not
/// successfully complete semantic analysis.
dependency_failure,
/// There will be a corresponding ErrorMsg in Module.failed_decls.
/// There will be a corresponding ErrorMsg in Zcu.failed_decls.
/// Indicates that semantic analysis succeeded, but code generation for
/// this function failed.
codegen_failure,
@@ -5210,6 +5237,7 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void {
.extra = Local.Extra.empty,
.limbs = Local.Limbs.empty,
.strings = Local.Strings.empty,
.files = Local.Files.empty,
.decls = Local.Decls.empty,
.namespaces = Local.Namespaces.empty,
@@ -5221,6 +5249,7 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void {
.extra = Local.ListMutate.empty,
.limbs = Local.ListMutate.empty,
.strings = Local.ListMutate.empty,
.files = Local.ListMutate.empty,
.decls = Local.BucketListMutate.empty,
.namespaces = Local.BucketListMutate.empty,
@@ -9213,7 +9242,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
const items_size = (1 + 4) * items_len;
const extra_size = 4 * extra_len;
const limbs_size = 8 * limbs_len;
const decls_size = @sizeOf(Module.Decl) * decls_len;
const decls_size = @sizeOf(Zcu.Decl) * decls_len;
// TODO: map overhead size is not taken into account
const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size + decls_size;
@@ -9640,29 +9669,22 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator)
try bw.flush();
}
pub fn declPtr(ip: *InternPool, decl_index: DeclIndex) *Module.Decl {
pub fn declPtr(ip: *InternPool, decl_index: DeclIndex) *Zcu.Decl {
return @constCast(ip.declPtrConst(decl_index));
}
pub fn declPtrConst(ip: *const InternPool, decl_index: DeclIndex) *const Module.Decl {
pub fn declPtrConst(ip: *const InternPool, decl_index: DeclIndex) *const Zcu.Decl {
const unwrapped_decl_index = decl_index.unwrap(ip);
const decls = ip.getLocalShared(unwrapped_decl_index.tid).decls.acquire();
const decls_bucket = decls.view().items(.@"0")[unwrapped_decl_index.bucket_index];
return &decls_bucket[unwrapped_decl_index.index];
}
pub fn namespacePtr(ip: *InternPool, namespace_index: NamespaceIndex) *Module.Namespace {
const unwrapped_namespace_index = namespace_index.unwrap(ip);
const namespaces = ip.getLocalShared(unwrapped_namespace_index.tid).namespaces.acquire();
const namespaces_bucket = namespaces.view().items(.@"0")[unwrapped_namespace_index.bucket_index];
return &namespaces_bucket[unwrapped_namespace_index.index];
}
pub fn createDecl(
ip: *InternPool,
gpa: Allocator,
tid: Zcu.PerThread.Id,
initialization: Module.Decl,
initialization: Zcu.Decl,
) Allocator.Error!DeclIndex {
const local = ip.getLocal(tid);
const free_list_next = local.mutate.decls.free_list;
@@ -9679,7 +9701,7 @@ pub fn createDecl(
var arena = decls.arena.promote(decls.gpa);
defer decls.arena.* = arena.state;
decls.appendAssumeCapacity(.{try arena.allocator().create(
[1 << Local.decls_bucket_width]Module.Decl,
[1 << Local.decls_bucket_width]Zcu.Decl,
)});
}
const unwrapped_decl_index: DeclIndex.Unwrapped = .{
@@ -9702,11 +9724,18 @@ pub fn destroyDecl(ip: *InternPool, tid: Zcu.PerThread.Id, decl_index: DeclIndex
local.mutate.decls.free_list = @intFromEnum(decl_index);
}
pub fn namespacePtr(ip: *InternPool, namespace_index: NamespaceIndex) *Zcu.Namespace {
const unwrapped_namespace_index = namespace_index.unwrap(ip);
const namespaces = ip.getLocalShared(unwrapped_namespace_index.tid).namespaces.acquire();
const namespaces_bucket = namespaces.view().items(.@"0")[unwrapped_namespace_index.bucket_index];
return &namespaces_bucket[unwrapped_namespace_index.index];
}
pub fn createNamespace(
ip: *InternPool,
gpa: Allocator,
tid: Zcu.PerThread.Id,
initialization: Module.Namespace,
initialization: Zcu.Namespace,
) Allocator.Error!NamespaceIndex {
const local = ip.getLocal(tid);
const free_list_next = local.mutate.namespaces.free_list;
@@ -9724,7 +9753,7 @@ pub fn createNamespace(
var arena = namespaces.arena.promote(namespaces.gpa);
defer namespaces.arena.* = arena.state;
namespaces.appendAssumeCapacity(.{try arena.allocator().create(
[1 << Local.namespaces_bucket_width]Module.Namespace,
[1 << Local.namespaces_bucket_width]Zcu.Namespace,
)});
}
const unwrapped_namespace_index: NamespaceIndex.Unwrapped = .{
@@ -9756,6 +9785,27 @@ pub fn destroyNamespace(
local.mutate.namespaces.free_list = @intFromEnum(namespace_index);
}
pub fn filePtr(ip: *InternPool, file_index: FileIndex) *Zcu.File {
const file_index_unwrapped = file_index.unwrap(ip);
const files = ip.getLocalShared(file_index_unwrapped.tid).files.acquire();
return files.view().items(.@"0")[file_index_unwrapped.index];
}
pub fn createFile(
ip: *InternPool,
gpa: Allocator,
tid: Zcu.PerThread.Id,
file: *Zcu.File,
) Allocator.Error!FileIndex {
const files = ip.getLocal(tid).getMutableFiles(gpa);
const file_index_unwrapped: FileIndex.Unwrapped = .{
.tid = tid,
.index = files.mutate.len,
};
try files.append(.{file});
return file_index_unwrapped.wrap(ip);
}
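
createFile appends the *Zcu.File pointer to the calling thread's local files list and returns the packed FileIndex; filePtr resolves that handle from any thread. The following is a simplified, self-contained model of that scheme with hypothetical types; the real code goes through Local.getMutableFiles and acquire/release semantics.

// Simplified model only: each thread appends to its own list, and the
// returned handle packs (tid, index) so any thread can resolve the *File.
const std = @import("std");

const File = struct { sub_file_path: []const u8 };
const tid_shift: u5 = 29;

const FileTable = struct {
    locals: []std.ArrayListUnmanaged(*File),

    fn createFile(table: *FileTable, gpa: std.mem.Allocator, tid: u32, file: *File) !u32 {
        const list = &table.locals[tid];
        const index: u32 = @intCast(list.items.len);
        try list.append(gpa, file);
        return (tid << tid_shift) | index;
    }

    fn filePtr(table: *const FileTable, file_index: u32) *File {
        const tid = file_index >> tid_shift;
        const index = file_index & ((@as(u32, 1) << tid_shift) - 1);
        return table.locals[tid].items[index];
    }
};

test "create a file on one thread's list and resolve it by handle" {
    const gpa = std.testing.allocator;
    var lists = [_]std.ArrayListUnmanaged(*File){ .{}, .{} };
    defer {
        for (&lists) |*list| list.deinit(gpa);
    }
    var table: FileTable = .{ .locals = &lists };

    var file: File = .{ .sub_file_path = "std.zig" };
    const index = try table.createFile(gpa, 1, &file);
    try std.testing.expectEqual(&file, table.filePtr(index));
}
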
const EmbeddedNulls = enum {
no_embedded_nulls,
maybe_embedded_nulls,

View File

@@ -6056,7 +6056,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
else => |e| return e,
};
const result = zcu.importPkg(c_import_mod) catch |err|
const result = pt.importPkg(c_import_mod) catch |err|
return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)});
const path_digest = zcu.filePathDigest(result.file_index);
@@ -13950,7 +13950,7 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const operand_src = block.tokenOffset(inst_data.src_tok);
const operand = inst_data.get(sema.code);
const result = zcu.importFile(block.getFileScope(zcu), operand) catch |err| switch (err) {
const result = pt.importFile(block.getFileScope(zcu), operand) catch |err| switch (err) {
error.ImportOutsideModulePath => {
return sema.fail(block, operand_src, "import of file outside module path: '{s}'", .{operand});
},

View File

@@ -3451,7 +3451,7 @@ pub fn typeDeclInst(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index {
};
}
pub fn typeDeclSrcLine(ty: Type, zcu: *const Zcu) ?u32 {
pub fn typeDeclSrcLine(ty: Type, zcu: *Zcu) ?u32 {
const ip = &zcu.intern_pool;
const tracked = switch (ip.indexToKey(ty.toIntern())) {
.struct_type, .union_type, .opaque_type, .enum_type => |info| switch (info) {

View File

@@ -102,7 +102,7 @@ multi_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct {
/// `Compilation.update` of the process for a given `Compilation`.
///
/// Indexes correspond 1:1 to `files`.
import_table: std.StringArrayHashMapUnmanaged(*File) = .{},
import_table: std.StringArrayHashMapUnmanaged(File.Index) = .{},
/// The set of all the files which have been loaded with `@embedFile` in the Module.
/// We keep track of this in order to iterate over it and check which files have been
@@ -892,7 +892,7 @@ pub const File = struct {
}
/// Add a reference to this file during AstGen.
pub fn addReference(file: *File, zcu: Zcu, ref: File.Reference) !void {
pub fn addReference(file: *File, zcu: *Zcu, ref: File.Reference) !void {
// Don't add the same module root twice. Note that since we always add module roots at the
// front of the references array (see below), this loop is actually O(1) on valid code.
if (ref == .root) {
@@ -924,7 +924,7 @@ pub const File = struct {
/// Mark this file and every file referenced by it as multi_pkg and report an
/// astgen_failure error for them. AstGen must have completed in its entirety.
pub fn recursiveMarkMultiPkg(file: *File, mod: *Module) void {
pub fn recursiveMarkMultiPkg(file: *File, pt: Zcu.PerThread) void {
file.multi_pkg = true;
file.status = .astgen_failure;
@@ -944,9 +944,9 @@ pub const File = struct {
const import_path = file.zir.nullTerminatedString(item.data.name);
if (mem.eql(u8, import_path, "builtin")) continue;
const res = mod.importFile(file, import_path) catch continue;
const res = pt.importFile(file, import_path) catch continue;
if (!res.is_pkg and !res.file.multi_pkg) {
res.file.recursiveMarkMultiPkg(mod);
res.file.recursiveMarkMultiPkg(pt);
}
}
}
@@ -3002,183 +3002,7 @@ pub const ImportFileResult = struct {
is_pkg: bool,
};
pub fn importPkg(zcu: *Zcu, mod: *Package.Module) !ImportFileResult {
const gpa = zcu.gpa;
// The resolved path is used as the key in the import table, to detect if
// an import refers to the same as another, despite different relative paths
// or differently mapped package names.
const resolved_path = try std.fs.path.resolve(gpa, &.{
mod.root.root_dir.path orelse ".",
mod.root.sub_path,
mod.root_src_path,
});
var keep_resolved_path = false;
defer if (!keep_resolved_path) gpa.free(resolved_path);
const gop = try zcu.import_table.getOrPut(gpa, resolved_path);
errdefer _ = zcu.import_table.pop();
if (gop.found_existing) {
try gop.value_ptr.*.addReference(zcu.*, .{ .root = mod });
return .{
.file = gop.value_ptr.*,
.file_index = @enumFromInt(gop.index),
.is_new = false,
.is_pkg = true,
};
}
const ip = &zcu.intern_pool;
try ip.files.ensureUnusedCapacity(gpa, 1);
if (mod.builtin_file) |builtin_file| {
keep_resolved_path = true; // It's now owned by import_table.
gop.value_ptr.* = builtin_file;
try builtin_file.addReference(zcu.*, .{ .root = mod });
const path_digest = computePathDigest(zcu, mod, builtin_file.sub_file_path);
ip.files.putAssumeCapacityNoClobber(path_digest, .none);
return .{
.file = builtin_file,
.file_index = @enumFromInt(ip.files.entries.len - 1),
.is_new = false,
.is_pkg = true,
};
}
const sub_file_path = try gpa.dupe(u8, mod.root_src_path);
errdefer gpa.free(sub_file_path);
const new_file = try gpa.create(File);
errdefer gpa.destroy(new_file);
keep_resolved_path = true; // It's now owned by import_table.
gop.value_ptr.* = new_file;
new_file.* = .{
.sub_file_path = sub_file_path,
.source = undefined,
.source_loaded = false,
.tree_loaded = false,
.zir_loaded = false,
.stat = undefined,
.tree = undefined,
.zir = undefined,
.status = .never_loaded,
.mod = mod,
};
const path_digest = computePathDigest(zcu, mod, sub_file_path);
try new_file.addReference(zcu.*, .{ .root = mod });
ip.files.putAssumeCapacityNoClobber(path_digest, .none);
return .{
.file = new_file,
.file_index = @enumFromInt(ip.files.entries.len - 1),
.is_new = true,
.is_pkg = true,
};
}
/// Called from a worker thread during AstGen.
/// Also called from Sema during semantic analysis.
pub fn importFile(
zcu: *Zcu,
cur_file: *File,
import_string: []const u8,
) !ImportFileResult {
const mod = cur_file.mod;
if (std.mem.eql(u8, import_string, "std")) {
return zcu.importPkg(zcu.std_mod);
}
if (std.mem.eql(u8, import_string, "root")) {
return zcu.importPkg(zcu.root_mod);
}
if (mod.deps.get(import_string)) |pkg| {
return zcu.importPkg(pkg);
}
if (!mem.endsWith(u8, import_string, ".zig")) {
return error.ModuleNotFound;
}
const gpa = zcu.gpa;
// The resolved path is used as the key in the import table, to detect if
// an import refers to the same as another, despite different relative paths
// or differently mapped package names.
const resolved_path = try std.fs.path.resolve(gpa, &.{
mod.root.root_dir.path orelse ".",
mod.root.sub_path,
cur_file.sub_file_path,
"..",
import_string,
});
var keep_resolved_path = false;
defer if (!keep_resolved_path) gpa.free(resolved_path);
const gop = try zcu.import_table.getOrPut(gpa, resolved_path);
errdefer _ = zcu.import_table.pop();
if (gop.found_existing) return .{
.file = gop.value_ptr.*,
.file_index = @enumFromInt(gop.index),
.is_new = false,
.is_pkg = false,
};
const ip = &zcu.intern_pool;
try ip.files.ensureUnusedCapacity(gpa, 1);
const new_file = try gpa.create(File);
errdefer gpa.destroy(new_file);
const resolved_root_path = try std.fs.path.resolve(gpa, &.{
mod.root.root_dir.path orelse ".",
mod.root.sub_path,
});
defer gpa.free(resolved_root_path);
const sub_file_path = p: {
const relative = try std.fs.path.relative(gpa, resolved_root_path, resolved_path);
errdefer gpa.free(relative);
if (!isUpDir(relative) and !std.fs.path.isAbsolute(relative)) {
break :p relative;
}
return error.ImportOutsideModulePath;
};
errdefer gpa.free(sub_file_path);
log.debug("new importFile. resolved_root_path={s}, resolved_path={s}, sub_file_path={s}, import_string={s}", .{
resolved_root_path, resolved_path, sub_file_path, import_string,
});
keep_resolved_path = true; // It's now owned by import_table.
gop.value_ptr.* = new_file;
new_file.* = .{
.sub_file_path = sub_file_path,
.source = undefined,
.source_loaded = false,
.tree_loaded = false,
.zir_loaded = false,
.stat = undefined,
.tree = undefined,
.zir = undefined,
.status = .never_loaded,
.mod = mod,
};
const path_digest = computePathDigest(zcu, mod, sub_file_path);
ip.files.putAssumeCapacityNoClobber(path_digest, .none);
return .{
.file = new_file,
.file_index = @enumFromInt(ip.files.entries.len - 1),
.is_new = true,
.is_pkg = false,
};
}
fn computePathDigest(zcu: *Zcu, mod: *Package.Module, sub_file_path: []const u8) Cache.BinDigest {
pub fn computePathDigest(zcu: *Zcu, mod: *Package.Module, sub_file_path: []const u8) Cache.BinDigest {
const want_local_cache = mod == zcu.main_mod;
var path_hash: Cache.HashHelper = .{};
path_hash.addBytes(build_options.version);
@@ -3710,8 +3534,9 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, Resolved
return result;
}
pub fn fileByIndex(zcu: *const Zcu, i: File.Index) *File {
return zcu.import_table.values()[@intFromEnum(i)];
pub fn fileByIndex(zcu: *Zcu, i: File.Index) *File {
const ip = &zcu.intern_pool;
return ip.filePtr(i);
}
/// Returns the `Decl` of the struct that represents this `File`.

View File

@@ -817,7 +817,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai
/// https://github.com/ziglang/zig/issues/14307
pub fn semaPkg(pt: Zcu.PerThread, pkg: *Module) !void {
const import_file_result = try pt.zcu.importPkg(pkg);
const import_file_result = try pt.importPkg(pkg);
const root_decl_index = pt.zcu.fileRootDecl(import_file_result.file_index);
if (root_decl_index == .none) {
return pt.semaFile(import_file_result.file_index);
@@ -1081,7 +1081,7 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult {
const std_mod = zcu.std_mod;
if (decl.getFileScope(zcu).mod != std_mod) break :ip_index .none;
// We're in the std module.
const std_file_imported = try zcu.importPkg(std_mod);
const std_file_imported = try pt.importPkg(std_mod);
const std_file_root_decl_index = zcu.fileRootDecl(std_file_imported.file_index);
const std_decl = zcu.declPtr(std_file_root_decl_index.unwrap().?);
const std_namespace = std_decl.getInnerNamespace(zcu).?;
@@ -1356,6 +1356,191 @@ pub fn semaAnonOwnerDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.Sem
};
}
pub fn importPkg(pt: Zcu.PerThread, mod: *Module) !Zcu.ImportFileResult {
const zcu = pt.zcu;
const gpa = zcu.gpa;
// The resolved path is used as the key in the import table, to detect if
// an import refers to the same as another, despite different relative paths
// or differently mapped package names.
const resolved_path = try std.fs.path.resolve(gpa, &.{
mod.root.root_dir.path orelse ".",
mod.root.sub_path,
mod.root_src_path,
});
var keep_resolved_path = false;
defer if (!keep_resolved_path) gpa.free(resolved_path);
const gop = try zcu.import_table.getOrPut(gpa, resolved_path);
errdefer _ = zcu.import_table.pop();
if (gop.found_existing) {
const file_index = gop.value_ptr.*;
const file = zcu.fileByIndex(file_index);
try file.addReference(zcu, .{ .root = mod });
return .{
.file = file,
.file_index = file_index,
.is_new = false,
.is_pkg = true,
};
}
const ip = &zcu.intern_pool;
try ip.files.ensureUnusedCapacity(gpa, 1);
if (mod.builtin_file) |builtin_file| {
const file_index = try ip.createFile(gpa, pt.tid, builtin_file);
keep_resolved_path = true; // It's now owned by import_table.
gop.value_ptr.* = file_index;
try builtin_file.addReference(zcu, .{ .root = mod });
const path_digest = Zcu.computePathDigest(zcu, mod, builtin_file.sub_file_path);
ip.files.putAssumeCapacityNoClobber(path_digest, .none);
return .{
.file = builtin_file,
.file_index = file_index,
.is_new = false,
.is_pkg = true,
};
}
const sub_file_path = try gpa.dupe(u8, mod.root_src_path);
errdefer gpa.free(sub_file_path);
const new_file = try gpa.create(Zcu.File);
errdefer gpa.destroy(new_file);
const new_file_index = try ip.createFile(gpa, pt.tid, new_file);
keep_resolved_path = true; // It's now owned by import_table.
gop.value_ptr.* = new_file_index;
new_file.* = .{
.sub_file_path = sub_file_path,
.source = undefined,
.source_loaded = false,
.tree_loaded = false,
.zir_loaded = false,
.stat = undefined,
.tree = undefined,
.zir = undefined,
.status = .never_loaded,
.mod = mod,
};
const path_digest = zcu.computePathDigest(mod, sub_file_path);
try new_file.addReference(zcu, .{ .root = mod });
ip.files.putAssumeCapacityNoClobber(path_digest, .none);
return .{
.file = new_file,
.file_index = new_file_index,
.is_new = true,
.is_pkg = true,
};
}
/// Called from a worker thread during AstGen.
/// Also called from Sema during semantic analysis.
pub fn importFile(
pt: Zcu.PerThread,
cur_file: *Zcu.File,
import_string: []const u8,
) !Zcu.ImportFileResult {
const zcu = pt.zcu;
const mod = cur_file.mod;
if (std.mem.eql(u8, import_string, "std")) {
return pt.importPkg(zcu.std_mod);
}
if (std.mem.eql(u8, import_string, "root")) {
return pt.importPkg(zcu.root_mod);
}
if (mod.deps.get(import_string)) |pkg| {
return pt.importPkg(pkg);
}
if (!std.mem.endsWith(u8, import_string, ".zig")) {
return error.ModuleNotFound;
}
const gpa = zcu.gpa;
// The resolved path is used as the key in the import table, to detect if
// an import refers to the same as another, despite different relative paths
// or differently mapped package names.
const resolved_path = try std.fs.path.resolve(gpa, &.{
mod.root.root_dir.path orelse ".",
mod.root.sub_path,
cur_file.sub_file_path,
"..",
import_string,
});
var keep_resolved_path = false;
defer if (!keep_resolved_path) gpa.free(resolved_path);
const gop = try zcu.import_table.getOrPut(gpa, resolved_path);
errdefer _ = zcu.import_table.pop();
if (gop.found_existing) {
const file_index = gop.value_ptr.*;
return .{
.file = zcu.fileByIndex(file_index),
.file_index = file_index,
.is_new = false,
.is_pkg = false,
};
}
const ip = &zcu.intern_pool;
try ip.files.ensureUnusedCapacity(gpa, 1);
const new_file = try gpa.create(Zcu.File);
errdefer gpa.destroy(new_file);
const resolved_root_path = try std.fs.path.resolve(gpa, &.{
mod.root.root_dir.path orelse ".",
mod.root.sub_path,
});
defer gpa.free(resolved_root_path);
const sub_file_path = p: {
const relative = try std.fs.path.relative(gpa, resolved_root_path, resolved_path);
errdefer gpa.free(relative);
if (!isUpDir(relative) and !std.fs.path.isAbsolute(relative)) {
break :p relative;
}
return error.ImportOutsideModulePath;
};
errdefer gpa.free(sub_file_path);
log.debug("new importFile. resolved_root_path={s}, resolved_path={s}, sub_file_path={s}, import_string={s}", .{
resolved_root_path, resolved_path, sub_file_path, import_string,
});
const new_file_index = try ip.createFile(gpa, pt.tid, new_file);
keep_resolved_path = true; // It's now owned by import_table.
gop.value_ptr.* = new_file_index;
new_file.* = .{
.sub_file_path = sub_file_path,
.source = undefined,
.source_loaded = false,
.tree_loaded = false,
.zir_loaded = false,
.stat = undefined,
.tree = undefined,
.zir = undefined,
.status = .never_loaded,
.mod = mod,
};
const path_digest = zcu.computePathDigest(mod, sub_file_path);
ip.files.putAssumeCapacityNoClobber(path_digest, .none);
return .{
.file = new_file,
.file_index = new_file_index,
.is_new = true,
.is_pkg = false,
};
}
pub fn embedFile(
pt: Zcu.PerThread,
cur_file: *Zcu.File,
@@ -1429,20 +1614,6 @@ pub fn embedFile(
return pt.newEmbedFile(cur_file.mod, sub_file_path, resolved_path, gop.value_ptr, src_loc);
}
/// Cancel the creation of an anon decl and delete any references to it.
/// If other decls depend on this decl, they must be aborted first.
pub fn abortAnonDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) void {
assert(!pt.zcu.declIsRoot(decl_index));
pt.destroyDecl(decl_index);
}
/// Finalize the creation of an anon decl.
pub fn finalizeAnonDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) Allocator.Error!void {
if (pt.zcu.declPtr(decl_index).typeOf(pt.zcu).isFnOrHasRuntimeBits(pt)) {
try pt.zcu.comp.work_queue.writeItem(.{ .codegen_decl = decl_index });
}
}
/// https://github.com/ziglang/zig/issues/14307
fn newEmbedFile(
pt: Zcu.PerThread,
@@ -1792,6 +1963,20 @@ const ScanDeclIter = struct {
}
};
/// Cancel the creation of an anon decl and delete any references to it.
/// If other decls depend on this decl, they must be aborted first.
pub fn abortAnonDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) void {
assert(!pt.zcu.declIsRoot(decl_index));
pt.destroyDecl(decl_index);
}
/// Finalize the creation of an anon decl.
pub fn finalizeAnonDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) Allocator.Error!void {
if (pt.zcu.declPtr(decl_index).typeOf(pt.zcu).isFnOrHasRuntimeBits(pt)) {
try pt.zcu.comp.work_queue.writeItem(.{ .codegen_decl = decl_index });
}
}
pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: Allocator) Zcu.SemaError!Air {
const tracy = trace(@src());
defer tracy.end();
@@ -2255,7 +2440,7 @@ pub fn populateTestFunctions(
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const builtin_mod = zcu.root_mod.getBuiltinDependency();
const builtin_file_index = (zcu.importPkg(builtin_mod) catch unreachable).file_index;
const builtin_file_index = (pt.importPkg(builtin_mod) catch unreachable).file_index;
const root_decl_index = zcu.fileRootDecl(builtin_file_index);
const root_decl = zcu.declPtr(root_decl_index.unwrap().?);
const builtin_namespace = zcu.namespacePtr(root_decl.src_namespace);
@@ -2923,7 +3108,7 @@ pub fn getBuiltinDecl(pt: Zcu.PerThread, name: []const u8) Allocator.Error!Inter
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const std_file_imported = zcu.importPkg(zcu.std_mod) catch @panic("failed to import lib/std.zig");
const std_file_imported = pt.importPkg(zcu.std_mod) catch @panic("failed to import lib/std.zig");
const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index).unwrap().?;
const std_namespace = zcu.declPtr(std_file_root_decl).getOwnedInnerNamespace(zcu).?;
const builtin_str = try ip.getOrPutString(gpa, pt.tid, "builtin", .no_embedded_nulls);
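
importPkg and importFile above key the import table on the fully resolved path, so the same file reached through different relative spellings maps to a single entry. A standalone check of that property, using made-up paths:

// Illustration only: two spellings of the same file collapse to one key.
const std = @import("std");

test "resolved path deduplicates differently-spelled imports" {
    const gpa = std.testing.allocator;

    // Root-relative spelling, as importPkg resolves a module's root source file.
    const a = try std.fs.path.resolve(gpa, &.{ "/proj", "src", "main.zig" });
    defer gpa.free(a);

    // Spelling relative to an importing file, as importFile resolves an
    // @import string: "other.zig" importing "main.zig" from the same directory.
    const b = try std.fs.path.resolve(gpa, &.{ "/proj", "src", "other.zig", "..", "main.zig" });
    defer gpa.free(b);

    try std.testing.expectEqualStrings(a, b);
}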

View File

@@ -2811,7 +2811,7 @@ pub const Object = struct {
const zcu = pt.zcu;
const std_mod = zcu.std_mod;
const std_file_imported = zcu.importPkg(std_mod) catch unreachable;
const std_file_imported = pt.importPkg(std_mod) catch unreachable;
const builtin_str = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, "builtin", .no_embedded_nulls);
const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index);