elf linker: conform to explicit error sets
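
The changes below repeatedly apply one pattern: functions that previously returned an inferred error set (!void) or logged failures ad hoc now declare small explicit error sets, and a thin wrapper folds every unexpected error into one that has already been reported against the failing declaration. A minimal standalone sketch of that wrapper shape, with illustrative names only (not the compiler's actual API):

    const std = @import("std");

    // The only errors callers of the wrapper are expected to handle.
    const Error = error{ OutOfMemory, CodegenFail };

    // Inner function with an inferred error set; it can fail in many ways.
    fn updateInner() !void {
        return error.InputOutput;
    }

    // Wrapper conforming to the explicit error set: known errors pass
    // through, and everything else is folded into CodegenFail (the real
    // compiler records a diagnostic before returning it).
    fn update() Error!void {
        return updateInner() catch |err| switch (err) {
            error.OutOfMemory => return error.OutOfMemory,
            else => return error.CodegenFail,
        };
    }

    pub fn main() void {
        update() catch |err| std.debug.print("update failed: {s}\n", .{@errorName(err)});
    }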
src/Compilation.zig
@@ -3208,6 +3208,10 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
             if (!zcu.navFileScope(nav).okToReportErrors()) continue;
             try addModuleErrorMsg(zcu, &bundle, error_msg.*);
         }
+        for (zcu.failed_types.keys(), zcu.failed_types.values()) |ty_index, error_msg| {
+            if (!zcu.typeFileScope(ty_index).okToReportErrors()) continue;
+            try addModuleErrorMsg(zcu, &bundle, error_msg.*);
+        }
         for (zcu.failed_exports.values()) |value| {
             try addModuleErrorMsg(zcu, &bundle, value.*);
         }

src/Zcu.zig
@@ -127,6 +127,7 @@ transitive_failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .emp
 /// This may be a simple "value" `Nav`, or it may be a function.
 /// The ErrorMsg memory is owned by the `AnalUnit`, using Module's general purpose allocator.
 failed_codegen: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, *ErrorMsg) = .empty,
+failed_types: std.AutoArrayHashMapUnmanaged(InternPool.Index, *ErrorMsg) = .empty,
 /// Keep track of one `@compileLog` callsite per `AnalUnit`.
 /// The value is the source location of the `@compileLog` call, convertible to a `LazySrcLoc`.
 compile_log_sources: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct {
@@ -2448,16 +2449,14 @@ pub fn deinit(zcu: *Zcu) void {
     zcu.local_zir_cache.handle.close();
     zcu.global_zir_cache.handle.close();

-    for (zcu.failed_analysis.values()) |value| {
-        value.destroy(gpa);
-    }
-    for (zcu.failed_codegen.values()) |value| {
-        value.destroy(gpa);
-    }
+    for (zcu.failed_analysis.values()) |value| value.destroy(gpa);
+    for (zcu.failed_codegen.values()) |value| value.destroy(gpa);
+    for (zcu.failed_types.values()) |value| value.destroy(gpa);
     zcu.analysis_in_progress.deinit(gpa);
     zcu.failed_analysis.deinit(gpa);
     zcu.transitive_failed_analysis.deinit(gpa);
     zcu.failed_codegen.deinit(gpa);
+    zcu.failed_types.deinit(gpa);

     for (zcu.failed_files.values()) |value| {
         if (value) |msg| msg.destroy(gpa);
@@ -3800,6 +3799,18 @@ pub fn navSrcLoc(zcu: *const Zcu, nav_index: InternPool.Nav.Index) LazySrcLoc {
     };
 }

+pub fn typeSrcLoc(zcu: *const Zcu, ty_index: InternPool.Index) LazySrcLoc {
+    _ = zcu;
+    _ = ty_index;
+    @panic("TODO");
+}
+
+pub fn typeFileScope(zcu: *Zcu, ty_index: InternPool.Index) *File {
+    _ = zcu;
+    _ = ty_index;
+    @panic("TODO");
+}
+
 pub fn navSrcLine(zcu: *Zcu, nav_index: InternPool.Nav.Index) u32 {
     const ip = &zcu.intern_pool;
     const inst_info = ip.getNav(nav_index).srcInst(ip).resolveFull(ip).?;
@@ -4060,3 +4071,16 @@ pub fn navValIsConst(zcu: *const Zcu, val: InternPool.Index) bool {
         else => true,
     };
 }
+
+pub fn codegenFail(
+    zcu: *Zcu,
+    nav_index: InternPool.Nav.Index,
+    comptime format: []const u8,
+    args: anytype,
+) error{ CodegenFail, OutOfMemory } {
+    const gpa = zcu.gpa;
+    try zcu.failed_codegen.ensureUnusedCapacity(gpa, 1);
+    const msg = try Zcu.ErrorMsg.create(gpa, zcu.navSrcLoc(nav_index), format, args);
+    zcu.failed_codegen.putAssumeCapacityNoClobber(nav_index, msg);
+    return error.CodegenFail;
+}

src/Zcu/PerThread.zig
@@ -3130,24 +3130,31 @@ pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) error
     }
 }

-pub fn linkerUpdateContainerType(pt: Zcu.PerThread, ty: InternPool.Index) !void {
+pub fn linkerUpdateContainerType(pt: Zcu.PerThread, ty: InternPool.Index) error{OutOfMemory}!void {
     const zcu = pt.zcu;
+    const gpa = zcu.gpa;
     const comp = zcu.comp;
     const ip = &zcu.intern_pool;

     const codegen_prog_node = zcu.codegen_prog_node.start(Type.fromInterned(ty).containerTypeName(ip).toSlice(ip), 0);
     defer codegen_prog_node.end();

+    if (zcu.failed_types.fetchSwapRemove(ty)) |entry| entry.deinit();
+
     if (!Air.typeFullyResolved(Type.fromInterned(ty), zcu)) {
         // This type failed to resolve. This is a transitive failure.
         // TODO: do we need to mark this failure anywhere? I don't think so, since compilation
         // will fail due to the type error anyway.
-    } else if (comp.bin_file) |lf| {
-        lf.updateContainerType(pt, ty) catch |err| switch (err) {
-            error.OutOfMemory => return error.OutOfMemory,
-            else => |e| log.err("codegen type failed: {s}", .{@errorName(e)}),
-        };
+        return;
     }
+
+    if (comp.bin_file) |lf| lf.updateContainerType(pt, ty) catch |err| switch (err) {
+        error.OutOfMemory => return error.OutOfMemory,
+        else => |e| try zcu.failed_types.putNoClobber(gpa, ty, try Zcu.ErrorMsg.create(
+            gpa,
+            zcu.typeSrcLoc(ty),
+            "failed to update container type: {s}",
+            .{@errorName(e)},
+        )),
+    };
 }

 pub fn linkerUpdateLineNumber(pt: Zcu.PerThread, ti: InternPool.TrackedInst.Index) !void {

src/link.zig
@@ -1290,11 +1290,7 @@ pub const File = struct {
         args: anytype,
     ) error{ CodegenFail, OutOfMemory } {
         @branchHint(.cold);
-        const zcu = base.comp.zcu.?;
-        const gpa = zcu.gpa;
-        try zcu.failed_codegen.ensureUnusedCapacity(gpa, 1);
-        const msg = try Zcu.ErrorMsg.create(gpa, zcu.navSrcLoc(nav_index), format, args);
-        zcu.failed_codegen.putAssumeCapacityNoClobber(gpa, nav_index, msg);
+        return base.comp.zcu.?.codegenFail(nav_index, format, args);
     }

     pub const C = @import("link/C.zig");

src/link/Dwarf.zig
@@ -21,12 +21,24 @@ debug_rnglists: DebugRngLists,
 debug_str: StringSection,

 pub const UpdateError = error{
+    /// Indicates the error is already reported on `failed_codegen` in the Zcu.
+    CodegenFail,
     ReinterpretDeclRef,
     Unimplemented,
     OutOfMemory,
-};
+    EndOfStream,
+    Overflow,
+    Underflow,
+    UnexpectedEndOfFile,
+} ||
+    std.fs.File.OpenError ||
+    std.fs.File.SetEndPosError ||
+    std.fs.File.CopyRangeError ||
+    std.fs.File.PReadError ||
+    std.fs.File.PWriteError;

-pub const FlushError = UpdateError || std.process.GetCwdError;
+pub const FlushError =
+    UpdateError ||
+    std.process.GetCwdError;

 pub const RelocError =
     std.fs.File.PWriteError;
@@ -587,14 +599,13 @@ const Unit = struct {

     fn move(unit: *Unit, sec: *Section, dwarf: *Dwarf, new_off: u32) UpdateError!void {
         if (unit.off == new_off) return;
-        const diags = &dwarf.bin_file.base.comp.link_diags;
-        const n = dwarf.getFile().?.copyRangeAll(
+        const n = try dwarf.getFile().?.copyRangeAll(
             sec.off(dwarf) + unit.off,
             dwarf.getFile().?,
             sec.off(dwarf) + new_off,
             unit.len,
-        ) catch |err| return diags.fail("failed to copy file range: {s}", .{@errorName(err)});
-        if (n != unit.len) return diags.fail("unexpected short write from copy file range", .{});
+        );
+        if (n != unit.len) return error.InputOutput;
         unit.off = new_off;
     }

@@ -2267,7 +2278,7 @@ pub fn deinit(dwarf: *Dwarf) void {
     dwarf.* = undefined;
 }

-fn getUnit(dwarf: *Dwarf, mod: *Module) UpdateError!Unit.Index {
+fn getUnit(dwarf: *Dwarf, mod: *Module) !Unit.Index {
     const mod_gop = try dwarf.mods.getOrPut(dwarf.gpa, mod);
     const unit: Unit.Index = @enumFromInt(mod_gop.index);
     if (!mod_gop.found_existing) {
@@ -2327,7 +2338,25 @@ fn getModInfo(dwarf: *Dwarf, unit: Unit.Index) *ModInfo {
     return &dwarf.mods.values()[@intFromEnum(unit)];
 }

-pub fn initWipNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index, sym_index: u32) UpdateError!?WipNav {
+pub fn initWipNav(
+    dwarf: *Dwarf,
+    pt: Zcu.PerThread,
+    nav_index: InternPool.Nav.Index,
+    sym_index: u32,
+) error{ OutOfMemory, CodegenFail }!?WipNav {
+    return initWipNavInner(dwarf, pt, nav_index, sym_index) catch |err| switch (err) {
+        error.OutOfMemory => return error.OutOfMemory,
+        error.CodegenFail => return error.CodegenFail,
+        else => |e| return pt.zcu.codegenFail(nav_index, "failed to init dwarf: {s}", .{@errorName(e)}),
+    };
+}
+
+fn initWipNavInner(
+    dwarf: *Dwarf,
+    pt: Zcu.PerThread,
+    nav_index: InternPool.Nav.Index,
+    sym_index: u32,
+) !?WipNav {
     const zcu = pt.zcu;
     const ip = &zcu.intern_pool;

@@ -2637,7 +2666,20 @@ pub fn finishWipNav(
     pt: Zcu.PerThread,
     nav_index: InternPool.Nav.Index,
     wip_nav: *WipNav,
-) UpdateError!void {
+) error{ OutOfMemory, CodegenFail }!void {
+    return finishWipNavInner(dwarf, pt, nav_index, wip_nav) catch |err| switch (err) {
+        error.OutOfMemory => return error.OutOfMemory,
+        error.CodegenFail => return error.CodegenFail,
+        else => |e| return pt.zcu.codegenFail(nav_index, "failed to finish dwarf: {s}", .{@errorName(e)}),
+    };
+}
+
+fn finishWipNavInner(
+    dwarf: *Dwarf,
+    pt: Zcu.PerThread,
+    nav_index: InternPool.Nav.Index,
+    wip_nav: *WipNav,
+) !void {
     const zcu = pt.zcu;
     const ip = &zcu.intern_pool;
     const nav = ip.getNav(nav_index);
@@ -2656,7 +2698,15 @@ pub fn finishWipNav(
     try wip_nav.updateLazy(zcu.navSrcLoc(nav_index));
 }

-pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) UpdateError!void {
+pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) error{ OutOfMemory, CodegenFail }!void {
+    return updateComptimeNavInner(dwarf, pt, nav_index) catch |err| switch (err) {
+        error.OutOfMemory => return error.OutOfMemory,
+        error.CodegenFail => return error.CodegenFail,
+        else => |e| return pt.zcu.codegenFail(nav_index, "failed to update dwarf: {s}", .{@errorName(e)}),
+    };
+}
+
+fn updateComptimeNavInner(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void {
     const zcu = pt.zcu;
     const ip = &zcu.intern_pool;
     const nav_src_loc = zcu.navSrcLoc(nav_index);
@@ -4310,7 +4360,7 @@ fn refAbbrevCode(dwarf: *Dwarf, abbrev_code: AbbrevCode) UpdateError!@typeInfo(A
     return @intFromEnum(abbrev_code);
 }

-pub fn flushModule(dwarf: *Dwarf, pt: Zcu.PerThread) !void {
+pub fn flushModule(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
     const zcu = pt.zcu;
     const ip = &zcu.intern_pool;


src/link/Elf.zig
@@ -575,7 +575,7 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) !?u64 {
         }
     }

-    if (at_end) try self.setEndPos(end);
+    if (at_end) try self.base.file.?.setEndPos(end);
     return null;
 }

@@ -638,7 +638,7 @@ pub fn growSection(self: *Elf, shdr_index: u32, needed_size: u64, min_alignment:

         shdr.sh_offset = new_offset;
     } else if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) {
-        try self.setEndPos(shdr.sh_offset + needed_size);
+        try self.base.file.?.setEndPos(shdr.sh_offset + needed_size);
     }
 }

src/link/Elf/ZigObject.zig
@@ -1300,7 +1300,9 @@ fn updateNavCode(
     const capacity = atom_ptr.capacity(elf_file);
     const need_realloc = code.len > capacity or !required_alignment.check(@intCast(atom_ptr.value));
     if (need_realloc) {
-        try self.allocateAtom(atom_ptr, true, elf_file);
+        self.allocateAtom(atom_ptr, true, elf_file) catch |err|
+            return elf_file.base.cgFail(nav_index, "failed to allocate atom: {s}", .{@errorName(err)});
+
         log.debug("growing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), old_vaddr, atom_ptr.value });
         if (old_vaddr != atom_ptr.value) {
             sym.value = 0;
@@ -1310,7 +1312,9 @@ fn updateNavCode(
             // TODO shrink section size
         }
     } else {
-        try self.allocateAtom(atom_ptr, true, elf_file);
+        self.allocateAtom(atom_ptr, true, elf_file) catch |err|
+            return elf_file.base.cgFail(nav_index, "failed to allocate atom: {s}", .{@errorName(err)});
+
         errdefer self.freeNavMetadata(elf_file, sym_index);
         sym.value = 0;
         esym.st_value = 0;
@@ -1342,7 +1346,8 @@ fn updateNavCode(
     const shdr = elf_file.sections.items(.shdr)[shdr_index];
     if (shdr.sh_type != elf.SHT_NOBITS) {
         const file_offset = atom_ptr.offset(elf_file);
-        try elf_file.pwriteAll(code, file_offset);
+        elf_file.base.file.?.pwriteAll(code, file_offset) catch |err|
+            return elf_file.base.cgFail(nav_index, "failed to write to output file: {s}", .{@errorName(err)});
         log.debug("writing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), file_offset, file_offset + code.len });
     }
 }
@@ -1385,7 +1390,8 @@ fn updateTlv(
     const gop = try self.tls_variables.getOrPut(gpa, atom_ptr.atom_index);
     assert(!gop.found_existing); // TODO incremental updates

-    try self.allocateAtom(atom_ptr, true, elf_file);
+    self.allocateAtom(atom_ptr, true, elf_file) catch |err|
+        return elf_file.base.cgFail(nav_index, "failed to allocate atom: {s}", .{@errorName(err)});
     sym.value = 0;
     esym.st_value = 0;

@@ -1394,7 +1400,8 @@ fn updateTlv(
     const shdr = elf_file.sections.items(.shdr)[shndx];
     if (shdr.sh_type != elf.SHT_NOBITS) {
         const file_offset = atom_ptr.offset(elf_file);
-        try elf_file.pwriteAll(code, file_offset);
+        elf_file.base.file.?.pwriteAll(code, file_offset) catch |err|
+            return elf_file.base.cgFail(nav_index, "failed to write to output file: {s}", .{@errorName(err)});
         log.debug("writing TLV {s} from 0x{x} to 0x{x}", .{
             atom_ptr.name(elf_file),
             file_offset,
@@ -1513,7 +1520,8 @@ pub fn updateFunc(
             target_sym.flags.has_trampoline = true;
         }
         const target_sym = self.symbol(sym_index);
-        try writeTrampoline(self.symbol(target_sym.extra(elf_file).trampoline).*, target_sym.*, elf_file);
+        writeTrampoline(self.symbol(target_sym.extra(elf_file).trampoline).*, target_sym.*, elf_file) catch |err|
+            return elf_file.base.cgFail(func.owner_nav, "failed to write trampoline: {s}", .{@errorName(err)});
     }
 }

@@ -1898,7 +1906,7 @@ fn trampolineSize(cpu_arch: std.Target.Cpu.Arch) u64 {
     return len;
 }

-fn writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) link.File.UpdateNavError!void {
+fn writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) !void {
     const atom_ptr = tr_sym.atom(elf_file).?;
     const fileoff = atom_ptr.offset(elf_file);
     const source_addr = tr_sym.address(.{}, elf_file);
@@ -1908,7 +1916,7 @@ fn writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) link.File.Upd
         .x86_64 => try x86_64.writeTrampolineCode(source_addr, target_addr, &buf),
         else => @panic("TODO implement write trampoline for this CPU arch"),
     };
-    try elf_file.pwriteAll(out, fileoff);
+    try elf_file.base.file.?.pwriteAll(out, fileoff);

     if (elf_file.base.child_pid) |pid| {
         switch (builtin.os.tag) {

src/link/MachO.zig
@@ -532,7 +532,10 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
     try self.generateUnwindInfo();

     try self.initSegments();
-    try self.allocateSections();
+    self.allocateSections() catch |err| switch (err) {
+        error.LinkFailure => return error.LinkFailure,
+        else => |e| return diags.fail("failed to allocate sections: {s}", .{@errorName(e)}),
+    };
     self.allocateSegments();
     self.allocateSyntheticSymbols();

@@ -3133,7 +3136,7 @@ fn detectAllocCollision(self: *MachO, start: u64, size: u64) !?u64 {
         }
     }

-    if (at_end) try self.setEndPos(end);
+    if (at_end) try self.base.file.?.setEndPos(end);
     return null;
 }

@@ -3217,25 +3220,22 @@ pub fn findFreeSpaceVirtual(self: *MachO, object_size: u64, min_alignment: u32)
     return start;
 }

-pub fn copyRangeAll(self: *MachO, old_offset: u64, new_offset: u64, size: u64) error{LinkFailure}!void {
-    const diags = &self.base.comp.link_diags;
+pub fn copyRangeAll(self: *MachO, old_offset: u64, new_offset: u64, size: u64) !void {
     const file = self.base.file.?;
-    const amt = file.copyRangeAll(old_offset, file, new_offset, size) catch |err|
-        return diags.fail("failed to copy file range: {s}", .{@errorName(err)});
-    if (amt != size)
-        return diags.fail("unexpected short write in copy file range", .{});
+    const amt = try file.copyRangeAll(old_offset, file, new_offset, size);
+    if (amt != size) return error.InputOutput;
 }

 /// Like File.copyRangeAll but also ensures the source region is zeroed out after copy.
 /// This is so that we guarantee zeroed out regions for mapping of zerofill sections by the loader.
-fn copyRangeAllZeroOut(self: *MachO, old_offset: u64, new_offset: u64, size: u64) error{ LinkFailure, OutOfMemory }!void {
+fn copyRangeAllZeroOut(self: *MachO, old_offset: u64, new_offset: u64, size: u64) !void {
     const gpa = self.base.comp.gpa;
     try self.copyRangeAll(old_offset, new_offset, size);
-    const size_u = try self.cast(usize, size);
+    const size_u = math.cast(usize, size) orelse return error.Overflow;
     const zeroes = try gpa.alloc(u8, size_u); // TODO no need to allocate here.
     defer gpa.free(zeroes);
     @memset(zeroes, 0);
-    try self.pwriteAll(zeroes, old_offset);
+    try self.base.file.?.pwriteAll(zeroes, old_offset);
 }

 const InitMetadataOptions = struct {
@@ -3459,7 +3459,7 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo

             sect.offset = @intCast(new_offset);
         } else if (sect.offset + allocated_size == std.math.maxInt(u64)) {
-            try self.setEndPos(sect.offset + needed_size);
+            try self.base.file.?.setEndPos(sect.offset + needed_size);
         }
         seg.filesize = needed_size;
     }
@@ -3508,7 +3508,7 @@ fn growSectionRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void
             sect.offset = @intCast(new_offset);
             sect.addr = new_addr;
         } else if (sect.offset + allocated_size == std.math.maxInt(u64)) {
-            try self.setEndPos(sect.offset + needed_size);
+            try self.base.file.?.setEndPos(sect.offset + needed_size);
         }
     }
     sect.size = needed_size;

src/link/MachO/relocatable.zig
@@ -55,7 +55,10 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
     try calcSectionSizes(macho_file);

     try createSegment(macho_file);
-    try allocateSections(macho_file);
+    allocateSections(macho_file) catch |err| switch (err) {
+        error.LinkFailure => return error.LinkFailure,
+        else => |e| return diags.fail("failed to allocate sections: {s}", .{@errorName(e)}),
+    };
     allocateSegment(macho_file);

     if (build_options.enable_logging) {