Merge pull request #12893 from ziglang/macho-relocs-cleanup

macho: rewrite incremental linker, and init splitting of linking contexts
This commit is contained in:
Jakub Konka
2022-09-18 23:19:33 +02:00
committed by GitHub
13 changed files with 4065 additions and 2838 deletions

View File

@@ -798,6 +798,11 @@ pub const section_64 = extern struct {
return tt == S_ZEROFILL or tt == S_GB_ZEROFILL or tt == S_THREAD_LOCAL_ZEROFILL;
}
/// Returns true when this section's type field marks it as a symbol-stubs
/// section (S_SYMBOL_STUBS).
pub fn isSymbolStubs(sect: section_64) bool {
const tt = sect.@"type"();
return tt == S_SYMBOL_STUBS;
}
/// Returns true when the section carries the S_ATTR_DEBUG attribute flag.
pub fn isDebug(sect: section_64) bool {
return sect.attrs() & S_ATTR_DEBUG != 0;
}

View File

@@ -680,16 +680,15 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void {
break :blk offset;
};
// Add relocation to the decl.
const atom = macho_file.atom_by_index_table.get(relocation.atom_index).?;
const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = macho_file.getGlobalByIndex(relocation.sym_index);
try atom.relocs.append(emit.bin_file.allocator, .{
.offset = offset,
try atom.addRelocation(macho_file, .{
.@"type" = @enumToInt(std.macho.reloc_type_arm64.ARM64_RELOC_BRANCH26),
.target = target,
.offset = offset,
.addend = 0,
.subtractor = null,
.pcrel = true,
.length = 2,
.@"type" = @enumToInt(std.macho.reloc_type_arm64.ARM64_RELOC_BRANCH26),
});
} else {
return emit.fail("Implement call_extern for linking backends != MachO", .{});
@@ -872,8 +871,8 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
Instruction.LoadStoreOffset.imm(0),
));
},
.load_memory_ptr_got,
.load_memory_ptr_direct,
.load_memory_ptr_got,
=> {
// add reg, reg, offset
try emit.writeInstruction(Instruction.add(reg, reg, 0, false));
@@ -882,13 +881,13 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
}
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
const atom = macho_file.atom_by_index_table.get(data.atom_index).?;
// Page reloc for adrp instruction.
try atom.relocs.append(emit.bin_file.allocator, .{
.offset = offset,
const atom = macho_file.getAtomForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
// TODO this causes segfault in stage1
// try atom.addRelocations(macho_file, 2, .{
try atom.addRelocation(macho_file, .{
.target = .{ .sym_index = data.sym_index, .file = null },
.offset = offset,
.addend = 0,
.subtractor = null,
.pcrel = true,
.length = 2,
.@"type" = switch (tag) {
@@ -901,12 +900,10 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
else => unreachable,
},
});
// Pageoff reloc for adrp instruction.
try atom.relocs.append(emit.bin_file.allocator, .{
.offset = offset + 4,
try atom.addRelocation(macho_file, .{
.target = .{ .sym_index = data.sym_index, .file = null },
.offset = offset + 4,
.addend = 0,
.subtractor = null,
.pcrel = false,
.length = 2,
.@"type" = switch (tag) {

View File

@@ -996,7 +996,6 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
);
const end_offset = emit.code.items.len;
const gpa = emit.bin_file.allocator;
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
const reloc_type = switch (ops.flags) {
@@ -1004,19 +1003,17 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
0b01 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
else => unreachable,
};
const atom = macho_file.atom_by_index_table.get(relocation.atom_index).?;
log.debug("adding reloc of type {} to local @{d}", .{ reloc_type, relocation.sym_index });
try atom.relocs.append(gpa, .{
.offset = @intCast(u32, end_offset - 4),
const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
try atom.addRelocation(macho_file, .{
.@"type" = reloc_type,
.target = .{ .sym_index = relocation.sym_index, .file = null },
.offset = @intCast(u32, end_offset - 4),
.addend = 0,
.subtractor = null,
.pcrel = true,
.length = 2,
.@"type" = reloc_type,
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
const atom = coff_file.atom_by_index_table.get(relocation.atom_index).?;
const atom = coff_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
try atom.addRelocation(coff_file, .{
.@"type" = switch (ops.flags) {
0b00 => .got,
@@ -1145,20 +1142,19 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl.
const atom = macho_file.atom_by_index_table.get(relocation.atom_index).?;
const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = macho_file.getGlobalByIndex(relocation.sym_index);
try atom.relocs.append(emit.bin_file.allocator, .{
.offset = offset,
try atom.addRelocation(macho_file, .{
.@"type" = @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
.target = target,
.offset = offset,
.addend = 0,
.subtractor = null,
.pcrel = true,
.length = 2,
.@"type" = @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
// Add relocation to the decl.
const atom = coff_file.atom_by_index_table.get(relocation.atom_index).?;
const atom = coff_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = coff_file.getGlobalByIndex(relocation.sym_index);
try atom.addRelocation(coff_file, .{
.@"type" = .direct,

View File

@@ -1135,6 +1135,7 @@ fn getDeclOutputSection(self: *Coff, decl: *Module.Decl) u16 {
}
switch (zig_ty) {
// TODO: what if this is a function pointer?
.Fn => break :blk self.text_section_index.?,
else => {
if (val.castTag(.variable)) |_| {
@@ -1527,7 +1528,7 @@ pub fn getDeclVAddr(
assert(self.llvm_object == null);
assert(decl.link.coff.sym_index != 0);
const atom = self.atom_by_index_table.get(reloc_info.parent_atom_index).?;
const atom = self.getAtomForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
const target = SymbolWithLoc{ .sym_index = decl.link.coff.sym_index, .file = null };
try atom.addRelocation(self, .{
.@"type" = .direct,

View File

@@ -948,7 +948,7 @@ pub fn commitDeclState(
new_offset,
});
try File.MachO.copyRangeAllOverlappingAlloc(
try copyRangeAllOverlappingAlloc(
gpa,
d_sym.file,
debug_line_sect.offset,
@@ -1247,7 +1247,7 @@ fn writeDeclDebugInfo(self: *Dwarf, file: *File, atom: *Atom, dbg_info_buf: []co
new_offset,
});
try File.MachO.copyRangeAllOverlappingAlloc(
try copyRangeAllOverlappingAlloc(
gpa,
d_sym.file,
debug_info_sect.offset,
@@ -2338,3 +2338,16 @@ fn addDbgInfoErrorSet(
// DW.AT.enumeration_type delimit children
try dbg_info_buffer.append(0);
}
/// Copies `len` bytes within `file` from `in_offset` to `out_offset`,
/// staging through a temporary heap buffer so the two ranges may overlap.
/// The staging buffer is owned only for the duration of the call.
/// Local copy of the helper previously called as
/// File.MachO.copyRangeAllOverlappingAlloc (see the call sites above).
fn copyRangeAllOverlappingAlloc(
allocator: Allocator,
file: std.fs.File,
in_offset: u64,
out_offset: u64,
len: usize,
) !void {
const buf = try allocator.alloc(u8, len);
defer allocator.free(buf);
// preadAll may return fewer than `len` bytes (e.g. at EOF); only the bytes
// actually read are written back out.
const amt = try file.preadAll(buf, in_offset);
try file.pwriteAll(buf[0..amt], out_offset);
}

View File

@@ -2320,6 +2320,7 @@ fn getDeclPhdrIndex(self: *Elf, decl: *Module.Decl) !u16 {
}
switch (zig_ty) {
// TODO: what if this is a function pointer?
.Fn => break :blk self.phdr_load_re_index.?,
else => {
if (val.castTag(.variable)) |_| {

File diff suppressed because it is too large Load Diff

View File

@@ -16,6 +16,7 @@ const Arch = std.Target.Cpu.Arch;
const Dwarf = @import("../Dwarf.zig");
const MachO = @import("../MachO.zig");
const Object = @import("Object.zig");
const RelocationIncr = @import("Relocation.zig"); // temporary name until we clean up object-file relocation scanning
const SymbolWithLoc = MachO.SymbolWithLoc;
/// Each decl always gets a local symbol with the fully qualified name.
@@ -65,8 +66,6 @@ prev: ?*Atom,
dbg_info_atom: Dwarf.Atom,
dirty: bool = true,
pub const Binding = struct {
target: SymbolWithLoc,
offset: u64,
@@ -196,7 +195,7 @@ pub fn capacity(self: Atom, macho_file: *MachO) u64 {
} else {
// We are the last atom.
// The capacity is limited only by virtual address space.
return std.math.maxInt(u64) - self_sym.n_value;
return macho_file.allocatedVirtualSize(self_sym.n_value);
}
}
@@ -313,13 +312,13 @@ pub fn parseRelocs(self: *Atom, relocs: []align(1) const macho.relocation_info,
const sect_id = @intCast(u16, rel.r_symbolnum - 1);
const sym_index = object.sections_as_symbols.get(sect_id) orelse blk: {
const sect = object.getSourceSection(sect_id);
const match = (try context.macho_file.getOutputSection(sect)) orelse
const out_sect_id = (try context.macho_file.getOutputSection(sect)) orelse
unreachable;
const sym_index = @intCast(u32, object.symtab.items.len);
try object.symtab.append(gpa, .{
.n_strx = 0,
.n_type = macho.N_SECT,
.n_sect = match + 1,
.n_sect = out_sect_id + 1,
.n_desc = 0,
.n_value = sect.addr,
});
@@ -894,3 +893,83 @@ inline fn isArithmeticOp(inst: *const [4]u8) bool {
const group_decode = @truncate(u5, inst[3]);
return ((group_decode >> 2) == 4);
}
/// Records a single incremental relocation against this atom.
/// Thin wrapper over `addRelocations` with a comptime count of 1.
pub fn addRelocation(self: *Atom, macho_file: *MachO, reloc: RelocationIncr) !void {
return self.addRelocations(macho_file, 1, .{reloc});
}
/// Records `count` incremental relocations against this atom in the linker's
/// per-atom `relocs` map, creating the map entry on first use.
/// Capacity for all `count` entries is reserved up front so the appends in
/// the loop cannot fail.
pub fn addRelocations(
self: *Atom,
macho_file: *MachO,
comptime count: comptime_int,
relocs: [count]RelocationIncr,
) !void {
const gpa = macho_file.base.allocator;
// Target triple, used only to pretty-print the reloc type in the log.
const target = macho_file.base.options.target;
const gop = try macho_file.relocs.getOrPut(gpa, self);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.ensureUnusedCapacity(gpa, count);
for (relocs) |reloc| {
log.debug("  (adding reloc of type {s} to target %{d})", .{
reloc.fmtType(target),
reloc.target.sym_index,
});
gop.value_ptr.appendAssumeCapacity(reloc);
}
}
/// Records a rebase entry (an offset within this atom that needs adjusting
/// when the image slides) in the linker's per-atom `rebases` map, creating
/// the map entry on first use.
pub fn addRebase(self: *Atom, macho_file: *MachO, offset: u32) !void {
const gpa = macho_file.base.allocator;
log.debug("  (adding rebase at offset 0x{x} in %{d})", .{ offset, self.sym_index });
const gop = try macho_file.rebases.getOrPut(gpa, self);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, offset);
}
/// Records a (non-lazy) dyld binding for this atom — `binding.target` names
/// the symbol to bind and `binding.offset` the location within the atom —
/// in the linker's per-atom `bindings` map, creating the entry on first use.
pub fn addBinding(self: *Atom, macho_file: *MachO, binding: Binding) !void {
const gpa = macho_file.base.allocator;
log.debug("  (adding binding to symbol {s} at offset 0x{x} in %{d})", .{
macho_file.getSymbolName(binding.target),
binding.offset,
self.sym_index,
});
const gop = try macho_file.bindings.getOrPut(gpa, self);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, binding);
}
/// Records a lazy dyld binding for this atom in the linker's per-atom
/// `lazy_bindings` map, creating the entry on first use. Mirrors
/// `addBinding`, only the destination map differs.
pub fn addLazyBinding(self: *Atom, macho_file: *MachO, binding: Binding) !void {
const gpa = macho_file.base.allocator;
log.debug("  (adding lazy binding to symbol {s} at offset 0x{x} in %{d})", .{
macho_file.getSymbolName(binding.target),
binding.offset,
self.sym_index,
});
const gop = try macho_file.lazy_bindings.getOrPut(gpa, self);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, binding);
}
/// Resolves every still-dirty relocation recorded for this atom, writing the
/// fixed-up bytes into the output file, then clears each reloc's dirty flag
/// so unchanged relocations are skipped on subsequent incremental updates.
/// No-op when the atom has no recorded relocations.
pub fn resolveRelocations(self: *Atom, macho_file: *MachO) !void {
const relocs = macho_file.relocs.get(self) orelse return;
const source_sym = self.getSymbol(macho_file);
const source_section = macho_file.sections.get(source_sym.n_sect - 1).header;
// File offset of this atom: section file offset plus the atom's offset
// within the section (symbol vaddr minus section vaddr).
const file_offset = source_section.offset + source_sym.n_value - source_section.addr;
log.debug("relocating '{s}'", .{self.getName(macho_file)});
for (relocs.items) |*reloc| {
if (!reloc.dirty) continue;
try reloc.resolve(self, macho_file, file_offset);
reloc.dirty = false;
}
}

View File

@@ -512,7 +512,7 @@ fn writeSymtab(self: *DebugSymbols, lc: *macho.symtab_command) !void {
const dwarf_seg = &self.segments.items[self.dwarf_segment_cmd_index.?];
seg.filesize = aligned_size;
try MachO.copyRangeAllOverlappingAlloc(
try copyRangeAllOverlappingAlloc(
self.base.base.allocator,
self.file,
dwarf_seg.fileoff,
@@ -571,7 +571,7 @@ fn writeStrtab(self: *DebugSymbols, lc: *macho.symtab_command) !void {
const dwarf_seg = &self.segments.items[self.dwarf_segment_cmd_index.?];
seg.filesize = aligned_size;
try MachO.copyRangeAllOverlappingAlloc(
try copyRangeAllOverlappingAlloc(
self.base.base.allocator,
self.file,
dwarf_seg.fileoff,
@@ -601,3 +601,16 @@ fn writeStrtab(self: *DebugSymbols, lc: *macho.symtab_command) !void {
try self.file.pwriteAll(self.strtab.buffer.items, lc.stroff);
}
/// Copies `len` bytes within `file` from `in_offset` to `out_offset`,
/// staging through a temporary heap buffer so the two ranges may overlap.
/// Local copy of the helper previously called as
/// MachO.copyRangeAllOverlappingAlloc (see the call sites above).
fn copyRangeAllOverlappingAlloc(
allocator: Allocator,
file: std.fs.File,
in_offset: u64,
out_offset: u64,
len: usize,
) !void {
const buf = try allocator.alloc(u8, len);
defer allocator.free(buf);
// preadAll may return fewer than `len` bytes (e.g. at EOF); only the bytes
// actually read are written back out.
const amt = try file.preadAll(buf, in_offset);
try file.pwriteAll(buf[0..amt], out_offset);
}

View File

@@ -220,15 +220,15 @@ fn filterRelocs(
pub fn scanInputSections(self: Object, macho_file: *MachO) !void {
for (self.sections.items) |sect| {
const match = (try macho_file.getOutputSection(sect)) orelse {
const sect_id = (try macho_file.getOutputSection(sect)) orelse {
log.debug(" unhandled section", .{});
continue;
};
const output = macho_file.sections.items(.header)[match];
const output = macho_file.sections.items(.header)[sect_id];
log.debug("mapping '{s},{s}' into output sect({d}, '{s},{s}')", .{
sect.segName(),
sect.sectName(),
match + 1,
sect_id + 1,
output.segName(),
output.sectName(),
});
@@ -236,7 +236,7 @@ pub fn scanInputSections(self: Object, macho_file: *MachO) !void {
}
/// Splits object into atoms assuming one-shot linking mode.
pub fn splitIntoAtomsOneShot(self: *Object, macho_file: *MachO, object_id: u32) !void {
pub fn splitIntoAtoms(self: *Object, macho_file: *MachO, object_id: u32) !void {
assert(macho_file.mode == .one_shot);
const tracy = trace(@src());
@@ -249,7 +249,7 @@ pub fn splitIntoAtomsOneShot(self: *Object, macho_file: *MachO, object_id: u32)
const in_symtab = self.in_symtab orelse {
for (self.sections.items) |sect, id| {
if (sect.isDebug()) continue;
const match = (try macho_file.getOutputSection(sect)) orelse {
const out_sect_id = (try macho_file.getOutputSection(sect)) orelse {
log.debug(" unhandled section", .{});
continue;
};
@@ -261,7 +261,7 @@ pub fn splitIntoAtomsOneShot(self: *Object, macho_file: *MachO, object_id: u32)
try self.symtab.append(gpa, .{
.n_strx = 0,
.n_type = macho.N_SECT,
.n_sect = match + 1,
.n_sect = out_sect_id + 1,
.n_desc = 0,
.n_value = sect.addr,
});
@@ -282,10 +282,10 @@ pub fn splitIntoAtomsOneShot(self: *Object, macho_file: *MachO, object_id: u32)
code,
relocs,
&.{},
match,
out_sect_id,
sect,
);
try macho_file.addAtomToSection(atom, match);
try macho_file.addAtomToSection(atom);
}
return;
};
@@ -335,15 +335,15 @@ pub fn splitIntoAtomsOneShot(self: *Object, macho_file: *MachO, object_id: u32)
log.debug("splitting section '{s},{s}' into atoms", .{ sect.segName(), sect.sectName() });
// Get matching segment/section in the final artifact.
const match = (try macho_file.getOutputSection(sect)) orelse {
const out_sect_id = (try macho_file.getOutputSection(sect)) orelse {
log.debug(" unhandled section", .{});
continue;
};
log.debug(" output sect({d}, '{s},{s}')", .{
match + 1,
macho_file.sections.items(.header)[match].segName(),
macho_file.sections.items(.header)[match].sectName(),
out_sect_id + 1,
macho_file.sections.items(.header)[out_sect_id].segName(),
macho_file.sections.items(.header)[out_sect_id].sectName(),
});
const cpu_arch = macho_file.base.options.target.cpu.arch;
@@ -376,7 +376,7 @@ pub fn splitIntoAtomsOneShot(self: *Object, macho_file: *MachO, object_id: u32)
try self.symtab.append(gpa, .{
.n_strx = 0,
.n_type = macho.N_SECT,
.n_sect = match + 1,
.n_sect = out_sect_id + 1,
.n_desc = 0,
.n_value = sect.addr,
});
@@ -397,10 +397,10 @@ pub fn splitIntoAtomsOneShot(self: *Object, macho_file: *MachO, object_id: u32)
atom_code,
relocs,
&.{},
match,
out_sect_id,
sect,
);
try macho_file.addAtomToSection(atom, match);
try macho_file.addAtomToSection(atom);
}
var next_sym_count: usize = 0;
@@ -452,7 +452,7 @@ pub fn splitIntoAtomsOneShot(self: *Object, macho_file: *MachO, object_id: u32)
atom_code,
relocs,
sorted_atom_syms.items[1..],
match,
out_sect_id,
sect,
);
@@ -465,7 +465,7 @@ pub fn splitIntoAtomsOneShot(self: *Object, macho_file: *MachO, object_id: u32)
try self.symtab.append(gpa, .{
.n_strx = 0,
.n_type = macho.N_SECT,
.n_sect = match + 1,
.n_sect = out_sect_id + 1,
.n_desc = 0,
.n_value = addr,
});
@@ -479,7 +479,7 @@ pub fn splitIntoAtomsOneShot(self: *Object, macho_file: *MachO, object_id: u32)
try self.atom_by_index_table.put(gpa, alias, atom);
}
try macho_file.addAtomToSection(atom, match);
try macho_file.addAtomToSection(atom);
}
} else {
// If there is no symbol to refer to this atom, we create
@@ -490,7 +490,7 @@ pub fn splitIntoAtomsOneShot(self: *Object, macho_file: *MachO, object_id: u32)
try self.symtab.append(gpa, .{
.n_strx = 0,
.n_type = macho.N_SECT,
.n_sect = match + 1,
.n_sect = out_sect_id + 1,
.n_desc = 0,
.n_value = sect.addr,
});
@@ -506,10 +506,10 @@ pub fn splitIntoAtomsOneShot(self: *Object, macho_file: *MachO, object_id: u32)
code,
relocs,
filtered_syms,
match,
out_sect_id,
sect,
);
try macho_file.addAtomToSection(atom, match);
try macho_file.addAtomToSection(atom);
}
}
}
@@ -524,21 +524,21 @@ fn createAtomFromSubsection(
code: ?[]const u8,
relocs: []align(1) const macho.relocation_info,
indexes: []const SymbolAtIndex,
match: u8,
out_sect_id: u8,
sect: macho.section_64,
) !*Atom {
const gpa = macho_file.base.allocator;
const sym = self.symtab.items[sym_index];
const atom = try MachO.createEmptyAtom(gpa, sym_index, size, alignment);
atom.file = object_id;
self.symtab.items[sym_index].n_sect = match + 1;
self.symtab.items[sym_index].n_sect = out_sect_id + 1;
log.debug("creating ATOM(%{d}, '{s}') in sect({d}, '{s},{s}') in object({d})", .{
sym_index,
self.getString(sym.n_strx),
match + 1,
macho_file.sections.items(.header)[match].segName(),
macho_file.sections.items(.header)[match].sectName(),
out_sect_id + 1,
macho_file.sections.items(.header)[out_sect_id].segName(),
macho_file.sections.items(.header)[out_sect_id].sectName(),
object_id,
});
@@ -566,7 +566,7 @@ fn createAtomFromSubsection(
try atom.contained.ensureTotalCapacity(gpa, indexes.len);
for (indexes) |inner_sym_index| {
const inner_sym = &self.symtab.items[inner_sym_index.index];
inner_sym.n_sect = match + 1;
inner_sym.n_sect = out_sect_id + 1;
atom.contained.appendAssumeCapacity(.{
.sym_index = inner_sym_index.index,
.offset = inner_sym.n_value - sym.n_value,

View File

@@ -0,0 +1,287 @@
// Relocation.zig: a single incremental-linking relocation record.
// This file uses the file-as-struct pattern; the fields below are the record.
const Relocation = @This();
const std = @import("std");
const aarch64 = @import("../../arch/aarch64/bits.zig");
const assert = std.debug.assert;
const log = std.log.scoped(.link);
const macho = std.macho;
const math = std.math;
const mem = std.mem;
const meta = std.meta;
const Atom = @import("Atom.zig");
const MachO = @import("../MachO.zig");
const SymbolWithLoc = MachO.SymbolWithLoc;
/// Raw Mach-O relocation type; interpreted per-arch as reloc_type_arm64 or
/// reloc_type_x86_64 (see fmtType/resolve below).
@"type": u4,
/// Symbol this relocation points at.
target: SymbolWithLoc,
/// Byte offset of the fixup within the owning atom.
offset: u32,
/// Constant added to the target address when resolving.
addend: i64,
/// Whether the fixup is PC-relative.
pcrel: bool,
/// log2 of the fixup width: 2 => 32-bit, 3 => 64-bit (see the `length`
/// switches in resolveAarch64/resolveX8664).
length: u2,
/// True while the relocation still needs resolving; cleared by the caller
/// after a successful resolve (see Atom.resolveRelocations).
dirty: bool = true,
/// Returns the human-readable name of this relocation's type for the given
/// target architecture (aarch64 or x86_64). Used for debug logging.
pub fn fmtType(self: Relocation, target: std.Target) []const u8 {
switch (target.cpu.arch) {
.aarch64 => return @tagName(@intToEnum(macho.reloc_type_arm64, self.@"type")),
.x86_64 => return @tagName(@intToEnum(macho.reloc_type_x86_64, self.@"type")),
else => unreachable,
}
}
/// Returns the atom this relocation should actually be resolved against,
/// following synthetic-atom indirection: GOT-referencing reloc types resolve
/// to the target's GOT atom; otherwise a stubs atom or TLV-pointer atom for
/// the target takes precedence over the plain atom when one exists.
/// Returns null when no atom is known for the target symbol.
pub fn getTargetAtom(self: Relocation, macho_file: *MachO) ?*Atom {
switch (macho_file.base.options.target.cpu.arch) {
.aarch64 => switch (@intToEnum(macho.reloc_type_arm64, self.@"type")) {
.ARM64_RELOC_GOT_LOAD_PAGE21,
.ARM64_RELOC_GOT_LOAD_PAGEOFF12,
.ARM64_RELOC_POINTER_TO_GOT,
=> return macho_file.getGotAtomForSymbol(self.target),
else => {},
},
.x86_64 => switch (@intToEnum(macho.reloc_type_x86_64, self.@"type")) {
.X86_64_RELOC_GOT,
.X86_64_RELOC_GOT_LOAD,
=> return macho_file.getGotAtomForSymbol(self.target),
else => {},
},
else => unreachable,
}
if (macho_file.getStubsAtomForSymbol(self.target)) |stubs_atom| return stubs_atom;
if (macho_file.getTlvPtrAtomForSymbol(self.target)) |tlv_ptr_atom| return tlv_ptr_atom;
return macho_file.getAtomForSymbol(self.target);
}
/// Resolves this relocation for `atom`, patching the bytes at
/// `base_offset + self.offset` in the output file. Computes the source
/// address from the atom's symbol value and the target address from the
/// (possibly GOT/stub/TLV-indirected) target atom plus the addend, then
/// dispatches to the per-arch resolver. Silently returns when the target
/// atom cannot be found.
pub fn resolve(self: Relocation, atom: *Atom, macho_file: *MachO, base_offset: u64) !void {
const arch = macho_file.base.options.target.cpu.arch;
const source_sym = atom.getSymbol(macho_file);
const source_addr = source_sym.n_value + self.offset;
const target_atom = self.getTargetAtom(macho_file) orelse return;
const target_addr = @intCast(i64, target_atom.getSymbol(macho_file).n_value) + self.addend;
// NOTE(review): the bracketing in this debug format string looks
// unbalanced ("[() => ... ))"); cosmetic only — the placeholders are valid.
log.debug("  ({x}: [() => 0x{x} ({s})) ({s})", .{
source_addr,
target_addr,
macho_file.getSymbolName(self.target),
self.fmtType(macho_file.base.options.target),
});
switch (arch) {
.aarch64 => return self.resolveAarch64(macho_file, source_addr, target_addr, base_offset),
.x86_64 => return self.resolveX8664(macho_file, source_addr, target_addr, base_offset),
else => unreachable,
}
}
/// Resolves an AArch64 relocation by patching already-emitted machine code
/// in the output file at `base_offset + self.offset`.
/// ARM64_RELOC_UNSIGNED writes an absolute 32- or 64-bit value directly
/// (width chosen by `self.length`); every other handled type reads back the
/// 4-byte instruction word, rewrites its immediate field, and writes it back.
fn resolveAarch64(
self: Relocation,
macho_file: *MachO,
source_addr: u64,
target_addr: i64,
base_offset: u64,
) !void {
const rel_type = @intToEnum(macho.reloc_type_arm64, self.@"type");
if (rel_type == .ARM64_RELOC_UNSIGNED) {
var buffer: [@sizeOf(u64)]u8 = undefined;
const code = blk: {
switch (self.length) {
// length == 2 => 32-bit absolute value (truncated).
2 => {
mem.writeIntLittle(u32, buffer[0..4], @truncate(u32, @bitCast(u64, target_addr)));
break :blk buffer[0..4];
},
// length == 3 => full 64-bit absolute value.
3 => {
mem.writeIntLittle(u64, &buffer, @bitCast(u64, target_addr));
break :blk &buffer;
},
else => unreachable,
}
};
return macho_file.base.file.?.pwriteAll(code, base_offset + self.offset);
}
// All remaining types patch a single 4-byte instruction in place:
// read it, rewrite the immediate, write it back (at the bottom).
var buffer: [@sizeOf(u32)]u8 = undefined;
const amt = try macho_file.base.file.?.preadAll(&buffer, base_offset + self.offset);
if (amt != buffer.len) return error.InputOutput;
switch (rel_type) {
.ARM64_RELOC_BRANCH26 => {
// b/bl: the signed 28-bit byte displacement is stored >>2 in imm26.
const displacement = math.cast(
i28,
@intCast(i64, target_addr) - @intCast(i64, source_addr),
) orelse unreachable; // TODO codegen should never allow for jump larger than i28 displacement
var inst = aarch64.Instruction{
.unconditional_branch_immediate = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.unconditional_branch_immediate,
), &buffer),
};
inst.unconditional_branch_immediate.imm26 = @truncate(u26, @bitCast(u28, displacement >> 2));
mem.writeIntLittle(u32, &buffer, inst.toU32());
},
.ARM64_RELOC_PAGE21,
.ARM64_RELOC_GOT_LOAD_PAGE21,
.ARM64_RELOC_TLVP_LOAD_PAGE21,
=> {
// adrp: encode the difference in 4 KiB pages between source and
// target, split across the immhi (19-bit) and immlo (2-bit) fields.
const source_page = @intCast(i32, source_addr >> 12);
const target_page = @intCast(i32, target_addr >> 12);
const pages = @bitCast(u21, @intCast(i21, target_page - source_page));
var inst = aarch64.Instruction{
.pc_relative_address = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.pc_relative_address,
), &buffer),
};
inst.pc_relative_address.immhi = @truncate(u19, pages >> 2);
inst.pc_relative_address.immlo = @truncate(u2, pages);
mem.writeIntLittle(u32, &buffer, inst.toU32());
},
.ARM64_RELOC_PAGEOFF12,
.ARM64_RELOC_GOT_LOAD_PAGEOFF12,
=> {
// Low 12 bits of the target address go into the instruction's
// immediate: unscaled for add/sub-immediate, scaled by the access
// size for load/store (hence the @divExact below).
const narrowed = @truncate(u12, @intCast(u64, target_addr));
if (isArithmeticOp(&buffer)) {
var inst = aarch64.Instruction{
.add_subtract_immediate = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.add_subtract_immediate,
), &buffer),
};
inst.add_subtract_immediate.imm12 = narrowed;
mem.writeIntLittle(u32, &buffer, inst.toU32());
} else {
var inst = aarch64.Instruction{
.load_store_register = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.load_store_register,
), &buffer),
};
const offset: u12 = blk: {
if (inst.load_store_register.size == 0) {
if (inst.load_store_register.v == 1) {
// 128-bit SIMD is scaled by 16.
break :blk @divExact(narrowed, 16);
}
// Otherwise, 8-bit SIMD or ldrb.
break :blk narrowed;
} else {
const denom: u4 = math.powi(u4, 2, inst.load_store_register.size) catch unreachable;
break :blk @divExact(narrowed, denom);
}
};
inst.load_store_register.offset = offset;
mem.writeIntLittle(u32, &buffer, inst.toU32());
}
},
.ARM64_RELOC_TLVP_LOAD_PAGEOFF12 => {
// The TLV access is rewritten into an add-immediate, reusing the
// destination/base registers of whatever instruction was emitted
// (add-immediate or load/store).
const RegInfo = struct {
rd: u5,
rn: u5,
size: u2,
};
const reg_info: RegInfo = blk: {
if (isArithmeticOp(&buffer)) {
const inst = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.add_subtract_immediate,
), &buffer);
break :blk .{
.rd = inst.rd,
.rn = inst.rn,
.size = inst.sf,
};
} else {
const inst = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.load_store_register,
), &buffer);
break :blk .{
.rd = inst.rt,
.rn = inst.rn,
.size = inst.size,
};
}
};
const narrowed = @truncate(u12, @intCast(u64, target_addr));
var inst = aarch64.Instruction{
.add_subtract_immediate = .{
.rd = reg_info.rd,
.rn = reg_info.rn,
.imm12 = narrowed,
.sh = 0,
.s = 0,
.op = 0,
.sf = @truncate(u1, reg_info.size),
},
};
mem.writeIntLittle(u32, &buffer, inst.toU32());
},
.ARM64_RELOC_POINTER_TO_GOT => {
// 32-bit signed delta from the relocation site to the GOT entry.
const result = @intCast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr));
mem.writeIntLittle(i32, &buffer, result);
},
// UNSIGNED is handled above; SUBTRACTOR/ADDEND are presumably
// consumed earlier in the pipeline — TODO confirm they never reach here.
.ARM64_RELOC_SUBTRACTOR => unreachable,
.ARM64_RELOC_ADDEND => unreachable,
.ARM64_RELOC_UNSIGNED => unreachable,
}
try macho_file.base.file.?.pwriteAll(&buffer, base_offset + self.offset);
}
/// Resolves an x86_64 relocation by writing the fixed-up immediate into the
/// output file at `base_offset + self.offset`. Branch/GOT/TLV types produce
/// a 32-bit PC-relative displacement (PC taken as the end of the 4-byte
/// field, hence the `- 4`); SIGNED_k additionally corrects for k trailing
/// immediate bytes; UNSIGNED writes an absolute 32- or 64-bit value chosen
/// by `self.length`.
fn resolveX8664(
self: Relocation,
macho_file: *MachO,
source_addr: u64,
target_addr: i64,
base_offset: u64,
) !void {
const rel_type = @intToEnum(macho.reloc_type_x86_64, self.@"type");
var buffer: [@sizeOf(u64)]u8 = undefined;
const code = blk: {
switch (rel_type) {
.X86_64_RELOC_BRANCH,
.X86_64_RELOC_GOT,
.X86_64_RELOC_GOT_LOAD,
.X86_64_RELOC_TLV,
=> {
// rel32: displacement relative to the next instruction byte.
const displacement = @intCast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr) - 4);
mem.writeIntLittle(u32, buffer[0..4], @bitCast(u32, displacement));
break :blk buffer[0..4];
},
.X86_64_RELOC_SIGNED,
.X86_64_RELOC_SIGNED_1,
.X86_64_RELOC_SIGNED_2,
.X86_64_RELOC_SIGNED_4,
=> {
// SIGNED_k: k bytes of immediate follow the displacement field,
// so the effective PC is source + correction + 4.
const correction: u3 = switch (rel_type) {
.X86_64_RELOC_SIGNED => 0,
.X86_64_RELOC_SIGNED_1 => 1,
.X86_64_RELOC_SIGNED_2 => 2,
.X86_64_RELOC_SIGNED_4 => 4,
else => unreachable,
};
const displacement = @intCast(i32, target_addr - @intCast(i64, source_addr + correction + 4));
mem.writeIntLittle(u32, buffer[0..4], @bitCast(u32, displacement));
break :blk buffer[0..4];
},
.X86_64_RELOC_UNSIGNED => {
switch (self.length) {
// length == 2 => 32-bit absolute value (truncated).
2 => {
mem.writeIntLittle(u32, buffer[0..4], @truncate(u32, @bitCast(u64, target_addr)));
break :blk buffer[0..4];
},
// length == 3 => full 64-bit absolute value.
3 => {
mem.writeIntLittle(u64, buffer[0..8], @bitCast(u64, target_addr));
break :blk &buffer;
},
else => unreachable,
}
},
// Presumably consumed earlier in the pipeline — TODO confirm
// SUBTRACTOR never reaches here.
.X86_64_RELOC_SUBTRACTOR => unreachable,
}
};
try macho_file.base.file.?.pwriteAll(code, base_offset + self.offset);
}
/// Classifies a little-endian AArch64 instruction word: keeps the low 5 bits
/// of the most significant byte (instruction bits 24-28) and tests whether
/// bits 26-28 equal 0b100, which matches the data-processing (immediate)
/// encoding group (add/sub immediate) as opposed to load/store encodings.
/// Used to decide which bit layout to patch during relocation.
inline fn isArithmeticOp(inst: *const [4]u8) bool {
const group_decode = @truncate(u5, inst[3]);
return ((group_decode >> 2) == 4);
}

View File

@@ -233,7 +233,7 @@ fn prune(arena: Allocator, alive: std.AutoHashMap(*Atom, void), macho_file: *Mac
if (sym.n_desc != MachO.N_DESC_GCED) continue;
// TODO tombstone
const atom = entry.getAtom(macho_file);
const atom = entry.getAtom(macho_file).?;
const match = sym.n_sect - 1;
removeAtomFromSection(atom, match, macho_file);
_ = try gc_sections.put(match, {});
@@ -245,7 +245,7 @@ fn prune(arena: Allocator, alive: std.AutoHashMap(*Atom, void), macho_file: *Mac
if (sym.n_desc != MachO.N_DESC_GCED) continue;
// TODO tombstone
const atom = entry.getAtom(macho_file);
const atom = entry.getAtom(macho_file).?;
const match = sym.n_sect - 1;
removeAtomFromSection(atom, match, macho_file);
_ = try gc_sections.put(match, {});
@@ -257,7 +257,7 @@ fn prune(arena: Allocator, alive: std.AutoHashMap(*Atom, void), macho_file: *Mac
if (sym.n_desc != MachO.N_DESC_GCED) continue;
// TODO tombstone
const atom = entry.getAtom(macho_file);
const atom = entry.getAtom(macho_file).?;
const match = sym.n_sect - 1;
removeAtomFromSection(atom, match, macho_file);
_ = try gc_sections.put(match, {});

1953
src/link/MachO/zld.zig Normal file

File diff suppressed because it is too large Load Diff