//! zig/src/link/MachO/zld.zig
const std = @import("std");
const build_options = @import("build_options");
const assert = std.debug.assert;
const dwarf = std.dwarf;
const fs = std.fs;
const log = std.log.scoped(.link);
const macho = std.macho;
const math = std.math;
const mem = std.mem;
const aarch64 = @import("../../arch/aarch64/bits.zig");
const calcUuid = @import("uuid.zig").calcUuid;
const dead_strip = @import("dead_strip.zig");
const eh_frame = @import("eh_frame.zig");
const fat = @import("fat.zig");
const link = @import("../../link.zig");
const load_commands = @import("load_commands.zig");
const stubs = @import("stubs.zig");
const thunks = @import("thunks.zig");
const trace = @import("../../tracy.zig").trace;
const Allocator = mem.Allocator;
const Archive = @import("Archive.zig");
const Atom = @import("Atom.zig");
const Cache = std.Build.Cache;
const CodeSignature = @import("CodeSignature.zig");
const Compilation = @import("../../Compilation.zig");
const DwarfInfo = @import("DwarfInfo.zig");
const Dylib = @import("Dylib.zig");
const MachO = @import("../MachO.zig");
const Md5 = std.crypto.hash.Md5;
const LibStub = @import("../tapi.zig").LibStub;
const Object = @import("Object.zig");
const Section = MachO.Section;
const StringTable = @import("../strtab.zig").StringTable;
const SymbolWithLoc = MachO.SymbolWithLoc;
const TableSection = @import("../table_section.zig").TableSection;
const Trie = @import("Trie.zig");
const UnwindInfo = @import("UnwindInfo.zig");
const Bind = @import("dyld_info/bind.zig").Bind(*const Zld, SymbolWithLoc);
const LazyBind = @import("dyld_info/bind.zig").LazyBind(*const Zld, SymbolWithLoc);
const Rebase = @import("dyld_info/Rebase.zig");
pub const Zld = struct {
gpa: Allocator,
file: fs.File,
options: *const link.Options,
dyld_info_cmd: macho.dyld_info_command = .{},
symtab_cmd: macho.symtab_command = .{},
dysymtab_cmd: macho.dysymtab_command = .{},
function_starts_cmd: macho.linkedit_data_command = .{ .cmd = .FUNCTION_STARTS },
data_in_code_cmd: macho.linkedit_data_command = .{ .cmd = .DATA_IN_CODE },
uuid_cmd: macho.uuid_command = .{
.uuid = [_]u8{0} ** 16,
},
codesig_cmd: macho.linkedit_data_command = .{ .cmd = .CODE_SIGNATURE },
objects: std.ArrayListUnmanaged(Object) = .{},
archives: std.ArrayListUnmanaged(Archive) = .{},
dylibs: std.ArrayListUnmanaged(Dylib) = .{},
dylibs_map: std.StringHashMapUnmanaged(u16) = .{},
referenced_dylibs: std.AutoArrayHashMapUnmanaged(u16, void) = .{},
segments: std.ArrayListUnmanaged(macho.segment_command_64) = .{},
sections: std.MultiArrayList(Section) = .{},
text_section_index: ?u8 = null,
got_section_index: ?u8 = null,
tlv_ptr_section_index: ?u8 = null,
stubs_section_index: ?u8 = null,
stub_helper_section_index: ?u8 = null,
la_symbol_ptr_section_index: ?u8 = null,
locals: std.ArrayListUnmanaged(macho.nlist_64) = .{},
globals: std.ArrayListUnmanaged(SymbolWithLoc) = .{},
resolver: std.StringHashMapUnmanaged(u32) = .{},
unresolved: std.AutoArrayHashMapUnmanaged(u32, void) = .{},
entry_index: ?u32 = null,
mh_execute_header_index: ?u32 = null,
dso_handle_index: ?u32 = null,
dyld_stub_binder_index: ?u32 = null,
dyld_private_atom_index: ?Atom.Index = null,
strtab: StringTable(.strtab) = .{},
tlv_ptr_table: TableSection(SymbolWithLoc) = .{},
got_table: TableSection(SymbolWithLoc) = .{},
stubs_table: TableSection(SymbolWithLoc) = .{},
thunk_table: std.AutoHashMapUnmanaged(Atom.Index, thunks.Thunk.Index) = .{},
thunks: std.ArrayListUnmanaged(thunks.Thunk) = .{},
atoms: std.ArrayListUnmanaged(Atom) = .{},
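// Appends the atom at the tail of the doubly-linked atom list of the section
// its symbol belongs to (n_sect is 1-based, hence the -1 below), and bumps
// the section size by the atom's size. Alignment padding between atoms is
// accounted for later, in calcSectionSizes.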
pub fn addAtomToSection(self: *Zld, atom_index: Atom.Index) void {
const atom = self.getAtomPtr(atom_index);
const sym = self.getSymbol(atom.getSymbolWithLoc());
var section = self.sections.get(sym.n_sect - 1);
if (section.header.size > 0) {
const last_atom = self.getAtomPtr(section.last_atom_index.?);
last_atom.next_index = atom_index;
atom.prev_index = section.last_atom_index;
} else {
section.first_atom_index = atom_index;
}
section.last_atom_index = atom_index;
section.header.size += atom.size;
self.sections.set(sym.n_sect - 1, section);
}
pub fn createEmptyAtom(self: *Zld, sym_index: u32, size: u64, alignment: u32) !Atom.Index {
const gpa = self.gpa;
const index = @as(Atom.Index, @intCast(self.atoms.items.len));
const atom = try self.atoms.addOne(gpa);
atom.* = .{
.sym_index = sym_index,
.inner_sym_index = 0,
.inner_nsyms_trailing = 0,
.file = 0,
.size = size,
.alignment = alignment,
.prev_index = null,
.next_index = null,
};
log.debug("creating ATOM(%{d}) at index {d}", .{ sym_index, index });
return index;
}
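// __dyld_private is a zero-initialized, pointer-sized, 8-byte aligned slot in
// __DATA,__data. The __stub_helper preamble (see writeStubHelpers) loads its
// address and hands it to dyld_stub_binder as scratch space at bind time.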
fn createDyldPrivateAtom(self: *Zld) !void {
const sym_index = try self.allocateSymbol();
const atom_index = try self.createEmptyAtom(sym_index, @sizeOf(u64), 3);
const sym = self.getSymbolPtr(.{ .sym_index = sym_index });
sym.n_type = macho.N_SECT;
const sect_id = self.getSectionByName("__DATA", "__data") orelse
try self.initSection("__DATA", "__data", .{});
sym.n_sect = sect_id + 1;
self.dyld_private_atom_index = atom_index;
self.addAtomToSection(atom_index);
}
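// For a tentative definition (common symbol), n_value carries the size and
// bits 8-11 of n_desc carry the alignment as a power of two; each one is
// lowered here to a regular zerofill atom in __DATA,__bss.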
fn createTentativeDefAtoms(self: *Zld) !void {
const gpa = self.gpa;
for (self.globals.items) |global| {
const sym = self.getSymbolPtr(global);
if (!sym.tentative()) continue;
if (sym.n_desc == MachO.N_DEAD) continue;
log.debug("creating tentative definition for ATOM(%{d}, '{s}') in object({?})", .{
global.sym_index, self.getSymbolName(global), global.file,
});
// Convert the tentative definition into a regular zerofill symbol and
// allocate an atom for it.
const size = sym.n_value;
const alignment = (sym.n_desc >> 8) & 0x0f;
const sect_id = self.getSectionByName("__DATA", "__bss") orelse
try self.initSection("__DATA", "__bss", .{ .flags = macho.S_ZEROFILL });
sym.* = .{
.n_strx = sym.n_strx,
.n_type = macho.N_SECT | macho.N_EXT,
.n_sect = sect_id + 1,
.n_desc = 0,
.n_value = 0,
};
const atom_index = try self.createEmptyAtom(global.sym_index, size, alignment);
const atom = self.getAtomPtr(atom_index);
atom.file = global.file;
self.addAtomToSection(atom_index);
assert(global.getFile() != null);
const object = &self.objects.items[global.getFile().?];
try object.atoms.append(gpa, atom_index);
object.atom_by_index_table[global.sym_index] = atom_index;
}
}
fn addUndefined(self: *Zld, name: []const u8) !void {
const sym_index = try self.allocateSymbol();
const sym_loc = SymbolWithLoc{ .sym_index = sym_index };
const sym = self.getSymbolPtr(sym_loc);
sym.n_strx = try self.strtab.insert(self.gpa, name);
sym.n_type = macho.N_UNDF;
const global_index = try self.addGlobal(sym_loc);
try self.resolver.putNoClobber(self.gpa, name, global_index);
try self.unresolved.putNoClobber(self.gpa, global_index, {});
}
fn resolveSymbols(self: *Zld) !void {
// We add the specified entrypoint as the first unresolved symbol so that
// we search for it in libraries should there be no object files specified
// on the linker line.
if (self.options.output_mode == .Exe) {
const entry_name = self.options.entry orelse load_commands.default_entry_point;
try self.addUndefined(entry_name);
}
// Force resolution of any symbols requested by the user.
for (self.options.force_undefined_symbols.keys()) |sym_name| {
try self.addUndefined(sym_name);
}
for (self.objects.items, 0..) |_, object_id| {
try self.resolveSymbolsInObject(@as(u32, @intCast(object_id)));
}
try self.resolveSymbolsInArchives();
// Finally, force resolution of dyld_stub_binder if there are imports
// requested.
if (self.unresolved.count() > 0) {
try self.addUndefined("dyld_stub_binder");
}
try self.resolveSymbolsInDylibs();
self.dyld_stub_binder_index = self.resolver.get("dyld_stub_binder");
try self.createMhExecuteHeaderSymbol();
try self.createDsoHandleSymbol();
try self.resolveSymbolsAtLoading();
}
fn resolveSymbolsInObject(self: *Zld, object_id: u32) !void {
const object = &self.objects.items[object_id];
const in_symtab = object.in_symtab orelse return;
log.debug("resolving symbols in '{s}'", .{object.name});
var sym_index: u32 = 0;
while (sym_index < in_symtab.len) : (sym_index += 1) {
const sym = &object.symtab[sym_index];
const sym_name = object.getSymbolName(sym_index);
if (sym.stab()) {
log.err("unhandled symbol type: stab", .{});
log.err(" symbol '{s}'", .{sym_name});
log.err(" first definition in '{s}'", .{object.name});
return error.UnhandledSymbolType;
}
if (sym.indr()) {
log.err("unhandled symbol type: indirect", .{});
log.err(" symbol '{s}'", .{sym_name});
log.err(" first definition in '{s}'", .{object.name});
return error.UnhandledSymbolType;
}
if (sym.abs()) {
log.err("unhandled symbol type: absolute", .{});
log.err(" symbol '{s}'", .{sym_name});
log.err(" first definition in '{s}'", .{object.name});
return error.UnhandledSymbolType;
}
if (sym.sect() and !sym.ext()) {
log.debug("symbol '{s}' local to object {s}; skipping...", .{
sym_name,
object.name,
});
continue;
}
const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = object_id + 1 };
const global_index = self.resolver.get(sym_name) orelse {
const global_index = try self.addGlobal(sym_loc);
try self.resolver.putNoClobber(self.gpa, sym_name, global_index);
if (sym.undf() and !sym.tentative()) {
try self.unresolved.putNoClobber(self.gpa, global_index, {});
}
continue;
};
const global = &self.globals.items[global_index];
const global_sym = self.getSymbol(global.*);
// Cases to consider: sym vs global_sym
// 1. strong(sym) and strong(global_sym) => error
// 2. strong(sym) and weak(global_sym) => sym
// 3. strong(sym) and tentative(global_sym) => sym
// 4. strong(sym) and undf(global_sym) => sym
// 5. weak(sym) and strong(global_sym) => global_sym
// 6. weak(sym) and tentative(global_sym) => sym
// 7. weak(sym) and undf(global_sym) => sym
// 8. tentative(sym) and strong(global_sym) => global_sym
// 9. tentative(sym) and weak(global_sym) => global_sym
// 10. tentative(sym) and tentative(global_sym) => pick larger
// 11. tentative(sym) and undf(global_sym) => sym
// 12. undf(sym) and * => global_sym
//
// Reduces to:
// 1. strong(sym) and strong(global_sym) => error
// 2. * and strong(global_sym) => global_sym
// 3. weak(sym) and weak(global_sym) => global_sym
// 4. tentative(sym) and tentative(global_sym) => pick larger
// 5. undf(sym) and * => global_sym
// 6. else => sym
const sym_is_strong = sym.sect() and !(sym.weakDef() or sym.pext());
const global_is_strong = global_sym.sect() and !(global_sym.weakDef() or global_sym.pext());
const sym_is_weak = sym.sect() and (sym.weakDef() or sym.pext());
const global_is_weak = global_sym.sect() and (global_sym.weakDef() or global_sym.pext());
if (sym_is_strong and global_is_strong) {
log.err("symbol '{s}' defined multiple times", .{sym_name});
if (global.getFile()) |file| {
log.err(" first definition in '{s}'", .{self.objects.items[file].name});
}
log.err(" next definition in '{s}'", .{self.objects.items[object_id].name});
return error.MultipleSymbolDefinitions;
}
const update_global = blk: {
if (global_is_strong) break :blk false;
if (sym_is_weak and global_is_weak) break :blk false;
if (sym.tentative() and global_sym.tentative()) {
if (global_sym.n_value >= sym.n_value) break :blk false;
}
if (sym.undf() and !sym.tentative()) break :blk false;
break :blk true;
};
if (update_global) {
if (global.getFile()) |file| {
const global_object = &self.objects.items[file];
global_object.globals_lookup[global.sym_index] = global_index;
}
_ = self.unresolved.swapRemove(global_index);
global.* = sym_loc;
} else {
object.globals_lookup[sym_index] = global_index;
}
}
}
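// Repeatedly scan the unresolved worklist: extracting an object from an
// archive may resolve some entries but also introduce new unresolved
// symbols, so next_sym only advances when no archive member matched.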
fn resolveSymbolsInArchives(self: *Zld) !void {
if (self.archives.items.len == 0) return;
const gpa = self.gpa;
var next_sym: usize = 0;
loop: while (next_sym < self.unresolved.count()) {
const global = self.globals.items[self.unresolved.keys()[next_sym]];
const sym_name = self.getSymbolName(global);
for (self.archives.items) |archive| {
// Check if the entry exists in a static archive.
const offsets = archive.toc.get(sym_name) orelse {
// No hit.
continue;
};
assert(offsets.items.len > 0);
const object_id = @as(u16, @intCast(self.objects.items.len));
const object = try archive.parseObject(gpa, offsets.items[0]);
try self.objects.append(gpa, object);
try self.resolveSymbolsInObject(object_id);
continue :loop;
}
next_sym += 1;
}
}
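// A dylib hit turns the symbol into an import: the 1-based ordinal of the
// referenced dylib is stored in the high byte of n_desc via
// (ordinal + 1) * N_SYMBOL_RESOLVER, matching the SET_LIBRARY_ORDINAL
// encoding dyld expects.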
fn resolveSymbolsInDylibs(self: *Zld) !void {
if (self.dylibs.items.len == 0) return;
var next_sym: usize = 0;
loop: while (next_sym < self.unresolved.count()) {
const global_index = self.unresolved.keys()[next_sym];
const global = self.globals.items[global_index];
const sym = self.getSymbolPtr(global);
const sym_name = self.getSymbolName(global);
for (self.dylibs.items, 0..) |dylib, id| {
if (!dylib.symbols.contains(sym_name)) continue;
const dylib_id = @as(u16, @intCast(id));
if (!self.referenced_dylibs.contains(dylib_id)) {
try self.referenced_dylibs.putNoClobber(self.gpa, dylib_id, {});
}
const ordinal = self.referenced_dylibs.getIndex(dylib_id) orelse unreachable;
sym.n_type |= macho.N_EXT;
sym.n_desc = @as(u16, @intCast(ordinal + 1)) * macho.N_SYMBOL_RESOLVER;
if (dylib.weak) {
sym.n_desc |= macho.N_WEAK_REF;
}
assert(self.unresolved.swapRemove(global_index));
continue :loop;
}
next_sym += 1;
}
}
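// Last-chance resolution: discarded symbols are reset to plain undefined,
// and with allow_shlib_undefined (-undefined dynamic_lookup) the remaining
// undefined symbols become flat-namespace imports
// (BIND_SPECIAL_DYLIB_FLAT_LOOKUP). Anything else stays in the unresolved
// table.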
fn resolveSymbolsAtLoading(self: *Zld) !void {
const is_lib = self.options.output_mode == .Lib;
const is_dyn_lib = self.options.link_mode == .Dynamic and is_lib;
const allow_undef = is_dyn_lib and (self.options.allow_shlib_undefined orelse false);
var next_sym: usize = 0;
while (next_sym < self.unresolved.count()) {
const global_index = self.unresolved.keys()[next_sym];
const global = self.globals.items[global_index];
const sym = self.getSymbolPtr(global);
if (sym.discarded()) {
sym.* = .{
.n_strx = 0,
.n_type = macho.N_UNDF,
.n_sect = 0,
.n_desc = 0,
.n_value = 0,
};
_ = self.unresolved.swapRemove(global_index);
continue;
} else if (allow_undef) {
const n_desc = @as(
u16,
@bitCast(macho.BIND_SPECIAL_DYLIB_FLAT_LOOKUP * @as(i16, @intCast(macho.N_SYMBOL_RESOLVER))),
);
sym.n_type = macho.N_EXT;
sym.n_desc = n_desc;
_ = self.unresolved.swapRemove(global_index);
continue;
}
next_sym += 1;
}
}
fn createMhExecuteHeaderSymbol(self: *Zld) !void {
if (self.options.output_mode != .Exe) return;
if (self.resolver.get("__mh_execute_header")) |global_index| {
const global = self.globals.items[global_index];
const sym = self.getSymbol(global);
self.mh_execute_header_index = global_index;
if (!sym.undf() and !(sym.pext() or sym.weakDef())) return;
}
const gpa = self.gpa;
const sym_index = try self.allocateSymbol();
const sym_loc = SymbolWithLoc{ .sym_index = sym_index };
const sym = self.getSymbolPtr(sym_loc);
sym.n_strx = try self.strtab.insert(gpa, "__mh_execute_header");
sym.n_type = macho.N_SECT | macho.N_EXT;
sym.n_desc = macho.REFERENCED_DYNAMICALLY;
if (self.resolver.get("__mh_execute_header")) |global_index| {
const global = &self.globals.items[global_index];
const global_object = &self.objects.items[global.getFile().?];
global_object.globals_lookup[global.sym_index] = global_index;
global.* = sym_loc;
self.mh_execute_header_index = global_index;
} else {
self.mh_execute_header_index = try self.addGlobal(sym_loc);
}
}
fn createDsoHandleSymbol(self: *Zld) !void {
const global_index = self.resolver.get("___dso_handle") orelse return;
const global = &self.globals.items[global_index];
self.dso_handle_index = global_index;
if (!self.getSymbol(global.*).undf()) return;
const gpa = self.gpa;
const sym_index = try self.allocateSymbol();
const sym_loc = SymbolWithLoc{ .sym_index = sym_index };
const sym = self.getSymbolPtr(sym_loc);
sym.n_strx = try self.strtab.insert(gpa, "___dso_handle");
sym.n_type = macho.N_SECT | macho.N_EXT;
sym.n_desc = macho.N_WEAK_DEF;
const global_object = &self.objects.items[global.getFile().?];
global_object.globals_lookup[global.sym_index] = global_index;
_ = self.unresolved.swapRemove(global_index);
global.* = sym_loc;
}
pub fn deinit(self: *Zld) void {
const gpa = self.gpa;
self.tlv_ptr_table.deinit(gpa);
self.got_table.deinit(gpa);
self.stubs_table.deinit(gpa);
self.thunk_table.deinit(gpa);
for (self.thunks.items) |*thunk| {
thunk.deinit(gpa);
}
self.thunks.deinit(gpa);
self.strtab.deinit(gpa);
self.locals.deinit(gpa);
self.globals.deinit(gpa);
self.resolver.deinit(gpa);
self.unresolved.deinit(gpa);
for (self.objects.items) |*object| {
object.deinit(gpa);
}
self.objects.deinit(gpa);
for (self.archives.items) |*archive| {
archive.deinit(gpa);
}
self.archives.deinit(gpa);
for (self.dylibs.items) |*dylib| {
dylib.deinit(gpa);
}
self.dylibs.deinit(gpa);
self.dylibs_map.deinit(gpa);
self.referenced_dylibs.deinit(gpa);
self.segments.deinit(gpa);
self.sections.deinit(gpa);
self.atoms.deinit(gpa);
}
fn createSegments(self: *Zld) !void {
const pagezero_vmsize = self.options.pagezero_size orelse MachO.default_pagezero_vmsize;
const page_size = MachO.getPageSize(self.options.target.cpu.arch);
const aligned_pagezero_vmsize = mem.alignBackward(u64, pagezero_vmsize, page_size);
if (self.options.output_mode != .Lib and aligned_pagezero_vmsize > 0) {
if (aligned_pagezero_vmsize != pagezero_vmsize) {
log.warn("requested __PAGEZERO size (0x{x}) is not page aligned", .{pagezero_vmsize});
log.warn(" rounding down to 0x{x}", .{aligned_pagezero_vmsize});
}
try self.segments.append(self.gpa, .{
.cmdsize = @sizeOf(macho.segment_command_64),
.segname = makeStaticString("__PAGEZERO"),
.vmsize = aligned_pagezero_vmsize,
});
}
// __TEXT segment is non-optional
{
const protection = MachO.getSegmentMemoryProtection("__TEXT");
try self.segments.append(self.gpa, .{
.cmdsize = @sizeOf(macho.segment_command_64),
.segname = makeStaticString("__TEXT"),
.maxprot = protection,
.initprot = protection,
});
}
for (self.sections.items(.header), 0..) |header, sect_id| {
if (header.size == 0) continue; // empty section
const segname = header.segName();
const segment_id = self.getSegmentByName(segname) orelse blk: {
log.debug("creating segment '{s}'", .{segname});
const segment_id = @as(u8, @intCast(self.segments.items.len));
const protection = MachO.getSegmentMemoryProtection(segname);
try self.segments.append(self.gpa, .{
.cmdsize = @sizeOf(macho.segment_command_64),
.segname = makeStaticString(segname),
.maxprot = protection,
.initprot = protection,
});
break :blk segment_id;
};
const segment = &self.segments.items[segment_id];
segment.cmdsize += @sizeOf(macho.section_64);
segment.nsects += 1;
self.sections.items(.segment_index)[sect_id] = segment_id;
}
// __LINKEDIT always comes last
{
const protection = MachO.getSegmentMemoryProtection("__LINKEDIT");
try self.segments.append(self.gpa, .{
.cmdsize = @sizeOf(macho.segment_command_64),
.segname = makeStaticString("__LINKEDIT"),
.maxprot = protection,
.initprot = protection,
});
}
}
pub fn allocateSymbol(self: *Zld) !u32 {
try self.locals.ensureUnusedCapacity(self.gpa, 1);
log.debug(" (allocating symbol index {d})", .{self.locals.items.len});
const index = @as(u32, @intCast(self.locals.items.len));
_ = self.locals.addOneAssumeCapacity();
self.locals.items[index] = .{
.n_strx = 0,
.n_type = 0,
.n_sect = 0,
.n_desc = 0,
.n_value = 0,
};
return index;
}
fn addGlobal(self: *Zld, sym_loc: SymbolWithLoc) !u32 {
const global_index = @as(u32, @intCast(self.globals.items.len));
try self.globals.append(self.gpa, sym_loc);
return global_index;
}
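// The synthetic GOT, TLV-pointer and stub sections below are created lazily,
// when the first entry targeting them is allocated; addStubEntry also
// creates the __stub_helper and __la_symbol_ptr sections that back lazy
// binding.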
pub fn addGotEntry(self: *Zld, target: SymbolWithLoc) !void {
if (self.got_table.lookup.contains(target)) return;
_ = try self.got_table.allocateEntry(self.gpa, target);
if (self.got_section_index == null) {
self.got_section_index = try self.initSection("__DATA_CONST", "__got", .{
.flags = macho.S_NON_LAZY_SYMBOL_POINTERS,
});
}
}
pub fn addTlvPtrEntry(self: *Zld, target: SymbolWithLoc) !void {
if (self.tlv_ptr_table.lookup.contains(target)) return;
_ = try self.tlv_ptr_table.allocateEntry(self.gpa, target);
if (self.tlv_ptr_section_index == null) {
self.tlv_ptr_section_index = try self.initSection("__DATA", "__thread_ptrs", .{
.flags = macho.S_THREAD_LOCAL_VARIABLE_POINTERS,
});
}
}
pub fn addStubEntry(self: *Zld, target: SymbolWithLoc) !void {
if (self.stubs_table.lookup.contains(target)) return;
_ = try self.stubs_table.allocateEntry(self.gpa, target);
if (self.stubs_section_index == null) {
self.stubs_section_index = try self.initSection("__TEXT", "__stubs", .{
.flags = macho.S_SYMBOL_STUBS |
macho.S_ATTR_PURE_INSTRUCTIONS |
macho.S_ATTR_SOME_INSTRUCTIONS,
.reserved2 = stubs.stubSize(self.options.target.cpu.arch),
});
self.stub_helper_section_index = try self.initSection("__TEXT", "__stub_helper", .{
.flags = macho.S_REGULAR |
macho.S_ATTR_PURE_INSTRUCTIONS |
macho.S_ATTR_SOME_INSTRUCTIONS,
});
self.la_symbol_ptr_section_index = try self.initSection("__DATA", "__la_symbol_ptr", .{
.flags = macho.S_LAZY_SYMBOL_POINTERS,
});
}
}
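// __mh_execute_header and ___dso_handle, when not defined by any input file,
// are pinned to the very start of the __TEXT segment, i.e. to the Mach-O
// header itself.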
fn allocateSpecialSymbols(self: *Zld) !void {
for (&[_]?u32{
self.dso_handle_index,
self.mh_execute_header_index,
}) |maybe_index| {
const global_index = maybe_index orelse continue;
const global = self.globals.items[global_index];
if (global.getFile() != null) continue;
const name = self.getSymbolName(global);
const sym = self.getSymbolPtr(global);
const segment_index = self.getSegmentByName("__TEXT").?;
const seg = self.segments.items[segment_index];
sym.n_sect = 1;
sym.n_value = seg.vmaddr;
log.debug("allocating {s} at the start of {s}", .{
name,
seg.segName(),
});
}
}
fn writeAtoms(self: *Zld) !void {
const gpa = self.gpa;
const slice = self.sections.slice();
for (slice.items(.first_atom_index), 0..) |first_atom_index, sect_id| {
const header = slice.items(.header)[sect_id];
if (header.isZerofill()) continue;
var atom_index = first_atom_index orelse continue;
var buffer = try gpa.alloc(u8, math.cast(usize, header.size) orelse return error.Overflow);
defer gpa.free(buffer);
@memset(buffer, 0); // TODO with NOPs
log.debug("writing atoms in {s},{s}", .{ header.segName(), header.sectName() });
while (true) {
const atom = self.getAtom(atom_index);
if (atom.getFile()) |file| {
const this_sym = self.getSymbol(atom.getSymbolWithLoc());
const padding_size: usize = if (atom.next_index) |next_index| blk: {
const next_sym = self.getSymbol(self.getAtom(next_index).getSymbolWithLoc());
const size = next_sym.n_value - (this_sym.n_value + atom.size);
break :blk math.cast(usize, size) orelse return error.Overflow;
} else 0;
log.debug(" (adding ATOM(%{d}, '{s}') from object({d}) to buffer)", .{
atom.sym_index,
self.getSymbolName(atom.getSymbolWithLoc()),
file,
});
if (padding_size > 0) {
log.debug(" (with padding {x})", .{padding_size});
}
const offset = this_sym.n_value - header.addr;
log.debug(" (at offset 0x{x})", .{offset});
const code = Atom.getAtomCode(self, atom_index);
const relocs = Atom.getAtomRelocs(self, atom_index);
const size = math.cast(usize, atom.size) orelse return error.Overflow;
@memcpy(buffer[offset .. offset + size], code);
try Atom.resolveRelocs(
self,
atom_index,
buffer[offset..][0..size],
relocs,
);
}
if (atom.next_index) |next_index| {
atom_index = next_index;
} else break;
}
log.debug(" (writing at file offset 0x{x})", .{header.offset});
try self.file.pwriteAll(buffer, header.offset);
}
}
fn writeDyldPrivateAtom(self: *Zld) !void {
const atom_index = self.dyld_private_atom_index orelse return;
const atom = self.getAtom(atom_index);
const sym = self.getSymbol(atom.getSymbolWithLoc());
const sect_id = self.getSectionByName("__DATA", "__data").?;
const header = self.sections.items(.header)[sect_id];
const offset = sym.n_value - header.addr + header.offset;
log.debug("writing __dyld_private at offset 0x{x}", .{offset});
const buffer: [@sizeOf(u64)]u8 = [_]u8{0} ** @sizeOf(u64);
try self.file.pwriteAll(&buffer, offset);
}
fn writeThunks(self: *Zld) !void {
assert(self.requiresThunks());
const gpa = self.gpa;
const sect_id = self.text_section_index orelse return;
const header = self.sections.items(.header)[sect_id];
for (self.thunks.items, 0..) |*thunk, i| {
if (thunk.getSize() == 0) continue;
var buffer = try std.ArrayList(u8).initCapacity(gpa, thunk.getSize());
defer buffer.deinit();
try thunks.writeThunkCode(self, thunk, buffer.writer());
const thunk_atom = self.getAtom(thunk.getStartAtomIndex());
const thunk_sym = self.getSymbol(thunk_atom.getSymbolWithLoc());
const offset = thunk_sym.n_value - header.addr + header.offset;
log.debug("writing thunk({d}) at offset 0x{x}", .{ i, offset });
try self.file.pwriteAll(buffer.items, offset);
}
}
fn writePointerEntries(self: *Zld, sect_id: u8, table: anytype) !void {
const header = self.sections.items(.header)[sect_id];
var buffer = try std.ArrayList(u8).initCapacity(self.gpa, header.size);
defer buffer.deinit();
for (table.entries.items) |entry| {
const sym = self.getSymbol(entry);
buffer.writer().writeIntLittle(u64, sym.n_value) catch unreachable; // capacity preallocated above
}
log.debug("writing __DATA_CONST,__got contents at file offset 0x{x}", .{header.offset});
try self.file.pwriteAll(buffer.items, header.offset);
}
fn writeStubs(self: *Zld) !void {
const gpa = self.gpa;
const cpu_arch = self.options.target.cpu.arch;
const stubs_header = self.sections.items(.header)[self.stubs_section_index.?];
const la_symbol_ptr_header = self.sections.items(.header)[self.la_symbol_ptr_section_index.?];
var buffer = try std.ArrayList(u8).initCapacity(gpa, stubs_header.size);
defer buffer.deinit();
for (0..self.stubs_table.count()) |index| {
try stubs.writeStubCode(.{
.cpu_arch = cpu_arch,
.source_addr = stubs_header.addr + stubs.stubSize(cpu_arch) * index,
.target_addr = la_symbol_ptr_header.addr + index * @sizeOf(u64),
}, buffer.writer());
}
log.debug("writing __TEXT,__stubs contents at file offset 0x{x}", .{stubs_header.offset});
try self.file.pwriteAll(buffer.items, stubs_header.offset);
}
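// __stub_helper layout: one preamble followed by one entry per stub. The
// preamble loads __dyld_private and jumps through the dyld_stub_binder GOT
// slot; each per-stub entry branches back to the preamble. The offsets into
// the lazy bind info are patched in later by
// populateLazyBindOffsetsInStubHelper.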
fn writeStubHelpers(self: *Zld) !void {
const gpa = self.gpa;
const cpu_arch = self.options.target.cpu.arch;
const stub_helper_header = self.sections.items(.header)[self.stub_helper_section_index.?];
var buffer = try std.ArrayList(u8).initCapacity(gpa, stub_helper_header.size);
defer buffer.deinit();
{
const dyld_private_addr = blk: {
const atom = self.getAtom(self.dyld_private_atom_index.?);
const sym = self.getSymbol(atom.getSymbolWithLoc());
break :blk sym.n_value;
};
const dyld_stub_binder_got_addr = blk: {
const sym_loc = self.globals.items[self.dyld_stub_binder_index.?];
break :blk self.getGotEntryAddress(sym_loc).?;
};
try stubs.writeStubHelperPreambleCode(.{
.cpu_arch = cpu_arch,
.source_addr = stub_helper_header.addr,
.dyld_private_addr = dyld_private_addr,
.dyld_stub_binder_got_addr = dyld_stub_binder_got_addr,
}, buffer.writer());
}
for (0..self.stubs_table.count()) |index| {
const source_addr = stub_helper_header.addr + stubs.stubHelperPreambleSize(cpu_arch) +
stubs.stubHelperSize(cpu_arch) * index;
try stubs.writeStubHelperCode(.{
.cpu_arch = cpu_arch,
.source_addr = source_addr,
.target_addr = stub_helper_header.addr,
}, buffer.writer());
}
log.debug("writing __TEXT,__stub_helper contents at file offset 0x{x}", .{
stub_helper_header.offset,
});
try self.file.pwriteAll(buffer.items, stub_helper_header.offset);
}
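// Each __la_symbol_ptr slot initially points at its corresponding
// __stub_helper entry, so the first call through a stub lands in
// dyld_stub_binder; dyld then overwrites the slot with the resolved address.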
fn writeLaSymbolPtrs(self: *Zld) !void {
const gpa = self.gpa;
const cpu_arch = self.options.target.cpu.arch;
const la_symbol_ptr_header = self.sections.items(.header)[self.la_symbol_ptr_section_index.?];
const stub_helper_header = self.sections.items(.header)[self.stub_helper_section_index.?];
var buffer = try std.ArrayList(u8).initCapacity(gpa, la_symbol_ptr_header.size);
defer buffer.deinit();
for (0..self.stubs_table.count()) |index| {
const target_addr = stub_helper_header.addr + stubs.stubHelperPreambleSize(cpu_arch) +
stubs.stubHelperSize(cpu_arch) * index;
buffer.writer().writeIntLittle(u64, target_addr) catch unreachable; // capacity preallocated above
}
log.debug("writing __DATA,__la_symbol_ptr contents at file offset 0x{x}", .{
la_symbol_ptr_header.offset,
});
try self.file.pwriteAll(buffer.items, la_symbol_ptr_header.offset);
}
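// Drops sections that ended up empty and reorders the rest by
// MachO.getSectionPrecedence; the backlinks table maps old section indices
// to new ones so the cached special-section indices can be rewritten.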
fn pruneAndSortSections(self: *Zld) !void {
const Entry = struct {
index: u8,
pub fn lessThan(zld: *Zld, lhs: @This(), rhs: @This()) bool {
const lhs_header = zld.sections.items(.header)[lhs.index];
const rhs_header = zld.sections.items(.header)[rhs.index];
return MachO.getSectionPrecedence(lhs_header) < MachO.getSectionPrecedence(rhs_header);
}
};
const gpa = self.gpa;
var entries = try std.ArrayList(Entry).initCapacity(gpa, self.sections.slice().len);
defer entries.deinit();
for (0..self.sections.slice().len) |index| {
const section = self.sections.get(index);
if (section.header.size == 0) {
log.debug("pruning section {s},{s} {?d}", .{
section.header.segName(),
section.header.sectName(),
section.first_atom_index,
});
for (&[_]*?u8{
&self.text_section_index,
&self.got_section_index,
&self.tlv_ptr_section_index,
&self.stubs_section_index,
&self.stub_helper_section_index,
&self.la_symbol_ptr_section_index,
}) |maybe_index| {
if (maybe_index.* != null and maybe_index.*.? == index) {
maybe_index.* = null;
}
}
continue;
}
entries.appendAssumeCapacity(.{ .index = @intCast(index) });
}
mem.sort(Entry, entries.items, self, Entry.lessThan);
var slice = self.sections.toOwnedSlice();
defer slice.deinit(gpa);
const backlinks = try gpa.alloc(u8, slice.len);
defer gpa.free(backlinks);
for (entries.items, 0..) |entry, i| {
backlinks[entry.index] = @as(u8, @intCast(i));
}
try self.sections.ensureTotalCapacity(gpa, entries.items.len);
for (entries.items) |entry| {
self.sections.appendAssumeCapacity(slice.get(entry.index));
}
for (&[_]*?u8{
&self.text_section_index,
&self.got_section_index,
&self.tlv_ptr_section_index,
&self.stubs_section_index,
&self.stub_helper_section_index,
&self.la_symbol_ptr_section_index,
}) |maybe_index| {
if (maybe_index.*) |*index| {
index.* = backlinks[index.*];
}
}
}
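// First pass of address assignment: atoms are laid out back to back (with
// alignment padding) at section-relative offsets; these only become absolute
// addresses once allocateSegments runs and adds the section base address.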
fn calcSectionSizes(self: *Zld) !void {
const slice = self.sections.slice();
for (slice.items(.header), 0..) |*header, sect_id| {
if (header.size == 0) continue;
if (self.text_section_index) |txt| {
if (txt == sect_id and self.requiresThunks()) continue;
}
var atom_index = slice.items(.first_atom_index)[sect_id] orelse continue;
header.size = 0;
header.@"align" = 0;
while (true) {
const atom = self.getAtom(atom_index);
const atom_alignment = try math.powi(u32, 2, atom.alignment);
const atom_offset = mem.alignForward(u64, header.size, atom_alignment);
const padding = atom_offset - header.size;
const sym = self.getSymbolPtr(atom.getSymbolWithLoc());
sym.n_value = atom_offset;
header.size += padding + atom.size;
header.@"align" = @max(header.@"align", atom.alignment);
if (atom.next_index) |next_index| {
atom_index = next_index;
} else break;
}
}
if (self.text_section_index != null and self.requiresThunks()) {
// Create jump/branch range extenders if needed.
try thunks.createThunks(self, self.text_section_index.?);
}
// Update the offsets of all symbols contained within each atom. We need to
// do this because the unwind info synthesiser traverses these symbols when
// synthesising unwind info and DWARF CFI records.
for (slice.items(.first_atom_index)) |first_atom_index| {
var atom_index = first_atom_index orelse continue;
while (true) {
const atom = self.getAtom(atom_index);
const sym = self.getSymbol(atom.getSymbolWithLoc());
if (atom.getFile() != null) {
// Update each symbol contained within the atom
var it = Atom.getInnerSymbolsIterator(self, atom_index);
while (it.next()) |sym_loc| {
const inner_sym = self.getSymbolPtr(sym_loc);
inner_sym.n_value = sym.n_value + Atom.calcInnerSymbolOffset(
self,
atom_index,
sym_loc.sym_index,
);
}
// If there is a section alias, update it now too
if (Atom.getSectionAlias(self, atom_index)) |sym_loc| {
const alias = self.getSymbolPtr(sym_loc);
alias.n_value = sym.n_value;
}
}
if (atom.next_index) |next_index| {
atom_index = next_index;
} else break;
}
}
if (self.got_section_index) |sect_id| {
const header = &self.sections.items(.header)[sect_id];
header.size = self.got_table.count() * @sizeOf(u64);
header.@"align" = 3;
}
if (self.tlv_ptr_section_index) |sect_id| {
const header = &self.sections.items(.header)[sect_id];
header.size = self.tlv_ptr_table.count() * @sizeOf(u64);
header.@"align" = 3;
}
const cpu_arch = self.options.target.cpu.arch;
if (self.stubs_section_index) |sect_id| {
const header = &self.sections.items(.header)[sect_id];
header.size = self.stubs_table.count() * stubs.stubSize(cpu_arch);
header.@"align" = stubs.stubAlignment(cpu_arch);
}
if (self.stub_helper_section_index) |sect_id| {
const header = &self.sections.items(.header)[sect_id];
header.size = self.stubs_table.count() * stubs.stubHelperSize(cpu_arch) +
stubs.stubHelperPreambleSize(cpu_arch);
header.@"align" = stubs.stubAlignment(cpu_arch);
}
if (self.la_symbol_ptr_section_index) |sect_id| {
const header = &self.sections.items(.header)[sect_id];
header.size = self.stubs_table.count() * @sizeOf(u64);
header.@"align" = 3;
}
}
fn allocateSegments(self: *Zld) !void {
for (self.segments.items, 0..) |*segment, segment_index| {
const is_text_segment = mem.eql(u8, segment.segName(), "__TEXT");
const base_size = if (is_text_segment) try load_commands.calcMinHeaderPad(self.gpa, self.options, .{
.segments = self.segments.items,
.dylibs = self.dylibs.items,
.referenced_dylibs = self.referenced_dylibs.keys(),
}) else 0;
try self.allocateSegment(@as(u8, @intCast(segment_index)), base_size);
}
}
fn getSegmentAllocBase(self: Zld, segment_index: u8) struct { vmaddr: u64, fileoff: u64 } {
if (segment_index > 0) {
const prev_segment = self.segments.items[segment_index - 1];
return .{
.vmaddr = prev_segment.vmaddr + prev_segment.vmsize,
.fileoff = prev_segment.fileoff + prev_segment.filesize,
};
}
return .{ .vmaddr = 0, .fileoff = 0 };
}
fn allocateSegment(self: *Zld, segment_index: u8, init_size: u64) !void {
const segment = &self.segments.items[segment_index];
if (mem.eql(u8, segment.segName(), "__PAGEZERO")) return; // allocated upon creation
const base = self.getSegmentAllocBase(segment_index);
segment.vmaddr = base.vmaddr;
segment.fileoff = base.fileoff;
segment.filesize = init_size;
segment.vmsize = init_size;
// Allocate the sections according to their alignment at the beginning of the segment.
const indexes = self.getSectionIndexes(segment_index);
var start = init_size;
const slice = self.sections.slice();
for (slice.items(.header)[indexes.start..indexes.end], 0..) |*header, sect_id| {
const alignment = try math.powi(u32, 2, header.@"align");
const start_aligned = mem.alignForward(u64, start, alignment);
const n_sect = @as(u8, @intCast(indexes.start + sect_id + 1));
header.offset = if (header.isZerofill())
0
else
@as(u32, @intCast(segment.fileoff + start_aligned));
header.addr = segment.vmaddr + start_aligned;
if (slice.items(.first_atom_index)[indexes.start + sect_id]) |first_atom_index| {
var atom_index = first_atom_index;
log.debug("allocating local symbols in sect({d}, '{s},{s}')", .{
n_sect,
header.segName(),
header.sectName(),
});
while (true) {
const atom = self.getAtom(atom_index);
const sym = self.getSymbolPtr(atom.getSymbolWithLoc());
sym.n_value += header.addr;
sym.n_sect = n_sect;
log.debug(" ATOM(%{d}, '{s}') @{x}", .{
atom.sym_index,
self.getSymbolName(atom.getSymbolWithLoc()),
sym.n_value,
});
if (atom.getFile() != null) {
// Update each symbol contained within the atom
var it = Atom.getInnerSymbolsIterator(self, atom_index);
while (it.next()) |sym_loc| {
const inner_sym = self.getSymbolPtr(sym_loc);
inner_sym.n_value = sym.n_value + Atom.calcInnerSymbolOffset(
self,
atom_index,
sym_loc.sym_index,
);
inner_sym.n_sect = n_sect;
}
// If there is a section alias, update it now too
if (Atom.getSectionAlias(self, atom_index)) |sym_loc| {
const alias = self.getSymbolPtr(sym_loc);
alias.n_value = sym.n_value;
alias.n_sect = n_sect;
}
}
if (atom.next_index) |next_index| {
atom_index = next_index;
} else break;
}
}
start = start_aligned + header.size;
if (!header.isZerofill()) {
segment.filesize = start;
}
segment.vmsize = start;
}
const page_size = MachO.getPageSize(self.options.target.cpu.arch);
segment.filesize = mem.alignForward(u64, segment.filesize, page_size);
segment.vmsize = mem.alignForward(u64, segment.vmsize, page_size);
}
const InitSectionOpts = struct {
flags: u32 = macho.S_REGULAR,
reserved1: u32 = 0,
reserved2: u32 = 0,
};
pub fn initSection(
self: *Zld,
segname: []const u8,
sectname: []const u8,
opts: InitSectionOpts,
) !u8 {
const gpa = self.gpa;
log.debug("creating section '{s},{s}'", .{ segname, sectname });
const index = @as(u8, @intCast(self.sections.slice().len));
try self.sections.append(gpa, .{
.segment_index = undefined, // segment indices are assigned later, in createSegments
.header = .{
.sectname = makeStaticString(sectname),
.segname = makeStaticString(segname),
.flags = opts.flags,
.reserved1 = opts.reserved1,
.reserved2 = opts.reserved2,
},
});
return index;
}
fn writeSegmentHeaders(self: *Zld, writer: anytype) !void {
for (self.segments.items, 0..) |seg, i| {
const indexes = self.getSectionIndexes(@as(u8, @intCast(i)));
var out_seg = seg;
out_seg.cmdsize = @sizeOf(macho.segment_command_64);
out_seg.nsects = 0;
// Update section headers count; any section with size of 0 is excluded
// since it doesn't have any data in the final binary file.
for (self.sections.items(.header)[indexes.start..indexes.end]) |header| {
if (header.size == 0) continue;
out_seg.cmdsize += @sizeOf(macho.section_64);
out_seg.nsects += 1;
}
if (out_seg.nsects == 0 and
(mem.eql(u8, out_seg.segName(), "__DATA_CONST") or
mem.eql(u8, out_seg.segName(), "__DATA"))) continue;
try writer.writeStruct(out_seg);
for (self.sections.items(.header)[indexes.start..indexes.end]) |header| {
if (header.size == 0) continue;
try writer.writeStruct(header);
}
}
}
fn writeLinkeditSegmentData(self: *Zld) !void {
try self.writeDyldInfoData();
try self.writeFunctionStarts();
try self.writeDataInCode();
try self.writeSymtabs();
const seg = self.getLinkeditSegmentPtr();
seg.vmsize = mem.alignForward(u64, seg.filesize, MachO.getPageSize(self.options.target.cpu.arch));
}
fn collectRebaseData(self: *Zld, rebase: *Rebase) !void {
log.debug("collecting rebase data", .{});
// First, unpack GOT entries
if (self.got_section_index) |sect_id| {
try MachO.collectRebaseDataFromTableSection(self.gpa, self, sect_id, rebase, self.got_table);
}
// Next, unpack __la_symbol_ptr entries
if (self.la_symbol_ptr_section_index) |sect_id| {
try MachO.collectRebaseDataFromTableSection(self.gpa, self, sect_id, rebase, self.stubs_table);
}
// Finally, unpack the rest.
const cpu_arch = self.options.target.cpu.arch;
for (self.objects.items) |*object| {
for (object.atoms.items) |atom_index| {
const atom = self.getAtom(atom_index);
const sym = self.getSymbol(atom.getSymbolWithLoc());
if (sym.n_desc == MachO.N_DEAD) continue;
const sect_id = sym.n_sect - 1;
const section = self.sections.items(.header)[sect_id];
const segment_id = self.sections.items(.segment_index)[sect_id];
const segment = self.segments.items[segment_id];
if (segment.maxprot & macho.PROT.WRITE == 0) continue;
switch (section.type()) {
macho.S_LITERAL_POINTERS,
macho.S_REGULAR,
macho.S_MOD_INIT_FUNC_POINTERS,
macho.S_MOD_TERM_FUNC_POINTERS,
=> {},
else => continue,
}
log.debug(" ATOM({d}, %{d}, '{s}')", .{
atom_index,
atom.sym_index,
self.getSymbolName(atom.getSymbolWithLoc()),
});
const code = Atom.getAtomCode(self, atom_index);
const relocs = Atom.getAtomRelocs(self, atom_index);
const ctx = Atom.getRelocContext(self, atom_index);
for (relocs) |rel| {
switch (cpu_arch) {
.aarch64 => {
const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
if (rel_type != .ARM64_RELOC_UNSIGNED) continue;
if (rel.r_length != 3) continue;
},
.x86_64 => {
const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type));
if (rel_type != .X86_64_RELOC_UNSIGNED) continue;
if (rel.r_length != 3) continue;
},
else => unreachable,
}
const target = Atom.parseRelocTarget(self, .{
.object_id = atom.getFile().?,
.rel = rel,
.code = code,
.base_offset = ctx.base_offset,
.base_addr = ctx.base_addr,
});
const target_sym = self.getSymbol(target);
if (target_sym.undf()) continue;
const base_offset = @as(i32, @intCast(sym.n_value - segment.vmaddr));
const rel_offset = rel.r_address - ctx.base_offset;
const offset = @as(u64, @intCast(base_offset + rel_offset));
log.debug(" | rebase at {x}", .{offset});
try rebase.entries.append(self.gpa, .{
.offset = offset,
.segment_id = segment_id,
});
}
}
}
try rebase.finalize(self.gpa);
}
fn collectBindData(
self: *Zld,
bind: *Bind,
) !void {
log.debug("collecting bind data", .{});
// First, unpack GOT section
if (self.got_section_index) |sect_id| {
try MachO.collectBindDataFromTableSection(self.gpa, self, sect_id, bind, self.got_table);
}
// Next, unpack TLV pointers section
if (self.tlv_ptr_section_index) |sect_id| {
try MachO.collectBindDataFromTableSection(self.gpa, self, sect_id, bind, self.tlv_ptr_table);
}
// Finally, unpack the rest.
const cpu_arch = self.options.target.cpu.arch;
for (self.objects.items) |*object| {
for (object.atoms.items) |atom_index| {
const atom = self.getAtom(atom_index);
const sym = self.getSymbol(atom.getSymbolWithLoc());
if (sym.n_desc == MachO.N_DEAD) continue;
const sect_id = sym.n_sect - 1;
const section = self.sections.items(.header)[sect_id];
const segment_id = self.sections.items(.segment_index)[sect_id];
const segment = self.segments.items[segment_id];
if (segment.maxprot & macho.PROT.WRITE == 0) continue;
switch (section.type()) {
macho.S_LITERAL_POINTERS,
macho.S_REGULAR,
macho.S_MOD_INIT_FUNC_POINTERS,
macho.S_MOD_TERM_FUNC_POINTERS,
=> {},
else => continue,
}
log.debug(" ATOM({d}, %{d}, '{s}')", .{
atom_index,
atom.sym_index,
self.getSymbolName(atom.getSymbolWithLoc()),
});
const code = Atom.getAtomCode(self, atom_index);
const relocs = Atom.getAtomRelocs(self, atom_index);
const ctx = Atom.getRelocContext(self, atom_index);
for (relocs) |rel| {
switch (cpu_arch) {
.aarch64 => {
const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
if (rel_type != .ARM64_RELOC_UNSIGNED) continue;
if (rel.r_length != 3) continue;
},
.x86_64 => {
const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type));
if (rel_type != .X86_64_RELOC_UNSIGNED) continue;
if (rel.r_length != 3) continue;
},
else => unreachable,
}
const global = Atom.parseRelocTarget(self, .{
.object_id = atom.getFile().?,
.rel = rel,
.code = code,
.base_offset = ctx.base_offset,
.base_addr = ctx.base_addr,
});
const bind_sym_name = self.getSymbolName(global);
const bind_sym = self.getSymbol(global);
if (!bind_sym.undf()) continue;
const base_offset = sym.n_value - segment.vmaddr;
const rel_offset = @as(u32, @intCast(rel.r_address - ctx.base_offset));
const offset = @as(u64, @intCast(base_offset + rel_offset));
const addend = mem.readIntLittle(i64, code[rel_offset..][0..8]);
const dylib_ordinal = @divTrunc(@as(i16, @bitCast(bind_sym.n_desc)), macho.N_SYMBOL_RESOLVER);
log.debug(" | bind at {x}, import('{s}') in dylib({d})", .{
offset,
bind_sym_name,
dylib_ordinal,
});
log.debug(" | with addend {x}", .{addend});
if (bind_sym.weakRef()) {
log.debug(" | marking as weak ref ", .{});
}
try bind.entries.append(self.gpa, .{
.target = global,
.offset = offset,
.segment_id = segment_id,
.addend = addend,
});
}
}
}
try bind.finalize(self.gpa, self);
}
fn collectLazyBindData(self: *Zld, lazy_bind: *LazyBind) !void {
const sect_id = self.la_symbol_ptr_section_index orelse return;
try MachO.collectBindDataFromTableSection(self.gpa, self, sect_id, lazy_bind, self.stubs_table);
try lazy_bind.finalize(self.gpa, self);
}
fn collectExportData(self: *Zld, trie: *Trie) !void {
const gpa = self.gpa;
// TODO handle macho.EXPORT_SYMBOL_FLAGS_REEXPORT and macho.EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER.
log.debug("collecting export data", .{});
const segment_index = self.getSegmentByName("__TEXT").?;
const exec_segment = self.segments.items[segment_index];
const base_address = exec_segment.vmaddr;
for (self.globals.items) |global| {
const sym = self.getSymbol(global);
if (sym.undf()) continue;
if (sym.n_desc == MachO.N_DEAD) continue;
const sym_name = self.getSymbolName(global);
log.debug(" (putting '{s}' defined at 0x{x})", .{ sym_name, sym.n_value });
try trie.put(gpa, .{
.name = sym_name,
.vmaddr_offset = sym.n_value - base_address,
.export_flags = macho.EXPORT_SYMBOL_FLAGS_KIND_REGULAR,
});
}
try trie.finalize(gpa);
}
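// The dyld info blobs are laid out back to back at the start of __LINKEDIT,
// in the order rebase, bind, lazy bind, export trie, with each blob padded
// to an 8-byte boundary.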
fn writeDyldInfoData(self: *Zld) !void {
const gpa = self.gpa;
var rebase = Rebase{};
defer rebase.deinit(gpa);
try self.collectRebaseData(&rebase);
var bind = Bind{};
defer bind.deinit(gpa);
try self.collectBindData(&bind);
var lazy_bind = LazyBind{};
defer lazy_bind.deinit(gpa);
try self.collectLazyBindData(&lazy_bind);
var trie = Trie{};
defer trie.deinit(gpa);
try trie.init(gpa);
try self.collectExportData(&trie);
const link_seg = self.getLinkeditSegmentPtr();
assert(mem.isAlignedGeneric(u64, link_seg.fileoff, @alignOf(u64)));
const rebase_off = link_seg.fileoff;
const rebase_size = rebase.size();
const rebase_size_aligned = mem.alignForward(u64, rebase_size, @alignOf(u64));
log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size_aligned });
const bind_off = rebase_off + rebase_size_aligned;
const bind_size = bind.size();
const bind_size_aligned = mem.alignForward(u64, bind_size, @alignOf(u64));
log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size_aligned });
const lazy_bind_off = bind_off + bind_size_aligned;
const lazy_bind_size = lazy_bind.size();
const lazy_bind_size_aligned = mem.alignForward(u64, lazy_bind_size, @alignOf(u64));
log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{
lazy_bind_off,
lazy_bind_off + lazy_bind_size_aligned,
});
const export_off = lazy_bind_off + lazy_bind_size_aligned;
const export_size = trie.size;
const export_size_aligned = mem.alignForward(u64, export_size, @alignOf(u64));
log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size_aligned });
const needed_size = math.cast(usize, export_off + export_size_aligned - rebase_off) orelse
return error.Overflow;
link_seg.filesize = needed_size;
assert(mem.isAlignedGeneric(u64, link_seg.fileoff + link_seg.filesize, @alignOf(u64)));
var buffer = try gpa.alloc(u8, needed_size);
defer gpa.free(buffer);
@memset(buffer, 0);
var stream = std.io.fixedBufferStream(buffer);
const writer = stream.writer();
try rebase.write(writer);
try stream.seekTo(bind_off - rebase_off);
try bind.write(writer);
try stream.seekTo(lazy_bind_off - rebase_off);
try lazy_bind.write(writer);
try stream.seekTo(export_off - rebase_off);
_ = try trie.write(writer);
log.debug("writing dyld info from 0x{x} to 0x{x}", .{
rebase_off,
rebase_off + needed_size,
});
try self.file.pwriteAll(buffer, rebase_off);
try MachO.populateLazyBindOffsetsInStubHelper(
self,
self.options.target.cpu.arch,
self.file,
lazy_bind,
);
self.dyld_info_cmd.rebase_off = @as(u32, @intCast(rebase_off));
self.dyld_info_cmd.rebase_size = @as(u32, @intCast(rebase_size_aligned));
self.dyld_info_cmd.bind_off = @as(u32, @intCast(bind_off));
self.dyld_info_cmd.bind_size = @as(u32, @intCast(bind_size_aligned));
self.dyld_info_cmd.lazy_bind_off = @as(u32, @intCast(lazy_bind_off));
self.dyld_info_cmd.lazy_bind_size = @as(u32, @intCast(lazy_bind_size_aligned));
self.dyld_info_cmd.export_off = @as(u32, @intCast(export_off));
self.dyld_info_cmd.export_size = @as(u32, @intCast(export_size_aligned));
}
const asc_u64 = std.sort.asc(u64);
fn addSymbolToFunctionStarts(self: *Zld, sym_loc: SymbolWithLoc, addresses: *std.ArrayList(u64)) !void {
const sym = self.getSymbol(sym_loc);
if (sym.n_strx == 0) return;
if (sym.n_desc == MachO.N_DEAD) return;
if (self.symbolIsTemp(sym_loc)) return;
try addresses.append(sym.n_value);
}
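// LC_FUNCTION_STARTS is a sequence of ULEB128-encoded deltas between
// successive function addresses, relative to the __TEXT segment's vmaddr;
// e.g. functions at vmaddr+0x1000 and vmaddr+0x1020 encode as uleb(0x1000),
// uleb(0x20). Zero deltas (aliases at the same address) are skipped.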
fn writeFunctionStarts(self: *Zld) !void {
const text_seg_index = self.getSegmentByName("__TEXT") orelse return;
const text_seg = self.segments.items[text_seg_index];
const gpa = self.gpa;
// We need to sort by address first
var addresses = std.ArrayList(u64).init(gpa);
defer addresses.deinit();
for (self.objects.items) |object| {
for (object.exec_atoms.items) |atom_index| {
const atom = self.getAtom(atom_index);
const sym_loc = atom.getSymbolWithLoc();
try self.addSymbolToFunctionStarts(sym_loc, &addresses);
var it = Atom.getInnerSymbolsIterator(self, atom_index);
while (it.next()) |inner_sym_loc| {
try self.addSymbolToFunctionStarts(inner_sym_loc, &addresses);
}
}
}
mem.sort(u64, addresses.items, {}, asc_u64);
var offsets = std.ArrayList(u32).init(gpa);
defer offsets.deinit();
try offsets.ensureTotalCapacityPrecise(addresses.items.len);
var last_off: u32 = 0;
for (addresses.items) |addr| {
const offset = @as(u32, @intCast(addr - text_seg.vmaddr));
const diff = offset - last_off;
if (diff == 0) continue;
offsets.appendAssumeCapacity(diff);
last_off = offset;
}
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
const max_size = @as(usize, @intCast(offsets.items.len * @sizeOf(u64)));
try buffer.ensureTotalCapacity(max_size);
for (offsets.items) |offset| {
try std.leb.writeULEB128(buffer.writer(), offset);
}
const link_seg = self.getLinkeditSegmentPtr();
const offset = link_seg.fileoff + link_seg.filesize;
assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = buffer.items.len;
const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64));
const padding = math.cast(usize, needed_size_aligned - needed_size) orelse return error.Overflow;
if (padding > 0) {
try buffer.ensureUnusedCapacity(padding);
buffer.appendNTimesAssumeCapacity(0, padding);
}
link_seg.filesize = offset + needed_size_aligned - link_seg.fileoff;
log.debug("writing function starts info from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });
try self.file.pwriteAll(buffer.items, offset);
self.function_starts_cmd.dataoff = @as(u32, @intCast(offset));
self.function_starts_cmd.datasize = @as(u32, @intCast(needed_size_aligned));
}
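// Returns the sub-slice of data-in-code entries whose offsets fall within
// [start_addr, end_addr); this relies on the entries being sorted by offset
// within each object file.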
fn filterDataInCode(
dices: []const macho.data_in_code_entry,
start_addr: u64,
end_addr: u64,
) []const macho.data_in_code_entry {
const Predicate = struct {
addr: u64,
pub fn predicate(self: @This(), dice: macho.data_in_code_entry) bool {
return dice.offset >= self.addr;
}
};
const start = MachO.lsearch(macho.data_in_code_entry, dices, Predicate{ .addr = start_addr });
const end = MachO.lsearch(macho.data_in_code_entry, dices[start..], Predicate{ .addr = end_addr }) + start;
return dices[start..end];
}
fn writeDataInCode(self: *Zld) !void {
var out_dice = std.ArrayList(macho.data_in_code_entry).init(self.gpa);
defer out_dice.deinit();
const text_sect_id = self.text_section_index orelse return;
const text_sect_header = self.sections.items(.header)[text_sect_id];
for (self.objects.items) |object| {
if (!object.hasDataInCode()) continue;
const dice = object.data_in_code.items;
try out_dice.ensureUnusedCapacity(dice.len);
for (object.exec_atoms.items) |atom_index| {
const atom = self.getAtom(atom_index);
const sym = self.getSymbol(atom.getSymbolWithLoc());
if (sym.n_desc == MachO.N_DEAD) continue;
const source_addr = if (object.getSourceSymbol(atom.sym_index)) |source_sym|
source_sym.n_value
else blk: {
const nbase = @as(u32, @intCast(object.in_symtab.?.len));
const source_sect_id = @as(u8, @intCast(atom.sym_index - nbase));
break :blk object.getSourceSection(source_sect_id).addr;
};
const filtered_dice = filterDataInCode(dice, source_addr, source_addr + atom.size);
const base = math.cast(u32, sym.n_value - text_sect_header.addr + text_sect_header.offset) orelse
return error.Overflow;
for (filtered_dice) |single| {
const offset = math.cast(u32, single.offset - source_addr + base) orelse
return error.Overflow;
out_dice.appendAssumeCapacity(.{
.offset = offset,
.length = single.length,
.kind = single.kind,
});
}
}
}
const seg = self.getLinkeditSegmentPtr();
const offset = seg.fileoff + seg.filesize;
assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = out_dice.items.len * @sizeOf(macho.data_in_code_entry);
const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64));
seg.filesize = offset + needed_size_aligned - seg.fileoff;
const buffer = try self.gpa.alloc(u8, math.cast(usize, needed_size_aligned) orelse return error.Overflow);
defer self.gpa.free(buffer);
{
const src = mem.sliceAsBytes(out_dice.items);
@memcpy(buffer[0..src.len], src);
@memset(buffer[src.len..], 0);
}
log.debug("writing data-in-code from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });
try self.file.pwriteAll(buffer, offset);
self.data_in_code_cmd.dataoff = @as(u32, @intCast(offset));
self.data_in_code_cmd.datasize = @as(u32, @intCast(needed_size_aligned));
}
fn writeSymtabs(self: *Zld) !void {
var ctx = try self.writeSymtab();
defer ctx.imports_table.deinit();
try self.writeDysymtab(ctx);
try self.writeStrtab();
}
fn addLocalToSymtab(self: *Zld, sym_loc: SymbolWithLoc, locals: *std.ArrayList(macho.nlist_64)) !void {
const sym = self.getSymbol(sym_loc);
if (sym.n_strx == 0) return; // no name, skip
if (sym.n_desc == MachO.N_DEAD) return; // garbage-collected, skip
if (sym.ext()) return; // an export lands in its own symtab section, skip
if (self.symbolIsTemp(sym_loc)) return; // local temp symbol, skip
var out_sym = sym;
out_sym.n_strx = try self.strtab.insert(self.gpa, self.getSymbolName(sym_loc));
try locals.append(out_sym);
}
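// The symbol table is written as three contiguous groups: local symbols,
// exported (defined external) symbols, then imports. writeDysymtab later
// records the boundaries between the groups.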
fn writeSymtab(self: *Zld) !SymtabCtx {
const gpa = self.gpa;
var locals = std.ArrayList(macho.nlist_64).init(gpa);
defer locals.deinit();
for (self.objects.items) |object| {
for (object.atoms.items) |atom_index| {
const atom = self.getAtom(atom_index);
const sym_loc = atom.getSymbolWithLoc();
try self.addLocalToSymtab(sym_loc, &locals);
var it = Atom.getInnerSymbolsIterator(self, atom_index);
while (it.next()) |inner_sym_loc| {
try self.addLocalToSymtab(inner_sym_loc, &locals);
}
}
}
var exports = std.ArrayList(macho.nlist_64).init(gpa);
defer exports.deinit();
for (self.globals.items) |global| {
const sym = self.getSymbol(global);
if (sym.undf()) continue; // import, skip
if (sym.n_desc == MachO.N_DEAD) continue;
var out_sym = sym;
out_sym.n_strx = try self.strtab.insert(gpa, self.getSymbolName(global));
try exports.append(out_sym);
}
var imports = std.ArrayList(macho.nlist_64).init(gpa);
defer imports.deinit();
var imports_table = std.AutoHashMap(SymbolWithLoc, u32).init(gpa);
for (self.globals.items) |global| {
const sym = self.getSymbol(global);
if (!sym.undf()) continue; // not an import, skip
if (sym.n_desc == MachO.N_DEAD) continue;
const new_index = @as(u32, @intCast(imports.items.len));
var out_sym = sym;
out_sym.n_strx = try self.strtab.insert(gpa, self.getSymbolName(global));
try imports.append(out_sym);
try imports_table.putNoClobber(global, new_index);
}
// We generate stabs last in order to ensure that the strtab always has the
// debug info strings trailing.
if (!self.options.strip) {
for (self.objects.items) |object| {
try self.generateSymbolStabs(object, &locals);
}
}
const nlocals = @as(u32, @intCast(locals.items.len));
const nexports = @as(u32, @intCast(exports.items.len));
const nimports = @as(u32, @intCast(imports.items.len));
const nsyms = nlocals + nexports + nimports;
const seg = self.getLinkeditSegmentPtr();
const offset = seg.fileoff + seg.filesize;
assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = nsyms * @sizeOf(macho.nlist_64);
seg.filesize = offset + needed_size - seg.fileoff;
assert(mem.isAlignedGeneric(u64, seg.fileoff + seg.filesize, @alignOf(u64)));
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
try buffer.ensureTotalCapacityPrecise(needed_size);
buffer.appendSliceAssumeCapacity(mem.sliceAsBytes(locals.items));
buffer.appendSliceAssumeCapacity(mem.sliceAsBytes(exports.items));
buffer.appendSliceAssumeCapacity(mem.sliceAsBytes(imports.items));
log.debug("writing symtab from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
try self.file.pwriteAll(buffer.items, offset);
self.symtab_cmd.symoff = @as(u32, @intCast(offset));
self.symtab_cmd.nsyms = nsyms;
return SymtabCtx{
.nlocalsym = nlocals,
.nextdefsym = nexports,
.nundefsym = nimports,
.imports_table = imports_table,
};
}
fn writeStrtab(self: *Zld) !void {
const seg = self.getLinkeditSegmentPtr();
const offset = seg.fileoff + seg.filesize;
assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = self.strtab.buffer.items.len;
const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64));
seg.filesize = offset + needed_size_aligned - seg.fileoff;
log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });
const buffer = try self.gpa.alloc(u8, math.cast(usize, needed_size_aligned) orelse return error.Overflow);
defer self.gpa.free(buffer);
@memcpy(buffer[0..self.strtab.buffer.items.len], self.strtab.buffer.items);
@memset(buffer[self.strtab.buffer.items.len..], 0);
try self.file.pwriteAll(buffer, offset);
self.symtab_cmd.stroff = @as(u32, @intCast(offset));
self.symtab_cmd.strsize = @as(u32, @intCast(needed_size_aligned));
}
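/// Symbol counts and the import lookup table produced by `writeSymtab` and
/// consumed by `writeDysymtab` when emitting the indirect symbol table.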
const SymtabCtx = struct {
nlocalsym: u32,
nextdefsym: u32,
nundefsym: u32,
imports_table: std.AutoHashMap(SymbolWithLoc, u32),
};
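/// Writes the indirect symbol table into __LINKEDIT. Entries appear in section
/// order -- __stubs, then __got, then __la_symbol_ptr -- and each section's
/// `reserved1` field records the index of its first entry:
///
///   reserved1(__stubs)         = 0
///   reserved1(__got)           = nstubs
///   reserved1(__la_symbol_ptr) = nstubs + ngot_entries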
fn writeDysymtab(self: *Zld, ctx: SymtabCtx) !void {
const gpa = self.gpa;
const nstubs = @as(u32, @intCast(self.stubs_table.lookup.count()));
const ngot_entries = @as(u32, @intCast(self.got_table.lookup.count()));
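// Each stub target is recorded twice in the indirect symbol table: once for its
// __stubs entry and once for its matching __la_symbol_ptr entry.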
const nindirectsyms = nstubs * 2 + ngot_entries;
const iextdefsym = ctx.nlocalsym;
const iundefsym = iextdefsym + ctx.nextdefsym;
const seg = self.getLinkeditSegmentPtr();
const offset = seg.fileoff + seg.filesize;
assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = nindirectsyms * @sizeOf(u32);
const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64));
seg.filesize = offset + needed_size_aligned - seg.fileoff;
log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });
var buf = std.ArrayList(u8).init(gpa);
defer buf.deinit();
try buf.ensureTotalCapacityPrecise(math.cast(usize, needed_size_aligned) orelse return error.Overflow);
const writer = buf.writer();
if (self.stubs_section_index) |sect_id| {
const header = &self.sections.items(.header)[sect_id];
header.reserved1 = 0;
for (self.stubs_table.entries.items) |entry| {
if (!self.stubs_table.lookup.contains(entry)) continue;
const target_sym = self.getSymbol(entry);
assert(target_sym.undf());
try writer.writeIntLittle(u32, iundefsym + ctx.imports_table.get(entry).?);
}
}
if (self.got_section_index) |sect_id| {
const header = &self.sections.items(.header)[sect_id];
header.reserved1 = nstubs;
for (self.got_table.entries.items) |entry| {
if (!self.got_table.lookup.contains(entry)) continue;
const target_sym = self.getSymbol(entry);
if (target_sym.undf()) {
try writer.writeIntLittle(u32, iundefsym + ctx.imports_table.get(entry).?);
} else {
try writer.writeIntLittle(u32, macho.INDIRECT_SYMBOL_LOCAL);
}
}
}
if (self.la_symbol_ptr_section_index) |sect_id| {
const header = &self.sections.items(.header)[sect_id];
header.reserved1 = nstubs + ngot_entries;
for (self.stubs_table.entries.items) |entry| {
if (!self.stubs_table.lookup.contains(entry)) continue;
const target_sym = self.getSymbol(entry);
assert(target_sym.undf());
try writer.writeIntLittle(u32, iundefsym + ctx.imports_table.get(entry).?);
}
}
const padding = math.cast(usize, needed_size_aligned - needed_size) orelse return error.Overflow;
if (padding > 0) {
buf.appendNTimesAssumeCapacity(0, padding);
}
assert(buf.items.len == needed_size_aligned);
try self.file.pwriteAll(buf.items, offset);
self.dysymtab_cmd.nlocalsym = ctx.nlocalsym;
self.dysymtab_cmd.iextdefsym = iextdefsym;
self.dysymtab_cmd.nextdefsym = ctx.nextdefsym;
self.dysymtab_cmd.iundefsym = iundefsym;
self.dysymtab_cmd.nundefsym = ctx.nundefsym;
self.dysymtab_cmd.indirectsymoff = @as(u32, @intCast(offset));
self.dysymtab_cmd.nindirectsyms = nindirectsyms;
}
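/// Computes the binary's UUID over the file contents (excluding the code
/// signature, if any) and patches it into the already-written uuid load command.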
fn writeUuid(self: *Zld, comp: *const Compilation, uuid_cmd_offset: u32, has_codesig: bool) !void {
const file_size = if (!has_codesig) blk: {
const seg = self.getLinkeditSegmentPtr();
break :blk seg.fileoff + seg.filesize;
} else self.codesig_cmd.dataoff;
try calcUuid(comp, self.file, file_size, &self.uuid_cmd.uuid);
const offset = uuid_cmd_offset + @sizeOf(macho.load_command);
try self.file.pwriteAll(&self.uuid_cmd.uuid, offset);
}
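/// Reserves zero-filled, 16-byte-aligned space at the end of __LINKEDIT for the
/// code signature so that every byte preceding it is final before the signature
/// hashes are computed.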
fn writeCodeSignaturePadding(self: *Zld, code_sig: *CodeSignature) !void {
const seg = self.getLinkeditSegmentPtr();
// Code signature data has to be 16-byte aligned for Apple tools to recognize the file
// https://github.com/opensource-apple/cctools/blob/fdb4825f303fd5c0751be524babd32958181b3ed/libstuff/checkout.c#L271
const offset = mem.alignForward(u64, seg.fileoff + seg.filesize, 16);
const needed_size = code_sig.estimateSize(offset);
seg.filesize = offset + needed_size - seg.fileoff;
seg.vmsize = mem.alignForward(u64, seg.filesize, MachO.getPageSize(self.options.target.cpu.arch));
log.debug("writing code signature padding from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
// Pad out the space. We need to do this to calculate valid hashes for everything in the file
// except for code signature data.
try self.file.pwriteAll(&[_]u8{0}, offset + needed_size - 1);
self.codesig_cmd.dataoff = @as(u32, @intCast(offset));
self.codesig_cmd.datasize = @as(u32, @intCast(needed_size));
}
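/// Generates the ad-hoc code signature over the finished file and writes it into
/// the space reserved by `writeCodeSignaturePadding`. This must be the last write
/// to the output file.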
fn writeCodeSignature(self: *Zld, comp: *const Compilation, code_sig: *CodeSignature) !void {
const seg_id = self.getSegmentByName("__TEXT").?;
const seg = self.segments.items[seg_id];
var buffer = std.ArrayList(u8).init(self.gpa);
defer buffer.deinit();
try buffer.ensureTotalCapacityPrecise(code_sig.size());
try code_sig.writeAdhocSignature(comp, .{
.file = self.file,
.exec_seg_base = seg.fileoff,
.exec_seg_limit = seg.filesize,
.file_size = self.codesig_cmd.dataoff,
.output_mode = self.options.output_mode,
}, buffer.writer());
assert(buffer.items.len == code_sig.size());
log.debug("writing code signature from 0x{x} to 0x{x}", .{
self.codesig_cmd.dataoff,
self.codesig_cmd.dataoff + buffer.items.len,
});
try self.file.pwriteAll(buffer.items, self.codesig_cmd.dataoff);
}
/// Writes Mach-O file header.
fn writeHeader(self: *Zld, ncmds: u32, sizeofcmds: u32) !void {
var header: macho.mach_header_64 = .{};
header.flags = macho.MH_NOUNDEFS | macho.MH_DYLDLINK | macho.MH_PIE | macho.MH_TWOLEVEL;
switch (self.options.target.cpu.arch) {
.aarch64 => {
header.cputype = macho.CPU_TYPE_ARM64;
header.cpusubtype = macho.CPU_SUBTYPE_ARM_ALL;
},
.x86_64 => {
header.cputype = macho.CPU_TYPE_X86_64;
header.cpusubtype = macho.CPU_SUBTYPE_X86_64_ALL;
},
else => return error.UnsupportedCpuArchitecture,
}
switch (self.options.output_mode) {
.Exe => {
header.filetype = macho.MH_EXECUTE;
},
.Lib => {
// By this point, it can only be a dylib.
header.filetype = macho.MH_DYLIB;
header.flags |= macho.MH_NO_REEXPORTED_DYLIBS;
},
else => unreachable,
}
if (self.getSectionByName("__DATA", "__thread_vars")) |sect_id| {
if (self.sections.items(.header)[sect_id].size > 0) {
header.flags |= macho.MH_HAS_TLV_DESCRIPTORS;
}
}
header.ncmds = ncmds;
header.sizeofcmds = sizeofcmds;
log.debug("writing Mach-O header {}", .{header});
try self.file.pwriteAll(mem.asBytes(&header), 0);
}
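/// Copies `bytes` into a fixed-size, zero-padded 16-byte buffer of the kind used
/// for segment and section names in Mach-O load commands. Asserts that `bytes`
/// fits.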
pub fn makeStaticString(bytes: []const u8) [16]u8 {
var buf = [_]u8{0} ** 16;
assert(bytes.len <= buf.len);
@memcpy(buf[0..bytes.len], bytes);
return buf;
}
pub fn getAtomPtr(self: *Zld, atom_index: Atom.Index) *Atom {
assert(atom_index < self.atoms.items.len);
return &self.atoms.items[atom_index];
}
pub fn getAtom(self: Zld, atom_index: Atom.Index) Atom {
assert(atom_index < self.atoms.items.len);
return self.atoms.items[atom_index];
}
fn getSegmentByName(self: Zld, segname: []const u8) ?u8 {
for (self.segments.items, 0..) |seg, i| {
if (mem.eql(u8, segname, seg.segName())) return @as(u8, @intCast(i));
} else return null;
}
pub fn getSegment(self: Zld, sect_id: u8) macho.segment_command_64 {
const index = self.sections.items(.segment_index)[sect_id];
return self.segments.items[index];
}
pub fn getSegmentPtr(self: *Zld, sect_id: u8) *macho.segment_command_64 {
const index = self.sections.items(.segment_index)[sect_id];
return &self.segments.items[index];
}
pub fn getLinkeditSegmentPtr(self: *Zld) *macho.segment_command_64 {
assert(self.segments.items.len > 0);
const seg = &self.segments.items[self.segments.items.len - 1];
assert(mem.eql(u8, seg.segName(), "__LINKEDIT"));
return seg;
}
pub fn getSectionByName(self: Zld, segname: []const u8, sectname: []const u8) ?u8 {
// TODO investigate caching with a hashmap
for (self.sections.items(.header), 0..) |header, i| {
if (mem.eql(u8, header.segName(), segname) and mem.eql(u8, header.sectName(), sectname))
return @as(u8, @intCast(i));
} else return null;
}
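/// Returns the half-open range `[start, end)` of section indices belonging to the
/// segment at `segment_index`, computed by accumulating `nsects` over the
/// preceding segments.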
pub fn getSectionIndexes(self: Zld, segment_index: u8) struct { start: u8, end: u8 } {
var start: u8 = 0;
const nsects = for (self.segments.items, 0..) |seg, i| {
if (i == segment_index) break @as(u8, @intCast(seg.nsects));
start += @as(u8, @intCast(seg.nsects));
} else 0;
return .{ .start = start, .end = start + nsects };
}
pub fn symbolIsTemp(self: *Zld, sym_with_loc: SymbolWithLoc) bool {
const sym = self.getSymbol(sym_with_loc);
if (!sym.sect()) return false;
if (sym.ext()) return false;
const sym_name = self.getSymbolName(sym_with_loc);
return mem.startsWith(u8, sym_name, "l") or mem.startsWith(u8, sym_name, "L");
}
/// Returns a pointer to the symbol described by the `sym_with_loc` descriptor.
pub fn getSymbolPtr(self: *Zld, sym_with_loc: SymbolWithLoc) *macho.nlist_64 {
if (sym_with_loc.getFile()) |file| {
const object = &self.objects.items[file];
return &object.symtab[sym_with_loc.sym_index];
} else {
return &self.locals.items[sym_with_loc.sym_index];
}
}
/// Returns the symbol described by the `sym_with_loc` descriptor.
pub fn getSymbol(self: *const Zld, sym_with_loc: SymbolWithLoc) macho.nlist_64 {
if (sym_with_loc.getFile()) |file| {
const object = &self.objects.items[file];
return object.symtab[sym_with_loc.sym_index];
} else {
return self.locals.items[sym_with_loc.sym_index];
}
}
/// Returns the name of the symbol described by the `sym_with_loc` descriptor.
pub fn getSymbolName(self: *const Zld, sym_with_loc: SymbolWithLoc) []const u8 {
if (sym_with_loc.getFile()) |file| {
const object = self.objects.items[file];
return object.getSymbolName(sym_with_loc.sym_index);
} else {
const sym = self.locals.items[sym_with_loc.sym_index];
return self.strtab.get(sym.n_strx).?;
}
}
pub fn getGotEntryAddress(self: *Zld, sym_with_loc: SymbolWithLoc) ?u64 {
const index = self.got_table.lookup.get(sym_with_loc) orelse return null;
const header = self.sections.items(.header)[self.got_section_index.?];
return header.addr + @sizeOf(u64) * index;
}
pub fn getTlvPtrEntryAddress(self: *Zld, sym_with_loc: SymbolWithLoc) ?u64 {
const index = self.tlv_ptr_table.lookup.get(sym_with_loc) orelse return null;
const header = self.sections.items(.header)[self.tlv_ptr_section_index.?];
return header.addr + @sizeOf(u64) * index;
}
pub fn getStubsEntryAddress(self: *Zld, sym_with_loc: SymbolWithLoc) ?u64 {
const index = self.stubs_table.lookup.get(sym_with_loc) orelse return null;
const header = self.sections.items(.header)[self.stubs_section_index.?];
return header.addr + stubs.stubSize(self.options.target.cpu.arch) * index;
}
/// Returns the symbol location corresponding to the set entry point.
/// Asserts that the output mode is executable.
pub fn getEntryPoint(self: Zld) SymbolWithLoc {
assert(self.options.output_mode == .Exe);
const global_index = self.entry_index.?;
return self.globals.items[global_index];
}
inline fn requiresThunks(self: Zld) bool {
return self.options.target.cpu.arch == .aarch64;
}
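/// Generates debug stab entries for `object` and appends them to `locals`.
/// The emitted entries bracket the object's symbols in an N_SO/N_OSO scope;
/// schematically, the sequence looks like this:
///
///   N_SO    <compile dir>
///   N_SO    <translation unit name>
///   N_OSO   <object path>            (n_value = mtime)
///   ...     per-symbol N_GSYM/N_STSYM, or N_BNSYM/N_FUN/N_FUN/N_ENSYM runs
///   N_SO    <empty name>             (closes the scope)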
pub fn generateSymbolStabs(self: *Zld, object: Object, locals: *std.ArrayList(macho.nlist_64)) !void {
log.debug("generating stabs for '{s}'", .{object.name});
const gpa = self.gpa;
var debug_info = object.parseDwarfInfo();
var lookup = DwarfInfo.AbbrevLookupTable.init(gpa);
defer lookup.deinit();
try lookup.ensureUnusedCapacity(std.math.maxInt(u8));
// We assume there is only one CU.
var cu_it = debug_info.getCompileUnitIterator();
const compile_unit = while (try cu_it.next()) |cu| {
const offset = math.cast(usize, cu.cuh.debug_abbrev_offset) orelse return error.Overflow;
try debug_info.genAbbrevLookupByKind(offset, &lookup);
break cu;
} else {
log.debug("no compile unit found in debug info in {s}; skipping", .{object.name});
return;
};
var abbrev_it = compile_unit.getAbbrevEntryIterator(debug_info);
const cu_entry: DwarfInfo.AbbrevEntry = while (try abbrev_it.next(lookup)) |entry| switch (entry.tag) {
dwarf.TAG.compile_unit => break entry,
else => continue,
} else {
log.debug("missing DWARF_TAG_compile_unit tag in {s}; skipping", .{object.name});
return;
};
var maybe_tu_name: ?[]const u8 = null;
var maybe_tu_comp_dir: ?[]const u8 = null;
var attr_it = cu_entry.getAttributeIterator(debug_info, compile_unit.cuh);
while (try attr_it.next()) |attr| switch (attr.name) {
dwarf.AT.comp_dir => maybe_tu_comp_dir = attr.getString(debug_info, compile_unit.cuh) orelse continue,
dwarf.AT.name => maybe_tu_name = attr.getString(debug_info, compile_unit.cuh) orelse continue,
else => continue,
};
if (maybe_tu_name == null or maybe_tu_comp_dir == null) {
log.debug("missing DWARF_AT_comp_dir and DWARF_AT_name attributes {s}; skipping", .{object.name});
return;
}
const tu_name = maybe_tu_name.?;
const tu_comp_dir = maybe_tu_comp_dir.?;
// Open scope
try locals.ensureUnusedCapacity(3);
locals.appendAssumeCapacity(.{
.n_strx = try self.strtab.insert(gpa, tu_comp_dir),
.n_type = macho.N_SO,
.n_sect = 0,
.n_desc = 0,
.n_value = 0,
});
locals.appendAssumeCapacity(.{
.n_strx = try self.strtab.insert(gpa, tu_name),
.n_type = macho.N_SO,
.n_sect = 0,
.n_desc = 0,
.n_value = 0,
});
locals.appendAssumeCapacity(.{
.n_strx = try self.strtab.insert(gpa, object.name),
.n_type = macho.N_OSO,
.n_sect = 0,
.n_desc = 1,
.n_value = object.mtime,
});
var stabs_buf: [4]macho.nlist_64 = undefined;
var name_lookup: ?DwarfInfo.SubprogramLookupByName = if (object.header.flags & macho.MH_SUBSECTIONS_VIA_SYMBOLS == 0) blk: {
var name_lookup = DwarfInfo.SubprogramLookupByName.init(gpa);
errdefer name_lookup.deinit();
try name_lookup.ensureUnusedCapacity(@as(u32, @intCast(object.atoms.items.len)));
try debug_info.genSubprogramLookupByName(compile_unit, lookup, &name_lookup);
break :blk name_lookup;
} else null;
defer if (name_lookup) |*nl| nl.deinit();
for (object.atoms.items) |atom_index| {
const atom = self.getAtom(atom_index);
const stabs = try self.generateSymbolStabsForSymbol(
atom_index,
atom.getSymbolWithLoc(),
name_lookup,
&stabs_buf,
);
try locals.appendSlice(stabs);
var it = Atom.getInnerSymbolsIterator(self, atom_index);
while (it.next()) |sym_loc| {
const contained_stabs = try self.generateSymbolStabsForSymbol(
atom_index,
sym_loc,
name_lookup,
&stabs_buf,
);
try locals.appendSlice(contained_stabs);
}
}
// Close scope
try locals.append(.{
.n_strx = 0,
.n_type = macho.N_SO,
.n_sect = 0,
.n_desc = 0,
.n_value = 0,
});
}
fn generateSymbolStabsForSymbol(
self: *Zld,
atom_index: Atom.Index,
sym_loc: SymbolWithLoc,
lookup: ?DwarfInfo.SubprogramLookupByName,
buf: *[4]macho.nlist_64,
) ![]const macho.nlist_64 {
const gpa = self.gpa;
const object = self.objects.items[sym_loc.getFile().?];
const sym = self.getSymbol(sym_loc);
const sym_name = self.getSymbolName(sym_loc);
if (sym.n_strx == 0) return buf[0..0];
if (self.symbolIsTemp(sym_loc)) return buf[0..0];
// Fetch the section header only after the early-outs so we never index with
// `n_sect - 1` on a symbol we would skip anyway.
const header = self.sections.items(.header)[sym.n_sect - 1];
if (!header.isCode()) {
// Since we are not dealing with machine code, it's either a global or a static depending
// on the linkage scope.
if (sym.sect() and sym.ext()) {
// Global gets an N_GSYM stab type.
buf[0] = .{
.n_strx = try self.strtab.insert(gpa, sym_name),
.n_type = macho.N_GSYM,
.n_sect = sym.n_sect,
.n_desc = 0,
.n_value = 0,
};
} else {
// Local static gets an N_STSYM stab type.
buf[0] = .{
.n_strx = try self.strtab.insert(gpa, sym_name),
.n_type = macho.N_STSYM,
.n_sect = sym.n_sect,
.n_desc = 0,
.n_value = sym.n_value,
};
}
return buf[0..1];
}
const size: u64 = size: {
if (object.header.flags & macho.MH_SUBSECTIONS_VIA_SYMBOLS != 0) {
break :size self.getAtom(atom_index).size;
}
// Since we don't have subsections to work with, we need to infer the size of each function
// the slow way by scanning the debug info for matching symbol names and extracting
// the symbol's DWARF_AT_low_pc and DWARF_AT_high_pc values.
const source_sym = object.getSourceSymbol(sym_loc.sym_index) orelse return buf[0..0];
const subprogram = lookup.?.get(sym_name[1..]) orelse return buf[0..0];
if (subprogram.addr <= source_sym.n_value and source_sym.n_value < subprogram.addr + subprogram.size) {
break :size subprogram.size;
} else {
log.debug("no stab found for {s}", .{sym_name});
return buf[0..0];
}
};
buf[0] = .{
.n_strx = 0,
.n_type = macho.N_BNSYM,
.n_sect = sym.n_sect,
.n_desc = 0,
.n_value = sym.n_value,
};
buf[1] = .{
.n_strx = try self.strtab.insert(gpa, sym_name),
.n_type = macho.N_FUN,
.n_sect = sym.n_sect,
.n_desc = 0,
.n_value = sym.n_value,
};
buf[2] = .{
.n_strx = 0,
.n_type = macho.N_FUN,
.n_sect = 0,
.n_desc = 0,
.n_value = size,
};
buf[3] = .{
.n_strx = 0,
.n_type = macho.N_ENSYM,
.n_sect = sym.n_sect,
.n_desc = 0,
.n_value = size,
};
return buf;
}
fn logSegments(self: *Zld) void {
log.debug("segments:", .{});
for (self.segments.items, 0..) |segment, i| {
log.debug(" segment({d}): {s} @{x} ({x}), sizeof({x})", .{
i,
segment.segName(),
segment.fileoff,
segment.vmaddr,
segment.vmsize,
});
}
}
fn logSections(self: *Zld) void {
log.debug("sections:", .{});
for (self.sections.items(.header), 0..) |header, i| {
log.debug(" sect({d}): {s},{s} @{x} ({x}), sizeof({x})", .{
i + 1,
header.segName(),
header.sectName(),
header.offset,
header.addr,
header.size,
});
}
}
fn logSymAttributes(sym: macho.nlist_64, buf: []u8) []const u8 {
// Reset the scratch buffer here so that no attribute characters leak over from
// the previously logged symbol.
@memset(buf, '_');
if (sym.sect()) {
buf[0] = 's';
}
if (sym.ext()) {
if (sym.weakDef() or sym.pext()) {
buf[1] = 'w';
} else {
buf[1] = 'e';
}
}
if (sym.tentative()) {
buf[2] = 't';
}
if (sym.undf()) {
buf[3] = 'u';
}
return buf[0..];
}
fn logSymtab(self: *Zld) void {
var buf: [4]u8 = undefined;
const scoped_log = std.log.scoped(.symtab);
scoped_log.debug("locals:", .{});
for (self.objects.items, 0..) |object, id| {
scoped_log.debug(" object({d}): {s}", .{ id, object.name });
if (object.in_symtab == null) continue;
for (object.symtab, 0..) |sym, sym_id| {
scoped_log.debug(" %{d}: {s} @{x} in sect({d}), {s}", .{
sym_id,
object.getSymbolName(@as(u32, @intCast(sym_id))),
sym.n_value,
sym.n_sect,
logSymAttributes(sym, &buf),
});
}
}
scoped_log.debug(" object(-1)", .{});
for (self.locals.items, 0..) |sym, sym_id| {
if (sym.undf()) continue;
scoped_log.debug(" %{d}: {s} @{x} in sect({d}), {s}", .{
sym_id,
self.strtab.get(sym.n_strx).?,
sym.n_value,
sym.n_sect,
logSymAttributes(sym, &buf),
});
}
scoped_log.debug("exports:", .{});
for (self.globals.items, 0..) |global, i| {
const sym = self.getSymbol(global);
if (sym.undf()) continue;
if (sym.n_desc == MachO.N_DEAD) continue;
scoped_log.debug(" %{d}: {s} @{x} in sect({d}), {s} (def in object({?}))", .{
i,
self.getSymbolName(global),
sym.n_value,
sym.n_sect,
logSymAttributes(sym, &buf),
global.file,
});
}
scoped_log.debug("imports:", .{});
for (self.globals.items, 0..) |global, i| {
const sym = self.getSymbol(global);
if (!sym.undf()) continue;
if (sym.n_desc == MachO.N_DEAD) continue;
const ord = @divTrunc(sym.n_desc, macho.N_SYMBOL_RESOLVER);
scoped_log.debug(" %{d}: {s} @{x} in ord({d}), {s}", .{
i,
self.getSymbolName(global),
sym.n_value,
ord,
logSymAttributes(sym, &buf),
});
}
scoped_log.debug("GOT entries:", .{});
scoped_log.debug("{}", .{self.got_table});
scoped_log.debug("TLV pointers:", .{});
scoped_log.debug("{}", .{self.tlv_ptr_table});
scoped_log.debug("stubs entries:", .{});
scoped_log.debug("{}", .{self.stubs_table});
scoped_log.debug("thunks:", .{});
for (self.thunks.items, 0..) |thunk, i| {
scoped_log.debug(" thunk({d})", .{i});
const slice = thunk.targets.slice();
for (slice.items(.tag), slice.items(.target), 0..) |tag, target, j| {
const atom_index = @as(u32, @intCast(thunk.getStartAtomIndex() + j));
const atom = self.getAtom(atom_index);
const atom_sym = self.getSymbol(atom.getSymbolWithLoc());
const target_addr = switch (tag) {
.stub => self.getStubsEntryAddress(target).?,
.atom => self.getSymbol(target).n_value,
};
scoped_log.debug(" {d}@{x} => {s}({s}@{x})", .{
j,
atom_sym.n_value,
@tagName(tag),
self.getSymbolName(target),
target_addr,
});
}
}
}
fn logAtoms(self: *Zld) void {
log.debug("atoms:", .{});
const slice = self.sections.slice();
for (slice.items(.first_atom_index), 0..) |first_atom_index, sect_id| {
var atom_index = first_atom_index orelse continue;
const header = slice.items(.header)[sect_id];
log.debug("{s},{s}", .{ header.segName(), header.sectName() });
while (true) {
const atom = self.getAtom(atom_index);
self.logAtom(atom_index, log);
if (atom.next_index) |next_index| {
atom_index = next_index;
} else break;
}
}
}
pub fn logAtom(self: *Zld, atom_index: Atom.Index, logger: anytype) void {
if (!build_options.enable_logging) return;
const atom = self.getAtom(atom_index);
const sym = self.getSymbol(atom.getSymbolWithLoc());
const sym_name = self.getSymbolName(atom.getSymbolWithLoc());
logger.debug(" ATOM({d}, %{d}, '{s}') @ {x} (sizeof({x}), alignof({x})) in object({?}) in sect({d})", .{
atom_index,
atom.sym_index,
sym_name,
sym.n_value,
atom.size,
atom.alignment,
atom.getFile(),
sym.n_sect,
});
if (atom.getFile() != null) {
var it = Atom.getInnerSymbolsIterator(self, atom_index);
while (it.next()) |sym_loc| {
const inner = self.getSymbol(sym_loc);
const inner_name = self.getSymbolName(sym_loc);
const offset = Atom.calcInnerSymbolOffset(self, atom_index, sym_loc.sym_index);
logger.debug(" (%{d}, '{s}') @ {x} ({x})", .{
sym_loc.sym_index,
inner_name,
inner.n_value,
offset,
});
}
if (Atom.getSectionAlias(self, atom_index)) |sym_loc| {
const alias = self.getSymbol(sym_loc);
const alias_name = self.getSymbolName(sym_loc);
logger.debug(" (%{d}, '{s}') @ {x} ({x})", .{
sym_loc.sym_index,
alias_name,
alias.n_value,
0,
});
}
}
}
};
pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progress.Node) link.File.FlushError!void {
const tracy = trace(@src());
defer tracy.end();
const gpa = macho_file.base.allocator;
const options = &macho_file.base.options;
const target = options.target;
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
const directory = options.emit.?.directory; // Just an alias to make it shorter to type.
const full_out_path = try directory.join(arena, &[_][]const u8{options.emit.?.sub_path});
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
const module_obj_path: ?[]const u8 = if (options.module != null) blk: {
try macho_file.flushModule(comp, prog_node);
if (fs.path.dirname(full_out_path)) |dirname| {
break :blk try fs.path.join(arena, &.{ dirname, macho_file.base.intermediary_basename.? });
} else {
break :blk macho_file.base.intermediary_basename.?;
}
} else null;
var sub_prog_node = prog_node.start("MachO Flush", 0);
sub_prog_node.activate();
sub_prog_node.context.refresh();
defer sub_prog_node.end();
const cpu_arch = target.cpu.arch;
const os_tag = target.os.tag;
const abi = target.abi;
const is_lib = options.output_mode == .Lib;
const is_dyn_lib = options.link_mode == .Dynamic and is_lib;
const is_exe_or_dyn_lib = is_dyn_lib or options.output_mode == .Exe;
const stack_size = options.stack_size_override orelse 0;
const is_debug_build = options.optimize_mode == .Debug;
const gc_sections = options.gc_sections orelse !is_debug_build;
const id_symlink_basename = "zld.id";
var man: Cache.Manifest = undefined;
defer if (!options.disable_lld_caching) man.deinit();
var digest: [Cache.hex_digest_len]u8 = undefined;
if (!options.disable_lld_caching) {
man = comp.cache_parent.obtain();
// We are about to obtain this lock, so here we give other processes a chance first.
macho_file.base.releaseLock();
comptime assert(Compilation.link_hash_implementation_version == 10);
for (options.objects) |obj| {
_ = try man.addFile(obj.path, null);
man.hash.add(obj.must_link);
}
for (comp.c_object_table.keys()) |key| {
_ = try man.addFile(key.status.success.object_path, null);
}
try man.addOptionalFile(module_obj_path);
// We can skip hashing libc and libc++ components that we are in charge of building from Zig
// installation sources because they are always a product of the compiler version + target information.
man.hash.add(stack_size);
man.hash.addOptional(options.pagezero_size);
man.hash.addOptional(options.headerpad_size);
man.hash.add(options.headerpad_max_install_names);
man.hash.add(gc_sections);
man.hash.add(options.dead_strip_dylibs);
man.hash.add(options.strip);
man.hash.addListOfBytes(options.lib_dirs);
man.hash.addListOfBytes(options.framework_dirs);
try link.hashAddFrameworks(&man, options.frameworks);
man.hash.addListOfBytes(options.rpath_list);
if (is_dyn_lib) {
man.hash.addOptionalBytes(options.install_name);
man.hash.addOptional(options.version);
}
try link.hashAddSystemLibs(&man, options.system_libs);
man.hash.addOptionalBytes(options.sysroot);
man.hash.addListOfBytes(options.force_undefined_symbols.keys());
try man.addOptionalFile(options.entitlements);
// We don't actually care whether it's a cache hit or miss; we just
// need the digest and the lock.
_ = try man.hit();
digest = man.final();
var prev_digest_buf: [digest.len]u8 = undefined;
const prev_digest: []u8 = Cache.readSmallFile(
directory.handle,
id_symlink_basename,
&prev_digest_buf,
) catch |err| blk: {
log.debug("MachO Zld new_digest={s} error: {s}", .{
std.fmt.fmtSliceHexLower(&digest),
@errorName(err),
});
// Handle this as a cache miss.
break :blk prev_digest_buf[0..0];
};
if (mem.eql(u8, prev_digest, &digest)) {
// Hot diggity dog! The output binary is already there.
log.debug("MachO Zld digest={s} match - skipping invocation", .{
std.fmt.fmtSliceHexLower(&digest),
});
macho_file.base.lock = man.toOwnedLock();
return;
}
log.debug("MachO Zld prev_digest={s} new_digest={s}", .{
std.fmt.fmtSliceHexLower(prev_digest),
std.fmt.fmtSliceHexLower(&digest),
});
// We are about to change the output file to be different, so we invalidate the build hash now.
directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
error.FileNotFound => {},
else => |e| return e,
};
}
if (options.output_mode == .Obj) {
// LLD's MachO driver does not support the equivalent of `-r` so we do a simple file copy
// here. TODO: think carefully about how we can avoid this redundant operation when doing
// build-obj. See also the corresponding TODO in linkAsArchive.
const the_object_path = blk: {
if (options.objects.len != 0) {
break :blk options.objects[0].path;
}
if (comp.c_object_table.count() != 0)
break :blk comp.c_object_table.keys()[0].status.success.object_path;
if (module_obj_path) |p|
break :blk p;
// TODO I think this is unreachable. Audit this situation when solving the above TODO
// regarding eliding redundant object -> object transformations.
return error.NoObjectsToLink;
};
// This can happen when using --enable-cache and using the stage1 backend. In this case
// we can skip the file copy.
if (!mem.eql(u8, the_object_path, full_out_path)) {
try fs.cwd().copyFile(the_object_path, fs.cwd(), full_out_path, .{});
}
} else {
const sub_path = options.emit.?.sub_path;
const file = try directory.handle.createFile(sub_path, .{
.truncate = true,
.read = true,
.mode = link.determineMode(options.*),
});
defer file.close();
var zld = Zld{
.gpa = gpa,
.file = file,
.options = options,
};
defer zld.deinit();
// Index 0 is always a null symbol.
try zld.locals.append(gpa, .{
.n_strx = 0,
.n_type = 0,
.n_sect = 0,
.n_desc = 0,
.n_value = 0,
});
try zld.strtab.buffer.append(gpa, 0);
// Positional arguments to the linker such as object files and static archives.
var positionals = std.ArrayList(Compilation.LinkObject).init(arena);
try positionals.ensureUnusedCapacity(options.objects.len);
positionals.appendSliceAssumeCapacity(options.objects);
for (comp.c_object_table.keys()) |key| {
try positionals.append(.{ .path = key.status.success.object_path });
}
if (module_obj_path) |p| {
try positionals.append(.{ .path = p });
}
if (comp.compiler_rt_lib) |lib| {
try positionals.append(.{ .path = lib.full_object_path });
}
// libc++ dep
if (options.link_libcpp) {
try positionals.ensureUnusedCapacity(2);
positionals.appendAssumeCapacity(.{ .path = comp.libcxxabi_static_lib.?.full_object_path });
positionals.appendAssumeCapacity(.{ .path = comp.libcxx_static_lib.?.full_object_path });
}
var libs = std.StringArrayHashMap(link.SystemLib).init(arena);
{
const vals = options.system_libs.values();
try libs.ensureUnusedCapacity(vals.len);
for (vals) |v| libs.putAssumeCapacity(v.path.?, v);
}
{
try libs.ensureUnusedCapacity(options.frameworks.len);
for (options.frameworks) |v| libs.putAssumeCapacity(v.path, .{
.needed = v.needed,
.weak = v.weak,
.path = v.path,
});
}
try MachO.resolveLibSystem(arena, comp, options.sysroot, target, options.lib_dirs, &libs);
if (options.verbose_link) {
var argv = std.ArrayList([]const u8).init(arena);
try argv.append("zig");
try argv.append("ld");
if (is_exe_or_dyn_lib) {
try argv.append("-dynamic");
}
if (is_dyn_lib) {
try argv.append("-dylib");
if (options.install_name) |install_name| {
try argv.append("-install_name");
try argv.append(install_name);
}
}
if (options.sysroot) |syslibroot| {
try argv.append("-syslibroot");
try argv.append(syslibroot);
}
for (options.rpath_list) |rpath| {
try argv.append("-rpath");
try argv.append(rpath);
}
if (options.pagezero_size) |pagezero_size| {
try argv.append("-pagezero_size");
try argv.append(try std.fmt.allocPrint(arena, "0x{x}", .{pagezero_size}));
}
if (options.headerpad_size) |headerpad_size| {
try argv.append("-headerpad_size");
try argv.append(try std.fmt.allocPrint(arena, "0x{x}", .{headerpad_size}));
}
if (options.headerpad_max_install_names) {
try argv.append("-headerpad_max_install_names");
}
if (gc_sections) {
try argv.append("-dead_strip");
}
if (options.dead_strip_dylibs) {
try argv.append("-dead_strip_dylibs");
}
if (options.entry) |entry| {
try argv.append("-e");
try argv.append(entry);
}
for (options.objects) |obj| {
if (obj.must_link) {
try argv.append("-force_load");
}
try argv.append(obj.path);
}
for (comp.c_object_table.keys()) |key| {
try argv.append(key.status.success.object_path);
}
if (module_obj_path) |p| {
try argv.append(p);
}
if (comp.compiler_rt_lib) |lib| {
try argv.append(lib.full_object_path);
}
if (options.link_libcpp) {
try argv.append(comp.libcxxabi_static_lib.?.full_object_path);
try argv.append(comp.libcxx_static_lib.?.full_object_path);
}
try argv.append("-o");
try argv.append(full_out_path);
try argv.append("-lSystem");
try argv.append("-lc");
for (options.system_libs.keys()) |l_name| {
const info = options.system_libs.get(l_name).?;
const arg = if (info.needed)
try std.fmt.allocPrint(arena, "-needed-l{s}", .{l_name})
else if (info.weak)
try std.fmt.allocPrint(arena, "-weak-l{s}", .{l_name})
else
try std.fmt.allocPrint(arena, "-l{s}", .{l_name});
try argv.append(arg);
}
for (options.lib_dirs) |lib_dir| {
try argv.append(try std.fmt.allocPrint(arena, "-L{s}", .{lib_dir}));
}
for (options.frameworks) |framework| {
const name = std.fs.path.stem(framework.path);
const arg = if (framework.needed)
try std.fmt.allocPrint(arena, "-needed_framework {s}", .{name})
else if (framework.weak)
try std.fmt.allocPrint(arena, "-weak_framework {s}", .{name})
else
try std.fmt.allocPrint(arena, "-framework {s}", .{name});
try argv.append(arg);
}
for (options.framework_dirs) |framework_dir| {
try argv.append(try std.fmt.allocPrint(arena, "-F{s}", .{framework_dir}));
}
if (is_dyn_lib and (options.allow_shlib_undefined orelse false)) {
try argv.append("-undefined");
try argv.append("dynamic_lookup");
}
Compilation.dump_argv(argv.items);
}
var dependent_libs = std.fifo.LinearFifo(struct {
id: Dylib.Id,
parent: u16,
}, .Dynamic).init(arena);
for (positionals.items) |obj| {
const in_file = try std.fs.cwd().openFile(obj.path, .{});
defer in_file.close();
MachO.parsePositional(
&zld,
gpa,
in_file,
obj.path,
obj.must_link,
&dependent_libs,
options,
) catch |err| {
// TODO convert to error
log.err("{s}: parsing positional failed with err {s}", .{ obj.path, @errorName(err) });
continue;
};
}
for (libs.keys(), libs.values()) |path, lib| {
const in_file = try std.fs.cwd().openFile(path, .{});
defer in_file.close();
MachO.parseLibrary(
&zld,
gpa,
in_file,
path,
lib,
false,
&dependent_libs,
options,
) catch |err| {
// TODO convert to error
log.err("{s}: parsing library failed with err {s}", .{ path, @errorName(err) });
continue;
};
}
MachO.parseDependentLibs(&zld, gpa, &dependent_libs, options) catch |err| {
// TODO convert to error
log.err("parsing dependent libraries failed with err {s}", .{@errorName(err)});
};
try zld.resolveSymbols();
try macho_file.reportUndefined(&zld);
if (options.output_mode == .Exe) {
const entry_name = options.entry orelse load_commands.default_entry_point;
const global_index = zld.resolver.get(entry_name).?; // Error was flagged earlier
zld.entry_index = global_index;
}
for (zld.objects.items, 0..) |*object, object_id| {
try object.splitIntoAtoms(&zld, @as(u32, @intCast(object_id)));
}
if (gc_sections) {
try dead_strip.gcAtoms(&zld);
}
try zld.createDyldPrivateAtom();
try zld.createTentativeDefAtoms();
if (zld.options.output_mode == .Exe) {
const global = zld.getEntryPoint();
if (zld.getSymbol(global).undf()) {
// We do one additional check here in case the entry point was found in one of the dylibs.
// (I actually have no idea what this would imply but it is a possible outcome and so we
// support it.)
try zld.addStubEntry(global);
}
}
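// Scan relocations in every non-zerofill atom; this is what populates the GOT,
// stubs, and TLV pointer tables before section sizes are computed.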
for (zld.objects.items) |object| {
for (object.atoms.items) |atom_index| {
const atom = zld.getAtom(atom_index);
const sym = zld.getSymbol(atom.getSymbolWithLoc());
const header = zld.sections.items(.header)[sym.n_sect - 1];
if (header.isZerofill()) continue;
const relocs = Atom.getAtomRelocs(&zld, atom_index);
try Atom.scanAtomRelocs(&zld, atom_index, relocs);
}
}
try eh_frame.scanRelocs(&zld);
try UnwindInfo.scanRelocs(&zld);
if (zld.dyld_stub_binder_index) |index| try zld.addGotEntry(zld.globals.items[index]);
try zld.calcSectionSizes();
var unwind_info = UnwindInfo{ .gpa = zld.gpa };
defer unwind_info.deinit();
try unwind_info.collect(&zld);
try eh_frame.calcSectionSize(&zld, &unwind_info);
try unwind_info.calcSectionSize(&zld);
try zld.pruneAndSortSections();
try zld.createSegments();
try zld.allocateSegments();
try zld.allocateSpecialSymbols();
if (build_options.enable_logging) {
zld.logSymtab();
zld.logSegments();
zld.logSections();
zld.logAtoms();
}
try zld.writeAtoms();
if (zld.requiresThunks()) try zld.writeThunks();
try zld.writeDyldPrivateAtom();
if (zld.stubs_section_index) |_| {
try zld.writeStubs();
try zld.writeStubHelpers();
try zld.writeLaSymbolPtrs();
}
if (zld.got_section_index) |sect_id| try zld.writePointerEntries(sect_id, &zld.got_table);
if (zld.tlv_ptr_section_index) |sect_id| try zld.writePointerEntries(sect_id, &zld.tlv_ptr_table);
try eh_frame.write(&zld, &unwind_info);
try unwind_info.write(&zld);
try zld.writeLinkeditSegmentData();
// If the last section of the __DATA segment is a zerofill section, we need to
// ensure that the free space between the end of the last non-zerofill section of
// the __DATA segment and the beginning of the __LINKEDIT segment is zeroed out,
// as the loader will copy this file range into memory for a quicker zerofill
// operation.
if (zld.getSegmentByName("__DATA")) |data_seg_id| blk: {
var physical_zerofill_start: ?u64 = null;
const section_indexes = zld.getSectionIndexes(data_seg_id);
for (zld.sections.items(.header)[section_indexes.start..section_indexes.end]) |header| {
if (header.isZerofill() and header.size > 0) break;
physical_zerofill_start = header.offset + header.size;
} else break :blk;
const start = physical_zerofill_start orelse break :blk;
const linkedit = zld.getLinkeditSegmentPtr();
const size = math.cast(usize, linkedit.fileoff - start) orelse return error.Overflow;
if (size > 0) {
log.debug("zeroing out zerofill area of length {x} at {x}", .{ size, start });
var padding = try zld.gpa.alloc(u8, size);
defer zld.gpa.free(padding);
@memset(padding, 0);
try zld.file.pwriteAll(padding, start);
}
}
// Write code signature padding if required
const requires_codesig = blk: {
if (options.entitlements) |_| break :blk true;
if (cpu_arch == .aarch64 and (os_tag == .macos or abi == .simulator)) break :blk true;
break :blk false;
};
var codesig: ?CodeSignature = if (requires_codesig) blk: {
// Preallocate space for the code signature.
// We need to do this at this stage so that we have the load commands with proper values
// written out to the file.
// The most important thing here is to have the correct vm size and file size of
// the __LINKEDIT segment, which is where the code signature goes.
var codesig = CodeSignature.init(MachO.getPageSize(zld.options.target.cpu.arch));
codesig.code_directory.ident = fs.path.basename(full_out_path);
if (options.entitlements) |path| {
try codesig.addEntitlements(zld.gpa, path);
}
try zld.writeCodeSignaturePadding(&codesig);
break :blk codesig;
} else null;
defer if (codesig) |*csig| csig.deinit(zld.gpa);
// Write load commands
var lc_buffer = std.ArrayList(u8).init(arena);
const lc_writer = lc_buffer.writer();
try zld.writeSegmentHeaders(lc_writer);
try lc_writer.writeStruct(zld.dyld_info_cmd);
try lc_writer.writeStruct(zld.function_starts_cmd);
try lc_writer.writeStruct(zld.data_in_code_cmd);
try lc_writer.writeStruct(zld.symtab_cmd);
try lc_writer.writeStruct(zld.dysymtab_cmd);
try load_commands.writeDylinkerLC(lc_writer);
if (zld.options.output_mode == .Exe) {
const seg_id = zld.getSegmentByName("__TEXT").?;
const seg = zld.segments.items[seg_id];
const global = zld.getEntryPoint();
const sym = zld.getSymbol(global);
const addr: u64 = if (sym.undf())
// In this case, the symbol has been resolved in one of dylibs and so we point
// to the stub as its vmaddr value.
zld.getStubsEntryAddress(global).?
else
sym.n_value;
try lc_writer.writeStruct(macho.entry_point_command{
.entryoff = @as(u32, @intCast(addr - seg.vmaddr)),
.stacksize = options.stack_size_override orelse 0,
});
} else {
assert(zld.options.output_mode == .Lib);
try load_commands.writeDylibIdLC(zld.gpa, zld.options, lc_writer);
}
try load_commands.writeRpathLCs(zld.gpa, zld.options, lc_writer);
try lc_writer.writeStruct(macho.source_version_command{
.version = 0,
});
try load_commands.writeBuildVersionLC(zld.options, lc_writer);
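// Record where the uuid command will land so that writeUuid can patch the
// computed digest in after the rest of the file contents are final.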
const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + @as(u32, @intCast(lc_buffer.items.len));
try lc_writer.writeStruct(zld.uuid_cmd);
try load_commands.writeLoadDylibLCs(zld.dylibs.items, zld.referenced_dylibs.keys(), lc_writer);
if (requires_codesig) {
try lc_writer.writeStruct(zld.codesig_cmd);
}
const ncmds = load_commands.calcNumOfLCs(lc_buffer.items);
try zld.file.pwriteAll(lc_buffer.items, @sizeOf(macho.mach_header_64));
try zld.writeHeader(ncmds, @as(u32, @intCast(lc_buffer.items.len)));
try zld.writeUuid(comp, uuid_cmd_offset, requires_codesig);
if (codesig) |*csig| {
try zld.writeCodeSignature(comp, csig); // code signing always comes last
try MachO.invalidateKernelCache(directory.handle, zld.options.emit.?.sub_path);
}
}
if (!options.disable_lld_caching) {
// Update the file with the digest. If it fails we can continue; it only
// means that the next invocation will have an unnecessary cache miss.
Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
log.debug("failed to save linking hash digest file: {s}", .{@errorName(err)});
};
// Again failure here only means an unnecessary cache miss.
if (man.have_exclusive_lock) {
man.writeManifest() catch |err| {
log.debug("failed to write cache manifest when linking: {s}", .{@errorName(err)});
};
}
// We hang on to this lock so that the output file path can be used without
// other processes clobbering it.
macho_file.base.lock = man.toOwnedLock();
}
}