elf: add more prepwork for linking c++ objects

This commit is contained in:
Jakub Konka
2023-10-04 12:06:52 +02:00
parent 66f34b15e8
commit d6cec5a586
5 changed files with 710 additions and 107 deletions

View File

@@ -22,6 +22,9 @@ shdrs: std.ArrayListUnmanaged(elf.Elf64_Shdr) = .{},
phdr_to_shdr_table: std.AutoHashMapUnmanaged(u16, u16) = .{},
/// File offset into the shdr table.
shdr_table_offset: ?u64 = null,
/// Table of lists of atoms per output section.
/// This table is not used to track incrementally generated atoms.
output_sections: std.AutoArrayHashMapUnmanaged(u16, std.ArrayListUnmanaged(Atom.Index)) = .{},
/// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
/// Same order as in the file.
@@ -61,6 +64,7 @@ strtab: StringTable(.strtab) = .{},
/// Representation of the GOT table as committed to the file.
got: GotSection = .{},
rela_dyn: std.ArrayListUnmanaged(elf.Elf64_Rela) = .{},
/// Tracked section headers
text_section_index: ?u16 = null,
@@ -109,6 +113,9 @@ symbols_extra: std.ArrayListUnmanaged(u32) = .{},
resolver: std.AutoArrayHashMapUnmanaged(u32, Symbol.Index) = .{},
symbols_free_list: std.ArrayListUnmanaged(Symbol.Index) = .{},
has_text_reloc: bool = false,
num_ifunc_dynrelocs: usize = 0,
phdr_table_dirty: bool = false,
shdr_table_dirty: bool = false,
@@ -317,6 +324,10 @@ pub fn deinit(self: *Elf) void {
self.shdrs.deinit(gpa);
self.phdr_to_shdr_table.deinit(gpa);
self.phdrs.deinit(gpa);
for (self.output_sections.values()) |*list| {
list.deinit(gpa);
}
self.output_sections.deinit(gpa);
self.shstrtab.deinit(gpa);
self.strtab.deinit(gpa);
self.symbols.deinit(gpa);
@@ -358,6 +369,7 @@ pub fn deinit(self: *Elf) void {
self.comdat_groups.deinit(gpa);
self.comdat_groups_owners.deinit(gpa);
self.comdat_groups_table.deinit(gpa);
self.rela_dyn.deinit(gpa);
}
pub fn getDeclVAddr(self: *Elf, decl_index: Module.Decl.Index, reloc_info: link.File.RelocInfo) !u64 {
@@ -1249,6 +1261,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
for (self.objects.items) |index| {
try self.file(index).?.object.addAtomsToOutputSections(self);
}
try self.sortInitFini();
try self.updateSectionSizes();
try self.allocateSections();
@@ -1316,7 +1329,13 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
const code = try zig_module.codeAlloc(self, atom_index);
defer gpa.free(code);
const file_offset = shdr.sh_offset + atom_ptr.value - shdr.sh_addr;
try atom_ptr.resolveRelocsAlloc(self, code);
atom_ptr.resolveRelocsAlloc(self, code) catch |err| switch (err) {
// TODO
error.RelaxFail, error.InvalidInstruction, error.CannotEncode => {
log.err("relaxing intructions failed; TODO this should be a fatal linker error", .{});
},
else => |e| return e,
};
try self.base.file.?.pwriteAll(code, file_offset);
}
@@ -3460,6 +3479,69 @@ fn initSections(self: *Elf) !void {
}
}
/// Orders atoms within .preinit_array/.init_array/.fini_array and legacy
/// .ctors/.dtors output sections by constructor/destructor priority so that
/// initializers and finalizers run in the conventional order.
fn sortInitFini(self: *Elf) !void {
const gpa = self.base.allocator;
const Entry = struct {
priority: i32,
atom_index: Atom.Index,
pub fn lessThan(ctx: void, lhs: @This(), rhs: @This()) bool {
_ = ctx;
return lhs.priority < rhs.priority;
}
};
for (self.shdrs.items, 0..) |*shdr, shndx| {
// Only allocated sections can carry init/fini data.
if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
var is_init_fini = false;
var is_ctor_dtor = false;
switch (shdr.sh_type) {
elf.SHT_PREINIT_ARRAY,
elf.SHT_INIT_ARRAY,
elf.SHT_FINI_ARRAY,
=> is_init_fini = true,
else => {
// Legacy .ctors/.dtors sections are identified by name, not type.
const name = self.shstrtab.getAssumeExists(shdr.sh_name);
is_ctor_dtor = mem.indexOf(u8, name, ".ctors") != null or mem.indexOf(u8, name, ".dtors") != null;
},
}
if (!is_init_fini and !is_ctor_dtor) continue;
// Incrementally generated atoms are not tracked in output_sections,
// so a missing list simply means there is nothing to sort.
const atom_list = self.output_sections.getPtr(@intCast(shndx)) orelse continue;
var entries = std.ArrayList(Entry).init(gpa);
try entries.ensureTotalCapacityPrecise(atom_list.items.len);
defer entries.deinit();
for (atom_list.items) |atom_index| {
const atom_ptr = self.atom(atom_index).?;
const object = atom_ptr.file(self).?.object;
const priority = blk: {
// Atoms originating from crtbegin/crtend objects are pinned to the
// very start/end of the section regardless of any numeric priority.
if (is_ctor_dtor) {
if (mem.indexOf(u8, object.path, "crtbegin") != null) break :blk std.math.minInt(i32);
if (mem.indexOf(u8, object.path, "crtend") != null) break :blk std.math.maxInt(i32);
}
// Unnumbered .ctors/.dtors sort before all numbered ones (-1);
// unnumbered init/fini array entries sort last (maxInt).
const default: i32 = if (is_ctor_dtor) -1 else std.math.maxInt(i32);
const name = atom_ptr.name(self);
// The priority is the decimal suffix after the final '.',
// e.g. ".init_array.00050" -> 50.
var it = mem.splitBackwards(u8, name, ".");
const priority = std.fmt.parseUnsigned(u16, it.first(), 10) catch default;
break :blk priority;
};
entries.appendAssumeCapacity(.{ .priority = priority, .atom_index = atom_index });
}
mem.sort(Entry, entries.items, {}, Entry.lessThan);
// clearRetainingCapacity keeps the backing allocation, so re-appending
// the same number of atoms below cannot fail.
atom_list.clearRetainingCapacity();
for (entries.items) |entry| {
atom_list.appendAssumeCapacity(entry.atom_index);
}
}
}
fn sectionRank(self: *Elf, shdr: elf.Elf64_Shdr) u8 {
const name = self.shstrtab.getAssumeExists(shdr.sh_name);
const flags = shdr.sh_flags;
@@ -3926,6 +4008,8 @@ fn writeAtoms(self: *Elf) !void {
if (shdr.sh_type == elf.SHT_NULL) continue;
if (shdr.sh_type == elf.SHT_NOBITS) continue;
const atom_list = self.output_sections.get(@intCast(shndx)) orelse continue;
log.debug("writing atoms in '{s}' section", .{self.shstrtab.getAssumeExists(shdr.sh_name)});
const buffer = try gpa.alloc(u8, shdr.sh_size);
@@ -3937,8 +4021,32 @@ fn writeAtoms(self: *Elf) !void {
0;
@memset(buffer, padding_byte);
for (self.objects.items) |index| {
try self.file(index).?.object.writeAtoms(self, @intCast(shndx), buffer, &undefs);
for (atom_list.items) |atom_index| {
const atom_ptr = self.atom(atom_index).?;
assert(atom_ptr.flags.alive);
const object = atom_ptr.file(self).?.object;
const offset = atom_ptr.value - shdr.sh_addr;
log.debug("writing atom({d}) at 0x{x}", .{ atom_index, shdr.sh_offset + offset });
// TODO decompress directly into provided buffer
const out_code = buffer[offset..][0..atom_ptr.size];
const in_code = try object.codeDecompressAlloc(self, atom_index);
defer gpa.free(in_code);
@memcpy(out_code, in_code);
if (shdr.sh_flags & elf.SHF_ALLOC == 0) {
try atom_ptr.resolveRelocsNonAlloc(self, out_code, &undefs);
} else {
atom_ptr.resolveRelocsAlloc(self, out_code) catch |err| switch (err) {
// TODO
error.RelaxFail, error.InvalidInstruction, error.CannotEncode => {
log.err("relaxing intructions failed; TODO this should be a fatal linker error", .{});
},
else => |e| return e,
};
}
}
try self.base.file.?.pwriteAll(buffer, shdr.sh_offset);
@@ -4495,9 +4603,58 @@ pub fn sectionByName(self: *Elf, name: [:0]const u8) ?u16 {
} else return null;
}
pub fn calcNumIRelativeRelocs(self: *Elf) u64 {
_ = self;
unreachable; // TODO
/// Parameters for appending one entry to the dynamic relocation table
/// (see `addRelaDyn` / `addRelaDynAssumeCapacity`).
const RelaDyn = struct {
/// Becomes r_offset: address the relocation applies to.
offset: u64,
/// Dynamic symbol index; 0 for relocations that reference no symbol
/// (e.g. R_X86_64_RELATIVE — see `resolveDynAbsReloc`).
sym: u64 = 0,
/// Relocation type, e.g. elf.R_X86_64_64.
type: u32,
addend: i64 = 0,
};
/// Appends a single dynamic relocation entry, growing the table as needed.
/// For bulk insertion, reserve capacity once and use
/// `addRelaDynAssumeCapacity` instead.
pub fn addRelaDyn(self: *Elf, opts: RelaDyn) !void {
    // Fixed typo: `self.base.alloctor` -> `self.base.allocator`
    // (matches every other allocator access in this file).
    try self.rela_dyn.ensureUnusedCapacity(self.base.allocator, 1);
    self.addRelaDynAssumeCapacity(opts);
}
/// Appends a dynamic relocation entry without allocating; the caller must
/// have reserved capacity beforehand (see `addRelaDyn`).
pub fn addRelaDynAssumeCapacity(self: *Elf, opts: RelaDyn) void {
    // ELF64 packs the dynamic symbol index into the high 32 bits of r_info
    // and the relocation type into the low 32 bits.
    const r_info = (opts.sym << 32) | opts.type;
    const entry = elf.Elf64_Rela{
        .r_offset = opts.offset,
        .r_info = r_info,
        .r_addend = opts.addend,
    };
    self.rela_dyn.appendAssumeCapacity(entry);
}
/// Sorts .rela.dyn so that R_X86_64_RELATIVE entries come first and
/// R_X86_64_IRELATIVE entries last, with all other types in between;
/// ties are broken by symbol index, then by offset.
fn sortRelaDyn(self: *Elf) void {
    const SortCtx = struct {
        fn rank(entry: elf.Elf64_Rela) u2 {
            switch (entry.r_type()) {
                elf.R_X86_64_RELATIVE => return 0,
                elf.R_X86_64_IRELATIVE => return 2,
                else => return 1,
            }
        }
        pub fn lessThan(ctx: void, a: elf.Elf64_Rela, b: elf.Elf64_Rela) bool {
            _ = ctx;
            const rank_a = rank(a);
            const rank_b = rank(b);
            if (rank_a != rank_b) return rank_a < rank_b;
            if (a.r_sym() != b.r_sym()) return a.r_sym() < b.r_sym();
            return a.r_offset < b.r_offset;
        }
    };
    mem.sort(elf.Elf64_Rela, self.rela_dyn.items, {}, SortCtx.lessThan);
}
/// Returns the total number of R_X86_64_IRELATIVE dynamic relocations needed:
/// those counted during relocation scanning plus one per GOT entry whose
/// symbol resolves to an ifunc.
fn calcNumIRelativeRelocs(self: *Elf) usize {
    var total: usize = self.num_ifunc_dynrelocs;
    for (self.got.entries.items) |entry| {
        if (entry.tag == .got and self.symbol(entry.symbol_index).isIFunc(self)) {
            total += 1;
        }
    }
    return total;
}
pub fn atom(self: *Elf, atom_index: Atom.Index) ?*Atom {

View File

@@ -316,6 +316,7 @@ pub fn scanRelocsRequiresCode(self: Atom, elf_file: *Elf) bool {
}
pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype) !void {
const is_static = elf_file.isStatic();
const is_dyn_lib = elf_file.isDynLib();
const file_ptr = self.file(elf_file).?;
const rels = self.relocs(elf_file);
@@ -348,14 +349,23 @@ pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype
// Report an undefined symbol.
try self.reportUndefined(elf_file, symbol, symbol_index, rel, undefs);
if (symbol.isIFunc(elf_file)) {
symbol.flags.needs_got = true;
symbol.flags.needs_plt = true;
}
// While traversing relocations, mark symbols that require special handling such as
// pointer indirection via GOT, or a stub trampoline via PLT.
switch (rel.r_type()) {
elf.R_X86_64_64 => {},
elf.R_X86_64_64 => {
try self.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file);
},
elf.R_X86_64_32,
elf.R_X86_64_32S,
=> {},
=> {
try self.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file);
},
elf.R_X86_64_GOT32,
elf.R_X86_64_GOT64,
@@ -377,23 +387,14 @@ pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype
}
},
elf.R_X86_64_PC32 => {},
elf.R_X86_64_TPOFF32,
elf.R_X86_64_TPOFF64,
=> {
if (is_dyn_lib) {
// TODO
// self.picError(symbol, rel, elf_file);
}
elf.R_X86_64_PC32 => {
try self.scanReloc(symbol, rel, pcRelocAction(symbol, elf_file), elf_file);
},
elf.R_X86_64_TLSGD => {
// TODO verify followed by appropriate relocation such as PLT32 __tls_get_addr
if (elf_file.isStatic() or
(!symbol.flags.import and !is_dyn_lib))
{
if (is_static or (!symbol.flags.import and !is_dyn_lib)) {
// Relax if building with -static flag as __tls_get_addr() will not be present in libc.a
// We skip the next relocation.
i += 1;
@@ -405,9 +406,21 @@ pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype
}
},
elf.R_X86_64_TLSLD => {
// TODO verify followed by appropriate relocation such as PLT32 __tls_get_addr
if (is_static or !is_dyn_lib) {
// Relax if building with -static flag as __tls_get_addr() will not be present in libc.a
// We skip the next relocation.
i += 1;
} else {
elf_file.got.flags.needs_tlsld = true;
}
},
elf.R_X86_64_GOTTPOFF => {
const should_relax = blk: {
// if (!elf_file.options.relax or is_shared or symbol.flags.import) break :blk false;
if (is_dyn_lib or symbol.flags.import) break :blk false;
if (!x86_64.canRelaxGotTpOff(code.?[r_offset - 3 ..])) break :blk false;
break :blk true;
};
@@ -416,21 +429,245 @@ pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype
}
},
else => {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "fatal linker error: unhandled relocation type {}", .{
fmtRelocType(rel.r_type()),
});
try err.addNote(elf_file, "in {}:{s} at offset 0x{x}", .{
self.file(elf_file).?.fmtPath(),
self.name(elf_file),
r_offset,
});
elf.R_X86_64_GOTPC32_TLSDESC => {
const should_relax = is_static or (!is_dyn_lib and !symbol.flags.import);
if (!should_relax) {
symbol.flags.needs_tlsdesc = true;
}
},
elf.R_X86_64_TPOFF32,
elf.R_X86_64_TPOFF64,
=> {
if (is_dyn_lib) try self.reportPicError(symbol, rel, elf_file);
},
elf.R_X86_64_GOTOFF64,
elf.R_X86_64_DTPOFF32,
elf.R_X86_64_DTPOFF64,
elf.R_X86_64_SIZE32,
elf.R_X86_64_SIZE64,
elf.R_X86_64_TLSDESC_CALL,
=> {},
else => try self.reportUnhandledRelocError(rel, elf_file),
}
}
}
/// Applies a previously-decided `RelocAction` to the linker's bookkeeping:
/// sets PLT/GOT/COPYREL demand flags on the symbol, counts dynamic
/// relocations on the owning object, and tracks text relocations.
/// Only error reporting allocates, hence the explicit error set.
fn scanReloc(
self: Atom,
symbol: *Symbol,
rel: elf.Elf64_Rela,
action: RelocAction,
elf_file: *Elf,
) error{OutOfMemory}!void {
// A dynamic relocation landing in a non-writeable section is a text reloc.
const is_writeable = self.inputShdr(elf_file).sh_flags & elf.SHF_WRITE != 0;
const object = self.file(elf_file).?.object;
switch (action) {
.none => {},
// Unsupportable combination: tell the user which recompile flag helps.
.@"error" => if (symbol.isAbs(elf_file))
try self.reportNoPicError(symbol, rel, elf_file)
else
try self.reportPicError(symbol, rel, elf_file),
.copyrel => {
// -z nocopyreloc forbids copy relocations, so this degrades to an error.
if (elf_file.base.options.z_nocopyreloc) {
if (symbol.isAbs(elf_file))
try self.reportNoPicError(symbol, rel, elf_file)
else
try self.reportPicError(symbol, rel, elf_file);
} else {
symbol.flags.needs_copy_rel = true;
}
},
.dyn_copyrel => {
// Writeable section (or copy relocations disabled): emit a dynamic
// relocation instead of a copy relocation.
if (is_writeable or elf_file.base.options.z_nocopyreloc) {
if (!is_writeable) {
if (elf_file.base.options.z_notext) {
elf_file.has_text_reloc = true;
} else {
try self.reportTextRelocError(symbol, rel, elf_file);
}
}
object.num_dynrelocs += 1;
} else {
symbol.flags.needs_copy_rel = true;
}
},
.plt => {
symbol.flags.needs_plt = true;
},
.cplt => {
// Canonical PLT entry (see Symbol.Flags.is_canonical).
symbol.flags.needs_plt = true;
symbol.flags.is_canonical = true;
},
.dyn_cplt => {
// Dynamic relocation if writeable, canonical PLT otherwise.
if (is_writeable) {
object.num_dynrelocs += 1;
} else {
symbol.flags.needs_plt = true;
symbol.flags.is_canonical = true;
}
},
.dynrel, .baserel, .ifunc => {
// All three variants emit a dynamic relocation; against read-only
// sections this is only allowed when -z notext permits text relocs.
if (!is_writeable) {
if (elf_file.base.options.z_notext) {
elf_file.has_text_reloc = true;
} else {
try self.reportTextRelocError(symbol, rel, elf_file);
}
}
object.num_dynrelocs += 1;
if (action == .ifunc) elf_file.num_ifunc_dynrelocs += 1;
},
}
}
/// The decision of how a relocation against a given symbol is materialized.
/// Produced by the *RelocAction tables below, consumed by `scanReloc`
/// (bookkeeping side) and `resolveDynAbsReloc` (write side).
const RelocAction = enum {
/// Resolve fully at link time; nothing extra required.
none,
/// Impossible combination; reported as a fatal linker error.
@"error",
/// Require a copy relocation for the symbol.
copyrel,
/// Copy relocation, unless the section is writeable (or -z nocopyreloc
/// is set), in which case a dynamic relocation is emitted instead.
dyn_copyrel,
/// Require a PLT entry.
plt,
/// Dynamic relocation if the section is writeable, canonical PLT otherwise.
dyn_cplt,
/// Require a canonical PLT entry.
cplt,
/// Emit a symbol-based dynamic relocation (R_X86_64_64).
dynrel,
/// Emit a base-relative R_X86_64_RELATIVE dynamic relocation.
baserel,
/// Emit an R_X86_64_IRELATIVE dynamic relocation for an ifunc resolver.
ifunc,
};
/// Action table for PC-relative relocations (used for R_X86_64_PC32 in
/// `scanRelocs`). Row = output kind (see `outputType`), column = symbol
/// kind (see `dataType`).
fn pcRelocAction(symbol: *const Symbol, elf_file: *Elf) RelocAction {
// zig fmt: off
const table: [3][4]RelocAction = .{
// Abs Local Import data Import func
.{ .@"error", .none, .@"error", .plt }, // Shared object
.{ .@"error", .none, .copyrel, .plt }, // PIE
.{ .none, .none, .copyrel, .cplt }, // Non-PIE
};
// zig fmt: on
const output = outputType(elf_file);
const data = dataType(symbol, elf_file);
return table[output][data];
}
/// Action table for absolute relocations that cannot be turned into dynamic
/// relocations (contrast with `dynAbsRelocAction`). Row = output kind
/// (see `outputType`), column = symbol kind (see `dataType`).
/// NOTE(review): no caller is visible in this diff — confirm intended use.
fn absRelocAction(symbol: *const Symbol, elf_file: *Elf) RelocAction {
// zig fmt: off
const table: [3][4]RelocAction = .{
// Abs Local Import data Import func
.{ .none, .@"error", .@"error", .@"error" }, // Shared object
.{ .none, .@"error", .@"error", .@"error" }, // PIE
.{ .none, .none, .copyrel, .cplt }, // Non-PIE
};
// zig fmt: on
const output = outputType(elf_file);
const data = dataType(symbol, elf_file);
return table[output][data];
}
/// Action table for absolute relocations that may be converted into dynamic
/// relocations (R_X86_64_64/32/32S in `scanRelocs` and `resolveRelocsAlloc`).
/// Ifunc symbols always take the .ifunc path regardless of the table.
/// Row = output kind (see `outputType`), column = symbol kind (see `dataType`).
fn dynAbsRelocAction(symbol: *const Symbol, elf_file: *Elf) RelocAction {
if (symbol.isIFunc(elf_file)) return .ifunc;
// zig fmt: off
const table: [3][4]RelocAction = .{
// Abs Local Import data Import func
.{ .none, .baserel, .dynrel, .dynrel }, // Shared object
.{ .none, .baserel, .dynrel, .dynrel }, // PIE
.{ .none, .none, .dyn_copyrel, .dyn_cplt }, // Non-PIE
};
// zig fmt: on
const output = outputType(elf_file);
const data = dataType(symbol, elf_file);
return table[output][data];
}
/// Row index into the RelocAction tables:
/// 0 = shared object, 1 = PIE executable, 2 = non-PIE executable.
/// Relocatable object output (.Obj) never reaches relocation scanning here.
fn outputType(elf_file: *Elf) u2 {
    switch (elf_file.base.options.output_mode) {
        .Obj => unreachable,
        .Lib => return 0,
        .Exe => {
            if (elf_file.base.options.pie) return 1;
            return 2;
        },
    }
}
/// Column index into the RelocAction tables:
/// 0 = absolute symbol, 1 = local (non-imported), 2 = imported data,
/// 3 = imported function.
fn dataType(symbol: *const Symbol, elf_file: *Elf) u2 {
    if (symbol.isAbs(elf_file)) return 0;
    if (!symbol.flags.import) return 1;
    return if (symbol.type(elf_file) == elf.STT_FUNC) 3 else 2;
}
/// Emits a diagnostic (with a source-location note) for a relocation type
/// this linker does not yet handle. Does not abort the link itself; only
/// allocation of the diagnostic can fail.
fn reportUnhandledRelocError(self: Atom, rel: elf.Elf64_Rela, elf_file: *Elf) error{OutOfMemory}!void {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "fatal linker error: unhandled relocation type {} at offset 0x{x}", .{
fmtRelocType(rel.r_type()),
rel.r_offset,
});
try err.addNote(elf_file, "in {}:{s}", .{
self.file(elf_file).?.fmtPath(),
self.name(elf_file),
});
}
/// Emits a diagnostic for a dynamic relocation against a read-only (text)
/// section when -z notext is not in effect (see `scanReloc`).
fn reportTextRelocError(
self: Atom,
symbol: *const Symbol,
rel: elf.Elf64_Rela,
elf_file: *Elf,
) error{OutOfMemory}!void {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "relocation at offset 0x{x} against symbol '{s}' cannot be used", .{
rel.r_offset,
symbol.name(elf_file),
});
try err.addNote(elf_file, "in {}:{s}", .{
self.file(elf_file).?.fmtPath(),
self.name(elf_file),
});
}
/// Emits a diagnostic for a relocation that cannot be used in
/// position-independent output, suggesting a rebuild with -fPIC.
fn reportPicError(
self: Atom,
symbol: *const Symbol,
rel: elf.Elf64_Rela,
elf_file: *Elf,
) error{OutOfMemory}!void {
var err = try elf_file.addErrorWithNotes(2);
try err.addMsg(elf_file, "relocation at offset 0x{x} against symbol '{s}' cannot be used", .{
rel.r_offset,
symbol.name(elf_file),
});
try err.addNote(elf_file, "in {}:{s}", .{
self.file(elf_file).?.fmtPath(),
self.name(elf_file),
});
try err.addNote(elf_file, "recompile with -fPIC", .{});
}
/// Emits a diagnostic for a relocation (against an absolute symbol) that
/// cannot be used in this output, suggesting a rebuild with -fno-PIC.
/// Counterpart of `reportPicError`.
fn reportNoPicError(
self: Atom,
symbol: *const Symbol,
rel: elf.Elf64_Rela,
elf_file: *Elf,
) error{OutOfMemory}!void {
var err = try elf_file.addErrorWithNotes(2);
try err.addMsg(elf_file, "relocation at offset 0x{x} against symbol '{s}' cannot be used", .{
rel.r_offset,
symbol.name(elf_file),
});
try err.addNote(elf_file, "in {}:{s}", .{
self.file(elf_file).?.fmtPath(),
self.name(elf_file),
});
try err.addNote(elf_file, "recompile with -fno-PIC", .{});
}
// This function will report any undefined non-weak symbols that are not imports.
fn reportUndefined(
self: Atom,
@@ -504,7 +741,6 @@ pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) !void {
const TP = @as(i64, @intCast(elf_file.tpAddress()));
// Address of the dynamic thread pointer.
const DTP = @as(i64, @intCast(elf_file.dtpAddress()));
_ = DTP;
relocs_log.debug(" {s}: {x}: [{x} => {x}] G({x}) ({s})", .{
fmtRelocType(r_type),
@@ -520,18 +756,20 @@ pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) !void {
switch (rel.r_type()) {
elf.R_X86_64_NONE => unreachable,
elf.R_X86_64_64 => try cwriter.writeIntLittle(i64, S + A),
elf.R_X86_64_32 => try cwriter.writeIntLittle(u32, @as(u32, @truncate(@as(u64, @intCast(S + A))))),
elf.R_X86_64_32S => try cwriter.writeIntLittle(i32, @as(i32, @truncate(S + A))),
elf.R_X86_64_64 => {
try self.resolveDynAbsReloc(
target,
rel,
dynAbsRelocAction(target, elf_file),
elf_file,
cwriter,
);
},
elf.R_X86_64_PLT32,
elf.R_X86_64_PC32,
=> try cwriter.writeIntLittle(i32, @as(i32, @intCast(S + A - P))),
elf.R_X86_64_GOT32 => try cwriter.writeIntLittle(u32, @as(u32, @intCast(G + GOT + A))),
elf.R_X86_64_GOT64 => try cwriter.writeIntLittle(u64, @as(u64, @intCast(G + GOT + A))),
elf.R_X86_64_GOTPCREL => try cwriter.writeIntLittle(i32, @as(i32, @intCast(G + GOT + A - P))),
elf.R_X86_64_GOTPC32 => try cwriter.writeIntLittle(i32, @as(i32, @intCast(GOT + A - P))),
elf.R_X86_64_GOTPC64 => try cwriter.writeIntLittle(i64, GOT + A - P),
@@ -554,18 +792,25 @@ pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) !void {
try cwriter.writeIntLittle(i32, @as(i32, @intCast(G + GOT + A - P)));
},
elf.R_X86_64_32 => try cwriter.writeIntLittle(u32, @as(u32, @truncate(@as(u64, @intCast(S + A))))),
elf.R_X86_64_32S => try cwriter.writeIntLittle(i32, @as(i32, @truncate(S + A))),
elf.R_X86_64_GOT32 => try cwriter.writeIntLittle(u32, @as(u32, @intCast(G + GOT + A))),
elf.R_X86_64_GOT64 => try cwriter.writeIntLittle(u64, @as(u64, @intCast(G + GOT + A))),
elf.R_X86_64_TPOFF32 => try cwriter.writeIntLittle(i32, @as(i32, @truncate(S + A - TP))),
elf.R_X86_64_TPOFF64 => try cwriter.writeIntLittle(i64, S + A - TP),
elf.R_X86_64_DTPOFF32 => try cwriter.writeIntLittle(i32, @as(i32, @truncate(S + A - DTP))),
elf.R_X86_64_DTPOFF64 => try cwriter.writeIntLittle(i64, S + A - DTP),
elf.R_X86_64_TLSGD => {
if (target.flags.has_tlsgd) {
// TODO
// const S_ = @as(i64, @intCast(target.tlsGdAddress(elf_file)));
// try cwriter.writeIntLittle(i32, @as(i32, @intCast(S_ + A - P)));
const S_ = @as(i64, @intCast(target.tlsGdAddress(elf_file)));
try cwriter.writeIntLittle(i32, @as(i32, @intCast(S_ + A - P)));
} else if (target.flags.has_gottp) {
// TODO
// const S_ = @as(i64, @intCast(target.getGotTpAddress(elf_file)));
// try relaxTlsGdToIe(relocs[i .. i + 2], @intCast(S_ - P), elf_file, &stream);
const S_ = @as(i64, @intCast(target.gotTpAddress(elf_file)));
try x86_64.relaxTlsGdToIe(self, rels[i .. i + 2], @intCast(S_ - P), elf_file, &stream);
i += 1;
} else {
try x86_64.relaxTlsGdToLe(
@@ -579,11 +824,42 @@ pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) !void {
}
},
elf.R_X86_64_TLSLD => {
if (elf_file.got.tlsld_index) |entry_index| {
const tlsld_entry = elf_file.got.entries.items[entry_index];
const S_ = @as(i64, @intCast(tlsld_entry.address(elf_file)));
try cwriter.writeIntLittle(i32, @as(i32, @intCast(S_ + A - P)));
} else {
try x86_64.relaxTlsLdToLe(
self,
rels[i .. i + 2],
@as(i32, @intCast(TP - @as(i64, @intCast(elf_file.tlsAddress())))),
elf_file,
&stream,
);
i += 1;
}
},
elf.R_X86_64_GOTPC32_TLSDESC => {
if (target.flags.has_tlsdesc) {
const S_ = @as(i64, @intCast(target.tlsDescAddress(elf_file)));
try cwriter.writeIntLittle(i32, @as(i32, @intCast(S_ + A - P)));
} else {
try x86_64.relaxGotPcTlsDesc(code[rel.r_offset - 3 ..]);
try cwriter.writeIntLittle(i32, @as(i32, @intCast(S - TP)));
}
},
elf.R_X86_64_TLSDESC_CALL => if (!target.flags.has_tlsdesc) {
// call -> nop
try cwriter.writeAll(&.{ 0x66, 0x90 });
},
elf.R_X86_64_GOTTPOFF => {
if (target.flags.has_gottp) {
// TODO
// const S_ = @as(i64, @intCast(target.gotTpAddress(elf_file)));
// try cwriter.writeIntLittle(i32, @as(i32, @intCast(S_ + A - P)));
const S_ = @as(i64, @intCast(target.gotTpAddress(elf_file)));
try cwriter.writeIntLittle(i32, @as(i32, @intCast(S_ + A - P)));
} else {
x86_64.relaxGotTpOff(code[r_offset - 3 ..]) catch unreachable;
try cwriter.writeIntLittle(i32, @as(i32, @intCast(S - TP)));
@@ -595,6 +871,98 @@ pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) !void {
}
}
/// Resolves a word-size absolute relocation according to its precomputed
/// action: either writes the final value in place, or appends a dynamic
/// relocation and writes the addend/value via `applyDynamicReloc`.
/// Capacity for the appends is reserved up front so the AssumeCapacity
/// variants below cannot fail.
/// NOTE(review): the .none/.copyrel/.cplt branches truncate to i32 even
/// though R_X86_64_64 (8-byte field) is routed here from resolveRelocsAlloc
/// — confirm the upper 4 bytes are handled correctly in that case.
fn resolveDynAbsReloc(
self: Atom,
target: *const Symbol,
rel: elf.Elf64_Rela,
action: RelocAction,
elf_file: *Elf,
writer: anytype,
) !void {
// P = place being relocated, A = addend, S = symbol address.
const P = self.value + rel.r_offset;
const A = rel.r_addend;
const S = @as(i64, @intCast(target.address(.{}, elf_file)));
const is_writeable = self.inputShdr(elf_file).sh_flags & elf.SHF_WRITE != 0;
const object = self.file(elf_file).?.object;
try elf_file.rela_dyn.ensureUnusedCapacity(elf_file.base.allocator, object.num_dynrelocs);
switch (action) {
// These actions were already converted into PLT entries / reported as
// errors during scanning and must not reach the write phase.
.@"error",
.plt,
=> unreachable,
.copyrel,
.cplt,
.none,
=> try writer.writeIntLittle(i32, @as(i32, @truncate(S + A))),
.dyn_copyrel => {
// Mirrors the decision in scanReloc: writeable (or -z nocopyreloc)
// means a symbol-based dynamic relocation, otherwise the value was
// satisfied by a copy relocation and can be written statically.
if (is_writeable or elf_file.base.options.z_nocopyreloc) {
elf_file.addRelaDynAssumeCapacity(.{
.offset = P,
.sym = target.extra(elf_file).?.dynamic,
.type = elf.R_X86_64_64,
.addend = A,
});
try applyDynamicReloc(A, elf_file, writer);
} else {
try writer.writeIntLittle(i32, @as(i32, @truncate(S + A)));
}
},
.dyn_cplt => {
if (is_writeable) {
elf_file.addRelaDynAssumeCapacity(.{
.offset = P,
.sym = target.extra(elf_file).?.dynamic,
.type = elf.R_X86_64_64,
.addend = A,
});
try applyDynamicReloc(A, elf_file, writer);
} else {
try writer.writeIntLittle(i32, @as(i32, @truncate(S + A)));
}
},
.dynrel => {
// Symbol-based dynamic relocation resolved by the dynamic linker.
elf_file.addRelaDynAssumeCapacity(.{
.offset = P,
.sym = target.extra(elf_file).?.dynamic,
.type = elf.R_X86_64_64,
.addend = A,
});
try applyDynamicReloc(A, elf_file, writer);
},
.baserel => {
// Base-relative relocation: no symbol, the full value goes in the addend.
elf_file.addRelaDynAssumeCapacity(.{
.offset = P,
.type = elf.R_X86_64_RELATIVE,
.addend = S + A,
});
try applyDynamicReloc(S + A, elf_file, writer);
},
.ifunc => {
// The addend points at the resolver, hence .plt = false for the address.
const S_ = @as(i64, @intCast(target.address(.{ .plt = false }, elf_file)));
elf_file.addRelaDynAssumeCapacity(.{
.offset = P,
.type = elf.R_X86_64_IRELATIVE,
.addend = S_ + A,
});
try applyDynamicReloc(S_ + A, elf_file, writer);
},
}
}
/// Writes the 8-byte value a dynamic relocation will produce directly into
/// the output image.
/// NOTE(review): the commented-out guard suggests this is meant to become
/// conditional on an `apply_dynamic_relocs` option — confirm before relying
/// on it being unconditional.
fn applyDynamicReloc(value: i64, elf_file: *Elf, writer: anytype) !void {
_ = elf_file;
// if (elf_file.options.apply_dynamic_relocs) {
try writer.writeIntLittle(i64, value);
// }
}
pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: anytype) !void {
relocs_log.debug("0x{x}: {s}", .{ self.value, self.name(elf_file) });
@@ -682,17 +1050,7 @@ pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: any
const size = @as(i64, @intCast(target.elfSym(elf_file).st_size));
try cwriter.writeIntLittle(i64, @as(i64, @intCast(size + A)));
},
else => {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "fatal linker error: unhandled relocation type {}", .{
fmtRelocType(r_type),
});
try err.addNote(elf_file, "in {}:{s} at offset 0x{x}", .{
self.file(elf_file).?.fmtPath(),
self.name(elf_file),
r_offset,
});
},
else => try self.reportUnhandledRelocError(rel, elf_file),
}
}
}
@@ -854,6 +1212,95 @@ const x86_64 = struct {
}
}
/// Rewrites a general-dynamic TLS sequence into an initial-exec one:
/// `movq %fs:0,%rax` followed by an add of the symbol's GOTTPOFF GOT slot.
/// `rels` must hold the TLSGD relocation and its follow-up relocation;
/// `value` is the GOT-slot displacement relative to P (caller passes S_ - P).
pub fn relaxTlsGdToIe(
self: Atom,
rels: []align(1) const elf.Elf64_Rela,
value: i32,
elf_file: *Elf,
stream: anytype,
) !void {
assert(rels.len == 2);
const writer = stream.writer();
switch (rels[1].r_type()) {
elf.R_X86_64_PC32,
elf.R_X86_64_PLT32,
=> {
var insts = [_]u8{
0x64, 0x48, 0x8b, 0x04, 0x25, 0, 0, 0, 0, // movq %fs:0,%rax
0x48, 0x03, 0x05, 0, 0, 0, 0, // add foo@gottpoff(%rip), %rax
};
// The add's imm32 lives at byte offset 12; rebase the rip-relative
// displacement by the 12 bytes that precede it in the new sequence.
std.mem.writeIntLittle(i32, insts[12..][0..4], value - 12);
// Back up over the already-consumed displacement bytes so the new
// sequence overwrites the original GD code in place.
// NOTE(review): offsets assume the standard GD code sequence.
try stream.seekBy(-4);
try writer.writeAll(&insts);
},
else => {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "fatal linker error: rewrite {} when followed by {}", .{
fmtRelocType(rels[0].r_type()),
fmtRelocType(rels[1].r_type()),
});
try err.addNote(elf_file, "in {}:{s} at offset 0x{x}", .{
self.file(elf_file).?.fmtPath(),
self.name(elf_file),
rels[0].r_offset,
});
},
}
}
/// Rewrites a local-dynamic TLS sequence into a local-exec one
/// (`xor %eax,%eax; mov %fs:(%rax),%rax; sub $tls_size,%rax`).
/// `rels` must hold the TLSLD relocation and its follow-up relocation;
/// `value` is the negated TLS block offset (caller passes TP - tls address).
pub fn relaxTlsLdToLe(
self: Atom,
rels: []align(1) const elf.Elf64_Rela,
value: i32,
elf_file: *Elf,
stream: anytype,
) !void {
assert(rels.len == 2);
const writer = stream.writer();
switch (rels[1].r_type()) {
elf.R_X86_64_PC32,
elf.R_X86_64_PLT32,
=> {
var insts = [_]u8{
0x31, 0xc0, // xor %eax, %eax
0x64, 0x48, 0x8b, 0, // mov %fs:(%rax), %rax
0x48, 0x2d, 0, 0, 0, 0, // sub $tls_size, %rax
};
// The sub's imm32 lives at byte offset 8.
std.mem.writeIntLittle(i32, insts[8..][0..4], value);
// Overwrite the original LD sequence in place.
// NOTE(review): offsets assume the standard LD code sequence.
try stream.seekBy(-3);
try writer.writeAll(&insts);
},
elf.R_X86_64_GOTPCREL,
elf.R_X86_64_GOTPCRELX,
=> {
// GOT-indirect follow-up is one byte longer; pad with a nop to keep
// the rewritten sequence the same size as the original.
var insts = [_]u8{
0x31, 0xc0, // xor %eax, %eax
0x64, 0x48, 0x8b, 0, // mov %fs:(%rax), %rax
0x48, 0x2d, 0, 0, 0, 0, // sub $tls_size, %rax
0x90, // nop
};
std.mem.writeIntLittle(i32, insts[8..][0..4], value);
try stream.seekBy(-3);
try writer.writeAll(&insts);
},
else => {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "fatal linker error: rewrite {} when followed by {}", .{
fmtRelocType(rels[0].r_type()),
fmtRelocType(rels[1].r_type()),
});
try err.addNote(elf_file, "in {}:{s} at offset 0x{x}", .{
self.file(elf_file).?.fmtPath(),
self.name(elf_file),
rels[0].r_offset,
});
},
}
}
pub fn canRelaxGotTpOff(code: []const u8) bool {
const old_inst = disassemble(code) orelse return false;
switch (old_inst.encoding.mnemonic) {
@@ -885,6 +1332,22 @@ const x86_64 = struct {
}
}
/// Relaxes a GOTPC32_TLSDESC `lea` into a `mov $imm32, reg` so the TLS
/// descriptor access can be resolved statically; the caller then writes the
/// real immediate (S - TP) over the placeholder (see resolveRelocsAlloc).
/// Returns error.RelaxFail when the instruction cannot be rewritten in place.
pub fn relaxGotPcTlsDesc(code: []u8) !void {
const old_inst = disassemble(code) orelse return error.RelaxFail;
switch (old_inst.encoding.mnemonic) {
.lea => {
const inst = try Instruction.new(old_inst.prefix, .mov, &.{
old_inst.ops[0],
// TODO: hack to force imm32s in the assembler
.{ .imm = Immediate.s(-129) },
});
relocs_log.debug(" relaxing {} => {}", .{ old_inst.encoding, inst.encoding });
encode(&.{inst}, code) catch return error.RelaxFail;
},
else => return error.RelaxFail,
}
}
pub fn relaxTlsGdToLe(
self: Atom,
rels: []align(1) const elf.Elf64_Rela,

View File

@@ -20,7 +20,6 @@ cies: std.ArrayListUnmanaged(Cie) = .{},
alive: bool = true,
num_dynrelocs: u32 = 0,
output_sections: std.AutoArrayHashMapUnmanaged(u16, std.ArrayListUnmanaged(Atom.Index)) = .{},
output_symtab_size: Elf.SymtabSize = .{},
pub fn isObject(file: std.fs.File) bool {
@@ -43,10 +42,6 @@ pub fn deinit(self: *Object, allocator: Allocator) void {
self.comdat_groups.deinit(allocator);
self.fdes.deinit(allocator);
self.cies.deinit(allocator);
for (self.output_sections.values()) |*list| {
list.deinit(allocator);
}
self.output_sections.deinit(allocator);
}
pub fn parse(self: *Object, elf_file: *Elf) !void {
@@ -635,7 +630,7 @@ pub fn addAtomsToOutputSections(self: *Object, elf_file: *Elf) !void {
if (shdr.sh_type == elf.SHT_NOBITS) continue;
const gpa = elf_file.base.allocator;
const gop = try self.output_sections.getOrPut(gpa, atom.output_section_index);
const gop = try elf_file.output_sections.getOrPut(gpa, atom.output_section_index);
if (!gop.found_existing) gop.value_ptr.* = .{};
try gop.value_ptr.append(gpa, atom_index);
}
@@ -680,28 +675,6 @@ pub fn allocateAtoms(self: Object, elf_file: *Elf) void {
}
}
pub fn writeAtoms(self: Object, elf_file: *Elf, output_section_index: u16, buffer: []u8, undefs: anytype) !void {
const gpa = elf_file.base.allocator;
const atom_list = self.output_sections.get(output_section_index) orelse return;
const shdr = elf_file.shdrs.items[output_section_index];
for (atom_list.items) |atom_index| {
const atom = elf_file.atom(atom_index).?;
assert(atom.flags.alive);
const offset = atom.value - shdr.sh_addr;
log.debug("writing atom({d}) at 0x{x}", .{ atom_index, shdr.sh_offset + offset });
// TODO decompress directly into provided buffer
const out_code = buffer[offset..][0..atom.size];
const in_code = try self.codeDecompressAlloc(elf_file, atom_index);
defer gpa.free(in_code);
@memcpy(out_code, in_code);
if (shdr.sh_flags & elf.SHF_ALLOC == 0)
try atom.resolveRelocsNonAlloc(elf_file, out_code, undefs)
else
try atom.resolveRelocsAlloc(elf_file, out_code);
}
}
pub fn updateSymtabSize(self: *Object, elf_file: *Elf) void {
for (self.locals()) |local_index| {
const local = elf_file.symbol(local_index);

View File

@@ -128,23 +128,26 @@ pub fn getOrCreateGotEntry(symbol: *Symbol, symbol_index: Index, elf_file: *Elf)
return .{ .found_existing = false, .index = index };
}
// pub fn tlsGdAddress(symbol: Symbol, elf_file: *Elf) u64 {
// if (!symbol.flags.tlsgd) return 0;
// const extra = symbol.getExtra(elf_file).?;
// return elf_file.getGotEntryAddress(extra.tlsgd);
// }
/// Returns the address of this symbol's TLSGD GOT slot, or 0 if none was
/// allocated for it.
pub fn tlsGdAddress(symbol: Symbol, elf_file: *Elf) u64 {
    if (!symbol.flags.has_tlsgd) return 0;
    const info = symbol.extra(elf_file).?;
    return elf_file.got.entries.items[info.tlsgd].address(elf_file);
}
// pub fn gotTpAddress(symbol: Symbol, elf_file: *Elf) u64 {
// if (!symbol.flags.gottp) return 0;
// const extra = symbol.getExtra(elf_file).?;
// return elf_file.getGotEntryAddress(extra.gottp);
// }
/// Returns the address of this symbol's GOTTPOFF GOT slot, or 0 if none was
/// allocated for it.
pub fn gotTpAddress(symbol: Symbol, elf_file: *Elf) u64 {
    if (!symbol.flags.has_gottp) return 0;
    const info = symbol.extra(elf_file).?;
    return elf_file.got.entries.items[info.gottp].address(elf_file);
}
// pub fn tlsDescAddress(symbol: Symbol, elf_file: *Elf) u64 {
// if (!symbol.flags.tlsdesc) return 0;
// const extra = symbol.getExtra(elf_file).?;
// return elf_file.getGotEntryAddress(extra.tlsdesc);
// }
/// Returns the address of this symbol's TLSDESC GOT slot, or 0 if none was
/// allocated for it.
pub fn tlsDescAddress(symbol: Symbol, elf_file: *Elf) u64 {
    if (!symbol.flags.has_tlsdesc) return 0;
    const info = symbol.extra(elf_file).?;
    return elf_file.got.entries.items[info.tlsdesc].address(elf_file);
}
// pub fn alignment(symbol: Symbol, elf_file: *Elf) !u64 {
// const file = symbol.getFile(elf_file) orelse return 0;
@@ -318,12 +321,12 @@ pub const Flags = packed struct {
/// Whether the symbol contains PLT indirection.
needs_plt: bool = false,
plt: bool = false,
has_plt: bool = false,
/// Whether the PLT entry is canonical.
is_canonical: bool = false,
/// Whether the symbol contains COPYREL directive.
copy_rel: bool = false,
needs_copy_rel: bool = false,
has_copy_rel: bool = false,
has_dynamic: bool = false,
@@ -336,7 +339,8 @@ pub const Flags = packed struct {
has_gottp: bool = false,
/// Whether the symbol contains TLSDESC indirection.
tlsdesc: bool = false,
needs_tlsdesc: bool = false,
has_tlsdesc: bool = false,
};
pub const Extra = struct {

View File

@@ -1,10 +1,16 @@
pub const GotSection = struct {
entries: std.ArrayListUnmanaged(Entry) = .{},
needs_rela: bool = false,
output_symtab_size: Elf.SymtabSize = .{},
tlsld_index: ?u32 = null,
flags: Flags = .{},
pub const Index = u32;
/// State bits accumulated while scanning relocations and adding GOT entries.
const Flags = packed struct {
/// Set when a GOT entry's symbol is imported, an ifunc, or a PIC
/// non-absolute symbol and thus needs a dynamic relocation.
needs_rela: bool = false,
/// Set when an R_X86_64_TLSLD relocation could not be relaxed and a TLS
/// module-id (LD) GOT entry must be emitted.
needs_tlsld: bool = false,
};
const Tag = enum {
got,
tlsld,
@@ -57,7 +63,7 @@ pub const GotSection = struct {
entry.symbol_index = sym_index;
const symbol = elf_file.symbol(sym_index);
if (symbol.flags.import or symbol.isIFunc(elf_file) or (elf_file.base.options.pic and !symbol.isAbs(elf_file)))
got.needs_rela = true;
got.flags.needs_rela = true;
if (symbol.extra(elf_file)) |extra| {
var new_extra = extra;
new_extra.got = index;