Merge pull request #17556 from ziglang/elf-link-zig-proper

elf: port 99% of zld ELF linker to Zig proper
This commit is contained in:
Jakub Konka
2023-10-17 17:36:40 +02:00
committed by GitHub
22 changed files with 8602 additions and 1641 deletions

View File

@@ -591,10 +591,12 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/link/Elf/Atom.zig"
"${CMAKE_SOURCE_DIR}/src/link/Elf/LinkerDefined.zig"
"${CMAKE_SOURCE_DIR}/src/link/Elf/Object.zig"
"${CMAKE_SOURCE_DIR}/src/link/Elf/SharedObject.zig"
"${CMAKE_SOURCE_DIR}/src/link/Elf/Symbol.zig"
"${CMAKE_SOURCE_DIR}/src/link/Elf/ZigModule.zig"
"${CMAKE_SOURCE_DIR}/src/link/Elf/eh_frame.zig"
"${CMAKE_SOURCE_DIR}/src/link/Elf/file.zig"
"${CMAKE_SOURCE_DIR}/src/link/Elf/gc.zig"
"${CMAKE_SOURCE_DIR}/src/link/Elf/synthetic_sections.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/Archive.zig"

View File

@@ -4012,6 +4012,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
.got => .load_memory_ptr_got,
.direct => .load_memory_ptr_direct,
.import => unreachable,
.extern_got => unreachable,
};
const atom_index = switch (self.bin_file.tag) {
.macho => blk: {
@@ -4318,8 +4319,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const sym_index = try elf_file.getOrCreateMetadataForDecl(func.owner_decl);
const sym = elf_file.symbol(sym_index);
_ = try sym.getOrCreateGotEntry(sym_index, elf_file);
const got_addr = @as(u32, @intCast(sym.gotAddress(elf_file)));
_ = try sym.getOrCreateZigGotEntry(sym_index, elf_file);
const got_addr = @as(u32, @intCast(sym.zigGotAddress(elf_file)));
try self.genSetReg(Type.usize, .x30, .{ .memory = got_addr });
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl);
@@ -5531,6 +5532,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
.got => .load_memory_ptr_got,
.direct => .load_memory_ptr_direct,
.import => unreachable,
.extern_got => unreachable,
};
const atom_index = switch (self.bin_file.tag) {
.macho => blk: {
@@ -5652,6 +5654,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.got => .load_memory_got,
.direct => .load_memory_direct,
.import => .load_memory_import,
.extern_got => unreachable,
};
const atom_index = switch (self.bin_file.tag) {
.macho => blk: {
@@ -5849,6 +5852,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
.got => .load_memory_ptr_got,
.direct => .load_memory_ptr_direct,
.import => unreachable,
.extern_got => unreachable,
};
const atom_index = switch (self.bin_file.tag) {
.macho => blk: {
@@ -6176,7 +6180,7 @@ fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
.memory => |addr| .{ .memory = addr },
.load_got => |sym_index| .{ .linker_load = .{ .type = .got, .sym_index = sym_index } },
.load_direct => |sym_index| .{ .linker_load = .{ .type = .direct, .sym_index = sym_index } },
.load_tlv => unreachable, // TODO
.load_extern_got, .load_tlv => unreachable, // TODO
},
.fail => |msg| {
self.err_msg = msg;

View File

@@ -4304,8 +4304,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const sym_index = try elf_file.getOrCreateMetadataForDecl(func.owner_decl);
const sym = elf_file.symbol(sym_index);
_ = try sym.getOrCreateGotEntry(sym_index, elf_file);
const got_addr = @as(u32, @intCast(sym.gotAddress(elf_file)));
_ = try sym.getOrCreateZigGotEntry(sym_index, elf_file);
const got_addr = @as(u32, @intCast(sym.zigGotAddress(elf_file)));
try self.genSetReg(Type.usize, .lr, .{ .memory = got_addr });
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable; // unsupported architecture for MachO
@@ -6135,7 +6135,7 @@ fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
.mcv => |mcv| switch (mcv) {
.none => .none,
.undef => .undef,
.load_got, .load_direct, .load_tlv => unreachable, // TODO
.load_got, .load_extern_got, .load_direct, .load_tlv => unreachable, // TODO
.immediate => |imm| .{ .immediate = @as(u32, @truncate(imm)) },
.memory => |addr| .{ .memory = addr },
},

View File

@@ -1754,8 +1754,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.func => |func| {
const sym_index = try elf_file.getOrCreateMetadataForDecl(func.owner_decl);
const sym = elf_file.symbol(sym_index);
_ = try sym.getOrCreateGotEntry(sym_index, elf_file);
const got_addr = @as(u32, @intCast(sym.gotAddress(elf_file)));
_ = try sym.getOrCreateZigGotEntry(sym_index, elf_file);
const got_addr = @as(u32, @intCast(sym.zigGotAddress(elf_file)));
try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr });
_ = try self.addInst(.{
.tag = .jalr,
@@ -2591,7 +2591,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
.mcv => |mcv| switch (mcv) {
.none => .none,
.undef => .undef,
.load_got, .load_direct, .load_tlv => unreachable, // TODO
.load_got, .load_extern_got, .load_direct, .load_tlv => unreachable, // TODO
.immediate => |imm| .{ .immediate = imm },
.memory => |addr| .{ .memory = addr },
},

View File

@@ -1349,8 +1349,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const sym_index = try elf_file.getOrCreateMetadataForDecl(func.owner_decl);
const sym = elf_file.symbol(sym_index);
_ = try sym.getOrCreateGotEntry(sym_index, elf_file);
break :blk @as(u32, @intCast(sym.gotAddress(elf_file)));
_ = try sym.getOrCreateZigGotEntry(sym_index, elf_file);
break :blk @as(u32, @intCast(sym.zigGotAddress(elf_file)));
} else unreachable;
try self.genSetReg(Type.usize, .o7, .{ .memory = got_addr });
@@ -4137,7 +4137,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
.mcv => |mcv| switch (mcv) {
.none => .none,
.undef => .undef,
.load_got, .load_direct, .load_tlv => unreachable, // TODO
.load_got, .load_extern_got, .load_direct, .load_tlv => unreachable, // TODO
.immediate => |imm| .{ .immediate = imm },
.memory => |addr| .{ .memory = addr },
},

View File

@@ -207,6 +207,12 @@ pub const MCValue = union(enum) {
/// The value is a pointer to a value referenced indirectly via GOT.
/// Payload is a symbol index.
lea_got: u32,
/// The value is an extern variable referenced via GOT.
/// Payload is a symbol index.
load_extern_got: u32,
/// The value is a pointer to an extern variable referenced via GOT.
/// Payload is a symbol index.
lea_extern_got: u32,
/// The value is a threadlocal variable.
/// Payload is a symbol index.
load_tlv: u32,
@@ -295,6 +301,7 @@ pub const MCValue = union(enum) {
.register_overflow,
.lea_direct,
.lea_got,
.lea_extern_got,
.lea_tlv,
.lea_frame,
.reserved_frame,
@@ -308,6 +315,7 @@ pub const MCValue = union(enum) {
.load_direct => |sym_index| .{ .lea_direct = sym_index },
.load_got => |sym_index| .{ .lea_got = sym_index },
.load_tlv => |sym_index| .{ .lea_tlv = sym_index },
.load_extern_got => |sym_index| .{ .lea_extern_got = sym_index },
.load_frame => |frame_addr| .{ .lea_frame = frame_addr },
};
}
@@ -325,6 +333,7 @@ pub const MCValue = union(enum) {
.indirect,
.load_direct,
.load_got,
.load_extern_got,
.load_tlv,
.load_frame,
.reserved_frame,
@@ -335,6 +344,7 @@ pub const MCValue = union(enum) {
.register_offset => |reg_off| .{ .indirect = reg_off },
.lea_direct => |sym_index| .{ .load_direct = sym_index },
.lea_got => |sym_index| .{ .load_got = sym_index },
.lea_extern_got => |sym_index| .{ .load_extern_got = sym_index },
.lea_tlv => |sym_index| .{ .load_tlv = sym_index },
.lea_frame => |frame_addr| .{ .load_frame = frame_addr },
};
@@ -358,6 +368,8 @@ pub const MCValue = union(enum) {
.lea_direct,
.load_got,
.lea_got,
.load_extern_got,
.lea_extern_got,
.load_tlv,
.lea_tlv,
.load_frame,
@@ -392,6 +404,8 @@ pub const MCValue = union(enum) {
.lea_direct,
.load_got,
.lea_got,
.load_extern_got,
.lea_extern_got,
.load_tlv,
.lea_tlv,
.lea_frame,
@@ -434,6 +448,8 @@ pub const MCValue = union(enum) {
.lea_direct => |pl| try writer.print("direct:{d}", .{pl}),
.load_got => |pl| try writer.print("[got:{d}]", .{pl}),
.lea_got => |pl| try writer.print("got:{d}", .{pl}),
.load_extern_got => |pl| try writer.print("[extern_got:{d}]", .{pl}),
.lea_extern_got => |pl| try writer.print("extern_got:{d}", .{pl}),
.load_tlv => |pl| try writer.print("[tlv:{d}]", .{pl}),
.lea_tlv => |pl| try writer.print("tlv:{d}", .{pl}),
.load_frame => |pl| try writer.print("[{} + 0x{x}]", .{ pl.index, pl.off }),
@@ -461,6 +477,8 @@ const InstTracking = struct {
.lea_direct,
.load_got,
.lea_got,
.load_extern_got,
.lea_extern_got,
.load_tlv,
.lea_tlv,
.load_frame,
@@ -520,6 +538,8 @@ const InstTracking = struct {
.lea_direct,
.load_got,
.lea_got,
.load_extern_got,
.lea_extern_got,
.load_tlv,
.lea_tlv,
.load_frame,
@@ -555,6 +575,8 @@ const InstTracking = struct {
.lea_direct,
.load_got,
.lea_got,
.load_extern_got,
.lea_extern_got,
.load_tlv,
.lea_tlv,
.lea_frame,
@@ -4371,6 +4393,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
.memory,
.load_direct,
.load_got,
.load_extern_got,
.load_tlv,
=> try self.genSetReg(addr_reg, Type.usize, array.address()),
.lea_direct, .lea_tlv => unreachable,
@@ -5851,6 +5874,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerErro
.register_offset,
.lea_direct,
.lea_got,
.lea_extern_got,
.lea_tlv,
.lea_frame,
=> try self.genCopy(dst_ty, dst_mcv, ptr_mcv.deref()),
@@ -5858,6 +5882,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerErro
.indirect,
.load_direct,
.load_got,
.load_extern_got,
.load_tlv,
.load_frame,
=> {
@@ -5996,6 +6021,7 @@ fn store(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerErr
.register_offset,
.lea_direct,
.lea_got,
.lea_extern_got,
.lea_tlv,
.lea_frame,
=> try self.genCopy(src_ty, ptr_mcv.deref(), src_mcv),
@@ -6003,6 +6029,7 @@ fn store(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerErr
.indirect,
.load_direct,
.load_got,
.load_extern_got,
.load_tlv,
.load_frame,
=> {
@@ -6424,6 +6451,7 @@ fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv: MC
.register_overflow,
.lea_direct,
.lea_got,
.lea_extern_got,
.lea_tlv,
.lea_frame,
.reserved_frame,
@@ -6431,7 +6459,7 @@ fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv: MC
=> unreachable, // unmodifiable destination
.register => |dst_reg| try self.asmRegister(mir_tag, registerAlias(dst_reg, abi_size)),
.register_pair => unreachable, // unimplemented
.memory, .load_got, .load_direct, .load_tlv => {
.memory, .load_got, .load_extern_got, .load_direct, .load_tlv => {
const addr_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
defer self.register_manager.unlockReg(addr_reg_lock);
@@ -7389,6 +7417,8 @@ fn genBinOp(
.lea_direct,
.load_got,
.lea_got,
.load_extern_got,
.lea_extern_got,
.load_tlv,
.lea_tlv,
.lea_frame,
@@ -7445,6 +7475,8 @@ fn genBinOp(
.lea_direct,
.load_got,
.lea_got,
.load_extern_got,
.lea_extern_got,
.load_tlv,
.lea_tlv,
.lea_frame,
@@ -8397,6 +8429,7 @@ fn genBinOpMir(
.register_overflow,
.lea_direct,
.lea_got,
.lea_extern_got,
.lea_tlv,
.lea_frame,
.reserved_frame,
@@ -8485,6 +8518,8 @@ fn genBinOpMir(
.lea_direct,
.load_got,
.lea_got,
.load_extern_got,
.lea_extern_got,
.load_tlv,
.lea_tlv,
.load_frame,
@@ -8517,6 +8552,7 @@ fn genBinOpMir(
.register_offset,
.lea_direct,
.lea_got,
.lea_extern_got,
.lea_tlv,
.lea_frame,
=> {
@@ -8532,6 +8568,7 @@ fn genBinOpMir(
.memory,
.load_direct,
.load_got,
.load_extern_got,
.load_tlv,
=> {
const ptr_ty = try mod.singleConstPtrType(ty);
@@ -8552,13 +8589,13 @@ fn genBinOpMir(
}
}
},
.memory, .indirect, .load_got, .load_direct, .load_tlv, .load_frame => {
.memory, .indirect, .load_got, .load_extern_got, .load_direct, .load_tlv, .load_frame => {
const OpInfo = ?struct { addr_reg: Register, addr_lock: RegisterLock };
const limb_abi_size: u32 = @min(abi_size, 8);
const dst_info: OpInfo = switch (dst_mcv) {
else => unreachable,
.memory, .load_got, .load_direct, .load_tlv => dst: {
.memory, .load_got, .load_extern_got, .load_direct, .load_tlv => dst: {
const dst_addr_reg =
(try self.register_manager.allocReg(null, abi.RegisterClass.gp)).to64();
const dst_addr_lock = self.register_manager.lockRegAssumeUnused(dst_addr_reg);
@@ -8592,16 +8629,17 @@ fn genBinOpMir(
.indirect,
.lea_direct,
.lea_got,
.lea_extern_got,
.lea_tlv,
.load_frame,
.lea_frame,
=> null,
.memory, .load_got, .load_direct, .load_tlv => src: {
.memory, .load_got, .load_extern_got, .load_direct, .load_tlv => src: {
switch (resolved_src_mcv) {
.memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr))) != null and
math.cast(i32, @as(i64, @bitCast(addr)) + abi_size - limb_abi_size) != null)
break :src null,
.load_got, .load_direct, .load_tlv => {},
.load_got, .load_extern_got, .load_direct, .load_tlv => {},
else => unreachable,
}
@@ -8644,6 +8682,7 @@ fn genBinOpMir(
switch (dst_mcv) {
.memory,
.load_got,
.load_extern_got,
.load_direct,
.load_tlv,
=> .{ .base = .{ .reg = dst_info.?.addr_reg }, .disp = off },
@@ -8728,6 +8767,8 @@ fn genBinOpMir(
.lea_direct,
.load_got,
.lea_got,
.load_extern_got,
.lea_extern_got,
.load_tlv,
.lea_tlv,
.load_frame,
@@ -8743,6 +8784,7 @@ fn genBinOpMir(
.register_offset,
.lea_direct,
.lea_got,
.lea_extern_got,
.lea_tlv,
.lea_frame,
=> switch (limb_i) {
@@ -8792,6 +8834,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
.register_overflow,
.lea_direct,
.lea_got,
.lea_extern_got,
.lea_tlv,
.lea_frame,
.reserved_frame,
@@ -8840,6 +8883,8 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
.lea_direct,
.load_got,
.lea_got,
.load_extern_got,
.lea_extern_got,
.load_tlv,
.lea_tlv,
.lea_frame,
@@ -8878,7 +8923,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
}
},
.register_pair => unreachable, // unimplemented
.memory, .indirect, .load_direct, .load_got, .load_tlv, .load_frame => {
.memory, .indirect, .load_direct, .load_got, .load_extern_got, .load_tlv, .load_frame => {
const tmp_reg = try self.copyToTmpRegister(dst_ty, dst_mcv);
const tmp_mcv = MCValue{ .register = tmp_reg };
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
@@ -8971,6 +9016,7 @@ fn genVarDbgInfo(
//} },
.memory => |address| .{ .memory = address },
.load_got => |sym_index| .{ .linker_load = .{ .type = .got, .sym_index = sym_index } },
.load_extern_got => |sym_index| .{ .linker_load = .{ .type = .extern_got, .sym_index = sym_index } },
.load_direct => |sym_index| .{ .linker_load = .{ .type = .direct, .sym_index = sym_index } },
.immediate => |x| .{ .immediate = x },
.undef => .undef,
@@ -9189,16 +9235,20 @@ fn genCall(self: *Self, info: union(enum) {
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const sym_index = try elf_file.getOrCreateMetadataForDecl(owner_decl);
const sym = elf_file.symbol(sym_index);
sym.flags.needs_got = true;
_ = try sym.getOrCreateGotEntry(sym_index, elf_file);
_ = try self.addInst(.{
.tag = .call,
.ops = .direct_got_reloc,
.data = .{ .reloc = .{
.atom_index = try self.owner.getSymbolIndex(self),
.sym_index = sym.esym_index,
} },
});
_ = try sym.getOrCreateZigGotEntry(sym_index, elf_file);
if (self.bin_file.options.pic) {
try self.genSetReg(.rax, Type.usize, .{ .lea_got = sym.esym_index });
try self.asmRegister(.{ ._, .call }, .rax);
} else {
_ = try self.addInst(.{
.tag = .call,
.ops = .direct_got_reloc,
.data = .{ .reloc = .{
.atom_index = try self.owner.getSymbolIndex(self),
.sym_index = sym.esym_index,
} },
});
}
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const atom = try coff_file.getOrCreateAtomForDecl(owner_decl);
const sym_index = coff_file.getAtom(atom).getSymbolIndex().?;
@@ -9406,13 +9456,14 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
.indirect,
.lea_direct,
.lea_got,
.lea_extern_got,
.lea_tlv,
.lea_frame,
.reserved_frame,
.air_ref,
=> unreachable,
.register_pair, .load_frame => null,
.memory, .load_got, .load_direct, .load_tlv => dst: {
.memory, .load_got, .load_extern_got, .load_direct, .load_tlv => dst: {
switch (resolved_dst_mcv) {
.memory => |addr| if (math.cast(
i32,
@@ -9421,7 +9472,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
i32,
@as(i64, @bitCast(addr)) + abi_size - 8,
) != null) break :dst null,
.load_got, .load_direct, .load_tlv => {},
.load_got, .load_extern_got, .load_direct, .load_tlv => {},
else => unreachable,
}
@@ -9464,13 +9515,14 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
.indirect,
.lea_direct,
.lea_got,
.lea_extern_got,
.lea_tlv,
.lea_frame,
.reserved_frame,
.air_ref,
=> unreachable,
.register_pair, .load_frame => null,
.memory, .load_got, .load_direct, .load_tlv => src: {
.memory, .load_got, .load_extern_got, .load_direct, .load_tlv => src: {
switch (resolved_src_mcv) {
.memory => |addr| if (math.cast(
i32,
@@ -9479,7 +9531,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
i32,
@as(i64, @bitCast(addr)) + abi_size - 8,
) != null) break :src null,
.load_got, .load_direct, .load_tlv => {},
.load_got, .load_extern_got, .load_direct, .load_tlv => {},
else => unreachable,
}
@@ -9898,6 +9950,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
.register_overflow,
.lea_direct,
.lea_got,
.lea_extern_got,
.lea_tlv,
.lea_frame,
.reserved_frame,
@@ -9924,6 +9977,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
.memory,
.load_got,
.load_extern_got,
.load_direct,
.load_tlv,
=> {
@@ -10481,7 +10535,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
.memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |_|
break :arg input_mcv,
.indirect, .load_frame => break :arg input_mcv,
.load_direct, .load_got, .load_tlv => {},
.load_direct, .load_got, .load_extern_got, .load_tlv => {},
else => {
const temp_mcv = try self.allocTempRegOrMem(ty, false);
try self.genCopy(ty, temp_mcv, input_mcv);
@@ -11142,6 +11196,7 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError
.register_overflow,
.lea_direct,
.lea_got,
.lea_extern_got,
.lea_tlv,
.lea_frame,
.reserved_frame,
@@ -11193,11 +11248,11 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError
}
},
.indirect => |reg_off| try self.genSetMem(.{ .reg = reg_off.reg }, reg_off.off, ty, src_mcv),
.memory, .load_direct, .load_got, .load_tlv => {
.memory, .load_direct, .load_got, .load_extern_got, .load_tlv => {
switch (dst_mcv) {
.memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr|
return self.genSetMem(.{ .reg = .ds }, small_addr, ty, src_mcv),
.load_direct, .load_got, .load_tlv => {},
.load_direct, .load_got, .load_extern_got, .load_tlv => {},
else => unreachable,
}
@@ -11359,7 +11414,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
else => unreachable,
},
)),
.memory, .load_direct, .load_got, .load_tlv => {
.memory, .load_direct, .load_got, .load_extern_got, .load_tlv => {
switch (src_mcv) {
.memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr|
return (try self.moveStrategy(
@@ -11387,7 +11442,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
},
.Float, .Vector => {},
},
.load_got, .load_tlv => {},
.load_got, .load_extern_got, .load_tlv => {},
else => unreachable,
}
@@ -11401,17 +11456,18 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .{ .reg = addr_reg } }),
);
},
.lea_direct, .lea_got => |sym_index| {
.lea_direct, .lea_got, .lea_extern_got => |sym_index| {
const atom_index = try self.owner.getSymbolIndex(self);
_ = try self.addInst(.{
.tag = switch (src_mcv) {
.lea_direct => .lea,
.lea_got => .mov,
.lea_got, .lea_extern_got => .mov,
else => unreachable,
},
.ops = switch (src_mcv) {
.lea_direct => .direct_reloc,
.lea_got => .got_reloc,
.lea_extern_got => .extern_got_reloc,
else => unreachable,
},
.data = .{ .rx = .{
@@ -11547,6 +11603,8 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
.lea_direct,
.load_got,
.lea_got,
.load_extern_got,
.lea_extern_got,
.load_tlv,
.lea_tlv,
.load_frame,
@@ -11637,36 +11695,49 @@ fn genLazySymbolRef(
const sym_index = elf_file.getOrCreateMetadataForLazySymbol(lazy_sym) catch |err|
return self.fail("{s} creating lazy symbol", .{@errorName(err)});
const sym = elf_file.symbol(sym_index);
sym.flags.needs_got = true;
_ = try sym.getOrCreateGotEntry(sym_index, elf_file);
const reloc = Mir.Reloc{
.atom_index = try self.owner.getSymbolIndex(self),
.sym_index = sym.esym_index,
};
switch (tag) {
.lea, .mov => _ = try self.addInst(.{
.tag = .mov,
.ops = .direct_got_reloc,
.data = .{ .rx = .{
.r1 = reg.to64(),
.payload = try self.addExtra(reloc),
} },
}),
.call => _ = try self.addInst(.{
.tag = .call,
.ops = .direct_got_reloc,
.data = .{ .reloc = reloc },
}),
else => unreachable,
}
switch (tag) {
.lea, .call => {},
.mov => try self.asmRegisterMemory(
.{ ._, tag },
reg.to64(),
Memory.sib(.qword, .{ .base = .{ .reg = reg.to64() } }),
),
else => unreachable,
_ = try sym.getOrCreateZigGotEntry(sym_index, elf_file);
if (self.bin_file.options.pic) {
switch (tag) {
.lea, .call => try self.genSetReg(reg, Type.usize, .{ .lea_got = sym.esym_index }),
.mov => try self.genSetReg(reg, Type.usize, .{ .load_got = sym.esym_index }),
else => unreachable,
}
switch (tag) {
.lea, .mov => {},
.call => try self.asmRegister(.{ ._, .call }, reg),
else => unreachable,
}
} else {
const reloc = Mir.Reloc{
.atom_index = try self.owner.getSymbolIndex(self),
.sym_index = sym.esym_index,
};
switch (tag) {
.lea, .mov => _ = try self.addInst(.{
.tag = .mov,
.ops = .direct_got_reloc,
.data = .{ .rx = .{
.r1 = reg.to64(),
.payload = try self.addExtra(reloc),
} },
}),
.call => _ = try self.addInst(.{
.tag = .call,
.ops = .direct_got_reloc,
.data = .{ .reloc = reloc },
}),
else => unreachable,
}
switch (tag) {
.lea, .call => {},
.mov => try self.asmRegisterMemory(
.{ ._, tag },
reg.to64(),
Memory.sib(.qword, .{ .base = .{ .reg = reg.to64() } }),
),
else => unreachable,
}
}
} else if (self.bin_file.cast(link.File.Plan9)) |p9_file| {
const atom_index = p9_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err|
@@ -13539,6 +13610,7 @@ fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
.memory => |addr| .{ .memory = addr },
.load_direct => |sym_index| .{ .load_direct = sym_index },
.load_got => |sym_index| .{ .lea_got = sym_index },
.load_extern_got => |sym_index| .{ .lea_extern_got = sym_index },
.load_tlv => |sym_index| .{ .lea_tlv = sym_index },
},
.fail => |msg| {

View File

@@ -79,20 +79,29 @@ pub fn emitMir(emit: *Emit) Error!void {
@tagName(emit.bin_file.tag),
}),
.linker_got,
.linker_extern_got,
.linker_direct,
.linker_direct_got,
.linker_import,
.linker_tlv,
=> |symbol| if (emit.bin_file.cast(link.File.Elf)) |elf_file| {
const r_type: u32 = switch (lowered_relocs[0].target) {
.linker_direct_got => std.elf.R_X86_64_GOT32,
.linker_direct_got => link.File.Elf.R_X86_64_ZIG_GOT32,
.linker_got => link.File.Elf.R_X86_64_ZIG_GOTPCREL,
.linker_extern_got => std.elf.R_X86_64_GOTPCREL,
.linker_direct => std.elf.R_X86_64_PC32,
else => unreachable,
};
const r_addend: i64 = switch (lowered_relocs[0].target) {
.linker_direct_got => 0,
.linker_got, .linker_extern_got, .linker_direct => -4,
else => unreachable,
};
const atom_ptr = elf_file.symbol(symbol.atom_index).atom(elf_file).?;
try atom_ptr.addReloc(elf_file, .{
.r_offset = end_offset - 4,
.r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | r_type,
.r_addend = 0,
.r_addend = r_addend,
});
} else if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = symbol.atom_index }).?;

View File

@@ -51,6 +51,7 @@ pub const Reloc = struct {
inst: Mir.Inst.Index,
linker_extern_fn: Mir.Reloc,
linker_got: Mir.Reloc,
linker_extern_got: Mir.Reloc,
linker_direct: Mir.Reloc,
linker_direct_got: Mir.Reloc,
linker_import: Mir.Reloc,
@@ -388,7 +389,7 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
.rrmi_sib, .rrmi_rip => inst.data.rrix.fixes,
.mi_sib_u, .mi_rip_u, .mi_sib_s, .mi_rip_s => inst.data.x.fixes,
.m_sib, .m_rip, .rax_moffs, .moffs_rax => inst.data.x.fixes,
.extern_fn_reloc, .got_reloc, .direct_reloc, .direct_got_reloc, .import_reloc, .tlv_reloc => ._,
.extern_fn_reloc, .got_reloc, .extern_got_reloc, .direct_reloc, .direct_got_reloc, .import_reloc, .tlv_reloc => ._,
else => return lower.fail("TODO lower .{s}", .{@tagName(inst.ops)}),
};
try lower.emit(switch (fixes) {
@@ -532,11 +533,12 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
else => unreachable,
}
},
.got_reloc, .direct_reloc, .import_reloc, .tlv_reloc => ops: {
.got_reloc, .extern_got_reloc, .direct_reloc, .import_reloc, .tlv_reloc => ops: {
const reg = inst.data.rx.r1;
const extra = lower.mir.extraData(Mir.Reloc, inst.data.rx.payload).data;
_ = lower.reloc(switch (inst.ops) {
.got_reloc => .{ .linker_got = extra },
.extern_got_reloc => .{ .linker_extern_got = extra },
.direct_reloc => .{ .linker_direct = extra },
.import_reloc => .{ .linker_import = extra },
.tlv_reloc => .{ .linker_tlv = extra },

View File

@@ -783,6 +783,9 @@ pub const Inst = struct {
/// Linker relocation - GOT indirection.
/// Uses `rx` payload with extra data of type `Reloc`.
got_reloc,
/// Linker relocation - reference to an extern variable via GOT.
/// Uses `rx` payload with extra data of type `Reloc`.
extern_got_reloc,
/// Linker relocation - direct reference.
/// Uses `rx` payload with extra data of type `Reloc`.
direct_reloc,

View File

@@ -793,11 +793,13 @@ fn lowerDeclRef(
/// Helper struct to denote that the value is in memory but requires a linker relocation fixup:
/// * got - the value is referenced indirectly via GOT entry index (the linker emits a got-type reloc)
/// * extern_got - pointer to extern variable referenced via GOT
/// * direct - the value is referenced directly via symbol index (the linker emits a displacement reloc)
/// * import - the value is referenced indirectly via import entry index (the linker emits an import-type reloc)
pub const LinkerLoad = struct {
type: enum {
got,
extern_got,
direct,
import,
},
@@ -827,6 +829,8 @@ pub const GenResult = union(enum) {
load_got: u32,
/// Direct by-address reference to memory location.
memory: u64,
/// Pointer to extern variable via GOT.
load_extern_got: u32,
};
fn mcv(val: MCValue) GenResult {
@@ -885,13 +889,26 @@ fn genDeclRef(
try mod.markDeclAlive(decl);
const is_threadlocal = tv.val.isPtrToThreadLocal(mod) and !bin_file.options.single_threaded;
const is_extern = decl.isExtern(mod);
if (bin_file.cast(link.File.Elf)) |elf_file| {
if (is_extern) {
const name = mod.intern_pool.stringToSlice(decl.name);
// TODO audit this
const lib_name = if (decl.getOwnedVariable(mod)) |ov|
mod.intern_pool.stringToSliceUnwrap(ov.lib_name)
else
null;
return GenResult.mcv(.{ .load_extern_got = try elf_file.getGlobalSymbol(name, lib_name) });
}
const sym_index = try elf_file.getOrCreateMetadataForDecl(decl_index);
const sym = elf_file.symbol(sym_index);
sym.flags.needs_got = true;
_ = try sym.getOrCreateGotEntry(sym_index, elf_file);
return GenResult.mcv(.{ .memory = sym.gotAddress(elf_file) });
_ = try sym.getOrCreateZigGotEntry(sym_index, elf_file);
if (bin_file.options.pic) {
return GenResult.mcv(.{ .load_got = sym.esym_index });
} else {
return GenResult.mcv(.{ .memory = sym.zigGotAddress(elf_file) });
}
} else if (bin_file.cast(link.File.MachO)) |macho_file| {
const atom_index = try macho_file.getOrCreateAtomForDecl(decl_index);
const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
@@ -926,7 +943,12 @@ fn genUnnamedConst(
return GenResult.fail(bin_file.allocator, src_loc, "lowering unnamed constant failed: {s}", .{@errorName(err)});
};
if (bin_file.cast(link.File.Elf)) |elf_file| {
return GenResult.mcv(.{ .memory = elf_file.symbol(local_sym_index).value });
const local = elf_file.symbol(local_sym_index);
if (bin_file.options.pic) {
return GenResult.mcv(.{ .load_direct = local.esym_index });
} else {
return GenResult.mcv(.{ .memory = local.value });
}
} else if (bin_file.cast(link.File.MachO)) |_| {
return GenResult.mcv(.{ .load_direct = local_sym_index });
} else if (bin_file.cast(link.File.Coff)) |_| {

View File

@@ -1389,6 +1389,7 @@ pub fn commitDeclState(
.prev_vaddr = 0,
});
},
.elf => {}, // TODO
else => unreachable,
}
}
@@ -1850,8 +1851,7 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u
// not including the initial length itself.
// We have to come back and write it later after we know the size.
const after_init_len = di_buf.items.len + init_len_size;
// +1 for the final 0 that ends the compilation unit children.
const dbg_info_end = self.getDebugInfoEnd().? + 1;
const dbg_info_end = self.getDebugInfoEnd().?;
const init_len = dbg_info_end - after_init_len;
if (self.bin_file.tag == .macho) {
mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(init_len)));
@@ -2500,7 +2500,7 @@ fn getDebugInfoOff(self: Dwarf) ?u32 {
fn getDebugInfoEnd(self: Dwarf) ?u32 {
const last_index = self.di_atom_last_index orelse return null;
const last = self.getAtom(.di_atom, last_index);
return last.off + last.len;
return last.off + last.len + 1;
}
fn getDebugLineProgramOff(self: Dwarf) ?u32 {
@@ -2642,7 +2642,7 @@ fn addDIFile(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) !u28 {
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
elf_file.markDirty(elf_file.debug_line_section_index.?, null);
elf_file.markDirty(elf_file.debug_line_section_index.?);
},
.macho => {
const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;

File diff suppressed because it is too large Load Diff

View File

@@ -14,13 +14,13 @@ size: u64 = 0,
alignment: Alignment = .@"1",
/// Index of the input section.
input_section_index: Index = 0,
input_section_index: u16 = 0,
/// Index of the output section.
output_section_index: u16 = 0,
/// Index of the input section containing this atom's relocs.
relocs_section_index: Index = 0,
relocs_section_index: u16 = 0,
/// Index of this atom in the linker's atoms table.
atom_index: Index = 0,
@@ -49,9 +49,12 @@ pub fn file(self: Atom, elf_file: *Elf) ?File {
return elf_file.file(self.file_index);
}
pub fn inputShdr(self: Atom, elf_file: *Elf) elf.Elf64_Shdr {
const object = self.file(elf_file).?.object;
return object.shdrs.items[self.input_section_index];
pub fn inputShdr(self: Atom, elf_file: *Elf) Object.ElfShdr {
return switch (self.file(elf_file).?) {
.object => |x| x.shdrs.items[self.input_section_index],
.zig_module => |x| x.inputShdr(self.atom_index, elf_file),
else => unreachable,
};
}
pub fn outputShndx(self: Atom) ?u16 {
@@ -199,7 +202,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
_ = free_list.swapRemove(i);
}
self.flags.allocated = true;
self.flags.alive = true;
}
pub fn shrink(self: *Atom, elf_file: *Elf) void {
@@ -216,7 +219,6 @@ pub fn free(self: *Atom, elf_file: *Elf) void {
log.debug("freeAtom {d} ({s})", .{ self.atom_index, self.name(elf_file) });
const gpa = elf_file.base.allocator;
const zig_module = self.file(elf_file).?.zig_module;
const shndx = self.outputShndx().?;
const meta = elf_file.last_atom_and_free_list_table.getPtr(shndx).?;
const free_list = &meta.free_list;
@@ -267,11 +269,13 @@ pub fn free(self: *Atom, elf_file: *Elf) void {
// TODO create relocs free list
self.freeRelocs(elf_file);
assert(zig_module.atoms.swapRemove(self.atom_index));
// TODO figure out how to free input section mapping in ZigModule
// const zig_module = self.file(elf_file).?.zig_module;
// assert(zig_module.atoms.swapRemove(self.atom_index));
self.* = .{};
}
pub fn relocs(self: Atom, elf_file: *Elf) error{Overflow}![]align(1) const elf.Elf64_Rela {
pub fn relocs(self: Atom, elf_file: *Elf) []align(1) const elf.Elf64_Rela {
return switch (self.file(elf_file).?) {
.zig_module => |x| x.relocs.items[self.relocs_section_index].items,
.object => |x| x.getRelocs(self.relocs_section_index),
@@ -279,6 +283,18 @@ pub fn relocs(self: Atom, elf_file: *Elf) error{Overflow}![]align(1) const elf.E
};
}
pub fn fdes(self: Atom, elf_file: *Elf) []Fde {
if (self.fde_start == self.fde_end) return &[0]Fde{};
const object = self.file(elf_file).?.object;
return object.fdes.items[self.fde_start..self.fde_end];
}
pub fn markFdesDead(self: Atom, elf_file: *Elf) void {
for (self.fdes(elf_file)) |*fde| {
fde.alive = false;
}
}
pub fn addReloc(self: Atom, elf_file: *Elf, reloc: elf.Elf64_Rela) !void {
const gpa = elf_file.base.allocator;
const file_ptr = self.file(elf_file).?;
@@ -295,17 +311,18 @@ pub fn freeRelocs(self: Atom, elf_file: *Elf) void {
zig_module.relocs.items[self.relocs_section_index].clearRetainingCapacity();
}
pub fn scanRelocsRequiresCode(self: Atom, elf_file: *Elf) error{Overflow}!bool {
for (try self.relocs(elf_file)) |rel| {
pub fn scanRelocsRequiresCode(self: Atom, elf_file: *Elf) bool {
for (self.relocs(elf_file)) |rel| {
if (rel.r_type() == elf.R_X86_64_GOTTPOFF) return true;
}
return false;
}
pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype) !void {
const is_static = elf_file.isStatic();
const is_dyn_lib = elf_file.isDynLib();
const file_ptr = self.file(elf_file).?;
const rels = try self.relocs(elf_file);
const rels = self.relocs(elf_file);
var i: usize = 0;
while (i < rels.len) : (i += 1) {
const rel = rels[i];
@@ -335,17 +352,24 @@ pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype
// Report an undefined symbol.
try self.reportUndefined(elf_file, symbol, symbol_index, rel, undefs);
if (symbol.isIFunc(elf_file)) {
symbol.flags.needs_got = true;
symbol.flags.needs_plt = true;
}
// While traversing relocations, mark symbols that require special handling such as
// pointer indirection via GOT, or a stub trampoline via PLT.
switch (rel.r_type()) {
elf.R_X86_64_64 => {},
elf.R_X86_64_64 => {
try self.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file);
},
elf.R_X86_64_32,
elf.R_X86_64_32S,
=> {},
=> {
try self.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file);
},
elf.R_X86_64_GOT32,
elf.R_X86_64_GOT64,
elf.R_X86_64_GOTPC32,
elf.R_X86_64_GOTPC64,
elf.R_X86_64_GOTPCREL,
@@ -364,23 +388,14 @@ pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype
}
},
elf.R_X86_64_PC32 => {},
elf.R_X86_64_TPOFF32,
elf.R_X86_64_TPOFF64,
=> {
if (is_dyn_lib) {
// TODO
// self.picError(symbol, rel, elf_file);
}
elf.R_X86_64_PC32 => {
try self.scanReloc(symbol, rel, pcRelocAction(symbol, elf_file), elf_file);
},
elf.R_X86_64_TLSGD => {
// TODO verify followed by appropriate relocation such as PLT32 __tls_get_addr
if (elf_file.isStatic() or
(!symbol.flags.import and !is_dyn_lib))
{
if (is_static or (!symbol.flags.import and !is_dyn_lib)) {
// Relax if building with -static flag as __tls_get_addr() will not be present in libc.a
// We skip the next relocation.
i += 1;
@@ -392,9 +407,21 @@ pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype
}
},
elf.R_X86_64_TLSLD => {
// TODO verify followed by appropriate relocation such as PLT32 __tls_get_addr
if (is_static or !is_dyn_lib) {
// Relax if building with -static flag as __tls_get_addr() will not be present in libc.a
// We skip the next relocation.
i += 1;
} else {
elf_file.got.flags.needs_tlsld = true;
}
},
elf.R_X86_64_GOTTPOFF => {
const should_relax = blk: {
// if (!elf_file.options.relax or is_shared or symbol.flags.import) break :blk false;
if (is_dyn_lib or symbol.flags.import) break :blk false;
if (!x86_64.canRelaxGotTpOff(code.?[r_offset - 3 ..])) break :blk false;
break :blk true;
};
@@ -403,21 +430,255 @@ pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype
}
},
else => {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "fatal linker error: unhandled relocation type {}", .{
fmtRelocType(rel.r_type()),
});
try err.addNote(elf_file, "in {}:{s} at offset 0x{x}", .{
self.file(elf_file).?.fmtPath(),
self.name(elf_file),
r_offset,
});
elf.R_X86_64_GOTPC32_TLSDESC => {
const should_relax = is_static or (!is_dyn_lib and !symbol.flags.import);
if (!should_relax) {
symbol.flags.needs_tlsdesc = true;
}
},
elf.R_X86_64_TPOFF32,
elf.R_X86_64_TPOFF64,
=> {
if (is_dyn_lib) try self.reportPicError(symbol, rel, elf_file);
},
elf.R_X86_64_GOTOFF64,
elf.R_X86_64_DTPOFF32,
elf.R_X86_64_DTPOFF64,
elf.R_X86_64_SIZE32,
elf.R_X86_64_SIZE64,
elf.R_X86_64_TLSDESC_CALL,
=> {},
// Zig custom relocations
Elf.R_X86_64_ZIG_GOT32,
Elf.R_X86_64_ZIG_GOTPCREL,
=> {
assert(symbol.flags.has_zig_got);
},
else => try self.reportUnhandledRelocError(rel, elf_file),
}
}
}
fn scanReloc(
self: Atom,
symbol: *Symbol,
rel: elf.Elf64_Rela,
action: RelocAction,
elf_file: *Elf,
) error{OutOfMemory}!void {
const is_writeable = self.inputShdr(elf_file).sh_flags & elf.SHF_WRITE != 0;
const num_dynrelocs = switch (self.file(elf_file).?) {
.linker_defined => unreachable,
.shared_object => unreachable,
inline else => |x| &x.num_dynrelocs,
};
switch (action) {
.none => {},
.@"error" => if (symbol.isAbs(elf_file))
try self.reportNoPicError(symbol, rel, elf_file)
else
try self.reportPicError(symbol, rel, elf_file),
.copyrel => {
if (elf_file.base.options.z_nocopyreloc) {
if (symbol.isAbs(elf_file))
try self.reportNoPicError(symbol, rel, elf_file)
else
try self.reportPicError(symbol, rel, elf_file);
}
symbol.flags.needs_copy_rel = true;
},
.dyn_copyrel => {
if (is_writeable or elf_file.base.options.z_nocopyreloc) {
if (!is_writeable) {
if (elf_file.base.options.z_notext) {
elf_file.has_text_reloc = true;
} else {
try self.reportTextRelocError(symbol, rel, elf_file);
}
}
num_dynrelocs.* += 1;
} else {
symbol.flags.needs_copy_rel = true;
}
},
.plt => {
symbol.flags.needs_plt = true;
},
.cplt => {
symbol.flags.needs_plt = true;
symbol.flags.is_canonical = true;
},
.dyn_cplt => {
if (is_writeable) {
num_dynrelocs.* += 1;
} else {
symbol.flags.needs_plt = true;
symbol.flags.is_canonical = true;
}
},
.dynrel, .baserel, .ifunc => {
if (!is_writeable) {
if (elf_file.base.options.z_notext) {
elf_file.has_text_reloc = true;
} else {
try self.reportTextRelocError(symbol, rel, elf_file);
}
}
num_dynrelocs.* += 1;
if (action == .ifunc) elf_file.num_ifunc_dynrelocs += 1;
},
}
}
const RelocAction = enum {
none,
@"error",
copyrel,
dyn_copyrel,
plt,
dyn_cplt,
cplt,
dynrel,
baserel,
ifunc,
};
fn pcRelocAction(symbol: *const Symbol, elf_file: *Elf) RelocAction {
// zig fmt: off
const table: [3][4]RelocAction = .{
// Abs Local Import data Import func
.{ .@"error", .none, .@"error", .plt }, // Shared object
.{ .@"error", .none, .copyrel, .plt }, // PIE
.{ .none, .none, .copyrel, .cplt }, // Non-PIE
};
// zig fmt: on
const output = outputType(elf_file);
const data = dataType(symbol, elf_file);
return table[output][data];
}
fn absRelocAction(symbol: *const Symbol, elf_file: *Elf) RelocAction {
// zig fmt: off
const table: [3][4]RelocAction = .{
// Abs Local Import data Import func
.{ .none, .@"error", .@"error", .@"error" }, // Shared object
.{ .none, .@"error", .@"error", .@"error" }, // PIE
.{ .none, .none, .copyrel, .cplt }, // Non-PIE
};
// zig fmt: on
const output = outputType(elf_file);
const data = dataType(symbol, elf_file);
return table[output][data];
}
fn dynAbsRelocAction(symbol: *const Symbol, elf_file: *Elf) RelocAction {
if (symbol.isIFunc(elf_file)) return .ifunc;
// zig fmt: off
const table: [3][4]RelocAction = .{
// Abs Local Import data Import func
.{ .none, .baserel, .dynrel, .dynrel }, // Shared object
.{ .none, .baserel, .dynrel, .dynrel }, // PIE
.{ .none, .none, .dyn_copyrel, .dyn_cplt }, // Non-PIE
};
// zig fmt: on
const output = outputType(elf_file);
const data = dataType(symbol, elf_file);
return table[output][data];
}
fn outputType(elf_file: *Elf) u2 {
return switch (elf_file.base.options.output_mode) {
.Obj => unreachable,
.Lib => 0,
.Exe => if (elf_file.base.options.pie) 1 else 2,
};
}
fn dataType(symbol: *const Symbol, elf_file: *Elf) u2 {
if (symbol.isAbs(elf_file)) return 0;
if (!symbol.flags.import) return 1;
if (symbol.type(elf_file) != elf.STT_FUNC) return 2;
return 3;
}
fn reportUnhandledRelocError(self: Atom, rel: elf.Elf64_Rela, elf_file: *Elf) error{OutOfMemory}!void {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "fatal linker error: unhandled relocation type {} at offset 0x{x}", .{
fmtRelocType(rel.r_type()),
rel.r_offset,
});
try err.addNote(elf_file, "in {}:{s}", .{
self.file(elf_file).?.fmtPath(),
self.name(elf_file),
});
}
fn reportTextRelocError(
self: Atom,
symbol: *const Symbol,
rel: elf.Elf64_Rela,
elf_file: *Elf,
) error{OutOfMemory}!void {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "relocation at offset 0x{x} against symbol '{s}' cannot be used", .{
rel.r_offset,
symbol.name(elf_file),
});
try err.addNote(elf_file, "in {}:{s}", .{
self.file(elf_file).?.fmtPath(),
self.name(elf_file),
});
}
fn reportPicError(
self: Atom,
symbol: *const Symbol,
rel: elf.Elf64_Rela,
elf_file: *Elf,
) error{OutOfMemory}!void {
var err = try elf_file.addErrorWithNotes(2);
try err.addMsg(elf_file, "relocation at offset 0x{x} against symbol '{s}' cannot be used", .{
rel.r_offset,
symbol.name(elf_file),
});
try err.addNote(elf_file, "in {}:{s}", .{
self.file(elf_file).?.fmtPath(),
self.name(elf_file),
});
try err.addNote(elf_file, "recompile with -fPIC", .{});
}
fn reportNoPicError(
self: Atom,
symbol: *const Symbol,
rel: elf.Elf64_Rela,
elf_file: *Elf,
) error{OutOfMemory}!void {
var err = try elf_file.addErrorWithNotes(2);
try err.addMsg(elf_file, "relocation at offset 0x{x} against symbol '{s}' cannot be used", .{
rel.r_offset,
symbol.name(elf_file),
});
try err.addNote(elf_file, "in {}:{s}", .{
self.file(elf_file).?.fmtPath(),
self.name(elf_file),
});
try err.addNote(elf_file, "recompile with -fno-PIC", .{});
}
// This function will report any undefined non-weak symbols that are not imports.
fn reportUndefined(
self: Atom,
@@ -447,15 +708,14 @@ fn reportUndefined(
}
}
/// TODO mark relocs dirty
pub fn resolveRelocs(self: Atom, elf_file: *Elf, code: []u8) !void {
pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) !void {
relocs_log.debug("0x{x}: {s}", .{ self.value, self.name(elf_file) });
const file_ptr = self.file(elf_file).?;
var stream = std.io.fixedBufferStream(code);
const cwriter = stream.writer();
const rels = try self.relocs(elf_file);
const rels = self.relocs(elf_file);
var i: usize = 0;
while (i < rels.len) : (i += 1) {
const rel = rels[i];
@@ -488,19 +748,22 @@ pub fn resolveRelocs(self: Atom, elf_file: *Elf, code: []u8) !void {
null;
break :blk if (shndx) |index| @as(i64, @intCast(elf_file.shdrs.items[index].sh_addr)) else 0;
};
// Address of the .zig.got table entry if any.
const ZIG_GOT = @as(i64, @intCast(target.zigGotAddress(elf_file)));
// Relative offset to the start of the global offset table.
const G = @as(i64, @intCast(target.gotAddress(elf_file))) - GOT;
// // Address of the thread pointer.
const TP = @as(i64, @intCast(elf_file.tpAddress()));
// // Address of the dynamic thread pointer.
// const DTP = @as(i64, @intCast(elf_file.dtpAddress()));
// Address of the dynamic thread pointer.
const DTP = @as(i64, @intCast(elf_file.dtpAddress()));
relocs_log.debug(" {s}: {x}: [{x} => {x}] G({x}) ({s})", .{
relocs_log.debug(" {s}: {x}: [{x} => {x}] G({x}) ZG({x}) ({s})", .{
fmtRelocType(r_type),
r_offset,
P,
S + A,
G + GOT + A,
ZIG_GOT + A,
target.name(elf_file),
});
@@ -509,18 +772,20 @@ pub fn resolveRelocs(self: Atom, elf_file: *Elf, code: []u8) !void {
switch (rel.r_type()) {
elf.R_X86_64_NONE => unreachable,
elf.R_X86_64_64 => try cwriter.writeIntLittle(i64, S + A),
elf.R_X86_64_32 => try cwriter.writeIntLittle(u32, @as(u32, @truncate(@as(u64, @intCast(S + A))))),
elf.R_X86_64_32S => try cwriter.writeIntLittle(i32, @as(i32, @truncate(S + A))),
elf.R_X86_64_64 => {
try self.resolveDynAbsReloc(
target,
rel,
dynAbsRelocAction(target, elf_file),
elf_file,
cwriter,
);
},
elf.R_X86_64_PLT32,
elf.R_X86_64_PC32,
=> try cwriter.writeIntLittle(i32, @as(i32, @intCast(S + A - P))),
elf.R_X86_64_GOT32 => try cwriter.writeIntLittle(u32, @as(u32, @intCast(G + GOT + A))),
elf.R_X86_64_GOT64 => try cwriter.writeIntLittle(u64, @as(u64, @intCast(G + GOT + A))),
elf.R_X86_64_GOTPCREL => try cwriter.writeIntLittle(i32, @as(i32, @intCast(G + GOT + A - P))),
elf.R_X86_64_GOTPC32 => try cwriter.writeIntLittle(i32, @as(i32, @intCast(GOT + A - P))),
elf.R_X86_64_GOTPC64 => try cwriter.writeIntLittle(i64, GOT + A - P),
@@ -543,18 +808,22 @@ pub fn resolveRelocs(self: Atom, elf_file: *Elf, code: []u8) !void {
try cwriter.writeIntLittle(i32, @as(i32, @intCast(G + GOT + A - P)));
},
elf.R_X86_64_32 => try cwriter.writeIntLittle(u32, @as(u32, @truncate(@as(u64, @intCast(S + A))))),
elf.R_X86_64_32S => try cwriter.writeIntLittle(i32, @as(i32, @truncate(S + A))),
elf.R_X86_64_TPOFF32 => try cwriter.writeIntLittle(i32, @as(i32, @truncate(S + A - TP))),
elf.R_X86_64_TPOFF64 => try cwriter.writeIntLittle(i64, S + A - TP),
elf.R_X86_64_DTPOFF32 => try cwriter.writeIntLittle(i32, @as(i32, @truncate(S + A - DTP))),
elf.R_X86_64_DTPOFF64 => try cwriter.writeIntLittle(i64, S + A - DTP),
elf.R_X86_64_TLSGD => {
if (target.flags.has_tlsgd) {
// TODO
// const S_ = @as(i64, @intCast(target.tlsGdAddress(elf_file)));
// try cwriter.writeIntLittle(i32, @as(i32, @intCast(S_ + A - P)));
const S_ = @as(i64, @intCast(target.tlsGdAddress(elf_file)));
try cwriter.writeIntLittle(i32, @as(i32, @intCast(S_ + A - P)));
} else if (target.flags.has_gottp) {
// TODO
// const S_ = @as(i64, @intCast(target.getGotTpAddress(elf_file)));
// try relaxTlsGdToIe(relocs[i .. i + 2], @intCast(S_ - P), elf_file, &stream);
const S_ = @as(i64, @intCast(target.gotTpAddress(elf_file)));
try x86_64.relaxTlsGdToIe(self, rels[i .. i + 2], @intCast(S_ - P), elf_file, &stream);
i += 1;
} else {
try x86_64.relaxTlsGdToLe(
@@ -568,22 +837,245 @@ pub fn resolveRelocs(self: Atom, elf_file: *Elf, code: []u8) !void {
}
},
elf.R_X86_64_TLSLD => {
if (elf_file.got.tlsld_index) |entry_index| {
const tlsld_entry = elf_file.got.entries.items[entry_index];
const S_ = @as(i64, @intCast(tlsld_entry.address(elf_file)));
try cwriter.writeIntLittle(i32, @as(i32, @intCast(S_ + A - P)));
} else {
try x86_64.relaxTlsLdToLe(
self,
rels[i .. i + 2],
@as(i32, @intCast(TP - @as(i64, @intCast(elf_file.tlsAddress())))),
elf_file,
&stream,
);
i += 1;
}
},
elf.R_X86_64_GOTPC32_TLSDESC => {
if (target.flags.has_tlsdesc) {
const S_ = @as(i64, @intCast(target.tlsDescAddress(elf_file)));
try cwriter.writeIntLittle(i32, @as(i32, @intCast(S_ + A - P)));
} else {
try x86_64.relaxGotPcTlsDesc(code[r_offset - 3 ..]);
try cwriter.writeIntLittle(i32, @as(i32, @intCast(S - TP)));
}
},
elf.R_X86_64_TLSDESC_CALL => if (!target.flags.has_tlsdesc) {
// call -> nop
try cwriter.writeAll(&.{ 0x66, 0x90 });
},
elf.R_X86_64_GOTTPOFF => {
if (target.flags.has_gottp) {
// TODO
// const S_ = @as(i64, @intCast(target.gotTpAddress(elf_file)));
// try cwriter.writeIntLittle(i32, @as(i32, @intCast(S_ + A - P)));
const S_ = @as(i64, @intCast(target.gotTpAddress(elf_file)));
try cwriter.writeIntLittle(i32, @as(i32, @intCast(S_ + A - P)));
} else {
x86_64.relaxGotTpOff(code[r_offset - 3 ..]) catch unreachable;
try cwriter.writeIntLittle(i32, @as(i32, @intCast(S - TP)));
}
},
// Zig custom relocations
Elf.R_X86_64_ZIG_GOT32 => try cwriter.writeIntLittle(u32, @as(u32, @intCast(ZIG_GOT + A))),
Elf.R_X86_64_ZIG_GOTPCREL => try cwriter.writeIntLittle(i32, @as(i32, @intCast(ZIG_GOT + A - P))),
else => {},
}
}
}
fn resolveDynAbsReloc(
self: Atom,
target: *const Symbol,
rel: elf.Elf64_Rela,
action: RelocAction,
elf_file: *Elf,
writer: anytype,
) !void {
const P = self.value + rel.r_offset;
const A = rel.r_addend;
const S = @as(i64, @intCast(target.address(.{}, elf_file)));
const is_writeable = self.inputShdr(elf_file).sh_flags & elf.SHF_WRITE != 0;
const num_dynrelocs = switch (self.file(elf_file).?) {
.linker_defined => unreachable,
.shared_object => unreachable,
inline else => |x| x.num_dynrelocs,
};
try elf_file.rela_dyn.ensureUnusedCapacity(elf_file.base.allocator, num_dynrelocs);
switch (action) {
.@"error",
.plt,
=> unreachable,
.copyrel,
.cplt,
.none,
=> try writer.writeIntLittle(i32, @as(i32, @truncate(S + A))),
.dyn_copyrel => {
if (is_writeable or elf_file.base.options.z_nocopyreloc) {
elf_file.addRelaDynAssumeCapacity(.{
.offset = P,
.sym = target.extra(elf_file).?.dynamic,
.type = elf.R_X86_64_64,
.addend = A,
});
try applyDynamicReloc(A, elf_file, writer);
} else {
try writer.writeIntLittle(i32, @as(i32, @truncate(S + A)));
}
},
.dyn_cplt => {
if (is_writeable) {
elf_file.addRelaDynAssumeCapacity(.{
.offset = P,
.sym = target.extra(elf_file).?.dynamic,
.type = elf.R_X86_64_64,
.addend = A,
});
try applyDynamicReloc(A, elf_file, writer);
} else {
try writer.writeIntLittle(i32, @as(i32, @truncate(S + A)));
}
},
.dynrel => {
elf_file.addRelaDynAssumeCapacity(.{
.offset = P,
.sym = target.extra(elf_file).?.dynamic,
.type = elf.R_X86_64_64,
.addend = A,
});
try applyDynamicReloc(A, elf_file, writer);
},
.baserel => {
elf_file.addRelaDynAssumeCapacity(.{
.offset = P,
.type = elf.R_X86_64_RELATIVE,
.addend = S + A,
});
try applyDynamicReloc(S + A, elf_file, writer);
},
.ifunc => {
const S_ = @as(i64, @intCast(target.address(.{ .plt = false }, elf_file)));
elf_file.addRelaDynAssumeCapacity(.{
.offset = P,
.type = elf.R_X86_64_IRELATIVE,
.addend = S_ + A,
});
try applyDynamicReloc(S_ + A, elf_file, writer);
},
}
}
fn applyDynamicReloc(value: i64, elf_file: *Elf, writer: anytype) !void {
_ = elf_file;
// if (elf_file.options.apply_dynamic_relocs) {
try writer.writeIntLittle(i64, value);
// }
}
pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: anytype) !void {
relocs_log.debug("0x{x}: {s}", .{ self.value, self.name(elf_file) });
const file_ptr = self.file(elf_file).?;
var stream = std.io.fixedBufferStream(code);
const cwriter = stream.writer();
const rels = self.relocs(elf_file);
var i: usize = 0;
while (i < rels.len) : (i += 1) {
const rel = rels[i];
const r_type = rel.r_type();
if (r_type == elf.R_X86_64_NONE) continue;
const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
const target_index = switch (file_ptr) {
.zig_module => |x| x.symbol(rel.r_sym()),
.object => |x| x.symbols.items[rel.r_sym()],
else => unreachable,
};
const target = elf_file.symbol(target_index);
// Check for violation of One Definition Rule for COMDATs.
if (target.file(elf_file) == null) {
// TODO convert into an error
log.debug("{}: {s}: {s} refers to a discarded COMDAT section", .{
file_ptr.fmtPath(),
self.name(elf_file),
target.name(elf_file),
});
continue;
}
// Report an undefined symbol.
try self.reportUndefined(elf_file, target, target_index, rel, undefs);
// We will use equation format to resolve relocations:
// https://intezer.com/blog/malware-analysis/executable-and-linkable-format-101-part-3-relocations/
//
const P = @as(i64, @intCast(self.value + rel.r_offset));
// Addend from the relocation.
const A = rel.r_addend;
// Address of the target symbol - can be address of the symbol within an atom or address of PLT stub.
const S = @as(i64, @intCast(target.address(.{}, elf_file)));
// Address of the global offset table.
const GOT = blk: {
const shndx = if (elf_file.got_plt_section_index) |shndx|
shndx
else if (elf_file.got_section_index) |shndx|
shndx
else
null;
break :blk if (shndx) |index| @as(i64, @intCast(elf_file.shdrs.items[index].sh_addr)) else 0;
};
// Address of the dynamic thread pointer.
const DTP = @as(i64, @intCast(elf_file.dtpAddress()));
relocs_log.debug(" {s}: {x}: [{x} => {x}] ({s})", .{
fmtRelocType(r_type),
rel.r_offset,
P,
S + A,
target.name(elf_file),
});
try stream.seekTo(r_offset);
switch (r_type) {
elf.R_X86_64_NONE => unreachable,
elf.R_X86_64_8 => try cwriter.writeIntLittle(u8, @as(u8, @bitCast(@as(i8, @intCast(S + A))))),
elf.R_X86_64_16 => try cwriter.writeIntLittle(u16, @as(u16, @bitCast(@as(i16, @intCast(S + A))))),
elf.R_X86_64_32 => try cwriter.writeIntLittle(u32, @as(u32, @bitCast(@as(i32, @intCast(S + A))))),
elf.R_X86_64_32S => try cwriter.writeIntLittle(i32, @as(i32, @intCast(S + A))),
elf.R_X86_64_64 => try cwriter.writeIntLittle(i64, S + A),
elf.R_X86_64_DTPOFF32 => try cwriter.writeIntLittle(i32, @as(i32, @intCast(S + A - DTP))),
elf.R_X86_64_DTPOFF64 => try cwriter.writeIntLittle(i64, S + A - DTP),
elf.R_X86_64_GOTOFF64 => try cwriter.writeIntLittle(i64, S + A - GOT),
elf.R_X86_64_GOTPC64 => try cwriter.writeIntLittle(i64, GOT + A),
elf.R_X86_64_SIZE32 => {
const size = @as(i64, @intCast(target.elfSym(elf_file).st_size));
try cwriter.writeIntLittle(u32, @as(u32, @bitCast(@as(i32, @intCast(size + A)))));
},
elf.R_X86_64_SIZE64 => {
const size = @as(i64, @intCast(target.elfSym(elf_file).st_size));
try cwriter.writeIntLittle(i64, @as(i64, @intCast(size + A)));
},
else => try self.reportUnhandledRelocError(rel, elf_file),
}
}
}
pub fn fmtRelocType(r_type: u32) std.fmt.Formatter(formatRelocType) {
return .{ .data = r_type };
}
@@ -639,6 +1131,9 @@ fn formatRelocType(
elf.R_X86_64_GOTPCRELX => "R_X86_64_GOTPCRELX",
elf.R_X86_64_REX_GOTPCRELX => "R_X86_64_REX_GOTPCRELX",
elf.R_X86_64_NUM => "R_X86_64_NUM",
// Zig custom relocations
Elf.R_X86_64_ZIG_GOT32 => "R_X86_64_ZIG_GOT32",
Elf.R_X86_64_ZIG_GOTPCREL => "R_X86_64_ZIG_GOTPCREL",
else => "R_X86_64_UNKNOWN",
};
try writer.print("{s}", .{str});
@@ -679,39 +1174,32 @@ fn format2(
_ = unused_fmt_string;
const atom = ctx.atom;
const elf_file = ctx.elf_file;
try writer.print("atom({d}) : {s} : @{x} : sect({d}) : align({x}) : size({x})", .{
try writer.print("atom({d}) : {s} : @{x} : shdr({d}) : align({x}) : size({x})", .{
atom.atom_index, atom.name(elf_file), atom.value,
atom.output_section_index, atom.alignment, atom.size,
});
// if (atom.fde_start != atom.fde_end) {
// try writer.writeAll(" : fdes{ ");
// for (atom.getFdes(elf_file), atom.fde_start..) |fde, i| {
// try writer.print("{d}", .{i});
// if (!fde.alive) try writer.writeAll("([*])");
// if (i < atom.fde_end - 1) try writer.writeAll(", ");
// }
// try writer.writeAll(" }");
// }
const gc_sections = if (elf_file.base.options.gc_sections) |gc_sections| gc_sections else false;
if (gc_sections and !atom.flags.alive) {
if (atom.fde_start != atom.fde_end) {
try writer.writeAll(" : fdes{ ");
for (atom.fdes(elf_file), atom.fde_start..) |fde, i| {
try writer.print("{d}", .{i});
if (!fde.alive) try writer.writeAll("([*])");
if (i < atom.fde_end - 1) try writer.writeAll(", ");
}
try writer.writeAll(" }");
}
if (!atom.flags.alive) {
try writer.writeAll(" : [*]");
}
}
// TODO this has to be u32 but for now, to avoid redesigning elfSym machinery for
// ZigModule, keep it at u16 with the intention of bumping it to u32 in the near
// future.
pub const Index = u16;
pub const Index = u32;
pub const Flags = packed struct {
/// Specifies whether this atom is alive or has been garbage collected.
alive: bool = false,
alive: bool = true,
/// Specifies if the atom has been visited during garbage collection.
visited: bool = false,
/// Specifies whether this atom has been allocated in the output section.
allocated: bool = false,
};
const x86_64 = struct {
@@ -745,6 +1233,95 @@ const x86_64 = struct {
}
}
pub fn relaxTlsGdToIe(
self: Atom,
rels: []align(1) const elf.Elf64_Rela,
value: i32,
elf_file: *Elf,
stream: anytype,
) !void {
assert(rels.len == 2);
const writer = stream.writer();
switch (rels[1].r_type()) {
elf.R_X86_64_PC32,
elf.R_X86_64_PLT32,
=> {
var insts = [_]u8{
0x64, 0x48, 0x8b, 0x04, 0x25, 0, 0, 0, 0, // movq %fs:0,%rax
0x48, 0x03, 0x05, 0, 0, 0, 0, // add foo@gottpoff(%rip), %rax
};
std.mem.writeIntLittle(i32, insts[12..][0..4], value - 12);
try stream.seekBy(-4);
try writer.writeAll(&insts);
},
else => {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "fatal linker error: rewrite {} when followed by {}", .{
fmtRelocType(rels[0].r_type()),
fmtRelocType(rels[1].r_type()),
});
try err.addNote(elf_file, "in {}:{s} at offset 0x{x}", .{
self.file(elf_file).?.fmtPath(),
self.name(elf_file),
rels[0].r_offset,
});
},
}
}
pub fn relaxTlsLdToLe(
self: Atom,
rels: []align(1) const elf.Elf64_Rela,
value: i32,
elf_file: *Elf,
stream: anytype,
) !void {
assert(rels.len == 2);
const writer = stream.writer();
switch (rels[1].r_type()) {
elf.R_X86_64_PC32,
elf.R_X86_64_PLT32,
=> {
var insts = [_]u8{
0x31, 0xc0, // xor %eax, %eax
0x64, 0x48, 0x8b, 0, // mov %fs:(%rax), %rax
0x48, 0x2d, 0, 0, 0, 0, // sub $tls_size, %rax
};
std.mem.writeIntLittle(i32, insts[8..][0..4], value);
try stream.seekBy(-3);
try writer.writeAll(&insts);
},
elf.R_X86_64_GOTPCREL,
elf.R_X86_64_GOTPCRELX,
=> {
var insts = [_]u8{
0x31, 0xc0, // xor %eax, %eax
0x64, 0x48, 0x8b, 0, // mov %fs:(%rax), %rax
0x48, 0x2d, 0, 0, 0, 0, // sub $tls_size, %rax
0x90, // nop
};
std.mem.writeIntLittle(i32, insts[8..][0..4], value);
try stream.seekBy(-3);
try writer.writeAll(&insts);
},
else => {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "fatal linker error: rewrite {} when followed by {}", .{
fmtRelocType(rels[0].r_type()),
fmtRelocType(rels[1].r_type()),
});
try err.addNote(elf_file, "in {}:{s} at offset 0x{x}", .{
self.file(elf_file).?.fmtPath(),
self.name(elf_file),
rels[0].r_offset,
});
},
}
}
pub fn canRelaxGotTpOff(code: []const u8) bool {
const old_inst = disassemble(code) orelse return false;
switch (old_inst.encoding.mnemonic) {
@@ -776,6 +1353,22 @@ const x86_64 = struct {
}
}
pub fn relaxGotPcTlsDesc(code: []u8) !void {
const old_inst = disassemble(code) orelse return error.RelaxFail;
switch (old_inst.encoding.mnemonic) {
.lea => {
const inst = try Instruction.new(old_inst.prefix, .mov, &.{
old_inst.ops[0],
// TODO: hack to force imm32s in the assembler
.{ .imm = Immediate.s(-129) },
});
relocs_log.debug(" relaxing {} => {}", .{ old_inst.encoding, inst.encoding });
encode(&.{inst}, code) catch return error.RelaxFail;
},
else => return error.RelaxFail,
}
}
pub fn relaxTlsGdToLe(
self: Atom,
rels: []align(1) const elf.Elf64_Rela,
@@ -843,11 +1436,14 @@ const x86_64 = struct {
const std = @import("std");
const assert = std.debug.assert;
const elf = std.elf;
const eh_frame = @import("eh_frame.zig");
const log = std.log.scoped(.link);
const relocs_log = std.log.scoped(.link_relocs);
const Allocator = std.mem.Allocator;
const Atom = @This();
const Elf = @import("../Elf.zig");
const Fde = eh_frame.Fde;
const File = @import("file.zig").File;
const Object = @import("Object.zig");
const Symbol = @import("Symbol.zig");

View File

@@ -4,7 +4,7 @@ data: []const u8,
index: File.Index,
header: ?elf.Elf64_Ehdr = null,
shdrs: std.ArrayListUnmanaged(elf.Elf64_Shdr) = .{},
shdrs: std.ArrayListUnmanaged(ElfShdr) = .{},
strings: StringTable(.object_strings) = .{},
symtab: []align(1) const elf.Elf64_Sym = &[0]elf.Elf64_Sym{},
strtab: []const u8 = &[0]u8{},
@@ -59,8 +59,13 @@ pub fn parse(self: *Object, elf_file: *Elf) !void {
[*]align(1) const elf.Elf64_Shdr,
@ptrCast(self.data.ptr + shoff),
)[0..self.header.?.e_shnum];
try self.shdrs.appendUnalignedSlice(gpa, shdrs);
try self.strings.buffer.appendSlice(gpa, try self.shdrContents(self.header.?.e_shstrndx));
try self.shdrs.ensureTotalCapacityPrecise(gpa, shdrs.len);
for (shdrs) |shdr| {
self.shdrs.appendAssumeCapacity(try ElfShdr.fromElf64Shdr(shdr));
}
try self.strings.buffer.appendSlice(gpa, self.shdrContents(self.header.?.e_shstrndx));
const symtab_index = for (self.shdrs.items, 0..) |shdr, i| switch (shdr.sh_type) {
elf.SHT_SYMTAB => break @as(u16, @intCast(i)),
@@ -71,21 +76,21 @@ pub fn parse(self: *Object, elf_file: *Elf) !void {
const shdr = shdrs[index];
self.first_global = shdr.sh_info;
const symtab = try self.shdrContents(index);
const symtab = self.shdrContents(index);
const nsyms = @divExact(symtab.len, @sizeOf(elf.Elf64_Sym));
self.symtab = @as([*]align(1) const elf.Elf64_Sym, @ptrCast(symtab.ptr))[0..nsyms];
self.strtab = try self.shdrContents(@as(u16, @intCast(shdr.sh_link)));
self.strtab = self.shdrContents(@as(u16, @intCast(shdr.sh_link)));
}
try self.initAtoms(elf_file);
try self.initSymtab(elf_file);
// for (self.shdrs.items, 0..) |shdr, i| {
// const atom = elf_file.atom(self.atoms.items[i]) orelse continue;
// if (!atom.alive) continue;
// if (shdr.sh_type == elf.SHT_X86_64_UNWIND or mem.eql(u8, atom.name(elf_file), ".eh_frame"))
// try self.parseEhFrame(@as(u16, @intCast(i)), elf_file);
// }
for (self.shdrs.items, 0..) |shdr, i| {
const atom = elf_file.atom(self.atoms.items[i]) orelse continue;
if (!atom.flags.alive) continue;
if (shdr.sh_type == elf.SHT_X86_64_UNWIND or mem.eql(u8, atom.name(elf_file), ".eh_frame"))
try self.parseEhFrame(@as(u16, @intCast(i)), elf_file);
}
}
fn initAtoms(self: *Object, elf_file: *Elf) !void {
@@ -115,7 +120,7 @@ fn initAtoms(self: *Object, elf_file: *Elf) !void {
};
const shndx = @as(u16, @intCast(i));
const group_raw_data = try self.shdrContents(shndx);
const group_raw_data = self.shdrContents(shndx);
const group_nmembers = @divExact(group_raw_data.len, @sizeOf(u32));
const group_members = @as([*]align(1) const u32, @ptrCast(group_raw_data.ptr))[0..group_nmembers];
@@ -125,7 +130,10 @@ fn initAtoms(self: *Object, elf_file: *Elf) !void {
continue;
}
const group_signature_off = try self.strings.insert(elf_file.base.allocator, group_signature);
// Note the assumption about a global strtab used here to disambiguate common
// COMDAT owners.
const gpa = elf_file.base.allocator;
const group_signature_off = try elf_file.strtab.insert(gpa, group_signature);
const gop = try elf_file.getOrCreateComdatGroupOwner(group_signature_off);
const comdat_group_index = try elf_file.addComdatGroup();
const comdat_group = elf_file.comdatGroup(comdat_group_index);
@@ -133,7 +141,7 @@ fn initAtoms(self: *Object, elf_file: *Elf) !void {
.owner = gop.index,
.shndx = shndx,
};
try self.comdat_groups.append(elf_file.base.allocator, comdat_group_index);
try self.comdat_groups.append(gpa, comdat_group_index);
},
elf.SHT_SYMTAB_SHNDX => @panic("TODO SHT_SYMTAB_SHNDX"),
@@ -168,23 +176,21 @@ fn initAtoms(self: *Object, elf_file: *Elf) !void {
fn addAtom(
self: *Object,
shdr: elf.Elf64_Shdr,
shdr: ElfShdr,
shndx: u16,
name: [:0]const u8,
elf_file: *Elf,
) error{ OutOfMemory, Overflow }!void {
) error{OutOfMemory}!void {
const atom_index = try elf_file.addAtom();
const atom = elf_file.atom(atom_index).?;
atom.atom_index = atom_index;
atom.name_offset = try elf_file.strtab.insert(elf_file.base.allocator, name);
atom.file_index = self.index;
atom.input_section_index = shndx;
atom.output_section_index = try self.getOutputSectionIndex(elf_file, shdr);
atom.flags.alive = true;
self.atoms.items[shndx] = atom_index;
if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) {
const data = try self.shdrContents(shndx);
const data = self.shdrContents(shndx);
const chdr = @as(*align(1) const elf.Elf64_Chdr, @ptrCast(data.ptr)).*;
atom.size = chdr.ch_size;
atom.alignment = Alignment.fromNonzeroByteUnits(chdr.ch_addralign);
@@ -194,10 +200,10 @@ fn addAtom(
}
}
fn getOutputSectionIndex(self: *Object, elf_file: *Elf, shdr: elf.Elf64_Shdr) error{OutOfMemory}!u16 {
fn initOutputSection(self: Object, elf_file: *Elf, shdr: ElfShdr) error{OutOfMemory}!u16 {
const name = blk: {
const name = self.strings.getAssumeExists(shdr.sh_name);
// if (shdr.sh_flags & elf.SHF_MERGE != 0) break :blk name;
if (shdr.sh_flags & elf.SHF_MERGE != 0) break :blk name;
const sh_name_prefixes: []const [:0]const u8 = &.{
".text", ".data.rel.ro", ".data", ".rodata", ".bss.rel.ro", ".bss",
".init_array", ".fini_array", ".tbss", ".tdata", ".gcc_except_table", ".ctors",
@@ -208,8 +214,6 @@ fn getOutputSectionIndex(self: *Object, elf_file: *Elf, shdr: elf.Elf64_Shdr) er
break :blk prefix;
}
}
if (std.mem.eql(u8, name, ".tcommon")) break :blk ".tbss";
if (std.mem.eql(u8, name, ".common")) break :blk ".bss";
break :blk name;
};
const @"type" = switch (shdr.sh_type) {
@@ -231,47 +235,23 @@ fn getOutputSectionIndex(self: *Object, elf_file: *Elf, shdr: elf.Elf64_Shdr) er
else => flags,
};
};
const out_shndx = elf_file.sectionByName(name) orelse blk: {
const is_alloc = flags & elf.SHF_ALLOC != 0;
const is_write = flags & elf.SHF_WRITE != 0;
const is_exec = flags & elf.SHF_EXECINSTR != 0;
if (!is_alloc) {
log.err("{}: output section {s} not found", .{ self.fmtPath(), name });
@panic("TODO: missing output section!");
}
var phdr_flags: u32 = elf.PF_R;
if (is_write) phdr_flags |= elf.PF_W;
if (is_exec) phdr_flags |= elf.PF_X;
const phdr_index = try elf_file.allocateSegment(.{
.size = Elf.padToIdeal(shdr.sh_size),
.alignment = elf_file.page_size,
.flags = phdr_flags,
});
const shndx = try elf_file.allocateAllocSection(.{
.name = name,
.phdr_index = phdr_index,
.alignment = shdr.sh_addralign,
.flags = flags,
.type = @"type",
});
try elf_file.last_atom_and_free_list_table.putNoClobber(elf_file.base.allocator, shndx, .{});
break :blk shndx;
};
const out_shndx = elf_file.sectionByName(name) orelse try elf_file.addSection(.{
.type = @"type",
.flags = flags,
.name = name,
});
return out_shndx;
}
fn skipShdr(self: *Object, index: u16, elf_file: *Elf) bool {
_ = elf_file;
const shdr = self.shdrs.items[index];
const name = self.strings.getAssumeExists(shdr.sh_name);
const ignore = blk: {
if (mem.startsWith(u8, name, ".note")) break :blk true;
if (mem.startsWith(u8, name, ".comment")) break :blk true;
if (mem.startsWith(u8, name, ".llvm_addrsig")) break :blk true;
if (mem.startsWith(u8, name, ".eh_frame")) break :blk true;
// if (elf_file.base.options.strip and shdr.sh_flags & elf.SHF_ALLOC == 0 and
// mem.startsWith(u8, name, ".debug")) break :blk true;
if (shdr.sh_flags & elf.SHF_ALLOC == 0 and mem.startsWith(u8, name, ".debug")) break :blk true;
if (elf_file.base.options.strip and shdr.sh_flags & elf.SHF_ALLOC == 0 and
mem.startsWith(u8, name, ".debug")) break :blk true;
break :blk false;
};
return ignore;
@@ -300,10 +280,6 @@ fn initSymtab(self: *Object, elf_file: *Elf) !void {
sym_ptr.esym_index = @as(u32, @intCast(i));
sym_ptr.atom_index = if (sym.st_shndx == elf.SHN_ABS) 0 else self.atoms.items[sym.st_shndx];
sym_ptr.file_index = self.index;
sym_ptr.output_section_index = if (sym_ptr.atom(elf_file)) |atom_ptr|
atom_ptr.outputShndx().?
else
elf.SHN_UNDEF;
}
for (self.symtab[first_global..]) |sym| {
@@ -324,8 +300,8 @@ fn parseEhFrame(self: *Object, shndx: u16, elf_file: *Elf) !void {
};
const gpa = elf_file.base.allocator;
const raw = try self.shdrContents(shndx);
const relocs = try self.getRelocs(relocs_shndx);
const raw = self.shdrContents(shndx);
const relocs = self.getRelocs(relocs_shndx);
const fdes_start = self.fdes.items.len;
const cies_start = self.cies.items.len;
@@ -429,7 +405,7 @@ pub fn scanRelocs(self: *Object, elf_file: *Elf, undefs: anytype) !void {
const shdr = atom.inputShdr(elf_file);
if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
if (shdr.sh_type == elf.SHT_NOBITS) continue;
if (try atom.scanRelocsRequiresCode(elf_file)) {
if (atom.scanRelocsRequiresCode(elf_file)) {
// TODO ideally, we don't have to decompress at this stage (should already be done)
// and we just fetch the code slice.
const code = try self.codeDecompressAlloc(elf_file, atom_index);
@@ -439,7 +415,7 @@ pub fn scanRelocs(self: *Object, elf_file: *Elf, undefs: anytype) !void {
}
for (self.cies.items) |cie| {
for (try cie.relocs(elf_file)) |rel| {
for (cie.relocs(elf_file)) |rel| {
const sym = elf_file.symbol(self.symbols.items[rel.r_sym()]);
if (sym.flags.import) {
if (sym.type(elf_file) != elf.STT_FUNC)
@@ -474,15 +450,10 @@ pub fn resolveSymbols(self: *Object, elf_file: *Elf) void {
elf.SHN_ABS, elf.SHN_COMMON => 0,
else => self.atoms.items[esym.st_shndx],
};
const output_section_index = if (elf_file.atom(atom_index)) |atom|
atom.outputShndx().?
else
elf.SHN_UNDEF;
global.value = esym.st_value;
global.atom_index = atom_index;
global.esym_index = esym_index;
global.file_index = self.index;
global.output_section_index = output_section_index;
global.version_index = elf_file.default_sym_version;
if (esym.st_bind() == elf.STB_WEAK) global.flags.weak = true;
}
@@ -544,6 +515,15 @@ pub fn markLive(self: *Object, elf_file: *Elf) void {
}
}
/// Marks every `.eh_frame` atom in this object as dead. An atom counts as
/// `.eh_frame` when its input section type is SHT_X86_64_UNWIND or its name
/// is exactly ".eh_frame".
pub fn markEhFrameAtomsDead(self: Object, elf_file: *Elf) void {
    for (self.atoms.items) |index| {
        const atom_ptr = elf_file.atom(index) orelse continue;
        if (!atom_ptr.flags.alive) continue;
        const is_unwind_type = atom_ptr.inputShdr(elf_file).sh_type == elf.SHT_X86_64_UNWIND;
        if (is_unwind_type or mem.eql(u8, atom_ptr.name(elf_file), ".eh_frame")) {
            atom_ptr.flags.alive = false;
        }
    }
}
pub fn checkDuplicates(self: *Object, elf_file: *Elf) void {
const first_global = self.first_global orelse return;
for (self.globals(), 0..) |index, i| {
@@ -581,14 +561,14 @@ pub fn convertCommonSymbols(self: *Object, elf_file: *Elf) !void {
if (this_sym.st_shndx != elf.SHN_COMMON) continue;
const global = elf_file.symbol(index);
const global_file = global.getFile(elf_file).?;
if (global_file.getIndex() != self.index) {
if (elf_file.options.warn_common) {
elf_file.base.warn("{}: multiple common symbols: {s}", .{
self.fmtPath(),
global.getName(elf_file),
});
}
const global_file = global.file(elf_file).?;
if (global_file.index() != self.index) {
// if (elf_file.options.warn_common) {
// elf_file.base.warn("{}: multiple common symbols: {s}", .{
// self.fmtPath(),
// global.getName(elf_file),
// });
// }
continue;
}
@@ -597,13 +577,13 @@ pub fn convertCommonSymbols(self: *Object, elf_file: *Elf) !void {
const atom_index = try elf_file.addAtom();
try self.atoms.append(gpa, atom_index);
const is_tls = global.getType(elf_file) == elf.STT_TLS;
const name = if (is_tls) ".tbss" else ".bss";
const is_tls = global.type(elf_file) == elf.STT_TLS;
const name = if (is_tls) ".tls_common" else ".common";
const atom = elf_file.atom(atom_index).?;
atom.atom_index = atom_index;
atom.name = try elf_file.strtab.insert(gpa, name);
atom.file = self.index;
atom.name_offset = try elf_file.strtab.insert(gpa, name);
atom.file_index = self.index;
atom.size = this_sym.st_size;
const alignment = this_sym.st_value;
atom.alignment = Alignment.fromNonzeroByteUnits(alignment);
@@ -612,26 +592,76 @@ pub fn convertCommonSymbols(self: *Object, elf_file: *Elf) !void {
if (is_tls) sh_flags |= elf.SHF_TLS;
const shndx = @as(u16, @intCast(self.shdrs.items.len));
const shdr = try self.shdrs.addOne(gpa);
const sh_size = math.cast(usize, this_sym.st_size) orelse return error.Overflow;
shdr.* = .{
.sh_name = try self.strings.insert(gpa, name),
.sh_type = elf.SHT_NOBITS,
.sh_flags = sh_flags,
.sh_addr = 0,
.sh_offset = 0,
.sh_size = this_sym.st_size,
.sh_size = sh_size,
.sh_link = 0,
.sh_info = 0,
.sh_addralign = alignment,
.sh_entsize = 0,
};
atom.shndx = shndx;
atom.input_section_index = shndx;
global.value = 0;
global.atom = atom_index;
global.atom_index = atom_index;
global.flags.weak = false;
}
}
/// Ensures an output section exists for every alive atom in this object by
/// running its input section header through `initOutputSection`. The returned
/// section index is intentionally discarded here; atoms pick it up later in
/// `addAtomsToOutputSections`.
pub fn initOutputSections(self: Object, elf_file: *Elf) !void {
    for (self.atoms.items) |index| {
        const atom_ptr = elf_file.atom(index) orelse continue;
        if (!atom_ptr.flags.alive) continue;
        _ = try self.initOutputSection(elf_file, atom_ptr.inputShdr(elf_file));
    }
}
/// Assigns each alive atom its output section index and appends the atom to
/// the linker's per-output-section atom list.
/// Assumes `initOutputSections` ran first, so the section lookup normally
/// finds an existing section. We nevertheless propagate `error.OutOfMemory`
/// with `try` instead of `catch unreachable`: the function is already
/// fallible, and a reached `unreachable` would be undefined behavior in
/// release builds if the ordering assumption is ever violated.
pub fn addAtomsToOutputSections(self: *Object, elf_file: *Elf) !void {
    const gpa = elf_file.base.allocator;
    for (self.atoms.items) |atom_index| {
        const atom = elf_file.atom(atom_index) orelse continue;
        if (!atom.flags.alive) continue;
        const shdr = atom.inputShdr(elf_file);
        atom.output_section_index = try self.initOutputSection(elf_file, shdr);
        // Create the atom list for this output section on first use.
        const gop = try elf_file.output_sections.getOrPut(gpa, atom.output_section_index);
        if (!gop.found_existing) gop.value_ptr.* = .{};
        try gop.value_ptr.append(gpa, atom_index);
    }
}
/// Finalizes virtual addresses for this object's atoms and symbols.
/// Order matters: atoms are rebased onto their output section addresses
/// first, then local and global symbol values are rebased onto the
/// already-relocated atom values.
pub fn allocateAtoms(self: Object, elf_file: *Elf) void {
    // Pass 1: rebase every alive atom by its output section's address.
    for (self.atoms.items) |index| {
        const atom_ptr = elf_file.atom(index) orelse continue;
        if (!atom_ptr.flags.alive) continue;
        atom_ptr.value += elf_file.shdrs.items[atom_ptr.output_section_index].sh_addr;
    }

    // Pass 2: rebase local symbols on top of their atoms.
    for (self.locals()) |index| {
        const sym = elf_file.symbol(index);
        const atom_ptr = sym.atom(elf_file) orelse continue;
        if (!atom_ptr.flags.alive) continue;
        sym.value += atom_ptr.value;
        sym.output_section_index = atom_ptr.output_section_index;
    }

    // Pass 3: same for globals, but only those actually defined by this
    // object (the global may have been resolved to another file).
    for (self.globals()) |index| {
        const sym = elf_file.symbol(index);
        const atom_ptr = sym.atom(elf_file) orelse continue;
        if (!atom_ptr.flags.alive) continue;
        if (sym.file(elf_file).?.index() != self.index) continue;
        sym.value += atom_ptr.value;
        sym.output_section_index = atom_ptr.output_section_index;
    }
}
pub fn updateSymtabSize(self: *Object, elf_file: *Elf) void {
for (self.locals()) |local_index| {
const local = elf_file.symbol(local_index);
@@ -682,22 +712,20 @@ pub fn writeSymtab(self: *Object, elf_file: *Elf, ctx: anytype) void {
}
}
pub fn locals(self: *Object) []const Symbol.Index {
pub fn locals(self: Object) []const Symbol.Index {
const end = self.first_global orelse self.symbols.items.len;
return self.symbols.items[0..end];
}
pub fn globals(self: *Object) []const Symbol.Index {
pub fn globals(self: Object) []const Symbol.Index {
const start = self.first_global orelse self.symbols.items.len;
return self.symbols.items[start..];
}
fn shdrContents(self: Object, index: u32) error{Overflow}![]const u8 {
pub fn shdrContents(self: Object, index: u32) []const u8 {
assert(index < self.shdrs.items.len);
const shdr = self.shdrs.items[index];
const offset = math.cast(usize, shdr.sh_offset) orelse return error.Overflow;
const size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
return self.data[offset..][0..size];
return self.data[shdr.sh_offset..][0..shdr.sh_size];
}
/// Returns atom's code and optionally uncompresses data if required (for compressed sections).
@@ -706,7 +734,7 @@ pub fn codeDecompressAlloc(self: Object, elf_file: *Elf, atom_index: Atom.Index)
const gpa = elf_file.base.allocator;
const atom_ptr = elf_file.atom(atom_index).?;
assert(atom_ptr.file_index == self.index);
const data = try self.shdrContents(atom_ptr.input_section_index);
const data = self.shdrContents(atom_ptr.input_section_index);
const shdr = atom_ptr.inputShdr(elf_file);
if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) {
const chdr = @as(*align(1) const elf.Elf64_Chdr, @ptrCast(data.ptr)).*;
@@ -734,8 +762,8 @@ fn getString(self: *Object, off: u32) [:0]const u8 {
return mem.sliceTo(@as([*:0]const u8, @ptrCast(self.strtab.ptr + off)), 0);
}
pub fn comdatGroupMembers(self: *Object, index: u16) error{Overflow}![]align(1) const u32 {
const raw = try self.shdrContents(index);
pub fn comdatGroupMembers(self: *Object, index: u16) []align(1) const u32 {
const raw = self.shdrContents(index);
const nmembers = @divExact(raw.len, @sizeOf(u32));
const members = @as([*]align(1) const u32, @ptrCast(raw.ptr))[1..nmembers];
return members;
@@ -745,8 +773,8 @@ pub fn asFile(self: *Object) File {
return .{ .object = self };
}
pub fn getRelocs(self: *Object, shndx: u32) error{Overflow}![]align(1) const elf.Elf64_Rela {
const raw = try self.shdrContents(shndx);
pub fn getRelocs(self: *Object, shndx: u32) []align(1) const elf.Elf64_Rela {
const raw = self.shdrContents(shndx);
const num = @divExact(raw.len, @sizeOf(elf.Elf64_Rela));
return @as([*]align(1) const elf.Elf64_Rela, @ptrCast(raw.ptr))[0..num];
}
@@ -886,7 +914,7 @@ fn formatComdatGroups(
const cg = elf_file.comdatGroup(cg_index);
const cg_owner = elf_file.comdatGroupOwner(cg.owner);
if (cg_owner.file != object.index) continue;
const cg_members = object.comdatGroupMembers(cg.shndx) catch continue;
const cg_members = object.comdatGroupMembers(cg.shndx);
for (cg_members) |shndx| {
const atom_index = object.atoms.items[shndx];
const atom = elf_file.atom(atom_index) orelse continue;
@@ -915,6 +943,34 @@ fn formatPath(
} else try writer.writeAll(object.path);
}
/// Host-native mirror of `elf.Elf64_Shdr` in which the file offset and size
/// are stored as `usize`, so slicing the mapped file contents never needs a
/// checked cast at the use site. The overflow check happens once, in
/// `fromElf64Shdr`.
pub const ElfShdr = struct {
    sh_name: u32,
    sh_type: u32,
    sh_flags: u64,
    sh_addr: u64,
    // Narrowed from u64 at conversion time; see fromElf64Shdr.
    sh_offset: usize,
    sh_size: usize,
    sh_link: u32,
    sh_info: u32,
    sh_addralign: u64,
    sh_entsize: u64,

    /// Converts an on-disk section header, failing with `error.Overflow`
    /// when `sh_offset` or `sh_size` does not fit in `usize` (relevant on
    /// 32-bit hosts).
    pub fn fromElf64Shdr(shdr: elf.Elf64_Shdr) error{Overflow}!ElfShdr {
        return .{
            .sh_name = shdr.sh_name,
            .sh_type = shdr.sh_type,
            .sh_flags = shdr.sh_flags,
            .sh_addr = shdr.sh_addr,
            .sh_offset = math.cast(usize, shdr.sh_offset) orelse return error.Overflow,
            .sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow,
            .sh_link = shdr.sh_link,
            .sh_info = shdr.sh_info,
            .sh_addralign = shdr.sh_addralign,
            .sh_entsize = shdr.sh_entsize,
        };
    }
};
const Object = @This();
const std = @import("std");

View File

@@ -0,0 +1,363 @@
/// File system path this DSO was opened from; its basename is the fallback
/// soname when the dynamic section has no DT_SONAME (see `soname`).
path: []const u8,
/// Raw file contents. Owned by this struct and freed in `deinit`; the
/// `symtab`/`strtab` slices below view directly into it.
data: []const u8,
/// Index of this file in the linker's file table.
index: File.Index,
/// ELF header; populated by `parse`.
header: ?elf.Elf64_Ehdr = null,
/// Section headers converted to host-native `ElfShdr` (see `parse`).
shdrs: std.ArrayListUnmanaged(ElfShdr) = .{},
/// Dynamic symbol table (SHT_DYNSYM), an unaligned view over `data`.
symtab: []align(1) const elf.Elf64_Sym = &[0]elf.Elf64_Sym{},
/// String table backing `symtab` (the section `sh_link` points at).
strtab: []const u8 = &[0]u8{},
/// Version symtab contains version strings of the symbols if present.
versyms: std.ArrayListUnmanaged(elf.Elf64_Versym) = .{},
/// `strtab` offsets of version-definition names, indexed by version index;
/// filled by `parseVersions`.
verstrings: std.ArrayListUnmanaged(u32) = .{},
// Section indices discovered during `parse`; null when absent.
dynamic_sect_index: ?u16 = null,
versym_sect_index: ?u16 = null,
verdef_sect_index: ?u16 = null,
/// Indices into the linker's symbol table, one per entry in `symtab`.
symbols: std.ArrayListUnmanaged(Symbol.Index) = .{},
/// Symbols of this DSO grouped by st_value; built lazily by
/// `initSymbolAliases`, null until then.
aliases: ?std.ArrayListUnmanaged(u32) = null,
// Whether this DSO must be linked regardless of weak-undef references —
// presumably set from the command line (--needed/--as-needed); see markLive.
needed: bool,
// Whether this DSO participates in the link (toggled via File.setAlive).
alive: bool,
/// Contribution of this DSO to the output symtab; see `updateSymtabSize`.
output_symtab_size: Elf.SymtabSize = .{},
/// Sniffs `file` to determine whether it is an ELF shared object (ET_DYN
/// with a valid magic and EV_CURRENT ident version).
/// Restores the file cursor to offset 0 on every exit path. The previous
/// version registered the restoring `defer` only after the header read, so
/// a short/failed read left the cursor advanced for the next sniffer.
pub fn isSharedObject(file: std.fs.File) bool {
    defer file.seekTo(0) catch {};
    const reader = file.reader();
    const header = reader.readStruct(elf.Elf64_Ehdr) catch return false;
    if (!mem.eql(u8, header.e_ident[0..4], "\x7fELF")) return false;
    if (header.e_ident[elf.EI_VERSION] != 1) return false;
    if (header.e_type != elf.ET.DYN) return false;
    return true;
}
/// Releases all heap state owned by this shared object: the raw file bytes
/// plus every unmanaged list. `path` is not freed here (unchanged from the
/// original ownership scheme).
pub fn deinit(self: *SharedObject, allocator: Allocator) void {
    if (self.aliases) |*list| list.deinit(allocator);
    self.shdrs.deinit(allocator);
    self.symbols.deinit(allocator);
    self.verstrings.deinit(allocator);
    self.versyms.deinit(allocator);
    allocator.free(self.data);
}
/// Parses the shared object: reads the ELF header, converts all section
/// headers to host-native `ElfShdr`, locates the dynamic symbol and string
/// tables, then resolves version information and registers the symbols with
/// the linker (`parseVersions` / `initSymtab`).
pub fn parse(self: *SharedObject, elf_file: *Elf) !void {
    const gpa = elf_file.base.allocator;
    var stream = std.io.fixedBufferStream(self.data);
    const reader = stream.reader();

    self.header = try reader.readStruct(elf.Elf64_Ehdr);

    // Section header table offset must fit in usize on this host.
    const shoff = std.math.cast(usize, self.header.?.e_shoff) orelse return error.Overflow;

    var dynsym_index: ?u16 = null;
    // Unaligned view over the section header table embedded in `data`.
    const shdrs = @as(
        [*]align(1) const elf.Elf64_Shdr,
        @ptrCast(self.data.ptr + shoff),
    )[0..self.header.?.e_shnum];
    try self.shdrs.ensureTotalCapacityPrecise(gpa, shdrs.len);

    for (shdrs, 0..) |shdr, i| {
        self.shdrs.appendAssumeCapacity(try ElfShdr.fromElf64Shdr(shdr));
        // Remember the special sections we will need later on.
        switch (shdr.sh_type) {
            elf.SHT_DYNSYM => dynsym_index = @as(u16, @intCast(i)),
            elf.SHT_DYNAMIC => self.dynamic_sect_index = @as(u16, @intCast(i)),
            elf.SHT_GNU_VERSYM => self.versym_sect_index = @as(u16, @intCast(i)),
            elf.SHT_GNU_VERDEF => self.verdef_sect_index = @as(u16, @intCast(i)),
            else => {},
        }
    }

    if (dynsym_index) |index| {
        const shdr = self.shdrs.items[index];
        // Reinterpret .dynsym as a symbol array; sh_link points at the
        // associated string table section.
        const symtab = self.shdrContents(index);
        const nsyms = @divExact(symtab.len, @sizeOf(elf.Elf64_Sym));
        self.symtab = @as([*]align(1) const elf.Elf64_Sym, @ptrCast(symtab.ptr))[0..nsyms];
        self.strtab = self.shdrContents(@as(u16, @intCast(shdr.sh_link)));
    }

    try self.parseVersions(elf_file);
    try self.initSymtab(elf_file);
}
/// Builds the version tables for this DSO: `verstrings` maps a version index
/// to the strtab offset of its name (from SHT_GNU_VERDEF), and `versyms`
/// records one version per dynamic symbol (from SHT_GNU_VERSYM, defaulting
/// to VER_NDX_GLOBAL when that section is absent).
fn parseVersions(self: *SharedObject, elf_file: *Elf) !void {
    const gpa = elf_file.base.allocator;

    // Slots 0 and 1 are the reserved LOCAL and GLOBAL version indices;
    // they carry no name.
    try self.verstrings.resize(gpa, 2);
    self.verstrings.items[elf.VER_NDX_LOCAL] = 0;
    self.verstrings.items[elf.VER_NDX_GLOBAL] = 0;

    if (self.verdef_sect_index) |shndx| {
        const verdefs = self.shdrContents(shndx);
        const nverdefs = self.verdefNum();
        try self.verstrings.resize(gpa, self.verstrings.items.len + nverdefs);

        // Walk the Verdef chain; vd_next is the byte offset to the next
        // record, and the first Verdaux entry holds the version's name.
        var i: u32 = 0;
        var offset: u32 = 0;
        while (i < nverdefs) : (i += 1) {
            const verdef = @as(*align(1) const elf.Elf64_Verdef, @ptrCast(verdefs.ptr + offset)).*;
            defer offset += verdef.vd_next;
            if (verdef.vd_flags == elf.VER_FLG_BASE) continue; // Skip BASE entry
            const vda_name = if (verdef.vd_cnt > 0)
                @as(*align(1) const elf.Elf64_Verdaux, @ptrCast(verdefs.ptr + offset + verdef.vd_aux)).vda_name
            else
                0;
            self.verstrings.items[verdef.vd_ndx] = vda_name;
        }
    }

    try self.versyms.ensureTotalCapacityPrecise(gpa, self.symtab.len);

    if (self.versym_sect_index) |shndx| {
        const versyms_raw = self.shdrContents(shndx);
        const nversyms = @divExact(versyms_raw.len, @sizeOf(elf.Elf64_Versym));
        const versyms = @as([*]align(1) const elf.Elf64_Versym, @ptrCast(versyms_raw.ptr))[0..nversyms];
        for (versyms) |ver| {
            // Clamp version indices we have no definition for down to
            // GLOBAL so later lookups into `verstrings` stay in bounds.
            const normalized_ver = if (ver & elf.VERSYM_VERSION >= self.verstrings.items.len - 1)
                elf.VER_NDX_GLOBAL
            else
                ver;
            self.versyms.appendAssumeCapacity(normalized_ver);
        }
    } else for (0..self.symtab.len) |_| {
        self.versyms.appendAssumeCapacity(elf.VER_NDX_GLOBAL);
    }
}
/// Registers every dynamic symbol of this DSO with the linker's global
/// symbol table. Hidden-versioned symbols get the version appended to their
/// name ("name@version") so a plain reference never resolves to them.
fn initSymtab(self: *SharedObject, elf_file: *Elf) !void {
    const gpa = elf_file.base.allocator;
    try self.symbols.ensureTotalCapacityPrecise(gpa, self.symtab.len);

    // `versyms` was built 1:1 with `symtab` in parseVersions, so we can
    // iterate them in lockstep.
    for (self.symtab, self.versyms.items) |esym, versym| {
        const plain_name = self.getString(esym.st_name);
        const is_hidden = versym & elf.VERSYM_HIDDEN != 0;
        const name_off = if (is_hidden) blk: {
            // We need to garble up the name so that we don't pick this symbol
            // during symbol resolution. Thank you GNU!
            const mangled = try std.fmt.allocPrint(gpa, "{s}@{s}", .{
                plain_name,
                self.versionString(versym),
            });
            defer gpa.free(mangled);
            break :blk try elf_file.strtab.insert(gpa, mangled);
        } else try elf_file.strtab.insert(gpa, plain_name);
        const gop = try elf_file.getOrPutGlobal(name_off);
        self.symbols.appendAssumeCapacity(gop.index);
    }
}
/// Offers this DSO's defined dynamic symbols to the global symbol table,
/// overriding an existing resolution only when this definition has a
/// strictly better (lower) rank.
pub fn resolveSymbols(self: *SharedObject, elf_file: *Elf) void {
    for (self.globals(), 0..) |global_index, i| {
        const esym_index: u32 = @intCast(i);
        const esym = self.symtab[esym_index];
        if (esym.st_shndx == elf.SHN_UNDEF) continue;

        const global = elf_file.symbol(global_index);
        if (self.asFile().symbolRank(esym, false) >= global.symbolRank(elf_file)) continue;

        global.value = esym.st_value;
        global.atom_index = 0;
        global.esym_index = esym_index;
        global.version_index = self.versyms.items[esym_index];
        global.file_index = self.index;
    }
}
/// Resets every global this DSO registered to a blank symbol while keeping
/// its interned name offset, so symbol resolution can be re-run.
pub fn resetGlobals(self: *SharedObject, elf_file: *Elf) void {
    for (self.globals()) |index| {
        const sym = elf_file.symbol(index);
        const saved_name_offset = sym.name_offset;
        sym.* = .{};
        sym.name_offset = saved_name_offset;
    }
}
/// Transitively marks files alive that this DSO's undefined symbols resolve
/// to. A weak undef satisfied by a non-needed DSO does not pull that DSO in.
pub fn markLive(self: *SharedObject, elf_file: *Elf) void {
    // `symbols` was built 1:1 with `symtab`, so iterate in lockstep.
    for (self.globals(), self.symtab) |global_index, esym| {
        if (esym.st_shndx != elf.SHN_UNDEF) continue;

        const global = elf_file.symbol(global_index);
        const resolved_file = global.file(elf_file) orelse continue;

        switch (resolved_file) {
            .shared_object => |so| if (!so.needed and esym.st_bind() == elf.STB_WEAK) continue,
            else => {},
        }

        if (resolved_file.isAlive()) continue;
        resolved_file.setAlive();
        resolved_file.markLive(elf_file);
    }
}
/// Flags this DSO's referenced globals for emission into the output symtab
/// and counts them in `output_symtab_size.nglobals`.
pub fn updateSymtabSize(self: *SharedObject, elf_file: *Elf) void {
    for (self.globals()) |global_index| {
        const global = elf_file.symbol(global_index);
        // NOTE(review): the `continue` binds to the inner `if`, so a global
        // with no resolved file falls through and is counted — presumably
        // globals of a parsed DSO always have a file by this point; confirm.
        if (global.file(elf_file)) |file| if (file.index() != self.index) continue;
        if (global.isLocal()) continue;
        global.flags.output_symtab = true;
        self.output_symtab_size.nglobals += 1;
    }
}
/// Writes this DSO's flagged globals into `ctx.symtab`, starting at slot
/// `ctx.iglobal`.
/// NOTE(review): `iglobal` advances only in this local copy and is never
/// written back to `ctx`; assumes the caller derives the cursor advance from
/// `output_symtab_size` — verify against the call site.
pub fn writeSymtab(self: *SharedObject, elf_file: *Elf, ctx: anytype) void {
    var iglobal = ctx.iglobal;
    for (self.globals()) |global_index| {
        const global = elf_file.symbol(global_index);
        // Skip globals resolved to another file (same dangling-if pattern as
        // updateSymtabSize).
        if (global.file(elf_file)) |file| if (file.index() != self.index) continue;
        if (!global.flags.output_symtab) continue;
        global.setOutputSym(elf_file, &ctx.symtab[iglobal]);
        iglobal += 1;
    }
}
/// Every symbol of a shared object is a global; returns their indices in
/// the linker's symbol table.
pub fn globals(self: SharedObject) []const Symbol.Index {
    return self.symbols.items;
}
/// Returns the slice of the raw file bytes covered by section `index`.
pub fn shdrContents(self: SharedObject, index: u16) []const u8 {
    const shdr = self.shdrs.items[index];
    const start = shdr.sh_offset;
    return self.data[start .. start + shdr.sh_size];
}
/// Returns the null-terminated string at offset `off` in this DSO's dynamic
/// string table.
pub fn getString(self: SharedObject, off: u32) [:0]const u8 {
    assert(off < self.strtab.len);
    const start: [*:0]const u8 = @ptrCast(self.strtab.ptr + off);
    return mem.sliceTo(start, 0);
}
/// Returns the version name for a versym entry; the hidden bit and other
/// high bits are masked off before indexing `verstrings`.
pub fn versionString(self: SharedObject, index: elf.Elf64_Versym) [:0]const u8 {
    const off = self.verstrings.items[index & elf.VERSYM_VERSION];
    return self.getString(off);
}
/// Wraps this shared object in the linker's tagged `File` union.
pub fn asFile(self: *SharedObject) File {
    return .{ .shared_object = self };
}
/// Unaligned view of the `.dynamic` section entries; empty when this DSO
/// has no dynamic section.
fn dynamicTable(self: *SharedObject) []align(1) const elf.Elf64_Dyn {
    const shndx = self.dynamic_sect_index orelse return &[0]elf.Elf64_Dyn{};
    const bytes = self.shdrContents(shndx);
    const entries: [*]align(1) const elf.Elf64_Dyn = @ptrCast(bytes.ptr);
    return entries[0..@divExact(bytes.len, @sizeOf(elf.Elf64_Dyn))];
}
/// Number of version definitions advertised via DT_VERDEFNUM, or 0 when the
/// dynamic table has no such entry.
fn verdefNum(self: *SharedObject) u32 {
    for (self.dynamicTable()) |entry| {
        if (entry.d_tag == elf.DT_VERDEFNUM) return @intCast(entry.d_val);
    }
    return 0;
}
/// The DSO's soname from DT_SONAME, falling back to the basename of the
/// path it was opened from.
pub fn soname(self: *SharedObject) []const u8 {
    for (self.dynamicTable()) |entry| {
        if (entry.d_tag == elf.DT_SONAME) return self.getString(@intCast(entry.d_val));
    }
    return std.fs.path.basename(self.path);
}
/// Builds the alias table: the indices of all globals defined by this DSO,
/// sorted by their st_value so `symbolAliases` can find symbols sharing an
/// address. Must be called at most once (`aliases` starts out null).
pub fn initSymbolAliases(self: *SharedObject, elf_file: *Elf) !void {
    assert(self.aliases == null);

    const SortAlias = struct {
        pub fn lessThan(ctx: *Elf, lhs: Symbol.Index, rhs: Symbol.Index) bool {
            const lhs_sym = ctx.symbol(lhs).elfSym(ctx);
            const rhs_sym = ctx.symbol(rhs).elfSym(ctx);
            return lhs_sym.st_value < rhs_sym.st_value;
        }
    };

    const gpa = elf_file.base.allocator;
    var aliases = std.ArrayList(Symbol.Index).init(gpa);
    // Safe even though we move the buffer out below: moveToUnmanaged leaves
    // the list empty, making this deinit a no-op on the success path.
    defer aliases.deinit();
    try aliases.ensureTotalCapacityPrecise(self.globals().len);

    // Keep only globals that actually resolved to this DSO.
    for (self.globals()) |index| {
        const global = elf_file.symbol(index);
        const global_file = global.file(elf_file) orelse continue;
        if (global_file.index() != self.index) continue;
        aliases.appendAssumeCapacity(index);
    }

    std.mem.sort(u32, aliases.items, elf_file, SortAlias.lessThan);

    self.aliases = aliases.moveToUnmanaged();
}
/// Returns all of this DSO's symbols that share the st_value of the symbol
/// at `index`, as a sub-slice of the value-sorted alias table. Requires
/// `initSymbolAliases` to have run.
pub fn symbolAliases(self: *SharedObject, index: u32, elf_file: *Elf) []const u32 {
    assert(self.aliases != null);

    const symbol = elf_file.symbol(index).elfSym(elf_file);
    const aliases = self.aliases.?;

    // Linear scan for the first alias with a matching address...
    const start = for (aliases.items, 0..) |alias, i| {
        const alias_sym = elf_file.symbol(alias).elfSym(elf_file);
        if (symbol.st_value == alias_sym.st_value) break i;
    } else aliases.items.len;

    // ...then for the first alias past that address (the list is sorted).
    const end = for (aliases.items[start..], 0..) |alias, i| {
        const alias_sym = elf_file.symbol(alias).elfSym(elf_file);
        if (symbol.st_value < alias_sym.st_value) break i + start;
    } else aliases.items.len;

    return aliases.items[start..end];
}
/// Formatting a SharedObject directly is a programming error; use
/// `fmtSymtab` instead.
pub fn format(
    self: SharedObject,
    comptime unused_fmt_string: []const u8,
    options: std.fmt.FormatOptions,
    writer: anytype,
) !void {
    _ = self;
    _ = unused_fmt_string;
    _ = options;
    _ = writer;
    @compileError("do not format shared objects directly");
}
/// Returns a formatter that prints this DSO's globals via `formatSymtab`.
pub fn fmtSymtab(self: SharedObject, elf_file: *Elf) std.fmt.Formatter(formatSymtab) {
    return .{ .data = .{
        .shared = self,
        .elf_file = elf_file,
    } };
}
/// Payload threaded through `std.fmt.Formatter` into `formatSymtab`.
const FormatContext = struct {
    shared: SharedObject,
    elf_file: *Elf,
};
/// Writes a human-readable listing of this DSO's globals, one per line.
fn formatSymtab(
    ctx: FormatContext,
    comptime unused_fmt_string: []const u8,
    options: std.fmt.FormatOptions,
    writer: anytype,
) !void {
    _ = unused_fmt_string;
    _ = options;
    const shared = ctx.shared;
    try writer.writeAll(" globals\n");
    for (shared.symbols.items) |index| {
        const global = ctx.elf_file.symbol(index);
        try writer.print(" {}\n", .{global.fmt(ctx.elf_file)});
    }
}
const SharedObject = @This();
const std = @import("std");
const assert = std.debug.assert;
const elf = std.elf;
const log = std.log.scoped(.elf);
const mem = std.mem;
const Allocator = mem.Allocator;
const Elf = @import("../Elf.zig");
const ElfShdr = @import("Object.zig").ElfShdr;
const File = @import("file.zig").File;
const Symbol = @import("Symbol.zig");

View File

@@ -32,7 +32,7 @@ extra_index: u32 = 0,
pub fn isAbs(symbol: Symbol, elf_file: *Elf) bool {
const file_ptr = symbol.file(elf_file).?;
// if (file_ptr == .shared) return symbol.sourceSymbol(elf_file).st_shndx == elf.SHN_ABS;
if (file_ptr == .shared_object) return symbol.elfSym(elf_file).st_shndx == elf.SHN_ABS;
return !symbol.flags.import and symbol.atom(elf_file) == null and symbol.outputShndx() == null and
file_ptr != .linker_defined;
}
@@ -51,10 +51,10 @@ pub fn isIFunc(symbol: Symbol, elf_file: *Elf) bool {
}
pub fn @"type"(symbol: Symbol, elf_file: *Elf) u4 {
const s_sym = symbol.elfSym(elf_file);
// const file_ptr = symbol.file(elf_file).?;
// if (s_sym.st_type() == elf.STT_GNU_IFUNC and file_ptr == .shared) return elf.STT_FUNC;
return s_sym.st_type();
const esym = symbol.elfSym(elf_file);
const file_ptr = symbol.file(elf_file).?;
if (esym.st_type() == elf.STT_GNU_IFUNC and file_ptr == .shared_object) return elf.STT_FUNC;
return esym.st_type();
}
pub fn name(symbol: Symbol, elf_file: *Elf) [:0]const u8 {
@@ -74,7 +74,7 @@ pub fn elfSym(symbol: Symbol, elf_file: *Elf) elf.Elf64_Sym {
switch (file_ptr) {
.zig_module => |x| return x.elfSym(symbol.esym_index).*,
.linker_defined => |x| return x.symtab.items[symbol.esym_index],
.object => |x| return x.symtab[symbol.esym_index],
inline else => |x| return x.symtab[symbol.esym_index],
}
}
@@ -88,23 +88,18 @@ pub fn symbolRank(symbol: Symbol, elf_file: *Elf) u32 {
return file_ptr.symbolRank(sym, in_archive);
}
pub fn address(symbol: Symbol, opts: struct {
plt: bool = true,
}, elf_file: *Elf) u64 {
_ = elf_file;
_ = opts;
// if (symbol.flags.copy_rel) {
// return elf_file.sectionAddress(elf_file.copy_rel_sect_index.?) + symbol.value;
// }
// if (symbol.flags.plt and opts.plt) {
// const extra = symbol.getExtra(elf_file).?;
// if (!symbol.flags.is_canonical and symbol.flags.got) {
// // We have a non-lazy bound function pointer, use that!
// return elf_file.getPltGotEntryAddress(extra.plt_got);
// }
// // Lazy-bound function it is!
// return elf_file.getPltEntryAddress(extra.plt);
// }
/// Computes the runtime address to use for this symbol:
/// copy-relocated data resolves into the copy-rel section; PLT-bound
/// functions (when `opts.plt`) resolve to their .plt.got entry if a
/// non-canonical GOT-backed pointer exists, otherwise to their lazy PLT
/// stub; everything else uses the symbol's own value.
pub fn address(symbol: Symbol, opts: struct { plt: bool = true }, elf_file: *Elf) u64 {
    if (symbol.flags.has_copy_rel) {
        return symbol.copyRelAddress(elf_file);
    }
    if (symbol.flags.has_plt and opts.plt) {
        if (!symbol.flags.is_canonical and symbol.flags.has_got) {
            // We have a non-lazy bound function pointer, use that!
            return symbol.pltGotAddress(elf_file);
        }
        // Lazy-bound function it is!
        return symbol.pltAddress(elf_file);
    }
    return symbol.value;
}
@@ -115,48 +110,83 @@ pub fn gotAddress(symbol: Symbol, elf_file: *Elf) u64 {
return entry.address(elf_file);
}
const GetOrCreateGotEntryResult = struct {
/// Address of this symbol's .plt.got entry (16 bytes per entry), or 0 when
/// the symbol has no combined PLT+GOT presence.
pub fn pltGotAddress(symbol: Symbol, elf_file: *Elf) u64 {
    if (!(symbol.flags.has_plt and symbol.flags.has_got)) return 0;
    const extras = symbol.extra(elf_file).?;
    const shdr = elf_file.shdrs.items[elf_file.plt_got_section_index.?];
    return shdr.sh_addr + extras.plt_got * 16;
}
/// Address of this symbol's lazy PLT stub (16 bytes per entry, past the
/// section preamble), or 0 when the symbol has no PLT entry.
pub fn pltAddress(symbol: Symbol, elf_file: *Elf) u64 {
    if (!symbol.flags.has_plt) return 0;
    const extras = symbol.extra(elf_file).?;
    const shdr = elf_file.shdrs.items[elf_file.plt_section_index.?];
    return shdr.sh_addr + extras.plt * 16 + PltSection.preamble_size;
}
/// Address of this symbol's .got.plt slot (8 bytes per slot, past the
/// section preamble), or 0 when the symbol has no PLT entry. Indexed by the
/// same `plt` extra as `pltAddress`, so PLT stubs and GOT slots pair up.
pub fn gotPltAddress(symbol: Symbol, elf_file: *Elf) u64 {
    if (!symbol.flags.has_plt) return 0;
    const extras = symbol.extra(elf_file).?;
    const shdr = elf_file.shdrs.items[elf_file.got_plt_section_index.?];
    return shdr.sh_addr + extras.plt * 8 + GotPltSection.preamble_size;
}
/// Address of this symbol's copy-relocated storage: its value offset within
/// the copy-rel output section. Returns 0 when no COPYREL applies.
pub fn copyRelAddress(symbol: Symbol, elf_file: *Elf) u64 {
    if (!symbol.flags.has_copy_rel) return 0;
    const shdr = elf_file.shdrs.items[elf_file.copy_rel_section_index.?];
    return shdr.sh_addr + symbol.value;
}
/// Address of this symbol's TLSGD GOT entry, or 0 when none was allocated.
pub fn tlsGdAddress(symbol: Symbol, elf_file: *Elf) u64 {
    if (!symbol.flags.has_tlsgd) return 0;
    const extras = symbol.extra(elf_file).?;
    const entry = elf_file.got.entries.items[extras.tlsgd];
    return entry.address(elf_file);
}
/// Address of this symbol's GOTTP (initial-exec TLS) GOT entry, or 0 when
/// none was allocated.
pub fn gotTpAddress(symbol: Symbol, elf_file: *Elf) u64 {
    if (!symbol.flags.has_gottp) return 0;
    const extras = symbol.extra(elf_file).?;
    const entry = elf_file.got.entries.items[extras.gottp];
    return entry.address(elf_file);
}
/// Address of this symbol's TLSDESC GOT entry, or 0 when none was allocated.
pub fn tlsDescAddress(symbol: Symbol, elf_file: *Elf) u64 {
    if (!symbol.flags.has_tlsdesc) return 0;
    const extras = symbol.extra(elf_file).?;
    const entry = elf_file.got.entries.items[extras.tlsdesc];
    return entry.address(elf_file);
}
const GetOrCreateZigGotEntryResult = struct {
found_existing: bool,
index: GotSection.Index,
index: ZigGotSection.Index,
};
pub fn getOrCreateGotEntry(symbol: *Symbol, symbol_index: Index, elf_file: *Elf) !GetOrCreateGotEntryResult {
assert(symbol.flags.needs_got);
if (symbol.flags.has_got) return .{ .found_existing = true, .index = symbol.extra(elf_file).?.got };
const index = try elf_file.got.addGotSymbol(symbol_index, elf_file);
symbol.flags.has_got = true;
/// Returns this symbol's .zig.got entry index, creating the entry on first
/// request via `zig_got.addSymbol` (which is presumably what sets the
/// `has_zig_got` flag and the `zig_got` extra — confirm there).
pub fn getOrCreateZigGotEntry(symbol: *Symbol, symbol_index: Index, elf_file: *Elf) !GetOrCreateZigGotEntryResult {
    if (symbol.flags.has_zig_got) return .{ .found_existing = true, .index = symbol.extra(elf_file).?.zig_got };
    const index = try elf_file.zig_got.addSymbol(symbol_index, elf_file);
    return .{ .found_existing = false, .index = index };
}
// pub fn tlsGdAddress(symbol: Symbol, elf_file: *Elf) u64 {
// if (!symbol.flags.tlsgd) return 0;
// const extra = symbol.getExtra(elf_file).?;
// return elf_file.getGotEntryAddress(extra.tlsgd);
// }
/// Address of this symbol's .zig.got entry, or 0 when none was allocated.
pub fn zigGotAddress(symbol: Symbol, elf_file: *Elf) u64 {
    if (!symbol.flags.has_zig_got) return 0;
    const extras = symbol.extra(elf_file).?;
    return elf_file.zig_got.entryAddress(extras.zig_got, elf_file);
}
// pub fn gotTpAddress(symbol: Symbol, elf_file: *Elf) u64 {
// if (!symbol.flags.gottp) return 0;
// const extra = symbol.getExtra(elf_file).?;
// return elf_file.getGotEntryAddress(extra.gottp);
// }
// pub fn tlsDescAddress(symbol: Symbol, elf_file: *Elf) u64 {
// if (!symbol.flags.tlsdesc) return 0;
// const extra = symbol.getExtra(elf_file).?;
// return elf_file.getGotEntryAddress(extra.tlsdesc);
// }
// pub fn alignment(symbol: Symbol, elf_file: *Elf) !u64 {
// const file = symbol.getFile(elf_file) orelse return 0;
// const shared = file.shared;
// const s_sym = symbol.getSourceSymbol(elf_file);
// const shdr = shared.getShdrs()[s_sym.st_shndx];
// const alignment = @max(1, shdr.sh_addralign);
// return if (s_sym.st_value == 0)
// alignment
// else
// @min(alignment, try std.math.powi(u64, 2, @ctz(s_sym.st_value)));
// }
/// Computes the alignment for a symbol defined in a shared object: the
/// defining section's alignment, capped (for nonzero addresses) by the
/// largest power of two dividing the symbol's address.
/// NOTE(review): indexes `shdrs` with `esym.st_shndx` directly — assumes the
/// symbol is defined in a regular section (not SHN_ABS/SHN_COMMON); verify
/// at the call site.
pub fn dsoAlignment(symbol: Symbol, elf_file: *Elf) !u64 {
    const file_ptr = symbol.file(elf_file) orelse return 0;
    assert(file_ptr == .shared_object);
    const shared_object = file_ptr.shared_object;
    const esym = symbol.elfSym(elf_file);
    const shdr = shared_object.shdrs.items[esym.st_shndx];
    const alignment = @max(1, shdr.sh_addralign);
    return if (esym.st_value == 0)
        alignment
    else
        @min(alignment, try std.math.powi(u64, 2, @ctz(esym.st_value)));
}
pub fn addExtra(symbol: *Symbol, extras: Extra, elf_file: *Elf) !void {
symbol.extra_index = try elf_file.addSymbolExtra(extras);
@@ -180,22 +210,22 @@ pub fn setOutputSym(symbol: Symbol, elf_file: *Elf, out: *elf.Elf64_Sym) void {
const st_bind: u8 = blk: {
if (symbol.isLocal()) break :blk 0;
if (symbol.flags.weak) break :blk elf.STB_WEAK;
// if (file_ptr == .shared) break :blk elf.STB_GLOBAL;
if (file_ptr == .shared_object) break :blk elf.STB_GLOBAL;
break :blk esym.st_bind();
};
const st_shndx = blk: {
// if (symbol.flags.copy_rel) break :blk elf_file.copy_rel_sect_index.?;
// if (file_ptr == .shared or s_sym.st_shndx == elf.SHN_UNDEF) break :blk elf.SHN_UNDEF;
if (symbol.flags.has_copy_rel) break :blk elf_file.copy_rel_section_index.?;
if (file_ptr == .shared_object or esym.st_shndx == elf.SHN_UNDEF) break :blk elf.SHN_UNDEF;
if (symbol.atom(elf_file) == null and file_ptr != .linker_defined)
break :blk elf.SHN_ABS;
break :blk symbol.outputShndx() orelse elf.SHN_UNDEF;
};
const st_value = blk: {
// if (symbol.flags.copy_rel) break :blk symbol.address(.{}, elf_file);
// if (file_ptr == .shared or s_sym.st_shndx == elf.SHN_UNDEF) {
// if (symbol.flags.is_canonical) break :blk symbol.address(.{}, elf_file);
// break :blk 0;
// }
if (symbol.flags.has_copy_rel) break :blk symbol.address(.{}, elf_file);
if (file_ptr == .shared_object or esym.st_shndx == elf.SHN_UNDEF) {
if (symbol.flags.is_canonical) break :blk symbol.address(.{}, elf_file);
break :blk 0;
}
if (st_shndx == elf.SHN_ABS) break :blk symbol.value;
const shdr = &elf_file.shdrs.items[st_shndx];
if (shdr.sh_flags & elf.SHF_TLS != 0 and file_ptr != .linker_defined)
@@ -251,9 +281,10 @@ fn formatName(
switch (symbol.version_index & elf.VERSYM_VERSION) {
elf.VER_NDX_LOCAL, elf.VER_NDX_GLOBAL => {},
else => {
unreachable;
// const shared = symbol.getFile(elf_file).?.shared;
// try writer.print("@{s}", .{shared.getVersionString(symbol.version_index)});
const file_ptr = symbol.file(elf_file).?;
assert(file_ptr == .shared_object);
const shared_object = file_ptr.shared_object;
try writer.print("@{s}", .{shared_object.versionString(symbol.version_index)});
},
}
}
@@ -283,7 +314,7 @@ fn format2(
try writer.writeAll(" : absolute");
}
} else if (symbol.outputShndx()) |shndx| {
try writer.print(" : sect({d})", .{shndx});
try writer.print(" : shdr({d})", .{shndx});
}
if (symbol.atom(ctx.elf_file)) |atom_ptr| {
try writer.print(" : atom({d})", .{atom_ptr.atom_index});
@@ -309,23 +340,25 @@ pub const Flags = packed struct {
/// Whether this symbol is weak.
weak: bool = false,
/// Whether the symbol makes into the output symtab or not.
/// Whether the symbol makes into the output symtab.
output_symtab: bool = false,
/// Whether the symbol has entry in dynamic symbol table.
has_dynamic: bool = false,
/// Whether the symbol contains GOT indirection.
needs_got: bool = false,
has_got: bool = false,
/// Whether the symbol contains PLT indirection.
needs_plt: bool = false,
plt: bool = false,
has_plt: bool = false,
/// Whether the PLT entry is canonical.
is_canonical: bool = false,
/// Whether the symbol contains COPYREL directive.
copy_rel: bool = false,
needs_copy_rel: bool = false,
has_copy_rel: bool = false,
has_dynamic: bool = false,
/// Whether the symbol contains TLSGD indirection.
needs_tlsgd: bool = false,
@@ -336,7 +369,11 @@ pub const Flags = packed struct {
has_gottp: bool = false,
/// Whether the symbol contains TLSDESC indirection.
tlsdesc: bool = false,
needs_tlsdesc: bool = false,
has_tlsdesc: bool = false,
/// Whether the symbol contains .zig.got indirection.
has_zig_got: bool = false,
};
pub const Extra = struct {
@@ -348,6 +385,7 @@ pub const Extra = struct {
tlsgd: u32 = 0,
gottp: u32 = 0,
tlsdesc: u32 = 0,
zig_got: u32 = 0,
};
pub const Index = u32;
@@ -361,8 +399,11 @@ const Atom = @import("Atom.zig");
const Elf = @import("../Elf.zig");
const File = @import("file.zig").File;
const GotSection = synthetic_sections.GotSection;
const GotPltSection = synthetic_sections.GotPltSection;
const LinkerDefined = @import("LinkerDefined.zig");
// const Object = @import("Object.zig");
// const SharedObject = @import("SharedObject.zig");
const Object = @import("Object.zig");
const PltSection = synthetic_sections.PltSection;
const SharedObject = @import("SharedObject.zig");
const Symbol = @This();
const ZigGotSection = synthetic_sections.ZigGotSection;
const ZigModule = @import("ZigModule.zig");

View File

@@ -13,9 +13,11 @@ local_symbols: std.ArrayListUnmanaged(Symbol.Index) = .{},
global_symbols: std.ArrayListUnmanaged(Symbol.Index) = .{},
globals_lookup: std.AutoHashMapUnmanaged(u32, Symbol.Index) = .{},
atoms: std.AutoArrayHashMapUnmanaged(Atom.Index, void) = .{},
atoms: std.ArrayListUnmanaged(Atom.Index) = .{},
relocs: std.ArrayListUnmanaged(std.ArrayListUnmanaged(elf.Elf64_Rela)) = .{},
num_dynrelocs: u32 = 0,
output_symtab_size: Elf.SymtabSize = .{},
pub fn deinit(self: *ZigModule, allocator: Allocator) void {
@@ -56,7 +58,8 @@ pub fn addAtom(self: *ZigModule, elf_file: *Elf) !Symbol.Index {
const symbol_index = try elf_file.addSymbol();
const esym_index = try self.addLocalEsym(gpa);
try self.atoms.putNoClobber(gpa, atom_index, {});
const shndx = @as(u16, @intCast(self.atoms.items.len));
try self.atoms.append(gpa, atom_index);
try self.local_symbols.append(gpa, symbol_index);
const atom_ptr = elf_file.atom(atom_index).?;
@@ -67,10 +70,10 @@ pub fn addAtom(self: *ZigModule, elf_file: *Elf) !Symbol.Index {
symbol_ptr.atom_index = atom_index;
const esym = &self.local_esyms.items[esym_index];
esym.st_shndx = atom_index;
esym.st_shndx = shndx;
symbol_ptr.esym_index = esym_index;
const relocs_index = @as(Atom.Index, @intCast(self.relocs.items.len));
const relocs_index = @as(u16, @intCast(self.relocs.items.len));
const relocs = try self.relocs.addOne(gpa);
relocs.* = .{};
atom_ptr.relocs_section_index = relocs_index;
@@ -78,6 +81,22 @@ pub fn addAtom(self: *ZigModule, elf_file: *Elf) !Symbol.Index {
return symbol_index;
}
/// Synthesizes an "input" section header for the atom by copying the atom's
/// output section header and overriding address, offset, size, and alignment
/// with the atom's own values. Falls back to `Elf.null_shdr` when the atom or
/// its output section is missing.
/// TODO actually create fake input shdrs and return that instead.
pub fn inputShdr(self: ZigModule, atom_index: Atom.Index, elf_file: *Elf) Object.ElfShdr {
    _ = self;
    var shdr = Elf.null_shdr;
    if (elf_file.atom(atom_index)) |atom_ptr| {
        if (atom_ptr.outputShndx()) |shndx| {
            shdr = elf_file.shdrs.items[shndx];
            shdr.sh_addr = 0;
            shdr.sh_offset = 0;
            shdr.sh_size = atom_ptr.size;
            shdr.sh_addralign = atom_ptr.alignment.toByteUnits(1);
        }
    }
    return Object.ElfShdr.fromElf64Shdr(shdr) catch unreachable;
}
pub fn resolveSymbols(self: *ZigModule, elf_file: *Elf) void {
for (self.globals(), 0..) |index, i| {
const esym_index = @as(Symbol.Index, @intCast(i)) | 0x10000000;
@@ -86,7 +105,7 @@ pub fn resolveSymbols(self: *ZigModule, elf_file: *Elf) void {
if (esym.st_shndx == elf.SHN_UNDEF) continue;
if (esym.st_shndx != elf.SHN_ABS and esym.st_shndx != elf.SHN_COMMON) {
const atom_index = esym.st_shndx;
const atom_index = self.atoms.items[esym.st_shndx];
const atom = elf_file.atom(atom_index) orelse continue;
if (!atom.flags.alive) continue;
}
@@ -95,7 +114,7 @@ pub fn resolveSymbols(self: *ZigModule, elf_file: *Elf) void {
if (self.asFile().symbolRank(esym, false) < global.symbolRank(elf_file)) {
const atom_index = switch (esym.st_shndx) {
elf.SHN_ABS, elf.SHN_COMMON => 0,
else => esym.st_shndx,
else => self.atoms.items[esym.st_shndx],
};
const output_section_index = if (elf_file.atom(atom_index)) |atom|
atom.outputShndx().?
@@ -141,10 +160,12 @@ pub fn claimUnresolved(self: *ZigModule, elf_file: *Elf) void {
}
pub fn scanRelocs(self: *ZigModule, elf_file: *Elf, undefs: anytype) !void {
for (self.atoms.keys()) |atom_index| {
for (self.atoms.items) |atom_index| {
const atom = elf_file.atom(atom_index) orelse continue;
if (!atom.flags.alive) continue;
if (try atom.scanRelocsRequiresCode(elf_file)) {
const shdr = atom.inputShdr(elf_file);
if (shdr.sh_type == elf.SHT_NOBITS) continue;
if (atom.scanRelocsRequiresCode(elf_file)) {
// TODO ideally we don't have to fetch the code here.
// Perhaps it would make sense to save the code until flushModule where we
// would free all of generated code?
@@ -272,7 +293,10 @@ pub fn codeAlloc(self: ZigModule, elf_file: *Elf, atom_index: Atom.Index) ![]u8
const code = try gpa.alloc(u8, size);
errdefer gpa.free(code);
const amt = try elf_file.base.file.?.preadAll(code, file_offset);
if (amt != code.len) return error.InputOutput;
if (amt != code.len) {
log.err("fetching code for {s} failed", .{atom.name(elf_file)});
return error.InputOutput;
}
return code;
}
@@ -324,7 +348,7 @@ fn formatAtoms(
_ = unused_fmt_string;
_ = options;
try writer.writeAll(" atoms\n");
for (ctx.self.atoms.keys()) |atom_index| {
for (ctx.self.atoms.items) |atom_index| {
const atom = ctx.elf_file.atom(atom_index) orelse continue;
try writer.print(" {}\n", .{atom.fmt(ctx.elf_file)});
}
@@ -333,11 +357,13 @@ fn formatAtoms(
const assert = std.debug.assert;
const std = @import("std");
const elf = std.elf;
const log = std.log.scoped(.link);
const Allocator = std.mem.Allocator;
const Atom = @import("Atom.zig");
const Elf = @import("../Elf.zig");
const File = @import("file.zig").File;
const Module = @import("../../Module.zig");
const Object = @import("Object.zig");
const Symbol = @import("Symbol.zig");
const ZigModule = @This();

View File

@@ -1,7 +1,7 @@
pub const Fde = struct {
/// Includes 4byte size cell.
offset: u64,
size: u64,
offset: usize,
size: usize,
cie_index: u32,
rel_index: u32 = 0,
rel_num: u32 = 0,
@@ -20,9 +20,9 @@ pub const Fde = struct {
return base + fde.out_offset;
}
pub fn data(fde: Fde, elf_file: *Elf) error{Overflow}![]const u8 {
pub fn data(fde: Fde, elf_file: *Elf) []const u8 {
const object = elf_file.file(fde.file_index).?.object;
const contents = try object.shdrContents(fde.input_section_index);
const contents = object.shdrContents(fde.input_section_index);
return contents[fde.offset..][0..fde.calcSize()];
}
@@ -32,24 +32,25 @@ pub const Fde = struct {
}
pub fn ciePointer(fde: Fde, elf_file: *Elf) u32 {
return std.mem.readIntLittle(u32, fde.data(elf_file)[4..8]);
const fde_data = fde.data(elf_file);
return std.mem.readIntLittle(u32, fde_data[4..8]);
}
pub fn calcSize(fde: Fde) u64 {
pub fn calcSize(fde: Fde) usize {
return fde.size + 4;
}
pub fn atom(fde: Fde, elf_file: *Elf) error{Overflow}!*Atom {
pub fn atom(fde: Fde, elf_file: *Elf) *Atom {
const object = elf_file.file(fde.file_index).?.object;
const rel = (try fde.relocs(elf_file))[0];
const rel = fde.relocs(elf_file)[0];
const sym = object.symtab[rel.r_sym()];
const atom_index = object.atoms.items[sym.st_shndx];
return elf_file.atom(atom_index).?;
}
pub fn relocs(fde: Fde, elf_file: *Elf) error{Overflow}![]align(1) const elf.Elf64_Rela {
pub fn relocs(fde: Fde, elf_file: *Elf) []align(1) const elf.Elf64_Rela {
const object = elf_file.file(fde.file_index).?.object;
return (try object.getRelocs(fde.rel_section_index))[fde.rel_index..][0..fde.rel_num];
return object.getRelocs(fde.rel_section_index)[fde.rel_index..][0..fde.rel_num];
}
pub fn format(
@@ -88,10 +89,7 @@ pub const Fde = struct {
const fde = ctx.fde;
const elf_file = ctx.elf_file;
const base_addr = fde.address(elf_file);
const atom_name = if (fde.atom(elf_file)) |atom_ptr|
atom_ptr.name(elf_file)
else |_|
"";
const atom_name = fde.atom(elf_file).name(elf_file);
try writer.print("@{x} : size({x}) : cie({d}) : {s}", .{
base_addr + fde.out_offset,
fde.calcSize(),
@@ -104,8 +102,8 @@ pub const Fde = struct {
pub const Cie = struct {
/// Includes 4byte size cell.
offset: u64,
size: u64,
offset: usize,
size: usize,
rel_index: u32 = 0,
rel_num: u32 = 0,
rel_section_index: u32 = 0,
@@ -123,26 +121,26 @@ pub const Cie = struct {
return base + cie.out_offset;
}
pub fn data(cie: Cie, elf_file: *Elf) error{Overflow}![]const u8 {
pub fn data(cie: Cie, elf_file: *Elf) []const u8 {
const object = elf_file.file(cie.file_index).?.object;
const contents = try object.shdrContents(cie.input_section_index);
const contents = object.shdrContents(cie.input_section_index);
return contents[cie.offset..][0..cie.calcSize()];
}
pub fn calcSize(cie: Cie) u64 {
pub fn calcSize(cie: Cie) usize {
return cie.size + 4;
}
pub fn relocs(cie: Cie, elf_file: *Elf) error{Overflow}![]align(1) const elf.Elf64_Rela {
pub fn relocs(cie: Cie, elf_file: *Elf) []align(1) const elf.Elf64_Rela {
const object = elf_file.file(cie.file_index).?.object;
return (try object.getRelocs(cie.rel_section_index))[cie.rel_index..][0..cie.rel_num];
return object.getRelocs(cie.rel_section_index)[cie.rel_index..][0..cie.rel_num];
}
pub fn eql(cie: Cie, other: Cie, elf_file: *Elf) error{Overflow}!bool {
if (!std.mem.eql(u8, try cie.data(elf_file), try other.data(elf_file))) return false;
pub fn eql(cie: Cie, other: Cie, elf_file: *Elf) bool {
if (!std.mem.eql(u8, cie.data(elf_file), other.data(elf_file))) return false;
const cie_relocs = try cie.relocs(elf_file);
const other_relocs = try other.relocs(elf_file);
const cie_relocs = cie.relocs(elf_file);
const other_relocs = other.relocs(elf_file);
if (cie_relocs.len != other_relocs.len) return false;
for (cie_relocs, other_relocs) |cie_rel, other_rel| {
@@ -152,8 +150,8 @@ pub const Cie = struct {
const cie_object = elf_file.file(cie.file_index).?.object;
const other_object = elf_file.file(other.file_index).?.object;
const cie_sym = cie_object.symbol(cie_rel.r_sym(), elf_file);
const other_sym = other_object.symbol(other_rel.r_sym(), elf_file);
const cie_sym = cie_object.symbols.items[cie_rel.r_sym()];
const other_sym = other_object.symbols.items[other_rel.r_sym()];
if (!std.mem.eql(u8, std.mem.asBytes(&cie_sym), std.mem.asBytes(&other_sym))) return false;
}
return true;
@@ -205,12 +203,12 @@ pub const Cie = struct {
pub const Iterator = struct {
data: []const u8,
pos: u64 = 0,
pos: usize = 0,
pub const Record = struct {
tag: enum { fde, cie },
offset: u64,
size: u64,
offset: usize,
size: usize,
};
pub fn next(it: *Iterator) !?Record {
@@ -235,7 +233,7 @@ pub const Iterator = struct {
};
pub fn calcEhFrameSize(elf_file: *Elf) !usize {
var offset: u64 = 0;
var offset: usize = 0;
var cies = std.ArrayList(Cie).init(elf_file.base.allocator);
defer cies.deinit();
@@ -285,7 +283,7 @@ pub fn calcEhFrameHdrSize(elf_file: *Elf) usize {
}
fn resolveReloc(rec: anytype, sym: *const Symbol, rel: elf.Elf64_Rela, elf_file: *Elf, contents: []u8) !void {
const offset = rel.r_offset - rec.offset;
const offset = std.math.cast(usize, rel.r_offset - rec.offset) orelse return error.Overflow;
const P = @as(i64, @intCast(rec.address(elf_file) + offset));
const S = @as(i64, @intCast(sym.address(.{}, elf_file)));
const A = rel.r_addend;
@@ -319,11 +317,11 @@ pub fn writeEhFrame(elf_file: *Elf, writer: anytype) !void {
for (object.cies.items) |cie| {
if (!cie.alive) continue;
const contents = try gpa.dupe(u8, try cie.data(elf_file));
const contents = try gpa.dupe(u8, cie.data(elf_file));
defer gpa.free(contents);
for (try cie.relocs(elf_file)) |rel| {
const sym = object.symbol(rel.r_sym(), elf_file);
for (cie.relocs(elf_file)) |rel| {
const sym = elf_file.symbol(object.symbols.items[rel.r_sym()]);
try resolveReloc(cie, sym, rel, elf_file, contents);
}
@@ -337,7 +335,7 @@ pub fn writeEhFrame(elf_file: *Elf, writer: anytype) !void {
for (object.fdes.items) |fde| {
if (!fde.alive) continue;
const contents = try gpa.dupe(u8, try fde.data(elf_file));
const contents = try gpa.dupe(u8, fde.data(elf_file));
defer gpa.free(contents);
std.mem.writeIntLittle(
@@ -346,8 +344,8 @@ pub fn writeEhFrame(elf_file: *Elf, writer: anytype) !void {
@as(i32, @truncate(@as(i64, @intCast(fde.out_offset + 4)) - @as(i64, @intCast(fde.cie(elf_file).out_offset)))),
);
for (try fde.relocs(elf_file)) |rel| {
const sym = object.symbol(rel.r_sym(), elf_file);
for (fde.relocs(elf_file)) |rel| {
const sym = elf_file.symbol(object.symbols.items[rel.r_sym()]);
try resolveReloc(fde, sym, rel, elf_file, contents);
}
@@ -395,10 +393,10 @@ pub fn writeEhFrameHdr(elf_file: *Elf, writer: anytype) !void {
for (object.fdes.items) |fde| {
if (!fde.alive) continue;
const relocs = try fde.relocs(elf_file);
const relocs = fde.relocs(elf_file);
assert(relocs.len > 0); // Should this be an error? Things are completely broken anyhow if this trips...
const rel = relocs[0];
const sym = object.symbol(rel.r_sym(), elf_file);
const sym = elf_file.symbol(object.symbols.items[rel.r_sym()]);
const P = @as(i64, @intCast(fde.address(elf_file)));
const S = @as(i64, @intCast(sym.address(.{}, elf_file)));
const A = rel.r_addend;
@@ -416,7 +414,7 @@ pub fn writeEhFrameHdr(elf_file: *Elf, writer: anytype) !void {
try writer.writeAll(std.mem.sliceAsBytes(entries.items));
}
const eh_frame_hdr_header_size: u64 = 12;
const eh_frame_hdr_header_size: usize = 12;
const EH_PE = struct {
pub const absptr = 0x00;

View File

@@ -2,7 +2,7 @@ pub const File = union(enum) {
zig_module: *ZigModule,
linker_defined: *LinkerDefined,
object: *Object,
// shared_object: *SharedObject,
shared_object: *SharedObject,
pub fn index(file: File) Index {
return switch (file) {
@@ -26,7 +26,7 @@ pub const File = union(enum) {
.zig_module => |x| try writer.print("{s}", .{x.path}),
.linker_defined => try writer.writeAll("(linker defined)"),
.object => |x| try writer.print("{}", .{x.fmtPath()}),
// .shared_object => |x| try writer.writeAll(x.path),
.shared_object => |x| try writer.writeAll(x.path),
}
}
@@ -49,8 +49,7 @@ pub const File = union(enum) {
pub fn symbolRank(file: File, sym: elf.Elf64_Sym, in_archive: bool) u32 {
const base: u3 = blk: {
if (sym.st_shndx == elf.SHN_COMMON) break :blk if (in_archive) 6 else 5;
// if (file == .shared or in_archive) break :blk switch (sym.st_bind()) {
if (in_archive) break :blk switch (sym.st_bind()) {
if (file == .shared_object or in_archive) break :blk switch (sym.st_bind()) {
elf.STB_GLOBAL => 3,
else => 4,
};
@@ -92,7 +91,8 @@ pub const File = union(enum) {
pub fn atoms(file: File) []const Atom.Index {
return switch (file) {
.linker_defined => unreachable,
.zig_module => |x| x.atoms.keys(),
.shared_object => unreachable,
.zig_module => |x| x.atoms.items,
.object => |x| x.atoms.items,
};
}
@@ -100,6 +100,7 @@ pub const File = union(enum) {
pub fn locals(file: File) []const Symbol.Index {
return switch (file) {
.linker_defined => unreachable,
.shared_object => unreachable,
inline else => |x| x.locals(),
};
}
@@ -117,7 +118,7 @@ pub const File = union(enum) {
zig_module: ZigModule,
linker_defined: LinkerDefined,
object: Object,
// shared_object: SharedObject,
shared_object: SharedObject,
};
};
@@ -129,6 +130,6 @@ const Atom = @import("Atom.zig");
const Elf = @import("../Elf.zig");
const LinkerDefined = @import("LinkerDefined.zig");
const Object = @import("Object.zig");
// const SharedObject = @import("SharedObject.zig");
const SharedObject = @import("SharedObject.zig");
const Symbol = @import("Symbol.zig");
const ZigModule = @import("ZigModule.zig");

161
src/link/Elf/gc.zig Normal file
View File

@@ -0,0 +1,161 @@
/// Garbage-collects unreferenced atoms in three phases: collect the root
/// set, transitively mark everything reachable from it, then flag the
/// remaining atoms as dead.
pub fn gcAtoms(elf_file: *Elf) !void {
    const gpa = elf_file.base.allocator;
    var root_atoms = std.ArrayList(*Atom).init(gpa);
    defer root_atoms.deinit();
    try collectRoots(&root_atoms, elf_file);
    mark(root_atoms, elf_file);
    prune(elf_file);
}
/// Gathers the initial GC root set: the entry symbol, globals exported by
/// each object, atoms living in always-retained sections, and atoms
/// referenced from CIE relocations.
fn collectRoots(roots: *std.ArrayList(*Atom), elf_file: *Elf) !void {
    if (elf_file.entry_index) |entry_index| {
        try markSymbol(elf_file.symbol(entry_index), roots, elf_file);
    }

    // Globals that an object both defines and exports are roots.
    for (elf_file.objects.items) |index| {
        for (elf_file.file(index).?.object.globals()) |global_index| {
            const global = elf_file.symbol(global_index);
            const file_ptr = global.file(elf_file) orelse continue;
            if (file_ptr.index() == index and global.flags.@"export")
                try markSymbol(global, roots, elf_file);
        }
    }

    for (elf_file.objects.items) |index| {
        const object = elf_file.file(index).?.object;

        for (object.atoms.items) |atom_index| {
            const atom = elf_file.atom(atom_index) orelse continue;
            if (!atom.flags.alive) continue;
            const shdr = atom.inputShdr(elf_file);
            const name = atom.name(elf_file);
            // Sections that must survive GC regardless of incoming references.
            const is_gc_root = shdr.sh_flags & elf.SHF_GNU_RETAIN != 0 or
                (switch (shdr.sh_type) {
                elf.SHT_NOTE, elf.SHT_PREINIT_ARRAY, elf.SHT_INIT_ARRAY, elf.SHT_FINI_ARRAY => true,
                else => false,
            }) or
                mem.startsWith(u8, name, ".ctors") or
                mem.startsWith(u8, name, ".dtors") or
                mem.startsWith(u8, name, ".init") or
                mem.startsWith(u8, name, ".fini") or
                Elf.isCIdentifier(name);
            if (is_gc_root and markAtom(atom)) try roots.append(atom);
            // Non-allocated sections are never traversed; pre-mark them visited.
            if (shdr.sh_flags & elf.SHF_ALLOC == 0) atom.flags.visited = true;
        }

        // Mark every atom referenced by CIE as alive.
        for (object.cies.items) |cie| {
            for (cie.relocs(elf_file)) |rel| {
                try markSymbol(elf_file.symbol(object.symbols.items[rel.r_sym()]), roots, elf_file);
            }
        }
    }
}
/// Appends the atom backing `sym` (if any) to the root set, but only on
/// the atom's first visit.
fn markSymbol(sym: *Symbol, roots: *std.ArrayList(*Atom), elf_file: *Elf) !void {
    const atom_ptr = sym.atom(elf_file) orelse return;
    if (!markAtom(atom_ptr)) return;
    try roots.append(atom_ptr);
}
/// Flags the atom as visited. Returns true when the atom is alive and this
/// was the first visit, i.e. the caller should traverse it.
fn markAtom(atom: *Atom) bool {
    const first_visit = !atom.flags.visited;
    atom.flags.visited = true;
    return atom.flags.alive and first_visit;
}
/// Transitively marks alive every atom reachable from `atom`, following both
/// the atom's own relocations and the relocations of its FDE records.
fn markLive(atom: *Atom, elf_file: *Elf) void {
    if (@import("build_options").enable_logging) track_live_level.incr();
    assert(atom.flags.visited);
    const object = atom.file(elf_file).?.object;
    for (atom.fdes(elf_file)) |fde| {
        // NOTE(review): the first reloc is skipped — presumably it targets the
        // FDE's own atom (cf. Fde.atom, which reads relocs[0]); confirm.
        for (fde.relocs(elf_file)[1..]) |rel| {
            markLiveTarget(elf_file.symbol(object.symbols.items[rel.r_sym()]), elf_file);
        }
    }
    for (atom.relocs(elf_file)) |rel| {
        markLiveTarget(elf_file.symbol(object.symbols.items[rel.r_sym()]), elf_file);
    }
}

/// Marks the atom referenced by `target_sym` alive and recurses into it on
/// first visit. Shared by the FDE and atom reloc walks in `markLive`.
fn markLiveTarget(target_sym: *Symbol, elf_file: *Elf) void {
    const target_atom = target_sym.atom(elf_file) orelse return;
    target_atom.flags.alive = true;
    gc_track_live_log.debug("{}marking live atom({d})", .{ track_live_level, target_atom.atom_index });
    if (markAtom(target_atom)) markLive(target_atom, elf_file);
}
/// Runs the mark phase: traverses the reference graph from every root atom.
fn mark(roots: std.ArrayList(*Atom), elf_file: *Elf) void {
    for (roots.items) |root_atom| {
        gc_track_live_log.debug("root atom({d})", .{root_atom.atom_index});
        markLive(root_atom, elf_file);
    }
}
/// Sweep phase: any atom still alive but never visited during marking is
/// unreachable — kill it along with its FDE records.
fn prune(elf_file: *Elf) void {
    for (elf_file.objects.items) |index| {
        const object = elf_file.file(index).?.object;
        for (object.atoms.items) |atom_index| {
            const atom = elf_file.atom(atom_index) orelse continue;
            if (!atom.flags.alive or atom.flags.visited) continue;
            atom.flags.alive = false;
            atom.markFdesDead(elf_file);
        }
    }
}
/// Prints a diagnostic line to stderr for every atom that is no longer alive.
pub fn dumpPrunedAtoms(elf_file: *Elf) !void {
    const stderr = std.io.getStdErr().writer();
    for (elf_file.objects.items) |index| {
        const object = elf_file.file(index).?.object;
        for (object.atoms.items) |atom_index| {
            const atom = elf_file.atom(atom_index) orelse continue;
            if (atom.flags.alive) continue;
            // TODO should we simply print to stderr?
            try stderr.print("link: removing unused section '{s}' in file '{}'\n", .{
                atom.name(elf_file),
                atom.file(elf_file).?.fmtPath(),
            });
        }
    }
}
/// Indentation helper for the `gc_track_live` debug log: formats as `value`
/// spaces, so nested `markLive` calls render as an indented tree.
const Level = struct {
    value: usize = 0,

    /// Deepens the indentation by one column (called once per markLive call).
    fn incr(self: *@This()) void {
        self.value += 1;
    }

    pub fn format(
        self: *const @This(),
        comptime unused_fmt_string: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = unused_fmt_string;
        _ = options;
        try writer.writeByteNTimes(' ', self.value);
    }
};

/// Global indentation state threaded through markLive's debug logging.
var track_live_level: Level = .{};
const std = @import("std");
const assert = std.debug.assert;
const elf = std.elf;
const gc_track_live_log = std.log.scoped(.gc_track_live);
const mem = std.mem;
const Allocator = mem.Allocator;
const Atom = @import("Atom.zig");
const Elf = @import("../Elf.zig");
const Symbol = @import("Symbol.zig");

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff