Dwarf: implement .eh_frame

This commit is contained in:
Jacob Young
2024-08-26 15:38:35 -04:00
parent 26d4fd5276
commit f289b82d0e
17 changed files with 1203 additions and 275 deletions

View File

@@ -125,7 +125,7 @@ pub const readILEB128 = readIleb128;
pub fn writeIleb128(writer: anytype, arg: anytype) !void {
const Arg = @TypeOf(arg);
const Int = switch (Arg) {
comptime_int => std.math.IntFittingRange(-arg - 1, arg),
comptime_int => std.math.IntFittingRange(-@abs(arg), @abs(arg)),
else => Arg,
};
const Signed = if (@typeInfo(Int).Int.bits < 8) i8 else Int;

View File

@@ -114,14 +114,15 @@ pub fn clone() callconv(.Naked) usize {
\\ movq %%rcx,(%%rsi)
\\ syscall
\\ testq %%rax,%%rax
\\ jnz 1f
\\ jz 1f
\\ retq
\\1: .cfi_undefined %%rip
\\ xorl %%ebp,%%ebp
\\ popq %%rdi
\\ callq *%%r9
\\ movl %%eax,%%edi
\\ movl $60,%%eax // SYS_exit
\\ syscall
\\1: ret
\\
);
}

View File

@@ -249,6 +249,7 @@ fn _start() callconv(.Naked) noreturn {
// linker explicitly.
asm volatile (switch (native_arch) {
.x86_64 =>
\\ .cfi_undefined %%rip
\\ xorl %%ebp, %%ebp
\\ movq %%rsp, %%rdi
\\ andq $-16, %%rsp

View File

@@ -1491,6 +1491,46 @@ fn asmPseudo(self: *Self, ops: Mir.Inst.Ops) !void {
});
}
/// Emit a pseudo MIR instruction that carries a single register operand.
/// The `ops` tag must follow the `pseudo_*_r` naming convention, which
/// corresponds to the `r` payload stored in `data`.
fn asmPseudoRegister(self: *Self, ops: Mir.Inst.Ops, reg: Register) !void {
// Debug-time check that the tag name matches the single-register payload shape.
assert(std.mem.startsWith(u8, @tagName(ops), "pseudo_") and
std.mem.endsWith(u8, @tagName(ops), "_r"));
_ = try self.addInst(.{
.tag = .pseudo,
.ops = ops,
.data = .{ .r = .{ .r1 = reg } },
});
}
/// Emit a pseudo MIR instruction that carries a single signed immediate.
/// The `ops` tag must follow the `pseudo_*_i_s` naming convention, which
/// corresponds to the `i` payload stored in `data`.
fn asmPseudoImmediate(self: *Self, ops: Mir.Inst.Ops, imm: Immediate) !void {
// Debug-time check that the tag name matches the signed-immediate payload shape.
assert(std.mem.startsWith(u8, @tagName(ops), "pseudo_") and
std.mem.endsWith(u8, @tagName(ops), "_i_s"));
_ = try self.addInst(.{
.tag = .pseudo,
.ops = ops,
// The signed immediate is bit-cast for storage in the payload field.
.data = .{ .i = .{ .i = @bitCast(imm.signed) } },
});
}
/// Emit a pseudo MIR instruction that carries two register operands.
/// The `ops` tag must follow the `pseudo_*_rr` naming convention, which
/// corresponds to the `rr` payload stored in `data`.
fn asmPseudoRegisterRegister(self: *Self, ops: Mir.Inst.Ops, reg1: Register, reg2: Register) !void {
// Debug-time check that the tag name matches the register-pair payload shape.
assert(std.mem.startsWith(u8, @tagName(ops), "pseudo_") and
std.mem.endsWith(u8, @tagName(ops), "_rr"));
_ = try self.addInst(.{
.tag = .pseudo,
.ops = ops,
.data = .{ .rr = .{ .r1 = reg1, .r2 = reg2 } },
});
}
/// Emit a pseudo MIR instruction that carries a register plus a signed immediate.
/// The `ops` tag must follow the `pseudo_*_ri_s` naming convention, which
/// corresponds to the `ri` payload stored in `data`.
fn asmPseudoRegisterImmediate(self: *Self, ops: Mir.Inst.Ops, reg: Register, imm: Immediate) !void {
// Debug-time check that the tag name matches the register+signed-immediate payload shape.
assert(std.mem.startsWith(u8, @tagName(ops), "pseudo_") and
std.mem.endsWith(u8, @tagName(ops), "_ri_s"));
_ = try self.addInst(.{
.tag = .pseudo,
.ops = ops,
// The signed immediate is bit-cast for storage alongside the register.
.data = .{ .ri = .{ .r1 = reg, .i = @bitCast(imm.signed) } },
});
}
fn asmRegister(self: *Self, tag: Mir.Inst.FixedTag, reg: Register) !void {
_ = try self.addInst(.{
.tag = tag[1],
@@ -1877,7 +1917,10 @@ fn gen(self: *Self) InnerError!void {
const cc = abi.resolveCallingConvention(fn_info.cc, self.target.*);
if (cc != .Naked) {
try self.asmRegister(.{ ._, .push }, .rbp);
try self.asmPseudoImmediate(.pseudo_cfi_adjust_cfa_offset_i_s, Immediate.s(8));
try self.asmPseudoRegisterImmediate(.pseudo_cfi_rel_offset_ri_s, .rbp, Immediate.s(0));
try self.asmRegisterRegister(.{ ._, .mov }, .rbp, .rsp);
try self.asmPseudoRegister(.pseudo_cfi_def_cfa_register_r, .rbp);
const backpatch_push_callee_preserved_regs = try self.asmPlaceholder();
const backpatch_frame_align = try self.asmPlaceholder();
const backpatch_frame_align_extra = try self.asmPlaceholder();
@@ -1962,6 +2005,7 @@ fn gen(self: *Self) InnerError!void {
const backpatch_stack_dealloc = try self.asmPlaceholder();
const backpatch_pop_callee_preserved_regs = try self.asmPlaceholder();
try self.asmRegister(.{ ._, .pop }, .rbp);
try self.asmPseudoRegisterImmediate(.pseudo_cfi_def_cfa_ri_s, .rsp, Immediate.s(8));
try self.asmOpOnly(.{ ._, .ret });
const frame_layout = try self.computeFrameLayout(cc);
@@ -14038,7 +14082,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
var mnem_it = mem.tokenizeAny(u8, line, " \t");
var prefix: Instruction.Prefix = .none;
const mnem_str = while (mnem_it.next()) |mnem_str| {
if (mem.startsWith(u8, mnem_str, "#")) continue :next_line;
if (mnem_str[0] == '#') continue :next_line;
if (mem.startsWith(u8, mnem_str, "//")) continue :next_line;
if (std.meta.stringToEnum(Instruction.Prefix, mnem_str)) |pre| {
if (prefix != .none) return self.fail("extra prefix: '{s}'", .{mnem_str});
@@ -14063,8 +14107,14 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
}
label_gop.value_ptr.target = @intCast(self.mir_instructions.len);
} else continue;
if (mnem_str[0] == '.') {
if (prefix != .none) return self.fail("prefixed directive: '{s} {s}'", .{ @tagName(prefix), mnem_str });
prefix = .directive;
}
var mnem_size: ?Memory.Size = if (mem.endsWith(u8, mnem_str, "b"))
var mnem_size: ?Memory.Size = if (prefix == .directive)
null
else if (mem.endsWith(u8, mnem_str, "b"))
.byte
else if (mem.endsWith(u8, mnem_str, "w"))
.word
@@ -14095,7 +14145,9 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
mnem_size = fixed_mnem_size;
}
const mnem_name = @tagName(mnem_tag);
const mnem_fixed_tag: Mir.Inst.FixedTag = for (std.enums.values(Mir.Inst.Fixes)) |fixes| {
const mnem_fixed_tag: Mir.Inst.FixedTag = if (prefix == .directive)
.{ ._, .pseudo }
else for (std.enums.values(Mir.Inst.Fixes)) |fixes| {
const fixes_name = @tagName(fixes);
const space_i = mem.indexOfScalar(u8, fixes_name, ' ');
const fixes_prefix = if (space_i) |i|
@@ -14116,7 +14168,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
} else {
assert(prefix != .none); // no combination of fixes produced a known mnemonic
return self.fail("invalid prefix for mnemonic: '{s} {s}'", .{
@tagName(prefix), mnem_str,
@tagName(prefix), mnem_name,
});
};
@@ -14324,7 +14376,62 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
} else return self.fail("invalid operand: '{s}'", .{op_str});
} else if (op_it.next()) |op_str| return self.fail("extra operand: '{s}'", .{op_str});
(switch (ops[0]) {
(if (prefix == .directive) switch (mnem_tag) {
.@".cfi_def_cfa" => if (ops[0] == .reg and ops[1] == .imm and ops[2] == .none)
self.asmPseudoRegisterImmediate(.pseudo_cfi_def_cfa_ri_s, ops[0].reg, ops[1].imm)
else
error.InvalidInstruction,
.@".cfi_def_cfa_register" => if (ops[0] == .reg and ops[1] == .none)
self.asmPseudoRegister(.pseudo_cfi_def_cfa_register_r, ops[0].reg)
else
error.InvalidInstruction,
.@".cfi_def_cfa_offset" => if (ops[0] == .imm and ops[1] == .none)
self.asmPseudoImmediate(.pseudo_cfi_def_cfa_offset_i_s, ops[0].imm)
else
error.InvalidInstruction,
.@".cfi_adjust_cfa_offset" => if (ops[0] == .imm and ops[1] == .none)
self.asmPseudoImmediate(.pseudo_cfi_adjust_cfa_offset_i_s, ops[0].imm)
else
error.InvalidInstruction,
.@".cfi_offset" => if (ops[0] == .reg and ops[1] == .imm and ops[2] == .none)
self.asmPseudoRegisterImmediate(.pseudo_cfi_offset_ri_s, ops[0].reg, ops[1].imm)
else
error.InvalidInstruction,
.@".cfi_val_offset" => if (ops[0] == .reg and ops[1] == .imm and ops[2] == .none)
self.asmPseudoRegisterImmediate(.pseudo_cfi_val_offset_ri_s, ops[0].reg, ops[1].imm)
else
error.InvalidInstruction,
.@".cfi_rel_offset" => if (ops[0] == .reg and ops[1] == .imm and ops[2] == .none)
self.asmPseudoRegisterImmediate(.pseudo_cfi_rel_offset_ri_s, ops[0].reg, ops[1].imm)
else
error.InvalidInstruction,
.@".cfi_register" => if (ops[0] == .reg and ops[1] == .reg and ops[2] == .none)
self.asmPseudoRegisterRegister(.pseudo_cfi_register_rr, ops[0].reg, ops[1].reg)
else
error.InvalidInstruction,
.@".cfi_restore" => if (ops[0] == .reg and ops[1] == .none)
self.asmPseudoRegister(.pseudo_cfi_restore_r, ops[0].reg)
else
error.InvalidInstruction,
.@".cfi_undefined" => if (ops[0] == .reg and ops[1] == .none)
self.asmPseudoRegister(.pseudo_cfi_undefined_r, ops[0].reg)
else
error.InvalidInstruction,
.@".cfi_same_value" => if (ops[0] == .reg and ops[1] == .none)
self.asmPseudoRegister(.pseudo_cfi_same_value_r, ops[0].reg)
else
error.InvalidInstruction,
.@".cfi_remember_state" => if (ops[0] == .none)
self.asmPseudo(.pseudo_cfi_remember_state_none)
else
error.InvalidInstruction,
.@".cfi_restore_state" => if (ops[0] == .none)
self.asmPseudo(.pseudo_cfi_restore_state_none)
else
error.InvalidInstruction,
.@".cfi_escape" => error.InvalidInstruction,
else => unreachable,
} else switch (ops[0]) {
.none => self.asmOpOnly(mnem_fixed_tag),
.reg => |reg0| switch (ops[1]) {
.none => self.asmRegister(mnem_fixed_tag, reg0),
@@ -19210,14 +19317,6 @@ fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError {
return error.CodegenFail;
}
/// Record a formatted codegen error message and fail with `error.CodegenFail`.
/// Asserts that no error message has been recorded yet; the message is
/// allocated with `self.gpa` and stored in `self.err_msg` for later reporting.
fn failSymbol(self: *Self, comptime format: []const u8, args: anytype) InnerError {
// Error paths are expected to be rare; hint the branch as cold.
@branchHint(.cold);
assert(self.err_msg == null);
const gpa = self.gpa;
self.err_msg = try ErrorMsg.create(gpa, self.src_loc, format, args);
return error.CodegenFail;
}
fn parseRegName(name: []const u8) ?Register {
if (@hasDecl(Register, "parseRegName")) {
return Register.parseRegName(name);

View File

@@ -30,6 +30,59 @@ pub fn emitMir(emit: *Emit) Error!void {
var lowered_relocs = lowered.relocs;
for (lowered.insts, 0..) |lowered_inst, lowered_index| {
const start_offset: u32 = @intCast(emit.code.items.len);
if (lowered_inst.prefix == .directive) {
switch (emit.debug_output) {
.dwarf => |dwarf| switch (lowered_inst.encoding.mnemonic) {
.@".cfi_def_cfa" => try dwarf.genDebugFrame(start_offset, .{ .def_cfa = .{
.reg = lowered_inst.ops[0].reg.dwarfNum(),
.off = lowered_inst.ops[1].imm.signed,
} }),
.@".cfi_def_cfa_register" => try dwarf.genDebugFrame(start_offset, .{
.def_cfa_register = lowered_inst.ops[0].reg.dwarfNum(),
}),
.@".cfi_def_cfa_offset" => try dwarf.genDebugFrame(start_offset, .{
.def_cfa_offset = lowered_inst.ops[0].imm.signed,
}),
.@".cfi_adjust_cfa_offset" => try dwarf.genDebugFrame(start_offset, .{
.adjust_cfa_offset = lowered_inst.ops[0].imm.signed,
}),
.@".cfi_offset" => try dwarf.genDebugFrame(start_offset, .{ .offset = .{
.reg = lowered_inst.ops[0].reg.dwarfNum(),
.off = lowered_inst.ops[1].imm.signed,
} }),
.@".cfi_val_offset" => try dwarf.genDebugFrame(start_offset, .{ .val_offset = .{
.reg = lowered_inst.ops[0].reg.dwarfNum(),
.off = lowered_inst.ops[1].imm.signed,
} }),
.@".cfi_rel_offset" => try dwarf.genDebugFrame(start_offset, .{ .rel_offset = .{
.reg = lowered_inst.ops[0].reg.dwarfNum(),
.off = lowered_inst.ops[1].imm.signed,
} }),
.@".cfi_register" => try dwarf.genDebugFrame(start_offset, .{ .register = .{
lowered_inst.ops[0].reg.dwarfNum(),
lowered_inst.ops[1].reg.dwarfNum(),
} }),
.@".cfi_restore" => try dwarf.genDebugFrame(start_offset, .{
.restore = lowered_inst.ops[0].reg.dwarfNum(),
}),
.@".cfi_undefined" => try dwarf.genDebugFrame(start_offset, .{
.undefined = lowered_inst.ops[0].reg.dwarfNum(),
}),
.@".cfi_same_value" => try dwarf.genDebugFrame(start_offset, .{
.same_value = lowered_inst.ops[0].reg.dwarfNum(),
}),
.@".cfi_remember_state" => try dwarf.genDebugFrame(start_offset, .remember_state),
.@".cfi_restore_state" => try dwarf.genDebugFrame(start_offset, .restore_state),
.@".cfi_escape" => try dwarf.genDebugFrame(start_offset, .{
.escape = lowered_inst.ops[0].bytes,
}),
else => unreachable,
},
.plan9 => {},
.none => {},
}
continue;
}
try lowered_inst.encode(emit.code.writer(), .{});
const end_offset: u32 = @intCast(emit.code.items.len);
while (lowered_relocs.len > 0 and

View File

@@ -220,6 +220,21 @@ pub fn format(
}
pub const Mnemonic = enum {
// Directives
@".cfi_def_cfa",
@".cfi_def_cfa_register",
@".cfi_def_cfa_offset",
@".cfi_adjust_cfa_offset",
@".cfi_offset",
@".cfi_val_offset",
@".cfi_rel_offset",
@".cfi_register",
@".cfi_restore",
@".cfi_undefined",
@".cfi_same_value",
@".cfi_remember_state",
@".cfi_restore_state",
@".cfi_escape",
// zig fmt: off
// General-purpose
adc, add, @"and",
@@ -442,6 +457,7 @@ pub const Op = enum {
imm8s, imm16s, imm32s,
al, ax, eax, rax,
cl,
rip, eip, ip,
r8, r16, r32, r64,
rm8, rm16, rm32, rm64,
r32_m8, r32_m16, r64_m16,
@@ -487,7 +503,12 @@ pub const Op = enum {
256 => .ymm,
else => unreachable,
},
.ip => unreachable,
.ip => switch (reg) {
.rip => .rip,
.eip => .eip,
.ip => .ip,
else => unreachable,
},
},
.mem => |mem| switch (mem) {
@@ -531,13 +552,15 @@ pub const Op = enum {
else
.imm64,
},
.bytes => unreachable,
};
}
pub fn immBitSize(op: Op) u64 {
return switch (op) {
.none, .o16, .o32, .o64, .moffs, .m, .sreg => unreachable,
.al, .cl, .r8, .rm8, .r32_m8 => unreachable,
.al, .cl, .rip, .eip, .ip, .r8, .rm8, .r32_m8 => unreachable,
.ax, .r16, .rm16 => unreachable,
.eax, .r32, .rm32, .r32_m16 => unreachable,
.rax, .r64, .rm64, .r64_m16 => unreachable,
@@ -560,9 +583,9 @@ pub const Op = enum {
.rel8, .rel16, .rel32 => unreachable,
.m8, .m16, .m32, .m64, .m80, .m128, .m256 => unreachable,
.al, .cl, .r8, .rm8 => 8,
.ax, .r16, .rm16 => 16,
.eax, .r32, .rm32, .r32_m8, .r32_m16 => 32,
.rax, .r64, .rm64, .r64_m16, .mm, .mm_m64 => 64,
.ax, .ip, .r16, .rm16 => 16,
.eax, .eip, .r32, .rm32, .r32_m8, .r32_m16 => 32,
.rax, .rip, .r64, .rm64, .r64_m16, .mm, .mm_m64 => 64,
.st => 80,
.xmm0, .xmm, .xmm_m8, .xmm_m16, .xmm_m32, .xmm_m64, .xmm_m128 => 128,
.ymm, .ymm_m256 => 256,
@@ -574,7 +597,7 @@ pub const Op = enum {
.none, .o16, .o32, .o64, .moffs, .m, .sreg => unreachable,
.unity, .imm8, .imm8s, .imm16, .imm16s, .imm32, .imm32s, .imm64 => unreachable,
.rel8, .rel16, .rel32 => unreachable,
.al, .cl, .r8, .ax, .r16, .eax, .r32, .rax, .r64 => unreachable,
.al, .cl, .r8, .ax, .ip, .r16, .eax, .eip, .r32, .rax, .rip, .r64 => unreachable,
.st, .mm, .xmm0, .xmm, .ymm => unreachable,
.m8, .rm8, .r32_m8, .xmm_m8 => 8,
.m16, .rm16, .r32_m16, .r64_m16, .xmm_m16 => 16,
@@ -602,8 +625,9 @@ pub const Op = enum {
pub fn isRegister(op: Op) bool {
// zig fmt: off
return switch (op) {
.cl,
.al, .ax, .eax, .rax,
.cl,
.ip, .eip, .rip,
.r8, .r16, .r32, .r64,
.rm8, .rm16, .rm32, .rm64,
.r32_m8, .r32_m16, .r64_m16,
@@ -664,6 +688,7 @@ pub const Op = enum {
.mm, .mm_m64 => .mmx,
.xmm0, .xmm, .xmm_m8, .xmm_m16, .xmm_m32, .xmm_m64, .xmm_m128 => .sse,
.ymm, .ymm_m256 => .sse,
.rip, .eip, .ip => .ip,
};
}

View File

@@ -12,7 +12,7 @@ src_loc: Zcu.LazySrcLoc,
result_insts_len: u8 = undefined,
result_relocs_len: u8 = undefined,
result_insts: [
std.mem.max(usize, &.{
@max(
1, // non-pseudo instructions
3, // (ELF only) TLS local dynamic (LD) sequence in PIC mode
2, // cmovcc: cmovcc \ cmovcc
@@ -22,18 +22,18 @@ result_insts: [
pseudo_probe_adjust_unrolled_max_insts,
pseudo_probe_adjust_setup_insts,
pseudo_probe_adjust_loop_insts,
abi.Win64.callee_preserved_regs.len, // push_regs/pop_regs
abi.SysV.callee_preserved_regs.len, // push_regs/pop_regs
})
abi.Win64.callee_preserved_regs.len * 2, // push_regs/pop_regs
abi.SysV.callee_preserved_regs.len * 2, // push_regs/pop_regs
)
]Instruction = undefined,
result_relocs: [
std.mem.max(usize, &.{
@max(
1, // jmp/jcc/call/mov/lea: jmp/jcc/call/mov/lea
2, // jcc: jcc \ jcc
2, // test \ jcc \ probe \ sub \ jmp
1, // probe \ sub \ jcc
3, // (ELF only) TLS local dynamic (LD) sequence in PIC mode
})
)
]Reloc = undefined,
pub const pseudo_probe_align_insts = 5; // test \ jcc \ probe \ sub \ jmp
@@ -265,6 +265,50 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
.pseudo_push_reg_list => try lower.pushPopRegList(.push, inst),
.pseudo_pop_reg_list => try lower.pushPopRegList(.pop, inst),
.pseudo_cfi_def_cfa_ri_s => try lower.emit(.directive, .@".cfi_def_cfa", &.{
.{ .reg = inst.data.ri.r1 },
.{ .imm = lower.imm(.ri_s, inst.data.ri.i) },
}),
.pseudo_cfi_def_cfa_register_r => try lower.emit(.directive, .@".cfi_def_cfa_register", &.{
.{ .reg = inst.data.r.r1 },
}),
.pseudo_cfi_def_cfa_offset_i_s => try lower.emit(.directive, .@".cfi_def_cfa_offset", &.{
.{ .imm = lower.imm(.i_s, inst.data.i.i) },
}),
.pseudo_cfi_adjust_cfa_offset_i_s => try lower.emit(.directive, .@".cfi_adjust_cfa_offset", &.{
.{ .imm = lower.imm(.i_s, inst.data.i.i) },
}),
.pseudo_cfi_offset_ri_s => try lower.emit(.directive, .@".cfi_offset", &.{
.{ .reg = inst.data.ri.r1 },
.{ .imm = lower.imm(.ri_s, inst.data.ri.i) },
}),
.pseudo_cfi_val_offset_ri_s => try lower.emit(.directive, .@".cfi_val_offset", &.{
.{ .reg = inst.data.ri.r1 },
.{ .imm = lower.imm(.ri_s, inst.data.ri.i) },
}),
.pseudo_cfi_rel_offset_ri_s => try lower.emit(.directive, .@".cfi_rel_offset", &.{
.{ .reg = inst.data.ri.r1 },
.{ .imm = lower.imm(.ri_s, inst.data.ri.i) },
}),
.pseudo_cfi_register_rr => try lower.emit(.directive, .@".cfi_register", &.{
.{ .reg = inst.data.rr.r1 },
.{ .reg = inst.data.rr.r2 },
}),
.pseudo_cfi_restore_r => try lower.emit(.directive, .@".cfi_restore", &.{
.{ .reg = inst.data.r.r1 },
}),
.pseudo_cfi_undefined_r => try lower.emit(.directive, .@".cfi_undefined", &.{
.{ .reg = inst.data.r.r1 },
}),
.pseudo_cfi_same_value_r => try lower.emit(.directive, .@".cfi_same_value", &.{
.{ .reg = inst.data.r.r1 },
}),
.pseudo_cfi_remember_state_none => try lower.emit(.directive, .@".cfi_remember_state", &.{}),
.pseudo_cfi_restore_state_none => try lower.emit(.directive, .@".cfi_restore_state", &.{}),
.pseudo_cfi_escape_bytes => try lower.emit(.directive, .@".cfi_escape", &.{
.{ .bytes = inst.data.bytes.get(lower.mir) },
}),
.pseudo_dbg_prologue_end_none,
.pseudo_dbg_line_line_column,
.pseudo_dbg_epilogue_begin_none,
@@ -280,6 +324,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
.pseudo_dbg_local_af,
.pseudo_dbg_local_am,
.pseudo_dbg_var_args_none,
.pseudo_dead_none,
=> {},
else => unreachable,
@@ -665,12 +710,43 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
fn pushPopRegList(lower: *Lower, comptime mnemonic: Mnemonic, inst: Mir.Inst) Error!void {
const callee_preserved_regs = abi.getCalleePreservedRegs(lower.cc);
var it = inst.data.reg_list.iterator(.{ .direction = switch (mnemonic) {
.push => .reverse,
.pop => .forward,
var off: i32 = switch (mnemonic) {
.push => 0,
.pop => undefined,
else => unreachable,
} });
while (it.next()) |i| try lower.emit(.none, mnemonic, &.{.{ .reg = callee_preserved_regs[i] }});
};
{
var it = inst.data.reg_list.iterator(.{ .direction = switch (mnemonic) {
.push => .reverse,
.pop => .forward,
else => unreachable,
} });
while (it.next()) |i| {
try lower.emit(.none, mnemonic, &.{.{
.reg = callee_preserved_regs[i],
}});
switch (mnemonic) {
.push => off -= 8,
.pop => {},
else => unreachable,
}
}
}
switch (mnemonic) {
.push => {
var it = inst.data.reg_list.iterator(.{});
while (it.next()) |i| {
try lower.emit(.directive, .@".cfi_rel_offset", &.{
.{ .reg = callee_preserved_regs[i] },
.{ .imm = Immediate.s(off) },
});
off += 8;
}
assert(off == 0);
},
.pop => {},
else => unreachable,
}
}
const page_size: i32 = 1 << 12;

View File

@@ -879,6 +879,7 @@ pub const Inst = struct {
/// Probe adjust loop
/// Uses `rr` payload.
pseudo_probe_adjust_loop_rr,
/// Push registers
/// Uses `reg_list` payload.
pseudo_push_reg_list,
@@ -886,6 +887,47 @@ pub const Inst = struct {
/// Uses `reg_list` payload.
pseudo_pop_reg_list,
/// Define cfa rule as offset from register.
/// Uses `ri` payload.
pseudo_cfi_def_cfa_ri_s,
/// Modify cfa rule register.
/// Uses `r` payload.
pseudo_cfi_def_cfa_register_r,
/// Modify cfa rule offset.
/// Uses `i` payload.
pseudo_cfi_def_cfa_offset_i_s,
/// Offset cfa rule offset.
/// Uses `i` payload.
pseudo_cfi_adjust_cfa_offset_i_s,
/// Define register rule as stored at offset from cfa.
/// Uses `ri` payload.
pseudo_cfi_offset_ri_s,
/// Define register rule as offset from cfa.
/// Uses `ri` payload.
pseudo_cfi_val_offset_ri_s,
/// Define register rule as stored at offset from cfa rule register.
/// Uses `ri` payload.
pseudo_cfi_rel_offset_ri_s,
/// Define register rule as register.
/// Uses `rr` payload.
pseudo_cfi_register_rr,
/// Define register rule from initial.
/// Uses `r` payload.
pseudo_cfi_restore_r,
/// Define register rule as undefined.
/// Uses `r` payload.
pseudo_cfi_undefined_r,
/// Define register rule as itself.
/// Uses `r` payload.
pseudo_cfi_same_value_r,
/// Push cfi state.
pseudo_cfi_remember_state_none,
/// Pop cfi state.
pseudo_cfi_restore_state_none,
/// Raw cfi bytes.
/// Uses `bytes` payload.
pseudo_cfi_escape_bytes,
/// End of prologue
pseudo_dbg_prologue_end_none,
/// Update debug line
@@ -1028,8 +1070,13 @@ pub const Inst = struct {
fixes: Fixes = ._,
payload: u32,
},
ix: struct {
bytes: struct {
payload: u32,
len: u32,
pub fn get(bytes: @This(), mir: Mir) []const u8 {
return std.mem.sliceAsBytes(mir.extra[bytes.payload..])[0..bytes.len];
}
},
a: struct {
air_inst: Air.Inst.Index,

View File

@@ -371,7 +371,7 @@ pub const Register = enum(u7) {
.x87 => 33 + @as(u6, reg.enc()),
.mmx => 41 + @as(u6, reg.enc()),
.segment => 50 + @as(u6, reg.enc()),
.ip => unreachable,
.ip => 16,
};
}
};

View File

@@ -25,6 +25,7 @@ pub const Instruction = struct {
repz,
repne,
repnz,
directive,
};
pub const Immediate = union(enum) {
@@ -180,6 +181,7 @@ pub const Instruction = struct {
reg: Register,
mem: Memory,
imm: Immediate,
bytes: []const u8,
/// Returns the bitsize of the operand.
pub fn bitSize(op: Operand) u64 {
@@ -188,6 +190,7 @@ pub const Instruction = struct {
.reg => |reg| reg.bitSize(),
.mem => |mem| mem.bitSize(),
.imm => unreachable,
.bytes => unreachable,
};
}
@@ -199,6 +202,7 @@ pub const Instruction = struct {
.reg => |reg| reg.class() == .segment,
.mem => |mem| mem.isSegmentRegister(),
.imm => unreachable,
.bytes => unreachable,
};
}
@@ -207,6 +211,7 @@ pub const Instruction = struct {
.none, .imm => false,
.reg => |reg| reg.isExtended(),
.mem => |mem| mem.base().isExtended(),
.bytes => unreachable,
};
}
@@ -214,6 +219,7 @@ pub const Instruction = struct {
return switch (op) {
.none, .reg, .imm => false,
.mem => |mem| if (mem.scaleIndex()) |si| si.index.isExtended() else false,
.bytes => unreachable,
};
}
@@ -299,6 +305,7 @@ pub const Instruction = struct {
if (imms < 0) try writer.writeByte('-');
try writer.print("0x{x}", .{@abs(imms)});
} else try writer.print("0x{x}", .{imm.asUnsigned(enc_op.immBitSize())}),
.bytes => unreachable,
}
}
@@ -308,20 +315,39 @@ pub const Instruction = struct {
};
pub fn new(prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand) !Instruction {
const encoding = (try Encoding.findByMnemonic(prefix, mnemonic, ops)) orelse {
log.err("no encoding found for: {s} {s} {s} {s} {s} {s}", .{
@tagName(prefix),
@tagName(mnemonic),
@tagName(if (ops.len > 0) Encoding.Op.fromOperand(ops[0]) else .none),
@tagName(if (ops.len > 1) Encoding.Op.fromOperand(ops[1]) else .none),
@tagName(if (ops.len > 2) Encoding.Op.fromOperand(ops[2]) else .none),
@tagName(if (ops.len > 3) Encoding.Op.fromOperand(ops[3]) else .none),
});
return error.InvalidInstruction;
const encoding: Encoding = switch (prefix) {
else => (try Encoding.findByMnemonic(prefix, mnemonic, ops)) orelse {
log.err("no encoding found for: {s} {s} {s} {s} {s} {s}", .{
@tagName(prefix),
@tagName(mnemonic),
@tagName(if (ops.len > 0) Encoding.Op.fromOperand(ops[0]) else .none),
@tagName(if (ops.len > 1) Encoding.Op.fromOperand(ops[1]) else .none),
@tagName(if (ops.len > 2) Encoding.Op.fromOperand(ops[2]) else .none),
@tagName(if (ops.len > 3) Encoding.Op.fromOperand(ops[3]) else .none),
});
return error.InvalidInstruction;
},
.directive => .{
.mnemonic = mnemonic,
.data = .{
.op_en = .zo,
.ops = .{
if (ops.len > 0) Encoding.Op.fromOperand(ops[0]) else .none,
if (ops.len > 1) Encoding.Op.fromOperand(ops[1]) else .none,
if (ops.len > 2) Encoding.Op.fromOperand(ops[2]) else .none,
if (ops.len > 3) Encoding.Op.fromOperand(ops[3]) else .none,
},
.opc_len = 0,
.opc = undefined,
.modrm_ext = 0,
.mode = .none,
.feature = .none,
},
},
};
log.debug("selected encoding: {}", .{encoding});
var inst = Instruction{
var inst: Instruction = .{
.prefix = prefix,
.encoding = encoding,
.ops = [1]Operand{.none} ** 4,
@@ -338,7 +364,10 @@ pub const Instruction = struct {
) @TypeOf(writer).Error!void {
_ = unused_format_string;
_ = options;
if (inst.prefix != .none) try writer.print("{s} ", .{@tagName(inst.prefix)});
switch (inst.prefix) {
.none, .directive => {},
else => try writer.print("{s} ", .{@tagName(inst.prefix)}),
}
try writer.print("{s}", .{@tagName(inst.encoding.mnemonic)});
for (inst.ops, inst.encoding.data.ops, 0..) |op, enc, i| {
if (op == .none) break;
@@ -349,6 +378,7 @@ pub const Instruction = struct {
}
pub fn encode(inst: Instruction, writer: anytype, comptime opts: Options) !void {
assert(inst.prefix != .directive);
const encoder = Encoder(@TypeOf(writer), opts){ .writer = writer };
const enc = inst.encoding;
const data = enc.data;
@@ -435,6 +465,7 @@ pub const Instruction = struct {
.lock => legacy.prefix_f0 = true,
.repne, .repnz => legacy.prefix_f2 = true,
.rep, .repe, .repz => legacy.prefix_f3 = true,
.directive => unreachable,
}
switch (data.mode) {

File diff suppressed because it is too large Load Diff

View File

@@ -569,9 +569,7 @@ pub fn growAllocSection(self: *Elf, shdr_index: u32, needed_size: u64) !void {
if (shdr.sh_type != elf.SHT_NOBITS) {
const allocated_size = self.allocatedSize(shdr.sh_offset);
if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) {
try self.base.file.?.setEndPos(shdr.sh_offset + needed_size);
} else if (needed_size > allocated_size) {
if (needed_size > allocated_size) {
const existing_size = shdr.sh_size;
shdr.sh_size = 0;
// Must move the entire section.
@@ -590,6 +588,8 @@ pub fn growAllocSection(self: *Elf, shdr_index: u32, needed_size: u64) !void {
shdr.sh_offset = new_offset;
if (maybe_phdr) |phdr| phdr.p_offset = new_offset;
} else if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) {
try self.base.file.?.setEndPos(shdr.sh_offset + needed_size);
}
if (maybe_phdr) |phdr| phdr.p_filesz = needed_size;
}
@@ -621,9 +621,7 @@ pub fn growNonAllocSection(
assert(shdr.sh_flags & elf.SHF_ALLOC == 0);
const allocated_size = self.allocatedSize(shdr.sh_offset);
if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) {
try self.base.file.?.setEndPos(shdr.sh_offset + needed_size);
} else if (needed_size > allocated_size) {
if (needed_size > allocated_size) {
const existing_size = shdr.sh_size;
shdr.sh_size = 0;
// Move all the symbols to a new file location.
@@ -646,6 +644,8 @@ pub fn growNonAllocSection(
}
shdr.sh_offset = new_offset;
} else if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) {
try self.base.file.?.setEndPos(shdr.sh_offset + needed_size);
}
shdr.sh_size = needed_size;
@@ -699,7 +699,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
const sub_prog_node = prog_node.start("ELF Flush", 0);
defer sub_prog_node.end();
const target = comp.root_mod.resolved_target.result;
const target = self.getTarget();
const link_mode = comp.config.link_mode;
const directory = self.base.emit.root_dir; // Just an alias to make it shorter to type.
const full_out_path = try directory.join(arena, &[_][]const u8{self.base.emit.sub_path});
@@ -1053,7 +1053,7 @@ fn dumpArgv(self: *Elf, comp: *Compilation) !void {
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
const target = self.base.comp.root_mod.resolved_target.result;
const target = self.getTarget();
const link_mode = self.base.comp.config.link_mode;
const directory = self.base.emit.root_dir; // Just an alias to make it shorter to type.
const full_out_path = try directory.join(arena, &[_][]const u8{self.base.emit.sub_path});
@@ -1498,15 +1498,13 @@ fn parseLdScript(self: *Elf, lib: SystemLib) ParseError!void {
}
pub fn validateEFlags(self: *Elf, file_index: File.Index, e_flags: elf.Elf64_Word) !void {
const target = self.base.comp.root_mod.resolved_target.result;
if (self.first_eflags == null) {
self.first_eflags = e_flags;
return; // there isn't anything to conflict with yet
}
const self_eflags: *elf.Elf64_Word = &self.first_eflags.?;
switch (target.cpu.arch) {
switch (self.getTarget().cpu.arch) {
.riscv64 => {
if (e_flags != self_eflags.*) {
const riscv_eflags: riscv.RiscvEflags = @bitCast(e_flags);
@@ -1549,7 +1547,7 @@ fn accessLibPath(
link_mode: ?std.builtin.LinkMode,
) !bool {
const sep = fs.path.sep_str;
const target = self.base.comp.root_mod.resolved_target.result;
const target = self.getTarget();
test_path.clearRetainingCapacity();
const prefix = if (link_mode != null) "lib" else "";
const suffix = if (link_mode) |mode| switch (mode) {
@@ -1779,7 +1777,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s
const is_exe_or_dyn_lib = is_dyn_lib or output_mode == .Exe;
const have_dynamic_linker = comp.config.link_libc and
link_mode == .dynamic and is_exe_or_dyn_lib;
const target = comp.root_mod.resolved_target.result;
const target = self.getTarget();
const compiler_rt_path: ?[]const u8 = blk: {
if (comp.compiler_rt_lib) |x| break :blk x.full_object_path;
if (comp.compiler_rt_obj) |x| break :blk x.full_object_path;
@@ -2353,8 +2351,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s
pub fn writeShdrTable(self: *Elf) !void {
const gpa = self.base.comp.gpa;
const target = self.base.comp.root_mod.resolved_target.result;
const target_endian = target.cpu.arch.endian();
const target_endian = self.getTarget().cpu.arch.endian();
const foreign_endian = target_endian != builtin.cpu.arch.endian();
const shsize: u64 = switch (self.ptr_width) {
.p32 => @sizeOf(elf.Elf32_Shdr),
@@ -2410,8 +2407,7 @@ pub fn writeShdrTable(self: *Elf) !void {
fn writePhdrTable(self: *Elf) !void {
const gpa = self.base.comp.gpa;
const target = self.base.comp.root_mod.resolved_target.result;
const target_endian = target.cpu.arch.endian();
const target_endian = self.getTarget().cpu.arch.endian();
const foreign_endian = target_endian != builtin.cpu.arch.endian();
const phdr_table = &self.phdrs.items[self.phdr_table_index.?];
@@ -2464,7 +2460,7 @@ pub fn writeElfHeader(self: *Elf) !void {
};
index += 1;
const target = comp.root_mod.resolved_target.result;
const target = self.getTarget();
const endian = target.cpu.arch.endian();
hdr_buf[index] = switch (endian) {
.little => elf.ELFDATA2LSB,
@@ -2772,21 +2768,25 @@ fn initOutputSections(self: *Elf) !void {
fn initSyntheticSections(self: *Elf) !void {
const comp = self.base.comp;
const target = comp.root_mod.resolved_target.result;
const target = self.getTarget();
const ptr_size = self.ptrWidthBytes();
const needs_eh_frame = for (self.objects.items) |index| {
if (self.file(index).?.object.cies.items.len > 0) break true;
} else false;
if (needs_eh_frame) {
self.eh_frame_section_index = try self.addSection(.{
.name = try self.insertShString(".eh_frame"),
.type = elf.SHT_PROGBITS,
.flags = elf.SHF_ALLOC,
.addralign = ptr_size,
.offset = std.math.maxInt(u64),
});
if (self.eh_frame_section_index == null) {
self.eh_frame_section_index = try self.addSection(.{
.name = try self.insertShString(".eh_frame"),
.type = if (target.cpu.arch == .x86_64)
elf.SHT_X86_64_UNWIND
else
elf.SHT_PROGBITS,
.flags = elf.SHF_ALLOC,
.addralign = ptr_size,
.offset = std.math.maxInt(u64),
});
}
if (comp.link_eh_frame_hdr) {
self.eh_frame_hdr_section_index = try self.addSection(.{
.name = try self.insertShString(".eh_frame_hdr"),
@@ -3446,7 +3446,6 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void {
}
fn updateSectionSizes(self: *Elf) !void {
const target = self.base.comp.root_mod.resolved_target.result;
const slice = self.sections.slice();
for (slice.items(.shdr), slice.items(.atom_list)) |*shdr, atom_list| {
if (atom_list.items.len == 0) continue;
@@ -3474,7 +3473,11 @@ fn updateSectionSizes(self: *Elf) !void {
const shdrs = slice.items(.shdr);
if (self.eh_frame_section_index) |index| {
shdrs[index].sh_size = try eh_frame.calcEhFrameSize(self);
shdrs[index].sh_size = existing_size: {
const zo = self.zigObjectPtr() orelse break :existing_size 0;
const sym = zo.symbol(zo.eh_frame_index orelse break :existing_size 0);
break :existing_size sym.atom(self).?.size;
} + try eh_frame.calcEhFrameSize(self);
}
if (self.eh_frame_hdr_section_index) |index| {
@@ -3517,7 +3520,7 @@ fn updateSectionSizes(self: *Elf) !void {
}
if (self.interp_section_index) |index| {
shdrs[index].sh_size = target.dynamic_linker.get().?.len + 1;
shdrs[index].sh_size = self.getTarget().dynamic_linker.get().?.len + 1;
}
if (self.hash_section_index) |index| {
@@ -3759,10 +3762,10 @@ pub fn allocateAllocSections(self: *Elf) !void {
}
const first = slice.items(.shdr)[cover.items[0]];
var off = try self.findFreeSpace(filesz, @"align");
var new_offset = try self.findFreeSpace(filesz, @"align");
const phndx = try self.addPhdr(.{
.type = elf.PT_LOAD,
.offset = off,
.offset = new_offset,
.addr = first.sh_addr,
.memsz = memsz,
.filesz = filesz,
@@ -3777,9 +3780,28 @@ pub fn allocateAllocSections(self: *Elf) !void {
shdr.sh_offset = 0;
continue;
}
off = alignment.@"align"(shndx, shdr.sh_addralign, off);
shdr.sh_offset = off;
off += shdr.sh_size;
new_offset = alignment.@"align"(shndx, shdr.sh_addralign, new_offset);
if (shndx == self.eh_frame_section_index) eh_frame: {
const zo = self.zigObjectPtr() orelse break :eh_frame;
const sym = zo.symbol(zo.eh_frame_index orelse break :eh_frame);
const existing_size = sym.atom(self).?.size;
log.debug("moving {s} from 0x{x} to 0x{x}", .{
self.getShString(shdr.sh_name),
shdr.sh_offset,
new_offset,
});
const amt = try self.base.file.?.copyRangeAll(
shdr.sh_offset,
self.base.file.?,
new_offset,
existing_size,
);
if (amt != existing_size) return error.InputOutput;
}
shdr.sh_offset = new_offset;
new_offset += shdr.sh_size;
}
addr = mem.alignForward(u64, addr, self.page_size);
@@ -3910,9 +3932,9 @@ fn writeAtoms(self: *Elf) !void {
log.debug("writing atoms in '{s}' section", .{self.getShString(shdr.sh_name)});
// TODO really, really handle debug section separately
const base_offset = if (self.isDebugSection(@intCast(shndx))) blk: {
const base_offset = if (self.isDebugSection(@intCast(shndx))) base_offset: {
const zo = self.zigObjectPtr().?;
break :blk for ([_]Symbol.Index{
for ([_]Symbol.Index{
zo.debug_info_index.?,
zo.debug_abbrev_index.?,
zo.debug_aranges_index.?,
@@ -3924,8 +3946,13 @@ fn writeAtoms(self: *Elf) !void {
}) |sym_index| {
const sym = zo.symbol(sym_index);
const atom_ptr = sym.atom(self).?;
if (atom_ptr.output_section_index == shndx) break atom_ptr.size;
} else 0;
if (atom_ptr.output_section_index == shndx) break :base_offset atom_ptr.size;
}
break :base_offset 0;
} else if (@as(u32, @intCast(shndx)) == self.eh_frame_section_index) base_offset: {
const zo = self.zigObjectPtr() orelse break :base_offset 0;
const sym = zo.symbol(zo.eh_frame_index orelse break :base_offset 0);
break :base_offset sym.atom(self).?.size;
} else 0;
const sh_offset = shdr.sh_offset + base_offset;
const sh_size = math.cast(usize, shdr.sh_size - base_offset) orelse return error.Overflow;
@@ -4082,12 +4109,11 @@ pub fn updateSymtabSize(self: *Elf) !void {
fn writeSyntheticSections(self: *Elf) !void {
const gpa = self.base.comp.gpa;
const target = self.getTarget();
const slice = self.sections.slice();
if (self.interp_section_index) |shndx| {
var buffer: [256]u8 = undefined;
const interp = target.dynamic_linker.get().?;
const interp = self.getTarget().dynamic_linker.get().?;
@memcpy(buffer[0..interp.len], interp);
buffer[interp.len] = 0;
const contents = buffer[0 .. interp.len + 1];
@@ -4144,12 +4170,18 @@ fn writeSyntheticSections(self: *Elf) !void {
}
if (self.eh_frame_section_index) |shndx| {
const existing_size = existing_size: {
const zo = self.zigObjectPtr() orelse break :existing_size 0;
const sym = zo.symbol(zo.eh_frame_index orelse break :existing_size 0);
break :existing_size sym.atom(self).?.size;
};
const shdr = slice.items(.shdr)[shndx];
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
var buffer = try std.ArrayList(u8).initCapacity(gpa, sh_size);
var buffer = try std.ArrayList(u8).initCapacity(gpa, @intCast(sh_size - existing_size));
defer buffer.deinit();
try eh_frame.writeEhFrame(self, buffer.writer());
try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
assert(buffer.items.len == sh_size - existing_size);
try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset + existing_size);
}
if (self.eh_frame_hdr_section_index) |shndx| {
@@ -4222,7 +4254,6 @@ pub fn writeShStrtab(self: *Elf) !void {
pub fn writeSymtab(self: *Elf) !void {
const gpa = self.base.comp.gpa;
const target = self.getTarget();
const slice = self.sections.slice();
const symtab_shdr = slice.items(.shdr)[self.symtab_section_index.?];
const strtab_shdr = slice.items(.shdr)[self.strtab_section_index.?];
@@ -4292,7 +4323,7 @@ pub fn writeSymtab(self: *Elf) !void {
self.plt_got.writeSymtab(self);
}
const foreign_endian = target.cpu.arch.endian() != builtin.cpu.arch.endian();
const foreign_endian = self.getTarget().cpu.arch.endian() != builtin.cpu.arch.endian();
switch (self.ptr_width) {
.p32 => {
const buf = try gpa.alloc(elf.Elf32_Sym, self.symtab.items.len);
@@ -4630,10 +4661,8 @@ pub fn isZigSection(self: Elf, shndx: u32) bool {
self.zig_data_rel_ro_section_index,
self.zig_data_section_index,
self.zig_bss_section_index,
}) |maybe_index| {
if (maybe_index) |index| {
if (index == shndx) return true;
}
}) |index| {
if (index == shndx) return true;
}
return false;
}
@@ -4648,10 +4677,8 @@ pub fn isDebugSection(self: Elf, shndx: u32) bool {
self.debug_line_str_section_index,
self.debug_loclists_section_index,
self.debug_rnglists_section_index,
}) |maybe_index| {
if (maybe_index) |index| {
if (index == shndx) return true;
}
}) |index| {
if (index == shndx) return true;
}
return false;
}

View File

@@ -49,6 +49,7 @@ debug_line_section_dirty: bool = false,
debug_line_str_section_dirty: bool = false,
debug_loclists_section_dirty: bool = false,
debug_rnglists_section_dirty: bool = false,
eh_frame_section_dirty: bool = false,
debug_info_index: ?Symbol.Index = null,
debug_abbrev_index: ?Symbol.Index = null,
@@ -58,6 +59,7 @@ debug_line_index: ?Symbol.Index = null,
debug_line_str_index: ?Symbol.Index = null,
debug_loclists_index: ?Symbol.Index = null,
debug_rnglists_index: ?Symbol.Index = null,
eh_frame_index: ?Symbol.Index = null,
pub const global_symbol_bit: u32 = 0x80000000;
pub const symbol_mask: u32 = 0x7fffffff;
@@ -72,8 +74,6 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
const ptr_size = elf_file.ptrWidthBytes();
const target = elf_file.getTarget();
const ptr_bit_width = target.ptrBitWidth();
try self.atoms.append(gpa, .{ .extra_index = try self.addAtomExtra(gpa, .{}) }); // null input section
try self.relocs.append(gpa, .{}); // null relocs section
@@ -113,7 +113,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
.type = elf.PT_LOAD,
.offset = off,
.filesz = filesz,
.addr = if (ptr_bit_width >= 32) 0x4000000 else 0x4000,
.addr = if (ptr_size >= 4) 0x4000000 else 0x4000,
.memsz = filesz,
.@"align" = elf_file.page_size,
.flags = elf.PF_X | elf.PF_R | elf.PF_W,
@@ -128,7 +128,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
.type = elf.PT_LOAD,
.offset = off,
.filesz = filesz,
.addr = if (ptr_bit_width >= 32) 0xc000000 else 0xa000,
.addr = if (ptr_size >= 4) 0xc000000 else 0xa000,
.memsz = filesz,
.@"align" = alignment,
.flags = elf.PF_R | elf.PF_W,
@@ -143,7 +143,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
.type = elf.PT_LOAD,
.offset = off,
.filesz = filesz,
.addr = if (ptr_bit_width >= 32) 0x10000000 else 0xc000,
.addr = if (ptr_size >= 4) 0x10000000 else 0xc000,
.memsz = filesz,
.@"align" = alignment,
.flags = elf.PF_R | elf.PF_W,
@@ -154,7 +154,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
const alignment = elf_file.page_size;
elf_file.phdr_zig_load_zerofill_index = try elf_file.addPhdr(.{
.type = elf.PT_LOAD,
.addr = if (ptr_bit_width >= 32) 0x14000000 else 0xf000,
.addr = if (ptr_size >= 4) 0x14000000 else 0xf000,
.memsz = 1024,
.@"align" = alignment,
.flags = elf.PF_R | elf.PF_W,
@@ -354,6 +354,20 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
self.debug_rnglists_index = try addSectionSymbol(self, gpa, ".debug_rnglists", .@"1", elf_file.debug_rnglists_section_index.?);
}
if (elf_file.eh_frame_section_index == null) {
elf_file.eh_frame_section_index = try elf_file.addSection(.{
.name = try elf_file.insertShString(".eh_frame"),
.type = if (elf_file.getTarget().cpu.arch == .x86_64)
elf.SHT_X86_64_UNWIND
else
elf.SHT_PROGBITS,
.flags = elf.SHF_ALLOC,
.addralign = ptr_size,
});
self.eh_frame_section_dirty = true;
self.eh_frame_index = try addSectionSymbol(self, gpa, ".eh_frame", Atom.Alignment.fromNonzeroByteUnits(ptr_size), elf_file.eh_frame_section_index.?);
}
try dwarf.initMetadata();
self.dwarf = dwarf;
},
@@ -460,6 +474,7 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
self.debug_line_str_index.?,
self.debug_loclists_index.?,
self.debug_rnglists_index.?,
self.eh_frame_index.?,
}, [_]*Dwarf.Section{
&dwarf.debug_info.section,
&dwarf.debug_abbrev.section,
@@ -469,7 +484,18 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
&dwarf.debug_line_str.section,
&dwarf.debug_loclists.section,
&dwarf.debug_rnglists.section,
}) |sym_index, sect| {
&dwarf.debug_frame.section,
}, [_]Dwarf.Section.Index{
.debug_info,
.debug_abbrev,
.debug_str,
.debug_aranges,
.debug_line,
.debug_line_str,
.debug_loclists,
.debug_rnglists,
.debug_frame,
}) |sym_index, sect, sect_index| {
const sym = self.symbol(sym_index);
const atom_ptr = self.atom(sym.ref.index).?;
if (!atom_ptr.alive) continue;
@@ -509,6 +535,8 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
for (unit.cross_section_relocs.items) |reloc| {
const target_sym_index = switch (reloc.target_sec) {
.debug_abbrev => self.debug_abbrev_index.?,
.debug_aranges => self.debug_aranges_index.?,
.debug_frame => self.eh_frame_index.?,
.debug_info => self.debug_info_index.?,
.debug_line => self.debug_line_index.?,
.debug_line_str => self.debug_line_str_index.?,
@@ -547,7 +575,10 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
entry.external_relocs.items.len);
for (entry.cross_entry_relocs.items) |reloc| {
const r_offset = entry_off + reloc.source_off;
const r_addend: i64 = @intCast(unit.off + reloc.target_off + unit.header_len + unit.getEntry(reloc.target_entry).assertNonEmpty(unit, sect, dwarf).off);
const r_addend: i64 = @intCast(unit.off + reloc.target_off + (if (reloc.target_entry.unwrap()) |target_entry|
unit.header_len + unit.getEntry(target_entry).assertNonEmpty(unit, sect, dwarf).off
else
0));
const r_type = relocation.dwarf.crossSectionRelocType(dwarf.format, cpu_arch);
log.debug(" {s} <- r_off={x}, r_add={x}, r_type={}", .{
self.symbol(sym_index).name(elf_file),
@@ -584,6 +615,8 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
for (entry.cross_section_relocs.items) |reloc| {
const target_sym_index = switch (reloc.target_sec) {
.debug_abbrev => self.debug_abbrev_index.?,
.debug_aranges => self.debug_aranges_index.?,
.debug_frame => self.eh_frame_index.?,
.debug_info => self.debug_info_index.?,
.debug_line => self.debug_line_index.?,
.debug_line_str => self.debug_line_str_index.?,
@@ -617,7 +650,7 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
const target_sym = self.symbol(reloc.target_sym);
const r_offset = entry_off + reloc.source_off;
const r_addend: i64 = @intCast(reloc.target_off);
const r_type = relocation.dwarf.externalRelocType(target_sym.*, dwarf.address_size, cpu_arch);
const r_type = relocation.dwarf.externalRelocType(target_sym.*, sect_index, dwarf.address_size, cpu_arch);
log.debug(" {s} <- r_off={x}, r_add={x}, r_type={}", .{
target_sym.name(elf_file),
r_offset,

View File

@@ -289,8 +289,6 @@ fn claimUnresolved(elf_file: *Elf) void {
}
fn initSections(elf_file: *Elf) !void {
const ptr_size = elf_file.ptrWidthBytes();
for (elf_file.objects.items) |index| {
const object = elf_file.file(index).?.object;
try object.initOutputSections(elf_file);
@@ -306,13 +304,18 @@ fn initSections(elf_file: *Elf) !void {
if (elf_file.file(index).?.object.cies.items.len > 0) break true;
} else false;
if (needs_eh_frame) {
elf_file.eh_frame_section_index = try elf_file.addSection(.{
.name = try elf_file.insertShString(".eh_frame"),
.type = elf.SHT_PROGBITS,
.flags = elf.SHF_ALLOC,
.addralign = ptr_size,
.offset = std.math.maxInt(u64),
});
if (elf_file.eh_frame_section_index == null) {
elf_file.eh_frame_section_index = try elf_file.addSection(.{
.name = try elf_file.insertShString(".eh_frame"),
.type = if (elf_file.getTarget().cpu.arch == .x86_64)
elf.SHT_X86_64_UNWIND
else
elf.SHT_PROGBITS,
.flags = elf.SHF_ALLOC,
.addralign = elf_file.ptrWidthBytes(),
.offset = std.math.maxInt(u64),
});
}
elf_file.eh_frame_rela_section_index = try elf_file.addRelaShdr(
try elf_file.insertShString(".rela.eh_frame"),
elf_file.eh_frame_section_index.?,
@@ -373,7 +376,11 @@ fn updateSectionSizes(elf_file: *Elf) !void {
}
if (elf_file.eh_frame_section_index) |index| {
slice.items(.shdr)[index].sh_size = try eh_frame.calcEhFrameSize(elf_file);
slice.items(.shdr)[index].sh_size = existing_size: {
const zo = elf_file.zigObjectPtr() orelse break :existing_size 0;
const sym = zo.symbol(zo.eh_frame_index orelse break :existing_size 0);
break :existing_size sym.atom(elf_file).?.size;
} + try eh_frame.calcEhFrameSize(elf_file);
}
if (elf_file.eh_frame_rela_section_index) |index| {
const shdr = &slice.items(.shdr)[index];
@@ -526,17 +533,22 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
}
if (elf_file.eh_frame_section_index) |shndx| {
const existing_size = existing_size: {
const zo = elf_file.zigObjectPtr() orelse break :existing_size 0;
const sym = zo.symbol(zo.eh_frame_index orelse break :existing_size 0);
break :existing_size sym.atom(elf_file).?.size;
};
const shdr = slice.items(.shdr)[shndx];
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
var buffer = try std.ArrayList(u8).initCapacity(gpa, sh_size);
var buffer = try std.ArrayList(u8).initCapacity(gpa, @intCast(sh_size - existing_size));
defer buffer.deinit();
try eh_frame.writeEhFrameObject(elf_file, buffer.writer());
log.debug("writing .eh_frame from 0x{x} to 0x{x}", .{
shdr.sh_offset,
shdr.sh_offset + shdr.sh_size,
shdr.sh_offset + existing_size,
shdr.sh_offset + sh_size,
});
assert(buffer.items.len == sh_size);
try elf_file.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
assert(buffer.items.len == sh_size - existing_size);
try elf_file.base.file.?.pwriteAll(buffer.items, shdr.sh_offset + existing_size);
}
if (elf_file.eh_frame_rela_section_index) |shndx| {
const shdr = slice.items(.shdr)[shndx];

View File

@@ -108,20 +108,27 @@ pub const dwarf = struct {
pub fn externalRelocType(
target: Symbol,
source_section: Dwarf.Section.Index,
address_size: Dwarf.AddressSize,
cpu_arch: std.Target.Cpu.Arch,
) u32 {
return switch (cpu_arch) {
.x86_64 => @intFromEnum(switch (address_size) {
.@"32" => if (target.flags.is_tls) elf.R_X86_64.DTPOFF32 else .@"32",
.@"64" => if (target.flags.is_tls) elf.R_X86_64.DTPOFF64 else .@"64",
else => unreachable,
}),
.riscv64 => @intFromEnum(switch (address_size) {
.@"32" => elf.R_RISCV.@"32",
.@"64" => elf.R_RISCV.@"64",
else => unreachable,
}),
.x86_64 => @intFromEnum(@as(elf.R_X86_64, switch (source_section) {
else => switch (address_size) {
.@"32" => if (target.flags.is_tls) .DTPOFF32 else .@"32",
.@"64" => if (target.flags.is_tls) .DTPOFF64 else .@"64",
else => unreachable,
},
.debug_frame => .PC32,
})),
.riscv64 => @intFromEnum(@as(elf.R_RISCV, switch (source_section) {
else => switch (address_size) {
.@"32" => .@"32",
.@"64" => .@"64",
else => unreachable,
},
.debug_frame => unreachable,
})),
else => @panic("TODO unhandled cpu arch"),
};
}

View File

@@ -3411,9 +3411,7 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo
if (!sect.isZerofill()) {
const allocated_size = self.allocatedSize(sect.offset);
if (sect.offset + allocated_size == std.math.maxInt(u64)) {
try self.base.file.?.setEndPos(sect.offset + needed_size);
} else if (needed_size > allocated_size) {
if (needed_size > allocated_size) {
const existing_size = sect.size;
sect.size = 0;
@@ -3431,6 +3429,8 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo
try self.copyRangeAllZeroOut(sect.offset, new_offset, existing_size);
sect.offset = @intCast(new_offset);
} else if (sect.offset + allocated_size == std.math.maxInt(u64)) {
try self.base.file.?.setEndPos(sect.offset + needed_size);
}
seg.filesize = needed_size;
}
@@ -3456,9 +3456,7 @@ fn growSectionRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void
if (!sect.isZerofill()) {
const allocated_size = self.allocatedSize(sect.offset);
if (sect.offset + allocated_size == std.math.maxInt(u64)) {
try self.base.file.?.setEndPos(sect.offset + needed_size);
} else if (needed_size > allocated_size) {
if (needed_size > allocated_size) {
const existing_size = sect.size;
sect.size = 0;
@@ -3480,6 +3478,8 @@ fn growSectionRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void
sect.offset = @intCast(new_offset);
sect.addr = new_addr;
} else if (sect.offset + allocated_size == std.math.maxInt(u64)) {
try self.base.file.?.setEndPos(sect.offset + needed_size);
}
}
sect.size = needed_size;

View File

@@ -105,9 +105,7 @@ pub fn growSection(
const sect = self.getSectionPtr(sect_index);
const allocated_size = self.allocatedSize(sect.offset);
if (sect.offset + allocated_size == std.math.maxInt(u64)) {
try self.file.setEndPos(sect.offset + needed_size);
} else if (needed_size > allocated_size) {
if (needed_size > allocated_size) {
const existing_size = sect.size;
sect.size = 0; // free the space
const new_offset = try self.findFreeSpace(needed_size, 1);
@@ -130,6 +128,8 @@ pub fn growSection(
}
sect.offset = @intCast(new_offset);
} else if (sect.offset + allocated_size == std.math.maxInt(u64)) {
try self.file.setEndPos(sect.offset + needed_size);
}
sect.size = needed_size;