Merge pull request #17359 from jacobly0/x86_64
x86_64: improve C abi support
@@ -156,7 +156,7 @@ pub var elf_aux_maybe: ?[*]std.elf.Auxv = null;
 
 pub usingnamespace if (switch (builtin.zig_backend) {
     // Calling extern functions is not yet supported with these backends
-    .stage2_x86_64, .stage2_aarch64, .stage2_arm, .stage2_riscv64, .stage2_sparc64 => false,
+    .stage2_aarch64, .stage2_arm, .stage2_riscv64, .stage2_sparc64 => false,
     else => !builtin.link_libc,
 }) struct {
     /// See `std.elf` for the constants.
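
The hunk above drops `.stage2_x86_64` from the backends that cannot call extern functions. For readers unfamiliar with the gating idiom, here is a minimal sketch of how `pub usingnamespace` over a conditional struct exposes declarations only when the condition holds (the `getpid` binding is my own illustration, not from this commit):

```zig
const builtin = @import("builtin");

// Declarations inside the struct only exist when the condition is true,
// so unsupported configurations never see the extern prototypes at all.
pub usingnamespace if (builtin.link_libc) struct {
    pub extern "c" fn getpid() c_int;
} else struct {};
```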
@@ -445,25 +445,23 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
         },
         .plan9 => |dbg_out| {
             if (delta_pc <= 0) return; // only do this when the pc changes
-            // we have already checked the target in the linker to make sure it is compatable
-            const quant = @import("../../link/Plan9/aout.zig").getPCQuant(self.target.cpu.arch) catch unreachable;
 
             // increasing the line number
-            try @import("../../link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line);
+            try link.File.Plan9.changeLine(&dbg_out.dbg_line, delta_line);
             // increasing the pc
-            const d_pc_p9 = @as(i64, @intCast(delta_pc)) - quant;
+            const d_pc_p9 = @as(i64, @intCast(delta_pc)) - dbg_out.pc_quanta;
             if (d_pc_p9 > 0) {
-                // minus one because if its the last one, we want to leave space to change the line which is one quanta
-                try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, quant) + 128)) - quant);
-                if (dbg_out.pcop_change_index.*) |pci|
+                // minus one because if its the last one, we want to leave space to change the line which is one pc quanta
+                try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, dbg_out.pc_quanta) + 128)) - dbg_out.pc_quanta);
+                if (dbg_out.pcop_change_index) |pci|
                     dbg_out.dbg_line.items[pci] += 1;
-                dbg_out.pcop_change_index.* = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1));
+                dbg_out.pcop_change_index = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1));
             } else if (d_pc_p9 == 0) {
-                // we don't need to do anything, because adding the quant does it for us
+                // we don't need to do anything, because adding the pc quanta does it for us
             } else unreachable;
-            if (dbg_out.start_line.* == null)
-                dbg_out.start_line.* = self.prev_di_line;
-            dbg_out.end_line.* = line;
+            if (dbg_out.start_line == null)
+                dbg_out.start_line = self.prev_di_line;
+            dbg_out.end_line = line;
             // only do this if the pc changed
             self.prev_di_line = line;
             self.prev_di_column = column;
@@ -362,25 +362,23 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
         },
         .plan9 => |dbg_out| {
             if (delta_pc <= 0) return; // only do this when the pc changes
-            // we have already checked the target in the linker to make sure it is compatable
-            const quant = @import("../../link/Plan9/aout.zig").getPCQuant(self.target.cpu.arch) catch unreachable;
 
             // increasing the line number
-            try @import("../../link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line);
+            try link.File.Plan9.changeLine(&dbg_out.dbg_line, delta_line);
             // increasing the pc
-            const d_pc_p9 = @as(i64, @intCast(delta_pc)) - quant;
+            const d_pc_p9 = @as(i64, @intCast(delta_pc)) - dbg_out.pc_quanta;
             if (d_pc_p9 > 0) {
-                // minus one because if its the last one, we want to leave space to change the line which is one quanta
-                try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, quant) + 128)) - quant);
-                if (dbg_out.pcop_change_index.*) |pci|
+                // minus one because if its the last one, we want to leave space to change the line which is one pc quanta
+                try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, dbg_out.pc_quanta) + 128)) - dbg_out.pc_quanta);
+                if (dbg_out.pcop_change_index) |pci|
                     dbg_out.dbg_line.items[pci] += 1;
-                dbg_out.pcop_change_index.* = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1));
+                dbg_out.pcop_change_index = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1));
             } else if (d_pc_p9 == 0) {
-                // we don't need to do anything, because adding the quant does it for us
+                // we don't need to do anything, because adding the pc quanta does it for us
             } else unreachable;
-            if (dbg_out.start_line.* == null)
-                dbg_out.start_line.* = self.prev_di_line;
-            dbg_out.end_line.* = line;
+            if (dbg_out.start_line == null)
+                dbg_out.start_line = self.prev_di_line;
+            dbg_out.end_line = line;
             // only do this if the pc changed
             self.prev_di_line = line;
             self.prev_di_column = column;
@@ -96,25 +96,23 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
         },
         .plan9 => |dbg_out| {
             if (delta_pc <= 0) return; // only do this when the pc changes
-            // we have already checked the target in the linker to make sure it is compatable
-            const quant = @import("../../link/Plan9/aout.zig").getPCQuant(self.target.cpu.arch) catch unreachable;
 
             // increasing the line number
-            try @import("../../link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line);
+            try link.File.Plan9.changeLine(&dbg_out.dbg_line, delta_line);
             // increasing the pc
-            const d_pc_p9 = @as(i64, @intCast(delta_pc)) - quant;
+            const d_pc_p9 = @as(i64, @intCast(delta_pc)) - dbg_out.pc_quanta;
             if (d_pc_p9 > 0) {
-                // minus one because if its the last one, we want to leave space to change the line which is one quanta
-                try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, quant) + 128)) - quant);
-                if (dbg_out.pcop_change_index.*) |pci|
+                // minus one because if its the last one, we want to leave space to change the line which is one pc quanta
+                try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, dbg_out.pc_quanta) + 128)) - dbg_out.pc_quanta);
+                if (dbg_out.pcop_change_index) |pci|
                     dbg_out.dbg_line.items[pci] += 1;
-                dbg_out.pcop_change_index.* = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1));
+                dbg_out.pcop_change_index = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1));
             } else if (d_pc_p9 == 0) {
-                // we don't need to do anything, because adding the quant does it for us
+                // we don't need to do anything, because adding the pc quanta does it for us
             } else unreachable;
-            if (dbg_out.start_line.* == null)
-                dbg_out.start_line.* = self.prev_di_line;
-            dbg_out.end_line.* = line;
+            if (dbg_out.start_line == null)
+                dbg_out.start_line = self.prev_di_line;
+            dbg_out.end_line = line;
             // only do this if the pc changed
             self.prev_di_line = line;
             self.prev_di_column = column;
File diff suppressed because it is too large
@@ -242,16 +242,14 @@ fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) Error!void {
         },
         .plan9 => |dbg_out| {
             if (delta_pc <= 0) return; // only do this when the pc changes
-            // we have already checked the target in the linker to make sure it is compatable
-            const quant = @import("../../link/Plan9/aout.zig").getPCQuant(emit.lower.target.cpu.arch) catch unreachable;
 
             // increasing the line number
-            try @import("../../link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line);
+            try link.File.Plan9.changeLine(&dbg_out.dbg_line, delta_line);
             // increasing the pc
-            const d_pc_p9 = @as(i64, @intCast(delta_pc)) - quant;
+            const d_pc_p9 = @as(i64, @intCast(delta_pc)) - dbg_out.pc_quanta;
             if (d_pc_p9 > 0) {
-                // minus one because if its the last one, we want to leave space to change the line which is one quanta
-                var diff = @divExact(d_pc_p9, quant) - quant;
+                // minus one because if its the last one, we want to leave space to change the line which is one pc quanta
+                var diff = @divExact(d_pc_p9, dbg_out.pc_quanta) - dbg_out.pc_quanta;
                 while (diff > 0) {
                     if (diff < 64) {
                         try dbg_out.dbg_line.append(@as(u8, @intCast(diff + 128)));
@@ -261,15 +259,15 @@ fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) Error!void {
                         diff -= 64;
                     }
                 }
-                if (dbg_out.pcop_change_index.*) |pci|
+                if (dbg_out.pcop_change_index) |pci|
                     dbg_out.dbg_line.items[pci] += 1;
-                dbg_out.pcop_change_index.* = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1));
+                dbg_out.pcop_change_index = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1));
             } else if (d_pc_p9 == 0) {
-                // we don't need to do anything, because adding the quant does it for us
+                // we don't need to do anything, because adding the pc quanta does it for us
             } else unreachable;
-            if (dbg_out.start_line.* == null)
-                dbg_out.start_line.* = emit.prev_di_line;
-            dbg_out.end_line.* = line;
+            if (dbg_out.start_line == null)
+                dbg_out.start_line = emit.prev_di_line;
+            dbg_out.end_line = line;
             // only do this if the pc changed
             emit.prev_di_line = line;
             emit.prev_di_column = column;
@@ -2,7 +2,7 @@
 
 allocator: Allocator,
 mir: Mir,
-target: *const std.Target,
+cc: std.builtin.CallingConvention,
 err_msg: ?*ErrorMsg = null,
 src_loc: Module.SrcLoc,
 result_insts_len: u8 = undefined,
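
Lower now carries the resolved calling convention instead of re-deriving register sets from the target. A hypothetical initialization under that design (the `gpa`, `mir`, `fn_info`, `target`, and `src_loc` values are assumptions, not from this commit):

```zig
// Resolve the convention once and store it; lowering then keys every
// register-table lookup off `cc` rather than the OS tag.
var lower: Lower = .{
    .allocator = gpa,
    .mir = mir,
    .cc = abi.resolveCallingConvention(fn_info.cc, target),
    .src_loc = src_loc,
};
```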
@@ -552,15 +552,13 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
 }
 
 fn pushPopRegList(lower: *Lower, comptime mnemonic: Mnemonic, inst: Mir.Inst) Error!void {
-    const callee_preserved_regs = abi.getCalleePreservedRegs(lower.target.*);
+    const callee_preserved_regs = abi.getCalleePreservedRegs(lower.cc);
     var it = inst.data.reg_list.iterator(.{ .direction = switch (mnemonic) {
         .push => .reverse,
         .pop => .forward,
         else => unreachable,
     } });
-    while (it.next()) |i| try lower.emit(.none, mnemonic, &.{.{
-        .reg = callee_preserved_regs[i],
-    }});
+    while (it.next()) |i| try lower.emit(.none, mnemonic, &.{.{ .reg = callee_preserved_regs[i] }});
 }
 
 const page_size: i32 = 1 << 12;
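
The push/pop iteration directions mirror each other so registers are restored in the reverse order they were saved. A self-contained sketch of that pattern, using a bit-set standing in for Mir's reg_list (register numbers here are arbitrary):

```zig
const std = @import("std");

pub fn main() void {
    var regs = std.StaticBitSet(16).initEmpty();
    regs.set(1); // e.g. rbx
    regs.set(5); // e.g. r12
    // pushes walk the set backwards...
    var pushes = regs.iterator(.{ .direction = .reverse });
    while (pushes.next()) |i| std.debug.print("push reg[{d}]\n", .{i});
    // ...and pops walk it forwards, so the stack unwinds in mirror order
    var pops = regs.iterator(.{ .direction = .forward });
    while (pops.next()) |i| std.debug.print("pop  reg[{d}]\n", .{i});
}
```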
@@ -137,7 +137,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
             return result;
         },
         128 => {
-            // "Arguments of types__float128, _Decimal128 and__m128 are
+            // "Arguments of types __float128, _Decimal128 and __m128 are
             // split into two halves. The least significant ones belong
             // to class SSE, the most significant one to class SSEUP."
             if (ctx == .other) {
@@ -213,9 +213,9 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
             const struct_type = mod.typeToStruct(ty).?;
             const ty_size = ty.abiSize(mod);
             if (struct_type.layout == .Packed) {
-                assert(ty_size <= 128);
+                assert(ty_size <= 16);
                 result[0] = .integer;
-                if (ty_size > 64) result[1] = .integer;
+                if (ty_size > 8) result[1] = .integer;
                 return result;
             }
             if (ty_size > 64)
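
The asserts and thresholds change because `abiSize` returns bytes, not bits: a packed aggregate occupies at most two SysV "eightbytes" (16 bytes), and needs the second INTEGER class only past 8 bytes. A tiny standalone restatement of that arithmetic (the function name is mine):

```zig
const std = @import("std");

// abiSize() counts bytes, so the eightbyte thresholds are 8 and 16,
// not the bit counts 64 and 128 used by mistake before this commit.
fn eightbytesNeeded(abi_size_bytes: u64) u2 {
    std.debug.assert(abi_size_bytes <= 16);
    return if (abi_size_bytes > 8) 2 else 1;
}

test "packed aggregates span at most two eightbytes" {
    try std.testing.expectEqual(@as(u2, 1), eightbytesNeeded(8));
    try std.testing.expectEqual(@as(u2, 2), eightbytesNeeded(12));
}
```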
@@ -331,9 +331,9 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
             const union_obj = mod.typeToUnion(ty).?;
             const ty_size = mod.unionAbiSize(union_obj);
             if (union_obj.getLayout(ip) == .Packed) {
-                assert(ty_size <= 128);
+                assert(ty_size <= 16);
                 result[0] = .integer;
-                if (ty_size > 64) result[1] = .integer;
+                if (ty_size > 8) result[1] = .integer;
                 return result;
             }
             if (ty_size > 64)
@@ -422,11 +422,11 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
         },
         .Array => {
             const ty_size = ty.abiSize(mod);
-            if (ty_size <= 64) {
+            if (ty_size <= 8) {
                 result[0] = .integer;
                 return result;
             }
-            if (ty_size <= 128) {
+            if (ty_size <= 16) {
                 result[0] = .integer;
                 result[1] = .integer;
                 return result;
@@ -447,7 +447,9 @@ pub const SysV = struct {
     pub const caller_preserved_regs = [_]Register{ .rax, .rcx, .rdx, .rsi, .rdi, .r8, .r9, .r10, .r11 } ++ sse_avx_regs;
 
     pub const c_abi_int_param_regs = [_]Register{ .rdi, .rsi, .rdx, .rcx, .r8, .r9 };
+    pub const c_abi_sse_param_regs = sse_avx_regs[0..8].*;
     pub const c_abi_int_return_regs = [_]Register{ .rax, .rdx };
+    pub const c_abi_sse_return_regs = sse_avx_regs[0..2].*;
 };
 
 pub const Win64 = struct {
@@ -460,34 +462,69 @@ pub const Win64 = struct {
     pub const caller_preserved_regs = [_]Register{ .rax, .rcx, .rdx, .r8, .r9, .r10, .r11 } ++ sse_avx_regs;
 
     pub const c_abi_int_param_regs = [_]Register{ .rcx, .rdx, .r8, .r9 };
+    pub const c_abi_sse_param_regs = sse_avx_regs[0..4].*;
     pub const c_abi_int_return_regs = [_]Register{.rax};
+    pub const c_abi_sse_return_regs = sse_avx_regs[0..1].*;
 };
 
-pub fn getCalleePreservedRegs(target: Target) []const Register {
-    return switch (target.os.tag) {
-        .windows => &Win64.callee_preserved_regs,
-        else => &SysV.callee_preserved_regs,
+pub fn resolveCallingConvention(
+    cc: std.builtin.CallingConvention,
+    target: std.Target,
+) std.builtin.CallingConvention {
+    return switch (cc) {
+        .Unspecified, .C => switch (target.os.tag) {
+            else => .SysV,
+            .windows => .Win64,
+        },
+        else => cc,
     };
 }
 
-pub fn getCallerPreservedRegs(target: Target) []const Register {
-    return switch (target.os.tag) {
-        .windows => &Win64.caller_preserved_regs,
-        else => &SysV.caller_preserved_regs,
+pub fn getCalleePreservedRegs(cc: std.builtin.CallingConvention) []const Register {
+    return switch (cc) {
+        .SysV => &SysV.callee_preserved_regs,
+        .Win64 => &Win64.callee_preserved_regs,
+        else => unreachable,
     };
 }
 
-pub fn getCAbiIntParamRegs(target: Target) []const Register {
-    return switch (target.os.tag) {
-        .windows => &Win64.c_abi_int_param_regs,
-        else => &SysV.c_abi_int_param_regs,
+pub fn getCallerPreservedRegs(cc: std.builtin.CallingConvention) []const Register {
+    return switch (cc) {
+        .SysV => &SysV.caller_preserved_regs,
+        .Win64 => &Win64.caller_preserved_regs,
+        else => unreachable,
     };
 }
 
-pub fn getCAbiIntReturnRegs(target: Target) []const Register {
-    return switch (target.os.tag) {
-        .windows => &Win64.c_abi_int_return_regs,
-        else => &SysV.c_abi_int_return_regs,
+pub fn getCAbiIntParamRegs(cc: std.builtin.CallingConvention) []const Register {
+    return switch (cc) {
+        .SysV => &SysV.c_abi_int_param_regs,
+        .Win64 => &Win64.c_abi_int_param_regs,
+        else => unreachable,
     };
 }
 
+pub fn getCAbiSseParamRegs(cc: std.builtin.CallingConvention) []const Register {
+    return switch (cc) {
+        .SysV => &SysV.c_abi_sse_param_regs,
+        .Win64 => &Win64.c_abi_sse_param_regs,
+        else => unreachable,
+    };
+}
+
+pub fn getCAbiIntReturnRegs(cc: std.builtin.CallingConvention) []const Register {
+    return switch (cc) {
+        .SysV => &SysV.c_abi_int_return_regs,
+        .Win64 => &Win64.c_abi_int_return_regs,
+        else => unreachable,
+    };
+}
+
+pub fn getCAbiSseReturnRegs(cc: std.builtin.CallingConvention) []const Register {
+    return switch (cc) {
+        .SysV => &SysV.c_abi_sse_return_regs,
+        .Win64 => &Win64.c_abi_sse_return_regs,
+        else => unreachable,
+    };
+}
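
The register tables are now keyed by calling convention rather than by OS tag, with `resolveCallingConvention` mapping `.Unspecified`/`.C` to the concrete convention first. A hypothetical call site under that API (`abi` is this file's namespace; `Register` and `target` come from the surrounding code):

```zig
// Resolve the source-level convention to SysV or Win64, then query the tables.
fn pickParamRegs(target: std.Target) struct { ints: []const Register, sses: []const Register } {
    const cc = abi.resolveCallingConvention(.C, target);
    return .{
        .ints = abi.getCAbiIntParamRegs(cc), // SysV: rdi, rsi, rdx, rcx, r8, r9
        .sses = abi.getCAbiSseParamRegs(cc), // SysV: xmm0..xmm7; Win64: xmm0..xmm3
    };
}
```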
@@ -524,7 +561,6 @@ pub const RegisterClass = struct {
 
 const builtin = @import("builtin");
 const std = @import("std");
-const Target = std.Target;
 const assert = std.debug.assert;
 const testing = std.testing;
 
@@ -40,28 +40,7 @@ pub const CodeGenError = error{
 
 pub const DebugInfoOutput = union(enum) {
     dwarf: *link.File.Dwarf.DeclState,
-    /// the plan9 debuginfo output is a bytecode with 4 opcodes
-    /// assume all numbers/variables are bytes
-    /// 0 w x y z -> interpret w x y z as a big-endian i32, and add it to the line offset
-    /// x when x < 65 -> add x to line offset
-    /// x when x < 129 -> subtract 64 from x and subtract it from the line offset
-    /// x -> subtract 129 from x, multiply it by the quanta of the instruction size
-    /// (1 on x86_64), and add it to the pc
-    /// after every opcode, add the quanta of the instruction size to the pc
-    plan9: struct {
-        /// the actual opcodes
-        dbg_line: *std.ArrayList(u8),
-        /// what line the debuginfo starts on
-        /// this helps because the linker might have to insert some opcodes to make sure that the line count starts at the right amount for the next decl
-        start_line: *?u32,
-        /// what the line count ends on after codegen
-        /// this helps because the linker might have to insert some opcodes to make sure that the line count starts at the right amount for the next decl
-        end_line: *u32,
-        /// the last pc change op
-        /// This is very useful for adding quanta
-        /// to it if its not actually the last one.
-        pcop_change_index: *?u32,
-    },
+    plan9: *link.File.Plan9.DebugInfoOutput,
     none,
 };
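
Codegen now receives a single pointer to linker-owned state instead of four separate pointers. A hypothetical caller, mirroring the Plan9 linker change later in this diff (`gpa` is an assumed allocator):

```zig
// The linker owns the whole bundle; codegen just gets one pointer to it.
var out: link.File.Plan9.DebugInfoOutput = .{
    .dbg_line = std.ArrayList(u8).init(gpa),
    .start_line = null,
    .end_line = undefined,
    .pcop_change_index = null,
    .pc_quanta = 1, // x86_64
};
defer out.dbg_line.deinit();
const debug_output: DebugInfoOutput = .{ .plan9 = &out };
```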
@@ -566,6 +566,7 @@ pub const DeclState = struct {
 
     pub const DbgInfoLoc = union(enum) {
         register: u8,
+        register_pair: [2]u8,
         stack: struct {
             fp_register: u8,
             offset: i32,
@@ -610,6 +611,42 @@ pub const DeclState = struct {
                         leb128.writeULEB128(dbg_info.writer(), reg) catch unreachable;
                     }
                 },
+                .register_pair => |regs| {
+                    const reg_bits = self.mod.getTarget().ptrBitWidth();
+                    const reg_bytes = @as(u8, @intCast(@divExact(reg_bits, 8)));
+                    const abi_size = ty.abiSize(self.mod);
+                    try dbg_info.ensureUnusedCapacity(10);
+                    dbg_info.appendAssumeCapacity(@intFromEnum(AbbrevKind.parameter));
+                    // DW.AT.location, DW.FORM.exprloc
+                    var expr_len = std.io.countingWriter(std.io.null_writer);
+                    for (regs, 0..) |reg, reg_i| {
+                        if (reg < 32) {
+                            expr_len.writer().writeByte(DW.OP.reg0 + reg) catch unreachable;
+                        } else {
+                            expr_len.writer().writeByte(DW.OP.regx) catch unreachable;
+                            leb128.writeULEB128(expr_len.writer(), reg) catch unreachable;
+                        }
+                        expr_len.writer().writeByte(DW.OP.piece) catch unreachable;
+                        leb128.writeULEB128(
+                            expr_len.writer(),
+                            @min(abi_size - reg_i * reg_bytes, reg_bytes),
+                        ) catch unreachable;
+                    }
+                    leb128.writeULEB128(dbg_info.writer(), expr_len.bytes_written) catch unreachable;
+                    for (regs, 0..) |reg, reg_i| {
+                        if (reg < 32) {
+                            dbg_info.appendAssumeCapacity(DW.OP.reg0 + reg);
+                        } else {
+                            dbg_info.appendAssumeCapacity(DW.OP.regx);
+                            leb128.writeULEB128(dbg_info.writer(), reg) catch unreachable;
+                        }
+                        dbg_info.appendAssumeCapacity(DW.OP.piece);
+                        leb128.writeULEB128(
+                            dbg_info.writer(),
+                            @min(abi_size - reg_i * reg_bytes, reg_bytes),
+                        ) catch unreachable;
+                    }
+                },
                 .stack => |info| {
                     try dbg_info.ensureUnusedCapacity(9);
                     dbg_info.appendAssumeCapacity(@intFromEnum(AbbrevKind.parameter));
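
The new `.register_pair` branch describes a value split across two registers with `DW_OP_piece`. A worked example of the bytes it would produce (my own illustration, not from the commit): a 16-byte value in DWARF registers 0 and 2, with 8-byte registers, encodes as a 6-byte location expression.

```zig
const std = @import("std");

test "register_pair exprloc worked example" {
    const DW_OP_reg0: u8 = 0x50; // short-form register opcodes cover regs 0..31
    const DW_OP_piece: u8 = 0x93;
    var buf = std.ArrayList(u8).init(std.testing.allocator);
    defer buf.deinit();
    const regs = [2]u8{ 0, 2 }; // e.g. rax:rdx in the SysV numbering
    const abi_size: u64 = 16;
    const reg_bytes: u8 = 8;
    try buf.append(6); // ULEB128 expression length: two (reg, piece, size) triples
    for (regs, 0..) |reg, i| {
        try buf.append(DW_OP_reg0 + reg);
        try buf.append(DW_OP_piece);
        try buf.append(@intCast(@min(abi_size - i * reg_bytes, reg_bytes)));
    }
    try std.testing.expectEqualSlices(u8, &.{ 6, 0x50, 0x93, 8, 0x52, 0x93, 8 }, buf.items);
}
```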
@@ -676,7 +713,7 @@ pub const DeclState = struct {
 
         switch (loc) {
             .register => |reg| {
-                try dbg_info.ensureUnusedCapacity(4);
+                try dbg_info.ensureUnusedCapacity(3);
                 // DW.AT.location, DW.FORM.exprloc
                 var expr_len = std.io.countingWriter(std.io.null_writer);
                 if (reg < 32) {
@@ -694,6 +731,42 @@ pub const DeclState = struct {
                 }
             },
 
+            .register_pair => |regs| {
+                const reg_bits = self.mod.getTarget().ptrBitWidth();
+                const reg_bytes = @as(u8, @intCast(@divExact(reg_bits, 8)));
+                const abi_size = child_ty.abiSize(self.mod);
+                try dbg_info.ensureUnusedCapacity(9);
+                // DW.AT.location, DW.FORM.exprloc
+                var expr_len = std.io.countingWriter(std.io.null_writer);
+                for (regs, 0..) |reg, reg_i| {
+                    if (reg < 32) {
+                        expr_len.writer().writeByte(DW.OP.reg0 + reg) catch unreachable;
+                    } else {
+                        expr_len.writer().writeByte(DW.OP.regx) catch unreachable;
+                        leb128.writeULEB128(expr_len.writer(), reg) catch unreachable;
+                    }
+                    expr_len.writer().writeByte(DW.OP.piece) catch unreachable;
+                    leb128.writeULEB128(
+                        expr_len.writer(),
+                        @min(abi_size - reg_i * reg_bytes, reg_bytes),
+                    ) catch unreachable;
+                }
+                leb128.writeULEB128(dbg_info.writer(), expr_len.bytes_written) catch unreachable;
+                for (regs, 0..) |reg, reg_i| {
+                    if (reg < 32) {
+                        dbg_info.appendAssumeCapacity(DW.OP.reg0 + reg);
+                    } else {
+                        dbg_info.appendAssumeCapacity(DW.OP.regx);
+                        leb128.writeULEB128(dbg_info.writer(), reg) catch unreachable;
+                    }
+                    dbg_info.appendAssumeCapacity(DW.OP.piece);
+                    leb128.writeULEB128(
+                        dbg_info.writer(),
+                        @min(abi_size - reg_i * reg_bytes, reg_bytes),
+                    ) catch unreachable;
+                }
+            },
+
             .stack => |info| {
                 try dbg_info.ensureUnusedCapacity(9);
                 // DW.AT.location, DW.FORM.exprloc
@@ -211,6 +211,31 @@ pub const Atom = struct {
     }
 };
 
+/// the plan9 debuginfo output is a bytecode with 4 opcodes
+/// assume all numbers/variables are bytes
+/// 0 w x y z -> interpret w x y z as a big-endian i32, and add it to the line offset
+/// x when x < 65 -> add x to line offset
+/// x when x < 129 -> subtract 64 from x and subtract it from the line offset
+/// x -> subtract 129 from x, multiply it by the quanta of the instruction size
+/// (1 on x86_64), and add it to the pc
+/// after every opcode, add the quanta of the instruction size to the pc
+pub const DebugInfoOutput = struct {
+    /// the actual opcodes
+    dbg_line: std.ArrayList(u8),
+    /// what line the debuginfo starts on
+    /// this helps because the linker might have to insert some opcodes to make sure that the line count starts at the right amount for the next decl
+    start_line: ?u32,
+    /// what the line count ends on after codegen
+    /// this helps because the linker might have to insert some opcodes to make sure that the line count starts at the right amount for the next decl
+    end_line: u32,
+    /// the last pc change op
+    /// This is very useful for adding quanta
+    /// to it if its not actually the last one.
+    pcop_change_index: ?u32,
+    /// cached pc quanta
+    pc_quanta: u8,
+};
+
 const DeclMetadata = struct {
     index: Atom.Index,
     exports: std.ArrayListUnmanaged(usize) = .{},
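
The doc comment above fully specifies the line-table bytecode, so a decoder is mechanical. A hedged sketch of one (not part of this commit; the function and field names are mine, and the comments restate the doc):

```zig
const std = @import("std");

fn runLineMachine(ops: []const u8, pc_quanta: u8) struct { line: i64, pc: u64 } {
    var line: i64 = 0;
    var pc: u64 = 0;
    var i: usize = 0;
    while (i < ops.len) {
        const x = ops[i];
        if (x == 0) {
            // 0 w x y z -> interpret w x y z as a big-endian i32, add to line
            line += std.mem.readIntBig(i32, ops[i + 1 ..][0..4]);
            i += 5;
        } else if (x < 65) {
            line += x; // small forward line step
            i += 1;
        } else if (x < 129) {
            line -= x - 64; // small backward line step
            i += 1;
        } else {
            pc += @as(u64, x - 129) * pc_quanta; // pc advance
            i += 1;
        }
        // after every opcode, add the quanta of the instruction size to the pc
        pc += pc_quanta;
    }
    return .{ .line = line, .pc = pc };
}
```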
@@ -376,11 +401,15 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air:
 
     var code_buffer = std.ArrayList(u8).init(self.base.allocator);
     defer code_buffer.deinit();
-    var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator);
-    defer dbg_line_buffer.deinit();
-    var start_line: ?u32 = null;
-    var end_line: u32 = undefined;
-    var pcop_change_index: ?u32 = null;
+    var dbg_info_output: DebugInfoOutput = .{
+        .dbg_line = std.ArrayList(u8).init(self.base.allocator),
+        .start_line = null,
+        .end_line = undefined,
+        .pcop_change_index = null,
+        // we have already checked the target in the linker to make sure it is compatable
+        .pc_quanta = aout.getPCQuant(self.base.options.target.cpu.arch) catch unreachable,
+    };
+    defer dbg_info_output.dbg_line.deinit();
 
     const res = try codegen.generateFunction(
         &self.base,
@@ -389,14 +418,7 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air:
         air,
         liveness,
         &code_buffer,
-        .{
-            .plan9 = .{
-                .dbg_line = &dbg_line_buffer,
-                .end_line = &end_line,
-                .start_line = &start_line,
-                .pcop_change_index = &pcop_change_index,
-            },
-        },
+        .{ .plan9 = &dbg_info_output },
     );
     const code = switch (res) {
         .ok => try code_buffer.toOwnedSlice(),
@@ -412,9 +434,9 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air:
     };
     const out: FnDeclOutput = .{
         .code = code,
-        .lineinfo = try dbg_line_buffer.toOwnedSlice(),
-        .start_line = start_line.?,
-        .end_line = end_line,
+        .lineinfo = try dbg_info_output.dbg_line.toOwnedSlice(),
+        .start_line = dbg_info_output.start_line.?,
+        .end_line = dbg_info_output.end_line,
     };
     try self.putFn(decl_index, out);
     return self.updateFinish(decl_index);
@@ -97,7 +97,7 @@ test "@abs floats" {
     try comptime testAbsFloats(f80);
     if (builtin.zig_backend != .stage2_x86_64 and builtin.zig_backend != .stage2_wasm) try testAbsFloats(f80);
     try comptime testAbsFloats(f128);
-    if (builtin.zig_backend != .stage2_x86_64 and builtin.zig_backend != .stage2_wasm) try testAbsFloats(f128);
+    if (builtin.zig_backend != .stage2_wasm) try testAbsFloats(f128);
 }
 
 fn testAbsFloats(comptime T: type) !void {
@@ -297,11 +297,11 @@ test "triple level result location with bitcast sandwich passed as tuple element
 test "@bitCast packed struct of floats" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
 
     const Foo = packed struct {
         a: f16 = 0,
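
Many test gates in this commit follow the same shape: the blanket stage2_x86_64 skip becomes a narrower one that only skips when the object format is not ELF, presumably because the backend's new ABI support landed for ELF output first. The recurring pattern:

```zig
// before: skipped on the self-hosted x86_64 backend unconditionally
// after: runs there too, except for non-ELF object formats (COFF, Mach-O, ...)
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf)
    return error.SkipZigTest;
```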
@@ -375,11 +375,11 @@ test "comptime @bitCast packed struct to int and back" {
 
 test "comptime bitcast with fields following f80" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
 
     const FloatT = extern struct { f: f80, x: u128 align(16) };
     const x: FloatT = .{ .f = 0.5, .x = 123 };
@@ -18,7 +18,6 @@ const wuffs_base__slice_u8 = extern struct {
 };
 test {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -1369,10 +1369,10 @@ fn boolToStr(b: bool) []const u8 {
 test "cast f16 to wider types" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -1389,9 +1389,9 @@ test "cast f16 to wider types" {
 test "cast f128 to narrower types" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -130,7 +130,7 @@ test "cmp f128" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
 
     try testCmp(f128);
     try comptime testCmp(f128);
@@ -1379,7 +1379,7 @@ test "comptime fixed-width float zero divided by zero produces NaN" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
 
     inline for (.{ f16, f32, f64, f80, f128 }) |F| {
         try expect(math.isNan(@as(F, 0) / @as(F, 0)));
@@ -637,11 +637,11 @@ fn testShrTrunc(x: u16) !void {
 }
 
 test "f128" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
 
     try test_f128();
     try comptime test_f128();
@@ -1481,7 +1481,6 @@ test "@round f80" {
 test "@round f128" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
@@ -1522,9 +1521,9 @@ test "vector integer addition" {
 test "NaN comparison" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
 
     try testNanEqNan(f16);
     try testNanEqNan(f32);
@@ -1539,9 +1538,9 @@ test "NaN comparison" {
 test "NaN comparison f80" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
 
     try testNanEqNan(f80);
     try comptime testNanEqNan(f80);
@@ -10,7 +10,6 @@ test "@max" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -30,7 +29,8 @@ test "@max on vectors" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_x86_64 and
+        !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
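
The vector @max/@min tests swap the blanket skip for a CPU-feature gate: `featureSetHas` is evaluated at comptime against the build target, as in this standalone sketch:

```zig
const std = @import("std");
const builtin = @import("builtin");

test "feature gate sketch" {
    // same query the test gates above use: does the compile target have SSE4.1?
    const has_sse4_1 = comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1);
    if (!has_sse4_1) return error.SkipZigTest;
}
```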
@@ -78,7 +78,8 @@ test "@min for vectors" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_x86_64 and
+        !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -258,7 +258,6 @@ test "nested packed struct unaligned" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
     if (native_endian != .Little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet
 
     const S1 = packed struct {
@@ -60,11 +60,11 @@ test "float widening" {
 }
 
 test "float widening f16 to f128" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
 
     var x: f16 = 12.34;
     var y: f128 = x;