Merge pull request #7808 from joachimschmidt557/stage2-aarch64

Stage2 AArch64: Fix genSetStack
This commit is contained in:
Jakub Konka
2021-01-18 20:01:44 +01:00
committed by GitHub
2 changed files with 88 additions and 10 deletions

View File

@@ -641,20 +641,33 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const cc = self.fn_type.fnCallingConvention();
if (cc != .Naked) {
// TODO Finish function prologue and epilogue for aarch64.
// Reserve the stack for local variables, etc.
// stp fp, lr, [sp, #-16]!
// mov fp, sp
// sub sp, sp, #reloc
writeInt(u32, try self.code.addManyAsArray(4), Instruction.stp(
.x29,
.x30,
Register.sp,
Instruction.LoadStorePairOffset.pre_index(-16),
).toU32());
writeInt(u32, try self.code.addManyAsArray(4), Instruction.add(.x29, .xzr, 0, false).toU32());
const backpatch_reloc = self.code.items.len;
try self.code.resize(backpatch_reloc + 4);
try self.dbgSetPrologueEnd();
try self.genBody(self.mod_fn.body);
// Backpatch stack offset
const stack_end = self.max_end_stack;
const aligned_stack_end = mem.alignForward(stack_end, self.stack_align);
if (math.cast(u12, aligned_stack_end)) |size| {
writeInt(u32, self.code.items[backpatch_reloc..][0..4], Instruction.sub(.xzr, .xzr, size, false).toU32());
} else |_| {
return self.failSymbol("TODO AArch64: allow larger stacks", .{});
}
try self.dbgSetEpilogueBegin();
// exitlude jumps
@@ -690,6 +703,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
Register.sp,
Instruction.LoadStorePairOffset.post_index(16),
).toU32());
// add sp, sp, #stack_size
writeInt(u32, try self.code.addManyAsArray(4), Instruction.add(.xzr, .xzr, @intCast(u12, aligned_stack_end), false).toU32());
// ret lr
writeInt(u32, try self.code.addManyAsArray(4), Instruction.ret(null).toU32());
} else {
@@ -2685,9 +2700,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
switch (abi_size) {
1, 4 => {
const offset = if (adj_off <= math.maxInt(u12)) blk: {
break :blk Instruction.Offset.imm(@intCast(u12, adj_off));
} else Instruction.Offset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = adj_off }), 0);
const offset = if (math.cast(u12, adj_off)) |imm| blk: {
break :blk Instruction.Offset.imm(imm);
} else |_| Instruction.Offset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = adj_off }), 0);
const str = switch (abi_size) {
1 => Instruction.strb,
4 => Instruction.str,
@@ -2848,12 +2863,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
switch (abi_size) {
4, 8 => {
const offset = if (adj_off <= math.maxInt(u12)) blk: {
break :blk Instruction.LoadStoreOffset.imm(@intCast(u12, adj_off));
} else Instruction.LoadStoreOffset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = adj_off }));
const rn: Register = switch (abi_size) {
4 => .w29,
8 => .x29,
const offset = if (math.cast(i9, adj_off)) |imm|
Instruction.LoadStoreOffset.imm_post_index(-imm)
else |_|
Instruction.LoadStoreOffset.reg(try self.copyToTmpRegister(src, MCValue{ .immediate = adj_off }));
const rn: Register = switch (arch) {
.aarch64, .aarch64_be => .x29,
.aarch64_32 => .w29,
else => unreachable,
};

View File

@@ -274,6 +274,16 @@ pub const Instruction = union(enum) {
opc: u2,
sf: u1,
},
/// Encoding for the A64 "Add/subtract (immediate)" instruction class.
/// Fields are declared LSB-first so the packed struct can be @bitCast
/// to the 32-bit instruction word (see toU32).
AddSubtractImmediate: packed struct {
    rd: u5, // destination register number
    rn: u5, // first source register number
    imm12: u12, // unsigned 12-bit immediate operand
    sh: u1, // 1 = immediate is shifted left by 12
    fixed: u6 = 0b100010, // fixed opcode bits identifying this encoding class
    s: u1, // 1 = set condition flags (ADDS/SUBS)
    op: u1, // 0 = add, 1 = subtract
    sf: u1, // register width: 0 = 32-bit, 1 = 64-bit
},
pub const Shift = struct {
shift: Type = .lsl,
@@ -304,6 +314,7 @@ pub const Instruction = union(enum) {
.UnconditionalBranchImmediate => |v| @bitCast(u32, v),
.NoOperation => |v| @bitCast(u32, v),
.LogicalShiftedRegister => |v| @bitCast(u32, v),
.AddSubtractImmediate => |v| @bitCast(u32, v),
};
}
@@ -671,6 +682,31 @@ pub const Instruction = union(enum) {
}
}
/// Build an add/subtract (immediate) instruction.
/// `op` selects the operation (0 = add, 1 = sub) and `s` selects the
/// flag-setting variant (1 = ADDS/SUBS); the public helpers add/adds/
/// sub/subs supply these bits. `shift` encodes an LSL #12 of `imm12`.
fn addSubtractImmediate(
    op: u1,
    s: u1,
    rd: Register,
    rn: Register,
    imm12: u12,
    shift: bool,
) Instruction {
    return Instruction{
        .AddSubtractImmediate = .{
            .rd = rd.id(),
            .rn = rn.id(),
            .imm12 = imm12,
            .sh = @boolToInt(shift),
            .s = s,
            .op = op,
            // Width bit is derived from the destination register's size.
            .sf = switch (rd.size()) {
                32 => 0b0,
                64 => 0b1,
                else => unreachable, // unexpected register size
            },
        },
    };
}
// Helper functions for assembly syntax functions
// Move wide (immediate)
@@ -850,6 +886,24 @@ pub const Instruction = union(enum) {
pub fn bics(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
return logicalShiftedRegister(0b11, 0b1, shift, rd, rn, rm);
}
// Add/subtract (immediate)
/// ADD (immediate): rd = rn + imm. `shift` applies LSL #12 to imm.
pub fn add(rd: Register, rn: Register, imm: u12, shift: bool) Instruction {
    return addSubtractImmediate(0b0, 0b0, rd, rn, imm, shift);
}
/// ADDS (immediate): as add, but with the S bit set so condition
/// flags are updated.
pub fn adds(rd: Register, rn: Register, imm: u12, shift: bool) Instruction {
    return addSubtractImmediate(0b0, 0b1, rd, rn, imm, shift);
}
/// SUB (immediate): rd = rn - imm. `shift` applies LSL #12 to imm.
pub fn sub(rd: Register, rn: Register, imm: u12, shift: bool) Instruction {
    return addSubtractImmediate(0b1, 0b0, rd, rn, imm, shift);
}
/// SUBS (immediate): as sub, but with the S bit set so condition
/// flags are updated.
pub fn subs(rd: Register, rn: Register, imm: u12, shift: bool) Instruction {
    return addSubtractImmediate(0b1, 0b1, rd, rn, imm, shift);
}
};
test "" {
@@ -979,6 +1033,14 @@ test "serialize instructions" {
.inst = Instruction.@"and"(.x0, .x4, .x2, .{ .shift = .lsl, .amount = 0x8 }),
.expected = 0b1_00_01010_00_0_00010_001000_00100_00000,
},
.{ // add x0, x10, #10
.inst = Instruction.add(.x0, .x10, 10, false),
.expected = 0b1_0_0_100010_0_0000_0000_1010_01010_00000,
},
.{ // subs x0, x5, #11, lsl #12
.inst = Instruction.subs(.x0, .x5, 11, true),
.expected = 0b1_1_1_100010_1_0000_0000_1011_00101_00000,
},
};
for (testcases) |case| {