Merge pull request #11608 from ziglang/stage2-regalloc
@@ -23,6 +23,7 @@ const log = std.log.scoped(.codegen);
 const build_options = @import("build_options");
 const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
 const RegisterManager = RegisterManagerFn(Self, Register, &callee_preserved_regs);
+const RegisterLock = RegisterManager.RegisterLock;
 
 const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError;
 const FnResult = @import("../../codegen.zig").FnResult;
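The theme of this merge is replacing the register manager's freeze/unfreeze API with an ownership-based lock API. A minimal sketch of the pattern change (variable names are illustrative; the calls are the ones used throughout the diffs below):

    // Before: any caller could freeze and later unfreeze a register.
    self.register_manager.freezeRegs(&.{reg});
    defer self.register_manager.unfreezeRegs(&.{reg});

    // After: lockReg returns an optional RegisterLock token. Locking an
    // already-locked register yields null instead of silently succeeding,
    // and only the holder of the token can unlock it.
    const lock = self.register_manager.lockReg(reg);
    defer if (lock) |locked| self.register_manager.unlockReg(locked);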
@@ -727,7 +728,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
 // zig fmt: on
 }
 
-assert(!self.register_manager.frozenRegsExist());
+assert(!self.register_manager.lockedRegsExist());
 
 if (std.debug.runtime_safety) {
 if (self.air_bookkeeping < old_air_bookkeeping + 1) {
@@ -910,16 +911,16 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
 fn spillCompareFlagsIfOccupied(self: *Self) !void {
 if (self.compare_flags_inst) |inst_to_save| {
 const mcv = self.getResolvedInstValue(inst_to_save);
-switch (mcv) {
+const new_mcv = switch (mcv) {
 .compare_flags_signed,
 .compare_flags_unsigned,
+=> try self.allocRegOrMem(inst_to_save, true),
 .register_c_flag,
 .register_v_flag,
-=> {},
+=> try self.allocRegOrMem(inst_to_save, false),
 else => unreachable, // mcv doesn't occupy the compare flags
-}
+};
 
-const new_mcv = try self.allocRegOrMem(inst_to_save, true);
 try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv);
 log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv });
 
@@ -927,6 +928,15 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void {
 try branch.inst_table.put(self.gpa, inst_to_save, new_mcv);
 
 self.compare_flags_inst = null;
+
+// TODO consolidate with register manager and spillInstruction
+// this call should really belong in the register manager!
+switch (mcv) {
+.register_c_flag,
+.register_v_flag,
+=> |reg| self.register_manager.freeReg(reg),
+else => {},
+}
 }
 }
 
@@ -1048,8 +1058,8 @@ fn trunc(
 }
 },
 };
-self.register_manager.freezeRegs(&.{operand_reg});
-defer self.register_manager.unfreezeRegs(&.{operand_reg});
+const lock = self.register_manager.lockReg(operand_reg);
+defer if (lock) |reg| self.register_manager.unlockReg(reg);
 
 const dest_reg = if (maybe_inst) |inst| blk: {
 const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -1135,8 +1145,8 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
 .register => |r| r,
 else => try self.copyToTmpRegister(operand_ty, operand),
 };
-self.register_manager.freezeRegs(&.{op_reg});
-defer self.register_manager.unfreezeRegs(&.{op_reg});
+const reg_lock = self.register_manager.lockRegAssumeUnused(op_reg);
+defer self.register_manager.unlockReg(reg_lock);
 
 const dest_reg = blk: {
 if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
@@ -1168,8 +1178,8 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
 .register => |r| r,
 else => try self.copyToTmpRegister(operand_ty, operand),
 };
-self.register_manager.freezeRegs(&.{op_reg});
-defer self.register_manager.unfreezeRegs(&.{op_reg});
+const reg_lock = self.register_manager.lockRegAssumeUnused(op_reg);
+defer self.register_manager.unlockReg(reg_lock);
 
 const dest_reg = blk: {
 if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
@@ -1257,8 +1267,17 @@ fn binOpRegister(
 const lhs_is_register = lhs == .register;
 const rhs_is_register = rhs == .register;
 
-if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
-if (rhs_is_register) self.register_manager.freezeRegs(&.{rhs.register});
+const lhs_lock: ?RegisterLock = if (lhs_is_register)
+self.register_manager.lockReg(lhs.register)
+else
+null;
+defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
+
+const rhs_lock: ?RegisterLock = if (rhs_is_register)
+self.register_manager.lockReg(rhs.register)
+else
+null;
+defer if (rhs_lock) |reg| self.register_manager.unlockReg(reg);
 
 const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
 
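For operands that are only sometimes held in a register, the diffs use an optional lock; condensed, the recurring idiom is (a sketch with illustrative names):

    // Lock lhs only when it currently lives in a register, so a later
    // temporary allocation cannot clobber it; otherwise there is nothing
    // to protect and the lock is null.
    const lhs_lock: ?RegisterLock = if (lhs == .register)
        self.register_manager.lockReg(lhs.register)
    else
        null;
    defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);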
@@ -1270,13 +1289,13 @@ fn binOpRegister(
 
 const raw_reg = try self.register_manager.allocReg(track_inst);
 const reg = registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
-self.register_manager.freezeRegs(&.{reg});
 
 if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
 
 break :blk reg;
 };
-defer self.register_manager.unfreezeRegs(&.{lhs_reg});
+const new_lhs_lock = self.register_manager.lockReg(lhs_reg);
+defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
 
 const rhs_reg = if (rhs_is_register) rhs.register else blk: {
 const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
@@ -1286,13 +1305,13 @@ fn binOpRegister(
 
 const raw_reg = try self.register_manager.allocReg(track_inst);
 const reg = registerAlias(raw_reg, rhs_ty.abiAlignment(self.target.*));
-self.register_manager.freezeRegs(&.{reg});
 
 if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
 
 break :blk reg;
 };
-defer self.register_manager.unfreezeRegs(&.{rhs_reg});
+const new_rhs_lock = self.register_manager.lockReg(rhs_reg);
+defer if (new_rhs_lock) |reg| self.register_manager.unlockReg(reg);
 
 const dest_reg = switch (mir_tag) {
 .cmp_shifted_register => undefined, // cmp has no destination register
@@ -1394,7 +1413,11 @@ fn binOpImmediate(
 ) !MCValue {
 const lhs_is_register = lhs == .register;
 
-if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
+const lhs_lock: ?RegisterLock = if (lhs_is_register)
+self.register_manager.lockReg(lhs.register)
+else
+null;
+defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
 
 const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
 
@@ -1408,13 +1431,13 @@ fn binOpImmediate(
 
 const raw_reg = try self.register_manager.allocReg(track_inst);
 const reg = registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
-self.register_manager.freezeRegs(&.{reg});
 
 if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
 
 break :blk reg;
 };
-defer self.register_manager.unfreezeRegs(&.{lhs_reg});
+const new_lhs_lock = self.register_manager.lockReg(lhs_reg);
+defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
 
 const dest_reg = switch (mir_tag) {
 .cmp_immediate => undefined, // cmp has no destination register
@@ -1758,7 +1781,10 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index) !void {
 const lhs_ty = self.air.typeOf(bin_op.lhs);
 const rhs_ty = self.air.typeOf(bin_op.rhs);
 
-const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
+const result: MCValue = if (self.liveness.isUnused(inst))
+.dead
+else
+try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
 return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
 
@@ -1815,13 +1841,13 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
 };
 const dest = try self.binOp(base_tag, null, lhs, rhs, lhs_ty, rhs_ty);
 const dest_reg = dest.register;
-self.register_manager.freezeRegs(&.{dest_reg});
-defer self.register_manager.unfreezeRegs(&.{dest_reg});
+const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
+defer self.register_manager.unlockReg(dest_reg_lock);
 
 const raw_truncated_reg = try self.register_manager.allocReg(null);
 const truncated_reg = registerAlias(raw_truncated_reg, lhs_ty.abiSize(self.target.*));
-self.register_manager.freezeRegs(&.{truncated_reg});
-defer self.register_manager.unfreezeRegs(&.{truncated_reg});
+const truncated_reg_lock = self.register_manager.lockRegAssumeUnused(truncated_reg);
+defer self.register_manager.unlockReg(truncated_reg_lock);
 
 // sbfx/ubfx truncated, dest, #0, #bits
 try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);
@@ -1922,12 +1948,12 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 
 const dest = try self.binOpRegister(base_tag, null, lhs, rhs, lhs_ty, rhs_ty);
 const dest_reg = dest.register;
-self.register_manager.freezeRegs(&.{dest_reg});
-defer self.register_manager.unfreezeRegs(&.{dest_reg});
+const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
+defer self.register_manager.unlockReg(dest_reg_lock);
 
 const truncated_reg = try self.register_manager.allocReg(null);
-self.register_manager.freezeRegs(&.{truncated_reg});
-defer self.register_manager.unfreezeRegs(&.{truncated_reg});
+const truncated_reg_lock = self.register_manager.lockRegAssumeUnused(truncated_reg);
+defer self.register_manager.unlockReg(truncated_reg_lock);
 
 try self.truncRegister(
 dest_reg.to32(),
@@ -1977,36 +2003,44 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 const lhs_is_register = lhs == .register;
 const rhs_is_register = rhs == .register;
 
-if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
-if (rhs_is_register) self.register_manager.freezeRegs(&.{rhs.register});
+const lhs_lock: ?RegisterLock = if (lhs_is_register)
+self.register_manager.lockRegAssumeUnused(lhs.register)
+else
+null;
+defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
+
+const rhs_lock: ?RegisterLock = if (rhs_is_register)
+self.register_manager.lockRegAssumeUnused(rhs.register)
+else
+null;
+defer if (rhs_lock) |reg| self.register_manager.unlockReg(reg);
 
 const lhs_reg = if (lhs_is_register) lhs.register else blk: {
 const raw_reg = try self.register_manager.allocReg(null);
 const reg = registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
-self.register_manager.freezeRegs(&.{reg});
 break :blk reg;
 };
-defer self.register_manager.unfreezeRegs(&.{lhs_reg});
+const new_lhs_lock = self.register_manager.lockReg(lhs_reg);
+defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
 
 const rhs_reg = if (rhs_is_register) rhs.register else blk: {
 const raw_reg = try self.register_manager.allocReg(null);
 const reg = registerAlias(raw_reg, rhs_ty.abiAlignment(self.target.*));
-self.register_manager.freezeRegs(&.{reg});
 break :blk reg;
 };
-defer self.register_manager.unfreezeRegs(&.{rhs_reg});
+const new_rhs_lock = self.register_manager.lockReg(rhs_reg);
+defer if (new_rhs_lock) |reg| self.register_manager.unlockReg(reg);
 
 if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
 if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs);
 
 // TODO reuse operands
 const dest_reg = blk: {
 const raw_reg = try self.register_manager.allocReg(null);
 const reg = registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
-self.register_manager.freezeRegs(&.{reg});
 break :blk reg;
 };
-defer self.register_manager.unfreezeRegs(&.{dest_reg});
+const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
+defer self.register_manager.unlockReg(dest_reg_lock);
 
 switch (int_info.signedness) {
 .signed => {
@@ -2021,8 +2055,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 });
 
 const dest_high_reg = try self.register_manager.allocReg(null);
-self.register_manager.freezeRegs(&.{dest_high_reg});
-defer self.register_manager.unfreezeRegs(&.{dest_high_reg});
+const dest_high_reg_lock = self.register_manager.lockRegAssumeUnused(dest_high_reg);
+defer self.register_manager.unlockReg(dest_high_reg_lock);
 
 // smulh dest_high, lhs, rhs
 _ = try self.addInst(.{
@@ -2071,8 +2105,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 },
 .unsigned => {
 const dest_high_reg = try self.register_manager.allocReg(null);
-self.register_manager.freezeRegs(&.{dest_high_reg});
-defer self.register_manager.unfreezeRegs(&.{dest_high_reg});
+const dest_high_reg_lock = self.register_manager.lockRegAssumeUnused(dest_high_reg);
+defer self.register_manager.unlockReg(dest_high_reg_lock);
 
 // umulh dest_high, lhs, rhs
 _ = try self.addInst(.{
@@ -2127,8 +2161,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 const truncated_reg = try self.register_manager.allocReg(null);
-self.register_manager.freezeRegs(&.{truncated_reg});
-defer self.register_manager.unfreezeRegs(&.{truncated_reg});
+const truncated_reg_lock = self.register_manager.lockRegAssumeUnused(truncated_reg);
+defer self.register_manager.unlockReg(truncated_reg_lock);
 
 try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);
 
@@ -2168,14 +2202,20 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 if (int_info.bits <= 64) {
 const stack_offset = try self.allocMem(inst, tuple_size, tuple_align);
 
-if (lhs == .register) self.register_manager.freezeRegs(&.{lhs.register});
-defer if (lhs == .register) self.register_manager.unfreezeRegs(&.{lhs.register});
+const lhs_lock: ?RegisterLock = if (lhs == .register)
+self.register_manager.lockRegAssumeUnused(lhs.register)
+else
+null;
+defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
 
 try self.spillCompareFlagsIfOccupied();
 self.compare_flags_inst = null;
 
 // lsl dest, lhs, rhs
 const dest = try self.binOp(.shl, null, lhs, rhs, lhs_ty, rhs_ty);
 const dest_reg = dest.register;
+const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
+defer self.register_manager.unlockReg(dest_reg_lock);
 
 // asr/lsr reconstructed, dest, rhs
 const reconstructed = try self.binOp(.shr, null, dest, rhs, lhs_ty, rhs_ty);
@@ -2184,7 +2224,9 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 _ = try self.binOp(.cmp_eq, null, lhs, reconstructed, lhs_ty, lhs_ty);
 
 try self.genSetStack(lhs_ty, stack_offset, dest);
-try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags_unsigned = .neq });
+try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{
+.compare_flags_unsigned = .neq,
+});
 
 break :result MCValue{ .stack_offset = stack_offset };
 } else {
@@ -2411,14 +2453,18 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
 var buf: Type.SlicePtrFieldTypeBuffer = undefined;
 const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
 
-if (index_is_register) self.register_manager.freezeRegs(&.{index_mcv.register});
-defer if (index_is_register) self.register_manager.unfreezeRegs(&.{index_mcv.register});
+const index_lock: ?RegisterLock = if (index_is_register)
+self.register_manager.lockRegAssumeUnused(index_mcv.register)
+else
+null;
+defer if (index_lock) |reg| self.register_manager.unlockReg(reg);
 
 const base_mcv: MCValue = switch (slice_mcv) {
 .stack_offset => |off| .{ .register = try self.copyToTmpRegister(slice_ptr_field_type, .{ .stack_offset = off }) },
 else => return self.fail("TODO slice_elem_val when slice is {}", .{slice_mcv}),
 };
-self.register_manager.freezeRegs(&.{base_mcv.register});
+const base_lock = self.register_manager.lockRegAssumeUnused(base_mcv.register);
+defer self.register_manager.unlockReg(base_lock);
 
 switch (elem_size) {
 else => {
@@ -2559,8 +2605,8 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
 .immediate => |imm| try self.setRegOrMem(elem_ty, dst_mcv, .{ .memory = imm }),
 .ptr_stack_offset => |off| try self.setRegOrMem(elem_ty, dst_mcv, .{ .stack_offset = off }),
 .register => |addr_reg| {
-self.register_manager.freezeRegs(&.{addr_reg});
-defer self.register_manager.unfreezeRegs(&.{addr_reg});
+const addr_reg_lock = self.register_manager.lockReg(addr_reg);
+defer if (addr_reg_lock) |reg| self.register_manager.unlockReg(reg);
 
 switch (dst_mcv) {
 .dead => unreachable,
@@ -2573,16 +2619,18 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
 if (elem_size <= 8) {
 const raw_tmp_reg = try self.register_manager.allocReg(null);
 const tmp_reg = registerAlias(raw_tmp_reg, elem_size);
-self.register_manager.freezeRegs(&.{tmp_reg});
-defer self.register_manager.unfreezeRegs(&.{tmp_reg});
+const tmp_reg_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
+defer self.register_manager.unlockReg(tmp_reg_lock);
 
 try self.load(.{ .register = tmp_reg }, ptr, ptr_ty);
 try self.genSetStack(elem_ty, off, MCValue{ .register = tmp_reg });
 } else {
 // TODO optimize the register allocation
 const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null });
-self.register_manager.freezeRegs(&regs);
-defer self.register_manager.unfreezeRegs(&regs);
+const regs_locks = self.register_manager.lockRegsAssumeUnused(4, regs);
+defer for (regs_locks) |reg| {
+self.register_manager.unlockReg(reg);
+};
 
 const src_reg = addr_reg;
 const dst_reg = regs[0];
@@ -2784,8 +2832,8 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
 try self.genSetStack(value_ty, off, value);
 },
 .register => |addr_reg| {
-self.register_manager.freezeRegs(&.{addr_reg});
-defer self.register_manager.unfreezeRegs(&.{addr_reg});
+const addr_reg_lock = self.register_manager.lockReg(addr_reg);
+defer if (addr_reg_lock) |reg| self.register_manager.unlockReg(reg);
 
 switch (value) {
 .register => |value_reg| {
@@ -2795,8 +2843,8 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
 if (abi_size <= 8) {
 const raw_tmp_reg = try self.register_manager.allocReg(null);
 const tmp_reg = registerAlias(raw_tmp_reg, abi_size);
-self.register_manager.freezeRegs(&.{tmp_reg});
-defer self.register_manager.unfreezeRegs(&.{tmp_reg});
+const tmp_reg_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
+defer self.register_manager.unlockReg(tmp_reg_lock);
 
 try self.genSetReg(value_ty, tmp_reg, value);
 try self.store(ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
@@ -2856,12 +2904,12 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
 const offset_reg = try self.copyToTmpRegister(ptr_ty, .{
 .immediate = struct_field_offset,
 });
-self.register_manager.freezeRegs(&.{offset_reg});
-defer self.register_manager.unfreezeRegs(&.{offset_reg});
+const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
+defer self.register_manager.unlockReg(offset_reg_lock);
 
 const addr_reg = try self.copyToTmpRegister(ptr_ty, mcv);
-self.register_manager.freezeRegs(&.{addr_reg});
-defer self.register_manager.unfreezeRegs(&.{addr_reg});
+const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
+defer self.register_manager.unlockReg(addr_reg_lock);
 
 const dest = try self.binOp(
 .add,
@@ -3369,6 +3417,9 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
 const parent_compare_flags_inst = self.compare_flags_inst;
 
 try self.branch_stack.append(.{});
+errdefer {
+_ = self.branch_stack.pop();
+}
 
 try self.ensureProcessDeathCapacity(liveness_condbr.then_deaths.len);
 for (liveness_condbr.then_deaths) |operand| {
@@ -3955,8 +4006,38 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
 },
 .register_c_flag,
 .register_v_flag,
-=> {
-return self.fail("TODO implement genSetStack {}", .{mcv});
+=> |reg| {
+const reg_lock = self.register_manager.lockReg(reg);
+defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg);
+
+const wrapped_ty = ty.structFieldType(0);
+try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg });
+
+const overflow_bit_ty = ty.structFieldType(1);
+const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*));
+const raw_cond_reg = try self.register_manager.allocReg(null);
+const cond_reg = registerAlias(
+raw_cond_reg,
+@intCast(u32, overflow_bit_ty.abiSize(self.target.*)),
+);
+
+// C flag: cset reg, cs
+// V flag: cset reg, vs
+_ = try self.addInst(.{
+.tag = .cset,
+.data = .{ .r_cond = .{
+.rd = cond_reg,
+.cond = switch (mcv) {
+.register_c_flag => .cs,
+.register_v_flag => .vs,
+else => unreachable,
+},
+} },
+});
+
+try self.genSetStack(overflow_bit_ty, stack_offset - overflow_bit_offset, .{
+.register = cond_reg,
+});
 },
 .got_load,
 .direct_load,
@@ -3983,8 +4064,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
 
 // TODO call extern memcpy
 const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null });
-self.register_manager.freezeRegs(&regs);
-defer self.register_manager.unfreezeRegs(&regs);
+const regs_locks = self.register_manager.lockRegsAssumeUnused(5, regs);
+defer for (regs_locks) |reg| {
+self.register_manager.unlockReg(reg);
+};
 
 const src_reg = regs[0];
 const dst_reg = regs[1];
 
@@ -23,6 +23,7 @@ const log = std.log.scoped(.codegen);
 const build_options = @import("build_options");
 const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
 const RegisterManager = RegisterManagerFn(Self, Register, &allocatable_registers);
+const RegisterLock = RegisterManager.RegisterLock;
 
 const FnResult = @import("../../codegen.zig").FnResult;
 const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError;
@@ -734,7 +735,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
 // zig fmt: on
 }
 
-assert(!self.register_manager.frozenRegsExist());
+assert(!self.register_manager.lockedRegsExist());
 
 if (std.debug.runtime_safety) {
 if (self.air_bookkeeping < old_air_bookkeeping + 1) {
@@ -897,16 +898,16 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
 fn spillCompareFlagsIfOccupied(self: *Self) !void {
 if (self.compare_flags_inst) |inst_to_save| {
 const mcv = self.getResolvedInstValue(inst_to_save);
-switch (mcv) {
+const new_mcv = switch (mcv) {
 .compare_flags_signed,
 .compare_flags_unsigned,
+=> try self.allocRegOrMem(inst_to_save, true),
 .register_c_flag,
 .register_v_flag,
-=> {},
+=> try self.allocRegOrMem(inst_to_save, false),
 else => unreachable, // mcv doesn't occupy the compare flags
-}
+};
 
-const new_mcv = try self.allocRegOrMem(inst_to_save, true);
 try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv);
 log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv });
 
@@ -914,6 +915,15 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void {
 try branch.inst_table.put(self.gpa, inst_to_save, new_mcv);
 
 self.compare_flags_inst = null;
+
+// TODO consolidate with register manager and spillInstruction
+// this call should really belong in the register manager!
+switch (mcv) {
+.register_c_flag,
+.register_v_flag,
+=> |reg| self.register_manager.freeReg(reg),
+else => {},
+}
 }
 }
 
@@ -1038,8 +1048,8 @@ fn trunc(
 }
 },
 };
-self.register_manager.freezeRegs(&.{operand_reg});
-defer self.register_manager.unfreezeRegs(&.{operand_reg});
+const operand_reg_lock = self.register_manager.lockReg(operand_reg);
+defer if (operand_reg_lock) |reg| self.register_manager.unlockReg(reg);
 
 const dest_reg = if (maybe_inst) |inst| blk: {
 const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -1127,8 +1137,8 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
 .register => |r| r,
 else => try self.copyToTmpRegister(operand_ty, operand),
 };
-self.register_manager.freezeRegs(&.{op_reg});
-defer self.register_manager.unfreezeRegs(&.{op_reg});
+const op_reg_lock = self.register_manager.lockRegAssumeUnused(op_reg);
+defer self.register_manager.unlockReg(op_reg_lock);
 
 const dest_reg = blk: {
 if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
@@ -1157,8 +1167,8 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
 .register => |r| r,
 else => try self.copyToTmpRegister(operand_ty, operand),
 };
-self.register_manager.freezeRegs(&.{op_reg});
-defer self.register_manager.unfreezeRegs(&.{op_reg});
+const op_reg_lock = self.register_manager.lockRegAssumeUnused(op_reg);
+defer self.register_manager.unlockReg(op_reg_lock);
 
 const dest_reg = blk: {
 if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
@@ -1218,15 +1228,15 @@ fn minMax(
 .register => |r| r,
 else => try self.copyToTmpRegister(lhs_ty, lhs),
 };
-self.register_manager.freezeRegs(&.{lhs_reg});
-defer self.register_manager.unfreezeRegs(&.{lhs_reg});
+const lhs_reg_lock = self.register_manager.lockReg(lhs_reg);
+defer if (lhs_reg_lock) |reg| self.register_manager.unlockReg(reg);
 
 const rhs_reg = switch (rhs) {
 .register => |r| r,
 else => try self.copyToTmpRegister(rhs_ty, rhs),
 };
-self.register_manager.freezeRegs(&.{rhs_reg});
-defer self.register_manager.unfreezeRegs(&.{rhs_reg});
+const rhs_reg_lock = self.register_manager.lockReg(rhs_reg);
+defer if (rhs_reg_lock) |reg| self.register_manager.unlockReg(reg);
 
 const dest_reg = if (maybe_inst) |inst| blk: {
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -1392,12 +1402,12 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
 };
 const dest = try self.binOp(base_tag, null, lhs, rhs, lhs_ty, rhs_ty);
 const dest_reg = dest.register;
-self.register_manager.freezeRegs(&.{dest_reg});
-defer self.register_manager.unfreezeRegs(&.{dest_reg});
+const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
+defer self.register_manager.unlockReg(dest_reg_lock);
 
 const truncated_reg = try self.register_manager.allocReg(null);
-self.register_manager.freezeRegs(&.{truncated_reg});
-defer self.register_manager.unfreezeRegs(&.{truncated_reg});
+const truncated_reg_lock = self.register_manager.lockRegAssumeUnused(truncated_reg);
+defer self.register_manager.unlockReg(truncated_reg_lock);
 
 // sbfx/ubfx truncated, dest, #0, #bits
 try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);
@@ -1493,12 +1503,12 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 
 const dest = try self.binOpRegister(base_tag, null, lhs, rhs, lhs_ty, rhs_ty);
 const dest_reg = dest.register;
-self.register_manager.freezeRegs(&.{dest_reg});
-defer self.register_manager.unfreezeRegs(&.{dest_reg});
+const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
+defer self.register_manager.unlockReg(dest_reg_lock);
 
 const truncated_reg = try self.register_manager.allocReg(null);
-self.register_manager.freezeRegs(&.{truncated_reg});
-defer self.register_manager.unfreezeRegs(&.{truncated_reg});
+const truncated_reg_lock = self.register_manager.lockRegAssumeUnused(truncated_reg);
+defer self.register_manager.unlockReg(truncated_reg_lock);
 
 // sbfx/ubfx truncated, dest, #0, #bits
 try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);
@@ -1526,28 +1536,31 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 const lhs_is_register = lhs == .register;
 const rhs_is_register = rhs == .register;
 
-if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
-if (rhs_is_register) self.register_manager.freezeRegs(&.{rhs.register});
+const lhs_lock: ?RegisterLock = if (lhs_is_register)
+self.register_manager.lockReg(lhs.register)
+else
+null;
+defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
 
-const lhs_reg = if (lhs_is_register) lhs.register else blk: {
-const reg = try self.register_manager.allocReg(null);
-self.register_manager.freezeRegs(&.{reg});
-
-break :blk reg;
-};
-defer self.register_manager.unfreezeRegs(&.{lhs_reg});
+const lhs_reg = if (lhs_is_register)
+lhs.register
+else
+try self.register_manager.allocReg(null);
+const new_lhs_lock = self.register_manager.lockReg(lhs_reg);
+defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
 
-const rhs_reg = if (rhs_is_register) rhs.register else blk: {
-const reg = try self.register_manager.allocReg(null);
-self.register_manager.freezeRegs(&.{reg});
-
-break :blk reg;
-};
-defer self.register_manager.unfreezeRegs(&.{rhs_reg});
+const rhs_reg = if (rhs_is_register)
+rhs.register
+else
+try self.register_manager.allocReg(null);
+const new_rhs_lock = self.register_manager.lockReg(rhs_reg);
+defer if (new_rhs_lock) |reg| self.register_manager.unlockReg(reg);
 
 const dest_regs = try self.register_manager.allocRegs(2, .{ null, null });
-self.register_manager.freezeRegs(&dest_regs);
-defer self.register_manager.unfreezeRegs(&dest_regs);
+const dest_regs_locks = self.register_manager.lockRegsAssumeUnused(2, dest_regs);
+defer for (dest_regs_locks) |reg| {
+self.register_manager.unlockReg(reg);
+};
 const rdlo = dest_regs[0];
 const rdhi = dest_regs[1];
 
@@ -1555,8 +1568,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs);
 
 const truncated_reg = try self.register_manager.allocReg(null);
-self.register_manager.freezeRegs(&.{truncated_reg});
-defer self.register_manager.unfreezeRegs(&.{truncated_reg});
+const truncated_reg_lock = self.register_manager.lockRegAssumeUnused(truncated_reg);
+defer self.register_manager.unlockReg(truncated_reg_lock);
 
 _ = try self.addInst(.{
 .tag = base_tag,
@@ -1648,14 +1661,20 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 if (int_info.bits <= 32) {
 const stack_offset = try self.allocMem(inst, tuple_size, tuple_align);
 
-if (lhs == .register) self.register_manager.freezeRegs(&.{lhs.register});
-defer if (lhs == .register) self.register_manager.unfreezeRegs(&.{lhs.register});
+const lhs_lock: ?RegisterLock = if (lhs == .register)
+self.register_manager.lockRegAssumeUnused(lhs.register)
+else
+null;
+defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
 
 try self.spillCompareFlagsIfOccupied();
 self.compare_flags_inst = null;
 
 // lsl dest, lhs, rhs
 const dest = try self.binOp(.shl, null, lhs, rhs, lhs_ty, rhs_ty);
 const dest_reg = dest.register;
+const dest_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
+defer self.register_manager.unlockReg(dest_lock);
 
 // asr/lsr reconstructed, dest, rhs
 const reconstructed = try self.binOp(.shr, null, dest, rhs, lhs_ty, rhs_ty);
@@ -1939,8 +1958,11 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
 var buf: Type.SlicePtrFieldTypeBuffer = undefined;
 const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
 
-if (index_is_register) self.register_manager.freezeRegs(&.{index_mcv.register});
-defer if (index_is_register) self.register_manager.unfreezeRegs(&.{index_mcv.register});
+const index_lock: ?RegisterLock = if (index_is_register)
+self.register_manager.lockRegAssumeUnused(index_mcv.register)
+else
+null;
+defer if (index_lock) |reg| self.register_manager.unlockReg(reg);
 
 const base_mcv = slicePtr(slice_mcv);
 
@@ -1950,20 +1972,20 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
 .register => |r| r,
 else => try self.copyToTmpRegister(slice_ptr_field_type, base_mcv),
 };
-self.register_manager.freezeRegs(&.{base_reg});
-defer self.register_manager.unfreezeRegs(&.{base_reg});
+const base_reg_lock = self.register_manager.lockRegAssumeUnused(base_reg);
+defer self.register_manager.unlockReg(base_reg_lock);
 
 const dst_reg = try self.register_manager.allocReg(inst);
 const dst_mcv = MCValue{ .register = dst_reg };
-self.register_manager.freezeRegs(&.{dst_reg});
-defer self.register_manager.unfreezeRegs(&.{dst_reg});
+const dst_reg_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
+defer self.register_manager.unlockReg(dst_reg_lock);
 
 const index_reg: Register = switch (index_mcv) {
 .register => |reg| reg,
 else => try self.copyToTmpRegister(Type.usize, index_mcv),
 };
-self.register_manager.freezeRegs(&.{index_reg});
-defer self.register_manager.unfreezeRegs(&.{index_reg});
+const index_reg_lock = self.register_manager.lockReg(index_reg);
+defer if (index_reg_lock) |lock| self.register_manager.unlockReg(lock);
 
 const tag: Mir.Inst.Tag = switch (elem_size) {
 1 => .ldrb,
@@ -2149,8 +2171,8 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
 .immediate => |imm| try self.setRegOrMem(elem_ty, dst_mcv, .{ .memory = imm }),
 .ptr_stack_offset => |off| try self.setRegOrMem(elem_ty, dst_mcv, .{ .stack_offset = off }),
 .register => |reg| {
-self.register_manager.freezeRegs(&.{reg});
-defer self.register_manager.unfreezeRegs(&.{reg});
+const reg_lock = self.register_manager.lockReg(reg);
+defer if (reg_lock) |reg_locked| self.register_manager.unlockReg(reg_locked);
 
 switch (dst_mcv) {
 .dead => unreachable,
@@ -2162,16 +2184,18 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
 .stack_offset => |off| {
 if (elem_size <= 4) {
 const tmp_reg = try self.register_manager.allocReg(null);
-self.register_manager.freezeRegs(&.{tmp_reg});
-defer self.register_manager.unfreezeRegs(&.{tmp_reg});
+const tmp_reg_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
+defer self.register_manager.unlockReg(tmp_reg_lock);
 
 try self.load(.{ .register = tmp_reg }, ptr, ptr_ty);
 try self.genSetStack(elem_ty, off, MCValue{ .register = tmp_reg });
 } else {
 // TODO optimize the register allocation
 const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null });
-self.register_manager.freezeRegs(&regs);
-defer self.register_manager.unfreezeRegs(&regs);
+const regs_locks = self.register_manager.lockRegsAssumeUnused(4, regs);
+defer for (regs_locks) |reg_locked| {
+self.register_manager.unlockReg(reg_locked);
+};
 
 const src_reg = reg;
 const dst_reg = regs[0];
@@ -2197,8 +2221,8 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
 .stack_argument_offset,
 => {
 const reg = try self.register_manager.allocReg(null);
-self.register_manager.freezeRegs(&.{reg});
-defer self.register_manager.unfreezeRegs(&.{reg});
+const reg_lock = self.register_manager.lockRegAssumeUnused(reg);
+defer self.register_manager.unlockReg(reg_lock);
 
 try self.genSetReg(ptr_ty, reg, ptr);
 try self.load(dst_mcv, .{ .register = reg }, ptr_ty);
@@ -2252,8 +2276,8 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
 try self.genSetStack(value_ty, off, value);
 },
 .register => |addr_reg| {
-self.register_manager.freezeRegs(&.{addr_reg});
-defer self.register_manager.unfreezeRegs(&.{addr_reg});
+const addr_reg_lock = self.register_manager.lockReg(addr_reg);
+defer if (addr_reg_lock) |reg| self.register_manager.unlockReg(reg);
 
 switch (value) {
 .dead => unreachable,
@@ -2264,15 +2288,17 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
 else => {
 if (elem_size <= 4) {
 const tmp_reg = try self.register_manager.allocReg(null);
-self.register_manager.freezeRegs(&.{tmp_reg});
-defer self.register_manager.unfreezeRegs(&.{tmp_reg});
+const tmp_reg_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
+defer self.register_manager.unlockReg(tmp_reg_lock);
 
 try self.genSetReg(value_ty, tmp_reg, value);
 try self.store(ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
 } else {
 const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null });
-self.register_manager.freezeRegs(&regs);
-defer self.register_manager.unfreezeRegs(&regs);
+const regs_locks = self.register_manager.lockRegsAssumeUnused(4, regs);
+defer for (regs_locks) |reg| {
+self.register_manager.unlockReg(reg);
+};
 
 const src_reg = regs[0];
 const dst_reg = addr_reg;
@@ -2356,12 +2382,12 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
 const offset_reg = try self.copyToTmpRegister(ptr_ty, .{
 .immediate = struct_field_offset,
 });
-self.register_manager.freezeRegs(&.{offset_reg});
-defer self.register_manager.unfreezeRegs(&.{offset_reg});
+const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
+defer self.register_manager.unlockReg(offset_reg_lock);
 
 const addr_reg = try self.copyToTmpRegister(ptr_ty, mcv);
-self.register_manager.freezeRegs(&.{addr_reg});
-defer self.register_manager.unfreezeRegs(&.{addr_reg});
+const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
+defer self.register_manager.unlockReg(addr_reg_lock);
 
 const dest = try self.binOp(
 .add,
@@ -2477,8 +2503,11 @@ fn binOpRegister(
 const lhs_is_register = lhs == .register;
 const rhs_is_register = rhs == .register;
 
-if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
-if (rhs_is_register) self.register_manager.freezeRegs(&.{rhs.register});
+const lhs_lock: ?RegisterLock = if (lhs_is_register)
+self.register_manager.lockReg(lhs.register)
+else
+null;
+defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
 
 const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
 
@@ -2489,13 +2518,13 @@ fn binOpRegister(
 } else null;
 
 const reg = try self.register_manager.allocReg(track_inst);
-self.register_manager.freezeRegs(&.{reg});
 
 if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
 
 break :blk reg;
 };
-defer self.register_manager.unfreezeRegs(&.{lhs_reg});
+const new_lhs_lock = self.register_manager.lockReg(lhs_reg);
+defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
 
 const rhs_reg = if (rhs_is_register) rhs.register else blk: {
 const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
@@ -2504,13 +2533,13 @@ fn binOpRegister(
 } else null;
 
 const reg = try self.register_manager.allocReg(track_inst);
-self.register_manager.freezeRegs(&.{reg});
 
 if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
 
 break :blk reg;
 };
-defer self.register_manager.unfreezeRegs(&.{rhs_reg});
+const new_rhs_lock = self.register_manager.lockReg(rhs_reg);
+defer if (new_rhs_lock) |reg| self.register_manager.unlockReg(reg);
 
 const dest_reg = switch (mir_tag) {
 .cmp => .r0, // cmp has no destination regardless
@@ -2593,7 +2622,11 @@ fn binOpImmediate(
 ) !MCValue {
 const lhs_is_register = lhs == .register;
 
-if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
+const lhs_lock: ?RegisterLock = if (lhs_is_register)
+self.register_manager.lockReg(lhs.register)
+else
+null;
+defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
 
 const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
 
@@ -2606,13 +2639,13 @@ fn binOpImmediate(
 } else null;
 
 const reg = try self.register_manager.allocReg(track_inst);
-self.register_manager.freezeRegs(&.{reg});
 
 if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
 
 break :blk reg;
 };
-defer self.register_manager.unfreezeRegs(&.{lhs_reg});
+const new_lhs_lock = self.register_manager.lockReg(lhs_reg);
+defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
 
 const dest_reg = switch (mir_tag) {
 .cmp => .r0, // cmp has no destination reg
@@ -3656,6 +3689,9 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
 const parent_compare_flags_inst = self.compare_flags_inst;
 
 try self.branch_stack.append(.{});
+errdefer {
+_ = self.branch_stack.pop();
+}
 
 try self.ensureProcessDeathCapacity(liveness_condbr.then_deaths.len);
 for (liveness_condbr.then_deaths) |operand| {
@@ -4264,8 +4300,36 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
 },
 .register_c_flag,
 .register_v_flag,
-=> {
-return self.fail("TODO implement genSetStack {}", .{mcv});
+=> |reg| {
+const reg_lock = self.register_manager.lockReg(reg);
+defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg);
+
+const wrapped_ty = ty.structFieldType(0);
+try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg });
+
+const overflow_bit_ty = ty.structFieldType(1);
+const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*));
+const cond_reg = try self.register_manager.allocReg(null);
+
+// C flag: movcs reg, #1
+// V flag: movvs reg, #1
+_ = try self.addInst(.{
+.tag = .mov,
+.cond = switch (mcv) {
+.register_c_flag => .cs,
+.register_v_flag => .vs,
+else => unreachable,
+},
+.data = .{ .rr_op = .{
+.rd = cond_reg,
+.rn = .r0,
+.op = Instruction.Operand.fromU32(1).?,
+} },
+});
+
+try self.genSetStack(overflow_bit_ty, stack_offset - overflow_bit_offset, .{
+.register = cond_reg,
+});
 },
 .memory,
 .stack_argument_offset,
 
@@ -23,6 +23,7 @@ const log = std.log.scoped(.codegen);
 const build_options = @import("build_options");
 const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
 const RegisterManager = RegisterManagerFn(Self, Register, &callee_preserved_regs);
+const RegisterLock = RegisterManager.RegisterLock;
 
 const FnResult = @import("../../codegen.zig").FnResult;
 const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError;
@@ -937,8 +938,11 @@ fn binOpRegister(
 const lhs_is_register = lhs == .register;
 const rhs_is_register = rhs == .register;
 
-if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
-if (rhs_is_register) self.register_manager.freezeRegs(&.{rhs.register});
+const lhs_lock: ?RegisterLock = if (lhs_is_register)
+self.register_manager.lockReg(lhs.register)
+else
+null;
+defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
 
 const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
 
@@ -949,13 +953,13 @@ fn binOpRegister(
 } else null;
 
 const reg = try self.register_manager.allocReg(track_inst);
-self.register_manager.freezeRegs(&.{reg});
 
 if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
 
 break :blk reg;
 };
-defer self.register_manager.unfreezeRegs(&.{lhs_reg});
+const new_lhs_lock = self.register_manager.lockReg(lhs_reg);
+defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
 
 const rhs_reg = if (rhs_is_register) rhs.register else blk: {
 const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
@@ -964,13 +968,13 @@ fn binOpRegister(
 } else null;
 
 const reg = try self.register_manager.allocReg(track_inst);
-self.register_manager.freezeRegs(&.{reg});
 
 if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
 
 break :blk reg;
 };
-defer self.register_manager.unfreezeRegs(&.{rhs_reg});
+const new_rhs_lock = self.register_manager.lockReg(rhs_reg);
+defer if (new_rhs_lock) |reg| self.register_manager.unlockReg(reg);
 
 const dest_reg = if (maybe_inst) |inst| blk: {
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -1448,8 +1452,8 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
 .stack_offset,
 => {
 const reg = try self.register_manager.allocReg(null);
-self.register_manager.freezeRegs(&.{reg});
-defer self.register_manager.unfreezeRegs(&.{reg});
+const reg_lock = self.register_manager.lockRegAssumeUnused(reg);
+defer self.register_manager.unlockReg(reg_lock);
 
 try self.genSetReg(ptr_ty, reg, ptr);
 try self.load(dst_mcv, .{ .register = reg }, ptr_ty);
 
(File diff suppressed because it is too large)
@@ -45,9 +45,8 @@ pub fn RegisterManager(
 /// Tracks all registers allocated in the course of this
 /// function
 allocated_registers: FreeRegInt = 0,
-/// Tracks registers which are temporarily blocked from being
-/// allocated
-frozen_registers: FreeRegInt = 0,
+/// Tracks registers which are locked from being allocated
+locked_registers: FreeRegInt = 0,
 
 const Self = @This();
 
@@ -108,34 +107,70 @@ pub fn RegisterManager(
 return self.allocated_registers & mask != 0;
 }
 
-/// Returns whether this register is frozen
+/// Returns whether this register is locked
 ///
 /// Returns false when this register is not tracked
-pub fn isRegFrozen(self: Self, reg: Register) bool {
+pub fn isRegLocked(self: Self, reg: Register) bool {
 const mask = getRegisterMask(reg) orelse return false;
-return self.frozen_registers & mask != 0;
+return self.locked_registers & mask != 0;
 }
 
-/// Prevents the registers from being allocated until they are
-/// unfrozen again
-pub fn freezeRegs(self: *Self, regs: []const Register) void {
-for (regs) |reg| {
-const mask = getRegisterMask(reg) orelse continue;
-self.frozen_registers |= mask;
-}
+pub const RegisterLock = struct {
+register: Register,
+};
+
+/// Prevents the register from being allocated until they are
+/// unlocked again.
+/// Returns `RegisterLock` if the register was not already
+/// locked, or `null` otherwise.
+/// Only the owner of the `RegisterLock` can unlock the
+/// register later.
+pub fn lockReg(self: *Self, reg: Register) ?RegisterLock {
+log.debug("locking {}", .{reg});
+if (self.isRegLocked(reg)) {
+log.debug("  register already locked", .{});
+return null;
+}
+const mask = getRegisterMask(reg) orelse return null;
+self.locked_registers |= mask;
+return RegisterLock{ .register = reg };
 }
 
-/// Enables the allocation of the registers
-pub fn unfreezeRegs(self: *Self, regs: []const Register) void {
-for (regs) |reg| {
-const mask = getRegisterMask(reg) orelse continue;
-self.frozen_registers &= ~mask;
-}
+/// Like `lockReg` but asserts the register was unused always
+/// returning a valid lock.
+pub fn lockRegAssumeUnused(self: *Self, reg: Register) RegisterLock {
+log.debug("locking asserting free {}", .{reg});
+assert(!self.isRegLocked(reg));
+const mask = getRegisterMask(reg) orelse unreachable;
+self.locked_registers |= mask;
+return RegisterLock{ .register = reg };
 }
 
+/// Like `lockRegAssumeUnused` but locks multiple registers.
+pub fn lockRegsAssumeUnused(
+self: *Self,
+comptime count: comptime_int,
+regs: [count]Register,
+) [count]RegisterLock {
+var buf: [count]RegisterLock = undefined;
+for (regs) |reg, i| {
+buf[i] = self.lockRegAssumeUnused(reg);
+}
+return buf;
+}
+
-/// Returns true when at least one register is frozen
-pub fn frozenRegsExist(self: Self) bool {
-return self.frozen_registers != 0;
+/// Unlocks the register allowing its re-allocation and re-use.
+/// Requires `RegisterLock` to unlock a register.
+/// Call `lockReg` to obtain the lock first.
+pub fn unlockReg(self: *Self, lock: RegisterLock) void {
+log.debug("unlocking {}", .{lock.register});
+const mask = getRegisterMask(lock.register) orelse return;
+self.locked_registers &= ~mask;
 }
 
+/// Returns true when at least one register is locked
+pub fn lockedRegsExist(self: Self) bool {
+return self.locked_registers != 0;
+}
 
 /// Allocates a specified number of registers, optionally
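A short usage sketch of the API defined above (a hypothetical caller; the three lock variants mirror the functions in this hunk):

    // lockReg returns null when the register is already locked.
    const maybe_lock = self.register_manager.lockReg(reg);
    defer if (maybe_lock) |lock| self.register_manager.unlockReg(lock);

    // lockRegAssumeUnused asserts the register is unlocked and always
    // returns a valid RegisterLock.
    const lock = self.register_manager.lockRegAssumeUnused(other_reg);
    defer self.register_manager.unlockReg(lock);

    // lockRegsAssumeUnused locks several registers at once; each returned
    // lock must be released, e.g. in a defer loop.
    const locks = self.register_manager.lockRegsAssumeUnused(2, regs);
    defer for (locks) |reg_lock| {
        self.register_manager.unlockReg(reg_lock);
    };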
@@ -148,15 +183,15 @@ pub fn RegisterManager(
 ) ?[count]Register {
 comptime assert(count > 0 and count <= tracked_registers.len);
 
-const free_and_not_frozen_registers = self.free_registers & ~self.frozen_registers;
-const free_and_not_frozen_registers_count = @popCount(FreeRegInt, free_and_not_frozen_registers);
-if (free_and_not_frozen_registers_count < count) return null;
+const free_and_not_locked_registers = self.free_registers & ~self.locked_registers;
+const free_and_not_locked_registers_count = @popCount(FreeRegInt, free_and_not_locked_registers);
+if (free_and_not_locked_registers_count < count) return null;
 
 var regs: [count]Register = undefined;
 var i: usize = 0;
 for (tracked_registers) |reg| {
 if (i >= count) break;
-if (self.isRegFrozen(reg)) continue;
+if (self.isRegLocked(reg)) continue;
 if (!self.isRegFree(reg)) continue;
 
 regs[i] = reg;
@@ -194,8 +229,8 @@ pub fn RegisterManager(
 insts: [count]?Air.Inst.Index,
 ) AllocateRegistersError![count]Register {
 comptime assert(count > 0 and count <= tracked_registers.len);
-const frozen_registers_count = @popCount(FreeRegInt, self.frozen_registers);
-if (count > tracked_registers.len - frozen_registers_count) return error.OutOfRegisters;
+const locked_registers_count = @popCount(FreeRegInt, self.locked_registers);
+if (count > tracked_registers.len - locked_registers_count) return error.OutOfRegisters;
 
 const result = self.tryAllocRegs(count, insts) orelse blk: {
 // We'll take over the first count registers. Spill
@@ -205,7 +240,7 @@ pub fn RegisterManager(
 var i: usize = 0;
 for (tracked_registers) |reg| {
 if (i >= count) break;
-if (self.isRegFrozen(reg)) continue;
+if (self.isRegLocked(reg)) continue;
 
 regs[i] = reg;
 self.markRegAllocated(reg);
@@ -416,15 +451,15 @@ test "allocReg: spilling" {
 try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(mock_instruction));
 try expectEqualSlices(MockRegister1, &[_]MockRegister1{.r2}, function.spilled.items);
 
-// Frozen registers
+// Locked registers
 function.register_manager.freeReg(.r3);
 {
-function.register_manager.freezeRegs(&.{.r2});
-defer function.register_manager.unfreezeRegs(&.{.r2});
+const lock = function.register_manager.lockReg(.r2);
+defer if (lock) |reg| function.register_manager.unlockReg(reg);
 
 try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(mock_instruction));
 }
-try expect(!function.register_manager.frozenRegsExist());
+try expect(!function.register_manager.lockedRegsExist());
 }
 
 test "tryAllocRegs" {
@@ -442,17 +477,17 @@ test "tryAllocRegs" {
 try expect(function.register_manager.isRegAllocated(.r2));
 try expect(!function.register_manager.isRegAllocated(.r3));
 
-// Frozen registers
+// Locked registers
 function.register_manager.freeReg(.r0);
 function.register_manager.freeReg(.r2);
 function.register_manager.freeReg(.r3);
 {
-function.register_manager.freezeRegs(&.{.r1});
-defer function.register_manager.unfreezeRegs(&.{.r1});
+const lock = function.register_manager.lockReg(.r1);
+defer if (lock) |reg| function.register_manager.unlockReg(reg);
 
 try expectEqual([_]MockRegister2{ .r0, .r2, .r3 }, function.register_manager.tryAllocRegs(3, .{ null, null, null }).?);
 }
-try expect(!function.register_manager.frozenRegsExist());
+try expect(!function.register_manager.lockedRegsExist());
 
 try expect(function.register_manager.isRegAllocated(.r0));
 try expect(function.register_manager.isRegAllocated(.r1));
@@ -475,19 +510,19 @@ test "allocRegs: normal usage" {
 
 // The result register is known and fixed at this point, we
 // don't want to accidentally allocate lhs or rhs to the
-// result register, this is why we freeze it.
+// result register, this is why we lock it.
 //
-// Using defer unfreeze right after freeze is a good idea in
-// most cases as you probably are using the frozen registers
+// Using defer unlock right after lock is a good idea in
+// most cases as you probably are using the locked registers
 // in the remainder of this scope and don't need to use it
 // after the end of this scope. However, in some situations,
-// it may make sense to manually unfreeze registers before the
+// it may make sense to manually unlock registers before the
 // end of the scope when you are certain that they don't
 // contain any valuable data anymore and can be reused. For an
 // example of that, see `selectively reducing register
 // pressure`.
-function.register_manager.freezeRegs(&.{result_reg});
-defer function.register_manager.unfreezeRegs(&.{result_reg});
+const lock = function.register_manager.lockReg(result_reg);
+defer if (lock) |reg| function.register_manager.unlockReg(reg);
 
 const regs = try function.register_manager.allocRegs(2, .{ null, null });
 try function.genAdd(result_reg, regs[0], regs[1]);
@@ -507,16 +542,14 @@ test "allocRegs: selectively reducing register pressure" {
 {
 const result_reg: MockRegister2 = .r1;
 
-function.register_manager.freezeRegs(&.{result_reg});
-defer function.register_manager.unfreezeRegs(&.{result_reg});
+const lock = function.register_manager.lockReg(result_reg);
 
-// Here, we don't defer unfreeze because we manually unfreeze
+// Here, we don't defer unlock because we manually unlock
 // after genAdd
 const regs = try function.register_manager.allocRegs(2, .{ null, null });
-function.register_manager.freezeRegs(&.{result_reg});
 
 try function.genAdd(result_reg, regs[0], regs[1]);
-function.register_manager.unfreezeRegs(&regs);
+function.register_manager.unlockReg(lock.?);
 
 const extra_summand_reg = try function.register_manager.allocReg(null);
 try function.genAdd(result_reg, result_reg, extra_summand_reg);
 
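Putting the manager and the backend changes together, the intended lifecycle is roughly the following (a sketch assuming a CodeGen-style `self` with a `register_manager` field, as in the files above):

    const reg = try self.register_manager.allocReg(null);
    const lock = self.register_manager.lockRegAssumeUnused(reg);
    defer self.register_manager.unlockReg(lock);
    // ... emit machine code that uses reg ...
    // genBody in each backend now asserts that no locks leak past a body:
    // assert(!self.register_manager.lockedRegsExist());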
@@ -7,6 +7,8 @@ var foo: u8 align(4) = 100;
 
 test "global variable alignment" {
 if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
 
 comptime try expect(@typeInfo(@TypeOf(&foo)).Pointer.alignment == 4);
 comptime try expect(@TypeOf(&foo) == *align(4) u8);
 
@@ -6,6 +6,7 @@ var result: []const u8 = "wrong";
 test "pass string literal byvalue to a generic var param" {
 if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
 
 start();
 blowUpStack(10);
 