diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index 8887a1e0ca..cb404d8315 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -858,6 +858,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             }
         }
 
+        fn reuseOperand(inst: *ir.Inst, op_index: ir.Inst.DeathsBitIndex, mcv: MCValue) bool {
+            if (!inst.operandDies(op_index) or !mcv.isMutable())
+                return false;
+
+            // OK we're going to do it, but we need to clear the operand death bit so that
+            // it stays allocated.
+            inst.clearOperandDeath(op_index);
+            return true;
+        }
+
         fn genLoad(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
             const elem_ty = inst.base.ty;
             if (!elem_ty.hasCodeGenBits())
@@ -867,9 +877,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             if (inst.base.isUnused() and !is_volatile)
                 return MCValue.dead;
             const dst_mcv: MCValue = blk: {
-                if (inst.base.operandDies(0) and ptr.isMutable()) {
+                if (reuseOperand(&inst.base, 0, ptr)) {
                     // The MCValue that holds the pointer can be re-used as the value.
-                    // TODO track this in the register/stack allocation metadata.
                     break :blk ptr;
                 } else {
                     break :blk try self.allocRegOrMem(&inst.base);
@@ -966,7 +975,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             var dst_mcv: MCValue = undefined;
             var src_mcv: MCValue = undefined;
             var src_inst: *ir.Inst = undefined;
-            if (inst.operandDies(0) and lhs.isMutable()) {
+            if (reuseOperand(inst, 0, lhs)) {
                 // LHS dies; use it as the destination.
                 // Both operands cannot be memory.
                 src_inst = op_rhs;
@@ -977,7 +986,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                     dst_mcv = lhs;
                     src_mcv = rhs;
                 }
-            } else if (inst.operandDies(1) and rhs.isMutable()) {
+            } else if (reuseOperand(inst, 1, rhs)) {
                 // RHS dies; use it as the destination.
                 // Both operands cannot be memory.
                 src_inst = op_lhs;
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index 91dfad45d7..070f41eb5e 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -42,6 +42,11 @@ pub const Inst = struct {
         return @truncate(u1, self.deaths >> index) != 0;
     }
 
+    pub fn clearOperandDeath(self: *Inst, index: DeathsBitIndex) void {
+        assert(index < deaths_bits);
+        self.deaths &= ~(@as(DeathsInt, 1) << index);
+    }
+
     pub fn specialOperandDeaths(self: Inst) bool {
         return (self.deaths & (1 << deaths_bits)) != 0;
     }
diff --git a/test/stage2/compare_output.zig b/test/stage2/compare_output.zig
index ec9d1b9f9e..3513605602 100644
--- a/test/stage2/compare_output.zig
+++ b/test/stage2/compare_output.zig
@@ -544,6 +544,48 @@ pub fn addCases(ctx: *TestContext) !void {
         "",
     );
 
+    // This catches a possible bug in the logic for re-using dying operands.
+    case.addCompareOutput(
+        \\export fn _start() noreturn {
+        \\    assert(add(3, 4) == 116);
+        \\
+        \\    exit();
+        \\}
+        \\
+        \\fn add(a: u32, b: u32) u32 {
+        \\    const x: u32 = blk: {
+        \\        const c = a + b; // 7
+        \\        const d = a + c; // 10
+        \\        const e = d + b; // 14
+        \\        const f = d + e; // 24
+        \\        const g = e + f; // 38
+        \\        const h = f + g; // 62
+        \\        const i = g + h; // 100
+        \\        const j = i + d; // 110
+        \\        break :blk j;
+        \\    };
+        \\    const y = x + a; // 113
+        \\    const z = y + a; // 116
+        \\    return z;
+        \\}
+        \\
+        \\pub fn assert(ok: bool) void {
+        \\    if (!ok) unreachable; // assertion failure
+        \\}
+        \\
+        \\fn exit() noreturn {
+        \\    asm volatile ("syscall"
+        \\        :
+        \\        : [number] "{rax}" (231),
+        \\          [arg1] "{rdi}" (0)
+        \\        : "rcx", "r11", "memory"
+        \\    );
+        \\    unreachable;
+        \\}
+    ,
+        "",
+    );
+
     case.addCompareOutput(
         \\export fn _start() noreturn {
         \\    const ignore =