stage2: register allocator processes operand deaths
also rework the IR data structures
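
The IR instruction payloads no longer share a generic `args` struct; each
tag now maps one-to-one to a payload type (`NoOp`, `UnOp`, `BinOp`, or a
specialized struct via `Tag.Type()`), and downcasting goes through `castTag`.
As a rough sketch of the new pattern (not literal code from this diff;
`some_inst` is a placeholder):

    // Downcast via the tag; payload fields are now direct members.
    if (some_inst.castTag(.add)) |bin_op| {
        // bin_op is a *Inst.BinOp with bin_op.lhs / bin_op.rhs,
        // replacing the old bin_op.args.lhs / bin_op.args.rhs.
    }

Liveness analysis records operand deaths in each instruction's `deaths`
bitfield, and codegen walks `getOperand`/`operandDies` to free the registers
of operands whose lifetimes end at that instruction.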
@@ -1349,8 +1349,8 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
 fn analyzeBodyValueAsType(self: *Module, block_scope: *Scope.Block, body: zir.Module.Body) !Type {
     try self.analyzeBody(&block_scope.base, body);
     for (block_scope.instructions.items) |inst| {
-        if (inst.cast(Inst.Ret)) |ret| {
-            const val = try self.resolveConstValue(&block_scope.base, ret.args.operand);
+        if (inst.castTag(.ret)) |ret| {
+            const val = try self.resolveConstValue(&block_scope.base, ret.operand);
             return val.toType();
         } else {
             return self.fail(&block_scope.base, inst.src, "unable to resolve comptime value", .{});
@@ -1938,16 +1938,132 @@ fn analyzeExport(self: *Module, scope: *Scope, src: usize, symbol_name: []const
     };
 }
 
-fn addNewInstArgs(
+fn addNoOp(
     self: *Module,
     block: *Scope.Block,
     src: usize,
     ty: Type,
-    comptime T: type,
-    args: Inst.Args(T),
+    comptime tag: Inst.Tag,
 ) !*Inst {
-    const inst = try self.addNewInst(block, src, ty, T);
-    inst.args = args;
+    const inst = try block.arena.create(tag.Type());
+    inst.* = .{
+        .base = .{
+            .tag = tag,
+            .ty = ty,
+            .src = src,
+        },
+    };
+    try block.instructions.append(self.gpa, &inst.base);
     return &inst.base;
 }
 
+fn addUnOp(
+    self: *Module,
+    block: *Scope.Block,
+    src: usize,
+    ty: Type,
+    tag: Inst.Tag,
+    operand: *Inst,
+) !*Inst {
+    const inst = try block.arena.create(Inst.UnOp);
+    inst.* = .{
+        .base = .{
+            .tag = tag,
+            .ty = ty,
+            .src = src,
+        },
+        .operand = operand,
+    };
+    try block.instructions.append(self.gpa, &inst.base);
+    return &inst.base;
+}
+
+fn addBinOp(
+    self: *Module,
+    block: *Scope.Block,
+    src: usize,
+    ty: Type,
+    tag: Inst.Tag,
+    lhs: *Inst,
+    rhs: *Inst,
+) !*Inst {
+    const inst = try block.arena.create(Inst.BinOp);
+    inst.* = .{
+        .base = .{
+            .tag = tag,
+            .ty = ty,
+            .src = src,
+        },
+        .lhs = lhs,
+        .rhs = rhs,
+    };
+    try block.instructions.append(self.gpa, &inst.base);
+    return &inst.base;
+}
+
+fn addBr(
+    self: *Module,
+    scope_block: *Scope.Block,
+    src: usize,
+    target_block: *Inst.Block,
+    operand: *Inst,
+) !*Inst {
+    const inst = try scope_block.arena.create(Inst.Br);
+    inst.* = .{
+        .base = .{
+            .tag = .br,
+            .ty = Type.initTag(.noreturn),
+            .src = src,
+        },
+        .operand = operand,
+        .block = target_block,
+    };
+    try scope_block.instructions.append(self.gpa, &inst.base);
+    return &inst.base;
+}
+
+fn addCondBr(
+    self: *Module,
+    block: *Scope.Block,
+    src: usize,
+    condition: *Inst,
+    then_body: ir.Body,
+    else_body: ir.Body,
+) !*Inst {
+    const inst = try block.arena.create(Inst.CondBr);
+    inst.* = .{
+        .base = .{
+            .tag = .condbr,
+            .ty = Type.initTag(.noreturn),
+            .src = src,
+        },
+        .condition = condition,
+        .then_body = then_body,
+        .else_body = else_body,
+    };
+    try block.instructions.append(self.gpa, &inst.base);
+    return &inst.base;
+}
+
+fn addCall(
+    self: *Module,
+    block: *Scope.Block,
+    src: usize,
+    ty: Type,
+    func: *Inst,
+    args: []const *Inst,
+) !*Inst {
+    const inst = try block.arena.create(Inst.Call);
+    inst.* = .{
+        .base = .{
+            .tag = .call,
+            .ty = ty,
+            .src = src,
+        },
+        .func = func,
+        .args = args,
+    };
+    try block.instructions.append(self.gpa, &inst.base);
+    return &inst.base;
+}
+
@@ -2017,7 +2133,6 @@ fn addNewInst(self: *Module, block: *Scope.Block, src: usize, ty: Type, comptime
             .ty = ty,
             .src = src,
         },
-        .args = undefined,
     };
     try block.instructions.append(self.gpa, &inst.base);
     return inst;
@@ -2269,7 +2384,7 @@ fn analyzeInstArg(self: *Module, scope: *Scope, inst: *zir.Inst.Arg) InnerError!
         });
     }
     const param_type = fn_ty.fnParamType(param_index);
-    return self.addNewInstArgs(b, inst.base.src, param_type, Inst.Arg, {});
+    return self.addNoOp(b, inst.base.src, param_type, .arg);
 }
 
 fn analyzeInstBlock(self: *Module, scope: *Scope, inst: *zir.Inst.Block) InnerError!*Inst {
@@ -2285,7 +2400,7 @@ fn analyzeInstBlock(self: *Module, scope: *Scope, inst: *zir.Inst.Block) InnerEr
             .ty = undefined, // Set after analysis.
             .src = inst.base.src,
         },
-        .args = undefined,
+        .body = undefined,
     };
 
     var child_block: Scope.Block = .{
@@ -2316,13 +2431,13 @@ fn analyzeInstBlock(self: *Module, scope: *Scope, inst: *zir.Inst.Block) InnerEr
     // to emit a jump instruction to after the block when it encounters the break.
     try parent_block.instructions.append(self.gpa, &block_inst.base);
     block_inst.base.ty = try self.resolvePeerTypes(scope, label.results.items);
-    block_inst.args.body = .{ .instructions = try parent_block.arena.dupe(*Inst, child_block.instructions.items) };
+    block_inst.body = .{ .instructions = try parent_block.arena.dupe(*Inst, child_block.instructions.items) };
     return &block_inst.base;
 }
 
 fn analyzeInstBreakpoint(self: *Module, scope: *Scope, inst: *zir.Inst.Breakpoint) InnerError!*Inst {
     const b = try self.requireRuntimeBlock(scope, inst.base.src);
-    return self.addNewInstArgs(b, inst.base.src, Type.initTag(.void), Inst.Breakpoint, {});
+    return self.addNoOp(b, inst.base.src, Type.initTag(.void), .breakpoint);
 }
 
 fn analyzeInstBreak(self: *Module, scope: *Scope, inst: *zir.Inst.Break) InnerError!*Inst {
@@ -2350,10 +2465,7 @@ fn analyzeBreak(
         if (label.zir_block == zir_block) {
             try label.results.append(self.gpa, operand);
             const b = try self.requireRuntimeBlock(scope, src);
-            return self.addNewInstArgs(b, src, Type.initTag(.noreturn), Inst.Br, .{
-                .block = label.block_inst,
-                .operand = operand,
-            });
+            return self.addBr(b, src, label.block_inst, operand);
         }
     }
     opt_block = block.parent;
@@ -2484,10 +2596,7 @@ fn analyzeInstCall(self: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerErro
     }
 
     const b = try self.requireRuntimeBlock(scope, inst.base.src);
-    return self.addNewInstArgs(b, inst.base.src, Type.initTag(.void), Inst.Call, .{
-        .func = func,
-        .args = casted_args,
-    });
+    return self.addCall(b, inst.base.src, Type.initTag(.void), func, casted_args);
 }
 
 fn analyzeInstFn(self: *Module, scope: *Scope, fn_inst: *zir.Inst.Fn) InnerError!*Inst {
@@ -2570,14 +2679,14 @@ fn analyzeInstAs(self: *Module, scope: *Scope, as: *zir.Inst.As) InnerError!*Ins
 }
 
 fn analyzeInstPtrToInt(self: *Module, scope: *Scope, ptrtoint: *zir.Inst.PtrToInt) InnerError!*Inst {
-    const ptr = try self.resolveInst(scope, ptrtoint.positionals.ptr);
+    const ptr = try self.resolveInst(scope, ptrtoint.positionals.operand);
     if (ptr.ty.zigTypeTag() != .Pointer) {
-        return self.fail(scope, ptrtoint.positionals.ptr.src, "expected pointer, found '{}'", .{ptr.ty});
+        return self.fail(scope, ptrtoint.positionals.operand.src, "expected pointer, found '{}'", .{ptr.ty});
     }
     // TODO handle known-pointer-address
     const b = try self.requireRuntimeBlock(scope, ptrtoint.base.src);
     const ty = Type.initTag(.usize);
-    return self.addNewInstArgs(b, ptrtoint.base.src, ty, Inst.PtrToInt, .{ .ptr = ptr });
+    return self.addUnOp(b, ptrtoint.base.src, ty, .ptrtoint, ptr);
 }
 
 fn analyzeInstFieldPtr(self: *Module, scope: *Scope, fieldptr: *zir.Inst.FieldPtr) InnerError!*Inst {
@@ -2734,10 +2843,7 @@ fn analyzeInstAdd(self: *Module, scope: *Scope, inst: *zir.Inst.Add) InnerError!
         }
 
         const b = try self.requireRuntimeBlock(scope, inst.base.src);
-        return self.addNewInstArgs(b, inst.base.src, lhs.ty, Inst.Add, .{
-            .lhs = lhs,
-            .rhs = rhs,
-        });
+        return self.addBinOp(b, inst.base.src, lhs.ty, .add, lhs, rhs);
     }
     return self.fail(scope, inst.base.src, "TODO analyze add for {} + {}", .{ lhs.ty.zigTypeTag(), rhs.ty.zigTypeTag() });
 }
@@ -2783,14 +2889,22 @@ fn analyzeInstAsm(self: *Module, scope: *Scope, assembly: *zir.Inst.Asm) InnerEr
     }
 
     const b = try self.requireRuntimeBlock(scope, assembly.base.src);
-    return self.addNewInstArgs(b, assembly.base.src, return_type, Inst.Assembly, .{
+    const inst = try b.arena.create(Inst.Assembly);
+    inst.* = .{
+        .base = .{
+            .tag = .assembly,
+            .ty = return_type,
+            .src = assembly.base.src,
+        },
         .asm_source = asm_source,
         .is_volatile = assembly.kw_args.@"volatile",
         .output = output,
        .inputs = inputs,
        .clobbers = clobbers,
        .args = args,
-    });
+    };
+    try b.instructions.append(self.gpa, &inst.base);
+    return &inst.base;
 }
 
 fn analyzeInstCmp(self: *Module, scope: *Scope, inst: *zir.Inst.Cmp) InnerError!*Inst {
@@ -2818,15 +2932,12 @@ fn analyzeInstCmp(self: *Module, scope: *Scope, inst: *zir.Inst.Cmp) InnerError!
             return self.constBool(scope, inst.base.src, if (op == .eq) is_null else !is_null);
         }
         const b = try self.requireRuntimeBlock(scope, inst.base.src);
-        switch (op) {
-            .eq => return self.addNewInstArgs(b, inst.base.src, Type.initTag(.bool), Inst.IsNull, .{
-                .operand = opt_operand,
-            }),
-            .neq => return self.addNewInstArgs(b, inst.base.src, Type.initTag(.bool), Inst.IsNonNull, .{
-                .operand = opt_operand,
-            }),
+        const inst_tag: Inst.Tag = switch (op) {
+            .eq => .isnull,
+            .neq => .isnonnull,
             else => unreachable,
-        }
+        };
+        return self.addUnOp(b, inst.base.src, Type.initTag(.bool), inst_tag, opt_operand);
     } else if (is_equality_cmp and
         ((lhs_ty_tag == .Null and rhs.ty.isCPtr()) or (rhs_ty_tag == .Null and lhs.ty.isCPtr())))
     {
@@ -2861,7 +2972,7 @@ fn analyzeInstBoolNot(self: *Module, scope: *Scope, inst: *zir.Inst.BoolNot) Inn
         return self.constBool(scope, inst.base.src, !val.toBool());
     }
     const b = try self.requireRuntimeBlock(scope, inst.base.src);
-    return self.addNewInstArgs(b, inst.base.src, bool_type, Inst.Not, .{ .operand = operand });
+    return self.addUnOp(b, inst.base.src, bool_type, .not, operand);
 }
 
 fn analyzeInstIsNull(self: *Module, scope: *Scope, inst: *zir.Inst.IsNull) InnerError!*Inst {
@@ -2879,7 +2990,7 @@ fn analyzeInstCondBr(self: *Module, scope: *Scope, inst: *zir.Inst.CondBr) Inner
     const cond = try self.coerce(scope, Type.initTag(.bool), uncasted_cond);
 
     if (try self.resolveDefinedValue(scope, cond)) |cond_val| {
-        const body = if (cond_val.toBool()) &inst.positionals.true_body else &inst.positionals.false_body;
+        const body = if (cond_val.toBool()) &inst.positionals.then_body else &inst.positionals.else_body;
         try self.analyzeBody(scope, body.*);
         return self.constVoid(scope, inst.base.src);
     }
@@ -2894,7 +3005,7 @@ fn analyzeInstCondBr(self: *Module, scope: *Scope, inst: *zir.Inst.CondBr) Inner
         .arena = parent_block.arena,
     };
     defer true_block.instructions.deinit(self.gpa);
-    try self.analyzeBody(&true_block.base, inst.positionals.true_body);
+    try self.analyzeBody(&true_block.base, inst.positionals.then_body);
 
     var false_block: Scope.Block = .{
         .parent = parent_block,
@@ -2904,13 +3015,11 @@ fn analyzeInstCondBr(self: *Module, scope: *Scope, inst: *zir.Inst.CondBr) Inner
         .arena = parent_block.arena,
     };
     defer false_block.instructions.deinit(self.gpa);
-    try self.analyzeBody(&false_block.base, inst.positionals.false_body);
+    try self.analyzeBody(&false_block.base, inst.positionals.else_body);
 
-    return self.addNewInstArgs(parent_block, inst.base.src, Type.initTag(.noreturn), Inst.CondBr, Inst.Args(Inst.CondBr){
-        .condition = cond,
-        .true_body = .{ .instructions = try scope.arena().dupe(*Inst, true_block.instructions.items) },
-        .false_body = .{ .instructions = try scope.arena().dupe(*Inst, false_block.instructions.items) },
-    });
+    const then_body: ir.Body = .{ .instructions = try scope.arena().dupe(*Inst, true_block.instructions.items) };
+    const else_body: ir.Body = .{ .instructions = try scope.arena().dupe(*Inst, false_block.instructions.items) };
+    return self.addCondBr(parent_block, inst.base.src, cond, then_body, else_body);
 }
 
 fn wantSafety(self: *Module, scope: *Scope) bool {
@@ -2926,20 +3035,20 @@ fn analyzeInstUnreachable(self: *Module, scope: *Scope, unreach: *zir.Inst.Unrea
     const b = try self.requireRuntimeBlock(scope, unreach.base.src);
     if (self.wantSafety(scope)) {
         // TODO Once we have a panic function to call, call it here instead of this.
-        _ = try self.addNewInstArgs(b, unreach.base.src, Type.initTag(.void), Inst.Breakpoint, {});
+        _ = try self.addNoOp(b, unreach.base.src, Type.initTag(.void), .breakpoint);
     }
-    return self.addNewInstArgs(b, unreach.base.src, Type.initTag(.noreturn), Inst.Unreach, {});
+    return self.addNoOp(b, unreach.base.src, Type.initTag(.noreturn), .unreach);
 }
 
 fn analyzeInstRet(self: *Module, scope: *Scope, inst: *zir.Inst.Return) InnerError!*Inst {
     const operand = try self.resolveInst(scope, inst.positionals.operand);
     const b = try self.requireRuntimeBlock(scope, inst.base.src);
-    return self.addNewInstArgs(b, inst.base.src, Type.initTag(.noreturn), Inst.Ret, .{ .operand = operand });
+    return self.addUnOp(b, inst.base.src, Type.initTag(.noreturn), .ret, operand);
 }
 
 fn analyzeInstRetVoid(self: *Module, scope: *Scope, inst: *zir.Inst.ReturnVoid) InnerError!*Inst {
     const b = try self.requireRuntimeBlock(scope, inst.base.src);
-    return self.addNewInstArgs(b, inst.base.src, Type.initTag(.noreturn), Inst.RetVoid, {});
+    return self.addNoOp(b, inst.base.src, Type.initTag(.noreturn), .retvoid);
 }
 
 fn analyzeBody(self: *Module, scope: *Scope, body: zir.Module.Body) !void {
@@ -3027,11 +3136,7 @@ fn cmpNumeric(
     };
     const casted_lhs = try self.coerce(scope, dest_type, lhs);
     const casted_rhs = try self.coerce(scope, dest_type, rhs);
-    return self.addNewInstArgs(b, src, dest_type, Inst.Cmp, .{
-        .lhs = casted_lhs,
-        .rhs = casted_rhs,
-        .op = op,
-    });
+    return self.addBinOp(b, src, dest_type, Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs);
 }
 // For mixed unsigned integer sizes, implicit cast both operands to the larger integer.
 // For mixed signed and unsigned integers, implicit cast both operands to a signed
@@ -3131,11 +3236,7 @@ fn cmpNumeric(
     const casted_lhs = try self.coerce(scope, dest_type, lhs);
     const casted_rhs = try self.coerce(scope, dest_type, rhs);
 
-    return self.addNewInstArgs(b, src, Type.initTag(.bool), Inst.Cmp, .{
-        .lhs = casted_lhs,
-        .rhs = casted_rhs,
-        .op = op,
-    });
+    return self.addBinOp(b, src, Type.initTag(.bool), Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs);
 }
 
 fn makeIntType(self: *Module, scope: *Scope, signed: bool, bits: u16) !Type {
@@ -3236,7 +3337,7 @@ fn bitcast(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst {
     }
     // TODO validate the type size and other compile errors
     const b = try self.requireRuntimeBlock(scope, inst.src);
-    return self.addNewInstArgs(b, inst.src, dest_type, Inst.BitCast, .{ .operand = inst });
+    return self.addUnOp(b, inst.src, dest_type, .bitcast, inst);
 }
 
 fn coerceArrayPtrToSlice(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst {

@@ -173,8 +173,8 @@ fn ifExpr(mod: *Module, scope: *Scope, if_node: *ast.Node.If) InnerError!*zir.In
     const if_src = tree.token_locs[if_node.if_token].start;
     const condbr = try mod.addZIRInstSpecial(&block_scope.base, if_src, zir.Inst.CondBr, .{
         .condition = cond,
-        .true_body = undefined, // populated below
-        .false_body = undefined, // populated below
+        .then_body = undefined, // populated below
+        .else_body = undefined, // populated below
     }, .{});
 
     const block = try mod.addZIRInstBlock(scope, if_src, .{
@@ -196,7 +196,7 @@ fn ifExpr(mod: *Module, scope: *Scope, if_node: *ast.Node.If) InnerError!*zir.In
             .operand = then_result,
         }, .{});
     }
-    condbr.positionals.true_body = .{
+    condbr.positionals.then_body = .{
         .instructions = try then_scope.arena.dupe(*zir.Inst, then_scope.instructions.items),
     };
 
@@ -225,7 +225,7 @@ fn ifExpr(mod: *Module, scope: *Scope, if_node: *ast.Node.If) InnerError!*zir.In
             .block = block,
         }, .{});
     }
-    condbr.positionals.false_body = .{
+    condbr.positionals.else_body = .{
         .instructions = try else_scope.arena.dupe(*zir.Inst, else_scope.instructions.items),
     };

@@ -290,6 +290,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             next_stack_offset: u32 = 0,
 
             fn markRegUsed(self: *Branch, reg: Register) void {
+                if (FreeRegInt == u0) return;
                 const index = reg.allocIndex() orelse return;
                 const ShiftInt = std.math.Log2Int(FreeRegInt);
                 const shift = @intCast(ShiftInt, index);
@@ -297,6 +298,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             }
 
             fn markRegFree(self: *Branch, reg: Register) void {
+                if (FreeRegInt == u0) return;
                 const index = reg.allocIndex() orelse return;
                 const ShiftInt = std.math.Log2Int(FreeRegInt);
                 const shift = @intCast(ShiftInt, index);
@@ -407,40 +409,64 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
            for (body.instructions) |inst| {
                const new_inst = try self.genFuncInst(inst);
                try inst_table.putNoClobber(self.gpa, inst, new_inst);
-                // TODO process operand deaths
+
+                var i: ir.Inst.DeathsBitIndex = 0;
+                while (inst.getOperand(i)) |operand| : (i += 1) {
+                    if (inst.operandDies(i))
+                        self.processDeath(operand);
+                }
            }
        }
 
+        fn processDeath(self: *Self, inst: *ir.Inst) void {
+            const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
+            const entry = branch.inst_table.getEntry(inst) orelse return;
+            const prev_value = entry.value;
+            entry.value = .dead;
+            switch (prev_value) {
+                .register => |reg| {
+                    _ = branch.registers.remove(reg);
+                    branch.markRegFree(reg);
+                },
+                else => {}, // TODO process stack allocation death
+            }
+        }
+
        fn genFuncInst(self: *Self, inst: *ir.Inst) !MCValue {
            switch (inst.tag) {
-                .add => return self.genAdd(inst.cast(ir.Inst.Add).?),
-                .arg => return self.genArg(inst.cast(ir.Inst.Arg).?),
-                .assembly => return self.genAsm(inst.cast(ir.Inst.Assembly).?),
-                .bitcast => return self.genBitCast(inst.cast(ir.Inst.BitCast).?),
-                .block => return self.genBlock(inst.cast(ir.Inst.Block).?),
-                .br => return self.genBr(inst.cast(ir.Inst.Br).?),
+                .add => return self.genAdd(inst.castTag(.add).?),
+                .arg => return self.genArg(inst.castTag(.arg).?),
+                .assembly => return self.genAsm(inst.castTag(.assembly).?),
+                .bitcast => return self.genBitCast(inst.castTag(.bitcast).?),
+                .block => return self.genBlock(inst.castTag(.block).?),
+                .br => return self.genBr(inst.castTag(.br).?),
                .breakpoint => return self.genBreakpoint(inst.src),
-                .brvoid => return self.genBrVoid(inst.cast(ir.Inst.BrVoid).?),
-                .call => return self.genCall(inst.cast(ir.Inst.Call).?),
-                .cmp => return self.genCmp(inst.cast(ir.Inst.Cmp).?),
-                .condbr => return self.genCondBr(inst.cast(ir.Inst.CondBr).?),
+                .brvoid => return self.genBrVoid(inst.castTag(.brvoid).?),
+                .call => return self.genCall(inst.castTag(.call).?),
+                .cmp_lt => return self.genCmp(inst.castTag(.cmp_lt).?, .lt),
+                .cmp_lte => return self.genCmp(inst.castTag(.cmp_lte).?, .lte),
+                .cmp_eq => return self.genCmp(inst.castTag(.cmp_eq).?, .eq),
+                .cmp_gte => return self.genCmp(inst.castTag(.cmp_gte).?, .gte),
+                .cmp_gt => return self.genCmp(inst.castTag(.cmp_gt).?, .gt),
+                .cmp_neq => return self.genCmp(inst.castTag(.cmp_neq).?, .neq),
+                .condbr => return self.genCondBr(inst.castTag(.condbr).?),
                .constant => unreachable, // excluded from function bodies
-                .isnonnull => return self.genIsNonNull(inst.cast(ir.Inst.IsNonNull).?),
-                .isnull => return self.genIsNull(inst.cast(ir.Inst.IsNull).?),
-                .ptrtoint => return self.genPtrToInt(inst.cast(ir.Inst.PtrToInt).?),
-                .ret => return self.genRet(inst.cast(ir.Inst.Ret).?),
-                .retvoid => return self.genRetVoid(inst.cast(ir.Inst.RetVoid).?),
-                .sub => return self.genSub(inst.cast(ir.Inst.Sub).?),
+                .isnonnull => return self.genIsNonNull(inst.castTag(.isnonnull).?),
+                .isnull => return self.genIsNull(inst.castTag(.isnull).?),
+                .ptrtoint => return self.genPtrToInt(inst.castTag(.ptrtoint).?),
+                .ret => return self.genRet(inst.castTag(.ret).?),
+                .retvoid => return self.genRetVoid(inst.castTag(.retvoid).?),
+                .sub => return self.genSub(inst.castTag(.sub).?),
                .unreach => return MCValue{ .unreach = {} },
-                .not => return self.genNot(inst.cast(ir.Inst.Not).?),
+                .not => return self.genNot(inst.castTag(.not).?),
            }
        }
 
-        fn genNot(self: *Self, inst: *ir.Inst.Not) !MCValue {
+        fn genNot(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
            // No side effects, so if it's unreferenced, do nothing.
            if (inst.base.isUnused())
                return MCValue.dead;
-            const operand = try self.resolveInst(inst.args.operand);
+            const operand = try self.resolveInst(inst.operand);
            switch (operand) {
                .dead => unreachable,
                .unreach => unreachable,
@@ -473,36 +499,36 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                        .base = .{
                            .tag = .constant,
                            .deaths = 0,
-                            .ty = inst.args.operand.ty,
-                            .src = inst.args.operand.src,
+                            .ty = inst.operand.ty,
+                            .src = inst.operand.src,
                        },
                        .val = Value.initTag(.bool_true),
                    };
-                    return try self.genX8664BinMath(&inst.base, inst.args.operand, &imm.base, 6, 0x30);
+                    return try self.genX8664BinMath(&inst.base, inst.operand, &imm.base, 6, 0x30);
                },
                else => return self.fail(inst.base.src, "TODO implement NOT for {}", .{self.target.cpu.arch}),
            }
        }
 
-        fn genAdd(self: *Self, inst: *ir.Inst.Add) !MCValue {
+        fn genAdd(self: *Self, inst: *ir.Inst.BinOp) !MCValue {
            // No side effects, so if it's unreferenced, do nothing.
            if (inst.base.isUnused())
                return MCValue.dead;
            switch (arch) {
                .x86_64 => {
-                    return try self.genX8664BinMath(&inst.base, inst.args.lhs, inst.args.rhs, 0, 0x00);
+                    return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs, 0, 0x00);
                },
                else => return self.fail(inst.base.src, "TODO implement add for {}", .{self.target.cpu.arch}),
            }
        }
 
-        fn genSub(self: *Self, inst: *ir.Inst.Sub) !MCValue {
+        fn genSub(self: *Self, inst: *ir.Inst.BinOp) !MCValue {
            // No side effects, so if it's unreferenced, do nothing.
            if (inst.base.isUnused())
                return MCValue.dead;
            switch (arch) {
                .x86_64 => {
-                    return try self.genX8664BinMath(&inst.base, inst.args.lhs, inst.args.rhs, 5, 0x28);
+                    return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs, 5, 0x28);
                },
                else => return self.fail(inst.base.src, "TODO implement sub for {}", .{self.target.cpu.arch}),
            }
@@ -625,7 +651,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
            }
        }
 
-        fn genArg(self: *Self, inst: *ir.Inst.Arg) !MCValue {
+        fn genArg(self: *Self, inst: *ir.Inst.NoOp) !MCValue {
            if (FreeRegInt == u0) {
                return self.fail(inst.base.src, "TODO implement Register enum for {}", .{self.target.cpu.arch});
            }
@@ -659,7 +685,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
        }
 
        fn genCall(self: *Self, inst: *ir.Inst.Call) !MCValue {
-            const fn_ty = inst.args.func.ty;
+            const fn_ty = inst.func.ty;
            const cc = fn_ty.fnCallingConvention();
            const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen());
            defer self.gpa.free(param_types);
@@ -671,8 +697,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
            switch (arch) {
                .x86_64 => {
                    for (mc_args) |mc_arg, arg_i| {
-                        const arg = inst.args.args[arg_i];
-                        const arg_mcv = try self.resolveInst(inst.args.args[arg_i]);
+                        const arg = inst.args[arg_i];
+                        const arg_mcv = try self.resolveInst(inst.args[arg_i]);
                        switch (mc_arg) {
                            .none => continue,
                            .register => |reg| {
@@ -694,7 +720,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                }
            }
 
-            if (inst.args.func.cast(ir.Inst.Constant)) |func_inst| {
+            if (inst.func.cast(ir.Inst.Constant)) |func_inst| {
                if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
                    const func = func_val.func;
                    const got = &self.bin_file.program_headers.items[self.bin_file.phdr_got_index.?];
@@ -742,16 +768,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
            return .unreach;
        }
 
-        fn genRet(self: *Self, inst: *ir.Inst.Ret) !MCValue {
-            const operand = try self.resolveInst(inst.args.operand);
+        fn genRet(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
+            const operand = try self.resolveInst(inst.operand);
            return self.ret(inst.base.src, operand);
        }
 
-        fn genRetVoid(self: *Self, inst: *ir.Inst.RetVoid) !MCValue {
+        fn genRetVoid(self: *Self, inst: *ir.Inst.NoOp) !MCValue {
            return self.ret(inst.base.src, .none);
        }
 
-        fn genCmp(self: *Self, inst: *ir.Inst.Cmp) !MCValue {
+        fn genCmp(self: *Self, inst: *ir.Inst.BinOp, op: std.math.CompareOperator) !MCValue {
            // No side effects, so if it's unreferenced, do nothing.
            if (inst.base.isUnused())
                return MCValue.dead;
@@ -759,25 +785,25 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                .x86_64 => {
                    try self.code.ensureCapacity(self.code.items.len + 8);
 
-                    const lhs = try self.resolveInst(inst.args.lhs);
-                    const rhs = try self.resolveInst(inst.args.rhs);
+                    const lhs = try self.resolveInst(inst.lhs);
+                    const rhs = try self.resolveInst(inst.rhs);
 
                    // There are 2 operands, destination and source.
                    // Either one, but not both, can be a memory operand.
                    // Source operand can be an immediate, 8 bits or 32 bits.
                    const dst_mcv = if (lhs.isImmediate() or (lhs.isMemory() and rhs.isMemory()))
-                        try self.copyToNewRegister(inst.args.lhs)
+                        try self.copyToNewRegister(inst.lhs)
                    else
                        lhs;
                    // This instruction supports only signed 32-bit immediates at most.
-                    const src_mcv = try self.limitImmediateType(inst.args.rhs, i32);
+                    const src_mcv = try self.limitImmediateType(inst.rhs, i32);
 
                    try self.genX8664BinMathCode(inst.base.src, dst_mcv, src_mcv, 7, 0x38);
-                    const info = inst.args.lhs.ty.intInfo(self.target.*);
+                    const info = inst.lhs.ty.intInfo(self.target.*);
                    if (info.signed) {
-                        return MCValue{ .compare_flags_signed = inst.args.op };
+                        return MCValue{ .compare_flags_signed = op };
                    } else {
-                        return MCValue{ .compare_flags_unsigned = inst.args.op };
+                        return MCValue{ .compare_flags_unsigned = op };
                    }
                },
                else => return self.fail(inst.base.src, "TODO implement cmp for {}", .{self.target.cpu.arch}),
@@ -789,7 +815,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                .x86_64 => {
                    try self.code.ensureCapacity(self.code.items.len + 6);
 
-                    const cond = try self.resolveInst(inst.args.condition);
+                    const cond = try self.resolveInst(inst.condition);
                    switch (cond) {
                        .compare_flags_signed => |cmp_op| {
                            // Here we map to the opposite opcode because the jump is to the false branch.
@@ -838,19 +864,19 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
            self.code.appendSliceAssumeCapacity(&[_]u8{ 0x0f, opcode });
            const reloc = Reloc{ .rel32 = self.code.items.len };
            self.code.items.len += 4;
-            try self.genBody(inst.args.true_body);
+            try self.genBody(inst.then_body);
            try self.performReloc(inst.base.src, reloc);
-            try self.genBody(inst.args.false_body);
+            try self.genBody(inst.else_body);
            return MCValue.unreach;
        }
 
-        fn genIsNull(self: *Self, inst: *ir.Inst.IsNull) !MCValue {
+        fn genIsNull(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
            switch (arch) {
                else => return self.fail(inst.base.src, "TODO implement isnull for {}", .{self.target.cpu.arch}),
            }
        }
 
-        fn genIsNonNull(self: *Self, inst: *ir.Inst.IsNonNull) !MCValue {
+        fn genIsNonNull(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
            // Here you can specialize this instruction if it makes sense to, otherwise the default
            // will call genIsNull and invert the result.
            switch (arch) {
@@ -864,7 +890,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
            }
            // A block is nothing but a setup to be able to jump to the end.
            defer inst.codegen.relocs.deinit(self.gpa);
-            try self.genBody(inst.args.body);
+            try self.genBody(inst.body);
 
            for (inst.codegen.relocs.items) |reloc| try self.performReloc(inst.base.src, reloc);
 
@@ -883,17 +909,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
        }
 
        fn genBr(self: *Self, inst: *ir.Inst.Br) !MCValue {
-            if (!inst.args.operand.ty.hasCodeGenBits())
-                return self.brVoid(inst.base.src, inst.args.block);
+            if (!inst.operand.ty.hasCodeGenBits())
+                return self.brVoid(inst.base.src, inst.block);
 
-            const operand = try self.resolveInst(inst.args.operand);
+            const operand = try self.resolveInst(inst.operand);
            switch (arch) {
                else => return self.fail(inst.base.src, "TODO implement br for {}", .{self.target.cpu.arch}),
            }
        }
 
        fn genBrVoid(self: *Self, inst: *ir.Inst.BrVoid) !MCValue {
-            return self.brVoid(inst.base.src, inst.args.block);
+            return self.brVoid(inst.base.src, inst.block);
        }
 
        fn brVoid(self: *Self, src: usize, block: *ir.Inst.Block) !MCValue {
@@ -915,29 +941,29 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
        }
 
        fn genAsm(self: *Self, inst: *ir.Inst.Assembly) !MCValue {
-            if (!inst.args.is_volatile and inst.base.isUnused())
+            if (!inst.is_volatile and inst.base.isUnused())
                return MCValue.dead;
            if (arch != .x86_64 and arch != .i386) {
                return self.fail(inst.base.src, "TODO implement inline asm support for more architectures", .{});
            }
-            for (inst.args.inputs) |input, i| {
+            for (inst.inputs) |input, i| {
                if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') {
                    return self.fail(inst.base.src, "unrecognized asm input constraint: '{}'", .{input});
                }
                const reg_name = input[1 .. input.len - 1];
                const reg = parseRegName(reg_name) orelse
                    return self.fail(inst.base.src, "unrecognized register: '{}'", .{reg_name});
-                const arg = try self.resolveInst(inst.args.args[i]);
+                const arg = try self.resolveInst(inst.args[i]);
                try self.genSetReg(inst.base.src, reg, arg);
            }
 
-            if (mem.eql(u8, inst.args.asm_source, "syscall")) {
+            if (mem.eql(u8, inst.asm_source, "syscall")) {
                try self.code.appendSlice(&[_]u8{ 0x0f, 0x05 });
            } else {
                return self.fail(inst.base.src, "TODO implement support for more x86 assembly instructions", .{});
            }
 
-            if (inst.args.output) |output| {
+            if (inst.output) |output| {
                if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') {
                    return self.fail(inst.base.src, "unrecognized asm output constraint: '{}'", .{output});
                }
@@ -1169,13 +1195,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
            }
        }
 
-        fn genPtrToInt(self: *Self, inst: *ir.Inst.PtrToInt) !MCValue {
+        fn genPtrToInt(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
            // no-op
-            return self.resolveInst(inst.args.ptr);
+            return self.resolveInst(inst.operand);
        }
 
-        fn genBitCast(self: *Self, inst: *ir.Inst.BitCast) !MCValue {
-            const operand = try self.resolveInst(inst.args.operand);
+        fn genBitCast(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
+            const operand = try self.resolveInst(inst.operand);
            return operand;
        }

@@ -92,9 +92,9 @@ fn genFn(file: *C, decl: *Decl) !void {
     for (instructions) |inst| {
         try writer.writeAll("\n\t");
         switch (inst.tag) {
-            .assembly => try genAsm(file, inst.cast(Inst.Assembly).?, decl),
-            .call => try genCall(file, inst.cast(Inst.Call).?, decl),
-            .ret => try genRet(file, inst.cast(Inst.Ret).?, decl, tv.ty.fnReturnType()),
+            .assembly => try genAsm(file, inst.castTag(.assembly).?, decl),
+            .call => try genCall(file, inst.castTag(.call).?, decl),
+            .ret => try genRet(file, inst.castTag(.ret).?, decl, tv.ty.fnReturnType()),
             .retvoid => try file.main.writer().print("return;", .{}),
             else => |e| return file.fail(decl.src(), "TODO implement C codegen for {}", .{e}),
         }
@@ -105,9 +105,9 @@ fn genFn(file: *C, decl: *Decl) !void {
     try writer.writeAll("}\n\n");
 }
 
-fn genRet(file: *C, inst: *Inst.Ret, decl: *Decl, expected_return_type: Type) !void {
+fn genRet(file: *C, inst: *Inst.UnOp, decl: *Decl, expected_return_type: Type) !void {
     const writer = file.main.writer();
-    const ret_value = inst.args.operand;
+    const ret_value = inst.operand;
     const value = ret_value.value().?;
     if (expected_return_type.eql(ret_value.ty))
         return file.fail(decl.src(), "TODO return {}", .{expected_return_type})
@@ -126,7 +126,7 @@ fn genRet(file: *C, inst: *Inst.Ret, decl: *Decl, expected_return_type: Type) !v
 fn genCall(file: *C, inst: *Inst.Call, decl: *Decl) !void {
     const writer = file.main.writer();
     const header = file.header.writer();
-    if (inst.args.func.cast(Inst.Constant)) |func_inst| {
+    if (inst.func.castTag(.constant)) |func_inst| {
         if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
             const target = func_val.func.owner_decl;
             const target_ty = target.typed_value.most_recent.typed_value.ty;
@@ -144,7 +144,7 @@ fn genCall(file: *C, inst: *Inst.Call, decl: *Decl) !void {
         } else {
             return file.fail(decl.src(), "TODO non-function call target?", .{});
         }
-        if (inst.args.args.len != 0) {
+        if (inst.args.len != 0) {
             return file.fail(decl.src(), "TODO function arguments", .{});
         }
     } else {
@@ -152,14 +152,13 @@ fn genCall(file: *C, inst: *Inst.Call, decl: *Decl) !void {
     }
 }
 
-fn genAsm(file: *C, inst: *Inst.Assembly, decl: *Decl) !void {
-    const as = inst.args;
+fn genAsm(file: *C, as: *Inst.Assembly, decl: *Decl) !void {
     const writer = file.main.writer();
     for (as.inputs) |i, index| {
         if (i[0] == '{' and i[i.len - 1] == '}') {
             const reg = i[1 .. i.len - 1];
             const arg = as.args[index];
-            if (arg.cast(Inst.Constant)) |c| {
+            if (arg.castTag(.constant)) |c| {
                 if (c.val.tag() == .int_u64) {
                     try writer.writeAll("register ");
                     try renderType(file, writer, arg.ty, decl.src());
@@ -190,7 +189,7 @@ fn genAsm(file: *C, inst: *Inst.Assembly, decl: *Decl) !void {
             if (index > 0) {
                 try writer.writeAll(", ");
             }
-            if (arg.cast(Inst.Constant)) |c| {
+            if (arg.castTag(.constant)) |c| {
                 try writer.print("\"\"({}_constant)", .{reg});
             } else {
                 // This is blocked by the earlier test

@@ -55,7 +55,12 @@ pub const Inst = struct {
         breakpoint,
         brvoid,
         call,
-        cmp,
+        cmp_lt,
+        cmp_lte,
+        cmp_eq,
+        cmp_gte,
+        cmp_gt,
+        cmp_neq,
         condbr,
         constant,
         isnonnull,
@@ -66,13 +71,80 @@ pub const Inst = struct {
         sub,
         unreach,
         not,
 
+        /// There is one-to-one correspondence between tag and type for now,
+        /// but this will not always be the case. For example, binary operations
+        /// such as + and - will have different tags but the same type.
+        pub fn Type(tag: Tag) type {
+            return switch (tag) {
+                .retvoid,
+                .unreach,
+                .arg,
+                .breakpoint,
+                => NoOp,
+
+                .ret,
+                .bitcast,
+                .not,
+                .isnonnull,
+                .isnull,
+                .ptrtoint,
+                => UnOp,
+
+                .add,
+                .sub,
+                .cmp_lt,
+                .cmp_lte,
+                .cmp_eq,
+                .cmp_gte,
+                .cmp_gt,
+                .cmp_neq,
+                => BinOp,
+
+                .assembly => Assembly,
+                .block => Block,
+                .br => Br,
+                .brvoid => BrVoid,
+                .call => Call,
+                .condbr => CondBr,
+                .constant => Constant,
+            };
+        }
+
+        pub fn fromCmpOp(op: std.math.CompareOperator) Tag {
+            return switch (op) {
+                .lt => .cmp_lt,
+                .lte => .cmp_lte,
+                .eq => .cmp_eq,
+                .gte => .cmp_gte,
+                .gt => .cmp_gt,
+                .neq => .cmp_neq,
+            };
+        }
     };
 
+    /// Prefer `castTag` to this.
     pub fn cast(base: *Inst, comptime T: type) ?*T {
-        if (base.tag != T.base_tag)
-            return null;
+        if (@hasField(T, "base_tag")) {
+            return base.castTag(T.base_tag);
+        }
+        inline for (@typeInfo(Tag).Enum.fields) |field| {
+            const tag = @intToEnum(Tag, field.value);
+            if (base.tag == tag) {
+                if (T == tag.Type()) {
+                    return @fieldParentPtr(T, "base", base);
+                }
+                return null;
+            }
+        }
+        unreachable;
+    }
 
-        return @fieldParentPtr(T, "base", base);
+    pub fn castTag(base: *Inst, comptime tag: Tag) ?*tag.Type() {
+        if (base.tag == tag) {
+            return @fieldParentPtr(tag.Type(), "base", base);
+        }
+        return null;
     }
 
     pub fn Args(comptime T: type) type {
@@ -88,186 +160,219 @@ pub const Inst = struct {
         return inst.val;
     }
 
-    pub const Add = struct {
-        pub const base_tag = Tag.add;
+    pub fn cmpOperator(base: *Inst) ?std.math.CompareOperator {
+        return switch (base.tag) {
+            .cmp_lt => .lt,
+            .cmp_lte => .lte,
+            .cmp_eq => .eq,
+            .cmp_gte => .gte,
+            .cmp_gt => .gt,
+            .cmp_neq => .neq,
+            else => null,
+        };
+    }
+
+    pub fn operandCount(base: *Inst) usize {
+        inline for (@typeInfo(Tag).Enum.fields) |field| {
+            const tag = @intToEnum(Tag, field.value);
+            if (tag == base.tag) {
+                return @fieldParentPtr(tag.Type(), "base", base).operandCount();
+            }
+        }
+        unreachable;
+    }
+
+    pub fn getOperand(base: *Inst, index: usize) ?*Inst {
+        inline for (@typeInfo(Tag).Enum.fields) |field| {
+            const tag = @intToEnum(Tag, field.value);
+            if (tag == base.tag) {
+                return @fieldParentPtr(tag.Type(), "base", base).getOperand(index);
+            }
+        }
+        unreachable;
+    }
 
+    pub const NoOp = struct {
         base: Inst,
 
-        args: struct {
-            lhs: *Inst,
-            rhs: *Inst,
-        },
+        pub fn operandCount(self: *const NoOp) usize {
+            return 0;
+        }
+        pub fn getOperand(self: *const NoOp, index: usize) ?*Inst {
+            return null;
+        }
     };
 
-    pub const Arg = struct {
-        pub const base_tag = Tag.arg;
+    pub const UnOp = struct {
         base: Inst,
-        args: void,
+        operand: *Inst,
+
+        pub fn operandCount(self: *const UnOp) usize {
+            return 1;
+        }
+        pub fn getOperand(self: *const UnOp, index: usize) ?*Inst {
+            if (index == 0)
+                return self.operand;
+            return null;
+        }
     };
 
+    pub const BinOp = struct {
+        base: Inst,
+        lhs: *Inst,
+        rhs: *Inst,
+
+        pub fn operandCount(self: *const BinOp) usize {
+            return 2;
+        }
+        pub fn getOperand(self: *const BinOp, index: usize) ?*Inst {
+            var i = index;
+
+            if (i < 1)
+                return self.lhs;
+            i -= 1;
+
+            if (i < 1)
+                return self.rhs;
+            i -= 1;
+
+            return null;
+        }
+    };
+
     pub const Assembly = struct {
         pub const base_tag = Tag.assembly;
         base: Inst,
 
-        args: struct {
-            asm_source: []const u8,
-            is_volatile: bool,
-            output: ?[]const u8,
-            inputs: []const []const u8,
-            clobbers: []const []const u8,
-            args: []const *Inst,
-        },
-    };
-
-    pub const BitCast = struct {
-        pub const base_tag = Tag.bitcast;
-
-        base: Inst,
-        args: struct {
-            operand: *Inst,
-        },
+        asm_source: []const u8,
+        is_volatile: bool,
+        output: ?[]const u8,
+        inputs: []const []const u8,
+        clobbers: []const []const u8,
+        args: []const *Inst,
+
+        pub fn operandCount(self: *const Assembly) usize {
+            return self.args.len;
+        }
+        pub fn getOperand(self: *const Assembly, index: usize) ?*Inst {
+            if (index < self.args.len)
+                return self.args[index];
+            return null;
+        }
     };
 
     pub const Block = struct {
         pub const base_tag = Tag.block;
 
         base: Inst,
-        args: struct {
-            body: Body,
-        },
+        body: Body,
         /// This memory is reserved for codegen code to do whatever it needs to here.
         codegen: codegen.BlockData = .{},
+
+        pub fn operandCount(self: *const Block) usize {
+            return 0;
+        }
+        pub fn getOperand(self: *const Block, index: usize) ?*Inst {
+            return null;
+        }
     };
 
     pub const Br = struct {
         pub const base_tag = Tag.br;
         base: Inst,
-        args: struct {
-            block: *Block,
-            operand: *Inst,
-        },
-    };
-
-    pub const Breakpoint = struct {
-        pub const base_tag = Tag.breakpoint;
-        base: Inst,
-        args: void,
+        block: *Block,
+        operand: *Inst,
+
+        pub fn operandCount(self: *const Br) usize {
+            return 1;
+        }
+        pub fn getOperand(self: *const Br, index: usize) ?*Inst {
+            if (index == 0)
+                return self.operand;
+            return null;
+        }
     };
 
     pub const BrVoid = struct {
         pub const base_tag = Tag.brvoid;
 
         base: Inst,
-        args: struct {
-            block: *Block,
-        },
+        block: *Block,
+
+        pub fn operandCount(self: *const BrVoid) usize {
+            return 0;
+        }
+        pub fn getOperand(self: *const BrVoid, index: usize) ?*Inst {
+            return null;
+        }
     };
 
     pub const Call = struct {
         pub const base_tag = Tag.call;
         base: Inst,
-        args: struct {
-            func: *Inst,
-            args: []const *Inst,
-        },
-    };
-
-    pub const Cmp = struct {
-        pub const base_tag = Tag.cmp;
-
-        base: Inst,
-        args: struct {
-            lhs: *Inst,
-            op: std.math.CompareOperator,
-            rhs: *Inst,
-        },
+        func: *Inst,
+        args: []const *Inst,
+
+        pub fn operandCount(self: *const Call) usize {
+            return self.args.len + 1;
+        }
+        pub fn getOperand(self: *const Call, index: usize) ?*Inst {
+            var i = index;
+
+            if (i < 1)
+                return self.func;
+            i -= 1;
+
+            if (i < self.args.len)
+                return self.args[i];
+            i -= self.args.len;
+
+            return null;
+        }
     };
 
     pub const CondBr = struct {
         pub const base_tag = Tag.condbr;
 
         base: Inst,
-        args: struct {
-            condition: *Inst,
-            true_body: Body,
-            false_body: Body,
-        },
-    };
-
-    pub const Not = struct {
-        pub const base_tag = Tag.not;
-
-        base: Inst,
-        args: struct {
-            operand: *Inst,
-        },
+        condition: *Inst,
+        then_body: Body,
+        else_body: Body,
+        /// Set of instructions whose lifetimes end at the start of one of the branches.
+        /// The `true` branch is first: `deaths[0..true_death_count]`.
+        /// The `false` branch is next: `(deaths + true_death_count)[..false_death_count]`.
+        deaths: [*]*Inst = undefined,
+        true_death_count: u32 = 0,
+        false_death_count: u32 = 0,
+
+        pub fn operandCount(self: *const CondBr) usize {
+            return 1;
+        }
+        pub fn getOperand(self: *const CondBr, index: usize) ?*Inst {
+            var i = index;
+
+            if (i < 1)
+                return self.condition;
+            i -= 1;
+
+            return null;
+        }
     };
 
     pub const Constant = struct {
         pub const base_tag = Tag.constant;
+        base: Inst,
 
-        base: Inst,
         val: Value,
-    };
-
-    pub const IsNonNull = struct {
-        pub const base_tag = Tag.isnonnull;
-
-        base: Inst,
-        args: struct {
-            operand: *Inst,
-        },
-    };
-
-    pub const IsNull = struct {
-        pub const base_tag = Tag.isnull;
-
-        base: Inst,
-        args: struct {
-            operand: *Inst,
-        },
-    };
-
-    pub const PtrToInt = struct {
-        pub const base_tag = Tag.ptrtoint;
-
-        base: Inst,
-        args: struct {
-            ptr: *Inst,
-        },
-    };
-
-    pub const Ret = struct {
-        pub const base_tag = Tag.ret;
-        base: Inst,
-        args: struct {
-            operand: *Inst,
-        },
-    };
-
-    pub const RetVoid = struct {
-        pub const base_tag = Tag.retvoid;
-        base: Inst,
-        args: void,
-    };
-
-    pub const Sub = struct {
-        pub const base_tag = Tag.sub;
-        base: Inst,
-
-        args: struct {
-            lhs: *Inst,
-            rhs: *Inst,
-        },
-    };
-
-    pub const Unreach = struct {
-        pub const base_tag = Tag.unreach;
-        base: Inst,
-        args: void,
+
+        pub fn operandCount(self: *const Constant) usize {
+            return 0;
+        }
+        pub fn getOperand(self: *const Constant, index: usize) ?*Inst {
+            return null;
+        }
     };
 };

@@ -25,53 +25,38 @@ fn analyzeWithTable(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst,
     while (i != 0) {
         i -= 1;
         const base = body.instructions[i];
-        try analyzeInstGeneric(arena, table, base);
+        try analyzeInst(arena, table, base);
     }
 }
 
-fn analyzeInstGeneric(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void), base: *ir.Inst) error{OutOfMemory}!void {
-    // Obtain the corresponding instruction type based on the tag type.
-    inline for (std.meta.declarations(ir.Inst)) |decl| {
-        switch (decl.data) {
-            .Type => |T| {
-                if (@typeInfo(T) == .Struct and @hasDecl(T, "base_tag")) {
-                    if (T.base_tag == base.tag) {
-                        return analyzeInst(arena, table, T, @fieldParentPtr(T, "base", base));
-                    }
-                }
-            },
-            else => {},
-        }
-    }
-    unreachable;
-}
-
-fn analyzeInst(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void), comptime T: type, inst: *T) error{OutOfMemory}!void {
-    if (table.contains(&inst.base)) {
-        inst.base.deaths = 0;
+fn analyzeInst(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void), base: *ir.Inst) error{OutOfMemory}!void {
+    if (table.contains(base)) {
+        base.deaths = 0;
     } else {
         // No tombstone for this instruction means it is never referenced,
         // and its birth marks its own death. Very metal 🤘
-        inst.base.deaths = 1 << ir.Inst.unreferenced_bit_index;
+        base.deaths = 1 << ir.Inst.unreferenced_bit_index;
     }
 
-    switch (T) {
-        ir.Inst.Constant => return,
-        ir.Inst.Block => {
-            try analyzeWithTable(arena, table, inst.args.body);
+    switch (base.tag) {
+        .constant => return,
+        .block => {
+            const inst = base.castTag(.block).?;
+            try analyzeWithTable(arena, table, inst.body);
             // We let this continue so that it can possibly mark the block as
             // unreferenced below.
         },
-        ir.Inst.CondBr => {
+        .condbr => {
+            const inst = base.castTag(.condbr).?;
             var true_table = std.AutoHashMap(*ir.Inst, void).init(table.allocator);
             defer true_table.deinit();
-            try true_table.ensureCapacity(inst.args.true_body.instructions.len);
-            try analyzeWithTable(arena, &true_table, inst.args.true_body);
+            try true_table.ensureCapacity(inst.then_body.instructions.len);
+            try analyzeWithTable(arena, &true_table, inst.then_body);
 
             var false_table = std.AutoHashMap(*ir.Inst, void).init(table.allocator);
             defer false_table.deinit();
-            try false_table.ensureCapacity(inst.args.false_body.instructions.len);
-            try analyzeWithTable(arena, &false_table, inst.args.false_body);
+            try false_table.ensureCapacity(inst.else_body.instructions.len);
+            try analyzeWithTable(arena, &false_table, inst.else_body);
 
             // Each death that occurs inside one branch, but not the other, needs
             // to be added as a death immediately upon entering the other branch.
@@ -112,47 +97,22 @@ fn analyzeInst(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void
             // instruction, and the deaths flag for the CondBr instruction will indicate whether the
             // condition's lifetime ends immediately before entering any branch.
         },
-        ir.Inst.Call => {
-            // Call instructions have a runtime-known number of operands so we have to handle them ourselves here.
-            const needed_bits = 1 + inst.args.args.len;
-            if (needed_bits <= ir.Inst.deaths_bits) {
-                var bit_i: ir.Inst.DeathsBitIndex = 0;
-                {
-                    const prev = try table.fetchPut(inst.args.func, {});
-                    if (prev == null) inst.base.deaths |= @as(ir.Inst.DeathsInt, 1) << bit_i;
-                    bit_i += 1;
-                }
-                for (inst.args.args) |arg| {
-                    const prev = try table.fetchPut(arg, {});
-                    if (prev == null) inst.base.deaths |= @as(ir.Inst.DeathsInt, 1) << bit_i;
-                    bit_i += 1;
-                }
-            } else {
-                @panic("Handle liveness analysis for function calls with many parameters");
-            }
-        },
         else => {},
     }
 
-    const Args = ir.Inst.Args(T);
-    if (Args == void) {
-        return;
-    }
-
-    comptime var arg_index: usize = 0;
-    inline for (std.meta.fields(Args)) |field| {
-        if (field.field_type == *ir.Inst) {
-            if (arg_index >= 6) {
-                @compileError("out of bits to mark deaths of operands");
-            }
-            const prev = try table.fetchPut(@field(inst.args, field.name), {});
+    const needed_bits = base.operandCount();
+    if (needed_bits <= ir.Inst.deaths_bits) {
+        var bit_i: ir.Inst.DeathsBitIndex = 0;
+        while (base.getOperand(bit_i)) |operand| : (bit_i += 1) {
+            const prev = try table.fetchPut(operand, {});
            if (prev == null) {
                // Death.
-                inst.base.deaths |= 1 << arg_index;
+                base.deaths |= @as(ir.Inst.DeathsInt, 1) << bit_i;
            }
-            arg_index += 1;
        }
+    } else {
+        @panic("Handle liveness analysis for instructions with many parameters");
    }
 
-    std.log.debug(.liveness, "analyze {}: 0b{b}\n", .{ inst.base.tag, inst.base.deaths });
+    std.log.debug(.liveness, "analyze {}: 0b{b}\n", .{ base.tag, base.deaths });
 }

@@ -337,7 +337,7 @@ pub const Inst = struct {
|
||||
base: Inst,
|
||||
|
||||
positionals: struct {
|
||||
ptr: *Inst,
|
||||
operand: *Inst,
|
||||
},
|
||||
kw_args: struct {},
|
||||
};
|
||||
@@ -629,8 +629,8 @@ pub const Inst = struct {
|
||||
|
||||
positionals: struct {
|
||||
condition: *Inst,
|
||||
true_body: Module.Body,
|
||||
false_body: Module.Body,
|
||||
then_body: Module.Body,
|
||||
else_body: Module.Body,
|
||||
},
|
||||
kw_args: struct {},
|
||||
};
|
||||
@@ -1615,7 +1615,7 @@ const EmitZIR = struct {
|
||||
}
|
||||
}
|
||||
|
||||
fn emitTrivial(self: *EmitZIR, src: usize, comptime T: type) Allocator.Error!*Inst {
|
||||
fn emitNoOp(self: *EmitZIR, src: usize, comptime T: type) Allocator.Error!*Inst {
|
||||
const new_inst = try self.arena.allocator.create(T);
|
||||
new_inst.* = .{
|
||||
.base = .{
|
||||
@@ -1628,6 +1628,72 @@ const EmitZIR = struct {
|
||||
return &new_inst.base;
|
||||
}
|
||||
|
||||
fn emitCmp(
|
||||
self: *EmitZIR,
|
||||
src: usize,
|
||||
new_body: ZirBody,
|
||||
old_inst: *ir.Inst.BinOp,
|
||||
op: std.math.CompareOperator,
|
||||
) Allocator.Error!*Inst {
|
||||
const new_inst = try self.arena.allocator.create(Inst.Cmp);
|
||||
new_inst.* = .{
|
||||
.base = .{
|
||||
.src = src,
|
||||
.tag = Inst.Cmp.base_tag,
|
||||
},
|
||||
.positionals = .{
|
||||
.lhs = try self.resolveInst(new_body, old_inst.lhs),
|
||||
.rhs = try self.resolveInst(new_body, old_inst.rhs),
|
||||
.op = op,
|
||||
},
|
||||
.kw_args = .{},
|
||||
};
|
||||
return &new_inst.base;
|
||||
}
|
||||
|
||||
fn emitUnOp(
|
||||
self: *EmitZIR,
|
||||
src: usize,
|
||||
new_body: ZirBody,
|
||||
old_inst: *ir.Inst.UnOp,
|
||||
comptime I: type,
|
||||
) Allocator.Error!*Inst {
|
||||
const new_inst = try self.arena.allocator.create(I);
|
||||
new_inst.* = .{
|
||||
.base = .{
|
||||
.src = src,
|
||||
.tag = I.base_tag,
|
||||
},
|
||||
.positionals = .{
|
||||
.operand = try self.resolveInst(new_body, old_inst.operand),
|
||||
},
|
||||
.kw_args = .{},
|
||||
};
|
||||
return &new_inst.base;
|
||||
}
|
||||
|
||||
fn emitBinOp(
|
||||
self: *EmitZIR,
|
||||
src: usize,
|
||||
new_body: ZirBody,
|
||||
old_inst: *ir.Inst.BinOp,
|
||||
comptime I: type,
|
||||
) Allocator.Error!*Inst {
|
||||
const new_inst = try self.arena.allocator.create(I);
|
||||
new_inst.* = .{
|
||||
.base = .{
|
||||
.src = src,
|
||||
.tag = I.base_tag,
|
||||
},
|
||||
.positionals = .{
|
||||
.lhs = try self.resolveInst(new_body, old_inst.lhs),
|
||||
.rhs = try self.resolveInst(new_body, old_inst.rhs),
|
||||
},
|
||||
.kw_args = .{},
|
||||
};
|
||||
return &new_inst.base;
|
||||
}

    fn emitBody(
        self: *EmitZIR,
        body: ir.Body,

@@ -1640,69 +1706,48 @@ const EmitZIR = struct {
        };
        for (body.instructions) |inst| {
            const new_inst = switch (inst.tag) {
                .not => blk: {
                    const old_inst = inst.cast(ir.Inst.Not).?;
                    assert(inst.ty.zigTypeTag() == .Bool);
                    const new_inst = try self.arena.allocator.create(Inst.BoolNot);
                .constant => unreachable, // excluded from function bodies

                .arg => try self.emitNoOp(inst.src, Inst.Arg),
                .breakpoint => try self.emitNoOp(inst.src, Inst.Breakpoint),
                .unreach => try self.emitNoOp(inst.src, Inst.Unreachable),
                .retvoid => try self.emitNoOp(inst.src, Inst.ReturnVoid),

                .not => try self.emitUnOp(inst.src, new_body, inst.castTag(.not).?, Inst.BoolNot),
                .ret => try self.emitUnOp(inst.src, new_body, inst.castTag(.ret).?, Inst.Return),
                .ptrtoint => try self.emitUnOp(inst.src, new_body, inst.castTag(.ptrtoint).?, Inst.PtrToInt),
                .isnull => try self.emitUnOp(inst.src, new_body, inst.castTag(.isnull).?, Inst.IsNull),
                .isnonnull => try self.emitUnOp(inst.src, new_body, inst.castTag(.isnonnull).?, Inst.IsNonNull),

                .add => try self.emitBinOp(inst.src, new_body, inst.castTag(.add).?, Inst.Add),
                .sub => try self.emitBinOp(inst.src, new_body, inst.castTag(.sub).?, Inst.Sub),

                .cmp_lt => try self.emitCmp(inst.src, new_body, inst.castTag(.cmp_lt).?, .lt),
                .cmp_lte => try self.emitCmp(inst.src, new_body, inst.castTag(.cmp_lte).?, .lte),
                .cmp_eq => try self.emitCmp(inst.src, new_body, inst.castTag(.cmp_eq).?, .eq),
                .cmp_gte => try self.emitCmp(inst.src, new_body, inst.castTag(.cmp_gte).?, .gte),
                .cmp_gt => try self.emitCmp(inst.src, new_body, inst.castTag(.cmp_gt).?, .gt),
                .cmp_neq => try self.emitCmp(inst.src, new_body, inst.castTag(.cmp_neq).?, .neq),

                .bitcast => blk: {
                    const old_inst = inst.castTag(.bitcast).?;
                    const new_inst = try self.arena.allocator.create(Inst.BitCast);
                    new_inst.* = .{
                        .base = .{
                            .src = inst.src,
                            .tag = Inst.BoolNot.base_tag,
                            .tag = Inst.BitCast.base_tag,
                        },
                        .positionals = .{
                            .operand = try self.resolveInst(new_body, old_inst.args.operand),
                            .dest_type = (try self.emitType(inst.src, inst.ty)).inst,
                            .operand = try self.resolveInst(new_body, old_inst.operand),
                        },
                        .kw_args = .{},
                    };
                    break :blk &new_inst.base;
                },
                .add => blk: {
                    const old_inst = inst.cast(ir.Inst.Add).?;
                    const new_inst = try self.arena.allocator.create(Inst.Add);
                    new_inst.* = .{
                        .base = .{
                            .src = inst.src,
                            .tag = Inst.Add.base_tag,
                        },
                        .positionals = .{
                            .lhs = try self.resolveInst(new_body, old_inst.args.lhs),
                            .rhs = try self.resolveInst(new_body, old_inst.args.rhs),
                        },
                        .kw_args = .{},
                    };
                    break :blk &new_inst.base;
                },
                .sub => blk: {
                    const old_inst = inst.cast(ir.Inst.Sub).?;
                    const new_inst = try self.arena.allocator.create(Inst.Sub);
                    new_inst.* = .{
                        .base = .{
                            .src = inst.src,
                            .tag = Inst.Sub.base_tag,
                        },
                        .positionals = .{
                            .lhs = try self.resolveInst(new_body, old_inst.args.lhs),
                            .rhs = try self.resolveInst(new_body, old_inst.args.rhs),
                        },
                        .kw_args = .{},
                    };
                    break :blk &new_inst.base;
                },
                .arg => blk: {
                    const old_inst = inst.cast(ir.Inst.Arg).?;
                    const new_inst = try self.arena.allocator.create(Inst.Arg);
                    new_inst.* = .{
                        .base = .{
                            .src = inst.src,
                            .tag = Inst.Arg.base_tag,
                        },
                        .positionals = .{},
                        .kw_args = .{},
                    };
                    break :blk &new_inst.base;
                },

                .block => blk: {
                    const old_inst = inst.cast(ir.Inst.Block).?;
                    const old_inst = inst.castTag(.block).?;
                    const new_inst = try self.arena.allocator.create(Inst.Block);

                    try self.block_table.put(old_inst, new_inst);

@@ -1710,7 +1755,7 @@ const EmitZIR = struct {
                    var block_body = std.ArrayList(*Inst).init(self.allocator);
                    defer block_body.deinit();

                    try self.emitBody(old_inst.args.body, inst_table, &block_body);
                    try self.emitBody(old_inst.body, inst_table, &block_body);

                    new_inst.* = .{
                        .base = .{

@@ -1725,27 +1770,10 @@ const EmitZIR = struct {

                    break :blk &new_inst.base;
                },
                .br => blk: {
                    const old_inst = inst.cast(ir.Inst.Br).?;
                    const new_block = self.block_table.get(old_inst.args.block).?;
                    const new_inst = try self.arena.allocator.create(Inst.Break);
                    new_inst.* = .{
                        .base = .{
                            .src = inst.src,
                            .tag = Inst.Break.base_tag,
                        },
                        .positionals = .{
                            .block = new_block,
                            .operand = try self.resolveInst(new_body, old_inst.args.operand),
                        },
                        .kw_args = .{},
                    };
                    break :blk &new_inst.base;
                },
                .breakpoint => try self.emitTrivial(inst.src, Inst.Breakpoint),

                .brvoid => blk: {
                    const old_inst = inst.cast(ir.Inst.BrVoid).?;
                    const new_block = self.block_table.get(old_inst.args.block).?;
                    const new_block = self.block_table.get(old_inst.block).?;
                    const new_inst = try self.arena.allocator.create(Inst.BreakVoid);
                    new_inst.* = .{
                        .base = .{

@@ -1759,13 +1787,32 @@ const EmitZIR = struct {
                    };
                    break :blk &new_inst.base;
                },

                .br => blk: {
                    const old_inst = inst.castTag(.br).?;
                    const new_block = self.block_table.get(old_inst.block).?;
                    const new_inst = try self.arena.allocator.create(Inst.Break);
                    new_inst.* = .{
                        .base = .{
                            .src = inst.src,
                            .tag = Inst.Break.base_tag,
                        },
                        .positionals = .{
                            .block = new_block,
                            .operand = try self.resolveInst(new_body, old_inst.operand),
                        },
                        .kw_args = .{},
                    };
                    break :blk &new_inst.base;
                },
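                // Editor's note: both `br` and `brvoid` resolve their target
                // through block_table, which the `block` case populates
                // before emitting the block's body; a branch to an enclosing
                // block is therefore always emitted after its target has been
                // registered, so the `.?` lookups here cannot fail.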

                .call => blk: {
                    const old_inst = inst.cast(ir.Inst.Call).?;
                    const old_inst = inst.castTag(.call).?;
                    const new_inst = try self.arena.allocator.create(Inst.Call);

                    const args = try self.arena.allocator.alloc(*Inst, old_inst.args.args.len);
                    const args = try self.arena.allocator.alloc(*Inst, old_inst.args.len);
                    for (args) |*elem, i| {
                        elem.* = try self.resolveInst(new_body, old_inst.args.args[i]);
                        elem.* = try self.resolveInst(new_body, old_inst.args[i]);
                    }
                    new_inst.* = .{
                        .base = .{

@@ -1773,48 +1820,31 @@ const EmitZIR = struct {
                            .tag = Inst.Call.base_tag,
                        },
                        .positionals = .{
                            .func = try self.resolveInst(new_body, old_inst.args.func),
                            .func = try self.resolveInst(new_body, old_inst.func),
                            .args = args,
                        },
                        .kw_args = .{},
                    };
                    break :blk &new_inst.base;
                },
                .unreach => try self.emitTrivial(inst.src, Inst.Unreachable),
                .ret => blk: {
                    const old_inst = inst.cast(ir.Inst.Ret).?;
                    const new_inst = try self.arena.allocator.create(Inst.Return);
                    new_inst.* = .{
                        .base = .{
                            .src = inst.src,
                            .tag = Inst.Return.base_tag,
                        },
                        .positionals = .{
                            .operand = try self.resolveInst(new_body, old_inst.args.operand),
                        },
                        .kw_args = .{},
                    };
                    break :blk &new_inst.base;
                },
                .retvoid => try self.emitTrivial(inst.src, Inst.ReturnVoid),
                .constant => unreachable, // excluded from function bodies

                .assembly => blk: {
                    const old_inst = inst.cast(ir.Inst.Assembly).?;
                    const old_inst = inst.castTag(.assembly).?;
                    const new_inst = try self.arena.allocator.create(Inst.Asm);

                    const inputs = try self.arena.allocator.alloc(*Inst, old_inst.args.inputs.len);
                    const inputs = try self.arena.allocator.alloc(*Inst, old_inst.inputs.len);
                    for (inputs) |*elem, i| {
                        elem.* = (try self.emitStringLiteral(inst.src, old_inst.args.inputs[i])).inst;
                        elem.* = (try self.emitStringLiteral(inst.src, old_inst.inputs[i])).inst;
                    }

                    const clobbers = try self.arena.allocator.alloc(*Inst, old_inst.args.clobbers.len);
                    const clobbers = try self.arena.allocator.alloc(*Inst, old_inst.clobbers.len);
                    for (clobbers) |*elem, i| {
                        elem.* = (try self.emitStringLiteral(inst.src, old_inst.args.clobbers[i])).inst;
                        elem.* = (try self.emitStringLiteral(inst.src, old_inst.clobbers[i])).inst;
                    }

                    const args = try self.arena.allocator.alloc(*Inst, old_inst.args.args.len);
                    const args = try self.arena.allocator.alloc(*Inst, old_inst.args.len);
                    for (args) |*elem, i| {
                        elem.* = try self.resolveInst(new_body, old_inst.args.args[i]);
                        elem.* = try self.resolveInst(new_body, old_inst.args[i]);
                    }

                    new_inst.* = .{

@@ -1823,12 +1853,12 @@ const EmitZIR = struct {
                            .tag = Inst.Asm.base_tag,
                        },
                        .positionals = .{
                            .asm_source = (try self.emitStringLiteral(inst.src, old_inst.args.asm_source)).inst,
                            .asm_source = (try self.emitStringLiteral(inst.src, old_inst.asm_source)).inst,
                            .return_type = (try self.emitType(inst.src, inst.ty)).inst,
                        },
                        .kw_args = .{
                            .@"volatile" = old_inst.args.is_volatile,
                            .output = if (old_inst.args.output) |o|
                            .@"volatile" = old_inst.is_volatile,
                            .output = if (old_inst.output) |o|
                                (try self.emitStringLiteral(inst.src, o)).inst
                            else
                                null,

@@ -1839,65 +1869,18 @@ const EmitZIR = struct {
                    };
                    break :blk &new_inst.base;
                },
                .ptrtoint => blk: {
                    const old_inst = inst.cast(ir.Inst.PtrToInt).?;
                    const new_inst = try self.arena.allocator.create(Inst.PtrToInt);
                    new_inst.* = .{
                        .base = .{
                            .src = inst.src,
                            .tag = Inst.PtrToInt.base_tag,
                        },
                        .positionals = .{
                            .ptr = try self.resolveInst(new_body, old_inst.args.ptr),
                        },
                        .kw_args = .{},
                    };
                    break :blk &new_inst.base;
                },
                .bitcast => blk: {
                    const old_inst = inst.cast(ir.Inst.BitCast).?;
                    const new_inst = try self.arena.allocator.create(Inst.BitCast);
                    new_inst.* = .{
                        .base = .{
                            .src = inst.src,
                            .tag = Inst.BitCast.base_tag,
                        },
                        .positionals = .{
                            .dest_type = (try self.emitType(inst.src, inst.ty)).inst,
                            .operand = try self.resolveInst(new_body, old_inst.args.operand),
                        },
                        .kw_args = .{},
                    };
                    break :blk &new_inst.base;
                },
                .cmp => blk: {
                    const old_inst = inst.cast(ir.Inst.Cmp).?;
                    const new_inst = try self.arena.allocator.create(Inst.Cmp);
                    new_inst.* = .{
                        .base = .{
                            .src = inst.src,
                            .tag = Inst.Cmp.base_tag,
                        },
                        .positionals = .{
                            .lhs = try self.resolveInst(new_body, old_inst.args.lhs),
                            .rhs = try self.resolveInst(new_body, old_inst.args.rhs),
                            .op = old_inst.args.op,
                        },
                        .kw_args = .{},
                    };
                    break :blk &new_inst.base;
                },

                .condbr => blk: {
                    const old_inst = inst.cast(ir.Inst.CondBr).?;
                    const old_inst = inst.castTag(.condbr).?;

                    var true_body = std.ArrayList(*Inst).init(self.allocator);
                    var false_body = std.ArrayList(*Inst).init(self.allocator);
                    var then_body = std.ArrayList(*Inst).init(self.allocator);
                    var else_body = std.ArrayList(*Inst).init(self.allocator);

                    defer true_body.deinit();
                    defer false_body.deinit();
                    defer then_body.deinit();
                    defer else_body.deinit();

                    try self.emitBody(old_inst.args.true_body, inst_table, &true_body);
                    try self.emitBody(old_inst.args.false_body, inst_table, &false_body);
                    try self.emitBody(old_inst.then_body, inst_table, &then_body);
                    try self.emitBody(old_inst.else_body, inst_table, &else_body);

                    const new_inst = try self.arena.allocator.create(Inst.CondBr);
                    new_inst.* = .{

@@ -1906,39 +1889,9 @@ const EmitZIR = struct {
                            .tag = Inst.CondBr.base_tag,
                        },
                        .positionals = .{
                            .condition = try self.resolveInst(new_body, old_inst.args.condition),
                            .true_body = .{ .instructions = true_body.toOwnedSlice() },
                            .false_body = .{ .instructions = false_body.toOwnedSlice() },
                        },
                        .kw_args = .{},
                    };
                    break :blk &new_inst.base;
                },
                .isnull => blk: {
                    const old_inst = inst.cast(ir.Inst.IsNull).?;
                    const new_inst = try self.arena.allocator.create(Inst.IsNull);
                    new_inst.* = .{
                        .base = .{
                            .src = inst.src,
                            .tag = Inst.IsNull.base_tag,
                        },
                        .positionals = .{
                            .operand = try self.resolveInst(new_body, old_inst.args.operand),
                        },
                        .kw_args = .{},
                    };
                    break :blk &new_inst.base;
                },
                .isnonnull => blk: {
                    const old_inst = inst.cast(ir.Inst.IsNonNull).?;
                    const new_inst = try self.arena.allocator.create(Inst.IsNonNull);
                    new_inst.* = .{
                        .base = .{
                            .src = inst.src,
                            .tag = Inst.IsNonNull.base_tag,
                        },
                        .positionals = .{
                            .operand = try self.resolveInst(new_body, old_inst.args.operand),
                            .condition = try self.resolveInst(new_body, old_inst.condition),
                            .then_body = .{ .instructions = then_body.toOwnedSlice() },
                            .else_body = .{ .instructions = else_body.toOwnedSlice() },
                        },
                        .kw_args = .{},
                    };
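                    // Editor's note: each branch body is accumulated in a
                    // temporary ArrayList and then moved into the instruction
                    // with toOwnedSlice(), which empties the list, so the
                    // deferred deinit() calls above free nothing extra.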

@@ -267,5 +267,42 @@ pub fn addCases(ctx: *TestContext) !void {
        ,
            "",
        );

        // Requires a second move. The register allocator should figure out to re-use rax.
        case.addCompareOutput(
            \\export fn _start() noreturn {
            \\    add(3, 4);
            \\
            \\    exit();
            \\}
            \\
            \\fn add(a: u32, b: u32) void {
            \\    const c = a + b; // 7
            \\    const d = a + c; // 10
            \\    const e = d + b; // 14
            \\    const f = d + e; // 24
            \\    const g = e + f; // 38
            \\    const h = f + g; // 62
            \\    const i = g + h; // 100
            \\    const j = i + d; // 110
            \\    assert(j == 110);
            \\}
            \\
            \\pub fn assert(ok: bool) void {
            \\    if (!ok) unreachable; // assertion failure
            \\}
            \\
            \\fn exit() noreturn {
            \\    asm volatile ("syscall"
            \\        :
            \\        : [number] "{rax}" (231),
            \\          [arg1] "{rdi}" (0)
            \\        : "rcx", "r11", "memory"
            \\    );
            \\    unreachable;
            \\}
        ,
            "",
        );
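        // Editor's note: a standalone sanity check of the constant chain in
        // the case above (not part of this commit; assumes present-day
        // std.testing):
        //
        //     const std = @import("std");
        //
        //     test "operand death chain arithmetic" {
        //         const a: u32 = 3;
        //         const b: u32 = 4;
        //         const c = a + b; // 7
        //         const d = a + c; // 10
        //         const e = d + b; // 14
        //         const f = d + e; // 24
        //         const g = e + f; // 38
        //         const h = f + g; // 62
        //         const i = g + h; // 100
        //         const j = i + d; // 110
        //         try std.testing.expectEqual(@as(u32, 110), j);
        //     }
        //
        // The chain is written so that intermediate values die at different
        // points, exercising operand-death handling in the register allocator.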
    }
}