stage2: implement while loops (bool condition)

 * introduce a dump() function on Module.Fn which prints the ZIR
   representation of a function to stderr (it can be called before
   attempting to codegen the function). This is a debugging tool; a
   usage sketch follows this list.
 * implement x86 codegen for loops
 * liveness: fix analysis of conditional branches. The logic was buggy
   in two ways:
   - it never actually saved the results into the IR instruction (fixed now)
   - it incorrectly labeled operands as dying when their true death was
     after the conditional branch ended (fixed now)
 * enhance ZIR rendering to show liveness analysis results; this helps
   when debugging liveness analysis
 * fix a bug in ZIR rendering that numbered instructions incorrectly
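A minimal usage sketch of the dump() helper (the call site here is
hypothetical; one of the diffs below carries an equivalent call, commented
out):

    // Given a typed_value known to hold a function, and the enclosing
    // Module, print the function's ZIR to stderr before codegen:
    if (typed_value.val.cast(Value.Payload.Function)) |payload| {
        payload.func.dump(module.*);
    }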

closes #6021
Andrew Kelley
2020-08-13 20:27:25 -07:00
parent 576581bd7b
commit 28a9da8bfc
8 changed files with 355 additions and 137 deletions


@@ -747,6 +747,7 @@ test "math.negateCast" {
/// Cast an integer to a different integer type. If the value doesn't fit,
/// return an error.
/// TODO make this an optional not an error.
pub fn cast(comptime T: type, x: anytype) (error{Overflow}!T) {
comptime assert(@typeInfo(T) == .Int); // must pass an integer
comptime assert(@typeInfo(@TypeOf(x)) == .Int); // must pass an integer
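
As an aside (not part of this diff), a sketch of how the current
error-returning form behaves; per the TODO above, it is slated to become
an optional instead:

const std = @import("std");
const math = std.math;

test "math.cast returns error.Overflow when the value does not fit" {
    const big: u32 = 300;
    // 300 does not fit in a u8, so cast returns error.Overflow.
    if (math.cast(u8, big)) |_| unreachable else |err| {
        std.debug.assert(err == error.Overflow);
    }
    // 200 fits, so the cast succeeds.
    const small: u32 = 200;
    std.debug.assert((math.cast(u8, small) catch unreachable) == 200);
}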


@@ -301,6 +301,23 @@ pub const Fn = struct {
body: zir.Module.Body,
arena: std.heap.ArenaAllocator.State,
};
/// For debugging purposes.
pub fn dump(self: *Fn, mod: Module) void {
std.debug.print("Module.Function(name={}) ", .{self.owner_decl.name});
switch (self.analysis) {
.queued => {
std.debug.print("queued\n", .{});
},
.in_progress => {
std.debug.print("in_progress\n", .{});
},
else => {
std.debug.print("\n", .{});
zir.dumpFn(mod, self);
},
}
}
};
pub const Scope = struct {


@@ -23,8 +23,6 @@ pub const BlockData = struct {
relocs: std.ArrayListUnmanaged(Reloc) = .{},
};
pub const LoopData = struct {
};
pub const Reloc = union(enum) {
/// The value is an offset into the `Function` `code` from the beginning.
/// To perform the reloc, write 32-bit signed little-endian integer
@@ -556,7 +554,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
fn genBody(self: *Self, body: ir.Body) InnerError!void {
const inst_table = &self.branch_stack.items[0].inst_table;
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
const inst_table = &branch.inst_table;
for (body.instructions) |inst| {
const new_inst = try self.genFuncInst(inst);
try inst_table.putNoClobber(self.gpa, inst, new_inst);
@@ -1284,6 +1283,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
fn genCondBr(self: *Self, inst: *ir.Inst.CondBr) !MCValue {
// TODO Rework this so that the arch-independent logic isn't buried and duplicated.
switch (arch) {
.x86_64 => {
try self.code.ensureCapacity(self.code.items.len + 6);
@@ -1336,6 +1336,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
fn genX86CondBr(self: *Self, inst: *ir.Inst.CondBr, opcode: u8) !MCValue {
// TODO deal with liveness / deaths condbr's then_entry_deaths and else_entry_deaths
self.code.appendSliceAssumeCapacity(&[_]u8{ 0x0f, opcode });
const reloc = Reloc{ .rel32 = self.code.items.len };
self.code.items.len += 4;
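
As an illustration of the Reloc mechanism above (an assumption about how
the reserved bytes get patched, not code from this commit): the 4 bytes
reserved here are later overwritten with a displacement measured from the
end of the reserved region.

// Hypothetical fixup, with `reloc_offset` being the saved `.rel32` value:
const target = self.code.items.len; // e.g. end of the skipped-over body
const delta = @intCast(i32, target) - @intCast(i32, reloc_offset + 4);
mem.writeIntLittle(i32, self.code.items[reloc_offset..][0..4], delta);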
@@ -1360,14 +1361,36 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
fn genLoop(self: *Self, inst: *ir.Inst.Loop) !MCValue {
return self.fail(inst.base.src, "TODO codegen loop", .{});
// A loop is a setup to be able to jump back to the beginning.
const start_index = self.code.items.len;
try self.genBody(inst.body);
try self.jump(inst.base.src, start_index);
return MCValue.unreach;
}
/// Send control flow to the `index` of `self.code`.
fn jump(self: *Self, src: usize, index: usize) !void {
switch (arch) {
.i386, .x86_64 => {
try self.code.ensureCapacity(self.code.items.len + 5);
if (math.cast(i8, @intCast(i32, index) - (@intCast(i32, self.code.items.len + 2)))) |delta| {
self.code.appendAssumeCapacity(0xeb); // jmp rel8
self.code.appendAssumeCapacity(@bitCast(u8, delta));
} else |_| {
const delta = @intCast(i32, index) - (@intCast(i32, self.code.items.len + 5));
self.code.appendAssumeCapacity(0xe9); // jmp rel32
mem.writeIntLittle(i32, self.code.addManyAsArrayAssumeCapacity(4), delta);
}
},
else => return self.fail(src, "TODO implement jump for {}", .{self.target.cpu.arch}),
}
}
fn genBlock(self: *Self, inst: *ir.Inst.Block) !MCValue {
if (inst.base.ty.hasCodeGenBits()) {
return self.fail(inst.base.src, "TODO codegen Block with non-void type", .{});
}
// A block is nothing but a setup to be able to jump to the end.
// A block is a setup to be able to jump to the end.
defer inst.codegen.relocs.deinit(self.gpa);
try self.genBody(inst.body);
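
A side note on jump() above: jmp rel8 is 2 bytes and jmp rel32 is 5, and
x86 measures the displacement from the end of the instruction, hence the
`+ 2` and `+ 5`. A standalone sanity check of that arithmetic
(illustrative values, not part of the commit):

const std = @import("std");
const assert = std.debug.assert;

test "backward jmp displacement is measured from instruction end" {
    const start_index: i32 = 0x10; // loop start within self.code
    const code_len: i32 = 0x30; // current end of self.code
    // Short form: opcode 0xEB + imm8 = 2 bytes total.
    const rel8_delta = start_index - (code_len + 2);
    assert(rel8_delta == -0x22);
    assert(rel8_delta >= std.math.minInt(i8)); // fits, so 0xEB is chosen
    // Long form: opcode 0xE9 + imm32 = 5 bytes total.
    const rel32_delta = start_index - (code_len + 5);
    assert(rel32_delta == -0x25);
}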


@@ -372,11 +372,11 @@ pub const Inst = struct {
then_body: Body,
else_body: Body,
/// Set of instructions whose lifetimes end at the start of one of the branches.
/// The `true` branch is first: `deaths[0..true_death_count]`.
/// The `false` branch is next: `(deaths + true_death_count)[..false_death_count]`.
/// The `then` branch is first: `deaths[0..then_death_count]`.
/// The `else` branch is next: `(deaths + then_death_count)[0..else_death_count]`.
deaths: [*]*Inst = undefined,
true_death_count: u32 = 0,
false_death_count: u32 = 0,
then_death_count: u32 = 0,
else_death_count: u32 = 0,
pub fn operandCount(self: *const CondBr) usize {
return 1;
@@ -390,6 +390,12 @@ pub const Inst = struct {
return null;
}
pub fn thenDeaths(self: *const CondBr) []*Inst {
return self.deaths[0..self.then_death_count];
}
pub fn elseDeaths(self: *const CondBr) []*Inst {
return (self.deaths + self.then_death_count)[0..self.else_death_count];
}
};
pub const Constant = struct {
@@ -411,8 +417,6 @@ pub const Inst = struct {
base: Inst,
body: Body,
/// This memory is reserved for codegen code to do whatever it needs to here.
codegen: codegen.LoopData = .{},
pub fn operandCount(self: *const Loop) usize {
return 0;
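
To make the CondBr deaths layout above concrete: both branches' entry
deaths share a single arena allocation, split by then_death_count. A toy
sketch using bytes in place of *Inst pointers (illustrative only):

const std = @import("std");
const assert = std.debug.assert;

test "condbr deaths: both branches share one allocation" {
    var storage = [_]u8{ 't', 'T', 'e' };
    const deaths: [*]u8 = &storage;
    const then_death_count: u32 = 2;
    const else_death_count: u32 = 1;
    const then_deaths = deaths[0..then_death_count]; // 't', 'T'
    const else_deaths = (deaths + then_death_count)[0..else_death_count]; // 'e'
    assert(then_deaths.len == 2);
    assert(else_deaths[0] == 'e');
}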


@@ -1887,6 +1887,8 @@ pub const File = struct {
else => false,
};
if (is_fn) {
//typed_value.val.cast(Value.Payload.Function).?.func.dump(module.*);
// For functions we need to add a prologue to the debug line program.
try dbg_line_buffer.ensureCapacity(26);


@@ -16,20 +16,42 @@ pub fn analyze(
var table = std.AutoHashMap(*ir.Inst, void).init(gpa);
defer table.deinit();
try table.ensureCapacity(body.instructions.len);
try analyzeWithTable(arena, &table, body);
try analyzeWithTable(arena, &table, null, body);
}
fn analyzeWithTable(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void), body: ir.Body) error{OutOfMemory}!void {
fn analyzeWithTable(
arena: *std.mem.Allocator,
table: *std.AutoHashMap(*ir.Inst, void),
new_set: ?*std.AutoHashMap(*ir.Inst, void),
body: ir.Body,
) error{OutOfMemory}!void {
var i: usize = body.instructions.len;
while (i != 0) {
i -= 1;
const base = body.instructions[i];
try analyzeInst(arena, table, base);
if (new_set) |ns| {
// We are only interested in doing this for instructions which are born
// before a conditional branch, so after obtaining the new set for
// each branch we prune the instructions which were born within.
while (i != 0) {
i -= 1;
const base = body.instructions[i];
_ = ns.remove(base);
try analyzeInst(arena, table, new_set, base);
}
} else {
while (i != 0) {
i -= 1;
const base = body.instructions[i];
try analyzeInst(arena, table, new_set, base);
}
}
}
fn analyzeInst(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void), base: *ir.Inst) error{OutOfMemory}!void {
fn analyzeInst(
arena: *std.mem.Allocator,
table: *std.AutoHashMap(*ir.Inst, void),
new_set: ?*std.AutoHashMap(*ir.Inst, void),
base: *ir.Inst,
) error{OutOfMemory}!void {
if (table.contains(base)) {
base.deaths = 0;
} else {
@@ -42,56 +64,70 @@ fn analyzeInst(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void
.constant => return,
.block => {
const inst = base.castTag(.block).?;
try analyzeWithTable(arena, table, inst.body);
try analyzeWithTable(arena, table, new_set, inst.body);
// We let this continue so that it can possibly mark the block as
// unreferenced below.
},
.loop => {
const inst = base.castTag(.loop).?;
try analyzeWithTable(arena, table, new_set, inst.body);
return; // Loop has no operands and it is always unreferenced.
},
.condbr => {
const inst = base.castTag(.condbr).?;
var true_table = std.AutoHashMap(*ir.Inst, void).init(table.allocator);
defer true_table.deinit();
try true_table.ensureCapacity(inst.then_body.instructions.len);
try analyzeWithTable(arena, &true_table, inst.then_body);
var false_table = std.AutoHashMap(*ir.Inst, void).init(table.allocator);
defer false_table.deinit();
try false_table.ensureCapacity(inst.else_body.instructions.len);
try analyzeWithTable(arena, &false_table, inst.else_body);
// Each death that occurs inside one branch, but not the other, needs
// to be added as a death immediately upon entering the other branch.
// During the iteration of the table, we additionally propagate the
// deaths to the parent table.
var true_entry_deaths = std.ArrayList(*ir.Inst).init(table.allocator);
defer true_entry_deaths.deinit();
var false_entry_deaths = std.ArrayList(*ir.Inst).init(table.allocator);
defer false_entry_deaths.deinit();
{
var it = false_table.iterator();
while (it.next()) |entry| {
const false_death = entry.key;
if (!true_table.contains(false_death)) {
try true_entry_deaths.append(false_death);
// Here we are only adding to the parent table if the following iteration
// would miss it.
try table.putNoClobber(false_death, {});
}
var then_table = std.AutoHashMap(*ir.Inst, void).init(table.allocator);
defer then_table.deinit();
try analyzeWithTable(arena, table, &then_table, inst.then_body);
// Reset the table back to its state from before the branch.
for (then_table.items()) |entry| {
table.removeAssertDiscard(entry.key);
}
var else_table = std.AutoHashMap(*ir.Inst, void).init(table.allocator);
defer else_table.deinit();
try analyzeWithTable(arena, table, &else_table, inst.else_body);
var then_entry_deaths = std.ArrayList(*ir.Inst).init(table.allocator);
defer then_entry_deaths.deinit();
var else_entry_deaths = std.ArrayList(*ir.Inst).init(table.allocator);
defer else_entry_deaths.deinit();
for (else_table.items()) |entry| {
const else_death = entry.key;
if (!then_table.contains(else_death)) {
try then_entry_deaths.append(else_death);
}
}
{
var it = true_table.iterator();
while (it.next()) |entry| {
const true_death = entry.key;
try table.putNoClobber(true_death, {});
if (!false_table.contains(true_death)) {
try false_entry_deaths.append(true_death);
}
// This loop is the same, except it's for the then branch, and it additionally
// has to put its items back into the table to undo the reset.
for (then_table.items()) |entry| {
const then_death = entry.key;
if (!else_table.contains(then_death)) {
try else_entry_deaths.append(then_death);
}
_ = try table.put(then_death, {});
}
// Now we have to correctly populate new_set.
if (new_set) |ns| {
try ns.ensureCapacity(ns.items().len + then_table.items().len + else_table.items().len);
for (then_table.items()) |entry| {
_ = ns.putAssumeCapacity(entry.key, {});
}
for (else_table.items()) |entry| {
_ = ns.putAssumeCapacity(entry.key, {});
}
}
inst.true_death_count = std.math.cast(@TypeOf(inst.true_death_count), true_entry_deaths.items.len) catch return error.OutOfMemory;
inst.false_death_count = std.math.cast(@TypeOf(inst.false_death_count), false_entry_deaths.items.len) catch return error.OutOfMemory;
const allocated_slice = try arena.alloc(*ir.Inst, true_entry_deaths.items.len + false_entry_deaths.items.len);
inst.then_death_count = std.math.cast(@TypeOf(inst.then_death_count), then_entry_deaths.items.len) catch return error.OutOfMemory;
inst.else_death_count = std.math.cast(@TypeOf(inst.else_death_count), else_entry_deaths.items.len) catch return error.OutOfMemory;
const allocated_slice = try arena.alloc(*ir.Inst, then_entry_deaths.items.len + else_entry_deaths.items.len);
inst.deaths = allocated_slice.ptr;
std.mem.copy(*ir.Inst, inst.thenDeaths(), then_entry_deaths.items);
std.mem.copy(*ir.Inst, inst.elseDeaths(), else_entry_deaths.items);
// Continue on with the instruction analysis. The following code will find the condition
// instruction, and the deaths flag for the CondBr instruction will indicate whether the
@@ -108,6 +144,7 @@ fn analyzeInst(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void
if (prev == null) {
// Death.
base.deaths |= @as(ir.Inst.DeathsInt, 1) << bit_i;
if (new_set) |ns| try ns.putNoClobber(operand, {});
}
}
} else {
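
To make the branch bookkeeping above concrete, a hand-traced example
(hypothetical instruction numbers, not compiler output):

// %0 = ...            ; born before the condbr
// condbr %c, {
//   %1 = not(%0)      ; the last use of %0 anywhere
// }, {
//   ...               ; %0 unused here
// }

Walking backward, analyzing then_body records %0 in both the parent table
and then_table. The parent table is then reset so that else_body is
analyzed as if the then branch had not run. Since %0 is in then_table but
not else_table, it is appended to else_entry_deaths: it must die
immediately upon entering the else branch. Afterwards %0 is put back into
the parent table so that, above the condbr, earlier occurrences of %0 are
not also marked as deaths. Instructions born within a branch (such as %1)
are pruned from new_set as the walk passes their definitions (the
`_ = ns.remove(base);` above), since only operands that outlive the branch
matter to the enclosing condbr.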


@@ -822,6 +822,16 @@ pub const Module = struct {
decls: []*Decl,
arena: std.heap.ArenaAllocator,
error_msg: ?ErrorMsg = null,
metadata: std.AutoHashMap(*Inst, MetaData),
body_metadata: std.AutoHashMap(*Body, BodyMetaData),
pub const MetaData = struct {
deaths: ir.Inst.DeathsInt,
};
pub const BodyMetaData = struct {
deaths: []*Inst,
};
pub const Body = struct {
instructions: []*Inst,
@@ -878,6 +888,7 @@ pub const Module = struct {
.loop_table = std.AutoHashMap(*Inst.Loop, []const u8).init(allocator),
.arena = std.heap.ArenaAllocator.init(allocator),
.indent = 2,
.next_instr_index = undefined,
};
defer write.arena.deinit();
defer write.inst_table.deinit();
@@ -889,15 +900,10 @@ pub const Module = struct {
for (self.decls) |decl, decl_i| {
try write.inst_table.putNoClobber(decl.inst, .{ .inst = decl.inst, .index = null, .name = decl.name });
if (decl.inst.cast(Inst.Fn)) |fn_inst| {
for (fn_inst.positionals.body.instructions) |inst, inst_i| {
try write.inst_table.putNoClobber(inst, .{ .inst = inst, .index = inst_i, .name = undefined });
}
}
}
for (self.decls) |decl, i| {
write.next_instr_index = 0;
try stream.print("@{} ", .{decl.name});
try write.writeInstToStream(stream, decl.inst);
try stream.writeByte('\n');
@@ -914,6 +920,7 @@ const Writer = struct {
loop_table: std.AutoHashMap(*Inst.Loop, []const u8),
arena: std.heap.ArenaAllocator,
indent: usize,
next_instr_index: usize,
fn writeInstToStream(
self: *Writer,
@@ -944,7 +951,7 @@ const Writer = struct {
if (i != 0) {
try stream.writeAll(", ");
}
try self.writeParamToStream(stream, @field(inst.positionals, arg_field.name));
try self.writeParamToStream(stream, &@field(inst.positionals, arg_field.name));
}
comptime var need_comma = pos_fields.len != 0;
@@ -954,13 +961,13 @@ const Writer = struct {
if (@field(inst.kw_args, arg_field.name)) |non_optional| {
if (need_comma) try stream.writeAll(", ");
try stream.print("{}=", .{arg_field.name});
try self.writeParamToStream(stream, non_optional);
try self.writeParamToStream(stream, &non_optional);
need_comma = true;
}
} else {
if (need_comma) try stream.writeAll(", ");
try stream.print("{}=", .{arg_field.name});
try self.writeParamToStream(stream, @field(inst.kw_args, arg_field.name));
try self.writeParamToStream(stream, &@field(inst.kw_args, arg_field.name));
need_comma = true;
}
}
@@ -968,7 +975,8 @@ const Writer = struct {
try stream.writeByte(')');
}
fn writeParamToStream(self: *Writer, stream: anytype, param: anytype) !void {
fn writeParamToStream(self: *Writer, stream: anytype, param_ptr: anytype) !void {
const param = param_ptr.*;
if (@typeInfo(@TypeOf(param)) == .Enum) {
return stream.writeAll(@tagName(param));
}
@@ -986,18 +994,36 @@ const Writer = struct {
},
Module.Body => {
try stream.writeAll("{\n");
for (param.instructions) |inst, i| {
if (self.module.body_metadata.get(param_ptr)) |metadata| {
if (metadata.deaths.len > 0) {
try stream.writeByteNTimes(' ', self.indent);
try stream.writeAll("; deaths={");
for (metadata.deaths) |death, i| {
if (i != 0) try stream.writeAll(", ");
try self.writeInstParamToStream(stream, death);
}
try stream.writeAll("}\n");
}
}
for (param.instructions) |inst| {
const my_i = self.next_instr_index;
self.next_instr_index += 1;
try self.inst_table.putNoClobber(inst, .{ .inst = inst, .index = my_i, .name = undefined });
try stream.writeByteNTimes(' ', self.indent);
try stream.print("%{} ", .{i});
try stream.print("%{} ", .{my_i});
if (inst.cast(Inst.Block)) |block| {
const name = try std.fmt.allocPrint(&self.arena.allocator, "label_{}", .{i});
const name = try std.fmt.allocPrint(&self.arena.allocator, "label_{}", .{my_i});
try self.block_table.put(block, name);
} else if (inst.cast(Inst.Loop)) |loop| {
const name = try std.fmt.allocPrint(&self.arena.allocator, "loop_{}", .{i});
const name = try std.fmt.allocPrint(&self.arena.allocator, "loop_{}", .{my_i});
try self.loop_table.put(loop, name);
}
self.indent += 2;
try self.writeInstToStream(stream, inst);
if (self.module.metadata.get(inst)) |metadata| {
try stream.print(" ; deaths=0b{b}", .{metadata.deaths});
}
self.indent -= 2;
try stream.writeByte('\n');
}
@@ -1070,6 +1096,8 @@ pub fn parse(allocator: *Allocator, source: [:0]const u8) Allocator.Error!Module
.decls = parser.decls.toOwnedSlice(allocator),
.arena = parser.arena,
.error_msg = parser.error_msg,
.metadata = std.AutoHashMap(*Inst, Module.MetaData).init(allocator),
.body_metadata = std.AutoHashMap(*Module.Body, Module.BodyMetaData).init(allocator),
};
}
@@ -1478,7 +1506,11 @@ pub fn emit(allocator: *Allocator, old_module: IrModule) !Module {
.indent = 0,
.block_table = std.AutoHashMap(*ir.Inst.Block, *Inst.Block).init(allocator),
.loop_table = std.AutoHashMap(*ir.Inst.Loop, *Inst.Loop).init(allocator),
.metadata = std.AutoHashMap(*Inst, Module.MetaData).init(allocator),
.body_metadata = std.AutoHashMap(*Module.Body, Module.BodyMetaData).init(allocator),
};
defer ctx.metadata.deinit();
defer ctx.body_metadata.deinit();
defer ctx.block_table.deinit();
defer ctx.loop_table.deinit();
defer ctx.decls.deinit(allocator);
@@ -1491,9 +1523,52 @@ pub fn emit(allocator: *Allocator, old_module: IrModule) !Module {
return Module{
.decls = ctx.decls.toOwnedSlice(allocator),
.arena = ctx.arena,
.metadata = ctx.metadata,
.body_metadata = ctx.body_metadata,
};
}
/// For debugging purposes, prints a function representation to stderr.
pub fn dumpFn(old_module: IrModule, module_fn: *IrModule.Fn) void {
const allocator = old_module.gpa;
var ctx: EmitZIR = .{
.allocator = allocator,
.decls = .{},
.arena = std.heap.ArenaAllocator.init(allocator),
.old_module = &old_module,
.next_auto_name = 0,
.names = std.StringHashMap(void).init(allocator),
.primitive_table = std.AutoHashMap(Inst.Primitive.Builtin, *Decl).init(allocator),
.indent = 0,
.block_table = std.AutoHashMap(*ir.Inst.Block, *Inst.Block).init(allocator),
.loop_table = std.AutoHashMap(*ir.Inst.Loop, *Inst.Loop).init(allocator),
.metadata = std.AutoHashMap(*Inst, Module.MetaData).init(allocator),
.body_metadata = std.AutoHashMap(*Module.Body, Module.BodyMetaData).init(allocator),
};
defer ctx.metadata.deinit();
defer ctx.body_metadata.deinit();
defer ctx.block_table.deinit();
defer ctx.loop_table.deinit();
defer ctx.decls.deinit(allocator);
defer ctx.names.deinit();
defer ctx.primitive_table.deinit();
defer ctx.arena.deinit();
const fn_ty = module_fn.owner_decl.typed_value.most_recent.typed_value.ty;
_ = ctx.emitFn(module_fn, 0, fn_ty) catch |err| {
std.debug.print("unable to dump function: {}\n", .{err});
return;
};
var module = Module{
.decls = ctx.decls.items,
.arena = ctx.arena,
.metadata = ctx.metadata,
.body_metadata = ctx.body_metadata,
};
module.dump();
}
const EmitZIR = struct {
allocator: *Allocator,
arena: std.heap.ArenaAllocator,
@@ -1505,6 +1580,8 @@ const EmitZIR = struct {
indent: usize,
block_table: std.AutoHashMap(*ir.Inst.Block, *Inst.Block),
loop_table: std.AutoHashMap(*ir.Inst.Loop, *Inst.Loop),
metadata: std.AutoHashMap(*Inst, Module.MetaData),
body_metadata: std.AutoHashMap(*Module.Body, Module.BodyMetaData),
fn emit(self: *EmitZIR) !void {
// Put all the Decls in a list and sort them by name to avoid nondeterminism introduced
@@ -1604,7 +1681,7 @@ const EmitZIR = struct {
} else blk: {
break :blk (try self.emitTypedValue(inst.src, .{ .ty = inst.ty, .val = const_inst.val })).inst;
};
try new_body.inst_table.putNoClobber(inst, new_inst);
_ = try new_body.inst_table.put(inst, new_inst);
return new_inst;
} else {
return new_body.inst_table.get(inst).?;
@@ -1655,6 +1732,70 @@ const EmitZIR = struct {
return &declref_inst.base;
}
fn emitFn(self: *EmitZIR, module_fn: *IrModule.Fn, src: usize, ty: Type) Allocator.Error!*Decl {
var inst_table = std.AutoHashMap(*ir.Inst, *Inst).init(self.allocator);
defer inst_table.deinit();
var instructions = std.ArrayList(*Inst).init(self.allocator);
defer instructions.deinit();
switch (module_fn.analysis) {
.queued => unreachable,
.in_progress => unreachable,
.success => |body| {
try self.emitBody(body, &inst_table, &instructions);
},
.sema_failure => {
const err_msg = self.old_module.failed_decls.get(module_fn.owner_decl).?;
const fail_inst = try self.arena.allocator.create(Inst.CompileError);
fail_inst.* = .{
.base = .{
.src = src,
.tag = Inst.CompileError.base_tag,
},
.positionals = .{
.msg = try self.arena.allocator.dupe(u8, err_msg.msg),
},
.kw_args = .{},
};
try instructions.append(&fail_inst.base);
},
.dependency_failure => {
const fail_inst = try self.arena.allocator.create(Inst.CompileError);
fail_inst.* = .{
.base = .{
.src = src,
.tag = Inst.CompileError.base_tag,
},
.positionals = .{
.msg = try self.arena.allocator.dupe(u8, "depends on another failed Decl"),
},
.kw_args = .{},
};
try instructions.append(&fail_inst.base);
},
}
const fn_type = try self.emitType(src, ty);
const arena_instrs = try self.arena.allocator.alloc(*Inst, instructions.items.len);
mem.copy(*Inst, arena_instrs, instructions.items);
const fn_inst = try self.arena.allocator.create(Inst.Fn);
fn_inst.* = .{
.base = .{
.src = src,
.tag = Inst.Fn.base_tag,
},
.positionals = .{
.fn_type = fn_type.inst,
.body = .{ .instructions = arena_instrs },
},
.kw_args = .{},
};
return self.emitUnnamedDecl(&fn_inst.base);
}
fn emitTypedValue(self: *EmitZIR, src: usize, typed_value: TypedValue) Allocator.Error!*Decl {
const allocator = &self.arena.allocator;
if (typed_value.val.cast(Value.Payload.DeclRef)) |decl_ref| {
@@ -1718,68 +1859,7 @@ const EmitZIR = struct {
},
.Fn => {
const module_fn = typed_value.val.cast(Value.Payload.Function).?.func;
var inst_table = std.AutoHashMap(*ir.Inst, *Inst).init(self.allocator);
defer inst_table.deinit();
var instructions = std.ArrayList(*Inst).init(self.allocator);
defer instructions.deinit();
switch (module_fn.analysis) {
.queued => unreachable,
.in_progress => unreachable,
.success => |body| {
try self.emitBody(body, &inst_table, &instructions);
},
.sema_failure => {
const err_msg = self.old_module.failed_decls.get(module_fn.owner_decl).?;
const fail_inst = try self.arena.allocator.create(Inst.CompileError);
fail_inst.* = .{
.base = .{
.src = src,
.tag = Inst.CompileError.base_tag,
},
.positionals = .{
.msg = try self.arena.allocator.dupe(u8, err_msg.msg),
},
.kw_args = .{},
};
try instructions.append(&fail_inst.base);
},
.dependency_failure => {
const fail_inst = try self.arena.allocator.create(Inst.CompileError);
fail_inst.* = .{
.base = .{
.src = src,
.tag = Inst.CompileError.base_tag,
},
.positionals = .{
.msg = try self.arena.allocator.dupe(u8, "depends on another failed Decl"),
},
.kw_args = .{},
};
try instructions.append(&fail_inst.base);
},
}
const fn_type = try self.emitType(src, typed_value.ty);
const arena_instrs = try self.arena.allocator.alloc(*Inst, instructions.items.len);
mem.copy(*Inst, arena_instrs, instructions.items);
const fn_inst = try self.arena.allocator.create(Inst.Fn);
fn_inst.* = .{
.base = .{
.src = src,
.tag = Inst.Fn.base_tag,
},
.positionals = .{
.fn_type = fn_type.inst,
.body = .{ .instructions = arena_instrs },
},
.kw_args = .{},
};
return self.emitUnnamedDecl(&fn_inst.base);
return self.emitFn(module_fn, src, typed_value.ty);
},
.Array => {
// TODO more checks to make sure this can be emitted as a string literal
@@ -1810,7 +1890,7 @@ const EmitZIR = struct {
}
}
fn emitNoOp(self: *EmitZIR, src: usize, tag: Inst.Tag) Allocator.Error!*Inst {
fn emitNoOp(self: *EmitZIR, src: usize, old_inst: *ir.Inst.NoOp, tag: Inst.Tag) Allocator.Error!*Inst {
const new_inst = try self.arena.allocator.create(Inst.NoOp);
new_inst.* = .{
.base = .{
@@ -1902,10 +1982,10 @@ const EmitZIR = struct {
const new_inst = switch (inst.tag) {
.constant => unreachable, // excluded from function bodies
.breakpoint => try self.emitNoOp(inst.src, .breakpoint),
.unreach => try self.emitNoOp(inst.src, .@"unreachable"),
.retvoid => try self.emitNoOp(inst.src, .returnvoid),
.dbg_stmt => try self.emitNoOp(inst.src, .dbg_stmt),
.breakpoint => try self.emitNoOp(inst.src, inst.castTag(.breakpoint).?, .breakpoint),
.unreach => try self.emitNoOp(inst.src, inst.castTag(.unreach).?, .unreach_nocheck),
.retvoid => try self.emitNoOp(inst.src, inst.castTag(.retvoid).?, .returnvoid),
.dbg_stmt => try self.emitNoOp(inst.src, inst.castTag(.dbg_stmt).?, .dbg_stmt),
.not => try self.emitUnOp(inst.src, new_body, inst.castTag(.not).?, .boolnot),
.ret => try self.emitUnOp(inst.src, new_body, inst.castTag(.ret).?, .@"return"),
@@ -2119,10 +2199,24 @@ const EmitZIR = struct {
defer then_body.deinit();
defer else_body.deinit();
const then_deaths = try self.arena.allocator.alloc(*Inst, old_inst.thenDeaths().len);
const else_deaths = try self.arena.allocator.alloc(*Inst, old_inst.elseDeaths().len);
for (old_inst.thenDeaths()) |death, i| {
then_deaths[i] = try self.resolveInst(new_body, death);
}
for (old_inst.elseDeaths()) |death, i| {
else_deaths[i] = try self.resolveInst(new_body, death);
}
try self.emitBody(old_inst.then_body, inst_table, &then_body);
try self.emitBody(old_inst.else_body, inst_table, &else_body);
const new_inst = try self.arena.allocator.create(Inst.CondBr);
try self.body_metadata.put(&new_inst.positionals.then_body, .{ .deaths = then_deaths });
try self.body_metadata.put(&new_inst.positionals.else_body, .{ .deaths = else_deaths });
new_inst.* = .{
.base = .{
.src = inst.src,
@@ -2138,6 +2232,7 @@ const EmitZIR = struct {
break :blk &new_inst.base;
},
};
try self.metadata.put(new_inst, .{ .deaths = inst.deaths });
try instructions.append(new_inst);
try inst_table.put(inst, new_inst);
}
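
With metadata and body_metadata populated, the renderer annotates liveness
inline. A hypothetical excerpt (hand-written, not actual compiler output)
of a condbr's then-body:

{
  ; deaths={%1}
  %5 = add(%1, %2) ; deaths=0b11
  %6 = returnvoid()
}

The `; deaths={...}` header lists the instructions that die upon entering
the body (BodyMetaData), while the per-instruction `; deaths=0b...` suffix
is the operand-death bitmask (MetaData).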


@@ -465,5 +465,44 @@ pub fn addCases(ctx: *TestContext) !void {
,
"",
);
// While loops
case.addCompareOutput(
\\export fn _start() noreturn {
\\ var i: u32 = 0;
\\ while (i < 4) : (i += 1) print();
\\ assert(i == 4);
\\
\\ exit();
\\}
\\
\\fn print() void {
\\ asm volatile ("syscall"
\\ :
\\ : [number] "{rax}" (1),
\\ [arg1] "{rdi}" (1),
\\ [arg2] "{rsi}" (@ptrToInt("hello\n")),
\\ [arg3] "{rdx}" (6)
\\ : "rcx", "r11", "memory"
\\ );
\\ return;
\\}
\\
\\pub fn assert(ok: bool) void {
\\ if (!ok) unreachable; // assertion failure
\\}
\\
\\fn exit() noreturn {
\\ asm volatile ("syscall"
\\ :
\\ : [number] "{rax}" (231),
\\ [arg1] "{rdi}" (0)
\\ : "rcx", "r11", "memory"
\\ );
\\ unreachable;
\\}
,
"hello\nhello\nhello\nhello\n",
);
}
}