astgen: rework labeled blocks
@@ -717,7 +717,7 @@ pub const Scope = struct {
         label: ?Label = null,
         break_block: ?*zir.Inst.Block = null,
         continue_block: ?*zir.Inst.Block = null,
-        /// only valid if label != null or (continue_block and break_block) != null
+        /// Only valid when setBlockResultLoc is called.
         break_result_loc: astgen.ResultLoc = undefined,
         /// When a block has a pointer result location, here it is.
         rl_ptr: ?*zir.Inst = null,
@@ -726,6 +726,17 @@ pub const Scope = struct {
         /// whether to rely on break instructions or writing to the result
         /// pointer for the result instruction.
         rvalue_rl_count: usize = 0,
+        /// Keeps track of how many break instructions there are. When astgen is finished
+        /// with a block, it can check this against rvalue_rl_count to find out whether
+        /// the break instructions should be downgraded to break_void.
+        break_count: usize = 0,
+        /// Tracks `break :foo bar` instructions so they can possibly be elided later if
+        /// the labeled block ends up not needing a result location pointer.
+        labeled_breaks: std.ArrayListUnmanaged(*zir.Inst.Break) = .{},
+        /// Tracks `store_to_block_ptr` instructions that correspond to break instructions
+        /// so they can possibly be elided later if the labeled block ends up not needing
+        /// a result location pointer.
+        labeled_store_to_block_ptr_list: std.ArrayListUnmanaged(*zir.Inst.BinOp) = .{},

         pub const Label = struct {
             token: ast.TokenIndex,
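Note: for context, the construct these counters and lists track is a labeled
block whose breaks carry operands. A minimal sketch in user code (hypothetical
example, not taken from the test suite):

    const std = @import("std");

    test "labeled block break operands" {
        var cond = true;
        const x = blk: {
            // Each `break :blk ...` bumps break_count and, when the block has
            // a pointer result location, is recorded in labeled_breaks.
            if (cond) break :blk @as(u32, 1);
            break :blk @as(u32, 2);
        };
        try std.testing.expect(x == 1);
    }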
@@ -3495,18 +3506,18 @@ pub fn addSafetyCheck(mod: *Module, parent_block: *Scope.Block, ok: *Inst, panic
     };

     const ok_body: ir.Body = .{
-        .instructions = try parent_block.arena.alloc(*Inst, 1), // Only need space for the brvoid.
+        .instructions = try parent_block.arena.alloc(*Inst, 1), // Only need space for the br_void.
     };
-    const brvoid = try parent_block.arena.create(Inst.BrVoid);
-    brvoid.* = .{
+    const br_void = try parent_block.arena.create(Inst.BrVoid);
+    br_void.* = .{
         .base = .{
-            .tag = .brvoid,
+            .tag = .br_void,
             .ty = Type.initTag(.noreturn),
             .src = ok.src,
         },
         .block = block_inst,
     };
-    ok_body.instructions[0] = &brvoid.base;
+    ok_body.instructions[0] = &br_void.base;

     var fail_block: Scope.Block = .{
         .parent = parent_block,
src/astgen.zig
@@ -38,6 +38,21 @@ pub const ResultLoc = union(enum) {
     /// is inferred based on peer type resolution for a `zir.Inst.Block`.
     /// The result instruction from the expression must be ignored.
     block_ptr: *Module.Scope.GenZIR,
+
+    pub const Strategy = struct {
+        elide_store_to_block_ptr_instructions: bool,
+        tag: Tag,
+
+        pub const Tag = enum {
+            /// Both branches will use break_void; result location is used to communicate the
+            /// result instruction.
+            break_void,
+            /// Use break statements to pass the block result value, and call rvalue() at
+            /// the end depending on rl. Also elide the store_to_block_ptr instructions
+            /// depending on rl.
+            break_operand,
+        };
+    };
 };

 pub fn typeExpr(mod: *Module, scope: *Scope, type_node: *ast.Node) InnerError!*zir.Inst {
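Note: in terms of the result locations above, the strategy choice works out as
follows (this summarizes rlStrategy, added further down):

    .discard, .none, .ty, .ref      => break_operand (no store_to_block_ptr is emitted)
    .ptr                            => break_void (the pointer was forwarded to sub-expressions)
    .inferred_ptr, .bitcasted_ptr,
    .block_ptr                      => break_operand with the stores elided when
                                       rvalue_rl_count == break_count (no break consumed
                                       the result pointer); otherwise break_void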
@@ -348,10 +363,11 @@ pub fn comptimeExpr(mod: *Module, parent_scope: *Scope, rl: ResultLoc, node: *as
     return &block.base;
 }

-fn breakExpr(mod: *Module, parent_scope: *Scope, node: *ast.Node.ControlFlowExpression) InnerError!*zir.Inst {
-    if (true) {
-        @panic("TODO reimplement this");
-    }
+fn breakExpr(
+    mod: *Module,
+    parent_scope: *Scope,
+    node: *ast.Node.ControlFlowExpression,
+) InnerError!*zir.Inst {
     const tree = parent_scope.tree();
     const src = tree.token_locs[node.ltoken].start;

@@ -377,25 +393,31 @@ fn breakExpr(mod: *Module, parent_scope: *Scope, node: *ast.Node.ControlFlowExpr
                     continue;
                 };

-                if (node.getRHS()) |rhs| {
-                    // Most result location types can be forwarded directly; however
-                    // if we need to write to a pointer which has an inferred type,
-                    // proper type inference requires peer type resolution on the block's
-                    // break operand expressions.
-                    const branch_rl: ResultLoc = switch (gen_zir.break_result_loc) {
-                        .discard, .none, .ty, .ptr, .ref => gen_zir.break_result_loc,
-                        .inferred_ptr, .bitcasted_ptr, .block_ptr => .{ .block_ptr = block_inst },
-                    };
-                    const operand = try expr(mod, parent_scope, branch_rl, rhs);
-                    return try addZIRInst(mod, parent_scope, src, zir.Inst.Break, .{
-                        .block = block_inst,
-                        .operand = operand,
-                    }, .{});
-                } else {
-                    return try addZIRInst(mod, parent_scope, src, zir.Inst.BreakVoid, .{
-                        .block = block_inst,
-                    }, .{});
-                }
+                const rhs = node.getRHS() orelse {
+                    return addZirInstTag(mod, parent_scope, src, .break_void, .{
+                        .block = block_inst,
+                    });
+                };
+                gen_zir.break_count += 1;
+                const prev_rvalue_rl_count = gen_zir.rvalue_rl_count;
+                const operand = try expr(mod, parent_scope, gen_zir.break_result_loc, rhs);
+                const have_store_to_block = gen_zir.rvalue_rl_count != prev_rvalue_rl_count;
+                const br = try addZirInstTag(mod, parent_scope, src, .@"break", .{
+                    .block = block_inst,
+                    .operand = operand,
+                });
+                if (gen_zir.break_result_loc == .block_ptr) {
+                    try gen_zir.labeled_breaks.append(mod.gpa, br.castTag(.@"break").?);
+
+                    if (have_store_to_block) {
+                        const inst_list = parent_scope.cast(Scope.GenZIR).?.instructions.items;
+                        const last_inst = inst_list[inst_list.len - 2];
+                        const store_inst = last_inst.castTag(.store_to_block_ptr).?;
+                        assert(store_inst.positionals.lhs == gen_zir.rl_ptr.?);
+                        try gen_zir.labeled_store_to_block_ptr_list.append(mod.gpa, store_inst);
+                    }
+                }
+                return br;
             },
             .local_val => scope = scope.cast(Scope.LocalVal).?.parent,
             .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent,
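Note: the `inst_list.len - 2` indexing above relies on emission order: when the
break operand was written through the result pointer, the store lands
immediately before the just-appended break. Schematically (not actual ZIR text
syntax):

    %n-1 = store_to_block_ptr(rl_ptr, operand)  // candidate for later elision
    %n   = break(block, operand)                // appended last, so the store sits at len - 2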
@@ -538,7 +560,6 @@ fn labeledBlockExpr(
         .decl = parent_scope.ownerDecl().?,
         .arena = gen_zir.arena,
         .instructions = .{},
-        .break_result_loc = rl,
         // TODO @as here is working around a stage1 miscompilation bug :(
         .label = @as(?Scope.GenZIR.Label, Scope.GenZIR.Label{
             .token = block_node.label,
@@ -546,19 +567,57 @@ fn labeledBlockExpr(
         }),
     };
     defer block_scope.instructions.deinit(mod.gpa);
+    defer block_scope.labeled_breaks.deinit(mod.gpa);
+    defer block_scope.labeled_store_to_block_ptr_list.deinit(mod.gpa);
+
+    setBlockResultLoc(&block_scope, rl);

     try blockExprStmts(mod, &block_scope.base, &block_node.base, block_node.statements());

     if (!block_scope.label.?.used) {
         return mod.fail(parent_scope, tree.token_locs[block_node.label].start, "unused block label", .{});
     }

     block_inst.positionals.body.instructions = try block_scope.arena.dupe(*zir.Inst, block_scope.instructions.items);
     try gen_zir.instructions.append(mod.gpa, &block_inst.base);

-    return &block_inst.base;
+    const strat = rlStrategy(rl, &block_scope);
+    switch (strat.tag) {
+        .break_void => {
+            // The code took advantage of the result location as a pointer.
+            // Turn the break instructions into break_void instructions.
+            for (block_scope.labeled_breaks.items) |br| {
+                br.base.tag = .break_void;
+            }
+            // TODO technically not needed since we changed the tag to break_void but
+            // would be better still to elide the ones that are in this list.
+            try copyBodyNoEliding(&block_inst.positionals.body, block_scope);
+
+            return &block_inst.base;
+        },
+        .break_operand => {
+            // All break operands are values that did not use the result location pointer.
+            if (strat.elide_store_to_block_ptr_instructions) {
+                for (block_scope.labeled_store_to_block_ptr_list.items) |inst| {
+                    inst.base.tag = .void_value;
+                }
+                // TODO technically not needed since we changed the tag to void_value but
+                // would be better still to elide the ones that are in this list.
+            }
+            try copyBodyNoEliding(&block_inst.positionals.body, block_scope);
+            switch (rl) {
+                .ref => return &block_inst.base,
+                else => return rvalue(mod, parent_scope, rl, &block_inst.base),
+            }
+        },
+    }
 }

-fn blockExprStmts(mod: *Module, parent_scope: *Scope, node: *ast.Node, statements: []*ast.Node) !void {
+fn blockExprStmts(
+    mod: *Module,
+    parent_scope: *Scope,
+    node: *ast.Node,
+    statements: []*ast.Node,
+) !void {
     const tree = parent_scope.tree();

     var block_arena = std.heap.ArenaAllocator.init(mod.gpa);
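Note: the payoff of the break_operand path is a block like the following
(hypothetical user code), where neither break consumes the result pointer, so
the store_to_block_ptr instructions can be rewritten to void_value and the
block result is carried by the break operands plus a final rvalue() call:

    var cond = true;
    var x = blk: { // assuming `var` init gives the block an inferred-pointer result location
        if (cond) break :blk @as(u32, 1);
        break :blk @as(u32, 2);
    };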
@@ -1659,7 +1718,6 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn
             cond_kind = .{ .err_union = null };
         }
     }
-    const block_branch_count = 2; // then and else
     var block_scope: Scope.GenZIR = .{
         .parent = scope,
         .decl = scope.ownerDecl().?,
@@ -1668,6 +1726,8 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn
     };
     defer block_scope.instructions.deinit(mod.gpa);

+    setBlockResultLoc(&block_scope, rl);
+
     const tree = scope.tree();
     const if_src = tree.token_locs[if_node.if_token].start;
     const cond = try cond_kind.cond(mod, &block_scope, if_src, if_node.condition);
@@ -1682,33 +1742,6 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn
         .instructions = try block_scope.arena.dupe(*zir.Inst, block_scope.instructions.items),
     });

-    // Depending on whether the result location is a pointer or value, different
-    // ZIR needs to be generated. In the former case we rely on storing to the
-    // pointer to communicate the result, and use breakvoid; in the latter case
-    // the block break instructions will have the result values.
-    // One more complication: when the result location is a pointer, we detect
-    // the scenario where the result location is not consumed. In this case
-    // we emit ZIR for the block break instructions to have the result values,
-    // and then rvalue() on that to pass the value to the result location.
-    const branch_rl: ResultLoc = switch (rl) {
-        .discard, .none, .ty, .ptr, .ref => rl,
-
-        .inferred_ptr => |ptr| blk: {
-            block_scope.rl_ptr = &ptr.base;
-            break :blk .{ .block_ptr = &block_scope };
-        },
-
-        .bitcasted_ptr => |ptr| blk: {
-            block_scope.rl_ptr = &ptr.base;
-            break :blk .{ .block_ptr = &block_scope };
-        },
-
-        .block_ptr => |parent_block_scope| blk: {
-            block_scope.rl_ptr = parent_block_scope.rl_ptr.?;
-            break :blk .{ .block_ptr = &block_scope };
-        },
-    };
-
     const then_src = tree.token_locs[if_node.body.lastToken()].start;
     var then_scope: Scope.GenZIR = .{
         .parent = scope,
@@ -1721,7 +1754,8 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn
     // declare payload to the then_scope
     const then_sub_scope = try cond_kind.thenSubScope(mod, &then_scope, then_src, if_node.payload);

-    const then_result = try expr(mod, then_sub_scope, branch_rl, if_node.body);
+    block_scope.break_count += 1;
+    const then_result = try expr(mod, then_sub_scope, block_scope.break_result_loc, if_node.body);
     // We hold off on the break instructions as well as copying the then/else
     // instructions into place until we know whether to keep store_to_block_ptr
     // instructions or not.
@@ -1741,47 +1775,18 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn
         // declare payload to the then_scope
         else_sub_scope = try cond_kind.elseSubScope(mod, &else_scope, else_src, else_node.payload);

-        break :blk try expr(mod, else_sub_scope, branch_rl, else_node.body);
+        block_scope.break_count += 1;
+        break :blk try expr(mod, else_sub_scope, block_scope.break_result_loc, else_node.body);
     } else blk: {
         else_src = tree.token_locs[if_node.lastToken()].start;
         else_sub_scope = &else_scope.base;
+        block_scope.rvalue_rl_count += 1;
         break :blk null;
     };

-    // We now have enough information to decide whether the result instruction should
-    // be communicated via result location pointer or break instructions.
-    const Strategy = enum {
-        /// Both branches will use break_void; result location is used to communicate the
-        /// result instruction.
-        break_void,
-        /// Use break statements to pass the block result value, and call rvalue() at
-        /// the end depending on rl. Also elide the store_to_block_ptr instructions
-        /// depending on rl.
-        break_operand,
-    };
-    var elide_store_to_block_ptr_instructions = false;
-    const strategy: Strategy = switch (rl) {
-        // In this branch there will not be any store_to_block_ptr instructions.
-        .discard, .none, .ty, .ref => .break_operand,
-        // The pointer got passed through to the sub-expressions, so we will use
-        // break_void here.
-        // In this branch there will not be any store_to_block_ptr instructions.
-        .ptr => .break_void,
-        .inferred_ptr, .bitcasted_ptr, .block_ptr => blk: {
-            if (block_scope.rvalue_rl_count == 2) {
-                // Neither prong of the if consumed the result location, so we can
-                // use break instructions to create an rvalue.
-                elide_store_to_block_ptr_instructions = true;
-                break :blk Strategy.break_operand;
-            } else {
-                // Allow the store_to_block_ptr instructions to remain so that
-                // semantic analysis can turn them into bitcasts.
-                break :blk Strategy.break_void;
-            }
-        },
-    };
-    switch (strategy) {
+    const strat = rlStrategy(rl, &block_scope);
+    switch (strat.tag) {
         .break_void => {
             if (!then_result.tag.isNoReturn()) {
                 _ = try addZirInstTag(mod, then_sub_scope, then_src, .break_void, .{
@@ -1799,7 +1804,7 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn
                     .block = block,
                 });
             }
-            assert(!elide_store_to_block_ptr_instructions);
+            assert(!strat.elide_store_to_block_ptr_instructions);
             try copyBodyNoEliding(&condbr.positionals.then_body, then_scope);
             try copyBodyNoEliding(&condbr.positionals.else_body, else_scope);
             return &block.base;
@@ -1823,7 +1828,7 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn
                     .block = block,
                 });
             }
-            if (elide_store_to_block_ptr_instructions) {
+            if (strat.elide_store_to_block_ptr_instructions) {
                 try copyBodyWithElidedStoreBlockPtr(&condbr.positionals.then_body, then_scope);
                 try copyBodyWithElidedStoreBlockPtr(&condbr.positionals.else_body, else_scope);
             } else {
@@ -3376,6 +3381,72 @@ fn rvalueVoid(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node, resul
     return rvalue(mod, scope, rl, void_inst);
 }

+fn rlStrategy(rl: ResultLoc, block_scope: *Scope.GenZIR) ResultLoc.Strategy {
+    var elide_store_to_block_ptr_instructions = false;
+    switch (rl) {
+        // In this branch there will not be any store_to_block_ptr instructions.
+        .discard, .none, .ty, .ref => return .{
+            .tag = .break_operand,
+            .elide_store_to_block_ptr_instructions = false,
+        },
+        // The pointer got passed through to the sub-expressions, so we will use
+        // break_void here.
+        // In this branch there will not be any store_to_block_ptr instructions.
+        .ptr => return .{
+            .tag = .break_void,
+            .elide_store_to_block_ptr_instructions = false,
+        },
+        .inferred_ptr, .bitcasted_ptr, .block_ptr => {
+            if (block_scope.rvalue_rl_count == block_scope.break_count) {
+                // Neither prong of the if consumed the result location, so we can
+                // use break instructions to create an rvalue.
+                return .{
+                    .tag = .break_operand,
+                    .elide_store_to_block_ptr_instructions = true,
+                };
+            } else {
+                // Allow the store_to_block_ptr instructions to remain so that
+                // semantic analysis can turn them into bitcasts.
+                return .{
+                    .tag = .break_void,
+                    .elide_store_to_block_ptr_instructions = false,
+                };
+            }
+        },
+    }
+}
+
+fn setBlockResultLoc(block_scope: *Scope.GenZIR, parent_rl: ResultLoc) void {
+    // Depending on whether the result location is a pointer or value, different
+    // ZIR needs to be generated. In the former case we rely on storing to the
+    // pointer to communicate the result, and use breakvoid; in the latter case
+    // the block break instructions will have the result values.
+    // One more complication: when the result location is a pointer, we detect
+    // the scenario where the result location is not consumed. In this case
+    // we emit ZIR for the block break instructions to have the result values,
+    // and then rvalue() on that to pass the value to the result location.
+    switch (parent_rl) {
+        .discard, .none, .ty, .ptr, .ref => {
+            block_scope.break_result_loc = parent_rl;
+        },
+
+        .inferred_ptr => |ptr| {
+            block_scope.rl_ptr = &ptr.base;
+            block_scope.break_result_loc = .{ .block_ptr = block_scope };
+        },
+
+        .bitcasted_ptr => |ptr| {
+            block_scope.rl_ptr = &ptr.base;
+            block_scope.break_result_loc = .{ .block_ptr = block_scope };
+        },
+
+        .block_ptr => |parent_block_scope| {
+            block_scope.rl_ptr = parent_block_scope.rl_ptr.?;
+            block_scope.break_result_loc = .{ .block_ptr = block_scope };
+        },
+    }
+}
+
 pub fn addZirInstTag(
     mod: *Module,
     scope: *Scope,
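Note: callers are expected to pair these two helpers around body generation,
as labeledBlockExpr and ifExpr do above. A sketch of the pattern:

    setBlockResultLoc(&block_scope, rl);        // pick break_result_loc and rl_ptr up front
    // ... generate the block body: each break bumps break_count, and each
    // rvalue() that merely stores through the block pointer bumps rvalue_rl_count ...
    const strat = rlStrategy(rl, &block_scope); // then choose break_void vs break_operand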
@@ -846,7 +846,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             .br => return self.genBr(inst.castTag(.br).?),
             .br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?),
             .breakpoint => return self.genBreakpoint(inst.src),
-            .brvoid => return self.genBrVoid(inst.castTag(.brvoid).?),
+            .br_void => return self.genBrVoid(inst.castTag(.br_void).?),
             .bool_and => return self.genBoolOp(inst.castTag(.bool_and).?),
             .bool_or => return self.genBoolOp(inst.castTag(.bool_or).?),
             .call => return self.genCall(inst.castTag(.call).?),
@@ -2442,10 +2442,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
         }
     }

-    fn genBrBlockFlat(self: *Self, parent_inst: *ir.Inst.BrBlockFlat) !MCValue {
-        try self.genBody(parent_inst.body);
-        const last = parent_inst.body.instructions[parent_inst.body.instructions.len - 1];
-        return self.br(parent_inst.base.src, parent_inst.block, last);
+    fn genBrBlockFlat(self: *Self, inst: *ir.Inst.BrBlockFlat) !MCValue {
+        try self.genBody(inst.body);
+        const last = inst.body.instructions[inst.body.instructions.len - 1];
+        return self.br(inst.base.src, inst.block, last);
     }

     fn genBr(self: *Self, inst: *ir.Inst.Br) !MCValue {
@@ -69,7 +69,7 @@ pub const Inst = struct {
         /// replace one br operand with multiple instructions, without moving anything else around.
         br_block_flat,
         breakpoint,
-        brvoid,
+        br_void,
         call,
         cmp_lt,
         cmp_lte,
@@ -166,7 +166,7 @@ pub const Inst = struct {
             .block => Block,
             .br => Br,
             .br_block_flat => BrBlockFlat,
-            .brvoid => BrVoid,
+            .br_void => BrVoid,
             .call => Call,
             .condbr => CondBr,
             .constant => Constant,
@@ -259,7 +259,7 @@ pub const Inst = struct {
     pub fn breakBlock(base: *Inst) ?*Block {
         return switch (base.tag) {
             .br => base.castTag(.br).?.block,
-            .brvoid => base.castTag(.brvoid).?.block,
+            .br_void => base.castTag(.br_void).?.block,
             .br_block_flat => base.castTag(.br_block_flat).?.block,
             else => null,
         };
@@ -403,7 +403,7 @@ pub const Inst = struct {
     };

     pub const BrVoid = struct {
-        pub const base_tag = Tag.brvoid;
+        pub const base_tag = Tag.br_void;

         base: Inst,
         block: *Block,
src/zir.zig
@@ -264,8 +264,7 @@ pub const Inst = struct {
         /// Write a value to a pointer. For loading, see `deref`.
         store,
         /// Same as `store` but the type of the value being stored will be used to infer
-        /// the block type. The LHS is a block instruction, whose result location is
-        /// being stored to.
+        /// the block type. The LHS is the pointer to store to.
         store_to_block_ptr,
         /// Same as `store` but the type of the value being stored will be used to infer
         /// the pointer type.
@@ -343,6 +342,8 @@ pub const Inst = struct {
         /// Only checks that `lhs >= rhs` if they are ints, everything else is
         /// validated by the .switch instruction.
         switch_range,
+        /// Does nothing; returns a void value.
+        void_value,

         pub fn Type(tag: Tag) type {
             return switch (tag) {
@@ -355,6 +356,7 @@ pub const Inst = struct {
                 .ret_type,
                 .unreachable_unsafe,
                 .unreachable_safe,
+                .void_value,
                 => NoOp,

                 .alloc,
@@ -611,6 +613,7 @@ pub const Inst = struct {
             .enum_type,
             .union_type,
             .struct_type,
+            .void_value,
             => false,

             .@"break",
@@ -1640,9 +1643,9 @@ const DumpTzir = struct {
                 try dtz.fetchInstsAndResolveConsts(br_block_flat.body);
             },

-            .brvoid => {
-                const brvoid = inst.castTag(.brvoid).?;
-                try dtz.findConst(&brvoid.block.base);
+            .br_void => {
+                const br_void = inst.castTag(.br_void).?;
+                try dtz.findConst(&br_void.block.base);
             },

             .block => {
@@ -1803,9 +1806,9 @@ const DumpTzir = struct {
                 try writer.writeAll("})\n");
             },

-            .brvoid => {
-                const brvoid = inst.castTag(.brvoid).?;
-                const kinky = try dtz.writeInst(writer, &brvoid.block.base);
+            .br_void => {
+                const br_void = inst.castTag(.br_void).?;
+                const kinky = try dtz.writeInst(writer, &br_void.block.base);
                 if (kinky) |_| {
                     try writer.writeAll(") // Instruction does not dominate all uses!\n");
                 } else {
@@ -155,6 +155,7 @@ pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!
         .switch_range => return zirSwitchRange(mod, scope, old_inst.castTag(.switch_range).?),
         .bool_and => return zirBoolOp(mod, scope, old_inst.castTag(.bool_and).?),
         .bool_or => return zirBoolOp(mod, scope, old_inst.castTag(.bool_or).?),
+        .void_value => return mod.constVoid(scope, old_inst.src),

         .container_field_named,
         .container_field_typed,
@@ -447,6 +448,8 @@ fn zirStoreToBlockPtr(
     const ptr = try resolveInst(mod, scope, inst.positionals.lhs);
     const value = try resolveInst(mod, scope, inst.positionals.rhs);
     const ptr_ty = try mod.simplePtrType(scope, inst.base.src, value.ty, true, .One);
+    // TODO detect when this store should be done at compile-time. For example,
+    // if expressions should force it when the condition is compile-time known.
     const b = try mod.requireRuntimeBlock(scope, inst.base.src);
     const bitcasted_ptr = try mod.addUnOp(b, inst.base.src, ptr_ty, .bitcast, ptr);
     return mod.storePtr(scope, inst.base.src, bitcasted_ptr, value);