diff --git a/src/Sema.zig b/src/Sema.zig
index da93a2906a..972efcff72 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -3866,8 +3866,8 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
             const dummy_ptr = try trash_block.addTy(.alloc, mut_final_ptr_ty);
             const empty_trash_count = trash_block.instructions.items.len;
 
-            for (placeholders, 0..) |bitcast_inst, i| {
-                const sub_ptr_ty = sema.typeOf(Air.indexToRef(bitcast_inst));
+            for (peer_inst_list, placeholders) |peer_inst, placeholder_inst| {
+                const sub_ptr_ty = sema.typeOf(Air.indexToRef(placeholder_inst));
 
                 if (mut_final_ptr_ty.eql(sub_ptr_ty, sema.mod)) {
                     // New result location type is the same as the old one; nothing
@@ -3875,39 +3875,54 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
                     continue;
                 }
 
-                var bitcast_block = block.makeSubBlock();
-                defer bitcast_block.instructions.deinit(gpa);
+                var replacement_block = block.makeSubBlock();
+                defer replacement_block.instructions.deinit(gpa);
 
-                trash_block.instructions.shrinkRetainingCapacity(empty_trash_count);
-                const sub_ptr = try sema.coerceResultPtr(&bitcast_block, src, ptr, dummy_ptr, peer_inst_list[i], &trash_block);
+                const result = switch (sema.air_instructions.items(.tag)[placeholder_inst]) {
+                    .bitcast => result: {
+                        trash_block.instructions.shrinkRetainingCapacity(empty_trash_count);
+                        const sub_ptr = try sema.coerceResultPtr(&replacement_block, src, ptr, dummy_ptr, peer_inst, &trash_block);
+
+                        assert(replacement_block.instructions.items.len > 0);
+                        break :result sub_ptr;
+                    },
+                    .store => result: {
+                        const bin_op = sema.air_instructions.items(.data)[placeholder_inst].bin_op;
+                        try sema.storePtr2(&replacement_block, src, bin_op.lhs, src, bin_op.rhs, src, .bitcast);
+                        break :result .void_value;
+                    },
+                    else => unreachable,
+                };
 
-                assert(bitcast_block.instructions.items.len > 0);
                 // If only one instruction is produced then we can replace the bitcast
                 // placeholder instruction with this instruction; no need for an entire block.
-                if (bitcast_block.instructions.items.len == 1) {
-                    const only_inst = bitcast_block.instructions.items[0];
-                    sema.air_instructions.set(bitcast_inst, sema.air_instructions.get(only_inst));
+                if (replacement_block.instructions.items.len == 1) {
+                    const only_inst = replacement_block.instructions.items[0];
+                    sema.air_instructions.set(placeholder_inst, sema.air_instructions.get(only_inst));
                     continue;
                 }
 
                 // Here we replace the placeholder bitcast instruction with a block
                 // that does the coerce_result_ptr logic.
-                _ = try bitcast_block.addBr(bitcast_inst, sub_ptr);
-                const ty_inst = sema.air_instructions.items(.data)[bitcast_inst].ty_op.ty;
+                _ = try replacement_block.addBr(placeholder_inst, result);
+                const ty_inst = if (result == .void_value)
+                    .void_type
+                else
+                    sema.air_instructions.items(.data)[placeholder_inst].ty_op.ty;
                 try sema.air_extra.ensureUnusedCapacity(
                     gpa,
-                    @typeInfo(Air.Block).Struct.fields.len + bitcast_block.instructions.items.len,
+                    @typeInfo(Air.Block).Struct.fields.len + replacement_block.instructions.items.len,
                 );
-                sema.air_instructions.set(bitcast_inst, .{
+                sema.air_instructions.set(placeholder_inst, .{
                     .tag = .block,
                     .data = .{ .ty_pl = .{
                         .ty = ty_inst,
                         .payload = sema.addExtraAssumeCapacity(Air.Block{
-                            .body_len = @intCast(u32, bitcast_block.instructions.items.len),
+                            .body_len = @intCast(u32, replacement_block.instructions.items.len),
                         }),
                     } },
                 });
-                sema.air_extra.appendSliceAssumeCapacity(bitcast_block.instructions.items);
+                sema.air_extra.appendSliceAssumeCapacity(replacement_block.instructions.items);
             }
         },
         else => unreachable,
@@ -4916,7 +4931,7 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
             },
             .inferred_alloc => {
                 const inferred_alloc = ptr_val.castTag(.inferred_alloc).?;
-                return sema.storeToInferredAlloc(block, src, ptr, operand, inferred_alloc);
+                return sema.storeToInferredAlloc(block, ptr, operand, inferred_alloc);
             },
             else => break :blk,
         }
@@ -4945,7 +4960,7 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi
         },
         .inferred_alloc => {
            const inferred_alloc = ptr_val.castTag(.inferred_alloc).?;
-            return sema.storeToInferredAlloc(block, src, ptr, operand, inferred_alloc);
+            return sema.storeToInferredAlloc(block, ptr, operand, inferred_alloc);
         },
         else => unreachable,
     }
@@ -4954,27 +4969,19 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi
 fn storeToInferredAlloc(
     sema: *Sema,
     block: *Block,
-    src: LazySrcLoc,
     ptr: Air.Inst.Ref,
     operand: Air.Inst.Ref,
     inferred_alloc: *Value.Payload.InferredAlloc,
 ) CompileError!void {
-    const operand_ty = sema.typeOf(operand);
-    // Create a runtime bitcast instruction with exactly the type the pointer wants.
-    const target = sema.mod.getTarget();
-    const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
-        .pointee_type = operand_ty,
-        .@"align" = inferred_alloc.data.alignment,
-        .@"addrspace" = target_util.defaultAddressSpace(target, .local),
-    });
-    const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr);
+    // Create a store instruction as a placeholder. This will be replaced by a
+    // proper store sequence once we know the stored type.
+    const dummy_store = try block.addBinOp(.store, ptr, operand);
     // Add the stored instruction to the set we will use to resolve peer types
     // for the inferred allocation.
     try inferred_alloc.data.prongs.append(sema.arena, .{
         .stored_inst = operand,
-        .placeholder = Air.refToIndex(bitcasted_ptr).?,
+        .placeholder = Air.refToIndex(dummy_store).?,
     });
-    return sema.storePtr2(block, src, bitcasted_ptr, src, operand, src, .bitcast);
 }
 
 fn storeToInferredAllocComptime(
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 704a1e31c5..a3758bac69 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -3597,10 +3597,6 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
     const ptr_ty = f.air.typeOf(bin_op.lhs);
     const ptr_scalar_ty = ptr_ty.scalarType();
     const ptr_info = ptr_scalar_ty.ptrInfo().data;
-    if (!ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime()) {
-        try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
-        return .none;
-    }
 
     const ptr_val = try f.resolveInst(bin_op.lhs);
     const src_ty = f.air.typeOf(bin_op.rhs);
@@ -4461,9 +4457,7 @@ fn airBr(f: *Function, inst: Air.Inst.Index) !CValue {
 fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue {
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
     const dest_ty = f.air.typeOfIndex(inst);
-    // No IgnoreComptime until Sema stops giving us garbage Air.
-    // https://github.com/ziglang/zig/issues/13410
-    if (f.liveness.isUnused(inst) or !dest_ty.hasRuntimeBits()) {
+    if (f.liveness.isUnused(inst)) {
         try reap(f, inst, &.{ty_op.operand});
         return .none;
     }
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index ce49fcde78..f32047fe64 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -8216,7 +8216,6 @@ pub const FuncGen = struct {
         const dest_ptr = try self.resolveInst(bin_op.lhs);
         const ptr_ty = self.air.typeOf(bin_op.lhs);
         const operand_ty = ptr_ty.childType();
-        if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return null;
 
         // TODO Sema should emit a different instruction when the store should
         // possibly do the safety 0xaa bytes for undefined.
diff --git a/test/behavior/if.zig b/test/behavior/if.zig
index 730c0713c6..2294a2bcfd 100644
--- a/test/behavior/if.zig
+++ b/test/behavior/if.zig
@@ -140,12 +140,6 @@ test "if-else expression with runtime condition result location is inferred opti
 }
 
 test "result location with inferred type ends up being pointer to comptime_int" {
-    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-
     var a: ?u32 = 1234;
     var b: u32 = 2000;
     var c = if (a) |d| blk: {
diff --git a/test/behavior/union.zig b/test/behavior/union.zig
index 20ad0a60ff..e8a9f4c831 100644
--- a/test/behavior/union.zig
+++ b/test/behavior/union.zig
@@ -1540,3 +1540,19 @@ test "access the tag of a global tagged union" {
     };
     try expect(U.u == .a);
 }
+
+test "coerce enum literal to union in result loc" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+
+    const U = union(enum) {
+        a,
+        b: u8,
+
+        fn doTest(c: bool) !void {
+            var u = if (c) .a else @This(){ .b = 0 };
+            try expect(u == .a);
+        }
+    };
+    try U.doTest(true);
+    comptime try U.doTest(true);
+}