Merge pull request #11043 from topolarity/ptr-fixes

stage2: Improve `@ptrCast` support for sliced/optional operands
Andrew Kelley (committed via GitHub)
2022-03-03 15:12:02 -05:00
5 changed files with 53 additions and 19 deletions

View File

@@ -12538,6 +12538,8 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
     const operand = sema.resolveInst(extra.rhs);
     const operand_ty = sema.typeOf(operand);
+    const target = sema.mod.getTarget();
+
     try sema.checkPtrType(block, dest_ty_src, dest_ty);
     try sema.checkPtrOperand(block, operand_src, operand_ty);
     if (dest_ty.isSlice()) {
@@ -12547,7 +12549,28 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
         try sema.analyzeSlicePtr(block, operand_src, operand, operand_ty)
     else
         operand;
-    return sema.coerceCompatiblePtrs(block, dest_ty, ptr, operand_src);
+
+    try sema.resolveTypeLayout(block, dest_ty_src, dest_ty.elemType2());
+    const dest_align = dest_ty.ptrAlignment(target);
+
+    try sema.resolveTypeLayout(block, operand_src, operand_ty.elemType2());
+    const operand_align = operand_ty.ptrAlignment(target);
+
+    // If the destination is less aligned than the source, preserve the source alignment
+    var aligned_dest_ty = if (operand_align <= dest_align) dest_ty else blk: {
+        // Unwrap the pointer (or pointer-like optional) type, set alignment, and re-wrap into result
+        if (dest_ty.zigTypeTag() == .Optional) {
+            var buf: Type.Payload.ElemType = undefined;
+            var dest_ptr_info = dest_ty.optionalChild(&buf).ptrInfo().data;
+            dest_ptr_info.@"align" = operand_align;
+            break :blk try Type.optional(sema.arena, try Type.ptr(sema.arena, target, dest_ptr_info));
+        } else {
+            var dest_ptr_info = dest_ty.ptrInfo().data;
+            dest_ptr_info.@"align" = operand_align;
+            break :blk try Type.ptr(sema.arena, target, dest_ptr_info);
+        }
+    };
+
+    return sema.coerceCompatiblePtrs(block, aligned_dest_ty, ptr, operand_src);
 }
 
 fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
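
The user-visible effect: when the operand is more aligned than the destination type requires, the cast result keeps the larger alignment. A minimal sketch of that behavior (not part of this diff; it mirrors the "comptime ptrcast keeps larger alignment" test enabled below, and assumes the two-argument @ptrCast builtin of this era):

const std = @import("std");

test "@ptrCast result keeps the operand's larger alignment" {
    const a: u32 = 1234;
    // dest_ty here is [*]const u8 (align 1), but the operand is align(4);
    // Sema now rewrites the result type to [*]align(4) const u8.
    const p = @ptrCast([*]const u8, &a);
    try std.testing.expect(@typeInfo(@TypeOf(p)).Pointer.alignment == 4);
}
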
@@ -15340,7 +15363,7 @@ fn unionFieldVal(
                 return sema.addConstant(field.ty, tag_and_val.val);
             } else {
                 const old_ty = union_ty.unionFieldType(tag_and_val.tag);
-                const new_val = try sema.bitCastVal(block, src, tag_and_val.val, old_ty, field.ty);
+                const new_val = try sema.bitCastVal(block, src, tag_and_val.val, old_ty, field.ty, 0);
                 return sema.addConstant(field.ty, new_val);
             }
         },
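
unionFieldVal performs this bitcast when an inactive field of an extern union is read at comptime; the new trailing argument is the buffer offset, which is always 0 here. A hedged sketch of the kind of code that exercises this path (illustrative, not from this commit):

const std = @import("std");
const builtin = @import("builtin");
const native_endian = builtin.target.cpu.arch.endian();

const U = extern union {
    a: u32,
    b: [4]u8,
};

test "reading an inactive extern union field at comptime" {
    comptime {
        const u = U{ .a = 0x01020304 };
        // `b` is not the active field, so Sema bitcasts the stored u32
        // into [4]u8 (at buffer offset 0).
        const lo: u8 = if (native_endian == .Little) 0x04 else 0x01;
        try std.testing.expect(u.b[0] == lo);
    }
}
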
@@ -16447,7 +16470,7 @@ fn storePtrVal(
     var kit = try beginComptimePtrMutation(sema, block, src, ptr_val);
     try sema.checkComptimeVarStore(block, src, kit.decl_ref_mut);
 
-    const bitcasted_val = try sema.bitCastVal(block, src, operand_val, operand_ty, kit.ty);
+    const bitcasted_val = try sema.bitCastVal(block, src, operand_val, operand_ty, kit.ty, 0);
 
     const arena = kit.beginArena(sema.gpa);
     defer kit.finishArena();
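
storePtrVal likewise passes an explicit 0 offset when bitcasting the stored operand into the type of the mutation target. A sketch of a store where the operand type differs from the target's type (an assumed illustration; whether every such cast was accepted at the time is not shown by this diff):

const std = @import("std");

test "comptime store through a differently typed pointer" {
    comptime {
        var x: u32 = 0;
        const p = @ptrCast(*[4]u8, &x);
        // The [4]u8 operand is bitcast to the mutation target's type (u32)
        // at offset 0 before being written back.
        p.* = .{ 1, 2, 3, 4 };
        try std.testing.expect(x != 0);
    }
}
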
@@ -16741,6 +16764,8 @@ fn beginComptimePtrMutation(
 const ComptimePtrLoadKit = struct {
     /// The Value of the Decl that owns this memory.
     root_val: Value,
+    /// The Type of the Decl that owns this memory.
+    root_ty: Type,
     /// Parent Value.
     val: Value,
     /// The Type of the parent Value.
@@ -16771,6 +16796,7 @@ fn beginComptimePtrLoad(
             if (decl_val.tag() == .variable) return error.RuntimeLoad;
             return ComptimePtrLoadKit{
                 .root_val = decl_val,
+                .root_ty = decl.ty,
                 .val = decl_val,
                 .ty = decl.ty,
                 .byte_offset = 0,
@@ -16783,6 +16809,7 @@ fn beginComptimePtrLoad(
             if (decl_val.tag() == .variable) return error.RuntimeLoad;
             return ComptimePtrLoadKit{
                 .root_val = decl_val,
+                .root_ty = decl.ty,
                 .val = decl_val,
                 .ty = decl.ty,
                 .byte_offset = 0,
@@ -16817,6 +16844,7 @@ fn beginComptimePtrLoad(
             };
             return ComptimePtrLoadKit{
                 .root_val = parent.root_val,
+                .root_ty = parent.ty,
                 .val = try parent.val.elemValue(sema.arena, elem_ptr.index),
                 .ty = elem_ty,
                 .byte_offset = byte_offset,
@@ -16832,6 +16860,7 @@ fn beginComptimePtrLoad(
             }
             return ComptimePtrLoadKit{
                 .root_val = parent.root_val,
+                .root_ty = parent.ty,
                 .val = parent.val,
                 .ty = parent.ty,
                 .byte_offset = parent.byte_offset,
@@ -16859,6 +16888,7 @@ fn beginComptimePtrLoad(
             };
             return ComptimePtrLoadKit{
                 .root_val = parent.root_val,
+                .root_ty = parent.ty,
                 .val = try parent.val.fieldValue(sema.arena, field_index),
                 .ty = parent.ty.structFieldType(field_index),
                 .byte_offset = byte_offset,
@@ -16870,6 +16900,7 @@ fn beginComptimePtrLoad(
             const parent = try beginComptimePtrLoad(sema, block, src, err_union_ptr);
             return ComptimePtrLoadKit{
                 .root_val = parent.root_val,
+                .root_ty = parent.root_ty,
                 .val = parent.val.castTag(.eu_payload).?.data,
                 .ty = parent.ty.errorUnionPayload(),
                 .byte_offset = null,
@@ -16881,6 +16912,7 @@ fn beginComptimePtrLoad(
             const parent = try beginComptimePtrLoad(sema, block, src, opt_ptr);
             return ComptimePtrLoadKit{
                 .root_val = parent.root_val,
+                .root_ty = parent.root_ty,
                 .val = parent.val.castTag(.opt_payload).?.data,
                 .ty = try parent.ty.optionalChildAlloc(sema.arena),
                 .byte_offset = null,
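
Each case of beginComptimePtrLoad now records a root type alongside root_val; the payload-pointer cases (error union and optional) forward it unchanged but reset byte_offset to null, since a payload is not byte-addressable within the root value. A small sketch of a load that flows through the opt_payload case (illustrative only, not from the commit):

const std = @import("std");

test "comptime load through an optional payload pointer" {
    comptime {
        var opt: ?u32 = 42;
        // `&opt.?` is an opt_payload pointer: the root type is forwarded
        // from the parent kit, and byte_offset becomes null.
        const p = &opt.?;
        try std.testing.expect(p.* == 42);
    }
}
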
@@ -16918,7 +16950,7 @@ fn bitCast(
     // TODO validate the type size and other compile errors
     if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| {
-        const result_val = try sema.bitCastVal(block, inst_src, val, old_ty, dest_ty);
+        const result_val = try sema.bitCastVal(block, inst_src, val, old_ty, dest_ty, 0);
         return sema.addConstant(dest_ty, result_val);
     }
 
     try sema.requireRuntimeBlock(block, inst_src);
@@ -16932,6 +16964,7 @@ pub fn bitCastVal(
     val: Value,
     old_ty: Type,
     new_ty: Type,
+    buffer_offset: usize,
 ) !Value {
     if (old_ty.eql(new_ty)) return val;
 
@@ -16942,7 +16975,7 @@ pub fn bitCastVal(
     const buffer = try sema.gpa.alloc(u8, abi_size);
     defer sema.gpa.free(buffer);
     val.writeToMemory(old_ty, target, buffer);
-    return Value.readFromMemory(new_ty, target, buffer, sema.arena);
+    return Value.readFromMemory(new_ty, target, buffer[buffer_offset..], sema.arena);
 }
 
 fn coerceArrayPtrToSlice(
@@ -19808,13 +19841,10 @@ fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr
             // The Type it is stored as in the compiler has an ABI size greater or equal to
             // the ABI size of `load_ty`. We may perform the bitcast based on
             // `parent.val` alone (more efficient).
-            return try sema.bitCastVal(block, src, parent.val, parent.ty, load_ty);
+            return try sema.bitCastVal(block, src, parent.val, parent.ty, load_ty, 0);
+        } else {
+            return try sema.bitCastVal(block, src, parent.root_val, parent.root_ty, load_ty, parent.byte_offset.?);
         }
-
-        // The Type it is stored as in the compiler has an ABI size less than the ABI size
-        // of `load_ty`. The bitcast must be performed based on the `parent.root_val`
-        // and reinterpreted starting at `parent.byte_offset`.
-        return sema.fail(block, src, "TODO: implement bitcast with index offset", .{});
     }
 
 /// Used to convert a u64 value to a usize value, emitting a compile error if the number
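
Together with the new buffer_offset parameter, this replaces the old "TODO: implement bitcast with index offset" failure: when the parent value is smaller than load_ty, the load now reinterprets the root value starting at parent.byte_offset. A sketch of the newly supported pattern, mirroring the "reinterpret bytes as integer with nonzero offset" test enabled below (the assertion here is my own, not from the diff):

const std = @import("std");

test "comptime load at a nonzero byte offset" {
    comptime {
        const bytes = [_]u8{ 0x01, 0x02, 0x03, 0x04, 0x05 };
        // The u32 load starts one byte into the root value `bytes`, so Sema
        // bitcasts from the root value at byte_offset 1.
        const ptr = @ptrCast(*align(1) const u32, bytes[1..5]);
        try std.testing.expect(ptr.* == std.mem.readIntNative(u32, bytes[1..5]));
    }
}
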

View File

@@ -2882,7 +2882,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
         const ptr_ty = self.air.typeOf(bin_op.lhs);
         const elem_ty = ptr_ty.childType();
-        if (!elem_ty.hasRuntimeBits()) return null;
+        if (!elem_ty.hasRuntimeBits()) return self.dg.lowerPtrToVoid(ptr_ty);
 
         const base_ptr = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
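
In the LLVM backend, an element pointer to a zero-bit type previously lowered to no value at all, which broke coercions into optional pointers (the missing value reads as a null optional). Returning the backend's pointer-to-void lowering keeps the pointer non-null, as its name suggests. A rough illustration of the user-visible symptom (an assumed example, not taken from the commit):

const std = @import("std");

test "element pointer to a zero-bit type is not null" {
    var arr: [4]u0 = .{ 0, 0, 0, 0 };
    var i: usize = 2;
    // `&arr[i]` carries no runtime payload; it must still coerce to a
    // non-null optional pointer.
    const p: ?*u0 = &arr[i];
    try std.testing.expect(p != null);
}
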

View File

@@ -2245,6 +2245,7 @@ pub const Type = extern union {
             .prefetch_options,
             .export_options,
             .extern_options,
+            .@"opaque",
             => return 1,
 
             .fn_noreturn_no_args, // represents machine code; not a pointer
@@ -2432,7 +2433,6 @@ pub const Type = extern union {
             .noreturn,
             .inferred_alloc_const,
             .inferred_alloc_mut,
-            .@"opaque",
             .var_args_param,
             .bound_fn,
             => unreachable,
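
The two type.zig hunks move .@"opaque" out of the unreachable prongs and into the group that reports an ABI alignment of 1, so alignment queries on opaque element types no longer crash; this is what lets zirPtrCast compute ptrAlignment for destinations like ?*anyopaque. A hedged sketch of a cast that needs this:

const std = @import("std");

test "@ptrCast to an optional anyopaque pointer" {
    var x: u32 = 7;
    // Computing the destination alignment requires the ABI alignment of
    // anyopaque, which is now reported as 1 rather than being unreachable.
    const p = @ptrCast(?*anyopaque, &x);
    try std.testing.expect(p != null);
}
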

View File

@@ -273,7 +273,10 @@ test "0-bit child type coerced to optional return ptr result location" {
 }
 
 test "0-bit child type coerced to optional" {
-    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
 
     const S = struct {
         fn doTheTest() !void {

View File

@@ -4,7 +4,9 @@ const expect = std.testing.expect;
 const native_endian = builtin.target.cpu.arch.endian();
 
 test "reinterpret bytes as integer with nonzero offset" {
-    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
 
     try testReinterpretBytesAsInteger();
     comptime try testReinterpretBytesAsInteger();
@@ -43,7 +45,6 @@ fn testReinterpretBytesAsExternStruct() !void {
test "reinterpret struct field at comptime" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -67,8 +68,6 @@ const Bytes = struct {
 };
 
 test "comptime ptrcast keeps larger alignment" {
-    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
     comptime {
         const a: u32 = 1234;
         const p = @ptrCast([*]const u8, &a);
@@ -77,7 +76,9 @@ test "comptime ptrcast keeps larger alignment" {
 }
 
 test "implicit optional pointer to optional anyopaque pointer" {
-    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
 
     var buf: [4]u8 = "aoeu".*;
     var x: ?[*]u8 = &buf;