Sema: introduce a mechanism in Value to resolve types

This commit adds a new optional argument to several Value methods which
provides the ability to resolve types when needed. This prevents
duplicating logic in both Sema and Value.

With this commit, the "struct contains slice of itself" test is passing
by exploiting the new lazy_align Value Tag.
This commit is contained in:
Andrew Kelley
2022-03-22 15:42:09 -07:00
parent 593130ce0a
commit 60d8c4739d
5 changed files with 167 additions and 54 deletions

View File

@@ -177,6 +177,12 @@ const MonomorphedFuncsContext = struct {
}
};
/// Bundles the state required to perform semantic analysis on demand:
/// the `Sema` instance, the current `Block`, and the source location to
/// attribute errors to. Passed as the optional `sema_kit` argument of the
/// "advanced" `Value`/`Type` methods so they can resolve types lazily
/// instead of asserting they are already resolved.
pub const WipAnalysis = struct {
sema: *Sema,
block: *Sema.Block,
src: Module.LazySrcLoc,
};
pub const MemoizedCallSet = std.HashMapUnmanaged(
MemoizedCall.Key,
MemoizedCall.Result,

View File

@@ -1591,7 +1591,7 @@ fn resolveInt(
const coerced = try sema.coerce(block, dest_ty, air_inst, src);
const val = try sema.resolveConstValue(block, src, coerced);
const target = sema.mod.getTarget();
return val.toUnsignedInt(target);
return (try val.getUnsignedIntAdvanced(target, sema.kit(block, src))).?;
}
// Returns a compile error if the value has tag `variable`. See `resolveInstValue` for
@@ -9926,7 +9926,7 @@ fn analyzePtrArithmetic(
const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt(target));
// TODO I tried to put this check earlier, but it made the LLVM backend generate invalid instructions
if (offset_int == 0) return ptr;
if (ptr_val.getUnsignedInt(target)) |addr| {
if (try ptr_val.getUnsignedIntAdvanced(target, sema.kit(block, ptr_src))) |addr| {
const ptr_child_ty = ptr_ty.childType();
const elem_ty = if (ptr_ty.isSinglePointer() and ptr_child_ty.zigTypeTag() == .Array)
ptr_child_ty.childType()
@@ -11863,6 +11863,8 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const elem_ty_src: LazySrcLoc = .unneeded;
const inst_data = sema.code.instructions.items(.data)[inst].ptr_type;
const extra = sema.code.extraData(Zir.Inst.PtrType, inst_data.payload_index);
const unresolved_elem_ty = try sema.resolveType(block, elem_ty_src, extra.data.elem_type);
const target = sema.mod.getTarget();
var extra_i = extra.end;
@@ -11872,10 +11874,19 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
break :blk (try sema.resolveInstConst(block, .unneeded, ref)).val;
} else null;
const abi_align = if (inst_data.flags.has_align) blk: {
const abi_align: u32 = if (inst_data.flags.has_align) blk: {
const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
extra_i += 1;
const abi_align = try sema.resolveInt(block, .unneeded, ref, Type.u32);
const coerced = try sema.coerce(block, Type.u32, sema.resolveInst(ref), src);
const val = try sema.resolveConstValue(block, src, coerced);
// Check if this happens to be the lazy alignment of our element type, in
// which case we can make this 0 without resolving it.
if (val.castTag(.lazy_align)) |payload| {
if (payload.data.eql(unresolved_elem_ty, target)) {
break :blk 0;
}
}
const abi_align = (try val.getUnsignedIntAdvanced(target, sema.kit(block, src))).?;
break :blk @intCast(u32, abi_align);
} else 0;
@@ -11903,7 +11914,6 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
return sema.fail(block, src, "bit offset starts after end of host integer", .{});
}
const unresolved_elem_ty = try sema.resolveType(block, elem_ty_src, extra.data.elem_type);
const elem_ty = if (abi_align == 0)
unresolved_elem_ty
else t: {
@@ -11911,7 +11921,6 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
try sema.resolveTypeLayout(block, elem_ty_src, elem_ty);
break :t elem_ty;
};
const target = sema.mod.getTarget();
const ty = try Type.ptr(sema.arena, target, .{
.pointee_type = elem_ty,
.sentinel = sentinel,
@@ -18390,15 +18399,15 @@ fn storePtrVal(
operand_val: Value,
operand_ty: Type,
) !void {
var kit = try beginComptimePtrMutation(sema, block, src, ptr_val);
try sema.checkComptimeVarStore(block, src, kit.decl_ref_mut);
var mut_kit = try beginComptimePtrMutation(sema, block, src, ptr_val);
try sema.checkComptimeVarStore(block, src, mut_kit.decl_ref_mut);
const bitcasted_val = try sema.bitCastVal(block, src, operand_val, operand_ty, kit.ty, 0);
const bitcasted_val = try sema.bitCastVal(block, src, operand_val, operand_ty, mut_kit.ty, 0);
const arena = kit.beginArena(sema.gpa);
defer kit.finishArena();
const arena = mut_kit.beginArena(sema.gpa);
defer mut_kit.finishArena();
kit.val.* = try bitcasted_val.copy(arena);
mut_kit.val.* = try bitcasted_val.copy(arena);
}
const ComptimePtrMutationKit = struct {
@@ -19891,7 +19900,7 @@ fn cmpNumeric(
return Air.Inst.Ref.bool_false;
}
}
if (Value.compareHetero(lhs_val, op, rhs_val, target)) {
if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, target, sema.kit(block, src))) {
return Air.Inst.Ref.bool_true;
} else {
return Air.Inst.Ref.bool_false;
@@ -20758,7 +20767,7 @@ pub fn resolveFnTypes(
}
}
fn resolveTypeLayout(
pub fn resolveTypeLayout(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
@@ -20929,7 +20938,7 @@ fn resolveUnionFully(
union_obj.status = .fully_resolved;
}
fn resolveTypeFields(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!Type {
pub fn resolveTypeFields(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!Type {
switch (ty.tag()) {
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
@@ -22209,7 +22218,9 @@ fn typePtrOrOptionalPtrTy(
/// This function returns false negatives when structs and unions are having their
/// field types resolved.
/// TODO assert the return value matches `ty.comptimeOnly`
fn typeRequiresComptime(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool {
/// TODO merge these implementations together with the "advanced"/sema_kit pattern seen
/// elsewhere in value.zig
pub fn typeRequiresComptime(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool {
return switch (ty.tag()) {
.u1,
.u8,
@@ -22415,6 +22426,7 @@ fn typeAbiSize(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !u64 {
return ty.abiSize(target);
}
/// TODO merge with Type.abiAlignmentAdvanced
fn typeAbiAlignment(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !u32 {
try sema.resolveTypeLayout(block, src, ty);
const target = sema.mod.getTarget();
@@ -22498,3 +22510,7 @@ fn anonStructFieldIndex(
struct_ty.fmt(target), field_name,
});
}
/// Convenience helper: packages this `Sema`, the current block, and a source
/// location into a `Module.WipAnalysis`, suitable for passing as the
/// `sema_kit` argument of the "advanced" `Value`/`Type` methods.
fn kit(sema: *Sema, block: *Block, src: LazySrcLoc) Module.WipAnalysis {
return .{ .sema = sema, .block = block, .src = src };
}

View File

@@ -1483,20 +1483,29 @@ pub const Type = extern union {
@compileError("do not format types directly; use either ty.fmtDebug() or ty.fmt()");
}
pub fn fmt(ty: Type, target: Target) std.fmt.Formatter(TypedValue.format) {
var ty_payload: Value.Payload.Ty = .{
.base = .{ .tag = .ty },
.data = ty,
};
pub fn fmt(ty: Type, target: Target) std.fmt.Formatter(format2) {
return .{ .data = .{
.tv = .{
.ty = Type.type,
.val = Value.initPayload(&ty_payload.base),
},
.ty = ty,
.target = target,
} };
}
/// Data captured for deferred formatting of a `Type` (see `fmt`).
const FormatContext = struct {
ty: Type,
target: Target,
};
/// Adapter matching the signature expected by `std.fmt.Formatter`;
/// delegates to `print`. Only the empty format string is supported,
/// enforced with a comptime assertion.
fn format2(
ctx: FormatContext,
comptime unused_format_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
comptime assert(unused_format_string.len == 0);
_ = options;
return print(ctx.ty, writer, ctx.target);
}
pub fn fmtDebug(ty: Type) std.fmt.Formatter(dump) {
return .{ .data = ty };
}
@@ -2241,8 +2250,12 @@ pub const Type = extern union {
/// * the type has only one possible value, making its ABI size 0.
/// When `ignore_comptime_only` is true, then types that are comptime only
/// may return false positives.
pub fn hasRuntimeBitsAdvanced(ty: Type, ignore_comptime_only: bool) bool {
return switch (ty.tag()) {
pub fn hasRuntimeBitsAdvanced(
ty: Type,
ignore_comptime_only: bool,
sema_kit: ?Module.WipAnalysis,
) Module.CompileError!bool {
switch (ty.tag()) {
.u1,
.u8,
.i8,
@@ -2296,7 +2309,7 @@ pub const Type = extern union {
.@"anyframe",
.anyopaque,
.@"opaque",
=> true,
=> return true,
// These are false because they are comptime-only types.
.single_const_pointer_to_comptime_int,
@@ -2320,7 +2333,7 @@ pub const Type = extern union {
.fn_void_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
=> false,
=> return false,
// These types have more than one possible value, so the result is the same as
// asking whether they are comptime-only types.
@@ -2337,20 +2350,34 @@ pub const Type = extern union {
.const_slice,
.mut_slice,
.pointer,
=> if (ignore_comptime_only) true else !comptimeOnly(ty),
=> {
if (ignore_comptime_only) {
return true;
} else if (sema_kit) |sk| {
return !(try sk.sema.typeRequiresComptime(sk.block, sk.src, ty));
} else {
return !comptimeOnly(ty);
}
},
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
if (sema_kit) |sk| {
_ = try sk.sema.typeRequiresComptime(sk.block, sk.src, ty);
}
switch (struct_obj.requires_comptime) {
.wip => unreachable,
.yes => return false,
.no => if (struct_obj.known_non_opv) return true,
.unknown => {},
}
if (sema_kit) |sk| {
_ = try sk.sema.resolveTypeFields(sk.block, sk.src, ty);
}
assert(struct_obj.haveFieldTypes());
for (struct_obj.fields.values()) |value| {
if (value.is_comptime) continue;
if (value.ty.hasRuntimeBitsAdvanced(ignore_comptime_only))
if (try value.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, sema_kit))
return true;
} else {
return false;
@@ -2368,14 +2395,17 @@ pub const Type = extern union {
.enum_numbered, .enum_nonexhaustive => {
var buffer: Payload.Bits = undefined;
const int_tag_ty = ty.intTagType(&buffer);
return int_tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only);
return int_tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, sema_kit);
},
.@"union" => {
const union_obj = ty.castTag(.@"union").?.data;
if (sema_kit) |sk| {
_ = try sk.sema.resolveTypeFields(sk.block, sk.src, ty);
}
assert(union_obj.haveFieldTypes());
for (union_obj.fields.values()) |value| {
if (value.ty.hasRuntimeBitsAdvanced(ignore_comptime_only))
if (try value.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, sema_kit))
return true;
} else {
return false;
@@ -2383,29 +2413,32 @@ pub const Type = extern union {
},
.union_tagged => {
const union_obj = ty.castTag(.union_tagged).?.data;
if (union_obj.tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only)) {
if (try union_obj.tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, sema_kit)) {
return true;
}
if (sema_kit) |sk| {
_ = try sk.sema.resolveTypeFields(sk.block, sk.src, ty);
}
assert(union_obj.haveFieldTypes());
for (union_obj.fields.values()) |value| {
if (value.ty.hasRuntimeBitsAdvanced(ignore_comptime_only))
if (try value.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, sema_kit))
return true;
} else {
return false;
}
},
.array, .vector => ty.arrayLen() != 0 and
ty.elemType().hasRuntimeBitsAdvanced(ignore_comptime_only),
.array_u8 => ty.arrayLen() != 0,
.array_sentinel => ty.childType().hasRuntimeBitsAdvanced(ignore_comptime_only),
.array, .vector => return ty.arrayLen() != 0 and
try ty.elemType().hasRuntimeBitsAdvanced(ignore_comptime_only, sema_kit),
.array_u8 => return ty.arrayLen() != 0,
.array_sentinel => return ty.childType().hasRuntimeBitsAdvanced(ignore_comptime_only, sema_kit),
.int_signed, .int_unsigned => ty.cast(Payload.Bits).?.data != 0,
.int_signed, .int_unsigned => return ty.cast(Payload.Bits).?.data != 0,
.error_union => {
const payload = ty.castTag(.error_union).?.data;
return payload.error_set.hasRuntimeBitsAdvanced(ignore_comptime_only) or
payload.payload.hasRuntimeBitsAdvanced(ignore_comptime_only);
return (try payload.error_set.hasRuntimeBitsAdvanced(ignore_comptime_only, sema_kit)) or
(try payload.payload.hasRuntimeBitsAdvanced(ignore_comptime_only, sema_kit));
},
.tuple, .anon_struct => {
@@ -2413,7 +2446,7 @@ pub const Type = extern union {
for (tuple.types) |field_ty, i| {
const val = tuple.values[i];
if (val.tag() != .unreachable_value) continue; // comptime field
if (field_ty.hasRuntimeBitsAdvanced(ignore_comptime_only)) return true;
if (try field_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, sema_kit)) return true;
}
return false;
},
@@ -2422,7 +2455,7 @@ pub const Type = extern union {
.inferred_alloc_mut => unreachable,
.var_args_param => unreachable,
.generic_poison => unreachable,
};
}
}
/// true if and only if the type has a well-defined memory layout
@@ -2548,11 +2581,11 @@ pub const Type = extern union {
}
pub fn hasRuntimeBits(ty: Type) bool {
return hasRuntimeBitsAdvanced(ty, false);
return hasRuntimeBitsAdvanced(ty, false, null) catch unreachable;
}
pub fn hasRuntimeBitsIgnoreComptime(ty: Type) bool {
return hasRuntimeBitsAdvanced(ty, true);
return hasRuntimeBitsAdvanced(ty, true, null) catch unreachable;
}
pub fn isFnOrHasRuntimeBits(ty: Type) bool {
@@ -4538,6 +4571,8 @@ pub const Type = extern union {
/// During semantic analysis, instead call `Sema.typeRequiresComptime` which
/// resolves field types rather than asserting they are already resolved.
/// TODO merge these implementations together with the "advanced" pattern seen
/// elsewhere in this file.
pub fn comptimeOnly(ty: Type) bool {
return switch (ty.tag()) {
.u1,

View File

@@ -9,6 +9,7 @@ const Allocator = std.mem.Allocator;
const Module = @import("Module.zig");
const Air = @import("Air.zig");
const TypedValue = @import("TypedValue.zig");
const Sema = @import("Sema.zig");
/// This is the raw data, with no bookkeeping, no memory awareness,
/// no de-duplication, and no type system awareness.
@@ -990,6 +991,16 @@ pub const Value = extern union {
/// Asserts the value is an integer.
pub fn toBigInt(val: Value, space: *BigIntSpace, target: Target) BigIntConst {
// Passing null for sema_kit asserts all involved types are already
// resolved, so the call cannot fail.
return val.toBigIntAdvanced(space, target, null) catch unreachable;
}
/// Asserts the value is an integer.
pub fn toBigIntAdvanced(
val: Value,
space: *BigIntSpace,
target: Target,
sema_kit: ?Module.WipAnalysis,
) !BigIntConst {
switch (val.tag()) {
.zero,
.bool_false,
@@ -1008,7 +1019,11 @@ pub const Value = extern union {
.undef => unreachable,
.lazy_align => {
const x = val.castTag(.lazy_align).?.data.abiAlignment(target);
const ty = val.castTag(.lazy_align).?.data;
if (sema_kit) |sk| {
try sk.sema.resolveTypeLayout(sk.block, sk.src, ty);
}
const x = ty.abiAlignment(target);
return BigIntMutable.init(&space.limbs, x).toConst();
},
@@ -1019,6 +1034,12 @@ pub const Value = extern union {
/// If the value fits in a u64, return it, otherwise null.
/// Asserts not undefined.
pub fn getUnsignedInt(val: Value, target: Target) ?u64 {
// Passing null for sema_kit asserts all involved types are already
// resolved, so the call cannot fail.
return getUnsignedIntAdvanced(val, target, null) catch unreachable;
}
/// If the value fits in a u64, return it, otherwise null.
/// Asserts not undefined.
pub fn getUnsignedIntAdvanced(val: Value, target: Target, sema_kit: ?Module.WipAnalysis) !?u64 {
switch (val.tag()) {
.zero,
.bool_false,
@@ -1036,7 +1057,13 @@ pub const Value = extern union {
.undef => unreachable,
.lazy_align => return val.castTag(.lazy_align).?.data.abiAlignment(target),
.lazy_align => {
const ty = val.castTag(.lazy_align).?.data;
if (sema_kit) |sk| {
try sk.sema.resolveTypeLayout(sk.block, sk.src, ty);
}
return ty.abiAlignment(target);
},
else => return null,
}
@@ -1777,6 +1804,10 @@ pub const Value = extern union {
}
pub fn orderAgainstZero(lhs: Value) std.math.Order {
// Passing null for sema_kit asserts all involved types are already
// resolved, so the call cannot fail.
return orderAgainstZeroAdvanced(lhs, null) catch unreachable;
}
pub fn orderAgainstZeroAdvanced(lhs: Value, sema_kit: ?Module.WipAnalysis) !std.math.Order {
return switch (lhs.tag()) {
.zero,
.bool_false,
@@ -1799,7 +1830,7 @@ pub const Value = extern union {
.lazy_align => {
const ty = lhs.castTag(.lazy_align).?.data;
if (ty.hasRuntimeBitsIgnoreComptime()) {
if (try ty.hasRuntimeBitsAdvanced(false, sema_kit)) {
return .gt;
} else {
return .eq;
@@ -1818,10 +1849,16 @@ pub const Value = extern union {
/// Asserts the value is comparable.
pub fn order(lhs: Value, rhs: Value, target: Target) std.math.Order {
// Passing null for sema_kit asserts all involved types are already
// resolved, so the call cannot fail.
return orderAdvanced(lhs, rhs, target, null) catch unreachable;
}
/// Asserts the value is comparable.
/// If sema_kit is null then this function asserts things are resolved and cannot fail.
pub fn orderAdvanced(lhs: Value, rhs: Value, target: Target, sema_kit: ?Module.WipAnalysis) !std.math.Order {
const lhs_tag = lhs.tag();
const rhs_tag = rhs.tag();
const lhs_against_zero = lhs.orderAgainstZero();
const rhs_against_zero = rhs.orderAgainstZero();
const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(sema_kit);
const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(sema_kit);
switch (lhs_against_zero) {
.lt => if (rhs_against_zero != .lt) return .lt,
.eq => return rhs_against_zero.invert(),
@@ -1855,14 +1892,24 @@ pub const Value = extern union {
var lhs_bigint_space: BigIntSpace = undefined;
var rhs_bigint_space: BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_bigint_space, target);
const rhs_bigint = rhs.toBigInt(&rhs_bigint_space, target);
const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, target, sema_kit);
const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, target, sema_kit);
return lhs_bigint.order(rhs_bigint);
}
/// Asserts the value is comparable. Does not take a type parameter because it supports
/// comparisons between heterogeneous types.
pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, target: Target) bool {
// Passing null for sema_kit asserts all involved types are already
// resolved, so the call cannot fail.
return compareHeteroAdvanced(lhs, op, rhs, target, null) catch unreachable;
}
pub fn compareHeteroAdvanced(
lhs: Value,
op: std.math.CompareOperator,
rhs: Value,
target: Target,
sema_kit: ?Module.WipAnalysis,
) !bool {
if (lhs.pointerDecl()) |lhs_decl| {
if (rhs.pointerDecl()) |rhs_decl| {
switch (op) {
@@ -1884,7 +1931,7 @@ pub const Value = extern union {
else => {},
}
}
return order(lhs, rhs, target).compare(op);
return (try orderAdvanced(lhs, rhs, target, sema_kit)).compare(op);
}
/// Asserts the values are comparable. Both operands have type `ty`.

View File

@@ -1,3 +1,4 @@
const builtin = @import("builtin");
const expect = @import("std").testing.expect;
const Node = struct {
@@ -11,6 +12,10 @@ const NodeAligned = struct {
};
test "struct contains slice of itself" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
var other_nodes = [_]Node{
Node{
.payload = 31,
@@ -48,6 +53,10 @@ test "struct contains slice of itself" {
}
test "struct contains aligned slice of itself" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
var other_nodes = [_]NodeAligned{
NodeAligned{
.payload = 31,