InternPool: improve hashing performance

Key.PtrType is now an extern struct so that hashing it can be done by
reinterpreting its bytes directly. The same representation is now shared
by the type_pointer Tag encoding and the Key. Accessing pointer
attributes now requires packed struct field access; in exchange, many
operations become a copy of a single u32 rather than of several
independent fields.
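
For illustration, a minimal sketch (not code from this commit; ExamplePtrType
and its fields are made up) of how an extern struct with a packed flags word
can be hashed by reinterpreting its bytes with std.mem.asBytes and the
standard library's one-shot Wyhash:

const std = @import("std");

// Hypothetical stand-in for Key.PtrType: extern layout gives the value a
// defined byte representation, and the packed struct keeps all boolean
// attributes in a single u32.
const ExamplePtrType = extern struct {
    child: u32,
    sentinel: u32 = 0,
    flags: Flags = .{},

    pub const Flags = packed struct(u32) {
        is_const: bool = false,
        is_volatile: bool = false,
        unused: u30 = 0,
    };
};

fn hashExamplePtrType(seed: u64, x: ExamplePtrType) u64 {
    // One pass over the raw bytes instead of a per-field autoHash walk.
    return std.hash.Wyhash.hash(seed, std.mem.asBytes(&x));
}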

This commit moves the two most heavily used Key variants - pointer types
and pointer values - to a single-shot hash function that branches on
small key sizes instead of calling memcpy.
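
The shape of that dispatch, as a hedged sketch reusing ExamplePtrType from
above (the real version is in the InternPool.zig diff below): the union tag
value becomes the seed, and fixed-size payloads are hashed with a single call
over their bytes.

const ExampleKey = union(enum) {
    ptr_type: ExamplePtrType,
    some_value: u64,
};

fn hashExampleKey(key: ExampleKey) u64 {
    const Tag = @typeInfo(ExampleKey).Union.tag_type.?;
    // Seeding with the tag keeps different variants from colliding even when
    // their payload bytes happen to match.
    const seed = @enumToInt(@as(Tag, key));
    return switch (key) {
        .ptr_type => |x| std.hash.Wyhash.hash(seed, std.mem.asBytes(&x)),
        .some_value => |x| std.hash.Wyhash.hash(seed, std.mem.asBytes(&x)),
    };
}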

As a result, performance against the merge base went from 1.17x ± 0.04
slower to 1.12x ± 0.04 slower. After the pointer value hashing was
changed, the share of total CPU instructions spent in memcpy went from
4.40% to 4.08%; after additionally improving pointer type hashing, it
decreased further to 3.72%.
Author: Andrew Kelley
Date: 2023-05-30 20:23:51 -07:00
parent c7d65fa368
commit 82f6f164a1
8 changed files with 527 additions and 392 deletions

@@ -249,35 +249,47 @@ pub const Key = union(enum) {
}
};
pub const PtrType = struct {
elem_type: Index,
/// Extern layout so it can be hashed with `std.mem.asBytes`.
pub const PtrType = extern struct {
child: Index,
sentinel: Index = .none,
/// `none` indicates the ABI alignment of the pointee_type. In this
/// case, this field *must* be set to `none`, otherwise the
/// `InternPool` equality and hashing functions will return incorrect
/// results.
alignment: Alignment = .none,
/// If this is non-zero it means the pointer points to a sub-byte
/// range of data, which is backed by a "host integer" with this
/// number of bytes.
/// When host_size=pointee_abi_size and bit_offset=0, this must be
/// represented with host_size=0 instead.
host_size: u16 = 0,
bit_offset: u16 = 0,
vector_index: VectorIndex = .none,
size: std.builtin.Type.Pointer.Size = .One,
is_const: bool = false,
is_volatile: bool = false,
is_allowzero: bool = false,
/// See src/target.zig defaultAddressSpace function for how to obtain
/// an appropriate value for this field.
address_space: std.builtin.AddressSpace = .generic,
flags: Flags = .{},
packed_offset: PackedOffset = .{ .bit_offset = 0, .host_size = 0 },
pub const VectorIndex = enum(u16) {
none = std.math.maxInt(u16),
runtime = std.math.maxInt(u16) - 1,
_,
};
pub const Flags = packed struct(u32) {
size: Size = .One,
/// `none` indicates the ABI alignment of the pointee_type. In this
/// case, this field *must* be set to `none`, otherwise the
/// `InternPool` equality and hashing functions will return incorrect
/// results.
alignment: Alignment = .none,
is_const: bool = false,
is_volatile: bool = false,
is_allowzero: bool = false,
/// See src/target.zig defaultAddressSpace function for how to obtain
/// an appropriate value for this field.
address_space: AddressSpace = .generic,
vector_index: VectorIndex = .none,
};
pub const PackedOffset = packed struct(u32) {
/// If this is non-zero it means the pointer points to a sub-byte
/// range of data, which is backed by a "host integer" with this
/// number of bytes.
/// When host_size=pointee_abi_size and bit_offset=0, this must be
/// represented with host_size=0 instead.
host_size: u16,
bit_offset: u16,
};
pub const Size = std.builtin.Type.Pointer.Size;
pub const AddressSpace = std.builtin.AddressSpace;
};
pub const ArrayType = struct {
@@ -635,17 +647,13 @@ pub const Key = union(enum) {
}
pub fn hash64(key: Key, ip: *const InternPool) u64 {
var hasher = std.hash.Wyhash.init(0);
key.hashWithHasher(&hasher, ip);
return hasher.final();
}
pub fn hashWithHasher(key: Key, hasher: *std.hash.Wyhash, ip: *const InternPool) void {
const asBytes = std.mem.asBytes;
const KeyTag = @typeInfo(Key).Union.tag_type.?;
std.hash.autoHash(hasher, @as(KeyTag, key));
const seed = @enumToInt(@as(KeyTag, key));
switch (key) {
.ptr_type => |x| return WyhashKing.hash(seed, asBytes(&x)),
inline .int_type,
.ptr_type,
.array_type,
.vector_type,
.opt_type,
@@ -663,73 +671,110 @@ pub const Key = union(enum) {
.enum_literal,
.enum_tag,
.inferred_error_set_type,
=> |info| std.hash.autoHash(hasher, info),
=> |info| {
var hasher = std.hash.Wyhash.init(seed);
std.hash.autoHash(&hasher, info);
return hasher.final();
},
.runtime_value => |runtime_value| std.hash.autoHash(hasher, runtime_value.val),
.opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl),
.enum_type => |enum_type| std.hash.autoHash(hasher, enum_type.decl),
.runtime_value => |runtime_value| {
var hasher = std.hash.Wyhash.init(seed);
std.hash.autoHash(&hasher, runtime_value.val);
return hasher.final();
},
.opaque_type => |opaque_type| {
var hasher = std.hash.Wyhash.init(seed);
std.hash.autoHash(&hasher, opaque_type.decl);
return hasher.final();
},
.enum_type => |enum_type| {
var hasher = std.hash.Wyhash.init(seed);
std.hash.autoHash(&hasher, enum_type.decl);
return hasher.final();
},
.variable => |variable| std.hash.autoHash(hasher, variable.decl),
.variable => |variable| {
var hasher = std.hash.Wyhash.init(seed);
std.hash.autoHash(&hasher, variable.decl);
return hasher.final();
},
.extern_func => |extern_func| {
std.hash.autoHash(hasher, extern_func.ty);
std.hash.autoHash(hasher, extern_func.decl);
var hasher = std.hash.Wyhash.init(seed);
std.hash.autoHash(&hasher, extern_func.ty);
std.hash.autoHash(&hasher, extern_func.decl);
return hasher.final();
},
.func => |func| {
std.hash.autoHash(hasher, func.ty);
std.hash.autoHash(hasher, func.index);
var hasher = std.hash.Wyhash.init(seed);
std.hash.autoHash(&hasher, func.ty);
std.hash.autoHash(&hasher, func.index);
return hasher.final();
},
.int => |int| {
var hasher = std.hash.Wyhash.init(seed);
// Canonicalize all integers by converting them to BigIntConst.
switch (int.storage) {
.u64, .i64, .big_int => {
var buffer: Key.Int.Storage.BigIntSpace = undefined;
const big_int = int.storage.toBigInt(&buffer);
std.hash.autoHash(hasher, int.ty);
std.hash.autoHash(hasher, big_int.positive);
for (big_int.limbs) |limb| std.hash.autoHash(hasher, limb);
std.hash.autoHash(&hasher, int.ty);
std.hash.autoHash(&hasher, big_int.positive);
for (big_int.limbs) |limb| std.hash.autoHash(&hasher, limb);
},
.lazy_align, .lazy_size => |lazy_ty| {
std.hash.autoHash(
hasher,
&hasher,
@as(@typeInfo(Key.Int.Storage).Union.tag_type.?, int.storage),
);
std.hash.autoHash(hasher, lazy_ty);
std.hash.autoHash(&hasher, lazy_ty);
},
}
return hasher.final();
},
.float => |float| {
std.hash.autoHash(hasher, float.ty);
var hasher = std.hash.Wyhash.init(seed);
std.hash.autoHash(&hasher, float.ty);
switch (float.storage) {
inline else => |val| std.hash.autoHash(
hasher,
&hasher,
@bitCast(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(val))), val),
),
}
return hasher.final();
},
.ptr => |ptr| {
std.hash.autoHash(hasher, ptr.ty);
std.hash.autoHash(hasher, ptr.len);
// Int-to-ptr pointers are hashed separately from decl-referencing pointers.
// This is sound due to pointer provenance rules.
std.hash.autoHash(hasher, @as(@typeInfo(Key.Ptr.Addr).Union.tag_type.?, ptr.addr));
switch (ptr.addr) {
.decl => |decl| std.hash.autoHash(hasher, decl),
.mut_decl => |mut_decl| std.hash.autoHash(hasher, mut_decl),
.int => |int| std.hash.autoHash(hasher, int),
.eu_payload => |eu_payload| std.hash.autoHash(hasher, eu_payload),
.opt_payload => |opt_payload| std.hash.autoHash(hasher, opt_payload),
.comptime_field => |comptime_field| std.hash.autoHash(hasher, comptime_field),
.elem => |elem| std.hash.autoHash(hasher, elem),
.field => |field| std.hash.autoHash(hasher, field),
}
const addr: @typeInfo(Key.Ptr.Addr).Union.tag_type.? = ptr.addr;
const seed2 = seed + @enumToInt(addr);
const common = asBytes(&ptr.ty) ++ asBytes(&ptr.len);
return switch (ptr.addr) {
.decl => |x| WyhashKing.hash(seed2, common ++ asBytes(&x)),
.mut_decl => |x| WyhashKing.hash(
seed2,
asBytes(&x.decl) ++ asBytes(&x.runtime_index),
),
.int, .eu_payload, .opt_payload, .comptime_field => |int| WyhashKing.hash(
seed2,
asBytes(&int),
),
.elem, .field => |x| WyhashKing.hash(
seed2,
asBytes(&x.base) ++ asBytes(&x.index),
),
};
},
.aggregate => |aggregate| {
std.hash.autoHash(hasher, aggregate.ty);
var hasher = std.hash.Wyhash.init(seed);
std.hash.autoHash(&hasher, aggregate.ty);
const len = ip.aggregateTypeLen(aggregate.ty);
const child = switch (ip.indexToKey(aggregate.ty)) {
.array_type => |array_type| array_type.child,
@@ -741,16 +786,16 @@ pub const Key = union(enum) {
if (child == .u8_type) {
switch (aggregate.storage) {
.bytes => |bytes| for (bytes[0..@intCast(usize, len)]) |byte| {
std.hash.autoHash(hasher, KeyTag.int);
std.hash.autoHash(hasher, byte);
std.hash.autoHash(&hasher, KeyTag.int);
std.hash.autoHash(&hasher, byte);
},
.elems => |elems| for (elems[0..@intCast(usize, len)]) |elem| {
const elem_key = ip.indexToKey(elem);
std.hash.autoHash(hasher, @as(KeyTag, elem_key));
std.hash.autoHash(&hasher, @as(KeyTag, elem_key));
switch (elem_key) {
.undef => {},
.int => |int| std.hash.autoHash(
hasher,
&hasher,
@intCast(u8, int.storage.u64),
),
else => unreachable,
@@ -760,11 +805,11 @@ pub const Key = union(enum) {
const elem_key = ip.indexToKey(elem);
var remaining = len;
while (remaining > 0) : (remaining -= 1) {
std.hash.autoHash(hasher, @as(KeyTag, elem_key));
std.hash.autoHash(&hasher, @as(KeyTag, elem_key));
switch (elem_key) {
.undef => {},
.int => |int| std.hash.autoHash(
hasher,
&hasher,
@intCast(u8, int.storage.u64),
),
else => unreachable,
@@ -772,47 +817,60 @@ pub const Key = union(enum) {
}
},
}
return;
return hasher.final();
}
switch (aggregate.storage) {
.bytes => unreachable,
.elems => |elems| for (elems[0..@intCast(usize, len)]) |elem|
std.hash.autoHash(hasher, elem),
std.hash.autoHash(&hasher, elem),
.repeated_elem => |elem| {
var remaining = len;
while (remaining > 0) : (remaining -= 1) std.hash.autoHash(hasher, elem);
while (remaining > 0) : (remaining -= 1) std.hash.autoHash(&hasher, elem);
},
}
return hasher.final();
},
.error_set_type => |error_set_type| {
for (error_set_type.names) |elem| std.hash.autoHash(hasher, elem);
var hasher = std.hash.Wyhash.init(seed);
for (error_set_type.names) |elem| std.hash.autoHash(&hasher, elem);
return hasher.final();
},
.anon_struct_type => |anon_struct_type| {
for (anon_struct_type.types) |elem| std.hash.autoHash(hasher, elem);
for (anon_struct_type.values) |elem| std.hash.autoHash(hasher, elem);
for (anon_struct_type.names) |elem| std.hash.autoHash(hasher, elem);
var hasher = std.hash.Wyhash.init(seed);
for (anon_struct_type.types) |elem| std.hash.autoHash(&hasher, elem);
for (anon_struct_type.values) |elem| std.hash.autoHash(&hasher, elem);
for (anon_struct_type.names) |elem| std.hash.autoHash(&hasher, elem);
return hasher.final();
},
.func_type => |func_type| {
for (func_type.param_types) |param_type| std.hash.autoHash(hasher, param_type);
std.hash.autoHash(hasher, func_type.return_type);
std.hash.autoHash(hasher, func_type.comptime_bits);
std.hash.autoHash(hasher, func_type.noalias_bits);
std.hash.autoHash(hasher, func_type.alignment);
std.hash.autoHash(hasher, func_type.cc);
std.hash.autoHash(hasher, func_type.is_var_args);
std.hash.autoHash(hasher, func_type.is_generic);
std.hash.autoHash(hasher, func_type.is_noinline);
var hasher = std.hash.Wyhash.init(seed);
for (func_type.param_types) |param_type| std.hash.autoHash(&hasher, param_type);
std.hash.autoHash(&hasher, func_type.return_type);
std.hash.autoHash(&hasher, func_type.comptime_bits);
std.hash.autoHash(&hasher, func_type.noalias_bits);
std.hash.autoHash(&hasher, func_type.alignment);
std.hash.autoHash(&hasher, func_type.cc);
std.hash.autoHash(&hasher, func_type.is_var_args);
std.hash.autoHash(&hasher, func_type.is_generic);
std.hash.autoHash(&hasher, func_type.is_noinline);
return hasher.final();
},
.memoized_decl => |memoized_decl| std.hash.autoHash(hasher, memoized_decl.val),
.memoized_decl => |memoized_decl| {
var hasher = std.hash.Wyhash.init(seed);
std.hash.autoHash(&hasher, memoized_decl.val);
return hasher.final();
},
.memoized_call => |memoized_call| {
std.hash.autoHash(hasher, memoized_call.func);
for (memoized_call.arg_values) |arg| std.hash.autoHash(hasher, arg);
var hasher = std.hash.Wyhash.init(seed);
std.hash.autoHash(&hasher, memoized_call.func);
for (memoized_call.arg_values) |arg| std.hash.autoHash(&hasher, arg);
return hasher.final();
},
}
}
@@ -1340,7 +1398,7 @@ pub const Index = enum(u32) {
type_array_big: struct { data: *Array },
type_array_small: struct { data: *Vector },
type_vector: struct { data: *Vector },
type_pointer: struct { data: *Pointer },
type_pointer: struct { data: *Tag.TypePointer },
type_slice: DataIsIndex,
type_optional: DataIsIndex,
type_anyframe: DataIsIndex,
@@ -1564,44 +1622,56 @@ pub const static_keys = [_]Key{
.{ .simple_type = .type_info },
.{ .ptr_type = .{
.elem_type = .u8_type,
.size = .Many,
.child = .u8_type,
.flags = .{
.size = .Many,
},
} },
// manyptr_const_u8_type
.{ .ptr_type = .{
.elem_type = .u8_type,
.size = .Many,
.is_const = true,
.child = .u8_type,
.flags = .{
.size = .Many,
.is_const = true,
},
} },
// manyptr_const_u8_sentinel_0_type
.{ .ptr_type = .{
.elem_type = .u8_type,
.child = .u8_type,
.sentinel = .zero_u8,
.size = .Many,
.is_const = true,
.flags = .{
.size = .Many,
.is_const = true,
},
} },
.{ .ptr_type = .{
.elem_type = .comptime_int_type,
.size = .One,
.is_const = true,
.child = .comptime_int_type,
.flags = .{
.size = .One,
.is_const = true,
},
} },
// slice_const_u8_type
.{ .ptr_type = .{
.elem_type = .u8_type,
.size = .Slice,
.is_const = true,
.child = .u8_type,
.flags = .{
.size = .Slice,
.is_const = true,
},
} },
// slice_const_u8_sentinel_0_type
.{ .ptr_type = .{
.elem_type = .u8_type,
.child = .u8_type,
.sentinel = .zero_u8,
.size = .Slice,
.is_const = true,
.flags = .{
.size = .Slice,
.is_const = true,
},
} },
// anyerror_void_error_union_type
@@ -1702,7 +1772,6 @@ pub const Tag = enum(u8) {
/// data is payload to Vector.
type_vector,
/// A fully explicitly specified pointer type.
/// data is payload to Pointer.
type_pointer,
/// A slice type.
/// data is Index of underlying pointer type.
@@ -1941,6 +2010,7 @@ pub const Tag = enum(u8) {
const Func = Key.Func;
const Union = Key.Union;
const MemoizedDecl = Key.MemoizedDecl;
const TypePointer = Key.PtrType;
fn Payload(comptime tag: Tag) type {
return switch (tag) {
@@ -1949,7 +2019,7 @@ pub const Tag = enum(u8) {
.type_array_big => Array,
.type_array_small => Vector,
.type_vector => Vector,
.type_pointer => Pointer,
.type_pointer => TypePointer,
.type_slice => unreachable,
.type_optional => unreachable,
.type_anyframe => unreachable,
@@ -2167,32 +2237,6 @@ pub const SimpleValue = enum(u32) {
generic_poison,
};
pub const Pointer = struct {
child: Index,
sentinel: Index,
flags: Flags,
packed_offset: PackedOffset,
pub const Flags = packed struct(u32) {
size: Size,
alignment: Alignment,
is_const: bool,
is_volatile: bool,
is_allowzero: bool,
address_space: AddressSpace,
vector_index: VectorIndex,
};
pub const PackedOffset = packed struct(u32) {
host_size: u16,
bit_offset: u16,
};
pub const Size = std.builtin.Type.Pointer.Size;
pub const AddressSpace = std.builtin.AddressSpace;
pub const VectorIndex = Key.PtrType.VectorIndex;
};
/// Stored as a power-of-two, with one special value to indicate none.
pub const Alignment = enum(u6) {
none = std.math.maxInt(u6),
@@ -2531,39 +2575,13 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
} };
},
.type_pointer => {
const ptr_info = ip.extraData(Pointer, data);
return .{ .ptr_type = .{
.elem_type = ptr_info.child,
.sentinel = ptr_info.sentinel,
.alignment = ptr_info.flags.alignment,
.size = ptr_info.flags.size,
.is_const = ptr_info.flags.is_const,
.is_volatile = ptr_info.flags.is_volatile,
.is_allowzero = ptr_info.flags.is_allowzero,
.address_space = ptr_info.flags.address_space,
.vector_index = ptr_info.flags.vector_index,
.host_size = ptr_info.packed_offset.host_size,
.bit_offset = ptr_info.packed_offset.bit_offset,
} };
},
.type_pointer => .{ .ptr_type = ip.extraData(Tag.TypePointer, data) },
.type_slice => {
assert(ip.items.items(.tag)[data] == .type_pointer);
const ptr_info = ip.extraData(Pointer, ip.items.items(.data)[data]);
return .{ .ptr_type = .{
.elem_type = ptr_info.child,
.sentinel = ptr_info.sentinel,
.alignment = ptr_info.flags.alignment,
.size = .Slice,
.is_const = ptr_info.flags.is_const,
.is_volatile = ptr_info.flags.is_volatile,
.is_allowzero = ptr_info.flags.is_allowzero,
.address_space = ptr_info.flags.address_space,
.vector_index = ptr_info.flags.vector_index,
.host_size = ptr_info.packed_offset.host_size,
.bit_offset = ptr_info.packed_offset.bit_offset,
} };
var ptr_info = ip.extraData(Tag.TypePointer, ip.items.items(.data)[data]);
ptr_info.flags.size = .Slice;
return .{ .ptr_type = ptr_info };
},
.type_optional => .{ .opt_type = @intToEnum(Index, data) },
@@ -3066,13 +3084,13 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
});
},
.ptr_type => |ptr_type| {
assert(ptr_type.elem_type != .none);
assert(ptr_type.sentinel == .none or ip.typeOf(ptr_type.sentinel) == ptr_type.elem_type);
assert(ptr_type.child != .none);
assert(ptr_type.sentinel == .none or ip.typeOf(ptr_type.sentinel) == ptr_type.child);
if (ptr_type.size == .Slice) {
if (ptr_type.flags.size == .Slice) {
_ = ip.map.pop();
var new_key = key;
new_key.ptr_type.size = .Many;
new_key.ptr_type.flags.size = .Many;
const ptr_type_index = try ip.get(gpa, new_key);
assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing);
try ip.items.ensureUnusedCapacity(gpa, 1);
@@ -3083,27 +3101,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
return @intToEnum(Index, ip.items.len - 1);
}
const is_allowzero = ptr_type.is_allowzero or ptr_type.size == .C;
var ptr_type_adjusted = ptr_type;
if (ptr_type.flags.size == .C) ptr_type_adjusted.flags.is_allowzero = true;
ip.items.appendAssumeCapacity(.{
.tag = .type_pointer,
.data = try ip.addExtra(gpa, Pointer{
.child = ptr_type.elem_type,
.sentinel = ptr_type.sentinel,
.flags = .{
.alignment = ptr_type.alignment,
.is_const = ptr_type.is_const,
.is_volatile = ptr_type.is_volatile,
.is_allowzero = is_allowzero,
.size = ptr_type.size,
.address_space = ptr_type.address_space,
.vector_index = ptr_type.vector_index,
},
.packed_offset = .{
.host_size = ptr_type.host_size,
.bit_offset = ptr_type.bit_offset,
},
}),
.data = try ip.addExtra(gpa, ptr_type_adjusted),
});
},
.array_type => |array_type| {
@@ -3379,7 +3382,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
const ptr_type = ip.indexToKey(ptr.ty).ptr_type;
switch (ptr.len) {
.none => {
assert(ptr_type.size != .Slice);
assert(ptr_type.flags.size != .Slice);
switch (ptr.addr) {
.decl => |decl| ip.items.appendAssumeCapacity(.{
.tag = .ptr_decl,
@@ -3410,10 +3413,10 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
switch (ptr.addr) {
.int => assert(ip.typeOf(base) == .usize_type),
.eu_payload => assert(ip.indexToKey(
ip.indexToKey(ip.typeOf(base)).ptr_type.elem_type,
ip.indexToKey(ip.typeOf(base)).ptr_type.child,
) == .error_union_type),
.opt_payload => assert(ip.indexToKey(
ip.indexToKey(ip.typeOf(base)).ptr_type.elem_type,
ip.indexToKey(ip.typeOf(base)).ptr_type.child,
) == .opt_type),
else => unreachable,
}
@@ -3433,10 +3436,10 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
.elem, .field => |base_index| {
const base_ptr_type = ip.indexToKey(ip.typeOf(base_index.base)).ptr_type;
switch (ptr.addr) {
.elem => assert(base_ptr_type.size == .Many),
.elem => assert(base_ptr_type.flags.size == .Many),
.field => {
assert(base_ptr_type.size == .One);
switch (ip.indexToKey(base_ptr_type.elem_type)) {
assert(base_ptr_type.flags.size == .One);
switch (ip.indexToKey(base_ptr_type.child)) {
.anon_struct_type => |anon_struct_type| {
assert(ptr.addr == .field);
assert(base_index.index < anon_struct_type.types.len);
@@ -3451,7 +3454,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
},
.ptr_type => |slice_type| {
assert(ptr.addr == .field);
assert(slice_type.size == .Slice);
assert(slice_type.flags.size == .Slice);
assert(base_index.index < 2);
},
else => unreachable,
@@ -3485,12 +3488,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
// TODO: change Key.Ptr for slices to reference the manyptr value
// rather than having an addr field directly. Then we can avoid
// these problematic calls to pop(), get(), and getOrPutAdapted().
assert(ptr_type.size == .Slice);
assert(ptr_type.flags.size == .Slice);
_ = ip.map.pop();
var new_key = key;
new_key.ptr.ty = ip.slicePtrType(ptr.ty);
new_key.ptr.len = .none;
assert(ip.indexToKey(new_key.ptr.ty).ptr_type.size == .Many);
assert(ip.indexToKey(new_key.ptr.ty).ptr_type.flags.size == .Many);
const ptr_index = try ip.get(gpa, new_key);
assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing);
try ip.items.ensureUnusedCapacity(gpa, 1);
@@ -4302,10 +4305,10 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
NullTerminatedString => @enumToInt(@field(extra, field.name)),
OptionalNullTerminatedString => @enumToInt(@field(extra, field.name)),
i32 => @bitCast(u32, @field(extra, field.name)),
Pointer.Flags => @bitCast(u32, @field(extra, field.name)),
Tag.TypePointer.Flags => @bitCast(u32, @field(extra, field.name)),
TypeFunction.Flags => @bitCast(u32, @field(extra, field.name)),
Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)),
Pointer.VectorIndex => @enumToInt(@field(extra, field.name)),
Tag.TypePointer.PackedOffset => @bitCast(u32, @field(extra, field.name)),
Tag.TypePointer.VectorIndex => @enumToInt(@field(extra, field.name)),
Tag.Variable.Flags => @bitCast(u32, @field(extra, field.name)),
else => @compileError("bad field type: " ++ @typeName(field.type)),
});
@@ -4370,10 +4373,10 @@ fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct
NullTerminatedString => @intToEnum(NullTerminatedString, int32),
OptionalNullTerminatedString => @intToEnum(OptionalNullTerminatedString, int32),
i32 => @bitCast(i32, int32),
Pointer.Flags => @bitCast(Pointer.Flags, int32),
Tag.TypePointer.Flags => @bitCast(Tag.TypePointer.Flags, int32),
TypeFunction.Flags => @bitCast(TypeFunction.Flags, int32),
Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32),
Pointer.VectorIndex => @intToEnum(Pointer.VectorIndex, int32),
Tag.TypePointer.PackedOffset => @bitCast(Tag.TypePointer.PackedOffset, int32),
Tag.TypePointer.VectorIndex => @intToEnum(Tag.TypePointer.VectorIndex, int32),
Tag.Variable.Flags => @bitCast(Tag.Variable.Flags, int32),
else => @compileError("bad field type: " ++ @typeName(field.type)),
};
@@ -4487,7 +4490,7 @@ test "basic usage" {
pub fn childType(ip: *const InternPool, i: Index) Index {
return switch (ip.indexToKey(i)) {
.ptr_type => |ptr_type| ptr_type.elem_type,
.ptr_type => |ptr_type| ptr_type.child,
.vector_type => |vector_type| vector_type.child,
.array_type => |array_type| array_type.child,
.opt_type, .anyframe_type => |child| child,
@@ -4559,7 +4562,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
return ip.get(gpa, .{ .ptr = .{
.ty = new_ty,
.addr = .{ .int = .zero_usize },
.len = switch (ip.indexToKey(new_ty).ptr_type.size) {
.len = switch (ip.indexToKey(new_ty).ptr_type.flags.size) {
.One, .Many, .C => .none,
.Slice => try ip.get(gpa, .{ .undef = .usize_type }),
},
@@ -4623,7 +4626,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
.none => try ip.get(gpa, .{ .ptr = .{
.ty = new_ty,
.addr = .{ .int = .zero_usize },
.len = switch (ip.indexToKey(new_ty).ptr_type.size) {
.len = switch (ip.indexToKey(new_ty).ptr_type.flags.size) {
.One, .Many, .C => .none,
.Slice => try ip.get(gpa, .{ .undef = .usize_type }),
},
@@ -4889,7 +4892,7 @@ fn dumpFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
.type_array_small => @sizeOf(Vector),
.type_array_big => @sizeOf(Array),
.type_vector => @sizeOf(Vector),
.type_pointer => @sizeOf(Pointer),
.type_pointer => @sizeOf(Tag.TypePointer),
.type_slice => 0,
.type_optional => 0,
.type_anyframe => 0,
@@ -5007,6 +5010,7 @@ fn dumpFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
const values = ctx.map.values();
return values[a_index].bytes > values[b_index].bytes;
//return values[a_index].count > values[b_index].count;
}
};
counts.sort(SortContext{ .map = &counts });
@@ -5621,3 +5625,79 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois
.none => unreachable, // special tag
};
}
/// I got this from King, using this temporarily until std lib hashing can be
/// improved to make stateless hashing performant. Currently the
/// implementations suffer from not special casing small lengths and not taking
/// advantage of comptime-known lengths, both of which this implementation
/// does.
const WyhashKing = struct {
inline fn mum(pair: *[2]u64) void {
const x = @as(u128, pair[0]) *% pair[1];
pair[0] = @truncate(u64, x);
pair[1] = @truncate(u64, x >> 64);
}
inline fn mix(a: u64, b: u64) u64 {
var pair = [_]u64{ a, b };
mum(&pair);
return pair[0] ^ pair[1];
}
inline fn read(comptime I: type, in: []const u8) I {
return std.mem.readIntLittle(I, in[0..@sizeOf(I)]);
}
const secret = [_]u64{
0xa0761d6478bd642f,
0xe7037ed1a0b428db,
0x8ebc6af09c88c6e3,
0x589965cc75374cc3,
};
fn hash(seed: u64, input: anytype) u64 {
var in: []const u8 = input;
var last = std.mem.zeroes([2]u64);
const starting_len: u64 = input.len;
var state = seed ^ mix(seed ^ secret[0], secret[1]);
if (in.len <= 16) {
if (in.len >= 4) {
const end = (in.len >> 3) << 2;
last[0] = (@as(u64, read(u32, in)) << 32) | read(u32, in[end..]);
last[1] = (@as(u64, read(u32, in[in.len - 4 ..])) << 32) | read(u32, in[in.len - 4 - end ..]);
} else if (in.len > 0) {
last[0] = (@as(u64, in[0]) << 16) | (@as(u64, in[in.len >> 1]) << 8) | in[in.len - 1];
}
} else {
large: {
if (in.len <= 48) break :large;
var split = [_]u64{ state, state, state };
while (true) {
for (&split, 0..) |*lane, i| {
const a = read(u64, in[(i * 2) * 8 ..]) ^ secret[i + 1];
const b = read(u64, in[((i * 2) + 1) * 8 ..]) ^ lane.*;
lane.* = mix(a, b);
}
in = in[48..];
if (in.len > 48) continue;
state = split[0] ^ (split[1] ^ split[2]);
break :large;
}
}
while (true) {
if (in.len <= 16) break;
state = mix(read(u64, in) ^ secret[1], read(u64, in[8..]) ^ state);
in = in[16..];
if (in.len <= 16) break;
}
last[0] = read(u64, in[in.len - 16 ..]);
last[1] = read(u64, in[in.len - 8 ..]);
}
last[0] ^= secret[1];
last[1] ^= state;
mum(&last);
return mix(last[0] ^ secret[0] ^ starting_len, last[1] ^ secret[1]);
}
};
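
For context, a hypothetical usage sketch (assuming it sits next to WyhashKing
in the same file): because asBytes returns a pointer to a fixed-size array,
concatenating such results with ++ produces an input whose length is known at
compile time, so the small-input path is taken and no variable-length memcpy
is involved.

fn hashTwoU32s(seed: u64, a: u32, b: u32) u64 {
    const asBytes = std.mem.asBytes;
    // The concatenated input is 8 bytes, so WyhashKing.hash uses its
    // <= 16 byte fast path.
    return WyhashKing.hash(seed, asBytes(&a) ++ asBytes(&b));
}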

@@ -6430,8 +6430,10 @@ pub fn populateTestFunctions(
// func
try mod.intern(.{ .ptr = .{
.ty = try mod.intern(.{ .ptr_type = .{
.elem_type = test_decl.ty.toIntern(),
.is_const = true,
.child = test_decl.ty.toIntern(),
.flags = .{
.is_const = true,
},
} }),
.addr = .{ .decl = test_decl_index },
} }),
@@ -6466,9 +6468,11 @@ pub fn populateTestFunctions(
{
const new_ty = try mod.ptrType(.{
.elem_type = test_fn_ty.toIntern(),
.is_const = true,
.size = .Slice,
.child = test_fn_ty.toIntern(),
.flags = .{
.is_const = true,
.size = .Slice,
},
});
const new_val = decl.val;
const new_init = try mod.intern(.{ .ptr = .{
@@ -6681,65 +6685,68 @@ pub fn optionalType(mod: *Module, child_type: InternPool.Index) Allocator.Error!
pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type {
var canon_info = info;
const have_elem_layout = info.elem_type.toType().layoutIsResolved(mod);
const have_elem_layout = info.child.toType().layoutIsResolved(mod);
if (info.size == .C) canon_info.is_allowzero = true;
if (info.flags.size == .C) canon_info.flags.is_allowzero = true;
// Canonicalize non-zero alignment. If it matches the ABI alignment of the pointee
// type, we change it to 0 here. If this causes an assertion trip because the
// pointee type needs to be resolved more, that needs to be done before calling
// this ptr() function.
if (info.alignment.toByteUnitsOptional()) |info_align| {
if (have_elem_layout and info_align == info.elem_type.toType().abiAlignment(mod)) {
canon_info.alignment = .none;
if (info.flags.alignment.toByteUnitsOptional()) |info_align| {
if (have_elem_layout and info_align == info.child.toType().abiAlignment(mod)) {
canon_info.flags.alignment = .none;
}
}
switch (info.vector_index) {
switch (info.flags.vector_index) {
// Canonicalize host_size. If it matches the bit size of the pointee type,
// we change it to 0 here. If this causes an assertion trip, the pointee type
// needs to be resolved before calling this ptr() function.
.none => if (have_elem_layout and info.host_size != 0) {
const elem_bit_size = info.elem_type.toType().bitSize(mod);
assert(info.bit_offset + elem_bit_size <= info.host_size * 8);
if (info.host_size * 8 == elem_bit_size) {
canon_info.host_size = 0;
.none => if (have_elem_layout and info.packed_offset.host_size != 0) {
const elem_bit_size = info.child.toType().bitSize(mod);
assert(info.packed_offset.bit_offset + elem_bit_size <= info.packed_offset.host_size * 8);
if (info.packed_offset.host_size * 8 == elem_bit_size) {
canon_info.packed_offset.host_size = 0;
}
},
.runtime => {},
_ => assert(@enumToInt(info.vector_index) < info.host_size),
_ => assert(@enumToInt(info.flags.vector_index) < info.packed_offset.host_size),
}
return (try intern(mod, .{ .ptr_type = canon_info })).toType();
}
pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
return ptrType(mod, .{ .elem_type = child_type.toIntern() });
return ptrType(mod, .{ .child = child_type.toIntern() });
}
pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
return ptrType(mod, .{ .elem_type = child_type.toIntern(), .is_const = true });
return ptrType(mod, .{
.child = child_type.toIntern(),
.flags = .{
.is_const = true,
},
});
}
pub fn manyConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
return ptrType(mod, .{ .elem_type = child_type.toIntern(), .size = .Many, .is_const = true });
return ptrType(mod, .{
.child = child_type.toIntern(),
.flags = .{
.size = .Many,
.is_const = true,
},
});
}
pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator.Error!Type {
const info = Type.ptrInfoIp(&mod.intern_pool, ptr_ty.toIntern());
return mod.ptrType(.{
.elem_type = new_child.toIntern(),
.child = new_child.toIntern(),
.sentinel = info.sentinel,
.alignment = info.alignment,
.host_size = info.host_size,
.bit_offset = info.bit_offset,
.vector_index = info.vector_index,
.size = info.size,
.is_const = info.is_const,
.is_volatile = info.is_volatile,
.is_allowzero = info.is_allowzero,
.address_space = info.address_space,
.flags = info.flags,
.packed_offset = info.packed_offset,
});
}

@@ -2490,9 +2490,11 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
const operand = try trash_block.addBitCast(pointee_ty, .void_value);
const ptr_ty = try mod.ptrType(.{
.elem_type = pointee_ty.toIntern(),
.alignment = ia1.alignment,
.address_space = addr_space,
.child = pointee_ty.toIntern(),
.flags = .{
.alignment = ia1.alignment,
.address_space = addr_space,
},
});
const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr);
@@ -2519,9 +2521,11 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
try sema.resolveTypeLayout(pointee_ty);
}
const ptr_ty = try mod.ptrType(.{
.elem_type = pointee_ty.toIntern(),
.alignment = alignment,
.address_space = addr_space,
.child = pointee_ty.toIntern(),
.flags = .{
.alignment = alignment,
.address_space = addr_space,
},
});
try sema.maybeQueueFuncBodyAnalysis(decl_index);
return sema.addConstant(ptr_ty, (try mod.intern(.{ .ptr = .{
@@ -3771,10 +3775,12 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
if (iac.is_const) try decl.intern(mod);
const final_elem_ty = decl.ty;
const final_ptr_ty = try mod.ptrType(.{
.elem_type = final_elem_ty.toIntern(),
.is_const = false,
.alignment = iac.alignment,
.address_space = target_util.defaultAddressSpace(target, .local),
.child = final_elem_ty.toIntern(),
.flags = .{
.is_const = false,
.alignment = iac.alignment,
.address_space = target_util.defaultAddressSpace(target, .local),
},
});
try sema.maybeQueueFuncBodyAnalysis(decl_index);
@@ -3797,9 +3803,11 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_inst_list, .none);
const final_ptr_ty = try mod.ptrType(.{
.elem_type = final_elem_ty.toIntern(),
.alignment = ia1.alignment,
.address_space = target_util.defaultAddressSpace(target, .local),
.child = final_elem_ty.toIntern(),
.flags = .{
.alignment = ia1.alignment,
.address_space = target_util.defaultAddressSpace(target, .local),
},
});
if (!ia1.is_const) {
@@ -3916,9 +3924,11 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
defer trash_block.instructions.deinit(gpa);
const mut_final_ptr_ty = try mod.ptrType(.{
.elem_type = final_elem_ty.toIntern(),
.alignment = ia1.alignment,
.address_space = target_util.defaultAddressSpace(target, .local),
.child = final_elem_ty.toIntern(),
.flags = .{
.alignment = ia1.alignment,
.address_space = target_util.defaultAddressSpace(target, .local),
},
});
const dummy_ptr = try trash_block.addTy(.alloc, mut_final_ptr_ty);
const empty_trash_count = trash_block.instructions.items.len;
@@ -12038,7 +12048,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const has_field = hf: {
switch (ip.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| switch (ptr_type.size) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.Slice => {
if (mem.eql(u8, field_name, "ptr")) break :hf true;
if (mem.eql(u8, field_name, "len")) break :hf true;
@@ -16019,9 +16029,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
);
break :v try mod.intern(.{ .ptr = .{
.ty = (try mod.ptrType(.{
.elem_type = param_info_ty.toIntern(),
.size = .Slice,
.is_const = true,
.child = param_info_ty.toIntern(),
.flags = .{
.size = .Slice,
.is_const = true,
},
})).toIntern(),
.addr = .{ .decl = new_decl },
.len = (try mod.intValue(Type.usize, param_vals.len)).toIntern(),
@@ -16329,9 +16341,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// Build our ?[]const Error value
const slice_errors_ty = try mod.ptrType(.{
.elem_type = error_field_ty.toIntern(),
.size = .Slice,
.is_const = true,
.child = error_field_ty.toIntern(),
.flags = .{
.size = .Slice,
.is_const = true,
},
});
const opt_slice_errors_ty = try mod.optionalType(slice_errors_ty.toIntern());
const errors_payload_val: InternPool.Index = if (error_field_vals) |vals| v: {
@@ -16471,9 +16485,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
);
break :v try mod.intern(.{ .ptr = .{
.ty = (try mod.ptrType(.{
.elem_type = enum_field_ty.toIntern(),
.size = .Slice,
.is_const = true,
.child = enum_field_ty.toIntern(),
.flags = .{
.size = .Slice,
.is_const = true,
},
})).toIntern(),
.addr = .{ .decl = new_decl },
.len = (try mod.intValue(Type.usize, enum_field_vals.len)).toIntern(),
@@ -16614,9 +16630,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
);
break :v try mod.intern(.{ .ptr = .{
.ty = (try mod.ptrType(.{
.elem_type = union_field_ty.toIntern(),
.size = .Slice,
.is_const = true,
.child = union_field_ty.toIntern(),
.flags = .{
.size = .Slice,
.is_const = true,
},
})).toIntern(),
.addr = .{ .decl = new_decl },
.len = (try mod.intValue(Type.usize, union_field_vals.len)).toIntern(),
@@ -16833,9 +16851,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
);
break :v try mod.intern(.{ .ptr = .{
.ty = (try mod.ptrType(.{
.elem_type = struct_field_ty.toIntern(),
.size = .Slice,
.is_const = true,
.child = struct_field_ty.toIntern(),
.flags = .{
.size = .Slice,
.is_const = true,
},
})).toIntern(),
.addr = .{ .decl = new_decl },
.len = (try mod.intValue(Type.usize, struct_field_vals.len)).toIntern(),
@@ -16976,9 +16996,11 @@ fn typeInfoDecls(
);
return try mod.intern(.{ .ptr = .{
.ty = (try mod.ptrType(.{
.elem_type = declaration_ty.toIntern(),
.size = .Slice,
.is_const = true,
.child = declaration_ty.toIntern(),
.flags = .{
.size = .Slice,
.is_const = true,
},
})).toIntern(),
.addr = .{ .decl = new_decl },
.len = (try mod.intValue(Type.usize, decl_vals.items.len)).toIntern(),
@@ -18047,16 +18069,20 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
}
const ty = try mod.ptrType(.{
.elem_type = elem_ty.toIntern(),
.child = elem_ty.toIntern(),
.sentinel = sentinel,
.alignment = abi_align,
.address_space = address_space,
.bit_offset = bit_offset,
.host_size = host_size,
.is_const = !inst_data.flags.is_mutable,
.is_allowzero = inst_data.flags.is_allowzero,
.is_volatile = inst_data.flags.is_volatile,
.size = inst_data.size,
.flags = .{
.alignment = abi_align,
.address_space = address_space,
.is_const = !inst_data.flags.is_mutable,
.is_allowzero = inst_data.flags.is_allowzero,
.is_volatile = inst_data.flags.is_volatile,
.size = inst_data.size,
},
.packed_offset = .{
.bit_offset = bit_offset,
.host_size = host_size,
},
});
return sema.addType(ty);
}
@@ -19209,14 +19235,16 @@ fn zirReify(
}
const ty = try mod.ptrType(.{
.size = ptr_size,
.is_const = is_const_val.toBool(),
.is_volatile = is_volatile_val.toBool(),
.alignment = abi_align,
.address_space = mod.toEnum(std.builtin.AddressSpace, address_space_val),
.elem_type = elem_ty.toIntern(),
.is_allowzero = is_allowzero_val.toBool(),
.child = elem_ty.toIntern(),
.sentinel = actual_sentinel,
.flags = .{
.size = ptr_size,
.is_const = is_const_val.toBool(),
.is_volatile = is_volatile_val.toBool(),
.alignment = abi_align,
.address_space = mod.toEnum(std.builtin.AddressSpace, address_space_val),
.is_allowzero = is_allowzero_val.toBool(),
},
});
return sema.addType(ty);
},
@@ -22714,9 +22742,9 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
try sema.analyzeSlicePtr(block, dest_src, new_dest_ptr, new_dest_ptr_ty)
else if (new_dest_ptr_ty.ptrSize(mod) == .One) ptr: {
var dest_manyptr_ty_key = mod.intern_pool.indexToKey(new_dest_ptr_ty.toIntern()).ptr_type;
assert(dest_manyptr_ty_key.size == .One);
dest_manyptr_ty_key.elem_type = dest_elem_ty.toIntern();
dest_manyptr_ty_key.size = .Many;
assert(dest_manyptr_ty_key.flags.size == .One);
dest_manyptr_ty_key.child = dest_elem_ty.toIntern();
dest_manyptr_ty_key.flags.size = .Many;
break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrType(dest_manyptr_ty_key), new_dest_ptr, dest_src);
} else new_dest_ptr;
@@ -22725,9 +22753,9 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty)
else if (new_src_ptr_ty.ptrSize(mod) == .One) ptr: {
var src_manyptr_ty_key = mod.intern_pool.indexToKey(new_src_ptr_ty.toIntern()).ptr_type;
assert(src_manyptr_ty_key.size == .One);
src_manyptr_ty_key.elem_type = src_elem_ty.toIntern();
src_manyptr_ty_key.size = .Many;
assert(src_manyptr_ty_key.flags.size == .One);
src_manyptr_ty_key.child = src_elem_ty.toIntern();
src_manyptr_ty_key.flags.size = .Many;
break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrType(src_manyptr_ty_key), new_src_ptr, src_src);
} else new_src_ptr;
@@ -24036,8 +24064,10 @@ fn panicWithMsg(
const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty);
const target = mod.getTarget();
const ptr_stack_trace_ty = try mod.ptrType(.{
.elem_type = stack_trace_ty.toIntern(),
.address_space = target_util.defaultAddressSpace(target, .global_constant), // TODO might need a place that is more dynamic
.child = stack_trace_ty.toIntern(),
.flags = .{
.address_space = target_util.defaultAddressSpace(target, .global_constant), // TODO might need a place that is more dynamic
},
});
const opt_ptr_stack_trace_ty = try mod.optionalType(ptr_stack_trace_ty.toIntern());
const null_stack_trace = try sema.addConstant(opt_ptr_stack_trace_ty, (try mod.intern(.{ .opt = .{
@@ -29630,10 +29660,12 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: boo
const decl = mod.declPtr(decl_index);
const decl_tv = try decl.typedValue();
const ptr_ty = try mod.ptrType(.{
.elem_type = decl_tv.ty.toIntern(),
.alignment = InternPool.Alignment.fromByteUnits(decl.@"align"),
.is_const = if (decl.val.getVariable(mod)) |variable| variable.is_const else true,
.address_space = decl.@"addrspace",
.child = decl_tv.ty.toIntern(),
.flags = .{
.alignment = InternPool.Alignment.fromByteUnits(decl.@"align"),
.is_const = if (decl.val.getVariable(mod)) |variable| variable.is_const else true,
.address_space = decl.@"addrspace",
},
});
if (analyze_fn_body) {
try sema.maybeQueueFuncBodyAnalysis(decl_index);
@@ -30025,10 +30057,10 @@ fn analyzeSlice(
try sema.analyzeSlicePtr(block, ptr_src, ptr_or_slice, slice_ty)
else if (array_ty.zigTypeTag(mod) == .Array) ptr: {
var manyptr_ty_key = mod.intern_pool.indexToKey(slice_ty.toIntern()).ptr_type;
assert(manyptr_ty_key.elem_type == array_ty.toIntern());
assert(manyptr_ty_key.size == .One);
manyptr_ty_key.elem_type = elem_ty.toIntern();
manyptr_ty_key.size = .Many;
assert(manyptr_ty_key.child == array_ty.toIntern());
assert(manyptr_ty_key.flags.size == .One);
manyptr_ty_key.child = elem_ty.toIntern();
manyptr_ty_key.flags.size = .Many;
break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrType(manyptr_ty_key), ptr_or_slice, ptr_src);
} else ptr_or_slice;
@@ -31972,7 +32004,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.int_type => false,
.ptr_type => |ptr_type| {
const child_ty = ptr_type.elem_type.toType();
const child_ty = ptr_type.child.toType();
if (child_ty.zigTypeTag(mod) == .Fn) {
return mod.typeToFunc(child_ty).?.is_generic;
} else {
@@ -33917,15 +33949,15 @@ fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError
fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type {
const mod = sema.mod;
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| switch (ptr_type.size) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.One, .Many, .C => ty,
.Slice => null,
},
.opt_type => |opt_child| switch (mod.intern_pool.indexToKey(opt_child)) {
.ptr_type => |ptr_type| switch (ptr_type.size) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.Slice, .C => null,
.Many, .One => {
if (ptr_type.is_allowzero) return null;
if (ptr_type.flags.is_allowzero) return null;
// optionals of zero sized types behave like bools, not pointers
const payload_ty = opt_child.toType();
@@ -33956,7 +33988,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.int_type => return false,
.ptr_type => |ptr_type| {
const child_ty = ptr_type.elem_type.toType();
const child_ty = ptr_type.child.toType();
if (child_ty.zigTypeTag(mod) == .Fn) {
return mod.typeToFunc(child_ty).?.is_generic;
} else {

@@ -673,7 +673,7 @@ fn lowerParentPtr(
mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).abiSize(mod))),
),
.field => |field| {
const base_type = mod.intern_pool.indexToKey(mod.intern_pool.typeOf(field.base)).ptr_type.elem_type;
const base_type = mod.intern_pool.indexToKey(mod.intern_pool.typeOf(field.base)).ptr_type.child;
return lowerParentPtr(
bin_file,
src_loc,
@@ -681,7 +681,7 @@ fn lowerParentPtr(
code,
debug_output,
reloc_info.offset(switch (mod.intern_pool.indexToKey(base_type)) {
.ptr_type => |ptr_type| switch (ptr_type.size) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.One, .Many, .C => unreachable,
.Slice => switch (field.index) {
0 => 0,

@@ -630,7 +630,7 @@ pub const DeclGen = struct {
try writer.writeByte(')');
}
try writer.writeAll("&(");
if (mod.intern_pool.indexToKey(ptr_base_ty.toIntern()).ptr_type.size == .One)
if (mod.intern_pool.indexToKey(ptr_base_ty.toIntern()).ptr_type.flags.size == .One)
try writer.writeByte('*');
try dg.renderParentPtr(writer, elem.base, location);
try writer.print(")[{d}]", .{elem.index});
@@ -642,7 +642,7 @@ pub const DeclGen = struct {
_ = try dg.typeToIndex(base_ty, .complete);
const field_ty = switch (mod.intern_pool.indexToKey(base_ty.toIntern())) {
.anon_struct_type, .struct_type, .union_type => base_ty.structFieldType(field.index, mod),
.ptr_type => |ptr_type| switch (ptr_type.size) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.One, .Many, .C => unreachable,
.Slice => switch (field.index) {
Value.slice_ptr_index => base_ty.slicePtrFieldType(mod),
@@ -6285,8 +6285,10 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
// casted to a regular pointer, otherwise an error like this occurs:
// error: array type 'uint32_t[20]' (aka 'unsigned int[20]') is not assignable
const elem_ptr_ty = try mod.ptrType(.{
.size = .C,
.elem_type = elem_ty.ip_index,
.child = elem_ty.ip_index,
.flags = .{
.size = .C,
},
});
const index = try f.allocLocal(inst, Type.usize);

@@ -1577,25 +1577,27 @@ pub const Object = struct {
const ptr_info = Type.ptrInfoIp(&mod.intern_pool, ty.toIntern());
if (ptr_info.sentinel != .none or
ptr_info.address_space != .generic or
ptr_info.bit_offset != 0 or
ptr_info.host_size != 0 or
ptr_info.vector_index != .none or
ptr_info.is_allowzero or
ptr_info.is_const or
ptr_info.is_volatile or
ptr_info.size == .Many or ptr_info.size == .C or
!ptr_info.elem_type.toType().hasRuntimeBitsIgnoreComptime(mod))
ptr_info.flags.address_space != .generic or
ptr_info.packed_offset.bit_offset != 0 or
ptr_info.packed_offset.host_size != 0 or
ptr_info.flags.vector_index != .none or
ptr_info.flags.is_allowzero or
ptr_info.flags.is_const or
ptr_info.flags.is_volatile or
ptr_info.flags.size == .Many or ptr_info.flags.size == .C or
!ptr_info.child.toType().hasRuntimeBitsIgnoreComptime(mod))
{
const bland_ptr_ty = try mod.ptrType(.{
.elem_type = if (!ptr_info.elem_type.toType().hasRuntimeBitsIgnoreComptime(mod))
.child = if (!ptr_info.child.toType().hasRuntimeBitsIgnoreComptime(mod))
.anyopaque_type
else
ptr_info.elem_type,
.alignment = ptr_info.alignment,
.size = switch (ptr_info.size) {
.Many, .C, .One => .One,
.Slice => .Slice,
ptr_info.child,
.flags = .{
.alignment = ptr_info.flags.alignment,
.size = switch (ptr_info.flags.size) {
.Many, .C, .One => .One,
.Slice => .Slice,
},
},
});
const ptr_di_ty = try o.lowerDebugType(bland_ptr_ty, resolve);
@@ -1683,7 +1685,7 @@ pub const Object = struct {
return full_di_ty;
}
const elem_di_ty = try o.lowerDebugType(ptr_info.elem_type.toType(), .fwd);
const elem_di_ty = try o.lowerDebugType(ptr_info.child.toType(), .fwd);
const name = try ty.nameAlloc(gpa, o.module);
defer gpa.free(name);
const ptr_di_ty = dib.createPointerType(
@@ -5856,8 +5858,10 @@ pub const FuncGen = struct {
const struct_llvm_ty = try self.dg.lowerType(struct_ty);
const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field.index, "");
const field_ptr_ty = try mod.ptrType(.{
.elem_type = llvm_field.ty.toIntern(),
.alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment),
.child = llvm_field.ty.toIntern(),
.flags = .{
.alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment),
},
});
if (isByRef(field_ty, mod)) {
if (canElideLoad(self, body_tail))
@@ -6732,8 +6736,10 @@ pub const FuncGen = struct {
const struct_llvm_ty = try self.dg.lowerType(struct_ty);
const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field.index, "");
const field_ptr_ty = try mod.ptrType(.{
.elem_type = llvm_field.ty.toIntern(),
.alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment),
.child = llvm_field.ty.toIntern(),
.flags = .{
.alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment),
},
});
return self.load(field_ptr, field_ptr_ty);
}
@@ -9131,10 +9137,12 @@ pub const FuncGen = struct {
indices[1] = llvm_u32.constInt(llvm_i, .False);
const field_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, "");
const field_ptr_ty = try mod.ptrType(.{
.elem_type = self.typeOf(elem).toIntern(),
.alignment = InternPool.Alignment.fromNonzeroByteUnits(
result_ty.structFieldAlign(i, mod),
),
.child = self.typeOf(elem).toIntern(),
.flags = .{
.alignment = InternPool.Alignment.fromNonzeroByteUnits(
result_ty.structFieldAlign(i, mod),
),
},
});
try self.store(field_ptr, field_ptr_ty, llvm_elem, .NotAtomic);
}
@@ -9160,7 +9168,7 @@ pub const FuncGen = struct {
const array_info = result_ty.arrayInfo(mod);
const elem_ptr_ty = try mod.ptrType(.{
.elem_type = array_info.elem_type.toIntern(),
.child = array_info.elem_type.toIntern(),
});
for (elements, 0..) |elem, i| {
@@ -9282,8 +9290,10 @@ pub const FuncGen = struct {
const index_type = self.context.intType(32);
const field_ptr_ty = try mod.ptrType(.{
.elem_type = field.ty.toIntern(),
.alignment = InternPool.Alignment.fromNonzeroByteUnits(field_align),
.child = field.ty.toIntern(),
.flags = .{
.alignment = InternPool.Alignment.fromNonzeroByteUnits(field_align),
},
});
if (layout.tag_size == 0) {
const indices: [3]*llvm.Value = .{

@@ -85,7 +85,7 @@ pub const Type = struct {
/// Asserts the type is a pointer.
pub fn ptrIsMutable(ty: Type, mod: *const Module) bool {
return !mod.intern_pool.indexToKey(ty.toIntern()).ptr_type.is_const;
return !mod.intern_pool.indexToKey(ty.toIntern()).ptr_type.flags.is_const;
}
pub const ArrayInfo = struct {
@@ -488,7 +488,7 @@ pub const Type = struct {
// Pointers to zero-bit types still have a runtime address; however, pointers
// to comptime-only types do not, with the exception of function pointers.
if (ignore_comptime_only) return true;
const child_ty = ptr_type.elem_type.toType();
const child_ty = ptr_type.child.toType();
if (child_ty.zigTypeTag(mod) == .Fn) return !mod.typeToFunc(child_ty).?.is_generic;
if (strat == .sema) return !(try strat.sema.typeRequiresComptime(ty));
return !comptimeOnly(ty, mod);
@@ -689,7 +689,7 @@ pub const Type = struct {
.array_type => |array_type| array_type.child.toType().hasWellDefinedLayout(mod),
.opt_type => ty.isPtrLikeOptional(mod),
.ptr_type => |ptr_type| ptr_type.size != .Slice,
.ptr_type => |ptr_type| ptr_type.flags.size != .Slice,
.simple_type => |t| switch (t) {
.f16,
@@ -823,13 +823,13 @@ pub const Type = struct {
pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !u32 {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| {
if (ptr_type.alignment.toByteUnitsOptional()) |a| {
if (ptr_type.flags.alignment.toByteUnitsOptional()) |a| {
return @intCast(u32, a);
} else if (opt_sema) |sema| {
const res = try ptr_type.elem_type.toType().abiAlignmentAdvanced(mod, .{ .sema = sema });
const res = try ptr_type.child.toType().abiAlignmentAdvanced(mod, .{ .sema = sema });
return res.scalar;
} else {
return (ptr_type.elem_type.toType().abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
return (ptr_type.child.toType().abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
}
},
.opt_type => |child| child.toType().ptrAlignmentAdvanced(mod, opt_sema),
@@ -839,8 +839,8 @@ pub const Type = struct {
pub fn ptrAddressSpace(ty: Type, mod: *const Module) std.builtin.AddressSpace {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.address_space,
.opt_type => |child| mod.intern_pool.indexToKey(child).ptr_type.address_space,
.ptr_type => |ptr_type| ptr_type.flags.address_space,
.opt_type => |child| mod.intern_pool.indexToKey(child).ptr_type.flags.address_space,
else => unreachable,
};
}
@@ -1297,7 +1297,7 @@ pub const Type = struct {
if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 };
return AbiSizeAdvanced{ .scalar = intAbiSize(int_type.bits, target) };
},
.ptr_type => |ptr_type| switch (ptr_type.size) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.Slice => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 },
else => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) },
},
@@ -1620,7 +1620,7 @@ pub const Type = struct {
switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.int_type => |int_type| return int_type.bits,
.ptr_type => |ptr_type| switch (ptr_type.size) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.Slice => return target.ptrBitWidth() * 2,
else => return target.ptrBitWidth(),
},
@@ -1795,7 +1795,7 @@ pub const Type = struct {
pub fn isSinglePointer(ty: Type, mod: *const Module) bool {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_info| ptr_info.size == .One,
.ptr_type => |ptr_info| ptr_info.flags.size == .One,
else => false,
};
}
@@ -1808,14 +1808,14 @@ pub const Type = struct {
/// Returns `null` if `ty` is not a pointer.
pub fn ptrSizeOrNull(ty: Type, mod: *const Module) ?std.builtin.Type.Pointer.Size {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_info| ptr_info.size,
.ptr_type => |ptr_info| ptr_info.flags.size,
else => null,
};
}
pub fn isSlice(ty: Type, mod: *const Module) bool {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.size == .Slice,
.ptr_type => |ptr_type| ptr_type.flags.size == .Slice,
else => false,
};
}
@@ -1826,7 +1826,7 @@ pub const Type = struct {
pub fn isConstPtr(ty: Type, mod: *const Module) bool {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.is_const,
.ptr_type => |ptr_type| ptr_type.flags.is_const,
else => false,
};
}
@@ -1837,14 +1837,14 @@ pub const Type = struct {
pub fn isVolatilePtrIp(ty: Type, ip: *const InternPool) bool {
return switch (ip.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.is_volatile,
.ptr_type => |ptr_type| ptr_type.flags.is_volatile,
else => false,
};
}
pub fn isAllowzeroPtr(ty: Type, mod: *const Module) bool {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.is_allowzero,
.ptr_type => |ptr_type| ptr_type.flags.is_allowzero,
.opt_type => true,
else => false,
};
@@ -1852,21 +1852,21 @@ pub const Type = struct {
pub fn isCPtr(ty: Type, mod: *const Module) bool {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.size == .C,
.ptr_type => |ptr_type| ptr_type.flags.size == .C,
else => false,
};
}
pub fn isPtrAtRuntime(ty: Type, mod: *const Module) bool {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| switch (ptr_type.size) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.Slice => false,
.One, .Many, .C => true,
},
.opt_type => |child| switch (mod.intern_pool.indexToKey(child)) {
.ptr_type => |p| switch (p.size) {
.ptr_type => |p| switch (p.flags.size) {
.Slice, .C => false,
.Many, .One => !p.is_allowzero,
.Many, .One => !p.flags.is_allowzero,
},
else => false,
},
@@ -1887,14 +1887,14 @@ pub const Type = struct {
pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.opt_type => |child_type| switch (mod.intern_pool.indexToKey(child_type)) {
.ptr_type => |ptr_type| switch (ptr_type.size) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.C => false,
.Slice, .Many, .One => !ptr_type.is_allowzero,
.Slice, .Many, .One => !ptr_type.flags.is_allowzero,
},
.error_set_type => true,
else => false,
},
.ptr_type => |ptr_type| ptr_type.size == .C,
.ptr_type => |ptr_type| ptr_type.flags.size == .C,
else => false,
};
}
@@ -1904,11 +1904,11 @@ pub const Type = struct {
/// This function must be kept in sync with `Sema.typePtrOrOptionalPtrTy`.
pub fn isPtrLikeOptional(ty: Type, mod: *const Module) bool {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.size == .C,
.ptr_type => |ptr_type| ptr_type.flags.size == .C,
.opt_type => |child| switch (mod.intern_pool.indexToKey(child)) {
.ptr_type => |ptr_type| switch (ptr_type.size) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.Slice, .C => false,
.Many, .One => !ptr_type.is_allowzero,
.Many, .One => !ptr_type.flags.is_allowzero,
},
else => false,
},
@@ -1938,9 +1938,9 @@ pub const Type = struct {
/// For anyframe->T, returns T.
pub fn elemType2(ty: Type, mod: *const Module) Type {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| switch (ptr_type.size) {
.One => ptr_type.elem_type.toType().shallowElemType(mod),
.Many, .C, .Slice => ptr_type.elem_type.toType(),
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.One => ptr_type.child.toType().shallowElemType(mod),
.Many, .C, .Slice => ptr_type.child.toType(),
},
.anyframe_type => |child| {
assert(child != .none);
@@ -1974,7 +1974,7 @@ pub const Type = struct {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.opt_type => |child| child.toType(),
.ptr_type => |ptr_type| b: {
assert(ptr_type.size == .C);
assert(ptr_type.flags.size == .C);
break :b ty;
},
else => unreachable,
@@ -2390,7 +2390,7 @@ pub const Type = struct {
pub fn fnReturnTypeIp(ty: Type, ip: *const InternPool) Type {
return switch (ip.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ip.indexToKey(ptr_type.elem_type).func_type.return_type,
.ptr_type => |ptr_type| ip.indexToKey(ptr_type.child).func_type.return_type,
.func_type => |func_type| func_type.return_type,
else => unreachable,
}.toType();
@@ -2672,7 +2672,7 @@ pub const Type = struct {
else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.int_type => false,
.ptr_type => |ptr_type| {
const child_ty = ptr_type.elem_type.toType();
const child_ty = ptr_type.child.toType();
if (child_ty.zigTypeTag(mod) == .Fn) {
return false;
} else {
@@ -3374,17 +3374,17 @@ pub const Type = struct {
pub fn fromKey(p: InternPool.Key.PtrType) Data {
return .{
.pointee_type = p.elem_type.toType(),
.pointee_type = p.child.toType(),
.sentinel = if (p.sentinel != .none) p.sentinel.toValue() else null,
.@"align" = @intCast(u32, p.alignment.toByteUnits(0)),
.@"addrspace" = p.address_space,
.bit_offset = p.bit_offset,
.host_size = p.host_size,
.vector_index = p.vector_index,
.@"allowzero" = p.is_allowzero,
.mutable = !p.is_const,
.@"volatile" = p.is_volatile,
.size = p.size,
.@"align" = @intCast(u32, p.flags.alignment.toByteUnits(0)),
.@"addrspace" = p.flags.address_space,
.bit_offset = p.packed_offset.bit_offset,
.host_size = p.packed_offset.host_size,
.vector_index = p.flags.vector_index,
.@"allowzero" = p.flags.is_allowzero,
.mutable = !p.flags.is_const,
.@"volatile" = p.flags.is_volatile,
.size = p.flags.size,
};
}
};
@@ -3478,17 +3478,21 @@ pub const Type = struct {
}
return mod.ptrType(.{
.elem_type = d.pointee_type.ip_index,
.child = d.pointee_type.ip_index,
.sentinel = if (d.sentinel) |s| s.ip_index else .none,
.alignment = InternPool.Alignment.fromByteUnits(d.@"align"),
.host_size = d.host_size,
.bit_offset = d.bit_offset,
.vector_index = d.vector_index,
.size = d.size,
.is_const = !d.mutable,
.is_volatile = d.@"volatile",
.is_allowzero = d.@"allowzero",
.address_space = d.@"addrspace",
.flags = .{
.alignment = InternPool.Alignment.fromByteUnits(d.@"align"),
.vector_index = d.vector_index,
.size = d.size,
.is_const = !d.mutable,
.is_volatile = d.@"volatile",
.is_allowzero = d.@"allowzero",
.address_space = d.@"addrspace",
},
.packed_offset = .{
.host_size = d.host_size,
.bit_offset = d.bit_offset,
},
});
}

@@ -2080,8 +2080,8 @@ pub const Value = struct {
else => val,
};
var ptr_ty_key = mod.intern_pool.indexToKey(elem_ptr_ty.toIntern()).ptr_type;
assert(ptr_ty_key.size != .Slice);
ptr_ty_key.size = .Many;
assert(ptr_ty_key.flags.size != .Slice);
ptr_ty_key.flags.size = .Many;
return (try mod.intern(.{ .ptr = .{
.ty = elem_ptr_ty.toIntern(),
.addr = .{ .elem = .{