InternPool: add more pointer values

Jacob Young
2023-05-20 23:24:39 -04:00
committed by Andrew Kelley
parent cbf304d8c3
commit dfd91abfe1
7 changed files with 583 additions and 113 deletions

View File

@@ -1292,7 +1292,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type {
.try_ptr,
=> return air.getRefType(datas[inst].ty_pl.ty),
.interned => return ip.indexToKey(datas[inst].interned).typeOf().toType(),
.interned => return ip.typeOf(datas[inst].interned).toType(),
.not,
.bitcast,
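
This hunk is the first instance of a refactor that recurs throughout the commit: `ip.indexToKey(i).typeOf()` becomes `ip.typeOf(i)`, which answers the type query directly instead of materializing a full `Key` first. A sketch of the assumed equivalence (illustrative, not from the diff):

    // Before: decode the item into a Key, then ask the Key for its type.
    const ty_via_key = ip.indexToKey(some_index).typeOf();
    // After: ask the pool directly from the item encoding.
    const ty_direct = ip.typeOf(some_index);
    assert(ty_via_key == ty_direct);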

View File

@@ -510,6 +510,16 @@ pub const Key = union(enum) {
runtime_index: RuntimeIndex,
},
int: Index,
eu_payload: Index,
opt_payload: Index,
comptime_field: Index,
elem: BaseIndex,
field: BaseIndex,
pub const BaseIndex = struct {
base: Index,
index: u64,
};
};
};
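
These new `Key.Ptr.Addr` variants let the pool describe derived pointers, not just decl- and integer-based ones. For illustration, a key for a value like `&array[3]` would be built roughly as follows (a sketch; `ip`, `gpa`, `elem_ptr_ty`, and `array_base` are hypothetical, already-interned handles):

    const index = try ip.get(gpa, .{ .ptr = .{
        .ty = elem_ptr_ty, // the element pointer type, e.g. *u32
        .addr = .{ .elem = .{ .base = array_base, .index = 3 } },
    } });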
@@ -599,6 +609,7 @@ pub const Key = union(enum) {
.ptr => |ptr| {
std.hash.autoHash(hasher, ptr.ty);
std.hash.autoHash(hasher, ptr.len);
// Int-to-ptr pointers are hashed separately from decl-referencing pointers.
// This is sound due to pointer provenance rules.
std.hash.autoHash(hasher, @as(@typeInfo(Key.Ptr.Addr).Union.tag_type.?, ptr.addr));
@@ -607,6 +618,11 @@ pub const Key = union(enum) {
.decl => |decl| std.hash.autoHash(hasher, decl),
.mut_decl => |mut_decl| std.hash.autoHash(hasher, mut_decl),
.int => |int| std.hash.autoHash(hasher, int),
.eu_payload => |eu_payload| std.hash.autoHash(hasher, eu_payload),
.opt_payload => |opt_payload| std.hash.autoHash(hasher, opt_payload),
.comptime_field => |comptime_field| std.hash.autoHash(hasher, comptime_field),
.elem => |elem| std.hash.autoHash(hasher, elem),
.field => |field| std.hash.autoHash(hasher, field),
}
},
@@ -719,7 +735,7 @@ pub const Key = union(enum) {
.ptr => |a_info| {
const b_info = b.ptr;
if (a_info.ty != b_info.ty) return false;
if (a_info.ty != b_info.ty or a_info.len != b_info.len) return false;
const AddrTag = @typeInfo(Key.Ptr.Addr).Union.tag_type.?;
if (@as(AddrTag, a_info.addr) != @as(AddrTag, b_info.addr)) return false;
@@ -729,6 +745,11 @@ pub const Key = union(enum) {
.decl => |a_decl| a_decl == b_info.addr.decl,
.mut_decl => |a_mut_decl| std.meta.eql(a_mut_decl, b_info.addr.mut_decl),
.int => |a_int| a_int == b_info.addr.int,
.eu_payload => |a_eu_payload| a_eu_payload == b_info.addr.eu_payload,
.opt_payload => |a_opt_payload| a_opt_payload == b_info.addr.opt_payload,
.comptime_field => |a_comptime_field| a_comptime_field == b_info.addr.comptime_field,
.elem => |a_elem| std.meta.eql(a_elem, b_info.addr.elem),
.field => |a_field| std.meta.eql(a_field, b_info.addr.field),
};
},
@@ -1375,6 +1396,26 @@ pub const Tag = enum(u8) {
/// Only pointer types are allowed to have this encoding. Optional types must use
/// `opt_payload` or `opt_null`.
ptr_int,
/// A pointer to the payload of an error union.
/// data is Index of a pointer value to the error union.
/// In order to use this encoding, one must ensure that the `InternPool`
/// already contains the payload pointer type corresponding to this payload.
ptr_eu_payload,
/// A pointer to the payload of an optional.
/// data is Index of a pointer value to the optional.
/// In order to use this encoding, one must ensure that the `InternPool`
/// already contains the payload pointer type corresponding to this payload.
ptr_opt_payload,
/// data is extra index of PtrComptimeField, which contains the pointer type and field value.
ptr_comptime_field,
/// A pointer to an array element.
/// data is extra index of PtrBaseIndex, which contains the base array and element index.
/// In order to use this encoding, one must ensure that the `InternPool`
/// already contains the elem pointer type corresponding to this payload.
ptr_elem,
/// A pointer to a container field.
/// data is extra index of PtrBaseIndex, which contains the base container and field index.
ptr_field,
/// A slice.
/// data is extra index of PtrSlice, which contains the ptr and len values.
/// In order to use this encoding, one must ensure that the `InternPool`
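
Two encoding shapes appear among these new tags, matching the rest of the pool: a payload that is a single `Index` is stored directly in `data` (`ptr_eu_payload`, `ptr_opt_payload`), while multi-field payloads go through the `extra` array (`ptr_comptime_field`, `ptr_elem`, `ptr_field`, `ptr_slice`). A sketch of the two shapes as they appear in `get` below (`base_ptr`, `ptr_ty`, and `index_val` are hypothetical indices):

    // Direct: `data` is itself an InternPool.Index.
    ip.items.appendAssumeCapacity(.{
        .tag = .ptr_eu_payload,
        .data = @enumToInt(base_ptr),
    });
    // Indirect: `data` is an offset into the `extra` array.
    ip.items.appendAssumeCapacity(.{
        .tag = .ptr_elem,
        .data = try ip.addExtra(gpa, PtrBaseIndex{
            .ty = ptr_ty,
            .base = base_ptr,
            .index = index_val,
        }),
    });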
@@ -1753,6 +1794,17 @@ pub const PtrInt = struct {
addr: Index,
};
pub const PtrComptimeField = struct {
ty: Index,
field_val: Index,
};
pub const PtrBaseIndex = struct {
ty: Index,
base: Index,
index: Index,
};
pub const PtrSlice = struct {
ptr: Index,
len: Index,
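
All three structs contain only `Index`-sized fields, so they serialize into the pool's flat `u32` `extra` array, one field per element. A minimal model of how `extraData` can decode them (an assumption inferred from the `addExtra`/`extraData` calls in this diff, not the actual implementation):

    fn extraData(ip: InternPool, comptime T: type, index: u32) T {
        var result: T = undefined;
        inline for (@typeInfo(T).Struct.fields, 0..) |field, i| {
            // Every field of PtrComptimeField/PtrBaseIndex/PtrSlice is an Index enum.
            @field(result, field.name) = @intToEnum(field.type, ip.extra.items[index + i]);
        }
        return result;
    }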
@@ -1956,10 +2008,10 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
},
.type_slice => {
const ptr_ty_index = @intToEnum(Index, data);
var result = indexToKey(ip, ptr_ty_index);
result.ptr_type.size = .Slice;
return result;
const ptr_type_index = @intToEnum(Index, data);
var result = indexToKey(ip, ptr_type_index).ptr_type;
result.size = .Slice;
return .{ .ptr_type = result };
},
.type_optional => .{ .opt_type = @intToEnum(Index, data) },
@@ -2063,7 +2115,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
// The existence of `opt_payload` guarantees that the optional type will be
// stored in the `InternPool`.
const opt_ty = ip.getAssumeExists(.{
.opt_type = indexToKey(ip, payload_val).typeOf(),
.opt_type = ip.typeOf(payload_val),
});
return .{ .opt = .{
.ty = opt_ty,
@@ -2108,14 +2160,59 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
.addr = .{ .int = info.addr },
} };
},
.ptr_eu_payload => {
const ptr_eu_index = @intToEnum(Index, data);
var ptr_type = ip.indexToKey(ip.typeOf(ptr_eu_index)).ptr_type;
ptr_type.elem_type = ip.indexToKey(ptr_type.elem_type).error_union_type.payload_type;
return .{ .ptr = .{
.ty = ip.getAssumeExists(.{ .ptr_type = ptr_type }),
.addr = .{ .eu_payload = ptr_eu_index },
} };
},
.ptr_opt_payload => {
const ptr_opt_index = @intToEnum(Index, data);
var ptr_type = ip.indexToKey(ip.typeOf(ptr_opt_index)).ptr_type;
ptr_type.elem_type = ip.indexToKey(ptr_type.elem_type).opt_type;
return .{ .ptr = .{
.ty = ip.getAssumeExists(.{ .ptr_type = ptr_type }),
.addr = .{ .opt_payload = ptr_opt_index },
} };
},
.ptr_comptime_field => {
const info = ip.extraData(PtrComptimeField, data);
return .{ .ptr = .{
.ty = info.ty,
.addr = .{ .comptime_field = info.field_val },
} };
},
.ptr_elem => {
const info = ip.extraData(PtrBaseIndex, data);
return .{ .ptr = .{
.ty = info.ty,
.addr = .{ .elem = .{
.base = info.base,
.index = ip.indexToKey(info.index).int.storage.u64,
} },
} };
},
.ptr_field => {
const info = ip.extraData(PtrBaseIndex, data);
return .{ .ptr = .{
.ty = info.ty,
.addr = .{ .field = .{
.base = info.base,
.index = ip.indexToKey(info.index).int.storage.u64,
} },
} };
},
.ptr_slice => {
const info = ip.extraData(PtrSlice, data);
const ptr = ip.indexToKey(info.ptr).ptr;
var ptr_ty = ip.indexToKey(ptr.ty);
assert(ptr_ty.ptr_type.size == .Many);
ptr_ty.ptr_type.size = .Slice;
var ptr_type = ip.indexToKey(ptr.ty).ptr_type;
assert(ptr_type.size == .Many);
ptr_type.size = .Slice;
return .{ .ptr = .{
.ty = ip.getAssumeExists(ptr_ty),
.ty = ip.getAssumeExists(.{ .ptr_type = ptr_type }),
.addr = ptr.addr,
.len = info.len,
} };
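
The payload-pointer tags store only the base pointer; the payload pointer type is recomputed from the base pointer's type on decode, as shown above. A round-trip sketch (hypothetical indices; assumes both pointer types are already interned):

    // `eu_ptr` is the Index of a value of type *anyerror!u32.
    const payload_ptr = try ip.get(gpa, .{ .ptr = .{
        .ty = payload_ptr_ty, // *u32
        .addr = .{ .eu_payload = eu_ptr },
    } });
    // Decoding re-derives *u32 from the type of `eu_ptr`, so the key round-trips.
    assert(ip.indexToKey(payload_ptr).ptr.ty == payload_ptr_ty);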
@@ -2301,9 +2398,7 @@ fn indexToKeyBigInt(ip: InternPool, limb_index: u32, positive: bool) Key {
pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
const adapter: KeyAdapter = .{ .intern_pool = ip };
const gop = try ip.map.getOrPutAdapted(gpa, key, adapter);
if (gop.found_existing) {
return @intToEnum(Index, gop.index);
}
if (gop.found_existing) return @intToEnum(Index, gop.index);
try ip.items.ensureUnusedCapacity(gpa, 1);
switch (key) {
.int_type => |int_type| {
@@ -2322,11 +2417,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
if (ptr_type.size == .Slice) {
var new_key = key;
new_key.ptr_type.size = .Many;
const ptr_ty_index = try get(ip, gpa, new_key);
const ptr_type_index = try get(ip, gpa, new_key);
try ip.items.ensureUnusedCapacity(gpa, 1);
ip.items.appendAssumeCapacity(.{
.tag = .type_slice,
.data = @enumToInt(ptr_ty_index),
.data = @enumToInt(ptr_type_index),
});
return @intToEnum(Index, ip.items.len - 1);
}
@@ -2584,64 +2679,98 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
.extern_func => @panic("TODO"),
.ptr => |ptr| switch (ptr.len) {
    .none => {
        assert(ip.indexToKey(ptr.ty).ptr_type.size != .Slice);
        switch (ptr.addr) {
            .@"var" => |@"var"| ip.items.appendAssumeCapacity(.{
                .tag = .ptr_var,
                .data = try ip.addExtra(gpa, PtrVar{
                    .ty = ptr.ty,
                    .init = @"var".init,
                    .owner_decl = @"var".owner_decl,
                    .lib_name = @"var".lib_name,
                    .flags = .{
                        .is_const = @"var".is_const,
                        .is_threadlocal = @"var".is_threadlocal,
                        .is_weak_linkage = @"var".is_weak_linkage,
                    },
                }),
            }),
            .decl => |decl| ip.items.appendAssumeCapacity(.{
                .tag = .ptr_decl,
                .data = try ip.addExtra(gpa, PtrDecl{
                    .ty = ptr.ty,
                    .decl = decl,
                }),
            }),
            .mut_decl => |mut_decl| ip.items.appendAssumeCapacity(.{
                .tag = .ptr_mut_decl,
                .data = try ip.addExtra(gpa, PtrMutDecl{
                    .ty = ptr.ty,
                    .decl = mut_decl.decl,
                    .runtime_index = mut_decl.runtime_index,
                }),
            }),
            .int => |int| ip.items.appendAssumeCapacity(.{
                .tag = .ptr_int,
                .data = try ip.addExtra(gpa, PtrInt{
                    .ty = ptr.ty,
                    .addr = int,
                }),
            }),
        }
    },
    else => {
        assert(ip.indexToKey(ptr.ty).ptr_type.size == .Slice);
        var new_key = key;
        new_key.ptr.ty = ip.slicePtrType(ptr.ty);
        new_key.ptr.len = .none;
        assert(ip.indexToKey(new_key.ptr.ty).ptr_type.size == .Many);
        const ptr_index = try get(ip, gpa, new_key);
        try ip.items.ensureUnusedCapacity(gpa, 1);
        ip.items.appendAssumeCapacity(.{
            .tag = .ptr_slice,
            .data = try ip.addExtra(gpa, PtrSlice{
                .ptr = ptr_index,
                .len = ptr.len,
            }),
        });
    },
},
.ptr => |ptr| {
    const ptr_type = ip.indexToKey(ptr.ty).ptr_type;
    switch (ptr.len) {
        .none => {
            assert(ptr_type.size != .Slice);
            switch (ptr.addr) {
                .@"var" => |@"var"| ip.items.appendAssumeCapacity(.{
                    .tag = .ptr_var,
                    .data = try ip.addExtra(gpa, PtrVar{
                        .ty = ptr.ty,
                        .init = @"var".init,
                        .owner_decl = @"var".owner_decl,
                        .lib_name = @"var".lib_name,
                        .flags = .{
                            .is_const = @"var".is_const,
                            .is_threadlocal = @"var".is_threadlocal,
                            .is_weak_linkage = @"var".is_weak_linkage,
                        },
                    }),
                }),
                .decl => |decl| ip.items.appendAssumeCapacity(.{
                    .tag = .ptr_decl,
                    .data = try ip.addExtra(gpa, PtrDecl{
                        .ty = ptr.ty,
                        .decl = decl,
                    }),
                }),
                .mut_decl => |mut_decl| ip.items.appendAssumeCapacity(.{
                    .tag = .ptr_mut_decl,
                    .data = try ip.addExtra(gpa, PtrMutDecl{
                        .ty = ptr.ty,
                        .decl = mut_decl.decl,
                        .runtime_index = mut_decl.runtime_index,
                    }),
                }),
                .int => |int| ip.items.appendAssumeCapacity(.{
                    .tag = .ptr_int,
                    .data = try ip.addExtra(gpa, PtrInt{
                        .ty = ptr.ty,
                        .addr = int,
                    }),
                }),
                .eu_payload, .opt_payload => |data| ip.items.appendAssumeCapacity(.{
                    .tag = switch (ptr.addr) {
                        .eu_payload => .ptr_eu_payload,
                        .opt_payload => .ptr_opt_payload,
                        else => unreachable,
                    },
                    .data = @enumToInt(data),
                }),
                .comptime_field => |field_val| ip.items.appendAssumeCapacity(.{
                    .tag = .ptr_comptime_field,
                    .data = try ip.addExtra(gpa, PtrComptimeField{
                        .ty = ptr.ty,
                        .field_val = field_val,
                    }),
                }),
                .elem, .field => |base_index| {
                    const index_index = try get(ip, gpa, .{ .int = .{
                        .ty = .usize_type,
                        .storage = .{ .u64 = base_index.index },
                    } });
                    try ip.items.ensureUnusedCapacity(gpa, 1);
                    ip.items.appendAssumeCapacity(.{
                        .tag = switch (ptr.addr) {
                            .elem => .ptr_elem,
                            .field => .ptr_field,
                            else => unreachable,
                        },
                        .data = try ip.addExtra(gpa, PtrBaseIndex{
                            .ty = ptr.ty,
                            .base = base_index.base,
                            .index = index_index,
                        }),
                    });
                },
            }
        },
        else => {
            assert(ptr_type.size == .Slice);
            var new_key = key;
            new_key.ptr.ty = ip.slicePtrType(ptr.ty);
            new_key.ptr.len = .none;
            assert(ip.indexToKey(new_key.ptr.ty).ptr_type.size == .Many);
            const ptr_index = try get(ip, gpa, new_key);
            try ip.items.ensureUnusedCapacity(gpa, 1);
            ip.items.appendAssumeCapacity(.{
                .tag = .ptr_slice,
                .data = try ip.addExtra(gpa, PtrSlice{
                    .ptr = ptr_index,
                    .len = ptr.len,
                }),
            });
        },
    }
    assert(ptr.ty == ip.indexToKey(@intToEnum(Index, ip.items.len - 1)).ptr.ty);
},
.opt => |opt| {
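
In the `.elem`/`.field` path above, the `u64` index is interned as a `usize` value first, so `PtrBaseIndex.index` can hold an `Index` like every other extra field; the trailing assert then verifies that decoding the just-added item reproduces the same pointer type. A hypothetical usage sketch (`struct_ptr` and `field_ptr_ty` assumed already interned):

    const field_ptr = try ip.get(gpa, .{ .ptr = .{
        .ty = field_ptr_ty, // the field's pointer type, e.g. *u32
        .addr = .{ .field = .{ .base = struct_ptr, .index = 1 } },
    } });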
@@ -3683,6 +3812,11 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
.ptr_decl => @sizeOf(PtrDecl),
.ptr_mut_decl => @sizeOf(PtrMutDecl),
.ptr_int => @sizeOf(PtrInt),
.ptr_eu_payload => 0,
.ptr_opt_payload => 0,
.ptr_comptime_field => @sizeOf(PtrComptimeField),
.ptr_elem => @sizeOf(PtrBaseIndex),
.ptr_field => @sizeOf(PtrBaseIndex),
.ptr_slice => @sizeOf(PtrSlice),
.opt_null => 0,
.opt_payload => 0,
@@ -3757,6 +3891,10 @@ pub fn unionPtr(ip: *InternPool, index: Module.Union.Index) *Module.Union {
return ip.allocated_unions.at(@enumToInt(index));
}
pub fn unionPtrConst(ip: InternPool, index: Module.Union.Index) *const Module.Union {
return ip.allocated_unions.at(@enumToInt(index));
}
pub fn inferredErrorSetPtr(ip: *InternPool, index: Module.Fn.InferredErrorSet.Index) *Module.Fn.InferredErrorSet {
return ip.allocated_inferred_error_sets.at(@enumToInt(index));
}

View File

@@ -6783,7 +6783,7 @@ pub fn intType(mod: *Module, signedness: std.builtin.Signedness, bits: u16) Allo
pub fn arrayType(mod: *Module, info: InternPool.Key.ArrayType) Allocator.Error!Type {
if (std.debug.runtime_safety and info.sentinel != .none) {
const sent_ty = mod.intern_pool.indexToKey(info.sentinel).typeOf();
const sent_ty = mod.intern_pool.typeOf(info.sentinel);
assert(sent_ty == info.child);
}
const i = try intern(mod, .{ .array_type = info });
@@ -6802,7 +6802,7 @@ pub fn optionalType(mod: *Module, child_type: InternPool.Index) Allocator.Error!
pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type {
if (std.debug.runtime_safety and info.sentinel != .none) {
const sent_ty = mod.intern_pool.indexToKey(info.sentinel).typeOf();
const sent_ty = mod.intern_pool.typeOf(info.sentinel);
assert(sent_ty == info.elem_type);
}
const i = try intern(mod, .{ .ptr_type = info });

View File

@@ -28473,6 +28473,178 @@ fn beginComptimePtrLoad(
.ty_without_well_defined_layout = if (!layout_defined) decl.ty else null,
};
},
.eu_payload, .opt_payload => |container_ptr| blk: {
const container_ty = mod.intern_pool.typeOf(container_ptr).toType().childType(mod);
const payload_ty = ptr.ty.toType().childType(mod);
var deref = try sema.beginComptimePtrLoad(block, src, container_ptr.toValue(), container_ty);
// eu_payload_ptr and opt_payload_ptr never have a well-defined layout
if (deref.parent != null) {
deref.parent = null;
deref.ty_without_well_defined_layout = container_ty;
}
if (deref.pointee) |*tv| {
const coerce_in_mem_ok =
(try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or
(try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok;
if (coerce_in_mem_ok) {
const payload_val = switch (ptr_val.tag()) {
.eu_payload_ptr => if (tv.val.castTag(.eu_payload)) |some| some.data else {
return sema.fail(block, src, "attempt to unwrap error: {s}", .{tv.val.castTag(.@"error").?.data.name});
},
.opt_payload_ptr => if (tv.val.castTag(.opt_payload)) |some| some.data else opt: {
if (tv.val.isNull(mod)) return sema.fail(block, src, "attempt to use null value", .{});
break :opt tv.val;
},
else => unreachable,
};
tv.* = TypedValue{ .ty = payload_ty, .val = payload_val };
break :blk deref;
}
}
deref.pointee = null;
break :blk deref;
},
.comptime_field => |comptime_field| blk: {
const field_ty = mod.intern_pool.typeOf(comptime_field).toType();
break :blk ComptimePtrLoadKit{
.parent = null,
.pointee = .{ .ty = field_ty, .val = comptime_field.toValue() },
.is_mutable = false,
.ty_without_well_defined_layout = field_ty,
};
},
.elem => |elem_ptr| blk: {
const elem_ty = ptr.ty.toType().childType(mod);
var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.base.toValue(), null);
// This code assumes that elem_ptrs have been "flattened" in order for direct dereference
// to succeed, meaning that elem ptrs of the same elem_ty are coalesced. Here we check that
// our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened"
switch (mod.intern_pool.indexToKey(elem_ptr.base)) {
.ptr => |base_ptr| switch (base_ptr.addr) {
.elem => |base_elem| assert(!mod.intern_pool.typeOf(base_elem.base).toType().elemType2(mod).eql(elem_ty, mod)),
else => {},
},
else => {},
}
if (elem_ptr.index != 0) {
if (elem_ty.hasWellDefinedLayout(mod)) {
if (deref.parent) |*parent| {
// Update the byte offset (in-place)
const elem_size = try sema.typeAbiSize(elem_ty);
const offset = parent.byte_offset + elem_size * elem_ptr.index;
parent.byte_offset = try sema.usizeCast(block, src, offset);
}
} else {
deref.parent = null;
deref.ty_without_well_defined_layout = elem_ty;
}
}
// If we're loading an elem that was derived from a different type
// than the true type of the underlying decl, we cannot deref directly
const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: {
const deref_elem_ty = deref.pointee.?.ty.childType(mod);
break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or
(try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok;
} else false;
if (!ty_matches) {
deref.pointee = null;
break :blk deref;
}
var array_tv = deref.pointee.?;
const check_len = array_tv.ty.arrayLenIncludingSentinel(mod);
if (maybe_array_ty) |load_ty| {
// It's possible that we're loading a [N]T, in which case we'd like to slice
// the pointee array directly from our parent array.
if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, mod)) {
const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod));
deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{
.ty = try Type.array(sema.arena, N, null, elem_ty, mod),
.val = try array_tv.val.sliceArray(mod, sema.arena, elem_ptr.index, elem_ptr.index + N),
} else null;
break :blk deref;
}
}
if (elem_ptr.index >= check_len) {
deref.pointee = null;
break :blk deref;
}
if (elem_ptr.index == check_len - 1) {
if (array_tv.ty.sentinel(mod)) |sent| {
deref.pointee = TypedValue{
.ty = elem_ty,
.val = sent,
};
break :blk deref;
}
}
deref.pointee = TypedValue{
.ty = elem_ty,
.val = try array_tv.val.elemValue(mod, elem_ptr.index),
};
break :blk deref;
},
.field => |field_ptr| blk: {
const field_index = @intCast(u32, field_ptr.index);
const container_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod);
var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.base.toValue(), container_ty);
if (container_ty.hasWellDefinedLayout(mod)) {
const struct_obj = mod.typeToStruct(container_ty);
if (struct_obj != null and struct_obj.?.layout == .Packed) {
// packed structs are not byte addressable
deref.parent = null;
} else if (deref.parent) |*parent| {
// Update the byte offset (in-place)
try sema.resolveTypeLayout(container_ty);
const field_offset = container_ty.structFieldOffset(field_index, mod);
parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset);
}
} else {
deref.parent = null;
deref.ty_without_well_defined_layout = container_ty;
}
const tv = deref.pointee orelse {
deref.pointee = null;
break :blk deref;
};
const coerce_in_mem_ok =
(try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or
(try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok;
if (!coerce_in_mem_ok) {
deref.pointee = null;
break :blk deref;
}
if (container_ty.isSlice(mod)) {
const slice_val = tv.val.castTag(.slice).?.data;
deref.pointee = switch (field_index) {
Value.Payload.Slice.ptr_index => TypedValue{
.ty = container_ty.slicePtrFieldType(mod),
.val = slice_val.ptr,
},
Value.Payload.Slice.len_index => TypedValue{
.ty = Type.usize,
.val = slice_val.len,
},
else => unreachable,
};
} else {
const field_ty = container_ty.structFieldType(field_index, mod);
deref.pointee = TypedValue{
.ty = field_ty,
.val = try tv.val.fieldValue(tv.ty, mod, field_index),
};
}
break :blk deref;
},
},
else => unreachable,
},
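
A user-level illustration of the comptime dereferences these new cases make loadable (assumed to exercise the `eu_payload`, `opt_payload`, and `elem` paths):

    comptime {
        var opt: ?u32 = 42;
        const po = &opt.?; // opt_payload pointer
        po.* += 1;
        var eu: anyerror!u32 = 5;
        if (eu) |*pe| { // eu_payload pointer
            pe.* += 1;
        } else |_| {}
        var arr = [_]u32{ 1, 2, 3 };
        const pi = &arr[1]; // elem pointer
        pi.* = 9;
    }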
@@ -28559,11 +28731,12 @@ fn coerceArrayPtrToSlice(
if (try sema.resolveMaybeUndefVal(inst)) |val| {
const ptr_array_ty = sema.typeOf(inst);
const array_ty = ptr_array_ty.childType(mod);
const slice_val = try Value.Tag.slice.create(sema.arena, .{
.ptr = val,
.len = try mod.intValue(Type.usize, array_ty.arrayLen(mod)),
});
return sema.addConstant(dest_ty, slice_val);
const slice_val = try mod.intern(.{ .ptr = .{
.ty = dest_ty.ip_index,
.addr = mod.intern_pool.indexToKey(val.ip_index).ptr.addr,
.len = (try mod.intValue(Type.usize, array_ty.arrayLen(mod))).ip_index,
} });
return sema.addConstant(dest_ty, slice_val.toValue());
}
try sema.requireRuntimeBlock(block, inst_src, null);
return block.addTyOp(.array_to_slice, dest_ty, inst);
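
The coerced constant is now interned directly instead of going through the legacy `Value.Tag.slice`: the slice key reuses the array pointer's `addr` and attaches the array length as `len`. A user-level coercion that takes this path (assumed):

    const arr = [_]u8{ 1, 2, 3 };
    const s: []const u8 = &arr; // *const [3]u8 coerced to a slice; len interned as 3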
@@ -29769,6 +29942,7 @@ fn analyzeSlice(
const start = try sema.coerce(block, Type.usize, uncasted_start, start_src);
const new_ptr = try sema.analyzePtrArithmetic(block, src, ptr, start, .ptr_add, ptr_src, start_src);
const new_ptr_ty = sema.typeOf(new_ptr);
// true if and only if the end index of the slice, implicitly or explicitly, equals
// the length of the underlying object being sliced. we might learn the length of the
@@ -29914,7 +30088,7 @@ fn analyzeSlice(
const end_int = end_val.getUnsignedInt(mod).?;
const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int);
const elem_ptr = try ptr_val.elemPtr(sema.typeOf(new_ptr), sema.arena, sentinel_index, sema.mod);
const elem_ptr = try ptr_val.elemPtr(new_ptr_ty, sema.arena, sentinel_index, sema.mod);
const res = try sema.pointerDerefExtra(block, src, elem_ptr, elem_ty, false);
const actual_sentinel = switch (res) {
.runtime_load => break :sentinel_check,
@@ -29960,7 +30134,7 @@ fn analyzeSlice(
try sema.analyzeArithmetic(block, .sub, end, start, src, end_src, start_src, false);
const opt_new_len_val = try sema.resolveDefinedValue(block, src, new_len);
const new_ptr_ty_info = sema.typeOf(new_ptr).ptrInfo(mod);
const new_ptr_ty_info = new_ptr_ty.ptrInfo(mod);
const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize(mod) != .C;
if (opt_new_len_val) |new_len_val| {
@@ -30009,7 +30183,11 @@ fn analyzeSlice(
};
if (!new_ptr_val.isUndef(mod)) {
return sema.addConstant(return_ty, new_ptr_val);
return sema.addConstant(return_ty, (try mod.intern_pool.getCoerced(
mod.gpa,
try new_ptr_val.intern(new_ptr_ty, mod),
return_ty.ip_index,
)).toValue());
}
// Special case: @as([]i32, undefined)[x..x]

View File

@@ -3374,9 +3374,15 @@ pub const DeclGen = struct {
val;
break :ptr addrspace_casted_ptr;
},
.decl => |decl| try lowerDeclRefValue(dg, tv, decl),
.mut_decl => |mut_decl| try lowerDeclRefValue(dg, tv, mut_decl.decl),
.decl => |decl| try dg.lowerDeclRefValue(tv, decl),
.mut_decl => |mut_decl| try dg.lowerDeclRefValue(tv, mut_decl.decl),
.int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int).int),
.eu_payload,
.opt_payload,
.elem,
.field,
=> try dg.lowerParentPtr(tv.val, tv.ty.ptrInfo(mod).bit_offset % 8 == 0),
.comptime_field => unreachable,
};
switch (ptr.len) {
.none => return ptr_val,
@@ -4091,6 +4097,132 @@ pub const DeclGen = struct {
.decl => |decl| dg.lowerParentPtrDecl(ptr_val, decl),
.mut_decl => |mut_decl| dg.lowerParentPtrDecl(ptr_val, mut_decl.decl),
.int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int).int),
.eu_payload => |eu_ptr| {
const parent_llvm_ptr = try dg.lowerParentPtr(eu_ptr.toValue(), true);
const eu_ty = mod.intern_pool.typeOf(eu_ptr).toType().childType(mod);
const payload_ty = eu_ty.errorUnionPayload(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// In this case, we represent pointer to error union the same as pointer
// to the payload.
return parent_llvm_ptr;
}
const payload_offset: u8 = if (payload_ty.abiAlignment(mod) > Type.anyerror.abiSize(mod)) 2 else 1;
const llvm_u32 = dg.context.intType(32);
const indices: [2]*llvm.Value = .{
llvm_u32.constInt(0, .False),
llvm_u32.constInt(payload_offset, .False),
};
const eu_llvm_ty = try dg.lowerType(eu_ty);
return eu_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
},
.opt_payload => |opt_ptr| {
const parent_llvm_ptr = try dg.lowerParentPtr(opt_ptr.toValue(), true);
const opt_ty = mod.intern_pool.typeOf(opt_ptr).toType().childType(mod);
const payload_ty = opt_ty.optionalChild(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or
payload_ty.optionalReprIsPayload(mod))
{
// In this case, we represent pointer to optional the same as pointer
// to the payload.
return parent_llvm_ptr;
}
const llvm_u32 = dg.context.intType(32);
const indices: [2]*llvm.Value = .{
llvm_u32.constInt(0, .False),
llvm_u32.constInt(0, .False),
};
const opt_llvm_ty = try dg.lowerType(opt_ty);
return opt_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
},
.comptime_field => unreachable,
.elem => |elem_ptr| {
const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.base.toValue(), true);
const llvm_usize = try dg.lowerType(Type.usize);
const indices: [1]*llvm.Value = .{
llvm_usize.constInt(elem_ptr.index, .False),
};
const elem_llvm_ty = try dg.lowerType(ptr.ty.toType().childType(mod));
return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
},
.field => |field_ptr| {
const parent_llvm_ptr = try dg.lowerParentPtr(field_ptr.base.toValue(), byte_aligned);
const parent_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod);
const field_index = @intCast(u32, field_ptr.index);
const llvm_u32 = dg.context.intType(32);
switch (parent_ty.zigTypeTag(mod)) {
.Union => {
if (parent_ty.containerLayout(mod) == .Packed) {
return parent_llvm_ptr;
}
const layout = parent_ty.unionGetLayout(mod);
if (layout.payload_size == 0) {
// In this case a pointer to the union and a pointer to any
// (void) payload is the same.
return parent_llvm_ptr;
}
const llvm_pl_index = if (layout.tag_size == 0)
0
else
@boolToInt(layout.tag_align >= layout.payload_align);
const indices: [2]*llvm.Value = .{
llvm_u32.constInt(0, .False),
llvm_u32.constInt(llvm_pl_index, .False),
};
const parent_llvm_ty = try dg.lowerType(parent_ty);
return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
},
.Struct => {
if (parent_ty.containerLayout(mod) == .Packed) {
if (!byte_aligned) return parent_llvm_ptr;
const llvm_usize = dg.context.intType(target.cpu.arch.ptrBitWidth());
const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize);
// count bits of fields before this one
const prev_bits = b: {
var b: usize = 0;
for (parent_ty.structFields(mod).values()[0..field_index]) |field| {
if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
b += @intCast(usize, field.ty.bitSize(mod));
}
break :b b;
};
const byte_offset = llvm_usize.constInt(prev_bits / 8, .False);
const field_addr = base_addr.constAdd(byte_offset);
const final_llvm_ty = dg.context.pointerType(0);
return field_addr.constIntToPtr(final_llvm_ty);
}
const parent_llvm_ty = try dg.lowerType(parent_ty);
if (llvmField(parent_ty, field_index, mod)) |llvm_field| {
const indices: [2]*llvm.Value = .{
llvm_u32.constInt(0, .False),
llvm_u32.constInt(llvm_field.index, .False),
};
return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
} else {
const llvm_index = llvm_u32.constInt(@boolToInt(parent_ty.hasRuntimeBitsIgnoreComptime(mod)), .False);
const indices: [1]*llvm.Value = .{llvm_index};
return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
}
},
.Pointer => {
assert(parent_ty.isSlice(mod));
const indices: [2]*llvm.Value = .{
llvm_u32.constInt(0, .False),
llvm_u32.constInt(field_index, .False),
};
const parent_llvm_ty = try dg.lowerType(parent_ty);
return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
},
else => unreachable,
}
},
},
else => unreachable,
};
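
In the packed-struct branch, the field pointer is computed arithmetically because packed fields are not individually byte-addressable: the bit widths of the preceding runtime fields are summed and `prev_bits / 8` bytes are added to the base address. A self-contained model of that offset computation (a sketch; the real code above skips comptime and zero-bit fields via Module queries):

    fn packedFieldByteOffset(comptime T: type, comptime field_index: usize) usize {
        var bits: usize = 0;
        inline for (@typeInfo(T).Struct.fields, 0..) |f, i| {
            if (i == field_index) break;
            bits += @bitSizeOf(f.type);
        }
        return bits / 8;
    }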

View File

@@ -1192,7 +1192,7 @@ const Writer = struct {
.field => {
const field_name = self.code.nullTerminatedString(extra.data.field_name_start);
try self.writeInstRef(stream, extra.data.obj_ptr);
try stream.print(", {}", .{std.zig.fmtId(field_name)});
try stream.print(", \"{}\"", .{std.zig.fmtEscapes(field_name)});
},
}
try stream.writeAll(", [");
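
The effect on ZIR dumps: a field name previously printed via `std.zig.fmtId` (bare unless it needed `@"..."` quoting) now always prints as a quoted, escaped string, e.g. `, foo` becomes `, "foo"` (output shape assumed).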

View File

@@ -559,37 +559,46 @@ pub const Value = struct {
/// Asserts that the value is representable as an array of bytes.
/// Copies the value into a freshly allocated slice of memory, which is owned by the caller.
pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module) ![]u8 {
switch (val.tag()) {
    .bytes => {
        const bytes = val.castTag(.bytes).?.data;
        const adjusted_len = bytes.len - @boolToInt(ty.sentinel(mod) != null);
        const adjusted_bytes = bytes[0..adjusted_len];
        return allocator.dupe(u8, adjusted_bytes);
    },
    .str_lit => {
        const str_lit = val.castTag(.str_lit).?.data;
        const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
        return allocator.dupe(u8, bytes);
    },
    .enum_literal => return allocator.dupe(u8, val.castTag(.enum_literal).?.data),
    .repeated => {
        const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(mod));
        const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod)));
        @memset(result, byte);
        return result;
    },
    .decl_ref => {
        const decl_index = val.castTag(.decl_ref).?.data;
        const decl = mod.declPtr(decl_index);
        const decl_val = try decl.value();
        return decl_val.toAllocatedBytes(decl.ty, allocator, mod);
    },
    .the_only_possible_value => return &[_]u8{},
    .slice => {
        const slice = val.castTag(.slice).?.data;
        return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(mod), allocator, mod);
    },
    else => return arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod),
}
switch (val.ip_index) {
    .none => switch (val.tag()) {
        .bytes => {
            const bytes = val.castTag(.bytes).?.data;
            const adjusted_len = bytes.len - @boolToInt(ty.sentinel(mod) != null);
            const adjusted_bytes = bytes[0..adjusted_len];
            return allocator.dupe(u8, adjusted_bytes);
        },
        .str_lit => {
            const str_lit = val.castTag(.str_lit).?.data;
            const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
            return allocator.dupe(u8, bytes);
        },
        .enum_literal => return allocator.dupe(u8, val.castTag(.enum_literal).?.data),
        .repeated => {
            const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(mod));
            const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod)));
            @memset(result, byte);
            return result;
        },
        .decl_ref => {
            const decl_index = val.castTag(.decl_ref).?.data;
            const decl = mod.declPtr(decl_index);
            const decl_val = try decl.value();
            return decl_val.toAllocatedBytes(decl.ty, allocator, mod);
        },
        .the_only_possible_value => return &[_]u8{},
        .slice => {
            const slice = val.castTag(.slice).?.data;
            return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(mod), allocator, mod);
        },
        else => return arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod),
    },
    else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
        .ptr => |ptr| switch (ptr.len) {
            .none => unreachable,
            else => return arrayToAllocatedBytes(val, ptr.len.toValue().toUnsignedInt(mod), allocator, mod),
        },
        else => unreachable,
    },
}
}
@@ -605,6 +614,16 @@ pub const Value = struct {
pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index {
if (val.ip_index != .none) return mod.intern_pool.getCoerced(mod.gpa, val.ip_index, ty.ip_index);
switch (val.tag()) {
.elem_ptr => {
const pl = val.castTag(.elem_ptr).?.data;
return mod.intern(.{ .ptr = .{
.ty = ty.ip_index,
.addr = .{ .elem = .{
.base = pl.array_ptr.ip_index,
.index = pl.index,
} },
} });
},
.slice => {
const pl = val.castTag(.slice).?.data;
const ptr = try pl.ptr.intern(ty.slicePtrFieldType(mod), mod);
@@ -2601,7 +2620,10 @@ pub const Value = struct {
.@"var" => unreachable,
.decl => |decl| mod.declPtr(decl).val.elemValue(mod, index),
.mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).val.elemValue(mod, index),
.int => unreachable,
.int, .eu_payload, .opt_payload => unreachable,
.comptime_field => |field_val| field_val.toValue().elemValue(mod, index),
.elem => |elem| elem.base.toValue().elemValue(mod, index + elem.index),
.field => unreachable,
},
.aggregate => |aggregate| switch (aggregate.storage) {
.elems => |elems| elems[index].toValue(),
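
The `.elem` case composes offsets: loading element `index` through a pointer whose address is itself `base + elem.index` loads `base[index + elem.index]`. A user-level example of the composition (assumed):

    const std = @import("std");
    comptime {
        const arr = [_]u8{ 10, 20, 30, 40 };
        const p = arr[1..]; // pointer with base = arr, index = 1
        std.debug.assert(p[2] == arr[3]); // (base + 1)[2] == base[3]
    }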