stage2: start the InternPool transition

Instead of doing everything at once, which is a hopelessly large task,
this introduces a piecemeal transition that can be done in small
increments.

This is a minimal changeset that keeps the compiler compiling. It only
uses the InternPool for a small set of types.

Behavior tests are not passing.

Air.Inst.Ref and Zir.Inst.Ref are separated into different enums but
compile-time verified to have the same fields in the same order.

The large set of changes is mainly to deal with the fact that most Type
and Value methods now require a Module to be passed in, so that the
InternPool object can be accessed.
This commit is contained in:
Andrew Kelley
2023-05-02 15:01:45 -07:00
parent 1e7dcaa3ae
commit 9aec2758cc
38 changed files with 6472 additions and 5783 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -292,19 +292,19 @@ pub const CType = extern union {
.abi = std.math.log2_int(u32, abi_alignment),
};
}
pub fn abiAlign(ty: Type, target: Target) AlignAs {
const abi_align = ty.abiAlignment(target);
pub fn abiAlign(ty: Type, mod: *const Module) AlignAs {
const abi_align = ty.abiAlignment(mod);
return init(abi_align, abi_align);
}
pub fn fieldAlign(struct_ty: Type, field_i: usize, target: Target) AlignAs {
pub fn fieldAlign(struct_ty: Type, field_i: usize, mod: *const Module) AlignAs {
return init(
struct_ty.structFieldAlign(field_i, target),
struct_ty.structFieldType(field_i).abiAlignment(target),
struct_ty.structFieldAlign(field_i, mod),
struct_ty.structFieldType(field_i).abiAlignment(mod),
);
}
pub fn unionPayloadAlign(union_ty: Type, target: Target) AlignAs {
pub fn unionPayloadAlign(union_ty: Type, mod: *const Module) AlignAs {
const union_obj = union_ty.cast(Type.Payload.Union).?.data;
const union_payload_align = union_obj.abiAlignment(target, false);
const union_payload_align = union_obj.abiAlignment(mod, false);
return init(union_payload_align, union_payload_align);
}
@@ -344,8 +344,8 @@ pub const CType = extern union {
return self.map.entries.items(.hash)[index - Tag.no_payload_count];
}
pub fn typeToIndex(self: Set, ty: Type, target: Target, kind: Kind) ?Index {
const lookup = Convert.Lookup{ .imm = .{ .set = &self, .target = target } };
pub fn typeToIndex(self: Set, ty: Type, mod: *Module, kind: Kind) ?Index {
const lookup = Convert.Lookup{ .imm = .{ .set = &self, .mod = mod } };
var convert: Convert = undefined;
convert.initType(ty, kind, lookup) catch unreachable;
@@ -405,7 +405,7 @@ pub const CType = extern union {
);
if (!gop.found_existing) {
errdefer _ = self.set.map.pop();
gop.key_ptr.* = try createFromConvert(self, ty, lookup.getTarget(), kind, convert);
gop.key_ptr.* = try createFromConvert(self, ty, lookup.getModule(), kind, convert);
}
if (std.debug.runtime_safety) {
const adapter = TypeAdapter64{
@@ -1236,10 +1236,10 @@ pub const CType = extern union {
}
pub const Lookup = union(enum) {
fail: Target,
fail: *Module,
imm: struct {
set: *const Store.Set,
target: Target,
mod: *Module,
},
mut: struct {
promoted: *Store.Promoted,
@@ -1254,10 +1254,14 @@ pub const CType = extern union {
}
pub fn getTarget(self: @This()) Target {
return self.getModule().getTarget();
}
pub fn getModule(self: @This()) *Module {
return switch (self) {
.fail => |target| target,
.imm => |imm| imm.target,
.mut => |mut| mut.mod.getTarget(),
.fail => |mod| mod,
.imm => |imm| imm.mod,
.mut => |mut| mut.mod,
};
}
@@ -1272,7 +1276,7 @@ pub const CType = extern union {
pub fn typeToIndex(self: @This(), ty: Type, kind: Kind) !?Index {
return switch (self) {
.fail => null,
.imm => |imm| imm.set.typeToIndex(ty, imm.target, kind),
.imm => |imm| imm.set.typeToIndex(ty, imm.mod, kind),
.mut => |mut| try mut.promoted.typeToIndex(ty, mut.mod, kind),
};
}
@@ -1284,7 +1288,7 @@ pub const CType = extern union {
pub fn freeze(self: @This()) @This() {
return switch (self) {
.fail, .imm => self,
.mut => |mut| .{ .imm = .{ .set = &mut.promoted.set, .target = self.getTarget() } },
.mut => |mut| .{ .imm = .{ .set = &mut.promoted.set, .mod = mut.mod } },
};
}
};
@@ -1338,7 +1342,7 @@ pub const CType = extern union {
self.storage.anon.fields[0] = .{
.name = "array",
.type = array_idx,
.alignas = AlignAs.abiAlign(ty, lookup.getTarget()),
.alignas = AlignAs.abiAlign(ty, lookup.getModule()),
};
self.initAnon(kind, fwd_idx, 1);
} else self.init(switch (kind) {
@@ -1350,12 +1354,12 @@ pub const CType = extern union {
}
pub fn initType(self: *@This(), ty: Type, kind: Kind, lookup: Lookup) !void {
const target = lookup.getTarget();
const mod = lookup.getModule();
self.* = undefined;
if (!ty.isFnOrHasRuntimeBitsIgnoreComptime())
if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(mod))
self.init(.void)
else if (ty.isAbiInt()) switch (ty.tag()) {
else if (ty.isAbiInt(mod)) switch (ty.tag()) {
.usize => self.init(.uintptr_t),
.isize => self.init(.intptr_t),
.c_char => self.init(.char),
@@ -1367,13 +1371,13 @@ pub const CType = extern union {
.c_ulong => self.init(.@"unsigned long"),
.c_longlong => self.init(.@"long long"),
.c_ulonglong => self.init(.@"unsigned long long"),
else => switch (tagFromIntInfo(ty.intInfo(target))) {
else => switch (tagFromIntInfo(ty.intInfo(mod))) {
.void => unreachable,
else => |t| self.init(t),
.array => switch (kind) {
.forward, .complete, .global => {
const abi_size = ty.abiSize(target);
const abi_align = ty.abiAlignment(target);
const abi_size = ty.abiSize(mod);
const abi_align = ty.abiAlignment(mod);
self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{
.len = @divExact(abi_size, abi_align),
.elem_type = tagFromIntInfo(.{
@@ -1389,7 +1393,7 @@ pub const CType = extern union {
.payload => unreachable,
},
},
} else switch (ty.zigTypeTag()) {
} else switch (ty.zigTypeTag(mod)) {
.Frame => unreachable,
.AnyFrame => unreachable,
@@ -1434,12 +1438,12 @@ pub const CType = extern union {
self.storage.anon.fields[0] = .{
.name = "ptr",
.type = ptr_idx,
.alignas = AlignAs.abiAlign(ptr_ty, target),
.alignas = AlignAs.abiAlign(ptr_ty, mod),
};
self.storage.anon.fields[1] = .{
.name = "len",
.type = Tag.uintptr_t.toIndex(),
.alignas = AlignAs.abiAlign(Type.usize, target),
.alignas = AlignAs.abiAlign(Type.usize, mod),
};
self.initAnon(kind, fwd_idx, 2);
} else self.init(switch (kind) {
@@ -1462,12 +1466,8 @@ pub const CType = extern union {
},
};
var host_int_pl = Type.Payload.Bits{
.base = .{ .tag = .int_unsigned },
.data = info.host_size * 8,
};
const pointee_ty = if (info.host_size > 0 and info.vector_index == .none)
Type.initPayload(&host_int_pl.base)
try mod.intType(.unsigned, info.host_size * 8)
else
info.pointee_type;
@@ -1490,11 +1490,9 @@ pub const CType = extern union {
if (ty.castTag(.@"struct")) |struct_obj| {
try self.initType(struct_obj.data.backing_int_ty, kind, lookup);
} else {
var buf: Type.Payload.Bits = .{
.base = .{ .tag = .int_unsigned },
.data = @intCast(u16, ty.bitSize(target)),
};
try self.initType(Type.initPayload(&buf.base), kind, lookup);
const bits = @intCast(u16, ty.bitSize(mod));
const int_ty = try mod.intType(.unsigned, bits);
try self.initType(int_ty, kind, lookup);
}
} else if (ty.isTupleOrAnonStruct()) {
if (lookup.isMutable()) {
@@ -1505,7 +1503,7 @@ pub const CType = extern union {
}) |field_i| {
const field_ty = ty.structFieldType(field_i);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
_ = try lookup.typeToIndex(field_ty, switch (kind) {
.forward, .forward_parameter => .forward,
.complete, .parameter => .complete,
@@ -1555,7 +1553,7 @@ pub const CType = extern union {
self.storage.anon.fields[field_count] = .{
.name = "payload",
.type = payload_idx.?,
.alignas = AlignAs.unionPayloadAlign(ty, target),
.alignas = AlignAs.unionPayloadAlign(ty, mod),
};
field_count += 1;
}
@@ -1563,7 +1561,7 @@ pub const CType = extern union {
self.storage.anon.fields[field_count] = .{
.name = "tag",
.type = tag_idx.?,
.alignas = AlignAs.abiAlign(tag_ty.?, target),
.alignas = AlignAs.abiAlign(tag_ty.?, mod),
};
field_count += 1;
}
@@ -1576,7 +1574,7 @@ pub const CType = extern union {
} };
self.value = .{ .cty = initPayload(&self.storage.anon.pl.complete) };
} else self.init(.@"struct");
} else if (kind == .payload and ty.unionHasAllZeroBitFieldTypes()) {
} else if (kind == .payload and ty.unionHasAllZeroBitFieldTypes(mod)) {
self.init(.void);
} else {
var is_packed = false;
@@ -1586,9 +1584,9 @@ pub const CType = extern union {
else => unreachable,
}) |field_i| {
const field_ty = ty.structFieldType(field_i);
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_align = AlignAs.fieldAlign(ty, field_i, target);
const field_align = AlignAs.fieldAlign(ty, field_i, mod);
if (field_align.@"align" < field_align.abi) {
is_packed = true;
if (!lookup.isMutable()) break;
@@ -1643,8 +1641,8 @@ pub const CType = extern union {
.Optional => {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = ty.optionalChild(&buf);
if (payload_ty.hasRuntimeBitsIgnoreComptime()) {
if (ty.optionalReprIsPayload()) {
if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
if (ty.optionalReprIsPayload(mod)) {
try self.initType(payload_ty, kind, lookup);
} else if (switch (kind) {
.forward, .forward_parameter => @as(Index, undefined),
@@ -1661,12 +1659,12 @@ pub const CType = extern union {
self.storage.anon.fields[0] = .{
.name = "payload",
.type = payload_idx,
.alignas = AlignAs.abiAlign(payload_ty, target),
.alignas = AlignAs.abiAlign(payload_ty, mod),
};
self.storage.anon.fields[1] = .{
.name = "is_null",
.type = Tag.bool.toIndex(),
.alignas = AlignAs.abiAlign(Type.bool, target),
.alignas = AlignAs.abiAlign(Type.bool, mod),
};
self.initAnon(kind, fwd_idx, 2);
} else self.init(switch (kind) {
@@ -1699,12 +1697,12 @@ pub const CType = extern union {
self.storage.anon.fields[0] = .{
.name = "payload",
.type = payload_idx,
.alignas = AlignAs.abiAlign(payload_ty, target),
.alignas = AlignAs.abiAlign(payload_ty, mod),
};
self.storage.anon.fields[1] = .{
.name = "error",
.type = error_idx,
.alignas = AlignAs.abiAlign(error_ty, target),
.alignas = AlignAs.abiAlign(error_ty, mod),
};
self.initAnon(kind, fwd_idx, 2);
} else self.init(switch (kind) {
@@ -1733,7 +1731,7 @@ pub const CType = extern union {
};
_ = try lookup.typeToIndex(info.return_type, param_kind);
for (info.param_types) |param_type| {
if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
_ = try lookup.typeToIndex(param_type, param_kind);
}
}
@@ -1900,16 +1898,16 @@ pub const CType = extern union {
}
}
fn createFromType(store: *Store.Promoted, ty: Type, target: Target, kind: Kind) !CType {
fn createFromType(store: *Store.Promoted, ty: Type, mod: *const Module, kind: Kind) !CType {
var convert: Convert = undefined;
try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .target = target } });
return createFromConvert(store, ty, target, kind, &convert);
try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .mod = mod } });
return createFromConvert(store, ty, mod, kind, &convert);
}
fn createFromConvert(
store: *Store.Promoted,
ty: Type,
target: Target,
mod: *Module,
kind: Kind,
convert: Convert,
) !CType {
@@ -1930,7 +1928,7 @@ pub const CType = extern union {
.packed_struct,
.packed_union,
=> {
const zig_ty_tag = ty.zigTypeTag();
const zig_ty_tag = ty.zigTypeTag(mod);
const fields_len = switch (zig_ty_tag) {
.Struct => ty.structFieldCount(),
.Union => ty.unionFields().count(),
@@ -1941,7 +1939,7 @@ pub const CType = extern union {
for (0..fields_len) |field_i| {
const field_ty = ty.structFieldType(field_i);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
c_fields_len += 1;
}
@@ -1950,7 +1948,7 @@ pub const CType = extern union {
for (0..fields_len) |field_i| {
const field_ty = ty.structFieldType(field_i);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
defer c_field_i += 1;
fields_pl[c_field_i] = .{
@@ -1962,12 +1960,12 @@ pub const CType = extern union {
.Union => ty.unionFields().keys()[field_i],
else => unreachable,
}),
.type = store.set.typeToIndex(field_ty, target, switch (kind) {
.type = store.set.typeToIndex(field_ty, mod, switch (kind) {
.forward, .forward_parameter => .forward,
.complete, .parameter, .payload => .complete,
.global => .global,
}).?,
.alignas = AlignAs.fieldAlign(ty, field_i, target),
.alignas = AlignAs.fieldAlign(ty, field_i, mod),
};
}
@@ -2004,7 +2002,7 @@ pub const CType = extern union {
const struct_pl = try arena.create(Payload.Aggregate);
struct_pl.* = .{ .base = .{ .tag = t }, .data = .{
.fields = fields_pl,
.fwd_decl = store.set.typeToIndex(ty, target, .forward).?,
.fwd_decl = store.set.typeToIndex(ty, mod, .forward).?,
} };
return initPayload(struct_pl);
},
@@ -2026,21 +2024,21 @@ pub const CType = extern union {
var c_params_len: usize = 0;
for (info.param_types) |param_type| {
if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
c_params_len += 1;
}
const params_pl = try arena.alloc(Index, c_params_len);
var c_param_i: usize = 0;
for (info.param_types) |param_type| {
if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
params_pl[c_param_i] = store.set.typeToIndex(param_type, target, param_kind).?;
if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
params_pl[c_param_i] = store.set.typeToIndex(param_type, mod, param_kind).?;
c_param_i += 1;
}
const fn_pl = try arena.create(Payload.Function);
fn_pl.* = .{ .base = .{ .tag = t }, .data = .{
.return_type = store.set.typeToIndex(info.return_type, target, param_kind).?,
.return_type = store.set.typeToIndex(info.return_type, mod, param_kind).?,
.param_types = params_pl,
} };
return initPayload(fn_pl);
@@ -2067,12 +2065,12 @@ pub const CType = extern union {
}
pub fn eql(self: @This(), ty: Type, cty: CType) bool {
const mod = self.lookup.getModule();
switch (self.convert.value) {
.cty => |c| return c.eql(cty),
.tag => |t| {
if (t != cty.tag()) return false;
const target = self.lookup.getTarget();
switch (t) {
.fwd_anon_struct,
.fwd_anon_union,
@@ -2084,7 +2082,7 @@ pub const CType = extern union {
]u8 = undefined;
const c_fields = cty.cast(Payload.Fields).?.data;
const zig_ty_tag = ty.zigTypeTag();
const zig_ty_tag = ty.zigTypeTag(mod);
var c_field_i: usize = 0;
for (0..switch (zig_ty_tag) {
.Struct => ty.structFieldCount(),
@@ -2093,7 +2091,7 @@ pub const CType = extern union {
}) |field_i| {
const field_ty = ty.structFieldType(field_i);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
defer c_field_i += 1;
const c_field = &c_fields[c_field_i];
@@ -2113,7 +2111,7 @@ pub const CType = extern union {
else => unreachable,
},
mem.span(c_field.name),
) or AlignAs.fieldAlign(ty, field_i, target).@"align" !=
) or AlignAs.fieldAlign(ty, field_i, mod).@"align" !=
c_field.alignas.@"align") return false;
}
return true;
@@ -2146,7 +2144,7 @@ pub const CType = extern union {
.function,
.varargs_function,
=> {
if (ty.zigTypeTag() != .Fn) return false;
if (ty.zigTypeTag(mod) != .Fn) return false;
const info = ty.fnInfo();
assert(!info.is_generic);
@@ -2162,7 +2160,7 @@ pub const CType = extern union {
var c_param_i: usize = 0;
for (info.param_types) |param_type| {
if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
if (c_param_i >= data.param_types.len) return false;
const param_cty = data.param_types[c_param_i];
@@ -2202,7 +2200,7 @@ pub const CType = extern union {
.tag => |t| {
autoHash(hasher, t);
const target = self.lookup.getTarget();
const mod = self.lookup.getModule();
switch (t) {
.fwd_anon_struct,
.fwd_anon_union,
@@ -2211,15 +2209,15 @@ pub const CType = extern union {
std.fmt.count("f{}", .{std.math.maxInt(usize)})
]u8 = undefined;
const zig_ty_tag = ty.zigTypeTag();
for (0..switch (ty.zigTypeTag()) {
const zig_ty_tag = ty.zigTypeTag(mod);
for (0..switch (ty.zigTypeTag(mod)) {
.Struct => ty.structFieldCount(),
.Union => ty.unionFields().count(),
else => unreachable,
}) |field_i| {
const field_ty = ty.structFieldType(field_i);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
self.updateHasherRecurse(hasher, field_ty, switch (self.kind) {
.forward, .forward_parameter => .forward,
@@ -2234,7 +2232,7 @@ pub const CType = extern union {
.Union => ty.unionFields().keys()[field_i],
else => unreachable,
});
autoHash(hasher, AlignAs.fieldAlign(ty, field_i, target).@"align");
autoHash(hasher, AlignAs.fieldAlign(ty, field_i, mod).@"align");
}
},
@@ -2271,7 +2269,7 @@ pub const CType = extern union {
self.updateHasherRecurse(hasher, info.return_type, param_kind);
for (info.param_types) |param_type| {
if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
self.updateHasherRecurse(hasher, param_type, param_kind);
}
},

File diff suppressed because it is too large Load Diff

View File

@@ -231,9 +231,10 @@ pub const DeclGen = struct {
/// Fetch the result-id for a previously generated instruction or constant.
fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !IdRef {
if (self.air.value(inst)) |val| {
const mod = self.module;
if (self.air.value(inst, mod)) |val| {
const ty = self.air.typeOf(inst);
if (ty.zigTypeTag() == .Fn) {
if (ty.zigTypeTag(mod) == .Fn) {
const fn_decl_index = switch (val.tag()) {
.extern_fn => val.castTag(.extern_fn).?.data.owner_decl,
.function => val.castTag(.function).?.data.owner_decl,
@@ -340,8 +341,9 @@ pub const DeclGen = struct {
}
fn arithmeticTypeInfo(self: *DeclGen, ty: Type) !ArithmeticTypeInfo {
const mod = self.module;
const target = self.getTarget();
return switch (ty.zigTypeTag()) {
return switch (ty.zigTypeTag(mod)) {
.Bool => ArithmeticTypeInfo{
.bits = 1, // Doesn't matter for this class.
.is_vector = false,
@@ -355,7 +357,7 @@ pub const DeclGen = struct {
.class = .float,
},
.Int => blk: {
const int_info = ty.intInfo(target);
const int_info = ty.intInfo(mod);
// TODO: Maybe it's useful to also return this value.
const maybe_backing_bits = self.backingIntBits(int_info.bits);
break :blk ArithmeticTypeInfo{
@@ -533,21 +535,22 @@ pub const DeclGen = struct {
}
fn addInt(self: *@This(), ty: Type, val: Value) !void {
const target = self.dg.getTarget();
const int_info = ty.intInfo(target);
const mod = self.dg.module;
const int_info = ty.intInfo(mod);
const int_bits = switch (int_info.signedness) {
.signed => @bitCast(u64, val.toSignedInt(target)),
.unsigned => val.toUnsignedInt(target),
.signed => @bitCast(u64, val.toSignedInt(mod)),
.unsigned => val.toUnsignedInt(mod),
};
// TODO: Swap endianess if the compiler is big endian.
const len = ty.abiSize(target);
const len = ty.abiSize(mod);
try self.addBytes(std.mem.asBytes(&int_bits)[0..@intCast(usize, len)]);
}
fn addFloat(self: *@This(), ty: Type, val: Value) !void {
const mod = self.dg.module;
const target = self.dg.getTarget();
const len = ty.abiSize(target);
const len = ty.abiSize(mod);
// TODO: Swap endianess if the compiler is big endian.
switch (ty.floatBits(target)) {
@@ -607,15 +610,15 @@ pub const DeclGen = struct {
}
fn lower(self: *@This(), ty: Type, val: Value) !void {
const target = self.dg.getTarget();
const dg = self.dg;
const mod = dg.module;
if (val.isUndef()) {
const size = ty.abiSize(target);
const size = ty.abiSize(mod);
return try self.addUndef(size);
}
switch (ty.zigTypeTag()) {
switch (ty.zigTypeTag(mod)) {
.Int => try self.addInt(ty, val),
.Float => try self.addFloat(ty, val),
.Bool => try self.addConstBool(val.toBool()),
@@ -644,7 +647,7 @@ pub const DeclGen = struct {
const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
try self.addBytes(bytes);
if (ty.sentinel()) |sentinel| {
try self.addByte(@intCast(u8, sentinel.toUnsignedInt(target)));
try self.addByte(@intCast(u8, sentinel.toUnsignedInt(mod)));
}
},
.bytes => {
@@ -690,13 +693,13 @@ pub const DeclGen = struct {
const struct_begin = self.size;
const field_vals = val.castTag(.aggregate).?.data;
for (struct_ty.fields.values(), 0..) |field, i| {
if (field.is_comptime or !field.ty.hasRuntimeBits()) continue;
if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue;
try self.lower(field.ty, field_vals[i]);
// Add padding if required.
// TODO: Add to type generation as well?
const unpadded_field_end = self.size - struct_begin;
const padded_field_end = ty.structFieldOffset(i + 1, target);
const padded_field_end = ty.structFieldOffset(i + 1, mod);
const padding = padded_field_end - unpadded_field_end;
try self.addUndef(padding);
}
@@ -705,13 +708,13 @@ pub const DeclGen = struct {
.Optional => {
var opt_buf: Type.Payload.ElemType = undefined;
const payload_ty = ty.optionalChild(&opt_buf);
const has_payload = !val.isNull();
const abi_size = ty.abiSize(target);
const has_payload = !val.isNull(mod);
const abi_size = ty.abiSize(mod);
if (!payload_ty.hasRuntimeBits()) {
if (!payload_ty.hasRuntimeBits(mod)) {
try self.addConstBool(has_payload);
return;
} else if (ty.optionalReprIsPayload()) {
} else if (ty.optionalReprIsPayload(mod)) {
// Optional representation is a nullable pointer or slice.
if (val.castTag(.opt_payload)) |payload| {
try self.lower(payload_ty, payload.data);
@@ -729,7 +732,7 @@ pub const DeclGen = struct {
// Subtract 1 for @sizeOf(bool).
// TODO: Make this not hardcoded.
const payload_size = payload_ty.abiSize(target);
const payload_size = payload_ty.abiSize(mod);
const padding = abi_size - payload_size - 1;
if (val.castTag(.opt_payload)) |payload| {
@@ -744,14 +747,13 @@ pub const DeclGen = struct {
var int_val_buffer: Value.Payload.U64 = undefined;
const int_val = val.enumToInt(ty, &int_val_buffer);
var int_ty_buffer: Type.Payload.Bits = undefined;
const int_ty = ty.intTagType(&int_ty_buffer);
const int_ty = ty.intTagType();
try self.lower(int_ty, int_val);
},
.Union => {
const tag_and_val = val.castTag(.@"union").?.data;
const layout = ty.unionGetLayout(target);
const layout = ty.unionGetLayout(mod);
if (layout.payload_size == 0) {
return try self.lower(ty.unionTagTypeSafety().?, tag_and_val.tag);
@@ -772,9 +774,9 @@ pub const DeclGen = struct {
try self.lower(ty.unionTagTypeSafety().?, tag_and_val.tag);
}
const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime()) blk: {
const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: {
try self.lower(active_field_ty, tag_and_val.val);
break :blk active_field_ty.abiSize(target);
break :blk active_field_ty.abiSize(mod);
} else 0;
const payload_padding_len = layout.payload_size - active_field_size;
@@ -808,9 +810,9 @@ pub const DeclGen = struct {
return try self.lower(Type.anyerror, error_val);
}
const payload_size = payload_ty.abiSize(target);
const error_size = Type.anyerror.abiAlignment(target);
const ty_size = ty.abiSize(target);
const payload_size = payload_ty.abiSize(mod);
const error_size = Type.anyerror.abiAlignment(mod);
const ty_size = ty.abiSize(mod);
const padding = ty_size - payload_size - error_size;
const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef);
@@ -886,7 +888,7 @@ pub const DeclGen = struct {
// .id_result = result_id,
// .storage_class = storage_class,
// });
// } else if (ty.abiSize(target) == 0) {
// } else if (ty.abiSize(mod) == 0) {
// // Special case: if the type has no size, then return an undefined pointer.
// return try section.emit(self.spv.gpa, .OpUndef, .{
// .id_result_type = self.typeId(ptr_ty_ref),
@@ -968,6 +970,7 @@ pub const DeclGen = struct {
/// is then loaded using OpLoad. Such values are loaded into the UniformConstant storage class by default.
/// This function should only be called during function code generation.
fn constant(self: *DeclGen, ty: Type, val: Value, repr: Repr) !IdRef {
const mod = self.module;
const target = self.getTarget();
const result_ty_ref = try self.resolveType(ty, repr);
@@ -977,12 +980,12 @@ pub const DeclGen = struct {
return self.spv.constUndef(result_ty_ref);
}
switch (ty.zigTypeTag()) {
switch (ty.zigTypeTag(mod)) {
.Int => {
if (ty.isSignedInt()) {
return try self.spv.constInt(result_ty_ref, val.toSignedInt(target));
if (ty.isSignedInt(mod)) {
return try self.spv.constInt(result_ty_ref, val.toSignedInt(mod));
} else {
return try self.spv.constInt(result_ty_ref, val.toUnsignedInt(target));
return try self.spv.constInt(result_ty_ref, val.toUnsignedInt(mod));
}
},
.Bool => switch (repr) {
@@ -1037,7 +1040,7 @@ pub const DeclGen = struct {
// The value cannot be generated directly, so generate it as an indirect constant,
// and then perform an OpLoad.
const result_id = self.spv.allocId();
const alignment = ty.abiAlignment(target);
const alignment = ty.abiAlignment(mod);
const spv_decl_index = try self.spv.allocDecl(.global);
try self.lowerIndirectConstant(
@@ -1114,8 +1117,8 @@ pub const DeclGen = struct {
/// NOTE: When the active field is set to something other than the most aligned field, the
/// resulting struct will be *underaligned*.
fn resolveUnionType(self: *DeclGen, ty: Type, maybe_active_field: ?usize) !CacheRef {
const target = self.getTarget();
const layout = ty.unionGetLayout(target);
const mod = self.module;
const layout = ty.unionGetLayout(mod);
const union_ty = ty.cast(Type.Payload.Union).?.data;
if (union_ty.layout == .Packed) {
@@ -1143,11 +1146,11 @@ pub const DeclGen = struct {
const active_field = maybe_active_field orelse layout.most_aligned_field;
const active_field_ty = union_ty.fields.values()[active_field].ty;
const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime()) blk: {
const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: {
const active_payload_ty_ref = try self.resolveType(active_field_ty, .indirect);
member_types.appendAssumeCapacity(active_payload_ty_ref);
member_names.appendAssumeCapacity(try self.spv.resolveString("payload"));
break :blk active_field_ty.abiSize(target);
break :blk active_field_ty.abiSize(mod);
} else 0;
const payload_padding_len = layout.payload_size - active_field_size;
@@ -1177,21 +1180,21 @@ pub const DeclGen = struct {
/// Turn a Zig type into a SPIR-V Type, and return a reference to it.
fn resolveType(self: *DeclGen, ty: Type, repr: Repr) Error!CacheRef {
const mod = self.module;
log.debug("resolveType: ty = {}", .{ty.fmt(self.module)});
const target = self.getTarget();
switch (ty.zigTypeTag()) {
switch (ty.zigTypeTag(mod)) {
.Void, .NoReturn => return try self.spv.resolve(.void_type),
.Bool => switch (repr) {
.direct => return try self.spv.resolve(.bool_type),
.indirect => return try self.intType(.unsigned, 1),
},
.Int => {
const int_info = ty.intInfo(target);
const int_info = ty.intInfo(mod);
return try self.intType(int_info.signedness, int_info.bits);
},
.Enum => {
var buffer: Type.Payload.Bits = undefined;
const tag_ty = ty.intTagType(&buffer);
const tag_ty = ty.intTagType();
return self.resolveType(tag_ty, repr);
},
.Float => {
@@ -1290,7 +1293,7 @@ pub const DeclGen = struct {
var member_index: usize = 0;
for (tuple.types, 0..) |field_ty, i| {
const field_val = tuple.values[i];
if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;
if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue;
member_types[member_index] = try self.resolveType(field_ty, .indirect);
member_index += 1;
@@ -1315,7 +1318,7 @@ pub const DeclGen = struct {
var member_index: usize = 0;
for (struct_ty.fields.values(), 0..) |field, i| {
if (field.is_comptime or !field.ty.hasRuntimeBits()) continue;
if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue;
member_types[member_index] = try self.resolveType(field.ty, .indirect);
member_names[member_index] = try self.spv.resolveString(struct_ty.fields.keys()[i]);
@@ -1334,7 +1337,7 @@ pub const DeclGen = struct {
.Optional => {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = ty.optionalChild(&buf);
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// Just use a bool.
// Note: Always generate the bool with indirect format, to save on some sanity
// Perform the conversion to a direct bool when the field is extracted.
@@ -1342,7 +1345,7 @@ pub const DeclGen = struct {
}
const payload_ty_ref = try self.resolveType(payload_ty, .indirect);
if (ty.optionalReprIsPayload()) {
if (ty.optionalReprIsPayload(mod)) {
// Optional is actually a pointer or a slice.
return payload_ty_ref;
}
@@ -1445,14 +1448,14 @@ pub const DeclGen = struct {
};
fn errorUnionLayout(self: *DeclGen, payload_ty: Type) ErrorUnionLayout {
const target = self.getTarget();
const mod = self.module;
const error_align = Type.anyerror.abiAlignment(target);
const payload_align = payload_ty.abiAlignment(target);
const error_align = Type.anyerror.abiAlignment(mod);
const payload_align = payload_ty.abiAlignment(mod);
const error_first = error_align > payload_align;
return .{
.payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(),
.payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod),
.error_first = error_first,
};
}
@@ -1529,14 +1532,15 @@ pub const DeclGen = struct {
}
fn genDecl(self: *DeclGen) !void {
const decl = self.module.declPtr(self.decl_index);
const mod = self.module;
const decl = mod.declPtr(self.decl_index);
const spv_decl_index = try self.resolveDecl(self.decl_index);
const decl_id = self.spv.declPtr(spv_decl_index).result_id;
log.debug("genDecl: id = {}, index = {}, name = {s}", .{ decl_id.id, @enumToInt(spv_decl_index), decl.name });
if (decl.val.castTag(.function)) |_| {
assert(decl.ty.zigTypeTag() == .Fn);
assert(decl.ty.zigTypeTag(mod) == .Fn);
const prototype_id = try self.resolveTypeId(decl.ty);
try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
.id_result_type = try self.resolveTypeId(decl.ty.fnReturnType()),
@@ -1634,7 +1638,8 @@ pub const DeclGen = struct {
/// Convert representation from indirect (in memory) to direct (in 'register')
/// This converts the argument type from resolveType(ty, .indirect) to resolveType(ty, .direct).
fn convertToDirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef {
return switch (ty.zigTypeTag()) {
const mod = self.module;
return switch (ty.zigTypeTag(mod)) {
.Bool => blk: {
const direct_bool_ty_ref = try self.resolveType(ty, .direct);
const indirect_bool_ty_ref = try self.resolveType(ty, .indirect);
@@ -1655,7 +1660,8 @@ pub const DeclGen = struct {
/// Convert representation from direct (in 'register) to direct (in memory)
/// This converts the argument type from resolveType(ty, .direct) to resolveType(ty, .indirect).
fn convertToIndirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef {
return switch (ty.zigTypeTag()) {
const mod = self.module;
return switch (ty.zigTypeTag(mod)) {
.Bool => blk: {
const indirect_bool_ty_ref = try self.resolveType(ty, .indirect);
break :blk self.boolToInt(indirect_bool_ty_ref, operand_id);
@@ -2056,6 +2062,7 @@ pub const DeclGen = struct {
}
fn airShuffle(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
const mod = self.module;
if (self.liveness.isUnused(inst)) return null;
const ty = self.air.typeOfIndex(inst);
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -2083,7 +2090,7 @@ pub const DeclGen = struct {
if (elem.isUndef()) {
self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF);
} else {
const int = elem.toSignedInt(self.getTarget());
const int = elem.toSignedInt(mod);
const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len);
self.func.body.writeOperand(spec.LiteralInteger, unsigned);
}
@@ -2189,13 +2196,13 @@ pub const DeclGen = struct {
lhs_id: IdRef,
rhs_id: IdRef,
) !IdRef {
const mod = self.module;
var cmp_lhs_id = lhs_id;
var cmp_rhs_id = rhs_id;
const opcode: Opcode = opcode: {
var int_buffer: Type.Payload.Bits = undefined;
const op_ty = switch (ty.zigTypeTag()) {
const op_ty = switch (ty.zigTypeTag(mod)) {
.Int, .Bool, .Float => ty,
.Enum => ty.intTagType(&int_buffer),
.Enum => ty.intTagType(),
.ErrorSet => Type.u16,
.Pointer => blk: {
// Note that while SPIR-V offers OpPtrEqual and OpPtrNotEqual, they are
@@ -2303,13 +2310,14 @@ pub const DeclGen = struct {
src_ty: Type,
src_id: IdRef,
) !IdRef {
const mod = self.module;
const dst_ty_ref = try self.resolveType(dst_ty, .direct);
const result_id = self.spv.allocId();
// TODO: Some more cases are missing here
// See fn bitCast in llvm.zig
if (src_ty.zigTypeTag() == .Int and dst_ty.isPtrAtRuntime()) {
if (src_ty.zigTypeTag(mod) == .Int and dst_ty.isPtrAtRuntime(mod)) {
try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{
.id_result_type = self.typeId(dst_ty_ref),
.id_result = result_id,
@@ -2342,8 +2350,8 @@ pub const DeclGen = struct {
const dest_ty = self.air.typeOfIndex(inst);
const dest_ty_id = try self.resolveTypeId(dest_ty);
const target = self.getTarget();
const dest_info = dest_ty.intInfo(target);
const mod = self.module;
const dest_info = dest_ty.intInfo(mod);
// TODO: Masking?
@@ -2485,8 +2493,9 @@ pub const DeclGen = struct {
}
fn ptrElemPtr(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef, index_id: IdRef) !IdRef {
const mod = self.module;
// Construct new pointer type for the resulting pointer
const elem_ty = ptr_ty.elemType2(); // use elemType() so that we get T for *[N]T.
const elem_ty = ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T.
const elem_ty_ref = try self.resolveType(elem_ty, .direct);
const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(ptr_ty.ptrAddressSpace()));
if (ptr_ty.isSinglePointer()) {
@@ -2502,12 +2511,13 @@ pub const DeclGen = struct {
fn airPtrElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
const mod = self.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr_ty = self.air.typeOf(bin_op.lhs);
const elem_ty = ptr_ty.childType();
// TODO: Make this return a null ptr or something
if (!elem_ty.hasRuntimeBitsIgnoreComptime()) return null;
if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
const ptr_id = try self.resolve(bin_op.lhs);
const index_id = try self.resolve(bin_op.rhs);
@@ -2536,8 +2546,8 @@ pub const DeclGen = struct {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const un_ty = self.air.typeOf(ty_op.operand);
const target = self.module.getTarget();
const layout = un_ty.unionGetLayout(target);
const mod = self.module;
const layout = un_ty.unionGetLayout(mod);
if (layout.tag_size == 0) return null;
const union_handle = try self.resolve(ty_op.operand);
@@ -2551,6 +2561,7 @@ pub const DeclGen = struct {
fn airStructFieldVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
const mod = self.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
@@ -2559,9 +2570,9 @@ pub const DeclGen = struct {
const field_index = struct_field.field_index;
const field_ty = struct_ty.structFieldType(field_index);
if (!field_ty.hasRuntimeBitsIgnoreComptime()) return null;
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
assert(struct_ty.zigTypeTag() == .Struct); // Cannot do unions yet.
assert(struct_ty.zigTypeTag(mod) == .Struct); // Cannot do unions yet.
return try self.extractField(field_ty, object_id, field_index);
}
@@ -2573,8 +2584,9 @@ pub const DeclGen = struct {
object_ptr: IdRef,
field_index: u32,
) !?IdRef {
const mod = self.module;
const object_ty = object_ptr_ty.childType();
switch (object_ty.zigTypeTag()) {
switch (object_ty.zigTypeTag(mod)) {
.Struct => switch (object_ty.containerLayout()) {
.Packed => unreachable, // TODO
else => {
@@ -2667,6 +2679,7 @@ pub const DeclGen = struct {
// the current block by first generating the code of the block, then a label, and then generate the rest of the current
// ir.Block in a different SPIR-V block.
const mod = self.module;
const label_id = self.spv.allocId();
// 4 chosen as arbitrary initial capacity.
@@ -2690,7 +2703,7 @@ pub const DeclGen = struct {
try self.beginSpvBlock(label_id);
// If this block didn't produce a value, simply return here.
if (!ty.hasRuntimeBitsIgnoreComptime())
if (!ty.hasRuntimeBitsIgnoreComptime(mod))
return null;
// Combine the result from the blocks using the Phi instruction.
@@ -2716,7 +2729,8 @@ pub const DeclGen = struct {
const block = self.blocks.get(br.block_inst).?;
const operand_ty = self.air.typeOf(br.operand);
if (operand_ty.hasRuntimeBits()) {
const mod = self.module;
if (operand_ty.hasRuntimeBits(mod)) {
const operand_id = try self.resolve(br.operand);
// current_block_label_id should not be undefined here, lest there is a br or br_void in the function's body.
try block.incoming_blocks.append(self.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id });
@@ -2771,13 +2785,14 @@ pub const DeclGen = struct {
}
fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void {
const mod = self.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = self.air.typeOf(bin_op.lhs);
const ptr = try self.resolve(bin_op.lhs);
const value = try self.resolve(bin_op.rhs);
const ptr_ty_ref = try self.resolveType(ptr_ty, .direct);
const val_is_undef = if (self.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false;
const val_is_undef = if (self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false;
if (val_is_undef) {
const undef = try self.spv.constUndef(ptr_ty_ref);
try self.store(ptr_ty, ptr, undef);
@@ -2805,7 +2820,8 @@ pub const DeclGen = struct {
fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void {
const operand = self.air.instructions.items(.data)[inst].un_op;
const operand_ty = self.air.typeOf(operand);
if (operand_ty.hasRuntimeBits()) {
const mod = self.module;
if (operand_ty.hasRuntimeBits(mod)) {
const operand_id = try self.resolve(operand);
try self.func.body.emit(self.spv.gpa, .OpReturnValue, .{ .value = operand_id });
} else {
@@ -2814,11 +2830,12 @@ pub const DeclGen = struct {
}
fn airRetLoad(self: *DeclGen, inst: Air.Inst.Index) !void {
const mod = self.module;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const ptr_ty = self.air.typeOf(un_op);
const ret_ty = ptr_ty.childType();
if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try self.func.body.emit(self.spv.gpa, .OpReturn, {});
return;
}
@@ -2946,6 +2963,7 @@ pub const DeclGen = struct {
fn airIsNull(self: *DeclGen, inst: Air.Inst.Index, pred: enum { is_null, is_non_null }) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
const mod = self.module;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand_id = try self.resolve(un_op);
const optional_ty = self.air.typeOf(un_op);
@@ -2955,7 +2973,7 @@ pub const DeclGen = struct {
const bool_ty_ref = try self.resolveType(Type.bool, .direct);
if (optional_ty.optionalReprIsPayload()) {
if (optional_ty.optionalReprIsPayload(mod)) {
// Pointer payload represents nullability: pointer or slice.
var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
@@ -2985,7 +3003,7 @@ pub const DeclGen = struct {
return result_id;
}
const is_non_null_id = if (optional_ty.hasRuntimeBitsIgnoreComptime())
const is_non_null_id = if (optional_ty.hasRuntimeBitsIgnoreComptime(mod))
try self.extractField(Type.bool, operand_id, 1)
else
// Optional representation is bool indicating whether the optional is set
@@ -3009,14 +3027,15 @@ pub const DeclGen = struct {
fn airUnwrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
const mod = self.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_id = try self.resolve(ty_op.operand);
const optional_ty = self.air.typeOf(ty_op.operand);
const payload_ty = self.air.typeOfIndex(inst);
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return null;
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
if (optional_ty.optionalReprIsPayload()) {
if (optional_ty.optionalReprIsPayload(mod)) {
return operand_id;
}
@@ -3026,16 +3045,17 @@ pub const DeclGen = struct {
fn airWrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
const mod = self.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const payload_ty = self.air.typeOf(ty_op.operand);
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return try self.constBool(true, .direct);
}
const operand_id = try self.resolve(ty_op.operand);
const optional_ty = self.air.typeOfIndex(inst);
if (optional_ty.optionalReprIsPayload()) {
if (optional_ty.optionalReprIsPayload(mod)) {
return operand_id;
}
@@ -3045,30 +3065,29 @@ pub const DeclGen = struct {
}
fn airSwitchBr(self: *DeclGen, inst: Air.Inst.Index) !void {
const target = self.getTarget();
const mod = self.module;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const cond = try self.resolve(pl_op.operand);
const cond_ty = self.air.typeOf(pl_op.operand);
const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
const cond_words: u32 = switch (cond_ty.zigTypeTag()) {
const cond_words: u32 = switch (cond_ty.zigTypeTag(mod)) {
.Int => blk: {
const bits = cond_ty.intInfo(target).bits;
const bits = cond_ty.intInfo(mod).bits;
const backing_bits = self.backingIntBits(bits) orelse {
return self.todo("implement composite int switch", .{});
};
break :blk if (backing_bits <= 32) @as(u32, 1) else 2;
},
.Enum => blk: {
var buffer: Type.Payload.Bits = undefined;
const int_ty = cond_ty.intTagType(&buffer);
const int_info = int_ty.intInfo(target);
const int_ty = cond_ty.intTagType();
const int_info = int_ty.intInfo(mod);
const backing_bits = self.backingIntBits(int_info.bits) orelse {
return self.todo("implement composite int switch", .{});
};
break :blk if (backing_bits <= 32) @as(u32, 1) else 2;
},
else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag())}), // TODO: Figure out which types apply here, and work around them as we can only do integers.
else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag(mod))}), // TODO: Figure out which types apply here, and work around them as we can only do integers.
};
const num_cases = switch_br.data.cases_len;
@@ -3112,15 +3131,15 @@ pub const DeclGen = struct {
const label = IdRef{ .id = first_case_label.id + case_i };
for (items) |item| {
const value = self.air.value(item) orelse {
const value = self.air.value(item, mod) orelse {
return self.todo("switch on runtime value???", .{});
};
const int_val = switch (cond_ty.zigTypeTag()) {
.Int => if (cond_ty.isSignedInt()) @bitCast(u64, value.toSignedInt(target)) else value.toUnsignedInt(target),
const int_val = switch (cond_ty.zigTypeTag(mod)) {
.Int => if (cond_ty.isSignedInt(mod)) @bitCast(u64, value.toSignedInt(mod)) else value.toUnsignedInt(mod),
.Enum => blk: {
var int_buffer: Value.Payload.U64 = undefined;
// TODO: figure out of cond_ty is correct (something with enum literals)
break :blk value.enumToInt(cond_ty, &int_buffer).toUnsignedInt(target); // TODO: composite integer constants
break :blk value.enumToInt(cond_ty, &int_buffer).toUnsignedInt(mod); // TODO: composite integer constants
},
else => unreachable,
};
@@ -3294,11 +3313,12 @@ pub const DeclGen = struct {
fn airCall(self: *DeclGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !?IdRef {
_ = modifier;
const mod = self.module;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
const callee_ty = self.air.typeOf(pl_op.operand);
const zig_fn_ty = switch (callee_ty.zigTypeTag()) {
const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) {
.Fn => callee_ty,
.Pointer => return self.fail("cannot call function pointers", .{}),
else => unreachable,
@@ -3320,7 +3340,7 @@ pub const DeclGen = struct {
// temporary params buffer.
const arg_id = try self.resolve(arg);
const arg_ty = self.air.typeOf(arg);
if (!arg_ty.hasRuntimeBitsIgnoreComptime()) continue;
if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
params[n_params] = arg_id;
n_params += 1;
@@ -3337,7 +3357,7 @@ pub const DeclGen = struct {
try self.func.body.emit(self.spv.gpa, .OpUnreachable, {});
}
if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime()) {
if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) {
return null;
}