src/codegen/spirv.zig:

@@ -22,8 +22,10 @@ const IdResultType = spec.IdResultType;
const StorageClass = spec.StorageClass;

const SpvModule = @import("spirv/Module.zig");
const CacheRef = SpvModule.CacheRef;
const CacheString = SpvModule.CacheString;

const SpvSection = @import("spirv/Section.zig");
const SpvType = @import("spirv/type.zig").Type;
const SpvAssembler = @import("spirv/Assembler.zig");

const InstMap = std.AutoHashMapUnmanaged(Air.Inst.Index, IdRef);

@@ -377,74 +379,23 @@ pub const DeclGen = struct {
        };
    }

    fn genConstInt(self: *DeclGen, ty_ref: SpvType.Ref, result_id: IdRef, value: anytype) !void {
        const ty = self.spv.typeRefType(ty_ref);
        const ty_id = self.typeId(ty_ref);

        const Lit = spec.LiteralContextDependentNumber;
        const literal = switch (ty.intSignedness()) {
            .signed => switch (ty.intFloatBits()) {
                1...32 => Lit{ .int32 = @intCast(i32, value) },
                33...64 => Lit{ .int64 = @intCast(i64, value) },
                else => unreachable, // TODO: composite integer literals
            },
            .unsigned => switch (ty.intFloatBits()) {
                1...32 => Lit{ .uint32 = @intCast(u32, value) },
                33...64 => Lit{ .uint64 = @intCast(u64, value) },
                else => unreachable,
            },
        };

        try self.spv.emitConstant(ty_id, result_id, literal);
    }
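
The removed genConstInt above shows how the backend picks a context-dependent literal width for integer constants; presumably the same selection now lives behind SpvModule.constInt. A standalone sketch of that logic, where Lit and intLiteral are illustrative stand-ins rather than the compiler's actual declarations:

const std = @import("std");

// Illustrative stand-in for spec.LiteralContextDependentNumber: SPIR-V stores
// constants of up to 32 bits in one word and up to 64 bits in two words.
const Lit = union(enum) {
    int32: i32,
    int64: i64,
    uint32: u32,
    uint64: u64,
};

// Mirrors the switch in genConstInt: 1...32-bit types use the single-word
// literal, 33...64-bit types the double-word one; wider ("composite")
// integers are still a TODO in the backend.
fn intLiteral(signedness: std.builtin.Signedness, bits: u16, value: i128) Lit {
    return switch (signedness) {
        .signed => switch (bits) {
            1...32 => Lit{ .int32 = @intCast(i32, value) },
            33...64 => Lit{ .int64 = @intCast(i64, value) },
            else => unreachable,
        },
        .unsigned => switch (bits) {
            1...32 => Lit{ .uint32 = @intCast(u32, value) },
            33...64 => Lit{ .uint64 = @intCast(u64, value) },
            else => unreachable,
        },
    };
}

test "literal width selection" {
    try std.testing.expect(intLiteral(.unsigned, 8, 255) == .uint32);
    try std.testing.expect(intLiteral(.signed, 48, -1) == .int64);
}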

    fn constInt(self: *DeclGen, ty_ref: SpvType.Ref, value: anytype) !IdRef {
        const result_id = self.spv.allocId();
        try self.genConstInt(ty_ref, result_id, value);
        return result_id;
    }

    fn constUndef(self: *DeclGen, ty_ref: SpvType.Ref) !IdRef {
        const result_id = self.spv.allocId();
        try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpUndef, .{
            .id_result_type = self.typeId(ty_ref),
            .id_result = result_id,
        });
        return result_id;
    }

    fn constNull(self: *DeclGen, ty_ref: SpvType.Ref) !IdRef {
        const result_id = self.spv.allocId();
        try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpConstantNull, .{
            .id_result_type = self.typeId(ty_ref),
            .id_result = result_id,
        });
        return result_id;
    }

    /// Emits a bool constant in a particular representation.
    fn constBool(self: *DeclGen, value: bool, repr: Repr) !IdRef {
        switch (repr) {
            .indirect => {
                const int_ty_ref = try self.intType(.unsigned, 1);
                return self.constInt(int_ty_ref, @boolToInt(value));
                return self.spv.constInt(int_ty_ref, @boolToInt(value));
            },
            .direct => {
                const bool_ty_ref = try self.resolveType(Type.bool, .direct);
                const result_id = self.spv.allocId();
                const operands = .{ .id_result_type = self.typeId(bool_ty_ref), .id_result = result_id };
                if (value) {
                    try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpConstantTrue, operands);
                } else {
                    try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpConstantFalse, operands);
                }
                return result_id;
                return self.spv.constBool(bool_ty_ref, value);
            },
        }
    }
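
constBool's two branches reflect the backend's two boolean representations: .direct is a genuine OpTypeBool (fine for operations), while .indirect is a 1-bit unsigned integer, since the comments elsewhere in this diff note that SPIR-V booleans are opaque and cannot be stored. A tiny host-side sketch of what the .indirect branch encodes (the function name is illustrative):

fn indirectBoolBits(value: bool) u1 {
    // constBool(.indirect) ultimately emits this value as a u1 constant via
    // constInt; loads convert it back to a direct bool with OpINotEqual, as
    // shown in the conversion hunk further down.
    return @boolToInt(value);
}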

    /// Construct a struct at runtime.
    /// result_ty_ref must be a struct type.
    fn constructStruct(self: *DeclGen, result_ty_ref: SpvType.Ref, constituents: []const IdRef) !IdRef {
    fn constructStruct(self: *DeclGen, result_ty_ref: CacheRef, constituents: []const IdRef) !IdRef {
        // The Khronos LLVM-SPIRV translator crashes because it cannot construct structs whose
        // operands are not constant.
        // See https://github.com/KhronosGroup/SPIRV-LLVM-Translator/issues/1349

@@ -453,11 +404,13 @@ pub const DeclGen = struct {
        const ptr_composite_id = try self.alloc(result_ty_ref, null);
        // Note: using 32-bit ints here because usize crashes the translator as well
        const index_ty_ref = try self.intType(.unsigned, 32);
        const spv_composite_ty = self.spv.typeRefType(result_ty_ref);
        const members = spv_composite_ty.payload(.@"struct").members;
        for (constituents, members, 0..) |constitent_id, member, index| {
            const index_id = try self.constInt(index_ty_ref, index);
            const ptr_member_ty_ref = try self.spv.ptrType(member.ty, .Generic, 0);

        const spv_composite_ty = self.spv.cache.lookup(result_ty_ref).struct_type;
        const member_types = spv_composite_ty.member_types;

        for (constituents, member_types, 0..) |constitent_id, member_ty_ref, index| {
            const index_id = try self.spv.constInt(index_ty_ref, index);
            const ptr_member_ty_ref = try self.spv.ptrType(member_ty_ref, .Generic);
            const ptr_id = try self.accessChain(ptr_member_ty_ref, ptr_composite_id, &.{index_id});
            try self.func.body.emit(self.spv.gpa, .OpStore, .{
                .pointer = ptr_id,

@@ -478,11 +431,9 @@ pub const DeclGen = struct {

        dg: *DeclGen,
        /// Cached reference of the u32 type.
        u32_ty_ref: SpvType.Ref,
        /// Cached type id of the u32 type.
        u32_ty_id: IdRef,
        u32_ty_ref: CacheRef,
        /// The members of the resulting structure type
        members: std.ArrayList(SpvType.Payload.Struct.Member),
        members: std.ArrayList(CacheRef),
        /// The initializers of each of the members.
        initializers: std.ArrayList(IdRef),
        /// The current size of the structure. Includes

@@ -513,10 +464,8 @@ pub const DeclGen = struct {
            }

            const word = @bitCast(Word, self.partial_word.buffer);
            const result_id = self.dg.spv.allocId();
            // TODO: Integrate with caching mechanism
            try self.dg.spv.emitConstant(self.u32_ty_id, result_id, .{ .uint32 = word });
            try self.members.append(.{ .ty = self.u32_ty_ref });
            const result_id = try self.dg.spv.constInt(self.u32_ty_ref, word);
            try self.members.append(self.u32_ty_ref);
            try self.initializers.append(result_id);

            self.partial_word.len = 0;
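
The flush path above packs accumulated constant bytes (partial_word is presumably a four-byte buffer) into one 32-bit word before emitting it as a u32 constant. A minimal sketch of that reinterpretation; the same host-endianness caveat applies here as in the real lowering:

const std = @import("std");

fn bytesToWord(buffer: [4]u8) u32 {
    // Same reinterpretation as `@bitCast(Word, self.partial_word.buffer)`:
    // four constant bytes become one SPIR-V word.
    return @bitCast(u32, buffer);
}

test "bytes pack into a word on little-endian hosts" {
    if (@import("builtin").cpu.arch.endian() == .Little) {
        try std.testing.expectEqual(@as(u32, 0x04030201), bytesToWord(.{ 1, 2, 3, 4 }));
    }
}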

@@ -552,7 +501,7 @@ pub const DeclGen = struct {
            }
        }

        fn addPtr(self: *@This(), ptr_ty_ref: SpvType.Ref, ptr_id: IdRef) !void {
        fn addPtr(self: *@This(), ptr_ty_ref: CacheRef, ptr_id: IdRef) !void {
            // TODO: Double check pointer sizes here.
            // shared pointers might be u32...
            const target = self.dg.getTarget();

@@ -560,17 +509,13 @@ pub const DeclGen = struct {
            if (self.size % width != 0) {
                return self.dg.todo("misaligned pointer constants", .{});
            }
            try self.members.append(.{ .ty = ptr_ty_ref });
            try self.members.append(ptr_ty_ref);
            try self.initializers.append(ptr_id);
            self.size += width;
        }

        fn addNullPtr(self: *@This(), ptr_ty_ref: SpvType.Ref) !void {
            const result_id = self.dg.spv.allocId();
            try self.dg.spv.sections.types_globals_constants.emit(self.dg.spv.gpa, .OpConstantNull, .{
                .id_result_type = self.dg.typeId(ptr_ty_ref),
                .id_result = result_id,
            });
        fn addNullPtr(self: *@This(), ptr_ty_ref: CacheRef) !void {
            const result_id = try self.dg.spv.constNull(ptr_ty_ref);
            try self.addPtr(ptr_ty_ref, result_id);
        }

@@ -928,7 +873,7 @@ pub const DeclGen = struct {
        const section = &self.spv.globals.section;

        const ty_ref = try self.resolveType(ty, .indirect);
        const ptr_ty_ref = try self.spv.ptrType(ty_ref, storage_class, 0);
        const ptr_ty_ref = try self.spv.ptrType(ty_ref, storage_class);

        // const target = self.getTarget();

@@ -956,8 +901,7 @@ pub const DeclGen = struct {
        var icl = IndirectConstantLowering{
            .dg = self,
            .u32_ty_ref = u32_ty_ref,
            .u32_ty_id = self.typeId(u32_ty_ref),
            .members = std.ArrayList(SpvType.Payload.Struct.Member).init(self.gpa),
            .members = std.ArrayList(CacheRef).init(self.gpa),
            .initializers = std.ArrayList(IdRef).init(self.gpa),
            .decl_deps = std.AutoArrayHashMap(SpvModule.Decl.Index, void).init(self.gpa),
        };

@@ -969,8 +913,10 @@ pub const DeclGen = struct {
        try icl.lower(ty, val);
        try icl.flush();

        const constant_struct_ty_ref = try self.spv.simpleStructType(icl.members.items);
        const ptr_constant_struct_ty_ref = try self.spv.ptrType(constant_struct_ty_ref, storage_class, 0);
        const constant_struct_ty_ref = try self.spv.resolve(.{ .struct_type = .{
            .member_types = icl.members.items,
        } });
        const ptr_constant_struct_ty_ref = try self.spv.ptrType(constant_struct_ty_ref, storage_class);

        const constant_struct_id = self.spv.allocId();
        try section.emit(self.spv.gpa, .OpSpecConstantComposite, .{

@@ -1004,7 +950,7 @@ pub const DeclGen = struct {
        });

        if (cast_to_generic) {
            const generic_ptr_ty_ref = try self.spv.ptrType(ty_ref, .Generic, 0);
            const generic_ptr_ty_ref = try self.spv.ptrType(ty_ref, .Generic);
            try section.emitSpecConstantOp(self.spv.gpa, .OpPtrCastToGeneric, .{
                .id_result_type = self.typeId(generic_ptr_ty_ref),
                .id_result = result_id,

@@ -1023,52 +969,32 @@ pub const DeclGen = struct {
    /// This function should only be called during function code generation.
    fn constant(self: *DeclGen, ty: Type, val: Value, repr: Repr) !IdRef {
        const target = self.getTarget();
        const section = &self.spv.sections.types_globals_constants;
        const result_ty_ref = try self.resolveType(ty, repr);
        const result_ty_id = self.typeId(result_ty_ref);

        log.debug("constant: ty = {}, val = {}", .{ ty.fmt(self.module), val.fmtValue(ty, self.module) });

        if (val.isUndef()) {
            const result_id = self.spv.allocId();
            try section.emit(self.spv.gpa, .OpUndef, .{
                .id_result_type = result_ty_id,
                .id_result = result_id,
            });
            return result_id;
            return self.spv.constUndef(result_ty_ref);
        }

        switch (ty.zigTypeTag()) {
            .Int => {
                if (ty.isSignedInt()) {
                    return try self.constInt(result_ty_ref, val.toSignedInt(target));
                    return try self.spv.constInt(result_ty_ref, val.toSignedInt(target));
                } else {
                    return try self.constInt(result_ty_ref, val.toUnsignedInt(target));
                    return try self.spv.constInt(result_ty_ref, val.toUnsignedInt(target));
                }
            },
            .Bool => switch (repr) {
                .direct => {
                    const result_id = self.spv.allocId();
                    const operands = .{ .id_result_type = result_ty_id, .id_result = result_id };
                    if (val.toBool()) {
                        try section.emit(self.spv.gpa, .OpConstantTrue, operands);
                    } else {
                        try section.emit(self.spv.gpa, .OpConstantFalse, operands);
                    }
                    return result_id;
                },
                .indirect => return try self.constInt(result_ty_ref, @boolToInt(val.toBool())),
                .direct => return try self.spv.constBool(result_ty_ref, val.toBool()),
                .indirect => return try self.spv.constInt(result_ty_ref, @boolToInt(val.toBool())),
            },
            .Float => {
                const result_id = self.spv.allocId();
                switch (ty.floatBits(target)) {
                    16 => try self.spv.emitConstant(result_ty_id, result_id, .{ .float32 = val.toFloat(f16) }),
                    32 => try self.spv.emitConstant(result_ty_id, result_id, .{ .float32 = val.toFloat(f32) }),
                    64 => try self.spv.emitConstant(result_ty_id, result_id, .{ .float64 = val.toFloat(f64) }),
                    80, 128 => unreachable, // TODO
                    else => unreachable,
                }
                return result_id;
            .Float => return switch (ty.floatBits(target)) {
                16 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float16 = val.toFloat(f16) } } }),
                32 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float32 = val.toFloat(f32) } } }),
                64 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float64 = val.toFloat(f64) } } }),
                80, 128 => unreachable, // TODO
                else => unreachable,
            },
            .ErrorSet => {
                const value = switch (val.tag()) {

@@ -1081,7 +1007,7 @@ pub const DeclGen = struct {
                    else => unreachable,
                };

                return try self.constInt(result_ty_ref, value);
                return try self.spv.constInt(result_ty_ref, value);
            },
            .ErrorUnion => {
                const payload_ty = ty.errorUnionPayload();

@@ -1126,7 +1052,7 @@ pub const DeclGen = struct {
        try self.func.decl_deps.put(self.spv.gpa, spv_decl_index, {});

        try self.func.body.emit(self.spv.gpa, .OpLoad, .{
            .id_result_type = result_ty_id,
            .id_result_type = self.typeId(result_ty_ref),
            .id_result = result_id,
            .pointer = self.spv.declPtr(spv_decl_index).result_id,
        });

@@ -1140,26 +1066,28 @@ pub const DeclGen = struct {
    /// Turn a Zig type into a SPIR-V Type, and return its type result-id.
    fn resolveTypeId(self: *DeclGen, ty: Type) !IdResultType {
        const type_ref = try self.resolveType(ty, .direct);
        return self.typeId(type_ref);
        return self.spv.resultId(type_ref);
    }

    fn typeId(self: *DeclGen, ty_ref: SpvType.Ref) IdRef {
        return self.spv.typeId(ty_ref);
    fn typeId(self: *DeclGen, ty_ref: CacheRef) IdRef {
        return self.spv.resultId(ty_ref);
    }

    /// Create an integer type suitable for storing at least 'bits' bits.
    fn intType(self: *DeclGen, signedness: std.builtin.Signedness, bits: u16) !SpvType.Ref {
    /// The integer type that is returned by this function is the type that is used to perform
    /// actual operations (as well as store) a Zig type of a particular number of bits. To create
    /// a type with an exact size, use SpvModule.intType.
    fn intType(self: *DeclGen, signedness: std.builtin.Signedness, bits: u16) !CacheRef {
        const backing_bits = self.backingIntBits(bits) orelse {
            // TODO: Integers too big for any native type are represented as "composite integers":
            // An array of largestSupportedIntBits.
            return self.todo("Implement {s} composite int type of {} bits", .{ @tagName(signedness), bits });
        };

        return try self.spv.resolveType(try SpvType.int(self.spv.arena, signedness, backing_bits));
        return self.spv.intType(signedness, backing_bits);
    }

    /// Create an integer type that represents 'usize'.
    fn sizeType(self: *DeclGen) !SpvType.Ref {
    fn sizeType(self: *DeclGen) !CacheRef {
        return try self.intType(.unsigned, self.getTarget().ptrBitWidth());
    }
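
intType rounds a Zig integer width up to a width the target can actually represent; backingIntBits returns null when no native width fits, which is what triggers the composite-int TODO above. A simplified model of that rounding; the real implementation consults the target's capabilities, so the fixed width list here is an assumption:

fn backingIntBitsModel(bits: u16) ?u16 {
    // Assumed native widths; the actual set depends on the SPIR-V target features.
    const supported = [_]u16{ 8, 16, 32, 64 };
    for (supported) |width| {
        if (bits <= width) return width;
    }
    return null; // too big for any native type: "composite integer" territory
}

test "strange widths round up" {
    try @import("std").testing.expectEqual(@as(?u16, 32), backingIntBitsModel(17));
    try @import("std").testing.expectEqual(@as(?u16, null), backingIntBitsModel(96));
}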

@@ -1185,7 +1113,7 @@ pub const DeclGen = struct {
    /// If any of the fields' size is 0, it will be omitted.
    /// NOTE: When the active field is set to something other than the most aligned field, the
    /// resulting struct will be *underaligned*.
    fn resolveUnionType(self: *DeclGen, ty: Type, maybe_active_field: ?usize) !SpvType.Ref {
    fn resolveUnionType(self: *DeclGen, ty: Type, maybe_active_field: ?usize) !CacheRef {
        const target = self.getTarget();
        const layout = ty.unionGetLayout(target);
        const union_ty = ty.cast(Type.Payload.Union).?.data;

@@ -1199,7 +1127,8 @@ pub const DeclGen = struct {
            return try self.resolveType(union_ty.tag_ty, .indirect);
        }

        var members = std.BoundedArray(SpvType.Payload.Struct.Member, 4){};
        var member_types = std.BoundedArray(CacheRef, 4){};
        var member_names = std.BoundedArray(CacheString, 4){};

        const has_tag = layout.tag_size != 0;
        const tag_first = layout.tag_align >= layout.payload_align;

@@ -1207,7 +1136,8 @@ pub const DeclGen = struct {

        if (has_tag and tag_first) {
            const tag_ty_ref = try self.resolveType(union_ty.tag_ty, .indirect);
            members.appendAssumeCapacity(.{ .name = "tag", .ty = tag_ty_ref });
            member_types.appendAssumeCapacity(tag_ty_ref);
            member_names.appendAssumeCapacity(try self.spv.resolveString("tag"));
        }

        const active_field = maybe_active_field orelse layout.most_aligned_field;

@@ -1215,40 +1145,44 @@ pub const DeclGen = struct {

        const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime()) blk: {
            const active_payload_ty_ref = try self.resolveType(active_field_ty, .indirect);
            members.appendAssumeCapacity(.{ .name = "payload", .ty = active_payload_ty_ref });
            member_types.appendAssumeCapacity(active_payload_ty_ref);
            member_names.appendAssumeCapacity(try self.spv.resolveString("payload"));
            break :blk active_field_ty.abiSize(target);
        } else 0;

        const payload_padding_len = layout.payload_size - active_field_size;
        if (payload_padding_len != 0) {
            const payload_padding_ty_ref = try self.spv.arrayType(@intCast(u32, payload_padding_len), u8_ty_ref);
            members.appendAssumeCapacity(.{ .name = "padding_payload", .ty = payload_padding_ty_ref });
            member_types.appendAssumeCapacity(payload_padding_ty_ref);
            member_names.appendAssumeCapacity(try self.spv.resolveString("payload_padding"));
        }

        if (has_tag and !tag_first) {
            const tag_ty_ref = try self.resolveType(union_ty.tag_ty, .indirect);
            members.appendAssumeCapacity(.{ .name = "tag", .ty = tag_ty_ref });
            member_types.appendAssumeCapacity(tag_ty_ref);
            member_names.appendAssumeCapacity(try self.spv.resolveString("tag"));
        }

        if (layout.padding != 0) {
            const padding_ty_ref = try self.spv.arrayType(layout.padding, u8_ty_ref);
            members.appendAssumeCapacity(.{ .name = "padding", .ty = padding_ty_ref });
            member_types.appendAssumeCapacity(padding_ty_ref);
            member_names.appendAssumeCapacity(try self.spv.resolveString("padding"));
        }

        return try self.spv.simpleStructType(members.slice());
        return try self.spv.resolve(.{ .struct_type = .{
            .member_types = member_types.slice(),
            .member_names = member_names.slice(),
        } });
    }

    /// Turn a Zig type into a SPIR-V Type, and return a reference to it.
    fn resolveType(self: *DeclGen, ty: Type, repr: Repr) Error!SpvType.Ref {
    fn resolveType(self: *DeclGen, ty: Type, repr: Repr) Error!CacheRef {
        log.debug("resolveType: ty = {}", .{ty.fmt(self.module)});
        const target = self.getTarget();
        switch (ty.zigTypeTag()) {
            .Void, .NoReturn => return try self.spv.resolveType(SpvType.initTag(.void)),
            .Void, .NoReturn => return try self.spv.resolve(.void_type),
            .Bool => switch (repr) {
                .direct => return try self.spv.resolveType(SpvType.initTag(.bool)),
                // SPIR-V booleans are opaque, which is fine for operations, but they can't be stored.
                // This function returns the *stored* type, for values directly we convert this into a bool when
                // it is loaded, and convert it back to this type when stored.
                .direct => return try self.spv.resolve(.bool_type),
                .indirect => return try self.intType(.unsigned, 1),
            },
            .Int => {

@@ -1276,15 +1210,15 @@ pub const DeclGen = struct {
                    return self.fail("Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits});
                }

                return try self.spv.resolveType(SpvType.float(bits));
                return try self.spv.resolve(.{ .float_type = .{ .bits = bits } });
            },
            .Array => {
                const elem_ty = ty.childType();
                const elem_ty_ref = try self.resolveType(elem_ty, .indirect);
                const elem_ty_ref = try self.resolveType(elem_ty, .direct);
                const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel()) orelse {
                    return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel()});
                };
                return try self.spv.arrayType(total_len, elem_ty_ref);
                return self.spv.arrayType(total_len, elem_ty_ref);
            },
            .Fn => switch (repr) {
                .direct => {

@@ -1292,18 +1226,17 @@ pub const DeclGen = struct {
                    if (ty.fnIsVarArgs())
                        return self.fail("VarArgs functions are unsupported for SPIR-V", .{});

                    // TODO: Parameter passing convention etc.

                    const param_types = try self.spv.arena.alloc(SpvType.Ref, ty.fnParamLen());
                    for (param_types, 0..) |*param, i| {
                        param.* = try self.resolveType(ty.fnParamType(i), .direct);
                    const param_ty_refs = try self.gpa.alloc(CacheRef, ty.fnParamLen());
                    defer self.gpa.free(param_ty_refs);
                    for (param_ty_refs, 0..) |*param_type, i| {
                        param_type.* = try self.resolveType(ty.fnParamType(i), .direct);
                    }
                    const return_ty_ref = try self.resolveType(ty.fnReturnType(), .direct);

                    const return_type = try self.resolveType(ty.fnReturnType(), .direct);

                    const payload = try self.spv.arena.create(SpvType.Payload.Function);
                    payload.* = .{ .return_type = return_type, .parameters = param_types };
                    return try self.spv.resolveType(SpvType.initPayload(&payload.base));
                    return try self.spv.resolve(.{ .function_type = .{
                        .return_type = return_ty_ref,
                        .parameters = param_ty_refs,
                    } });
                },
                .indirect => {
                    // TODO: Represent function pointers properly.

@@ -1316,16 +1249,22 @@ pub const DeclGen = struct {

                const storage_class = spvStorageClass(ptr_info.@"addrspace");
                const child_ty_ref = try self.resolveType(ptr_info.pointee_type, .indirect);
                const ptr_ty_ref = try self.spv.ptrType(child_ty_ref, storage_class, 0);

                const ptr_ty_ref = try self.spv.resolve(.{ .ptr_type = .{
                    .storage_class = storage_class,
                    .child_type = child_ty_ref,
                } });
                if (ptr_info.size != .Slice) {
                    return ptr_ty_ref;
                }

                return try self.spv.simpleStructType(&.{
                    .{ .ty = ptr_ty_ref, .name = "ptr" },
                    .{ .ty = try self.sizeType(), .name = "len" },
                });
                const size_ty_ref = try self.sizeType();
                return self.spv.resolve(.{ .struct_type = .{
                    .member_types = &.{ ptr_ty_ref, size_ty_ref },
                    .member_names = &.{
                        try self.spv.resolveString("ptr"),
                        try self.spv.resolveString("len"),
                    },
                } });
            },
            .Vector => {
                // Although not 100% the same, Zig vectors map quite neatly to SPIR-V vectors (including many integer and float operations

@@ -1337,60 +1276,60 @@ pub const DeclGen = struct {

                // TODO: Properly verify sizes and child type.

                const payload = try self.spv.arena.create(SpvType.Payload.Vector);
                payload.* = .{
                return try self.spv.resolve(.{ .vector_type = .{
                    .component_type = try self.resolveType(ty.elemType(), repr),
                    .component_count = @intCast(u32, ty.vectorLen()),
                };
                return try self.spv.resolveType(SpvType.initPayload(&payload.base));
                } });
            },
            .Struct => {
                if (ty.isSimpleTupleOrAnonStruct()) {
                    const tuple = ty.tupleFields();
                    const members = try self.spv.arena.alloc(SpvType.Payload.Struct.Member, tuple.types.len);
                    var member_index: u32 = 0;
                    const member_types = try self.gpa.alloc(CacheRef, tuple.types.len);
                    defer self.gpa.free(member_types);

                    var member_index: usize = 0;
                    for (tuple.types, 0..) |field_ty, i| {
                        const field_val = tuple.values[i];
                        if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBitsIgnoreComptime()) continue;
                        members[member_index] = .{
                            .ty = try self.resolveType(field_ty, .indirect),
                        };
                        if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;

                        member_types[member_index] = try self.resolveType(field_ty, .indirect);
                        member_index += 1;
                    }
                    const payload = try self.spv.arena.create(SpvType.Payload.Struct);
                    payload.* = .{
                        .members = members[0..member_index],
                    };
                    return try self.spv.resolveType(SpvType.initPayload(&payload.base));

                    return try self.spv.resolve(.{ .struct_type = .{
                        .member_types = member_types[0..member_index],
                    } });
                }

                const struct_ty = ty.castTag(.@"struct").?.data;

                if (struct_ty.layout == .Packed) {
                    return try self.resolveType(struct_ty.backing_int_ty, .indirect);
                    return try self.resolveType(struct_ty.backing_int_ty, .direct);
                }

                const members = try self.spv.arena.alloc(SpvType.Payload.Struct.Member, struct_ty.fields.count());
                const member_types = try self.gpa.alloc(CacheRef, struct_ty.fields.count());
                defer self.gpa.free(member_types);

                const member_names = try self.gpa.alloc(CacheString, struct_ty.fields.count());
                defer self.gpa.free(member_names);

                var member_index: usize = 0;
                for (struct_ty.fields.values(), 0..) |field, i| {
                    if (field.is_comptime or !field.ty.hasRuntimeBits()) continue;

                    members[member_index] = .{
                        .ty = try self.resolveType(field.ty, .indirect),
                        .name = struct_ty.fields.keys()[i],
                    };
                    member_types[member_index] = try self.resolveType(field.ty, .indirect);
                    member_names[member_index] = try self.spv.resolveString(struct_ty.fields.keys()[i]);
                    member_index += 1;
                }

                const name = try struct_ty.getFullyQualifiedName(self.module);
                defer self.module.gpa.free(name);

                const payload = try self.spv.arena.create(SpvType.Payload.Struct);
                payload.* = .{
                    .members = members[0..member_index],
                    .name = try self.spv.arena.dupe(u8, name),
                };
                return try self.spv.resolveType(SpvType.initPayload(&payload.base));
                return try self.spv.resolve(.{ .struct_type = .{
                    .name = try self.spv.resolveString(name),
                    .member_types = member_types[0..member_index],
                    .member_names = member_names[0..member_index],
                } });
            },
            .Optional => {
                var buf: Type.Payload.ElemType = undefined;

@@ -1398,7 +1337,7 @@ pub const DeclGen = struct {
                if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
                    // Just use a bool.
                    // Note: Always generate the bool with indirect format, to save on some sanity
                    // Perform the converison to a direct bool when the field is extracted.
                    // Perform the conversion to a direct bool when the field is extracted.
                    return try self.resolveType(Type.bool, .indirect);
                }

@@ -1410,11 +1349,13 @@ pub const DeclGen = struct {

                const bool_ty_ref = try self.resolveType(Type.bool, .indirect);

                // it's an actual optional
                return try self.spv.simpleStructType(&.{
                    .{ .ty = payload_ty_ref, .name = "payload" },
                    .{ .ty = bool_ty_ref, .name = "valid" },
                });
                return try self.spv.resolve(.{ .struct_type = .{
                    .member_types = &.{ payload_ty_ref, bool_ty_ref },
                    .member_names = &.{
                        try self.spv.resolveString("payload"),
                        try self.spv.resolveString("valid"),
                    },
                } });
            },
            .Union => return try self.resolveUnionType(ty, null),
            .ErrorSet => return try self.intType(.unsigned, 16),

@@ -1429,20 +1370,30 @@ pub const DeclGen = struct {

                const payload_ty_ref = try self.resolveType(payload_ty, .indirect);

                var members = std.BoundedArray(SpvType.Payload.Struct.Member, 2){};
                var member_types: [2]CacheRef = undefined;
                var member_names: [2]CacheString = undefined;
                if (eu_layout.error_first) {
                    // Put the error first
                    members.appendAssumeCapacity(.{ .ty = error_ty_ref, .name = "error" });
                    members.appendAssumeCapacity(.{ .ty = payload_ty_ref, .name = "payload" });
                    member_types = .{ error_ty_ref, payload_ty_ref };
                    member_names = .{
                        try self.spv.resolveString("error"),
                        try self.spv.resolveString("payload"),
                    };
                    // TODO: ABI padding?
                } else {
                    // Put the payload first.
                    members.appendAssumeCapacity(.{ .ty = payload_ty_ref, .name = "payload" });
                    members.appendAssumeCapacity(.{ .ty = error_ty_ref, .name = "error" });
                    member_types = .{ payload_ty_ref, error_ty_ref };
                    member_names = .{
                        try self.spv.resolveString("payload"),
                        try self.spv.resolveString("error"),
                    };
                    // TODO: ABI padding?
                }

                return try self.spv.simpleStructType(members.slice());
                return try self.spv.resolve(.{ .struct_type = .{
                    .member_types = &member_types,
                    .member_names = &member_names,
                } });
            },

            .Null,

@@ -1526,17 +1477,13 @@ pub const DeclGen = struct {
    /// the name of an error in the text executor.
    fn generateTestEntryPoint(self: *DeclGen, name: []const u8, spv_test_decl_index: SpvModule.Decl.Index) !void {
        const anyerror_ty_ref = try self.resolveType(Type.anyerror, .direct);
        const ptr_anyerror_ty_ref = try self.spv.ptrType(anyerror_ty_ref, .CrossWorkgroup, 0);
        const ptr_anyerror_ty_ref = try self.spv.ptrType(anyerror_ty_ref, .CrossWorkgroup);
        const void_ty_ref = try self.resolveType(Type.void, .direct);

        const kernel_proto_ty_ref = blk: {
            const proto_payload = try self.spv.arena.create(SpvType.Payload.Function);
            proto_payload.* = .{
                .return_type = void_ty_ref,
                .parameters = try self.spv.arena.dupe(SpvType.Ref, &.{ptr_anyerror_ty_ref}),
            };
            break :blk try self.spv.resolveType(SpvType.initPayload(&proto_payload.base));
        };
        const kernel_proto_ty_ref = try self.spv.resolve(.{ .function_type = .{
            .return_type = void_ty_ref,
            .parameters = &.{ptr_anyerror_ty_ref},
        } });

        const test_id = self.spv.declPtr(spv_test_decl_index).result_id;

@@ -1670,9 +1617,9 @@ pub const DeclGen = struct {
        }
    }

    fn boolToInt(self: *DeclGen, result_ty_ref: SpvType.Ref, condition_id: IdRef) !IdRef {
        const zero_id = try self.constInt(result_ty_ref, 0);
        const one_id = try self.constInt(result_ty_ref, 1);
    fn boolToInt(self: *DeclGen, result_ty_ref: CacheRef, condition_id: IdRef) !IdRef {
        const zero_id = try self.spv.constInt(result_ty_ref, 0);
        const one_id = try self.spv.constInt(result_ty_ref, 1);
        const result_id = self.spv.allocId();
        try self.func.body.emit(self.spv.gpa, .OpSelect, .{
            .id_result_type = self.typeId(result_ty_ref),

@@ -1691,7 +1638,7 @@ pub const DeclGen = struct {
        .Bool => blk: {
            const direct_bool_ty_ref = try self.resolveType(ty, .direct);
            const indirect_bool_ty_ref = try self.resolveType(ty, .indirect);
            const zero_id = try self.constInt(indirect_bool_ty_ref, 0);
            const zero_id = try self.spv.constInt(indirect_bool_ty_ref, 0);
            const result_id = self.spv.allocId();
            try self.func.body.emit(self.spv.gpa, .OpINotEqual, .{
                .id_result_type = self.typeId(direct_bool_ty_ref),

@@ -1929,10 +1876,10 @@ pub const DeclGen = struct {
        return result_id;
    }

    fn maskStrangeInt(self: *DeclGen, ty_ref: SpvType.Ref, value_id: IdRef, bits: u16) !IdRef {
    fn maskStrangeInt(self: *DeclGen, ty_ref: CacheRef, value_id: IdRef, bits: u16) !IdRef {
        const mask_value = if (bits == 64) 0xFFFF_FFFF_FFFF_FFFF else (@as(u64, 1) << @intCast(u6, bits)) - 1;
        const result_id = self.spv.allocId();
        const mask_id = try self.constInt(ty_ref, mask_value);
        const mask_id = try self.spv.constInt(ty_ref, mask_value);
        try self.func.body.emit(self.spv.gpa, .OpBitwiseAnd, .{
            .id_result_type = self.typeId(ty_ref),
            .id_result = result_id,
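
maskStrangeInt clears the bits above a "strange" width (say, a u13 stored in a u32 backing type) with an OpBitwiseAnd against the mask computed above. The same computation on the host, for reference:

fn strangeIntMask(bits: u16) u64 {
    // Identical to the mask_value expression in maskStrangeInt; the bits == 64
    // special case avoids the illegal shift by 64.
    return if (bits == 64) 0xFFFF_FFFF_FFFF_FFFF else (@as(u64, 1) << @intCast(u6, bits)) - 1;
}

test "mask for a 13-bit integer" {
    try @import("std").testing.expectEqual(@as(u64, 0x1FFF), strangeIntMask(13));
}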

@@ -2071,7 +2018,7 @@ pub const DeclGen = struct {
        // Note that signed overflow is also wrapping in spir-v.

        const rhs_lt_zero_id = self.spv.allocId();
        const zero_id = try self.constInt(operand_ty_ref, 0);
        const zero_id = try self.spv.constInt(operand_ty_ref, 0);
        try self.func.body.emit(self.spv.gpa, .OpSLessThan, .{
            .id_result_type = self.typeId(bool_ty_ref),
            .id_result = rhs_lt_zero_id,

@@ -2150,7 +2097,7 @@ pub const DeclGen = struct {
    /// is the latter and PtrAccessChain is the former.
    fn accessChain(
        self: *DeclGen,
        result_ty_ref: SpvType.Ref,
        result_ty_ref: CacheRef,
        base: IdRef,
        indexes: []const IdRef,
    ) !IdRef {

@@ -2166,7 +2113,7 @@ pub const DeclGen = struct {

    fn ptrAccessChain(
        self: *DeclGen,
        result_ty_ref: SpvType.Ref,
        result_ty_ref: CacheRef,
        base: IdRef,
        element: IdRef,
        indexes: []const IdRef,

@@ -2541,7 +2488,7 @@ pub const DeclGen = struct {
        // Construct new pointer type for the resulting pointer
        const elem_ty = ptr_ty.elemType2(); // use elemType() so that we get T for *[N]T.
        const elem_ty_ref = try self.resolveType(elem_ty, .direct);
        const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(ptr_ty.ptrAddressSpace()), 0);
        const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(ptr_ty.ptrAddressSpace()));
        if (ptr_ty.isSinglePointer()) {
            // Pointer-to-array. In this case, the resulting pointer is not of the same type
            // as the ptr_ty (we want a *T, not a *[N]T), and hence we need to use accessChain.

@@ -2631,9 +2578,8 @@ pub const DeclGen = struct {
        .Struct => switch (object_ty.containerLayout()) {
            .Packed => unreachable, // TODO
            else => {
                const u32_ty_id = self.typeId(try self.intType(.unsigned, 32));
                const field_index_id = self.spv.allocId();
                try self.spv.emitConstant(u32_ty_id, field_index_id, .{ .uint32 = field_index });
                const field_index_ty_ref = try self.intType(.unsigned, 32);
                const field_index_id = try self.spv.constInt(field_index_ty_ref, field_index);
                const result_ty_ref = try self.resolveType(result_ptr_ty, .direct);
                return try self.accessChain(result_ty_ref, object_ptr, &.{field_index_id});
            },

@@ -2657,7 +2603,7 @@ pub const DeclGen = struct {
    fn makePointerConstant(
        self: *DeclGen,
        section: *SpvSection,
        ptr_ty_ref: SpvType.Ref,
        ptr_ty_ref: CacheRef,
        ptr_id: IdRef,
    ) !IdRef {
        const result_id = self.spv.allocId();

@@ -2675,11 +2621,11 @@ pub const DeclGen = struct {
    // placed in the Function address space.
    fn alloc(
        self: *DeclGen,
        ty_ref: SpvType.Ref,
        ty_ref: CacheRef,
        initializer: ?IdRef,
    ) !IdRef {
        const fn_ptr_ty_ref = try self.spv.ptrType(ty_ref, .Function, 0);
        const general_ptr_ty_ref = try self.spv.ptrType(ty_ref, .Generic, 0);
        const fn_ptr_ty_ref = try self.spv.ptrType(ty_ref, .Function);
        const general_ptr_ty_ref = try self.spv.ptrType(ty_ref, .Generic);

        // SPIR-V requires that OpVariable declarations for locals go into the first block, so we are just going to
        // directly generate them into func.prologue instead of the body.

@@ -2833,7 +2779,7 @@ pub const DeclGen = struct {

        const val_is_undef = if (self.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false;
        if (val_is_undef) {
            const undef = try self.constUndef(ptr_ty_ref);
            const undef = try self.spv.constUndef(ptr_ty_ref);
            try self.store(ptr_ty, ptr, undef);
        } else {
            try self.store(ptr_ty, ptr, value);

@@ -2904,7 +2850,7 @@ pub const DeclGen = struct {
        else
            err_union_id;

        const zero_id = try self.constInt(err_ty_ref, 0);
        const zero_id = try self.spv.constInt(err_ty_ref, 0);
        const is_err_id = self.spv.allocId();
        try self.func.body.emit(self.spv.gpa, .OpINotEqual, .{
            .id_result_type = self.typeId(bool_ty_ref),

@@ -2953,7 +2899,7 @@ pub const DeclGen = struct {

        if (err_union_ty.errorUnionSet().errorSetIsEmpty()) {
            // No error possible, so just return undefined.
            return try self.constUndef(err_ty_ref);
            return try self.spv.constUndef(err_ty_ref);
        }

        const payload_ty = err_union_ty.errorUnionPayload();

@@ -2982,7 +2928,7 @@ pub const DeclGen = struct {

        const payload_ty_ref = try self.resolveType(payload_ty, .indirect);
        var members = std.BoundedArray(IdRef, 2){};
        const payload_id = try self.constUndef(payload_ty_ref);
        const payload_id = try self.spv.constUndef(payload_ty_ref);
        if (eu_layout.error_first) {
            members.appendAssumeCapacity(operand_id);
            members.appendAssumeCapacity(payload_id);

@@ -3024,7 +2970,7 @@ pub const DeclGen = struct {
            operand_id;

        const payload_ty_ref = try self.resolveType(ptr_ty, .direct);
        const null_id = try self.constNull(payload_ty_ref);
        const null_id = try self.spv.constNull(payload_ty_ref);
        const result_id = self.spv.allocId();
        const operands = .{
            .id_result_type = self.typeId(bool_ty_ref),

src/codegen/spirv/Assembler.zig:

@@ -11,7 +11,8 @@ const IdRef = spec.IdRef;
const IdResult = spec.IdResult;

const SpvModule = @import("Module.zig");
const SpvType = @import("type.zig").Type;
const CacheRef = SpvModule.CacheRef;
const CacheKey = SpvModule.CacheKey;

/// Represents a token in the assembly template.
const Token = struct {

@@ -126,7 +127,7 @@ const AsmValue = union(enum) {
    value: IdRef,

    /// This result-value represents a type registered into the module's type system.
    ty: SpvType.Ref,
    ty: CacheRef,

    /// Retrieve the result-id of this AsmValue. Asserts that this AsmValue
    /// is of a variant that allows the result to be obtained (not an unresolved

@@ -135,7 +136,7 @@ const AsmValue = union(enum) {
        return switch (self) {
            .just_declared, .unresolved_forward_reference => unreachable,
            .value => |result| result,
            .ty => |ref| spv.typeId(ref),
            .ty => |ref| spv.resultId(ref),
        };
    }
};

@@ -267,9 +268,9 @@ fn processInstruction(self: *Assembler) !void {
/// refers to the result.
fn processTypeInstruction(self: *Assembler) !AsmValue {
    const operands = self.inst.operands.items;
    const ty = switch (self.inst.opcode) {
        .OpTypeVoid => SpvType.initTag(.void),
        .OpTypeBool => SpvType.initTag(.bool),
    const ref = switch (self.inst.opcode) {
        .OpTypeVoid => try self.spv.resolve(.void_type),
        .OpTypeBool => try self.spv.resolve(.bool_type),
        .OpTypeInt => blk: {
            const signedness: std.builtin.Signedness = switch (operands[2].literal32) {
                0 => .unsigned,

@@ -282,7 +283,7 @@ fn processTypeInstruction(self: *Assembler) !AsmValue {
            const width = std.math.cast(u16, operands[1].literal32) orelse {
                return self.fail(0, "int type of {} bits is too large", .{operands[1].literal32});
            };
            break :blk try SpvType.int(self.spv.arena, signedness, width);
            break :blk try self.spv.intType(signedness, width);
        },
        .OpTypeFloat => blk: {
            const bits = operands[1].literal32;

@@ -292,136 +293,36 @@ fn processTypeInstruction(self: *Assembler) !AsmValue {
                    return self.fail(0, "{} is not a valid bit count for floats (expected 16, 32 or 64)", .{bits});
                },
            }
            break :blk SpvType.float(@intCast(u16, bits));
        },
        .OpTypeVector => blk: {
            const payload = try self.spv.arena.create(SpvType.Payload.Vector);
            payload.* = .{
                .component_type = try self.resolveTypeRef(operands[1].ref_id),
                .component_count = operands[2].literal32,
            };
            break :blk SpvType.initPayload(&payload.base);
        },
        .OpTypeMatrix => blk: {
            const payload = try self.spv.arena.create(SpvType.Payload.Matrix);
            payload.* = .{
                .column_type = try self.resolveTypeRef(operands[1].ref_id),
                .column_count = operands[2].literal32,
            };
            break :blk SpvType.initPayload(&payload.base);
        },
        .OpTypeImage => blk: {
            const payload = try self.spv.arena.create(SpvType.Payload.Image);
            payload.* = .{
                .sampled_type = try self.resolveTypeRef(operands[1].ref_id),
                .dim = @intToEnum(spec.Dim, operands[2].value),
                .depth = switch (operands[3].literal32) {
                    0 => .no,
                    1 => .yes,
                    2 => .maybe,
                    else => {
                        return self.fail(0, "'{}' is not a valid image depth (expected 0, 1 or 2)", .{operands[3].literal32});
                    },
                },
                .arrayed = switch (operands[4].literal32) {
                    0 => false,
                    1 => true,
                    else => {
                        return self.fail(0, "'{}' is not a valid image arrayed-ness (expected 0 or 1)", .{operands[4].literal32});
                    },
                },
                .multisampled = switch (operands[5].literal32) {
                    0 => false,
                    1 => true,
                    else => {
                        return self.fail(0, "'{}' is not a valid image multisampled-ness (expected 0 or 1)", .{operands[5].literal32});
                    },
                },
                .sampled = switch (operands[6].literal32) {
                    0 => .known_at_runtime,
                    1 => .with_sampler,
                    2 => .without_sampler,
                    else => {
                        return self.fail(0, "'{}' is not a valid image sampled-ness (expected 0, 1 or 2)", .{operands[6].literal32});
                    },
                },
                .format = @intToEnum(spec.ImageFormat, operands[7].value),
                .access_qualifier = if (operands.len > 8)
                    @intToEnum(spec.AccessQualifier, operands[8].value)
                else
                    null,
            };
            break :blk SpvType.initPayload(&payload.base);
        },
        .OpTypeSampler => SpvType.initTag(.sampler),
        .OpTypeSampledImage => blk: {
            const payload = try self.spv.arena.create(SpvType.Payload.SampledImage);
            payload.* = .{
                .image_type = try self.resolveTypeRef(operands[1].ref_id),
            };
            break :blk SpvType.initPayload(&payload.base);
            break :blk try self.spv.resolve(.{ .float_type = .{ .bits = @intCast(u16, bits) } });
        },
        .OpTypeVector => try self.spv.resolve(.{ .vector_type = .{
            .component_type = try self.resolveTypeRef(operands[1].ref_id),
            .component_count = operands[2].literal32,
        } }),
        .OpTypeArray => {
            // TODO: The length of an OpTypeArray is determined by a constant (which may be a spec constant),
            // and so some consideration must be taken when entering this in the type system.
            return self.todo("process OpTypeArray", .{});
        },
        .OpTypeRuntimeArray => blk: {
            const payload = try self.spv.arena.create(SpvType.Payload.RuntimeArray);
            payload.* = .{
                .element_type = try self.resolveTypeRef(operands[1].ref_id),
                // TODO: Fetch array stride from decorations.
                .array_stride = 0,
            };
            break :blk SpvType.initPayload(&payload.base);
        },
        .OpTypeOpaque => blk: {
            const payload = try self.spv.arena.create(SpvType.Payload.Opaque);
            const name_offset = operands[1].string;
            payload.* = .{
                .name = std.mem.sliceTo(self.inst.string_bytes.items[name_offset..], 0),
            };
            break :blk SpvType.initPayload(&payload.base);
        },
        .OpTypePointer => blk: {
            const payload = try self.spv.arena.create(SpvType.Payload.Pointer);
            payload.* = .{
                .storage_class = @intToEnum(spec.StorageClass, operands[1].value),
                .child_type = try self.resolveTypeRef(operands[2].ref_id),
                // TODO: Fetch decorations
            };
            break :blk SpvType.initPayload(&payload.base);
        },
        .OpTypePointer => try self.spv.ptrType(
            try self.resolveTypeRef(operands[2].ref_id),
            @intToEnum(spec.StorageClass, operands[1].value),
        ),
        .OpTypeFunction => blk: {
            const param_operands = operands[2..];
            const param_types = try self.spv.arena.alloc(SpvType.Ref, param_operands.len);
            const param_types = try self.spv.gpa.alloc(CacheRef, param_operands.len);
            defer self.spv.gpa.free(param_types);
            for (param_types, 0..) |*param, i| {
                param.* = try self.resolveTypeRef(param_operands[i].ref_id);
            }
            const payload = try self.spv.arena.create(SpvType.Payload.Function);
            payload.* = .{
            break :blk try self.spv.resolve(.{ .function_type = .{
                .return_type = try self.resolveTypeRef(operands[1].ref_id),
                .parameters = param_types,
            };
            break :blk SpvType.initPayload(&payload.base);
            } });
        },
        .OpTypeEvent => SpvType.initTag(.event),
        .OpTypeDeviceEvent => SpvType.initTag(.device_event),
        .OpTypeReserveId => SpvType.initTag(.reserve_id),
        .OpTypeQueue => SpvType.initTag(.queue),
        .OpTypePipe => blk: {
            const payload = try self.spv.arena.create(SpvType.Payload.Pipe);
            payload.* = .{
                .qualifier = @intToEnum(spec.AccessQualifier, operands[1].value),
            };
            break :blk SpvType.initPayload(&payload.base);
        },
        .OpTypePipeStorage => SpvType.initTag(.pipe_storage),
        .OpTypeNamedBarrier => SpvType.initTag(.named_barrier),
        else => return self.todo("process type instruction {s}", .{@tagName(self.inst.opcode)}),
    };

    const ref = try self.spv.resolveType(ty);
    return AsmValue{ .ty = ref };
}

@@ -528,7 +429,7 @@ fn resolveRef(self: *Assembler, ref: AsmValue.Ref) !AsmValue {
}

/// Resolve a value reference as type.
fn resolveTypeRef(self: *Assembler, ref: AsmValue.Ref) !SpvType.Ref {
fn resolveTypeRef(self: *Assembler, ref: AsmValue.Ref) !CacheRef {
    const value = try self.resolveRef(ref);
    switch (value) {
        .just_declared, .unresolved_forward_reference => unreachable,

@@ -761,19 +662,20 @@ fn parseContextDependentNumber(self: *Assembler) !void {

    const tok = self.currentToken();
    const result_type_ref = try self.resolveTypeRef(self.inst.operands.items[0].ref_id);
    const result_type = self.spv.type_cache.keys()[@enumToInt(result_type_ref)];
    if (result_type.isInt()) {
        try self.parseContextDependentInt(result_type.intSignedness(), result_type.intFloatBits());
    } else if (result_type.isFloat()) {
        const width = result_type.intFloatBits();
        switch (width) {
            16 => try self.parseContextDependentFloat(16),
            32 => try self.parseContextDependentFloat(32),
            64 => try self.parseContextDependentFloat(64),
            else => return self.fail(tok.start, "cannot parse {}-bit float literal", .{width}),
        }
    } else {
        return self.fail(tok.start, "cannot parse literal constant {s}", .{@tagName(result_type.tag())});
    const result_type = self.spv.cache.lookup(result_type_ref);
    switch (result_type) {
        .int_type => |int| {
            try self.parseContextDependentInt(int.signedness, int.bits);
        },
        .float_type => |float| {
            switch (float.bits) {
                16 => try self.parseContextDependentFloat(16),
                32 => try self.parseContextDependentFloat(32),
                64 => try self.parseContextDependentFloat(64),
                else => return self.fail(tok.start, "cannot parse {}-bit float literal", .{float.bits}),
            }
        },
        else => return self.fail(tok.start, "cannot parse literal constant", .{}),
    }
}
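
The rewrite above replaces the old isInt()/isFloat() predicate probing with a switch over the tagged union returned by cache.lookup. A self-contained sketch of why that shape is nicer, using a hypothetical, much-reduced stand-in for the cache's key type (the real one lives in the suppressed Cache.zig below):

const std = @import("std");

// Hypothetical miniature of the cache's key union.
const Key = union(enum) {
    int_type: struct { signedness: std.builtin.Signedness, bits: u16 },
    float_type: struct { bits: u16 },
    bool_type,
};

fn literalKind(key: Key) []const u8 {
    // One exhaustive switch instead of a chain of isInt()/isFloat() checks;
    // adding a Key variant forces every such site to be revisited.
    return switch (key) {
        .int_type => "int literal",
        .float_type => "float literal",
        else => "unsupported literal",
    };
}

test "switching on the key" {
    const key = Key{ .int_type = .{ .signedness = .unsigned, .bits = 32 } };
    try std.testing.expectEqualStrings("int literal", literalKind(key));
}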

src/codegen/spirv/Cache.zig: new file, 1046 lines (diff suppressed because it is too large)
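
The new Cache.zig itself is too large to display here, but the Module-level wrappers introduced just below (resolve, resultId, resolveId, resolveString) imply its core mechanism: keys describing types and constants are interned in insertion order, a CacheRef is an index into that set, and each interned key owns one result-id. A loose, self-contained sketch of that idea, with all names and details assumed rather than taken from the real file:

const std = @import("std");

// Assumed miniature of Cache.Key; the real union covers many more type and
// constant variants (struct_type, ptr_type, float values, strings, ...).
const Key = union(enum) {
    void_type,
    bool_type,
    int_type: struct { signedness: std.builtin.Signedness, bits: u16 },
};

const Ref = enum(u32) { _ };

const MiniCache = struct {
    keys: std.ArrayListUnmanaged(Key) = .{},
    ids: std.ArrayListUnmanaged(u32) = .{},

    // Deduplicating resolve: an existing key returns its old Ref, a new key is
    // appended and given a fresh result-id. The real cache uses hashing rather
    // than a linear scan, and materializes instructions only at flush time.
    fn resolve(self: *MiniCache, gpa: std.mem.Allocator, key: Key, next_id: *u32) !Ref {
        for (self.keys.items, 0..) |existing, i| {
            if (std.meta.eql(existing, key)) return @intToEnum(Ref, @intCast(u32, i));
        }
        try self.keys.append(gpa, key);
        try self.ids.append(gpa, next_id.*);
        next_id.* += 1;
        return @intToEnum(Ref, @intCast(u32, self.keys.items.len - 1));
    }

    fn resultId(self: *const MiniCache, ref: Ref) u32 {
        return self.ids.items[@enumToInt(ref)];
    }
};

test "types are deduplicated" {
    var cache = MiniCache{};
    defer {
        cache.keys.deinit(std.testing.allocator);
        cache.ids.deinit(std.testing.allocator);
    }
    var next_id: u32 = 1;
    const a = try cache.resolve(std.testing.allocator, .{ .int_type = .{ .signedness = .unsigned, .bits = 32 } }, &next_id);
    const b = try cache.resolve(std.testing.allocator, .{ .int_type = .{ .signedness = .unsigned, .bits = 32 } }, &next_id);
    try std.testing.expectEqual(a, b);
    try std.testing.expectEqual(@as(u32, 1), cache.resultId(a));
}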
src/codegen/spirv/Module.zig:

@@ -20,11 +20,13 @@ const IdResult = spec.IdResult;
const IdResultType = spec.IdResultType;

const Section = @import("Section.zig");
const Type = @import("type.zig").Type;

const TypeCache = std.ArrayHashMapUnmanaged(Type, IdResultType, Type.ShallowHashContext32, true);
const Cache = @import("Cache.zig");
pub const CacheKey = Cache.Key;
pub const CacheRef = Cache.Ref;
pub const CacheString = Cache.String;

/// This structure represents a function that is in-progress of being emitted.
/// Commonly, the contents of this structure will be merged with the appropriate
/// sections of the module and re-used. Note that the SPIR-V module system makes
/// no attempt of compacting result-id's, so any Fn instance should ultimately

@@ -126,7 +128,13 @@ sections: struct {
    /// Annotation instructions (OpDecorate etc).
    annotations: Section = .{},
    /// Type declarations, constants, global variables
    /// Below this section, OpLine and OpNoLine is allowed.
    /// From this section, OpLine and OpNoLine is allowed.
    /// According to the SPIR-V documentation, this section normally
    /// also holds type and constant instructions. These are managed
    /// via the cache instead, which is the sole structure that
    /// manages that section. These will be inserted between this and
    /// the previous section when emitting the final binary.
    /// TODO: Do we need this section? Globals are also managed with another mechanism.
    types_globals_constants: Section = .{},
    // Functions without a body - skip for now.
    /// Regular function definitions.

@@ -141,11 +149,9 @@ next_result_id: Word,
/// just the ones for OpLine. Note that OpLine needs the result of OpString, and not that of OpSource.
source_file_names: std.StringHashMapUnmanaged(IdRef) = .{},

/// SPIR-V type cache. Note that according to SPIR-V spec section 2.8, Types and Variables, non-pointer
/// non-aggregate types (which includes matrices and vectors) must have a _unique_ representation in
/// the final binary.
/// Note: Uses ArrayHashMap which is insertion ordered, so that we may refer to other types by index (Type.Ref).
type_cache: TypeCache = .{},
/// SPIR-V type- and constant cache. This structure is used to store information about these in a more
/// efficient manner.
cache: Cache = .{},

/// Set of Decls, referred to by Decl.Index.
decls: std.ArrayListUnmanaged(Decl) = .{},

@@ -163,7 +169,7 @@ globals: struct {
    globals: std.AutoArrayHashMapUnmanaged(Decl.Index, Global) = .{},
    /// This pseudo-section contains the initialization code for all the globals. Instructions from
    /// here are reordered when flushing the module. Its contents should be part of the
    /// `types_globals_constants` SPIR-V section.
    /// `types_globals_constants` SPIR-V section when the module is emitted.
    section: Section = .{},
} = .{},

@@ -182,11 +188,10 @@ pub fn deinit(self: *Module) void {
    self.sections.debug_strings.deinit(self.gpa);
    self.sections.debug_names.deinit(self.gpa);
    self.sections.annotations.deinit(self.gpa);
    self.sections.types_globals_constants.deinit(self.gpa);
    self.sections.functions.deinit(self.gpa);

    self.source_file_names.deinit(self.gpa);
    self.type_cache.deinit(self.gpa);
    self.cache.deinit(self);

    self.decls.deinit(self.gpa);
    self.decl_deps.deinit(self.gpa);

@@ -213,6 +218,22 @@ pub fn idBound(self: Module) Word {
    return self.next_result_id;
}

pub fn resolve(self: *Module, key: CacheKey) !CacheRef {
    return self.cache.resolve(self, key);
}

pub fn resultId(self: *const Module, ref: CacheRef) IdResult {
    return self.cache.resultId(ref);
}

pub fn resolveId(self: *Module, key: CacheKey) !IdResult {
    return self.resultId(try self.resolve(key));
}

pub fn resolveString(self: *Module, str: []const u8) !CacheString {
    return try self.cache.addString(self, str);
}

fn orderGlobalsInto(
    self: *Module,
    decl_index: Decl.Index,

@@ -324,6 +345,9 @@ pub fn flush(self: *Module, file: std.fs.File) !void {
    var entry_points = try self.entryPoints();
    defer entry_points.deinit(self.gpa);

    var types_constants = try self.cache.materialize(self);
    defer types_constants.deinit(self.gpa);

    // Note: needs to be kept in order according to section 2.3!
    const buffers = &[_][]const Word{
        &header,

@@ -334,6 +358,7 @@ pub fn flush(self: *Module, file: std.fs.File) !void {
        self.sections.debug_strings.toWords(),
        self.sections.debug_names.toWords(),
        self.sections.annotations.toWords(),
        types_constants.toWords(),
        self.sections.types_globals_constants.toWords(),
        globals.toWords(),
        self.sections.functions.toWords(),

@@ -386,417 +411,73 @@ pub fn resolveSourceFileName(self: *Module, decl: *ZigDecl) !IdRef {
    return result.value_ptr.*;
}

/// Fetch a result-id for a spir-v type. This function deduplicates the type as appropriate,
/// and returns a cached version if that exists.
/// Note: This function does not attempt to perform any validation on the type.
/// The type is emitted in a shallow fashion; any child types should already
/// be emitted at this point.
pub fn resolveType(self: *Module, ty: Type) !Type.Ref {
    const result = try self.type_cache.getOrPut(self.gpa, ty);
    const index = @intToEnum(Type.Ref, result.index);

    if (!result.found_existing) {
        const ref = try self.emitType(ty);
        self.type_cache.values()[result.index] = ref;
    }

    return index;
pub fn intType(self: *Module, signedness: std.builtin.Signedness, bits: u16) !CacheRef {
    return try self.resolve(.{ .int_type = .{
        .signedness = signedness,
        .bits = bits,
    } });
}

pub fn resolveTypeId(self: *Module, ty: Type) !IdResultType {
    const ty_ref = try self.resolveType(ty);
    return self.typeId(ty_ref);
}

pub fn typeRefType(self: Module, ty_ref: Type.Ref) Type {
    return self.type_cache.keys()[@enumToInt(ty_ref)];
}

/// Get the result-id of a particular type, by reference. Asserts type_ref is valid.
pub fn typeId(self: Module, ty_ref: Type.Ref) IdResultType {
    return self.type_cache.values()[@enumToInt(ty_ref)];
}

/// Unconditionally emit a spir-v type into the appropriate section.
/// Note: If this function is called with a type that is already generated, it may yield an invalid module
/// as non-pointer non-aggregate types must be unique!
/// Note: This function does not attempt to perform any validation on the type.
/// The type is emitted in a shallow fashion; any child types should already
/// be emitted at this point.
pub fn emitType(self: *Module, ty: Type) error{OutOfMemory}!IdResultType {
    const result_id = self.allocId();
    const ref_id = result_id;
    const types = &self.sections.types_globals_constants;
    const debug_names = &self.sections.debug_names;
    const result_id_operand = .{ .id_result = result_id };

    switch (ty.tag()) {
        .void => {
            try types.emit(self.gpa, .OpTypeVoid, result_id_operand);
            try debug_names.emit(self.gpa, .OpName, .{
                .target = result_id,
                .name = "void",
            });
        },
        .bool => {
            try types.emit(self.gpa, .OpTypeBool, result_id_operand);
            try debug_names.emit(self.gpa, .OpName, .{
                .target = result_id,
                .name = "bool",
            });
        },
        .u8,
        .u16,
        .u32,
        .u64,
        .i8,
        .i16,
        .i32,
        .i64,
        .int,
        => {
            // TODO: Kernels do not support OpTypeInt that is signed. We can probably
            // get rid of the signedness all together, in Shaders also.
            const bits = ty.intFloatBits();
            const signedness: spec.LiteralInteger = switch (ty.intSignedness()) {
                .unsigned => 0,
                .signed => 1,
            };

            try types.emit(self.gpa, .OpTypeInt, .{
                .id_result = result_id,
                .width = bits,
                .signedness = signedness,
            });

            const ui: []const u8 = switch (signedness) {
                0 => "u",
                1 => "i",
                else => unreachable,
            };
            const name = try std.fmt.allocPrint(self.gpa, "{s}{}", .{ ui, bits });
            defer self.gpa.free(name);

            try debug_names.emit(self.gpa, .OpName, .{
                .target = result_id,
                .name = name,
            });
        },
        .f16, .f32, .f64 => {
            const bits = ty.intFloatBits();
            try types.emit(self.gpa, .OpTypeFloat, .{
                .id_result = result_id,
                .width = bits,
            });

            const name = try std.fmt.allocPrint(self.gpa, "f{}", .{bits});
            defer self.gpa.free(name);
            try debug_names.emit(self.gpa, .OpName, .{
                .target = result_id,
                .name = name,
            });
        },
        .vector => try types.emit(self.gpa, .OpTypeVector, .{
            .id_result = result_id,
            .component_type = self.typeId(ty.childType()),
            .component_count = ty.payload(.vector).component_count,
        }),
        .matrix => try types.emit(self.gpa, .OpTypeMatrix, .{
            .id_result = result_id,
            .column_type = self.typeId(ty.childType()),
            .column_count = ty.payload(.matrix).column_count,
        }),
        .image => {
            const info = ty.payload(.image);
            try types.emit(self.gpa, .OpTypeImage, .{
                .id_result = result_id,
                .sampled_type = self.typeId(ty.childType()),
                .dim = info.dim,
                .depth = @enumToInt(info.depth),
                .arrayed = @boolToInt(info.arrayed),
                .ms = @boolToInt(info.multisampled),
                .sampled = @enumToInt(info.sampled),
                .image_format = info.format,
                .access_qualifier = info.access_qualifier,
            });
        },
        .sampler => try types.emit(self.gpa, .OpTypeSampler, result_id_operand),
        .sampled_image => try types.emit(self.gpa, .OpTypeSampledImage, .{
            .id_result = result_id,
            .image_type = self.typeId(ty.childType()),
        }),
        .array => {
            const info = ty.payload(.array);
            assert(info.length != 0);

            const size_type = Type.initTag(.u32);
            const size_type_id = try self.resolveTypeId(size_type);
            const length_id = self.allocId();
            try self.emitConstant(size_type_id, length_id, .{ .uint32 = info.length });

            try types.emit(self.gpa, .OpTypeArray, .{
                .id_result = result_id,
                .element_type = self.typeId(ty.childType()),
                .length = length_id,
            });
            if (info.array_stride != 0) {
                try self.decorate(ref_id, .{ .ArrayStride = .{ .array_stride = info.array_stride } });
            }
        },
        .runtime_array => {
            const info = ty.payload(.runtime_array);
            try types.emit(self.gpa, .OpTypeRuntimeArray, .{
                .id_result = result_id,
                .element_type = self.typeId(ty.childType()),
            });
            if (info.array_stride != 0) {
                try self.decorate(ref_id, .{ .ArrayStride = .{ .array_stride = info.array_stride } });
            }
        },
        .@"struct" => {
            const info = ty.payload(.@"struct");
            try types.emitRaw(self.gpa, .OpTypeStruct, 1 + info.members.len);
            types.writeOperand(IdResult, result_id);
            for (info.members) |member| {
                types.writeOperand(IdRef, self.typeId(member.ty));
            }
            try self.decorateStruct(ref_id, info);
        },
        .@"opaque" => try types.emit(self.gpa, .OpTypeOpaque, .{
            .id_result = result_id,
            .literal_string = ty.payload(.@"opaque").name,
||||
}),
|
||||
.pointer => {
|
||||
const info = ty.payload(.pointer);
|
||||
try types.emit(self.gpa, .OpTypePointer, .{
|
||||
.id_result = result_id,
|
||||
.storage_class = info.storage_class,
|
||||
.type = self.typeId(ty.childType()),
|
||||
});
|
||||
if (info.array_stride != 0) {
|
||||
try self.decorate(ref_id, .{ .ArrayStride = .{ .array_stride = info.array_stride } });
|
||||
}
|
||||
if (info.alignment != 0) {
|
||||
try self.decorate(ref_id, .{ .Alignment = .{ .alignment = info.alignment } });
|
||||
}
|
||||
if (info.max_byte_offset) |max_byte_offset| {
|
||||
try self.decorate(ref_id, .{ .MaxByteOffset = .{ .max_byte_offset = max_byte_offset } });
|
||||
}
|
||||
},
|
||||
.function => {
|
||||
const info = ty.payload(.function);
|
||||
try types.emitRaw(self.gpa, .OpTypeFunction, 2 + info.parameters.len);
|
||||
types.writeOperand(IdResult, result_id);
|
||||
types.writeOperand(IdRef, self.typeId(info.return_type));
|
||||
for (info.parameters) |parameter_type| {
|
||||
types.writeOperand(IdRef, self.typeId(parameter_type));
|
||||
}
|
||||
},
|
||||
.event => try types.emit(self.gpa, .OpTypeEvent, result_id_operand),
|
||||
.device_event => try types.emit(self.gpa, .OpTypeDeviceEvent, result_id_operand),
|
||||
.reserve_id => try types.emit(self.gpa, .OpTypeReserveId, result_id_operand),
|
||||
.queue => try types.emit(self.gpa, .OpTypeQueue, result_id_operand),
|
||||
.pipe => try types.emit(self.gpa, .OpTypePipe, .{
|
||||
.id_result = result_id,
|
||||
.qualifier = ty.payload(.pipe).qualifier,
|
||||
}),
|
||||
.pipe_storage => try types.emit(self.gpa, .OpTypePipeStorage, result_id_operand),
|
||||
.named_barrier => try types.emit(self.gpa, .OpTypeNamedBarrier, result_id_operand),
|
||||
}
|
||||
|
||||
return result_id;
|
||||
}

fn decorateStruct(self: *Module, target: IdRef, info: *const Type.Payload.Struct) !void {
    const debug_names = &self.sections.debug_names;

    if (info.name.len != 0) {
        try debug_names.emit(self.gpa, .OpName, .{
            .target = target,
            .name = info.name,
        });
    }

    // Decorations for the struct type itself.
    if (info.decorations.block)
        try self.decorate(target, .Block);
    if (info.decorations.buffer_block)
        try self.decorate(target, .BufferBlock);
    if (info.decorations.glsl_shared)
        try self.decorate(target, .GLSLShared);
    if (info.decorations.glsl_packed)
        try self.decorate(target, .GLSLPacked);
    if (info.decorations.c_packed)
        try self.decorate(target, .CPacked);

    // Decorations for the struct members.
    const extra = info.member_decoration_extra;
    var extra_i: u32 = 0;
    for (info.members, 0..) |member, i| {
        const d = member.decorations;
        const index = @intCast(Word, i);

        if (member.name.len != 0) {
            try debug_names.emit(self.gpa, .OpMemberName, .{
                .type = target,
                .member = index,
                .name = member.name,
            });
        }

        switch (member.offset) {
            .none => {},
            else => try self.decorateMember(
                target,
                index,
                .{ .Offset = .{ .byte_offset = @enumToInt(member.offset) } },
            ),
        }

        switch (d.matrix_layout) {
            .row_major => try self.decorateMember(target, index, .RowMajor),
            .col_major => try self.decorateMember(target, index, .ColMajor),
            .none => {},
        }
        if (d.matrix_layout != .none) {
            try self.decorateMember(target, index, .{
                .MatrixStride = .{ .matrix_stride = extra[extra_i] },
            });
            extra_i += 1;
        }

        if (d.no_perspective)
            try self.decorateMember(target, index, .NoPerspective);
        if (d.flat)
            try self.decorateMember(target, index, .Flat);
        if (d.patch)
            try self.decorateMember(target, index, .Patch);
        if (d.centroid)
            try self.decorateMember(target, index, .Centroid);
        if (d.sample)
            try self.decorateMember(target, index, .Sample);
        if (d.invariant)
            try self.decorateMember(target, index, .Invariant);
        if (d.@"volatile")
            try self.decorateMember(target, index, .Volatile);
        if (d.coherent)
            try self.decorateMember(target, index, .Coherent);
        if (d.non_writable)
            try self.decorateMember(target, index, .NonWritable);
        if (d.non_readable)
            try self.decorateMember(target, index, .NonReadable);

        if (d.builtin) {
            try self.decorateMember(target, index, .{
                .BuiltIn = .{ .built_in = @intToEnum(spec.BuiltIn, extra[extra_i]) },
            });
            extra_i += 1;
        }
        if (d.stream) {
            try self.decorateMember(target, index, .{
                .Stream = .{ .stream_number = extra[extra_i] },
            });
            extra_i += 1;
        }
        if (d.location) {
            try self.decorateMember(target, index, .{
                .Location = .{ .location = extra[extra_i] },
            });
            extra_i += 1;
        }
        if (d.component) {
            try self.decorateMember(target, index, .{
                .Component = .{ .component = extra[extra_i] },
            });
            extra_i += 1;
        }
        if (d.xfb_buffer) {
            try self.decorateMember(target, index, .{
                .XfbBuffer = .{ .xfb_buffer_number = extra[extra_i] },
            });
            extra_i += 1;
        }
        if (d.xfb_stride) {
            try self.decorateMember(target, index, .{
                .XfbStride = .{ .xfb_stride = extra[extra_i] },
            });
            extra_i += 1;
        }
        if (d.user_semantic) {
            const len = extra[extra_i];
            extra_i += 1;
            const semantic = @ptrCast([*]const u8, &extra[extra_i])[0..len];
            try self.decorateMember(target, index, .{
                .UserSemantic = .{ .semantic = semantic },
            });
            // Advance past the string, which is padded to a whole number of words.
            extra_i += std.math.divCeil(u32, len, @sizeOf(u32)) catch unreachable;
        }
    }
}
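
// Illustrative sketch of the assumed `member_decoration_extra` layout (not in
// the diff): for a member with `builtin = true` and `location = true`, the
// extra words appear in the same order the flags are tested above, e.g.
//   extra = .{ @enumToInt(spec.BuiltIn.Position), 0 }
// so decorateStruct consumes one word for the BuiltIn, then one for Location.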

pub fn simpleStructType(self: *Module, members: []const Type.Payload.Struct.Member) !Type.Ref {
    const payload = try self.arena.create(Type.Payload.Struct);
    payload.* = .{
        .members = try self.arena.dupe(Type.Payload.Struct.Member, members),
        .decorations = .{},
    };
    return try self.resolveType(Type.initPayload(&payload.base));
}

pub fn arrayType(self: *Module, len: u32, ty: Type.Ref) !Type.Ref {
    const payload = try self.arena.create(Type.Payload.Array);
    payload.* = .{
        .element_type = ty,
        .length = len,
    };
    return try self.resolveType(Type.initPayload(&payload.base));
pub fn arrayType(self: *Module, len: u32, elem_ty_ref: CacheRef) !CacheRef {
    const len_ty_ref = try self.resolve(.{ .int_type = .{
        .signedness = .unsigned,
        .bits = 32,
    } });
    const len_ref = try self.resolve(.{ .int = .{
        .ty = len_ty_ref,
        .value = .{ .uint64 = len },
    } });
    return try self.resolve(.{ .array_type = .{
        .element_type = elem_ty_ref,
        .length = len_ref,
    } });
}
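
// Illustrative sketch, not part of this change: the new arrayType caches the
// `u32` length as a constant first, since OpTypeArray takes its length as a
// constant id rather than a literal.
fn exampleArrayType(spv: *Module) !CacheRef {
    const u8_ty = try spv.intType(.unsigned, 8);
    return spv.arrayType(16, u8_ty); // roughly a `[16]u8`
}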

pub fn ptrType(
    self: *Module,
    child: Type.Ref,
    child: CacheRef,
    storage_class: spec.StorageClass,
    alignment: u32,
) !Type.Ref {
    const ptr_payload = try self.arena.create(Type.Payload.Pointer);
    ptr_payload.* = .{
) !CacheRef {
    return try self.resolve(.{ .ptr_type = .{
        .storage_class = storage_class,
        .child_type = child,
        .alignment = alignment,
    };
    return try self.resolveType(Type.initPayload(&ptr_payload.base));
    } });
}
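
// Illustrative sketch, not part of this change: a pointer to u32 in the
// Function storage class, with no explicit alignment decoration.
fn examplePtrType(spv: *Module) !CacheRef {
    const u32_ty = try spv.intType(.unsigned, 32);
    return spv.ptrType(u32_ty, .Function, 0);
}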

pub fn changePtrStorageClass(self: *Module, ptr_ty_ref: Type.Ref, new_storage_class: spec.StorageClass) !Type.Ref {
    const payload = try self.arena.create(Type.Payload.Pointer);
    payload.* = self.typeRefType(ptr_ty_ref).payload(.pointer).*;
    payload.storage_class = new_storage_class;
    return try self.resolveType(Type.initPayload(&payload.base));
pub fn constInt(self: *Module, ty_ref: CacheRef, value: anytype) !IdRef {
    const ty = self.cache.lookup(ty_ref).int_type;
    const Value = Cache.Key.Int.Value;
    return try self.resolveId(.{ .int = .{
        .ty = ty_ref,
        .value = switch (ty.signedness) {
            .signed => Value{ .int64 = @intCast(i64, value) },
            .unsigned => Value{ .uint64 = @intCast(u64, value) },
        },
    } });
}
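
// Illustrative sketch, not part of this change: constInt picks the 64-bit
// signed or unsigned representation based on the cached int type's signedness.
fn exampleConstInt(spv: *Module) !IdRef {
    const u32_ty = try spv.intType(.unsigned, 32);
    return spv.constInt(u32_ty, 42); // the constant `42: u32`
}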

pub fn constComposite(self: *Module, ty_ref: Type.Ref, members: []const IdRef) !IdRef {
pub fn constUndef(self: *Module, ty_ref: CacheRef) !IdRef {
    return try self.resolveId(.{ .undef = .{ .ty = ty_ref } });
}

pub fn constNull(self: *Module, ty_ref: CacheRef) !IdRef {
    return try self.resolveId(.{ .null = .{ .ty = ty_ref } });
}

pub fn constBool(self: *Module, ty_ref: CacheRef, value: bool) !IdRef {
    return try self.resolveId(.{ .bool = .{ .ty = ty_ref, .value = value } });
}

pub fn constComposite(self: *Module, ty_ref: CacheRef, members: []const IdRef) !IdRef {
    const result_id = self.allocId();
    try self.sections.types_globals_constants.emit(self.gpa, .OpSpecConstantComposite, .{
        .id_result_type = self.typeId(ty_ref),
        .id_result_type = self.resultId(ty_ref),
        .id_result = result_id,
        .constituents = members,
    });
    return result_id;
}

pub fn emitConstant(
    self: *Module,
    ty_id: IdRef,
    result_id: IdRef,
    value: spec.LiteralContextDependentNumber,
) !void {
    try self.sections.types_globals_constants.emit(self.gpa, .OpConstant, .{
        .id_result_type = ty_id,
        .id_result = result_id,
        .value = value,
    });
}
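
// Illustrative sketch, not part of this change: the low-level path emits an
// OpConstant directly. The `.float32` literal variant is an assumption about
// the fields of spec.LiteralContextDependentNumber.
fn exampleEmitConstant(spv: *Module, f32_ty_id: IdRef) !IdRef {
    const result_id = spv.allocId();
    try spv.emitConstant(f32_ty_id, result_id, .{ .float32 = 1.5 });
    return result_id;
}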

/// Decorate a result-id.
pub fn decorate(
    self: *Module,
@@ -883,3 +564,22 @@ pub fn declareEntryPoint(self: *Module, decl_index: Decl.Index, name: []const u8
        .name = try self.arena.dupe(u8, name),
    });
}

pub fn debugName(self: *Module, target: IdResult, comptime fmt: []const u8, args: anytype) !void {
    const name = try std.fmt.allocPrint(self.gpa, fmt, args);
    defer self.gpa.free(name);
    try self.sections.debug_names.emit(self.gpa, .OpName, .{
        .target = target,
        .name = name,
    });
}

pub fn memberDebugName(self: *Module, target: IdResult, member: u32, comptime fmt: []const u8, args: anytype) !void {
    const name = try std.fmt.allocPrint(self.gpa, fmt, args);
    defer self.gpa.free(name);
    try self.sections.debug_names.emit(self.gpa, .OpMemberName, .{
        .type = target,
        .member = member,
        .name = name,
    });
}
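
// Illustrative sketch, not part of this change: debugName formats the name on
// the fly, so callers can attach descriptive OpNames without manual allocation.
fn exampleDebugName(spv: *Module, id: IdResult, index: usize) !void {
    try spv.debugName(id, "tmp_{}", .{index});
}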

@@ -1,567 +0,0 @@
//! This module models a SPIR-V Type. These are distinct from Zig types; some
//! of them are not directly representable in Zig.

const std = @import("std");
const assert = std.debug.assert;
const Signedness = std.builtin.Signedness;
const Allocator = std.mem.Allocator;

const spec = @import("spec.zig");

pub const Type = extern union {
    tag_if_small_enough: Tag,
    ptr_otherwise: *Payload,

    /// A reference to another SPIR-V type.
    pub const Ref = enum(u32) { _ };

    pub fn initTag(comptime small_tag: Tag) Type {
        comptime assert(@enumToInt(small_tag) < Tag.no_payload_count);
        return .{ .tag_if_small_enough = small_tag };
    }

    pub fn initPayload(pl: *Payload) Type {
        assert(@enumToInt(pl.tag) >= Tag.no_payload_count);
        return .{ .ptr_otherwise = pl };
    }

    pub fn int(arena: Allocator, signedness: Signedness, bits: u16) !Type {
        const bits_and_signedness = switch (signedness) {
            .signed => -@as(i32, bits),
            .unsigned => @as(i32, bits),
        };

        return switch (bits_and_signedness) {
            8 => initTag(.u8),
            16 => initTag(.u16),
            32 => initTag(.u32),
            64 => initTag(.u64),
            -8 => initTag(.i8),
            -16 => initTag(.i16),
            -32 => initTag(.i32),
            -64 => initTag(.i64),
            else => {
                const int_payload = try arena.create(Payload.Int);
                int_payload.* = .{
                    .width = bits,
                    .signedness = signedness,
                };
                return initPayload(&int_payload.base);
            },
        };
    }
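
    // Illustrative sketch, not part of the original file: common widths use a
    // payload-free tag stored inline, while odd widths fall back to an
    // arena-allocated Payload.Int behind `ptr_otherwise`.
    fn exampleInt(arena: Allocator) !void {
        const a = try int(arena, .unsigned, 32);
        const b = try int(arena, .unsigned, 7);
        assert(a.tag() == .u32); // small tag, no allocation
        assert(b.tag() == .int); // payload-backed
    }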

    pub fn float(bits: u16) Type {
        return switch (bits) {
            16 => initTag(.f16),
            32 => initTag(.f32),
            64 => initTag(.f64),
            else => unreachable, // Enable more types if required.
        };
    }

    pub fn tag(self: Type) Tag {
        if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) {
            return self.tag_if_small_enough;
        } else {
            return self.ptr_otherwise.tag;
        }
    }

    pub fn castTag(self: Type, comptime t: Tag) ?*t.Type() {
        if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count)
            return null;

        if (self.ptr_otherwise.tag == t)
            return self.payload(t);

        return null;
    }

    /// Access the payload of a type directly.
    pub fn payload(self: Type, comptime t: Tag) *t.Type() {
        assert(self.tag() == t);
        return @fieldParentPtr(t.Type(), "base", self.ptr_otherwise);
    }

    /// Perform a shallow equality test, comparing two types while assuming that any child types
    /// are equal only if their references are equal.
    pub fn eqlShallow(a: Type, b: Type) bool {
        if (a.tag_if_small_enough == b.tag_if_small_enough)
            return true;

        const tag_a = a.tag();
        const tag_b = b.tag();
        if (tag_a != tag_b)
            return false;

        inline for (@typeInfo(Tag).Enum.fields) |field| {
            const t = @field(Tag, field.name);
            if (t == tag_a) {
                return eqlPayloads(t, a, b);
            }
        }

        unreachable;
    }

    /// Compare the payload of two compatible tags, given that we already know the tag of both types.
    fn eqlPayloads(comptime t: Tag, a: Type, b: Type) bool {
        switch (t) {
            .void,
            .bool,
            .sampler,
            .event,
            .device_event,
            .reserve_id,
            .queue,
            .pipe_storage,
            .named_barrier,
            .u8,
            .u16,
            .u32,
            .u64,
            .i8,
            .i16,
            .i32,
            .i64,
            .f16,
            .f32,
            .f64,
            => return true,
            .int,
            .vector,
            .matrix,
            .sampled_image,
            .array,
            .runtime_array,
            .@"opaque",
            .pointer,
            .pipe,
            .image,
            => return std.meta.eql(a.payload(t).*, b.payload(t).*),
            .@"struct" => {
                const struct_a = a.payload(.@"struct");
                const struct_b = b.payload(.@"struct");
                if (struct_a.members.len != struct_b.members.len)
                    return false;
                for (struct_a.members, 0..) |mem_a, i| {
                    if (!std.meta.eql(mem_a, struct_b.members[i]))
                        return false;
                }
                return true;
            },
            .function => {
                const fn_a = a.payload(.function);
                const fn_b = b.payload(.function);
                if (fn_a.return_type != fn_b.return_type)
                    return false;
                return std.mem.eql(Ref, fn_a.parameters, fn_b.parameters);
            },
        }
    }

    /// Perform a shallow hash, which hashes the reference value of child types instead of recursing.
    pub fn hashShallow(self: Type) u64 {
        var hasher = std.hash.Wyhash.init(0);
        const t = self.tag();
        std.hash.autoHash(&hasher, t);

        inline for (@typeInfo(Tag).Enum.fields) |field| {
            if (@field(Tag, field.name) == t) {
                switch (@field(Tag, field.name)) {
                    .void,
                    .bool,
                    .sampler,
                    .event,
                    .device_event,
                    .reserve_id,
                    .queue,
                    .pipe_storage,
                    .named_barrier,
                    .u8,
                    .u16,
                    .u32,
                    .u64,
                    .i8,
                    .i16,
                    .i32,
                    .i64,
                    .f16,
                    .f32,
                    .f64,
                    => {},
                    else => self.hashPayload(@field(Tag, field.name), &hasher),
                }
            }
        }

        return hasher.final();
    }

    /// Perform a shallow hash, given that we know the tag of the field ahead of time.
    fn hashPayload(self: Type, comptime t: Tag, hasher: *std.hash.Wyhash) void {
        const fields = @typeInfo(t.Type()).Struct.fields;
        const pl = self.payload(t);
        comptime assert(std.mem.eql(u8, fields[0].name, "base"));
        inline for (fields[1..]) |field| { // Skip the 'base' field.
            std.hash.autoHashStrat(hasher, @field(pl, field.name), .DeepRecursive);
        }
    }

    /// Hash context that hashes and compares types in a shallow fashion, useful for type caches.
    pub const ShallowHashContext32 = struct {
        pub fn hash(self: @This(), t: Type) u32 {
            _ = self;
            return @truncate(u32, t.hashShallow());
        }
        pub fn eql(self: @This(), a: Type, b: Type, b_index: usize) bool {
            _ = self;
            _ = b_index;
            return a.eqlShallow(b);
        }
    };
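
    // Illustrative sketch, not part of the original file: the context plugs
    // directly into a std array hash map, which is how a shallow type cache
    // (such as a Module-level type_cache) can be declared.
    const ExampleTypeCache = std.ArrayHashMapUnmanaged(Type, spec.IdResultType, ShallowHashContext32, true);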

    /// Return the reference to any child type. Asserts the type is one of:
    /// - Vectors
    /// - Matrices
    /// - Images
    /// - SampledImages
    /// - Arrays
    /// - RuntimeArrays
    /// - Pointers
    pub fn childType(self: Type) Ref {
        return switch (self.tag()) {
            .vector => self.payload(.vector).component_type,
            .matrix => self.payload(.matrix).column_type,
            .image => self.payload(.image).sampled_type,
            .sampled_image => self.payload(.sampled_image).image_type,
            .array => self.payload(.array).element_type,
            .runtime_array => self.payload(.runtime_array).element_type,
            .pointer => self.payload(.pointer).child_type,
            else => unreachable,
        };
    }

    pub fn isInt(self: Type) bool {
        return switch (self.tag()) {
            .u8,
            .u16,
            .u32,
            .u64,
            .i8,
            .i16,
            .i32,
            .i64,
            .int,
            => true,
            else => false,
        };
    }

    pub fn isFloat(self: Type) bool {
        return switch (self.tag()) {
            .f16, .f32, .f64 => true,
            else => false,
        };
    }

    /// Returns the number of bits that make up an int or float type.
    /// Asserts type is either int or float.
    pub fn intFloatBits(self: Type) u16 {
        return switch (self.tag()) {
            .u8, .i8 => 8,
            .u16, .i16, .f16 => 16,
            .u32, .i32, .f32 => 32,
            .u64, .i64, .f64 => 64,
            .int => self.payload(.int).width,
            else => unreachable,
        };
    }

    /// Returns the signedness of an integer type.
    /// Asserts that the type is an int.
    pub fn intSignedness(self: Type) Signedness {
        return switch (self.tag()) {
            .u8, .u16, .u32, .u64 => .unsigned,
            .i8, .i16, .i32, .i64 => .signed,
            .int => self.payload(.int).signedness,
            else => unreachable,
        };
    }

    pub const Tag = enum(usize) {
        void,
        bool,
        sampler,
        event,
        device_event,
        reserve_id,
        queue,
        pipe_storage,
        named_barrier,
        u8,
        u16,
        u32,
        u64,
        i8,
        i16,
        i32,
        i64,
        f16,
        f32,
        f64,

        // After this, the tag requires a payload.
        int,
        vector,
        matrix,
        image,
        sampled_image,
        array,
        runtime_array,
        @"struct",
        @"opaque",
        pointer,
        function,
        pipe,

        pub const last_no_payload_tag = Tag.f64;
        pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;

        pub fn Type(comptime t: Tag) type {
            return switch (t) {
                .void,
                .bool,
                .sampler,
                .event,
                .device_event,
                .reserve_id,
                .queue,
                .pipe_storage,
                .named_barrier,
                .u8,
                .u16,
                .u32,
                .u64,
                .i8,
                .i16,
                .i32,
                .i64,
                .f16,
                .f32,
                .f64,
                => @compileError("Type Tag " ++ @tagName(t) ++ " has no payload"),
                .int => Payload.Int,
                .vector => Payload.Vector,
                .matrix => Payload.Matrix,
                .image => Payload.Image,
                .sampled_image => Payload.SampledImage,
                .array => Payload.Array,
                .runtime_array => Payload.RuntimeArray,
                .@"struct" => Payload.Struct,
                .@"opaque" => Payload.Opaque,
                .pointer => Payload.Pointer,
                .function => Payload.Function,
                .pipe => Payload.Pipe,
            };
        }
    };
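
    // Illustrative note, not part of the original file: `Tag.Type` maps each
    // payload-carrying tag to its payload struct at comptime, so for example
    // `Tag.Type(.vector)` is `Payload.Vector`, while payload-free tags are a
    // compile error.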

    pub const Payload = struct {
        tag: Tag,

        pub const Int = struct {
            base: Payload = .{ .tag = .int },
            width: u16,
            signedness: Signedness,
        };

        pub const Vector = struct {
            base: Payload = .{ .tag = .vector },
            component_type: Ref,
            component_count: u32,
        };

        pub const Matrix = struct {
            base: Payload = .{ .tag = .matrix },
            column_type: Ref,
            column_count: u32,
        };

        pub const Image = struct {
            base: Payload = .{ .tag = .image },
            sampled_type: Ref,
            dim: spec.Dim,
            depth: enum(u2) {
                no = 0,
                yes = 1,
                maybe = 2,
            },
            arrayed: bool,
            multisampled: bool,
            sampled: enum(u2) {
                known_at_runtime = 0,
                with_sampler = 1,
                without_sampler = 2,
            },
            format: spec.ImageFormat,
            access_qualifier: ?spec.AccessQualifier,
        };

        pub const SampledImage = struct {
            base: Payload = .{ .tag = .sampled_image },
            image_type: Ref,
        };

        pub const Array = struct {
            base: Payload = .{ .tag = .array },
            element_type: Ref,
            /// Note: Must be emitted as constant, not as literal!
            length: u32,
            /// Type has the 'ArrayStride' decoration.
            /// If zero, no stride is present.
            array_stride: u32 = 0,
        };

        pub const RuntimeArray = struct {
            base: Payload = .{ .tag = .runtime_array },
            element_type: Ref,
            /// Type has the 'ArrayStride' decoration.
            /// If zero, no stride is present.
            array_stride: u32 = 0,
        };

        pub const Struct = struct {
            base: Payload = .{ .tag = .@"struct" },
            members: []Member,
            name: []const u8 = "",
            decorations: StructDecorations = .{},

            /// Extra information for decorations, packed for efficiency. Fields are stored sequentially by
            /// order of the `members` slice and `MemberDecorations` struct.
            member_decoration_extra: []u32 = &.{},

            pub const Member = struct {
                ty: Ref,
                name: []const u8 = "",
                offset: MemberOffset = .none,
                decorations: MemberDecorations = .{},
            };

            pub const MemberOffset = enum(u32) { none = 0xFFFF_FFFF, _ };

            pub const StructDecorations = packed struct {
                /// Type has the 'Block' decoration.
                block: bool = false,
                /// Type has the 'BufferBlock' decoration.
                buffer_block: bool = false,
                /// Type has the 'GLSLShared' decoration.
                glsl_shared: bool = false,
                /// Type has the 'GLSLPacked' decoration.
                glsl_packed: bool = false,
                /// Type has the 'CPacked' decoration.
                c_packed: bool = false,
            };

            pub const MemberDecorations = packed struct {
                /// Matrix layout for (arrays of) matrices. If this field is not .none,
                /// then there is also an extra field containing the matrix stride corresponding
                /// to the 'MatrixStride' decoration.
                matrix_layout: enum(u2) {
                    /// Member has the 'RowMajor' decoration. The member type
                    /// must be a matrix or an array of matrices.
                    row_major,
                    /// Member has the 'ColMajor' decoration. The member type
                    /// must be a matrix or an array of matrices.
                    col_major,
                    /// Member is not a matrix or array of matrices.
                    none,
                } = .none,

                // Regular decorations, these do not imply extra fields.

                /// Member has the 'NoPerspective' decoration.
                no_perspective: bool = false,
                /// Member has the 'Flat' decoration.
                flat: bool = false,
                /// Member has the 'Patch' decoration.
                patch: bool = false,
                /// Member has the 'Centroid' decoration.
                centroid: bool = false,
                /// Member has the 'Sample' decoration.
                sample: bool = false,
                /// Member has the 'Invariant' decoration.
                /// Note: requires parent struct to have 'Block'.
                invariant: bool = false,
                /// Member has the 'Volatile' decoration.
                @"volatile": bool = false,
                /// Member has the 'Coherent' decoration.
                coherent: bool = false,
                /// Member has the 'NonWritable' decoration.
                non_writable: bool = false,
                /// Member has the 'NonReadable' decoration.
                non_readable: bool = false,

                // The following decorations all imply extra field(s).

                /// Member has the 'BuiltIn' decoration.
                /// This decoration has an extra field of type `spec.BuiltIn`.
                /// Note: If any member of a struct has the BuiltIn decoration, all members must have one.
                /// Note: Each builtin may only be reachable once for a particular entry point.
                /// Note: The member type may be constrained by a particular built-in, defined in the client API specification.
                builtin: bool = false,
                /// Member has the 'Stream' decoration.
                /// This member has an extra field of type `u32`.
                stream: bool = false,
                /// Member has the 'Location' decoration.
                /// This member has an extra field of type `u32`.
                location: bool = false,
                /// Member has the 'Component' decoration.
                /// This member has an extra field of type `u32`.
                component: bool = false,
                /// Member has the 'XfbBuffer' decoration.
                /// This member has an extra field of type `u32`.
                xfb_buffer: bool = false,
                /// Member has the 'XfbStride' decoration.
                /// This member has an extra field of type `u32`.
                xfb_stride: bool = false,
                /// Member has the 'UserSemantic' decoration.
                /// This member has an extra field of type `[]u8`, which is encoded
                /// as a `u32` containing the exact number of chars, followed by the string
                /// padded to a multiple of 4 bytes with zeroes.
                user_semantic: bool = false,
            };
        };

        pub const Opaque = struct {
            base: Payload = .{ .tag = .@"opaque" },
            name: []u8,
        };

        pub const Pointer = struct {
            base: Payload = .{ .tag = .pointer },
            storage_class: spec.StorageClass,
            child_type: Ref,
            /// Type has the 'ArrayStride' decoration.
            /// This is valid for pointers to elements of an array.
            /// If zero, no stride is present.
            array_stride: u32 = 0,
            /// If nonzero, type has the 'Alignment' decoration.
            alignment: u32 = 0,
            /// Type has the 'MaxByteOffset' decoration.
            max_byte_offset: ?u32 = null,
        };

        pub const Function = struct {
            base: Payload = .{ .tag = .function },
            return_type: Ref,
            parameters: []Ref,
        };

        pub const Pipe = struct {
            base: Payload = .{ .tag = .pipe },
            qualifier: spec.AccessQualifier,
        };
    };
};