spirv: spir-v dedicated type system

Robin Voetter
2022-01-22 01:49:47 +01:00
parent 1b6ebce0da
commit 98ee39d1b0
4 changed files with 854 additions and 137 deletions

View File

@@ -21,8 +21,8 @@ const IdResultType = spec.IdResultType;
const SpvModule = @import("spirv/Module.zig");
const SpvSection = @import("spirv/Section.zig");
const SpvType = @import("spirv/type.zig").Type;
const TypeCache = std.HashMapUnmanaged(Type, IdResultType, Type.HashContext64, std.hash_map.default_max_load_percentage);
const InstMap = std.AutoHashMapUnmanaged(Air.Inst.Index, IdRef);
const IncomingBlock = struct {
@@ -61,10 +61,6 @@ pub const DeclGen = struct {
/// A counter to keep track of how many `arg` instructions we've seen so far.
next_arg_index: u32,
/// A cache for zig types to prevent having to re-process a particular type. This structure is kept around
/// after a call to `gen` so that they don't have to be re-resolved for different decls.
type_cache: TypeCache = .{},
/// A map keeping track of which instruction generated which result-id.
inst_results: InstMap = .{},
@@ -159,7 +155,6 @@ pub const DeclGen = struct {
self.liveness = liveness;
self.args.items.len = 0;
self.next_arg_index = 0;
// Note: don't clear type_cache.
self.inst_results.clearRetainingCapacity();
self.blocks.clearRetainingCapacity();
self.current_block_label_id = undefined;
@@ -177,7 +172,6 @@ pub const DeclGen = struct {
/// Free resources owned by the DeclGen.
pub fn deinit(self: *DeclGen) void {
self.args.deinit(self.spv.gpa);
self.type_cache.deinit(self.spv.gpa);
self.inst_results.deinit(self.spv.gpa);
self.blocks.deinit(self.spv.gpa);
self.code.deinit(self.spv.gpa);
@@ -220,7 +214,7 @@ pub const DeclGen = struct {
/// Note that there is no such thing as nested blocks like in ZIR or AIR, so we don't need to
/// keep track of the previous block.
fn beginSpvBlock(self: *DeclGen, label_id: IdResult) !void {
try self.code.emit(self.spv.gpa, .OpLabel, .{.id_result = label_id});
try self.code.emit(self.spv.gpa, .OpLabel, .{ .id_result = label_id });
self.current_block_label_id = label_id.toRef();
}
@@ -317,9 +311,9 @@ pub const DeclGen = struct {
};
},
// As of yet, there is no vector support in the self-hosted compiler.
.Vector => self.fail("TODO: SPIR-V backend: implement arithmeticTypeInfo for Vector", .{}),
.Vector => self.todo("implement arithmeticTypeInfo for Vector", .{}),
// TODO: For which types is this the case?
else => self.fail("TODO: SPIR-V backend: implement arithmeticTypeInfo for {}", .{ty}),
else => self.todo("implement arithmeticTypeInfo for {}", .{ty}),
};
}
@@ -329,7 +323,7 @@ pub const DeclGen = struct {
const target = self.getTarget();
const section = &self.spv.sections.types_globals_constants;
const result_id = self.spv.allocId();
const result_type_id = try self.genType(ty);
const result_type_id = try self.resolveTypeId(ty);
if (val.isUndef()) {
try section.emit(self.spv.gpa, .OpUndef, .{ .id_result_type = result_type_id, .id_result = result_id });
@@ -341,7 +335,7 @@ pub const DeclGen = struct {
const int_info = ty.intInfo(target);
const backing_bits = self.backingIntBits(int_info.bits) orelse {
// Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits.
return self.fail("TODO: SPIR-V backend: implement composite int constants for {}", .{ty});
return self.todo("implement composite int constants for {}", .{ty});
};
// We can just use toSignedInt/toUnsignedInt here as it returns u64 - a type large enough to hold any
@@ -354,8 +348,8 @@ pub const DeclGen = struct {
var int_bits = if (ty.isSignedInt()) @bitCast(u64, val.toSignedInt()) else val.toUnsignedInt();
const value: spec.LiteralContextDependentNumber = switch (backing_bits) {
1...32 => .{.uint32 = @truncate(u32, int_bits)},
33...64 => .{.uint64 = int_bits},
1...32 => .{ .uint32 = @truncate(u32, int_bits) },
33...64 => .{ .uint64 = int_bits },
else => unreachable,
};
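
As a worked example of the bitcast-and-truncate scheme above: an i8 constant -1 comes out of toSignedInt as the i64 value -1, bitcasts to the u64 0xFFFF_FFFF_FFFF_FFFF, and, since an 8-bit backing type falls in the 1...32 arm, truncates to the uint32 literal 0xFFFF_FFFF — the sign-extended form SPIR-V expects for signed literals narrower than a word.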
@@ -375,14 +369,14 @@ pub const DeclGen = struct {
},
.Float => {
// At this point we are guaranteed that the target floating point type is supported, otherwise the function
// would have exited at genType(ty).
// would have exited at resolveTypeId(ty).
const value: spec.LiteralContextDependentNumber = switch (ty.floatBits(target)) {
// Prevent upcasting to f32 by bitcasting and writing as a uint32.
16 => .{.uint32 = @bitCast(u16, val.toFloat(f16))},
32 => .{.float32 = val.toFloat(f32)},
64 => .{.float64 = val.toFloat(f64)},
128 => unreachable, // Filtered out in the call to genType.
16 => .{ .uint32 = @bitCast(u16, val.toFloat(f16)) },
32 => .{ .float32 = val.toFloat(f32) },
64 => .{ .float64 = val.toFloat(f64) },
128 => unreachable, // Filtered out in the call to resolveTypeId.
// TODO: Insert case for long double when the layout for that is determined?
else => unreachable,
};
@@ -394,43 +388,43 @@ pub const DeclGen = struct {
});
},
.Void => unreachable,
else => return self.fail("TODO: SPIR-V backend: constant generation of type {}", .{ty}),
else => return self.todo("constant generation of type {}", .{ty}),
}
return result_id.toRef();
}
fn genType(self: *DeclGen, ty: Type) Error!IdResultType {
// We can't use getOrPut here so we can recursively generate types.
if (self.type_cache.get(ty)) |already_generated| {
return already_generated;
}
/// Turn a Zig type into a SPIR-V Type, and return its type result-id.
fn resolveTypeId(self: *DeclGen, ty: Type) !IdResultType {
return self.spv.typeResultId(try self.resolveType(ty));
}
/// Turn a Zig type into a SPIR-V Type, and return a reference to it.
fn resolveType(self: *DeclGen, ty: Type) Error!SpvType.Ref {
const target = self.getTarget();
const section = &self.spv.sections.types_globals_constants;
const result_id = self.spv.allocId();
switch (ty.zigTypeTag()) {
.Void => try section.emit(self.spv.gpa, .OpTypeVoid, .{.id_result = result_id}),
.Bool => try section.emit(self.spv.gpa, .OpTypeBool, .{.id_result = result_id}),
.Int => {
return switch (ty.zigTypeTag()) {
.Void => try self.spv.resolveType(SpvType.initTag(.void)),
.Bool => blk: {
// TODO: SPIR-V booleans are opaque. For local variables this is fine, but for structs
// members we want to use integer types instead.
break :blk try self.spv.resolveType(SpvType.initTag(.bool));
},
.Int => blk: {
const int_info = ty.intInfo(target);
const backing_bits = self.backingIntBits(int_info.bits) orelse {
// Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits.
return self.fail("TODO: SPIR-V backend: implement composite int {}", .{ty});
// TODO: Integers too big for any native type are represented as "composite integers":
// An array of largestSupportedIntBits.
return self.todo("Implement composite int type {}", .{ty});
};
// TODO: If backing_bits != int_info.bits, a duplicate type might be generated here.
try section.emit(self.spv.gpa, .OpTypeInt, .{
.id_result = result_id,
const payload = try self.spv.arena.create(SpvType.Payload.Int);
payload.* = .{
.width = backing_bits,
.signedness = switch (int_info.signedness) {
.unsigned => @as(spec.LiteralInteger, 0),
.signed => 1,
},
});
.signedness = int_info.signedness,
};
break :blk try self.spv.resolveType(SpvType.initPayload(&payload.base));
},
.Float => {
.Float => blk: {
// Unlike with integers, we cannot (and do not want to) emulate floating point types
// using other floating point types, so if the float is not supported, just return an error.
const bits = ty.floatBits(target);
@@ -446,39 +440,34 @@ pub const DeclGen = struct {
return self.fail("Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits});
}
try section.emit(self.spv.gpa, .OpTypeFloat, .{.id_result = result_id, .width = bits});
const payload = try self.spv.arena.create(SpvType.Payload.Float);
payload.* = .{
.width = bits,
};
break :blk try self.spv.resolveType(SpvType.initPayload(&payload.base));
},
.Fn => {
.Fn => blk: {
// We only support zig-calling-convention functions, no varargs.
if (ty.fnCallingConvention() != .Unspecified)
return self.fail("Unsupported calling convention for SPIR-V", .{});
if (ty.fnIsVarArgs())
return self.fail("VarArgs unsupported for SPIR-V", .{});
return self.fail("VarArgs functions are unsupported for SPIR-V", .{});
// In order to avoid a temporary here, first generate all the required types and then simply look them up
// when generating the function type.
const params = ty.fnParamLen();
var i: usize = 0;
while (i < params) : (i += 1) {
_ = try self.genType(ty.fnParamType(i));
const param_types = try self.spv.arena.alloc(SpvType.Ref, ty.fnParamLen());
for (param_types) |*param, i| {
param.* = try self.resolveType(ty.fnParamType(i));
}
const return_type_id = try self.genType(ty.fnReturnType());
const return_type = try self.resolveType(ty.fnReturnType());
try section.emitRaw(self.spv.gpa, .OpTypeFunction, 2 + @intCast(u16, ty.fnParamLen()));
// result id + result type id + parameter type ids.
section.writeOperand(IdResult, result_id);
section.writeOperand(IdResultType, return_type_id);
i = 0;
while (i < params) : (i += 1) {
const param_type_id = self.type_cache.get(ty.fnParamType(i)).?;
section.writeOperand(IdRef, param_type_id.toRef());
}
const payload = try self.spv.arena.create(SpvType.Payload.Function);
payload.* = .{ .return_type = return_type, .parameters = param_types };
break :blk try self.spv.resolveType(SpvType.initPayload(&payload.base));
},
.Pointer => {
// This type can now be properly implemented, but we still need to implement the storage classes as proper address spaces.
return self.todo("Implement type Pointer properly", .{});
},
// When recursively generating a type, we cannot infer the pointer's storage class. See genPointerType.
.Pointer => return self.fail("Cannot create pointer with unknown storage class", .{}),
.Vector => {
// Although not 100% the same, Zig vectors map quite neatly to SPIR-V vectors (including many integer and float operations
// which work on them), so simply use those.
@@ -488,23 +477,21 @@ pub const DeclGen = struct {
// is adequate at all for this.
// TODO: It seems vectors are not yet supported by the self-hosted compiler itself.
return self.fail("TODO: SPIR-V backend: implement type Vector", .{});
return self.todo("Implement type Vector", .{});
},
.Null,
.Undefined,
.EnumLiteral,
.ComptimeFloat,
.ComptimeInt,
.Type,
=> unreachable, // Must be const or comptime.
=> unreachable, // Must be comptime.
.BoundFn => unreachable, // this type will be deleted from the language.
else => |tag| return self.fail("TODO: SPIR-V backend: implement type {}s", .{tag}),
}
try self.type_cache.putNoClobber(self.spv.gpa, ty, result_id.toResultType());
return result_id.toResultType();
else => |tag| return self.todo("Implement zig type '{}'", .{tag}),
};
}
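
A minimal caller-side sketch of the new split (not part of this diff; it only reuses identifiers that appear above): a Zig type is interned once into a stable SpvType.Ref, and its result-id is fetched from the cache whenever an instruction needs it.

// Hedged sketch: intern once, look up cheaply afterwards.
const bool_ref = try self.resolveType(Type.initTag(.bool)); // Zig type -> SpvType.Ref
const bool_type_id = self.spv.typeResultId(bool_ref); // Ref -> cached IdResultType
// Or in one step, as most call sites below do:
const bool_type_id_2 = try self.resolveTypeId(Type.initTag(.bool));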
/// SPIR-V requires pointers to have a storage class (address space), and so we have a special function for that.
@@ -517,7 +504,7 @@ pub const DeclGen = struct {
// TODO: There are many constraints which are ignored for now: We may only create pointers to certain types, and to other types
// if more capabilities are enabled. For example, we may only create pointers to f16 if Float16Buffer is enabled.
// These also relate to the pointer's address space.
const child_id = try self.genType(ty.elemType());
const child_id = try self.resolveTypeId(ty.elemType());
try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpTypePointer, .{
.id_result = result_id,
@@ -534,9 +521,9 @@ pub const DeclGen = struct {
if (decl.val.castTag(.function)) |_| {
assert(decl.ty.zigTypeTag() == .Fn);
const prototype_id = try self.genType(decl.ty);
const prototype_id = try self.resolveTypeId(decl.ty);
try self.spv.sections.functions.emit(self.spv.gpa, .OpFunction, .{
.id_result_type = self.type_cache.get(decl.ty.fnReturnType()).?, // This type should be generated along with the prototype.
.id_result_type = try self.resolveTypeId(decl.ty.fnReturnType()),
.id_result = result_id,
.function_control = .{}, // TODO: We can set inline here if the type requires it.
.function_type = prototype_id.toRef(),
@@ -547,7 +534,7 @@ pub const DeclGen = struct {
try self.args.ensureUnusedCapacity(self.spv.gpa, params);
while (i < params) : (i += 1) {
const param_type_id = self.type_cache.get(decl.ty.fnParamType(i)).?;
const param_type_id = try self.resolveTypeId(decl.ty.fnParamType(i));
const arg_result_id = self.spv.allocId();
try self.spv.sections.functions.emit(self.spv.gpa, .OpFunctionParameter, .{
.id_result_type = param_type_id,
@@ -573,7 +560,8 @@ pub const DeclGen = struct {
try self.spv.sections.functions.append(self.spv.gpa, self.code);
try self.spv.sections.functions.emit(self.spv.gpa, .OpFunctionEnd, {});
} else {
return self.fail("TODO: SPIR-V backend: generate decl type {}", .{decl.ty.zigTypeTag()});
// TODO
// return self.todo("generate decl type {}", .{decl.ty.zigTypeTag()});
}
}
@@ -622,7 +610,7 @@ pub const DeclGen = struct {
.unreach => return self.airUnreach(),
// zig fmt: on
else => |tag| return self.fail("TODO: SPIR-V backend: implement AIR tag {s}", .{
else => |tag| return self.todo("implement AIR tag {s}", .{
@tagName(tag),
}),
};
@@ -635,7 +623,7 @@ pub const DeclGen = struct {
const lhs_id = try self.resolve(bin_op.lhs);
const rhs_id = try self.resolve(bin_op.rhs);
const result_id = self.spv.allocId();
const result_type_id = try self.genType(self.air.typeOfIndex(inst));
const result_type_id = try self.resolveTypeId(self.air.typeOfIndex(inst));
try self.code.emit(self.spv.gpa, opcode, .{
.id_result_type = result_type_id,
.id_result = result_id,
@@ -654,7 +642,7 @@ pub const DeclGen = struct {
const rhs_id = try self.resolve(bin_op.rhs);
const result_id = self.spv.allocId();
const result_type_id = try self.genType(ty);
const result_type_id = try self.resolveTypeId(ty);
assert(self.air.typeOf(bin_op.lhs).eql(ty));
assert(self.air.typeOf(bin_op.rhs).eql(ty));
@@ -665,10 +653,10 @@ pub const DeclGen = struct {
const opcode_index: usize = switch (info.class) {
.composite_integer => {
return self.fail("TODO: SPIR-V backend: binary operations for composite integers", .{});
return self.todo("binary operations for composite integers", .{});
},
.strange_integer => {
return self.fail("TODO: SPIR-V backend: binary operations for strange integers", .{});
return self.todo("binary operations for strange integers", .{});
},
.integer => switch (info.signedness) {
.signed => @as(usize, 1),
@@ -702,7 +690,7 @@ pub const DeclGen = struct {
const lhs_id = try self.resolve(bin_op.lhs);
const rhs_id = try self.resolve(bin_op.rhs);
const result_id = self.spv.allocId();
const result_type_id = try self.genType(Type.initTag(.bool));
const result_type_id = try self.resolveTypeId(Type.initTag(.bool));
const op_ty = self.air.typeOf(bin_op.lhs);
assert(op_ty.eql(self.air.typeOf(bin_op.rhs)));
@@ -712,10 +700,10 @@ pub const DeclGen = struct {
const opcode_index: usize = switch (info.class) {
.composite_integer => {
return self.fail("TODO: SPIR-V backend: binary operations for composite integers", .{});
return self.todo("binary operations for composite integers", .{});
},
.strange_integer => {
return self.fail("TODO: SPIR-V backend: comparison for strange integers", .{});
return self.todo("comparison for strange integers", .{});
},
.float => 0,
.bool => 1,
@@ -746,7 +734,7 @@ pub const DeclGen = struct {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_id = try self.resolve(ty_op.operand);
const result_id = self.spv.allocId();
const result_type_id = try self.genType(Type.initTag(.bool));
const result_type_id = try self.resolveTypeId(Type.initTag(.bool));
try self.code.emit(self.spv.gpa, .OpLogicalNot, .{
.id_result_type = result_type_id,
.id_result = result_id,
@@ -813,9 +801,9 @@ pub const DeclGen = struct {
const result_id = self.spv.allocId();
// TODO: OpPhi is limited in the types that it may produce, such as pointers. Figure out which other types
// are not allowed to be created from a phi node, and throw an error for those. For now, genType already throws
// are not allowed to be created from a phi node, and throw an error for those. For now, resolveTypeId already throws
// an error for pointers.
const result_type_id = try self.genType(ty);
const result_type_id = try self.resolveTypeId(ty);
_ = result_type_id;
try self.code.emitRaw(self.spv.gpa, .OpPhi, 2 + @intCast(u16, incoming_blocks.items.len * 2)); // result type + result + variable/parent...
@@ -838,7 +826,7 @@ pub const DeclGen = struct {
try block.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id });
}
try self.code.emit(self.spv.gpa, .OpBranch, .{.target_label = block.label_id});
try self.code.emit(self.spv.gpa, .OpBranch, .{ .target_label = block.label_id });
}
fn airCondBr(self: *DeclGen, inst: Air.Inst.Index) !void {
@@ -882,7 +870,7 @@ pub const DeclGen = struct {
const operand_id = try self.resolve(ty_op.operand);
const ty = self.air.typeOfIndex(inst);
const result_type_id = try self.genType(ty);
const result_type_id = try self.resolveTypeId(ty);
const result_id = self.spv.allocId();
const access = spec.MemoryAccess.Extended{
@@ -906,13 +894,13 @@ pub const DeclGen = struct {
const loop_label_id = self.spv.allocId();
// Jump to the loop entry point
try self.code.emit(self.spv.gpa, .OpBranch, .{.target_label = loop_label_id.toRef()});
try self.code.emit(self.spv.gpa, .OpBranch, .{ .target_label = loop_label_id.toRef() });
// TODO: Look into OpLoopMerge.
try self.beginSpvBlock(loop_label_id);
try self.genBody(body);
try self.code.emit(self.spv.gpa, .OpBranch, .{.target_label = loop_label_id.toRef()});
try self.code.emit(self.spv.gpa, .OpBranch, .{ .target_label = loop_label_id.toRef() });
}
fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void {
@@ -920,7 +908,7 @@ pub const DeclGen = struct {
const operand_ty = self.air.typeOf(operand);
if (operand_ty.hasRuntimeBits()) {
const operand_id = try self.resolve(operand);
try self.code.emit(self.spv.gpa, .OpReturnValue, .{.value = operand_id});
try self.code.emit(self.spv.gpa, .OpReturnValue, .{ .value = operand_id });
} else {
try self.code.emit(self.spv.gpa, .OpReturn, {});
}

View File

@@ -9,14 +9,20 @@ const Module = @This();
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const ZigDecl = @import("../../Module.zig").Decl;
const spec = @import("spec.zig");
const Word = spec.Word;
const IdRef = spec.IdRef;
const IdResult = spec.IdResult;
const IdResultType = spec.IdResultType;
const Section = @import("Section.zig");
const Type = @import("type.zig").Type;
const TypeCache = std.ArrayHashMapUnmanaged(Type, IdResultType, Type.ShallowHashContext32, true);
/// A general-purpose allocator which may be used to allocate resources for this module
gpa: Allocator,
@@ -57,6 +63,12 @@ next_result_id: Word,
/// just the ones for OpLine. Note that OpLine needs the result of OpString, and not that of OpSource.
source_file_names: std.StringHashMapUnmanaged(IdRef) = .{},
/// SPIR-V type cache. Note that according to SPIR-V spec section 2.8, Types and Variables, non-pointer
/// non-aggregate types (which includes matrices and vectors) must have a _unique_ representation in
/// the final binary.
/// Note: Uses ArrayHashMap which is insertion ordered, so that we may refer to other types by index (Type.Ref).
type_cache: TypeCache = .{},
pub fn init(gpa: Allocator, arena: Allocator) Module {
return .{
.gpa = gpa,
@@ -75,44 +87,20 @@ pub fn deinit(self: *Module) void {
self.sections.functions.deinit(self.gpa);
self.source_file_names.deinit(self.gpa);
self.type_cache.deinit(self.gpa);
self.* = undefined;
}
pub fn allocId(self: *Module) spec.IdResult {
defer self.next_result_id += 1;
return .{.id = self.next_result_id};
return .{ .id = self.next_result_id };
}
pub fn idBound(self: Module) Word {
return self.next_result_id;
}
/// Fetch the result-id of an OpString instruction that encodes the path of the source
/// file of the decl. This function may also emit an OpSource with source-level information regarding
/// the decl.
pub fn resolveSourceFileName(self: *Module, decl: *ZigDecl) !IdRef {
const path = decl.getFileScope().sub_file_path;
const result = try self.source_file_names.getOrPut(self.gpa, path);
if (!result.found_existing) {
const file_result_id = self.allocId();
result.value_ptr.* = file_result_id.toRef();
try self.sections.debug_strings.emit(self.gpa, .OpString, .{
.id_result = file_result_id,
.string = path,
});
try self.sections.debug_strings.emit(self.gpa, .OpSource, .{
.source_language = .Unknown, // TODO: Register Zig source language.
.version = 0, // TODO: Zig version as u32?
.file = file_result_id.toRef(),
.source = null, // TODO: Store actual source also?
});
}
return result.value_ptr.*;
}
/// Emit this module as a spir-v binary.
pub fn flush(self: Module, file: std.fs.File) !void {
// See SPIR-V Spec section 2.3, "Physical Layout of a SPIR-V Module and Instruction"
@@ -151,3 +139,290 @@ pub fn flush(self: Module, file: std.fs.File) !void {
try file.setEndPos(file_size);
try file.pwritevAll(&iovc_buffers, 0);
}
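
For orientation, the physical layout that flush implements opens with a five-word header. A hedged sketch of those words (layout per SPIR-V spec section 2.3; the version and generator values here are illustrative, not necessarily what this backend writes):

const header = [5]Word{
    0x07230203, // SPIR-V magic number
    0x00010000, // version 1.0, encoded as 0x00MMmm00 (illustrative)
    0, // generator magic number; 0 means unregistered tool
    self.idBound(), // id bound: every result-id in the module is < this
    0, // schema, reserved and must be 0
};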
/// Fetch the result-id of an OpString instruction that encodes the path of the source
/// file of the decl. This function may also emit an OpSource with source-level information regarding
/// the decl.
pub fn resolveSourceFileName(self: *Module, decl: *ZigDecl) !IdRef {
const path = decl.getFileScope().sub_file_path;
const result = try self.source_file_names.getOrPut(self.gpa, path);
if (!result.found_existing) {
const file_result_id = self.allocId();
result.value_ptr.* = file_result_id.toRef();
try self.sections.debug_strings.emit(self.gpa, .OpString, .{
.id_result = file_result_id,
.string = path,
});
try self.sections.debug_strings.emit(self.gpa, .OpSource, .{
.source_language = .Unknown, // TODO: Register Zig source language.
.version = 0, // TODO: Zig version as u32?
.file = file_result_id.toRef(),
.source = null, // TODO: Store actual source also?
});
}
return result.value_ptr.*;
}
/// Resolve a reference to a SPIR-V type. This function deduplicates the type as appropriate,
/// and returns the cached reference if one exists.
/// Note: This function does not attempt to perform any validation on the type.
/// The type is emitted in a shallow fashion; any child types should already
/// be emitted at this point.
pub fn resolveType(self: *Module, ty: Type) !Type.Ref {
const result = try self.type_cache.getOrPut(self.gpa, ty);
if (!result.found_existing) {
result.value_ptr.* = try self.emitType(ty);
}
return result.index;
}
pub fn resolveTypeId(self: *Module, ty: Type) !IdRef {
return self.typeResultId(try self.resolveType(ty));
}
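
The getOrPut above leans on ArrayHashMap semantics: insertion order is preserved and entries are never relocated, so the entry index can serve as the stable Type.Ref. A standalone sketch of that pattern (illustrative names, not module API; `std` is already imported at the top of this file):

// Not part of this commit; demonstrates index-as-reference interning.
test "getOrPut index as a stable Ref" {
    const gpa = std.testing.allocator;
    var cache: std.ArrayHashMapUnmanaged(u32, u32, std.array_hash_map.AutoContext(u32), true) = .{};
    defer cache.deinit(gpa);

    const first = try cache.getOrPut(gpa, 123);
    if (!first.found_existing) first.value_ptr.* = 42; // the "emitType" step runs only once
    const second = try cache.getOrPut(gpa, 123);

    try std.testing.expect(second.found_existing);
    try std.testing.expectEqual(first.index, second.index); // same Ref both times
}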
/// Get the result-id of a particular type, by reference. Asserts type_ref is valid.
pub fn typeResultId(self: Module, type_ref: Type.Ref) IdResultType {
return self.type_cache.values()[type_ref];
}
/// Get the result-id of a particular type as IdRef, by Type.Ref. Asserts type_ref is valid.
pub fn typeRefId(self: Module, type_ref: Type.Ref) IdRef {
return self.type_cache.values()[type_ref].toRef();
}
/// Unconditionally emit a spir-v type into the appropriate section.
/// Note: If this function is called with a type that is already generated, it may yield an invalid module
/// as non-pointer non-aggregate types must be unique!
/// Note: This function does not attempt to perform any validation on the type.
/// The type is emitted in a shallow fashion; any child types should already
/// be emitted at this point.
pub fn emitType(self: *Module, ty: Type) !IdResultType {
const result_id = self.allocId();
const ref_id = result_id.toRef();
const types = &self.sections.types_globals_constants;
const annotations = &self.sections.annotations;
const result_id_operand = .{ .id_result = result_id };
switch (ty.tag()) {
.void => try types.emit(self.gpa, .OpTypeVoid, result_id_operand),
.bool => try types.emit(self.gpa, .OpTypeBool, result_id_operand),
.int => try types.emit(self.gpa, .OpTypeInt, .{
.id_result = result_id,
.width = ty.payload(.int).width,
.signedness = switch (ty.payload(.int).signedness) {
.unsigned => @as(spec.LiteralInteger, 0),
.signed => 1,
},
}),
.float => try types.emit(self.gpa, .OpTypeFloat, .{
.id_result = result_id,
.width = ty.payload(.float).width,
}),
.vector => try types.emit(self.gpa, .OpTypeVector, .{
.id_result = result_id,
.component_type = self.typeResultId(ty.childType()).toRef(),
.component_count = ty.payload(.vector).component_count,
}),
.matrix => try types.emit(self.gpa, .OpTypeMatrix, .{
.id_result = result_id,
.column_type = self.typeResultId(ty.childType()).toRef(),
.column_count = ty.payload(.matrix).column_count,
}),
.image => {
const info = ty.payload(.image);
try types.emit(self.gpa, .OpTypeImage, .{
.id_result = result_id,
.sampled_type = self.typeResultId(ty.childType()).toRef(),
.dim = info.dim,
.depth = @enumToInt(info.depth),
.arrayed = @boolToInt(info.arrayed),
.ms = @boolToInt(info.multisampled),
.sampled = @enumToInt(info.sampled),
.image_format = info.format,
.access_qualifier = info.access_qualifier,
});
},
.sampler => try types.emit(self.gpa, .OpTypeSampler, result_id_operand),
.sampled_image => try types.emit(self.gpa, .OpTypeSampledImage, .{
.id_result = result_id,
.image_type = self.typeResultId(ty.childType()).toRef(),
}),
.array => {
const info = ty.payload(.array);
assert(info.length != 0);
try types.emit(self.gpa, .OpTypeArray, .{
.id_result = result_id,
.element_type = self.typeResultId(ty.childType()).toRef(),
.length = .{ .id = 0 }, // TODO: info.length must be emitted as constant!
});
if (info.array_stride != 0) {
try annotations.decorate(self.gpa, ref_id, .{ .ArrayStride = .{ .array_stride = info.array_stride } });
}
},
.runtime_array => {
const info = ty.payload(.runtime_array);
try types.emit(self.gpa, .OpTypeRuntimeArray, .{
.id_result = result_id,
.element_type = self.typeResultId(ty.childType()).toRef(),
});
if (info.array_stride != 0) {
try annotations.decorate(self.gpa, ref_id, .{ .ArrayStride = .{ .array_stride = info.array_stride } });
}
},
.@"struct" => {
const info = ty.payload(.@"struct");
try types.emitRaw(self.gpa, .OpTypeStruct, 1 + info.members.len);
types.writeOperand(IdResult, result_id);
for (info.members) |member| {
types.writeOperand(IdRef, self.typeResultId(member.ty).toRef());
}
try self.decorateStruct(ref_id, info);
},
.@"opaque" => try types.emit(self.gpa, .OpTypeOpaque, .{
.id_result = result_id,
.literal_string = ty.payload(.@"opaque").name,
}),
.pointer => {
const info = ty.payload(.pointer);
try types.emit(self.gpa, .OpTypePointer, .{
.id_result = result_id,
.storage_class = info.storage_class,
.type = self.typeResultId(ty.childType()).toRef(),
});
if (info.array_stride != 0) {
try annotations.decorate(self.gpa, ref_id, .{ .ArrayStride = .{ .array_stride = info.array_stride } });
}
if (info.alignment) |alignment| {
try annotations.decorate(self.gpa, ref_id, .{ .Alignment = .{ .alignment = alignment } });
}
if (info.max_byte_offset) |max_byte_offset| {
try annotations.decorate(self.gpa, ref_id, .{ .MaxByteOffset = .{ .max_byte_offset = max_byte_offset } });
}
},
.function => {
const info = ty.payload(.function);
try types.emitRaw(self.gpa, .OpTypeFunction, 2 + info.parameters.len);
types.writeOperand(IdResult, result_id);
types.writeOperand(IdRef, self.typeResultId(info.return_type).toRef());
for (info.parameters) |parameter_type| {
types.writeOperand(IdRef, self.typeResultId(parameter_type).toRef());
}
},
.event => try types.emit(self.gpa, .OpTypeEvent, result_id_operand),
.device_event => try types.emit(self.gpa, .OpTypeDeviceEvent, result_id_operand),
.reserve_id => try types.emit(self.gpa, .OpTypeReserveId, result_id_operand),
.queue => try types.emit(self.gpa, .OpTypeQueue, result_id_operand),
.pipe => try types.emit(self.gpa, .OpTypePipe, .{
.id_result = result_id,
.qualifier = ty.payload(.pipe).qualifier,
}),
.pipe_storage => try types.emit(self.gpa, .OpTypePipeStorage, result_id_operand),
.named_barrier => try types.emit(self.gpa, .OpTypeNamedBarrier, result_id_operand),
}
return result_id.toResultType();
}
fn decorateStruct(self: *Module, target: IdRef, info: *const Type.Payload.Struct) !void {
const annotations = &self.sections.annotations;
// Decorations for the struct type itself.
if (info.decorations.block)
try annotations.decorate(self.gpa, target, .Block);
if (info.decorations.buffer_block)
try annotations.decorate(self.gpa, target, .BufferBlock);
if (info.decorations.glsl_shared)
try annotations.decorate(self.gpa, target, .GLSLShared);
if (info.decorations.glsl_packed)
try annotations.decorate(self.gpa, target, .GLSLPacked);
if (info.decorations.c_packed)
try annotations.decorate(self.gpa, target, .CPacked);
// Decorations for the struct members.
const extra = info.member_decoration_extra;
var extra_i: u32 = 0;
for (info.members) |member, i| {
const d = member.decorations;
const index = @intCast(Word, i);
switch (d.matrix_layout) {
.row_major => try annotations.decorateMember(self.gpa, target, index, .RowMajor),
.col_major => try annotations.decorateMember(self.gpa, target, index, .ColMajor),
.none => {},
}
if (d.matrix_layout != .none) {
try annotations.decorateMember(self.gpa, target, index, .{
.MatrixStride = .{ .matrix_stride = extra[extra_i] },
});
extra_i += 1;
}
if (d.no_perspective)
try annotations.decorateMember(self.gpa, target, index, .NoPerspective);
if (d.flat)
try annotations.decorateMember(self.gpa, target, index, .Flat);
if (d.patch)
try annotations.decorateMember(self.gpa, target, index, .Patch);
if (d.centroid)
try annotations.decorateMember(self.gpa, target, index, .Centroid);
if (d.sample)
try annotations.decorateMember(self.gpa, target, index, .Sample);
if (d.invariant)
try annotations.decorateMember(self.gpa, target, index, .Invariant);
if (d.@"volatile")
try annotations.decorateMember(self.gpa, target, index, .Volatile);
if (d.coherent)
try annotations.decorateMember(self.gpa, target, index, .Coherent);
if (d.non_writable)
try annotations.decorateMember(self.gpa, target, index, .NonWritable);
if (d.non_readable)
try annotations.decorateMember(self.gpa, target, index, .NonReadable);
if (d.builtin) {
try annotations.decorateMember(self.gpa, target, index, .{
.BuiltIn = .{ .built_in = @intToEnum(spec.BuiltIn, extra[extra_i]) },
});
extra_i += 1;
}
if (d.stream) {
try annotations.decorateMember(self.gpa, target, index, .{
.Stream = .{ .stream_number = extra[extra_i] },
});
extra_i += 1;
}
if (d.location) {
try annotations.decorateMember(self.gpa, target, index, .{
.Location = .{ .location = extra[extra_i] },
});
extra_i += 1;
}
if (d.component) {
try annotations.decorateMember(self.gpa, target, index, .{
.Component = .{ .component = extra[extra_i] },
});
extra_i += 1;
}
if (d.xfb_buffer) {
try annotations.decorateMember(self.gpa, target, index, .{
.XfbBuffer = .{ .xfb_buffer_number = extra[extra_i] },
});
extra_i += 1;
}
if (d.xfb_stride) {
try annotations.decorateMember(self.gpa, target, index, .{
.XfbStride = .{ .xfb_stride = extra[extra_i] },
});
extra_i += 1;
}
if (d.user_semantic) {
const len = extra[extra_i];
extra_i += 1;
const semantic = @ptrCast([*]const u8, &extra[extra_i])[0..len];
try annotations.decorateMember(self.gpa, target, index, .{
.UserSemantic = .{ .semantic = semantic },
});
extra_i += std.math.divCeil(u32, len, @sizeOf(u32)) catch unreachable;
}
}
}
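
Because the extra-data layout above is implicit, here is a hedged producer-side sketch (a hypothetical helper, not in this commit; `spec.BuiltIn.Position` is an assumed variant name). Values must be appended in exactly the order decorateStruct reads them per member: the matrix stride first when matrix_layout != .none, then the builtin, stream, location, component, xfb_buffer and xfb_stride words, and finally the user-semantic length followed by its zero-padded bytes.

// Hypothetical packing for one member with `.builtin = true` and `.location = true`.
fn packMemberExtra(gpa: Allocator) ![]u32 {
    var extra = std.ArrayList(u32).init(gpa);
    errdefer extra.deinit();
    try extra.append(@enumToInt(spec.BuiltIn.Position)); // read by the d.builtin branch
    try extra.append(0); // read by the d.location branch: location 0
    return extra.toOwnedSlice();
}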

View File

@@ -32,11 +32,7 @@ pub fn toWords(section: Section) []Word {
}
/// Append the instructions from another section into this section.
pub fn append(
section: *Section,
allocator: Allocator,
other_section: Section
) !void {
pub fn append(section: *Section, allocator: Allocator, other_section: Section) !void {
try section.instructions.appendSlice(allocator, other_section.instructions.items);
}
@@ -64,6 +60,34 @@ pub fn emit(
section.writeOperands(opcode.Operands(), operands);
}
/// Decorate a result-id.
pub fn decorate(
section: *Section,
allocator: Allocator,
target: spec.IdRef,
decoration: spec.Decoration.Extended,
) !void {
try section.emit(allocator, .OpDecorate, .{
.target = target,
.decoration = decoration,
});
}
/// Decorate a result-id which is a member of some struct.
pub fn decorateMember(
section: *Section,
allocator: Allocator,
structure_type: spec.IdRef,
member: u32,
decoration: spec.Decoration.Extended,
) !void {
try section.emit(allocator, .OpMemberDecorate, .{
.structure_type = structure_type,
.member = member,
.decoration = decoration,
});
}
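
For illustration, these helpers are called exactly as in Module.emitType and Module.decorateStruct; in this fragment, gpa, array_type_id and struct_type_id stand in for real values:

// Annotate an array type's stride, and a struct member's matrix layout.
try annotations.decorate(gpa, array_type_id, .{ .ArrayStride = .{ .array_stride = 16 } });
try annotations.decorateMember(gpa, struct_type_id, 0, .RowMajor);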
pub fn writeWord(section: *Section, word: Word) void {
section.instructions.appendAssumeCapacity(word);
}
@@ -93,10 +117,7 @@ fn writeOperands(section: *Section, comptime Operands: type, operands: Operands)
pub fn writeOperand(section: *Section, comptime Operand: type, operand: Operand) void {
switch (Operand) {
spec.IdResultType,
spec.IdResult,
spec.IdRef
=> section.writeWord(operand.id),
spec.IdResultType, spec.IdResult, spec.IdRef => section.writeWord(operand.id),
spec.LiteralInteger => section.writeWord(operand),
@@ -320,8 +341,8 @@ test "SPIR-V Section emit() - simple" {
defer section.deinit(std.testing.allocator);
try section.emit(std.testing.allocator, .OpUndef, .{
.id_result_type = .{.id = 0},
.id_result = .{.id = 1},
.id_result_type = .{ .id = 0 },
.id_result = .{ .id = 1 },
});
try testing.expectEqualSlices(Word, &.{
@@ -338,7 +359,7 @@ test "SPIR-V Section emit() - string" {
try section.emit(std.testing.allocator, .OpSource, .{
.source_language = .Unknown,
.version = 123,
.file = .{.id = 456},
.file = .{ .id = 456 },
.source = "pub fn main() void {}",
});
@@ -361,8 +382,8 @@ test "SPIR-V Section emit()- extended mask" {
defer section.deinit(std.testing.allocator);
try section.emit(std.testing.allocator, .OpLoopMerge, .{
.merge_block = .{.id = 10},
.continue_target = .{.id = 20},
.merge_block = .{ .id = 10 },
.continue_target = .{ .id = 20 },
.loop_control = .{
.Unroll = true,
.DependencyLength = .{
@@ -375,7 +396,7 @@ test "SPIR-V Section emit()- extended mask" {
(@as(Word, 5) << 16) | @enumToInt(Opcode.OpLoopMerge),
10,
20,
@bitCast(Word, spec.LoopControl{.Unroll = true, .DependencyLength = true}),
@bitCast(Word, spec.LoopControl{ .Unroll = true, .DependencyLength = true }),
2,
}, section.instructions.items);
}
@@ -385,9 +406,9 @@ test "SPIR-V Section emit() - extended union" {
defer section.deinit(std.testing.allocator);
try section.emit(std.testing.allocator, .OpExecutionMode, .{
.entry_point = .{.id = 888},
.entry_point = .{ .id = 888 },
.mode = .{
.LocalSize = .{.x_size = 4, .y_size = 8, .z_size = 16},
.LocalSize = .{ .x_size = 4, .y_size = 8, .z_size = 16 },
},
});

src/codegen/spirv/type.zig (new file, 433 lines)

View File

@@ -0,0 +1,433 @@
//! This module models a SPIR-V Type. These are distinct from Zig types; some SPIR-V types
//! are not directly representable in Zig.
const std = @import("std");
const assert = std.debug.assert;
const spec = @import("spec.zig");
pub const Type = extern union {
tag_if_small_enough: Tag,
ptr_otherwise: *Payload,
/// A reference to another SPIR-V type.
pub const Ref = usize;
pub fn initTag(comptime small_tag: Tag) Type {
comptime assert(@enumToInt(small_tag) < Tag.no_payload_count);
return .{ .tag_if_small_enough = small_tag };
}
pub fn initPayload(pl: *Payload) Type {
assert(@enumToInt(pl.tag) >= Tag.no_payload_count);
return .{ .ptr_otherwise = pl };
}
pub fn tag(self: Type) Tag {
if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) {
return self.tag_if_small_enough;
} else {
return self.ptr_otherwise.tag;
}
}
pub fn castTag(self: Type, comptime t: Tag) ?*t.Type() {
if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count)
return null;
if (self.ptr_otherwise.tag == t)
return self.payload(t);
return null;
}
/// Access the payload of a type directly.
pub fn payload(self: Type, comptime t: Tag) *t.Type() {
assert(self.tag() == t);
return @fieldParentPtr(t.Type(), "base", self.ptr_otherwise);
}
/// Perform a shallow equality test, comparing two types while assuming that any child types
/// are equal only if their references are equal.
pub fn eqlShallow(a: Type, b: Type) bool {
if (a.tag_if_small_enough == b.tag_if_small_enough)
return true;
const tag_a = a.tag();
const tag_b = b.tag();
if (tag_a != tag_b)
return false;
inline for (@typeInfo(Tag).Enum.fields) |field| {
const t = @field(Tag, field.name);
if (t == tag_a) {
return eqlPayloads(t, a, b);
}
}
unreachable;
}
/// Compare the payloads of two types that are known to share the same tag `t`.
fn eqlPayloads(comptime t: Tag, a: Type, b: Type) bool {
switch (t) {
.void,
.bool,
.sampler,
.event,
.device_event,
.reserve_id,
.queue,
.pipe_storage,
.named_barrier,
=> return true,
.int,
.float,
.vector,
.matrix,
.sampled_image,
.array,
.runtime_array,
.@"opaque",
.pointer,
.pipe,
.image,
=> return std.meta.eql(a.payload(t).*, b.payload(t).*),
.@"struct" => {
const struct_a = a.payload(.@"struct");
const struct_b = b.payload(.@"struct");
if (struct_a.members.len != struct_b.members.len)
return false;
for (struct_a.members) |mem_a, i| {
if (!std.meta.eql(mem_a, struct_b.members[i]))
return false;
}
return true;
},
.@"function" => {
const fn_a = a.payload(.function);
const fn_b = b.payload(.function);
if (fn_a.return_type != fn_b.return_type)
return false;
return std.mem.eql(Ref, fn_a.parameters, fn_b.parameters);
},
}
}
/// Perform a shallow hash, which hashes the reference value of child types instead of recursing.
pub fn hashShallow(self: Type) u64 {
var hasher = std.hash.Wyhash.init(0);
const t = self.tag();
std.hash.autoHash(&hasher, t);
inline for (@typeInfo(Tag).Enum.fields) |field| {
if (@field(Tag, field.name) == t) {
switch (@field(Tag, field.name)) {
.void,
.bool,
.sampler,
.event,
.device_event,
.reserve_id,
.queue,
.pipe_storage,
.named_barrier,
=> {},
else => self.hashPayload(@field(Tag, field.name), &hasher),
}
}
}
return hasher.final();
}
/// Perform a shallow hash of the payload, given that we know the type's tag ahead of time.
fn hashPayload(self: Type, comptime t: Tag, hasher: *std.hash.Wyhash) void {
const fields = @typeInfo(t.Type()).Struct.fields;
const pl = self.payload(t);
comptime assert(std.mem.eql(u8, fields[0].name, "base"));
inline for (fields[1..]) |field| { // Skip the 'base' field.
std.hash.autoHashStrat(hasher, @field(pl, field.name), .DeepRecursive);
}
}
/// Hash context that hashes and compares types in a shallow fashion, useful for type caches.
pub const ShallowHashContext32 = struct {
pub fn hash(self: @This(), t: Type) u32 {
_ = self;
return @truncate(u32, t.hashShallow());
}
pub fn eql(self: @This(), a: Type, b: Type) bool {
_ = self;
return a.eqlShallow(b);
}
};
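
A hedged in-file test sketch of what the shallow semantics buy (it relies only on declarations in this file): two payloads whose children were interned to the same Ref compare equal without any recursion into the child type.

test "eqlShallow compares child types by Ref value" {
    var a = Payload.Vector{ .component_type = 0, .component_count = 4 };
    var b = Payload.Vector{ .component_type = 0, .component_count = 4 };
    var c = Payload.Vector{ .component_type = 1, .component_count = 4 };
    try std.testing.expect(initPayload(&a.base).eqlShallow(initPayload(&b.base)));
    try std.testing.expect(!initPayload(&a.base).eqlShallow(initPayload(&c.base)));
}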
/// Return the reference to any child type. Asserts the type is one of:
/// - Vectors
/// - Matrices
/// - Images
/// - SampledImages
/// - Arrays
/// - RuntimeArrays
/// - Pointers
pub fn childType(self: Type) Ref {
return switch (self.tag()) {
.vector => self.payload(.vector).component_type,
.matrix => self.payload(.matrix).column_type,
.image => self.payload(.image).sampled_type,
.sampled_image => self.payload(.sampled_image).image_type,
.array => self.payload(.array).element_type,
.runtime_array => self.payload(.runtime_array).element_type,
.pointer => self.payload(.pointer).child_type,
else => unreachable,
};
}
pub const Tag = enum(usize) {
void,
bool,
sampler,
event,
device_event,
reserve_id,
queue,
pipe_storage,
named_barrier,
// After this, the tag requires a payload.
int,
float,
vector,
matrix,
image,
sampled_image,
array,
runtime_array,
@"struct",
@"opaque",
pointer,
function,
pipe,
pub const last_no_payload_tag = Tag.named_barrier;
pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
pub fn Type(comptime t: Tag) type {
return switch (t) {
.void, .bool, .sampler, .event, .device_event, .reserve_id, .queue, .pipe_storage, .named_barrier => @compileError("Type Tag " ++ @tagName(t) ++ " has no payload"),
.int => Payload.Int,
.float => Payload.Float,
.vector => Payload.Vector,
.matrix => Payload.Matrix,
.image => Payload.Image,
.sampled_image => Payload.SampledImage,
.array => Payload.Array,
.runtime_array => Payload.RuntimeArray,
.@"struct" => Payload.Struct,
.@"opaque" => Payload.Opaque,
.pointer => Payload.Pointer,
.function => Payload.Function,
.pipe => Payload.Pipe,
};
}
};
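
Note that the tag-to-payload mapping is available at comptime, which is what makes the inline loops in eqlShallow and hashShallow possible; as a one-line sanity sketch:

comptime {
    assert(Tag.int.Type() == Payload.Int); // Tag.Type() resolves payload types statically
}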
pub const Payload = struct {
tag: Tag,
pub const Int = struct {
base: Payload = .{ .tag = .int },
width: u32,
signedness: std.builtin.Signedness,
};
pub const Float = struct {
base: Payload = .{ .tag = .float },
width: u32,
};
pub const Vector = struct {
base: Payload = .{ .tag = .vector },
component_type: Ref,
component_count: u32,
};
pub const Matrix = struct {
base: Payload = .{ .tag = .matrix },
column_type: Ref,
column_count: u32,
};
pub const Image = struct {
base: Payload = .{ .tag = .image },
sampled_type: Ref,
dim: spec.Dim,
depth: enum(u2) {
no = 0,
yes = 1,
maybe = 2,
},
arrayed: bool,
multisampled: bool,
sampled: enum(u2) {
known_at_runtime = 0,
with_sampler = 1,
without_sampler = 2,
},
format: spec.ImageFormat,
access_qualifier: ?spec.AccessQualifier,
};
pub const SampledImage = struct {
base: Payload = .{ .tag = .sampled_image },
image_type: Ref,
};
pub const Array = struct {
base: Payload = .{ .tag = .array },
element_type: Ref,
/// Note: Must be emitted as constant, not as literal!
length: u32,
/// Type has the 'ArrayStride' decoration.
/// If zero, no stride is present.
array_stride: u32,
};
pub const RuntimeArray = struct {
base: Payload = .{ .tag = .runtime_array },
element_type: Ref,
/// Type has the 'ArrayStride' decoration.
/// If zero, no stride is present.
array_stride: u32,
};
pub const Struct = struct {
base: Payload = .{ .tag = .@"struct" },
members: []Member,
decorations: StructDecorations,
/// Extra information for decorations, packed for efficiency. Fields are stored sequentially by
/// order of the `members` slice and `MemberDecorations` struct.
member_decoration_extra: []u32,
pub const Member = struct {
ty: Ref,
offset: u32,
decorations: MemberDecorations,
};
pub const StructDecorations = packed struct {
/// Type has the 'Block' decoration.
block: bool,
/// Type has the 'BufferBlock' decoration.
buffer_block: bool,
/// Type has the 'GLSLShared' decoration.
glsl_shared: bool,
/// Type has the 'GLSLPacked' decoration.
glsl_packed: bool,
/// Type has the 'CPacked' decoration.
c_packed: bool,
};
pub const MemberDecorations = packed struct {
/// Matrix layout for (arrays of) matrices. If this field is not .none,
/// then there is also an extra field containing the matrix stride corresponding
/// to the 'MatrixStride' decoration.
matrix_layout: enum(u2) {
/// Member has the 'RowMajor' decoration. The member type
/// must be a matrix or an array of matrices.
row_major,
/// Member has the 'ColMajor' decoration. The member type
/// must be a matrix or an array of matrices.
col_major,
/// Member is not a matrix or array of matrices.
none,
},
// Regular decorations, these do not imply extra fields.
/// Member has the 'NoPerspective' decoration.
no_perspective: bool,
/// Member has the 'Flat' decoration.
flat: bool,
/// Member has the 'Patch' decoration.
patch: bool,
/// Member has the 'Centroid' decoration.
centroid: bool,
/// Member has the 'Sample' decoration.
sample: bool,
/// Member has the 'Invariant' decoration.
/// Note: requires parent struct to have 'Block'.
invariant: bool,
/// Member has the 'Volatile' decoration.
@"volatile": bool,
/// Member has the 'Coherent' decoration.
coherent: bool,
/// Member has the 'NonWritable' decoration.
non_writable: bool,
/// Member has the 'NonReadable' decoration.
non_readable: bool,
// The following decorations all imply extra field(s).
/// Member has the 'BuiltIn' decoration.
/// This decoration has an extra field of type `spec.BuiltIn`.
/// Note: If any member of a struct has the BuiltIn decoration, all members must have one.
/// Note: Each builtin may only be reachable once for a particular entry point.
/// Note: The member type may be constrained by a particular built-in, defined in the client API specification.
builtin: bool,
/// Member has the 'Stream' decoration.
/// This member has an extra field of type `u32`.
stream: bool,
/// Member has the 'Location' decoration.
/// This member has an extra field of type `u32`.
location: bool,
/// Member has the 'Component' decoration.
/// This member has an extra field of type `u32`.
component: bool,
/// Member has the 'XfbBuffer' decoration.
/// This member has an extra field of type `u32`.
xfb_buffer: bool,
/// Member has the 'XfbStride' decoration.
/// This member has an extra field of type `u32`.
xfb_stride: bool,
/// Member has the 'UserSemantic' decoration.
/// This member has an extra field of type `[]u8`, encoded as a `u32` holding the exact
/// number of chars, followed by the string padded with zeroes to a multiple of 4 bytes.
user_semantic: bool,
};
};
pub const Opaque = struct {
base: Payload = .{ .tag = .@"opaque" },
name: []u8,
};
pub const Pointer = struct {
base: Payload = .{ .tag = .pointer },
storage_class: spec.StorageClass,
child_type: Ref,
/// Type has the 'ArrayStride' decoration.
/// This is valid for pointers to elements of an array.
/// If zero, no stride is present.
array_stride: u32,
/// Type has the 'Alignment' decoration.
alignment: ?u32,
/// Type has the 'MaxByteOffset' decoration.
max_byte_offset: ?u32,
};
pub const Function = struct {
base: Payload = .{ .tag = .function },
return_type: Ref,
parameters: []Ref,
};
pub const Pipe = struct {
base: Payload = .{ .tag = .pipe },
qualifier: spec.AccessQualifier,
};
};
};
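
Tying the pieces together, a hedged end-to-end sketch (module is an initialized Module from Module.zig; `.Function` is an assumed spec.StorageClass variant name): because emitType is shallow, child types must be resolved before their parents, so a pointer-to-u32 takes two resolveType calls — the first interns the integer and yields the Ref that the pointer payload refers to.

const int_payload = try module.arena.create(Type.Payload.Int);
int_payload.* = .{ .width = 32, .signedness = .unsigned };
const u32_ref = try module.resolveType(Type.initPayload(&int_payload.base));

const ptr_payload = try module.arena.create(Type.Payload.Pointer);
ptr_payload.* = .{
    .storage_class = .Function, // assumed variant name
    .child_type = u32_ref,
    .array_stride = 0,
    .alignment = null,
    .max_byte_offset = null,
};
const ptr_id = try module.resolveTypeId(Type.initPayload(&ptr_payload.base));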