spirv: translate remaining types
@@ -11,7 +11,8 @@ const IdRef = spec.IdRef;
const IdResult = spec.IdResult;

const SpvModule = @import("Module.zig");
const SpvType = @import("type.zig").Type;
const CacheRef = SpvModule.CacheRef;
const CacheKey = SpvModule.CacheKey;

/// Represents a token in the assembly template.
const Token = struct {
@@ -126,7 +127,7 @@ const AsmValue = union(enum) {
value: IdRef,

/// This result-value represents a type registered into the module's type system.
ty: SpvType.Ref,
ty: CacheRef,

/// Retrieve the result-id of this AsmValue. Asserts that this AsmValue
/// is of a variant that allows the result to be obtained (not an unresolved
@@ -135,7 +136,7 @@ const AsmValue = union(enum) {
return switch (self) {
.just_declared, .unresolved_forward_reference => unreachable,
.value => |result| result,
.ty => |ref| spv.typeId(ref),
.ty => |ref| spv.resultId(ref),
};
}
};
@@ -267,9 +268,9 @@ fn processInstruction(self: *Assembler) !void {
/// refers to the result.
fn processTypeInstruction(self: *Assembler) !AsmValue {
const operands = self.inst.operands.items;
const ty = switch (self.inst.opcode) {
.OpTypeVoid => SpvType.initTag(.void),
.OpTypeBool => SpvType.initTag(.bool),
const ref = switch (self.inst.opcode) {
.OpTypeVoid => try self.spv.resolve(.void_type),
.OpTypeBool => try self.spv.resolve(.bool_type),
.OpTypeInt => blk: {
const signedness: std.builtin.Signedness = switch (operands[2].literal32) {
0 => .unsigned,
@@ -282,7 +283,7 @@ fn processTypeInstruction(self: *Assembler) !AsmValue {
const width = std.math.cast(u16, operands[1].literal32) orelse {
return self.fail(0, "int type of {} bits is too large", .{operands[1].literal32});
};
break :blk try SpvType.int(self.spv.arena, signedness, width);
break :blk try self.spv.intType(signedness, width);
},
.OpTypeFloat => blk: {
const bits = operands[1].literal32;
@@ -292,136 +293,36 @@ fn processTypeInstruction(self: *Assembler) !AsmValue {
return self.fail(0, "{} is not a valid bit count for floats (expected 16, 32 or 64)", .{bits});
},
}
break :blk SpvType.float(@intCast(u16, bits));
},
.OpTypeVector => blk: {
const payload = try self.spv.arena.create(SpvType.Payload.Vector);
payload.* = .{
.component_type = try self.resolveTypeRef(operands[1].ref_id),
.component_count = operands[2].literal32,
};
break :blk SpvType.initPayload(&payload.base);
},
.OpTypeMatrix => blk: {
const payload = try self.spv.arena.create(SpvType.Payload.Matrix);
payload.* = .{
.column_type = try self.resolveTypeRef(operands[1].ref_id),
.column_count = operands[2].literal32,
};
break :blk SpvType.initPayload(&payload.base);
},
.OpTypeImage => blk: {
const payload = try self.spv.arena.create(SpvType.Payload.Image);
payload.* = .{
.sampled_type = try self.resolveTypeRef(operands[1].ref_id),
.dim = @intToEnum(spec.Dim, operands[2].value),
.depth = switch (operands[3].literal32) {
0 => .no,
1 => .yes,
2 => .maybe,
else => {
return self.fail(0, "'{}' is not a valid image depth (expected 0, 1 or 2)", .{operands[3].literal32});
},
},
.arrayed = switch (operands[4].literal32) {
0 => false,
1 => true,
else => {
return self.fail(0, "'{}' is not a valid image arrayed-ness (expected 0 or 1)", .{operands[4].literal32});
},
},
.multisampled = switch (operands[5].literal32) {
0 => false,
1 => true,
else => {
return self.fail(0, "'{}' is not a valid image multisampled-ness (expected 0 or 1)", .{operands[5].literal32});
},
},
.sampled = switch (operands[6].literal32) {
0 => .known_at_runtime,
1 => .with_sampler,
2 => .without_sampler,
else => {
return self.fail(0, "'{}' is not a valid image sampled-ness (expected 0, 1 or 2)", .{operands[6].literal32});
},
},
.format = @intToEnum(spec.ImageFormat, operands[7].value),
.access_qualifier = if (operands.len > 8)
@intToEnum(spec.AccessQualifier, operands[8].value)
else
null,
};
break :blk SpvType.initPayload(&payload.base);
},
.OpTypeSampler => SpvType.initTag(.sampler),
.OpTypeSampledImage => blk: {
const payload = try self.spv.arena.create(SpvType.Payload.SampledImage);
payload.* = .{
.image_type = try self.resolveTypeRef(operands[1].ref_id),
};
break :blk SpvType.initPayload(&payload.base);
break :blk try self.spv.resolve(.{ .float_type = .{ .bits = @intCast(u16, bits) } });
},
.OpTypeVector => try self.spv.resolve(.{ .vector_type = .{
.component_type = try self.resolveTypeRef(operands[1].ref_id),
.component_count = operands[2].literal32,
} }),
.OpTypeArray => {
// TODO: The length of an OpTypeArray is determined by a constant (which may be a spec constant),
// and so some consideration must be taken when entering this in the type system.
return self.todo("process OpTypeArray", .{});
},
.OpTypeRuntimeArray => blk: {
const payload = try self.spv.arena.create(SpvType.Payload.RuntimeArray);
payload.* = .{
.element_type = try self.resolveTypeRef(operands[1].ref_id),
// TODO: Fetch array stride from decorations.
.array_stride = 0,
};
break :blk SpvType.initPayload(&payload.base);
},
.OpTypeOpaque => blk: {
const payload = try self.spv.arena.create(SpvType.Payload.Opaque);
const name_offset = operands[1].string;
payload.* = .{
.name = std.mem.sliceTo(self.inst.string_bytes.items[name_offset..], 0),
};
break :blk SpvType.initPayload(&payload.base);
},
.OpTypePointer => blk: {
const payload = try self.spv.arena.create(SpvType.Payload.Pointer);
payload.* = .{
.storage_class = @intToEnum(spec.StorageClass, operands[1].value),
.child_type = try self.resolveTypeRef(operands[2].ref_id),
// TODO: Fetch decorations
};
break :blk SpvType.initPayload(&payload.base);
},
.OpTypePointer => try self.spv.ptrType(
try self.resolveTypeRef(operands[2].ref_id),
@intToEnum(spec.StorageClass, operands[1].value),
),
.OpTypeFunction => blk: {
const param_operands = operands[2..];
const param_types = try self.spv.arena.alloc(SpvType.Ref, param_operands.len);
const param_types = try self.spv.gpa.alloc(CacheRef, param_operands.len);
defer self.spv.gpa.free(param_types);
for (param_types, 0..) |*param, i| {
param.* = try self.resolveTypeRef(param_operands[i].ref_id);
}
const payload = try self.spv.arena.create(SpvType.Payload.Function);
payload.* = .{
break :blk try self.spv.resolve(.{ .function_type = .{
.return_type = try self.resolveTypeRef(operands[1].ref_id),
.parameters = param_types,
};
break :blk SpvType.initPayload(&payload.base);
} });
},
.OpTypeEvent => SpvType.initTag(.event),
.OpTypeDeviceEvent => SpvType.initTag(.device_event),
.OpTypeReserveId => SpvType.initTag(.reserve_id),
.OpTypeQueue => SpvType.initTag(.queue),
.OpTypePipe => blk: {
const payload = try self.spv.arena.create(SpvType.Payload.Pipe);
payload.* = .{
.qualifier = @intToEnum(spec.AccessQualifier, operands[1].value),
};
break :blk SpvType.initPayload(&payload.base);
},
.OpTypePipeStorage => SpvType.initTag(.pipe_storage),
.OpTypeNamedBarrier => SpvType.initTag(.named_barrier),
else => return self.todo("process type instruction {s}", .{@tagName(self.inst.opcode)}),
};

const ref = try self.spv.resolveType(ty);
return AsmValue{ .ty = ref };
}

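For orientation, a minimal sketch of what the rewritten switch does for one concrete instruction; the instruction and operand values are illustrative and not taken from this diff:

// Sketch only: `self` is the Assembler from this file; the input is `%u32 = OpTypeInt 32 0`.
const signedness: std.builtin.Signedness = .unsigned; // operands[2].literal32 == 0
const width: u16 = 32; // operands[1].literal32
const ref = try self.spv.intType(signedness, width); // deduplicated CacheRef instead of an arena-allocated SpvType
return AsmValue{ .ty = ref }; // spv.resultId(ref) later maps the ref to the instruction's result-id
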
@@ -528,7 +429,7 @@ fn resolveRef(self: *Assembler, ref: AsmValue.Ref) !AsmValue {
}

/// Resolve a value reference as type.
fn resolveTypeRef(self: *Assembler, ref: AsmValue.Ref) !SpvType.Ref {
fn resolveTypeRef(self: *Assembler, ref: AsmValue.Ref) !CacheRef {
const value = try self.resolveRef(ref);
switch (value) {
.just_declared, .unresolved_forward_reference => unreachable,
@@ -761,19 +662,20 @@ fn parseContextDependentNumber(self: *Assembler) !void {

const tok = self.currentToken();
const result_type_ref = try self.resolveTypeRef(self.inst.operands.items[0].ref_id);
const result_type = self.spv.type_cache.keys()[@enumToInt(result_type_ref)];
if (result_type.isInt()) {
try self.parseContextDependentInt(result_type.intSignedness(), result_type.intFloatBits());
} else if (result_type.isFloat()) {
const width = result_type.intFloatBits();
switch (width) {
16 => try self.parseContextDependentFloat(16),
32 => try self.parseContextDependentFloat(32),
64 => try self.parseContextDependentFloat(64),
else => return self.fail(tok.start, "cannot parse {}-bit float literal", .{width}),
}
} else {
return self.fail(tok.start, "cannot parse literal constant {s}", .{@tagName(result_type.tag())});
const result_type = self.spv.cache.lookup(result_type_ref);
switch (result_type) {
.int_type => |int| {
try self.parseContextDependentInt(int.signedness, int.bits);
},
.float_type => |float| {
switch (float.bits) {
16 => try self.parseContextDependentFloat(16),
32 => try self.parseContextDependentFloat(32),
64 => try self.parseContextDependentFloat(64),
else => return self.fail(tok.start, "cannot parse {}-bit float literal", .{float.bits}),
}
},
else => return self.fail(tok.start, "cannot parse literal constant", .{}),
}
}

@@ -20,12 +20,13 @@ const IdResult = spec.IdResult;
const IdResultType = spec.IdResultType;

const Section = @import("Section.zig");
const Type = @import("type.zig").Type;
pub const TypeConstantCache = @import("TypeConstantCache.zig");

const TypeCache = std.ArrayHashMapUnmanaged(Type, IdResultType, Type.ShallowHashContext32, true);
const Cache = @import("TypeConstantCache.zig");
pub const CacheKey = Cache.Key;
pub const CacheRef = Cache.Ref;
pub const CacheString = Cache.String;

/// This structure represents a function that is in-progress of being emitted.
/// Commonly, the contents of this structure will be merged with the appropriate
/// sections of the module and re-used. Note that the SPIR-V module system makes
/// no attempt of compacting result-id's, so any Fn instance should ultimately
@@ -130,7 +131,7 @@ sections: struct {
/// From this section, OpLine and OpNoLine is allowed.
/// According to the SPIR-V documentation, this section normally
/// also holds type and constant instructions. These are managed
/// via the tc_cache instead, which is the sole structure that
/// via the cache instead, which is the sole structure that
/// manages that section. These will be inserted between this and
/// the previous section when emitting the final binary.
/// TODO: Do we need this section? Globals are also managed with another mechanism.
@@ -152,10 +153,9 @@ next_result_id: Word,
/// just the ones for OpLine. Note that OpLine needs the result of OpString, and not that of OpSource.
source_file_names: std.StringHashMapUnmanaged(IdRef) = .{},

type_cache: TypeCache = .{},
/// SPIR-V type- and constant cache. This structure is used to store information about these in a more
/// efficient manner.
tc_cache: TypeConstantCache = .{},
cache: Cache = .{},

/// Set of Decls, referred to by Decl.Index.
decls: std.ArrayListUnmanaged(Decl) = .{},
@@ -196,7 +196,7 @@ pub fn deinit(self: *Module) void {
self.sections.functions.deinit(self.gpa);

self.source_file_names.deinit(self.gpa);
self.tc_cache.deinit(self);
self.cache.deinit(self);

self.decls.deinit(self.gpa);
self.decl_deps.deinit(self.gpa);
@@ -223,20 +223,20 @@ pub fn idBound(self: Module) Word {
return self.next_result_id;
}

pub fn resolve(self: *Module, key: TypeConstantCache.Key) !TypeConstantCache.Ref {
return self.tc_cache.resolve(self, key);
pub fn resolve(self: *Module, key: CacheKey) !CacheRef {
return self.cache.resolve(self, key);
}

pub fn resultId(self: *Module, ref: TypeConstantCache.Ref) IdResult {
return self.tc_cache.resultId(ref);
pub fn resultId(self: *const Module, ref: CacheRef) IdResult {
return self.cache.resultId(ref);
}

pub fn resolveId(self: *Module, key: TypeConstantCache.Key) !IdResult {
pub fn resolveId(self: *Module, key: CacheKey) !IdResult {
return self.resultId(try self.resolve(key));
}

pub fn resolveString(self: *Module, str: []const u8) !TypeConstantCache.String {
return try self.tc_cache.addString(self, str);
pub fn resolveString(self: *Module, str: []const u8) !CacheString {
return try self.cache.addString(self, str);
}

fn orderGlobalsInto(
@@ -350,7 +350,7 @@ pub fn flush(self: *Module, file: std.fs.File) !void {
var entry_points = try self.entryPoints();
defer entry_points.deinit(self.gpa);

var types_constants = try self.tc_cache.materialize(self);
var types_constants = try self.cache.materialize(self);
defer types_constants.deinit(self.gpa);

// Note: needs to be kept in order according to section 2.3!
@@ -364,6 +364,7 @@ pub fn flush(self: *Module, file: std.fs.File) !void {
self.sections.debug_names.toWords(),
self.sections.annotations.toWords(),
types_constants.toWords(),
self.sections.types_globals_constants.toWords(),
self.sections.globals.toWords(),
globals.toWords(),
self.sections.functions.toWords(),
@@ -416,364 +417,14 @@ pub fn resolveSourceFileName(self: *Module, decl: *ZigDecl) !IdRef {
return result.value_ptr.*;
}

/// Fetch a result-id for a spir-v type. This function deduplicates the type as appropriate,
/// and returns a cached version if that exists.
/// Note: This function does not attempt to perform any validation on the type.
/// The type is emitted in a shallow fashion; any child types should already
/// be emitted at this point.
pub fn resolveType(self: *Module, ty: Type) !Type.Ref {
const result = try self.type_cache.getOrPut(self.gpa, ty);
const index = @intToEnum(Type.Ref, result.index);

if (!result.found_existing) {
const ref = try self.emitType(ty);
self.type_cache.values()[result.index] = ref;
}

return index;
pub fn intType(self: *Module, signedness: std.builtin.Signedness, bits: u16) !CacheRef {
return try self.resolve(.{ .int_type = .{
.signedness = signedness,
.bits = bits,
} });
}

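To make the new cache-based API concrete, a short usage sketch; the `module` variable is hypothetical, while resolve, resolveId, resultId and intType are the functions shown above:

// Sketch only, not part of this commit.
const u32_ref = try module.intType(.unsigned, 32); // CacheRef, deduplicated by the cache
const u32_id = module.resultId(u32_ref); // IdResult usable as an instruction operand
const bool_id = try module.resolveId(.bool_type); // resolve() and resultId() in one call
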
pub fn resolveTypeId(self: *Module, ty: Type) !IdResultType {
|
||||
const ty_ref = try self.resolveType(ty);
|
||||
return self.typeId(ty_ref);
|
||||
}
|
||||
|
||||
pub fn typeRefType(self: Module, ty_ref: Type.Ref) Type {
|
||||
return self.type_cache.keys()[@enumToInt(ty_ref)];
|
||||
}
|
||||
|
||||
/// Get the result-id of a particular type, by reference. Asserts type_ref is valid.
|
||||
pub fn typeId(self: Module, ty_ref: Type.Ref) IdResultType {
|
||||
return self.type_cache.values()[@enumToInt(ty_ref)];
|
||||
}
|
||||
|
||||
/// Unconditionally emit a spir-v type into the appropriate section.
|
||||
/// Note: If this function is called with a type that is already generated, it may yield an invalid module
|
||||
/// as non-pointer non-aggregate types must be unique!
|
||||
/// Note: This function does not attempt to perform any validation on the type.
|
||||
/// The type is emitted in a shallow fashion; any child types should already
|
||||
/// be emitted at this point.
|
||||
pub fn emitType(self: *Module, ty: Type) error{OutOfMemory}!IdResultType {
|
||||
const result_id = self.allocId();
|
||||
const ref_id = result_id;
|
||||
const types = &self.sections.types_globals_constants;
|
||||
const debug_names = &self.sections.debug_names;
|
||||
const result_id_operand = .{ .id_result = result_id };
|
||||
|
||||
switch (ty.tag()) {
|
||||
.void => {
|
||||
try types.emit(self.gpa, .OpTypeVoid, result_id_operand);
|
||||
try debug_names.emit(self.gpa, .OpName, .{
|
||||
.target = result_id,
|
||||
.name = "void",
|
||||
});
|
||||
},
|
||||
.bool => {
|
||||
try types.emit(self.gpa, .OpTypeBool, result_id_operand);
|
||||
try debug_names.emit(self.gpa, .OpName, .{
|
||||
.target = result_id,
|
||||
.name = "bool",
|
||||
});
|
||||
},
|
||||
.u8,
|
||||
.u16,
|
||||
.u32,
|
||||
.u64,
|
||||
.i8,
|
||||
.i16,
|
||||
.i32,
|
||||
.i64,
|
||||
.int,
|
||||
=> {
|
||||
// TODO: Kernels do not support OpTypeInt that is signed. We can probably
// get rid of the signedness altogether, in Shaders also.
|
||||
const bits = ty.intFloatBits();
|
||||
const signedness: spec.LiteralInteger = switch (ty.intSignedness()) {
|
||||
.unsigned => 0,
|
||||
.signed => 1,
|
||||
};
|
||||
|
||||
try types.emit(self.gpa, .OpTypeInt, .{
|
||||
.id_result = result_id,
|
||||
.width = bits,
|
||||
.signedness = signedness,
|
||||
});
|
||||
|
||||
const ui: []const u8 = switch (signedness) {
|
||||
0 => "u",
|
||||
1 => "i",
|
||||
else => unreachable,
|
||||
};
|
||||
const name = try std.fmt.allocPrint(self.gpa, "{s}{}", .{ ui, bits });
|
||||
defer self.gpa.free(name);
|
||||
|
||||
try debug_names.emit(self.gpa, .OpName, .{
|
||||
.target = result_id,
|
||||
.name = name,
|
||||
});
|
||||
},
|
||||
.f16, .f32, .f64 => {
|
||||
const bits = ty.intFloatBits();
|
||||
try types.emit(self.gpa, .OpTypeFloat, .{
|
||||
.id_result = result_id,
|
||||
.width = bits,
|
||||
});
|
||||
|
||||
const name = try std.fmt.allocPrint(self.gpa, "f{}", .{bits});
|
||||
defer self.gpa.free(name);
|
||||
try debug_names.emit(self.gpa, .OpName, .{
|
||||
.target = result_id,
|
||||
.name = name,
|
||||
});
|
||||
},
|
||||
.vector => try types.emit(self.gpa, .OpTypeVector, .{
|
||||
.id_result = result_id,
|
||||
.component_type = self.typeId(ty.childType()),
|
||||
.component_count = ty.payload(.vector).component_count,
|
||||
}),
|
||||
.matrix => try types.emit(self.gpa, .OpTypeMatrix, .{
|
||||
.id_result = result_id,
|
||||
.column_type = self.typeId(ty.childType()),
|
||||
.column_count = ty.payload(.matrix).column_count,
|
||||
}),
|
||||
.image => {
|
||||
const info = ty.payload(.image);
|
||||
try types.emit(self.gpa, .OpTypeImage, .{
|
||||
.id_result = result_id,
|
||||
.sampled_type = self.typeId(ty.childType()),
|
||||
.dim = info.dim,
|
||||
.depth = @enumToInt(info.depth),
|
||||
.arrayed = @boolToInt(info.arrayed),
|
||||
.ms = @boolToInt(info.multisampled),
|
||||
.sampled = @enumToInt(info.sampled),
|
||||
.image_format = info.format,
|
||||
.access_qualifier = info.access_qualifier,
|
||||
});
|
||||
},
|
||||
.sampler => try types.emit(self.gpa, .OpTypeSampler, result_id_operand),
|
||||
.sampled_image => try types.emit(self.gpa, .OpTypeSampledImage, .{
|
||||
.id_result = result_id,
|
||||
.image_type = self.typeId(ty.childType()),
|
||||
}),
|
||||
.array => {
|
||||
const info = ty.payload(.array);
|
||||
assert(info.length != 0);
|
||||
|
||||
const size_type = Type.initTag(.u32);
|
||||
const size_type_id = try self.resolveTypeId(size_type);
|
||||
const length_id = self.allocId();
|
||||
try self.emitConstant(size_type_id, length_id, .{ .uint32 = info.length });
|
||||
|
||||
try types.emit(self.gpa, .OpTypeArray, .{
|
||||
.id_result = result_id,
|
||||
.element_type = self.typeId(ty.childType()),
|
||||
.length = length_id,
|
||||
});
|
||||
if (info.array_stride != 0) {
|
||||
try self.decorate(ref_id, .{ .ArrayStride = .{ .array_stride = info.array_stride } });
|
||||
}
|
||||
},
|
||||
.runtime_array => {
|
||||
const info = ty.payload(.runtime_array);
|
||||
try types.emit(self.gpa, .OpTypeRuntimeArray, .{
|
||||
.id_result = result_id,
|
||||
.element_type = self.typeId(ty.childType()),
|
||||
});
|
||||
if (info.array_stride != 0) {
|
||||
try self.decorate(ref_id, .{ .ArrayStride = .{ .array_stride = info.array_stride } });
|
||||
}
|
||||
},
|
||||
.@"struct" => {
|
||||
const info = ty.payload(.@"struct");
|
||||
try types.emitRaw(self.gpa, .OpTypeStruct, 1 + info.members.len);
|
||||
types.writeOperand(IdResult, result_id);
|
||||
for (info.members) |member| {
|
||||
types.writeOperand(IdRef, self.typeId(member.ty));
|
||||
}
|
||||
try self.decorateStruct(ref_id, info);
|
||||
},
|
||||
.@"opaque" => try types.emit(self.gpa, .OpTypeOpaque, .{
|
||||
.id_result = result_id,
|
||||
.literal_string = ty.payload(.@"opaque").name,
|
||||
}),
|
||||
.pointer => {
|
||||
const info = ty.payload(.pointer);
|
||||
try types.emit(self.gpa, .OpTypePointer, .{
|
||||
.id_result = result_id,
|
||||
.storage_class = info.storage_class,
|
||||
.type = self.typeId(ty.childType()),
|
||||
});
|
||||
if (info.array_stride != 0) {
|
||||
try self.decorate(ref_id, .{ .ArrayStride = .{ .array_stride = info.array_stride } });
|
||||
}
|
||||
if (info.alignment != 0) {
|
||||
try self.decorate(ref_id, .{ .Alignment = .{ .alignment = info.alignment } });
|
||||
}
|
||||
if (info.max_byte_offset) |max_byte_offset| {
|
||||
try self.decorate(ref_id, .{ .MaxByteOffset = .{ .max_byte_offset = max_byte_offset } });
|
||||
}
|
||||
},
|
||||
.function => {
|
||||
const info = ty.payload(.function);
|
||||
try types.emitRaw(self.gpa, .OpTypeFunction, 2 + info.parameters.len);
|
||||
types.writeOperand(IdResult, result_id);
|
||||
types.writeOperand(IdRef, self.typeId(info.return_type));
|
||||
for (info.parameters) |parameter_type| {
|
||||
types.writeOperand(IdRef, self.typeId(parameter_type));
|
||||
}
|
||||
},
|
||||
.event => try types.emit(self.gpa, .OpTypeEvent, result_id_operand),
|
||||
.device_event => try types.emit(self.gpa, .OpTypeDeviceEvent, result_id_operand),
|
||||
.reserve_id => try types.emit(self.gpa, .OpTypeReserveId, result_id_operand),
|
||||
.queue => try types.emit(self.gpa, .OpTypeQueue, result_id_operand),
|
||||
.pipe => try types.emit(self.gpa, .OpTypePipe, .{
|
||||
.id_result = result_id,
|
||||
.qualifier = ty.payload(.pipe).qualifier,
|
||||
}),
|
||||
.pipe_storage => try types.emit(self.gpa, .OpTypePipeStorage, result_id_operand),
|
||||
.named_barrier => try types.emit(self.gpa, .OpTypeNamedBarrier, result_id_operand),
|
||||
}
|
||||
|
||||
return result_id;
|
||||
}
|
||||
|
||||
fn decorateStruct(self: *Module, target: IdRef, info: *const Type.Payload.Struct) !void {
|
||||
const debug_names = &self.sections.debug_names;
|
||||
|
||||
if (info.name.len != 0) {
|
||||
try debug_names.emit(self.gpa, .OpName, .{
|
||||
.target = target,
|
||||
.name = info.name,
|
||||
});
|
||||
}
|
||||
|
||||
// Decorations for the struct type itself.
|
||||
if (info.decorations.block)
|
||||
try self.decorate(target, .Block);
|
||||
if (info.decorations.buffer_block)
|
||||
try self.decorate(target, .BufferBlock);
|
||||
if (info.decorations.glsl_shared)
|
||||
try self.decorate(target, .GLSLShared);
|
||||
if (info.decorations.glsl_packed)
|
||||
try self.decorate(target, .GLSLPacked);
|
||||
if (info.decorations.c_packed)
|
||||
try self.decorate(target, .CPacked);
|
||||
|
||||
// Decorations for the struct members.
|
||||
const extra = info.member_decoration_extra;
|
||||
var extra_i: u32 = 0;
|
||||
for (info.members, 0..) |member, i| {
|
||||
const d = member.decorations;
|
||||
const index = @intCast(Word, i);
|
||||
|
||||
if (member.name.len != 0) {
|
||||
try debug_names.emit(self.gpa, .OpMemberName, .{
|
||||
.type = target,
|
||||
.member = index,
|
||||
.name = member.name,
|
||||
});
|
||||
}
|
||||
|
||||
switch (member.offset) {
|
||||
.none => {},
|
||||
else => try self.decorateMember(
|
||||
target,
|
||||
index,
|
||||
.{ .Offset = .{ .byte_offset = @enumToInt(member.offset) } },
|
||||
),
|
||||
}
|
||||
|
||||
switch (d.matrix_layout) {
|
||||
.row_major => try self.decorateMember(target, index, .RowMajor),
|
||||
.col_major => try self.decorateMember(target, index, .ColMajor),
|
||||
.none => {},
|
||||
}
|
||||
if (d.matrix_layout != .none) {
|
||||
try self.decorateMember(target, index, .{
|
||||
.MatrixStride = .{ .matrix_stride = extra[extra_i] },
|
||||
});
|
||||
extra_i += 1;
|
||||
}
|
||||
|
||||
if (d.no_perspective)
|
||||
try self.decorateMember(target, index, .NoPerspective);
|
||||
if (d.flat)
|
||||
try self.decorateMember(target, index, .Flat);
|
||||
if (d.patch)
|
||||
try self.decorateMember(target, index, .Patch);
|
||||
if (d.centroid)
|
||||
try self.decorateMember(target, index, .Centroid);
|
||||
if (d.sample)
|
||||
try self.decorateMember(target, index, .Sample);
|
||||
if (d.invariant)
|
||||
try self.decorateMember(target, index, .Invariant);
|
||||
if (d.@"volatile")
|
||||
try self.decorateMember(target, index, .Volatile);
|
||||
if (d.coherent)
|
||||
try self.decorateMember(target, index, .Coherent);
|
||||
if (d.non_writable)
|
||||
try self.decorateMember(target, index, .NonWritable);
|
||||
if (d.non_readable)
|
||||
try self.decorateMember(target, index, .NonReadable);
|
||||
|
||||
if (d.builtin) {
|
||||
try self.decorateMember(target, index, .{
|
||||
.BuiltIn = .{ .built_in = @intToEnum(spec.BuiltIn, extra[extra_i]) },
|
||||
});
|
||||
extra_i += 1;
|
||||
}
|
||||
if (d.stream) {
|
||||
try self.decorateMember(target, index, .{
|
||||
.Stream = .{ .stream_number = extra[extra_i] },
|
||||
});
|
||||
extra_i += 1;
|
||||
}
|
||||
if (d.location) {
|
||||
try self.decorateMember(target, index, .{
|
||||
.Location = .{ .location = extra[extra_i] },
|
||||
});
|
||||
extra_i += 1;
|
||||
}
|
||||
if (d.component) {
|
||||
try self.decorateMember(target, index, .{
|
||||
.Component = .{ .component = extra[extra_i] },
|
||||
});
|
||||
extra_i += 1;
|
||||
}
|
||||
if (d.xfb_buffer) {
|
||||
try self.decorateMember(target, index, .{
|
||||
.XfbBuffer = .{ .xfb_buffer_number = extra[extra_i] },
|
||||
});
|
||||
extra_i += 1;
|
||||
}
|
||||
if (d.xfb_stride) {
|
||||
try self.decorateMember(target, index, .{
|
||||
.XfbStride = .{ .xfb_stride = extra[extra_i] },
|
||||
});
|
||||
extra_i += 1;
|
||||
}
|
||||
if (d.user_semantic) {
|
||||
const len = extra[extra_i];
|
||||
extra_i += 1;
|
||||
const semantic = @ptrCast([*]const u8, &extra[extra_i])[0..len];
|
||||
try self.decorateMember(target, index, .{
|
||||
.UserSemantic = .{ .semantic = semantic },
|
||||
});
|
||||
extra_i += std.math.divCeil(u32, extra_i, @sizeOf(u32)) catch unreachable;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn simpleStructType(self: *Module, members: []const Type.Payload.Struct.Member) !Type.Ref {
|
||||
const payload = try self.arena.create(Type.Payload.Struct);
|
||||
payload.* = .{
|
||||
.members = try self.arena.dupe(Type.Payload.Struct.Member, members),
|
||||
.decorations = .{},
|
||||
};
|
||||
return try self.resolveType(Type.initPayload(&payload.base));
|
||||
}
|
||||
|
||||
pub fn arrayType2(self: *Module, len: u32, elem_ty_ref: TypeConstantCache.Ref) !TypeConstantCache.Ref {
pub fn arrayType(self: *Module, len: u32, elem_ty_ref: CacheRef) !CacheRef {
const len_ty_ref = try self.resolve(.{ .int_type = .{
.signedness = .unsigned,
.bits = 32,
@@ -788,41 +439,45 @@ pub fn arrayType2(self: *Module, len: u32, elem_ty_ref: TypeConstantCache.Ref) !
} });
}

pub fn arrayType(self: *Module, len: u32, ty: Type.Ref) !Type.Ref {
const payload = try self.arena.create(Type.Payload.Array);
payload.* = .{
.element_type = ty,
.length = len,
};
return try self.resolveType(Type.initPayload(&payload.base));
}

pub fn ptrType(
self: *Module,
child: Type.Ref,
child: CacheRef,
storage_class: spec.StorageClass,
alignment: u32,
) !Type.Ref {
const ptr_payload = try self.arena.create(Type.Payload.Pointer);
ptr_payload.* = .{
) !CacheRef {
return try self.resolve(.{ .ptr_type = .{
.storage_class = storage_class,
.child_type = child,
.alignment = alignment,
};
return try self.resolveType(Type.initPayload(&ptr_payload.base));
} });
}

pub fn changePtrStorageClass(self: *Module, ptr_ty_ref: Type.Ref, new_storage_class: spec.StorageClass) !Type.Ref {
const payload = try self.arena.create(Type.Payload.Pointer);
payload.* = self.typeRefType(ptr_ty_ref).payload(.pointer).*;
payload.storage_class = new_storage_class;
return try self.resolveType(Type.initPayload(&payload.base));
pub fn constInt(self: *Module, ty_ref: CacheRef, value: anytype) !IdRef {
const ty = self.cache.lookup(ty_ref).int_type;
const Value = Cache.Key.Int.Value;
return try self.resolveId(.{ .int = .{
.ty = ty_ref,
.value = switch (ty.signedness) {
.signed => Value{ .int64 = @intCast(i64, value) },
.unsigned => Value{ .uint64 = @intCast(u64, value) },
},
} });
}

pub fn constComposite(self: *Module, ty_ref: Type.Ref, members: []const IdRef) !IdRef {
pub fn constUndef(self: *Module, ty_ref: CacheRef) !IdRef {
return try self.resolveId(.{ .undef = .{ .ty = ty_ref } });
}

pub fn constNull(self: *Module, ty_ref: CacheRef) !IdRef {
return try self.resolveId(.{ .null = .{ .ty = ty_ref } });
}

pub fn constBool(self: *Module, ty_ref: CacheRef, value: bool) !IdRef {
return try self.resolveId(.{ .bool = .{ .ty = ty_ref, .value = value } });
}

pub fn constComposite(self: *Module, ty_ref: CacheRef, members: []const IdRef) !IdRef {
const result_id = self.allocId();
try self.sections.types_globals_constants.emit(self.gpa, .OpSpecConstantComposite, .{
.id_result_type = self.typeId(ty_ref),
.id_result_type = self.resultId(ty_ref),
.id_result = result_id,
.constituents = members,
});

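A brief usage sketch of the constant helpers introduced here; the values and the `module` and `struct_ty` variables are illustrative:

// Sketch only, not part of this commit.
const u32_ty = try module.intType(.unsigned, 32);
const forty_two = try module.constInt(u32_ty, 42); // cached OpConstant
const undef_u32 = try module.constUndef(u32_ty); // cached OpUndef
const bool_ty = try module.resolve(.bool_type);
const yes = try module.constBool(bool_ty, true); // OpConstantTrue
const pair = try module.constComposite(struct_ty, &.{ forty_two, yes }); // struct_ty is hypothetical
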
@@ -111,6 +111,18 @@ const Tag = enum {
/// Value of type f64
/// data is payload to Float16
float64,
/// Undefined value
/// data is type
undef,
/// Null value
/// data is type
null,
/// Bool value that is true
/// data is (bool) type
bool_true,
/// Bool value that is false
/// data is (bool) type
bool_false,

const SimpleType = enum { void, bool };

@@ -227,6 +239,9 @@ pub const Key = union(enum) {
// -- values
int: Int,
float: Float,
undef: Undef,
null: Null,
bool: Bool,

pub const IntType = std.builtin.Type.Int;
pub const FloatType = std.builtin.Type.Float;
@@ -323,6 +338,19 @@ pub const Key = union(enum) {
};
};

pub const Undef = struct {
ty: Ref,
};

pub const Null = struct {
ty: Ref,
};

pub const Bool = struct {
ty: Ref,
value: bool,
};

fn hash(self: Key) u32 {
var hasher = std.hash.Wyhash.init(0);
switch (self) {
@@ -539,6 +567,32 @@ fn emit(
.value = lit,
});
},
.undef => |undef| {
try section.emit(spv.gpa, .OpUndef, .{
.id_result_type = self.resultId(undef.ty),
.id_result = result_id,
});
},
.null => |null_info| {
try section.emit(spv.gpa, .OpConstantNull, .{
.id_result_type = self.resultId(null_info.ty),
.id_result = result_id,
});
},
.bool => |bool_info| switch (bool_info.value) {
true => {
try section.emit(spv.gpa, .OpConstantTrue, .{
.id_result_type = self.resultId(bool_info.ty),
.id_result = result_id,
});
},
false => {
try section.emit(spv.gpa, .OpConstantFalse, .{
.id_result_type = self.resultId(bool_info.ty),
.id_result = result_id,
});
},
},
}
}

@@ -713,6 +767,24 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
},
else => unreachable,
},
.undef => |undef| .{
.tag = .undef,
.result_id = result_id,
.data = @enumToInt(undef.ty),
},
.null => |null_info| .{
.tag = .null,
.result_id = result_id,
.data = @enumToInt(null_info.ty),
},
.bool => |bool_info| .{
.tag = switch (bool_info.value) {
true => Tag.bool_true,
false => Tag.bool_false,
},
.result_id = result_id,
.data = @enumToInt(bool_info.ty),
},
};
try self.items.append(spv.gpa, item);

@@ -850,6 +922,20 @@ pub fn lookup(self: *const Self, ref: Ref) Key {
.value = .{ .uint64 = payload.decode() },
} };
},
.undef => .{ .undef = .{
.ty = @intToEnum(Ref, data),
} },
.null => .{ .null = .{
.ty = @intToEnum(Ref, data),
} },
.bool_true => .{ .bool = .{
.ty = @intToEnum(Ref, data),
.value = true,
} },
.bool_false => .{ .bool = .{
.ty = @intToEnum(Ref, data),
.value = false,
} },
};
}

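For reference, a hedged sketch of the round trip that resolve() and lookup() implement together; the `spv` module variable is illustrative:

// Sketch only, not part of this commit: resolve() packs a Key into a tag/data item,
// and lookup() reconstructs the same Key from that item.
const bool_ty = try spv.resolve(.bool_type);
const true_ref = try spv.resolve(.{ .bool = .{ .ty = bool_ty, .value = true } });
const key = spv.cache.lookup(true_ref);
std.debug.assert(key.bool.value and key.bool.ty == bool_ty);
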
@@ -1,567 +0,0 @@
|
||||
//! This module models a SPIR-V Type. These are distinct from Zig types, with some types
|
||||
//! which are not representable by Zig directly.
|
||||
|
||||
const std = @import("std");
|
||||
const assert = std.debug.assert;
|
||||
const Signedness = std.builtin.Signedness;
|
||||
const Allocator = std.mem.Allocator;
|
||||
|
||||
const spec = @import("spec.zig");
|
||||
|
||||
pub const Type = extern union {
|
||||
tag_if_small_enough: Tag,
|
||||
ptr_otherwise: *Payload,
|
||||
|
||||
/// A reference to another SPIR-V type.
|
||||
pub const Ref = enum(u32) { _ };
|
||||
|
||||
pub fn initTag(comptime small_tag: Tag) Type {
|
||||
comptime assert(@enumToInt(small_tag) < Tag.no_payload_count);
|
||||
return .{ .tag_if_small_enough = small_tag };
|
||||
}
|
||||
|
||||
pub fn initPayload(pl: *Payload) Type {
|
||||
assert(@enumToInt(pl.tag) >= Tag.no_payload_count);
|
||||
return .{ .ptr_otherwise = pl };
|
||||
}
|
||||
|
||||
pub fn int(arena: Allocator, signedness: Signedness, bits: u16) !Type {
|
||||
const bits_and_signedness = switch (signedness) {
|
||||
.signed => -@as(i32, bits),
|
||||
.unsigned => @as(i32, bits),
|
||||
};
|
||||
|
||||
return switch (bits_and_signedness) {
|
||||
8 => initTag(.u8),
|
||||
16 => initTag(.u16),
|
||||
32 => initTag(.u32),
|
||||
64 => initTag(.u64),
|
||||
-8 => initTag(.i8),
|
||||
-16 => initTag(.i16),
|
||||
-32 => initTag(.i32),
|
||||
-64 => initTag(.i64),
|
||||
else => {
|
||||
const int_payload = try arena.create(Payload.Int);
|
||||
int_payload.* = .{
|
||||
.width = bits,
|
||||
.signedness = signedness,
|
||||
};
|
||||
return initPayload(&int_payload.base);
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
pub fn float(bits: u16) Type {
|
||||
return switch (bits) {
|
||||
16 => initTag(.f16),
|
||||
32 => initTag(.f32),
|
||||
64 => initTag(.f64),
|
||||
else => unreachable, // Enable more types if required.
|
||||
};
|
||||
}
|
||||
|
||||
pub fn tag(self: Type) Tag {
|
||||
if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) {
|
||||
return self.tag_if_small_enough;
|
||||
} else {
|
||||
return self.ptr_otherwise.tag;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn castTag(self: Type, comptime t: Tag) ?*t.Type() {
|
||||
if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count)
|
||||
return null;
|
||||
|
||||
if (self.ptr_otherwise.tag == t)
|
||||
return self.payload(t);
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/// Access the payload of a type directly.
|
||||
pub fn payload(self: Type, comptime t: Tag) *t.Type() {
|
||||
assert(self.tag() == t);
|
||||
return @fieldParentPtr(t.Type(), "base", self.ptr_otherwise);
|
||||
}
|
||||
|
||||
/// Perform a shallow equality test, comparing two types while assuming that any child types
|
||||
/// are equal only if their references are equal.
|
||||
pub fn eqlShallow(a: Type, b: Type) bool {
|
||||
if (a.tag_if_small_enough == b.tag_if_small_enough)
|
||||
return true;
|
||||
|
||||
const tag_a = a.tag();
|
||||
const tag_b = b.tag();
|
||||
if (tag_a != tag_b)
|
||||
return false;
|
||||
|
||||
inline for (@typeInfo(Tag).Enum.fields) |field| {
|
||||
const t = @field(Tag, field.name);
|
||||
if (t == tag_a) {
|
||||
return eqlPayloads(t, a, b);
|
||||
}
|
||||
}
|
||||
|
||||
unreachable;
|
||||
}
|
||||
|
||||
/// Compare the payload of two compatible tags, given that we already know the tag of both types.
|
||||
fn eqlPayloads(comptime t: Tag, a: Type, b: Type) bool {
|
||||
switch (t) {
|
||||
.void,
|
||||
.bool,
|
||||
.sampler,
|
||||
.event,
|
||||
.device_event,
|
||||
.reserve_id,
|
||||
.queue,
|
||||
.pipe_storage,
|
||||
.named_barrier,
|
||||
.u8,
|
||||
.u16,
|
||||
.u32,
|
||||
.u64,
|
||||
.i8,
|
||||
.i16,
|
||||
.i32,
|
||||
.i64,
|
||||
.f16,
|
||||
.f32,
|
||||
.f64,
|
||||
=> return true,
|
||||
.int,
|
||||
.vector,
|
||||
.matrix,
|
||||
.sampled_image,
|
||||
.array,
|
||||
.runtime_array,
|
||||
.@"opaque",
|
||||
.pointer,
|
||||
.pipe,
|
||||
.image,
|
||||
=> return std.meta.eql(a.payload(t).*, b.payload(t).*),
|
||||
.@"struct" => {
|
||||
const struct_a = a.payload(.@"struct");
|
||||
const struct_b = b.payload(.@"struct");
|
||||
if (struct_a.members.len != struct_b.members.len)
|
||||
return false;
|
||||
for (struct_a.members, 0..) |mem_a, i| {
|
||||
if (!std.meta.eql(mem_a, struct_b.members[i]))
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
},
|
||||
.function => {
|
||||
const fn_a = a.payload(.function);
|
||||
const fn_b = b.payload(.function);
|
||||
if (fn_a.return_type != fn_b.return_type)
|
||||
return false;
|
||||
return std.mem.eql(Ref, fn_a.parameters, fn_b.parameters);
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Perform a shallow hash, which hashes the reference value of child types instead of recursing.
|
||||
pub fn hashShallow(self: Type) u64 {
|
||||
var hasher = std.hash.Wyhash.init(0);
|
||||
const t = self.tag();
|
||||
std.hash.autoHash(&hasher, t);
|
||||
|
||||
inline for (@typeInfo(Tag).Enum.fields) |field| {
|
||||
if (@field(Tag, field.name) == t) {
|
||||
switch (@field(Tag, field.name)) {
|
||||
.void,
|
||||
.bool,
|
||||
.sampler,
|
||||
.event,
|
||||
.device_event,
|
||||
.reserve_id,
|
||||
.queue,
|
||||
.pipe_storage,
|
||||
.named_barrier,
|
||||
.u8,
|
||||
.u16,
|
||||
.u32,
|
||||
.u64,
|
||||
.i8,
|
||||
.i16,
|
||||
.i32,
|
||||
.i64,
|
||||
.f16,
|
||||
.f32,
|
||||
.f64,
|
||||
=> {},
|
||||
else => self.hashPayload(@field(Tag, field.name), &hasher),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return hasher.final();
|
||||
}
|
||||
|
||||
/// Perform a shallow hash, given that we know the tag of the field ahead of time.
|
||||
fn hashPayload(self: Type, comptime t: Tag, hasher: *std.hash.Wyhash) void {
|
||||
const fields = @typeInfo(t.Type()).Struct.fields;
|
||||
const pl = self.payload(t);
|
||||
comptime assert(std.mem.eql(u8, fields[0].name, "base"));
|
||||
inline for (fields[1..]) |field| { // Skip the 'base' field.
|
||||
std.hash.autoHashStrat(hasher, @field(pl, field.name), .DeepRecursive);
|
||||
}
|
||||
}
|
||||
|
||||
/// Hash context that hashes and compares types in a shallow fashion, useful for type caches.
|
||||
pub const ShallowHashContext32 = struct {
|
||||
pub fn hash(self: @This(), t: Type) u32 {
|
||||
_ = self;
|
||||
return @truncate(u32, t.hashShallow());
|
||||
}
|
||||
pub fn eql(self: @This(), a: Type, b: Type, b_index: usize) bool {
|
||||
_ = self;
|
||||
_ = b_index;
|
||||
return a.eqlShallow(b);
|
||||
}
|
||||
};
|
||||
|
||||
/// Return the reference to any child type. Asserts the type is one of:
|
||||
/// - Vectors
|
||||
/// - Matrices
|
||||
/// - Images
|
||||
/// - SampledImages,
|
||||
/// - Arrays
|
||||
/// - RuntimeArrays
|
||||
/// - Pointers
|
||||
pub fn childType(self: Type) Ref {
|
||||
return switch (self.tag()) {
|
||||
.vector => self.payload(.vector).component_type,
|
||||
.matrix => self.payload(.matrix).column_type,
|
||||
.image => self.payload(.image).sampled_type,
|
||||
.sampled_image => self.payload(.sampled_image).image_type,
|
||||
.array => self.payload(.array).element_type,
|
||||
.runtime_array => self.payload(.runtime_array).element_type,
|
||||
.pointer => self.payload(.pointer).child_type,
|
||||
else => unreachable,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn isInt(self: Type) bool {
|
||||
return switch (self.tag()) {
|
||||
.u8,
|
||||
.u16,
|
||||
.u32,
|
||||
.u64,
|
||||
.i8,
|
||||
.i16,
|
||||
.i32,
|
||||
.i64,
|
||||
.int,
|
||||
=> true,
|
||||
else => false,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn isFloat(self: Type) bool {
|
||||
return switch (self.tag()) {
|
||||
.f16, .f32, .f64 => true,
|
||||
else => false,
|
||||
};
|
||||
}
|
||||
|
||||
/// Returns the number of bits that make up an int or float type.
|
||||
/// Asserts type is either int or float.
|
||||
pub fn intFloatBits(self: Type) u16 {
|
||||
return switch (self.tag()) {
|
||||
.u8, .i8 => 8,
|
||||
.u16, .i16, .f16 => 16,
|
||||
.u32, .i32, .f32 => 32,
|
||||
.u64, .i64, .f64 => 64,
|
||||
.int => self.payload(.int).width,
|
||||
else => unreachable,
|
||||
};
|
||||
}
|
||||
|
||||
/// Returns the signedness of an integer type.
|
||||
/// Asserts that the type is an int.
|
||||
pub fn intSignedness(self: Type) Signedness {
|
||||
return switch (self.tag()) {
|
||||
.u8, .u16, .u32, .u64 => .unsigned,
|
||||
.i8, .i16, .i32, .i64 => .signed,
|
||||
.int => self.payload(.int).signedness,
|
||||
else => unreachable,
|
||||
};
|
||||
}
|
||||
|
||||
pub const Tag = enum(usize) {
|
||||
void,
|
||||
bool,
|
||||
sampler,
|
||||
event,
|
||||
device_event,
|
||||
reserve_id,
|
||||
queue,
|
||||
pipe_storage,
|
||||
named_barrier,
|
||||
u8,
|
||||
u16,
|
||||
u32,
|
||||
u64,
|
||||
i8,
|
||||
i16,
|
||||
i32,
|
||||
i64,
|
||||
f16,
|
||||
f32,
|
||||
f64,
|
||||
|
||||
// After this, the tag requires a payload.
|
||||
int,
|
||||
vector,
|
||||
matrix,
|
||||
image,
|
||||
sampled_image,
|
||||
array,
|
||||
runtime_array,
|
||||
@"struct",
|
||||
@"opaque",
|
||||
pointer,
|
||||
function,
|
||||
pipe,
|
||||
|
||||
pub const last_no_payload_tag = Tag.f64;
|
||||
pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
|
||||
|
||||
pub fn Type(comptime t: Tag) type {
|
||||
return switch (t) {
|
||||
.void,
|
||||
.bool,
|
||||
.sampler,
|
||||
.event,
|
||||
.device_event,
|
||||
.reserve_id,
|
||||
.queue,
|
||||
.pipe_storage,
|
||||
.named_barrier,
|
||||
.u8,
|
||||
.u16,
|
||||
.u32,
|
||||
.u64,
|
||||
.i8,
|
||||
.i16,
|
||||
.i32,
|
||||
.i64,
|
||||
.f16,
|
||||
.f32,
|
||||
.f64,
|
||||
=> @compileError("Type Tag " ++ @tagName(t) ++ " has no payload"),
|
||||
.int => Payload.Int,
|
||||
.vector => Payload.Vector,
|
||||
.matrix => Payload.Matrix,
|
||||
.image => Payload.Image,
|
||||
.sampled_image => Payload.SampledImage,
|
||||
.array => Payload.Array,
|
||||
.runtime_array => Payload.RuntimeArray,
|
||||
.@"struct" => Payload.Struct,
|
||||
.@"opaque" => Payload.Opaque,
|
||||
.pointer => Payload.Pointer,
|
||||
.function => Payload.Function,
|
||||
.pipe => Payload.Pipe,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
pub const Payload = struct {
|
||||
tag: Tag,
|
||||
|
||||
pub const Int = struct {
|
||||
base: Payload = .{ .tag = .int },
|
||||
width: u16,
|
||||
signedness: Signedness,
|
||||
};
|
||||
|
||||
pub const Vector = struct {
|
||||
base: Payload = .{ .tag = .vector },
|
||||
component_type: Ref,
|
||||
component_count: u32,
|
||||
};
|
||||
|
||||
pub const Matrix = struct {
|
||||
base: Payload = .{ .tag = .matrix },
|
||||
column_type: Ref,
|
||||
column_count: u32,
|
||||
};
|
||||
|
||||
pub const Image = struct {
|
||||
base: Payload = .{ .tag = .image },
|
||||
sampled_type: Ref,
|
||||
dim: spec.Dim,
|
||||
depth: enum(u2) {
|
||||
no = 0,
|
||||
yes = 1,
|
||||
maybe = 2,
|
||||
},
|
||||
arrayed: bool,
|
||||
multisampled: bool,
|
||||
sampled: enum(u2) {
|
||||
known_at_runtime = 0,
|
||||
with_sampler = 1,
|
||||
without_sampler = 2,
|
||||
},
|
||||
format: spec.ImageFormat,
|
||||
access_qualifier: ?spec.AccessQualifier,
|
||||
};
|
||||
|
||||
pub const SampledImage = struct {
|
||||
base: Payload = .{ .tag = .sampled_image },
|
||||
image_type: Ref,
|
||||
};
|
||||
|
||||
pub const Array = struct {
|
||||
base: Payload = .{ .tag = .array },
|
||||
element_type: Ref,
|
||||
/// Note: Must be emitted as constant, not as literal!
|
||||
length: u32,
|
||||
/// Type has the 'ArrayStride' decoration.
|
||||
/// If zero, no stride is present.
|
||||
array_stride: u32 = 0,
|
||||
};
|
||||
|
||||
pub const RuntimeArray = struct {
|
||||
base: Payload = .{ .tag = .runtime_array },
|
||||
element_type: Ref,
|
||||
/// Type has the 'ArrayStride' decoration.
|
||||
/// If zero, no stride is present.
|
||||
array_stride: u32 = 0,
|
||||
};
|
||||
|
||||
pub const Struct = struct {
|
||||
base: Payload = .{ .tag = .@"struct" },
|
||||
members: []Member,
|
||||
name: []const u8 = "",
|
||||
decorations: StructDecorations = .{},
|
||||
|
||||
/// Extra information for decorations, packed for efficiency. Fields are stored sequentially by
|
||||
/// order of the `members` slice and `MemberDecorations` struct.
|
||||
member_decoration_extra: []u32 = &.{},
|
||||
|
||||
pub const Member = struct {
|
||||
ty: Ref,
|
||||
name: []const u8 = "",
|
||||
offset: MemberOffset = .none,
|
||||
decorations: MemberDecorations = .{},
|
||||
};
|
||||
|
||||
pub const MemberOffset = enum(u32) { none = 0xFFFF_FFFF, _ };
|
||||
|
||||
pub const StructDecorations = packed struct {
|
||||
/// Type has the 'Block' decoration.
|
||||
block: bool = false,
|
||||
/// Type has the 'BufferBlock' decoration.
|
||||
buffer_block: bool = false,
|
||||
/// Type has the 'GLSLShared' decoration.
|
||||
glsl_shared: bool = false,
|
||||
/// Type has the 'GLSLPacked' decoration.
|
||||
glsl_packed: bool = false,
|
||||
/// Type has the 'CPacked' decoration.
|
||||
c_packed: bool = false,
|
||||
};
|
||||
|
||||
pub const MemberDecorations = packed struct {
|
||||
/// Matrix layout for (arrays of) matrices. If this field is not .none,
|
||||
/// then there is also an extra field containing the matrix stride corresponding
|
||||
/// to the 'MatrixStride' decoration.
|
||||
matrix_layout: enum(u2) {
|
||||
/// Member has the 'RowMajor' decoration. The member type
|
||||
/// must be a matrix or an array of matrices.
|
||||
row_major,
|
||||
/// Member has the 'ColMajor' decoration. The member type
|
||||
/// must be a matrix or an array of matrices.
|
||||
col_major,
|
||||
/// Member is not a matrix or array of matrices.
|
||||
none,
|
||||
} = .none,
|
||||
|
||||
// Regular decorations, these do not imply extra fields.
|
||||
|
||||
/// Member has the 'NoPerspective' decoration.
|
||||
no_perspective: bool = false,
|
||||
/// Member has the 'Flat' decoration.
|
||||
flat: bool = false,
|
||||
/// Member has the 'Patch' decoration.
|
||||
patch: bool = false,
|
||||
/// Member has the 'Centroid' decoration.
|
||||
centroid: bool = false,
|
||||
/// Member has the 'Sample' decoration.
|
||||
sample: bool = false,
|
||||
/// Member has the 'Invariant' decoration.
|
||||
/// Note: requires parent struct to have 'Block'.
|
||||
invariant: bool = false,
|
||||
/// Member has the 'Volatile' decoration.
|
||||
@"volatile": bool = false,
|
||||
/// Member has the 'Coherent' decoration.
|
||||
coherent: bool = false,
|
||||
/// Member has the 'NonWritable' decoration.
|
||||
non_writable: bool = false,
|
||||
/// Member has the 'NonReadable' decoration.
|
||||
non_readable: bool = false,
|
||||
|
||||
// The following decorations all imply extra field(s).
|
||||
|
||||
/// Member has the 'BuiltIn' decoration.
|
||||
/// This decoration has an extra field of type `spec.BuiltIn`.
|
||||
/// Note: If any member of a struct has the BuiltIn decoration, all members must have one.
|
||||
/// Note: Each builtin may only be reachable once for a particular entry point.
|
||||
/// Note: The member type may be constrained by a particular built-in, defined in the client API specification.
|
||||
builtin: bool = false,
|
||||
/// Member has the 'Stream' decoration.
|
||||
/// This member has an extra field of type `u32`.
|
||||
stream: bool = false,
|
||||
/// Member has the 'Location' decoration.
|
||||
/// This member has an extra field of type `u32`.
|
||||
location: bool = false,
|
||||
/// Member has the 'Component' decoration.
|
||||
/// This member has an extra field of type `u32`.
|
||||
component: bool = false,
|
||||
/// Member has the 'XfbBuffer' decoration.
|
||||
/// This member has an extra field of type `u32`.
|
||||
xfb_buffer: bool = false,
|
||||
/// Member has the 'XfbStride' decoration.
|
||||
/// This member has an extra field of type `u32`.
|
||||
xfb_stride: bool = false,
|
||||
/// Member has the 'UserSemantic' decoration.
|
||||
/// This member has an extra field of type `[]u8`, which is encoded
|
||||
/// by an `u32` containing the number of chars exactly, and then the string padded to
|
||||
/// a multiple of 4 bytes with zeroes.
|
||||
user_semantic: bool = false,
|
||||
};
|
||||
};
|
||||
|
||||
pub const Opaque = struct {
|
||||
base: Payload = .{ .tag = .@"opaque" },
|
||||
name: []u8,
|
||||
};
|
||||
|
||||
pub const Pointer = struct {
|
||||
base: Payload = .{ .tag = .pointer },
|
||||
storage_class: spec.StorageClass,
|
||||
child_type: Ref,
|
||||
/// Type has the 'ArrayStride' decoration.
|
||||
/// This is valid for pointers to elements of an array.
|
||||
/// If zero, no stride is present.
|
||||
array_stride: u32 = 0,
|
||||
/// If nonzero, type has the 'Alignment' decoration.
|
||||
alignment: u32 = 0,
|
||||
/// Type has the 'MaxByteOffset' decoration.
|
||||
max_byte_offset: ?u32 = null,
|
||||
};
|
||||
|
||||
pub const Function = struct {
|
||||
base: Payload = .{ .tag = .function },
|
||||
return_type: Ref,
|
||||
parameters: []Ref,
|
||||
};
|
||||
|
||||
pub const Pipe = struct {
|
||||
base: Payload = .{ .tag = .pipe },
|
||||
qualifier: spec.AccessQualifier,
|
||||
};
|
||||
};
|