From de6df2bc12c1cec81bb3c562a9395098c92d8239 Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Sat, 15 May 2021 01:04:02 +0200 Subject: [PATCH 01/21] SPIR-V: Restructure codegen a bit --- src/codegen/spirv.zig | 64 ++++++++++++++++++++++++++----------------- src/link/SpirV.zig | 36 +++++++++++++++++++----- 2 files changed, 68 insertions(+), 32 deletions(-) diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 077e71d4e1..9ceaf107d8 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -6,6 +6,7 @@ const spec = @import("spirv/spec.zig"); const Module = @import("../Module.zig"); const Decl = Module.Decl; const Type = @import("../type.zig").Type; +const LazySrcLoc = Module.LazySrcLoc; pub const TypeMap = std.HashMap(Type, u32, Type.hash, Type.eql, std.hash_map.default_max_load_percentage); @@ -15,30 +16,24 @@ pub fn writeInstruction(code: *std.ArrayList(u32), instr: spec.Opcode, args: []c try code.appendSlice(args); } +/// This structure represents a SPIR-V binary module being compiled, and keeps track of relevant information +/// such as code for the different logical sections, and the next result-id. pub const SPIRVModule = struct { - next_result_id: u32 = 0, - - target: std.Target, - - types: TypeMap, - + next_result_id: u32, types_and_globals: std.ArrayList(u32), fn_decls: std.ArrayList(u32), - pub fn init(target: std.Target, allocator: *Allocator) SPIRVModule { + pub fn init(allocator: *Allocator) SPIRVModule { return .{ - .target = target, - .types = TypeMap.init(allocator), + .next_result_id = 0, .types_and_globals = std.ArrayList(u32).init(allocator), .fn_decls = std.ArrayList(u32).init(allocator), }; } pub fn deinit(self: *SPIRVModule) void { - self.fn_decls.deinit(); self.types_and_globals.deinit(); - self.types.deinit(); - self.* = undefined; + self.fn_decls.deinit(); } pub fn allocResultId(self: *SPIRVModule) u32 { @@ -49,21 +44,40 @@ pub const SPIRVModule = struct { pub fn resultIdBound(self: *SPIRVModule) u32 { return self.next_result_id; } +}; - pub fn getOrGenType(self: *SPIRVModule, t: Type) !u32 { +/// This structure is used to compile a declaration, and contains all relevant meta-information to deal with that. +pub const DeclGen = struct { + module: *Module, + spv: *SPIRVModule, + + types: TypeMap, + + decl: *Decl, + error_msg: ?*Module.ErrorMsg, + + fn fail(self: *DeclGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { + @setCold(true); + const src_loc = src.toSrcLocWithDecl(self.decl); + self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args); + return error.AnalysisFail; + } + + pub fn getOrGenType(self: *DeclGen, t: Type) !u32 { // We can't use getOrPut here so we can recursively generate types. if (self.types.get(t)) |already_generated| { return already_generated; } - const result = self.allocResultId(); + const result = self.spv.allocResultId(); switch (t.zigTypeTag()) { - .Void => try writeInstruction(&self.types_and_globals, .OpTypeVoid, &[_]u32{ result }), - .Bool => try writeInstruction(&self.types_and_globals, .OpTypeBool, &[_]u32{ result }), + .Void => try writeInstruction(&self.spv.types_and_globals, .OpTypeVoid, &[_]u32{ result }), + .Bool => try writeInstruction(&self.spv.types_and_globals, .OpTypeBool, &[_]u32{ result }), .Int => { - const int_info = t.intInfo(self.target); - try writeInstruction(&self.types_and_globals, .OpTypeInt, &[_]u32{ + const int_info = t.intInfo(self.module.getTarget()); + // TODO: Capabilities. 
+ try writeInstruction(&self.spv.types_and_globals, .OpTypeInt, &[_]u32{ result, int_info.bits, switch (int_info.signedness) { @@ -72,8 +86,8 @@ pub const SPIRVModule = struct { }, }); }, - // TODO: Verify that floatBits() will be correct. - .Float => try writeInstruction(&self.types_and_globals, .OpTypeFloat, &[_]u32{ result, t.floatBits(self.target) }), + // TODO: Capabilities. + .Float => try writeInstruction(&self.spv.types_and_globals, .OpTypeFloat, &[_]u32{ result, t.floatBits(self.module.getTarget()) }), .Null, .Undefined, .EnumLiteral, @@ -84,23 +98,23 @@ pub const SPIRVModule = struct { .BoundFn => unreachable, // this type will be deleted from the language. - else => return error.TODO, + else => |tag| return self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: implement type with tag {}", .{ tag }), } try self.types.put(t, result); return result; } - pub fn gen(self: *SPIRVModule, decl: *Decl) !void { - const typed_value = decl.typed_value.most_recent.typed_value; + pub fn gen(self: *DeclGen) !void { + const typed_value = self.decl.typed_value.most_recent.typed_value; switch (typed_value.ty.zigTypeTag()) { .Fn => { - log.debug("Generating code for function '{s}'", .{ std.mem.spanZ(decl.name) }); + log.debug("Generating code for function '{s}'", .{ std.mem.spanZ(self.decl.name) }); _ = try self.getOrGenType(typed_value.ty.fnReturnType()); }, - else => return error.TODO, + else => |tag| return self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: generate decl with tag {}", .{ tag }), } } }; diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 95c747c170..8d5feafa5a 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -118,8 +118,8 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void { const module = self.base.options.module.?; const target = comp.getTarget(); - var spirv_module = codegen.SPIRVModule.init(target, self.base.allocator); - defer spirv_module.deinit(); + var spv = codegen.SPIRVModule.init(self.base.allocator); + defer spv.deinit(); // Allocate an ID for every declaration before generating code, // so that we can access them before processing them. @@ -132,19 +132,41 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void { if (decl.typed_value != .most_recent) continue; - decl.fn_link.spirv.id = spirv_module.allocResultId(); + decl.fn_link.spirv.id = spv.allocResultId(); log.debug("Allocating id {} to '{s}'", .{ decl.fn_link.spirv.id, std.mem.spanZ(decl.name) }); } } // Now, actually generate the code for all declarations. { + // We are just going to re-use this same DeclGen for every Decl, and we are just going to + // change the decl. Otherwise, we would have to keep a separate `types`, and re-construct this + // structure every time. + var decl_gen = codegen.DeclGen{ + .module = module, + .spv = &spv, + .types = codegen.TypeMap.init(self.base.allocator), + .decl = undefined, + .error_msg = undefined, + }; + + defer decl_gen.types.deinit(); + for (module.decl_table.items()) |entry| { const decl = entry.value; if (decl.typed_value != .most_recent) continue; - try spirv_module.gen(decl); + decl_gen.decl = decl; + decl_gen.error_msg = null; + + decl_gen.gen() catch |err| switch (err) { + error.AnalysisFail => { + try module.failed_decls.put(module.gpa, decl, decl_gen.error_msg.?); + return; + }, + else => |e| return e, + }; } } @@ -155,7 +177,7 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void { spec.magic_number, (spec.version.major << 16) | (spec.version.minor << 8), 0, // TODO: Register Zig compiler magic number. 
- spirv_module.resultIdBound(), // ID bound. + spv.resultIdBound(), // ID bound. 0, // Schema (currently reserved for future use in the SPIR-V spec). }); @@ -166,8 +188,8 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void { // follows the SPIR-V logical module format! var all_buffers = [_]std.os.iovec_const{ wordsToIovConst(binary.items), - wordsToIovConst(spirv_module.types_and_globals.items), - wordsToIovConst(spirv_module.fn_decls.items), + wordsToIovConst(spv.types_and_globals.items), + wordsToIovConst(spv.fn_decls.items), }; const file = self.base.file.?; From 458c338aeb0ce72e772c19efcfc633f908ee91b3 Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Sat, 15 May 2021 01:38:39 +0200 Subject: [PATCH 02/21] SPIR-V: Compute backing integer bits --- src/codegen/spirv.zig | 45 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 9ceaf107d8..fd153533c8 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -2,6 +2,8 @@ const std = @import("std"); const Allocator = std.mem.Allocator; const log = std.log.scoped(.codegen); +const Target = std.Target; + const spec = @import("spirv/spec.zig"); const Module = @import("../Module.zig"); const Decl = Module.Decl; @@ -63,6 +65,43 @@ pub const DeclGen = struct { return error.AnalysisFail; } + /// SPIR-V requires enabling specific integer sizes through capabilities, and so if they are not enabled, we need + /// to emulate them in other instructions/types. This function returns, given an integer bit width (signed or unsigned, sign + /// included), the width of the underlying type which represents it, given the enabled features for the current target. + /// If the result is `null`, the largest type the target platform supports natively is not able to perform computations using + /// that size. In this case, multiple elements of the largest type should be used. + /// The backing type will be chosen as the smallest supported integer larger or equal to it in number of bits. + /// The result is valid to be used with OpTypeInt. + /// TODO: The extension SPV_INTEL_arbitrary_precision_integers allows any integer size (at least up to 32 bits). + /// TODO: This probably needs an ABI-version as well (especially in combination with SPV_INTEL_arbitrary_precision_integers). + fn backingIntBits(self: *DeclGen, bits: u32) ?u32 { + // TODO: Figure out what to do with u0/i0. + std.debug.assert(bits != 0); + + const target = self.module.getTarget(); + + // 8, 16 and 64-bit integers require the Int8, Int16 and Inr64 capabilities respectively. + const ints = [_]struct{ bits: u32, feature: ?Target.spirv.Feature } { + .{ .bits = 8, .feature = .Int8 }, + .{ .bits = 16, .feature = .Int16 }, + .{ .bits = 32, .feature = null }, + .{ .bits = 64, .feature = .Int64 }, + }; + + for (ints) |int| { + const has_feature = if (int.feature) |feature| + Target.spirv.featureSetHas(target.cpu.features, feature) + else + true; + + if (bits <= int.bits and has_feature) { + return int.bits; + } + } + + return null; + } + pub fn getOrGenType(self: *DeclGen, t: Type) !u32 { // We can't use getOrPut here so we can recursively generate types. if (self.types.get(t)) |already_generated| { @@ -76,10 +115,12 @@ pub const DeclGen = struct { .Bool => try writeInstruction(&self.spv.types_and_globals, .OpTypeBool, &[_]u32{ result }), .Int => { const int_info = t.intInfo(self.module.getTarget()); - // TODO: Capabilities. 
+ const backing_bits = self.backingIntBits(int_info.bits) orelse + return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: implement fallback for integer of {} bits", .{ int_info.bits }); + try writeInstruction(&self.spv.types_and_globals, .OpTypeInt, &[_]u32{ result, - int_info.bits, + backing_bits, switch (int_info.signedness) { .unsigned => 0, .signed => 1, From 38cdfebad3889853e5393b9f7b63e3c85e9d793f Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Sat, 15 May 2021 02:22:12 +0200 Subject: [PATCH 03/21] SPIR-V: Function prototype generation --- src/codegen/spirv.zig | 85 ++++++++++++++++++++++++++++++------------- 1 file changed, 60 insertions(+), 25 deletions(-) diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index fd153533c8..076cce4a89 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -12,9 +12,13 @@ const LazySrcLoc = Module.LazySrcLoc; pub const TypeMap = std.HashMap(Type, u32, Type.hash, Type.eql, std.hash_map.default_max_load_percentage); -pub fn writeInstruction(code: *std.ArrayList(u32), instr: spec.Opcode, args: []const u32) !void { - const word_count = @intCast(u32, args.len + 1); - try code.append((word_count << 16) | @enumToInt(instr)); +pub fn writeOpcode(code: *std.ArrayList(u32), opcode: spec.Opcode, arg_count: u32) !void { + const word_count = arg_count + 1; + try code.append((word_count << 16) | @enumToInt(opcode)); +} + +pub fn writeInstruction(code: *std.ArrayList(u32), opcode: spec.Opcode, args: []const u32) !void { + try writeOpcode(code, opcode, @intCast(u32, args.len)); try code.appendSlice(args); } @@ -58,7 +62,12 @@ pub const DeclGen = struct { decl: *Decl, error_msg: ?*Module.ErrorMsg, - fn fail(self: *DeclGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { + const Error = error{ + AnalysisFail, + OutOfMemory + }; + + fn fail(self: *DeclGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) Error { @setCold(true); const src_loc = src.toSrcLocWithDecl(self.decl); self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args); @@ -102,24 +111,25 @@ pub const DeclGen = struct { return null; } - pub fn getOrGenType(self: *DeclGen, t: Type) !u32 { + fn getOrGenType(self: *DeclGen, ty: Type) Error!u32 { // We can't use getOrPut here so we can recursively generate types. 
- if (self.types.get(t)) |already_generated| { + if (self.types.get(ty)) |already_generated| { return already_generated; } - const result = self.spv.allocResultId(); + const code = &self.spv.types_and_globals; + const result_id = self.spv.allocResultId(); - switch (t.zigTypeTag()) { - .Void => try writeInstruction(&self.spv.types_and_globals, .OpTypeVoid, &[_]u32{ result }), - .Bool => try writeInstruction(&self.spv.types_and_globals, .OpTypeBool, &[_]u32{ result }), + switch (ty.zigTypeTag()) { + .Void => try writeInstruction(code, .OpTypeVoid, &[_]u32{ result_id }), + .Bool => try writeInstruction(code, .OpTypeBool, &[_]u32{ result_id }), .Int => { - const int_info = t.intInfo(self.module.getTarget()); + const int_info = ty.intInfo(self.module.getTarget()); const backing_bits = self.backingIntBits(int_info.bits) orelse return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: implement fallback for integer of {} bits", .{ int_info.bits }); - try writeInstruction(&self.spv.types_and_globals, .OpTypeInt, &[_]u32{ - result, + try writeInstruction(code, .OpTypeInt, &[_]u32{ + result_id, backing_bits, switch (int_info.signedness) { .unsigned => 0, @@ -128,7 +138,34 @@ pub const DeclGen = struct { }); }, // TODO: Capabilities. - .Float => try writeInstruction(&self.spv.types_and_globals, .OpTypeFloat, &[_]u32{ result, t.floatBits(self.module.getTarget()) }), + .Float => try writeInstruction(code, .OpTypeFloat, &[_]u32{ result_id, ty.floatBits(self.module.getTarget()) }), + .Fn => { + // We only support zig-calling-convention functions, no varargs. + if (ty.fnCallingConvention() != .Unspecified) + return self.fail(.{.node_offset = 0}, "Invalid calling convention for SPIR-V", .{}); + if (ty.fnIsVarArgs()) + return self.fail(.{.node_offset = 0}, "VarArgs are not supported for SPIR-V", .{}); + + // In order to avoid a temporary here, first generate all the required types and then simply look them up + // when generating the function type. + const params = ty.fnParamLen(); + var i: usize = 0; + while (i < params) : (i += 1) { + _ = try self.getOrGenType(ty.fnParamType(i)); + } + + const return_type_id = try self.getOrGenType(ty.fnReturnType()); + + // result id + result type id + parameter type ids. + try writeOpcode(code, .OpTypeFunction, 2 + @intCast(u32, ty.fnParamLen()) ); + try code.appendSlice(&.{ result_id, return_type_id }); + + i = 0; + while (i < params) : (i += 1) { + const param_type_id = self.types.get(ty.fnParamType(i)).?; + try code.append(param_type_id); + } + }, .Null, .Undefined, .EnumLiteral, @@ -139,23 +176,21 @@ pub const DeclGen = struct { .BoundFn => unreachable, // this type will be deleted from the language. 
- else => |tag| return self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: implement type with tag {}", .{ tag }), + else => |tag| return self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: implement type {}", .{ tag }), } - try self.types.put(t, result); - return result; + try self.types.put(ty, result_id); + return result_id; } pub fn gen(self: *DeclGen) !void { - const typed_value = self.decl.typed_value.most_recent.typed_value; + const tv = self.decl.typed_value.most_recent.typed_value; - switch (typed_value.ty.zigTypeTag()) { - .Fn => { - log.debug("Generating code for function '{s}'", .{ std.mem.spanZ(self.decl.name) }); - - _ = try self.getOrGenType(typed_value.ty.fnReturnType()); - }, - else => |tag| return self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: generate decl with tag {}", .{ tag }), + if (tv.val.castTag(.function)) |func_payload| { + std.debug.assert(tv.ty.zigTypeTag() == .Fn); + _ = try self.getOrGenType(tv.ty); + } else { + return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: generate decl type {}", .{ tv.ty.zigTypeTag() }); } } }; From 4403f3598a97d93466c62a91cf3a5aacc6d113a6 Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Sat, 15 May 2021 02:39:58 +0200 Subject: [PATCH 04/21] SPIR-V: Proper floating point type generation --- src/codegen/spirv.zig | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 076cce4a89..b297b1181c 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -117,6 +117,7 @@ pub const DeclGen = struct { return already_generated; } + const target = self.module.getTarget(); const code = &self.spv.types_and_globals; const result_id = self.spv.allocResultId(); @@ -126,7 +127,7 @@ pub const DeclGen = struct { .Int => { const int_info = ty.intInfo(self.module.getTarget()); const backing_bits = self.backingIntBits(int_info.bits) orelse - return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: implement fallback for integer of {} bits", .{ int_info.bits }); + return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: implement fallback for {}", .{ ty }); try writeInstruction(code, .OpTypeInt, &[_]u32{ result_id, @@ -137,14 +138,32 @@ pub const DeclGen = struct { }, }); }, - // TODO: Capabilities. - .Float => try writeInstruction(code, .OpTypeFloat, &[_]u32{ result_id, ty.floatBits(self.module.getTarget()) }), + .Float => { + // We can (and want) not really emulate floating points with other floating point types like with the integer types, + // so if the float is not supported, just return an error. + const bits = ty.floatBits(target); + const supported = switch (bits) { + 16 => Target.spirv.featureSetHas(target.cpu.features, .Float16), + 32 => true, + 64 => Target.spirv.featureSetHas(target.cpu.features, .Float64), + else => false + }; + + if (!supported) { + return self.fail(.{.node_offset = 0}, "Floating point width of {} bits is not supported for the current SPIR-V feature set", .{ bits }); + } + + try writeInstruction(code, .OpTypeFloat, &.{ + result_id, + bits + }); + }, .Fn => { // We only support zig-calling-convention functions, no varargs. 
if (ty.fnCallingConvention() != .Unspecified) - return self.fail(.{.node_offset = 0}, "Invalid calling convention for SPIR-V", .{}); + return self.fail(.{.node_offset = 0}, "Unsupported calling convention for SPIR-V", .{}); if (ty.fnIsVarArgs()) - return self.fail(.{.node_offset = 0}, "VarArgs are not supported for SPIR-V", .{}); + return self.fail(.{.node_offset = 0}, "VarArgs unsupported for SPIR-V", .{}); // In order to avoid a temporary here, first generate all the required types and then simply look them up // when generating the function type. From 074cb9f1daff0f9d8c3bc5736b8990aa53eb8226 Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Sat, 15 May 2021 03:14:12 +0200 Subject: [PATCH 05/21] SPIR-V: OpFunction/OpFunctionEnd generation --- src/codegen/spirv.zig | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index b297b1181c..db4ddbd657 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -31,7 +31,7 @@ pub const SPIRVModule = struct { pub fn init(allocator: *Allocator) SPIRVModule { return .{ - .next_result_id = 0, + .next_result_id = 1, // 0 is an invalid SPIR-V result ID. .types_and_globals = std.ArrayList(u32).init(allocator), .fn_decls = std.ArrayList(u32).init(allocator), }; @@ -146,17 +146,14 @@ pub const DeclGen = struct { 16 => Target.spirv.featureSetHas(target.cpu.features, .Float16), 32 => true, 64 => Target.spirv.featureSetHas(target.cpu.features, .Float64), - else => false + else => false, }; if (!supported) { return self.fail(.{.node_offset = 0}, "Floating point width of {} bits is not supported for the current SPIR-V feature set", .{ bits }); } - try writeInstruction(code, .OpTypeFloat, &.{ - result_id, - bits - }); + try writeInstruction(code, .OpTypeFloat, &[_]u32{ result_id, bits }); }, .Fn => { // We only support zig-calling-convention functions, no varargs. @@ -195,7 +192,7 @@ pub const DeclGen = struct { .BoundFn => unreachable, // this type will be deleted from the language. - else => |tag| return self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: implement type {}", .{ tag }), + else => |tag| return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: implement type {}", .{ tag }), } try self.types.put(ty, result_id); @@ -203,11 +200,23 @@ pub const DeclGen = struct { } pub fn gen(self: *DeclGen) !void { + const result_id = self.decl.fn_link.spirv.id; const tv = self.decl.typed_value.most_recent.typed_value; if (tv.val.castTag(.function)) |func_payload| { std.debug.assert(tv.ty.zigTypeTag() == .Fn); - _ = try self.getOrGenType(tv.ty); + const prototype_id = try self.getOrGenType(tv.ty); + try writeInstruction(&self.spv.fn_decls, .OpFunction, &[_]u32{ + self.types.get(tv.ty.fnReturnType()).?, // This type should be generated along with the prototype. + result_id, + @bitCast(u32, spec.FunctionControl{}), // TODO: We can set inline here if the type requires it. 
+ prototype_id, + }); + + // TODO: Parameters + // TODO: Body + + try writeInstruction(&self.spv.fn_decls, .OpFunctionEnd, &[_]u32{}); } else { return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: generate decl type {}", .{ tv.ty.zigTypeTag() }); } From da0cc732ea899d2284200faf54c3c12e8c798b7f Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Sat, 15 May 2021 03:27:59 +0200 Subject: [PATCH 06/21] SPIR-V: Function parameter generation --- src/codegen/spirv.zig | 13 ++++++++++++- src/link/SpirV.zig | 5 ++++- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index db4ddbd657..a6b8216774 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -57,6 +57,7 @@ pub const DeclGen = struct { module: *Module, spv: *SPIRVModule, + args: std.ArrayList(u32), types: TypeMap, decl: *Decl, @@ -213,7 +214,17 @@ pub const DeclGen = struct { prototype_id, }); - // TODO: Parameters + const params = tv.ty.fnParamLen(); + var i: usize = 0; + + try self.args.ensureCapacity(params); + while (i < params) : (i += 1) { + const param_type_id = self.types.get(tv.ty.fnParamType(i)).?; + const arg_result_id = self.spv.allocResultId(); + try writeInstruction(&self.spv.fn_decls, .OpFunctionParameter, &[_]u32{ param_type_id, arg_result_id }); + self.args.appendAssumeCapacity(arg_result_id); + } + // TODO: Body try writeInstruction(&self.spv.fn_decls, .OpFunctionEnd, &[_]u32{}); diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 8d5feafa5a..8bc0d9fe9f 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -140,23 +140,26 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void { // Now, actually generate the code for all declarations. { // We are just going to re-use this same DeclGen for every Decl, and we are just going to - // change the decl. Otherwise, we would have to keep a separate `types`, and re-construct this + // change the decl. Otherwise, we would have to keep a separate `args` and `types`, and re-construct this // structure every time. 
var decl_gen = codegen.DeclGen{ .module = module, .spv = &spv, + .args = std.ArrayList(u32).init(self.base.allocator), .types = codegen.TypeMap.init(self.base.allocator), .decl = undefined, .error_msg = undefined, }; defer decl_gen.types.deinit(); + defer decl_gen.args.deinit(); for (module.decl_table.items()) |entry| { const decl = entry.value; if (decl.typed_value != .most_recent) continue; + decl_gen.args.items.len = 0; decl_gen.decl = decl; decl_gen.error_msg = null; From cbf5280f54509e7aa58d8fd14258274a12efeee1 Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Sat, 15 May 2021 09:43:57 +0200 Subject: [PATCH 07/21] SPIR-V: Some instructions + constant generation setup --- src/codegen/spirv.zig | 150 ++++++++++++++++++++++++++++++++++++------ src/link/SpirV.zig | 6 +- 2 files changed, 135 insertions(+), 21 deletions(-) diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index a6b8216774..8be42627ff 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -1,16 +1,19 @@ const std = @import("std"); const Allocator = std.mem.Allocator; -const log = std.log.scoped(.codegen); - const Target = std.Target; +const log = std.log.scoped(.codegen); const spec = @import("spirv/spec.zig"); const Module = @import("../Module.zig"); const Decl = Module.Decl; const Type = @import("../type.zig").Type; +const Value = @import("../value.zig").Value; const LazySrcLoc = Module.LazySrcLoc; +const ir = @import("../ir.zig"); +const Inst = ir.Inst; pub const TypeMap = std.HashMap(Type, u32, Type.hash, Type.eql, std.hash_map.default_max_load_percentage); +pub const ValueMap = std.AutoHashMap(*Inst, u32); pub fn writeOpcode(code: *std.ArrayList(u32), opcode: spec.Opcode, arg_count: u32) !void { const word_count = arg_count + 1; @@ -26,19 +29,19 @@ pub fn writeInstruction(code: *std.ArrayList(u32), opcode: spec.Opcode, args: [] /// such as code for the different logical sections, and the next result-id. pub const SPIRVModule = struct { next_result_id: u32, - types_and_globals: std.ArrayList(u32), + types_globals_constants: std.ArrayList(u32), fn_decls: std.ArrayList(u32), pub fn init(allocator: *Allocator) SPIRVModule { return .{ .next_result_id = 1, // 0 is an invalid SPIR-V result ID. - .types_and_globals = std.ArrayList(u32).init(allocator), + .types_globals_constants = std.ArrayList(u32).init(allocator), .fn_decls = std.ArrayList(u32).init(allocator), }; } pub fn deinit(self: *SPIRVModule) void { - self.types_and_globals.deinit(); + self.types_globals_constants.deinit(); self.fn_decls.deinit(); } @@ -58,7 +61,10 @@ pub const DeclGen = struct { spv: *SPIRVModule, args: std.ArrayList(u32), + next_arg_index: u32, + types: TypeMap, + values: ValueMap, decl: *Decl, error_msg: ?*Module.ErrorMsg, @@ -75,6 +81,14 @@ pub const DeclGen = struct { return error.AnalysisFail; } + fn resolve(self: *DeclGen, inst: *Inst) !u32 { + if (inst.value()) |val| { + return self.genConstant(inst.ty, val); + } + + return self.values.get(inst).?; // Instruction does not dominate all uses! + } + /// SPIR-V requires enabling specific integer sizes through capabilities, and so if they are not enabled, we need /// to emulate them in other instructions/types. This function returns, given an integer bit width (signed or unsigned, sign /// included), the width of the underlying type which represents it, given the enabled features for the current target. @@ -82,13 +96,16 @@ pub const DeclGen = struct { /// that size. In this case, multiple elements of the largest type should be used. 
/// The backing type will be chosen as the smallest supported integer larger or equal to it in number of bits. /// The result is valid to be used with OpTypeInt. + /// asserts `ty` is an integer. /// TODO: The extension SPV_INTEL_arbitrary_precision_integers allows any integer size (at least up to 32 bits). /// TODO: This probably needs an ABI-version as well (especially in combination with SPV_INTEL_arbitrary_precision_integers). - fn backingIntBits(self: *DeclGen, bits: u32) ?u32 { - // TODO: Figure out what to do with u0/i0. - std.debug.assert(bits != 0); - + /// TODO: Should the result of this function be cached? + fn backingIntBits(self: *DeclGen, ty: Type) ?u32 { const target = self.module.getTarget(); + const int_info = ty.intInfo(target); + + // TODO: Figure out what to do with u0/i0. + std.debug.assert(int_info.bits != 0); // 8, 16 and 64-bit integers require the Int8, Int16 and Inr64 capabilities respectively. const ints = [_]struct{ bits: u32, feature: ?Target.spirv.Feature } { @@ -104,7 +121,7 @@ pub const DeclGen = struct { else true; - if (bits <= int.bits and has_feature) { + if (int_info.bits <= int.bits and has_feature) { return int.bits; } } @@ -112,6 +129,43 @@ pub const DeclGen = struct { return null; } + /// Return the amount of bits in the largest supported integer type. This is either 32 (always supported), or 64 (if + /// the Int64 capability is enabled). + /// Note: The extension SPV_INTEL_arbitrary_precision_integers allows any integer size (at least up to 32 bits). + /// In theory that could also be used, but since the spec says that it only guarantees support up to 32-bit ints there + /// is no way of knowing whether those are actually supported. + /// TODO: Maybe this should be cached? + fn largestSupportedIntBits(self: *DeclGen) u32 { + const target = self.module.getTarget(); + return if (Target.spirv.featureSetHas(target.cpu.features, .Int64)) + 64 + else + 32; + } + + /// Generate a constant representing `val`. + /// TODO: Deduplication? + fn genConstant(self: *DeclGen, ty: Type, val: Value) Error!u32 { + const code = &self.spv.types_globals_constants; + const result_id = self.spv.allocResultId(); + const result_type_id = try self.getOrGenType(ty); + + if (val.isUndef()) { + try writeInstruction(code, .OpUndef, &[_]u32{ result_type_id, result_id }); + return result_id; + } + + switch (ty.zigTypeTag()) { + .Bool => { + const opcode: spec.Opcode = if (val.toBool()) .OpConstantTrue else .OpConstantFalse; + try writeInstruction(code, opcode, &[_]u32{ result_type_id, result_id }); + }, + else => return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: constant generation of type {s}\n", .{ ty.zigTypeTag() }), + } + + return result_id; + } + fn getOrGenType(self: *DeclGen, ty: Type) Error!u32 { // We can't use getOrPut here so we can recursively generate types. 
if (self.types.get(ty)) |already_generated| { @@ -119,24 +173,21 @@ pub const DeclGen = struct { } const target = self.module.getTarget(); - const code = &self.spv.types_and_globals; + const code = &self.spv.types_globals_constants; const result_id = self.spv.allocResultId(); switch (ty.zigTypeTag()) { .Void => try writeInstruction(code, .OpTypeVoid, &[_]u32{ result_id }), .Bool => try writeInstruction(code, .OpTypeBool, &[_]u32{ result_id }), .Int => { - const int_info = ty.intInfo(self.module.getTarget()); - const backing_bits = self.backingIntBits(int_info.bits) orelse + const backing_bits = self.backingIntBits(ty) orelse return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: implement fallback for {}", .{ ty }); + // TODO: If backing_bits != int_info.bits, a duplicate type might be generated here. try writeInstruction(code, .OpTypeInt, &[_]u32{ result_id, backing_bits, - switch (int_info.signedness) { - .unsigned => 0, - .signed => 1, - }, + @boolToInt(ty.isSignedInt()), }); }, .Float => { @@ -183,6 +234,15 @@ pub const DeclGen = struct { try code.append(param_type_id); } }, + .Vector => { + // Although not 100% the same, Zig vectors map quite neatly to SPIR-V vectors (including many integer and float operations + // which work on them), so simply use those. + // Note: SPIR-V vectors only support bools, ints and floats, so pointer vectors need to be supported another way. + // "big integers" (larger than the largest supported native type) can probably be represented by an array of vectors. + + // TODO: Vectors are not yet supported by the self-hosted compiler itself it seems. + return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: implement type Vector", .{}); + }, .Null, .Undefined, .EnumLiteral, @@ -193,10 +253,10 @@ pub const DeclGen = struct { .BoundFn => unreachable, // this type will be deleted from the language. - else => |tag| return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: implement type {}", .{ tag }), + else => |tag| return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: implement type {}s", .{ tag }), } - try self.types.put(ty, result_id); + try self.types.putNoClobber(ty, result_id); return result_id; } @@ -225,11 +285,61 @@ pub const DeclGen = struct { self.args.appendAssumeCapacity(arg_result_id); } - // TODO: Body + // TODO: This could probably be done in a better way... + const root_block_id = self.spv.allocResultId(); + _ = try writeInstruction(&self.spv.fn_decls, .OpLabel, &[_]u32{root_block_id}); + try self.genBody(func_payload.data.body); try writeInstruction(&self.spv.fn_decls, .OpFunctionEnd, &[_]u32{}); } else { return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: generate decl type {}", .{ tv.ty.zigTypeTag() }); } } + + fn genBody(self: *DeclGen, body: ir.Body) !void { + for (body.instructions) |inst| { + const maybe_result_id = try self.genInst(inst); + if (maybe_result_id) |result_id| + try self.values.putNoClobber(inst, result_id); + } + } + + fn genInst(self: *DeclGen, inst: *Inst) !?u32 { + return switch (inst.tag) { + .arg => self.genArg(), + // TODO: Breakpoints won't be supported in SPIR-V, but the compiler seems to insert them + // throughout the IR. + .breakpoint => null, + // TODO: What does this entail? 
+ .dbg_stmt => null, + .ret => self.genRet(inst.castTag(.ret).?), + .retvoid => self.genRetVoid(), + .unreach => self.genUnreach(), + else => self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: implement inst {}", .{inst.tag}), + }; + } + + fn genArg(self: *DeclGen) u32 { + defer self.next_arg_index += 1; + return self.args.items[self.next_arg_index]; + } + + fn genRet(self: *DeclGen, inst: *Inst.UnOp) !?u32 { + const operand_id = try self.resolve(inst.operand); + // TODO: This instruction needs to be the last in a block. Is that guaranteed? + try writeInstruction(&self.spv.fn_decls, .OpReturnValue, &[_]u32{ operand_id }); + return null; + } + + fn genRetVoid(self: *DeclGen) !?u32 { + // TODO: This instruction needs to be the last in a block. Is that guaranteed? + try writeInstruction(&self.spv.fn_decls, .OpReturn, &[_]u32{}); + return null; + } + + fn genUnreach(self: *DeclGen) !?u32 { + // TODO: This instruction needs to be the last in a block. Is that guaranteed? + try writeInstruction(&self.spv.fn_decls, .OpUnreachable, &[_]u32{}); + return null; + } }; diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 8bc0d9fe9f..96ed2d3e77 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -146,11 +146,14 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void { .module = module, .spv = &spv, .args = std.ArrayList(u32).init(self.base.allocator), + .next_arg_index = undefined, .types = codegen.TypeMap.init(self.base.allocator), + .values = codegen.ValueMap.init(self.base.allocator), .decl = undefined, .error_msg = undefined, }; + defer decl_gen.values.deinit(); defer decl_gen.types.deinit(); defer decl_gen.args.deinit(); @@ -160,6 +163,7 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void { continue; decl_gen.args.items.len = 0; + decl_gen.next_arg_index = 0; decl_gen.decl = decl; decl_gen.error_msg = null; @@ -191,7 +195,7 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void { // follows the SPIR-V logical module format! var all_buffers = [_]std.os.iovec_const{ wordsToIovConst(binary.items), - wordsToIovConst(spv.types_and_globals.items), + wordsToIovConst(spv.types_globals_constants.items), wordsToIovConst(spv.fn_decls.items), }; From ae2e21639a958b0bc8c085c8eac6733aaa933457 Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Sat, 15 May 2021 14:04:41 +0200 Subject: [PATCH 08/21] SPIR-V: Some initial floating point constant generation --- src/codegen/spirv.zig | 47 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 43 insertions(+), 4 deletions(-) diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 8be42627ff..f4f437a064 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -143,6 +143,13 @@ pub const DeclGen = struct { 32; } + /// Checks whether the type is "composite int", an integer consisting of multiple native integers. These are represented by + /// arrays of largestSupportedIntBits(). + /// Asserts `ty` is an integer. + fn isCompositeInt(self: *DeclGen, ty: Type) bool { + return self.backingIntBits(ty) == null; + } + /// Generate a constant representing `val`. /// TODO: Deduplication? 
fn genConstant(self: *DeclGen, ty: Type, val: Value) Error!u32 { @@ -160,6 +167,37 @@ pub const DeclGen = struct { const opcode: spec.Opcode = if (val.toBool()) .OpConstantTrue else .OpConstantFalse; try writeInstruction(code, opcode, &[_]u32{ result_type_id, result_id }); }, + .Float => { + // At this point we are guaranteed that the target floating point type is supported, otherwise the function + // would have exited at getOrGenType(ty). + + // f16 and f32 require one word of storage. f64 requires 2, low-order first. + + switch (val.tag()) { + .float_16 => try writeInstruction(code, .OpConstant, &[_]u32{ + result_type_id, + result_id, + @bitCast(u16, val.castTag(.float_16).?.data) + }), + .float_32 => try writeInstruction(code, .OpConstant, &[_]u32{ + result_type_id, + result_id, + @bitCast(u32, val.castTag(.float_32).?.data) + }), + .float_64 => { + const float_bits = @bitCast(u64, val.castTag(.float_64).?.data); + try writeInstruction(code, .OpConstant, &[_]u32{ + result_type_id, + result_id, + @truncate(u32, float_bits), + @truncate(u32, float_bits >> 32), + }); + }, + .float_128 => unreachable, // Filtered out in the call to getOrGenType. + // TODO: What tags do we need to handle here anyway? + else => return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: float constant generation of value {s}\n", .{ val.tag() }), + } + }, else => return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: constant generation of type {s}\n", .{ ty.zigTypeTag() }), } @@ -180,8 +218,10 @@ pub const DeclGen = struct { .Void => try writeInstruction(code, .OpTypeVoid, &[_]u32{ result_id }), .Bool => try writeInstruction(code, .OpTypeBool, &[_]u32{ result_id }), .Int => { - const backing_bits = self.backingIntBits(ty) orelse - return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: implement fallback for {}", .{ ty }); + const backing_bits = self.backingIntBits(ty) orelse { + // Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits. + return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: implement composite ints {}", .{ ty }); + }; // TODO: If backing_bits != int_info.bits, a duplicate type might be generated here. try writeInstruction(code, .OpTypeInt, &[_]u32{ @@ -238,7 +278,7 @@ pub const DeclGen = struct { // Although not 100% the same, Zig vectors map quite neatly to SPIR-V vectors (including many integer and float operations // which work on them), so simply use those. // Note: SPIR-V vectors only support bools, ints and floats, so pointer vectors need to be supported another way. - // "big integers" (larger than the largest supported native type) can probably be represented by an array of vectors. + // "composite integers" (larger than the largest supported native type) can probably be represented by an array of vectors. // TODO: Vectors are not yet supported by the self-hosted compiler itself it seems. return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: implement type Vector", .{}); @@ -310,7 +350,6 @@ pub const DeclGen = struct { // TODO: Breakpoints won't be supported in SPIR-V, but the compiler seems to insert them // throughout the IR. .breakpoint => null, - // TODO: What does this entail? 
.dbg_stmt => null, .ret => self.genRet(inst.castTag(.ret).?), .retvoid => self.genRetVoid(), From 10678af8768a8d8cad7640837d0ec354dc8c07bc Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Sun, 16 May 2021 13:09:32 +0200 Subject: [PATCH 09/21] SPIR-V: genBinOp setup --- src/codegen/spirv.zig | 118 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 111 insertions(+), 7 deletions(-) diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index f4f437a064..3272de47ae 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -74,6 +74,44 @@ pub const DeclGen = struct { OutOfMemory }; + /// This structure is used to return information about a type typically used for arithmetic operations. + /// These types may either be integers, floats, or a vector of these. Most scalar operations also work on vectors, + /// so we can easily represent those as arithmetic types. + /// If the type is a scalar, 'inner type' refers to the scalar type. Otherwise, if its a vector, it refers + /// to the vector's element type. + const ArithmeticTypeInfo = struct { + /// A classification of the inner type. + const Class = enum { + /// A regular, **native**, integer operation. + /// This is only returned when the backend supports this int as a native type (when + /// the relevant capability is enabled). + integer, + + /// A regular float. These are all required to be natively supported. Floating points for + /// which the relevant capability is not enabled are not emulated. + float, + + /// An integer of a 'strange' size (which' bit size is not the same as its backing type. **Note**: this + /// may **also** include power-of-2 integers for which the relevant capability is not enabled), but still + /// within the limits of the largest natively supported integer type. + strange_integer, + + /// An integer with more bits than the largest natively supported integer type. + composite_integer, + }; + + /// The number of bits in the inner type. + /// Note: this is the actual number of bits of the type, not the size of the backing integer. + bits: u32, + + /// Whether the type is a vector. + is_vector: bool, + + /// A classification of the inner type. These four scenarios + /// will all have to be handled slightly different. + class: Class, + }; + fn fail(self: *DeclGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) Error { @setCold(true); const src_loc = src.toSrcLocWithDecl(self.decl); @@ -96,16 +134,14 @@ pub const DeclGen = struct { /// that size. In this case, multiple elements of the largest type should be used. /// The backing type will be chosen as the smallest supported integer larger or equal to it in number of bits. /// The result is valid to be used with OpTypeInt. - /// asserts `ty` is an integer. /// TODO: The extension SPV_INTEL_arbitrary_precision_integers allows any integer size (at least up to 32 bits). /// TODO: This probably needs an ABI-version as well (especially in combination with SPV_INTEL_arbitrary_precision_integers). /// TODO: Should the result of this function be cached? - fn backingIntBits(self: *DeclGen, ty: Type) ?u32 { + fn backingIntBits(self: *DeclGen, bits: u32) ?u32 { const target = self.module.getTarget(); - const int_info = ty.intInfo(target); // TODO: Figure out what to do with u0/i0. - std.debug.assert(int_info.bits != 0); + std.debug.assert(bits != 0); // 8, 16 and 64-bit integers require the Int8, Int16 and Inr64 capabilities respectively. 
const ints = [_]struct{ bits: u32, feature: ?Target.spirv.Feature } { @@ -121,7 +157,7 @@ pub const DeclGen = struct { else true; - if (int_info.bits <= int.bits and has_feature) { + if (bits <= int.bits and has_feature) { return int.bits; } } @@ -150,6 +186,34 @@ pub const DeclGen = struct { return self.backingIntBits(ty) == null; } + fn arithmeticTypeInfo(self: *DeclGen, ty: Type) !ArithmeticTypeInfo { + const target = self.module.getTarget(); + + return switch (ty.zigTypeTag()) { + .Float => ArithmeticTypeInfo{ .bits = ty.floatBits(target), .is_vector = false, .class = .float }, + .Int => blk: { + const int_info = ty.intInfo(target); + // TODO: Maybe it's useful to also return this value. + const maybe_backing_bits = self.backingIntBits(int_info.bits); + break :blk ArithmeticTypeInfo{ + .bits = int_info.bits, + .is_vector = false, + .class = if (maybe_backing_bits) |backing_bits| + if (backing_bits == int_info.bits) + ArithmeticTypeInfo.Class.integer + else + ArithmeticTypeInfo.Class.strange_integer + else + .composite_integer + }; + }, + // As of yet, there is no vector support in the self-hosted compiler. + .Vector => self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: implement arithmeticTypeInfo for Vector", .{}), + // TODO: For which types is this the case? + else => self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: implement arithmeticTypeInfo for {}", .{ty}), + }; + } + /// Generate a constant representing `val`. /// TODO: Deduplication? fn genConstant(self: *DeclGen, ty: Type, val: Value) Error!u32 { @@ -218,7 +282,8 @@ pub const DeclGen = struct { .Void => try writeInstruction(code, .OpTypeVoid, &[_]u32{ result_id }), .Bool => try writeInstruction(code, .OpTypeBool, &[_]u32{ result_id }), .Int => { - const backing_bits = self.backingIntBits(ty) orelse { + const int_info = ty.intInfo(target); + const backing_bits = self.backingIntBits(int_info.bits) orelse { // Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits. return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: implement composite ints {}", .{ ty }); }; @@ -227,7 +292,10 @@ pub const DeclGen = struct { try writeInstruction(code, .OpTypeInt, &[_]u32{ result_id, backing_bits, - @boolToInt(ty.isSignedInt()), + switch (int_info.signedness) { + .unsigned => 0, + .signed => 1, + }, }); }, .Float => { @@ -346,6 +414,7 @@ pub const DeclGen = struct { fn genInst(self: *DeclGen, inst: *Inst) !?u32 { return switch (inst.tag) { + .add => try self.genBinOp(inst.castTag(.add).?), .arg => self.genArg(), // TODO: Breakpoints won't be supported in SPIR-V, but the compiler seems to insert them // throughout the IR. @@ -358,6 +427,41 @@ pub const DeclGen = struct { }; } + fn genBinOp(self: *DeclGen, inst: *Inst.BinOp) !u32 { + // TODO: Will lhs and rhs have the same type? + const lhs_id = try self.resolve(inst.lhs); + const rhs_id = try self.resolve(inst.rhs); + + const binop_result_id = self.spv.allocResultId(); + const result_type_id = try self.getOrGenType(inst.base.ty); + + // TODO: Is the result the same as the argument types? + // This is supposed to be the case for SPIR-V. + std.debug.assert(inst.base.ty.eql(inst.lhs.ty) and inst.base.ty.eql(inst.rhs.ty)); + + // Binary operations are generally applicable to both scalar and vector operations in SPIR-V, but int and float + // versions of operations require different opcodes. 
+ const info = try self.arithmeticTypeInfo(inst.base.ty); + + if (info.class == .composite_integer) + return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: binary operations for composite integers", .{}); + + // Fetch the integer and float opcodes for each operation. + // Doing it this way removes a bit of code clutter. + const opcodes: [2]spec.Opcode = switch (inst.base.tag) { + .add => .{.OpIAdd, .OpFAdd}, + else => unreachable, + }; + + const opcode = if (info.class == .float) opcodes[1] else opcodes[0]; + try writeInstruction(&self.spv.fn_decls, opcode, &[_]u32{ result_type_id, binop_result_id, lhs_id, rhs_id }); + + if (info.class != .strange_integer) + return binop_result_id; + + return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: strange integer operation mask", .{}); + } + fn genArg(self: *DeclGen) u32 { defer self.next_arg_index += 1; return self.args.items[self.next_arg_index]; From 4735e95d1699c90e821655bdbe0afbb1044738ea Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Sun, 16 May 2021 13:32:32 +0200 Subject: [PATCH 10/21] SPIR-V: More binary operations --- src/codegen/spirv.zig | 53 +++++++++++++++++++++++++++++++------------ 1 file changed, 39 insertions(+), 14 deletions(-) diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 3272de47ae..c8bebbb70b 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -4,6 +4,8 @@ const Target = std.Target; const log = std.log.scoped(.codegen); const spec = @import("spirv/spec.zig"); +const Opcode = spec.Opcode; + const Module = @import("../Module.zig"); const Decl = Module.Decl; const Type = @import("../type.zig").Type; @@ -15,12 +17,12 @@ const Inst = ir.Inst; pub const TypeMap = std.HashMap(Type, u32, Type.hash, Type.eql, std.hash_map.default_max_load_percentage); pub const ValueMap = std.AutoHashMap(*Inst, u32); -pub fn writeOpcode(code: *std.ArrayList(u32), opcode: spec.Opcode, arg_count: u32) !void { +pub fn writeOpcode(code: *std.ArrayList(u32), opcode: Opcode, arg_count: u32) !void { const word_count = arg_count + 1; try code.append((word_count << 16) | @enumToInt(opcode)); } -pub fn writeInstruction(code: *std.ArrayList(u32), opcode: spec.Opcode, args: []const u32) !void { +pub fn writeInstruction(code: *std.ArrayList(u32), opcode: Opcode, args: []const u32) !void { try writeOpcode(code, opcode, @intCast(u32, args.len)); try code.appendSlice(args); } @@ -102,11 +104,14 @@ pub const DeclGen = struct { /// The number of bits in the inner type. /// Note: this is the actual number of bits of the type, not the size of the backing integer. - bits: u32, + bits: u16, /// Whether the type is a vector. is_vector: bool, + /// Whether the inner type is signed. Only relevant for integers. + signedness: std.builtin.Signedness, + /// A classification of the inner type. These four scenarios /// will all have to be handled slightly different. class: Class, @@ -137,14 +142,14 @@ pub const DeclGen = struct { /// TODO: The extension SPV_INTEL_arbitrary_precision_integers allows any integer size (at least up to 32 bits). /// TODO: This probably needs an ABI-version as well (especially in combination with SPV_INTEL_arbitrary_precision_integers). /// TODO: Should the result of this function be cached? - fn backingIntBits(self: *DeclGen, bits: u32) ?u32 { + fn backingIntBits(self: *DeclGen, bits: u16) ?u16 { const target = self.module.getTarget(); // TODO: Figure out what to do with u0/i0. std.debug.assert(bits != 0); // 8, 16 and 64-bit integers require the Int8, Int16 and Inr64 capabilities respectively. 
- const ints = [_]struct{ bits: u32, feature: ?Target.spirv.Feature } { + const ints = [_]struct{ bits: u16, feature: ?Target.spirv.Feature } { .{ .bits = 8, .feature = .Int8 }, .{ .bits = 16, .feature = .Int16 }, .{ .bits = 32, .feature = null }, @@ -171,7 +176,7 @@ pub const DeclGen = struct { /// In theory that could also be used, but since the spec says that it only guarantees support up to 32-bit ints there /// is no way of knowing whether those are actually supported. /// TODO: Maybe this should be cached? - fn largestSupportedIntBits(self: *DeclGen) u32 { + fn largestSupportedIntBits(self: *DeclGen) u16 { const target = self.module.getTarget(); return if (Target.spirv.featureSetHas(target.cpu.features, .Int64)) 64 @@ -190,7 +195,12 @@ pub const DeclGen = struct { const target = self.module.getTarget(); return switch (ty.zigTypeTag()) { - .Float => ArithmeticTypeInfo{ .bits = ty.floatBits(target), .is_vector = false, .class = .float }, + .Float => ArithmeticTypeInfo{ + .bits = ty.floatBits(target), + .is_vector = false, + .signedness = .signed, // I guess technically it is. + .class = .float + }, .Int => blk: { const int_info = ty.intInfo(target); // TODO: Maybe it's useful to also return this value. @@ -198,6 +208,7 @@ pub const DeclGen = struct { break :blk ArithmeticTypeInfo{ .bits = int_info.bits, .is_vector = false, + .signedness = int_info.signedness, .class = if (maybe_backing_bits) |backing_bits| if (backing_bits == int_info.bits) ArithmeticTypeInfo.Class.integer @@ -228,7 +239,7 @@ pub const DeclGen = struct { switch (ty.zigTypeTag()) { .Bool => { - const opcode: spec.Opcode = if (val.toBool()) .OpConstantTrue else .OpConstantFalse; + const opcode: Opcode = if (val.toBool()) .OpConstantTrue else .OpConstantFalse; try writeInstruction(code, opcode, &[_]u32{ result_type_id, result_id }); }, .Float => { @@ -414,7 +425,10 @@ pub const DeclGen = struct { fn genInst(self: *DeclGen, inst: *Inst) !?u32 { return switch (inst.tag) { - .add => try self.genBinOp(inst.castTag(.add).?), + .add, .addwrap => try self.genBinOp(inst.castTag(.add).?), + .sub, .subwrap => try self.genBinOp(inst.castTag(.sub).?), + .mul, .mulwrap => try self.genBinOp(inst.castTag(.mul).?), + .div => try self.genBinOp(inst.castTag(.div).?), .arg => self.genArg(), // TODO: Breakpoints won't be supported in SPIR-V, but the compiler seems to insert them // throughout the IR. @@ -446,16 +460,27 @@ pub const DeclGen = struct { if (info.class == .composite_integer) return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: binary operations for composite integers", .{}); - // Fetch the integer and float opcodes for each operation. - // Doing it this way removes a bit of code clutter. - const opcodes: [2]spec.Opcode = switch (inst.base.tag) { - .add => .{.OpIAdd, .OpFAdd}, + const is_float = info.class == .float; + const is_signed = info.signedness == .signed; + // **Note**: All these operations must be valid for vectors of floats and integers as well! + const opcode = switch (inst.base.tag) { + // The regular integer operations are all defined for wrapping. Since theyre only relevant for integers, + // we can just switch on both cases here. + .add, .addwrap => if (is_float) Opcode.OpFAdd else Opcode.OpIAdd, + .sub, .subwrap => if (is_float) Opcode.OpFSub else Opcode.OpISub, + .mul, .mulwrap => if (is_float) Opcode.OpFMul else Opcode.OpIMul, + // TODO: Trap if divisor is 0? + // TODO: Figure out of OpSDiv for unsigned/OpUDiv for signed does anything useful. 
+ .div => if (is_float) Opcode.OpFDiv else if (is_signed) Opcode.OpSDiv else Opcode.OpUDiv, + else => unreachable, }; - const opcode = if (info.class == .float) opcodes[1] else opcodes[0]; try writeInstruction(&self.spv.fn_decls, opcode, &[_]u32{ result_type_id, binop_result_id, lhs_id, rhs_id }); + // TODO: Trap on overflow? Probably going to be annoying. + // TODO: Look into NoSignedWrap/NoUnsignedWrap extensions. + if (info.class != .strange_integer) return binop_result_id; From f14000c7e1bd9032185b3c0a2a28a73eec6c48f7 Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Sun, 16 May 2021 13:39:17 +0200 Subject: [PATCH 11/21] SPIR-V: More bitwise binary operations --- src/codegen/spirv.zig | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index c8bebbb70b..88d6f74a2d 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -84,7 +84,7 @@ pub const DeclGen = struct { const ArithmeticTypeInfo = struct { /// A classification of the inner type. const Class = enum { - /// A regular, **native**, integer operation. + /// A regular, **native**, integer. /// This is only returned when the backend supports this int as a native type (when /// the relevant capability is enabled). integer, @@ -112,7 +112,7 @@ pub const DeclGen = struct { /// Whether the inner type is signed. Only relevant for integers. signedness: std.builtin.Signedness, - /// A classification of the inner type. These four scenarios + /// A classification of the inner type. These scenarios /// will all have to be handled slightly different. class: Class, }; @@ -149,6 +149,7 @@ pub const DeclGen = struct { std.debug.assert(bits != 0); // 8, 16 and 64-bit integers require the Int8, Int16 and Inr64 capabilities respectively. + // 32-bit integers are always supported (see spec, 2.16.1, Data rules). const ints = [_]struct{ bits: u16, feature: ?Target.spirv.Feature } { .{ .bits = 8, .feature = .Int8 }, .{ .bits = 16, .feature = .Int16 }, @@ -198,8 +199,8 @@ pub const DeclGen = struct { .Float => ArithmeticTypeInfo{ .bits = ty.floatBits(target), .is_vector = false, - .signedness = .signed, // I guess technically it is. - .class = .float + .signedness = .signed, // Technically, but doesn't matter for this class. + .class = .float, }, .Int => blk: { const int_info = ty.intInfo(target); @@ -315,6 +316,7 @@ pub const DeclGen = struct { const bits = ty.floatBits(target); const supported = switch (bits) { 16 => Target.spirv.featureSetHas(target.cpu.features, .Float16), + // 32-bit floats are always supported (see spec, 2.16.1, Data rules). 32 => true, 64 => Target.spirv.featureSetHas(target.cpu.features, .Float64), else => false, @@ -358,6 +360,8 @@ pub const DeclGen = struct { // which work on them), so simply use those. // Note: SPIR-V vectors only support bools, ints and floats, so pointer vectors need to be supported another way. // "composite integers" (larger than the largest supported native type) can probably be represented by an array of vectors. + // TODO: The SPIR-V spec mentions that vector sizes may be quite restricted! look into which we can use, and whether OpTypeVector + // is adequate at all for this. // TODO: Vectors are not yet supported by the self-hosted compiler itself it seems. 
             return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: implement type Vector", .{});
@@ -429,6 +433,9 @@ pub const DeclGen = struct {
             .sub, .subwrap => try self.genBinOp(inst.castTag(.sub).?),
             .mul, .mulwrap => try self.genBinOp(inst.castTag(.mul).?),
             .div => try self.genBinOp(inst.castTag(.div).?),
+            .bit_and => try self.genBinOp(inst.castTag(.bit_and).?),
+            .bit_or => try self.genBinOp(inst.castTag(.bit_or).?),
+            .xor => try self.genBinOp(inst.castTag(.xor).?),
             .arg => self.genArg(),
             // TODO: Breakpoints won't be supported in SPIR-V, but the compiler seems to insert them
             // throughout the IR.
@@ -472,7 +479,10 @@ pub const DeclGen = struct {
             // TODO: Trap if divisor is 0?
             // TODO: Figure out if OpSDiv for unsigned/OpUDiv for signed does anything useful.
             .div => if (is_float) Opcode.OpFDiv else if (is_signed) Opcode.OpSDiv else Opcode.OpUDiv,
-
+            // Only integer versions for these.
+            .bit_and => Opcode.OpBitwiseAnd,
+            .bit_or => Opcode.OpBitwiseOr,
+            .xor => Opcode.OpBitwiseXor,
             else => unreachable,
         };

From 585122b1ac51f9ac23bae537dfc40bbae1d7cb3c Mon Sep 17 00:00:00 2001
From: Robin Voetter
Date: Sun, 16 May 2021 14:46:58 +0200
Subject: [PATCH 12/21] SPIR-V: comparison and equality operations

---
 src/codegen/spirv.zig | 39 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 36 insertions(+), 3 deletions(-)

diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 88d6f74a2d..14511e287f 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -84,6 +84,9 @@ pub const DeclGen = struct {
     const ArithmeticTypeInfo = struct {
         /// A classification of the inner type.
         const Class = enum {
+            /// A boolean.
+            bool,
+
            /// A regular, **native**, integer.
            /// This is only returned when the backend supports this int as a native type (when
            /// the relevant capability is enabled).
            integer,
@@ -196,6 +199,12 @@ pub const DeclGen = struct {
         const target = self.module.getTarget();
 
         return switch (ty.zigTypeTag()) {
+            .Bool => ArithmeticTypeInfo{
+                .bits = 1, // Doesn't matter for this class.
+                .is_vector = false,
+                .signedness = .unsigned, // Technically, but doesn't matter for this class.
+                .class = .bool,
+            },
             .Float => ArithmeticTypeInfo{
                 .bits = ty.floatBits(target),
                 .is_vector = false,
                 .signedness = .signed, // Technically, but doesn't matter for this class.
                 .class = .float,
             },
             .Int => blk: {
                 const int_info = ty.intInfo(target);
@@ -436,6 +445,12 @@ pub const DeclGen = struct {
             .bit_and => try self.genBinOp(inst.castTag(.bit_and).?),
             .bit_or => try self.genBinOp(inst.castTag(.bit_or).?),
             .xor => try self.genBinOp(inst.castTag(.xor).?),
+            .cmp_eq => try self.genBinOp(inst.castTag(.cmp_eq).?),
+            .cmp_neq => try self.genBinOp(inst.castTag(.cmp_neq).?),
+            .cmp_gt => try self.genBinOp(inst.castTag(.cmp_gt).?),
+            .cmp_gte => try self.genBinOp(inst.castTag(.cmp_gte).?),
+            .cmp_lt => try self.genBinOp(inst.castTag(.cmp_lt).?),
+            .cmp_lte => try self.genBinOp(inst.castTag(.cmp_lte).?),
             .arg => self.genArg(),
             // TODO: Breakpoints won't be supported in SPIR-V, but the compiler seems to insert them
             // throughout the IR.
@@ -458,18 +473,23 @@ pub const DeclGen = struct {
         // TODO: Is the result the same as the argument types?
         // This is supposed to be the case for SPIR-V.
-        std.debug.assert(inst.base.ty.eql(inst.lhs.ty) and inst.base.ty.eql(inst.rhs.ty));
+        std.debug.assert(inst.rhs.ty.eql(inst.lhs.ty));
+        std.debug.assert(inst.base.ty.tag() == .bool or inst.base.ty.eql(inst.lhs.ty));
 
         // Binary operations are generally applicable to both scalar and vector operations in SPIR-V, but int and float
         // versions of operations require different opcodes.
-        const info = try self.arithmeticTypeInfo(inst.base.ty);
+        // For operations which produce bools, the information of inst.base.ty is not useful, so just pick either operand
+        // instead.
+        const info = try self.arithmeticTypeInfo(inst.lhs.ty);
         if (info.class == .composite_integer)
             return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: binary operations for composite integers", .{});
 
+        const is_bool = info.class == .bool;
         const is_float = info.class == .float;
         const is_signed = info.signedness == .signed;
-        // **Note**: All these operations must be valid for vectors of floats and integers as well!
+        // **Note**: All these operations must be valid for vectors of floats, integers and bools as well!
+        // For floating points, we generally want ordered operations (which return false if either operand is nan).
         const opcode = switch (inst.base.tag) {
             // The regular integer operations are all defined for wrapping. Since they're only relevant for integers,
             // we can just switch on both cases here.
             .add, .addwrap => if (is_float) Opcode.OpFAdd else Opcode.OpIAdd,
             .sub, .subwrap => if (is_float) Opcode.OpFSub else Opcode.OpISub,
             .mul, .mulwrap => if (is_float) Opcode.OpFMul else Opcode.OpIMul,
             // TODO: Trap if divisor is 0?
             // TODO: Figure out if OpSDiv for unsigned/OpUDiv for signed does anything useful.
+            // => Those are probably for divTrunc and divFloor, though the compiler does not yet generate those.
+            // => TODO: Figure out how those work on the SPIR-V side.
+            // => TODO: Test these.
             .div => if (is_float) Opcode.OpFDiv else if (is_signed) Opcode.OpSDiv else Opcode.OpUDiv,
             // Only integer versions for these.
             .bit_and => Opcode.OpBitwiseAnd,
             .bit_or => Opcode.OpBitwiseOr,
             .xor => Opcode.OpBitwiseXor,
+            // Int/bool/float -> bool operations.
+            .cmp_eq => if (is_float) Opcode.OpFOrdEqual else if (is_bool) Opcode.OpLogicalEqual else Opcode.OpIEqual,
+            .cmp_neq => if (is_float) Opcode.OpFOrdNotEqual else if (is_bool) Opcode.OpLogicalNotEqual else Opcode.OpINotEqual,
+            // Int/float -> bool operations.
+            // TODO: Verify that these OpFOrd type operations produce the right value.
+            // TODO: Is there a more fundamental difference between OpU and OpS operations here than just the type?
+            .cmp_gt => if (is_float) Opcode.OpFOrdGreaterThan else if (is_signed) Opcode.OpSGreaterThan else Opcode.OpUGreaterThan,
+            .cmp_gte => if (is_float) Opcode.OpFOrdGreaterThanEqual else if (is_signed) Opcode.OpSGreaterThanEqual else Opcode.OpUGreaterThanEqual,
+            .cmp_lt => if (is_float) Opcode.OpFOrdLessThan else if (is_signed) Opcode.OpSLessThan else Opcode.OpULessThan,
+            .cmp_lte => if (is_float) Opcode.OpFOrdLessThanEqual else if (is_signed) Opcode.OpSLessThanEqual else Opcode.OpULessThanEqual,
             else => unreachable,
         };

From 489b3ef7d47c877aa7e761ddf00763bfe1dc03a7 Mon Sep 17 00:00:00 2001
From: Robin Voetter
Date: Sun, 16 May 2021 14:52:11 +0200
Subject: [PATCH 13/21] SPIR-V: bool binary operations

---
 src/codegen/spirv.zig | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 14511e287f..949e9fc6a2 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -451,6 +451,8 @@ pub const DeclGen = struct {
             .cmp_gte => try self.genBinOp(inst.castTag(.cmp_gte).?),
             .cmp_lt => try self.genBinOp(inst.castTag(.cmp_lt).?),
             .cmp_lte => try self.genBinOp(inst.castTag(.cmp_lte).?),
+            .bool_and => try self.genBinOp(inst.castTag(.bool_and).?),
+            .bool_or => try self.genBinOp(inst.castTag(.bool_or).?),
             .arg => self.genArg(),
             // TODO: Breakpoints won't be supported in SPIR-V, but the compiler seems to insert them
             // throughout the IR.
@@ -468,7 +470,7 @@ pub const DeclGen = struct { const lhs_id = try self.resolve(inst.lhs); const rhs_id = try self.resolve(inst.rhs); - const binop_result_id = self.spv.allocResultId(); + const result_id = self.spv.allocResultId(); const result_type_id = try self.getOrGenType(inst.base.ty); // TODO: Is the result the same as the argument types? @@ -516,16 +518,19 @@ pub const DeclGen = struct { .cmp_gte => if (is_float) Opcode.OpFOrdGreaterThanEqual else if (is_signed) Opcode.OpSGreaterThanEqual else Opcode.OpUGreaterThanEqual, .cmp_lt => if (is_float) Opcode.OpFOrdLessThan else if (is_signed) Opcode.OpSLessThan else Opcode.OpULessThan, .cmp_lte => if (is_float) Opcode.OpFOrdLessThanEqual else if (is_signed) Opcode.OpSLessThanEqual else Opcode.OpULessThanEqual, + // Bool -> bool operations. + .bool_and => Opcode.OpLogicalAnd, + .bool_or => Opcode.OpLogicalOr, else => unreachable, }; - try writeInstruction(&self.spv.fn_decls, opcode, &[_]u32{ result_type_id, binop_result_id, lhs_id, rhs_id }); + try writeInstruction(&self.spv.fn_decls, opcode, &[_]u32{ result_type_id, result_id, lhs_id, rhs_id }); // TODO: Trap on overflow? Probably going to be annoying. // TODO: Look into NoSignedWrap/NoUnsignedWrap extensions. if (info.class != .strange_integer) - return binop_result_id; + return result_id; return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: strange integer operation mask", .{}); } From 880473dc3f08e2f8c0cef85777d50e25e4bcb062 Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Sun, 16 May 2021 14:55:09 +0200 Subject: [PATCH 14/21] SPIR-V: Unary not operation --- src/codegen/spirv.zig | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 949e9fc6a2..7992a7a465 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -453,6 +453,7 @@ pub const DeclGen = struct { .cmp_lte => try self.genBinOp(inst.castTag(.cmp_lte).?), .bool_and => try self.genBinOp(inst.castTag(.bool_and).?), .bool_or => try self.genBinOp(inst.castTag(.bool_or).?), + .not => try self.genUnOp(inst.castTag(.not).?), .arg => self.genArg(), // TODO: Breakpoints won't be supported in SPIR-V, but the compiler seems to insert them // throughout the IR. @@ -527,7 +528,7 @@ pub const DeclGen = struct { try writeInstruction(&self.spv.fn_decls, opcode, &[_]u32{ result_type_id, result_id, lhs_id, rhs_id }); // TODO: Trap on overflow? Probably going to be annoying. - // TODO: Look into NoSignedWrap/NoUnsignedWrap extensions. + // TODO: Look into SPV_KHR_no_integer_wrap_decoration which provides NoSignedWrap/NoUnsignedWrap. 
if (info.class != .strange_integer) return result_id; @@ -535,6 +536,25 @@ pub const DeclGen = struct { return self.fail(.{.node_offset = 0}, "TODO: SPIR-V backend: strange integer operation mask", .{}); } + fn genUnOp(self: *DeclGen, inst: *Inst.UnOp) !u32 { + const operand_id = try self.resolve(inst.operand); + + const result_id = self.spv.allocResultId(); + const result_type_id = try self.getOrGenType(inst.base.ty); + + const info = try self.arithmeticTypeInfo(inst.operand.ty); + + const opcode = switch (inst.base.tag) { + // Bool -> bool + .not => Opcode.OpLogicalNot, + else => unreachable, + }; + + try writeInstruction(&self.spv.fn_decls, opcode, &[_]u32{ result_type_id, result_id, operand_id }); + + return result_id; + } + fn genArg(self: *DeclGen) u32 { defer self.next_arg_index += 1; return self.args.items[self.next_arg_index]; From 5042a476822af59ef005df2e7ff9f994907cceb4 Mon Sep 17 00:00:00 2001 From: Jonathan Marler Date: Sun, 16 May 2021 10:37:29 -0600 Subject: [PATCH 15/21] add missing EBADF error code for openat --- lib/std/os.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/std/os.zig b/lib/std/os.zig index 0ee20d7c8e..5df2c441cd 100644 --- a/lib/std/os.zig +++ b/lib/std/os.zig @@ -1244,6 +1244,7 @@ pub fn openatZ(dir_fd: fd_t, file_path: [*:0]const u8, flags: u32, mode: mode_t) EFAULT => unreachable, EINVAL => unreachable, + EBADF => unreachable, EACCES => return error.AccessDenied, EFBIG => return error.FileTooBig, EOVERFLOW => return error.FileTooBig, From 8467373bb84fc7b0690c0e4d024d2cb298931cd9 Mon Sep 17 00:00:00 2001 From: Aiz672 Date: Sat, 15 May 2021 14:58:53 +0700 Subject: [PATCH 16/21] Remove `isIntegerNumber` and `isFloatingNumber` --- lib/std/meta/trait.zig | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/lib/std/meta/trait.zig b/lib/std/meta/trait.zig index 2bf2e6a0a5..b9bb6c332d 100644 --- a/lib/std/meta/trait.zig +++ b/lib/std/meta/trait.zig @@ -298,20 +298,6 @@ pub fn isNumber(comptime T: type) bool { }; } -pub fn isIntegerNumber(comptime T: type) bool { - return switch (@typeInfo(T)) { - .Int, .ComptimeInt => true, - else => false, - }; -} - -pub fn isFloatingNumber(comptime T: type) bool { - return switch (@typeInfo(T)) { - .Float, .ComptimeFloat => true, - else => false, - }; -} - test "std.meta.trait.isNumber" { const NotANumber = struct { number: u8, From 04d95ea4192a7f70e7c11b8ee67d237cf38da9b7 Mon Sep 17 00:00:00 2001 From: Michael Dusan Date: Mon, 17 May 2021 09:10:47 -0400 Subject: [PATCH 17/21] ci linux: bump qemu-5.2.0.1 - apply patch for qemu-user syscall do_sendrecvmsg_locked - see #8750 --- ci/azure/linux_script | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/azure/linux_script b/ci/azure/linux_script index 64407b79b7..2ca7ed9f75 100755 --- a/ci/azure/linux_script +++ b/ci/azure/linux_script @@ -20,7 +20,7 @@ cd $HOME wget -nv "https://ziglang.org/deps/$CACHE_BASENAME.tar.xz" tar xf "$CACHE_BASENAME.tar.xz" -QEMUBASE="qemu-linux-x86_64-5.2.0" +QEMUBASE="qemu-linux-x86_64-5.2.0.1" wget -nv "https://ziglang.org/deps/$QEMUBASE.tar.xz" tar xf "$QEMUBASE.tar.xz" export PATH="$(pwd)/$QEMUBASE/bin:$PATH" From 5414bd48edd460ae8667c811e13aa9b5d9fab919 Mon Sep 17 00:00:00 2001 From: aiz <78422626+aizawey672@users.noreply.github.com> Date: Tue, 18 May 2021 02:57:51 +0700 Subject: [PATCH 18/21] std.math.Complex: Change `new()` to `init()` --- lib/std/math/complex.zig | 27 ++++++++-------- lib/std/math/complex/abs.zig | 2 +- lib/std/math/complex/acos.zig | 4 +-- lib/std/math/complex/acosh.zig | 4 +-- 
lib/std/math/complex/arg.zig | 2 +- lib/std/math/complex/asin.zig | 8 ++--- lib/std/math/complex/asinh.zig | 6 ++-- lib/std/math/complex/atan.zig | 20 ++++++------ lib/std/math/complex/atanh.zig | 6 ++-- lib/std/math/complex/conj.zig | 4 +-- lib/std/math/complex/cos.zig | 4 +-- lib/std/math/complex/cosh.zig | 56 +++++++++++++++++----------------- lib/std/math/complex/exp.zig | 28 ++++++++--------- lib/std/math/complex/ldexp.zig | 4 +-- lib/std/math/complex/log.zig | 4 +-- lib/std/math/complex/pow.zig | 4 +-- lib/std/math/complex/proj.zig | 6 ++-- lib/std/math/complex/sin.zig | 6 ++-- lib/std/math/complex/sinh.zig | 56 +++++++++++++++++----------------- lib/std/math/complex/sqrt.zig | 32 +++++++++---------- lib/std/math/complex/tan.zig | 6 ++-- lib/std/math/complex/tanh.zig | 24 +++++++-------- lib/std/zig/parser_test.zig | 12 ++++---- 23 files changed, 164 insertions(+), 161 deletions(-) diff --git a/lib/std/math/complex.zig b/lib/std/math/complex.zig index abac923cdd..614d9d0ae1 100644 --- a/lib/std/math/complex.zig +++ b/lib/std/math/complex.zig @@ -38,9 +38,12 @@ pub fn Complex(comptime T: type) type { /// Imaginary part. im: T, + + /// Deprecated, use init() + pub const new = init; /// Create a new Complex number from the given real and imaginary parts. - pub fn new(re: T, im: T) Self { + pub fn init(re: T, im: T) Self { return Self{ .re = re, .im = im, @@ -110,32 +113,32 @@ pub fn Complex(comptime T: type) type { const epsilon = 0.0001; test "complex.add" { - const a = Complex(f32).new(5, 3); - const b = Complex(f32).new(2, 7); + const a = Complex(f32).init(5, 3); + const b = Complex(f32).init(2, 7); const c = a.add(b); try testing.expect(c.re == 7 and c.im == 10); } test "complex.sub" { - const a = Complex(f32).new(5, 3); - const b = Complex(f32).new(2, 7); + const a = Complex(f32).init(5, 3); + const b = Complex(f32).init(2, 7); const c = a.sub(b); try testing.expect(c.re == 3 and c.im == -4); } test "complex.mul" { - const a = Complex(f32).new(5, 3); - const b = Complex(f32).new(2, 7); + const a = Complex(f32).init(5, 3); + const b = Complex(f32).init(2, 7); const c = a.mul(b); try testing.expect(c.re == -11 and c.im == 41); } test "complex.div" { - const a = Complex(f32).new(5, 3); - const b = Complex(f32).new(2, 7); + const a = Complex(f32).init(5, 3); + const b = Complex(f32).init(2, 7); const c = a.div(b); try testing.expect(math.approxEqAbs(f32, c.re, @as(f32, 31) / 53, epsilon) and @@ -143,14 +146,14 @@ test "complex.div" { } test "complex.conjugate" { - const a = Complex(f32).new(5, 3); + const a = Complex(f32).init(5, 3); const c = a.conjugate(); try testing.expect(c.re == 5 and c.im == -3); } test "complex.reciprocal" { - const a = Complex(f32).new(5, 3); + const a = Complex(f32).init(5, 3); const c = a.reciprocal(); try testing.expect(math.approxEqAbs(f32, c.re, @as(f32, 5) / 34, epsilon) and @@ -158,7 +161,7 @@ test "complex.reciprocal" { } test "complex.magnitude" { - const a = Complex(f32).new(5, 3); + const a = Complex(f32).init(5, 3); const c = a.magnitude(); try testing.expect(math.approxEqAbs(f32, c, 5.83095, epsilon)); diff --git a/lib/std/math/complex/abs.zig b/lib/std/math/complex/abs.zig index a2678d21db..6890d15f8a 100644 --- a/lib/std/math/complex/abs.zig +++ b/lib/std/math/complex/abs.zig @@ -18,7 +18,7 @@ pub fn abs(z: anytype) @TypeOf(z.re) { const epsilon = 0.0001; test "complex.cabs" { - const a = Complex(f32).new(5, 3); + const a = Complex(f32).init(5, 3); const c = abs(a); try testing.expect(math.approxEqAbs(f32, c, 5.83095, epsilon)); } diff --git 
a/lib/std/math/complex/acos.zig b/lib/std/math/complex/acos.zig index 72abea47fe..3d02ad6358 100644 --- a/lib/std/math/complex/acos.zig +++ b/lib/std/math/complex/acos.zig @@ -13,13 +13,13 @@ const Complex = cmath.Complex; pub fn acos(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); const q = cmath.asin(z); - return Complex(T).new(@as(T, math.pi) / 2 - q.re, -q.im); + return Complex(T).init(@as(T, math.pi) / 2 - q.re, -q.im); } const epsilon = 0.0001; test "complex.cacos" { - const a = Complex(f32).new(5, 3); + const a = Complex(f32).init(5, 3); const c = acos(a); try testing.expect(math.approxEqAbs(f32, c.re, 0.546975, epsilon)); diff --git a/lib/std/math/complex/acosh.zig b/lib/std/math/complex/acosh.zig index 4f76dea01a..c239936d47 100644 --- a/lib/std/math/complex/acosh.zig +++ b/lib/std/math/complex/acosh.zig @@ -13,13 +13,13 @@ const Complex = cmath.Complex; pub fn acosh(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); const q = cmath.acos(z); - return Complex(T).new(-q.im, q.re); + return Complex(T).init(-q.im, q.re); } const epsilon = 0.0001; test "complex.cacosh" { - const a = Complex(f32).new(5, 3); + const a = Complex(f32).init(5, 3); const c = acosh(a); try testing.expect(math.approxEqAbs(f32, c.re, 2.452914, epsilon)); diff --git a/lib/std/math/complex/arg.zig b/lib/std/math/complex/arg.zig index c583b9e360..8a79daa073 100644 --- a/lib/std/math/complex/arg.zig +++ b/lib/std/math/complex/arg.zig @@ -18,7 +18,7 @@ pub fn arg(z: anytype) @TypeOf(z.re) { const epsilon = 0.0001; test "complex.carg" { - const a = Complex(f32).new(5, 3); + const a = Complex(f32).init(5, 3); const c = arg(a); try testing.expect(math.approxEqAbs(f32, c, 0.540420, epsilon)); } diff --git a/lib/std/math/complex/asin.zig b/lib/std/math/complex/asin.zig index 7ce200fae2..f2c377df56 100644 --- a/lib/std/math/complex/asin.zig +++ b/lib/std/math/complex/asin.zig @@ -15,17 +15,17 @@ pub fn asin(z: anytype) Complex(@TypeOf(z.re)) { const x = z.re; const y = z.im; - const p = Complex(T).new(1.0 - (x - y) * (x + y), -2.0 * x * y); - const q = Complex(T).new(-y, x); + const p = Complex(T).init(1.0 - (x - y) * (x + y), -2.0 * x * y); + const q = Complex(T).init(-y, x); const r = cmath.log(q.add(cmath.sqrt(p))); - return Complex(T).new(r.im, -r.re); + return Complex(T).init(r.im, -r.re); } const epsilon = 0.0001; test "complex.casin" { - const a = Complex(f32).new(5, 3); + const a = Complex(f32).init(5, 3); const c = asin(a); try testing.expect(math.approxEqAbs(f32, c.re, 1.023822, epsilon)); diff --git a/lib/std/math/complex/asinh.zig b/lib/std/math/complex/asinh.zig index 821218acf7..b1083c81bf 100644 --- a/lib/std/math/complex/asinh.zig +++ b/lib/std/math/complex/asinh.zig @@ -12,15 +12,15 @@ const Complex = cmath.Complex; /// Returns the hyperbolic arc-sine of z. 
pub fn asinh(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); - const q = Complex(T).new(-z.im, z.re); + const q = Complex(T).init(-z.im, z.re); const r = cmath.asin(q); - return Complex(T).new(r.im, -r.re); + return Complex(T).init(r.im, -r.re); } const epsilon = 0.0001; test "complex.casinh" { - const a = Complex(f32).new(5, 3); + const a = Complex(f32).init(5, 3); const c = asinh(a); try testing.expect(math.approxEqAbs(f32, c.re, 2.459831, epsilon)); diff --git a/lib/std/math/complex/atan.zig b/lib/std/math/complex/atan.zig index d9a95c8dc1..b26b1e7ef3 100644 --- a/lib/std/math/complex/atan.zig +++ b/lib/std/math/complex/atan.zig @@ -50,14 +50,14 @@ fn atan32(z: Complex(f32)) Complex(f32) { if ((x == 0.0) and (y > 1.0)) { // overflow - return Complex(f32).new(maxnum, maxnum); + return Complex(f32).init(maxnum, maxnum); } const x2 = x * x; var a = 1.0 - x2 - (y * y); if (a == 0.0) { // overflow - return Complex(f32).new(maxnum, maxnum); + return Complex(f32).init(maxnum, maxnum); } var t = 0.5 * math.atan2(f32, 2.0 * x, a); @@ -67,12 +67,12 @@ fn atan32(z: Complex(f32)) Complex(f32) { a = x2 + t * t; if (a == 0.0) { // overflow - return Complex(f32).new(maxnum, maxnum); + return Complex(f32).init(maxnum, maxnum); } t = y + 1.0; a = (x2 + (t * t)) / a; - return Complex(f32).new(w, 0.25 * math.ln(a)); + return Complex(f32).init(w, 0.25 * math.ln(a)); } fn redupif64(x: f64) f64 { @@ -99,14 +99,14 @@ fn atan64(z: Complex(f64)) Complex(f64) { if ((x == 0.0) and (y > 1.0)) { // overflow - return Complex(f64).new(maxnum, maxnum); + return Complex(f64).init(maxnum, maxnum); } const x2 = x * x; var a = 1.0 - x2 - (y * y); if (a == 0.0) { // overflow - return Complex(f64).new(maxnum, maxnum); + return Complex(f64).init(maxnum, maxnum); } var t = 0.5 * math.atan2(f64, 2.0 * x, a); @@ -116,18 +116,18 @@ fn atan64(z: Complex(f64)) Complex(f64) { a = x2 + t * t; if (a == 0.0) { // overflow - return Complex(f64).new(maxnum, maxnum); + return Complex(f64).init(maxnum, maxnum); } t = y + 1.0; a = (x2 + (t * t)) / a; - return Complex(f64).new(w, 0.25 * math.ln(a)); + return Complex(f64).init(w, 0.25 * math.ln(a)); } const epsilon = 0.0001; test "complex.catan32" { - const a = Complex(f32).new(5, 3); + const a = Complex(f32).init(5, 3); const c = atan(a); try testing.expect(math.approxEqAbs(f32, c.re, 1.423679, epsilon)); @@ -135,7 +135,7 @@ test "complex.catan32" { } test "complex.catan64" { - const a = Complex(f64).new(5, 3); + const a = Complex(f64).init(5, 3); const c = atan(a); try testing.expect(math.approxEqAbs(f64, c.re, 1.423679, epsilon)); diff --git a/lib/std/math/complex/atanh.zig b/lib/std/math/complex/atanh.zig index 420f401f17..41232570b5 100644 --- a/lib/std/math/complex/atanh.zig +++ b/lib/std/math/complex/atanh.zig @@ -12,15 +12,15 @@ const Complex = cmath.Complex; /// Returns the hyperbolic arc-tangent of z. 
pub fn atanh(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); - const q = Complex(T).new(-z.im, z.re); + const q = Complex(T).init(-z.im, z.re); const r = cmath.atan(q); - return Complex(T).new(r.im, -r.re); + return Complex(T).init(r.im, -r.re); } const epsilon = 0.0001; test "complex.catanh" { - const a = Complex(f32).new(5, 3); + const a = Complex(f32).init(5, 3); const c = atanh(a); try testing.expect(math.approxEqAbs(f32, c.re, 0.146947, epsilon)); diff --git a/lib/std/math/complex/conj.zig b/lib/std/math/complex/conj.zig index 960295830a..ea4ba7356c 100644 --- a/lib/std/math/complex/conj.zig +++ b/lib/std/math/complex/conj.zig @@ -12,11 +12,11 @@ const Complex = cmath.Complex; /// Returns the complex conjugate of z. pub fn conj(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); - return Complex(T).new(z.re, -z.im); + return Complex(T).init(z.re, -z.im); } test "complex.conj" { - const a = Complex(f32).new(5, 3); + const a = Complex(f32).init(5, 3); const c = a.conjugate(); try testing.expect(c.re == 5 and c.im == -3); diff --git a/lib/std/math/complex/cos.zig b/lib/std/math/complex/cos.zig index 2de3735d12..0760485bb0 100644 --- a/lib/std/math/complex/cos.zig +++ b/lib/std/math/complex/cos.zig @@ -12,14 +12,14 @@ const Complex = cmath.Complex; /// Returns the cosine of z. pub fn cos(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); - const p = Complex(T).new(-z.im, z.re); + const p = Complex(T).init(-z.im, z.re); return cmath.cosh(p); } const epsilon = 0.0001; test "complex.ccos" { - const a = Complex(f32).new(5, 3); + const a = Complex(f32).init(5, 3); const c = cos(a); try testing.expect(math.approxEqAbs(f32, c.re, 2.855815, epsilon)); diff --git a/lib/std/math/complex/cosh.zig b/lib/std/math/complex/cosh.zig index 2514e72bdb..7f82fb98d4 100644 --- a/lib/std/math/complex/cosh.zig +++ b/lib/std/math/complex/cosh.zig @@ -40,55 +40,55 @@ fn cosh32(z: Complex(f32)) Complex(f32) { if (ix < 0x7f800000 and iy < 0x7f800000) { if (iy == 0) { - return Complex(f32).new(math.cosh(x), y); + return Complex(f32).init(math.cosh(x), y); } // small x: normal case if (ix < 0x41100000) { - return Complex(f32).new(math.cosh(x) * math.cos(y), math.sinh(x) * math.sin(y)); + return Complex(f32).init(math.cosh(x) * math.cos(y), math.sinh(x) * math.sin(y)); } // |x|>= 9, so cosh(x) ~= exp(|x|) if (ix < 0x42b17218) { // x < 88.7: exp(|x|) won't overflow const h = math.exp(math.fabs(x)) * 0.5; - return Complex(f32).new(math.copysign(f32, h, x) * math.cos(y), h * math.sin(y)); + return Complex(f32).init(math.copysign(f32, h, x) * math.cos(y), h * math.sin(y)); } // x < 192.7: scale to avoid overflow else if (ix < 0x4340b1e7) { - const v = Complex(f32).new(math.fabs(x), y); + const v = Complex(f32).init(math.fabs(x), y); const r = ldexp_cexp(v, -1); - return Complex(f32).new(r.re, r.im * math.copysign(f32, 1, x)); + return Complex(f32).init(r.re, r.im * math.copysign(f32, 1, x)); } // x >= 192.7: result always overflows else { const h = 0x1p127 * x; - return Complex(f32).new(h * h * math.cos(y), h * math.sin(y)); + return Complex(f32).init(h * h * math.cos(y), h * math.sin(y)); } } if (ix == 0 and iy >= 0x7f800000) { - return Complex(f32).new(y - y, math.copysign(f32, 0, x * (y - y))); + return Complex(f32).init(y - y, math.copysign(f32, 0, x * (y - y))); } if (iy == 0 and ix >= 0x7f800000) { if (hx & 0x7fffff == 0) { - return Complex(f32).new(x * x, math.copysign(f32, 0, x) * y); + return Complex(f32).init(x * x, math.copysign(f32, 0, x) * y); } - return Complex(f32).new(x, 
math.copysign(f32, 0, (x + x) * y)); + return Complex(f32).init(x, math.copysign(f32, 0, (x + x) * y)); } if (ix < 0x7f800000 and iy >= 0x7f800000) { - return Complex(f32).new(y - y, x * (y - y)); + return Complex(f32).init(y - y, x * (y - y)); } if (ix >= 0x7f800000 and (hx & 0x7fffff) == 0) { if (iy >= 0x7f800000) { - return Complex(f32).new(x * x, x * (y - y)); + return Complex(f32).init(x * x, x * (y - y)); } - return Complex(f32).new((x * x) * math.cos(y), x * math.sin(y)); + return Complex(f32).init((x * x) * math.cos(y), x * math.sin(y)); } - return Complex(f32).new((x * x) * (y - y), (x + x) * (y - y)); + return Complex(f32).init((x * x) * (y - y), (x + x) * (y - y)); } fn cosh64(z: Complex(f64)) Complex(f64) { @@ -108,61 +108,61 @@ fn cosh64(z: Complex(f64)) Complex(f64) { // nearly non-exceptional case where x, y are finite if (ix < 0x7ff00000 and iy < 0x7ff00000) { if (iy | ly == 0) { - return Complex(f64).new(math.cosh(x), x * y); + return Complex(f64).init(math.cosh(x), x * y); } // small x: normal case if (ix < 0x40360000) { - return Complex(f64).new(math.cosh(x) * math.cos(y), math.sinh(x) * math.sin(y)); + return Complex(f64).init(math.cosh(x) * math.cos(y), math.sinh(x) * math.sin(y)); } // |x|>= 22, so cosh(x) ~= exp(|x|) if (ix < 0x40862e42) { // x < 710: exp(|x|) won't overflow const h = math.exp(math.fabs(x)) * 0.5; - return Complex(f64).new(h * math.cos(y), math.copysign(f64, h, x) * math.sin(y)); + return Complex(f64).init(h * math.cos(y), math.copysign(f64, h, x) * math.sin(y)); } // x < 1455: scale to avoid overflow else if (ix < 0x4096bbaa) { - const v = Complex(f64).new(math.fabs(x), y); + const v = Complex(f64).init(math.fabs(x), y); const r = ldexp_cexp(v, -1); - return Complex(f64).new(r.re, r.im * math.copysign(f64, 1, x)); + return Complex(f64).init(r.re, r.im * math.copysign(f64, 1, x)); } // x >= 1455: result always overflows else { const h = 0x1p1023; - return Complex(f64).new(h * h * math.cos(y), h * math.sin(y)); + return Complex(f64).init(h * h * math.cos(y), h * math.sin(y)); } } if (ix | lx == 0 and iy >= 0x7ff00000) { - return Complex(f64).new(y - y, math.copysign(f64, 0, x * (y - y))); + return Complex(f64).init(y - y, math.copysign(f64, 0, x * (y - y))); } if (iy | ly == 0 and ix >= 0x7ff00000) { if ((hx & 0xfffff) | lx == 0) { - return Complex(f64).new(x * x, math.copysign(f64, 0, x) * y); + return Complex(f64).init(x * x, math.copysign(f64, 0, x) * y); } - return Complex(f64).new(x * x, math.copysign(f64, 0, (x + x) * y)); + return Complex(f64).init(x * x, math.copysign(f64, 0, (x + x) * y)); } if (ix < 0x7ff00000 and iy >= 0x7ff00000) { - return Complex(f64).new(y - y, x * (y - y)); + return Complex(f64).init(y - y, x * (y - y)); } if (ix >= 0x7ff00000 and (hx & 0xfffff) | lx == 0) { if (iy >= 0x7ff00000) { - return Complex(f64).new(x * x, x * (y - y)); + return Complex(f64).init(x * x, x * (y - y)); } - return Complex(f64).new(x * x * math.cos(y), x * math.sin(y)); + return Complex(f64).init(x * x * math.cos(y), x * math.sin(y)); } - return Complex(f64).new((x * x) * (y - y), (x + x) * (y - y)); + return Complex(f64).init((x * x) * (y - y), (x + x) * (y - y)); } const epsilon = 0.0001; test "complex.ccosh32" { - const a = Complex(f32).new(5, 3); + const a = Complex(f32).init(5, 3); const c = cosh(a); try testing.expect(math.approxEqAbs(f32, c.re, -73.467300, epsilon)); @@ -170,7 +170,7 @@ test "complex.ccosh32" { } test "complex.ccosh64" { - const a = Complex(f64).new(5, 3); + const a = Complex(f64).init(5, 3); const c = cosh(a); try 
testing.expect(math.approxEqAbs(f64, c.re, -73.467300, epsilon)); diff --git a/lib/std/math/complex/exp.zig b/lib/std/math/complex/exp.zig index a7eb3d51b5..fa74300dea 100644 --- a/lib/std/math/complex/exp.zig +++ b/lib/std/math/complex/exp.zig @@ -39,25 +39,25 @@ fn exp32(z: Complex(f32)) Complex(f32) { const hy = @bitCast(u32, y) & 0x7fffffff; // cexp(x + i0) = exp(x) + i0 if (hy == 0) { - return Complex(f32).new(math.exp(x), y); + return Complex(f32).init(math.exp(x), y); } const hx = @bitCast(u32, x); // cexp(0 + iy) = cos(y) + isin(y) if ((hx & 0x7fffffff) == 0) { - return Complex(f32).new(math.cos(y), math.sin(y)); + return Complex(f32).init(math.cos(y), math.sin(y)); } if (hy >= 0x7f800000) { // cexp(finite|nan +- i inf|nan) = nan + i nan if ((hx & 0x7fffffff) != 0x7f800000) { - return Complex(f32).new(y - y, y - y); + return Complex(f32).init(y - y, y - y); } // cexp(-inf +- i inf|nan) = 0 + i0 else if (hx & 0x80000000 != 0) { - return Complex(f32).new(0, 0); + return Complex(f32).init(0, 0); } // cexp(+inf +- i inf|nan) = inf + i nan else { - return Complex(f32).new(x, y - y); + return Complex(f32).init(x, y - y); } } @@ -70,7 +70,7 @@ fn exp32(z: Complex(f32)) Complex(f32) { // - x = nan else { const exp_x = math.exp(x); - return Complex(f32).new(exp_x * math.cos(y), exp_x * math.sin(y)); + return Complex(f32).init(exp_x * math.cos(y), exp_x * math.sin(y)); } } @@ -87,7 +87,7 @@ fn exp64(z: Complex(f64)) Complex(f64) { // cexp(x + i0) = exp(x) + i0 if (hy | ly == 0) { - return Complex(f64).new(math.exp(x), y); + return Complex(f64).init(math.exp(x), y); } const fx = @bitCast(u64, x); @@ -96,19 +96,19 @@ fn exp64(z: Complex(f64)) Complex(f64) { // cexp(0 + iy) = cos(y) + isin(y) if ((hx & 0x7fffffff) | lx == 0) { - return Complex(f64).new(math.cos(y), math.sin(y)); + return Complex(f64).init(math.cos(y), math.sin(y)); } if (hy >= 0x7ff00000) { // cexp(finite|nan +- i inf|nan) = nan + i nan if (lx != 0 or (hx & 0x7fffffff) != 0x7ff00000) { - return Complex(f64).new(y - y, y - y); + return Complex(f64).init(y - y, y - y); } // cexp(-inf +- i inf|nan) = 0 + i0 else if (hx & 0x80000000 != 0) { - return Complex(f64).new(0, 0); + return Complex(f64).init(0, 0); } // cexp(+inf +- i inf|nan) = inf + i nan else { - return Complex(f64).new(x, y - y); + return Complex(f64).init(x, y - y); } } @@ -121,14 +121,14 @@ fn exp64(z: Complex(f64)) Complex(f64) { // - x = nan else { const exp_x = math.exp(x); - return Complex(f64).new(exp_x * math.cos(y), exp_x * math.sin(y)); + return Complex(f64).init(exp_x * math.cos(y), exp_x * math.sin(y)); } } const epsilon = 0.0001; test "complex.cexp32" { - const a = Complex(f32).new(5, 3); + const a = Complex(f32).init(5, 3); const c = exp(a); try testing.expect(math.approxEqAbs(f32, c.re, -146.927917, epsilon)); @@ -136,7 +136,7 @@ test "complex.cexp32" { } test "complex.cexp64" { - const a = Complex(f64).new(5, 3); + const a = Complex(f64).init(5, 3); const c = exp(a); try testing.expect(math.approxEqAbs(f64, c.re, -146.927917, epsilon)); diff --git a/lib/std/math/complex/ldexp.zig b/lib/std/math/complex/ldexp.zig index 3ae0382fe3..9f31aae549 100644 --- a/lib/std/math/complex/ldexp.zig +++ b/lib/std/math/complex/ldexp.zig @@ -48,7 +48,7 @@ fn ldexp_cexp32(z: Complex(f32), expt: i32) Complex(f32) { const half_expt2 = exptf - half_expt1; const scale2 = @bitCast(f32, (0x7f + half_expt2) << 23); - return Complex(f32).new(math.cos(z.im) * exp_x * scale1 * scale2, math.sin(z.im) * exp_x * scale1 * scale2); + return Complex(f32).init(math.cos(z.im) * exp_x * 
scale1 * scale2, math.sin(z.im) * exp_x * scale1 * scale2); } fn frexp_exp64(x: f64, expt: *i32) f64 { @@ -78,7 +78,7 @@ fn ldexp_cexp64(z: Complex(f64), expt: i32) Complex(f64) { const half_expt2 = exptf - half_expt1; const scale2 = @bitCast(f64, (0x3ff + half_expt2) << 20); - return Complex(f64).new( + return Complex(f64).init( math.cos(z.im) * exp_x * scale1 * scale2, math.sin(z.im) * exp_x * scale1 * scale2, ); diff --git a/lib/std/math/complex/log.zig b/lib/std/math/complex/log.zig index 7f8f649953..e59870d556 100644 --- a/lib/std/math/complex/log.zig +++ b/lib/std/math/complex/log.zig @@ -15,13 +15,13 @@ pub fn log(z: anytype) Complex(@TypeOf(z.re)) { const r = cmath.abs(z); const phi = cmath.arg(z); - return Complex(T).new(math.ln(r), phi); + return Complex(T).init(math.ln(r), phi); } const epsilon = 0.0001; test "complex.clog" { - const a = Complex(f32).new(5, 3); + const a = Complex(f32).init(5, 3); const c = log(a); try testing.expect(math.approxEqAbs(f32, c.re, 1.763180, epsilon)); diff --git a/lib/std/math/complex/pow.zig b/lib/std/math/complex/pow.zig index 0d3a45e6d2..092c8c2422 100644 --- a/lib/std/math/complex/pow.zig +++ b/lib/std/math/complex/pow.zig @@ -19,8 +19,8 @@ pub fn pow(comptime T: type, z: T, c: T) T { const epsilon = 0.0001; test "complex.cpow" { - const a = Complex(f32).new(5, 3); - const b = Complex(f32).new(2.3, -1.3); + const a = Complex(f32).init(5, 3); + const b = Complex(f32).init(2.3, -1.3); const c = pow(Complex(f32), a, b); try testing.expect(math.approxEqAbs(f32, c.re, 58.049110, epsilon)); diff --git a/lib/std/math/complex/proj.zig b/lib/std/math/complex/proj.zig index 260816481b..8527be2293 100644 --- a/lib/std/math/complex/proj.zig +++ b/lib/std/math/complex/proj.zig @@ -14,16 +14,16 @@ pub fn proj(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); if (math.isInf(z.re) or math.isInf(z.im)) { - return Complex(T).new(math.inf(T), math.copysign(T, 0, z.re)); + return Complex(T).init(math.inf(T), math.copysign(T, 0, z.re)); } - return Complex(T).new(z.re, z.im); + return Complex(T).init(z.re, z.im); } const epsilon = 0.0001; test "complex.cproj" { - const a = Complex(f32).new(5, 3); + const a = Complex(f32).init(5, 3); const c = proj(a); try testing.expect(c.re == 5 and c.im == 3); diff --git a/lib/std/math/complex/sin.zig b/lib/std/math/complex/sin.zig index 68551b8596..39b5f584ac 100644 --- a/lib/std/math/complex/sin.zig +++ b/lib/std/math/complex/sin.zig @@ -12,15 +12,15 @@ const Complex = cmath.Complex; /// Returns the sine of z. 
pub fn sin(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); - const p = Complex(T).new(-z.im, z.re); + const p = Complex(T).init(-z.im, z.re); const q = cmath.sinh(p); - return Complex(T).new(q.im, -q.re); + return Complex(T).init(q.im, -q.re); } const epsilon = 0.0001; test "complex.csin" { - const a = Complex(f32).new(5, 3); + const a = Complex(f32).init(5, 3); const c = sin(a); try testing.expect(math.approxEqAbs(f32, c.re, -9.654126, epsilon)); diff --git a/lib/std/math/complex/sinh.zig b/lib/std/math/complex/sinh.zig index ea09f8e17d..b673da3d2f 100644 --- a/lib/std/math/complex/sinh.zig +++ b/lib/std/math/complex/sinh.zig @@ -40,55 +40,55 @@ fn sinh32(z: Complex(f32)) Complex(f32) { if (ix < 0x7f800000 and iy < 0x7f800000) { if (iy == 0) { - return Complex(f32).new(math.sinh(x), y); + return Complex(f32).init(math.sinh(x), y); } // small x: normal case if (ix < 0x41100000) { - return Complex(f32).new(math.sinh(x) * math.cos(y), math.cosh(x) * math.sin(y)); + return Complex(f32).init(math.sinh(x) * math.cos(y), math.cosh(x) * math.sin(y)); } // |x|>= 9, so cosh(x) ~= exp(|x|) if (ix < 0x42b17218) { // x < 88.7: exp(|x|) won't overflow const h = math.exp(math.fabs(x)) * 0.5; - return Complex(f32).new(math.copysign(f32, h, x) * math.cos(y), h * math.sin(y)); + return Complex(f32).init(math.copysign(f32, h, x) * math.cos(y), h * math.sin(y)); } // x < 192.7: scale to avoid overflow else if (ix < 0x4340b1e7) { - const v = Complex(f32).new(math.fabs(x), y); + const v = Complex(f32).init(math.fabs(x), y); const r = ldexp_cexp(v, -1); - return Complex(f32).new(r.re * math.copysign(f32, 1, x), r.im); + return Complex(f32).init(r.re * math.copysign(f32, 1, x), r.im); } // x >= 192.7: result always overflows else { const h = 0x1p127 * x; - return Complex(f32).new(h * math.cos(y), h * h * math.sin(y)); + return Complex(f32).init(h * math.cos(y), h * h * math.sin(y)); } } if (ix == 0 and iy >= 0x7f800000) { - return Complex(f32).new(math.copysign(f32, 0, x * (y - y)), y - y); + return Complex(f32).init(math.copysign(f32, 0, x * (y - y)), y - y); } if (iy == 0 and ix >= 0x7f800000) { if (hx & 0x7fffff == 0) { - return Complex(f32).new(x, y); + return Complex(f32).init(x, y); } - return Complex(f32).new(x, math.copysign(f32, 0, y)); + return Complex(f32).init(x, math.copysign(f32, 0, y)); } if (ix < 0x7f800000 and iy >= 0x7f800000) { - return Complex(f32).new(y - y, x * (y - y)); + return Complex(f32).init(y - y, x * (y - y)); } if (ix >= 0x7f800000 and (hx & 0x7fffff) == 0) { if (iy >= 0x7f800000) { - return Complex(f32).new(x * x, x * (y - y)); + return Complex(f32).init(x * x, x * (y - y)); } - return Complex(f32).new(x * math.cos(y), math.inf_f32 * math.sin(y)); + return Complex(f32).init(x * math.cos(y), math.inf_f32 * math.sin(y)); } - return Complex(f32).new((x * x) * (y - y), (x + x) * (y - y)); + return Complex(f32).init((x * x) * (y - y), (x + x) * (y - y)); } fn sinh64(z: Complex(f64)) Complex(f64) { @@ -107,61 +107,61 @@ fn sinh64(z: Complex(f64)) Complex(f64) { if (ix < 0x7ff00000 and iy < 0x7ff00000) { if (iy | ly == 0) { - return Complex(f64).new(math.sinh(x), y); + return Complex(f64).init(math.sinh(x), y); } // small x: normal case if (ix < 0x40360000) { - return Complex(f64).new(math.sinh(x) * math.cos(y), math.cosh(x) * math.sin(y)); + return Complex(f64).init(math.sinh(x) * math.cos(y), math.cosh(x) * math.sin(y)); } // |x|>= 22, so cosh(x) ~= exp(|x|) if (ix < 0x40862e42) { // x < 710: exp(|x|) won't overflow const h = math.exp(math.fabs(x)) * 0.5; - return 
Complex(f64).new(math.copysign(f64, h, x) * math.cos(y), h * math.sin(y)); + return Complex(f64).init(math.copysign(f64, h, x) * math.cos(y), h * math.sin(y)); } // x < 1455: scale to avoid overflow else if (ix < 0x4096bbaa) { - const v = Complex(f64).new(math.fabs(x), y); + const v = Complex(f64).init(math.fabs(x), y); const r = ldexp_cexp(v, -1); - return Complex(f64).new(r.re * math.copysign(f64, 1, x), r.im); + return Complex(f64).init(r.re * math.copysign(f64, 1, x), r.im); } // x >= 1455: result always overflows else { const h = 0x1p1023 * x; - return Complex(f64).new(h * math.cos(y), h * h * math.sin(y)); + return Complex(f64).init(h * math.cos(y), h * h * math.sin(y)); } } if (ix | lx == 0 and iy >= 0x7ff00000) { - return Complex(f64).new(math.copysign(f64, 0, x * (y - y)), y - y); + return Complex(f64).init(math.copysign(f64, 0, x * (y - y)), y - y); } if (iy | ly == 0 and ix >= 0x7ff00000) { if ((hx & 0xfffff) | lx == 0) { - return Complex(f64).new(x, y); + return Complex(f64).init(x, y); } - return Complex(f64).new(x, math.copysign(f64, 0, y)); + return Complex(f64).init(x, math.copysign(f64, 0, y)); } if (ix < 0x7ff00000 and iy >= 0x7ff00000) { - return Complex(f64).new(y - y, x * (y - y)); + return Complex(f64).init(y - y, x * (y - y)); } if (ix >= 0x7ff00000 and (hx & 0xfffff) | lx == 0) { if (iy >= 0x7ff00000) { - return Complex(f64).new(x * x, x * (y - y)); + return Complex(f64).init(x * x, x * (y - y)); } - return Complex(f64).new(x * math.cos(y), math.inf_f64 * math.sin(y)); + return Complex(f64).init(x * math.cos(y), math.inf_f64 * math.sin(y)); } - return Complex(f64).new((x * x) * (y - y), (x + x) * (y - y)); + return Complex(f64).init((x * x) * (y - y), (x + x) * (y - y)); } const epsilon = 0.0001; test "complex.csinh32" { - const a = Complex(f32).new(5, 3); + const a = Complex(f32).init(5, 3); const c = sinh(a); try testing.expect(math.approxEqAbs(f32, c.re, -73.460617, epsilon)); @@ -169,7 +169,7 @@ test "complex.csinh32" { } test "complex.csinh64" { - const a = Complex(f64).new(5, 3); + const a = Complex(f64).init(5, 3); const c = sinh(a); try testing.expect(math.approxEqAbs(f64, c.re, -73.460617, epsilon)); diff --git a/lib/std/math/complex/sqrt.zig b/lib/std/math/complex/sqrt.zig index 25c486f838..07fed152fa 100644 --- a/lib/std/math/complex/sqrt.zig +++ b/lib/std/math/complex/sqrt.zig @@ -32,15 +32,15 @@ fn sqrt32(z: Complex(f32)) Complex(f32) { const y = z.im; if (x == 0 and y == 0) { - return Complex(f32).new(0, y); + return Complex(f32).init(0, y); } if (math.isInf(y)) { - return Complex(f32).new(math.inf(f32), y); + return Complex(f32).init(math.inf(f32), y); } if (math.isNan(x)) { // raise invalid if y is not nan const t = (y - y) / (y - y); - return Complex(f32).new(x, t); + return Complex(f32).init(x, t); } if (math.isInf(x)) { // sqrt(inf + i nan) = inf + nan i @@ -48,9 +48,9 @@ fn sqrt32(z: Complex(f32)) Complex(f32) { // sqrt(-inf + i nan) = nan +- inf i // sqrt(-inf + iy) = 0 + inf i if (math.signbit(x)) { - return Complex(f32).new(math.fabs(x - y), math.copysign(f32, x, y)); + return Complex(f32).init(math.fabs(x - y), math.copysign(f32, x, y)); } else { - return Complex(f32).new(x, math.copysign(f32, y - y, y)); + return Complex(f32).init(x, math.copysign(f32, y - y, y)); } } @@ -62,13 +62,13 @@ fn sqrt32(z: Complex(f32)) Complex(f32) { if (dx >= 0) { const t = math.sqrt((dx + math.hypot(f64, dx, dy)) * 0.5); - return Complex(f32).new( + return Complex(f32).init( @floatCast(f32, t), @floatCast(f32, dy / (2.0 * t)), ); } else { const t = 
math.sqrt((-dx + math.hypot(f64, dx, dy)) * 0.5); - return Complex(f32).new( + return Complex(f32).init( @floatCast(f32, math.fabs(y) / (2.0 * t)), @floatCast(f32, math.copysign(f64, t, y)), ); @@ -83,15 +83,15 @@ fn sqrt64(z: Complex(f64)) Complex(f64) { var y = z.im; if (x == 0 and y == 0) { - return Complex(f64).new(0, y); + return Complex(f64).init(0, y); } if (math.isInf(y)) { - return Complex(f64).new(math.inf(f64), y); + return Complex(f64).init(math.inf(f64), y); } if (math.isNan(x)) { // raise invalid if y is not nan const t = (y - y) / (y - y); - return Complex(f64).new(x, t); + return Complex(f64).init(x, t); } if (math.isInf(x)) { // sqrt(inf + i nan) = inf + nan i @@ -99,9 +99,9 @@ fn sqrt64(z: Complex(f64)) Complex(f64) { // sqrt(-inf + i nan) = nan +- inf i // sqrt(-inf + iy) = 0 + inf i if (math.signbit(x)) { - return Complex(f64).new(math.fabs(x - y), math.copysign(f64, x, y)); + return Complex(f64).init(math.fabs(x - y), math.copysign(f64, x, y)); } else { - return Complex(f64).new(x, math.copysign(f64, y - y, y)); + return Complex(f64).init(x, math.copysign(f64, y - y, y)); } } @@ -118,10 +118,10 @@ fn sqrt64(z: Complex(f64)) Complex(f64) { var result: Complex(f64) = undefined; if (x >= 0) { const t = math.sqrt((x + math.hypot(f64, x, y)) * 0.5); - result = Complex(f64).new(t, y / (2.0 * t)); + result = Complex(f64).init(t, y / (2.0 * t)); } else { const t = math.sqrt((-x + math.hypot(f64, x, y)) * 0.5); - result = Complex(f64).new(math.fabs(y) / (2.0 * t), math.copysign(f64, t, y)); + result = Complex(f64).init(math.fabs(y) / (2.0 * t), math.copysign(f64, t, y)); } if (scale) { @@ -135,7 +135,7 @@ fn sqrt64(z: Complex(f64)) Complex(f64) { const epsilon = 0.0001; test "complex.csqrt32" { - const a = Complex(f32).new(5, 3); + const a = Complex(f32).init(5, 3); const c = sqrt(a); try testing.expect(math.approxEqAbs(f32, c.re, 2.327117, epsilon)); @@ -143,7 +143,7 @@ test "complex.csqrt32" { } test "complex.csqrt64" { - const a = Complex(f64).new(5, 3); + const a = Complex(f64).init(5, 3); const c = sqrt(a); try testing.expect(math.approxEqAbs(f64, c.re, 2.3271175190399496, epsilon)); diff --git a/lib/std/math/complex/tan.zig b/lib/std/math/complex/tan.zig index ca9d4ce7e9..0ee34dfcc2 100644 --- a/lib/std/math/complex/tan.zig +++ b/lib/std/math/complex/tan.zig @@ -12,15 +12,15 @@ const Complex = cmath.Complex; /// Returns the tanget of z. 
pub fn tan(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); - const q = Complex(T).new(-z.im, z.re); + const q = Complex(T).init(-z.im, z.re); const r = cmath.tanh(q); - return Complex(T).new(r.im, -r.re); + return Complex(T).init(r.im, -r.re); } const epsilon = 0.0001; test "complex.ctan" { - const a = Complex(f32).new(5, 3); + const a = Complex(f32).init(5, 3); const c = tan(a); try testing.expect(math.approxEqAbs(f32, c.re, -0.002708233, epsilon)); diff --git a/lib/std/math/complex/tanh.zig b/lib/std/math/complex/tanh.zig index 096bdaac06..cb5e7b3f6d 100644 --- a/lib/std/math/complex/tanh.zig +++ b/lib/std/math/complex/tanh.zig @@ -36,22 +36,22 @@ fn tanh32(z: Complex(f32)) Complex(f32) { if (ix >= 0x7f800000) { if (ix & 0x7fffff != 0) { const r = if (y == 0) y else x * y; - return Complex(f32).new(x, r); + return Complex(f32).init(x, r); } const xx = @bitCast(f32, hx - 0x40000000); const r = if (math.isInf(y)) y else math.sin(y) * math.cos(y); - return Complex(f32).new(xx, math.copysign(f32, 0, r)); + return Complex(f32).init(xx, math.copysign(f32, 0, r)); } if (!math.isFinite(y)) { const r = if (ix != 0) y - y else x; - return Complex(f32).new(r, y - y); + return Complex(f32).init(r, y - y); } // x >= 11 if (ix >= 0x41300000) { const exp_mx = math.exp(-math.fabs(x)); - return Complex(f32).new(math.copysign(f32, 1, x), 4 * math.sin(y) * math.cos(y) * exp_mx * exp_mx); + return Complex(f32).init(math.copysign(f32, 1, x), 4 * math.sin(y) * math.cos(y) * exp_mx * exp_mx); } // Kahan's algorithm @@ -61,7 +61,7 @@ fn tanh32(z: Complex(f32)) Complex(f32) { const rho = math.sqrt(1 + s * s); const den = 1 + beta * s * s; - return Complex(f32).new((beta * rho * s) / den, t / den); + return Complex(f32).init((beta * rho * s) / den, t / den); } fn tanh64(z: Complex(f64)) Complex(f64) { @@ -78,23 +78,23 @@ fn tanh64(z: Complex(f64)) Complex(f64) { if (ix >= 0x7ff00000) { if ((ix & 0x7fffff) | lx != 0) { const r = if (y == 0) y else x * y; - return Complex(f64).new(x, r); + return Complex(f64).init(x, r); } const xx = @bitCast(f64, (@as(u64, hx - 0x40000000) << 32) | lx); const r = if (math.isInf(y)) y else math.sin(y) * math.cos(y); - return Complex(f64).new(xx, math.copysign(f64, 0, r)); + return Complex(f64).init(xx, math.copysign(f64, 0, r)); } if (!math.isFinite(y)) { const r = if (ix != 0) y - y else x; - return Complex(f64).new(r, y - y); + return Complex(f64).init(r, y - y); } // x >= 22 if (ix >= 0x40360000) { const exp_mx = math.exp(-math.fabs(x)); - return Complex(f64).new(math.copysign(f64, 1, x), 4 * math.sin(y) * math.cos(y) * exp_mx * exp_mx); + return Complex(f64).init(math.copysign(f64, 1, x), 4 * math.sin(y) * math.cos(y) * exp_mx * exp_mx); } // Kahan's algorithm @@ -104,13 +104,13 @@ fn tanh64(z: Complex(f64)) Complex(f64) { const rho = math.sqrt(1 + s * s); const den = 1 + beta * s * s; - return Complex(f64).new((beta * rho * s) / den, t / den); + return Complex(f64).init((beta * rho * s) / den, t / den); } const epsilon = 0.0001; test "complex.ctanh32" { - const a = Complex(f32).new(5, 3); + const a = Complex(f32).init(5, 3); const c = tanh(a); try testing.expect(math.approxEqAbs(f32, c.re, 0.999913, epsilon)); @@ -118,7 +118,7 @@ test "complex.ctanh32" { } test "complex.ctanh64" { - const a = Complex(f64).new(5, 3); + const a = Complex(f64).init(5, 3); const c = tanh(a); try testing.expect(math.approxEqAbs(f64, c.re, 0.999913, epsilon)); diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig index 03586bc777..b2ce8df73f 100644 --- 
a/lib/std/zig/parser_test.zig +++ b/lib/std/zig/parser_test.zig @@ -1608,13 +1608,13 @@ test "zig fmt: if-else with comment before else" { \\comptime { \\ // cexp(finite|nan +- i inf|nan) = nan + i nan \\ if ((hx & 0x7fffffff) != 0x7f800000) { - \\ return Complex(f32).new(y - y, y - y); + \\ return Complex(f32).init(y - y, y - y); \\ } // cexp(-inf +- i inf|nan) = 0 + i0 \\ else if (hx & 0x80000000 != 0) { - \\ return Complex(f32).new(0, 0); + \\ return Complex(f32).init(0, 0); \\ } // cexp(+inf +- i inf|nan) = inf + i nan \\ else { - \\ return Complex(f32).new(x, y - y); + \\ return Complex(f32).init(x, y - y); \\ } \\} \\ @@ -2267,16 +2267,16 @@ test "zig fmt: line comment between if block and else keyword" { \\test "aoeu" { \\ // cexp(finite|nan +- i inf|nan) = nan + i nan \\ if ((hx & 0x7fffffff) != 0x7f800000) { - \\ return Complex(f32).new(y - y, y - y); + \\ return Complex(f32).init(y - y, y - y); \\ } \\ // cexp(-inf +- i inf|nan) = 0 + i0 \\ else if (hx & 0x80000000 != 0) { - \\ return Complex(f32).new(0, 0); + \\ return Complex(f32).init(0, 0); \\ } \\ // cexp(+inf +- i inf|nan) = inf + i nan \\ // another comment \\ else { - \\ return Complex(f32).new(x, y - y); + \\ return Complex(f32).init(x, y - y); \\ } \\} \\ From fe1a166589db0f2371429c93e1e1e622c19378f1 Mon Sep 17 00:00:00 2001 From: LemonBoy Date: Mon, 17 May 2021 17:41:42 +0200 Subject: [PATCH 19/21] translate-c: Add `@truncate` where needed Make getLimitedValue API much easier to use with zig. Fixes the compilation on 32bit hosts. --- src/clang.zig | 5 ++++- src/translate_c.zig | 4 ++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/clang.zig b/src/clang.zig index 256dbda1e1..b9e152aef4 100644 --- a/src/clang.zig +++ b/src/clang.zig @@ -1,3 +1,4 @@ +const std = @import("std"); pub const builtin = @import("builtin"); pub const SourceLocation = extern struct { @@ -115,7 +116,9 @@ pub const APFloatBaseSemantics = extern enum { }; pub const APInt = opaque { - pub const getLimitedValue = ZigClangAPInt_getLimitedValue; + pub fn getLimitedValue(self: *const APInt, comptime T: type) T { + return @truncate(T, ZigClangAPInt_getLimitedValue(self, std.math.maxInt(T))); + } extern fn ZigClangAPInt_getLimitedValue(*const APInt, limit: u64) u64; }; diff --git a/src/translate_c.zig b/src/translate_c.zig index 348e284db3..19aec279ec 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -2341,7 +2341,7 @@ fn transInitListExprArray( assert(@ptrCast(*const clang.Type, arr_type).isConstantArrayType()); const const_arr_ty = @ptrCast(*const clang.ConstantArrayType, arr_type); const size_ap_int = const_arr_ty.getSize(); - const all_count = size_ap_int.getLimitedValue(math.maxInt(usize)); + const all_count = size_ap_int.getLimitedValue(usize); const leftover_count = all_count - init_count; if (all_count == 0) { @@ -4266,7 +4266,7 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan const const_arr_ty = @ptrCast(*const clang.ConstantArrayType, ty); const size_ap_int = const_arr_ty.getSize(); - const size = size_ap_int.getLimitedValue(math.maxInt(usize)); + const size = size_ap_int.getLimitedValue(usize); const elem_type = try transType(c, scope, const_arr_ty.getElementType().getTypePtr(), source_loc); return Tag.array_type.create(c.arena, .{ .len = size, .elem_type = elem_type }); From b7eab32f42040ecb80501111adb05bc8369625e8 Mon Sep 17 00:00:00 2001 From: LemonBoy Date: Sun, 16 May 2021 11:51:39 +0200 Subject: [PATCH 20/21] std: Allocate tlscsprng memory as needed Let mmap allocate a 
block of memory that's wide enough to use with MADV_WIPEONFORK, madvise granularity is the current system page size (using a static buffer of mem.page_size bytes would be wrong, that's the minimum page size). As a result, we don't zero some random chunk of memory every time we fork the process. Fixes #7609 --- lib/std/crypto/tlcsprng.zig | 117 +++++++++++++++++++++++------------- 1 file changed, 75 insertions(+), 42 deletions(-) diff --git a/lib/std/crypto/tlcsprng.zig b/lib/std/crypto/tlcsprng.zig index 115a7ab882..7c43125920 100644 --- a/lib/std/crypto/tlcsprng.zig +++ b/lib/std/crypto/tlcsprng.zig @@ -12,6 +12,7 @@ const std = @import("std"); const root = @import("root"); const mem = std.mem; +const os = std.os; /// We use this as a layer of indirection because global const pointers cannot /// point to thread-local variables. @@ -42,16 +43,12 @@ const maybe_have_wipe_on_fork = std.Target.current.os.isAtLeast(.linux, .{ .minor = 14, }) orelse true; -const WipeMe = struct { - init_state: enum { uninitialized, initialized, failed }, +const Context = struct { + init_state: enum(u8) { uninitialized = 0, initialized, failed }, gimli: std.crypto.core.Gimli, }; -const wipe_align = if (maybe_have_wipe_on_fork) mem.page_size else @alignOf(WipeMe); -threadlocal var wipe_me: WipeMe align(wipe_align) = .{ - .gimli = undefined, - .init_state = .uninitialized, -}; +threadlocal var wipe_mem: []align(mem.page_size) u8 = &[_]u8{}; fn tlsCsprngFill(_: *const std.rand.Random, buffer: []u8) void { if (std.builtin.link_libc and @hasDecl(std.c, "arc4random_buf")) { @@ -64,35 +61,69 @@ fn tlsCsprngFill(_: *const std.rand.Random, buffer: []u8) void { if (comptime std.meta.globalOption("crypto_always_getrandom", bool) orelse false) { return fillWithOsEntropy(buffer); } - switch (wipe_me.init_state) { + + if (wipe_mem.len == 0) { + // Not initialized yet. + if (want_fork_safety and maybe_have_wipe_on_fork) { + // Allocate a per-process page, madvise operates with page + // granularity. + wipe_mem = os.mmap( + null, + @sizeOf(Context), + os.PROT_READ | os.PROT_WRITE, + os.MAP_PRIVATE | os.MAP_ANONYMOUS, + -1, + 0, + ) catch |err| { + // Could not allocate memory for the local state, fall back to + // the OS syscall. + return fillWithOsEntropy(buffer); + }; + // The memory is already zero-initialized. + } else { + // Use a static thread-local buffer. + const S = struct { + threadlocal var buf: Context align(mem.page_size) = .{ + .init_state = .uninitialized, + .gimli = undefined, + }; + }; + wipe_mem = mem.asBytes(&S.buf); + } + } + const ctx = @ptrCast(*Context, wipe_mem.ptr); + + switch (ctx.init_state) { .uninitialized => { - if (want_fork_safety) { - if (maybe_have_wipe_on_fork) { - if (std.os.madvise( - @ptrCast([*]align(mem.page_size) u8, &wipe_me), - @sizeOf(@TypeOf(wipe_me)), - std.os.MADV_WIPEONFORK, - )) |_| { - return initAndFill(buffer); - } else |_| if (std.Thread.use_pthreads) { - return setupPthreadAtforkAndFill(buffer); - } else { - // Since we failed to set up fork safety, we fall back to always - // calling getrandom every time. - wipe_me.init_state = .failed; - return fillWithOsEntropy(buffer); - } - } else if (std.Thread.use_pthreads) { - return setupPthreadAtforkAndFill(buffer); - } else { - // We have no mechanism to provide fork safety, but we want fork safety, - // so we fall back to calling getrandom every time. 
- wipe_me.init_state = .failed; - return fillWithOsEntropy(buffer); - } - } else { + if (!want_fork_safety) { return initAndFill(buffer); } + + if (maybe_have_wipe_on_fork) wof: { + // Qemu user-mode emulation ignores any valid/invalid madvise + // hint and returns success. Check if this is the case by + // passing bogus parameters, we expect EINVAL as result. + if (os.madvise(wipe_mem.ptr, 0, 0xffffffff)) |_| { + break :wof; + } else |_| {} + + os.madvise( + wipe_mem.ptr, + wipe_mem.len, + os.MADV_WIPEONFORK, + ) catch |_| { + return initAndFill(buffer); + }; + } + + if (std.Thread.use_pthreads) { + return setupPthreadAtforkAndFill(buffer); + } + + // Since we failed to set up fork safety, we fall back to always + // calling getrandom every time. + ctx.init_state = .failed; + return fillWithOsEntropy(buffer); }, .initialized => { return fillWithCsprng(buffer); @@ -110,7 +141,8 @@ fn tlsCsprngFill(_: *const std.rand.Random, buffer: []u8) void { fn setupPthreadAtforkAndFill(buffer: []u8) void { const failed = std.c.pthread_atfork(null, null, childAtForkHandler) != 0; if (failed) { - wipe_me.init_state = .failed; + const ctx = @ptrCast(*Context, wipe_mem.ptr); + ctx.init_state = .failed; return fillWithOsEntropy(buffer); } else { return initAndFill(buffer); @@ -118,21 +150,21 @@ fn setupPthreadAtforkAndFill(buffer: []u8) void { } fn childAtForkHandler() callconv(.C) void { - const wipe_slice = @ptrCast([*]u8, &wipe_me)[0..@sizeOf(@TypeOf(wipe_me))]; - std.crypto.utils.secureZero(u8, wipe_slice); + std.crypto.utils.secureZero(u8, wipe_mem); } fn fillWithCsprng(buffer: []u8) void { + const ctx = @ptrCast(*Context, wipe_mem.ptr); if (buffer.len != 0) { - wipe_me.gimli.squeeze(buffer); + ctx.gimli.squeeze(buffer); } else { - wipe_me.gimli.permute(); + ctx.gimli.permute(); } - mem.set(u8, wipe_me.gimli.toSlice()[0..std.crypto.core.Gimli.RATE], 0); + mem.set(u8, ctx.gimli.toSlice()[0..std.crypto.core.Gimli.RATE], 0); } fn fillWithOsEntropy(buffer: []u8) void { - std.os.getrandom(buffer) catch @panic("getrandom() failed to provide entropy"); + os.getrandom(buffer) catch @panic("getrandom() failed to provide entropy"); } fn initAndFill(buffer: []u8) void { @@ -147,11 +179,12 @@ fn initAndFill(buffer: []u8) void { fillWithOsEntropy(&seed); } - wipe_me.gimli = std.crypto.core.Gimli.init(seed); + const ctx = @ptrCast(*Context, wipe_mem.ptr); + ctx.gimli = std.crypto.core.Gimli.init(seed); // This is at the end so that accidental recursive dependencies result // in stack overflows instead of invalid random data. 
- wipe_me.init_state = .initialized; + ctx.init_state = .initialized; return fillWithCsprng(buffer); } From 65cee0b3fd5f9b3f83b79cc8fd1b64d13f4dd0c4 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sun, 16 May 2021 12:54:51 +0800 Subject: [PATCH 21/21] stage2 ARM: correct spilling in genArmMul as well --- src/codegen.zig | 69 ++++++++++++++++++++++++++++++++++----------- test/stage2/arm.zig | 49 ++++++++++++++++++++++++++++++++ 2 files changed, 101 insertions(+), 17 deletions(-) diff --git a/src/codegen.zig b/src/codegen.zig index 9d344bf1d0..379a0f9b7d 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -1563,28 +1563,63 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const lhs = try self.resolveInst(op_lhs); const rhs = try self.resolveInst(op_rhs); + const lhs_is_register = lhs == .register; + const rhs_is_register = rhs == .register; + const reuse_lhs = lhs_is_register and self.reuseOperand(inst, 0, lhs); + const reuse_rhs = !reuse_lhs and rhs_is_register and self.reuseOperand(inst, 1, rhs); + // Destination must be a register // LHS must be a register // RHS must be a register var dst_mcv: MCValue = undefined; - var lhs_mcv: MCValue = undefined; - var rhs_mcv: MCValue = undefined; - if (self.reuseOperand(inst, 0, lhs)) { - // LHS is the destination - lhs_mcv = if (lhs != .register) try self.copyToNewRegister(inst, lhs) else lhs; - rhs_mcv = if (rhs != .register) try self.copyToNewRegister(inst, rhs) else rhs; - dst_mcv = lhs_mcv; - } else if (self.reuseOperand(inst, 1, rhs)) { - // RHS is the destination - lhs_mcv = if (lhs != .register) try self.copyToNewRegister(inst, lhs) else lhs; - rhs_mcv = if (rhs != .register) try self.copyToNewRegister(inst, rhs) else rhs; - dst_mcv = rhs_mcv; + var lhs_mcv: MCValue = lhs; + var rhs_mcv: MCValue = rhs; + + // Allocate registers for operands and/or destination + const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; + if (reuse_lhs) { + // Allocate 0 or 1 registers + if (!rhs_is_register) { + rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_rhs, &.{lhs.register}) }; + branch.inst_table.putAssumeCapacity(op_rhs, rhs_mcv); + } + dst_mcv = lhs; + } else if (reuse_rhs) { + // Allocate 0 or 1 registers + if (!lhs_is_register) { + lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_lhs, &.{rhs.register}) }; + branch.inst_table.putAssumeCapacity(op_lhs, lhs_mcv); + } + dst_mcv = rhs; } else { - // TODO save 1 copy instruction by directly allocating the destination register - // LHS is the destination - lhs_mcv = try self.copyToNewRegister(inst, lhs); - rhs_mcv = if (rhs != .register) try self.copyToNewRegister(inst, rhs) else rhs; - dst_mcv = lhs_mcv; + // Allocate 1 or 2 registers + if (lhs_is_register and rhs_is_register) { + dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{ lhs.register, rhs.register }) }; + } else if (lhs_is_register) { + // Move RHS to register + dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{lhs.register}) }; + rhs_mcv = dst_mcv; + } else if (rhs_is_register) { + // Move LHS to register + dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{rhs.register}) }; + lhs_mcv = dst_mcv; + } else { + // Move LHS and RHS to register + const regs = try self.register_manager.allocRegs(2, .{ inst, op_rhs }, &.{}); + lhs_mcv = MCValue{ .register = regs[0] }; + rhs_mcv = MCValue{ .register = regs[1] }; + dst_mcv = lhs_mcv; + + branch.inst_table.putAssumeCapacity(op_rhs, rhs_mcv); + } + } + + // 
Move the operands to the newly allocated registers + if (!lhs_is_register) { + try self.genSetReg(op_lhs.src, op_lhs.ty, lhs_mcv.register, lhs); + } + if (!rhs_is_register) { + try self.genSetReg(op_rhs.src, op_rhs.ty, rhs_mcv.register, rhs); } writeInt(u32, try self.code.addManyAsArray(4), Instruction.mul(.al, dst_mcv.register, lhs_mcv.register, rhs_mcv.register).toU32()); diff --git a/test/stage2/arm.zig b/test/stage2/arm.zig index 31b3c06dcd..02033005d1 100644 --- a/test/stage2/arm.zig +++ b/test/stage2/arm.zig @@ -510,5 +510,54 @@ pub fn addCases(ctx: *TestContext) !void { , "", ); + + case.addCompareOutput( + \\export fn _start() noreturn { + \\ assert(addMul(3, 4) == 357747496); + \\ exit(); + \\} + \\ + \\fn addMul(a: u32, b: u32) u32 { + \\ const x: u32 = blk: { + \\ const c = a + b; // 7 + \\ const d = a + c; // 10 + \\ const e = d + b; // 14 + \\ const f = d + e; // 24 + \\ const g = e + f; // 38 + \\ const h = f + g; // 62 + \\ const i = g + h; // 100 + \\ const j = i + d; // 110 + \\ const k = i + j; // 210 + \\ const l = k + c; // 217 + \\ const m = l * d; // 2170 + \\ const n = m + e; // 2184 + \\ const o = n * f; // 52416 + \\ const p = o + g; // 52454 + \\ const q = p * h; // 3252148 + \\ const r = q + i; // 3252248 + \\ const s = r * j; // 357747280 + \\ const t = s + k; // 357747490 + \\ break :blk t; + \\ }; + \\ const y = x + a; // 357747493 + \\ const z = y + a; // 357747496 + \\ return z; + \\} + \\ + \\fn assert(ok: bool) void { + \\ if (!ok) unreachable; + \\} + \\ + \\fn exit() noreturn { + \\ asm volatile ("svc #0" + \\ : + \\ : [number] "{r7}" (1), + \\ [arg1] "{r0}" (0) + \\ : "memory" + \\ ); + \\ unreachable; + \\} + , + "",); } }
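
A note on the allocation strategy in genArmMul above: the restructured code is a small case analysis — reuse an operand's register as the destination when reuseOperand allows it, otherwise allocate just enough fresh registers (recording any newly placed operand in the branch's inst_table) so that destination, LHS and RHS are all in registers before the mul is emitted. The standalone sketch below is purely illustrative — freshRegsNeeded and its driver are invented for this note and are not compiler code — and only restates the "Allocate 0 or 1 registers" / "Allocate 1 or 2 registers" comments so the counts are easy to verify:

const std = @import("std");

/// How many fresh registers the case analysis in genArmMul ends up allocating,
/// given which operands already live in registers and whether one of them can
/// be reused as the destination. Illustrative only.
fn freshRegsNeeded(reuse_lhs: bool, reuse_rhs: bool, lhs_is_register: bool, rhs_is_register: bool) u8 {
    if (reuse_lhs) {
        // Destination is the LHS register; at most the RHS needs a register.
        return if (rhs_is_register) 0 else 1;
    }
    if (reuse_rhs) {
        // Destination is the RHS register; at most the LHS needs a register.
        return if (lhs_is_register) 0 else 1;
    }
    if (lhs_is_register and rhs_is_register) return 1; // destination only
    if (lhs_is_register or rhs_is_register) return 1; // destination doubles as the missing operand
    return 2; // two registers; the LHS register doubles as the destination

}

pub fn main() void {
    // Mirrors the comments in the patch: 0 or 1 registers when an operand is
    // reused, 1 or 2 registers otherwise.
    std.debug.assert(freshRegsNeeded(true, false, true, true) == 0);
    std.debug.assert(freshRegsNeeded(true, false, true, false) == 1);
    std.debug.assert(freshRegsNeeded(false, true, false, true) == 1);
    std.debug.assert(freshRegsNeeded(false, false, true, true) == 1);
    std.debug.assert(freshRegsNeeded(false, false, false, false) == 2);
    std.debug.print("all register-count cases check out\n", .{});
}

At most two fresh registers are ever needed, and whenever an operand has to be moved into a register anyway, the destination shares that register, which is what lets the genSetReg calls after the allocation double as the "move into destination" step.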
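For the tlcsprng change earlier in this series, the setup sequence is easier to follow outside the CSPRNG itself: map a page-aligned, page-granular region, probe madvise with deliberately bogus arguments to detect qemu user-mode emulation (qemu accepts any hint where a real kernel returns EINVAL), and only then request MADV_WIPEONFORK, falling back if that fails. The sketch below is a standalone illustration, assuming the std.os names as they are spelled in the patch (os.PROT_*, os.MAP_*, os.MADV_WIPEONFORK); it is not the library code itself:

const std = @import("std");
const mem = std.mem;
const os = std.os;

pub fn main() !void {
    // Page-granular, page-aligned mapping, like the one that holds Context.
    const region = try os.mmap(
        null,
        mem.page_size,
        os.PROT_READ | os.PROT_WRITE,
        os.MAP_PRIVATE | os.MAP_ANONYMOUS,
        -1,
        0,
    );
    defer os.munmap(region);

    // Probe: a bogus madvise call must fail with EINVAL on a real kernel,
    // but qemu user-mode emulation reports success for any hint.
    if (os.madvise(region.ptr, 0, 0xffffffff)) |_| {
        std.debug.print("madvise hints are ignored (qemu?); skipping wipe-on-fork\n", .{});
        return;
    } else |_| {}

    if (os.madvise(region.ptr, region.len, os.MADV_WIPEONFORK)) |_| {
        std.debug.print("MADV_WIPEONFORK armed; children see zeroed memory after fork()\n", .{});
    } else |_| {
        std.debug.print("MADV_WIPEONFORK unavailable; need pthread_atfork or per-call getrandom\n", .{});
    }
}

In the real implementation the mapped region holds the Context (the Gimli state plus the init flag with uninitialized = 0), so a successful MADV_WIPEONFORK means a forked child reads zeroed state, sees .uninitialized, and reseeds instead of reusing the parent's keystream.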