From c746cbc686c46904a5d381725079a69e38b201cd Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Fri, 3 Mar 2023 18:06:25 +0100
Subject: [PATCH 001/294] codegen: move gen logic for typed values, consts and
decl ref to common codegen
---
src/arch/aarch64/CodeGen.zig | 6 +-
src/arch/arm/CodeGen.zig | 6 +-
src/arch/riscv64/CodeGen.zig | 14 +-
src/arch/sparc64/CodeGen.zig | 6 +-
src/arch/wasm/CodeGen.zig | 2 -
src/arch/x86_64/CodeGen.zig | 216 +++---------------------
src/codegen.zig | 307 ++++++++++++++++++++++++++++++++---
src/link/Coff.zig | 2 +-
src/link/Elf.zig | 2 +-
src/link/MachO.zig | 2 +-
src/link/Plan9.zig | 2 +-
src/link/Wasm.zig | 2 +-
src/register_manager.zig | 3 +
13 files changed, 323 insertions(+), 247 deletions(-)
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 818b04f890..23f458f910 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -41,11 +41,7 @@ const c_abi_int_param_regs = abi.c_abi_int_param_regs;
const c_abi_int_return_regs = abi.c_abi_int_return_regs;
const gp = abi.RegisterClass.gp;
-const InnerError = error{
- OutOfMemory,
- CodegenFail,
- OutOfRegisters,
-};
+const InnerError = codegen.CodeGenError || error{OutOfRegisters};
gpa: Allocator,
air: Air,
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index ceabe70438..87806223e3 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -42,11 +42,7 @@ const c_abi_int_param_regs = abi.c_abi_int_param_regs;
const c_abi_int_return_regs = abi.c_abi_int_return_regs;
const gp = abi.RegisterClass.gp;
-const InnerError = error{
- OutOfMemory,
- CodegenFail,
- OutOfRegisters,
-};
+const InnerError = codegen.CodeGenError || error{OutOfRegisters};
gpa: Allocator,
air: Air,
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index afcf4b0bb7..fad5482cbc 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -21,10 +21,10 @@ const DW = std.dwarf;
const leb128 = std.leb;
const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
+const codegen = @import("../../codegen.zig");
-const Result = @import("../../codegen.zig").Result;
-const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError;
-const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
+const Result = codegen.Result;
+const DebugInfoOutput = codegen.DebugInfoOutput;
const bits = @import("bits.zig");
const abi = @import("abi.zig");
@@ -35,11 +35,7 @@ const Instruction = abi.Instruction;
const callee_preserved_regs = abi.callee_preserved_regs;
const gp = abi.RegisterClass.gp;
-const InnerError = error{
- OutOfMemory,
- CodegenFail,
- OutOfRegisters,
-};
+const InnerError = codegen.CodeGenError || error{OutOfRegisters};
gpa: Allocator,
air: Air,
@@ -225,7 +221,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!Result {
+) codegen.CodeGenError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index c8f77fe702..5a108eca85 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -38,11 +38,7 @@ const gp = abi.RegisterClass.gp;
const Self = @This();
-const InnerError = error{
- OutOfMemory,
- CodegenFail,
- OutOfRegisters,
-};
+const InnerError = codegen.CodeGenError || error{OutOfRegisters};
const RegisterView = enum(u1) {
caller,
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 2f191fd834..511a10769e 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -733,8 +733,6 @@ const InnerError = error{
OutOfMemory,
/// An error occurred when trying to lower AIR to MIR.
CodegenFail,
- /// Can occur when dereferencing a pointer that points to a `Decl` of which the analysis has failed
- AnalysisFail,
/// Compiler implementation could not handle a large integer.
Overflow,
};
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 53d38f520a..2ec1a33619 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -40,11 +40,7 @@ const Register = bits.Register;
const gp = abi.RegisterClass.gp;
const sse = abi.RegisterClass.sse;
-const InnerError = error{
- OutOfMemory,
- CodegenFail,
- OutOfRegisters,
-};
+const InnerError = codegen.CodeGenError || error{OutOfRegisters};
gpa: Allocator,
air: Air,
@@ -6683,7 +6679,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, pl_op.operand });
}
-pub fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
+fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
// First section of indexes correspond to a set number of constant values.
const ref_int = @enumToInt(inst);
if (ref_int < Air.Inst.Ref.typed_value_map.len) {
@@ -6752,200 +6748,26 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV
return mcv;
}
-fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
- log.debug("lowerDeclRef: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
-
- // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
- if (tv.ty.zigTypeTag() == .Pointer) blk: {
- if (tv.ty.castPtrToFn()) |_| break :blk;
- if (!tv.ty.elemType2().hasRuntimeBits()) {
- return MCValue.none;
- }
- }
-
- const module = self.bin_file.options.module.?;
- const decl = module.declPtr(decl_index);
- module.markDeclAlive(decl);
-
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
- const atom = elf_file.getAtom(atom_index);
- return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
- } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- const atom_index = try macho_file.getOrCreateAtomForDecl(decl_index);
- const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
- return MCValue{ .linker_load = .{
- .type = .got,
- .sym_index = sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
- const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
- return MCValue{ .linker_load = .{
- .type = .got,
- .sym_index = sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- const decl_block_index = try p9.seeDecl(decl_index);
- const decl_block = p9.getDeclBlock(decl_block_index);
- const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
- return MCValue{ .memory = got_addr };
- } else {
- return self.fail("TODO codegen non-ELF const Decl pointer", .{});
- }
-}
-
-fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
- log.debug("lowerUnnamedConst: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
- const local_sym_index = self.bin_file.lowerUnnamedConst(tv, self.mod_fn.owner_decl) catch |err| {
- return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
- };
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
- } else if (self.bin_file.cast(link.File.MachO)) |_| {
- return MCValue{ .linker_load = .{
- .type = .direct,
- .sym_index = local_sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Coff)) |_| {
- return MCValue{ .linker_load = .{
- .type = .direct,
- .sym_index = local_sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- const got_index = local_sym_index; // the plan9 backend returns the got_index
- const got_addr = p9.bases.data + got_index * ptr_bytes;
- return MCValue{ .memory = got_addr };
- } else {
- return self.fail("TODO lower unnamed const", .{});
- }
-}
-
fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
- var typed_value = arg_tv;
- if (typed_value.val.castTag(.runtime_value)) |rt| {
- typed_value.val = rt.data;
- }
- log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmtDebug(), typed_value.val.fmtDebug() });
- if (typed_value.val.isUndef())
- return MCValue{ .undef = {} };
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
-
- if (typed_value.val.castTag(.decl_ref)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data);
- }
- if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data.decl_index);
- }
-
- const target = self.target.*;
-
- switch (typed_value.ty.zigTypeTag()) {
- .Void => return MCValue{ .none = {} },
- .Pointer => switch (typed_value.ty.ptrSize()) {
- .Slice => {},
- else => {
- switch (typed_value.val.tag()) {
- .int_u64 => {
- return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
- },
- else => {},
- }
- },
+ const mcv: MCValue = switch (try codegen.genTypedValue(
+ self.bin_file,
+ self.src_loc,
+ arg_tv,
+ self.mod_fn.owner_decl,
+ )) {
+ .mcv => |mcv| switch (mcv) {
+ .none => .none,
+ .undef => .undef,
+ .linker_load => |ll| .{ .linker_load = ll },
+ .immediate => |imm| .{ .immediate = imm },
+ .memory => |addr| .{ .memory = addr },
},
- .Int => {
- const info = typed_value.ty.intInfo(self.target.*);
- if (info.bits <= ptr_bits and info.signedness == .signed) {
- return MCValue{ .immediate = @bitCast(u64, typed_value.val.toSignedInt(target)) };
- }
- if (!(info.bits > ptr_bits or info.signedness == .signed)) {
- return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
- }
+ .fail => |msg| {
+ self.err_msg = msg;
+ return error.CodegenFail;
},
- .Bool => {
- return MCValue{ .immediate = @boolToInt(typed_value.val.toBool()) };
- },
- .Optional => {
- if (typed_value.ty.isPtrLikeOptional()) {
- if (typed_value.val.isNull())
- return MCValue{ .immediate = 0 };
-
- var buf: Type.Payload.ElemType = undefined;
- return self.genTypedValue(.{
- .ty = typed_value.ty.optionalChild(&buf),
- .val = typed_value.val,
- });
- } else if (typed_value.ty.abiSize(self.target.*) == 1) {
- return MCValue{ .immediate = @boolToInt(!typed_value.val.isNull()) };
- }
- },
- .Enum => {
- if (typed_value.val.castTag(.enum_field_index)) |field_index| {
- switch (typed_value.ty.tag()) {
- .enum_simple => {
- return MCValue{ .immediate = field_index.data };
- },
- .enum_full, .enum_nonexhaustive => {
- const enum_full = typed_value.ty.cast(Type.Payload.EnumFull).?.data;
- if (enum_full.values.count() != 0) {
- const tag_val = enum_full.values.keys()[field_index.data];
- return self.genTypedValue(.{ .ty = enum_full.tag_ty, .val = tag_val });
- } else {
- return MCValue{ .immediate = field_index.data };
- }
- },
- else => unreachable,
- }
- } else {
- var int_tag_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer);
- return self.genTypedValue(.{ .ty = int_tag_ty, .val = typed_value.val });
- }
- },
- .ErrorSet => {
- switch (typed_value.val.tag()) {
- .@"error" => {
- const err_name = typed_value.val.castTag(.@"error").?.data.name;
- const module = self.bin_file.options.module.?;
- const global_error_set = module.global_error_set;
- const error_index = global_error_set.get(err_name).?;
- return MCValue{ .immediate = error_index };
- },
- else => {
- // In this case we are rendering an error union which has a 0 bits payload.
- return MCValue{ .immediate = 0 };
- },
- }
- },
- .ErrorUnion => {
- const error_type = typed_value.ty.errorUnionSet();
- const payload_type = typed_value.ty.errorUnionPayload();
- const is_pl = typed_value.val.errorUnionIsPayload();
-
- if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
- // We use the error type directly as the type.
- const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero);
- return self.genTypedValue(.{ .ty = error_type, .val = err_val });
- }
- },
-
- .ComptimeInt => unreachable,
- .ComptimeFloat => unreachable,
- .Type => unreachable,
- .EnumLiteral => unreachable,
- .NoReturn => unreachable,
- .Undefined => unreachable,
- .Null => unreachable,
- .Opaque => unreachable,
-
- else => {},
- }
-
- return self.lowerUnnamedConst(typed_value);
+ };
+ return mcv;
}
const CallMCValues = struct {
diff --git a/src/codegen.zig b/src/codegen.zig
index df7ceff1f0..245745d6f6 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -29,13 +29,14 @@ pub const Result = union(enum) {
fail: *ErrorMsg,
};
-pub const GenerateSymbolError = error{
+pub const CodeGenError = error{
OutOfMemory,
Overflow,
- /// A Decl that this symbol depends on had a semantic analysis failure.
- AnalysisFail,
+ CodegenFail,
};
+pub const GenerateSymbolError = CodeGenError;
+
pub const DebugInfoOutput = union(enum) {
dwarf: *link.File.Dwarf.DeclState,
/// the plan9 debuginfo output is a bytecode with 4 opcodes
@@ -63,19 +64,6 @@ pub const DebugInfoOutput = union(enum) {
none,
};
-/// Helper struct to denote that the value is in memory but requires a linker relocation fixup:
-/// * got - the value is referenced indirectly via GOT entry index (the linker emits a got-type reloc)
-/// * direct - the value is referenced directly via symbol index index (the linker emits a displacement reloc)
-/// * import - the value is referenced indirectly via import entry index (the linker emits an import-type reloc)
-pub const LinkerLoad = struct {
- type: enum {
- got,
- direct,
- import,
- },
- sym_index: u32,
-};
-
pub fn generateFunction(
bin_file: *link.File,
src_loc: Module.SrcLoc,
@@ -84,7 +72,7 @@ pub fn generateFunction(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!Result {
+) CodeGenError!Result {
switch (bin_file.options.target.cpu.arch) {
.arm,
.armeb,
@@ -120,7 +108,7 @@ pub fn generateSymbol(
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
reloc_info: RelocInfo,
-) GenerateSymbolError!Result {
+) CodeGenError!Result {
const tracy = trace(@src());
defer tracy.end();
@@ -823,7 +811,7 @@ fn lowerDeclRef(
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
reloc_info: RelocInfo,
-) GenerateSymbolError!Result {
+) CodeGenError!Result {
const target = bin_file.options.target;
const module = bin_file.options.module.?;
if (typed_value.ty.isSlice()) {
@@ -880,6 +868,287 @@ fn lowerDeclRef(
return Result.ok;
}
+/// Helper struct to denote that the value is in memory but requires a linker relocation fixup:
+/// * got - the value is referenced indirectly via GOT entry index (the linker emits a got-type reloc)
+/// * direct - the value is referenced directly via symbol index (the linker emits a displacement reloc)
+/// * import - the value is referenced indirectly via import entry index (the linker emits an import-type reloc)
+pub const LinkerLoad = struct {
+ type: enum {
+ got,
+ direct,
+ import,
+ },
+ sym_index: u32,
+};
+
+pub const GenResult = union(enum) {
+ mcv: MCValue,
+ fail: *ErrorMsg,
+
+ const MCValue = union(enum) {
+ none,
+ undef,
+ /// The bit-width of the immediate may be smaller than `u64`. For example, on 32-bit targets
+ /// such as ARM, the immediate will never exceed 32-bits.
+ immediate: u64,
+ linker_load: LinkerLoad,
+ /// Direct by-address reference to memory location.
+ memory: u64,
+ };
+
+ fn mcv(val: MCValue) GenResult {
+ return .{ .mcv = val };
+ }
+
+ fn fail(
+ gpa: Allocator,
+ src_loc: Module.SrcLoc,
+ comptime format: []const u8,
+ args: anytype,
+ ) Allocator.Error!GenResult {
+ const msg = try ErrorMsg.create(gpa, src_loc, format, args);
+ return .{ .fail = msg };
+ }
+};
+
+fn genDeclRef(
+ bin_file: *link.File,
+ src_loc: Module.SrcLoc,
+ tv: TypedValue,
+ decl_index: Module.Decl.Index,
+) CodeGenError!GenResult {
+ log.debug("genDeclRef: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
+
+ const target = bin_file.options.target;
+ const ptr_bits = target.cpu.arch.ptrBitWidth();
+ const ptr_bytes: u64 = @divExact(ptr_bits, 8);
+
+ const module = bin_file.options.module.?;
+ const decl = module.declPtr(decl_index);
+
+ if (decl.ty.zigTypeTag() != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime()) {
+ const imm: u64 = switch (ptr_bytes) {
+ 1 => 0xaa,
+ 2 => 0xaaaa,
+ 4 => 0xaaaaaaaa,
+ 8 => 0xaaaaaaaaaaaaaaaa,
+ else => unreachable,
+ };
+ return GenResult.mcv(.{ .immediate = imm });
+ }
+
+ // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
+ if (tv.ty.zigTypeTag() == .Pointer) blk: {
+ if (tv.ty.castPtrToFn()) |_| break :blk;
+ if (!tv.ty.elemType2().hasRuntimeBits()) {
+ return GenResult.mcv(.none);
+ }
+ }
+
+ module.markDeclAlive(decl);
+
+ if (bin_file.cast(link.File.Elf)) |elf_file| {
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return GenResult.mcv(.{ .memory = atom.getOffsetTableAddress(elf_file) });
+ } else if (bin_file.cast(link.File.MachO)) |macho_file| {
+ const atom_index = try macho_file.getOrCreateAtomForDecl(decl_index);
+ const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
+ return GenResult.mcv(.{ .linker_load = .{
+ .type = .got,
+ .sym_index = sym_index,
+ } });
+ } else if (bin_file.cast(link.File.Coff)) |coff_file| {
+ const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
+ const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
+ return GenResult.mcv(.{ .linker_load = .{
+ .type = .got,
+ .sym_index = sym_index,
+ } });
+ } else if (bin_file.cast(link.File.Plan9)) |p9| {
+ const decl_block_index = try p9.seeDecl(decl_index);
+ const decl_block = p9.getDeclBlock(decl_block_index);
+ const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
+ return GenResult.mcv(.{ .memory = got_addr });
+ } else {
+ return GenResult.fail(bin_file.allocator, src_loc, "TODO genDeclRef for target {}", .{target});
+ }
+}
+
+fn genUnnamedConst(
+ bin_file: *link.File,
+ src_loc: Module.SrcLoc,
+ tv: TypedValue,
+ owner_decl_index: Module.Decl.Index,
+) CodeGenError!GenResult {
+ log.debug("genUnnamedConst: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
+
+ const target = bin_file.options.target;
+ const local_sym_index = bin_file.lowerUnnamedConst(tv, owner_decl_index) catch |err| {
+ return GenResult.fail(bin_file.allocator, src_loc, "lowering unnamed constant failed: {s}", .{@errorName(err)});
+ };
+ if (bin_file.cast(link.File.Elf)) |elf_file| {
+ return GenResult.mcv(.{ .memory = elf_file.getSymbol(local_sym_index).st_value });
+ } else if (bin_file.cast(link.File.MachO)) |_| {
+ return GenResult.mcv(.{ .linker_load = .{
+ .type = .direct,
+ .sym_index = local_sym_index,
+ } });
+ } else if (bin_file.cast(link.File.Coff)) |_| {
+ return GenResult.mcv(.{ .linker_load = .{
+ .type = .direct,
+ .sym_index = local_sym_index,
+ } });
+ } else if (bin_file.cast(link.File.Plan9)) |p9| {
+ const ptr_bits = target.cpu.arch.ptrBitWidth();
+ const ptr_bytes: u64 = @divExact(ptr_bits, 8);
+ const got_index = local_sym_index; // the plan9 backend returns the got_index
+ const got_addr = p9.bases.data + got_index * ptr_bytes;
+ return GenResult.mcv(.{ .memory = got_addr });
+ } else {
+ return GenResult.fail(bin_file.allocator, src_loc, "TODO genUnnamedConst for target {}", .{target});
+ }
+}
+
+pub fn genTypedValue(
+ bin_file: *link.File,
+ src_loc: Module.SrcLoc,
+ arg_tv: TypedValue,
+ owner_decl_index: Module.Decl.Index,
+) CodeGenError!GenResult {
+ var typed_value = arg_tv;
+ if (typed_value.val.castTag(.runtime_value)) |rt| {
+ typed_value.val = rt.data;
+ }
+
+ log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmtDebug(), typed_value.val.fmtDebug() });
+
+ if (typed_value.val.isUndef())
+ return GenResult.mcv(.undef);
+
+ const target = bin_file.options.target;
+ const ptr_bits = target.cpu.arch.ptrBitWidth();
+
+ if (typed_value.val.castTag(.decl_ref)) |payload| {
+ return genDeclRef(bin_file, src_loc, typed_value, payload.data);
+ }
+ if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
+ return genDeclRef(bin_file, src_loc, typed_value, payload.data.decl_index);
+ }
+
+ switch (typed_value.ty.zigTypeTag()) {
+ .Void => return GenResult.mcv(.none),
+ .Pointer => switch (typed_value.ty.ptrSize()) {
+ .Slice => {},
+ else => {
+ switch (typed_value.val.tag()) {
+ .int_u64 => {
+ return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(target) });
+ },
+ else => {},
+ }
+ },
+ },
+ .Int => {
+ const info = typed_value.ty.intInfo(target);
+ if (info.bits <= ptr_bits and info.signedness == .signed) {
+ return GenResult.mcv(.{ .immediate = @bitCast(u64, typed_value.val.toSignedInt(target)) });
+ }
+ if (!(info.bits > ptr_bits or info.signedness == .signed)) {
+ return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(target) });
+ }
+ },
+ .Bool => {
+ return GenResult.mcv(.{ .immediate = @boolToInt(typed_value.val.toBool()) });
+ },
+ .Optional => {
+ if (typed_value.ty.isPtrLikeOptional()) {
+ if (typed_value.val.isNull())
+ return GenResult.mcv(.{ .immediate = 0 });
+
+ var buf: Type.Payload.ElemType = undefined;
+ return genTypedValue(bin_file, src_loc, .{
+ .ty = typed_value.ty.optionalChild(&buf),
+ .val = typed_value.val,
+ }, owner_decl_index);
+ } else if (typed_value.ty.abiSize(target) == 1) {
+ return GenResult.mcv(.{ .immediate = @boolToInt(!typed_value.val.isNull()) });
+ }
+ },
+ .Enum => {
+ if (typed_value.val.castTag(.enum_field_index)) |field_index| {
+ switch (typed_value.ty.tag()) {
+ .enum_simple => {
+ return GenResult.mcv(.{ .immediate = field_index.data });
+ },
+ .enum_full, .enum_nonexhaustive => {
+ const enum_full = typed_value.ty.cast(Type.Payload.EnumFull).?.data;
+ if (enum_full.values.count() != 0) {
+ const tag_val = enum_full.values.keys()[field_index.data];
+ return genTypedValue(bin_file, src_loc, .{
+ .ty = enum_full.tag_ty,
+ .val = tag_val,
+ }, owner_decl_index);
+ } else {
+ return GenResult.mcv(.{ .immediate = field_index.data });
+ }
+ },
+ else => unreachable,
+ }
+ } else {
+ var int_tag_buffer: Type.Payload.Bits = undefined;
+ const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer);
+ return genTypedValue(bin_file, src_loc, .{
+ .ty = int_tag_ty,
+ .val = typed_value.val,
+ }, owner_decl_index);
+ }
+ },
+ .ErrorSet => {
+ switch (typed_value.val.tag()) {
+ .@"error" => {
+ const err_name = typed_value.val.castTag(.@"error").?.data.name;
+ const module = bin_file.options.module.?;
+ const global_error_set = module.global_error_set;
+ const error_index = global_error_set.get(err_name).?;
+ return GenResult.mcv(.{ .immediate = error_index });
+ },
+ else => {
+ // In this case we are rendering an error union which has a 0 bits payload.
+ return GenResult.mcv(.{ .immediate = 0 });
+ },
+ }
+ },
+ .ErrorUnion => {
+ const error_type = typed_value.ty.errorUnionSet();
+ const payload_type = typed_value.ty.errorUnionPayload();
+ const is_pl = typed_value.val.errorUnionIsPayload();
+
+ if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
+ // We use the error type directly as the type.
+ const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero);
+ return genTypedValue(bin_file, src_loc, .{
+ .ty = error_type,
+ .val = err_val,
+ }, owner_decl_index);
+ }
+ },
+
+ .ComptimeInt => unreachable,
+ .ComptimeFloat => unreachable,
+ .Type => unreachable,
+ .EnumLiteral => unreachable,
+ .NoReturn => unreachable,
+ .Undefined => unreachable,
+ .Null => unreachable,
+ .Opaque => unreachable,
+
+ else => {},
+ }
+
+ return genUnnamedConst(bin_file, src_loc, typed_value, owner_decl_index);
+}
+
pub fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u64 {
const payload_align = payload_ty.abiAlignment(target);
const error_align = Type.anyerror.abiAlignment(target);
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index c0ac7e0b88..f210f2f2b3 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1060,7 +1060,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
decl.analysis = .codegen_failure;
try mod.failed_decls.put(mod.gpa, decl_index, em);
log.err("{s}", .{em.msg});
- return error.AnalysisFail;
+ return error.CodegenFail;
},
};
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 1a9d594c56..f499a9952a 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -2618,7 +2618,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
decl.analysis = .codegen_failure;
try mod.failed_decls.put(mod.gpa, decl_index, em);
log.err("{s}", .{em.msg});
- return error.AnalysisFail;
+ return error.CodegenFail;
},
};
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 7c1d4776af..eaf16e4009 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -2089,7 +2089,7 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em);
log.err("{s}", .{em.msg});
- return error.AnalysisFail;
+ return error.CodegenFail;
},
};
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index 87e3ca5c22..cf6e4f8418 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -377,7 +377,7 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I
decl.analysis = .codegen_failure;
try mod.failed_decls.put(mod.gpa, decl_index, em);
log.err("{s}", .{em.msg});
- return error.AnalysisFail;
+ return error.CodegenFail;
},
};
// duped_code is freed when the unnamed const is freed
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 00a52177f7..ac0c8e9ca5 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -1255,7 +1255,7 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In
.fail => |em| {
decl.analysis = .codegen_failure;
try mod.failed_decls.put(mod.gpa, decl_index, em);
- return error.AnalysisFail;
+ return error.CodegenFail;
},
};
};
diff --git a/src/register_manager.zig b/src/register_manager.zig
index 2fe0cd2b6a..4d16348c27 100644
--- a/src/register_manager.zig
+++ b/src/register_manager.zig
@@ -19,6 +19,9 @@ pub const AllocateRegistersError = error{
/// Can happen when spilling an instruction in codegen runs out of
/// memory, so we propagate that error
OutOfMemory,
+ /// Can happen when spilling an instruction in codegen triggers integer
+ /// overflow, so we propagate that error
+ Overflow,
/// Can happen when spilling an instruction triggers a codegen
/// error, so we propagate that error
CodegenFail,
From 1024332adc88928299dfc07426f11624ae8ba18b Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Fri, 3 Mar 2023 18:24:58 +0100
Subject: [PATCH 002/294] arm: use common implementation of genTypedValue
helper
---
src/arch/arm/CodeGen.zig | 192 ++++--------------------------------
src/arch/x86_64/CodeGen.zig | 6 +-
2 files changed, 23 insertions(+), 175 deletions(-)
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 87806223e3..7d8708c44d 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -24,7 +24,7 @@ const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
const Result = codegen.Result;
-const GenerateSymbolError = codegen.GenerateSymbolError;
+const CodeGenError = codegen.CodeGenError;
const DebugInfoOutput = codegen.DebugInfoOutput;
const bits = @import("bits.zig");
@@ -42,7 +42,7 @@ const c_abi_int_param_regs = abi.c_abi_int_param_regs;
const c_abi_int_return_regs = abi.c_abi_int_return_regs;
const gp = abi.RegisterClass.gp;
-const InnerError = codegen.CodeGenError || error{OutOfRegisters};
+const InnerError = CodeGenError || error{OutOfRegisters};
gpa: Allocator,
air: Air,
@@ -339,7 +339,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!Result {
+) CodeGenError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@@ -6083,178 +6083,26 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
}
-fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
-
- const mod = self.bin_file.options.module.?;
- const decl = mod.declPtr(decl_index);
- mod.markDeclAlive(decl);
-
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
- const atom = elf_file.getAtom(atom_index);
- return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
- } else if (self.bin_file.cast(link.File.MachO)) |_| {
- unreachable; // unsupported architecture for MachO
- } else if (self.bin_file.cast(link.File.Coff)) |_| {
- return self.fail("TODO codegen COFF const Decl pointer", .{});
- } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- const decl_block_index = try p9.seeDecl(decl_index);
- const decl_block = p9.getDeclBlock(decl_block_index);
- const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
- return MCValue{ .memory = got_addr };
- } else {
- return self.fail("TODO codegen non-ELF const Decl pointer", .{});
- }
-
- _ = tv;
-}
-
-fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
- const local_sym_index = self.bin_file.lowerUnnamedConst(tv, self.mod_fn.owner_decl) catch |err| {
- return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
- };
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
- } else if (self.bin_file.cast(link.File.MachO)) |_| {
- unreachable;
- } else if (self.bin_file.cast(link.File.Coff)) |_| {
- return self.fail("TODO lower unnamed const in COFF", .{});
- } else if (self.bin_file.cast(link.File.Plan9)) |_| {
- return self.fail("TODO lower unnamed const in Plan9", .{});
- } else {
- return self.fail("TODO lower unnamed const", .{});
- }
-}
-
fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
- var typed_value = arg_tv;
- if (typed_value.val.castTag(.runtime_value)) |rt| {
- typed_value.val = rt.data;
- }
- log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmtDebug(), typed_value.val.fmtDebug() });
- if (typed_value.val.isUndef())
- return MCValue{ .undef = {} };
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
-
- if (typed_value.val.castTag(.decl_ref)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data);
- }
- if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data.decl_index);
- }
- const target = self.target.*;
-
- switch (typed_value.ty.zigTypeTag()) {
- .Pointer => switch (typed_value.ty.ptrSize()) {
- .Slice => {},
- else => {
- switch (typed_value.val.tag()) {
- .int_u64 => {
- return MCValue{ .immediate = @intCast(u32, typed_value.val.toUnsignedInt(target)) };
- },
- else => {},
- }
- },
+ const mcv: MCValue = switch (try codegen.genTypedValue(
+ self.bin_file,
+ self.src_loc,
+ arg_tv,
+ self.mod_fn.owner_decl,
+ )) {
+ .mcv => |mcv| switch (mcv) {
+ .none => .none,
+ .undef => .undef,
+ .linker_load => unreachable, // TODO
+ .immediate => |imm| .{ .immediate = @truncate(u32, imm) },
+ .memory => |addr| .{ .memory = addr },
},
- .Int => {
- const info = typed_value.ty.intInfo(self.target.*);
- if (info.bits <= ptr_bits) {
- const unsigned = switch (info.signedness) {
- .signed => blk: {
- const signed = @intCast(i32, typed_value.val.toSignedInt(target));
- break :blk @bitCast(u32, signed);
- },
- .unsigned => @intCast(u32, typed_value.val.toUnsignedInt(target)),
- };
-
- return MCValue{ .immediate = unsigned };
- } else {
- return self.lowerUnnamedConst(typed_value);
- }
+ .fail => |msg| {
+ self.err_msg = msg;
+ return error.CodegenFail;
},
- .Bool => {
- return MCValue{ .immediate = @boolToInt(typed_value.val.toBool()) };
- },
- .Optional => {
- if (typed_value.ty.isPtrLikeOptional()) {
- if (typed_value.val.isNull())
- return MCValue{ .immediate = 0 };
-
- var buf: Type.Payload.ElemType = undefined;
- return self.genTypedValue(.{
- .ty = typed_value.ty.optionalChild(&buf),
- .val = typed_value.val,
- });
- } else if (typed_value.ty.abiSize(self.target.*) == 1) {
- return MCValue{ .immediate = @boolToInt(typed_value.val.isNull()) };
- }
- },
- .Enum => {
- if (typed_value.val.castTag(.enum_field_index)) |field_index| {
- switch (typed_value.ty.tag()) {
- .enum_simple => {
- return MCValue{ .immediate = field_index.data };
- },
- .enum_full, .enum_nonexhaustive => {
- const enum_full = typed_value.ty.cast(Type.Payload.EnumFull).?.data;
- if (enum_full.values.count() != 0) {
- const tag_val = enum_full.values.keys()[field_index.data];
- return self.genTypedValue(.{ .ty = enum_full.tag_ty, .val = tag_val });
- } else {
- return MCValue{ .immediate = field_index.data };
- }
- },
- else => unreachable,
- }
- } else {
- var int_tag_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer);
- return self.genTypedValue(.{ .ty = int_tag_ty, .val = typed_value.val });
- }
- },
- .ErrorSet => {
- switch (typed_value.val.tag()) {
- .@"error" => {
- const err_name = typed_value.val.castTag(.@"error").?.data.name;
- const module = self.bin_file.options.module.?;
- const global_error_set = module.global_error_set;
- const error_index = global_error_set.get(err_name).?;
- return MCValue{ .immediate = error_index };
- },
- else => {
- // In this case we are rendering an error union which has a 0 bits payload.
- return MCValue{ .immediate = 0 };
- },
- }
- },
- .ErrorUnion => {
- const error_type = typed_value.ty.errorUnionSet();
- const payload_type = typed_value.ty.errorUnionPayload();
- const is_pl = typed_value.val.errorUnionIsPayload();
-
- if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
- // We use the error type directly as the type.
- const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero);
- return self.genTypedValue(.{ .ty = error_type, .val = err_val });
- }
- },
-
- .ComptimeInt => unreachable, // semantic analysis prevents this
- .ComptimeFloat => unreachable, // semantic analysis prevents this
- .Type => unreachable,
- .EnumLiteral => unreachable,
- .Void => unreachable,
- .NoReturn => unreachable,
- .Undefined => unreachable,
- .Null => unreachable,
- .Opaque => unreachable,
-
- else => {},
- }
-
- return self.lowerUnnamedConst(typed_value);
+ };
+ return mcv;
}
const CallMCValues = struct {
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 2ec1a33619..a2c11b332b 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -12,12 +12,12 @@ const trace = @import("../../tracy.zig").trace;
const Air = @import("../../Air.zig");
const Allocator = mem.Allocator;
+const CodeGenError = codegen.CodeGenError;
const Compilation = @import("../../Compilation.zig");
const DebugInfoOutput = codegen.DebugInfoOutput;
const DW = std.dwarf;
const ErrorMsg = Module.ErrorMsg;
const Result = codegen.Result;
-const GenerateSymbolError = codegen.GenerateSymbolError;
const Emit = @import("Emit.zig");
const Liveness = @import("../../Liveness.zig");
const Mir = @import("Mir.zig");
@@ -40,7 +40,7 @@ const Register = bits.Register;
const gp = abi.RegisterClass.gp;
const sse = abi.RegisterClass.sse;
-const InnerError = codegen.CodeGenError || error{OutOfRegisters};
+const InnerError = CodeGenError || error{OutOfRegisters};
gpa: Allocator,
air: Air,
@@ -253,7 +253,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!Result {
+) CodeGenError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
From c413ac100fa5a4cece5702d3afb6b0898e9c6214 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Fri, 3 Mar 2023 18:40:16 +0100
Subject: [PATCH 003/294] codegen: refactor generating Int as immediate where
appropriate
---
src/codegen.zig | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/src/codegen.zig b/src/codegen.zig
index 245745d6f6..7e7f34f992 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -1051,11 +1051,12 @@ pub fn genTypedValue(
},
.Int => {
const info = typed_value.ty.intInfo(target);
- if (info.bits <= ptr_bits and info.signedness == .signed) {
- return GenResult.mcv(.{ .immediate = @bitCast(u64, typed_value.val.toSignedInt(target)) });
- }
- if (!(info.bits > ptr_bits or info.signedness == .signed)) {
- return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(target) });
+ if (info.bits <= ptr_bits) {
+ const unsigned = switch (info.signedness) {
+ .signed => @bitCast(u64, typed_value.val.toSignedInt(target)),
+ .unsigned => typed_value.val.toUnsignedInt(target),
+ };
+ return GenResult.mcv(.{ .immediate = unsigned });
}
},
.Bool => {
From d8d8842190214cf727611b965e830ccbfffb52d1 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Fri, 3 Mar 2023 18:42:29 +0100
Subject: [PATCH 004/294] arm: skip unimplemented behavior test for
@fieldParentPtr
---
test/behavior/field_parent_ptr.zig | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/test/behavior/field_parent_ptr.zig b/test/behavior/field_parent_ptr.zig
index 6bbd6ad7ef..bf99fd1795 100644
--- a/test/behavior/field_parent_ptr.zig
+++ b/test/behavior/field_parent_ptr.zig
@@ -3,6 +3,7 @@ const builtin = @import("builtin");
test "@fieldParentPtr non-first field" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testParentFieldPtr(&foo.c);
comptime try testParentFieldPtr(&foo.c);
@@ -10,6 +11,7 @@ test "@fieldParentPtr non-first field" {
test "@fieldParentPtr first field" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testParentFieldPtrFirst(&foo.a);
comptime try testParentFieldPtrFirst(&foo.a);
@@ -47,6 +49,7 @@ fn testParentFieldPtrFirst(a: *const bool) !void {
test "@fieldParentPtr untagged union" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -73,6 +76,7 @@ fn testFieldParentPtrUnion(c: *const i32) !void {
test "@fieldParentPtr tagged union" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -99,6 +103,7 @@ fn testFieldParentPtrTaggedUnion(c: *const i32) !void {
test "@fieldParentPtr extern union" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
From 0d2c25ca9d0794b1c822a12f3bdf8e57ede4c840 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Fri, 3 Mar 2023 18:46:08 +0100
Subject: [PATCH 005/294] aarch64: use common implementation of genTypedValue
---
src/arch/aarch64/CodeGen.zig | 215 ++++-------------------------------
1 file changed, 20 insertions(+), 195 deletions(-)
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 23f458f910..28f8370bd9 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -23,7 +23,7 @@ const leb128 = std.leb;
const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
-const GenerateSymbolError = codegen.GenerateSymbolError;
+const CodeGenError = codegen.CodeGenError;
const Result = codegen.Result;
const DebugInfoOutput = codegen.DebugInfoOutput;
@@ -41,7 +41,7 @@ const c_abi_int_param_regs = abi.c_abi_int_param_regs;
const c_abi_int_return_regs = abi.c_abi_int_return_regs;
const gp = abi.RegisterClass.gp;
-const InnerError = codegen.CodeGenError || error{OutOfRegisters};
+const InnerError = CodeGenError || error{OutOfRegisters};
gpa: Allocator,
air: Air,
@@ -333,7 +333,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!Result {
+) CodeGenError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@@ -6133,201 +6133,26 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
}
-fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
-
- // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
- if (tv.ty.zigTypeTag() == .Pointer) blk: {
- if (tv.ty.castPtrToFn()) |_| break :blk;
- if (!tv.ty.elemType2().hasRuntimeBits()) {
- return MCValue.none;
- }
- }
-
- const mod = self.bin_file.options.module.?;
- const decl = mod.declPtr(decl_index);
- mod.markDeclAlive(decl);
-
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
- const atom = elf_file.getAtom(atom_index);
- return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
- } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- const atom = try macho_file.getOrCreateAtomForDecl(decl_index);
- const sym_index = macho_file.getAtom(atom).getSymbolIndex().?;
- return MCValue{ .linker_load = .{
- .type = .got,
- .sym_index = sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
- const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
- return MCValue{ .linker_load = .{
- .type = .got,
- .sym_index = sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- const decl_block_index = try p9.seeDecl(decl_index);
- const decl_block = p9.getDeclBlock(decl_block_index);
- const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
- return MCValue{ .memory = got_addr };
- } else {
- return self.fail("TODO codegen non-ELF const Decl pointer", .{});
- }
-}
-
-fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
- log.debug("lowerUnnamedConst: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
- const local_sym_index = self.bin_file.lowerUnnamedConst(tv, self.mod_fn.owner_decl) catch |err| {
- return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
- };
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
- } else if (self.bin_file.cast(link.File.MachO)) |_| {
- return MCValue{ .linker_load = .{
- .type = .direct,
- .sym_index = local_sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Coff)) |_| {
- return MCValue{ .linker_load = .{
- .type = .direct,
- .sym_index = local_sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Plan9)) |_| {
- return self.fail("TODO lower unnamed const in Plan9", .{});
- } else {
- return self.fail("TODO lower unnamed const", .{});
- }
-}
-
fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
- var typed_value = arg_tv;
- if (typed_value.val.castTag(.runtime_value)) |rt| {
- typed_value.val = rt.data;
- }
- log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmtDebug(), typed_value.val.fmtDebug() });
- if (typed_value.val.isUndef())
- return MCValue{ .undef = {} };
-
- if (typed_value.val.castTag(.decl_ref)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data);
- }
- if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data.decl_index);
- }
- const target = self.target.*;
-
- switch (typed_value.ty.zigTypeTag()) {
- .Pointer => switch (typed_value.ty.ptrSize()) {
- .Slice => {},
- else => {
- switch (typed_value.val.tag()) {
- .int_u64 => {
- return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
- },
- else => {},
- }
- },
+ const mcv: MCValue = switch (try codegen.genTypedValue(
+ self.bin_file,
+ self.src_loc,
+ arg_tv,
+ self.mod_fn.owner_decl,
+ )) {
+ .mcv => |mcv| switch (mcv) {
+ .none => .none,
+ .undef => .undef,
+ .linker_load => |ll| .{ .linker_load = ll },
+ .immediate => |imm| .{ .immediate = imm },
+ .memory => |addr| .{ .memory = addr },
},
- .Int => {
- const info = typed_value.ty.intInfo(self.target.*);
- if (info.bits <= 64) {
- const unsigned = switch (info.signedness) {
- .signed => blk: {
- const signed = typed_value.val.toSignedInt(target);
- break :blk @bitCast(u64, signed);
- },
- .unsigned => typed_value.val.toUnsignedInt(target),
- };
-
- return MCValue{ .immediate = unsigned };
- }
+ .fail => |msg| {
+ self.err_msg = msg;
+ return error.CodegenFail;
},
- .Bool => {
- return MCValue{ .immediate = @boolToInt(typed_value.val.toBool()) };
- },
- .Optional => {
- if (typed_value.ty.isPtrLikeOptional()) {
- if (typed_value.val.isNull())
- return MCValue{ .immediate = 0 };
-
- var buf: Type.Payload.ElemType = undefined;
- return self.genTypedValue(.{
- .ty = typed_value.ty.optionalChild(&buf),
- .val = typed_value.val,
- });
- } else if (typed_value.ty.abiSize(self.target.*) == 1) {
- return MCValue{ .immediate = @boolToInt(typed_value.val.isNull()) };
- }
- },
- .Enum => {
- if (typed_value.val.castTag(.enum_field_index)) |field_index| {
- switch (typed_value.ty.tag()) {
- .enum_simple => {
- return MCValue{ .immediate = field_index.data };
- },
- .enum_full, .enum_nonexhaustive => {
- const enum_full = typed_value.ty.cast(Type.Payload.EnumFull).?.data;
- if (enum_full.values.count() != 0) {
- const tag_val = enum_full.values.keys()[field_index.data];
- return self.genTypedValue(.{ .ty = enum_full.tag_ty, .val = tag_val });
- } else {
- return MCValue{ .immediate = field_index.data };
- }
- },
- else => unreachable,
- }
- } else {
- var int_tag_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer);
- return self.genTypedValue(.{ .ty = int_tag_ty, .val = typed_value.val });
- }
- },
- .ErrorSet => {
- switch (typed_value.val.tag()) {
- .@"error" => {
- const err_name = typed_value.val.castTag(.@"error").?.data.name;
- const module = self.bin_file.options.module.?;
- const global_error_set = module.global_error_set;
- const error_index = global_error_set.get(err_name).?;
- return MCValue{ .immediate = error_index };
- },
- else => {
- // In this case we are rendering an error union which has a 0 bits payload.
- return MCValue{ .immediate = 0 };
- },
- }
- },
- .ErrorUnion => {
- const error_type = typed_value.ty.errorUnionSet();
- const payload_type = typed_value.ty.errorUnionPayload();
-
- const is_pl = typed_value.val.errorUnionIsPayload();
-
- if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
- // We use the error type directly as the type.
- const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero);
- return self.genTypedValue(.{ .ty = error_type, .val = err_val });
- }
-
- return self.lowerUnnamedConst(typed_value);
- },
-
- .ComptimeInt => unreachable, // semantic analysis prevents this
- .ComptimeFloat => unreachable, // semantic analysis prevents this
- .Type => unreachable,
- .EnumLiteral => unreachable,
- .Void => unreachable,
- .NoReturn => unreachable,
- .Undefined => unreachable,
- .Null => unreachable,
- .Opaque => unreachable,
-
- else => {},
- }
-
- return self.lowerUnnamedConst(typed_value);
+ };
+ return mcv;
}
const CallMCValues = struct {
From 5b3ea49806f5d0b9034e3eacbef9e19428a5db8a Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Fri, 3 Mar 2023 18:53:13 +0100
Subject: [PATCH 006/294] riscv64: use common implementation of genTypedValue
---
src/arch/riscv64/CodeGen.zig | 158 +++++------------------------------
1 file changed, 20 insertions(+), 138 deletions(-)
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index fad5482cbc..c7191145f9 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -23,6 +23,7 @@ const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
const codegen = @import("../../codegen.zig");
+const CodeGenError = codegen.CodeGenError;
const Result = codegen.Result;
const DebugInfoOutput = codegen.DebugInfoOutput;
@@ -35,7 +36,7 @@ const Instruction = abi.Instruction;
const callee_preserved_regs = abi.callee_preserved_regs;
const gp = abi.RegisterClass.gp;
-const InnerError = codegen.CodeGenError || error{OutOfRegisters};
+const InnerError = CodeGenError || error{OutOfRegisters};
gpa: Allocator,
air: Air,
@@ -221,7 +222,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) codegen.CodeGenError!Result {
+) CodeGenError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@@ -2548,145 +2549,26 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
}
-fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- const mod = self.bin_file.options.module.?;
- const decl = mod.declPtr(decl_index);
- mod.markDeclAlive(decl);
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
- const atom = elf_file.getAtom(atom_index);
- return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
- } else if (self.bin_file.cast(link.File.MachO)) |_| {
- unreachable;
- } else if (self.bin_file.cast(link.File.Coff)) |_| {
- return self.fail("TODO codegen COFF const Decl pointer", .{});
- } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- const decl_block_index = try p9.seeDecl(decl_index);
- const decl_block = p9.getDeclBlock(decl_block_index);
- const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
- return MCValue{ .memory = got_addr };
- } else {
- return self.fail("TODO codegen non-ELF const Decl pointer", .{});
- }
- _ = tv;
-}
-
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
- if (typed_value.val.isUndef())
- return MCValue{ .undef = {} };
-
- if (typed_value.val.castTag(.decl_ref)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data);
- }
- if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data.decl_index);
- }
- const target = self.target.*;
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- switch (typed_value.ty.zigTypeTag()) {
- .Pointer => switch (typed_value.ty.ptrSize()) {
- .Slice => {
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const ptr_type = typed_value.ty.slicePtrFieldType(&buf);
- const ptr_mcv = try self.genTypedValue(.{ .ty = ptr_type, .val = typed_value.val });
- const mod = self.bin_file.options.module.?;
- const slice_len = typed_value.val.sliceLen(mod);
- // Codegen can't handle some kinds of indirection. If the wrong union field is accessed here it may mean
- // the Sema code needs to use anonymous Decls or alloca instructions to store data.
- const ptr_imm = ptr_mcv.memory;
- _ = slice_len;
- _ = ptr_imm;
- // We need more general support for const data being stored in memory to make this work.
- return self.fail("TODO codegen for const slices", .{});
- },
- else => {
- if (typed_value.val.tag() == .int_u64) {
- return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
- }
- return self.fail("TODO codegen more kinds of const pointers", .{});
- },
+ const mcv: MCValue = switch (try codegen.genTypedValue(
+ self.bin_file,
+ self.src_loc,
+ typed_value,
+ self.mod_fn.owner_decl,
+ )) {
+ .mcv => |mcv| switch (mcv) {
+ .none => .none,
+ .undef => .undef,
+ .linker_load => unreachable, // TODO
+ .immediate => |imm| .{ .immediate = imm },
+ .memory => |addr| .{ .memory = addr },
},
- .Int => {
- const info = typed_value.ty.intInfo(self.target.*);
- if (info.bits > ptr_bits or info.signedness == .signed) {
- return self.fail("TODO const int bigger than ptr and signed int", .{});
- }
- return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
+ .fail => |msg| {
+ self.err_msg = msg;
+ return error.CodegenFail;
},
- .Bool => {
- return MCValue{ .immediate = @boolToInt(typed_value.val.toBool()) };
- },
- .ComptimeInt => unreachable, // semantic analysis prevents this
- .ComptimeFloat => unreachable, // semantic analysis prevents this
- .Optional => {
- if (typed_value.ty.isPtrLikeOptional()) {
- if (typed_value.val.isNull())
- return MCValue{ .immediate = 0 };
-
- var buf: Type.Payload.ElemType = undefined;
- return self.genTypedValue(.{
- .ty = typed_value.ty.optionalChild(&buf),
- .val = typed_value.val,
- });
- } else if (typed_value.ty.abiSize(self.target.*) == 1) {
- return MCValue{ .immediate = @boolToInt(typed_value.val.isNull()) };
- }
- return self.fail("TODO non pointer optionals", .{});
- },
- .Enum => {
- if (typed_value.val.castTag(.enum_field_index)) |field_index| {
- switch (typed_value.ty.tag()) {
- .enum_simple => {
- return MCValue{ .immediate = field_index.data };
- },
- .enum_full, .enum_nonexhaustive => {
- const enum_full = typed_value.ty.cast(Type.Payload.EnumFull).?.data;
- if (enum_full.values.count() != 0) {
- const tag_val = enum_full.values.keys()[field_index.data];
- return self.genTypedValue(.{ .ty = enum_full.tag_ty, .val = tag_val });
- } else {
- return MCValue{ .immediate = field_index.data };
- }
- },
- else => unreachable,
- }
- } else {
- var int_tag_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer);
- return self.genTypedValue(.{ .ty = int_tag_ty, .val = typed_value.val });
- }
- },
- .ErrorSet => {
- switch (typed_value.val.tag()) {
- .@"error" => {
- const err_name = typed_value.val.castTag(.@"error").?.data.name;
- const module = self.bin_file.options.module.?;
- const global_error_set = module.global_error_set;
- const error_index = global_error_set.get(err_name).?;
- return MCValue{ .immediate = error_index };
- },
- else => {
- // In this case we are rendering an error union which has a 0 bits payload.
- return MCValue{ .immediate = 0 };
- },
- }
- },
- .ErrorUnion => {
- const error_type = typed_value.ty.errorUnionSet();
- const payload_type = typed_value.ty.errorUnionPayload();
- const sub_val = typed_value.val.castTag(.eu_payload).?.data;
-
- if (!payload_type.hasRuntimeBits()) {
- // We use the error type directly as the type.
- return self.genTypedValue(.{ .ty = error_type, .val = sub_val });
- }
-
- return self.fail("TODO implement error union const of type '{}'", .{typed_value.ty.fmtDebug()});
- },
- else => return self.fail("TODO implement const of type '{}'", .{typed_value.ty.fmtDebug()}),
- }
+ };
+ return mcv;
}
const CallMCValues = struct {
From f6eeb6c8ce83af392dc075e3f80846aefc791f42 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Fri, 3 Mar 2023 18:53:30 +0100
Subject: [PATCH 007/294] sparc64: use common implementation of genTypedValue
---
src/arch/sparc64/CodeGen.zig | 170 +++++------------------------------
1 file changed, 20 insertions(+), 150 deletions(-)
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 5a108eca85..dc1a450e9a 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -19,7 +19,7 @@ const Mir = @import("Mir.zig");
const Emit = @import("Emit.zig");
const Liveness = @import("../../Liveness.zig");
const Type = @import("../../type.zig").Type;
-const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError;
+const CodeGenError = codegen.CodeGenError;
const Result = @import("../../codegen.zig").Result;
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
@@ -38,7 +38,7 @@ const gp = abi.RegisterClass.gp;
const Self = @This();
-const InnerError = codegen.CodeGenError || error{OutOfRegisters};
+const InnerError = CodeGenError || error{OutOfRegisters};
const RegisterView = enum(u1) {
caller,
@@ -261,7 +261,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!Result {
+) CodeGenError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@@ -3894,133 +3894,25 @@ fn genStore(self: *Self, value_reg: Register, addr_reg: Register, comptime off_t
}
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
- var tv = typed_value;
- log.debug("genTypedValue: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
-
- if (tv.val.castTag(.runtime_value)) |rt| {
- tv.val = rt.data;
- }
-
- if (tv.val.isUndef())
- return MCValue{ .undef = {} };
-
- if (tv.val.castTag(.decl_ref)) |payload| {
- return self.lowerDeclRef(tv, payload.data);
- }
- if (tv.val.castTag(.decl_ref_mut)) |payload| {
- return self.lowerDeclRef(tv, payload.data.decl_index);
- }
- const target = self.target.*;
-
- switch (tv.ty.zigTypeTag()) {
- .Pointer => switch (tv.ty.ptrSize()) {
- .Slice => {},
- else => {
- switch (tv.val.tag()) {
- .int_u64 => {
- return MCValue{ .immediate = tv.val.toUnsignedInt(target) };
- },
- else => {},
- }
- },
+ const mcv: MCValue = switch (try codegen.genTypedValue(
+ self.bin_file,
+ self.src_loc,
+ typed_value,
+ self.mod_fn.owner_decl,
+ )) {
+ .mcv => |mcv| switch (mcv) {
+ .none => .none,
+ .undef => .undef,
+ .linker_load => unreachable, // TODO
+ .immediate => |imm| .{ .immediate = imm },
+ .memory => |addr| .{ .memory = addr },
},
- .Bool => {
- return MCValue{ .immediate = @boolToInt(tv.val.toBool()) };
+ .fail => |msg| {
+ self.err_msg = msg;
+ return error.CodegenFail;
},
- .Int => {
- const info = tv.ty.intInfo(self.target.*);
- if (info.bits <= 64) {
- const unsigned = switch (info.signedness) {
- .signed => blk: {
- const signed = tv.val.toSignedInt(target);
- break :blk @bitCast(u64, signed);
- },
- .unsigned => tv.val.toUnsignedInt(target),
- };
-
- return MCValue{ .immediate = unsigned };
- } else {
- return self.fail("TODO implement int genTypedValue of > 64 bits", .{});
- }
- },
- .Optional => {
- if (tv.ty.isPtrLikeOptional()) {
- if (tv.val.isNull())
- return MCValue{ .immediate = 0 };
-
- var buf: Type.Payload.ElemType = undefined;
- return self.genTypedValue(.{
- .ty = tv.ty.optionalChild(&buf),
- .val = tv.val,
- });
- } else if (tv.ty.abiSize(self.target.*) == 1) {
- return MCValue{ .immediate = @boolToInt(tv.val.isNull()) };
- }
- },
- .Enum => {
- if (tv.val.castTag(.enum_field_index)) |field_index| {
- switch (tv.ty.tag()) {
- .enum_simple => {
- return MCValue{ .immediate = field_index.data };
- },
- .enum_full, .enum_nonexhaustive => {
- const enum_full = tv.ty.cast(Type.Payload.EnumFull).?.data;
- if (enum_full.values.count() != 0) {
- const tag_val = enum_full.values.keys()[field_index.data];
- return self.genTypedValue(.{ .ty = enum_full.tag_ty, .val = tag_val });
- } else {
- return MCValue{ .immediate = field_index.data };
- }
- },
- else => unreachable,
- }
- } else {
- var int_tag_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = tv.ty.intTagType(&int_tag_buffer);
- return self.genTypedValue(.{ .ty = int_tag_ty, .val = tv.val });
- }
- },
- .ErrorSet => {
- const err_name = tv.val.castTag(.@"error").?.data.name;
- const module = self.bin_file.options.module.?;
- const global_error_set = module.global_error_set;
- const error_index = global_error_set.get(err_name).?;
- return MCValue{ .immediate = error_index };
- },
- .ErrorUnion => {
- const error_type = tv.ty.errorUnionSet();
- const payload_type = tv.ty.errorUnionPayload();
-
- if (tv.val.castTag(.eu_payload)) |pl| {
- if (!payload_type.hasRuntimeBits()) {
- // We use the error type directly as the type.
- return MCValue{ .immediate = 0 };
- }
-
- _ = pl;
- return self.fail("TODO implement error union const of type '{}' (non-error)", .{tv.ty.fmtDebug()});
- } else {
- if (!payload_type.hasRuntimeBits()) {
- // We use the error type directly as the type.
- return self.genTypedValue(.{ .ty = error_type, .val = tv.val });
- }
-
- return self.fail("TODO implement error union const of type '{}' (error)", .{tv.ty.fmtDebug()});
- }
- },
- .ComptimeInt => unreachable, // semantic analysis prevents this
- .ComptimeFloat => unreachable, // semantic analysis prevents this
- .Type => unreachable,
- .EnumLiteral => unreachable,
- .Void => unreachable,
- .NoReturn => unreachable,
- .Undefined => unreachable,
- .Null => unreachable,
- .Opaque => unreachable,
- else => {},
- }
-
- return self.fail("TODO implement const of type '{}'", .{tv.ty.fmtDebug()});
+ };
+ return mcv;
}
fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
@@ -4196,28 +4088,6 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
}
}
-fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
- // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
- if (tv.ty.zigTypeTag() == .Pointer) blk: {
- if (tv.ty.castPtrToFn()) |_| break :blk;
- if (!tv.ty.elemType2().hasRuntimeBits()) {
- return MCValue.none;
- }
- }
-
- const mod = self.bin_file.options.module.?;
- const decl = mod.declPtr(decl_index);
-
- mod.markDeclAlive(decl);
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
- const atom = elf_file.getAtom(atom_index);
- return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
- } else {
- return self.fail("TODO codegen non-ELF const Decl pointer", .{});
- }
-}
-
fn minMax(
self: *Self,
tag: Air.Inst.Tag,
From d23472747eb288e4c2332e03f6185c69e864f67d Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Fri, 3 Mar 2023 18:53:47 +0100
Subject: [PATCH 008/294] elf: fully zero out symbol when appending to freelist
---
src/link/Elf.zig | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index f499a9952a..a91722d072 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -2097,9 +2097,16 @@ fn freeAtom(self: *Elf, atom_index: Atom.Index) void {
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
const local_sym_index = atom.getSymbolIndex().?;
+ log.debug("adding %{d} to local symbols free list", .{local_sym_index});
self.local_symbol_free_list.append(gpa, local_sym_index) catch {};
- self.local_symbols.items[local_sym_index].st_info = 0;
- self.local_symbols.items[local_sym_index].st_shndx = 0;
+ self.local_symbols.items[local_sym_index] = .{
+ .st_name = 0,
+ .st_info = 0,
+ .st_other = 0,
+ .st_shndx = 0,
+ .st_value = 0,
+ .st_size = 0,
+ };
_ = self.atom_by_index_table.remove(local_sym_index);
self.getAtomPtr(atom_index).local_sym_index = 0;
From dc709fbf48798ae74d5c7763cf99dffeb8143795 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Fri, 3 Mar 2023 18:56:57 +0100
Subject: [PATCH 009/294] codegen: rename GenerateSymbolError to CodeGenError
---
src/arch/wasm/CodeGen.zig | 2 +-
src/codegen.zig | 2 --
2 files changed, 1 insertion(+), 3 deletions(-)
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 511a10769e..5cd6c95690 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1162,7 +1162,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: codegen.DebugInfoOutput,
-) codegen.GenerateSymbolError!codegen.Result {
+) codegen.CodeGenError!codegen.Result {
_ = src_loc;
var code_gen: CodeGen = .{
.gpa = bin_file.allocator,
diff --git a/src/codegen.zig b/src/codegen.zig
index 7e7f34f992..a91795841c 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -35,8 +35,6 @@ pub const CodeGenError = error{
CodegenFail,
};
-pub const GenerateSymbolError = CodeGenError;
-
pub const DebugInfoOutput = union(enum) {
dwarf: *link.File.Dwarf.DeclState,
/// the plan9 debuginfo output is a bytecode with 4 opcodes
From d6bd00e85500fa1a7909695ae5943be438f7521d Mon Sep 17 00:00:00 2001
From: r00ster91
Date: Fri, 3 Mar 2023 17:30:18 +0100
Subject: [PATCH 010/294] Zir: move set_cold from Inst.Tag to Inst.Extended
If I could mark a builtin function as cold, I would mark @setCold as cold.
We have run out of `Zir.Inst.Tag`s so I had to move a tag from Zir.Inst.Tag to
Zir.Inst.Extended. This is because a new noreturn builtin will be added and
noreturn builtins cannot be part of Inst.Tag:
```
/// `noreturn` instructions may not go here; they must be part of the main `Tag` enum.
pub const Extended = enum(u16) {
```
Here's another reason I went for @setCold:
```
$ git grep setRuntimeSafety | wc -l
322
$ git grep setCold | wc -l
79
$ git grep setEvalBranchQuota | wc -l
82
```
This also simply removes @setCold from Autodoc and the docs frontend because,
as far as I could understand it, builtins represented using Zir extended
instructions are not yet supported there: I couldn't find
@setAlignStack or @setFloatMode there, either.
---
lib/docs/main.js | 4 ----
src/AstGen.zig | 13 ++++++++++---
src/Autodoc.zig | 1 -
src/Sema.zig | 18 +++++++++---------
src/Zir.zig | 10 ++++------
src/print_zir.zig | 2 +-
6 files changed, 24 insertions(+), 24 deletions(-)
diff --git a/lib/docs/main.js b/lib/docs/main.js
index a0647bbe61..fc99b2f861 100644
--- a/lib/docs/main.js
+++ b/lib/docs/main.js
@@ -1187,10 +1187,6 @@ const NAV_MODES = {
payloadHtml += "panic";
break;
}
- case "set_cold": {
- payloadHtml += "setCold";
- break;
- }
case "set_runtime_safety": {
payloadHtml += "setRuntimeSafety";
break;
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 41a8ccadb2..679fc2df0c 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -2609,8 +2609,9 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.extended => switch (gz.astgen.instructions.items(.data)[inst].extended.opcode) {
.breakpoint,
.fence,
- .set_align_stack,
.set_float_mode,
+ .set_align_stack,
+ .set_cold,
=> break :b true,
else => break :b false,
},
@@ -2658,7 +2659,6 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.validate_struct_init_comptime,
.validate_array_init,
.validate_array_init_comptime,
- .set_cold,
.set_runtime_safety,
.closure_capture,
.memcpy,
@@ -8078,6 +8078,14 @@ fn builtinCall(
});
return rvalue(gz, ri, result, node);
},
+ .set_cold => {
+ const order = try expr(gz, scope, ri, params[0]);
+ const result = try gz.addExtendedPayload(.set_cold, Zir.Inst.UnNode{
+ .node = gz.nodeIndexToRelative(node),
+ .operand = order,
+ });
+ return rvalue(gz, ri, result, node);
+ },
.src => {
const token_starts = tree.tokens.items(.start);
@@ -8111,7 +8119,6 @@ fn builtinCall(
.bool_to_int => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .bool_to_int),
.embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .embed_file),
.error_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .anyerror_type } }, params[0], .error_name),
- .set_cold => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .set_cold),
.set_runtime_safety => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .set_runtime_safety),
.sqrt => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sqrt),
.sin => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sin),
diff --git a/src/Autodoc.zig b/src/Autodoc.zig
index 3cf3fff4c0..15d90b104b 100644
--- a/src/Autodoc.zig
+++ b/src/Autodoc.zig
@@ -1338,7 +1338,6 @@ fn walkInstruction(
.embed_file,
.error_name,
.panic,
- .set_cold, // @check
.set_runtime_safety, // @check
.sqrt,
.sin,
diff --git a/src/Sema.zig b/src/Sema.zig
index f9a6f39867..4702d10688 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1167,6 +1167,11 @@ fn analyzeBodyInner(
i += 1;
continue;
},
+ .set_cold => {
+ try sema.zirSetCold(block, extended);
+ i += 1;
+ continue;
+ },
.breakpoint => {
if (!block.is_comptime) {
_ = try block.addNoOp(.breakpoint);
@@ -1304,11 +1309,6 @@ fn analyzeBodyInner(
i += 1;
continue;
},
- .set_cold => {
- try sema.zirSetCold(block, inst);
- i += 1;
- continue;
- },
.set_runtime_safety => {
try sema.zirSetRuntimeSafety(block, inst);
i += 1;
@@ -5721,10 +5721,10 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
gop.value_ptr.* = .{ .alignment = alignment, .src = src };
}
-fn zirSetCold(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
- const inst_data = sema.code.instructions.items(.data)[inst].un_node;
- const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
- const is_cold = try sema.resolveConstBool(block, operand_src, inst_data.operand, "operand to @setCold must be comptime-known");
+fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
+ const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
+ const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
+ const is_cold = try sema.resolveConstBool(block, operand_src, extra.operand, "operand to @setCold must be comptime-known");
const func = sema.func orelse return; // does nothing outside a function
func.is_cold = is_cold;
}
diff --git a/src/Zir.zig b/src/Zir.zig
index 4dd2386c51..c7f2141dcc 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -808,8 +808,6 @@ pub const Inst = struct {
panic,
/// Same as `panic` but forces comptime.
panic_comptime,
- /// Implement builtin `@setCold`. Uses `un_node`.
- set_cold,
/// Implement builtin `@setRuntimeSafety`. Uses `un_node`.
set_runtime_safety,
/// Implement builtin `@sqrt`. Uses `un_node`.
@@ -1187,7 +1185,6 @@ pub const Inst = struct {
.bool_to_int,
.embed_file,
.error_name,
- .set_cold,
.set_runtime_safety,
.sqrt,
.sin,
@@ -1323,7 +1320,6 @@ pub const Inst = struct {
.validate_deref,
.@"export",
.export_value,
- .set_cold,
.set_runtime_safety,
.memcpy,
.memset,
@@ -1561,7 +1557,7 @@ pub const Inst = struct {
=> false,
.extended => switch (data.extended.opcode) {
- .breakpoint, .fence => true,
+ .fence, .set_cold, .breakpoint => true,
else => false,
},
};
@@ -1750,7 +1746,6 @@ pub const Inst = struct {
.error_name = .un_node,
.panic = .un_node,
.panic_comptime = .un_node,
- .set_cold = .un_node,
.set_runtime_safety = .un_node,
.sqrt = .un_node,
.sin = .un_node,
@@ -1979,6 +1974,9 @@ pub const Inst = struct {
/// Implement builtin `@setAlignStack`.
/// `operand` is payload index to `UnNode`.
set_align_stack,
+ /// Implements `@setCold`.
+ /// `operand` is payload index to `UnNode`.
+ set_cold,
/// Implements the `@errSetCast` builtin.
/// `operand` is payload index to `BinNode`. `lhs` is dest type, `rhs` is operand.
err_set_cast,
diff --git a/src/print_zir.zig b/src/print_zir.zig
index fb9031296d..5ec9fbcdfc 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -196,7 +196,6 @@ const Writer = struct {
.error_name,
.panic,
.panic_comptime,
- .set_cold,
.set_runtime_safety,
.sqrt,
.sin,
@@ -503,6 +502,7 @@ const Writer = struct {
.fence,
.set_float_mode,
.set_align_stack,
+ .set_cold,
.wasm_memory_size,
.error_to_int,
.int_to_error,
From e0d390463865340adc8055d1e34c0bc7acf4e4c3 Mon Sep 17 00:00:00 2001
From: r00ster91
Date: Fri, 3 Mar 2023 09:42:34 +0100
Subject: [PATCH 011/294] Ast: properly handle sentinel-terminated slices in
tuple
Co-authored-by: Veikka Tuominen
---
lib/std/zig/Ast.zig | 9 ++++++---
test/behavior/tuple.zig | 19 +++++++++++++++++++
2 files changed, 25 insertions(+), 3 deletions(-)
diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig
index f99d58aafa..cb86696e13 100644
--- a/lib/std/zig/Ast.zig
+++ b/lib/std/zig/Ast.zig
@@ -1407,7 +1407,8 @@ pub fn containerField(tree: Ast, node: Node.Index) full.ContainerField {
.type_expr = data.lhs,
.value_expr = extra.value_expr,
.align_expr = extra.align_expr,
- .tuple_like = tree.tokens.items(.tag)[main_token + 1] != .colon,
+ .tuple_like = tree.tokens.items(.tag)[main_token] != .identifier or
+ tree.tokens.items(.tag)[main_token + 1] != .colon,
});
}
@@ -1420,7 +1421,8 @@ pub fn containerFieldInit(tree: Ast, node: Node.Index) full.ContainerField {
.type_expr = data.lhs,
.value_expr = data.rhs,
.align_expr = 0,
- .tuple_like = tree.tokens.items(.tag)[main_token + 1] != .colon,
+ .tuple_like = tree.tokens.items(.tag)[main_token] != .identifier or
+ tree.tokens.items(.tag)[main_token + 1] != .colon,
});
}
@@ -1433,7 +1435,8 @@ pub fn containerFieldAlign(tree: Ast, node: Node.Index) full.ContainerField {
.type_expr = data.lhs,
.value_expr = 0,
.align_expr = data.rhs,
- .tuple_like = tree.tokens.items(.tag)[main_token + 1] != .colon,
+ .tuple_like = tree.tokens.items(.tag)[main_token] != .identifier or
+ tree.tokens.items(.tag)[main_token + 1] != .colon,
});
}
diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig
index 13b02b40e8..f7860be34e 100644
--- a/test/behavior/tuple.zig
+++ b/test/behavior/tuple.zig
@@ -397,3 +397,22 @@ test "nested runtime conditionals in tuple initializer" {
};
try expectEqualStrings("up", x[0]);
}
+
+test "sentinel slice in tuple with other fields" {
+ const S = struct {
+ a: u32,
+ b: u32,
+ };
+
+ const Submission = union(enum) {
+ open: struct { *S, [:0]const u8, u32 },
+ };
+
+ _ = Submission;
+}
+
+test "sentinel slice in tuple" {
+ const S = struct { [:0]const u8 };
+
+ _ = S;
+}
From 653814f76ba5d678ebad91f140417cd5829c6aad Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 3 Mar 2023 16:17:23 -0700
Subject: [PATCH 012/294] std.Build.addModule: return the created module
---
lib/std/Build.zig | 23 ++++++++++-------------
1 file changed, 10 insertions(+), 13 deletions(-)
diff --git a/lib/std/Build.zig b/lib/std/Build.zig
index 26919962e3..120196f972 100644
--- a/lib/std/Build.zig
+++ b/lib/std/Build.zig
@@ -550,17 +550,13 @@ pub fn addAssembly(b: *Build, options: AssemblyOptions) *CompileStep {
return obj_step;
}
-pub const AddModuleOptions = struct {
- name: []const u8,
- source_file: FileSource,
- dependencies: []const ModuleDependency = &.{},
-};
-
-pub fn addModule(b: *Build, options: AddModuleOptions) void {
- b.modules.put(b.dupe(options.name), b.createModule(.{
- .source_file = options.source_file,
- .dependencies = options.dependencies,
- })) catch @panic("OOM");
+/// This function creates a module and adds it to the package's module set, making
+/// it available to other packages which depend on this one.
+/// `createModule` can be used instead to create a private module.
+pub fn addModule(b: *Build, name: []const u8, options: CreateModuleOptions) *Module {
+ const module = b.createModule(options);
+ b.modules.put(b.dupe(name), module) catch @panic("OOM");
+ return module;
}
pub const ModuleDependency = struct {
@@ -573,8 +569,9 @@ pub const CreateModuleOptions = struct {
dependencies: []const ModuleDependency = &.{},
};
-/// Prefer to use `addModule` which will make the module available to other
-/// packages which depend on this package.
+/// This function creates a private module, to be used by the current package,
+/// but not exposed to other packages depending on this one.
+/// `addModule` can be used instead to create a public module.
pub fn createModule(b: *Build, options: CreateModuleOptions) *Module {
const module = b.allocator.create(Module) catch @panic("OOM");
module.* = .{
From 65368683ad92b858d0a391cb29d37c0476784b40 Mon Sep 17 00:00:00 2001
From: r00ster91
Date: Fri, 3 Mar 2023 18:35:03 +0100
Subject: [PATCH 013/294] add @trap builtin
This introduces a new builtin function that compiles down to something that results in an illegal instruction exception/interrupt.
It can be used to exit a program abnormally.
This implements the builtin for all backends.
---
doc/langref.html.in | 17 ++++++++++++++++-
lib/zig.h | 10 ++++++++--
src/Air.zig | 10 +++++++++-
src/AstGen.zig | 8 +++++++-
src/BuiltinFn.zig | 8 ++++++++
src/Liveness.zig | 2 ++
src/Sema.zig | 9 +++++++++
src/Zir.zig | 11 +++++++++--
src/arch/aarch64/CodeGen.zig | 11 ++++++++++-
src/arch/arm/CodeGen.zig | 9 +++++++++
src/arch/arm/Emit.zig | 9 +++++++--
src/arch/arm/Mir.zig | 2 ++
src/arch/arm/bits.zig | 11 +++++++++++
src/arch/riscv64/CodeGen.zig | 9 +++++++++
src/arch/riscv64/Emit.zig | 2 ++
src/arch/riscv64/Mir.zig | 1 +
src/arch/riscv64/bits.zig | 1 +
src/arch/sparc64/CodeGen.zig | 16 ++++++++++++++++
src/arch/wasm/CodeGen.zig | 6 ++++++
src/arch/x86_64/CodeGen.zig | 10 ++++++++++
src/arch/x86_64/Emit.zig | 7 +++++++
src/arch/x86_64/Mir.zig | 3 +++
src/codegen/c.zig | 6 ++++++
src/codegen/llvm.zig | 8 ++++++++
src/print_air.zig | 1 +
src/print_zir.zig | 1 +
26 files changed, 178 insertions(+), 10 deletions(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index e016ef13f8..0290d3acd6 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -7818,12 +7818,14 @@ comptime {
This function inserts a platform-specific debug trap instruction which causes
debuggers to break there.
+ Unlike for {#syntax#}@trap(){#endsyntax#}, execution may continue after this point if the program is resumed.
This function is only valid within function scope.
-
+ {#see_also|@trap#}
{#header_close#}
+
{#header_open|@mulAdd#}
{#syntax#}@mulAdd(comptime T: type, a: T, b: T, c: T) T{#endsyntax#}
@@ -9393,6 +9395,19 @@ fn List(comptime T: type) type {
{#header_close#}
+ {#header_open|@trap#}
+ {#syntax#}@trap() noreturn{#endsyntax#}
+
+ This function inserts a platform-specific trap/jam instruction which can be used to exit the program abnormally.
+ This may be implemented by explicitly emitting an invalid instruction which may cause an illegal instruction exception of some sort.
+ Unlike for {#syntax#}@breakpoint(){#endsyntax#}, execution does not continue after this point.
+
+
+ This function is only valid within function scope.
+
+ {#see_also|@breakpoint#}
+ {#header_close#}
+
{#header_open|@truncate#}
{#syntax#}@truncate(comptime T: type, integer: anytype) T{#endsyntax#}
diff --git a/lib/zig.h b/lib/zig.h
index c10720d1bd..f3ad7db8a1 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -180,10 +180,16 @@ typedef char bool;
#define zig_export(sig, symbol, name) __asm(name " = " symbol)
#endif
+#if zig_has_builtin(trap)
+#define zig_trap() __builtin_trap()
+#elif defined(__i386__) || defined(__x86_64__)
+#define zig_trap() __asm__ volatile("ud2");
+#else
+#define zig_trap() raise(SIGILL)
+#endif
+
#if zig_has_builtin(debugtrap)
#define zig_breakpoint() __builtin_debugtrap()
-#elif zig_has_builtin(trap) || defined(zig_gnuc)
-#define zig_breakpoint() __builtin_trap()
#elif defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__)
#define zig_breakpoint() __debugbreak()
#elif defined(__i386__) || defined(__x86_64__)
diff --git a/src/Air.zig b/src/Air.zig
index 3ebdd319de..4646dcc89e 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -232,7 +232,14 @@ pub const Inst = struct {
/// Result type is always noreturn; no instructions in a block follow this one.
/// Uses the `br` field.
br,
- /// Lowers to a hardware trap instruction, or the next best thing.
+ /// Lowers to a trap/jam instruction causing program abortion.
+ /// This may lower to an instruction known to be invalid.
+ /// Sometimes, for the lack of a better instruction, `trap` and `breakpoint` may compile down to the same code.
+ /// Result type is always noreturn; no instructions in a block follow this one.
+ trap,
+ /// Lowers to a trap instruction causing debuggers to break here, or the next best thing.
+ /// The debugger or something else may allow the program to resume after this point.
+ /// Sometimes, for the lack of a better instruction, `trap` and `breakpoint` may compile down to the same code.
/// Result type is always void.
breakpoint,
/// Yields the return address of the current function.
@@ -1186,6 +1193,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.ret,
.ret_load,
.unreach,
+ .trap,
=> return Type.initTag(.noreturn),
.breakpoint,
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 679fc2df0c..fd51e73cf9 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -2631,6 +2631,7 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.repeat_inline,
.panic,
.panic_comptime,
+ .trap,
.check_comptime_control_flow,
=> {
noreturn_src_node = statement;
@@ -8105,7 +8106,7 @@ fn builtinCall(
.error_return_trace => return rvalue(gz, ri, try gz.addNodeExtended(.error_return_trace, node), node),
.frame => return rvalue(gz, ri, try gz.addNodeExtended(.frame, node), node),
.frame_address => return rvalue(gz, ri, try gz.addNodeExtended(.frame_address, node), node),
- .breakpoint => return rvalue(gz, ri, try gz.addNodeExtended(.breakpoint, node), node),
+ .breakpoint => return rvalue(gz, ri, try gz.addNodeExtended(.breakpoint, node), node),
.type_info => return simpleUnOpType(gz, scope, ri, node, params[0], .type_info),
.size_of => return simpleUnOpType(gz, scope, ri, node, params[0], .size_of),
@@ -8178,6 +8179,11 @@ fn builtinCall(
try emitDbgNode(gz, node);
return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], if (gz.force_comptime) .panic_comptime else .panic);
},
+ .trap => {
+ try emitDbgNode(gz, node);
+ _ = try gz.addNode(.trap, node);
+ return rvalue(gz, ri, .void_value, node);
+ },
.error_to_int => {
const operand = try expr(gz, scope, .{ .rl = .none }, params[0]);
const result = try gz.addExtendedPayload(.error_to_int, Zir.Inst.UnNode{
diff --git a/src/BuiltinFn.zig b/src/BuiltinFn.zig
index 20edbabe47..79c6617483 100644
--- a/src/BuiltinFn.zig
+++ b/src/BuiltinFn.zig
@@ -109,6 +109,7 @@ pub const Tag = enum {
sub_with_overflow,
tag_name,
This,
+ trap,
truncate,
Type,
type_info,
@@ -915,6 +916,13 @@ pub const list = list: {
.param_count = 0,
},
},
+ .{
+ "@trap",
+ .{
+ .tag = .trap,
+ .param_count = 0,
+ },
+ },
.{
"@truncate",
.{
diff --git a/src/Liveness.zig b/src/Liveness.zig
index 481cf25d04..8dc81aa165 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -226,6 +226,7 @@ pub fn categorizeOperand(
.ret_ptr,
.constant,
.const_ty,
+ .trap,
.breakpoint,
.dbg_stmt,
.dbg_inline_begin,
@@ -848,6 +849,7 @@ fn analyzeInst(
.ret_ptr,
.constant,
.const_ty,
+ .trap,
.breakpoint,
.dbg_stmt,
.dbg_inline_begin,
diff --git a/src/Sema.zig b/src/Sema.zig
index 4702d10688..8940527bc0 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1101,6 +1101,7 @@ fn analyzeBodyInner(
.@"unreachable" => break sema.zirUnreachable(block, inst),
.panic => break sema.zirPanic(block, inst, false),
.panic_comptime => break sema.zirPanic(block, inst, true),
+ .trap => break sema.zirTrap(block, inst),
// zig fmt: on
.extended => ext: {
@@ -5144,6 +5145,14 @@ fn zirPanic(sema: *Sema, block: *Block, inst: Zir.Inst.Index, force_comptime: bo
return always_noreturn;
}
+fn zirTrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
+ const src_node = sema.code.instructions.items(.data)[inst].node;
+ const src = LazySrcLoc.nodeOffset(src_node);
+ sema.src = src;
+ _ = try block.addNoOp(.trap);
+ return always_noreturn;
+}
+
fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
diff --git a/src/Zir.zig b/src/Zir.zig
index c7f2141dcc..b8ea2ea295 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -617,7 +617,7 @@ pub const Inst = struct {
/// Uses the `un_node` field.
typeof_log2_int_type,
/// Asserts control-flow will not reach this instruction (`unreachable`).
- /// Uses the `unreachable` union field.
+ /// Uses the `@"unreachable"` union field.
@"unreachable",
/// Bitwise XOR. `^`
/// Uses the `pl_node` union field. Payload is `Bin`.
@@ -808,6 +808,9 @@ pub const Inst = struct {
panic,
/// Same as `panic` but forces comptime.
panic_comptime,
+ /// Implements `@trap`.
+ /// Uses the `node` field.
+ trap,
/// Implement builtin `@setRuntimeSafety`. Uses `un_node`.
set_runtime_safety,
/// Implement builtin `@sqrt`. Uses `un_node`.
@@ -1274,6 +1277,7 @@ pub const Inst = struct {
.repeat_inline,
.panic,
.panic_comptime,
+ .trap,
.check_comptime_control_flow,
=> true,
};
@@ -1549,6 +1553,7 @@ pub const Inst = struct {
.repeat_inline,
.panic,
.panic_comptime,
+ .trap,
.for_len,
.@"try",
.try_ptr,
@@ -1746,6 +1751,7 @@ pub const Inst = struct {
.error_name = .un_node,
.panic = .un_node,
.panic_comptime = .un_node,
+ .trap = .node,
.set_runtime_safety = .un_node,
.sqrt = .un_node,
.sin = .un_node,
@@ -1982,6 +1988,7 @@ pub const Inst = struct {
err_set_cast,
/// `operand` is payload index to `UnNode`.
await_nosuspend,
+ /// Implements `@breakpoint`.
/// `operand` is `src_node: i32`.
breakpoint,
/// Implements the `@select` builtin.
@@ -1995,7 +2002,7 @@ pub const Inst = struct {
int_to_error,
/// Implement builtin `@Type`.
/// `operand` is payload index to `UnNode`.
- /// `small` contains `NameStrategy
+ /// `small` contains `NameStrategy`.
reify,
/// Implements the `@asyncCall` builtin.
/// `operand` is payload index to `AsyncCall`.
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 818b04f890..a42d0539f2 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -737,6 +737,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.bitcast => try self.airBitCast(inst),
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
+ .trap => try self.airTrap(),
.breakpoint => try self.airBreakpoint(),
.ret_addr => try self.airRetAddr(inst),
.frame_addr => try self.airFrameAddress(inst),
@@ -4198,10 +4199,18 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ .none, .none, .none });
}
+fn airTrap(self: *Self) !void {
+ _ = try self.addInst(.{
+ .tag = .brk,
+ .data = .{ .imm16 = 0x0001 },
+ });
+ return self.finishAirBookkeeping();
+}
+
fn airBreakpoint(self: *Self) !void {
_ = try self.addInst(.{
.tag = .brk,
- .data = .{ .imm16 = 1 },
+ .data = .{ .imm16 = 0xf000 },
});
return self.finishAirBookkeeping();
}
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index ceabe70438..cecda8fd4a 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -721,6 +721,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.bitcast => try self.airBitCast(inst),
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
+ .trap => try self.airTrap(),
.breakpoint => try self.airBreakpoint(),
.ret_addr => try self.airRetAddr(inst),
.frame_addr => try self.airFrameAddress(inst),
@@ -4146,6 +4147,14 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ .none, .none, .none });
}
+fn airTrap(self: *Self) !void {
+ _ = try self.addInst(.{
+ .tag = .undefined_instruction,
+ .data = .{ .nop = {} },
+ });
+ return self.finishAirBookkeeping();
+}
+
fn airBreakpoint(self: *Self) !void {
_ = try self.addInst(.{
.tag = .bkpt,
diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig
index 17540f0968..17415318de 100644
--- a/src/arch/arm/Emit.zig
+++ b/src/arch/arm/Emit.zig
@@ -1,4 +1,4 @@
-//! This file contains the functionality for lowering AArch64 MIR into
+//! This file contains the functionality for lowering AArch32 MIR into
//! machine code
const Emit = @This();
@@ -15,7 +15,7 @@ const Target = std.Target;
const assert = std.debug.assert;
const Instruction = bits.Instruction;
const Register = bits.Register;
-const log = std.log.scoped(.aarch64_emit);
+const log = std.log.scoped(.aarch32_emit);
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
const CodeGen = @import("CodeGen.zig");
@@ -100,6 +100,7 @@ pub fn emitMir(
.b => try emit.mirBranch(inst),
+ .undefined_instruction => try emit.mirUndefinedInstruction(),
.bkpt => try emit.mirExceptionGeneration(inst),
.blx => try emit.mirBranchExchange(inst),
@@ -494,6 +495,10 @@ fn mirBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
}
}
+fn mirUndefinedInstruction(emit: *Emit) !void {
+ try emit.writeInstruction(Instruction.undefinedInstruction());
+}
+
fn mirExceptionGeneration(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const imm16 = emit.mir.instructions.items(.data)[inst].imm16;
diff --git a/src/arch/arm/Mir.zig b/src/arch/arm/Mir.zig
index 07a8384c2c..736d0574bb 100644
--- a/src/arch/arm/Mir.zig
+++ b/src/arch/arm/Mir.zig
@@ -35,6 +35,8 @@ pub const Inst = struct {
asr,
/// Branch
b,
+ /// Undefined instruction
+ undefined_instruction,
/// Breakpoint
bkpt,
/// Branch with Link and Exchange
diff --git a/src/arch/arm/bits.zig b/src/arch/arm/bits.zig
index 8e76ae9409..185c4ed921 100644
--- a/src/arch/arm/bits.zig
+++ b/src/arch/arm/bits.zig
@@ -307,6 +307,9 @@ pub const Instruction = union(enum) {
fixed: u4 = 0b1111,
cond: u4,
},
+ undefined_instruction: packed struct {
+ imm32: u32 = 0xe7ffdefe,
+ },
breakpoint: packed struct {
imm4: u4,
fixed_1: u4 = 0b0111,
@@ -613,6 +616,7 @@ pub const Instruction = union(enum) {
.branch => |v| @bitCast(u32, v),
.branch_exchange => |v| @bitCast(u32, v),
.supervisor_call => |v| @bitCast(u32, v),
+ .undefined_instruction => |v| v.imm32,
.breakpoint => |v| @intCast(u32, v.imm4) | (@intCast(u32, v.fixed_1) << 4) | (@intCast(u32, v.imm12) << 8) | (@intCast(u32, v.fixed_2_and_cond) << 20),
};
}
@@ -890,6 +894,13 @@ pub const Instruction = union(enum) {
};
}
+ // This instruction has no official mnemonic equivalent so it is public as-is.
+ pub fn undefinedInstruction() Instruction {
+ return Instruction{
+ .undefined_instruction = .{},
+ };
+ }
+
fn breakpoint(imm: u16) Instruction {
return Instruction{
.breakpoint = .{
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index afcf4b0bb7..0b45982fb3 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -550,6 +550,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.bitcast => try self.airBitCast(inst),
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
+ .trap => try self.airTrap(),
.breakpoint => try self.airBreakpoint(),
.ret_addr => try self.airRetAddr(inst),
.frame_addr => try self.airFrameAddress(inst),
@@ -1652,6 +1653,14 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, mcv, .{ .none, .none, .none });
}
+fn airTrap(self: *Self) !void {
+ _ = try self.addInst(.{
+ .tag = .unimp,
+ .data = .{ .nop = {} },
+ });
+ return self.finishAirBookkeeping();
+}
+
fn airBreakpoint(self: *Self) !void {
_ = try self.addInst(.{
.tag = .ebreak,
diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig
index 387c735896..3b330cbd3f 100644
--- a/src/arch/riscv64/Emit.zig
+++ b/src/arch/riscv64/Emit.zig
@@ -51,6 +51,7 @@ pub fn emitMir(
.ebreak => try emit.mirSystem(inst),
.ecall => try emit.mirSystem(inst),
+ .unimp => try emit.mirSystem(inst),
.dbg_line => try emit.mirDbgLine(inst),
@@ -153,6 +154,7 @@ fn mirSystem(emit: *Emit, inst: Mir.Inst.Index) !void {
switch (tag) {
.ebreak => try emit.writeInstruction(Instruction.ebreak),
.ecall => try emit.writeInstruction(Instruction.ecall),
+ .unimp => try emit.writeInstruction(Instruction.unimp),
else => unreachable,
}
}
diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig
index 97accb7642..8905b24c3c 100644
--- a/src/arch/riscv64/Mir.zig
+++ b/src/arch/riscv64/Mir.zig
@@ -32,6 +32,7 @@ pub const Inst = struct {
dbg_epilogue_begin,
/// Pseudo-instruction: Update debug line
dbg_line,
+ unimp,
ebreak,
ecall,
jalr,
diff --git a/src/arch/riscv64/bits.zig b/src/arch/riscv64/bits.zig
index 6b94927df8..7b3ff0bfe9 100644
--- a/src/arch/riscv64/bits.zig
+++ b/src/arch/riscv64/bits.zig
@@ -380,6 +380,7 @@ pub const Instruction = union(enum) {
pub const ecall = iType(0b1110011, 0b000, .zero, .zero, 0x000);
pub const ebreak = iType(0b1110011, 0b000, .zero, .zero, 0x001);
+ pub const unimp = iType(0, 0, .zero, .zero, 0);
};
pub const Register = enum(u6) {
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index c8f77fe702..1b7290ddce 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -566,6 +566,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.bitcast => try self.airBitCast(inst),
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
+ .trap => try self.airTrap(),
.breakpoint => try self.airBreakpoint(),
.ret_addr => @panic("TODO try self.airRetAddr(inst)"),
.frame_addr => @panic("TODO try self.airFrameAddress(inst)"),
@@ -1160,6 +1161,21 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, .dead, .{ branch.operand, .none, .none });
}
+fn airTrap(self: *Self) !void {
+ // ta 0x05
+ _ = try self.addInst(.{
+ .tag = .tcc,
+ .data = .{
+ .trap = .{
+ .is_imm = true,
+ .cond = .al,
+ .rs2_or_imm = .{ .imm = 0x05 },
+ },
+ },
+ });
+ return self.finishAirBookkeeping();
+}
+
fn airBreakpoint(self: *Self) !void {
// ta 0x01
_ = try self.addInst(.{
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 2f191fd834..d388bc8fab 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1829,6 +1829,7 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.arg => func.airArg(inst),
.bitcast => func.airBitcast(inst),
.block => func.airBlock(inst),
+ .trap => func.airTrap(inst),
.breakpoint => func.airBreakpoint(inst),
.br => func.airBr(inst),
.bool_to_int => func.airBoolToInt(inst),
@@ -3289,6 +3290,11 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
func.finishAir(inst, result, &.{ty_op.operand});
}
+fn airTrap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ try func.addTag(.@"unreachable");
+ func.finishAir(inst, .none, &.{});
+}
+
fn airBreakpoint(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// unsupported by wasm itfunc. Can be implemented once we support DWARF
// for wasm
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 53d38f520a..70b51e50fd 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -638,6 +638,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.bitcast => try self.airBitCast(inst),
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
+ .trap => try self.airTrap(),
.breakpoint => try self.airBreakpoint(),
.ret_addr => try self.airRetAddr(inst),
.frame_addr => try self.airFrameAddress(inst),
@@ -3917,6 +3918,15 @@ fn genVarDbgInfo(
}
}
+fn airTrap(self: *Self) !void {
+ _ = try self.addInst(.{
+ .tag = .ud,
+ .ops = Mir.Inst.Ops.encode(.{}),
+ .data = undefined,
+ });
+ return self.finishAirBookkeeping();
+}
+
fn airBreakpoint(self: *Self) !void {
_ = try self.addInst(.{
.tag = .interrupt,
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig
index 12c19915c6..e521de4bd4 100644
--- a/src/arch/x86_64/Emit.zig
+++ b/src/arch/x86_64/Emit.zig
@@ -166,6 +166,7 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
.@"test" => try emit.mirTest(inst),
+ .ud => try emit.mirUndefinedInstruction(),
.interrupt => try emit.mirInterrupt(inst),
.nop => {}, // just skip it
@@ -234,6 +235,10 @@ fn fixupRelocs(emit: *Emit) InnerError!void {
}
}
+fn mirUndefinedInstruction(emit: *Emit) InnerError!void {
+ return lowerToZoEnc(.ud2, emit.code);
+}
+
fn mirInterrupt(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const tag = emit.mir.instructions.items(.tag)[inst];
assert(tag == .interrupt);
@@ -1279,6 +1284,7 @@ const Tag = enum {
push,
pop,
@"test",
+ ud2,
int3,
nop,
imul,
@@ -1571,6 +1577,7 @@ inline fn getOpCode(tag: Tag, enc: Encoding, is_one_byte: bool) OpCode {
.zo => return switch (tag) {
.ret_near => OpCode.init(&.{0xc3}),
.ret_far => OpCode.init(&.{0xcb}),
+ .ud2 => OpCode.init(&.{ 0x0F, 0x0B }),
.int3 => OpCode.init(&.{0xcc}),
.nop => OpCode.init(&.{0x90}),
.syscall => OpCode.init(&.{ 0x0f, 0x05 }),
diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig
index 112d9a5982..ba71f4cddd 100644
--- a/src/arch/x86_64/Mir.zig
+++ b/src/arch/x86_64/Mir.zig
@@ -329,6 +329,9 @@ pub const Inst = struct {
/// TODO handle more cases
@"test",
+ /// Undefined Instruction
+ ud,
+
/// Breakpoint form:
/// 0b00 int3
interrupt,
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index cf428d4bd6..c0585c3a4a 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -2741,6 +2741,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.const_ty => unreachable, // excluded from function bodies
.arg => try airArg(f, inst),
+ .trap => try airTrap(f.object.writer()),
.breakpoint => try airBreakpoint(f.object.writer()),
.ret_addr => try airRetAddr(f, inst),
.frame_addr => try airFrameAddress(f, inst),
@@ -4428,6 +4429,11 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue {
return local;
}
+fn airTrap(writer: anytype) !CValue {
+ try writer.writeAll("zig_trap();\n");
+ return .none;
+}
+
fn airBreakpoint(writer: anytype) !CValue {
try writer.writeAll("zig_breakpoint();\n");
return .none;
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 6f240b88f5..1f8473ac32 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -4590,6 +4590,7 @@ pub const FuncGen = struct {
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
.switch_br => try self.airSwitchBr(inst),
+ .trap => try self.airTrap(inst),
.breakpoint => try self.airBreakpoint(inst),
.ret_addr => try self.airRetAddr(inst),
.frame_addr => try self.airFrameAddress(inst),
@@ -8256,6 +8257,13 @@ pub const FuncGen = struct {
return fg.load(ptr, ptr_ty);
}
+ fn airTrap(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ _ = inst;
+ const llvm_fn = self.getIntrinsic("llvm.trap", &.{});
+ _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, undefined, 0, .Cold, .Auto, "");
+ return null;
+ }
+
fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
_ = inst;
const llvm_fn = self.getIntrinsic("llvm.debugtrap", &.{});
diff --git a/src/print_air.zig b/src/print_air.zig
index 447af5a9c7..f5c06daae2 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -194,6 +194,7 @@ const Writer = struct {
.c_va_end,
=> try w.writeUnOp(s, inst),
+ .trap,
.breakpoint,
.unreach,
.ret_addr,
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 5ec9fbcdfc..5e7d0d45de 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -410,6 +410,7 @@ const Writer = struct {
.alloc_inferred_comptime_mut,
.ret_ptr,
.ret_type,
+ .trap,
=> try self.writeNode(stream, inst),
.error_value,
From 4eb3f50fcf6fcfb6b8013571be00b9eeeb909833 Mon Sep 17 00:00:00 2001
From: r00ster91
Date: Fri, 3 Mar 2023 19:59:18 +0100
Subject: [PATCH 014/294] Wasm @breakpoint: emit unreachable
This should improve the developer debugging experience.
---
src/arch/wasm/CodeGen.zig | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index d388bc8fab..dbabb436c8 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -3298,6 +3298,7 @@ fn airTrap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airBreakpoint(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// unsupported by wasm itfunc. Can be implemented once we support DWARF
// for wasm
+ try func.addTag(.@"unreachable");
func.finishAir(inst, .none, &.{});
}
From 2cf27c571880a607401dca181f8103e855d0c46d Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sat, 4 Mar 2023 02:11:04 -0500
Subject: [PATCH 015/294] llvm: fix incorrectly annotated DIType
Closes #14715
Closes #14783
---
src/codegen/llvm.zig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 6f240b88f5..937c1cf120 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -1773,7 +1773,7 @@ pub const Object = struct {
if (ty.optionalReprIsPayload()) {
const ptr_di_ty = try o.lowerDebugType(child_ty, resolve);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty), .{ .mod = o.module });
+ try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .mod = o.module });
return ptr_di_ty;
}
From 010596c93054543c3c218e7d4b045d5e46384dab Mon Sep 17 00:00:00 2001
From: r00ster91
Date: Sat, 4 Mar 2023 12:51:16 +0100
Subject: [PATCH 016/294] AstGen: compile-error on primitive value export
Fixes #14778
Co-authored-by: Veikka Tuominen
---
src/AstGen.zig | 5 +++-
.../exporting_primitive_values.zig | 29 +++++++++++++++++++
2 files changed, 33 insertions(+), 1 deletion(-)
create mode 100644 test/cases/compile_errors/exporting_primitive_values.zig
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 41a8ccadb2..8e3f11df76 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -7976,6 +7976,9 @@ fn builtinCall(
switch (node_tags[params[0]]) {
.identifier => {
const ident_token = main_tokens[params[0]];
+ if (isPrimitive(tree.tokenSlice(ident_token))) {
+ return astgen.failTok(ident_token, "unable to export primitive value", .{});
+ }
decl_name = try astgen.identAsString(ident_token);
var s = scope;
@@ -8988,7 +8991,7 @@ const primitive_instrs = std.ComptimeStringMap(Zir.Inst.Ref, .{
});
comptime {
- // These checks ensure that std.zig.primitives stays in synce with the primitive->Zir map.
+ // These checks ensure that std.zig.primitives stays in sync with the primitive->Zir map.
const primitives = std.zig.primitives;
for (primitive_instrs.kvs) |kv| {
if (!primitives.isPrimitive(kv.key)) {
diff --git a/test/cases/compile_errors/exporting_primitive_values.zig b/test/cases/compile_errors/exporting_primitive_values.zig
new file mode 100644
index 0000000000..bf3c38a553
--- /dev/null
+++ b/test/cases/compile_errors/exporting_primitive_values.zig
@@ -0,0 +1,29 @@
+pub export fn entry1() void {
+ @export(u100, .{ .name = "a" });
+}
+pub export fn entry3() void {
+ @export(undefined, .{ .name = "b" });
+}
+pub export fn entry4() void {
+ @export(null, .{ .name = "c" });
+}
+pub export fn entry5() void {
+ @export(false, .{ .name = "d" });
+}
+pub export fn entry6() void {
+ @export(u8, .{ .name = "e" });
+}
+pub export fn entry7() void {
+ @export(u65535, .{ .name = "f" });
+}
+
+// error
+// backend=llvm
+// target=native
+//
+// :2:13: error: unable to export primitive value
+// :5:13: error: unable to export primitive value
+// :8:13: error: unable to export primitive value
+// :11:13: error: unable to export primitive value
+// :14:13: error: unable to export primitive value
+// :17:13: error: unable to export primitive value
From 16302578d5a0ca226c7db76bc8e39574dea1dc1d Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 4 Mar 2023 14:04:58 -0700
Subject: [PATCH 017/294] add behavior test case for previous commit
---
test/behavior/slice.zig | 15 +++++++++++++++
1 file changed, 15 insertions(+)
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index 435e1887bb..ed5e2a721d 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -747,3 +747,18 @@ test "slice decays to many pointer" {
const p: [*:0]const u8 = buf[0..7 :0];
try expectEqualStrings(buf[0..7], std.mem.span(p));
}
+
+test "write through pointer to optional slice arg" {
+ const S = struct {
+ fn bar(foo: *?[]const u8) !void {
+ foo.* = try baz();
+ }
+
+ fn baz() ![]const u8 {
+ return "ok";
+ }
+ };
+ var foo: ?[]const u8 = null;
+ try S.bar(&foo);
+ try expectEqualStrings(foo.?, "ok");
+}
From c9d990d79083f117564837f762c3e225d7fbc5cf Mon Sep 17 00:00:00 2001
From: tranquillity-codes
Date: Sat, 4 Mar 2023 10:10:07 +0100
Subject: [PATCH 018/294] langref: update Build Mode docs to -Doptimize option names
---
doc/langref.html.in | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index e016ef13f8..71d99b3aae 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -9565,9 +9565,10 @@ pub fn build(b: *std.Build) void {
This causes these options to be available:
- - -Drelease-safe=[bool]
- Optimizations on and safety on
- - -Drelease-fast=[bool]
- Optimizations on and safety off
- - -Drelease-small=[bool]
- Size optimizations on and safety off
+ - -Doptimize=Debug
- Optimizations off and safety on (default)
+ - -Doptimize=ReleaseSafe
- Optimizations on and safety on
+ - -Doptimize=ReleaseFast
- Optimizations on and safety off
+ - -Doptimize=ReleaseSmall
- Size optimizations on and safety off
{#header_open|Debug#}
{#shell_samp#}$ zig build-exe example.zig{#end_shell_samp#}
From 874ae81f1b2ae76cea6f5c79203f4baa68263163 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Fri, 3 Mar 2023 00:18:34 -0500
Subject: [PATCH 019/294] CBE: implement big integer literals
---
lib/std/math/big/int.zig | 1 +
lib/zig.h | 12 +-
src/codegen/c.zig | 361 +++++++++++++++++++-------------------
src/codegen/c/type.zig | 348 ++++++++++++++++++++++++++++++++----
test/behavior/bitcast.zig | 1 -
5 files changed, 502 insertions(+), 221 deletions(-)
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index b7725b9ae9..4e4e7c489e 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -1674,6 +1674,7 @@ pub const Mutable = struct {
/// If a is positive, this passes through to truncate.
/// If a is negative, then r is set to positive with the bit pattern ~(a - 1).
+ /// r may alias a.
///
/// Asserts `r` has enough storage to store the result.
/// The upper bound is `calcTwosCompLimbCount(a.len)`.
diff --git a/lib/zig.h b/lib/zig.h
index f3ad7db8a1..7353ea935d 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -1360,8 +1360,8 @@ typedef signed __int128 zig_i128;
#define zig_make_u128(hi, lo) ((zig_u128)(hi)<<64|(lo))
#define zig_make_i128(hi, lo) ((zig_i128)zig_make_u128(hi, lo))
-#define zig_make_constant_u128(hi, lo) zig_make_u128(hi, lo)
-#define zig_make_constant_i128(hi, lo) zig_make_i128(hi, lo)
+#define zig_init_u128(hi, lo) zig_make_u128(hi, lo)
+#define zig_init_i128(hi, lo) zig_make_i128(hi, lo)
#define zig_hi_u128(val) ((uint64_t)((val) >> 64))
#define zig_lo_u128(val) ((uint64_t)((val) >> 0))
#define zig_hi_i128(val) (( int64_t)((val) >> 64))
@@ -1391,11 +1391,11 @@ typedef struct { zig_align(16) int64_t hi; uint64_t lo; } zig_i128;
#define zig_make_i128(hi, lo) ((zig_i128){ .h##i = (hi), .l##o = (lo) })
#if _MSC_VER /* MSVC doesn't allow struct literals in constant expressions */
-#define zig_make_constant_u128(hi, lo) { .h##i = (hi), .l##o = (lo) }
-#define zig_make_constant_i128(hi, lo) { .h##i = (hi), .l##o = (lo) }
+#define zig_init_u128(hi, lo) { .h##i = (hi), .l##o = (lo) }
+#define zig_init_i128(hi, lo) { .h##i = (hi), .l##o = (lo) }
#else /* But non-MSVC doesn't like the unprotected commas */
-#define zig_make_constant_u128(hi, lo) zig_make_u128(hi, lo)
-#define zig_make_constant_i128(hi, lo) zig_make_i128(hi, lo)
+#define zig_init_u128(hi, lo) zig_make_u128(hi, lo)
+#define zig_init_i128(hi, lo) zig_make_i128(hi, lo)
#endif
#define zig_hi_u128(val) ((val).hi)
#define zig_lo_u128(val) ((val).lo)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index c0585c3a4a..addd3c8332 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -449,7 +449,7 @@ pub const Function = struct {
}
fn fmtIntLiteral(f: *Function, ty: Type, val: Value) !std.fmt.Formatter(formatIntLiteral) {
- return f.object.dg.fmtIntLiteral(ty, val);
+ return f.object.dg.fmtIntLiteral(ty, val, .Other);
}
fn getLazyFnName(f: *Function, key: LazyFnKey, data: LazyFnValue.Data) ![]const u8 {
@@ -574,9 +574,9 @@ pub const DeclGen = struct {
const len_val = Value.initPayload(&len_pl.base);
if (location == .StaticInitializer) {
- return writer.print(", {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val)});
+ return writer.print(", {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)});
} else {
- return writer.print(", .len = {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val)});
+ return writer.print(", .len = {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)});
}
}
@@ -606,7 +606,7 @@ pub const DeclGen = struct {
try writer.writeByte(')');
}
switch (ptr_val.tag()) {
- .int_u64, .one => try writer.print("{x}", .{try dg.fmtIntLiteral(Type.usize, ptr_val)}),
+ .int_u64, .one => try writer.print("{x}", .{try dg.fmtIntLiteral(Type.usize, ptr_val, .Other)}),
.decl_ref_mut, .decl_ref, .variable => {
const decl_index = switch (ptr_val.tag()) {
.decl_ref => ptr_val.castTag(.decl_ref).?.data,
@@ -670,7 +670,9 @@ pub const DeclGen = struct {
container_ptr_ty,
location,
);
- try writer.print(" + {})", .{try dg.fmtIntLiteral(Type.usize, byte_offset_val)});
+ try writer.print(" + {})", .{
+ try dg.fmtIntLiteral(Type.usize, byte_offset_val, .Other),
+ });
},
.end => {
try writer.writeAll("((");
@@ -680,7 +682,9 @@ pub const DeclGen = struct {
container_ptr_ty,
location,
);
- try writer.print(") + {})", .{try dg.fmtIntLiteral(Type.usize, Value.one)});
+ try writer.print(") + {})", .{
+ try dg.fmtIntLiteral(Type.usize, Value.one, .Other),
+ });
},
}
},
@@ -746,7 +750,7 @@ pub const DeclGen = struct {
return writer.writeAll("false");
}
},
- .Int, .Enum, .ErrorSet => return writer.print("{x}", .{try dg.fmtIntLiteralLoc(ty, val, location)}),
+ .Int, .Enum, .ErrorSet => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, val, location)}),
.Float => {
const bits = ty.floatBits(target);
var int_pl = Type.Payload.Bits{ .base = .{ .tag = .int_signed }, .data = bits };
@@ -780,11 +784,11 @@ pub const DeclGen = struct {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = ty.slicePtrFieldType(&buf);
try dg.renderType(writer, ptr_ty);
- return writer.print("){x}, {0x}}}", .{try dg.fmtIntLiteral(Type.usize, val)});
+ return writer.print("){x}, {0x}}}", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
} else {
try writer.writeAll("((");
try dg.renderType(writer, ty);
- return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val)});
+ return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
},
.Optional => {
var opt_buf: Type.Payload.ElemType = undefined;
@@ -831,7 +835,7 @@ pub const DeclGen = struct {
return writer.writeByte('}');
},
- .Packed => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, Value.undef)}),
+ .Packed => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, Value.undef, .Other)}),
},
.Union => {
if (!location.isInitializer()) {
@@ -854,7 +858,7 @@ pub const DeclGen = struct {
if (!field.ty.hasRuntimeBits()) continue;
try dg.renderValue(writer, field.ty, val, initializer_type);
break;
- } else try writer.print("{x}", .{try dg.fmtIntLiteral(Type.u8, Value.undef)});
+ } else try writer.print("{x}", .{try dg.fmtIntLiteral(Type.u8, Value.undef, .Other)});
if (ty.unionTagTypeSafety()) |_| try writer.writeByte('}');
return writer.writeByte('}');
},
@@ -868,7 +872,7 @@ pub const DeclGen = struct {
try writer.writeAll("{ .payload = ");
try dg.renderValue(writer, ty.errorUnionPayload(), val, initializer_type);
return writer.print(", .error = {x} }}", .{
- try dg.fmtIntLiteral(ty.errorUnionSet(), val),
+ try dg.fmtIntLiteral(ty.errorUnionSet(), val, .Other),
});
},
.Array, .Vector => {
@@ -927,7 +931,7 @@ pub const DeclGen = struct {
.decl_ref_mut,
.decl_ref,
=> try dg.renderParentPtr(writer, val, ty, location),
- else => try writer.print("{}", .{try dg.fmtIntLiteralLoc(ty, val, location)}),
+ else => try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, location)}),
},
.Float => {
const bits = ty.floatBits(target);
@@ -1020,7 +1024,7 @@ pub const DeclGen = struct {
try writer.writeAll(", ");
empty = false;
}
- try writer.print("{x}", .{try dg.fmtIntLiteralLoc(int_ty, int_val, location)});
+ try writer.print("{x}", .{try dg.fmtIntLiteral(int_ty, int_val, location)});
if (!empty) try writer.writeByte(')');
return;
},
@@ -1069,7 +1073,7 @@ pub const DeclGen = struct {
.int_u64, .one => {
try writer.writeAll("((");
try dg.renderType(writer, ty);
- return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val)});
+ return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
},
.field_ptr,
.elem_ptr,
@@ -1889,11 +1893,11 @@ pub const DeclGen = struct {
const int_info = ty.intInfo(target);
if (int_info.signedness == .signed) {
const min_val = try ty.minInt(stack.get(), target);
- try writer.print(", {x}", .{try dg.fmtIntLiteral(ty, min_val)});
+ try writer.print(", {x}", .{try dg.fmtIntLiteral(ty, min_val, .Other)});
}
const max_val = try ty.maxInt(stack.get(), target);
- try writer.print(", {x}", .{try dg.fmtIntLiteral(ty, max_val)});
+ try writer.print(", {x}", .{try dg.fmtIntLiteral(ty, max_val, .Other)});
},
.Bits => {
var bits_pl = Value.Payload.U64{
@@ -1901,7 +1905,7 @@ pub const DeclGen = struct {
.data = ty.bitSize(target),
};
const bits_val = Value.initPayload(&bits_pl.base);
- try writer.print(", {}", .{try dg.fmtIntLiteral(Type.u8, bits_val)});
+ try writer.print(", {}", .{try dg.fmtIntLiteral(Type.u8, bits_val, .Other)});
},
}
}
@@ -1910,30 +1914,21 @@ pub const DeclGen = struct {
dg: *DeclGen,
ty: Type,
val: Value,
+ loc: ValueRenderLocation,
) !std.fmt.Formatter(formatIntLiteral) {
- const int_info = ty.intInfo(dg.module.getTarget());
- const c_bits = toCIntBits(int_info.bits);
- if (c_bits == null or c_bits.? > 128)
- return dg.fail("TODO implement integer constants larger than 128 bits", .{});
+ const kind: CType.Kind = switch (loc) {
+ .FunctionArgument => .parameter,
+ .Initializer, .Other => .complete,
+ .StaticInitializer => .global,
+ };
return std.fmt.Formatter(formatIntLiteral){ .data = .{
- .ty = ty,
+ .dg = dg,
+ .int_info = ty.intInfo(dg.module.getTarget()),
+ .kind = kind,
+ .cty = try dg.typeToCType(ty, kind),
.val = val,
- .mod = dg.module,
} };
}
-
- fn fmtIntLiteralLoc(
- dg: *DeclGen,
- ty: Type,
- val: Value,
- location: ValueRenderLocation, // TODO: Instead add this as optional arg to fmtIntLiteral
- ) !std.fmt.Formatter(formatIntLiteral) {
- const int_info = ty.intInfo(dg.module.getTarget());
- const c_bits = toCIntBits(int_info.bits);
- if (c_bits == null or c_bits.? > 128)
- return dg.fail("TODO implement integer constants larger than 128 bits", .{});
- return std.fmt.Formatter(formatIntLiteral){ .data = .{ .ty = ty, .val = val, .mod = dg.module, .location = location } };
- }
};
const CTypeFix = enum { prefix, suffix };
@@ -2450,7 +2445,7 @@ pub fn genErrDecls(o: *Object) !void {
const len_val = Value.initPayload(&len_pl.base);
try writer.print("{{" ++ name_prefix ++ "{}, {}}}", .{
- fmtIdent(name), try o.dg.fmtIntLiteral(Type.usize, len_val),
+ fmtIdent(name), try o.dg.fmtIntLiteral(Type.usize, len_val, .Other),
});
}
try writer.writeAll("};\n");
@@ -2501,7 +2496,10 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
var int_pl: Value.Payload.U64 = undefined;
const int_val = tag_val.enumToInt(enum_ty, &int_pl);
- var name_ty_pl = Type.Payload.Len{ .base = .{ .tag = .array_u8_sentinel_0 }, .data = name.len };
+ var name_ty_pl = Type.Payload.Len{
+ .base = .{ .tag = .array_u8_sentinel_0 },
+ .data = name.len,
+ };
const name_ty = Type.initPayload(&name_ty_pl.base);
var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name };
@@ -2510,14 +2508,16 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len };
const len_val = Value.initPayload(&len_pl.base);
- try w.print(" case {}: {{\n static ", .{try o.dg.fmtIntLiteral(enum_ty, int_val)});
+ try w.print(" case {}: {{\n static ", .{
+ try o.dg.fmtIntLiteral(enum_ty, int_val, .Other),
+ });
try o.dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, Const, 0, .complete);
try w.writeAll(" = ");
try o.dg.renderValue(w, name_ty, name_val, .Initializer);
try w.writeAll(";\n return (");
try o.dg.renderType(w, name_slice_ty);
try w.print("){{{}, {}}};\n", .{
- fmtIdent("name"), try o.dg.fmtIntLiteral(Type.usize, len_val),
+ fmtIdent("name"), try o.dg.fmtIntLiteral(Type.usize, len_val, .Other),
});
try w.writeAll(" }\n");
@@ -2535,7 +2535,12 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
const fwd_decl_writer = o.dg.fwd_decl.writer();
try fwd_decl_writer.print("static zig_{s} ", .{@tagName(key)});
- try o.dg.renderFunctionSignature(fwd_decl_writer, fn_decl_index, .forward, .{ .string = fn_name });
+ try o.dg.renderFunctionSignature(
+ fwd_decl_writer,
+ fn_decl_index,
+ .forward,
+ .{ .string = fn_name },
+ );
try fwd_decl_writer.writeAll(";\n");
try w.print("static zig_{s} ", .{@tagName(key)});
@@ -7177,30 +7182,33 @@ fn undefPattern(comptime IntType: type) IntType {
return @bitCast(IntType, @as(UnsignedType, (1 << (int_info.bits | 1)) / 3));
}
-const FormatIntLiteralContext = struct { ty: Type, val: Value, mod: *Module, location: ?ValueRenderLocation = null };
+const FormatIntLiteralContext = struct {
+ dg: *DeclGen,
+ int_info: std.builtin.Type.Int,
+ kind: CType.Kind,
+ cty: CType,
+ val: Value,
+};
fn formatIntLiteral(
data: FormatIntLiteralContext,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
- const target = data.mod.getTarget();
- const int_info = data.ty.intInfo(target);
+ const target = data.dg.module.getTarget();
const ExpectedContents = struct {
const base = 10;
- const limbs_count_128 = BigInt.calcTwosCompLimbCount(128);
- const expected_needed_limbs_count = BigInt.calcToStringLimbsBufferLen(limbs_count_128, base);
- const worst_case_int = BigInt.Const{
- .limbs = &([1]BigIntLimb{std.math.maxInt(BigIntLimb)} ** expected_needed_limbs_count),
- .positive = false,
- };
+ const bits = 128;
+ const limbs_count = BigInt.calcTwosCompLimbCount(bits);
- undef_limbs: [limbs_count_128]BigIntLimb,
- wrap_limbs: [limbs_count_128]BigIntLimb,
+ undef_limbs: [limbs_count]BigIntLimb,
+ wrap_limbs: [limbs_count]BigIntLimb,
+ to_string_buf: [bits]u8,
+ to_string_limbs: [BigInt.calcToStringLimbsBufferLen(limbs_count, base)]BigIntLimb,
};
var stack align(@alignOf(ExpectedContents)) =
- std.heap.stackFallback(@sizeOf(ExpectedContents), data.mod.gpa);
+ std.heap.stackFallback(@sizeOf(ExpectedContents), data.dg.gpa);
const allocator = stack.get();
var undef_limbs: []BigIntLimb = &.{};
@@ -7208,7 +7216,7 @@ fn formatIntLiteral(
var int_buf: Value.BigIntSpace = undefined;
const int = if (data.val.isUndefDeep()) blk: {
- undef_limbs = try allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(int_info.bits));
+ undef_limbs = try allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(data.int_info.bits));
std.mem.set(BigIntLimb, undef_limbs, undefPattern(BigIntLimb));
var undef_int = BigInt.Mutable{
@@ -7216,163 +7224,150 @@ fn formatIntLiteral(
.len = undef_limbs.len,
.positive = true,
};
- undef_int.truncate(undef_int.toConst(), int_info.signedness, int_info.bits);
+ undef_int.truncate(undef_int.toConst(), data.int_info.signedness, data.int_info.bits);
break :blk undef_int.toConst();
} else data.val.toBigInt(&int_buf, target);
- assert(int.fitsInTwosComp(int_info.signedness, int_info.bits));
+ assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits));
- const c_bits = toCIntBits(int_info.bits) orelse unreachable;
+ const c_bits = @intCast(usize, data.cty.byteSize(data.dg.ctypes.set, target) * 8);
var one_limbs: [BigInt.calcLimbLen(1)]BigIntLimb = undefined;
const one = BigInt.Mutable.init(&one_limbs, 1).toConst();
- const wrap_limbs = try allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(c_bits));
- defer allocator.free(wrap_limbs);
- var wrap = BigInt.Mutable{ .limbs = wrap_limbs, .len = undefined, .positive = undefined };
- if (wrap.addWrap(int, one, int_info.signedness, c_bits) or
- int_info.signedness == .signed and wrap.subWrap(int, one, int_info.signedness, c_bits))
- {
- const abbrev = switch (data.ty.tag()) {
- .c_short, .c_ushort => "SHRT",
- .c_int, .c_uint => "INT",
- .c_long, .c_ulong => "LONG",
- .c_longlong, .c_ulonglong => "LLONG",
- .isize, .usize => "INTPTR",
- else => return writer.print("zig_{s}Int_{c}{d}", .{
- if (int.positive) "max" else "min", signAbbrev(int_info.signedness), c_bits,
+ var wrap = BigInt.Mutable{
+ .limbs = try allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(c_bits)),
+ .len = undefined,
+ .positive = undefined,
+ };
+ defer allocator.free(wrap.limbs);
+ if (wrap.addWrap(int, one, data.int_info.signedness, c_bits) or
+ data.int_info.signedness == .signed and wrap.subWrap(int, one, data.int_info.signedness, c_bits))
+ return writer.print("{s}_{s}", .{
+ data.cty.getStandardDefineAbbrev() orelse return writer.print("zig_{s}Int_{c}{d}", .{
+ if (int.positive) "max" else "min", signAbbrev(data.int_info.signedness), c_bits,
}),
- };
- if (int_info.signedness == .unsigned) try writer.writeByte('U');
- return writer.print("{s}_{s}", .{ abbrev, if (int.positive) "MAX" else "MIN" });
- }
+ if (int.positive) "MAX" else "MIN",
+ });
- var use_twos_comp = false;
- if (!int.positive) {
- if (c_bits > 64) {
- // TODO: Can this be done for decimal literals as well?
- if (fmt.len == 1 and fmt[0] != 'd') {
- use_twos_comp = true;
- } else {
- // TODO: Use fmtIntLiteral for 0?
- try writer.print("zig_sub_{c}{d}(zig_make_{c}{d}(0, 0), ", .{ signAbbrev(int_info.signedness), c_bits, signAbbrev(int_info.signedness), c_bits });
- }
- } else {
- try writer.writeByte('-');
- }
- }
-
- switch (data.ty.tag()) {
- .c_short, .c_ushort, .c_int, .c_uint, .c_long, .c_ulong, .c_longlong, .c_ulonglong => {},
- else => {
- if (int_info.bits <= 64) {
- try writer.print("{s}INT{d}_C(", .{ switch (int_info.signedness) {
- .signed => "",
- .unsigned => "U",
- }, c_bits });
- } else if (data.location != null and data.location.? == .StaticInitializer) {
- // MSVC treats casting the struct initializer as not constant (C2099), so an alternate form is used in global initializers
- try writer.print("zig_make_constant_{c}{d}(", .{ signAbbrev(int_info.signedness), c_bits });
- } else {
- try writer.print("zig_make_{c}{d}(", .{ signAbbrev(int_info.signedness), c_bits });
- }
+ const c_limb_info: struct {
+ cty: CType,
+ count: usize,
+ endian: std.builtin.Endian,
+ homogeneous: bool,
+ } = switch (data.cty.tag()) {
+ else => .{
+ .cty = CType.initTag(.void),
+ .count = 1,
+ .endian = .Little,
+ .homogeneous = true,
},
- }
+ .zig_u128, .zig_i128 => .{
+ .cty = CType.initTag(.uint64_t),
+ .count = 2,
+ .endian = .Big,
+ .homogeneous = false,
+ },
+ .array => info: {
+ const array_data = data.cty.castTag(.array).?.data;
+ break :info .{
+ .cty = data.dg.indexToCType(array_data.elem_type),
+ .count = @intCast(usize, array_data.len),
+ .endian = target.cpu.arch.endian(),
+ .homogeneous = true,
+ };
+ },
+ };
+ if (c_limb_info.count == 1) {
+ if (!int.positive) try writer.writeByte('-');
+ try data.cty.renderLiteralPrefix(writer, data.kind);
- const limbs_count_64 = @divExact(64, @bitSizeOf(BigIntLimb));
- if (c_bits <= 64) {
- var base: u8 = undefined;
- var case: std.fmt.Case = undefined;
- switch (fmt.len) {
- 0 => base = 10,
+ const style: struct { base: u8, case: std.fmt.Case = undefined } = switch (fmt.len) {
+ 0 => .{ .base = 10 },
1 => switch (fmt[0]) {
- 'b' => {
- base = 2;
+ 'b' => style: {
try writer.writeAll("0b");
+ break :style .{ .base = 2 };
},
- 'o' => {
- base = 8;
+ 'o' => style: {
try writer.writeByte('0');
+ break :style .{ .base = 8 };
},
- 'd' => base = 10,
- 'x' => {
- base = 16;
- case = .lower;
- try writer.writeAll("0x");
- },
- 'X' => {
- base = 16;
- case = .upper;
+ 'd' => .{ .base = 10 },
+ 'x', 'X' => |base| style: {
try writer.writeAll("0x");
+ break :style .{ .base = 16, .case = switch (base) {
+ 'x' => .lower,
+ 'X' => .upper,
+ else => unreachable,
+ } };
},
else => @compileError("Invalid fmt: " ++ fmt),
},
else => @compileError("Invalid fmt: " ++ fmt),
- }
+ };
- var str: [64]u8 = undefined;
- var limbs_buf: [BigInt.calcToStringLimbsBufferLen(limbs_count_64, 10)]BigIntLimb = undefined;
- try writer.writeAll(str[0..int.abs().toString(&str, base, case, &limbs_buf)]);
+ const string = try int.abs().toStringAlloc(allocator, style.base, style.case);
+ defer allocator.free(string);
+ try writer.writeAll(string);
} else {
- assert(c_bits == 128);
- const split = std.math.min(int.limbs.len, limbs_count_64);
- var twos_comp_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined;
+ try data.cty.renderLiteralPrefix(writer, data.kind);
+ wrap.convertToTwosComplement(int, .unsigned, data.int_info.bits);
+ std.mem.set(BigIntLimb, wrap.limbs[wrap.len..], 0);
+ wrap.len = wrap.limbs.len;
+ const limbs_per_c_limb = @divExact(wrap.len, c_limb_info.count);
- // Adding a negation in the C code before the doesn't work in all cases:
- // - struct versions would require an extra zig_sub_ call to negate, which wouldn't work in constant expressions
- // - negating the f80 int representation (i128) doesn't make sense
- // Instead we write out the literal as a negative number in twos complement
- var limbs = int.limbs;
+ var c_limb_int_info = std.builtin.Type.Int{
+ .signedness = undefined,
+ .bits = @intCast(u16, @divExact(c_bits, c_limb_info.count)),
+ };
+ var c_limb_cty: CType = undefined;
- if (use_twos_comp) {
- var twos_comp = BigInt.Mutable{
- .limbs = &twos_comp_limbs,
- .positive = undefined,
+ var limb_offset: usize = 0;
+ const most_significant_limb_i = wrap.len - limbs_per_c_limb;
+ while (limb_offset < wrap.len) : (limb_offset += limbs_per_c_limb) {
+ const limb_i = switch (c_limb_info.endian) {
+ .Little => limb_offset,
+ .Big => most_significant_limb_i - limb_offset,
+ };
+ var c_limb_mut = BigInt.Mutable{
+ .limbs = wrap.limbs[limb_i..][0..limbs_per_c_limb],
.len = undefined,
+ .positive = true,
+ };
+ c_limb_mut.normalize(limbs_per_c_limb);
+
+ if (limb_i == most_significant_limb_i and
+ !c_limb_info.homogeneous and data.int_info.signedness == .signed)
+ {
+ // most significant limb is actually signed
+ c_limb_int_info.signedness = .signed;
+ c_limb_cty = c_limb_info.cty.toSigned();
+
+ c_limb_mut.positive = wrap.positive;
+ c_limb_mut.convertToTwosComplement(
+ c_limb_mut.toConst(),
+ .signed,
+ data.int_info.bits - limb_i * @bitSizeOf(BigIntLimb),
+ );
+ } else {
+ c_limb_int_info.signedness = .unsigned;
+ c_limb_cty = c_limb_info.cty;
+ }
+ var c_limb_val_pl = Value.Payload.BigInt{
+ .base = .{ .tag = if (c_limb_mut.positive) .int_big_positive else .int_big_negative },
+ .data = c_limb_mut.limbs[0..c_limb_mut.len],
};
- twos_comp.convertToTwosComplement(int, .signed, int_info.bits);
- limbs = twos_comp.limbs;
+ if (limb_offset > 0) try writer.writeAll(", ");
+ try formatIntLiteral(.{
+ .dg = data.dg,
+ .int_info = c_limb_int_info,
+ .kind = data.kind,
+ .cty = c_limb_cty,
+ .val = Value.initPayload(&c_limb_val_pl.base),
+ }, fmt, options, writer);
}
-
- var upper_pl = Value.Payload.BigInt{
- .base = .{ .tag = .int_big_positive },
- .data = limbs[split..],
- };
- const upper_val = Value.initPayload(&upper_pl.base);
- try formatIntLiteral(.{
- .ty = switch (int_info.signedness) {
- .unsigned => Type.u64,
- .signed => if (use_twos_comp) Type.u64 else Type.i64,
- },
- .val = upper_val,
- .mod = data.mod,
- }, fmt, options, writer);
-
- try writer.writeAll(", ");
-
- var lower_pl = Value.Payload.BigInt{
- .base = .{ .tag = .int_big_positive },
- .data = limbs[0..split],
- };
- const lower_val = Value.initPayload(&lower_pl.base);
- try formatIntLiteral(.{
- .ty = Type.u64,
- .val = lower_val,
- .mod = data.mod,
- }, fmt, options, writer);
-
- if (!int.positive and c_bits > 64 and !use_twos_comp) try writer.writeByte(')');
- return writer.writeByte(')');
- }
-
- switch (data.ty.tag()) {
- .c_short, .c_ushort, .c_int => {},
- .c_uint => try writer.writeAll("u"),
- .c_long => try writer.writeAll("l"),
- .c_ulong => try writer.writeAll("ul"),
- .c_longlong => try writer.writeAll("ll"),
- .c_ulonglong => try writer.writeAll("ull"),
- else => try writer.writeByte(')'),
}
+ try data.cty.renderLiteralSuffix(writer);
}
fn isByRef(ty: Type) bool {
diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig
index 1f1a220cd2..a1b11df315 100644
--- a/src/codegen/c/type.zig
+++ b/src/codegen/c/type.zig
@@ -496,6 +496,296 @@ pub const CType = extern union {
}
};
+ pub fn toSigned(self: CType) CType {
+ return CType.initTag(switch (self.tag()) {
+ .char, .@"signed char", .@"unsigned char" => .@"signed char",
+ .short, .@"unsigned short" => .short,
+ .int, .@"unsigned int" => .int,
+ .long, .@"unsigned long" => .long,
+ .@"long long", .@"unsigned long long" => .@"long long",
+ .size_t, .ptrdiff_t => .ptrdiff_t,
+ .uint8_t, .int8_t => .int8_t,
+ .uint16_t, .int16_t => .int16_t,
+ .uint32_t, .int32_t => .int32_t,
+ .uint64_t, .int64_t => .int64_t,
+ .uintptr_t, .intptr_t => .intptr_t,
+ .zig_u128, .zig_i128 => .zig_i128,
+ .float,
+ .double,
+ .@"long double",
+ .zig_f16,
+ .zig_f32,
+ .zig_f80,
+ .zig_f128,
+ .zig_c_longdouble,
+ => |t| t,
+ else => unreachable,
+ });
+ }
+
+ pub fn toUnsigned(self: CType) CType {
+ return CType.initTag(switch (self.tag()) {
+ .char, .@"signed char", .@"unsigned char" => .@"unsigned char",
+ .short, .@"unsigned short" => .@"unsigned short",
+ .int, .@"unsigned int" => .@"unsigned int",
+ .long, .@"unsigned long" => .@"unsigned long",
+ .@"long long", .@"unsigned long long" => .@"unsigned long long",
+ .size_t, .ptrdiff_t => .size_t,
+ .uint8_t, .int8_t => .uint8_t,
+ .uint16_t, .int16_t => .uint16_t,
+ .uint32_t, .int32_t => .uint32_t,
+ .uint64_t, .int64_t => .uint64_t,
+ .uintptr_t, .intptr_t => .uintptr_t,
+ .zig_u128, .zig_i128 => .zig_u128,
+ else => unreachable,
+ });
+ }
+
+ pub fn getStandardDefineAbbrev(self: CType) ?[]const u8 {
+ return switch (self.tag()) {
+ .char => "CHAR",
+ .@"signed char" => "SCHAR",
+ .short => "SHRT",
+ .int => "INT",
+ .long => "LONG",
+ .@"long long" => "LLONG",
+ .@"unsigned char" => "UCHAR",
+ .@"unsigned short" => "USHRT",
+ .@"unsigned int" => "UINT",
+ .@"unsigned long" => "ULONG",
+ .@"unsigned long long" => "ULLONG",
+ .float => "FLT",
+ .double => "DBL",
+ .@"long double" => "LDBL",
+ .size_t => "SIZE",
+ .ptrdiff_t => "PTRDIFF",
+ .uint8_t => "UINT8",
+ .int8_t => "INT8",
+ .uint16_t => "UINT16",
+ .int16_t => "INT16",
+ .uint32_t => "UINT32",
+ .int32_t => "INT32",
+ .uint64_t => "UINT64",
+ .int64_t => "INT64",
+ .uintptr_t => "UINTPTR",
+ .intptr_t => "INTPTR",
+ else => null,
+ };
+ }
+
+ pub fn renderLiteralPrefix(self: CType, writer: anytype, kind: Kind) @TypeOf(writer).Error!void {
+ switch (self.tag()) {
+ .void => unreachable,
+ ._Bool,
+ .char,
+ .@"signed char",
+ .short,
+ .@"unsigned short",
+ .bool,
+ .size_t,
+ .ptrdiff_t,
+ .uintptr_t,
+ .intptr_t,
+ => |t| switch (kind) {
+ else => try writer.print("({s})", .{@tagName(t)}),
+ .global => {},
+ },
+ .int,
+ .long,
+ .@"long long",
+ .@"unsigned char",
+ .@"unsigned int",
+ .@"unsigned long",
+ .@"unsigned long long",
+ .float,
+ .double,
+ .@"long double",
+ => {},
+ .uint8_t,
+ .int8_t,
+ .uint16_t,
+ .int16_t,
+ .uint32_t,
+ .int32_t,
+ .uint64_t,
+ .int64_t,
+ => try writer.print("{s}_C(", .{self.getStandardDefineAbbrev().?}),
+ .zig_u128,
+ .zig_i128,
+ .zig_f16,
+ .zig_f32,
+ .zig_f64,
+ .zig_f80,
+ .zig_f128,
+ .zig_c_longdouble,
+ => |t| try writer.print("zig_{s}_{s}(", .{
+ switch (kind) {
+ else => "make",
+ .global => "init",
+ },
+ @tagName(t)["zig_".len..],
+ }),
+ .pointer,
+ .pointer_const,
+ .pointer_volatile,
+ .pointer_const_volatile,
+ => unreachable,
+ .array,
+ .vector,
+ => try writer.writeByte('{'),
+ .fwd_anon_struct,
+ .fwd_anon_union,
+ .fwd_struct,
+ .fwd_union,
+ .unnamed_struct,
+ .unnamed_union,
+ .packed_unnamed_struct,
+ .packed_unnamed_union,
+ .anon_struct,
+ .anon_union,
+ .@"struct",
+ .@"union",
+ .packed_struct,
+ .packed_union,
+ .function,
+ .varargs_function,
+ => unreachable,
+ }
+ }
+
+ pub fn renderLiteralSuffix(self: CType, writer: anytype) @TypeOf(writer).Error!void {
+ switch (self.tag()) {
+ .void => unreachable,
+ ._Bool => {},
+ .char,
+ .@"signed char",
+ .short,
+ .int,
+ => {},
+ .long => try writer.writeByte('l'),
+ .@"long long" => try writer.writeAll("ll"),
+ .@"unsigned char",
+ .@"unsigned short",
+ .@"unsigned int",
+ => try writer.writeByte('u'),
+ .@"unsigned long",
+ .size_t,
+ .uintptr_t,
+ => try writer.writeAll("ul"),
+ .@"unsigned long long" => try writer.writeAll("ull"),
+ .float => try writer.writeByte('f'),
+ .double => {},
+ .@"long double" => try writer.writeByte('l'),
+ .bool,
+ .ptrdiff_t,
+ .intptr_t,
+ => {},
+ .uint8_t,
+ .int8_t,
+ .uint16_t,
+ .int16_t,
+ .uint32_t,
+ .int32_t,
+ .uint64_t,
+ .int64_t,
+ .zig_u128,
+ .zig_i128,
+ .zig_f16,
+ .zig_f32,
+ .zig_f64,
+ .zig_f80,
+ .zig_f128,
+ .zig_c_longdouble,
+ => try writer.writeByte(')'),
+ .pointer,
+ .pointer_const,
+ .pointer_volatile,
+ .pointer_const_volatile,
+ => unreachable,
+ .array,
+ .vector,
+ => try writer.writeByte('}'),
+ .fwd_anon_struct,
+ .fwd_anon_union,
+ .fwd_struct,
+ .fwd_union,
+ .unnamed_struct,
+ .unnamed_union,
+ .packed_unnamed_struct,
+ .packed_unnamed_union,
+ .anon_struct,
+ .anon_union,
+ .@"struct",
+ .@"union",
+ .packed_struct,
+ .packed_union,
+ .function,
+ .varargs_function,
+ => unreachable,
+ }
+ }
+
+ pub fn byteSize(self: CType, store: Store.Set, target: Target) u64 {
+ return switch (self.tag()) {
+ .void => 0,
+ .char, .@"signed char", ._Bool, .@"unsigned char", .bool, .uint8_t, .int8_t => 1,
+ .short => target.c_type_byte_size(.short),
+ .int => target.c_type_byte_size(.int),
+ .long => target.c_type_byte_size(.long),
+ .@"long long" => target.c_type_byte_size(.longlong),
+ .@"unsigned short" => target.c_type_byte_size(.ushort),
+ .@"unsigned int" => target.c_type_byte_size(.uint),
+ .@"unsigned long" => target.c_type_byte_size(.ulong),
+ .@"unsigned long long" => target.c_type_byte_size(.ulonglong),
+ .float => target.c_type_byte_size(.float),
+ .double => target.c_type_byte_size(.double),
+ .@"long double" => target.c_type_byte_size(.longdouble),
+ .size_t,
+ .ptrdiff_t,
+ .uintptr_t,
+ .intptr_t,
+ .pointer,
+ .pointer_const,
+ .pointer_volatile,
+ .pointer_const_volatile,
+ => @divExact(target.cpu.arch.ptrBitWidth(), 8),
+ .uint16_t, .int16_t, .zig_f16 => 2,
+ .uint32_t, .int32_t, .zig_f32 => 4,
+ .uint64_t, .int64_t, .zig_f64 => 8,
+ .zig_u128, .zig_i128, .zig_f128 => 16,
+ .zig_f80 => if (target.c_type_bit_size(.longdouble) == 80)
+ target.c_type_byte_size(.longdouble)
+ else
+ 16,
+ .zig_c_longdouble => target.c_type_byte_size(.longdouble),
+
+ .array,
+ .vector,
+ => {
+ const data = self.cast(Payload.Sequence).?.data;
+ return data.len * store.indexToCType(data.elem_type).byteSize(store, target);
+ },
+
+ .fwd_anon_struct,
+ .fwd_anon_union,
+ .fwd_struct,
+ .fwd_union,
+ .unnamed_struct,
+ .unnamed_union,
+ .packed_unnamed_struct,
+ .packed_unnamed_union,
+ .anon_struct,
+ .anon_union,
+ .@"struct",
+ .@"union",
+ .packed_struct,
+ .packed_union,
+ .function,
+ .varargs_function,
+ => unreachable,
+ };
+ }
+
pub fn isPacked(self: CType) bool {
return switch (self.tag()) {
else => false,
@@ -787,26 +1077,26 @@ pub const CType = extern union {
};
}
- fn tagFromIntInfo(signedness: std.builtin.Signedness, bits: u16) Tag {
- return switch (bits) {
+ fn tagFromIntInfo(int_info: std.builtin.Type.Int) Tag {
+ return switch (int_info.bits) {
0 => .void,
- 1...8 => switch (signedness) {
+ 1...8 => switch (int_info.signedness) {
.unsigned => .uint8_t,
.signed => .int8_t,
},
- 9...16 => switch (signedness) {
+ 9...16 => switch (int_info.signedness) {
.unsigned => .uint16_t,
.signed => .int16_t,
},
- 17...32 => switch (signedness) {
+ 17...32 => switch (int_info.signedness) {
.unsigned => .uint32_t,
.signed => .int32_t,
},
- 33...64 => switch (signedness) {
+ 33...64 => switch (int_info.signedness) {
.unsigned => .uint64_t,
.signed => .int64_t,
},
- 65...128 => switch (signedness) {
+ 65...128 => switch (int_info.signedness) {
.unsigned => .zig_u128,
.signed => .zig_i128,
},
@@ -945,31 +1235,27 @@ pub const CType = extern union {
.c_ulong => self.init(.@"unsigned long"),
.c_longlong => self.init(.@"long long"),
.c_ulonglong => self.init(.@"unsigned long long"),
- else => {
- const info = ty.intInfo(target);
- const t = tagFromIntInfo(info.signedness, info.bits);
- switch (t) {
- .void => unreachable,
- else => self.init(t),
- .array => switch (kind) {
- .forward, .complete, .global => {
- const abi_size = ty.abiSize(target);
- const abi_align = ty.abiAlignment(target);
- self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{
- .len = @divExact(abi_size, abi_align),
- .elem_type = tagFromIntInfo(
- .unsigned,
- @intCast(u16, abi_align * 8),
- ).toIndex(),
- } } };
- self.value = .{ .cty = initPayload(&self.storage.seq) };
- },
- .forward_parameter,
- .parameter,
- => try self.initArrayParameter(ty, kind, lookup),
- .payload => unreachable,
+ else => switch (tagFromIntInfo(ty.intInfo(target))) {
+ .void => unreachable,
+ else => |t| self.init(t),
+ .array => switch (kind) {
+ .forward, .complete, .global => {
+ const abi_size = ty.abiSize(target);
+ const abi_align = ty.abiAlignment(target);
+ self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{
+ .len = @divExact(abi_size, abi_align),
+ .elem_type = tagFromIntInfo(.{
+ .signedness = .unsigned,
+ .bits = @intCast(u16, abi_align * 8),
+ }).toIndex(),
+ } } };
+ self.value = .{ .cty = initPayload(&self.storage.seq) };
},
- }
+ .forward_parameter,
+ .parameter,
+ => try self.initArrayParameter(ty, kind, lookup),
+ .payload => unreachable,
+ },
},
} else switch (ty.zigTypeTag()) {
.Frame => unreachable,
diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig
index f8a1928dd1..70ac38d6fa 100644
--- a/test/behavior/bitcast.zig
+++ b/test/behavior/bitcast.zig
@@ -368,7 +368,6 @@ test "comptime @bitCast packed struct to int and back" {
}
test "comptime bitcast with fields following f80" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
From a8f4ac2b94e7945a5a1623547f258f5f32f12674 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Fri, 3 Mar 2023 00:18:35 -0500
Subject: [PATCH 020/294] CBE: implement big integer and vector comparisons
---
lib/zig.h | 313 +++++++++++++++++++++++++++-------
src/codegen/c.zig | 342 ++++++++++++++++++++++++--------------
src/codegen/c/type.zig | 124 ++++++++++++++
src/type.zig | 2 +-
test/behavior/bitcast.zig | 2 -
test/behavior/math.zig | 1 -
test/behavior/vector.zig | 2 -
7 files changed, 595 insertions(+), 191 deletions(-)
diff --git a/lib/zig.h b/lib/zig.h
index 7353ea935d..c39cffee24 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -37,6 +37,14 @@ typedef char bool;
#define zig_has_attribute(attribute) 0
#endif
+#if __LITTLE_ENDIAN__ || _MSC_VER
+#define zig_little_endian 1
+#define zig_big_endian 0
+#else
+#define zig_little_endian 0
+#define zig_big_endian 1
+#endif
+
#if __STDC_VERSION__ >= 201112L
#define zig_threadlocal _Thread_local
#elif defined(__GNUC__)
@@ -1379,7 +1387,7 @@ typedef signed __int128 zig_i128;
#else /* zig_has_int128 */
-#if __LITTLE_ENDIAN__ || _MSC_VER
+#if zig_little_endian
typedef struct { zig_align(16) uint64_t lo; uint64_t hi; } zig_u128;
typedef struct { zig_align(16) uint64_t lo; int64_t hi; } zig_i128;
#else
@@ -1909,6 +1917,177 @@ static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, uint8_t bits) {
return zig_bitcast_i128(zig_bit_reverse_u128(zig_bitcast_u128(val), bits));
}
+/* ========================== Big Integer Support =========================== */
+
+static inline uint16_t zig_big_bytes(uint16_t bits) {
+ uint16_t bytes = (bits + CHAR_BIT - 1) / CHAR_BIT;
+ uint16_t alignment = 16;
+ while (alignment / 2 >= bytes) alignment /= 2;
+ return (bytes + alignment - 1) / alignment * alignment;
+}
+
+static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ const uint8_t *lhs_bytes = lhs;
+ const uint8_t *rhs_bytes = rhs;
+ uint16_t byte_offset = 0;
+ bool do_signed = is_signed;
+ uint16_t remaining_bytes = zig_big_bytes(bits);
+
+#if zig_little_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+ int32_t limb_cmp;
+
+#if zig_little_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ zig_i128 lhs_limb;
+ zig_i128 rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_cmp = zig_cmp_i128(lhs_limb, rhs_limb);
+ do_signed = false;
+ } else {
+ zig_u128 lhs_limb;
+ zig_u128 rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_cmp = zig_cmp_u128(lhs_limb, rhs_limb);
+ }
+
+ if (limb_cmp != 0) return limb_cmp;
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int64_t lhs_limb;
+ int64_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint64_t lhs_limb;
+ uint64_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int32_t lhs_limb;
+ int32_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint32_t lhs_limb;
+ uint32_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int16_t lhs_limb;
+ int16_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint16_t lhs_limb;
+ uint16_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int8_t lhs_limb;
+ int8_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint8_t lhs_limb;
+ uint8_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+ }
+
+ return 0;
+}
+
/* ========================= Floating Point Support ========================= */
#if _MSC_VER
@@ -1933,7 +2112,6 @@ static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, uint8_t bits) {
#define zig_make_special_f64(sign, name, arg, repr) sign zig_make_f64(__builtin_##name, )(arg)
#define zig_make_special_f80(sign, name, arg, repr) sign zig_make_f80(__builtin_##name, )(arg)
#define zig_make_special_f128(sign, name, arg, repr) sign zig_make_f128(__builtin_##name, )(arg)
-#define zig_make_special_c_longdouble(sign, name, arg, repr) sign zig_make_c_longdouble(__builtin_##name, )(arg)
#else
#define zig_has_float_builtins 0
#define zig_make_special_f16(sign, name, arg, repr) zig_float_from_repr_f16(repr)
@@ -1941,13 +2119,13 @@ static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, uint8_t bits) {
#define zig_make_special_f64(sign, name, arg, repr) zig_float_from_repr_f64(repr)
#define zig_make_special_f80(sign, name, arg, repr) zig_float_from_repr_f80(repr)
#define zig_make_special_f128(sign, name, arg, repr) zig_float_from_repr_f128(repr)
-#define zig_make_special_c_longdouble(sign, name, arg, repr) zig_float_from_repr_c_longdouble(repr)
#endif
#define zig_has_f16 1
#define zig_bitSizeOf_f16 16
+typedef int16_t zig_repr_f16;
#define zig_libc_name_f16(name) __##name##h
-#define zig_make_special_constant_f16(sign, name, arg, repr) zig_make_special_f16(sign, name, arg, repr)
+#define zig_init_special_f16(sign, name, arg, repr) zig_make_special_f16(sign, name, arg, repr)
#if FLT_MANT_DIG == 11
typedef float zig_f16;
#define zig_make_f16(fp, repr) fp##f
@@ -1956,7 +2134,9 @@ typedef double zig_f16;
#define zig_make_f16(fp, repr) fp
#elif LDBL_MANT_DIG == 11
#define zig_bitSizeOf_c_longdouble 16
-typedef uint16_t zig_repr_c_longdouble;
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f16 zig_repr_c_longdouble;
+#endif
typedef long double zig_f16;
#define zig_make_f16(fp, repr) fp##l
#elif FLT16_MANT_DIG == 11 && (zig_has_builtin(inff16) || defined(zig_gnuc))
@@ -1973,17 +2153,18 @@ typedef int16_t zig_f16;
#define zig_make_f16(fp, repr) repr
#undef zig_make_special_f16
#define zig_make_special_f16(sign, name, arg, repr) repr
-#undef zig_make_special_constant_f16
-#define zig_make_special_constant_f16(sign, name, arg, repr) repr
+#undef zig_init_special_f16
+#define zig_init_special_f16(sign, name, arg, repr) repr
#endif
#define zig_has_f32 1
#define zig_bitSizeOf_f32 32
+typedef int32_t zig_repr_f32;
#define zig_libc_name_f32(name) name##f
#if _MSC_VER
-#define zig_make_special_constant_f32(sign, name, arg, repr) sign zig_make_f32(zig_msvc_flt_##name, )
+#define zig_init_special_f32(sign, name, arg, repr) sign zig_make_f32(zig_msvc_flt_##name, )
#else
-#define zig_make_special_constant_f32(sign, name, arg, repr) zig_make_special_f32(sign, name, arg, repr)
+#define zig_init_special_f32(sign, name, arg, repr) zig_make_special_f32(sign, name, arg, repr)
#endif
#if FLT_MANT_DIG == 24
typedef float zig_f32;
@@ -1993,7 +2174,9 @@ typedef double zig_f32;
#define zig_make_f32(fp, repr) fp
#elif LDBL_MANT_DIG == 24
#define zig_bitSizeOf_c_longdouble 32
-typedef uint32_t zig_repr_c_longdouble;
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f32 zig_repr_c_longdouble;
+#endif
typedef long double zig_f32;
#define zig_make_f32(fp, repr) fp##l
#elif FLT32_MANT_DIG == 24
@@ -2007,21 +2190,24 @@ typedef int32_t zig_f32;
#define zig_make_f32(fp, repr) repr
#undef zig_make_special_f32
#define zig_make_special_f32(sign, name, arg, repr) repr
-#undef zig_make_special_constant_f32
-#define zig_make_special_constant_f32(sign, name, arg, repr) repr
+#undef zig_init_special_f32
+#define zig_init_special_f32(sign, name, arg, repr) repr
#endif
#define zig_has_f64 1
#define zig_bitSizeOf_f64 64
+typedef int64_t zig_repr_f64;
#define zig_libc_name_f64(name) name
#if _MSC_VER
#ifdef ZIG_TARGET_ABI_MSVC
#define zig_bitSizeOf_c_longdouble 64
-typedef uint64_t zig_repr_c_longdouble;
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f64 zig_repr_c_longdouble;
#endif
-#define zig_make_special_constant_f64(sign, name, arg, repr) sign zig_make_f64(zig_msvc_flt_##name, )
+#endif
+#define zig_init_special_f64(sign, name, arg, repr) sign zig_make_f64(zig_msvc_flt_##name, )
#else /* _MSC_VER */
-#define zig_make_special_constant_f64(sign, name, arg, repr) zig_make_special_f64(sign, name, arg, repr)
+#define zig_init_special_f64(sign, name, arg, repr) zig_make_special_f64(sign, name, arg, repr)
#endif /* _MSC_VER */
#if FLT_MANT_DIG == 53
typedef float zig_f64;
@@ -2031,7 +2217,9 @@ typedef double zig_f64;
#define zig_make_f64(fp, repr) fp
#elif LDBL_MANT_DIG == 53
#define zig_bitSizeOf_c_longdouble 64
-typedef uint64_t zig_repr_c_longdouble;
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f64 zig_repr_c_longdouble;
+#endif
typedef long double zig_f64;
#define zig_make_f64(fp, repr) fp##l
#elif FLT64_MANT_DIG == 53
@@ -2048,14 +2236,15 @@ typedef int64_t zig_f64;
#define zig_make_f64(fp, repr) repr
#undef zig_make_special_f64
#define zig_make_special_f64(sign, name, arg, repr) repr
-#undef zig_make_special_constant_f64
-#define zig_make_special_constant_f64(sign, name, arg, repr) repr
+#undef zig_init_special_f64
+#define zig_init_special_f64(sign, name, arg, repr) repr
#endif
#define zig_has_f80 1
#define zig_bitSizeOf_f80 80
+typedef zig_i128 zig_repr_f80;
#define zig_libc_name_f80(name) __##name##x
-#define zig_make_special_constant_f80(sign, name, arg, repr) zig_make_special_f80(sign, name, arg, repr)
+#define zig_init_special_f80(sign, name, arg, repr) zig_make_special_f80(sign, name, arg, repr)
#if FLT_MANT_DIG == 64
typedef float zig_f80;
#define zig_make_f80(fp, repr) fp##f
@@ -2064,7 +2253,9 @@ typedef double zig_f80;
#define zig_make_f80(fp, repr) fp
#elif LDBL_MANT_DIG == 64
#define zig_bitSizeOf_c_longdouble 80
-typedef zig_u128 zig_repr_c_longdouble;
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f80 zig_repr_c_longdouble;
+#endif
typedef long double zig_f80;
#define zig_make_f80(fp, repr) fp##l
#elif FLT80_MANT_DIG == 64
@@ -2084,14 +2275,15 @@ typedef zig_i128 zig_f80;
#define zig_make_f80(fp, repr) repr
#undef zig_make_special_f80
#define zig_make_special_f80(sign, name, arg, repr) repr
-#undef zig_make_special_constant_f80
-#define zig_make_special_constant_f80(sign, name, arg, repr) repr
+#undef zig_init_special_f80
+#define zig_init_special_f80(sign, name, arg, repr) repr
#endif
#define zig_has_f128 1
#define zig_bitSizeOf_f128 128
+typedef zig_i128 zig_repr_f128;
#define zig_libc_name_f128(name) name##q
-#define zig_make_special_constant_f128(sign, name, arg, repr) zig_make_special_f128(sign, name, arg, repr)
+#define zig_init_special_f128(sign, name, arg, repr) zig_make_special_f128(sign, name, arg, repr)
#if FLT_MANT_DIG == 113
typedef float zig_f128;
#define zig_make_f128(fp, repr) fp##f
@@ -2100,7 +2292,9 @@ typedef double zig_f128;
#define zig_make_f128(fp, repr) fp
#elif LDBL_MANT_DIG == 113
#define zig_bitSizeOf_c_longdouble 128
-typedef zig_u128 zig_repr_c_longdouble;
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f128 zig_repr_c_longdouble;
+#endif
typedef long double zig_f128;
#define zig_make_f128(fp, repr) fp##l
#elif FLT128_MANT_DIG == 113
@@ -2122,63 +2316,44 @@ typedef zig_i128 zig_f128;
#define zig_make_f128(fp, repr) repr
#undef zig_make_special_f128
#define zig_make_special_f128(sign, name, arg, repr) repr
-#undef zig_make_special_constant_f128
-#define zig_make_special_constant_f128(sign, name, arg, repr) repr
+#undef zig_init_special_f128
+#define zig_init_special_f128(sign, name, arg, repr) repr
#endif
-#define zig_has_c_longdouble 1
-
-#ifdef ZIG_TARGET_ABI_MSVC
-#define zig_libc_name_c_longdouble(name) name
-#else
-#define zig_libc_name_c_longdouble(name) name##l
-#endif
-
-#define zig_make_special_constant_c_longdouble(sign, name, arg, repr) zig_make_special_c_longdouble(sign, name, arg, repr)
#ifdef zig_bitSizeOf_c_longdouble
+#define zig_has_c_longdouble 1
#ifdef ZIG_TARGET_ABI_MSVC
#undef zig_bitSizeOf_c_longdouble
#define zig_bitSizeOf_c_longdouble 64
-typedef uint64_t zig_repr_c_longdouble;
typedef zig_f64 zig_c_longdouble;
-#define zig_make_c_longdouble(fp, repr) fp
+typedef zig_repr_f64 zig_repr_c_longdouble;
#else
typedef long double zig_c_longdouble;
-#define zig_make_c_longdouble(fp, repr) fp##l
#endif
#else /* zig_bitSizeOf_c_longdouble */
-#undef zig_has_c_longdouble
#define zig_has_c_longdouble 0
-#define zig_bitSizeOf_c_longdouble 80
-typedef zig_u128 zig_repr_c_longdouble;
-#define zig_compiler_rt_abbrev_c_longdouble zig_compiler_rt_abbrev_f80
#define zig_bitSizeOf_repr_c_longdouble 128
-typedef zig_i128 zig_c_longdouble;
-#define zig_make_c_longdouble(fp, repr) repr
-#undef zig_make_special_c_longdouble
-#define zig_make_special_c_longdouble(sign, name, arg, repr) repr
-#undef zig_make_special_constant_c_longdouble
-#define zig_make_special_constant_c_longdouble(sign, name, arg, repr) repr
+typedef zig_f128 zig_c_longdouble;
+typedef zig_repr_f128 zig_repr_c_longdouble;
#endif /* zig_bitSizeOf_c_longdouble */
#if !zig_has_float_builtins
-#define zig_float_from_repr(Type, ReprType) \
- static inline zig_##Type zig_float_from_repr_##Type(ReprType repr) { \
+#define zig_float_from_repr(Type) \
+ static inline zig_##Type zig_float_from_repr_##Type(zig_repr_##Type repr) { \
zig_##Type result; \
memcpy(&result, &repr, sizeof(result)); \
return result; \
}
-zig_float_from_repr(f16, uint16_t)
-zig_float_from_repr(f32, uint32_t)
-zig_float_from_repr(f64, uint64_t)
-zig_float_from_repr(f80, zig_u128)
-zig_float_from_repr(f128, zig_u128)
-zig_float_from_repr(c_longdouble, zig_repr_c_longdouble)
+zig_float_from_repr(f16)
+zig_float_from_repr(f32)
+zig_float_from_repr(f64)
+zig_float_from_repr(f80)
+zig_float_from_repr(f128)
#endif
#define zig_cast_f16 (zig_f16)
@@ -2187,11 +2362,9 @@ zig_float_from_repr(c_longdouble, zig_repr_c_longdouble)
#if _MSC_VER && !zig_has_f128
#define zig_cast_f80
-#define zig_cast_c_longdouble
#define zig_cast_f128
#else
#define zig_cast_f80 (zig_f80)
-#define zig_cast_c_longdouble (zig_c_longdouble)
#define zig_cast_f128 (zig_f128)
#endif
@@ -2320,7 +2493,6 @@ zig_float_builtins(f32)
zig_float_builtins(f64)
zig_float_builtins(f80)
zig_float_builtins(f128)
-zig_float_builtins(c_longdouble)
#if _MSC_VER && (_M_IX86 || _M_X64)
@@ -2563,6 +2735,29 @@ zig_msvc_atomics_128op(u128, max)
#endif /* _MSC_VER && (_M_IX86 || _M_X64) */
+/* ============================= Vector Support ============================= */
+
+#define zig_cmp_vec(operation, operator) \
+ static inline void zig_##operation##_vec(bool *result, const void *lhs, const void *rhs, uint32_t len, bool is_signed, uint16_t elem_bits) { \
+ uint32_t index = 0; \
+ const uint8_t *lhs_ptr = lhs; \
+ const uint8_t *rhs_ptr = rhs; \
+ uint16_t elem_bytes = zig_big_bytes(elem_bits); \
+ \
+ while (index < len) { \
+ result[index] = zig_cmp_big(lhs_ptr, rhs_ptr, is_signed, elem_bits) operator 0; \
+ lhs_ptr += elem_bytes; \
+ rhs_ptr += elem_bytes; \
+ index += 1; \
+ } \
+ }
+zig_cmp_vec(eq, ==)
+zig_cmp_vec(ne, !=)
+zig_cmp_vec(lt, < )
+zig_cmp_vec(le, <=)
+zig_cmp_vec(gt, > )
+zig_cmp_vec(ge, >=)
+
/* ======================== Special Case Intrinsics ========================= */
#if (_MSC_VER && _M_X64) || defined(__x86_64__)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index addd3c8332..f4a817cecd 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -112,11 +112,7 @@ const ValueRenderLocation = enum {
}
};
-const BuiltinInfo = enum {
- None,
- Range,
- Bits,
-};
+const BuiltinInfo = enum { none, bits };
const reserved_idents = std.ComptimeStringMap(void, .{
// C language
@@ -440,6 +436,10 @@ pub const Function = struct {
return f.object.dg.typeToCType(ty, kind);
}
+ fn byteSize(f: *Function, cty: CType) u64 {
+ return f.object.dg.byteSize(cty);
+ }
+
fn renderType(f: *Function, w: anytype, t: Type) !void {
return f.object.dg.renderType(w, t);
}
@@ -1003,8 +1003,9 @@ pub const DeclGen = struct {
// return dg.fail("Only quiet nans are supported in global variable initializers", .{});
}
- try writer.writeAll("zig_make_special_");
- if (location == .StaticInitializer) try writer.writeAll("constant_");
+ try writer.writeAll("zig_");
+ try writer.writeAll(if (location == .StaticInitializer) "init" else "make");
+ try writer.writeAll("_special_");
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
if (std.math.signbit(f128_val)) try writer.writeByte('-');
@@ -1565,6 +1566,10 @@ pub const DeclGen = struct {
return dg.ctypes.typeToCType(dg.gpa, ty, dg.module, kind);
}
+ fn byteSize(dg: *DeclGen, cty: CType) u64 {
+ return cty.byteSize(dg.ctypes.set, dg.module.getTarget());
+ }
+
/// Renders a type as a single identifier, generating intermediate typedefs
/// if necessary.
///
@@ -1861,51 +1866,64 @@ pub const DeclGen = struct {
}
fn renderTypeForBuiltinFnName(dg: *DeclGen, writer: anytype, ty: Type) !void {
- const target = dg.module.getTarget();
- if (ty.isAbiInt()) {
- const int_info = ty.intInfo(target);
- const c_bits = toCIntBits(int_info.bits) orelse
- return dg.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
- try writer.print("{c}{d}", .{ signAbbrev(int_info.signedness), c_bits });
- } else if (ty.isRuntimeFloat()) {
- try ty.print(writer, dg.module);
- } else if (ty.isPtrAtRuntime()) {
- try writer.print("p{d}", .{ty.bitSize(target)});
- } else if (ty.zigTypeTag() == .Bool) {
- try writer.print("u8", .{});
- } else return dg.fail("TODO: CBE: implement renderTypeForBuiltinFnName for type {}", .{
- ty.fmt(dg.module),
- });
+ try dg.renderCTypeForBuiltinFnName(writer, try dg.typeToCType(ty, .complete));
+ }
+
+ fn renderCTypeForBuiltinFnName(dg: *DeclGen, writer: anytype, cty: CType) !void {
+ switch (cty.tag()) {
+ else => try writer.print("{c}{d}", .{
+ if (cty.isBool())
+ signAbbrev(.unsigned)
+ else if (cty.isInteger())
+ signAbbrev(cty.signedness() orelse .unsigned)
+ else if (cty.isFloat())
+ @as(u8, 'f')
+ else if (cty.isPointer())
+ @as(u8, 'p')
+ else
+ return dg.fail("TODO: CBE: implement renderTypeForBuiltinFnName for type {}", .{
+ cty.tag(),
+ }),
+ if (cty.isFloat()) cty.floatActiveBits(dg.module.getTarget()) else dg.byteSize(cty) * 8,
+ }),
+ .array => try writer.writeAll("big"),
+ .vector => try writer.writeAll("vec"),
+ }
}
fn renderBuiltinInfo(dg: *DeclGen, writer: anytype, ty: Type, info: BuiltinInfo) !void {
- const target = dg.module.getTarget();
switch (info) {
- .None => {},
- .Range => {
- var arena = std.heap.ArenaAllocator.init(dg.gpa);
- defer arena.deinit();
-
- const ExpectedContents = union { u: Value.Payload.U64, i: Value.Payload.I64 };
- var stack align(@alignOf(ExpectedContents)) =
- std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator());
-
- const int_info = ty.intInfo(target);
- if (int_info.signedness == .signed) {
- const min_val = try ty.minInt(stack.get(), target);
- try writer.print(", {x}", .{try dg.fmtIntLiteral(ty, min_val, .Other)});
+ .none => {},
+ .bits => {
+ const cty = try dg.typeToCType(ty, .complete);
+ if (cty.castTag(.vector)) |pl| {
+ var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = pl.data.len };
+ try writer.print(", {}", .{try dg.fmtIntLiteral(
+ Type.u32,
+ Value.initPayload(&len_pl.base),
+ .FunctionArgument,
+ )});
}
- const max_val = try ty.maxInt(stack.get(), target);
- try writer.print(", {x}", .{try dg.fmtIntLiteral(ty, max_val, .Other)});
- },
- .Bits => {
- var bits_pl = Value.Payload.U64{
- .base = .{ .tag = .int_u64 },
- .data = ty.bitSize(target),
- };
- const bits_val = Value.initPayload(&bits_pl.base);
- try writer.print(", {}", .{try dg.fmtIntLiteral(Type.u8, bits_val, .Other)});
+ const target = dg.module.getTarget();
+ const elem_ty = ty.shallowElemType();
+ const elem_info = if (elem_ty.isAbiInt())
+ elem_ty.intInfo(target)
+ else
+ std.builtin.Type.Int{
+ .signedness = .unsigned,
+ .bits = @intCast(u16, elem_ty.bitSize(target)),
+ };
+ switch (cty.tag()) {
+ else => {},
+ .array, .vector => try writer.print(", {}", .{elem_info.signedness == .signed}),
+ }
+
+ var bits_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = elem_info.bits };
+ try writer.print(", {}", .{try dg.fmtIntLiteral(switch (cty.tag()) {
+ else => Type.u8,
+ .array, .vector => Type.u16,
+ }, Value.initPayload(&bits_pl.base), .FunctionArgument)});
},
}
}
@@ -2758,35 +2776,35 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
// TODO use a different strategy for add, sub, mul, div
// that communicates to the optimizer that wrapping is UB.
- .add => try airBinOp(f, inst, "+", "add", .None),
- .sub => try airBinOp(f, inst, "-", "sub", .None),
- .mul => try airBinOp(f, inst, "*", "mul", .None),
+ .add => try airBinOp(f, inst, "+", "add", .none),
+ .sub => try airBinOp(f, inst, "-", "sub", .none),
+ .mul => try airBinOp(f, inst, "*", "mul", .none),
.neg => try airFloatNeg(f, inst),
- .div_float => try airBinBuiltinCall(f, inst, "div", .None),
+ .div_float => try airBinBuiltinCall(f, inst, "div", .none),
- .div_trunc, .div_exact => try airBinOp(f, inst, "/", "div_trunc", .None),
+ .div_trunc, .div_exact => try airBinOp(f, inst, "/", "div_trunc", .none),
.rem => blk: {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const lhs_ty = f.air.typeOf(bin_op.lhs);
// For binary operations @TypeOf(lhs)==@TypeOf(rhs),
// so we only check one.
break :blk if (lhs_ty.isInt())
- try airBinOp(f, inst, "%", "rem", .None)
+ try airBinOp(f, inst, "%", "rem", .none)
else
try airBinFloatOp(f, inst, "fmod");
},
- .div_floor => try airBinBuiltinCall(f, inst, "div_floor", .None),
- .mod => try airBinBuiltinCall(f, inst, "mod", .None),
+ .div_floor => try airBinBuiltinCall(f, inst, "div_floor", .none),
+ .mod => try airBinBuiltinCall(f, inst, "mod", .none),
- .addwrap => try airBinBuiltinCall(f, inst, "addw", .Bits),
- .subwrap => try airBinBuiltinCall(f, inst, "subw", .Bits),
- .mulwrap => try airBinBuiltinCall(f, inst, "mulw", .Bits),
+ .addwrap => try airBinBuiltinCall(f, inst, "addw", .bits),
+ .subwrap => try airBinBuiltinCall(f, inst, "subw", .bits),
+ .mulwrap => try airBinBuiltinCall(f, inst, "mulw", .bits),
- .add_sat => try airBinBuiltinCall(f, inst, "adds", .Bits),
- .sub_sat => try airBinBuiltinCall(f, inst, "subs", .Bits),
- .mul_sat => try airBinBuiltinCall(f, inst, "muls", .Bits),
- .shl_sat => try airBinBuiltinCall(f, inst, "shls", .Bits),
+ .add_sat => try airBinBuiltinCall(f, inst, "adds", .bits),
+ .sub_sat => try airBinBuiltinCall(f, inst, "subs", .bits),
+ .mul_sat => try airBinBuiltinCall(f, inst, "muls", .bits),
+ .shl_sat => try airBinBuiltinCall(f, inst, "shls", .bits),
.sqrt => try airUnFloatOp(f, inst, "sqrt"),
.sin => try airUnFloatOp(f, inst, "sin"),
@@ -2805,34 +2823,38 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.mul_add => try airMulAdd(f, inst),
- .add_with_overflow => try airOverflow(f, inst, "add", .Bits),
- .sub_with_overflow => try airOverflow(f, inst, "sub", .Bits),
- .mul_with_overflow => try airOverflow(f, inst, "mul", .Bits),
- .shl_with_overflow => try airOverflow(f, inst, "shl", .Bits),
+ .add_with_overflow => try airOverflow(f, inst, "add", .bits),
+ .sub_with_overflow => try airOverflow(f, inst, "sub", .bits),
+ .mul_with_overflow => try airOverflow(f, inst, "mul", .bits),
+ .shl_with_overflow => try airOverflow(f, inst, "shl", .bits),
.min => try airMinMax(f, inst, '<', "fmin"),
.max => try airMinMax(f, inst, '>', "fmax"),
.slice => try airSlice(f, inst),
- .cmp_gt => try airCmpOp(f, inst, ">", "gt"),
- .cmp_gte => try airCmpOp(f, inst, ">=", "ge"),
- .cmp_lt => try airCmpOp(f, inst, "<", "lt"),
- .cmp_lte => try airCmpOp(f, inst, "<=", "le"),
+ .cmp_gt => try airCmpOp(f, inst, .gt),
+ .cmp_gte => try airCmpOp(f, inst, .gte),
+ .cmp_lt => try airCmpOp(f, inst, .lt),
+ .cmp_lte => try airCmpOp(f, inst, .lte),
- .cmp_eq => try airEquality(f, inst, "((", "==", "eq"),
- .cmp_neq => try airEquality(f, inst, "!((", "!=", "ne"),
+ .cmp_eq => try airEquality(f, inst, .eq),
+ .cmp_neq => try airEquality(f, inst, .neq),
- .cmp_vector => return f.fail("TODO: C backend: implement cmp_vector", .{}),
+ .cmp_vector => blk: {
+ const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
+ const extra = f.air.extraData(Air.VectorCmp, ty_pl.payload).data;
+ break :blk try cmpBuiltinCall(f, inst, extra, extra.compareOperator(), .operator, .bits);
+ },
.cmp_lt_errors_len => try airCmpLtErrorsLen(f, inst),
// bool_and and bool_or are non-short-circuit operations
- .bool_and, .bit_and => try airBinOp(f, inst, "&", "and", .None),
- .bool_or, .bit_or => try airBinOp(f, inst, "|", "or", .None),
- .xor => try airBinOp(f, inst, "^", "xor", .None),
- .shr, .shr_exact => try airBinBuiltinCall(f, inst, "shr", .None),
- .shl, => try airBinBuiltinCall(f, inst, "shlw", .Bits),
- .shl_exact => try airBinOp(f, inst, "<<", "shl", .None),
+ .bool_and, .bit_and => try airBinOp(f, inst, "&", "and", .none),
+ .bool_or, .bit_or => try airBinOp(f, inst, "|", "or", .none),
+ .xor => try airBinOp(f, inst, "^", "xor", .none),
+ .shr, .shr_exact => try airBinBuiltinCall(f, inst, "shr", .none),
+ .shl, => try airBinBuiltinCall(f, inst, "shlw", .bits),
+ .shl_exact => try airBinOp(f, inst, "<<", "shl", .none),
.not => try airNot (f, inst),
.optional_payload => try airOptionalPayload(f, inst),
@@ -2877,11 +2899,11 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.memcpy => try airMemcpy(f, inst),
.set_union_tag => try airSetUnionTag(f, inst),
.get_union_tag => try airGetUnionTag(f, inst),
- .clz => try airUnBuiltinCall(f, inst, "clz", .Bits),
- .ctz => try airUnBuiltinCall(f, inst, "ctz", .Bits),
- .popcount => try airUnBuiltinCall(f, inst, "popcount", .Bits),
- .byte_swap => try airUnBuiltinCall(f, inst, "byte_swap", .Bits),
- .bit_reverse => try airUnBuiltinCall(f, inst, "bit_reverse", .Bits),
+ .clz => try airUnBuiltinCall(f, inst, "clz", .bits),
+ .ctz => try airUnBuiltinCall(f, inst, "ctz", .bits),
+ .popcount => try airUnBuiltinCall(f, inst, "popcount", .bits),
+ .byte_swap => try airUnBuiltinCall(f, inst, "byte_swap", .bits),
+ .bit_reverse => try airUnBuiltinCall(f, inst, "bit_reverse", .bits),
.tag_name => try airTagName(f, inst),
.error_name => try airErrorName(f, inst),
.splat => try airSplat(f, inst),
@@ -3349,7 +3371,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValueDeref(writer, operand);
try writer.print(", {})", .{try f.fmtIntLiteral(bit_offset_ty, bit_offset_val)});
if (cant_cast) try writer.writeByte(')');
- try f.object.dg.renderBuiltinInfo(writer, field_ty, .Bits);
+ try f.object.dg.renderBuiltinInfo(writer, field_ty, .bits);
try writer.writeByte(')');
} else {
try f.writeCValue(writer, local, .Other);
@@ -3744,7 +3766,7 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info:
fn airNot(f: *Function, inst: Air.Inst.Index) !CValue {
const inst_ty = f.air.typeOfIndex(inst);
if (inst_ty.tag() != .bool)
- return try airUnBuiltinCall(f, inst, "not", .Bits);
+ return try airUnBuiltinCall(f, inst, "not", .bits);
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
@@ -3803,7 +3825,7 @@ fn airBinOp(
return local;
}
-fn airCmpOp(f: *Function, inst: Air.Inst.Index, operator: []const u8, operation: []const u8) !CValue {
+fn airCmpOp(f: *Function, inst: Air.Inst.Index, operator: std.math.CompareOperator) !CValue {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
if (f.liveness.isUnused(inst)) {
@@ -3813,10 +3835,11 @@ fn airCmpOp(f: *Function, inst: Air.Inst.Index, operator: []const u8, operation:
const operand_ty = f.air.typeOf(bin_op.lhs);
const target = f.object.dg.module.getTarget();
- if (operand_ty.isInt() and operand_ty.bitSize(target) > 64)
- return try cmpBuiltinCall(f, inst, operator, "cmp");
+ const operand_bits = operand_ty.bitSize(target);
+ if (operand_ty.isInt() and operand_bits > 64)
+ return cmpBuiltinCall(f, inst, bin_op, operator, .cmp, if (operand_bits > 128) .bits else .none);
if (operand_ty.isRuntimeFloat())
- return try cmpBuiltinCall(f, inst, operator, operation);
+ return cmpBuiltinCall(f, inst, bin_op, operator, .operator, .none);
const inst_ty = f.air.typeOfIndex(inst);
const lhs = try f.resolveInst(bin_op.lhs);
@@ -3829,7 +3852,7 @@ fn airCmpOp(f: *Function, inst: Air.Inst.Index, operator: []const u8, operation:
try writer.writeAll(" = ");
try f.writeCValue(writer, lhs, .Other);
try writer.writeByte(' ');
- try writer.writeAll(operator);
+ try writer.writeAll(compareOperatorC(operator));
try writer.writeByte(' ');
try f.writeCValue(writer, rhs, .Other);
try writer.writeAll(";\n");
@@ -3840,9 +3863,7 @@ fn airCmpOp(f: *Function, inst: Air.Inst.Index, operator: []const u8, operation:
fn airEquality(
f: *Function,
inst: Air.Inst.Index,
- negate_prefix: []const u8,
- operator: []const u8,
- operation: []const u8,
+ operator: std.math.CompareOperator,
) !CValue {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
@@ -3853,10 +3874,11 @@ fn airEquality(
const operand_ty = f.air.typeOf(bin_op.lhs);
const target = f.object.dg.module.getTarget();
- if (operand_ty.isInt() and operand_ty.bitSize(target) > 64)
- return try cmpBuiltinCall(f, inst, operator, "cmp");
+ const operand_bits = operand_ty.bitSize(target);
+ if (operand_ty.isInt() and operand_bits > 64)
+ return cmpBuiltinCall(f, inst, bin_op, operator, .cmp, if (operand_bits > 128) .bits else .none);
if (operand_ty.isRuntimeFloat())
- return try cmpBuiltinCall(f, inst, operator, operation);
+ return cmpBuiltinCall(f, inst, bin_op, operator, .operator, .none);
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);
@@ -3872,7 +3894,12 @@ fn airEquality(
// (A && B) || (C && (A == B))
// A = lhs.is_null ; B = rhs.is_null ; C = rhs.payload == lhs.payload
- try writer.writeAll(negate_prefix);
+ switch (operator) {
+ .eq => {},
+ .neq => try writer.writeByte('!'),
+ else => unreachable,
+ }
+ try writer.writeAll("((");
try f.writeCValue(writer, lhs, .Other);
try writer.writeAll(".is_null && ");
try f.writeCValue(writer, rhs, .Other);
@@ -3891,7 +3918,7 @@ fn airEquality(
try f.writeCValue(writer, lhs, .Other);
try writer.writeByte(' ');
- try writer.writeAll(operator);
+ try writer.writeAll(compareOperatorC(operator));
try writer.writeByte(' ');
try f.writeCValue(writer, rhs, .Other);
try writer.writeAll(";\n");
@@ -3972,7 +3999,7 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons
const inst_ty = f.air.typeOfIndex(inst);
const target = f.object.dg.module.getTarget();
if (inst_ty.isInt() and inst_ty.bitSize(target) > 64)
- return try airBinBuiltinCall(f, inst, operation[1..], .None);
+ return try airBinBuiltinCall(f, inst, operation[1..], .none);
if (inst_ty.isRuntimeFloat())
return try airBinFloatOp(f, inst, operation);
@@ -4418,12 +4445,35 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue {
// Ensure padding bits have the expected value.
if (dest_ty.isAbiInt()) {
+ const dest_cty = try f.typeToCType(dest_ty, .complete);
+ const dest_info = dest_ty.intInfo(target);
+ var wrap_ty_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dest_info.signedness) {
+ .unsigned => .int_unsigned,
+ .signed => .int_signed,
+ } }, .data = dest_info.bits };
+
try f.writeCValue(writer, local, .Other);
+ if (dest_cty.castTag(.array)) |pl| {
+ try writer.print("[{d}]", .{switch (target.cpu.arch.endian()) {
+ .Little => pl.data.len - 1,
+ .Big => 0,
+ }});
+ wrap_ty_pl.data -= 1;
+ wrap_ty_pl.data %= @intCast(u16, f.byteSize(f.indexToCType(pl.data.elem_type)) * 8);
+ wrap_ty_pl.data += 1;
+ }
+ const wrap_ty = Type.initPayload(&wrap_ty_pl.base);
try writer.writeAll(" = zig_wrap_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, dest_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, wrap_ty);
try writer.writeByte('(');
try f.writeCValue(writer, local, .Other);
- try f.object.dg.renderBuiltinInfo(writer, dest_ty, .Bits);
+ if (dest_cty.castTag(.array)) |pl| {
+ try writer.print("[{d}]", .{switch (target.cpu.arch.endian()) {
+ .Little => pl.data.len - 1,
+ .Big => 0,
+ }});
+ }
+ try f.object.dg.renderBuiltinInfo(writer, wrap_ty, .bits);
try writer.writeAll(");\n");
}
@@ -5438,7 +5488,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
try writer.writeByte(')');
if (cant_cast) try writer.writeByte(')');
- try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .Bits);
+ try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits);
try writer.writeAll(");\n");
if (inst_ty.eql(field_int_ty, f.object.dg.module)) return temp_local;
@@ -5871,7 +5921,7 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, operand, .FunctionArgument);
try writer.writeByte(')');
if (inst_ty.isInt() and operand_ty.isRuntimeFloat()) {
- try f.object.dg.renderBuiltinInfo(writer, inst_ty, .Bits);
+ try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits);
try writer.writeByte(')');
}
try writer.writeAll(";\n");
@@ -5972,29 +6022,46 @@ fn airBinBuiltinCall(
fn cmpBuiltinCall(
f: *Function,
inst: Air.Inst.Index,
- operator: []const u8,
- operation: []const u8,
+ data: anytype,
+ operator: std.math.CompareOperator,
+ operation: enum { cmp, operator },
+ info: BuiltinInfo,
) !CValue {
const inst_ty = f.air.typeOfIndex(inst);
- const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const operand_ty = f.air.typeOf(bin_op.lhs);
+ const operand_ty = f.air.typeOf(data.lhs);
- const lhs = try f.resolveInst(bin_op.lhs);
- const rhs = try f.resolveInst(bin_op.rhs);
- try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
+ const lhs = try f.resolveInst(data.lhs);
+ const rhs = try f.resolveInst(data.rhs);
+ try reap(f, inst, &.{ data.lhs, data.rhs });
+
+ const ref_ret = inst_ty.tag() != .bool;
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
- try f.writeCValue(writer, local, .Other);
- try writer.writeAll(" = zig_");
- try writer.writeAll(operation);
- try writer.writeByte('_');
+ if (!ref_ret) {
+ try f.writeCValue(writer, local, .Other);
+ try writer.writeAll(" = ");
+ }
+ try writer.print("zig_{s}_", .{switch (operation) {
+ else => @tagName(operation),
+ .operator => compareOperatorAbbrev(operator),
+ }});
try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
try writer.writeByte('(');
+ if (ref_ret) {
+ try f.writeCValue(writer, local, .FunctionArgument);
+ try writer.writeAll(", ");
+ }
try f.writeCValue(writer, lhs, .FunctionArgument);
try writer.writeAll(", ");
try f.writeCValue(writer, rhs, .FunctionArgument);
- try writer.print(") {s} {};\n", .{ operator, try f.fmtIntLiteral(Type.initTag(.i32), Value.zero) });
+ try f.object.dg.renderBuiltinInfo(writer, operand_ty, info);
+ try writer.writeByte(')');
+ if (!ref_ret) try writer.print(" {s} {}", .{
+ compareOperatorC(operator),
+ try f.fmtIntLiteral(Type.initTag(.i32), Value.zero),
+ });
+ try writer.writeAll(";\n");
return local;
}
@@ -6675,7 +6742,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll(", ");
try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
- try f.object.dg.renderBuiltinInfo(writer, inst_ty, .Bits);
+ try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits);
try writer.writeByte(')');
if (!empty) try writer.writeByte(')');
@@ -7094,6 +7161,28 @@ fn compilerRtAbbrev(ty: Type, target: std.Target) []const u8 {
} else unreachable;
}
+fn compareOperatorAbbrev(operator: std.math.CompareOperator) []const u8 {
+ return switch (operator) {
+ .lt => "lt",
+ .lte => "le",
+ .eq => "eq",
+ .gte => "ge",
+ .gt => "gt",
+ .neq => "ne",
+ };
+}
+
+fn compareOperatorC(operator: std.math.CompareOperator) []const u8 {
+ return switch (operator) {
+ .lt => "<",
+ .lte => "<=",
+ .eq => "==",
+ .gte => ">=",
+ .gt => ">",
+ .neq => "!=",
+ };
+}
+
fn StringLiteral(comptime WriterType: type) type {
// MSVC has a length limit of 16380 per string literal (before concatenation)
const max_char_len = 4;
@@ -7239,14 +7328,6 @@ fn formatIntLiteral(
.positive = undefined,
};
defer allocator.free(wrap.limbs);
- if (wrap.addWrap(int, one, data.int_info.signedness, c_bits) or
- data.int_info.signedness == .signed and wrap.subWrap(int, one, data.int_info.signedness, c_bits))
- return writer.print("{s}_{s}", .{
- data.cty.getStandardDefineAbbrev() orelse return writer.print("zig_{s}Int_{c}{d}", .{
- if (int.positive) "max" else "min", signAbbrev(data.int_info.signedness), c_bits,
- }),
- if (int.positive) "MAX" else "MIN",
- });
const c_limb_info: struct {
cty: CType,
@@ -7277,6 +7358,15 @@ fn formatIntLiteral(
},
};
if (c_limb_info.count == 1) {
+ if (wrap.addWrap(int, one, data.int_info.signedness, c_bits) or
+ data.int_info.signedness == .signed and wrap.subWrap(int, one, data.int_info.signedness, c_bits))
+ return writer.print("{s}_{s}", .{
+ data.cty.getStandardDefineAbbrev() orelse return writer.print("zig_{s}Int_{c}{d}", .{
+ if (int.positive) "max" else "min", signAbbrev(data.int_info.signedness), c_bits,
+ }),
+ if (int.positive) "MAX" else "MIN",
+ });
+
if (!int.positive) try writer.writeByte('-');
try data.cty.renderLiteralPrefix(writer, data.kind);
@@ -7310,7 +7400,7 @@ fn formatIntLiteral(
try writer.writeAll(string);
} else {
try data.cty.renderLiteralPrefix(writer, data.kind);
- wrap.convertToTwosComplement(int, .unsigned, data.int_info.bits);
+ wrap.convertToTwosComplement(int, data.int_info.signedness, c_bits);
std.mem.set(BigIntLimb, wrap.limbs[wrap.len..], 0);
wrap.len = wrap.limbs.len;
const limbs_per_c_limb = @divExact(wrap.len, c_limb_info.count);
@@ -7343,7 +7433,7 @@ fn formatIntLiteral(
c_limb_cty = c_limb_info.cty.toSigned();
c_limb_mut.positive = wrap.positive;
- c_limb_mut.convertToTwosComplement(
+ c_limb_mut.truncate(
c_limb_mut.toConst(),
.signed,
data.int_info.bits - limb_i * @bitSizeOf(BigIntLimb),
diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig
index a1b11df315..85e4cc9840 100644
--- a/src/codegen/c/type.zig
+++ b/src/codegen/c/type.zig
@@ -496,6 +496,116 @@ pub const CType = extern union {
}
};
+ pub fn isBool(self: CType) bool {
+ return switch (self.tag()) {
+ ._Bool,
+ .bool,
+ => true,
+ else => false,
+ };
+ }
+
+ pub fn isInteger(self: CType) bool {
+ return switch (self.tag()) {
+ .char,
+ .@"signed char",
+ .short,
+ .int,
+ .long,
+ .@"long long",
+ .@"unsigned char",
+ .@"unsigned short",
+ .@"unsigned int",
+ .@"unsigned long",
+ .@"unsigned long long",
+ .size_t,
+ .ptrdiff_t,
+ .uint8_t,
+ .int8_t,
+ .uint16_t,
+ .int16_t,
+ .uint32_t,
+ .int32_t,
+ .uint64_t,
+ .int64_t,
+ .uintptr_t,
+ .intptr_t,
+ .zig_u128,
+ .zig_i128,
+ => true,
+ else => false,
+ };
+ }
+
+ pub fn signedness(self: CType) ?std.builtin.Signedness {
+ return switch (self.tag()) {
+ .char => null, // unknown signedness
+ .@"signed char",
+ .short,
+ .int,
+ .long,
+ .@"long long",
+ .ptrdiff_t,
+ .int8_t,
+ .int16_t,
+ .int32_t,
+ .int64_t,
+ .intptr_t,
+ .zig_i128,
+ => .signed,
+ .@"unsigned char",
+ .@"unsigned short",
+ .@"unsigned int",
+ .@"unsigned long",
+ .@"unsigned long long",
+ .size_t,
+ .uint8_t,
+ .uint16_t,
+ .uint32_t,
+ .uint64_t,
+ .uintptr_t,
+ .zig_u128,
+ => .unsigned,
+ else => unreachable,
+ };
+ }
+
+ pub fn isFloat(self: CType) bool {
+ return switch (self.tag()) {
+ .float,
+ .double,
+ .@"long double",
+ .zig_f16,
+ .zig_f32,
+ .zig_f64,
+ .zig_f80,
+ .zig_f128,
+ .zig_c_longdouble,
+ => true,
+ else => false,
+ };
+ }
+
+ pub fn isPointer(self: CType) bool {
+ return switch (self.tag()) {
+ .pointer,
+ .pointer_const,
+ .pointer_volatile,
+ .pointer_const_volatile,
+ => true,
+ else => false,
+ };
+ }
+
+ pub fn isFunction(self: CType) bool {
+ return switch (self.tag()) {
+ .function,
+ .varargs_function,
+ => true,
+ else => false,
+ };
+ }
+
pub fn toSigned(self: CType) CType {
return CType.initTag(switch (self.tag()) {
.char, .@"signed char", .@"unsigned char" => .@"signed char",
@@ -725,6 +835,20 @@ pub const CType = extern union {
}
}
+ pub fn floatActiveBits(self: CType, target: Target) u16 {
+ return switch (self.tag()) {
+ .float => target.c_type_bit_size(.float),
+ .double => target.c_type_bit_size(.double),
+ .@"long double", .zig_c_longdouble => target.c_type_bit_size(.longdouble),
+ .zig_f16 => 16,
+ .zig_f32 => 32,
+ .zig_f64 => 64,
+ .zig_f80 => 80,
+ .zig_f128 => 128,
+ else => unreachable,
+ };
+ }
+
pub fn byteSize(self: CType, store: Store.Set, target: Target) u64 {
return switch (self.tag()) {
.void => 0,
diff --git a/src/type.zig b/src/type.zig
index 15525f14eb..9e501d893c 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -4213,7 +4213,7 @@ pub const Type = extern union {
};
}
- fn shallowElemType(child_ty: Type) Type {
+ pub fn shallowElemType(child_ty: Type) Type {
return switch (child_ty.zigTypeTag()) {
.Array, .Vector => child_ty.childType(),
else => child_ty,
diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig
index 70ac38d6fa..552080c836 100644
--- a/test/behavior/bitcast.zig
+++ b/test/behavior/bitcast.zig
@@ -34,7 +34,6 @@ test "@bitCast iX -> uX (8, 16, 128)" {
test "@bitCast iX -> uX exotic integers" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@@ -81,7 +80,6 @@ fn conv_uN(comptime N: usize, x: std.meta.Int(.unsigned, N)) std.meta.Int(.signe
test "bitcast uX to bytes" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index 54263e1daf..9ebeca8541 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -1526,7 +1526,6 @@ fn testNanEqNan(comptime F: type) !void {
}
test "vector comparison" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 50fef7f646..d885a7fabc 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -48,7 +48,6 @@ test "vector wrap operators" {
test "vector bin compares with mem.eql" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -403,7 +402,6 @@ test "initialize vector which is a struct field" {
test "vector comparison operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
From 93d696e84ef17a32d5c2f1520a295ebcda968e91 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Fri, 3 Mar 2023 01:18:23 -0500
Subject: [PATCH 021/294] CBE: implement some big integer and vector unary
operations
---
lib/zig.h | 422 ++++++++++++++++++++++++++++++++++-
src/codegen/c.zig | 51 ++++-
test/behavior/bugs/10147.zig | 1 -
test/behavior/math.zig | 8 +-
test/behavior/popcount.zig | 1 -
5 files changed, 460 insertions(+), 23 deletions(-)
diff --git a/lib/zig.h b/lib/zig.h
index c39cffee24..e5cb421c6f 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -1919,7 +1919,7 @@ static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, uint8_t bits) {
/* ========================== Big Integer Support =========================== */
-static inline uint16_t zig_big_bytes(uint16_t bits) {
+static inline uint16_t zig_int_bytes(uint16_t bits) {
uint16_t bytes = (bits + CHAR_BIT - 1) / CHAR_BIT;
uint16_t alignment = 16;
while (alignment / 2 >= bytes) alignment /= 2;
@@ -1931,7 +1931,7 @@ static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_sign
const uint8_t *rhs_bytes = rhs;
uint16_t byte_offset = 0;
bool do_signed = is_signed;
- uint16_t remaining_bytes = zig_big_bytes(bits);
+ uint16_t remaining_bytes = zig_int_bytes(bits);
#if zig_little_endian
byte_offset = remaining_bytes;
@@ -1965,7 +1965,7 @@ static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_sign
remaining_bytes -= 128 / CHAR_BIT;
#if zig_big_endian
- byte_offset -= 128 / CHAR_BIT;
+ byte_offset += 128 / CHAR_BIT;
#endif
}
@@ -1994,7 +1994,7 @@ static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_sign
remaining_bytes -= 64 / CHAR_BIT;
#if zig_big_endian
- byte_offset -= 64 / CHAR_BIT;
+ byte_offset += 64 / CHAR_BIT;
#endif
}
@@ -2023,7 +2023,7 @@ static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_sign
remaining_bytes -= 32 / CHAR_BIT;
#if zig_big_endian
- byte_offset -= 32 / CHAR_BIT;
+ byte_offset += 32 / CHAR_BIT;
#endif
}
@@ -2052,7 +2052,7 @@ static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_sign
remaining_bytes -= 16 / CHAR_BIT;
#if zig_big_endian
- byte_offset -= 16 / CHAR_BIT;
+ byte_offset += 16 / CHAR_BIT;
#endif
}
@@ -2081,13 +2081,368 @@ static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_sign
remaining_bytes -= 8 / CHAR_BIT;
#if zig_big_endian
- byte_offset -= 8 / CHAR_BIT;
+ byte_offset += 8 / CHAR_BIT;
#endif
}
return 0;
}
+static inline uint16_t zig_clz_big(const void *val, bool is_signed, uint16_t bits) {
+ const uint8_t *val_bytes = val;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t skip_bits = remaining_bytes * 8 - bits;
+ uint16_t total_lz = 0;
+ uint16_t limb_lz;
+ (void)is_signed;
+
+#if zig_little_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ {
+ zig_u128 val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u128(val_limb, 128 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 128 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ {
+ uint64_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u64(val_limb, 64 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 64 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ {
+ uint32_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u32(val_limb, 32 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 32 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ {
+ uint16_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u16(val_limb, 16 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 16 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ {
+ uint8_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u8(val_limb, 8 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 8 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return total_lz;
+}
+
+static inline uint16_t zig_ctz_big(const void *val, bool is_signed, uint16_t bits) {
+ const uint8_t *val_bytes = val;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t total_tz = 0;
+ uint16_t limb_tz;
+ (void)is_signed;
+
+#if zig_big_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ {
+ zig_u128 val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u128(val_limb, 128);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 128) return total_tz;
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ {
+ uint64_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u64(val_limb, 64);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 64) return total_tz;
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ {
+ uint32_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u32(val_limb, 32);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 32) return total_tz;
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ {
+ uint16_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u16(val_limb, 16);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 16) return total_tz;
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ {
+ uint8_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u8(val_limb, 8);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 8) return total_tz;
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return total_tz;
+}
+
+static inline uint16_t zig_popcount_big(const void *val, bool is_signed, uint16_t bits) {
+ const uint8_t *val_bytes = val;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t total_pc = 0;
+ (void)is_signed;
+
+#if zig_big_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ {
+ zig_u128 val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ total_pc += zig_popcount_u128(val_limb, 128);
+ }
+
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ {
+ uint64_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ total_pc += zig_popcount_u64(val_limb, 64);
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ {
+ uint32_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ total_pc += zig_popcount_u32(val_limb, 32);
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ {
+ uint16_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ total_pc += zig_popcount_u16(val_limb, 16);
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ {
+ uint8_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ total_pc += zig_popcount_u8(val_limb, 8);
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return total_pc;
+}
+
/* ========================= Floating Point Support ========================= */
#if _MSC_VER
@@ -2742,7 +3097,7 @@ zig_msvc_atomics_128op(u128, max)
uint32_t index = 0; \
const uint8_t *lhs_ptr = lhs; \
const uint8_t *rhs_ptr = rhs; \
- uint16_t elem_bytes = zig_big_bytes(elem_bits); \
+ uint16_t elem_bytes = zig_int_bytes(elem_bits); \
\
while (index < len) { \
result[index] = zig_cmp_big(lhs_ptr, rhs_ptr, is_signed, elem_bits) operator 0; \
@@ -2758,6 +3113,57 @@ zig_cmp_vec(le, <=)
zig_cmp_vec(gt, > )
zig_cmp_vec(ge, >=)
+static inline void zig_clz_vec(void *result, const void *val, uint32_t len, bool is_signed, uint16_t elem_bits) {
+ uint32_t index = 0;
+ const uint8_t *val_ptr = val;
+ uint16_t elem_bytes = zig_int_bytes(elem_bits);
+
+ while (index < len) {
+ uint16_t lz = zig_clz_big(val_ptr, is_signed, elem_bits);
+ if (elem_bits <= 128) {
+ ((uint8_t *)result)[index] = (uint8_t)lz;
+ } else {
+ ((uint16_t *)result)[index] = lz;
+ }
+ val_ptr += elem_bytes;
+ index += 1;
+ }
+}
+
+static inline void zig_ctz_vec(void *result, const void *val, uint32_t len, bool is_signed, uint16_t elem_bits) {
+ uint32_t index = 0;
+ const uint8_t *val_ptr = val;
+ uint16_t elem_bytes = zig_int_bytes(elem_bits);
+
+ while (index < len) {
+ uint16_t tz = zig_ctz_big(val_ptr, is_signed, elem_bits);
+ if (elem_bits <= 128) {
+ ((uint8_t *)result)[index] = (uint8_t)tz;
+ } else {
+ ((uint16_t *)result)[index] = tz;
+ }
+ val_ptr += elem_bytes;
+ index += 1;
+ }
+}
+
+static inline void zig_popcount_vec(void *result, const void *val, uint32_t len, bool is_signed, uint16_t elem_bits) {
+ uint32_t index = 0;
+ const uint8_t *val_ptr = val;
+ uint16_t elem_bytes = zig_int_bytes(elem_bits);
+
+ while (index < len) {
+ uint16_t pc = zig_popcount_big(val_ptr, is_signed, elem_bits);
+ if (elem_bits <= 128) {
+ ((uint8_t *)result)[index] = (uint8_t)pc;
+ } else {
+ ((uint16_t *)result)[index] = pc;
+ }
+ val_ptr += elem_bytes;
+ index += 1;
+ }
+}
+
/* ======================== Special Case Intrinsics ========================= */
#if (_MSC_VER && _M_X64) || defined(__x86_64__)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index f4a817cecd..4d3e71e78a 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -2844,7 +2844,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.cmp_vector => blk: {
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.VectorCmp, ty_pl.payload).data;
- break :blk try cmpBuiltinCall(f, inst, extra, extra.compareOperator(), .operator, .bits);
+ break :blk try airCmpBuiltinCall(f, inst, extra, extra.compareOperator(), .operator, .bits,);
},
.cmp_lt_errors_len => try airCmpLtErrorsLen(f, inst),
@@ -3837,9 +3837,16 @@ fn airCmpOp(f: *Function, inst: Air.Inst.Index, operator: std.math.CompareOperat
const target = f.object.dg.module.getTarget();
const operand_bits = operand_ty.bitSize(target);
if (operand_ty.isInt() and operand_bits > 64)
- return cmpBuiltinCall(f, inst, bin_op, operator, .cmp, if (operand_bits > 128) .bits else .none);
+ return airCmpBuiltinCall(
+ f,
+ inst,
+ bin_op,
+ operator,
+ .cmp,
+ if (operand_bits > 128) .bits else .none,
+ );
if (operand_ty.isRuntimeFloat())
- return cmpBuiltinCall(f, inst, bin_op, operator, .operator, .none);
+ return airCmpBuiltinCall(f, inst, bin_op, operator, .operator, .none);
const inst_ty = f.air.typeOfIndex(inst);
const lhs = try f.resolveInst(bin_op.lhs);
@@ -3876,9 +3883,16 @@ fn airEquality(
const target = f.object.dg.module.getTarget();
const operand_bits = operand_ty.bitSize(target);
if (operand_ty.isInt() and operand_bits > 64)
- return cmpBuiltinCall(f, inst, bin_op, operator, .cmp, if (operand_bits > 128) .bits else .none);
+ return airCmpBuiltinCall(
+ f,
+ inst,
+ bin_op,
+ operator,
+ .cmp,
+ if (operand_bits > 128) .bits else .none,
+ );
if (operand_ty.isRuntimeFloat())
- return cmpBuiltinCall(f, inst, bin_op, operator, .operator, .none);
+ return airCmpBuiltinCall(f, inst, bin_op, operator, .operator, .none);
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);
@@ -5969,14 +5983,25 @@ fn airUnBuiltinCall(
const inst_ty = f.air.typeOfIndex(inst);
const operand_ty = f.air.typeOf(ty_op.operand);
+ const inst_cty = try f.typeToCType(inst_ty, .complete);
+ const ref_ret = switch (inst_cty.tag()) {
+ else => false,
+ .array, .vector => true,
+ };
+
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
- try f.writeCValue(writer, local, .Other);
- try writer.writeAll(" = zig_");
- try writer.writeAll(operation);
- try writer.writeByte('_');
+ if (!ref_ret) {
+ try f.writeCValue(writer, local, .Other);
+ try writer.writeAll(" = ");
+ }
+ try writer.print("zig_{s}_", .{operation});
try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
try writer.writeByte('(');
+ if (ref_ret) {
+ try f.writeCValue(writer, local, .FunctionArgument);
+ try writer.writeAll(", ");
+ }
try f.writeCValue(writer, operand, .FunctionArgument);
try f.object.dg.renderBuiltinInfo(writer, operand_ty, info);
try writer.writeAll(");\n");
@@ -6019,7 +6044,7 @@ fn airBinBuiltinCall(
return local;
}
-fn cmpBuiltinCall(
+fn airCmpBuiltinCall(
f: *Function,
inst: Air.Inst.Index,
data: anytype,
@@ -6034,7 +6059,11 @@ fn cmpBuiltinCall(
const rhs = try f.resolveInst(data.rhs);
try reap(f, inst, &.{ data.lhs, data.rhs });
- const ref_ret = inst_ty.tag() != .bool;
+ const inst_cty = try f.typeToCType(inst_ty, .complete);
+ const ref_ret = switch (inst_cty.tag()) {
+ else => false,
+ .array, .vector => true,
+ };
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
diff --git a/test/behavior/bugs/10147.zig b/test/behavior/bugs/10147.zig
index 3ca9085805..77c513caa6 100644
--- a/test/behavior/bugs/10147.zig
+++ b/test/behavior/bugs/10147.zig
@@ -6,7 +6,6 @@ test "test calling @clz on both vector and scalar inputs" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: u32 = 0x1;
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index 9ebeca8541..d7b8e4764b 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -100,7 +100,6 @@ test "@clz vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testClzVectors();
@@ -163,7 +162,6 @@ test "@ctz vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
@@ -1562,6 +1560,12 @@ test "signed zeros are represented properly" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.os.tag == .windows and builtin.cpu.arch == .aarch64 and
+ builtin.zig_backend == .stage2_c)
+ {
+ return error.SkipZigTest;
+ }
+
const S = struct {
fn doTheTest() !void {
try testOne(f16);
diff --git a/test/behavior/popcount.zig b/test/behavior/popcount.zig
index b27d5d77d3..9dce5820cd 100644
--- a/test/behavior/popcount.zig
+++ b/test/behavior/popcount.zig
@@ -67,7 +67,6 @@ fn testPopCountIntegers() !void {
}
test "@popCount vectors" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
From e96a0fd0a1a05fe8c3b4d87df03d78ae99b7dbcb Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Fri, 3 Mar 2023 01:48:13 -0500
Subject: [PATCH 022/294] CBE: "compute" max int alignment the lazy way
---
lib/zig.h | 2 +-
src/link/C.zig | 22 ++++++++++++++--------
2 files changed, 15 insertions(+), 9 deletions(-)
diff --git a/lib/zig.h b/lib/zig.h
index e5cb421c6f..5d77c76c8f 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -1921,7 +1921,7 @@ static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, uint8_t bits) {
static inline uint16_t zig_int_bytes(uint16_t bits) {
uint16_t bytes = (bits + CHAR_BIT - 1) / CHAR_BIT;
- uint16_t alignment = 16;
+ uint16_t alignment = ZIG_TARGET_MAX_INT_ALIGNMENT;
while (alignment / 2 >= bytes) alignment /= 2;
return (bytes + alignment - 1) / alignment * alignment;
}
diff --git a/src/link/C.zig b/src/link/C.zig
index 5663ba71e2..7e3ad2eddd 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -221,14 +221,19 @@ pub fn flush(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) !void
return self.flushModule(comp, prog_node);
}
-fn abiDefine(comp: *Compilation) ?[]const u8 {
- return switch (comp.getTarget().abi) {
- .msvc => "#define ZIG_TARGET_ABI_MSVC\n",
- else => null,
- };
+fn abiDefines(self: *C, target: std.Target) !std.ArrayList(u8) {
+ var defines = std.ArrayList(u8).init(self.base.allocator);
+ errdefer defines.deinit();
+ const writer = defines.writer();
+ switch (target.abi) {
+ .msvc => try writer.writeAll("#define ZIG_TARGET_ABI_MSVC\n"),
+ else => {},
+ }
+ try writer.print("#define ZIG_TARGET_MAX_INT_ALIGNMENT {d}\n", .{target.maxIntAlignment()});
+ return defines;
}
-pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) !void {
+pub fn flushModule(self: *C, _: *Compilation, prog_node: *std.Progress.Node) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -245,12 +250,13 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node)
var f: Flush = .{};
defer f.deinit(gpa);
- const abi_define = abiDefine(comp);
+ const abi_defines = try self.abiDefines(module.getTarget());
+ defer abi_defines.deinit();
// Covers defines, zig.h, ctypes, asm, lazy fwd.
try f.all_buffers.ensureUnusedCapacity(gpa, 5);
- if (abi_define) |buf| f.appendBufAssumeCapacity(buf);
+ f.appendBufAssumeCapacity(abi_defines.items);
f.appendBufAssumeCapacity(zig_h);
const ctypes_index = f.all_buffers.items.len;
From 9e3a5ecd39227aff3b2821d0c0b489eb9713b146 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sat, 4 Mar 2023 15:18:05 -0500
Subject: [PATCH 023/294] CBE: fix behavior test failures on msvc
---
lib/zig.h | 4 +++-
src/codegen/c.zig | 37 +++++++++++++++++++++++++++++--------
src/codegen/c/type.zig | 7 +++++++
3 files changed, 39 insertions(+), 9 deletions(-)
diff --git a/lib/zig.h b/lib/zig.h
index 5d77c76c8f..6b95ba3358 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -1646,7 +1646,9 @@ static inline zig_u128 zig_wrap_u128(zig_u128 val, uint8_t bits) {
}
static inline zig_i128 zig_wrap_i128(zig_i128 val, uint8_t bits) {
- return zig_make_i128(zig_wrap_i64(zig_hi_i128(val), bits - UINT8_C(64)), zig_lo_i128(val));
+ if (bits > UINT8_C(64)) return zig_make_i128(zig_wrap_i64(zig_hi_i128(val), bits - UINT8_C(64)), zig_lo_i128(val));
+ int64_t lo = zig_wrap_i64((int64_t)zig_lo_i128(val), bits);
+ return zig_make_i128(zig_shr_i64(lo, 63), (uint64_t)lo);
}
static inline zig_u128 zig_shlw_u128(zig_u128 lhs, uint8_t rhs, uint8_t bits) {
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 4d3e71e78a..b8606b1a17 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -4461,10 +4461,12 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue {
if (dest_ty.isAbiInt()) {
const dest_cty = try f.typeToCType(dest_ty, .complete);
const dest_info = dest_ty.intInfo(target);
- var wrap_ty_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dest_info.signedness) {
+ var info_ty_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dest_info.signedness) {
.unsigned => .int_unsigned,
.signed => .int_signed,
} }, .data = dest_info.bits };
+ var wrap_cty: ?CType = null;
+ var need_bitcasts = false;
try f.writeCValue(writer, local, .Other);
if (dest_cty.castTag(.array)) |pl| {
@@ -4472,14 +4474,31 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue {
.Little => pl.data.len - 1,
.Big => 0,
}});
- wrap_ty_pl.data -= 1;
- wrap_ty_pl.data %= @intCast(u16, f.byteSize(f.indexToCType(pl.data.elem_type)) * 8);
- wrap_ty_pl.data += 1;
+ const elem_cty = f.indexToCType(pl.data.elem_type);
+ wrap_cty = elem_cty.toSignedness(dest_info.signedness);
+ need_bitcasts = wrap_cty.?.tag() == .zig_i128;
+ info_ty_pl.data -= 1;
+ info_ty_pl.data %= @intCast(u16, f.byteSize(elem_cty) * 8);
+ info_ty_pl.data += 1;
}
- const wrap_ty = Type.initPayload(&wrap_ty_pl.base);
- try writer.writeAll(" = zig_wrap_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, wrap_ty);
+ try writer.writeAll(" = ");
+ if (need_bitcasts) {
+ try writer.writeAll("zig_bitcast_");
+ try f.object.dg.renderCTypeForBuiltinFnName(writer, wrap_cty.?.toUnsigned());
+ try writer.writeByte('(');
+ }
+ try writer.writeAll("zig_wrap_");
+ const info_ty = Type.initPayload(&info_ty_pl.base);
+ if (wrap_cty) |cty|
+ try f.object.dg.renderCTypeForBuiltinFnName(writer, cty)
+ else
+ try f.object.dg.renderTypeForBuiltinFnName(writer, info_ty);
try writer.writeByte('(');
+ if (need_bitcasts) {
+ try writer.writeAll("zig_bitcast_");
+ try f.object.dg.renderCTypeForBuiltinFnName(writer, wrap_cty.?);
+ try writer.writeByte('(');
+ }
try f.writeCValue(writer, local, .Other);
if (dest_cty.castTag(.array)) |pl| {
try writer.print("[{d}]", .{switch (target.cpu.arch.endian()) {
@@ -4487,7 +4506,9 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue {
.Big => 0,
}});
}
- try f.object.dg.renderBuiltinInfo(writer, wrap_ty, .bits);
+ if (need_bitcasts) try writer.writeByte(')');
+ try f.object.dg.renderBuiltinInfo(writer, info_ty, .bits);
+ if (need_bitcasts) try writer.writeByte(')');
try writer.writeAll(");\n");
}
diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig
index 85e4cc9840..313fcc130c 100644
--- a/src/codegen/c/type.zig
+++ b/src/codegen/c/type.zig
@@ -651,6 +651,13 @@ pub const CType = extern union {
});
}
+ pub fn toSignedness(self: CType, s: std.builtin.Signedness) CType {
+ return switch (s) {
+ .unsigned => self.toUnsigned(),
+ .signed => self.toSigned(),
+ };
+ }
+
pub fn getStandardDefineAbbrev(self: CType) ?[]const u8 {
return switch (self.tag()) {
.char => "CHAR",
From b2e9c0d0ff1dc6799fe3b5fdbecd53af176f37b7 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sat, 4 Mar 2023 19:02:42 -0500
Subject: [PATCH 024/294] Sema: fix cmp_vector type
---
src/Sema.zig | 40 +++++++++++++++++++---------------------
1 file changed, 19 insertions(+), 21 deletions(-)
diff --git a/src/Sema.zig b/src/Sema.zig
index 8940527bc0..8c6e3cf05c 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -574,11 +574,13 @@ pub const Block = struct {
});
}
- fn addCmpVector(block: *Block, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, cmp_op: std.math.CompareOperator, vector_ty: Air.Inst.Ref) !Air.Inst.Ref {
+ fn addCmpVector(block: *Block, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, cmp_op: std.math.CompareOperator) !Air.Inst.Ref {
return block.addInst(.{
.tag = if (block.float_mode == .Optimized) .cmp_vector_optimized else .cmp_vector,
.data = .{ .ty_pl = .{
- .ty = vector_ty,
+ .ty = try block.sema.addType(
+ try Type.vector(block.sema.arena, block.sema.typeOf(lhs).vectorLen(), Type.bool),
+ ),
.payload = try block.sema.addExtra(Air.VectorCmp{
.lhs = lhs,
.rhs = rhs,
@@ -9412,7 +9414,7 @@ fn intCast(
const ok = if (is_vector) ok: {
const zeros = try Value.Tag.repeated.create(sema.arena, Value.zero);
const zero_inst = try sema.addConstant(sema.typeOf(operand), zeros);
- const is_in_range = try block.addCmpVector(operand, zero_inst, .eq, try sema.addType(operand_ty));
+ const is_in_range = try block.addCmpVector(operand, zero_inst, .eq);
const all_in_range = try block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{ .operand = is_in_range, .operation = .And } },
@@ -9466,7 +9468,7 @@ fn intCast(
const dest_range = try sema.addConstant(unsigned_operand_ty, dest_range_val);
const ok = if (is_vector) ok: {
- const is_in_range = try block.addCmpVector(diff_unsigned, dest_range, .lte, try sema.addType(operand_ty));
+ const is_in_range = try block.addCmpVector(diff_unsigned, dest_range, .lte);
const all_in_range = try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
@@ -9483,7 +9485,7 @@ fn intCast(
try sema.addSafetyCheck(block, ok, .cast_truncated_data);
} else {
const ok = if (is_vector) ok: {
- const is_in_range = try block.addCmpVector(diff, dest_max, .lte, try sema.addType(operand_ty));
+ const is_in_range = try block.addCmpVector(diff, dest_max, .lte);
const all_in_range = try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
@@ -9504,7 +9506,7 @@ fn intCast(
const ok = if (is_vector) ok: {
const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero);
const zero_inst = try sema.addConstant(operand_ty, zero_val);
- const is_in_range = try block.addCmpVector(operand, zero_inst, .gte, try sema.addType(operand_ty));
+ const is_in_range = try block.addCmpVector(operand, zero_inst, .gte);
const all_in_range = try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
@@ -12016,7 +12018,7 @@ fn zirShl(
const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
- const lt = try block.addCmpVector(rhs, bit_count_inst, .lt, try sema.addType(rhs_ty));
+ const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
break :ok try block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{
@@ -12172,7 +12174,7 @@ fn zirShr(
const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
- const lt = try block.addCmpVector(rhs, bit_count_inst, .lt, try sema.addType(rhs_ty));
+ const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
break :ok try block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{
@@ -12191,7 +12193,7 @@ fn zirShr(
const back = try block.addBinOp(.shl, result, rhs);
const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
- const eql = try block.addCmpVector(lhs, back, .eq, try sema.addType(rhs_ty));
+ const eql = try block.addCmpVector(lhs, back, .eq);
break :ok try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
@@ -13192,7 +13194,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const floored = try block.addUnOp(.floor, result);
if (resolved_type.zigTypeTag() == .Vector) {
- const eql = try block.addCmpVector(result, floored, .eq, try sema.addType(resolved_type));
+ const eql = try block.addCmpVector(result, floored, .eq);
break :ok try block.addInst(.{
.tag = switch (block.float_mode) {
.Strict => .reduce,
@@ -13216,7 +13218,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (resolved_type.zigTypeTag() == .Vector) {
const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero);
const zero = try sema.addConstant(resolved_type, zero_val);
- const eql = try block.addCmpVector(remainder, zero, .eq, try sema.addType(resolved_type));
+ const eql = try block.addCmpVector(remainder, zero, .eq);
break :ok try block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{
@@ -13514,14 +13516,13 @@ fn addDivIntOverflowSafety(
var ok: Air.Inst.Ref = .none;
if (resolved_type.zigTypeTag() == .Vector) {
- const vector_ty_ref = try sema.addType(resolved_type);
if (maybe_lhs_val == null) {
const min_int_ref = try sema.addConstant(resolved_type, min_int);
- ok = try block.addCmpVector(casted_lhs, min_int_ref, .neq, vector_ty_ref);
+ ok = try block.addCmpVector(casted_lhs, min_int_ref, .neq);
}
if (maybe_rhs_val == null) {
const neg_one_ref = try sema.addConstant(resolved_type, neg_one);
- const rhs_ok = try block.addCmpVector(casted_rhs, neg_one_ref, .neq, vector_ty_ref);
+ const rhs_ok = try block.addCmpVector(casted_rhs, neg_one_ref, .neq);
if (ok == .none) {
ok = rhs_ok;
} else {
@@ -13573,7 +13574,7 @@ fn addDivByZeroSafety(
const ok = if (resolved_type.zigTypeTag() == .Vector) ok: {
const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero);
const zero = try sema.addConstant(resolved_type, zero_val);
- const ok = try block.addCmpVector(casted_rhs, zero, .neq, try sema.addType(resolved_type));
+ const ok = try block.addCmpVector(casted_rhs, zero, .neq);
break :ok try block.addInst(.{
.tag = if (is_int) .reduce else .reduce_optimized,
.data = .{ .reduce = .{
@@ -15202,9 +15203,7 @@ fn cmpSelf(
};
try sema.requireRuntimeBlock(block, src, runtime_src);
if (resolved_type.zigTypeTag() == .Vector) {
- const result_ty = try Type.vector(sema.arena, resolved_type.vectorLen(), Type.bool);
- const result_ty_ref = try sema.addType(result_ty);
- return block.addCmpVector(casted_lhs, casted_rhs, op, result_ty_ref);
+ return block.addCmpVector(casted_lhs, casted_rhs, op);
}
const tag = Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized);
return block.addBinOp(tag, casted_lhs, casted_rhs);
@@ -23035,7 +23034,7 @@ fn panicSentinelMismatch(
const ok = if (sentinel_ty.zigTypeTag() == .Vector) ok: {
const eql =
- try parent_block.addCmpVector(expected_sentinel, actual_sentinel, .eq, try sema.addType(sentinel_ty));
+ try parent_block.addCmpVector(expected_sentinel, actual_sentinel, .eq);
break :ok try parent_block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{
@@ -29368,8 +29367,7 @@ fn cmpVector(
};
try sema.requireRuntimeBlock(block, src, runtime_src);
- const result_ty_inst = try sema.addType(result_ty);
- return block.addCmpVector(lhs, rhs, op, result_ty_inst);
+ return block.addCmpVector(lhs, rhs, op);
}
fn wrapOptional(
From c478c7609e4529267d1ce030577777e836ffc10b Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sun, 5 Mar 2023 00:01:15 -0500
Subject: [PATCH 025/294] CBE: implement vector operations
Also, bigint add and sub which is all I was actually trying to do.
---
lib/zig.h | 660 ++++++++++++++++++++----------
src/codegen/c.zig | 620 +++++++++++++++++-----------
src/type.zig | 2 +-
src/value.zig | 2 +-
test/behavior/bitreverse.zig | 3 -
test/behavior/byteswap.zig | 3 -
test/behavior/cast.zig | 1 -
test/behavior/floatop.zig | 12 -
test/behavior/maximum_minimum.zig | 2 -
test/behavior/muladd.zig | 5 -
test/behavior/vector.zig | 30 +-
11 files changed, 835 insertions(+), 505 deletions(-)
diff --git a/lib/zig.h b/lib/zig.h
index 6b95ba3358..22a9dbbb9e 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -612,12 +612,6 @@ static inline bool zig_addo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8
#endif
}
-static inline void zig_vaddo_u32(uint8_t *ov, uint32_t *res, int n,
- const uint32_t *lhs, const uint32_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u32(&res[i], lhs[i], rhs[i], bits);
-}
-
zig_extern int32_t __addosi4(int32_t lhs, int32_t rhs, int *overflow);
static inline bool zig_addo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
@@ -632,12 +626,6 @@ static inline bool zig_addo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
-static inline void zig_vaddo_i32(uint8_t *ov, int32_t *res, int n,
- const int32_t *lhs, const int32_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i32(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_addo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
uint64_t full_res;
@@ -650,12 +638,6 @@ static inline bool zig_addo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8
#endif
}
-static inline void zig_vaddo_u64(uint8_t *ov, uint64_t *res, int n,
- const uint64_t *lhs, const uint64_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u64(&res[i], lhs[i], rhs[i], bits);
-}
-
zig_extern int64_t __addodi4(int64_t lhs, int64_t rhs, int *overflow);
static inline bool zig_addo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
@@ -670,12 +652,6 @@ static inline bool zig_addo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
-static inline void zig_vaddo_i64(uint8_t *ov, int64_t *res, int n,
- const int64_t *lhs, const int64_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i64(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_addo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
uint8_t full_res;
@@ -690,12 +666,6 @@ static inline bool zig_addo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t b
#endif
}
-static inline void zig_vaddo_u8(uint8_t *ov, uint8_t *res, int n,
- const uint8_t *lhs, const uint8_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u8(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_addo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
int8_t full_res;
@@ -710,12 +680,6 @@ static inline bool zig_addo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits
#endif
}
-static inline void zig_vaddo_i8(uint8_t *ov, int8_t *res, int n,
- const int8_t *lhs, const int8_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i8(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_addo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
uint16_t full_res;
@@ -730,12 +694,6 @@ static inline bool zig_addo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8
#endif
}
-static inline void zig_vaddo_u16(uint8_t *ov, uint16_t *res, int n,
- const uint16_t *lhs, const uint16_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u16(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_addo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
int16_t full_res;
@@ -750,12 +708,6 @@ static inline bool zig_addo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t
#endif
}
-static inline void zig_vaddo_i16(uint8_t *ov, int16_t *res, int n,
- const int16_t *lhs, const int16_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i16(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_subo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
uint32_t full_res;
@@ -768,12 +720,6 @@ static inline bool zig_subo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8
#endif
}
-static inline void zig_vsubo_u32(uint8_t *ov, uint32_t *res, int n,
- const uint32_t *lhs, const uint32_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u32(&res[i], lhs[i], rhs[i], bits);
-}
-
zig_extern int32_t __subosi4(int32_t lhs, int32_t rhs, int *overflow);
static inline bool zig_subo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
@@ -788,12 +734,6 @@ static inline bool zig_subo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
-static inline void zig_vsubo_i32(uint8_t *ov, int32_t *res, int n,
- const int32_t *lhs, const int32_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i32(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_subo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
uint64_t full_res;
@@ -806,12 +746,6 @@ static inline bool zig_subo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8
#endif
}
-static inline void zig_vsubo_u64(uint8_t *ov, uint64_t *res, int n,
- const uint64_t *lhs, const uint64_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u64(&res[i], lhs[i], rhs[i], bits);
-}
-
zig_extern int64_t __subodi4(int64_t lhs, int64_t rhs, int *overflow);
static inline bool zig_subo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
@@ -826,12 +760,6 @@ static inline bool zig_subo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
-static inline void zig_vsubo_i64(uint8_t *ov, int64_t *res, int n,
- const int64_t *lhs, const int64_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i64(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_subo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
uint8_t full_res;
@@ -846,12 +774,6 @@ static inline bool zig_subo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t b
#endif
}
-static inline void zig_vsubo_u8(uint8_t *ov, uint8_t *res, int n,
- const uint8_t *lhs, const uint8_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u8(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_subo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
int8_t full_res;
@@ -866,13 +788,6 @@ static inline bool zig_subo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits
#endif
}
-static inline void zig_vsubo_i8(uint8_t *ov, int8_t *res, int n,
- const int8_t *lhs, const int8_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i8(&res[i], lhs[i], rhs[i], bits);
-}
-
-
static inline bool zig_subo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
uint16_t full_res;
@@ -887,13 +802,6 @@ static inline bool zig_subo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8
#endif
}
-static inline void zig_vsubo_u16(uint8_t *ov, uint16_t *res, int n,
- const uint16_t *lhs, const uint16_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u16(&res[i], lhs[i], rhs[i], bits);
-}
-
-
static inline bool zig_subo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
int16_t full_res;
@@ -908,12 +816,6 @@ static inline bool zig_subo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t
#endif
}
-static inline void zig_vsubo_i16(uint8_t *ov, int16_t *res, int n,
- const int16_t *lhs, const int16_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i16(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_mulo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
uint32_t full_res;
@@ -926,12 +828,6 @@ static inline bool zig_mulo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8
#endif
}
-static inline void zig_vmulo_u32(uint8_t *ov, uint32_t *res, int n,
- const uint32_t *lhs, const uint32_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u32(&res[i], lhs[i], rhs[i], bits);
-}
-
zig_extern int32_t __mulosi4(int32_t lhs, int32_t rhs, int *overflow);
static inline bool zig_mulo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
@@ -946,12 +842,6 @@ static inline bool zig_mulo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
-static inline void zig_vmulo_i32(uint8_t *ov, int32_t *res, int n,
- const int32_t *lhs, const int32_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i32(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_mulo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
uint64_t full_res;
@@ -964,12 +854,6 @@ static inline bool zig_mulo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8
#endif
}
-static inline void zig_vmulo_u64(uint8_t *ov, uint64_t *res, int n,
- const uint64_t *lhs, const uint64_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u64(&res[i], lhs[i], rhs[i], bits);
-}
-
zig_extern int64_t __mulodi4(int64_t lhs, int64_t rhs, int *overflow);
static inline bool zig_mulo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
@@ -984,12 +868,6 @@ static inline bool zig_mulo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
-static inline void zig_vmulo_i64(uint8_t *ov, int64_t *res, int n,
- const int64_t *lhs, const int64_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i64(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_mulo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
uint8_t full_res;
@@ -1004,12 +882,6 @@ static inline bool zig_mulo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t b
#endif
}
-static inline void zig_vmulo_u8(uint8_t *ov, uint8_t *res, int n,
- const uint8_t *lhs, const uint8_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u8(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_mulo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
int8_t full_res;
@@ -1024,12 +896,6 @@ static inline bool zig_mulo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits
#endif
}
-static inline void zig_vmulo_i8(uint8_t *ov, int8_t *res, int n,
- const int8_t *lhs, const int8_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i8(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_mulo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
uint16_t full_res;
@@ -1044,12 +910,6 @@ static inline bool zig_mulo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8
#endif
}
-static inline void zig_vmulo_u16(uint8_t *ov, uint16_t *res, int n,
- const uint16_t *lhs, const uint16_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u16(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_mulo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
int16_t full_res;
@@ -1064,12 +924,6 @@ static inline bool zig_mulo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t
#endif
}
-static inline void zig_vmulo_i16(uint8_t *ov, int16_t *res, int n,
- const int16_t *lhs, const int16_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i16(&res[i], lhs[i], rhs[i], bits);
-}
-
#define zig_int_builtins(w) \
static inline bool zig_shlo_u##w(uint##w##_t *res, uint##w##_t lhs, uint8_t rhs, uint8_t bits) { \
*res = zig_shlw_u##w(lhs, rhs, bits); \
@@ -2090,6 +1944,446 @@ static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_sign
return 0;
}
+static inline bool zig_addo_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ uint8_t *res_bytes = res;
+ const uint8_t *lhs_bytes = lhs;
+ const uint8_t *rhs_bytes = rhs;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t top_bits = remaining_bytes * 8 - bits;
+ bool overflow = false;
+
+#if zig_big_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+ uint16_t limb_bits = 128 - (remaining_bytes == 128 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 128 / CHAR_BIT && is_signed) {
+ zig_i128 res_limb;
+ zig_i128 tmp_limb;
+ zig_i128 lhs_limb;
+ zig_i128 rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i128(&res_limb, tmp_limb, zig_make_i128(INT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ zig_u128 res_limb;
+ zig_u128 tmp_limb;
+ zig_u128 lhs_limb;
+ zig_u128 rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u128(&res_limb, tmp_limb, zig_make_u128(UINT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+ uint16_t limb_bits = 64 - (remaining_bytes == 64 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 64 / CHAR_BIT && is_signed) {
+ int64_t res_limb;
+ int64_t tmp_limb;
+ int64_t lhs_limb;
+ int64_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i64(&res_limb, tmp_limb, overflow ? INT64_C(1) : INT64_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint64_t res_limb;
+ uint64_t tmp_limb;
+ uint64_t lhs_limb;
+ uint64_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u64(&res_limb, tmp_limb, overflow ? UINT64_C(1) : UINT64_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+ uint16_t limb_bits = 32 - (remaining_bytes == 32 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 32 / CHAR_BIT && is_signed) {
+ int32_t res_limb;
+ int32_t tmp_limb;
+ int32_t lhs_limb;
+ int32_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i32(&res_limb, tmp_limb, overflow ? INT32_C(1) : INT32_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint32_t res_limb;
+ uint32_t tmp_limb;
+ uint32_t lhs_limb;
+ uint32_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u32(&res_limb, tmp_limb, overflow ? UINT32_C(1) : UINT32_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+ uint16_t limb_bits = 16 - (remaining_bytes == 16 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 16 / CHAR_BIT && is_signed) {
+ int16_t res_limb;
+ int16_t tmp_limb;
+ int16_t lhs_limb;
+ int16_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i16(&res_limb, tmp_limb, overflow ? INT16_C(1) : INT16_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint16_t res_limb;
+ uint16_t tmp_limb;
+ uint16_t lhs_limb;
+ uint16_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u16(&res_limb, tmp_limb, overflow ? UINT16_C(1) : UINT16_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+ uint16_t limb_bits = 8 - (remaining_bytes == 8 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 8 / CHAR_BIT && is_signed) {
+ int8_t res_limb;
+ int8_t tmp_limb;
+ int8_t lhs_limb;
+ int8_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i8(&res_limb, tmp_limb, overflow ? INT8_C(1) : INT8_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint8_t res_limb;
+ uint8_t tmp_limb;
+ uint8_t lhs_limb;
+ uint8_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u8(&res_limb, tmp_limb, overflow ? UINT8_C(1) : UINT8_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return overflow;
+}
+
+static inline bool zig_subo_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ uint8_t *res_bytes = res;
+ const uint8_t *lhs_bytes = lhs;
+ const uint8_t *rhs_bytes = rhs;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t top_bits = remaining_bytes * 8 - bits;
+ bool overflow = false;
+
+#if zig_big_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+ uint16_t limb_bits = 128 - (remaining_bytes == 128 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 128 / CHAR_BIT && is_signed) {
+ zig_i128 res_limb;
+ zig_i128 tmp_limb;
+ zig_i128 lhs_limb;
+ zig_i128 rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i128(&res_limb, tmp_limb, zig_make_i128(INT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ zig_u128 res_limb;
+ zig_u128 tmp_limb;
+ zig_u128 lhs_limb;
+ zig_u128 rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u128(&res_limb, tmp_limb, zig_make_u128(UINT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+ uint16_t limb_bits = 64 - (remaining_bytes == 64 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 64 / CHAR_BIT && is_signed) {
+ int64_t res_limb;
+ int64_t tmp_limb;
+ int64_t lhs_limb;
+ int64_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i64(&res_limb, tmp_limb, overflow ? INT64_C(1) : INT64_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint64_t res_limb;
+ uint64_t tmp_limb;
+ uint64_t lhs_limb;
+ uint64_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u64(&res_limb, tmp_limb, overflow ? UINT64_C(1) : UINT64_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+ uint16_t limb_bits = 32 - (remaining_bytes == 32 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 32 / CHAR_BIT && is_signed) {
+ int32_t res_limb;
+ int32_t tmp_limb;
+ int32_t lhs_limb;
+ int32_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i32(&res_limb, tmp_limb, overflow ? INT32_C(1) : INT32_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint32_t res_limb;
+ uint32_t tmp_limb;
+ uint32_t lhs_limb;
+ uint32_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u32(&res_limb, tmp_limb, overflow ? UINT32_C(1) : UINT32_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+ uint16_t limb_bits = 16 - (remaining_bytes == 16 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 16 / CHAR_BIT && is_signed) {
+ int16_t res_limb;
+ int16_t tmp_limb;
+ int16_t lhs_limb;
+ int16_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i16(&res_limb, tmp_limb, overflow ? INT16_C(1) : INT16_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint16_t res_limb;
+ uint16_t tmp_limb;
+ uint16_t lhs_limb;
+ uint16_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u16(&res_limb, tmp_limb, overflow ? UINT16_C(1) : UINT16_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+ uint16_t limb_bits = 8 - (remaining_bytes == 8 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 8 / CHAR_BIT && is_signed) {
+ int8_t res_limb;
+ int8_t tmp_limb;
+ int8_t lhs_limb;
+ int8_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i8(&res_limb, tmp_limb, overflow ? INT8_C(1) : INT8_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint8_t res_limb;
+ uint8_t tmp_limb;
+ uint8_t lhs_limb;
+ uint8_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u8(&res_limb, tmp_limb, overflow ? UINT8_C(1) : UINT8_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return overflow;
+}
+
+static inline void zig_addw_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ (void)zig_addo_big(res, lhs, rhs, is_signed, bits);
+}
+
+static inline void zig_subw_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ (void)zig_subo_big(res, lhs, rhs, is_signed, bits);
+}
+
static inline uint16_t zig_clz_big(const void *val, bool is_signed, uint16_t bits) {
const uint8_t *val_bytes = val;
uint16_t byte_offset = 0;
@@ -3092,80 +3386,6 @@ zig_msvc_atomics_128op(u128, max)
#endif /* _MSC_VER && (_M_IX86 || _M_X64) */
-/* ============================= Vector Support ============================= */
-
-#define zig_cmp_vec(operation, operator) \
- static inline void zig_##operation##_vec(bool *result, const void *lhs, const void *rhs, uint32_t len, bool is_signed, uint16_t elem_bits) { \
- uint32_t index = 0; \
- const uint8_t *lhs_ptr = lhs; \
- const uint8_t *rhs_ptr = rhs; \
- uint16_t elem_bytes = zig_int_bytes(elem_bits); \
- \
- while (index < len) { \
- result[index] = zig_cmp_big(lhs_ptr, rhs_ptr, is_signed, elem_bits) operator 0; \
- lhs_ptr += elem_bytes; \
- rhs_ptr += elem_bytes; \
- index += 1; \
- } \
- }
-zig_cmp_vec(eq, ==)
-zig_cmp_vec(ne, !=)
-zig_cmp_vec(lt, < )
-zig_cmp_vec(le, <=)
-zig_cmp_vec(gt, > )
-zig_cmp_vec(ge, >=)
-
-static inline void zig_clz_vec(void *result, const void *val, uint32_t len, bool is_signed, uint16_t elem_bits) {
- uint32_t index = 0;
- const uint8_t *val_ptr = val;
- uint16_t elem_bytes = zig_int_bytes(elem_bits);
-
- while (index < len) {
- uint16_t lz = zig_clz_big(val_ptr, is_signed, elem_bits);
- if (elem_bits <= 128) {
- ((uint8_t *)result)[index] = (uint8_t)lz;
- } else {
- ((uint16_t *)result)[index] = lz;
- }
- val_ptr += elem_bytes;
- index += 1;
- }
-}
-
-static inline void zig_ctz_vec(void *result, const void *val, uint32_t len, bool is_signed, uint16_t elem_bits) {
- uint32_t index = 0;
- const uint8_t *val_ptr = val;
- uint16_t elem_bytes = zig_int_bytes(elem_bits);
-
- while (index < len) {
- uint16_t tz = zig_ctz_big(val_ptr, is_signed, elem_bits);
- if (elem_bits <= 128) {
- ((uint8_t *)result)[index] = (uint8_t)tz;
- } else {
- ((uint16_t *)result)[index] = tz;
- }
- val_ptr += elem_bytes;
- index += 1;
- }
-}
-
-static inline void zig_popcount_vec(void *result, const void *val, uint32_t len, bool is_signed, uint16_t elem_bits) {
- uint32_t index = 0;
- const uint8_t *val_ptr = val;
- uint16_t elem_bytes = zig_int_bytes(elem_bits);
-
- while (index < len) {
- uint16_t pc = zig_popcount_big(val_ptr, is_signed, elem_bits);
- if (elem_bits <= 128) {
- ((uint8_t *)result)[index] = (uint8_t)pc;
- } else {
- ((uint16_t *)result)[index] = pc;
- }
- val_ptr += elem_bytes;
- index += 1;
- }
-}
-
/* ======================== Special Case Intrinsics ========================= */
#if (_MSC_VER && _M_X64) || defined(__x86_64__)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index b8606b1a17..5e92a6f76c 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -444,8 +444,8 @@ pub const Function = struct {
return f.object.dg.renderType(w, t);
}
- fn renderIntCast(f: *Function, w: anytype, dest_ty: Type, src: CValue, src_ty: Type, location: ValueRenderLocation) !void {
- return f.object.dg.renderIntCast(w, dest_ty, .{ .c_value = .{ .f = f, .value = src } }, src_ty, location);
+ fn renderIntCast(f: *Function, w: anytype, dest_ty: Type, src: CValue, v: Vectorizer, src_ty: Type, location: ValueRenderLocation) !void {
+ return f.object.dg.renderIntCast(w, dest_ty, .{ .c_value = .{ .f = f, .value = src, .v = v } }, src_ty, location);
}
fn fmtIntLiteral(f: *Function, ty: Type, val: Value) !std.fmt.Formatter(formatIntLiteral) {
@@ -1593,6 +1593,7 @@ pub const DeclGen = struct {
c_value: struct {
f: *Function,
value: CValue,
+ v: Vectorizer,
},
value: struct {
value: Value,
@@ -1602,6 +1603,7 @@ pub const DeclGen = struct {
switch (self.*) {
.c_value => |v| {
try v.f.writeCValue(w, v.value, location);
+ try v.v.elem(v.f, w);
},
.value => |v| {
try dg.renderValue(w, value_ty, v.value, location);
@@ -1887,7 +1889,6 @@ pub const DeclGen = struct {
if (cty.isFloat()) cty.floatActiveBits(dg.module.getTarget()) else dg.byteSize(cty) * 8,
}),
.array => try writer.writeAll("big"),
- .vector => try writer.writeAll("vec"),
}
}
@@ -1895,34 +1896,19 @@ pub const DeclGen = struct {
switch (info) {
.none => {},
.bits => {
- const cty = try dg.typeToCType(ty, .complete);
- if (cty.castTag(.vector)) |pl| {
- var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = pl.data.len };
- try writer.print(", {}", .{try dg.fmtIntLiteral(
- Type.u32,
- Value.initPayload(&len_pl.base),
- .FunctionArgument,
- )});
- }
-
const target = dg.module.getTarget();
- const elem_ty = ty.shallowElemType();
- const elem_info = if (elem_ty.isAbiInt())
- elem_ty.intInfo(target)
- else
- std.builtin.Type.Int{
- .signedness = .unsigned,
- .bits = @intCast(u16, elem_ty.bitSize(target)),
- };
- switch (cty.tag()) {
- else => {},
- .array, .vector => try writer.print(", {}", .{elem_info.signedness == .signed}),
- }
+ const int_info = if (ty.isAbiInt()) ty.intInfo(target) else std.builtin.Type.Int{
+ .signedness = .unsigned,
+ .bits = @intCast(u16, ty.bitSize(target)),
+ };
- var bits_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = elem_info.bits };
+ const cty = try dg.typeToCType(ty, .complete);
+ if (cty.tag() == .array) try writer.print(", {}", .{int_info.signedness == .signed});
+
+ var bits_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = int_info.bits };
try writer.print(", {}", .{try dg.fmtIntLiteral(switch (cty.tag()) {
else => Type.u8,
- .array, .vector => Type.u16,
+ .array => Type.u16,
}, Value.initPayload(&bits_pl.base), .FunctionArgument)});
},
}
@@ -2786,10 +2772,10 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.div_trunc, .div_exact => try airBinOp(f, inst, "/", "div_trunc", .none),
.rem => blk: {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const lhs_ty = f.air.typeOf(bin_op.lhs);
+ const lhs_scalar_ty = f.air.typeOf(bin_op.lhs).scalarType();
// For binary operations @TypeOf(lhs)==@TypeOf(rhs),
// so we only check one.
- break :blk if (lhs_ty.isInt())
+ break :blk if (lhs_scalar_ty.isInt())
try airBinOp(f, inst, "%", "rem", .none)
else
try airBinFloatOp(f, inst, "fmod");
@@ -2833,10 +2819,10 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.slice => try airSlice(f, inst),
- .cmp_gt => try airCmpOp(f, inst, .gt),
- .cmp_gte => try airCmpOp(f, inst, .gte),
- .cmp_lt => try airCmpOp(f, inst, .lt),
- .cmp_lte => try airCmpOp(f, inst, .lte),
+ .cmp_gt => try airCmpOp(f, inst, f.air.instructions.items(.data)[inst].bin_op, .gt),
+ .cmp_gte => try airCmpOp(f, inst, f.air.instructions.items(.data)[inst].bin_op, .gte),
+ .cmp_lt => try airCmpOp(f, inst, f.air.instructions.items(.data)[inst].bin_op, .lt),
+ .cmp_lte => try airCmpOp(f, inst, f.air.instructions.items(.data)[inst].bin_op, .lte),
.cmp_eq => try airEquality(f, inst, .eq),
.cmp_neq => try airEquality(f, inst, .neq),
@@ -2844,7 +2830,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.cmp_vector => blk: {
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.VectorCmp, ty_pl.payload).data;
- break :blk try airCmpBuiltinCall(f, inst, extra, extra.compareOperator(), .operator, .bits,);
+ break :blk try airCmpOp(f, inst, extra, extra.compareOperator());
},
.cmp_lt_errors_len => try airCmpLtErrorsLen(f, inst),
@@ -3294,7 +3280,10 @@ fn airArg(f: *Function, inst: Air.Inst.Index) !CValue {
fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- const ptr_info = f.air.typeOf(ty_op.operand).ptrInfo().data;
+
+ const ptr_ty = f.air.typeOf(ty_op.operand);
+ const ptr_scalar_ty = ptr_ty.scalarType();
+ const ptr_info = ptr_scalar_ty.ptrInfo().data;
const src_ty = ptr_info.pointee_type;
if (!src_ty.hasRuntimeBitsIgnoreComptime() or
@@ -3312,16 +3301,19 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const is_aligned = ptr_info.@"align" == 0 or ptr_info.@"align" >= src_ty.abiAlignment(target);
const is_array = lowersToArray(src_ty, target);
const need_memcpy = !is_aligned or is_array;
- const writer = f.object.writer();
+ const writer = f.object.writer();
const local = try f.allocLocal(inst, src_ty);
+ const v = try Vectorizer.start(f, inst, writer, ptr_ty);
if (need_memcpy) {
try writer.writeAll("memcpy(");
if (!is_array) try writer.writeByte('&');
- try f.writeCValue(writer, local, .FunctionArgument);
+ try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(", (const char *)");
try f.writeCValue(writer, operand, .Other);
+ try v.elem(f, writer);
try writer.writeAll(", sizeof(");
try f.renderType(writer, src_ty);
try writer.writeAll("))");
@@ -3351,6 +3343,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const field_ty = Type.initPayload(&field_pl.base);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = (");
try f.renderType(writer, src_ty);
try writer.writeAll(")zig_wrap_");
@@ -3369,16 +3362,21 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeByte('(');
try f.writeCValueDeref(writer, operand);
+ try v.elem(f, writer);
try writer.print(", {})", .{try f.fmtIntLiteral(bit_offset_ty, bit_offset_val)});
if (cant_cast) try writer.writeByte(')');
try f.object.dg.renderBuiltinInfo(writer, field_ty, .bits);
try writer.writeByte(')');
} else {
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
try f.writeCValueDeref(writer, operand);
+ try v.elem(f, writer);
}
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -3444,15 +3442,22 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue {
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
- const writer = f.object.writer();
- const inst_ty = f.air.typeOfIndex(inst);
- const local = try f.allocLocal(inst, inst_ty);
- const operand_ty = f.air.typeOf(ty_op.operand);
+ const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
+ const operand_ty = f.air.typeOf(ty_op.operand);
+ const scalar_ty = operand_ty.scalarType();
+
+ const writer = f.object.writer();
+ const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
- try f.renderIntCast(writer, inst_ty, operand, operand_ty, .Other);
+ try f.renderIntCast(writer, inst_scalar_ty, operand, v, scalar_ty, .Other);
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -3578,7 +3583,10 @@ fn storeUndefined(f: *Function, lhs_child_ty: Type, dest_ptr: CValue) !CValue {
fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
// *a = b;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const ptr_info = f.air.typeOf(bin_op.lhs).ptrInfo().data;
+
+ const ptr_ty = f.air.typeOf(bin_op.lhs);
+ const ptr_scalar_ty = ptr_ty.scalarType();
+ const ptr_info = ptr_scalar_ty.ptrInfo().data;
if (!ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime()) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
@@ -3601,11 +3609,13 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
ptr_info.@"align" >= ptr_info.pointee_type.abiAlignment(target);
const is_array = lowersToArray(ptr_info.pointee_type, target);
const need_memcpy = !is_aligned or is_array;
- const writer = f.object.writer();
const src_val = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
+ const writer = f.object.writer();
+ const v = try Vectorizer.start(f, inst, writer, ptr_ty);
+
if (need_memcpy) {
// For this memcpy to safely work we need the rhs to have the same
// underlying type as the lhs (i.e. they must both be arrays of the same underlying type).
@@ -3626,9 +3636,11 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("memcpy((char *)");
try f.writeCValue(writer, ptr_val, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", ");
if (!is_array) try writer.writeByte('&');
try f.writeCValue(writer, array_src, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", sizeof(");
try f.renderType(writer, src_ty);
try writer.writeAll("))");
@@ -3672,12 +3684,14 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
const mask_val = Value.initPayload(&mask_pl.base);
try f.writeCValueDeref(writer, ptr_val);
+ try v.elem(f, writer);
try writer.writeAll(" = zig_or_");
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeAll("(zig_and_");
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeByte('(');
try f.writeCValueDeref(writer, ptr_val);
+ try v.elem(f, writer);
try writer.print(", {x}), zig_shl_", .{try f.fmtIntLiteral(host_ty, mask_val)});
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeByte('(');
@@ -3699,14 +3713,19 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeByte(')');
}
try f.writeCValue(writer, src_val, .Other);
+ try v.elem(f, writer);
if (cant_cast) try writer.writeByte(')');
try writer.print(", {}))", .{try f.fmtIntLiteral(bit_offset_ty, bit_offset_val)});
} else {
try f.writeCValueDeref(writer, ptr_val);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
try f.writeCValue(writer, src_val, .Other);
+ try v.elem(f, writer);
}
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
return .none;
}
@@ -3724,51 +3743,39 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info:
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const inst_ty = f.air.typeOfIndex(inst);
- const vector_ty = f.air.typeOf(bin_op.lhs);
- const scalar_ty = vector_ty.scalarType();
+ const operand_ty = f.air.typeOf(bin_op.lhs);
+ const scalar_ty = operand_ty.scalarType();
+
const w = f.object.writer();
-
const local = try f.allocLocal(inst, inst_ty);
-
- switch (vector_ty.zigTypeTag()) {
- .Vector => {
- try w.writeAll("zig_v");
- try w.writeAll(operation);
- try w.writeAll("o_");
- try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty);
- try w.writeAll("(");
- try f.writeCValueMember(w, local, .{ .field = 1 });
- try w.writeAll(", ");
- try f.writeCValueMember(w, local, .{ .field = 0 });
- try w.print(", {d}, ", .{vector_ty.vectorLen()});
- },
- else => {
- try f.writeCValueMember(w, local, .{ .field = 1 });
- try w.writeAll(" = zig_");
- try w.writeAll(operation);
- try w.writeAll("o_");
- try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty);
- try w.writeAll("(&");
- try f.writeCValueMember(w, local, .{ .field = 0 });
- try w.writeAll(", ");
- },
- }
-
+ const v = try Vectorizer.start(f, inst, w, operand_ty);
+ try f.writeCValueMember(w, local, .{ .field = 1 });
+ try v.elem(f, w);
+ try w.writeAll(" = zig_");
+ try w.writeAll(operation);
+ try w.writeAll("o_");
+ try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty);
+ try w.writeAll("(&");
+ try f.writeCValueMember(w, local, .{ .field = 0 });
+ try v.elem(f, w);
+ try w.writeAll(", ");
try f.writeCValue(w, lhs, .FunctionArgument);
+ try v.elem(f, w);
try w.writeAll(", ");
try f.writeCValue(w, rhs, .FunctionArgument);
+ try v.elem(f, w);
try f.object.dg.renderBuiltinInfo(w, scalar_ty, info);
try w.writeAll(");\n");
+ try v.end(f, inst, w);
return local;
}
fn airNot(f: *Function, inst: Air.Inst.Index) !CValue {
- const inst_ty = f.air.typeOfIndex(inst);
- if (inst_ty.tag() != .bool)
- return try airUnBuiltinCall(f, inst, "not", .bits);
-
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const operand_ty = f.air.typeOf(ty_op.operand);
+ const scalar_ty = operand_ty.scalarType();
+ if (scalar_ty.tag() != .bool) return try airUnBuiltinCall(f, inst, "not", .bits);
if (f.liveness.isUnused(inst)) {
try reap(f, inst, &.{ty_op.operand});
@@ -3778,14 +3785,20 @@ fn airNot(f: *Function, inst: Air.Inst.Index) !CValue {
const op = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
+ const inst_ty = f.air.typeOfIndex(inst);
+
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
-
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
try writer.writeByte('!');
try f.writeCValue(writer, op, .Other);
+ try v.elem(f, writer);
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -3798,71 +3811,89 @@ fn airBinOp(
) !CValue {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const operand_ty = f.air.typeOf(bin_op.lhs);
+ const scalar_ty = operand_ty.scalarType();
const target = f.object.dg.module.getTarget();
- if ((operand_ty.isInt() and operand_ty.bitSize(target) > 64) or operand_ty.isRuntimeFloat())
+ if ((scalar_ty.isInt() and scalar_ty.bitSize(target) > 64) or scalar_ty.isRuntimeFloat())
return try airBinBuiltinCall(f, inst, operation, info);
- const lhs = try f.resolveInst(bin_op.lhs);
- const rhs = try f.resolveInst(bin_op.rhs);
-
- try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
-
- if (f.liveness.isUnused(inst)) return .none;
-
- const inst_ty = f.air.typeOfIndex(inst);
-
- const writer = f.object.writer();
- const local = try f.allocLocal(inst, inst_ty);
- try f.writeCValue(writer, local, .Other);
- try writer.writeAll(" = ");
- try f.writeCValue(writer, lhs, .Other);
- try writer.writeByte(' ');
- try writer.writeAll(operator);
- try writer.writeByte(' ');
- try f.writeCValue(writer, rhs, .Other);
- try writer.writeAll(";\n");
-
- return local;
-}
-
-fn airCmpOp(f: *Function, inst: Air.Inst.Index, operator: std.math.CompareOperator) !CValue {
- const bin_op = f.air.instructions.items(.data)[inst].bin_op;
-
if (f.liveness.isUnused(inst)) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
- const operand_ty = f.air.typeOf(bin_op.lhs);
- const target = f.object.dg.module.getTarget();
- const operand_bits = operand_ty.bitSize(target);
- if (operand_ty.isInt() and operand_bits > 64)
- return airCmpBuiltinCall(
- f,
- inst,
- bin_op,
- operator,
- .cmp,
- if (operand_bits > 128) .bits else .none,
- );
- if (operand_ty.isRuntimeFloat())
- return airCmpBuiltinCall(f, inst, bin_op, operator, .operator, .none);
-
- const inst_ty = f.air.typeOfIndex(inst);
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
+ const inst_ty = f.air.typeOfIndex(inst);
+
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
try f.writeCValue(writer, lhs, .Other);
+ try v.elem(f, writer);
+ try writer.writeByte(' ');
+ try writer.writeAll(operator);
+ try writer.writeByte(' ');
+ try f.writeCValue(writer, rhs, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
+ return local;
+}
+
+fn airCmpOp(
+ f: *Function,
+ inst: Air.Inst.Index,
+ data: anytype,
+ operator: std.math.CompareOperator,
+) !CValue {
+ if (f.liveness.isUnused(inst)) {
+ try reap(f, inst, &.{ data.lhs, data.rhs });
+ return .none;
+ }
+
+ const operand_ty = f.air.typeOf(data.lhs);
+ const scalar_ty = operand_ty.scalarType();
+
+ const target = f.object.dg.module.getTarget();
+ const scalar_bits = scalar_ty.bitSize(target);
+ if (scalar_ty.isInt() and scalar_bits > 64)
+ return airCmpBuiltinCall(
+ f,
+ inst,
+ data,
+ operator,
+ .cmp,
+ if (scalar_bits > 128) .bits else .none,
+ );
+ if (scalar_ty.isRuntimeFloat())
+ return airCmpBuiltinCall(f, inst, data, operator, .operator, .none);
+
+ const inst_ty = f.air.typeOfIndex(inst);
+ const lhs = try f.resolveInst(data.lhs);
+ const rhs = try f.resolveInst(data.rhs);
+ try reap(f, inst, &.{ data.lhs, data.rhs });
+
+ const writer = f.object.writer();
+ const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
+ try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(" = ");
+ try f.writeCValue(writer, lhs, .Other);
+ try v.elem(f, writer);
try writer.writeByte(' ');
try writer.writeAll(compareOperatorC(operator));
try writer.writeByte(' ');
try f.writeCValue(writer, rhs, .Other);
+ try v.elem(f, writer);
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
return local;
}
@@ -3974,11 +4005,14 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const inst_ty = f.air.typeOfIndex(inst);
- const elem_ty = inst_ty.elemType2();
+ const inst_scalar_ty = inst_ty.scalarType();
+ const elem_ty = inst_scalar_ty.elemType2();
const local = try f.allocLocal(inst, inst_ty);
const writer = f.object.writer();
+ const v = try Vectorizer.start(f, inst, writer, inst_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
if (elem_ty.hasRuntimeBitsIgnoreComptime()) {
@@ -3986,19 +4020,26 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
// results in a NULL pointer, or if LHS is NULL. The operation is only UB
// if the result is NULL and then dereferenced.
try writer.writeByte('(');
- try f.renderType(writer, inst_ty);
+ try f.renderType(writer, inst_scalar_ty);
try writer.writeAll(")(((uintptr_t)");
try f.writeCValue(writer, lhs, .Other);
+ try v.elem(f, writer);
try writer.writeAll(") ");
try writer.writeByte(operator);
try writer.writeAll(" (");
try f.writeCValue(writer, rhs, .Other);
+ try v.elem(f, writer);
try writer.writeAll("*sizeof(");
try f.renderType(writer, elem_ty);
try writer.writeAll(")))");
- } else try f.writeCValue(writer, lhs, .Initializer);
+ } else {
+ try f.writeCValue(writer, lhs, .Other);
+ try v.elem(f, writer);
+ }
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -4011,10 +4052,12 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons
}
const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
+
const target = f.object.dg.module.getTarget();
- if (inst_ty.isInt() and inst_ty.bitSize(target) > 64)
+ if (inst_scalar_ty.isInt() and inst_scalar_ty.bitSize(target) > 64)
return try airBinBuiltinCall(f, inst, operation[1..], .none);
- if (inst_ty.isRuntimeFloat())
+ if (inst_scalar_ty.isRuntimeFloat())
return try airBinFloatOp(f, inst, operation);
const lhs = try f.resolveInst(bin_op.lhs);
@@ -4023,19 +4066,26 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, inst_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
// (lhs <> rhs) ? lhs : rhs
try writer.writeAll(" = (");
try f.writeCValue(writer, lhs, .Other);
+ try v.elem(f, writer);
try writer.writeByte(' ');
try writer.writeByte(operator);
try writer.writeByte(' ');
try f.writeCValue(writer, rhs, .Other);
+ try v.elem(f, writer);
try writer.writeAll(") ? ");
try f.writeCValue(writer, lhs, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" : ");
try f.writeCValue(writer, rhs, .Other);
+ try v.elem(f, writer);
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
return local;
}
@@ -6002,30 +6052,35 @@ fn airUnBuiltinCall(
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
const operand_ty = f.air.typeOf(ty_op.operand);
+ const scalar_ty = operand_ty.scalarType();
- const inst_cty = try f.typeToCType(inst_ty, .complete);
- const ref_ret = switch (inst_cty.tag()) {
- else => false,
- .array, .vector => true,
- };
+ const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
+ const ref_ret = inst_scalar_cty.tag() == .array;
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
if (!ref_ret) {
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
}
try writer.print("zig_{s}_", .{operation});
- try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
if (ref_ret) {
try f.writeCValue(writer, local, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", ");
}
try f.writeCValue(writer, operand, .FunctionArgument);
- try f.object.dg.renderBuiltinInfo(writer, operand_ty, info);
+ try v.elem(f, writer);
+ try f.object.dg.renderBuiltinInfo(writer, scalar_ty, info);
try writer.writeAll(");\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -6047,21 +6102,38 @@ fn airBinBuiltinCall(
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
const operand_ty = f.air.typeOf(bin_op.lhs);
+ const scalar_ty = operand_ty.scalarType();
+
+ const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
+ const ref_ret = inst_scalar_cty.tag() == .array;
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
- try f.writeCValue(writer, local, .Other);
- try writer.writeAll(" = zig_");
- try writer.writeAll(operation);
- try writer.writeByte('_');
- try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
+ if (!ref_ret) {
+ try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(" = ");
+ }
+ try writer.print("zig_{s}_", .{operation});
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
+ if (ref_ret) {
+ try f.writeCValue(writer, local, .FunctionArgument);
+ try v.elem(f, writer);
+ try writer.writeAll(", ");
+ }
try f.writeCValue(writer, lhs, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", ");
try f.writeCValue(writer, rhs, .FunctionArgument);
- try f.object.dg.renderBuiltinInfo(writer, operand_ty, info);
+ try v.elem(f, writer);
+ try f.object.dg.renderBuiltinInfo(writer, scalar_ty, info);
try writer.writeAll(");\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -6073,45 +6145,56 @@ fn airCmpBuiltinCall(
operation: enum { cmp, operator },
info: BuiltinInfo,
) !CValue {
- const inst_ty = f.air.typeOfIndex(inst);
- const operand_ty = f.air.typeOf(data.lhs);
+ if (f.liveness.isUnused(inst)) {
+ try reap(f, inst, &.{ data.lhs, data.rhs });
+ return .none;
+ }
const lhs = try f.resolveInst(data.lhs);
const rhs = try f.resolveInst(data.rhs);
try reap(f, inst, &.{ data.lhs, data.rhs });
- const inst_cty = try f.typeToCType(inst_ty, .complete);
- const ref_ret = switch (inst_cty.tag()) {
- else => false,
- .array, .vector => true,
- };
+ const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
+ const operand_ty = f.air.typeOf(data.lhs);
+ const scalar_ty = operand_ty.scalarType();
+
+ const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
+ const ref_ret = inst_scalar_cty.tag() == .array;
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
if (!ref_ret) {
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
}
try writer.print("zig_{s}_", .{switch (operation) {
else => @tagName(operation),
.operator => compareOperatorAbbrev(operator),
}});
- try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
if (ref_ret) {
try f.writeCValue(writer, local, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", ");
}
try f.writeCValue(writer, lhs, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", ");
try f.writeCValue(writer, rhs, .FunctionArgument);
- try f.object.dg.renderBuiltinInfo(writer, operand_ty, info);
+ try v.elem(f, writer);
+ try f.object.dg.renderBuiltinInfo(writer, scalar_ty, info);
try writer.writeByte(')');
if (!ref_ret) try writer.print(" {s} {}", .{
compareOperatorC(operator),
try f.fmtIntLiteral(Type.initTag(.i32), Value.zero),
});
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -6498,65 +6581,35 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
const operand = try f.resolveInst(reduce.operand);
try reap(f, inst, &.{reduce.operand});
const operand_ty = f.air.typeOf(reduce.operand);
- const vector_len = operand_ty.vectorLen();
const writer = f.object.writer();
- const Op = union(enum) {
- call_fn: []const u8,
+ const op: union(enum) {
+ float_op: []const u8,
+ builtin: []const u8,
infix: []const u8,
ternary: []const u8,
- };
- var fn_name_buf: [64]u8 = undefined;
- const op: Op = switch (reduce.operation) {
+ } = switch (reduce.operation) {
.And => .{ .infix = " &= " },
.Or => .{ .infix = " |= " },
.Xor => .{ .infix = " ^= " },
.Min => switch (scalar_ty.zigTypeTag()) {
- .Int => Op{ .ternary = " < " },
- .Float => op: {
- const float_bits = scalar_ty.floatBits(target);
- break :op Op{
- .call_fn = std.fmt.bufPrintZ(&fn_name_buf, "{s}fmin{s}", .{
- libcFloatPrefix(float_bits), libcFloatSuffix(float_bits),
- }) catch unreachable,
- };
- },
+ .Int => .{ .ternary = " < " },
+ .Float => .{ .float_op = "fmin" },
else => unreachable,
},
.Max => switch (scalar_ty.zigTypeTag()) {
- .Int => Op{ .ternary = " > " },
- .Float => op: {
- const float_bits = scalar_ty.floatBits(target);
- break :op Op{
- .call_fn = std.fmt.bufPrintZ(&fn_name_buf, "{s}fmax{s}", .{
- libcFloatPrefix(float_bits), libcFloatSuffix(float_bits),
- }) catch unreachable,
- };
- },
+ .Int => .{ .ternary = " > " },
+ .Float => .{ .float_op = "fmax" },
else => unreachable,
},
.Add => switch (scalar_ty.zigTypeTag()) {
- .Int => Op{ .infix = " += " },
- .Float => op: {
- const float_bits = scalar_ty.floatBits(target);
- break :op Op{
- .call_fn = std.fmt.bufPrintZ(&fn_name_buf, "__add{s}f3", .{
- compilerRtFloatAbbrev(float_bits),
- }) catch unreachable,
- };
- },
+ .Int => .{ .infix = " += " },
+ .Float => .{ .builtin = "add" },
else => unreachable,
},
.Mul => switch (scalar_ty.zigTypeTag()) {
- .Int => Op{ .infix = " *= " },
- .Float => op: {
- const float_bits = scalar_ty.floatBits(target);
- break :op Op{
- .call_fn = std.fmt.bufPrintZ(&fn_name_buf, "__mul{s}f3", .{
- compilerRtFloatAbbrev(float_bits),
- }) catch unreachable,
- };
- },
+ .Int => .{ .infix = " *= " },
+ .Float => .{ .builtin = "mul" },
else => unreachable,
},
};
@@ -6572,75 +6625,94 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
// }
// break :reduce accum;
// }
- const it = try f.allocLocal(inst, Type.usize);
- try f.writeCValue(writer, it, .Other);
- try writer.writeAll(" = 0;\n");
const accum = try f.allocLocal(inst, scalar_ty);
try f.writeCValue(writer, accum, .Other);
try writer.writeAll(" = ");
- const init_val = switch (reduce.operation) {
- .And, .Or, .Xor, .Add => "0",
+ var arena = std.heap.ArenaAllocator.init(f.object.dg.gpa);
+ defer arena.deinit();
+
+ const ExpectedContents = union {
+ u: Value.Payload.U64,
+ i: Value.Payload.I64,
+ f16: Value.Payload.Float_16,
+ f32: Value.Payload.Float_32,
+ f64: Value.Payload.Float_64,
+ f80: Value.Payload.Float_80,
+ f128: Value.Payload.Float_128,
+ };
+ var stack align(@alignOf(ExpectedContents)) =
+ std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator());
+
+ try f.object.dg.renderValue(writer, scalar_ty, switch (reduce.operation) {
+ .Or, .Xor, .Add => Value.zero,
+ .And => switch (scalar_ty.zigTypeTag()) {
+ .Bool => Value.one,
+ else => switch (scalar_ty.intInfo(target).signedness) {
+ .unsigned => try scalar_ty.maxInt(stack.get(), target),
+ .signed => Value.negative_one,
+ },
+ },
.Min => switch (scalar_ty.zigTypeTag()) {
- .Int => "TODO_intmax",
- .Float => "TODO_nan",
+ .Bool => Value.one,
+ .Int => try scalar_ty.maxInt(stack.get(), target),
+ .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target),
else => unreachable,
},
.Max => switch (scalar_ty.zigTypeTag()) {
- .Int => "TODO_intmin",
- .Float => "TODO_nan",
+ .Bool => Value.zero,
+ .Int => try scalar_ty.minInt(stack.get(), target),
+ .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target),
else => unreachable,
},
- .Mul => "1",
- };
- try writer.writeAll(init_val);
- try writer.writeAll(";");
- try f.object.indent_writer.insertNewline();
- try writer.writeAll("for (;");
- try f.writeCValue(writer, it, .Other);
- try writer.print("<{d};++", .{vector_len});
- try f.writeCValue(writer, it, .Other);
- try writer.writeAll(") ");
- try f.writeCValue(writer, accum, .Other);
+ .Mul => Value.one,
+ }, .Initializer);
+ try writer.writeAll(";\n");
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
+ try f.writeCValue(writer, accum, .Other);
switch (op) {
- .call_fn => |fn_name| {
- try writer.print(" = {s}(", .{fn_name});
+ .float_op => |operation| {
+ try writer.writeAll(" = zig_libc_name_");
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
+ try writer.print("({s})(", .{operation});
try f.writeCValue(writer, accum, .FunctionArgument);
try writer.writeAll(", ");
try f.writeCValue(writer, operand, .Other);
- try writer.writeAll("[");
- try f.writeCValue(writer, it, .Other);
- try writer.writeAll("])");
+ try v.elem(f, writer);
+ try writer.writeByte(')');
+ },
+ .builtin => |operation| {
+ try writer.print(" = zig_{s}_", .{operation});
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
+ try writer.writeByte('(');
+ try f.writeCValue(writer, accum, .FunctionArgument);
+ try writer.writeAll(", ");
+ try f.writeCValue(writer, operand, .Other);
+ try v.elem(f, writer);
+ try writer.writeByte(')');
},
.infix => |ass| {
try writer.writeAll(ass);
try f.writeCValue(writer, operand, .Other);
- try writer.writeAll("[");
- try f.writeCValue(writer, it, .Other);
- try writer.writeAll("]");
+ try v.elem(f, writer);
},
.ternary => |cmp| {
try writer.writeAll(" = ");
try f.writeCValue(writer, accum, .Other);
try writer.writeAll(cmp);
try f.writeCValue(writer, operand, .Other);
- try writer.writeAll("[");
- try f.writeCValue(writer, it, .Other);
- try writer.writeAll("] ? ");
+ try v.elem(f, writer);
+ try writer.writeAll(" ? ");
try f.writeCValue(writer, accum, .Other);
try writer.writeAll(" : ");
try f.writeCValue(writer, operand, .Other);
- try writer.writeAll("[");
- try f.writeCValue(writer, it, .Other);
- try writer.writeAll("]");
+ try v.elem(f, writer);
},
}
-
try writer.writeAll(";\n");
-
- try freeLocal(f, inst, it.new_local, 0);
+ try v.end(f, inst, writer);
return accum;
}
@@ -6774,7 +6846,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeByte('(');
if (inst_ty.isAbiInt() and (field_ty.isAbiInt() or field_ty.isPtrAtRuntime())) {
- try f.renderIntCast(writer, inst_ty, element, field_ty, .FunctionArgument);
+ try f.renderIntCast(writer, inst_ty, element, .{}, field_ty, .FunctionArgument);
} else {
try writer.writeByte('(');
try f.renderType(writer, inst_ty);
@@ -6916,7 +6988,6 @@ fn airWasmMemoryGrow(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue {
- const inst_ty = f.air.typeOfIndex(inst);
const un_op = f.air.instructions.items(.data)[inst].un_op;
if (f.liveness.isUnused(inst)) {
try reap(f, inst, &.{un_op});
@@ -6925,16 +6996,23 @@ fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue {
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
+
const operand_ty = f.air.typeOf(un_op);
+ const scalar_ty = operand_ty.scalarType();
const writer = f.object.writer();
- const local = try f.allocLocal(inst, inst_ty);
+ const local = try f.allocLocal(inst, operand_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = zig_neg_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
try f.writeCValue(writer, operand, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(");\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -6944,19 +7022,28 @@ fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVal
try reap(f, inst, &.{un_op});
return .none;
}
+
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
- const writer = f.object.writer();
+
const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
+
+ const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, inst_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = zig_libc_name_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, inst_scalar_ty);
try writer.writeByte('(');
try writer.writeAll(operation);
try writer.writeAll(")(");
try f.writeCValue(writer, operand, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(");\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -6966,23 +7053,32 @@ fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVa
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
+
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- const writer = f.object.writer();
const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
+
+ const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, inst_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = zig_libc_name_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, inst_scalar_ty);
try writer.writeByte('(');
try writer.writeAll(operation);
try writer.writeAll(")(");
try f.writeCValue(writer, lhs, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", ");
try f.writeCValue(writer, rhs, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(");\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -6993,23 +7089,34 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand });
return .none;
}
- const inst_ty = f.air.typeOfIndex(inst);
+
const mulend1 = try f.resolveInst(bin_op.lhs);
const mulend2 = try f.resolveInst(bin_op.rhs);
const addend = try f.resolveInst(pl_op.operand);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand });
+
+ const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
+
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, inst_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = zig_libc_name_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, inst_scalar_ty);
try writer.writeAll("(fma)(");
try f.writeCValue(writer, mulend1, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", ");
try f.writeCValue(writer, mulend2, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", ");
try f.writeCValue(writer, addend, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(");\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -7510,6 +7617,47 @@ fn formatIntLiteral(
try data.cty.renderLiteralSuffix(writer);
}
+const Vectorizer = struct {
+ index: CValue = .none,
+
+ pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorizer {
+ return if (ty.zigTypeTag() == .Vector) index: {
+ var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = ty.vectorLen() };
+
+ const local = try f.allocLocal(inst, Type.usize);
+
+ try writer.writeAll("for (");
+ try f.writeCValue(writer, local, .Other);
+ try writer.print(" = {d}; ", .{try f.fmtIntLiteral(Type.usize, Value.zero)});
+ try f.writeCValue(writer, local, .Other);
+ try writer.print(" < {d}; ", .{
+ try f.fmtIntLiteral(Type.usize, Value.initPayload(&len_pl.base)),
+ });
+ try f.writeCValue(writer, local, .Other);
+ try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(Type.usize, Value.one)});
+ f.object.indent_writer.pushIndent();
+
+ break :index .{ .index = local };
+ } else .{};
+ }
+
+ pub fn elem(self: Vectorizer, f: *Function, writer: anytype) !void {
+ if (self.index != .none) {
+ try writer.writeByte('[');
+ try f.writeCValue(writer, self.index, .Other);
+ try writer.writeByte(']');
+ }
+ }
+
+ pub fn end(self: Vectorizer, f: *Function, inst: Air.Inst.Index, writer: anytype) !void {
+ if (self.index != .none) {
+ f.object.indent_writer.popIndent();
+ try writer.writeAll("}\n");
+ try freeLocal(f, inst, self.index.new_local, 0);
+ }
+ }
+};
+
fn isByRef(ty: Type) bool {
_ = ty;
return false;
diff --git a/src/type.zig b/src/type.zig
index 9e501d893c..15525f14eb 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -4213,7 +4213,7 @@ pub const Type = extern union {
};
}
- pub fn shallowElemType(child_ty: Type) Type {
+ fn shallowElemType(child_ty: Type) Type {
return switch (child_ty.zigTypeTag()) {
.Array, .Vector => child_ty.childType(),
else => child_ty,
diff --git a/src/value.zig b/src/value.zig
index 4a5683df36..00bf59ca38 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -3319,7 +3319,7 @@ pub const Value = extern union {
}
}
- fn floatToValue(float: f128, arena: Allocator, dest_ty: Type, target: Target) !Value {
+ pub fn floatToValue(float: f128, arena: Allocator, dest_ty: Type, target: Target) !Value {
switch (dest_ty.floatBits(target)) {
16 => return Value.Tag.float_16.create(arena, @floatCast(f16, float)),
32 => return Value.Tag.float_32.create(arena, @floatCast(f32, float)),
diff --git a/test/behavior/bitreverse.zig b/test/behavior/bitreverse.zig
index aa830144d1..80167b9a17 100644
--- a/test/behavior/bitreverse.zig
+++ b/test/behavior/bitreverse.zig
@@ -96,7 +96,6 @@ fn vector8() !void {
test "bitReverse vectors u8" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
@@ -115,7 +114,6 @@ fn vector16() !void {
test "bitReverse vectors u16" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
@@ -134,7 +132,6 @@ fn vector24() !void {
test "bitReverse vectors u24" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
diff --git a/test/behavior/byteswap.zig b/test/behavior/byteswap.zig
index fc385e0443..d173c13275 100644
--- a/test/behavior/byteswap.zig
+++ b/test/behavior/byteswap.zig
@@ -62,7 +62,6 @@ fn vector8() !void {
test "@byteSwap vectors u8" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
@@ -81,7 +80,6 @@ fn vector16() !void {
test "@byteSwap vectors u16" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
@@ -100,7 +98,6 @@ fn vector24() !void {
test "@byteSwap vectors u24" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig
index 927caa965b..f179cbe525 100644
--- a/test/behavior/cast.zig
+++ b/test/behavior/cast.zig
@@ -598,7 +598,6 @@ test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
test "vector casts" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig
index 7befa41380..f05901f7d9 100644
--- a/test/behavior/floatop.zig
+++ b/test/behavior/floatop.zig
@@ -141,7 +141,6 @@ fn testSqrt() !void {
test "@sqrt with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -234,7 +233,6 @@ fn testSin() !void {
test "@sin with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -275,7 +273,6 @@ fn testCos() !void {
test "@cos with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -315,7 +312,6 @@ fn testExp() !void {
test "@exp with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -355,7 +351,6 @@ fn testExp2() !void {
test "@exp2" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -409,7 +404,6 @@ test "@log with @vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
{
@@ -447,7 +441,6 @@ test "@log2 with vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
// https://github.com/ziglang/zig/issues/13681
if (builtin.zig_backend == .stage2_llvm and
builtin.cpu.arch == .aarch64 and
@@ -491,7 +484,6 @@ test "@log10 with vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
comptime try testLog10WithVectors();
try testLog10WithVectors();
@@ -537,7 +529,6 @@ fn testFabs() !void {
test "@fabs with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -660,7 +651,6 @@ fn testFloor() !void {
test "@floor with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -754,7 +744,6 @@ fn testCeil() !void {
test "@ceil with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -848,7 +837,6 @@ fn testTrunc() !void {
test "@trunc with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/maximum_minimum.zig b/test/behavior/maximum_minimum.zig
index 133a543d42..34a7d0976a 100644
--- a/test/behavior/maximum_minimum.zig
+++ b/test/behavior/maximum_minimum.zig
@@ -25,7 +25,6 @@ test "@max" {
test "@max on vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -75,7 +74,6 @@ test "@min for vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
diff --git a/test/behavior/muladd.zig b/test/behavior/muladd.zig
index a2d9e6d16d..218edc5a2d 100644
--- a/test/behavior/muladd.zig
+++ b/test/behavior/muladd.zig
@@ -100,7 +100,6 @@ fn vector16() !void {
}
test "vector f16" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -124,7 +123,6 @@ fn vector32() !void {
}
test "vector f32" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -148,7 +146,6 @@ fn vector64() !void {
}
test "vector f64" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -171,7 +168,6 @@ fn vector80() !void {
}
test "vector f80" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -195,7 +191,6 @@ fn vector128() !void {
}
test "vector f128" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index d885a7fabc..e74bcdad86 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -25,7 +25,6 @@ test "implicit cast vector to array - bool" {
test "vector wrap operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -116,7 +115,6 @@ test "vector float operators" {
test "vector bit operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -442,7 +440,6 @@ test "vector comparison operators" {
test "vector division operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -525,7 +522,6 @@ test "vector division operators" {
test "vector bitwise not operator" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -557,7 +553,6 @@ test "vector bitwise not operator" {
test "vector shift operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -651,7 +646,6 @@ test "vector shift operators" {
test "vector reduce operation" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -707,7 +701,7 @@ test "vector reduce operation" {
// LLVM 11 ERROR: Cannot select type
// https://github.com/ziglang/zig/issues/7138
- if (builtin.target.cpu.arch != .aarch64) {
+ if (builtin.zig_backend != .stage2_llvm or builtin.target.cpu.arch != .aarch64) {
try testReduce(.Min, [4]i64{ 1234567, -386, 0, 3 }, @as(i64, -386));
try testReduce(.Min, [4]u64{ 99, 9999, 9, 99999 }, @as(u64, 9));
}
@@ -725,7 +719,7 @@ test "vector reduce operation" {
// LLVM 11 ERROR: Cannot select type
// https://github.com/ziglang/zig/issues/7138
- if (builtin.target.cpu.arch != .aarch64) {
+ if (builtin.zig_backend != .stage2_llvm or builtin.target.cpu.arch != .aarch64) {
try testReduce(.Max, [4]i64{ 1234567, -386, 0, 3 }, @as(i64, 1234567));
try testReduce(.Max, [4]u64{ 99, 9999, 9, 99999 }, @as(u64, 99999));
}
@@ -773,14 +767,14 @@ test "vector reduce operation" {
// LLVM 11 ERROR: Cannot select type
// https://github.com/ziglang/zig/issues/7138
- if (false) {
- try testReduce(.Min, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, f16_nan);
- try testReduce(.Min, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, f32_nan);
- try testReduce(.Min, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, f64_nan);
+ if (builtin.zig_backend != .stage2_llvm) {
+ try testReduce(.Min, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, @as(f16, -1.9));
+ try testReduce(.Min, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, @as(f32, -1.9));
+ try testReduce(.Min, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, @as(f64, -1.9));
- try testReduce(.Max, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, f16_nan);
- try testReduce(.Max, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, f32_nan);
- try testReduce(.Max, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, f64_nan);
+ try testReduce(.Max, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, @as(f16, 100.0));
+ try testReduce(.Max, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, @as(f32, 100.0));
+ try testReduce(.Max, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, @as(f64, 100.0));
}
try testReduce(.Mul, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, f16_nan);
@@ -831,7 +825,6 @@ test "mask parameter of @shuffle is comptime scope" {
test "saturating add" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -863,7 +856,6 @@ test "saturating add" {
test "saturating subtraction" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -886,7 +878,6 @@ test "saturating subtraction" {
test "saturating multiplication" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -913,7 +904,6 @@ test "saturating multiplication" {
test "saturating shift-left" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -1047,7 +1037,6 @@ test "@mulWithOverflow" {
}
test "@shlWithOverflow" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -1202,7 +1191,6 @@ test "zero multiplicand" {
test "@intCast to u0" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
From 8f6da78fb1bfc9d5e8b3d5affd33cf6a62f5e8c7 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sun, 5 Mar 2023 00:30:55 -0500
Subject: [PATCH 026/294] CBE: implement vector element pointers
---
src/codegen/c.zig | 10 ++--------
src/codegen/c/type.zig | 2 +-
test/behavior/vector.zig | 3 ---
3 files changed, 3 insertions(+), 12 deletions(-)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 5e92a6f76c..60f93311a4 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -17,12 +17,6 @@ const LazySrcLoc = Module.LazySrcLoc;
const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
-const target_util = @import("../target.zig");
-const libcFloatPrefix = target_util.libcFloatPrefix;
-const libcFloatSuffix = target_util.libcFloatSuffix;
-const compilerRtFloatAbbrev = target_util.compilerRtFloatAbbrev;
-const compilerRtIntAbbrev = target_util.compilerRtIntAbbrev;
-
const BigIntLimb = std.math.big.Limb;
const BigInt = std.math.big.int;
@@ -3317,7 +3311,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll(", sizeof(");
try f.renderType(writer, src_ty);
try writer.writeAll("))");
- } else if (ptr_info.host_size != 0) {
+ } else if (ptr_info.host_size > 0 and ptr_info.vector_index == .none) {
var host_pl = Type.Payload.Bits{
.base = .{ .tag = .int_unsigned },
.data = ptr_info.host_size * 8,
@@ -3647,7 +3641,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
if (src_val == .constant) {
try freeLocal(f, inst, array_src.new_local, 0);
}
- } else if (ptr_info.host_size != 0) {
+ } else if (ptr_info.host_size > 0 and ptr_info.vector_index == .none) {
const host_bits = ptr_info.host_size * 8;
var host_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = host_bits };
const host_ty = Type.initPayload(&host_pl.base);
diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig
index 313fcc130c..038f53f186 100644
--- a/src/codegen/c/type.zig
+++ b/src/codegen/c/type.zig
@@ -1465,7 +1465,7 @@ pub const CType = extern union {
.base = .{ .tag = .int_unsigned },
.data = info.host_size * 8,
};
- const pointee_ty = if (info.host_size > 0)
+ const pointee_ty = if (info.host_size > 0 and info.vector_index == .none)
Type.initPayload(&host_int_pl.base)
else
info.pointee_type;
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index e74bcdad86..42befa9c0f 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -1118,7 +1118,6 @@ test "byte vector initialized in inline function" {
}
test "byte vector initialized in inline function" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -1233,7 +1232,6 @@ test "load packed vector element" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var x: @Vector(2, u15) = .{ 1, 4 };
try expect((&x[0]).* == 1);
@@ -1246,7 +1244,6 @@ test "store packed vector element" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var v = @Vector(4, u1){ 1, 1, 1, 1 };
try expectEqual(@Vector(4, u1){ 1, 1, 1, 1 }, v);
From ba69ee488baec677d6e206eb0670240b1c2167a6 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sun, 5 Mar 2023 00:44:27 -0500
Subject: [PATCH 027/294] CBE: implement vector truncate
---
src/codegen/c.zig | 34 ++++++++++++++++++++++------------
test/behavior/truncate.zig | 1 -
2 files changed, 22 insertions(+), 13 deletions(-)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 60f93311a4..3fea7c2ef2 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -3465,34 +3465,40 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const inst_ty = f.air.typeOfIndex(inst);
- const writer = f.object.writer();
- const local = try f.allocLocal(inst, inst_ty);
+ const inst_scalar_ty = inst_ty.scalarType();
const target = f.object.dg.module.getTarget();
- const dest_int_info = inst_ty.intInfo(target);
+ const dest_int_info = inst_scalar_ty.intInfo(target);
const dest_bits = dest_int_info.bits;
const dest_c_bits = toCIntBits(dest_int_info.bits) orelse
return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
const operand_ty = f.air.typeOf(ty_op.operand);
- const operand_int_info = operand_ty.intInfo(target);
+ const scalar_ty = operand_ty.scalarType();
+ const scalar_int_info = scalar_ty.intInfo(target);
+
+ const writer = f.object.writer();
+ const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
if (dest_c_bits < 64) {
try writer.writeByte('(');
- try f.renderType(writer, inst_ty);
+ try f.renderType(writer, inst_scalar_ty);
try writer.writeByte(')');
}
- const needs_lo = operand_int_info.bits > 64 and dest_bits <= 64;
+ const needs_lo = scalar_int_info.bits > 64 and dest_bits <= 64;
if (needs_lo) {
try writer.writeAll("zig_lo_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
}
if (dest_bits >= 8 and std.math.isPowerOfTwo(dest_bits)) {
try f.writeCValue(writer, operand, .Other);
+ try v.elem(f, writer);
} else switch (dest_int_info.signedness) {
.unsigned => {
var arena = std.heap.ArenaAllocator.init(f.object.dg.gpa);
@@ -3502,15 +3508,16 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
var stack align(@alignOf(ExpectedContents)) =
std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator());
- const mask_val = try inst_ty.maxInt(stack.get(), target);
+ const mask_val = try inst_scalar_ty.maxInt(stack.get(), target);
try writer.writeAll("zig_and_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
try f.writeCValue(writer, operand, .FunctionArgument);
- try writer.print(", {x})", .{try f.fmtIntLiteral(operand_ty, mask_val)});
+ try v.elem(f, writer);
+ try writer.print(", {x})", .{try f.fmtIntLiteral(scalar_ty, mask_val)});
},
.signed => {
- const c_bits = toCIntBits(operand_int_info.bits) orelse
+ const c_bits = toCIntBits(scalar_int_info.bits) orelse
return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
var shift_pl = Value.Payload.U64{
.base = .{ .tag = .int_u64 },
@@ -3519,7 +3526,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
const shift_val = Value.initPayload(&shift_pl.base);
try writer.writeAll("zig_shr_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
if (c_bits == 128) {
try writer.print("(zig_bitcast_i{d}(", .{c_bits});
} else {
@@ -3532,6 +3539,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.print("(uint{d}_t)", .{c_bits});
}
try f.writeCValue(writer, operand, .FunctionArgument);
+ try v.elem(f, writer);
if (c_bits == 128) try writer.writeByte(')');
try writer.print(", {})", .{try f.fmtIntLiteral(Type.u8, shift_val)});
if (c_bits == 128) try writer.writeByte(')');
@@ -3541,6 +3549,8 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
if (needs_lo) try writer.writeByte(')');
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
return local;
}
diff --git a/test/behavior/truncate.zig b/test/behavior/truncate.zig
index c81abebe68..e70d33eea2 100644
--- a/test/behavior/truncate.zig
+++ b/test/behavior/truncate.zig
@@ -60,7 +60,6 @@ test "truncate on comptime integer" {
}
test "truncate on vectors" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
From aac47079026d0daf4d5acac08b7d0ad1150002d0 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sun, 5 Mar 2023 01:23:21 -0500
Subject: [PATCH 028/294] CBE: implement splat
---
src/codegen/c.zig | 33 ++++++++++++++++++++++++++++-----
test/behavior/vector.zig | 1 -
2 files changed, 28 insertions(+), 6 deletions(-)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 3fea7c2ef2..f5309918bf 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -438,6 +438,10 @@ pub const Function = struct {
return f.object.dg.renderType(w, t);
}
+ fn renderCType(f: *Function, w: anytype, t: CType.Index) !void {
+ return f.object.dg.renderCType(w, t);
+ }
+
fn renderIntCast(f: *Function, w: anytype, dest_ty: Type, src: CValue, v: Vectorizer, src_ty: Type, location: ValueRenderLocation) !void {
return f.object.dg.renderIntCast(w, dest_ty, .{ .c_value = .{ .f = f, .value = src, .v = v } }, src_ty, location);
}
@@ -1576,9 +1580,12 @@ pub const DeclGen = struct {
/// | `renderType` | "uint8_t *" | "uint8_t *[10]" |
///
fn renderType(dg: *DeclGen, w: anytype, t: Type) error{ OutOfMemory, AnalysisFail }!void {
+ try dg.renderCType(w, try dg.typeToIndex(t, .complete));
+ }
+
+ fn renderCType(dg: *DeclGen, w: anytype, idx: CType.Index) error{ OutOfMemory, AnalysisFail }!void {
const store = &dg.ctypes.set;
const module = dg.module;
- const idx = try dg.typeToIndex(t, .complete);
_ = try renderTypePrefix(dg.decl_index, store.*, module, w, idx, .suffix, .{});
try renderTypeSuffix(dg.decl_index, store.*, module, w, idx, .suffix, .{});
}
@@ -6543,21 +6550,37 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue {
fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+
if (f.liveness.isUnused(inst)) {
try reap(f, inst, &.{ty_op.operand});
return .none;
}
- const inst_ty = f.air.typeOfIndex(inst);
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
+
+ const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
+ const inst_scalar_cty = try f.typeToIndex(inst_scalar_ty, .complete);
+ const need_memcpy = f.indexToCType(inst_scalar_cty).tag() == .array;
+
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, inst_ty);
+ if (need_memcpy) try writer.writeAll("memcpy(&");
try f.writeCValue(writer, local, .Other);
- try writer.writeAll(" = ");
+ try v.elem(f, writer);
+ try writer.writeAll(if (need_memcpy) ", &" else " = ");
+ try f.writeCValue(writer, operand, .Other);
+ if (need_memcpy) {
+ try writer.writeAll(", sizeof(");
+ try f.renderCType(writer, inst_scalar_cty);
+ try writer.writeAll("))");
+ }
+ try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
- _ = operand;
- return f.fail("TODO: C backend: implement airSplat", .{});
+ return local;
}
fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue {
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 42befa9c0f..5d569bd815 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -234,7 +234,6 @@ test "vector casts of sizes not divisible by 8" {
}
test "vector @splat" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
From 0b0298aff27a31a7f45828d96d95adfdde61a085 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sun, 5 Mar 2023 02:06:53 -0500
Subject: [PATCH 029/294] CBE: implement select and shuffle
---
src/codegen/c.zig | 79 +++++++++++++++++++++++++++++++++++++--
test/behavior/select.zig | 2 -
test/behavior/shuffle.zig | 2 -
test/behavior/vector.zig | 2 -
4 files changed, 75 insertions(+), 10 deletions(-)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index f5309918bf..5e64823a0d 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -6584,15 +6584,86 @@ fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue {
- if (f.liveness.isUnused(inst)) return .none;
+ const pl_op = f.air.instructions.items(.data)[inst].pl_op;
+ const extra = f.air.extraData(Air.Bin, pl_op.payload).data;
- return f.fail("TODO: C backend: implement airSelect", .{});
+ if (f.liveness.isUnused(inst)) {
+ try reap(f, inst, &.{ pl_op.operand, extra.lhs, extra.rhs });
+ return .none;
+ }
+
+ const pred = try f.resolveInst(pl_op.operand);
+ const lhs = try f.resolveInst(extra.lhs);
+ const rhs = try f.resolveInst(extra.rhs);
+ try reap(f, inst, &.{ pl_op.operand, extra.lhs, extra.rhs });
+
+ const inst_ty = f.air.typeOfIndex(inst);
+
+ const writer = f.object.writer();
+ const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, inst_ty);
+ try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(" = ");
+ try f.writeCValue(writer, pred, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(" ? ");
+ try f.writeCValue(writer, lhs, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(" : ");
+ try f.writeCValue(writer, rhs, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
+ return local;
}
fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
- if (f.liveness.isUnused(inst)) return .none;
+ const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
+ const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data;
- return f.fail("TODO: C backend: implement airShuffle", .{});
+ if (f.liveness.isUnused(inst)) {
+ try reap(f, inst, &.{ extra.a, extra.b });
+ return .none;
+ }
+
+ const mask = f.air.values[extra.mask];
+ const lhs = try f.resolveInst(extra.a);
+ const rhs = try f.resolveInst(extra.b);
+
+ const module = f.object.dg.module;
+ const target = module.getTarget();
+ const inst_ty = f.air.typeOfIndex(inst);
+
+ const writer = f.object.writer();
+ const local = try f.allocLocal(inst, inst_ty);
+ try reap(f, inst, &.{ extra.a, extra.b }); // local cannot alias operands
+ for (0..extra.mask_len) |index| {
+ var dst_pl = Value.Payload.U64{
+ .base = .{ .tag = .int_u64 },
+ .data = @intCast(u64, index),
+ };
+
+ try f.writeCValue(writer, local, .Other);
+ try writer.writeByte('[');
+ try f.object.dg.renderValue(writer, Type.usize, Value.initPayload(&dst_pl.base), .Other);
+ try writer.writeAll("] = ");
+
+ var buf: Value.ElemValueBuffer = undefined;
+ const mask_elem = mask.elemValueBuffer(module, index, &buf).toSignedInt(target);
+ var src_pl = Value.Payload.U64{
+ .base = .{ .tag = .int_u64 },
+ .data = @intCast(u64, mask_elem ^ mask_elem >> 63),
+ };
+
+ try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other);
+ try writer.writeByte('[');
+ try f.object.dg.renderValue(writer, Type.usize, Value.initPayload(&src_pl.base), .Other);
+ try writer.writeAll("];\n");
+ }
+
+ return local;
}
fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
diff --git a/test/behavior/select.zig b/test/behavior/select.zig
index d09683b67c..73d69c6530 100644
--- a/test/behavior/select.zig
+++ b/test/behavior/select.zig
@@ -4,7 +4,6 @@ const mem = std.mem;
const expect = std.testing.expect;
test "@select vectors" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -33,7 +32,6 @@ fn selectVectors() !void {
}
test "@select arrays" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
diff --git a/test/behavior/shuffle.zig b/test/behavior/shuffle.zig
index bcc4618aee..b591aee2e2 100644
--- a/test/behavior/shuffle.zig
+++ b/test/behavior/shuffle.zig
@@ -8,7 +8,6 @@ test "@shuffle int" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -50,7 +49,6 @@ test "@shuffle bool 1" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 5d569bd815..816bd6c23a 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -804,7 +804,6 @@ test "vector @reduce comptime" {
test "mask parameter of @shuffle is comptime scope" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -1212,7 +1211,6 @@ test "modRem with zero divisor" {
test "array operands to shuffle are coerced to vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
From 33fa25ba4470bf000280a94f0376988b05918b75 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sun, 5 Mar 2023 02:35:32 -0500
Subject: [PATCH 030/294] CBE: ensure uniqueness of more internal identifiers
---
src/codegen/c.zig | 35 +++++++++++++----------------------
test/behavior/vector.zig | 1 -
2 files changed, 13 insertions(+), 23 deletions(-)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 5e64823a0d..f1761ed80d 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -1841,30 +1841,21 @@ pub const DeclGen = struct {
dg.module.markDeclAlive(decl);
if (dg.module.decl_exports.get(decl_index)) |exports| {
- return writer.writeAll(exports.items[export_index].options.name);
+ try writer.writeAll(exports.items[export_index].options.name);
} else if (decl.isExtern()) {
- return writer.writeAll(mem.sliceTo(decl.name, 0));
- } else if (dg.module.test_functions.get(decl_index)) |_| {
- const gpa = dg.gpa;
- const name = try decl.getFullyQualifiedName(dg.module);
- defer gpa.free(name);
- return writer.print("{}_{d}", .{ fmtIdent(name), @enumToInt(decl_index) });
+ try writer.writeAll(mem.sliceTo(decl.name, 0));
} else {
- const gpa = dg.gpa;
- const name = try decl.getFullyQualifiedName(dg.module);
- defer gpa.free(name);
-
- // MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case), expand
- // to 3x the length of its input
- if (name.len > 1365) {
- var hash = ident_hasher_init;
- hash.update(name);
- const ident_hash = hash.finalInt();
- try writer.writeAll("zig_D_");
- return std.fmt.formatIntValue(ident_hash, "x", .{}, writer);
- } else {
- return writer.print("{}", .{fmtIdent(name)});
- }
+ // MSVC has a 4095-character token length limit, and fmtIdent can (worst case)
+ // expand to 3x the length of its input, so cut the name off at a much shorter limit.
+ var name: [100]u8 = undefined;
+ var name_stream = std.io.fixedBufferStream(&name);
+ decl.renderFullyQualifiedName(dg.module, name_stream.writer()) catch |err| switch (err) {
+ error.NoSpaceLeft => {},
+ };
+ try writer.print("{}__{d}", .{
+ fmtIdent(name_stream.getWritten()),
+ @enumToInt(decl_index),
+ });
}
}
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 816bd6c23a..0215572f8f 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -91,7 +91,6 @@ test "vector int operators" {
test "vector float operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
From 7352d461cff72d92b07cf2d2b7ee17714005b9cf Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sun, 5 Mar 2023 03:29:50 -0500
Subject: [PATCH 031/294] behavior: fix comptime issue and disable failing test
---
test/behavior/muladd.zig | 7 +++++++
test/behavior/shuffle.zig | 3 +--
test/behavior/vector.zig | 7 +++++++
3 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/test/behavior/muladd.zig b/test/behavior/muladd.zig
index 218edc5a2d..25ed3641b8 100644
--- a/test/behavior/muladd.zig
+++ b/test/behavior/muladd.zig
@@ -197,6 +197,13 @@ test "vector f128" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.os.tag == .windows and builtin.cpu.arch == .aarch64 and
+ builtin.zig_backend == .stage2_c)
+ {
+ // https://github.com/ziglang/zig/issues/13876
+ return error.SkipZigTest;
+ }
+
comptime try vector128();
try vector128();
}
diff --git a/test/behavior/shuffle.zig b/test/behavior/shuffle.zig
index b591aee2e2..97223cc263 100644
--- a/test/behavior/shuffle.zig
+++ b/test/behavior/shuffle.zig
@@ -69,7 +69,6 @@ test "@shuffle bool 2" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_llvm) {
@@ -81,7 +80,7 @@ test "@shuffle bool 2" {
fn doTheTest() !void {
var x: @Vector(3, bool) = [3]bool{ false, true, false };
var v: @Vector(2, bool) = [2]bool{ true, false };
- const mask: @Vector(4, i32) = [4]i32{ 0, ~@as(i32, 1), 1, 2 };
+ const mask = [4]i32{ 0, ~@as(i32, 1), 1, 2 };
var res = @shuffle(bool, x, v, mask);
try expect(mem.eql(bool, &@as([4]bool, res), &[4]bool{ false, false, true, false }));
}
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 0215572f8f..1d9d517a96 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -96,6 +96,13 @@ test "vector float operators" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.os.tag == .windows and builtin.cpu.arch == .aarch64 and
+ builtin.zig_backend == .stage2_c)
+ {
+ // https://github.com/ziglang/zig/issues/13876
+ return error.SkipZigTest;
+ }
+
inline for ([_]type{ f16, f32, f64, f80, f128 }) |T| {
const S = struct {
fn doTheTest() !void {
From 8ea1c1932e7bd869ec77a161da7876d171d4ef1d Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sun, 5 Mar 2023 04:25:04 -0500
Subject: [PATCH 032/294] behavior: disable failing tests
---
test/behavior/slice.zig | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index ed5e2a721d..6239de2d76 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -749,6 +749,11 @@ test "slice decays to many pointer" {
}
test "write through pointer to optional slice arg" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
const S = struct {
fn bar(foo: *?[]const u8) !void {
foo.* = try baz();
From 1efd36cd5c9a1128ae702b081d60ee32f21bc258 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sun, 5 Mar 2023 06:32:23 -0500
Subject: [PATCH 033/294] CBE: fix reduce of emulated integers
---
src/codegen/c.zig | 46 +++++++++++++++++++++++++++++-----------------
1 file changed, 29 insertions(+), 17 deletions(-)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index f1761ed80d..3d059adc15 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -6672,33 +6672,43 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
const operand_ty = f.air.typeOf(reduce.operand);
const writer = f.object.writer();
+ const use_operator = scalar_ty.bitSize(target) <= 64;
const op: union(enum) {
- float_op: []const u8,
- builtin: []const u8,
+ const Func = struct { operation: []const u8, info: BuiltinInfo = .none };
+ float_op: Func,
+ builtin: Func,
infix: []const u8,
ternary: []const u8,
} = switch (reduce.operation) {
- .And => .{ .infix = " &= " },
- .Or => .{ .infix = " |= " },
- .Xor => .{ .infix = " ^= " },
+ .And => if (use_operator) .{ .infix = " &= " } else .{ .builtin = .{ .operation = "and" } },
+ .Or => if (use_operator) .{ .infix = " |= " } else .{ .builtin = .{ .operation = "or" } },
+ .Xor => if (use_operator) .{ .infix = " ^= " } else .{ .builtin = .{ .operation = "xor" } },
.Min => switch (scalar_ty.zigTypeTag()) {
- .Int => .{ .ternary = " < " },
- .Float => .{ .float_op = "fmin" },
+ .Int => if (use_operator) .{ .ternary = " < " } else .{
+ .builtin = .{ .operation = "min" },
+ },
+ .Float => .{ .float_op = .{ .operation = "fmin" } },
else => unreachable,
},
.Max => switch (scalar_ty.zigTypeTag()) {
- .Int => .{ .ternary = " > " },
- .Float => .{ .float_op = "fmax" },
+ .Int => if (use_operator) .{ .ternary = " > " } else .{
+ .builtin = .{ .operation = "max" },
+ },
+ .Float => .{ .float_op = .{ .operation = "fmax" } },
else => unreachable,
},
.Add => switch (scalar_ty.zigTypeTag()) {
- .Int => .{ .infix = " += " },
- .Float => .{ .builtin = "add" },
+ .Int => if (use_operator) .{ .infix = " += " } else .{
+ .builtin = .{ .operation = "addw", .info = .bits },
+ },
+ .Float => .{ .builtin = .{ .operation = "add" } },
else => unreachable,
},
.Mul => switch (scalar_ty.zigTypeTag()) {
- .Int => .{ .infix = " *= " },
- .Float => .{ .builtin = "mul" },
+ .Int => if (use_operator) .{ .infix = " *= " } else .{
+ .builtin = .{ .operation = "mulw", .info = .bits },
+ },
+ .Float => .{ .builtin = .{ .operation = "mul" } },
else => unreachable,
},
};
@@ -6762,24 +6772,26 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
const v = try Vectorizer.start(f, inst, writer, operand_ty);
try f.writeCValue(writer, accum, .Other);
switch (op) {
- .float_op => |operation| {
+ .float_op => |func| {
try writer.writeAll(" = zig_libc_name_");
try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
- try writer.print("({s})(", .{operation});
+ try writer.print("({s})(", .{func.operation});
try f.writeCValue(writer, accum, .FunctionArgument);
try writer.writeAll(", ");
try f.writeCValue(writer, operand, .Other);
try v.elem(f, writer);
+ try f.object.dg.renderBuiltinInfo(writer, scalar_ty, func.info);
try writer.writeByte(')');
},
- .builtin => |operation| {
- try writer.print(" = zig_{s}_", .{operation});
+ .builtin => |func| {
+ try writer.print(" = zig_{s}_", .{func.operation});
try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
try f.writeCValue(writer, accum, .FunctionArgument);
try writer.writeAll(", ");
try f.writeCValue(writer, operand, .Other);
try v.elem(f, writer);
+ try f.object.dg.renderBuiltinInfo(writer, scalar_ty, func.info);
try writer.writeByte(')');
},
.infix => |ass| {
From a63134a4a56e8683aeee292b641b4e943cbfb999 Mon Sep 17 00:00:00 2001
From: jim price
Date: Sat, 4 Mar 2023 18:03:37 -0800
Subject: [PATCH 034/294] std.os: Add DeviceBusy as a possible write error
On Linux, writing an invalid value to certain files in virtual file
systems (for example under /sys/fs/cgroup) fails with errno 16
(EBUSY).
This change allows for these specific cases to be caught instead of
being lumped together in UnexpectedError.
---
lib/std/os.zig | 5 +++++
src/link.zig | 1 +
2 files changed, 6 insertions(+)
diff --git a/lib/std/os.zig b/lib/std/os.zig
index fe664302a7..3a3433d819 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -1036,6 +1036,7 @@ pub const WriteError = error{
FileTooBig,
InputOutput,
NoSpaceLeft,
+ DeviceBusy,
/// In WASI, this error may occur when the file descriptor does
/// not hold the required rights to write to it.
@@ -1134,6 +1135,7 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
.PERM => return error.AccessDenied,
.PIPE => return error.BrokenPipe,
.CONNRESET => return error.ConnectionResetByPeer,
+ .BUSY => return error.DeviceBusy,
else => |err| return unexpectedErrno(err),
}
}
@@ -1203,6 +1205,7 @@ pub fn writev(fd: fd_t, iov: []const iovec_const) WriteError!usize {
.PERM => return error.AccessDenied,
.PIPE => return error.BrokenPipe,
.CONNRESET => return error.ConnectionResetByPeer,
+ .BUSY => return error.DeviceBusy,
else => |err| return unexpectedErrno(err),
}
}
@@ -1299,6 +1302,7 @@ pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize {
.NXIO => return error.Unseekable,
.SPIPE => return error.Unseekable,
.OVERFLOW => return error.Unseekable,
+ .BUSY => return error.DeviceBusy,
else => |err| return unexpectedErrno(err),
}
}
@@ -1388,6 +1392,7 @@ pub fn pwritev(fd: fd_t, iov: []const iovec_const, offset: u64) PWriteError!usiz
.NXIO => return error.Unseekable,
.SPIPE => return error.Unseekable,
.OVERFLOW => return error.Unseekable,
+ .BUSY => return error.DeviceBusy,
else => |err| return unexpectedErrno(err),
}
}
diff --git a/src/link.zig b/src/link.zig
index 4c4915441d..24cc0a3861 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -460,6 +460,7 @@ pub const File = struct {
CurrentWorkingDirectoryUnlinked,
LockViolation,
NetNameDeleted,
+ DeviceBusy,
};
/// Called from within the CodeGen to lower a local variable instantion as an unnamed
From 29c56a8aa74d1b1a19bece5ba5d738af1e3c9f6d Mon Sep 17 00:00:00 2001
From: jiacai2050
Date: Sat, 4 Mar 2023 10:47:25 +0800
Subject: [PATCH 035/294] fix package redeclaration when cache is not found
---
src/Package.zig | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/src/Package.zig b/src/Package.zig
index 2aa5e85294..ed93500980 100644
--- a/src/Package.zig
+++ b/src/Package.zig
@@ -432,6 +432,12 @@ fn fetchAndUnpack(
const build_root = try global_cache_directory.join(gpa, &.{pkg_dir_sub_path});
errdefer gpa.free(build_root);
+ var pkg_dir = global_cache_directory.handle.openDir(pkg_dir_sub_path, .{}) catch |err| switch (err) {
+ error.FileNotFound => break :cached,
+ else => |e| return e,
+ };
+ errdefer pkg_dir.close();
+
try build_roots_source.writer().print(" pub const {s} = \"{}\";\n", .{
std.zig.fmtId(fqn), std.zig.fmtEscapes(build_root),
});
@@ -444,12 +450,6 @@ fn fetchAndUnpack(
return gop.value_ptr.*;
}
- var pkg_dir = global_cache_directory.handle.openDir(pkg_dir_sub_path, .{}) catch |err| switch (err) {
- error.FileNotFound => break :cached,
- else => |e| return e,
- };
- errdefer pkg_dir.close();
-
const ptr = try gpa.create(Package);
errdefer gpa.destroy(ptr);
From f1ae688d371f49fdbf65f952d655905c74871fdb Mon Sep 17 00:00:00 2001
From: r00ster91
Date: Sun, 5 Mar 2023 15:45:23 +0100
Subject: [PATCH 036/294] AstGen: ensure certain builtin functions return void
Fixes #14779
Co-authored-by: Veikka Tuominen
---
src/AstGen.zig | 32 +++++++++----------
...n_functions_returning_void_or_noreturn.zig | 32 +++++++++++++++++++
2 files changed, 48 insertions(+), 16 deletions(-)
create mode 100644 test/behavior/builtin_functions_returning_void_or_noreturn.zig
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 587b574a01..20f4fb6df3 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -8060,35 +8060,35 @@ fn builtinCall(
},
.fence => {
const order = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[0]);
- const result = try gz.addExtendedPayload(.fence, Zir.Inst.UnNode{
+ _ = try gz.addExtendedPayload(.fence, Zir.Inst.UnNode{
.node = gz.nodeIndexToRelative(node),
.operand = order,
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
},
.set_float_mode => {
const order = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .float_mode_type } }, params[0]);
- const result = try gz.addExtendedPayload(.set_float_mode, Zir.Inst.UnNode{
+ _ = try gz.addExtendedPayload(.set_float_mode, Zir.Inst.UnNode{
.node = gz.nodeIndexToRelative(node),
.operand = order,
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
},
.set_align_stack => {
const order = try expr(gz, scope, align_ri, params[0]);
- const result = try gz.addExtendedPayload(.set_align_stack, Zir.Inst.UnNode{
+ _ = try gz.addExtendedPayload(.set_align_stack, Zir.Inst.UnNode{
.node = gz.nodeIndexToRelative(node),
.operand = order,
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
},
.set_cold => {
const order = try expr(gz, scope, ri, params[0]);
- const result = try gz.addExtendedPayload(.set_cold, Zir.Inst.UnNode{
+ _ = try gz.addExtendedPayload(.set_cold, Zir.Inst.UnNode{
.node = gz.nodeIndexToRelative(node),
.operand = order,
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
},
.src => {
@@ -8373,14 +8373,14 @@ fn builtinCall(
},
.atomic_store => {
const int_type = try typeExpr(gz, scope, params[0]);
- const result = try gz.addPlNode(.atomic_store, node, Zir.Inst.AtomicStore{
+ _ = try gz.addPlNode(.atomic_store, node, Zir.Inst.AtomicStore{
// zig fmt: off
.ptr = try expr(gz, scope, .{ .rl = .none }, params[1]),
.operand = try expr(gz, scope, .{ .rl = .{ .ty = int_type } }, params[2]),
.ordering = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[3]),
// zig fmt: on
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
},
.mul_add => {
const float_type = try typeExpr(gz, scope, params[0]);
@@ -8421,20 +8421,20 @@ fn builtinCall(
return rvalue(gz, ri, result, node);
},
.memcpy => {
- const result = try gz.addPlNode(.memcpy, node, Zir.Inst.Memcpy{
+ _ = try gz.addPlNode(.memcpy, node, Zir.Inst.Memcpy{
.dest = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .manyptr_u8_type } }, params[0]),
.source = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .manyptr_const_u8_type } }, params[1]),
.byte_count = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, params[2]),
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
},
.memset => {
- const result = try gz.addPlNode(.memset, node, Zir.Inst.Memset{
+ _ = try gz.addPlNode(.memset, node, Zir.Inst.Memset{
.dest = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .manyptr_u8_type } }, params[0]),
.byte = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u8_type } }, params[1]),
.byte_count = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, params[2]),
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
},
.shuffle => {
const result = try gz.addPlNode(.shuffle, node, Zir.Inst.Shuffle{
@@ -8475,12 +8475,12 @@ fn builtinCall(
.prefetch => {
const ptr = try expr(gz, scope, .{ .rl = .none }, params[0]);
const options = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .prefetch_options_type } }, params[1]);
- const result = try gz.addExtendedPayload(.prefetch, Zir.Inst.BinNode{
+ _ = try gz.addExtendedPayload(.prefetch, Zir.Inst.BinNode{
.node = gz.nodeIndexToRelative(node),
.lhs = ptr,
.rhs = options,
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
},
.c_va_arg => {
if (astgen.fn_block == null) {
diff --git a/test/behavior/builtin_functions_returning_void_or_noreturn.zig b/test/behavior/builtin_functions_returning_void_or_noreturn.zig
new file mode 100644
index 0000000000..072f5576cc
--- /dev/null
+++ b/test/behavior/builtin_functions_returning_void_or_noreturn.zig
@@ -0,0 +1,32 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const testing = std.testing;
+
+var x: u8 = 1;
+
+// Builtin functions that return void or noreturn but cannot be tested are excluded here.
+test {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
+
+ var val: u8 = undefined;
+ try testing.expectEqual({}, @atomicStore(u8, &val, 0, .Unordered));
+ try testing.expectEqual(void, @TypeOf(@breakpoint()));
+ try testing.expectEqual({}, @export(x, .{ .name = "x" }));
+ try testing.expectEqual({}, @fence(.Acquire));
+ try testing.expectEqual({}, @memcpy(@intToPtr([*]u8, 1), @intToPtr([*]u8, 1), 0));
+ try testing.expectEqual({}, @memset(@intToPtr([*]u8, 1), undefined, 0));
+ try testing.expectEqual(noreturn, @TypeOf(if (true) @panic("") else {}));
+ try testing.expectEqual({}, @prefetch(&val, .{}));
+ try testing.expectEqual({}, @setAlignStack(16));
+ try testing.expectEqual({}, @setCold(true));
+ try testing.expectEqual({}, @setEvalBranchQuota(0));
+ try testing.expectEqual({}, @setFloatMode(.Optimized));
+ try testing.expectEqual({}, @setRuntimeSafety(true));
+ try testing.expectEqual(noreturn, @TypeOf(if (true) @trap() else {}));
+}
From 34a23db664e0fe50fb21c892f33b0aec8a7a2f7f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 4 Mar 2023 14:21:57 -0700
Subject: [PATCH 037/294] zig.h: lower trap to SIGTRAP instead of SIGILL
---
lib/zig.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/zig.h b/lib/zig.h
index 22a9dbbb9e..65fb21f99a 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -193,7 +193,7 @@ typedef char bool;
#elif defined(__i386__) || defined(__x86_64__)
#define zig_trap() __asm__ volatile("ud2");
#else
-#define zig_trap() raise(SIGILL)
+#define zig_trap() raise(SIGTRAP)
#endif
#if zig_has_builtin(debugtrap)
From fb04ff45cd1b4eca5c56e0295bbbe961557ef820 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 4 Mar 2023 14:22:46 -0700
Subject: [PATCH 038/294] langref: small clarification to `@trap`
---
doc/langref.html.in | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index a413c3aab5..7044fe977f 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -9403,7 +9403,7 @@ fn List(comptime T: type) type {
Unlike for {#syntax#}@breakpoint(){#endsyntax#}, execution does not continue after this point.
- This function is only valid within function scope.
+ Outside function scope, this builtin causes a compile error.
{#see_also|@breakpoint#}
{#header_close#}
From 48e72960a496edc86b231d45bfa39d618b6adfaf Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 4 Mar 2023 14:48:31 -0700
Subject: [PATCH 039/294] llvm: fix lowering of `@trap`
It needed an unreachable instruction after it.
---
src/codegen/llvm.zig | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index baeaeee58f..85a82f4eda 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -8261,6 +8261,7 @@ pub const FuncGen = struct {
_ = inst;
const llvm_fn = self.getIntrinsic("llvm.trap", &.{});
_ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, undefined, 0, .Cold, .Auto, "");
+ _ = self.builder.buildUnreachable();
return null;
}
From c839c180ef1686794c039fc6d3c20a8716e87357 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 5 Mar 2023 12:46:12 -0700
Subject: [PATCH 040/294] stage2: add zig_backend to ZIR cache namespace
---
src/Module.zig | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/Module.zig b/src/Module.zig
index a2502d36d3..7ea69a0a2e 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -3528,6 +3528,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
const digest = hash: {
var path_hash: Cache.HashHelper = .{};
path_hash.addBytes(build_options.version);
+ path_hash.add(builtin.zig_backend);
if (!want_local_cache) {
path_hash.addOptionalBytes(file.pkg.root_src_directory.path);
}
From cdb9cc8f6bda4b4faa270278e3b67c4ef9246a84 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 4 Mar 2023 14:41:12 -0700
Subject: [PATCH 041/294] update zig1.wasm
---
stage1/zig.h | 2759 +++++++++++++++++++++++++++++++---------------
stage1/zig1.wasm | Bin 2408069 -> 2412111 bytes
2 files changed, 1858 insertions(+), 901 deletions(-)
diff --git a/stage1/zig.h b/stage1/zig.h
index 0756d9f731..65fb21f99a 100644
--- a/stage1/zig.h
+++ b/stage1/zig.h
@@ -1,8 +1,11 @@
#undef linux
+#ifndef __STDC_WANT_IEC_60559_TYPES_EXT__
#define __STDC_WANT_IEC_60559_TYPES_EXT__
+#endif
#include
#include
+#include
#include
#include
@@ -34,6 +37,14 @@ typedef char bool;
#define zig_has_attribute(attribute) 0
#endif
+#if __LITTLE_ENDIAN__ || _MSC_VER
+#define zig_little_endian 1
+#define zig_big_endian 0
+#else
+#define zig_little_endian 0
+#define zig_big_endian 1
+#endif
+
#if __STDC_VERSION__ >= 201112L
#define zig_threadlocal _Thread_local
#elif defined(__GNUC__)
@@ -75,6 +86,32 @@ typedef char bool;
#define zig_cold
#endif
+#if zig_has_attribute(flatten)
+#define zig_maybe_flatten __attribute__((flatten))
+#else
+#define zig_maybe_flatten
+#endif
+
+#if zig_has_attribute(noinline)
+#define zig_never_inline __attribute__((noinline)) zig_maybe_flatten
+#elif defined(_MSC_VER)
+#define zig_never_inline __declspec(noinline) zig_maybe_flatten
+#else
+#define zig_never_inline zig_never_inline_unavailable
+#endif
+
+#if zig_has_attribute(not_tail_called)
+#define zig_never_tail __attribute__((not_tail_called)) zig_never_inline
+#else
+#define zig_never_tail zig_never_tail_unavailable
+#endif
+
+#if zig_has_attribute(always_inline)
+#define zig_always_tail __attribute__((musttail))
+#else
+#define zig_always_tail zig_always_tail_unavailable
+#endif
+
#if __STDC_VERSION__ >= 199901L
#define zig_restrict restrict
#elif defined(__GNUC__)
@@ -151,10 +188,16 @@ typedef char bool;
#define zig_export(sig, symbol, name) __asm(name " = " symbol)
#endif
+#if zig_has_builtin(trap)
+#define zig_trap() __builtin_trap()
+#elif defined(__i386__) || defined(__x86_64__)
+#define zig_trap() __asm__ volatile("ud2");
+#else
+#define zig_trap() raise(SIGTRAP)
+#endif
+
#if zig_has_builtin(debugtrap)
#define zig_breakpoint() __builtin_debugtrap()
-#elif zig_has_builtin(trap) || defined(zig_gnuc)
-#define zig_breakpoint() __builtin_trap()
#elif defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__)
#define zig_breakpoint() __debugbreak()
#elif defined(__i386__) || defined(__x86_64__)
@@ -286,701 +329,656 @@ typedef char bool;
#endif
#if __STDC_VERSION__ >= 201112L
-#define zig_noreturn _Noreturn void
+#define zig_noreturn _Noreturn
#elif zig_has_attribute(noreturn) || defined(zig_gnuc)
-#define zig_noreturn __attribute__((noreturn)) void
+#define zig_noreturn __attribute__((noreturn))
#elif _MSC_VER
-#define zig_noreturn __declspec(noreturn) void
+#define zig_noreturn __declspec(noreturn)
#else
-#define zig_noreturn void
+#define zig_noreturn
#endif
#define zig_bitSizeOf(T) (CHAR_BIT * sizeof(T))
-typedef uintptr_t zig_usize;
-typedef intptr_t zig_isize;
-typedef signed short int zig_c_short;
-typedef unsigned short int zig_c_ushort;
-typedef signed int zig_c_int;
-typedef unsigned int zig_c_uint;
-typedef signed long int zig_c_long;
-typedef unsigned long int zig_c_ulong;
-typedef signed long long int zig_c_longlong;
-typedef unsigned long long int zig_c_ulonglong;
+#define zig_compiler_rt_abbrev_uint32_t si
+#define zig_compiler_rt_abbrev_int32_t si
+#define zig_compiler_rt_abbrev_uint64_t di
+#define zig_compiler_rt_abbrev_int64_t di
+#define zig_compiler_rt_abbrev_zig_u128 ti
+#define zig_compiler_rt_abbrev_zig_i128 ti
+#define zig_compiler_rt_abbrev_zig_f16 hf
+#define zig_compiler_rt_abbrev_zig_f32 sf
+#define zig_compiler_rt_abbrev_zig_f64 df
+#define zig_compiler_rt_abbrev_zig_f80 xf
+#define zig_compiler_rt_abbrev_zig_f128 tf
-typedef uint8_t zig_u8;
-typedef int8_t zig_i8;
-typedef uint16_t zig_u16;
-typedef int16_t zig_i16;
-typedef uint32_t zig_u32;
-typedef int32_t zig_i32;
-typedef uint64_t zig_u64;
-typedef int64_t zig_i64;
+zig_extern void *memcpy (void *zig_restrict, void const *zig_restrict, size_t);
+zig_extern void *memset (void *, int, size_t);
-#define zig_as_u8(val) UINT8_C(val)
-#define zig_as_i8(val) INT8_C(val)
-#define zig_as_u16(val) UINT16_C(val)
-#define zig_as_i16(val) INT16_C(val)
-#define zig_as_u32(val) UINT32_C(val)
-#define zig_as_i32(val) INT32_C(val)
-#define zig_as_u64(val) UINT64_C(val)
-#define zig_as_i64(val) INT64_C(val)
+/* ===================== 8/16/32/64-bit Integer Support ===================== */
+
+#if __STDC_VERSION__ >= 199901L || _MSC_VER
+#include <stdint.h>
+#else
+
+#if SCHAR_MIN == ~0x7F && SCHAR_MAX == 0x7F && UCHAR_MAX == 0xFF
+typedef unsigned char uint8_t;
+typedef signed char int8_t;
+#define INT8_C(c) c
+#define UINT8_C(c) c##U
+#elif SHRT_MIN == ~0x7F && SHRT_MAX == 0x7F && USHRT_MAX == 0xFF
+typedef unsigned short uint8_t;
+typedef signed short int8_t;
+#define INT8_C(c) c
+#define UINT8_C(c) c##U
+#elif INT_MIN == ~0x7F && INT_MAX == 0x7F && UINT_MAX == 0xFF
+typedef unsigned int uint8_t;
+typedef signed int int8_t;
+#define INT8_C(c) c
+#define UINT8_C(c) c##U
+#elif LONG_MIN == ~0x7F && LONG_MAX == 0x7F && ULONG_MAX == 0xFF
+typedef unsigned long uint8_t;
+typedef signed long int8_t;
+#define INT8_C(c) c##L
+#define UINT8_C(c) c##LU
+#elif LLONG_MIN == ~0x7F && LLONG_MAX == 0x7F && ULLONG_MAX == 0xFF
+typedef unsigned long long uint8_t;
+typedef signed long long int8_t;
+#define INT8_C(c) c##LL
+#define UINT8_C(c) c##LLU
+#endif
+#define INT8_MIN (~INT8_C(0x7F))
+#define INT8_MAX ( INT8_C(0x7F))
+#define UINT8_MAX (UINT8_C(0xFF))
+
+#if SCHAR_MIN == ~0x7FFF && SCHAR_MAX == 0x7FFF && UCHAR_MAX == 0xFFFF
+typedef unsigned char uint16_t;
+typedef signed char int16_t;
+#define INT16_C(c) c
+#define UINT16_C(c) c##U
+#elif SHRT_MIN == ~0x7FFF && SHRT_MAX == 0x7FFF && USHRT_MAX == 0xFFFF
+typedef unsigned short uint16_t;
+typedef signed short int16_t;
+#define INT16_C(c) c
+#define UINT16_C(c) c##U
+#elif INT_MIN == ~0x7FFF && INT_MAX == 0x7FFF && UINT_MAX == 0xFFFF
+typedef unsigned int uint16_t;
+typedef signed int int16_t;
+#define INT16_C(c) c
+#define UINT16_C(c) c##U
+#elif LONG_MIN == ~0x7FFF && LONG_MAX == 0x7FFF && ULONG_MAX == 0xFFFF
+typedef unsigned long uint16_t;
+typedef signed long int16_t;
+#define INT16_C(c) c##L
+#define UINT16_C(c) c##LU
+#elif LLONG_MIN == ~0x7FFF && LLONG_MAX == 0x7FFF && ULLONG_MAX == 0xFFFF
+typedef unsigned long long uint16_t;
+typedef signed long long int16_t;
+#define INT16_C(c) c##LL
+#define UINT16_C(c) c##LLU
+#endif
+#define INT16_MIN (~INT16_C(0x7FFF))
+#define INT16_MAX ( INT16_C(0x7FFF))
+#define UINT16_MAX (UINT16_C(0xFFFF))
+
+#if SCHAR_MIN == ~0x7FFFFFFF && SCHAR_MAX == 0x7FFFFFFF && UCHAR_MAX == 0xFFFFFFFF
+typedef unsigned char uint32_t;
+typedef signed char int32_t;
+#define INT32_C(c) c
+#define UINT32_C(c) c##U
+#elif SHRT_MIN == ~0x7FFFFFFF && SHRT_MAX == 0x7FFFFFFF && USHRT_MAX == 0xFFFFFFFF
+typedef unsigned short uint32_t;
+typedef signed short int32_t;
+#define INT32_C(c) c
+#define UINT32_C(c) c##U
+#elif INT_MIN == ~0x7FFFFFFF && INT_MAX == 0x7FFFFFFF && UINT_MAX == 0xFFFFFFFF
+typedef unsigned int uint32_t;
+typedef signed int int32_t;
+#define INT32_C(c) c
+#define UINT32_C(c) c##U
+#elif LONG_MIN == ~0x7FFFFFFF && LONG_MAX == 0x7FFFFFFF && ULONG_MAX == 0xFFFFFFFF
+typedef unsigned long uint32_t;
+typedef signed long int32_t;
+#define INT32_C(c) c##L
+#define UINT32_C(c) c##LU
+#elif LLONG_MIN == ~0x7FFFFFFF && LLONG_MAX == 0x7FFFFFFF && ULLONG_MAX == 0xFFFFFFFF
+typedef unsigned long long uint32_t;
+typedef signed long long int32_t;
+#define INT32_C(c) c##LL
+#define UINT32_C(c) c##LLU
+#endif
+#define INT32_MIN (~INT32_C(0x7FFFFFFF))
+#define INT32_MAX ( INT32_C(0x7FFFFFFF))
+#define UINT32_MAX (UINT32_C(0xFFFFFFFF))
+
+#if SCHAR_MIN == ~0x7FFFFFFFFFFFFFFF && SCHAR_MAX == 0x7FFFFFFFFFFFFFFF && UCHAR_MAX == 0xFFFFFFFFFFFFFFFF
+typedef unsigned char uint64_t;
+typedef signed char int64_t;
+#define INT64_C(c) c
+#define UINT64_C(c) c##U
+#elif SHRT_MIN == ~0x7FFFFFFFFFFFFFFF && SHRT_MAX == 0x7FFFFFFFFFFFFFFF && USHRT_MAX == 0xFFFFFFFFFFFFFFFF
+typedef unsigned short uint64_t;
+typedef signed short int64_t;
+#define INT64_C(c) c
+#define UINT64_C(c) c##U
+#elif INT_MIN == ~0x7FFFFFFFFFFFFFFF && INT_MAX == 0x7FFFFFFFFFFFFFFF && UINT_MAX == 0xFFFFFFFFFFFFFFFF
+typedef unsigned int uint64_t;
+typedef signed int int64_t;
+#define INT64_C(c) c
+#define UINT64_C(c) c##U
+#elif LONG_MIN == ~0x7FFFFFFFFFFFFFFF && LONG_MAX == 0x7FFFFFFFFFFFFFFF && ULONG_MAX == 0xFFFFFFFFFFFFFFFF
+typedef unsigned long uint64_t;
+typedef signed long int64_t;
+#define INT64_C(c) c##L
+#define UINT64_C(c) c##LU
+#elif LLONG_MIN == ~0x7FFFFFFFFFFFFFFF && LLONG_MAX == 0x7FFFFFFFFFFFFFFF && ULLONG_MAX == 0xFFFFFFFFFFFFFFFF
+typedef unsigned long long uint64_t;
+typedef signed long long int64_t;
+#define INT64_C(c) c##LL
+#define UINT64_C(c) c##LLU
+#endif
+#define INT64_MIN (~INT64_C(0x7FFFFFFFFFFFFFFF))
+#define INT64_MAX ( INT64_C(0x7FFFFFFFFFFFFFFF))
+#define UINT64_MAX (UINT64_C(0xFFFFFFFFFFFFFFFF))
+
+typedef size_t uintptr_t;
+typedef ptrdiff_t intptr_t;
+
+#endif
-#define zig_minInt_u8 zig_as_u8(0)
-#define zig_maxInt_u8 UINT8_MAX
#define zig_minInt_i8 INT8_MIN
#define zig_maxInt_i8 INT8_MAX
-#define zig_minInt_u16 zig_as_u16(0)
-#define zig_maxInt_u16 UINT16_MAX
+#define zig_minInt_u8 UINT8_C(0)
+#define zig_maxInt_u8 UINT8_MAX
#define zig_minInt_i16 INT16_MIN
#define zig_maxInt_i16 INT16_MAX
-#define zig_minInt_u32 zig_as_u32(0)
-#define zig_maxInt_u32 UINT32_MAX
+#define zig_minInt_u16 UINT16_C(0)
+#define zig_maxInt_u16 UINT16_MAX
#define zig_minInt_i32 INT32_MIN
#define zig_maxInt_i32 INT32_MAX
-#define zig_minInt_u64 zig_as_u64(0)
-#define zig_maxInt_u64 UINT64_MAX
+#define zig_minInt_u32 UINT32_C(0)
+#define zig_maxInt_u32 UINT32_MAX
#define zig_minInt_i64 INT64_MIN
#define zig_maxInt_i64 INT64_MAX
+#define zig_minInt_u64 UINT64_C(0)
+#define zig_maxInt_u64 UINT64_MAX
-#define zig_compiler_rt_abbrev_u32 si
-#define zig_compiler_rt_abbrev_i32 si
-#define zig_compiler_rt_abbrev_u64 di
-#define zig_compiler_rt_abbrev_i64 di
-#define zig_compiler_rt_abbrev_u128 ti
-#define zig_compiler_rt_abbrev_i128 ti
-#define zig_compiler_rt_abbrev_f16 hf
-#define zig_compiler_rt_abbrev_f32 sf
-#define zig_compiler_rt_abbrev_f64 df
-#define zig_compiler_rt_abbrev_f80 xf
-#define zig_compiler_rt_abbrev_f128 tf
-
-zig_extern void *memcpy (void *zig_restrict, void const *zig_restrict, zig_usize);
-zig_extern void *memset (void *, int, zig_usize);
-
-/* ==================== 8/16/32/64-bit Integer Routines ===================== */
-
-#define zig_maxInt(Type, bits) zig_shr_##Type(zig_maxInt_##Type, (zig_bitSizeOf(zig_##Type) - bits))
-#define zig_expand_maxInt(Type, bits) zig_maxInt(Type, bits)
-#define zig_minInt(Type, bits) zig_not_##Type(zig_maxInt(Type, bits), bits)
-#define zig_expand_minInt(Type, bits) zig_minInt(Type, bits)
+#define zig_intLimit(s, w, limit, bits) zig_shr_##s##w(zig_##limit##Int_##s##w, w - (bits))
+#define zig_minInt_i(w, bits) zig_intLimit(i, w, min, bits)
+#define zig_maxInt_i(w, bits) zig_intLimit(i, w, max, bits)
+#define zig_minInt_u(w, bits) zig_intLimit(u, w, min, bits)
+#define zig_maxInt_u(w, bits) zig_intLimit(u, w, max, bits)
#define zig_int_operator(Type, RhsType, operation, operator) \
- static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##RhsType rhs) { \
+ static inline Type zig_##operation(Type lhs, RhsType rhs) { \
return lhs operator rhs; \
}
#define zig_int_basic_operator(Type, operation, operator) \
- zig_int_operator(Type, Type, operation, operator)
+ zig_int_operator(Type, Type, operation, operator)
#define zig_int_shift_operator(Type, operation, operator) \
- zig_int_operator(Type, u8, operation, operator)
+ zig_int_operator(Type, uint8_t, operation, operator)
#define zig_int_helpers(w) \
- zig_int_basic_operator(u##w, and, &) \
- zig_int_basic_operator(i##w, and, &) \
- zig_int_basic_operator(u##w, or, |) \
- zig_int_basic_operator(i##w, or, |) \
- zig_int_basic_operator(u##w, xor, ^) \
- zig_int_basic_operator(i##w, xor, ^) \
- zig_int_shift_operator(u##w, shl, <<) \
- zig_int_shift_operator(i##w, shl, <<) \
- zig_int_shift_operator(u##w, shr, >>) \
+ zig_int_basic_operator(uint##w##_t, and_u##w, &) \
+ zig_int_basic_operator( int##w##_t, and_i##w, &) \
+ zig_int_basic_operator(uint##w##_t, or_u##w, |) \
+ zig_int_basic_operator( int##w##_t, or_i##w, |) \
+ zig_int_basic_operator(uint##w##_t, xor_u##w, ^) \
+ zig_int_basic_operator( int##w##_t, xor_i##w, ^) \
+ zig_int_shift_operator(uint##w##_t, shl_u##w, <<) \
+ zig_int_shift_operator( int##w##_t, shl_i##w, <<) \
+ zig_int_shift_operator(uint##w##_t, shr_u##w, >>) \
\
- static inline zig_i##w zig_shr_i##w(zig_i##w lhs, zig_u8 rhs) { \
- zig_i##w sign_mask = lhs < zig_as_i##w(0) ? -zig_as_i##w(1) : zig_as_i##w(0); \
+ static inline int##w##_t zig_shr_i##w(int##w##_t lhs, uint8_t rhs) { \
+ int##w##_t sign_mask = lhs < INT##w##_C(0) ? -INT##w##_C(1) : INT##w##_C(0); \
return ((lhs ^ sign_mask) >> rhs) ^ sign_mask; \
} \
\
- static inline zig_u##w zig_not_u##w(zig_u##w val, zig_u8 bits) { \
- return val ^ zig_maxInt(u##w, bits); \
+ static inline uint##w##_t zig_not_u##w(uint##w##_t val, uint8_t bits) { \
+ return val ^ zig_maxInt_u(w, bits); \
} \
\
- static inline zig_i##w zig_not_i##w(zig_i##w val, zig_u8 bits) { \
+ static inline int##w##_t zig_not_i##w(int##w##_t val, uint8_t bits) { \
(void)bits; \
return ~val; \
} \
\
- static inline zig_u##w zig_wrap_u##w(zig_u##w val, zig_u8 bits) { \
- return val & zig_maxInt(u##w, bits); \
+ static inline uint##w##_t zig_wrap_u##w(uint##w##_t val, uint8_t bits) { \
+ return val & zig_maxInt_u(w, bits); \
} \
\
- static inline zig_i##w zig_wrap_i##w(zig_i##w val, zig_u8 bits) { \
- return (val & zig_as_u##w(1) << (bits - zig_as_u8(1))) != 0 \
- ? val | zig_minInt(i##w, bits) : val & zig_maxInt(i##w, bits); \
+ static inline int##w##_t zig_wrap_i##w(int##w##_t val, uint8_t bits) { \
+ return (val & UINT##w##_C(1) << (bits - UINT8_C(1))) != 0 \
+ ? val | zig_minInt_i(w, bits) : val & zig_maxInt_i(w, bits); \
} \
\
- zig_int_basic_operator(u##w, div_floor, /) \
+ zig_int_basic_operator(uint##w##_t, div_floor_u##w, /) \
\
- static inline zig_i##w zig_div_floor_i##w(zig_i##w lhs, zig_i##w rhs) { \
- return lhs / rhs - (((lhs ^ rhs) & (lhs % rhs)) < zig_as_i##w(0)); \
+ static inline int##w##_t zig_div_floor_i##w(int##w##_t lhs, int##w##_t rhs) { \
+ return lhs / rhs - (((lhs ^ rhs) & (lhs % rhs)) < INT##w##_C(0)); \
} \
\
- zig_int_basic_operator(u##w, mod, %) \
+ zig_int_basic_operator(uint##w##_t, mod_u##w, %) \
\
- static inline zig_i##w zig_mod_i##w(zig_i##w lhs, zig_i##w rhs) { \
- zig_i##w rem = lhs % rhs; \
- return rem + (((lhs ^ rhs) & rem) < zig_as_i##w(0) ? rhs : zig_as_i##w(0)); \
+ static inline int##w##_t zig_mod_i##w(int##w##_t lhs, int##w##_t rhs) { \
+ int##w##_t rem = lhs % rhs; \
+ return rem + (((lhs ^ rhs) & rem) < INT##w##_C(0) ? rhs : INT##w##_C(0)); \
} \
\
- static inline zig_u##w zig_shlw_u##w(zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \
+ static inline uint##w##_t zig_shlw_u##w(uint##w##_t lhs, uint8_t rhs, uint8_t bits) { \
return zig_wrap_u##w(zig_shl_u##w(lhs, rhs), bits); \
} \
\
- static inline zig_i##w zig_shlw_i##w(zig_i##w lhs, zig_u8 rhs, zig_u8 bits) { \
- return zig_wrap_i##w((zig_i##w)zig_shl_u##w((zig_u##w)lhs, (zig_u##w)rhs), bits); \
+ static inline int##w##_t zig_shlw_i##w(int##w##_t lhs, uint8_t rhs, uint8_t bits) { \
+ return zig_wrap_i##w((int##w##_t)zig_shl_u##w((uint##w##_t)lhs, (uint##w##_t)rhs), bits); \
} \
\
- static inline zig_u##w zig_addw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
+ static inline uint##w##_t zig_addw_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
return zig_wrap_u##w(lhs + rhs, bits); \
} \
\
- static inline zig_i##w zig_addw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs + (zig_u##w)rhs), bits); \
+ static inline int##w##_t zig_addw_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ return zig_wrap_i##w((int##w##_t)((uint##w##_t)lhs + (uint##w##_t)rhs), bits); \
} \
\
- static inline zig_u##w zig_subw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
+ static inline uint##w##_t zig_subw_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
return zig_wrap_u##w(lhs - rhs, bits); \
} \
\
- static inline zig_i##w zig_subw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs - (zig_u##w)rhs), bits); \
+ static inline int##w##_t zig_subw_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ return zig_wrap_i##w((int##w##_t)((uint##w##_t)lhs - (uint##w##_t)rhs), bits); \
} \
\
- static inline zig_u##w zig_mulw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
+ static inline uint##w##_t zig_mulw_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
return zig_wrap_u##w(lhs * rhs, bits); \
} \
\
- static inline zig_i##w zig_mulw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs * (zig_u##w)rhs), bits); \
+ static inline int##w##_t zig_mulw_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ return zig_wrap_i##w((int##w##_t)((uint##w##_t)lhs * (uint##w##_t)rhs), bits); \
}
zig_int_helpers(8)
zig_int_helpers(16)
zig_int_helpers(32)
zig_int_helpers(64)
-static inline bool zig_addo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
+static inline bool zig_addo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u32(full_res, bits);
- return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits);
+ return overflow || full_res < zig_minInt_u(32, bits) || full_res > zig_maxInt_u(32, bits);
#else
*res = zig_addw_u32(lhs, rhs, bits);
return *res < lhs;
#endif
}
-static inline void zig_vaddo_u32(zig_u8 *ov, zig_u32 *res, int n,
- const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u32(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i32 __addosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
-static inline bool zig_addo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
+zig_extern int32_t __addosi4(int32_t lhs, int32_t rhs, int *overflow);
+static inline bool zig_addo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i32 full_res = __addosi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int32_t full_res = __addosi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i32(full_res, bits);
- return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits);
+ return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
-static inline void zig_vaddo_i32(zig_u8 *ov, zig_i32 *res, int n,
- const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i32(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_addo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
+static inline bool zig_addo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_u64 full_res;
+ uint64_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u64(full_res, bits);
- return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits);
+ return overflow || full_res < zig_minInt_u(64, bits) || full_res > zig_maxInt_u(64, bits);
#else
*res = zig_addw_u64(lhs, rhs, bits);
return *res < lhs;
#endif
}
-static inline void zig_vaddo_u64(zig_u8 *ov, zig_u64 *res, int n,
- const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u64(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i64 __addodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
-static inline bool zig_addo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
+zig_extern int64_t __addodi4(int64_t lhs, int64_t rhs, int *overflow);
+static inline bool zig_addo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_i64 full_res;
+ int64_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i64 full_res = __addodi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int64_t full_res = __addodi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i64(full_res, bits);
- return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits);
+ return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
-static inline void zig_vaddo_i64(zig_u8 *ov, zig_i64 *res, int n,
- const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i64(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_addo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_addo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_u8 full_res;
+ uint8_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u8(full_res, bits);
- return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits);
+ return overflow || full_res < zig_minInt_u(8, bits) || full_res > zig_maxInt_u(8, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_addo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u8)full_res;
+ *res = (uint8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vaddo_u8(zig_u8 *ov, zig_u8 *res, int n,
- const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u8(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_addo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
+static inline bool zig_addo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_i8 full_res;
+ int8_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i8(full_res, bits);
- return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits);
+ return overflow || full_res < zig_minInt_i(8, bits) || full_res > zig_maxInt_i(8, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_addo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i8)full_res;
+ *res = (int8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vaddo_i8(zig_u8 *ov, zig_i8 *res, int n,
- const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i8(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_addo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
+static inline bool zig_addo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_u16 full_res;
+ uint16_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u16(full_res, bits);
- return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits);
+ return overflow || full_res < zig_minInt_u(16, bits) || full_res > zig_maxInt_u(16, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_addo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u16)full_res;
+ *res = (uint16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vaddo_u16(zig_u8 *ov, zig_u16 *res, int n,
- const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u16(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_addo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
+static inline bool zig_addo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_i16 full_res;
+ int16_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i16(full_res, bits);
- return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits);
+ return overflow || full_res < zig_minInt_i(16, bits) || full_res > zig_maxInt_i(16, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_addo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i16)full_res;
+ *res = (int16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vaddo_i16(zig_u8 *ov, zig_i16 *res, int n,
- const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i16(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_subo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
+static inline bool zig_subo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u32(full_res, bits);
- return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits);
+ return overflow || full_res < zig_minInt_u(32, bits) || full_res > zig_maxInt_u(32, bits);
#else
*res = zig_subw_u32(lhs, rhs, bits);
return *res > lhs;
#endif
}
-static inline void zig_vsubo_u32(zig_u8 *ov, zig_u32 *res, int n,
- const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u32(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i32 __subosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
-static inline bool zig_subo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
+zig_extern int32_t __subosi4(int32_t lhs, int32_t rhs, int *overflow);
+static inline bool zig_subo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i32 full_res = __subosi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int32_t full_res = __subosi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i32(full_res, bits);
- return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits);
+ return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
-static inline void zig_vsubo_i32(zig_u8 *ov, zig_i32 *res, int n,
- const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i32(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_subo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
+static inline bool zig_subo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_u64 full_res;
+ uint64_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u64(full_res, bits);
- return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits);
+ return overflow || full_res < zig_minInt_u(64, bits) || full_res > zig_maxInt_u(64, bits);
#else
*res = zig_subw_u64(lhs, rhs, bits);
return *res > lhs;
#endif
}
-static inline void zig_vsubo_u64(zig_u8 *ov, zig_u64 *res, int n,
- const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u64(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i64 __subodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
-static inline bool zig_subo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
+zig_extern int64_t __subodi4(int64_t lhs, int64_t rhs, int *overflow);
+static inline bool zig_subo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_i64 full_res;
+ int64_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i64 full_res = __subodi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int64_t full_res = __subodi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i64(full_res, bits);
- return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits);
+ return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
-static inline void zig_vsubo_i64(zig_u8 *ov, zig_i64 *res, int n,
- const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i64(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_subo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_subo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_u8 full_res;
+ uint8_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u8(full_res, bits);
- return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits);
+ return overflow || full_res < zig_minInt_u(8, bits) || full_res > zig_maxInt_u(8, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_subo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u8)full_res;
+ *res = (uint8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vsubo_u8(zig_u8 *ov, zig_u8 *res, int n,
- const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u8(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_subo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
+static inline bool zig_subo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_i8 full_res;
+ int8_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i8(full_res, bits);
- return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits);
+ return overflow || full_res < zig_minInt_i(8, bits) || full_res > zig_maxInt_i(8, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_subo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i8)full_res;
+ *res = (int8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vsubo_i8(zig_u8 *ov, zig_i8 *res, int n,
- const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i8(&res[i], lhs[i], rhs[i], bits);
-}
-
-
-static inline bool zig_subo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
+static inline bool zig_subo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_u16 full_res;
+ uint16_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u16(full_res, bits);
- return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits);
+ return overflow || full_res < zig_minInt_u(16, bits) || full_res > zig_maxInt_u(16, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_subo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u16)full_res;
+ *res = (uint16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vsubo_u16(zig_u8 *ov, zig_u16 *res, int n,
- const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u16(&res[i], lhs[i], rhs[i], bits);
-}
-
-
-static inline bool zig_subo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
+static inline bool zig_subo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_i16 full_res;
+ int16_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i16(full_res, bits);
- return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits);
+ return overflow || full_res < zig_minInt_i(16, bits) || full_res > zig_maxInt_i(16, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_subo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i16)full_res;
+ *res = (int16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vsubo_i16(zig_u8 *ov, zig_i16 *res, int n,
- const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i16(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u32(full_res, bits);
- return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits);
+ return overflow || full_res < zig_minInt_u(32, bits) || full_res > zig_maxInt_u(32, bits);
#else
*res = zig_mulw_u32(lhs, rhs, bits);
- return rhs != zig_as_u32(0) && lhs > zig_maxInt(u32, bits) / rhs;
+ return rhs != UINT32_C(0) && lhs > zig_maxInt_u(32, bits) / rhs;
#endif
}
-static inline void zig_vmulo_u32(zig_u8 *ov, zig_u32 *res, int n,
- const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u32(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i32 __mulosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
-static inline bool zig_mulo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
+zig_extern int32_t __mulosi4(int32_t lhs, int32_t rhs, int *overflow);
+static inline bool zig_mulo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i32 full_res = __mulosi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int32_t full_res = __mulosi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i32(full_res, bits);
- return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits);
+ return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
-static inline void zig_vmulo_i32(zig_u8 *ov, zig_i32 *res, int n,
- const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i32(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_u64 full_res;
+ uint64_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u64(full_res, bits);
- return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits);
+ return overflow || full_res < zig_minInt_u(64, bits) || full_res > zig_maxInt_u(64, bits);
#else
*res = zig_mulw_u64(lhs, rhs, bits);
- return rhs != zig_as_u64(0) && lhs > zig_maxInt(u64, bits) / rhs;
+ return rhs != UINT64_C(0) && lhs > zig_maxInt_u(64, bits) / rhs;
#endif
}
-static inline void zig_vmulo_u64(zig_u8 *ov, zig_u64 *res, int n,
- const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u64(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i64 __mulodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
-static inline bool zig_mulo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
+zig_extern int64_t __mulodi4(int64_t lhs, int64_t rhs, int *overflow);
+static inline bool zig_mulo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_i64 full_res;
+ int64_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i64 full_res = __mulodi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int64_t full_res = __mulodi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i64(full_res, bits);
- return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits);
+ return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
-static inline void zig_vmulo_i64(zig_u8 *ov, zig_i64 *res, int n,
- const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i64(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_u8 full_res;
+ uint8_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u8(full_res, bits);
- return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits);
+ return overflow || full_res < zig_minInt_u(8, bits) || full_res > zig_maxInt_u(8, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_mulo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u8)full_res;
+ *res = (uint8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vmulo_u8(zig_u8 *ov, zig_u8 *res, int n,
- const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u8(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
+static inline bool zig_mulo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_i8 full_res;
+ int8_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i8(full_res, bits);
- return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits);
+ return overflow || full_res < zig_minInt_i(8, bits) || full_res > zig_maxInt_i(8, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_mulo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i8)full_res;
+ *res = (int8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vmulo_i8(zig_u8 *ov, zig_i8 *res, int n,
- const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i8(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_u16 full_res;
+ uint16_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u16(full_res, bits);
- return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits);
+ return overflow || full_res < zig_minInt_u(16, bits) || full_res > zig_maxInt_u(16, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_mulo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u16)full_res;
+ *res = (uint16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vmulo_u16(zig_u8 *ov, zig_u16 *res, int n,
- const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u16(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
+static inline bool zig_mulo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_i16 full_res;
+ int16_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i16(full_res, bits);
- return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits);
+ return overflow || full_res < zig_minInt_i(16, bits) || full_res > zig_maxInt_i(16, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_mulo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i16)full_res;
+ *res = (int16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vmulo_i16(zig_u8 *ov, zig_i16 *res, int n,
- const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i16(&res[i], lhs[i], rhs[i], bits);
-}
-
#define zig_int_builtins(w) \
- static inline bool zig_shlo_u##w(zig_u##w *res, zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \
+ static inline bool zig_shlo_u##w(uint##w##_t *res, uint##w##_t lhs, uint8_t rhs, uint8_t bits) { \
*res = zig_shlw_u##w(lhs, rhs, bits); \
- return lhs > zig_maxInt(u##w, bits) >> rhs; \
+ return lhs > zig_maxInt_u(w, bits) >> rhs; \
} \
\
- static inline bool zig_shlo_i##w(zig_i##w *res, zig_i##w lhs, zig_u8 rhs, zig_u8 bits) { \
+ static inline bool zig_shlo_i##w(int##w##_t *res, int##w##_t lhs, uint8_t rhs, uint8_t bits) { \
*res = zig_shlw_i##w(lhs, rhs, bits); \
- zig_i##w mask = (zig_i##w)(zig_maxInt_u##w << (bits - rhs - 1)); \
- return (lhs & mask) != zig_as_i##w(0) && (lhs & mask) != mask; \
+ int##w##_t mask = (int##w##_t)(UINT##w##_MAX << (bits - rhs - 1)); \
+ return (lhs & mask) != INT##w##_C(0) && (lhs & mask) != mask; \
} \
\
- static inline zig_u##w zig_shls_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
- zig_u##w res; \
- if (rhs >= bits) return lhs != zig_as_u##w(0) ? zig_maxInt(u##w, bits) : lhs; \
- return zig_shlo_u##w(&res, lhs, (zig_u8)rhs, bits) ? zig_maxInt(u##w, bits) : res; \
+ static inline uint##w##_t zig_shls_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
+ uint##w##_t res; \
+ if (rhs >= bits) return lhs != UINT##w##_C(0) ? zig_maxInt_u(w, bits) : lhs; \
+ return zig_shlo_u##w(&res, lhs, (uint8_t)rhs, bits) ? zig_maxInt_u(w, bits) : res; \
} \
\
- static inline zig_i##w zig_shls_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- zig_i##w res; \
- if ((zig_u##w)rhs < (zig_u##w)bits && !zig_shlo_i##w(&res, lhs, rhs, bits)) return res; \
- return lhs < zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \
+ static inline int##w##_t zig_shls_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ int##w##_t res; \
+ if ((uint##w##_t)rhs < (uint##w##_t)bits && !zig_shlo_i##w(&res, lhs, (uint8_t)rhs, bits)) return res; \
+ return lhs < INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
} \
\
- static inline zig_u##w zig_adds_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
- zig_u##w res; \
- return zig_addo_u##w(&res, lhs, rhs, bits) ? zig_maxInt(u##w, bits) : res; \
+ static inline uint##w##_t zig_adds_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
+ uint##w##_t res; \
+ return zig_addo_u##w(&res, lhs, rhs, bits) ? zig_maxInt_u(w, bits) : res; \
} \
\
- static inline zig_i##w zig_adds_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- zig_i##w res; \
+ static inline int##w##_t zig_adds_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ int##w##_t res; \
if (!zig_addo_i##w(&res, lhs, rhs, bits)) return res; \
- return res >= zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \
+ return res >= INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
} \
\
- static inline zig_u##w zig_subs_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
- zig_u##w res; \
- return zig_subo_u##w(&res, lhs, rhs, bits) ? zig_minInt(u##w, bits) : res; \
+ static inline uint##w##_t zig_subs_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
+ uint##w##_t res; \
+ return zig_subo_u##w(&res, lhs, rhs, bits) ? zig_minInt_u(w, bits) : res; \
} \
\
- static inline zig_i##w zig_subs_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- zig_i##w res; \
+ static inline int##w##_t zig_subs_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ int##w##_t res; \
if (!zig_subo_i##w(&res, lhs, rhs, bits)) return res; \
- return res >= zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \
+ return res >= INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
} \
\
- static inline zig_u##w zig_muls_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
- zig_u##w res; \
- return zig_mulo_u##w(&res, lhs, rhs, bits) ? zig_maxInt(u##w, bits) : res; \
+ static inline uint##w##_t zig_muls_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
+ uint##w##_t res; \
+ return zig_mulo_u##w(&res, lhs, rhs, bits) ? zig_maxInt_u(w, bits) : res; \
} \
\
- static inline zig_i##w zig_muls_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- zig_i##w res; \
+ static inline int##w##_t zig_muls_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ int##w##_t res; \
if (!zig_mulo_i##w(&res, lhs, rhs, bits)) return res; \
- return (lhs ^ rhs) < zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \
+ return (lhs ^ rhs) < INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
}
zig_int_builtins(8)
zig_int_builtins(16)
@@ -988,89 +986,89 @@ zig_int_builtins(32)
zig_int_builtins(64)
#define zig_builtin8(name, val) __builtin_##name(val)
-typedef zig_c_uint zig_Builtin8;
+typedef unsigned int zig_Builtin8;
#define zig_builtin16(name, val) __builtin_##name(val)
-typedef zig_c_uint zig_Builtin16;
+typedef unsigned int zig_Builtin16;
#if INT_MIN <= INT32_MIN
#define zig_builtin32(name, val) __builtin_##name(val)
-typedef zig_c_uint zig_Builtin32;
+typedef unsigned int zig_Builtin32;
#elif LONG_MIN <= INT32_MIN
#define zig_builtin32(name, val) __builtin_##name##l(val)
-typedef zig_c_ulong zig_Builtin32;
+typedef unsigned long zig_Builtin32;
#endif
#if INT_MIN <= INT64_MIN
#define zig_builtin64(name, val) __builtin_##name(val)
-typedef zig_c_uint zig_Builtin64;
+typedef unsigned int zig_Builtin64;
#elif LONG_MIN <= INT64_MIN
#define zig_builtin64(name, val) __builtin_##name##l(val)
-typedef zig_c_ulong zig_Builtin64;
+typedef unsigned long zig_Builtin64;
#elif LLONG_MIN <= INT64_MIN
#define zig_builtin64(name, val) __builtin_##name##ll(val)
-typedef zig_c_ulonglong zig_Builtin64;
+typedef unsigned long long zig_Builtin64;
#endif
-static inline zig_u8 zig_byte_swap_u8(zig_u8 val, zig_u8 bits) {
+static inline uint8_t zig_byte_swap_u8(uint8_t val, uint8_t bits) {
return zig_wrap_u8(val >> (8 - bits), bits);
}
-static inline zig_i8 zig_byte_swap_i8(zig_i8 val, zig_u8 bits) {
- return zig_wrap_i8((zig_i8)zig_byte_swap_u8((zig_u8)val, bits), bits);
+static inline int8_t zig_byte_swap_i8(int8_t val, uint8_t bits) {
+ return zig_wrap_i8((int8_t)zig_byte_swap_u8((uint8_t)val, bits), bits);
}
-static inline zig_u16 zig_byte_swap_u16(zig_u16 val, zig_u8 bits) {
- zig_u16 full_res;
+static inline uint16_t zig_byte_swap_u16(uint16_t val, uint8_t bits) {
+ uint16_t full_res;
#if zig_has_builtin(bswap16) || defined(zig_gnuc)
full_res = __builtin_bswap16(val);
#else
- full_res = (zig_u16)zig_byte_swap_u8((zig_u8)(val >> 0), 8) << 8 |
- (zig_u16)zig_byte_swap_u8((zig_u8)(val >> 8), 8) >> 0;
+ full_res = (uint16_t)zig_byte_swap_u8((uint8_t)(val >> 0), 8) << 8 |
+ (uint16_t)zig_byte_swap_u8((uint8_t)(val >> 8), 8) >> 0;
#endif
return zig_wrap_u16(full_res >> (16 - bits), bits);
}
-static inline zig_i16 zig_byte_swap_i16(zig_i16 val, zig_u8 bits) {
- return zig_wrap_i16((zig_i16)zig_byte_swap_u16((zig_u16)val, bits), bits);
+static inline int16_t zig_byte_swap_i16(int16_t val, uint8_t bits) {
+ return zig_wrap_i16((int16_t)zig_byte_swap_u16((uint16_t)val, bits), bits);
}
-static inline zig_u32 zig_byte_swap_u32(zig_u32 val, zig_u8 bits) {
- zig_u32 full_res;
+static inline uint32_t zig_byte_swap_u32(uint32_t val, uint8_t bits) {
+ uint32_t full_res;
#if zig_has_builtin(bswap32) || defined(zig_gnuc)
full_res = __builtin_bswap32(val);
#else
- full_res = (zig_u32)zig_byte_swap_u16((zig_u16)(val >> 0), 16) << 16 |
- (zig_u32)zig_byte_swap_u16((zig_u16)(val >> 16), 16) >> 0;
+ full_res = (uint32_t)zig_byte_swap_u16((uint16_t)(val >> 0), 16) << 16 |
+ (uint32_t)zig_byte_swap_u16((uint16_t)(val >> 16), 16) >> 0;
#endif
return zig_wrap_u32(full_res >> (32 - bits), bits);
}
-static inline zig_i32 zig_byte_swap_i32(zig_i32 val, zig_u8 bits) {
- return zig_wrap_i32((zig_i32)zig_byte_swap_u32((zig_u32)val, bits), bits);
+static inline int32_t zig_byte_swap_i32(int32_t val, uint8_t bits) {
+ return zig_wrap_i32((int32_t)zig_byte_swap_u32((uint32_t)val, bits), bits);
}
-static inline zig_u64 zig_byte_swap_u64(zig_u64 val, zig_u8 bits) {
- zig_u64 full_res;
+static inline uint64_t zig_byte_swap_u64(uint64_t val, uint8_t bits) {
+ uint64_t full_res;
#if zig_has_builtin(bswap64) || defined(zig_gnuc)
full_res = __builtin_bswap64(val);
#else
- full_res = (zig_u64)zig_byte_swap_u32((zig_u32)(val >> 0), 32) << 32 |
- (zig_u64)zig_byte_swap_u32((zig_u32)(val >> 32), 32) >> 0;
+ full_res = (uint64_t)zig_byte_swap_u32((uint32_t)(val >> 0), 32) << 32 |
+ (uint64_t)zig_byte_swap_u32((uint32_t)(val >> 32), 32) >> 0;
#endif
return zig_wrap_u64(full_res >> (64 - bits), bits);
}
-static inline zig_i64 zig_byte_swap_i64(zig_i64 val, zig_u8 bits) {
- return zig_wrap_i64((zig_i64)zig_byte_swap_u64((zig_u64)val, bits), bits);
+static inline int64_t zig_byte_swap_i64(int64_t val, uint8_t bits) {
+ return zig_wrap_i64((int64_t)zig_byte_swap_u64((uint64_t)val, bits), bits);
}
-static inline zig_u8 zig_bit_reverse_u8(zig_u8 val, zig_u8 bits) {
- zig_u8 full_res;
+static inline uint8_t zig_bit_reverse_u8(uint8_t val, uint8_t bits) {
+ uint8_t full_res;
#if zig_has_builtin(bitreverse8)
full_res = __builtin_bitreverse8(val);
#else
- static zig_u8 const lut[0x10] = {
+ static uint8_t const lut[0x10] = {
0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe,
0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf
};
@@ -1079,62 +1077,62 @@ static inline zig_u8 zig_bit_reverse_u8(zig_u8 val, zig_u8 bits) {
return zig_wrap_u8(full_res >> (8 - bits), bits);
}
-static inline zig_i8 zig_bit_reverse_i8(zig_i8 val, zig_u8 bits) {
- return zig_wrap_i8((zig_i8)zig_bit_reverse_u8((zig_u8)val, bits), bits);
+static inline int8_t zig_bit_reverse_i8(int8_t val, uint8_t bits) {
+ return zig_wrap_i8((int8_t)zig_bit_reverse_u8((uint8_t)val, bits), bits);
}
-static inline zig_u16 zig_bit_reverse_u16(zig_u16 val, zig_u8 bits) {
- zig_u16 full_res;
+static inline uint16_t zig_bit_reverse_u16(uint16_t val, uint8_t bits) {
+ uint16_t full_res;
#if zig_has_builtin(bitreverse16)
full_res = __builtin_bitreverse16(val);
#else
- full_res = (zig_u16)zig_bit_reverse_u8((zig_u8)(val >> 0), 8) << 8 |
- (zig_u16)zig_bit_reverse_u8((zig_u8)(val >> 8), 8) >> 0;
+ full_res = (uint16_t)zig_bit_reverse_u8((uint8_t)(val >> 0), 8) << 8 |
+ (uint16_t)zig_bit_reverse_u8((uint8_t)(val >> 8), 8) >> 0;
#endif
return zig_wrap_u16(full_res >> (16 - bits), bits);
}
-static inline zig_i16 zig_bit_reverse_i16(zig_i16 val, zig_u8 bits) {
- return zig_wrap_i16((zig_i16)zig_bit_reverse_u16((zig_u16)val, bits), bits);
+static inline int16_t zig_bit_reverse_i16(int16_t val, uint8_t bits) {
+ return zig_wrap_i16((int16_t)zig_bit_reverse_u16((uint16_t)val, bits), bits);
}
-static inline zig_u32 zig_bit_reverse_u32(zig_u32 val, zig_u8 bits) {
- zig_u32 full_res;
+static inline uint32_t zig_bit_reverse_u32(uint32_t val, uint8_t bits) {
+ uint32_t full_res;
#if zig_has_builtin(bitreverse32)
full_res = __builtin_bitreverse32(val);
#else
- full_res = (zig_u32)zig_bit_reverse_u16((zig_u16)(val >> 0), 16) << 16 |
- (zig_u32)zig_bit_reverse_u16((zig_u16)(val >> 16), 16) >> 0;
+ full_res = (uint32_t)zig_bit_reverse_u16((uint16_t)(val >> 0), 16) << 16 |
+ (uint32_t)zig_bit_reverse_u16((uint16_t)(val >> 16), 16) >> 0;
#endif
return zig_wrap_u32(full_res >> (32 - bits), bits);
}
-static inline zig_i32 zig_bit_reverse_i32(zig_i32 val, zig_u8 bits) {
- return zig_wrap_i32((zig_i32)zig_bit_reverse_u32((zig_u32)val, bits), bits);
+static inline int32_t zig_bit_reverse_i32(int32_t val, uint8_t bits) {
+ return zig_wrap_i32((int32_t)zig_bit_reverse_u32((uint32_t)val, bits), bits);
}
-static inline zig_u64 zig_bit_reverse_u64(zig_u64 val, zig_u8 bits) {
- zig_u64 full_res;
+static inline uint64_t zig_bit_reverse_u64(uint64_t val, uint8_t bits) {
+ uint64_t full_res;
#if zig_has_builtin(bitreverse64)
full_res = __builtin_bitreverse64(val);
#else
- full_res = (zig_u64)zig_bit_reverse_u32((zig_u32)(val >> 0), 32) << 32 |
- (zig_u64)zig_bit_reverse_u32((zig_u32)(val >> 32), 32) >> 0;
+ full_res = (uint64_t)zig_bit_reverse_u32((uint32_t)(val >> 0), 32) << 32 |
+ (uint64_t)zig_bit_reverse_u32((uint32_t)(val >> 32), 32) >> 0;
#endif
return zig_wrap_u64(full_res >> (64 - bits), bits);
}
-static inline zig_i64 zig_bit_reverse_i64(zig_i64 val, zig_u8 bits) {
- return zig_wrap_i64((zig_i64)zig_bit_reverse_u64((zig_u64)val, bits), bits);
+static inline int64_t zig_bit_reverse_i64(int64_t val, uint8_t bits) {
+ return zig_wrap_i64((int64_t)zig_bit_reverse_u64((uint64_t)val, bits), bits);
}
#define zig_builtin_popcount_common(w) \
- static inline zig_u8 zig_popcount_i##w(zig_i##w val, zig_u8 bits) { \
- return zig_popcount_u##w((zig_u##w)val, bits); \
+ static inline uint8_t zig_popcount_i##w(int##w##_t val, uint8_t bits) { \
+ return zig_popcount_u##w((uint##w##_t)val, bits); \
}
#if zig_has_builtin(popcount) || defined(zig_gnuc)
#define zig_builtin_popcount(w) \
- static inline zig_u8 zig_popcount_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_popcount_u##w(uint##w##_t val, uint8_t bits) { \
(void)bits; \
return zig_builtin##w(popcount, val); \
} \
@@ -1142,12 +1140,12 @@ static inline zig_i64 zig_bit_reverse_i64(zig_i64 val, zig_u8 bits) {
zig_builtin_popcount_common(w)
#else
#define zig_builtin_popcount(w) \
- static inline zig_u8 zig_popcount_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_popcount_u##w(uint##w##_t val, uint8_t bits) { \
(void)bits; \
- zig_u##w temp = val - ((val >> 1) & (zig_maxInt_u##w / 3)); \
- temp = (temp & (zig_maxInt_u##w / 5)) + ((temp >> 2) & (zig_maxInt_u##w / 5)); \
- temp = (temp + (temp >> 4)) & (zig_maxInt_u##w / 17); \
- return temp * (zig_maxInt_u##w / 255) >> (w - 8); \
+ uint##w##_t temp = val - ((val >> 1) & (UINT##w##_MAX / 3)); \
+ temp = (temp & (UINT##w##_MAX / 5)) + ((temp >> 2) & (UINT##w##_MAX / 5)); \
+ temp = (temp + (temp >> 4)) & (UINT##w##_MAX / 17); \
+ return temp * (UINT##w##_MAX / 255) >> (w - 8); \
} \
\
zig_builtin_popcount_common(w)
@@ -1158,12 +1156,12 @@ zig_builtin_popcount(32)
zig_builtin_popcount(64)
#define zig_builtin_ctz_common(w) \
- static inline zig_u8 zig_ctz_i##w(zig_i##w val, zig_u8 bits) { \
- return zig_ctz_u##w((zig_u##w)val, bits); \
+ static inline uint8_t zig_ctz_i##w(int##w##_t val, uint8_t bits) { \
+ return zig_ctz_u##w((uint##w##_t)val, bits); \
}
#if zig_has_builtin(ctz) || defined(zig_gnuc)
#define zig_builtin_ctz(w) \
- static inline zig_u8 zig_ctz_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_ctz_u##w(uint##w##_t val, uint8_t bits) { \
if (val == 0) return bits; \
return zig_builtin##w(ctz, val); \
} \
@@ -1171,7 +1169,7 @@ zig_builtin_popcount(64)
zig_builtin_ctz_common(w)
#else
#define zig_builtin_ctz(w) \
- static inline zig_u8 zig_ctz_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_ctz_u##w(uint##w##_t val, uint8_t bits) { \
return zig_popcount_u##w(zig_not_u##w(val, bits) & zig_subw_u##w(val, 1, bits), bits); \
} \
\
@@ -1183,12 +1181,12 @@ zig_builtin_ctz(32)
zig_builtin_ctz(64)
#define zig_builtin_clz_common(w) \
- static inline zig_u8 zig_clz_i##w(zig_i##w val, zig_u8 bits) { \
- return zig_clz_u##w((zig_u##w)val, bits); \
+ static inline uint8_t zig_clz_i##w(int##w##_t val, uint8_t bits) { \
+ return zig_clz_u##w((uint##w##_t)val, bits); \
}
#if zig_has_builtin(clz) || defined(zig_gnuc)
#define zig_builtin_clz(w) \
- static inline zig_u8 zig_clz_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_clz_u##w(uint##w##_t val, uint8_t bits) { \
if (val == 0) return bits; \
return zig_builtin##w(clz, val) - (zig_bitSizeOf(zig_Builtin##w) - bits); \
} \
@@ -1196,7 +1194,7 @@ zig_builtin_ctz(64)
zig_builtin_clz_common(w)
#else
#define zig_builtin_clz(w) \
- static inline zig_u8 zig_clz_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_clz_u##w(uint##w##_t val, uint8_t bits) { \
return zig_ctz_u##w(zig_bit_reverse_u##w(val, bits), bits); \
} \
\
@@ -1207,7 +1205,7 @@ zig_builtin_clz(16)
zig_builtin_clz(32)
zig_builtin_clz(64)
-/* ======================== 128-bit Integer Routines ======================== */
+/* ======================== 128-bit Integer Support ========================= */
#if !defined(zig_has_int128)
# if defined(__SIZEOF_INT128__)
@@ -1222,18 +1220,18 @@ zig_builtin_clz(64)
typedef unsigned __int128 zig_u128;
typedef signed __int128 zig_i128;
-#define zig_as_u128(hi, lo) ((zig_u128)(hi)<<64|(lo))
-#define zig_as_i128(hi, lo) ((zig_i128)zig_as_u128(hi, lo))
-#define zig_as_constant_u128(hi, lo) zig_as_u128(hi, lo)
-#define zig_as_constant_i128(hi, lo) zig_as_i128(hi, lo)
-#define zig_hi_u128(val) ((zig_u64)((val) >> 64))
-#define zig_lo_u128(val) ((zig_u64)((val) >> 0))
-#define zig_hi_i128(val) ((zig_i64)((val) >> 64))
-#define zig_lo_i128(val) ((zig_u64)((val) >> 0))
+#define zig_make_u128(hi, lo) ((zig_u128)(hi)<<64|(lo))
+#define zig_make_i128(hi, lo) ((zig_i128)zig_make_u128(hi, lo))
+#define zig_init_u128(hi, lo) zig_make_u128(hi, lo)
+#define zig_init_i128(hi, lo) zig_make_i128(hi, lo)
+#define zig_hi_u128(val) ((uint64_t)((val) >> 64))
+#define zig_lo_u128(val) ((uint64_t)((val) >> 0))
+#define zig_hi_i128(val) (( int64_t)((val) >> 64))
+#define zig_lo_i128(val) ((uint64_t)((val) >> 0))
#define zig_bitcast_u128(val) ((zig_u128)(val))
#define zig_bitcast_i128(val) ((zig_i128)(val))
#define zig_cmp_int128(Type) \
- static inline zig_i32 zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \
+ static inline int32_t zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \
return (lhs > rhs) - (lhs < rhs); \
}
#define zig_bit_int128(Type, operation, operator) \
@@ -1243,32 +1241,32 @@ typedef signed __int128 zig_i128;
#else /* zig_has_int128 */
-#if __LITTLE_ENDIAN__ || _MSC_VER
-typedef struct { zig_align(16) zig_u64 lo; zig_u64 hi; } zig_u128;
-typedef struct { zig_align(16) zig_u64 lo; zig_i64 hi; } zig_i128;
+#if zig_little_endian
+typedef struct { zig_align(16) uint64_t lo; uint64_t hi; } zig_u128;
+typedef struct { zig_align(16) uint64_t lo; int64_t hi; } zig_i128;
#else
-typedef struct { zig_align(16) zig_u64 hi; zig_u64 lo; } zig_u128;
-typedef struct { zig_align(16) zig_i64 hi; zig_u64 lo; } zig_i128;
+typedef struct { zig_align(16) uint64_t hi; uint64_t lo; } zig_u128;
+typedef struct { zig_align(16) int64_t hi; uint64_t lo; } zig_i128;
#endif
-#define zig_as_u128(hi, lo) ((zig_u128){ .h##i = (hi), .l##o = (lo) })
-#define zig_as_i128(hi, lo) ((zig_i128){ .h##i = (hi), .l##o = (lo) })
+#define zig_make_u128(hi, lo) ((zig_u128){ .h##i = (hi), .l##o = (lo) })
+#define zig_make_i128(hi, lo) ((zig_i128){ .h##i = (hi), .l##o = (lo) })
-#if _MSC_VER
-#define zig_as_constant_u128(hi, lo) { .h##i = (hi), .l##o = (lo) }
-#define zig_as_constant_i128(hi, lo) { .h##i = (hi), .l##o = (lo) }
-#else
-#define zig_as_constant_u128(hi, lo) zig_as_u128(hi, lo)
-#define zig_as_constant_i128(hi, lo) zig_as_i128(hi, lo)
+#if _MSC_VER /* MSVC doesn't allow struct literals in constant expressions */
+#define zig_init_u128(hi, lo) { .h##i = (hi), .l##o = (lo) }
+#define zig_init_i128(hi, lo) { .h##i = (hi), .l##o = (lo) }
+#else /* But non-MSVC doesn't like the unprotected commas */
+#define zig_init_u128(hi, lo) zig_make_u128(hi, lo)
+#define zig_init_i128(hi, lo) zig_make_i128(hi, lo)
#endif
#define zig_hi_u128(val) ((val).hi)
#define zig_lo_u128(val) ((val).lo)
#define zig_hi_i128(val) ((val).hi)
#define zig_lo_i128(val) ((val).lo)
-#define zig_bitcast_u128(val) zig_as_u128((zig_u64)(val).hi, (val).lo)
-#define zig_bitcast_i128(val) zig_as_i128((zig_i64)(val).hi, (val).lo)
+#define zig_bitcast_u128(val) zig_make_u128((uint64_t)(val).hi, (val).lo)
+#define zig_bitcast_i128(val) zig_make_i128(( int64_t)(val).hi, (val).lo)
#define zig_cmp_int128(Type) \
- static inline zig_i32 zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \
+ static inline int32_t zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \
return (lhs.hi == rhs.hi) \
? (lhs.lo > rhs.lo) - (lhs.lo < rhs.lo) \
: (lhs.hi > rhs.hi) - (lhs.hi < rhs.hi); \
@@ -1280,10 +1278,10 @@ typedef struct { zig_align(16) zig_i64 hi; zig_u64 lo; } zig_i128;
#endif /* zig_has_int128 */
-#define zig_minInt_u128 zig_as_u128(zig_minInt_u64, zig_minInt_u64)
-#define zig_maxInt_u128 zig_as_u128(zig_maxInt_u64, zig_maxInt_u64)
-#define zig_minInt_i128 zig_as_i128(zig_minInt_i64, zig_minInt_u64)
-#define zig_maxInt_i128 zig_as_i128(zig_maxInt_i64, zig_maxInt_u64)
+#define zig_minInt_u128 zig_make_u128(zig_minInt_u64, zig_minInt_u64)
+#define zig_maxInt_u128 zig_make_u128(zig_maxInt_u64, zig_maxInt_u64)
+#define zig_minInt_i128 zig_make_i128(zig_minInt_i64, zig_minInt_u64)
+#define zig_maxInt_i128 zig_make_i128(zig_maxInt_i64, zig_maxInt_u64)
zig_cmp_int128(u128)
zig_cmp_int128(i128)
@@ -1297,28 +1295,33 @@ zig_bit_int128(i128, or, |)
zig_bit_int128(u128, xor, ^)
zig_bit_int128(i128, xor, ^)
-static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs);
+static inline zig_u128 zig_shr_u128(zig_u128 lhs, uint8_t rhs);
#if zig_has_int128
-static inline zig_u128 zig_not_u128(zig_u128 val, zig_u8 bits) {
- return val ^ zig_maxInt(u128, bits);
+static inline zig_u128 zig_not_u128(zig_u128 val, uint8_t bits) {
+ return val ^ zig_maxInt_u(128, bits);
}
-static inline zig_i128 zig_not_i128(zig_i128 val, zig_u8 bits) {
+static inline zig_i128 zig_not_i128(zig_i128 val, uint8_t bits) {
(void)bits;
return ~val;
}
-static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs) {
+static inline zig_u128 zig_shr_u128(zig_u128 lhs, uint8_t rhs) {
return lhs >> rhs;
}
-static inline zig_u128 zig_shl_u128(zig_u128 lhs, zig_u8 rhs) {
+static inline zig_u128 zig_shl_u128(zig_u128 lhs, uint8_t rhs) {
return lhs << rhs;
}
-static inline zig_i128 zig_shl_i128(zig_i128 lhs, zig_u8 rhs) {
+static inline zig_i128 zig_shr_i128(zig_i128 lhs, uint8_t rhs) {
+ zig_i128 sign_mask = lhs < zig_make_i128(0, 0) ? -zig_make_i128(0, 1) : zig_make_i128(0, 0);
+ return ((lhs ^ sign_mask) >> rhs) ^ sign_mask;
+}
+
+static inline zig_i128 zig_shl_i128(zig_i128 lhs, uint8_t rhs) {
return lhs << rhs;
}
@@ -1363,40 +1366,46 @@ static inline zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) {
}
static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) {
- return zig_div_trunc_i128(lhs, rhs) - (((lhs ^ rhs) & zig_rem_i128(lhs, rhs)) < zig_as_i128(0, 0));
+ return zig_div_trunc_i128(lhs, rhs) - (((lhs ^ rhs) & zig_rem_i128(lhs, rhs)) < zig_make_i128(0, 0));
}
static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) {
zig_i128 rem = zig_rem_i128(lhs, rhs);
- return rem + (((lhs ^ rhs) & rem) < zig_as_i128(0, 0) ? rhs : zig_as_i128(0, 0));
+ return rem + (((lhs ^ rhs) & rem) < zig_make_i128(0, 0) ? rhs : zig_make_i128(0, 0));
}
#else /* zig_has_int128 */
-static inline zig_u128 zig_not_u128(zig_u128 val, zig_u8 bits) {
- return (zig_u128){ .hi = zig_not_u64(val.hi, bits - zig_as_u8(64)), .lo = zig_not_u64(val.lo, zig_as_u8(64)) };
+static inline zig_u128 zig_not_u128(zig_u128 val, uint8_t bits) {
+ return (zig_u128){ .hi = zig_not_u64(val.hi, bits - UINT8_C(64)), .lo = zig_not_u64(val.lo, UINT8_C(64)) };
}
-static inline zig_i128 zig_not_i128(zig_i128 val, zig_u8 bits) {
- return (zig_i128){ .hi = zig_not_i64(val.hi, bits - zig_as_u8(64)), .lo = zig_not_u64(val.lo, zig_as_u8(64)) };
+static inline zig_i128 zig_not_i128(zig_i128 val, uint8_t bits) {
+ return (zig_i128){ .hi = zig_not_i64(val.hi, bits - UINT8_C(64)), .lo = zig_not_u64(val.lo, UINT8_C(64)) };
}
-static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs) {
- if (rhs == zig_as_u8(0)) return lhs;
- if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = zig_minInt_u64, .lo = lhs.hi >> (rhs - zig_as_u8(64)) };
- return (zig_u128){ .hi = lhs.hi >> rhs, .lo = lhs.hi << (zig_as_u8(64) - rhs) | lhs.lo >> rhs };
+static inline zig_u128 zig_shr_u128(zig_u128 lhs, uint8_t rhs) {
+ if (rhs == UINT8_C(0)) return lhs;
+ if (rhs >= UINT8_C(64)) return (zig_u128){ .hi = zig_minInt_u64, .lo = lhs.hi >> (rhs - UINT8_C(64)) };
+ return (zig_u128){ .hi = lhs.hi >> rhs, .lo = lhs.hi << (UINT8_C(64) - rhs) | lhs.lo >> rhs };
}
-static inline zig_u128 zig_shl_u128(zig_u128 lhs, zig_u8 rhs) {
- if (rhs == zig_as_u8(0)) return lhs;
- if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = lhs.lo << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 };
- return (zig_u128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs };
+static inline zig_u128 zig_shl_u128(zig_u128 lhs, uint8_t rhs) {
+ if (rhs == UINT8_C(0)) return lhs;
+ if (rhs >= UINT8_C(64)) return (zig_u128){ .hi = lhs.lo << (rhs - UINT8_C(64)), .lo = zig_minInt_u64 };
+ return (zig_u128){ .hi = lhs.hi << rhs | lhs.lo >> (UINT8_C(64) - rhs), .lo = lhs.lo << rhs };
}
-static inline zig_i128 zig_shl_i128(zig_i128 lhs, zig_u8 rhs) {
- if (rhs == zig_as_u8(0)) return lhs;
- if (rhs >= zig_as_u8(64)) return (zig_i128){ .hi = lhs.lo << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 };
- return (zig_i128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs };
+static inline zig_i128 zig_shr_i128(zig_i128 lhs, uint8_t rhs) {
+ if (rhs == UINT8_C(0)) return lhs;
+ if (rhs >= UINT8_C(64)) return (zig_i128){ .hi = zig_shr_i64(lhs.hi, 63), .lo = zig_shr_i64(lhs.hi, (rhs - UINT8_C(64))) };
+ return (zig_i128){ .hi = zig_shr_i64(lhs.hi, rhs), .lo = lhs.lo >> rhs | (uint64_t)lhs.hi << (UINT8_C(64) - rhs) };
+}
+
+static inline zig_i128 zig_shl_i128(zig_i128 lhs, uint8_t rhs) {
+ if (rhs == UINT8_C(0)) return lhs;
+ if (rhs >= UINT8_C(64)) return (zig_i128){ .hi = lhs.lo << (rhs - UINT8_C(64)), .lo = zig_minInt_u64 };
+ return (zig_i128){ .hi = lhs.hi << rhs | lhs.lo >> (UINT8_C(64) - rhs), .lo = lhs.lo << rhs };
}
static inline zig_u128 zig_add_u128(zig_u128 lhs, zig_u128 rhs) {
@@ -1424,14 +1433,14 @@ static inline zig_i128 zig_sub_i128(zig_i128 lhs, zig_i128 rhs) {
}
zig_extern zig_i128 __multi3(zig_i128 lhs, zig_i128 rhs);
-static zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs) {
- return zig_bitcast_u128(__multi3(zig_bitcast_i128(lhs), zig_bitcast_i128(rhs)));
-}
-
static zig_i128 zig_mul_i128(zig_i128 lhs, zig_i128 rhs) {
return __multi3(lhs, rhs);
}
+static zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs) {
+ return zig_bitcast_u128(zig_mul_i128(zig_bitcast_i128(lhs), zig_bitcast_i128(rhs)));
+}
+
zig_extern zig_u128 __udivti3(zig_u128 lhs, zig_u128 rhs);
static zig_u128 zig_div_trunc_u128(zig_u128 lhs, zig_u128 rhs) {
return __udivti3(lhs, rhs);
@@ -1454,11 +1463,11 @@ static zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) {
static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) {
zig_i128 rem = zig_rem_i128(lhs, rhs);
- return zig_add_i128(rem, (((lhs.hi ^ rhs.hi) & rem.hi) < zig_as_i64(0) ? rhs : zig_as_i128(0, 0)));
+ return zig_add_i128(rem, ((lhs.hi ^ rhs.hi) & rem.hi) < INT64_C(0) ? rhs : zig_make_i128(0, 0));
}
static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) {
- return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), zig_as_i128(0, zig_cmp_i128(zig_and_i128(zig_xor_i128(lhs, rhs), zig_rem_i128(lhs, rhs)), zig_as_i128(0, 0)) < zig_as_i32(0)));
+ return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), zig_make_i128(0, zig_cmp_i128(zig_and_i128(zig_xor_i128(lhs, rhs), zig_rem_i128(lhs, rhs)), zig_make_i128(0, 0)) < INT32_C(0)));
}
#endif /* zig_has_int128 */
@@ -1471,326 +1480,1265 @@ static inline zig_u128 zig_nand_u128(zig_u128 lhs, zig_u128 rhs) {
}
static inline zig_u128 zig_min_u128(zig_u128 lhs, zig_u128 rhs) {
- return zig_cmp_u128(lhs, rhs) < zig_as_i32(0) ? lhs : rhs;
+ return zig_cmp_u128(lhs, rhs) < INT32_C(0) ? lhs : rhs;
}
static inline zig_i128 zig_min_i128(zig_i128 lhs, zig_i128 rhs) {
- return zig_cmp_i128(lhs, rhs) < zig_as_i32(0) ? lhs : rhs;
+ return zig_cmp_i128(lhs, rhs) < INT32_C(0) ? lhs : rhs;
}
static inline zig_u128 zig_max_u128(zig_u128 lhs, zig_u128 rhs) {
- return zig_cmp_u128(lhs, rhs) > zig_as_i32(0) ? lhs : rhs;
+ return zig_cmp_u128(lhs, rhs) > INT32_C(0) ? lhs : rhs;
}
static inline zig_i128 zig_max_i128(zig_i128 lhs, zig_i128 rhs) {
- return zig_cmp_i128(lhs, rhs) > zig_as_i32(0) ? lhs : rhs;
+ return zig_cmp_i128(lhs, rhs) > INT32_C(0) ? lhs : rhs;
}
-static inline zig_i128 zig_shr_i128(zig_i128 lhs, zig_u8 rhs) {
- zig_i128 sign_mask = zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_sub_i128(zig_as_i128(0, 0), zig_as_i128(0, 1)) : zig_as_i128(0, 0);
- return zig_xor_i128(zig_bitcast_i128(zig_shr_u128(zig_bitcast_u128(zig_xor_i128(lhs, sign_mask)), rhs)), sign_mask);
+static inline zig_u128 zig_wrap_u128(zig_u128 val, uint8_t bits) {
+ return zig_and_u128(val, zig_maxInt_u(128, bits));
}
-static inline zig_u128 zig_wrap_u128(zig_u128 val, zig_u8 bits) {
- return zig_and_u128(val, zig_maxInt(u128, bits));
+static inline zig_i128 zig_wrap_i128(zig_i128 val, uint8_t bits) {
+ if (bits > UINT8_C(64)) return zig_make_i128(zig_wrap_i64(zig_hi_i128(val), bits - UINT8_C(64)), zig_lo_i128(val));
+ int64_t lo = zig_wrap_i64((int64_t)zig_lo_i128(val), bits);
+ return zig_make_i128(zig_shr_i64(lo, 63), (uint64_t)lo);
}
-static inline zig_i128 zig_wrap_i128(zig_i128 val, zig_u8 bits) {
- return zig_as_i128(zig_wrap_i64(zig_hi_i128(val), bits - zig_as_u8(64)), zig_lo_i128(val));
-}
-
-static inline zig_u128 zig_shlw_u128(zig_u128 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline zig_u128 zig_shlw_u128(zig_u128 lhs, uint8_t rhs, uint8_t bits) {
return zig_wrap_u128(zig_shl_u128(lhs, rhs), bits);
}
-static inline zig_i128 zig_shlw_i128(zig_i128 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline zig_i128 zig_shlw_i128(zig_i128 lhs, uint8_t rhs, uint8_t bits) {
return zig_wrap_i128(zig_bitcast_i128(zig_shl_u128(zig_bitcast_u128(lhs), rhs)), bits);
}
-static inline zig_u128 zig_addw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_addw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
return zig_wrap_u128(zig_add_u128(lhs, rhs), bits);
}
-static inline zig_i128 zig_addw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_addw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
return zig_wrap_i128(zig_bitcast_i128(zig_add_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits);
}
-static inline zig_u128 zig_subw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_subw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
return zig_wrap_u128(zig_sub_u128(lhs, rhs), bits);
}
-static inline zig_i128 zig_subw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_subw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
return zig_wrap_i128(zig_bitcast_i128(zig_sub_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits);
}
-static inline zig_u128 zig_mulw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_mulw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
return zig_wrap_u128(zig_mul_u128(lhs, rhs), bits);
}
-static inline zig_i128 zig_mulw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_mulw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
return zig_wrap_i128(zig_bitcast_i128(zig_mul_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits);
}
#if zig_has_int128
-static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow)
zig_u128 full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u128(full_res, bits);
- return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits);
+ return overflow || full_res < zig_minInt_u(128, bits) || full_res > zig_maxInt_u(128, bits);
#else
*res = zig_addw_u128(lhs, rhs, bits);
return *res < lhs;
#endif
}
-zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, int *overflow);
+static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow)
zig_i128 full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
+ int overflow_int;
zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i128(full_res, bits);
- return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits);
+ return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits);
}
-static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow)
zig_u128 full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u128(full_res, bits);
- return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits);
+ return overflow || full_res < zig_minInt_u(128, bits) || full_res > zig_maxInt_u(128, bits);
#else
*res = zig_subw_u128(lhs, rhs, bits);
return *res > lhs;
#endif
}
-zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, int *overflow);
+static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow)
zig_i128 full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
+ int overflow_int;
zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i128(full_res, bits);
- return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits);
+ return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits);
}
-static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow)
zig_u128 full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u128(full_res, bits);
- return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits);
+ return overflow || full_res < zig_minInt_u(128, bits) || full_res > zig_maxInt_u(128, bits);
#else
*res = zig_mulw_u128(lhs, rhs, bits);
- return rhs != zig_as_u128(0, 0) && lhs > zig_maxInt(u128, bits) / rhs;
+ return rhs != zig_make_u128(0, 0) && lhs > zig_maxInt_u(128, bits) / rhs;
#endif
}
-zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, int *overflow);
+static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow)
zig_i128 full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
+ int overflow_int;
zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i128(full_res, bits);
- return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits);
+ return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits);
}
#else /* zig_has_int128 */
-static inline bool zig_overflow_u128(bool overflow, zig_u128 full_res, zig_u8 bits) {
- return overflow ||
- zig_cmp_u128(full_res, zig_minInt(u128, bits)) < zig_as_i32(0) ||
- zig_cmp_u128(full_res, zig_maxInt(u128, bits)) > zig_as_i32(0);
+static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
+ uint64_t hi;
+ bool overflow = zig_addo_u64(&hi, lhs.hi, rhs.hi, bits - 64);
+ return overflow ^ zig_addo_u64(&res->hi, hi, zig_addo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64);
}
-static inline bool zig_overflow_i128(bool overflow, zig_i128 full_res, zig_u8 bits) {
- return overflow ||
- zig_cmp_i128(full_res, zig_minInt(i128, bits)) < zig_as_i32(0) ||
- zig_cmp_i128(full_res, zig_maxInt(i128, bits)) > zig_as_i32(0);
+static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
+ int64_t hi;
+ bool overflow = zig_addo_i64(&hi, lhs.hi, rhs.hi, bits - 64);
+ return overflow ^ zig_addo_i64(&res->hi, hi, zig_addo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64);
}
-static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
- zig_u128 full_res;
- bool overflow =
- zig_addo_u64(&full_res.hi, lhs.hi, rhs.hi, 64) |
- zig_addo_u64(&full_res.hi, full_res.hi, zig_addo_u64(&full_res.lo, lhs.lo, rhs.lo, 64), 64);
- *res = zig_wrap_u128(full_res, bits);
- return zig_overflow_u128(overflow, full_res, bits);
+static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
+ uint64_t hi;
+ bool overflow = zig_subo_u64(&hi, lhs.hi, rhs.hi, bits - 64);
+ return overflow ^ zig_subo_u64(&res->hi, hi, zig_subo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64);
}
-zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
- zig_c_int overflow_int;
- zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int);
- *res = zig_wrap_i128(full_res, bits);
- return zig_overflow_i128(overflow_int, full_res, bits);
+static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
+ int64_t hi;
+ bool overflow = zig_subo_i64(&hi, lhs.hi, rhs.hi, bits - 64);
+ return overflow ^ zig_subo_i64(&res->hi, hi, zig_subo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64);
}
-static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
- zig_u128 full_res;
- bool overflow =
- zig_subo_u64(&full_res.hi, lhs.hi, rhs.hi, 64) |
- zig_subo_u64(&full_res.hi, full_res.hi, zig_subo_u64(&full_res.lo, lhs.lo, rhs.lo, 64), 64);
- *res = zig_wrap_u128(full_res, bits);
- return zig_overflow_u128(overflow, full_res, bits);
-}
-
-zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
- zig_c_int overflow_int;
- zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int);
- *res = zig_wrap_i128(full_res, bits);
- return zig_overflow_i128(overflow_int, full_res, bits);
-}
-
-static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
*res = zig_mulw_u128(lhs, rhs, bits);
- return zig_cmp_u128(*res, zig_as_u128(0, 0)) != zig_as_i32(0) &&
- zig_cmp_u128(lhs, zig_div_trunc_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0);
+ return zig_cmp_u128(*res, zig_make_u128(0, 0)) != INT32_C(0) &&
+ zig_cmp_u128(lhs, zig_div_trunc_u128(zig_maxInt_u(128, bits), rhs)) > INT32_C(0);
}
-zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
- zig_c_int overflow_int;
+zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, int *overflow);
+static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
+ int overflow_int;
zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int);
+ bool overflow = overflow_int != 0 ||
+ zig_cmp_i128(full_res, zig_minInt_i(128, bits)) < INT32_C(0) ||
+ zig_cmp_i128(full_res, zig_maxInt_i(128, bits)) > INT32_C(0);
*res = zig_wrap_i128(full_res, bits);
- return zig_overflow_i128(overflow_int, full_res, bits);
+ return overflow;
}
#endif /* zig_has_int128 */
-static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, uint8_t rhs, uint8_t bits) {
*res = zig_shlw_u128(lhs, rhs, bits);
- return zig_cmp_u128(lhs, zig_shr_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0);
+ return zig_cmp_u128(lhs, zig_shr_u128(zig_maxInt_u(128, bits), rhs)) > INT32_C(0);
}
-static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, uint8_t rhs, uint8_t bits) {
*res = zig_shlw_i128(lhs, rhs, bits);
- zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - zig_as_u8(1)));
- return zig_cmp_i128(zig_and_i128(lhs, mask), zig_as_i128(0, 0)) != zig_as_i32(0) &&
- zig_cmp_i128(zig_and_i128(lhs, mask), mask) != zig_as_i32(0);
+ zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - UINT8_C(1)));
+ return zig_cmp_i128(zig_and_i128(lhs, mask), zig_make_i128(0, 0)) != INT32_C(0) &&
+ zig_cmp_i128(zig_and_i128(lhs, mask), mask) != INT32_C(0);
}
-static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
zig_u128 res;
- if (zig_cmp_u128(rhs, zig_as_u128(0, bits)) >= zig_as_i32(0))
- return zig_cmp_u128(lhs, zig_as_u128(0, 0)) != zig_as_i32(0) ? zig_maxInt(u128, bits) : lhs;
-
-#if zig_has_int128
- return zig_shlo_u128(&res, lhs, (zig_u8)rhs, bits) ? zig_maxInt(u128, bits) : res;
-#else
- return zig_shlo_u128(&res, lhs, (zig_u8)rhs.lo, bits) ? zig_maxInt(u128, bits) : res;
-#endif
+ if (zig_cmp_u128(rhs, zig_make_u128(0, bits)) >= INT32_C(0))
+ return zig_cmp_u128(lhs, zig_make_u128(0, 0)) != INT32_C(0) ? zig_maxInt_u(128, bits) : lhs;
+ return zig_shlo_u128(&res, lhs, (uint8_t)zig_lo_u128(rhs), bits) ? zig_maxInt_u(128, bits) : res;
}
-static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
zig_i128 res;
- if (zig_cmp_u128(zig_bitcast_u128(rhs), zig_as_u128(0, bits)) < zig_as_i32(0) && !zig_shlo_i128(&res, lhs, zig_lo_i128(rhs), bits)) return res;
- return zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits);
+ if (zig_cmp_u128(zig_bitcast_u128(rhs), zig_make_u128(0, bits)) < INT32_C(0) && !zig_shlo_i128(&res, lhs, (uint8_t)zig_lo_i128(rhs), bits)) return res;
+ return zig_cmp_i128(lhs, zig_make_i128(0, 0)) < INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits);
}
-static inline zig_u128 zig_adds_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_adds_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
zig_u128 res;
- return zig_addo_u128(&res, lhs, rhs, bits) ? zig_maxInt(u128, bits) : res;
+ return zig_addo_u128(&res, lhs, rhs, bits) ? zig_maxInt_u(128, bits) : res;
}
-static inline zig_i128 zig_adds_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_adds_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
zig_i128 res;
if (!zig_addo_i128(&res, lhs, rhs, bits)) return res;
- return zig_cmp_i128(res, zig_as_i128(0, 0)) >= zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits);
+ return zig_cmp_i128(res, zig_make_i128(0, 0)) >= INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits);
}
-static inline zig_u128 zig_subs_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_subs_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
zig_u128 res;
- return zig_subo_u128(&res, lhs, rhs, bits) ? zig_minInt(u128, bits) : res;
+ return zig_subo_u128(&res, lhs, rhs, bits) ? zig_minInt_u(128, bits) : res;
}
-static inline zig_i128 zig_subs_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_subs_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
zig_i128 res;
if (!zig_subo_i128(&res, lhs, rhs, bits)) return res;
- return zig_cmp_i128(res, zig_as_i128(0, 0)) >= zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits);
+ return zig_cmp_i128(res, zig_make_i128(0, 0)) >= INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits);
}
-static inline zig_u128 zig_muls_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_muls_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
zig_u128 res;
- return zig_mulo_u128(&res, lhs, rhs, bits) ? zig_maxInt(u128, bits) : res;
+ return zig_mulo_u128(&res, lhs, rhs, bits) ? zig_maxInt_u(128, bits) : res;
}
-static inline zig_i128 zig_muls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_muls_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
zig_i128 res;
if (!zig_mulo_i128(&res, lhs, rhs, bits)) return res;
- return zig_cmp_i128(zig_xor_i128(lhs, rhs), zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits);
+ return zig_cmp_i128(zig_xor_i128(lhs, rhs), zig_make_i128(0, 0)) < INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits);
}
-static inline zig_u8 zig_clz_u128(zig_u128 val, zig_u8 bits) {
- if (bits <= zig_as_u8(64)) return zig_clz_u64(zig_lo_u128(val), bits);
- if (zig_hi_u128(val) != 0) return zig_clz_u64(zig_hi_u128(val), bits - zig_as_u8(64));
- return zig_clz_u64(zig_lo_u128(val), zig_as_u8(64)) + (bits - zig_as_u8(64));
+static inline uint8_t zig_clz_u128(zig_u128 val, uint8_t bits) {
+ if (bits <= UINT8_C(64)) return zig_clz_u64(zig_lo_u128(val), bits);
+ if (zig_hi_u128(val) != 0) return zig_clz_u64(zig_hi_u128(val), bits - UINT8_C(64));
+ return zig_clz_u64(zig_lo_u128(val), UINT8_C(64)) + (bits - UINT8_C(64));
}
-static inline zig_u8 zig_clz_i128(zig_i128 val, zig_u8 bits) {
+static inline uint8_t zig_clz_i128(zig_i128 val, uint8_t bits) {
return zig_clz_u128(zig_bitcast_u128(val), bits);
}
-static inline zig_u8 zig_ctz_u128(zig_u128 val, zig_u8 bits) {
- if (zig_lo_u128(val) != 0) return zig_ctz_u64(zig_lo_u128(val), zig_as_u8(64));
- return zig_ctz_u64(zig_hi_u128(val), bits - zig_as_u8(64)) + zig_as_u8(64);
+static inline uint8_t zig_ctz_u128(zig_u128 val, uint8_t bits) {
+ if (zig_lo_u128(val) != 0) return zig_ctz_u64(zig_lo_u128(val), UINT8_C(64));
+ return zig_ctz_u64(zig_hi_u128(val), bits - UINT8_C(64)) + UINT8_C(64);
}
-static inline zig_u8 zig_ctz_i128(zig_i128 val, zig_u8 bits) {
+static inline uint8_t zig_ctz_i128(zig_i128 val, uint8_t bits) {
return zig_ctz_u128(zig_bitcast_u128(val), bits);
}
-static inline zig_u8 zig_popcount_u128(zig_u128 val, zig_u8 bits) {
- return zig_popcount_u64(zig_hi_u128(val), bits - zig_as_u8(64)) +
- zig_popcount_u64(zig_lo_u128(val), zig_as_u8(64));
+static inline uint8_t zig_popcount_u128(zig_u128 val, uint8_t bits) {
+ return zig_popcount_u64(zig_hi_u128(val), bits - UINT8_C(64)) +
+ zig_popcount_u64(zig_lo_u128(val), UINT8_C(64));
}
-static inline zig_u8 zig_popcount_i128(zig_i128 val, zig_u8 bits) {
+static inline uint8_t zig_popcount_i128(zig_i128 val, uint8_t bits) {
return zig_popcount_u128(zig_bitcast_u128(val), bits);
}
-static inline zig_u128 zig_byte_swap_u128(zig_u128 val, zig_u8 bits) {
+static inline zig_u128 zig_byte_swap_u128(zig_u128 val, uint8_t bits) {
zig_u128 full_res;
#if zig_has_builtin(bswap128)
full_res = __builtin_bswap128(val);
#else
- full_res = zig_as_u128(zig_byte_swap_u64(zig_lo_u128(val), zig_as_u8(64)),
- zig_byte_swap_u64(zig_hi_u128(val), zig_as_u8(64)));
+ full_res = zig_make_u128(zig_byte_swap_u64(zig_lo_u128(val), UINT8_C(64)),
+ zig_byte_swap_u64(zig_hi_u128(val), UINT8_C(64)));
#endif
- return zig_shr_u128(full_res, zig_as_u8(128) - bits);
+ return zig_shr_u128(full_res, UINT8_C(128) - bits);
}
-static inline zig_i128 zig_byte_swap_i128(zig_i128 val, zig_u8 bits) {
+static inline zig_i128 zig_byte_swap_i128(zig_i128 val, uint8_t bits) {
return zig_bitcast_i128(zig_byte_swap_u128(zig_bitcast_u128(val), bits));
}
-static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, zig_u8 bits) {
- return zig_shr_u128(zig_as_u128(zig_bit_reverse_u64(zig_lo_u128(val), zig_as_u8(64)),
- zig_bit_reverse_u64(zig_hi_u128(val), zig_as_u8(64))),
- zig_as_u8(128) - bits);
+static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, uint8_t bits) {
+ return zig_shr_u128(zig_make_u128(zig_bit_reverse_u64(zig_lo_u128(val), UINT8_C(64)),
+ zig_bit_reverse_u64(zig_hi_u128(val), UINT8_C(64))),
+ UINT8_C(128) - bits);
}
-static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, zig_u8 bits) {
+static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, uint8_t bits) {
return zig_bitcast_i128(zig_bit_reverse_u128(zig_bitcast_u128(val), bits));
}
+/* ========================== Big Integer Support =========================== */
+
+static inline uint16_t zig_int_bytes(uint16_t bits) {
+ uint16_t bytes = (bits + CHAR_BIT - 1) / CHAR_BIT;
+ uint16_t alignment = ZIG_TARGET_MAX_INT_ALIGNMENT;
+ while (alignment / 2 >= bytes) alignment /= 2;
+ return (bytes + alignment - 1) / alignment * alignment;
+}
+
+static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ const uint8_t *lhs_bytes = lhs;
+ const uint8_t *rhs_bytes = rhs;
+ uint16_t byte_offset = 0;
+ bool do_signed = is_signed;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+
+#if zig_little_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+ int32_t limb_cmp;
+
+#if zig_little_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ zig_i128 lhs_limb;
+ zig_i128 rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_cmp = zig_cmp_i128(lhs_limb, rhs_limb);
+ do_signed = false;
+ } else {
+ zig_u128 lhs_limb;
+ zig_u128 rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_cmp = zig_cmp_u128(lhs_limb, rhs_limb);
+ }
+
+ if (limb_cmp != 0) return limb_cmp;
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int64_t lhs_limb;
+ int64_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint64_t lhs_limb;
+ uint64_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int32_t lhs_limb;
+ int32_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint32_t lhs_limb;
+ uint32_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int16_t lhs_limb;
+ int16_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint16_t lhs_limb;
+ uint16_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int8_t lhs_limb;
+ int8_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint8_t lhs_limb;
+ uint8_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return 0;
+}
+
+static inline bool zig_addo_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ uint8_t *res_bytes = res;
+ const uint8_t *lhs_bytes = lhs;
+ const uint8_t *rhs_bytes = rhs;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t top_bits = remaining_bytes * 8 - bits;
+ bool overflow = false;
+
+#if zig_big_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+ uint16_t limb_bits = 128 - (remaining_bytes == 128 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 128 / CHAR_BIT && is_signed) {
+ zig_i128 res_limb;
+ zig_i128 tmp_limb;
+ zig_i128 lhs_limb;
+ zig_i128 rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i128(&res_limb, tmp_limb, zig_make_i128(INT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ zig_u128 res_limb;
+ zig_u128 tmp_limb;
+ zig_u128 lhs_limb;
+ zig_u128 rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u128(&res_limb, tmp_limb, zig_make_u128(UINT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+ uint16_t limb_bits = 64 - (remaining_bytes == 64 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 64 / CHAR_BIT && is_signed) {
+ int64_t res_limb;
+ int64_t tmp_limb;
+ int64_t lhs_limb;
+ int64_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i64(&res_limb, tmp_limb, overflow ? INT64_C(1) : INT64_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint64_t res_limb;
+ uint64_t tmp_limb;
+ uint64_t lhs_limb;
+ uint64_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u64(&res_limb, tmp_limb, overflow ? UINT64_C(1) : UINT64_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+ uint16_t limb_bits = 32 - (remaining_bytes == 32 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 32 / CHAR_BIT && is_signed) {
+ int32_t res_limb;
+ int32_t tmp_limb;
+ int32_t lhs_limb;
+ int32_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i32(&res_limb, tmp_limb, overflow ? INT32_C(1) : INT32_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint32_t res_limb;
+ uint32_t tmp_limb;
+ uint32_t lhs_limb;
+ uint32_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u32(&res_limb, tmp_limb, overflow ? UINT32_C(1) : UINT32_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+ uint16_t limb_bits = 16 - (remaining_bytes == 16 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 16 / CHAR_BIT && is_signed) {
+ int16_t res_limb;
+ int16_t tmp_limb;
+ int16_t lhs_limb;
+ int16_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i16(&res_limb, tmp_limb, overflow ? INT16_C(1) : INT16_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint16_t res_limb;
+ uint16_t tmp_limb;
+ uint16_t lhs_limb;
+ uint16_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u16(&res_limb, tmp_limb, overflow ? UINT16_C(1) : UINT16_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+ uint16_t limb_bits = 8 - (remaining_bytes == 8 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 8 / CHAR_BIT && is_signed) {
+ int8_t res_limb;
+ int8_t tmp_limb;
+ int8_t lhs_limb;
+ int8_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i8(&res_limb, tmp_limb, overflow ? INT8_C(1) : INT8_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint8_t res_limb;
+ uint8_t tmp_limb;
+ uint8_t lhs_limb;
+ uint8_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u8(&res_limb, tmp_limb, overflow ? UINT8_C(1) : UINT8_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return overflow;
+}
+
+static inline bool zig_subo_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) { /* Multi-limb subtraction with overflow detection: walks limbs from least to most significant, chaining the borrow; returns true iff the final `bits`-wide result overflowed. */
+ uint8_t *res_bytes = res;
+ const uint8_t *lhs_bytes = lhs;
+ const uint8_t *rhs_bytes = rhs;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t top_bits = remaining_bytes * 8 - bits; /* padding bits in the most significant limb beyond `bits` */
+ bool overflow = false;
+
+#if zig_big_endian
+ byte_offset = remaining_bytes; /* big-endian: least significant limb is stored last, so walk down from the end */
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+ uint16_t limb_bits = 128 - (remaining_bytes == 128 / CHAR_BIT ? top_bits : 0); /* only the top limb excludes the padding bits */
+
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 128 / CHAR_BIT && is_signed) { /* signed semantics apply only to the most significant limb */
+ zig_i128 res_limb;
+ zig_i128 tmp_limb;
+ zig_i128 lhs_limb;
+ zig_i128 rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); /* memcpy avoids unaligned/aliasing UB */
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i128(&res_limb, tmp_limb, zig_make_i128(INT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits); /* subtract the incoming borrow; XOR merges the two per-step overflow flags */
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ zig_u128 res_limb;
+ zig_u128 tmp_limb;
+ zig_u128 lhs_limb;
+ zig_u128 rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u128(&res_limb, tmp_limb, zig_make_u128(UINT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 128 / CHAR_BIT; /* little-endian: limbs ascend in memory */
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) { /* same scheme with progressively narrower limbs for the remainder */
+ uint16_t limb_bits = 64 - (remaining_bytes == 64 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 64 / CHAR_BIT && is_signed) {
+ int64_t res_limb;
+ int64_t tmp_limb;
+ int64_t lhs_limb;
+ int64_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i64(&res_limb, tmp_limb, overflow ? INT64_C(1) : INT64_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint64_t res_limb;
+ uint64_t tmp_limb;
+ uint64_t lhs_limb;
+ uint64_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u64(&res_limb, tmp_limb, overflow ? UINT64_C(1) : UINT64_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+ uint16_t limb_bits = 32 - (remaining_bytes == 32 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 32 / CHAR_BIT && is_signed) {
+ int32_t res_limb;
+ int32_t tmp_limb;
+ int32_t lhs_limb;
+ int32_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i32(&res_limb, tmp_limb, overflow ? INT32_C(1) : INT32_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint32_t res_limb;
+ uint32_t tmp_limb;
+ uint32_t lhs_limb;
+ uint32_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u32(&res_limb, tmp_limb, overflow ? UINT32_C(1) : UINT32_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+ uint16_t limb_bits = 16 - (remaining_bytes == 16 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 16 / CHAR_BIT && is_signed) {
+ int16_t res_limb;
+ int16_t tmp_limb;
+ int16_t lhs_limb;
+ int16_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i16(&res_limb, tmp_limb, overflow ? INT16_C(1) : INT16_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint16_t res_limb;
+ uint16_t tmp_limb;
+ uint16_t lhs_limb;
+ uint16_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u16(&res_limb, tmp_limb, overflow ? UINT16_C(1) : UINT16_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+ uint16_t limb_bits = 8 - (remaining_bytes == 8 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 8 / CHAR_BIT && is_signed) {
+ int8_t res_limb;
+ int8_t tmp_limb;
+ int8_t lhs_limb;
+ int8_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i8(&res_limb, tmp_limb, overflow ? INT8_C(1) : INT8_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint8_t res_limb;
+ uint8_t tmp_limb;
+ uint8_t lhs_limb;
+ uint8_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u8(&res_limb, tmp_limb, overflow ? UINT8_C(1) : UINT8_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return overflow;
+}
+
+static inline void zig_addw_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) { /* Wrapping big-int add: identical to zig_addo_big but the overflow flag is intentionally discarded. */
+ (void)zig_addo_big(res, lhs, rhs, is_signed, bits);
+}
+
+static inline void zig_subw_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) { /* Wrapping big-int subtract: identical to zig_subo_big but the overflow flag is intentionally discarded. */
+ (void)zig_subo_big(res, lhs, rhs, is_signed, bits);
+}
+
+static inline uint16_t zig_clz_big(const void *val, bool is_signed, uint16_t bits) { /* Count leading zeros of a `bits`-wide big int, scanning from the most significant limb toward the least. */
+ const uint8_t *val_bytes = val;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t skip_bits = remaining_bytes * 8 - bits; /* padding bits above `bits` in the top limb; excluded from the first limb's count then cleared */
+ uint16_t total_lz = 0;
+ uint16_t limb_lz;
+ (void)is_signed; /* signedness does not affect bit counting */
+
+#if zig_little_endian
+ byte_offset = remaining_bytes; /* little-endian: most significant limb is stored last, so walk down from the end */
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ {
+ zig_u128 val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb)); /* memcpy avoids unaligned/aliasing UB */
+ limb_lz = zig_clz_u128(val_limb, 128 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 128 - skip_bits) return total_lz; /* a set bit was found in this limb; done */
+ skip_bits = 0;
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) { /* same scheme with progressively narrower limbs */
+#if zig_little_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ {
+ uint64_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u64(val_limb, 64 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 64 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ {
+ uint32_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u32(val_limb, 32 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 32 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ {
+ uint16_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u16(val_limb, 16 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 16 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ {
+ uint8_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u8(val_limb, 8 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 8 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return total_lz; /* reached only when every counted bit is zero */
+}
+
+static inline uint16_t zig_ctz_big(const void *val, bool is_signed, uint16_t bits) { /* Count trailing zeros of a big int, scanning from the least significant limb toward the most. */
+ const uint8_t *val_bytes = val;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t total_tz = 0;
+ uint16_t limb_tz;
+ (void)is_signed; /* signedness does not affect bit counting */
+
+#if zig_big_endian
+ byte_offset = remaining_bytes; /* big-endian: least significant limb is stored last, so walk down from the end */
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ {
+ zig_u128 val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb)); /* memcpy avoids unaligned/aliasing UB */
+ limb_tz = zig_ctz_u128(val_limb, 128);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 128) return total_tz; /* a set bit was found in this limb; done */
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) { /* same scheme with progressively narrower limbs */
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ {
+ uint64_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u64(val_limb, 64);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 64) return total_tz;
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ {
+ uint32_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u32(val_limb, 32);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 32) return total_tz;
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ {
+ uint16_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u16(val_limb, 16);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 16) return total_tz;
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ {
+ uint8_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u8(val_limb, 8);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 8) return total_tz;
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return total_tz; /* NOTE(review): for an all-zero value this sums full limb widths, which can exceed `bits` when padding bits exist -- confirm callers tolerate that */
+}
+
+static inline uint16_t zig_popcount_big(const void *val, bool is_signed, uint16_t bits) { /* Population count of a big int: sum of per-limb popcounts; presumably padding bits beyond `bits` are zero -- confirm at call sites. */
+ const uint8_t *val_bytes = val;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t total_pc = 0;
+ (void)is_signed; /* signedness does not affect bit counting */
+
+#if zig_big_endian
+ byte_offset = remaining_bytes; /* big-endian: walk limbs down from the end */
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ {
+ zig_u128 val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb)); /* memcpy avoids unaligned/aliasing UB */
+ total_pc += zig_popcount_u128(val_limb, 128);
+ }
+
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) { /* same scheme with progressively narrower limbs */
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ {
+ uint64_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ total_pc += zig_popcount_u64(val_limb, 64);
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ {
+ uint32_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ total_pc += zig_popcount_u32(val_limb, 32);
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ {
+ uint16_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ total_pc += zig_popcount_u16(val_limb, 16); /* fix: accumulate -- '=' discarded counts from wider limbs */
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ {
+ uint8_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ total_pc += zig_popcount_u8(val_limb, 8); /* fix: accumulate -- '=' discarded counts from wider limbs */
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return total_pc;
+}
+
/* ========================= Floating Point Support ========================= */
#if _MSC_VER
@@ -1810,252 +2758,253 @@ static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, zig_u8 bits) {
#if (zig_has_builtin(nan) && zig_has_builtin(nans) && zig_has_builtin(inf)) || defined(zig_gnuc)
#define zig_has_float_builtins 1
-#define zig_as_special_f16(sign, name, arg, repr) sign zig_as_f16(__builtin_##name, )(arg)
-#define zig_as_special_f32(sign, name, arg, repr) sign zig_as_f32(__builtin_##name, )(arg)
-#define zig_as_special_f64(sign, name, arg, repr) sign zig_as_f64(__builtin_##name, )(arg)
-#define zig_as_special_f80(sign, name, arg, repr) sign zig_as_f80(__builtin_##name, )(arg)
-#define zig_as_special_f128(sign, name, arg, repr) sign zig_as_f128(__builtin_##name, )(arg)
-#define zig_as_special_c_longdouble(sign, name, arg, repr) sign zig_as_c_longdouble(__builtin_##name, )(arg)
+#define zig_make_special_f16(sign, name, arg, repr) sign zig_make_f16(__builtin_##name, )(arg)
+#define zig_make_special_f32(sign, name, arg, repr) sign zig_make_f32(__builtin_##name, )(arg)
+#define zig_make_special_f64(sign, name, arg, repr) sign zig_make_f64(__builtin_##name, )(arg)
+#define zig_make_special_f80(sign, name, arg, repr) sign zig_make_f80(__builtin_##name, )(arg)
+#define zig_make_special_f128(sign, name, arg, repr) sign zig_make_f128(__builtin_##name, )(arg)
#else
#define zig_has_float_builtins 0
-#define zig_as_special_f16(sign, name, arg, repr) zig_float_from_repr_f16(repr)
-#define zig_as_special_f32(sign, name, arg, repr) zig_float_from_repr_f32(repr)
-#define zig_as_special_f64(sign, name, arg, repr) zig_float_from_repr_f64(repr)
-#define zig_as_special_f80(sign, name, arg, repr) zig_float_from_repr_f80(repr)
-#define zig_as_special_f128(sign, name, arg, repr) zig_float_from_repr_f128(repr)
-#define zig_as_special_c_longdouble(sign, name, arg, repr) zig_float_from_repr_c_longdouble(repr)
+#define zig_make_special_f16(sign, name, arg, repr) zig_float_from_repr_f16(repr)
+#define zig_make_special_f32(sign, name, arg, repr) zig_float_from_repr_f32(repr)
+#define zig_make_special_f64(sign, name, arg, repr) zig_float_from_repr_f64(repr)
+#define zig_make_special_f80(sign, name, arg, repr) zig_float_from_repr_f80(repr)
+#define zig_make_special_f128(sign, name, arg, repr) zig_float_from_repr_f128(repr)
#endif
#define zig_has_f16 1
#define zig_bitSizeOf_f16 16
+typedef int16_t zig_repr_f16;
#define zig_libc_name_f16(name) __##name##h
-#define zig_as_special_constant_f16(sign, name, arg, repr) zig_as_special_f16(sign, name, arg, repr)
+#define zig_init_special_f16(sign, name, arg, repr) zig_make_special_f16(sign, name, arg, repr)
#if FLT_MANT_DIG == 11
typedef float zig_f16;
-#define zig_as_f16(fp, repr) fp##f
+#define zig_make_f16(fp, repr) fp##f
#elif DBL_MANT_DIG == 11
typedef double zig_f16;
-#define zig_as_f16(fp, repr) fp
+#define zig_make_f16(fp, repr) fp
#elif LDBL_MANT_DIG == 11
#define zig_bitSizeOf_c_longdouble 16
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f16 zig_repr_c_longdouble;
+#endif
typedef long double zig_f16;
-#define zig_as_f16(fp, repr) fp##l
+#define zig_make_f16(fp, repr) fp##l
#elif FLT16_MANT_DIG == 11 && (zig_has_builtin(inff16) || defined(zig_gnuc))
typedef _Float16 zig_f16;
-#define zig_as_f16(fp, repr) fp##f16
+#define zig_make_f16(fp, repr) fp##f16
#elif defined(__SIZEOF_FP16__)
typedef __fp16 zig_f16;
-#define zig_as_f16(fp, repr) fp##f16
+#define zig_make_f16(fp, repr) fp##f16
#else
#undef zig_has_f16
#define zig_has_f16 0
-#define zig_repr_f16 i16
-typedef zig_i16 zig_f16;
-#define zig_as_f16(fp, repr) repr
-#undef zig_as_special_f16
-#define zig_as_special_f16(sign, name, arg, repr) repr
-#undef zig_as_special_constant_f16
-#define zig_as_special_constant_f16(sign, name, arg, repr) repr
+#define zig_bitSizeOf_repr_f16 16
+typedef int16_t zig_f16;
+#define zig_make_f16(fp, repr) repr
+#undef zig_make_special_f16
+#define zig_make_special_f16(sign, name, arg, repr) repr
+#undef zig_init_special_f16
+#define zig_init_special_f16(sign, name, arg, repr) repr
#endif
#define zig_has_f32 1
#define zig_bitSizeOf_f32 32
+typedef int32_t zig_repr_f32;
#define zig_libc_name_f32(name) name##f
#if _MSC_VER
-#define zig_as_special_constant_f32(sign, name, arg, repr) sign zig_as_f32(zig_msvc_flt_##name, )
+#define zig_init_special_f32(sign, name, arg, repr) sign zig_make_f32(zig_msvc_flt_##name, )
#else
-#define zig_as_special_constant_f32(sign, name, arg, repr) zig_as_special_f32(sign, name, arg, repr)
+#define zig_init_special_f32(sign, name, arg, repr) zig_make_special_f32(sign, name, arg, repr)
#endif
#if FLT_MANT_DIG == 24
typedef float zig_f32;
-#define zig_as_f32(fp, repr) fp##f
+#define zig_make_f32(fp, repr) fp##f
#elif DBL_MANT_DIG == 24
typedef double zig_f32;
-#define zig_as_f32(fp, repr) fp
+#define zig_make_f32(fp, repr) fp
#elif LDBL_MANT_DIG == 24
#define zig_bitSizeOf_c_longdouble 32
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f32 zig_repr_c_longdouble;
+#endif
typedef long double zig_f32;
-#define zig_as_f32(fp, repr) fp##l
+#define zig_make_f32(fp, repr) fp##l
#elif FLT32_MANT_DIG == 24
typedef _Float32 zig_f32;
-#define zig_as_f32(fp, repr) fp##f32
+#define zig_make_f32(fp, repr) fp##f32
#else
#undef zig_has_f32
#define zig_has_f32 0
-#define zig_repr_f32 i32
-typedef zig_i32 zig_f32;
-#define zig_as_f32(fp, repr) repr
-#undef zig_as_special_f32
-#define zig_as_special_f32(sign, name, arg, repr) repr
-#undef zig_as_special_constant_f32
-#define zig_as_special_constant_f32(sign, name, arg, repr) repr
+#define zig_bitSizeOf_repr_f32 32
+typedef int32_t zig_f32;
+#define zig_make_f32(fp, repr) repr
+#undef zig_make_special_f32
+#define zig_make_special_f32(sign, name, arg, repr) repr
+#undef zig_init_special_f32
+#define zig_init_special_f32(sign, name, arg, repr) repr
#endif
#define zig_has_f64 1
#define zig_bitSizeOf_f64 64
+typedef int64_t zig_repr_f64;
#define zig_libc_name_f64(name) name
#if _MSC_VER
#ifdef ZIG_TARGET_ABI_MSVC
#define zig_bitSizeOf_c_longdouble 64
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f64 zig_repr_c_longdouble;
#endif
-#define zig_as_special_constant_f64(sign, name, arg, repr) sign zig_as_f64(zig_msvc_flt_##name, )
+#endif
+#define zig_init_special_f64(sign, name, arg, repr) sign zig_make_f64(zig_msvc_flt_##name, )
#else /* _MSC_VER */
-#define zig_as_special_constant_f64(sign, name, arg, repr) zig_as_special_f64(sign, name, arg, repr)
+#define zig_init_special_f64(sign, name, arg, repr) zig_make_special_f64(sign, name, arg, repr)
#endif /* _MSC_VER */
#if FLT_MANT_DIG == 53
typedef float zig_f64;
-#define zig_as_f64(fp, repr) fp##f
+#define zig_make_f64(fp, repr) fp##f
#elif DBL_MANT_DIG == 53
typedef double zig_f64;
-#define zig_as_f64(fp, repr) fp
+#define zig_make_f64(fp, repr) fp
#elif LDBL_MANT_DIG == 53
#define zig_bitSizeOf_c_longdouble 64
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f64 zig_repr_c_longdouble;
+#endif
typedef long double zig_f64;
-#define zig_as_f64(fp, repr) fp##l
+#define zig_make_f64(fp, repr) fp##l
#elif FLT64_MANT_DIG == 53
typedef _Float64 zig_f64;
-#define zig_as_f64(fp, repr) fp##f64
+#define zig_make_f64(fp, repr) fp##f64
#elif FLT32X_MANT_DIG == 53
typedef _Float32x zig_f64;
-#define zig_as_f64(fp, repr) fp##f32x
+#define zig_make_f64(fp, repr) fp##f32x
#else
#undef zig_has_f64
#define zig_has_f64 0
-#define zig_repr_f64 i64
-typedef zig_i64 zig_f64;
-#define zig_as_f64(fp, repr) repr
-#undef zig_as_special_f64
-#define zig_as_special_f64(sign, name, arg, repr) repr
-#undef zig_as_special_constant_f64
-#define zig_as_special_constant_f64(sign, name, arg, repr) repr
+#define zig_bitSizeOf_repr_f64 64
+typedef int64_t zig_f64;
+#define zig_make_f64(fp, repr) repr
+#undef zig_make_special_f64
+#define zig_make_special_f64(sign, name, arg, repr) repr
+#undef zig_init_special_f64
+#define zig_init_special_f64(sign, name, arg, repr) repr
#endif
#define zig_has_f80 1
#define zig_bitSizeOf_f80 80
+typedef zig_i128 zig_repr_f80;
#define zig_libc_name_f80(name) __##name##x
-#define zig_as_special_constant_f80(sign, name, arg, repr) zig_as_special_f80(sign, name, arg, repr)
+#define zig_init_special_f80(sign, name, arg, repr) zig_make_special_f80(sign, name, arg, repr)
#if FLT_MANT_DIG == 64
typedef float zig_f80;
-#define zig_as_f80(fp, repr) fp##f
+#define zig_make_f80(fp, repr) fp##f
#elif DBL_MANT_DIG == 64
typedef double zig_f80;
-#define zig_as_f80(fp, repr) fp
+#define zig_make_f80(fp, repr) fp
#elif LDBL_MANT_DIG == 64
#define zig_bitSizeOf_c_longdouble 80
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f80 zig_repr_c_longdouble;
+#endif
typedef long double zig_f80;
-#define zig_as_f80(fp, repr) fp##l
+#define zig_make_f80(fp, repr) fp##l
#elif FLT80_MANT_DIG == 64
typedef _Float80 zig_f80;
-#define zig_as_f80(fp, repr) fp##f80
+#define zig_make_f80(fp, repr) fp##f80
#elif FLT64X_MANT_DIG == 64
typedef _Float64x zig_f80;
-#define zig_as_f80(fp, repr) fp##f64x
+#define zig_make_f80(fp, repr) fp##f64x
#elif defined(__SIZEOF_FLOAT80__)
typedef __float80 zig_f80;
-#define zig_as_f80(fp, repr) fp##l
+#define zig_make_f80(fp, repr) fp##l
#else
#undef zig_has_f80
#define zig_has_f80 0
-#define zig_repr_f80 i128
+#define zig_bitSizeOf_repr_f80 128
typedef zig_i128 zig_f80;
-#define zig_as_f80(fp, repr) repr
-#undef zig_as_special_f80
-#define zig_as_special_f80(sign, name, arg, repr) repr
-#undef zig_as_special_constant_f80
-#define zig_as_special_constant_f80(sign, name, arg, repr) repr
+#define zig_make_f80(fp, repr) repr
+#undef zig_make_special_f80
+#define zig_make_special_f80(sign, name, arg, repr) repr
+#undef zig_init_special_f80
+#define zig_init_special_f80(sign, name, arg, repr) repr
#endif
#define zig_has_f128 1
#define zig_bitSizeOf_f128 128
+typedef zig_i128 zig_repr_f128;
#define zig_libc_name_f128(name) name##q
-#define zig_as_special_constant_f128(sign, name, arg, repr) zig_as_special_f128(sign, name, arg, repr)
+#define zig_init_special_f128(sign, name, arg, repr) zig_make_special_f128(sign, name, arg, repr)
#if FLT_MANT_DIG == 113
typedef float zig_f128;
-#define zig_as_f128(fp, repr) fp##f
+#define zig_make_f128(fp, repr) fp##f
#elif DBL_MANT_DIG == 113
typedef double zig_f128;
-#define zig_as_f128(fp, repr) fp
+#define zig_make_f128(fp, repr) fp
#elif LDBL_MANT_DIG == 113
#define zig_bitSizeOf_c_longdouble 128
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f128 zig_repr_c_longdouble;
+#endif
typedef long double zig_f128;
-#define zig_as_f128(fp, repr) fp##l
+#define zig_make_f128(fp, repr) fp##l
#elif FLT128_MANT_DIG == 113
typedef _Float128 zig_f128;
-#define zig_as_f128(fp, repr) fp##f128
+#define zig_make_f128(fp, repr) fp##f128
#elif FLT64X_MANT_DIG == 113
typedef _Float64x zig_f128;
-#define zig_as_f128(fp, repr) fp##f64x
+#define zig_make_f128(fp, repr) fp##f64x
#elif defined(__SIZEOF_FLOAT128__)
typedef __float128 zig_f128;
-#define zig_as_f128(fp, repr) fp##q
-#undef zig_as_special_f128
-#define zig_as_special_f128(sign, name, arg, repr) sign __builtin_##name##f128(arg)
+#define zig_make_f128(fp, repr) fp##q
+#undef zig_make_special_f128
+#define zig_make_special_f128(sign, name, arg, repr) sign __builtin_##name##f128(arg)
#else
#undef zig_has_f128
#define zig_has_f128 0
-#define zig_repr_f128 i128
+#define zig_bitSizeOf_repr_f128 128
typedef zig_i128 zig_f128;
-#define zig_as_f128(fp, repr) repr
-#undef zig_as_special_f128
-#define zig_as_special_f128(sign, name, arg, repr) repr
-#undef zig_as_special_constant_f128
-#define zig_as_special_constant_f128(sign, name, arg, repr) repr
+#define zig_make_f128(fp, repr) repr
+#undef zig_make_special_f128
+#define zig_make_special_f128(sign, name, arg, repr) repr
+#undef zig_init_special_f128
+#define zig_init_special_f128(sign, name, arg, repr) repr
#endif
-#define zig_has_c_longdouble 1
-
-#ifdef ZIG_TARGET_ABI_MSVC
-#define zig_libc_name_c_longdouble(name) name
-#else
-#define zig_libc_name_c_longdouble(name) name##l
-#endif
-
-#define zig_as_special_constant_c_longdouble(sign, name, arg, repr) zig_as_special_c_longdouble(sign, name, arg, repr)
#ifdef zig_bitSizeOf_c_longdouble
+#define zig_has_c_longdouble 1
#ifdef ZIG_TARGET_ABI_MSVC
-typedef double zig_c_longdouble;
#undef zig_bitSizeOf_c_longdouble
#define zig_bitSizeOf_c_longdouble 64
-#define zig_as_c_longdouble(fp, repr) fp
+typedef zig_f64 zig_c_longdouble;
+typedef zig_repr_f64 zig_repr_c_longdouble;
#else
typedef long double zig_c_longdouble;
-#define zig_as_c_longdouble(fp, repr) fp##l
#endif
#else /* zig_bitSizeOf_c_longdouble */
-#undef zig_has_c_longdouble
#define zig_has_c_longdouble 0
-#define zig_bitSizeOf_c_longdouble 80
-#define zig_compiler_rt_abbrev_c_longdouble zig_compiler_rt_abbrev_f80
-#define zig_repr_c_longdouble i128
-typedef zig_i128 zig_c_longdouble;
-#define zig_as_c_longdouble(fp, repr) repr
-#undef zig_as_special_c_longdouble
-#define zig_as_special_c_longdouble(sign, name, arg, repr) repr
-#undef zig_as_special_constant_c_longdouble
-#define zig_as_special_constant_c_longdouble(sign, name, arg, repr) repr
+#define zig_bitSizeOf_repr_c_longdouble 128
+typedef zig_f128 zig_c_longdouble;
+typedef zig_repr_f128 zig_repr_c_longdouble;
#endif /* zig_bitSizeOf_c_longdouble */
#if !zig_has_float_builtins
-#define zig_float_from_repr(Type, ReprType) \
- static inline zig_##Type zig_float_from_repr_##Type(zig_##ReprType repr) { \
- return *((zig_##Type*)&repr); \
+#define zig_float_from_repr(Type) \
+ static inline zig_##Type zig_float_from_repr_##Type(zig_repr_##Type repr) { \
+ zig_##Type result; \
+ memcpy(&result, &repr, sizeof(result)); \
+ return result; \
}
-zig_float_from_repr(f16, u16)
-zig_float_from_repr(f32, u32)
-zig_float_from_repr(f64, u64)
-zig_float_from_repr(f80, u128)
-zig_float_from_repr(f128, u128)
-#if zig_bitSizeOf_c_longdouble == 80
-zig_float_from_repr(c_longdouble, u128)
-#else
-#define zig_expand_float_from_repr(Type, ReprType) zig_float_from_repr(Type, ReprType)
-zig_expand_float_from_repr(c_longdouble, zig_expand_concat(u, zig_bitSizeOf_c_longdouble))
-#endif
+zig_float_from_repr(f16)
+zig_float_from_repr(f32)
+zig_float_from_repr(f64)
+zig_float_from_repr(f80)
+zig_float_from_repr(f128)
#endif
#define zig_cast_f16 (zig_f16)
@@ -2064,41 +3013,42 @@ zig_expand_float_from_repr(c_longdouble, zig_expand_concat(u, zig_bitSizeOf_c_lo
#if _MSC_VER && !zig_has_f128
#define zig_cast_f80
-#define zig_cast_c_longdouble
#define zig_cast_f128
#else
#define zig_cast_f80 (zig_f80)
-#define zig_cast_c_longdouble (zig_c_longdouble)
#define zig_cast_f128 (zig_f128)
#endif
#define zig_convert_builtin(ResType, operation, ArgType, version) \
- zig_extern zig_##ResType zig_expand_concat(zig_expand_concat(zig_expand_concat(__##operation, \
- zig_compiler_rt_abbrev_##ArgType), zig_compiler_rt_abbrev_##ResType), version)(zig_##ArgType);
-zig_convert_builtin(f16, trunc, f32, 2)
-zig_convert_builtin(f16, trunc, f64, 2)
-zig_convert_builtin(f16, trunc, f80, 2)
-zig_convert_builtin(f16, trunc, f128, 2)
-zig_convert_builtin(f32, extend, f16, 2)
-zig_convert_builtin(f32, trunc, f64, 2)
-zig_convert_builtin(f32, trunc, f80, 2)
-zig_convert_builtin(f32, trunc, f128, 2)
-zig_convert_builtin(f64, extend, f16, 2)
-zig_convert_builtin(f64, extend, f32, 2)
-zig_convert_builtin(f64, trunc, f80, 2)
-zig_convert_builtin(f64, trunc, f128, 2)
-zig_convert_builtin(f80, extend, f16, 2)
-zig_convert_builtin(f80, extend, f32, 2)
-zig_convert_builtin(f80, extend, f64, 2)
-zig_convert_builtin(f80, trunc, f128, 2)
-zig_convert_builtin(f128, extend, f16, 2)
-zig_convert_builtin(f128, extend, f32, 2)
-zig_convert_builtin(f128, extend, f64, 2)
-zig_convert_builtin(f128, extend, f80, 2)
+ zig_extern ResType zig_expand_concat(zig_expand_concat(zig_expand_concat(__##operation, \
+ zig_compiler_rt_abbrev_##ArgType), zig_compiler_rt_abbrev_##ResType), version)(ArgType);
+zig_convert_builtin(zig_f16, trunc, zig_f32, 2)
+zig_convert_builtin(zig_f16, trunc, zig_f64, 2)
+zig_convert_builtin(zig_f16, trunc, zig_f80, 2)
+zig_convert_builtin(zig_f16, trunc, zig_f128, 2)
+zig_convert_builtin(zig_f32, extend, zig_f16, 2)
+zig_convert_builtin(zig_f32, trunc, zig_f64, 2)
+zig_convert_builtin(zig_f32, trunc, zig_f80, 2)
+zig_convert_builtin(zig_f32, trunc, zig_f128, 2)
+zig_convert_builtin(zig_f64, extend, zig_f16, 2)
+zig_convert_builtin(zig_f64, extend, zig_f32, 2)
+zig_convert_builtin(zig_f64, trunc, zig_f80, 2)
+zig_convert_builtin(zig_f64, trunc, zig_f128, 2)
+zig_convert_builtin(zig_f80, extend, zig_f16, 2)
+zig_convert_builtin(zig_f80, extend, zig_f32, 2)
+zig_convert_builtin(zig_f80, extend, zig_f64, 2)
+zig_convert_builtin(zig_f80, trunc, zig_f128, 2)
+zig_convert_builtin(zig_f128, extend, zig_f16, 2)
+zig_convert_builtin(zig_f128, extend, zig_f32, 2)
+zig_convert_builtin(zig_f128, extend, zig_f64, 2)
+zig_convert_builtin(zig_f128, extend, zig_f80, 2)
#define zig_float_negate_builtin_0(Type) \
static inline zig_##Type zig_neg_##Type(zig_##Type arg) { \
- return zig_expand_concat(zig_xor_, zig_repr_##Type)(arg, zig_expand_minInt(zig_repr_##Type, zig_bitSizeOf_##Type)); \
+ return zig_expand_concat(zig_xor_i, zig_bitSizeOf_repr_##Type)( \
+ arg, \
+ zig_minInt_i(zig_bitSizeOf_repr_##Type, zig_bitSizeOf_##Type) \
+ ); \
}
#define zig_float_negate_builtin_1(Type) \
static inline zig_##Type zig_neg_##Type(zig_##Type arg) { \
@@ -2106,28 +3056,28 @@ zig_convert_builtin(f128, extend, f80, 2)
}
#define zig_float_less_builtin_0(Type, operation) \
- zig_extern zig_i32 zig_expand_concat(zig_expand_concat(__##operation, \
- zig_compiler_rt_abbrev_##Type), 2)(zig_##Type, zig_##Type); \
- static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
- return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_##Type), 2)(lhs, rhs); \
+ zig_extern int32_t zig_expand_concat(zig_expand_concat(__##operation, \
+ zig_compiler_rt_abbrev_zig_##Type), 2)(zig_##Type, zig_##Type); \
+ static inline int32_t zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
+ return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_zig_##Type), 2)(lhs, rhs); \
}
#define zig_float_less_builtin_1(Type, operation) \
- static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
+ static inline int32_t zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
return (!(lhs <= rhs) - (lhs < rhs)); \
}
#define zig_float_greater_builtin_0(Type, operation) \
zig_float_less_builtin_0(Type, operation)
#define zig_float_greater_builtin_1(Type, operation) \
- static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
+ static inline int32_t zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
return ((lhs > rhs) - !(lhs >= rhs)); \
}
#define zig_float_binary_builtin_0(Type, operation, operator) \
zig_extern zig_##Type zig_expand_concat(zig_expand_concat(__##operation, \
- zig_compiler_rt_abbrev_##Type), 3)(zig_##Type, zig_##Type); \
+ zig_compiler_rt_abbrev_zig_##Type), 3)(zig_##Type, zig_##Type); \
static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
- return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_##Type), 3)(lhs, rhs); \
+ return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_zig_##Type), 3)(lhs, rhs); \
}
#define zig_float_binary_builtin_1(Type, operation, operator) \
static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
@@ -2135,18 +3085,18 @@ zig_convert_builtin(f128, extend, f80, 2)
}
#define zig_float_builtins(Type) \
- zig_convert_builtin(i32, fix, Type, ) \
- zig_convert_builtin(u32, fixuns, Type, ) \
- zig_convert_builtin(i64, fix, Type, ) \
- zig_convert_builtin(u64, fixuns, Type, ) \
- zig_convert_builtin(i128, fix, Type, ) \
- zig_convert_builtin(u128, fixuns, Type, ) \
- zig_convert_builtin(Type, float, i32, ) \
- zig_convert_builtin(Type, floatun, u32, ) \
- zig_convert_builtin(Type, float, i64, ) \
- zig_convert_builtin(Type, floatun, u64, ) \
- zig_convert_builtin(Type, float, i128, ) \
- zig_convert_builtin(Type, floatun, u128, ) \
+ zig_convert_builtin( int32_t, fix, zig_##Type, ) \
+ zig_convert_builtin(uint32_t, fixuns, zig_##Type, ) \
+ zig_convert_builtin( int64_t, fix, zig_##Type, ) \
+ zig_convert_builtin(uint64_t, fixuns, zig_##Type, ) \
+ zig_convert_builtin(zig_i128, fix, zig_##Type, ) \
+ zig_convert_builtin(zig_u128, fixuns, zig_##Type, ) \
+ zig_convert_builtin(zig_##Type, float, int32_t, ) \
+ zig_convert_builtin(zig_##Type, floatun, uint32_t, ) \
+ zig_convert_builtin(zig_##Type, float, int64_t, ) \
+ zig_convert_builtin(zig_##Type, floatun, uint64_t, ) \
+ zig_convert_builtin(zig_##Type, float, zig_i128, ) \
+ zig_convert_builtin(zig_##Type, floatun, zig_u128, ) \
zig_expand_concat(zig_float_negate_builtin_, zig_has_##Type)(Type) \
zig_expand_concat(zig_float_less_builtin_, zig_has_##Type)(Type, cmp) \
zig_expand_concat(zig_float_less_builtin_, zig_has_##Type)(Type, ne) \
@@ -2194,155 +3144,162 @@ zig_float_builtins(f32)
zig_float_builtins(f64)
zig_float_builtins(f80)
zig_float_builtins(f128)
-zig_float_builtins(c_longdouble)
#if _MSC_VER && (_M_IX86 || _M_X64)
// TODO: zig_msvc_atomic_load should load 32 bit without interlocked on x86, and load 64 bit without interlocked on x64
-#define zig_msvc_atomics(Type, suffix) \
- static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \
- zig_##Type comparand = *expected; \
- zig_##Type initial = _InterlockedCompareExchange##suffix(obj, desired, comparand); \
+#define zig_msvc_atomics(ZigType, Type, suffix) \
+ static inline bool zig_msvc_cmpxchg_##ZigType(Type volatile* obj, Type* expected, Type desired) { \
+ Type comparand = *expected; \
+ Type initial = _InterlockedCompareExchange##suffix(obj, desired, comparand); \
bool exchanged = initial == comparand; \
if (!exchanged) { \
*expected = initial; \
} \
return exchanged; \
} \
- static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_xchg_##ZigType(Type volatile* obj, Type value) { \
return _InterlockedExchange##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_add_##ZigType(Type volatile* obj, Type value) { \
return _InterlockedExchangeAdd##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_sub_##ZigType(Type volatile* obj, Type value) { \
bool success = false; \
- zig_##Type new; \
- zig_##Type prev; \
+ Type new; \
+ Type prev; \
while (!success) { \
prev = *obj; \
new = prev - value; \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
+ success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \
} \
return prev; \
} \
- static inline zig_##Type zig_msvc_atomicrmw_or_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_or_##ZigType(Type volatile* obj, Type value) { \
return _InterlockedOr##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomicrmw_xor_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_xor_##ZigType(Type volatile* obj, Type value) { \
return _InterlockedXor##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomicrmw_and_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_and_##ZigType(Type volatile* obj, Type value) { \
return _InterlockedAnd##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomicrmw_nand_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_nand_##ZigType(Type volatile* obj, Type value) { \
bool success = false; \
- zig_##Type new; \
- zig_##Type prev; \
+ Type new; \
+ Type prev; \
while (!success) { \
prev = *obj; \
new = ~(prev & value); \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
+ success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \
} \
return prev; \
} \
- static inline zig_##Type zig_msvc_atomicrmw_min_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_min_##ZigType(Type volatile* obj, Type value) { \
bool success = false; \
- zig_##Type new; \
- zig_##Type prev; \
+ Type new; \
+ Type prev; \
while (!success) { \
prev = *obj; \
new = value < prev ? value : prev; \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
+ success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \
} \
return prev; \
} \
- static inline zig_##Type zig_msvc_atomicrmw_max_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_max_##ZigType(Type volatile* obj, Type value) { \
bool success = false; \
- zig_##Type new; \
- zig_##Type prev; \
+ Type new; \
+ Type prev; \
while (!success) { \
prev = *obj; \
new = value > prev ? value : prev; \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
+ success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \
} \
return prev; \
} \
- static inline void zig_msvc_atomic_store_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline void zig_msvc_atomic_store_##ZigType(Type volatile* obj, Type value) { \
_InterlockedExchange##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomic_load_##Type(zig_##Type volatile* obj) { \
+ static inline Type zig_msvc_atomic_load_##ZigType(Type volatile* obj) { \
return _InterlockedOr##suffix(obj, 0); \
}
-zig_msvc_atomics(u8, 8)
-zig_msvc_atomics(i8, 8)
-zig_msvc_atomics(u16, 16)
-zig_msvc_atomics(i16, 16)
-zig_msvc_atomics(u32, )
-zig_msvc_atomics(i32, )
+zig_msvc_atomics( u8, uint8_t, 8)
+zig_msvc_atomics( i8, int8_t, 8)
+zig_msvc_atomics(u16, uint16_t, 16)
+zig_msvc_atomics(i16, int16_t, 16)
+zig_msvc_atomics(u32, uint32_t, )
+zig_msvc_atomics(i32, int32_t, )
#if _M_X64
-zig_msvc_atomics(u64, 64)
-zig_msvc_atomics(i64, 64)
+zig_msvc_atomics(u64, uint64_t, 64)
+zig_msvc_atomics(i64, int64_t, 64)
#endif
#define zig_msvc_flt_atomics(Type, ReprType, suffix) \
static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \
- zig_##ReprType comparand = *((zig_##ReprType*)expected); \
- zig_##ReprType initial = _InterlockedCompareExchange##suffix((zig_##ReprType volatile*)obj, *((zig_##ReprType*)&desired), comparand); \
- bool exchanged = initial == comparand; \
- if (!exchanged) { \
- *expected = *((zig_##Type*)&initial); \
- } \
- return exchanged; \
+ ReprType exchange; \
+ ReprType comparand; \
+ ReprType initial; \
+ bool success; \
+ memcpy(&comparand, expected, sizeof(comparand)); \
+ memcpy(&exchange, &desired, sizeof(exchange)); \
+ initial = _InterlockedCompareExchange##suffix((ReprType volatile*)obj, exchange, comparand); \
+ success = initial == comparand; \
+ if (!success) memcpy(expected, &initial, sizeof(*expected)); \
+ return success; \
} \
static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \
- zig_##ReprType initial = _InterlockedExchange##suffix((zig_##ReprType volatile*)obj, *((zig_##ReprType*)&value)); \
- return *((zig_##Type*)&initial); \
+ ReprType repr; \
+ ReprType initial; \
+ zig_##Type result; \
+ memcpy(&repr, &value, sizeof(repr)); \
+ initial = _InterlockedExchange##suffix((ReprType volatile*)obj, repr); \
+ memcpy(&result, &initial, sizeof(result)); \
+ return result; \
} \
static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \
- bool success = false; \
- zig_##ReprType new; \
- zig_##Type prev; \
- while (!success) { \
- prev = *obj; \
- new = prev + value; \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, *((zig_##ReprType*)&new)); \
- } \
- return prev; \
+ ReprType repr; \
+ zig_##Type expected; \
+ zig_##Type desired; \
+ repr = *(ReprType volatile*)obj; \
+ memcpy(&expected, &repr, sizeof(expected)); \
+ do { \
+ desired = expected + value; \
+ } while (!zig_msvc_cmpxchg_##Type(obj, &expected, desired)); \
+ return expected; \
} \
static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \
- bool success = false; \
- zig_##ReprType new; \
- zig_##Type prev; \
- while (!success) { \
- prev = *obj; \
- new = prev - value; \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, *((zig_##ReprType*)&new)); \
- } \
- return prev; \
+ ReprType repr; \
+ zig_##Type expected; \
+ zig_##Type desired; \
+ repr = *(ReprType volatile*)obj; \
+ memcpy(&expected, &repr, sizeof(expected)); \
+ do { \
+ desired = expected - value; \
+ } while (!zig_msvc_cmpxchg_##Type(obj, &expected, desired)); \
+ return expected; \
}
-zig_msvc_flt_atomics(f32, u32, )
+zig_msvc_flt_atomics(f32, uint32_t, )
#if _M_X64
-zig_msvc_flt_atomics(f64, u64, 64)
+zig_msvc_flt_atomics(f64, uint64_t, 64)
#endif
#if _M_IX86
static inline void zig_msvc_atomic_barrier() {
- zig_i32 barrier;
+ int32_t barrier;
__asm {
xchg barrier, eax
}
}
-static inline void* zig_msvc_atomicrmw_xchg_p32(void** obj, zig_u32* arg) {
+static inline void* zig_msvc_atomicrmw_xchg_p32(void** obj, void* arg) {
return _InterlockedExchangePointer(obj, arg);
}
-static inline void zig_msvc_atomic_store_p32(void** obj, zig_u32* arg) {
+static inline void zig_msvc_atomic_store_p32(void** obj, void* arg) {
_InterlockedExchangePointer(obj, arg);
}
@@ -2360,11 +3317,11 @@ static inline bool zig_msvc_cmpxchg_p32(void** obj, void** expected, void* desir
return exchanged;
}
#else /* _M_IX86 */
-static inline void* zig_msvc_atomicrmw_xchg_p64(void** obj, zig_u64* arg) {
+static inline void* zig_msvc_atomicrmw_xchg_p64(void** obj, void* arg) {
return _InterlockedExchangePointer(obj, arg);
}
-static inline void zig_msvc_atomic_store_p64(void** obj, zig_u64* arg) {
+static inline void zig_msvc_atomic_store_p64(void** obj, void* arg) {
_InterlockedExchangePointer(obj, arg);
}
@@ -2383,11 +3340,11 @@ static inline bool zig_msvc_cmpxchg_p64(void** obj, void** expected, void* desir
}
static inline bool zig_msvc_cmpxchg_u128(zig_u128 volatile* obj, zig_u128* expected, zig_u128 desired) {
- return _InterlockedCompareExchange128((zig_i64 volatile*)obj, desired.hi, desired.lo, (zig_i64*)expected);
+ return _InterlockedCompareExchange128((int64_t volatile*)obj, desired.hi, desired.lo, (int64_t*)expected);
}
static inline bool zig_msvc_cmpxchg_i128(zig_i128 volatile* obj, zig_i128* expected, zig_i128 desired) {
- return _InterlockedCompareExchange128((zig_i64 volatile*)obj, desired.hi, desired.lo, (zig_u64*)expected);
+ return _InterlockedCompareExchange128((int64_t volatile*)obj, desired.hi, desired.lo, (uint64_t*)expected);
}
#define zig_msvc_atomics_128xchg(Type) \
@@ -2429,7 +3386,7 @@ zig_msvc_atomics_128op(u128, max)
#endif /* _MSC_VER && (_M_IX86 || _M_X64) */
-/* ========================= Special Case Intrinsics ========================= */
+/* ======================== Special Case Intrinsics ========================= */
#if (_MSC_VER && _M_X64) || defined(__x86_64__)
@@ -2459,8 +3416,8 @@ static inline void* zig_x86_windows_teb(void) {
#if (_MSC_VER && (_M_IX86 || _M_X64)) || defined(__i386__) || defined(__x86_64__)
-static inline void zig_x86_cpuid(zig_u32 leaf_id, zig_u32 subid, zig_u32* eax, zig_u32* ebx, zig_u32* ecx, zig_u32* edx) {
- zig_u32 cpu_info[4];
+static inline void zig_x86_cpuid(uint32_t leaf_id, uint32_t subid, uint32_t* eax, uint32_t* ebx, uint32_t* ecx, uint32_t* edx) {
+ uint32_t cpu_info[4];
#if _MSC_VER
__cpuidex(cpu_info, leaf_id, subid);
#else
@@ -2472,12 +3429,12 @@ static inline void zig_x86_cpuid(zig_u32 leaf_id, zig_u32 subid, zig_u32* eax, z
*edx = cpu_info[3];
}
-static inline zig_u32 zig_x86_get_xcr0(void) {
+static inline uint32_t zig_x86_get_xcr0(void) {
#if _MSC_VER
- return (zig_u32)_xgetbv(0);
+ return (uint32_t)_xgetbv(0);
#else
- zig_u32 eax;
- zig_u32 edx;
+ uint32_t eax;
+ uint32_t edx;
__asm__("xgetbv" : "=a"(eax), "=d"(edx) : "c"(0));
return eax;
#endif
diff --git a/stage1/zig1.wasm b/stage1/zig1.wasm
index d7bf519b41a74966a3bc46b133fd9b34aa200f75..fa2b2efa03b4ed138a7f8c52e72648d828f2116a 100644
GIT binary patch
delta 933731
zcmZquJBji9oXL#!42i|Lj0_A6jLTWW5?Jd&fFXgYzMdh0xgJEaz(pAA6TlqCdax8@
zeF9@WV|@Y}NC8MCJA_fs025-YhZ+c0%8}y@)1bJ!2inO2)bb
z*7~|SkPKrjV{HOs4Py<&syYx=$1rh^90@KV%i>=143`FOE-7Enxuj
zCyOu(Kx8J{Fk3M4fYeOxVdi3F2eZ~O3qxgZGV_92vXfa@qQNZj$>l7(j9g%Kb6CU}
z)xfM%EZmGTVAfj}98MZczAeNn3&mEnV6XvSy>r**w|Q@m>5}kI9S+t7#SHEnc0|_IG7oQ*qFJQ
zm{|pwSlGl^n3))Pn7LVaKokozBM&1JI~yw#BNGc7J1a92BO4r}HcMtk
z7G@?^Hby2!CT2zk1~x`EW=0kcW_C7aMkZz^9u^)39%e=cHU>s^PG$xsHbyoUCT12!
zRyJlvW;RAnP7Zb!b|$dAjFS(%s^nVA_`7#ZssnZO;28QAR5!CI&V(HYP?!1{M}JCJqh;CT0P4c6LT~4n`IhL3Tz)Mm9DUMiw@Z
z(^!}p*}2(SSh(0&xVTx^nOGT_m>3v%cvx828QFMP*jag)8R{8%*qJz)SlAd@SeTgD
zLD9g@&A`db!NkJC19B@91TcdsMzB>Nf?1rIkx871kx__=kxPw@g@uiUg@J{Ik&%s&
zo0*%Lm7AHBk%@dJ(g(5}gxNsGGlKMjT*J=5!otJ=5@!MvOrTW529g2s
zp+c<8EP^ad;LzvbU|{E9V`OFonaRS<&CSgy$XqYTEXd5xC@8@KN*SzdTnwO?VP$7w
zW@2VyW@lw(W@Bbz;{iFHg@=Wije~=Mk&%UwiG_=eor{H$jfstom5~{gcv;z**zK5@
znVI-yLCKVfm5GfN6mzU>Y@me82#QiJ9!5bXW+pZ!W_Bh}I$&n3XOUxMWM&a!1ceL}
z6AK$R6Udh=tW3;|ObooNpd`c&_5w2_I|m07ix4|IyBG&2ClebtgFT}V69We)DDoK@
z8JJjD1Q}Tw8QB?`+1VLE4&`9rVBlb8Vq#-t5o7@cE)z2c11lpNGYcC#J3B}fdp$cN
z6AL>N6FUPlJ0l|-Hw!Bx6FUndFAF1!023o9-7u=NFtf1mvakt&;+O>-(@c!)ER4*I
z>>8R9jC_1-Z0f9BtlaD@>_QA|EFfc9xWF;U0S2rfyBV1o+1Nk{nuQINGMJcH7(wC4
zSkK7F!wzDDQVJUr4-*?38ynaXMkY`qU}F_xWEKPkrH~L48zVa-3pXg`uyKOIo)MHK
zL7AO_nT?T&je&ugg@Kuyor!^ohl7EEfrF8WgN>7ki`Y9|3@j`x
z46F>itn6&8tPJv6Ow5k#46GbXEDTH>3@l8{ObjfX>}(9Iyu8c;tc(Io%xnxCTpS!s
z?A#n23|#CSP7EAu%;3ZWavB2+)Pot~Vq#*fybP?IoSdA@?3@am3=E*4W9DMzX{zU}t7xWZ-9FX5i%q83j_zz`()44C1jf
zg8>5rgCsix11mEt2Lmg!qBFCk3j;F)D+8z~tLI>0=jG>TW#eFAVq{=oW?*LEWM*b!
z;9zH9;9zFvVPRxr;9%n5=4NJPU}j}wW@F=IWMpFCXXRjGWa40E;@|+0?CczzoUH83
zte_%|0h9um*_c74I6Dg?8#^N?Z?Utmv#_x+u`#mpU43g}e3~X#HtZZyd>?}-dj3BqNf$|+2BLkDGA}F{RAmPuzz`(@K
z%^=Cb&&tdS4n9yhDZtIl%)}+Y$<4vR&CJZkz>Ek8MrIZkCKet6E;c4M9%d#61~vu;
z23AfM21X7hCN?H+enzm%K#|DA%E-XP3W^9eW+r9^4hB$d#O%hz%+Acp%v8_7#>UJH
zDmGZTSeaRwdHI=`m?YVlm<5?Zl@7gOVTvGbbYh3mYo~12ZcV3#j~I
zVPRzg_#0g5r91M&CER4(y0t^DI
zY-|kNEDRhhpqgHmgVT$ViHn1sgB^tG75P~}1qvfGCohu@3j;Gh2PE$?FflVSGI4RS
zGBJy@aWf0Cad2@lF)%YQurPrXFmo_)urV-jGcz!;u!Blj4h9Z(4rUG}1_l-e4kk86
z22jBYDtVb$SvWWtSXfy>$y1jZ5)+KvER3Ma8dUzVGqQrRFEbl6$PK)lyqutX%?z&D
zSee+FnVFbCwG|r^3mY3FsMuv>WaD6B=VfDH1{K&WoUGi;Y)ovRgu%|t0-QnGIOvnb1-wWaxyS6GP1C+vU0K2vvPq-HWn5ZB}n37W8ne2
z5u|_x1e94A^%$k}7+F+!c!X6|`Fwo&G&EKC{PcDG_4(u^a&U4o2yino^YSx`aWHVQ%IdIkvvM;taWgQ1vKSkSJSb~1
zg9=DC1}$*W#mT@dKY6`?jqYsLxvcY88-yE$n}m6oUP^GyInuzmPvjII6IVTBoic;t
z2gYnA1{DSc2FHfon07!t*ktfTM4Z`Bc
za=dk5a)78P)8RFf(?wG-G~N-VpKoXqa8v+^ICgBDJXt(Y>tJiAfFrwrf#d)G%!RxR
z3QUdyC5{56j?5)W%#J%wPZh}0U}8{UcHDD%GN*(R)9Kd9x)QoflSQUauwhkV+%dUc
zLW*(sk)S@L^=+nyf6P%s79tyOc8H!O7WDCQLI#CNGp)
zz{ADBt-#_akfp-FtvTH>T3B@YDQPzD$+^;|{BTp41&)ev>rZEt;*^;F|2i|@DcsYa*n(qW6S1w@&_3imrYJk
zT*(9XkK>NhlLeIAnJ#oro@lSn^rLgyQbr@Df1Q&*`b$l|p(M!E)HC_DlBw~L56uFO
zOaeNNjD@Z&4hpOaY>sbkHwiehL4V_E>B7jS1e&Yw9sSXo8NkwJk)gJ}b|
z5}4k?t;CqE#H7HWz~ni3qOy%hV^0UnSY8&esqb4RKU7v>yfm3jWgk>`2MMX=rHFvVEnikZw
z6+B8zAUCZ6o5rrdq`>YuxlrvQ+rf=6&7zYXOeEPJg7_c
zxnh%ltD7-7GE6qq5N3t=#am;7GQ_JdppJb5HXhpA=G^3o
zhSK#tySoG+);6#xF@ikY!lK0P_~lLuOam{Yo*c&w=O!;OZe{zwsY}4o1e9LDQO>k&^W=}Y
zQj<$f#2J@Q?lm!DoId%0i3!uwr<1>#q%-X}JvrXgo$Y@|r+}j}NRu|xQb>MT4azSv
zGE9(+@oq_rfFlpsm$1BY_U2?)vpH-_rlKaXBOt!YWJ5KX$>!$nAPXAI-PsztIt3hc
zAQr6e0%yprU6Vih%jiKZSn;|U7V6A8D)`dIb(d4gIMobrOPu8?nfEw1p
z2eG(^55wXXKCs2Jtzj1bfLiN+8iX)
zCi~dh2yeUDBH+ji4tQQQa0%8qdA6;&&<$w%b7XPn1(p9-&P{$|Tf)}3rAxrk4ivYL
z6nAI~IK{27;}`z@u2H~I5@M1O*rcY(r|fD#slvnFo#}fkxP+eH21*c&OQ#p|GD=St
za1hnJd=MV>yhaKz>yQI0~UWbA8u}PN@fJ5oafF8utEuSqTk?9Z=1CkTg%e=V>Rp_*}DqBL_GfV6J|3VY8uE2Q1Uyfa<)1q!ViG
z6@FMo*7$s=Nx+c{#h5vuEXIu@bos(&OP>x#kRuQJ!W?jG1u(mSBidyu7dgKR{OA8Un`wY)0e#P;;ipXs&d;zffA0Sas$*0O#$ZO=t
zsA#WHtq75N@S;V)kq_CS%a2WV2sMWW_6=zG+(8PT$rD3eJWoAo5^xlR>6^l+s0X&G
z_h>7^I7dds0I=w?w}>_a$ogP~MwnFFWVtY@r0-`N5ZV-Y6qOY^7!}>XdS8R`E3{Dq
z3IRy?e*}p_G88xj3>0z|Au_k7x4|1hAoT`dQyxs76(%cj@L>}|&XGaU87z1F;pFpS
zGWC1sG{Gt>aL}oOCC`FHVTGmwkD{nTz9K|s?$c&iNd#6e3Rb@wBnpc#r1_8yV
zj0%N{5a}Q5kb=#r0Vd6=z{c#@wSIDLxRgZir52PZ6a)}#vNU_PO
zCLabN
ziip?9kwGyU?7I1r1>$8Tw*GEJDup-|*}-zheouCdS76$AbaHyUirHsSPm)?Hw5u
z{lIG4@4)LNaBh@UuvIixaD~dgUNZS$yomI_B~3_0VveG$0*|5;So`J4AL2!2p6+Ue
z#~rAU@K?x&%3pXlSusIG<_W0PC5EuuM4?{MA8f#uqmu&@M1;>CZAMD}py>EIxgxVmQtq{X|laq>rhSw0>`
zONAswi1%O3n{1FQ;kou7Qc`hbR18t*W>ky>2gkgpNEM1BqoSQc4@e#qqW#EHV8y2=
zPfS)}+&g(&vaWF3{02}CHFJb#nzfU^C95z#nJk@R2d%C<1R+g>9zjIY09swQ2!g8X
z$*WU%>i2*WITs{cj1CP*vt_`pmnT-OD@Z`p*
z7zkF|{ErlyI!K9Xh+8;auI+M*(g_)jgo$Q?|DYI`IQhD#lsHmvW%BUCwHfiSM
zhEyrxi4&Uy9Hn7y1-Y?p^4e4t#_N+WrP@I&;Tce0&q4C_WUVwiSjP?&Wbj7X!^yR2
zilFBCf;3oLcn4Jf9%TKu(lVjOuYgLgL6(k9w-bf7)sc-q^>FfrbVX28dqJA0>IFfx
zzSj+?QFoAy;?3{@HSVJ`VEw%lP*rD;R6%3$2q+f8y;fL59ohDilb>g}LtXs?YRDfX
zLneDbE71m8CMRc=
zulQZ)wK)r*9d9dEW
z@4T~&T$4}cKZWZ00oC;fS=Whz7N)jMll=;f7C`=s=+aWT0?Q@eHVgW{5z%F-HW+
z8+j#}jJ=a@l)PrTbae93QY)q#b0;&FO=5euvP-~GXYzwnqLbH^`7`ZqoczDci0L_~
zG?SX_Ti(nz3p8Gc2xG>5lN$@ACo5FwF|C<9IiSLwZOt@LB!ErWRPl%D+U&_2D)pIO
z%%1$P(unEj?CB4<7^NobS0yvQncg{hW2GM3zv-OA`B7=@h}9TgN9v)8sQS7LPJP+)ZA@ZKC;SHi@!;PT{?
z4W`hL`Tz~7FUTP!(U{2iXL40zE>zJ3QAlu35k(44#wHu43700@H>E>W9e}Djf~@L5
zQ#Mrk0#y16vb1aS8c@>u*bGZr+%5N@S#kkX{SqYg&@4Gebh5(>{mIs?!Hh39_qT>I
zL9I9e^~@P$&-`y+!F1x{<;PCv$$$+tRXG#MPVA%lxw
zt|F2pXmC**k}RQviz}{8USKRU*}Cf@S!5PSWIM9R$$eb{`V0)#j0e0FSR5Dbhw^rScuQft7oG|%
zj$2{886e(){apg}nhXr)4BuT8SR8lm?-Bs_-*>hs}9pk%zF52ZznhLyX7sdmJ%~==^95y!&bqU0Q
zQUNG%SR5}Nf&>*(*gQtD2BsMiHm?q0gw6XSNUnc7dD?`1^}msYz+Sw36iE>{`mZC4
zfW7z}S>)#tsHebQym%DK14sW=7!T~lr!XGaiyw}53B)73`0^-*7k{Hz1JjJ~V$(4U
zFSegRa((NG$wrf02HKBES(_$)gogJIcSk)%4jyKB)`mqbQr4zx+`^)^p2d+4{zepO
zFn@!~uU)4Y>a!dN#NRM)`-4U_;6*tV(zL@={rbts=^UJIUn7fv^X+?N5pcf!k1R6r
z1T^J<^X*F*51en`!g%0(`xnLo=i82xDEYSWBu2iSd=e=WPCN;<29a-Poy5qu^G_pb
znR|M&@3bmtgP=nU(s1b!Lu$A@oaSJPG{wo{0Gbdw-6f#O%wW#EBbbMgTfs$v!*Ri7
zco!Y4WWu@0VbeqS8*W1u2`F-d8l5*c@0@PISic?F1>g|vKZE2ZaEMMu76FIoVq}r6
zr=ea2hiLB^C=VQ>6Jb1Xh%SWjz#+Qk3`&SDJ%bUV+fl56X-0(TzB3phdiWfY>kpor
ze12xF$i3!H0Y^rGbnt{4FKCg(pIws^W~qxj0!c6-Bp4k3?3z4#mLAjd=E7CCzk8YAHN
zS$ZDI1IN!w7!MpjJ7GL<{2V!tQW@_(j}brTQLKS!M#Rsx^BD1S`y!I-Z%z)M=Ou|A
zlwW#V1RPmFA-ZdxIw(W~=1WcfJ5NXfUE%HHpxFw?Kf9)YMotgdPc~d64^gj!p>__)
zP-d9g3kN12oTmU$D>eDXd`z?NfQJ29Vd{4rL{{&%KnTOpw|0V-B!C=UxB%|xEm#!(
zKh+HN-F<{YrG=O#Hk@n~aAX9T=(`YZ;sh)TZ>$H6j)4^JMkxHZ5Ytb`KyHMG!3>lz
z$XSG`{`(@(m^Um8Ho(;%ScDnApVvVB4~~lq@VL-joXFI0-TBS;7B}%4
zHnui^rf72^SyIWdXL-ATj*KNBzfIg2#`OKn@l88uhzn)}ctz8nU6UI(3A23!MfUH>vo;wr{n<77>?SpiB_PGD0=bjt
zY_yzgvN)6-Yotpn!)H)NW
zGC7`~Jael&%i1F^^``aX#x^7JVxy@3#;c^pb
z0I~qmibINvtG%0jx9?=udoX_jXyKR=uOmm9qO9X1D3=e!Rd##=@5L@{Z3Yo9uqA
zzIO#;SeTJpfl-0W@ik~l5<0ob3>KNxIoWNGgY=&!#EJq2DFr@7ZUtqq{Ld+q7w!?4
zTye4)lvu&DmJCt~N{*oAb6k!`?@zwCr;?FtviIJ@&|3R|BBa(nqKH&$YwTOV(Q~#{
zKvB~1_v8!v)ERpx|JWxdwDCGfh?~iQ$w5I%K?@Wzjr&C=+w2!+de=NTaK8vB?KbWg
zVFS4xl(6?pfJA2R*M?el18UhFWXm4xUjZG3n4tu5%N!+yTcD#5Q{zDeEaYb
zM#im^7aY}RzT40_`O;A@rp4zb%N^5Tygu3eSf&Wt0s(dfUdNl0j~p}R-?MD8fTONJ
zwE{@!uW2&&V^qSvFbyc#RlT3Yw%DU@CSVS7&;&Z1bbz+Kfyunp<>xdLsWrvC;gb3S5E$S(u3{t@-6|#AJZ4IF^Wyjtk7e-IjvK`@gju3=9C`O
zxoMN{oRVYgoBaD!IMcf?lS5AHfxXsx+K*|)>&aJ7YcQUf{O|M?{&yfBs0manFe&hY
z(#gX!@*LBbPX;w=swZ!_AUWCPxboyT7p%mf3ZedCf~lW!R-I}3^2K}4GJ+y;|2ake
zZ;%XIt-!0m2{Q5hIWhiUFfl#_ZjhMB`5dtF^RkoYpXU;TYJr-q4AZ&)yt)`vCtM0@
zz@PI5Oik}5>s<(7nz?*(!v!^_Rm&&aU#|o4G%lKgc<~oY*?ye?O{GsRydg6A-9=0O
zH6V}i3RElbC@_JX0gA3YAbpCHD=x)~L8YN~slrsfzNF5yXZd82%k%j6fehe8HQ*G;
zfI2a#6gJJLmQS94rHbwAThKx|uw#s`S~8tkJ~{8I8q;O4cD6GhlUXL~Tr>oU#9vI9
zy!M(XTgzEch5&21bIp?J>hj6r*VULFfVF^l`>&gVc<~p-CiC2gXZm<%a_)^l{zo9Q
zSW#X13am#ADuvCJua-}azbLl&;7vw$&{7*SCWgtgZplu5dP~|7dHw8EfhF01gb?2D1&NAq!nl<
zpc(fJa>o68I|f<;-GEBpL6U}+Kv$Hf>vJ*cO&-iaaR=5gam_3hba%yXFC`nBD<(>KaHT$fU>xDw`iGOS}a6mq`H0ztFBx{bO|r%)k&=
z&;)sG-(zq!bmOr+sBrlCSO@CR6HteqL3XJ96FbI@lk1+qDuNqOMR$-D9e6U4@y6t!
zr=9$pCV^VA0@Vr%3c?_}UOkm%+%cKsS&JA{2AU-pU<$50S7+QY`P4ICowuOY88cif
zxE1>0Btw=!jRK1Tj~P>gw*reJxPkZi;biCM)nZT$SWIO4{BZK$=dd}&32KlCn4*Rh
z0p>3vpx#~pm0p4@z4C<<+rtA50*+fYe|!-RTl#iF9b(cMb%aUK>~KVV`d>Rn{mHXl
zZ3ito3x4g+w!E!Vz!5xyq|I~*vcU2NWPzph8%NYCC;3e-)76&gvh0ldZ0|w&R~kG&
z)7uKJvI5_Ru|1g8CE&=8=#RddHTmw_EKmgM{*+=~|Dk!Z;KvdW_rOneru82tv%ZI=
z-VP0jqkA+Ej-KrGz8U1q^Y1l5)UWsMY*TtV1sttG7HBhF>Y28b(TM3r&*XrhV8gS&
zy0bTc3_l2-^VDWK(hc^+scx_*tiHiaXIwD3@mm?xV8#WL6~DvgX*;x`UeHGJLeqD!
zE$lxu1rZB2@kbL
zEt6mV^oJ(C4jrhKI!IOq{ptk8%H3b?>}x@WAA$PlOE1{)U%lW|vFx`Z)3YU$PySW~
zDgN-=o$X;iEWtqrJ9;21*xUbzfmF=@u^jm!88R@Ly=oozd4
zsW^B!u{P5W$WrltU6UXFke}@PZwAy;j6IW?{?~vw4gb{`dnPacUjy~h2VIDlzUU&n
zG}V@I5z~&-Qy((Av!Cee6mUEMbwfAA4HNq(Grp0ZzMhfs4$Kris402~Q>HIvVmu73
znI`B%1og+Q>p25*cn?v
z_9$||?Xl)y^kHi0ncm64sLA+y`eqKsH0X4)m;#4m$DQe_oQ%(*4*3Cf$RA{fsBtm2
zFwLJ0?v)<|Px5B6tp-i(s)4c=B>JyR2hHv-gBml#5aORXhDiRg=V1h;?&(K(7~Pp}
zwN3tC2A-j3YJ;p;>zUrj&Zxk8z%a|PW%@K;#zl;;r(5zdO3Qsm9s~MdpXCUhfBbBp
z<=A{_dJ!L^vfRsykYOr@oAwGUj$dKCryySArRkgb80|&+FCmR3bsu2Ja-4l>IvYQu
zwm|PCsP4ZY^)oL`cjRYGX8bjMDL>;4#*NeK1Q-jL{%)TBL4Yw;?KQG%{vTk-azt@o
z_dy1*tEvSVtr4#J2pW2YyXr59*LZ3A6+uRQR97tq>7IRQy0{QyB*Ilw9Tiv{XI`4#
zAjBBQ^mFs{2SVuXcn;Es;*QTCk>*R&vxSk}aT8<`+#OFryv9q@j|ekbqq?K}5F~Vj
zMHmwi?)VE*Kl9S`77@lcrjMJaKM_H9$9a%G6nESPi8NoDo-c~*j)P9fv2+&1YrHi5
zgebZ@K7(|F!b1$%9Zx~(XI`4#A;uWTG;z!H7h>q{*nSv2UJipqnlDW+7H5n>#LG%&
zWOr-@@ft5pe=E)?iy9ucLAqyOnl2~77>5Xtvmo^|FHP@}V2op$w`KYp33PYNKZ5R#
z)gY1POVi6Gk=-#7H1h$Em$@Kb=y@boCe{(tK(9Zdt}ig!@l|tb)7$Du@Sn|3eV3{TKth{CE#C
z0>%BmK_as+O;46XcK=(D#+jFe{{me_#vsAG-bl#TfylQ9;^-)82=ycHP6cmRp
z28lFZn%=2~?9fioQWdyEr-FEmm!|(xLwD$IknY)+rfaJ+Mk7k2wIKB~FHN7O&e(^M
zOglk4HLyf~sRp`ZpMwlXaqMT1Nb{xXn=}}M5n1gf$U3-VpMrSsto9Man+{s~11&24
zgN#7QYTY2&*_Wn=Xd?UJFG%CeOVjsjGPW=s-ZkA-i%}mnD{TkqLvh|=kVx~T>9e#L
zgPq|e*-FrwA-MClf_QM}9R%^tgS4SI={87Y_ND0t+Kj=7U_A>`JoD1@$=Zx9Ol`ZS
zOX{FIsU5UL2*pX0K_bnUrswG(NAFLNNpL5%f)+eAUYdSi2R(XMgLKcnG+jU!IcVmB
z)X%&$y;_&?B;(!bu6m3{jMJxIRAiKzK1q*JOa-M~a@-cJS#sGH+$uSvhwPx8w#dcJ
zQ4p{3(sWyWMrmP`fO%~TZYpK#BfIA=Ncqf5+pp;}axycW-9G)H5u>4C&yFt8pa{Hp
zzc+oIDWfXWgdK>|J=qvN$xR1Mq@V=gVvtDlrRfWdk=@$~>XgF+a4Lw`cxk$z33>qT
z2I-!CX}XgMvV+%x)X%&$eXR-O9;V4Vre~Wn8VO#*?%rES?tN#9?%w&J)nq8{T@4ax
zzBFCi4B5RCeUZa=E{NB7Y5E*9boU+x>7IRQ`dKq%_ihELpLuDzg*oFi!C9bc(;QK4
zuAP40oKcnW3M2u@TcA5}`vvq0|1e0T`O@?l3uGs*3_y0`RuHf8()6K-K+0QU~o*
zeK7)$_Dz?vW%OcN*)cuMma&uR+KcHQZ5dxeI~XU7p|%(!+49AXaSK$w!2}}TVuB=p
z$Q~{~11diUS$={8<29)M8Bp`*Aeq0t!IAMIH)>BT!k5vV>1^xtg}#hNOkcqxDBIur
zGL~?1v`lLeP~>oInVucasKN{4IkE{P^6+yzwoIQB&ZxoIGW}>cV>r{FUDM?v7&RrJ
zgW~B*T;TakUIqnK1u4fryOzgCFfuXCoi#l%it#+tk=xUaq8Y82j^3W07tNT=v|{7*
z%h8OJ7#p{z#V{5!GrgP&QYSV2b3EfQm{)8eK{3Y`5fs}mCor-zLKSpC74#q}nEoY^
z@f1}4hYiGlKQ;&hrr$_ntY(}#JtUcNKjY--A}NdsOm|whm!&ZBFiCVbLPtg!VP4@;
z0L>#$pOwa_$=E;rNE%}b5OV1?+2zc-hx_n0&3M6WUE{=7|R(SP2ZaVD)A;a
z779;i&SaECjR)mSMt8Oip!FI0)4^vaK-Ox22ezI!PrsPSsBXuo0-AdQ?}=7o&}0%Y
zX9h18&Qf3i9fiT5!NlPxkt1+`5yaSnW(n*E%?M7n%3}0nY?DWC5
zw04%o!Leoc6wp~AiYyAuprbVemU2u_$Y%@$hk*iXwi2rXivp{sv?42LvX9BZv1j*m
ze{+7N>2LEHrMP<`#wf4~ESf&uoL`m?F5t*mCa`GwLvw!N=>Y|ds)9%oSr9oM3w~J?
z1J)HV)-mpyu3N}x&e$|PwUDua@z?Zcg^cD*cXn)-FJiQ0tiJ}@JjW!^&IHConrf(FOK)+oL8VJHbMCuY%Tbz*K^T=71)$
z*ieKX>;=sY!G)L{8QIO4Tojlbe@(wr!Wh8R{%E>hDPsX+$MpH7jN(!qObSf;j3K;C
z+zPCq`c)qyB%~FYL2JeyOs_0sbYW};`u#tQ?9=~LFg7rro?ctY
z_@1$KdTtft1jd`w|5Y(6Gqz4ws%Fe*yg7YRHDfB%gge`RRx{3HVLY^bMg!v-R_+aF
zCJQ+3IWt*6;LvpcHpVE%wbNI%F`6;1o&KZ=I!iBO9`9OV1t!M}Oj!zS3Jk{6ANDXRF|M7ysFyL1aqV=$
zK1OrKwbMQOz`PxOU|r84ys&<-Twgy}?k|LAIswdEGJ(;7aqaY{6BzS2z~)b2$(sIl
z3Zv5W3B8PhjBBU&PXw#`HxaDPeiE3sZW5UH9>R;8%;?FucKV9RU^($AV7Z7XVBYa5
zU^&65U|z#iFz*zEXE}{Ah;i-oo@tDr5Ir-EQJwMB^e@vGL->EpoeWuX2s#_-!`$iN
z(-}W7?wsy1gVB(2?ewY{jFA{h9?f9XVLUROe=Rk+HK-GsVWt_msHT~&Q#)XV6)7zIZ?q<9=-FZ1)#O`QUc>eIW=3F|TK0OfDt>F3S~v&lO$3M^)1;Fg4)G-dC&
zt}+N(J=8?af50D$HPn71r(WhEV&)I6qz00{A&U6EFio?
z7r=7n+>V_GT0!Dw+>ZBGHGx>B+>We@%#IsxgKXe2f$-)-Og82Q)mDzDS2TcB8bNqn
zH^3?lx$D8!zB>Zu8gPROTgOM|z!vF4cpD&2(t~K+^c<{G7sA`Nsu5(U4mYTVbL_YY
zmeYptrY;7XqQ&jVp~&p`{4Q8d6C$_gGgwZ8+p(Tqk=b!7#6RlXj+~0jj=LU$eXR!J
zOkkV>`%%dLB^-7qr??k%>nHV#Mr)VD}0`{CV#@m?s3`HA9RM
zgy@?EiAe#7zII4t@k8WRL*)1%a{UlFUWh4gHiJ`6JrBffXCbl04N2=6<@1{Q8dK1F87RgmamR)KJ5
zLE?vr8&roo-h>#+2r;zn==24f7^M^2wsr|PDmXp{o$Vv-cytS>a1i0<24_e|7J+FZ
z+%p)NnChA8nCh4u6qp>jvmMJJTR4S4C&QIE?zlApvO1YLTM1O!33-DLt71`L1|3%Q
z$F4EGc?+W^pCkAboNOgF&=z3N>ASWt`Y^5H
zm@e4Qt_GqFwlcba=np#?l|l3#by3;rJGL^GF>P2p-C!G|bp4gZlLZ`AKzqA*xVROV
z9XYZbk%Ty*LO&3yIG{pvmcTW#Lxm0?gxH`$PY^<^P@#^caE&b7ju0!?AjFuV$}S*;
zn4m&m5JHSlp&84jb8lxR~)qP)C4K%8^k4v?`Lx
zaR=y-7AApqbnzb0*%|DZ;=4iWSuw@?Z$R3c=;myC)&N>SXoz7!+b;0=0T|*R?t%~c
zYoG4BmrPvm9b{
zWBj(=`w*i76XV9|IY$^X881!0c7(A8(&k|Ww|Q6{ITTnyZJyMljMJDHUrhgVobd#x
zk#p<>;~GZBOVf)_G3qhC*uLl#qZT9M<>_ZnGpaFOp8n-DV=Ci|=}~7G|A}1zt?A;(
za{RJ?f&l!i$~V*V&N6y3UYWk*ETgqJOv#palMzbRyqnH-j?shh>h$1qjP{IIr%yh|
zxQ+4Rbg%P_8BDNjeCIr)tRQMOmR4k)zIO|w#&n|#j1o+1c%}<(Wt3oJ1@%SOOwYQ&
zXbcitpe`!G%qp;E`mPI%s~EYar(R?{sdN~0pb4|%UJ#|@*w@}EAaF^9yM>LBsos%C
zk;!p&>-2z2j5F&0Eo%}`Waa_agG`QRKqVwIk0z*cV{+^QRm#jf8laMu$?;f2D@dg}
zgg509n5V|=$gIfZIBRtyNKO@0n=&~bg{V{k6_8AhZ`U+{P;fQK1U{)yk(oypVrUn{6d8!2Yj=Y!1XqPj
zj^EaSdEly$$??G-utOz5wG5ME$G!SykZU9$#`LcTD;0-G-h%j048r>iY9TO#szWwK
zCdcjzVBd>C^zEGm_LDG#H|K5}$V?$fSZszkMi633AH<>H>X6BC-s;ABkWzj~aL<1T
zww@0X7T5QKt>A@(#lII|hk~m^CdY~Q!8~q=%4Uc@E{GN9Afd(y;Z1?);{dhMm>e&i
z0PAB1wN{uMx8H6S2vTI`VS^~01c`faeaPha1L9v6i1ph)fqlpfv2Zu2DZ|Xe1PN}?
zrUgZ2(9(5QMJC55=cb!oWt3ywKfQ1{pXp=)MXBj)E-`XWUwf5Nr+z6YFqn9hLH=cS
zY=Bh2N}zyZcAO5W|Di3S-5|#>@hCu4-s%Nc1z5(fnrv*QzxCMF&+
zh@K4)Yel&o?}4HQ-Wa-Z3T%Qf#G?J*z%4Q%h@L}`W{e=j8}*mhgIhHM5T#ckmO)!X
zO_1g(IQE$xH-eHP6Av%MS@Y+C4dvl>Y`qG$3>@>!j@yobdC-Q?!ujA(f;NQaF9hr1
z;CAFyWOh8Ys98WUf(g_T;sG^j=YcgsTS6;A35W@FE-t89zX8Hy;dWfz18zSvLmaX8
zDcE=>ZpSyT!MYhCMn78FJbmFNM&aoPuQS>+PMFSkgVCFD!gSvojB*CxokENPHQ?@_
z3In$QsQ(IG;>jrRiI1Bf)IMU+U}8~V%n~@oI9-0VhzZ0Dsp)yEMYKTdSXMcCB!dh<
zhvT9ew0e5bO~zx4-P0v+F)A_kOkePl(VvAOOQ3gp|1HKuEwE;0#|KPVO6&qPj^KeL
z&?t@qle8n4W|LN818;zuE_ItRUka+egE>ne-x1t96HtK4FTTwf2^w*I|B+FW`OWj@
z>D-?f^FRW%4;fXM-aMba;SS?+#^2K&UNY)1_D*lT%V@;d2Xc=e>kP&$f&S^-_ZX8I
ze^1Z92bQ06k5N$@YFz_!mJ+8xjRLCzXe-tf0R<3;S%J%pse@U8*|CE;%W={47xx$y
z8COo{y3Z)fxOTe6ea1YPYfmu2T?;x#3uMOh4fh#U7#B^ydY{plapiQ*2aLvyYp2^j
zV6;KFaQOp9MI)#SZZKsjaUi<@#9>xoH)DFhqyP>-$CuDmpVJj?Gm1?Yc*qzGv%dim
zT5$Wpf%5X<^u-SugBd4GU+|LAk97rOmcYd6UXK_TBOD?97!gb-z`?ZeF+7;+LCb3u
zK`l#Ar}_hv0w^>-Fl8xmnlW8qQeanLgDebY1`P!%aUr`O#9>z8G-GODc4Slp`JsV1
z%W(o|)R0jCwBDUdpas+j<3tMMDKJ?M1&HkQ4Nn+(8D~u2_k>ZFanAHRPZ-l-em;N*
zbI>71;4qJU%BaFPVS3Y3Mq|b))3<@B8Pi{Zs5#RMpEHV1H-5$_itsv#;ol5$9h*Qk
z-tdRJ#pgMr3S%?K+7MXq-v9@{6b2D@GNjf2XFuf5iwotpuC|
z8kj&ypy4%RAV>u$D=}R=G5y_Z#(WSz{tcrFh~N5#F%-lH%Rk;Zo$C`sKI1K;3e)4A
z)BD~c$zNMP?HxqA@Eu6|`svf(A*7i*k0Eku)q6%2#?I;U-!sl;oHX6~1EU-)2o^Ab
zf?(|jMr(xc#XiD(@5j1<36z89d_;2hpEYoIRexesVfwRX`od4h7T;)|&iD>eM6`Tn
zRAK78F@4o%#%|Cwhxr#qZMKFxT>=7!r)PX&lw||$!&GFNuBgT%KHcYq0LS!IUl@hz
zL7OfWnRqxstqW#H(6&lNCLU=27PM(n5nQT+23SB_A{CiH^*LzIBjluWMnz^vP@hDR
ziH8MZ0%-T5A`=fYga_K9sK^AV%b6X)bvYBHE@yTGclxI<-oz-l-RCP~5o7&o(DGJh
z$E6_Z1cT$P9!RTF6jZ7!Wa%<6uz*&_%VY`MVC3dr!wEa4)o8top$jBOKR+UNMCL_qe3?*i;fsCNBO6C=yv9%S9Sqkh5Oadkf
zYzoY9d;Wk1`62eOm@!o-fUPS5ImL{r0Ay#1A`6I0Py`*D)xZd`>O(z4mLtrr4n`Eq
zCV(uPzzCWEbF=^nUtk2Ca1RM4HcjRejP>Ts1`06K
zUV!w!V9ZkBR^USN@duFb2aJID0TTWJiww}&2a@IsOkZ^NrOp6
zkr(6=1w}puHb;pp1rCUBB$VnoSR53X6*v{ZE`^>*%*f5{$e_r<3_2EzIZF{V(kcLQ
zJv^j(w
zsL1KapvcS&@&kuIC}x-yIJ1=a9e?bf06x%%fkA=aktxgZ*Zv6t&@+OV9htMhjcaZN
zUe|i0)O`cw5d)A%Z!l&la4LZ2hZ(a4ZZm=-TTuYyCkv1rYzhMA%vilcq;J5H1v2D8
zJ;+W_V&PQaL5gyU3SPkjz1Vd<+CGWwxfNaBbz_yt_B%_`;3l^g)9z^tcs$J&v#5ez{%v#wD#z9
zK`tg`_8p+P=r@<9TXQiPFus|d!^Pw-iRN;s+bVbjrZ;jjiPYa^$xdG<#Du>4lL2ZOM8N?*CN8u+pbSuprf=nC^3#KQ735qcDX>>r%$N?q
z9oN*>0NVdut-!4y>iB2ZbVEL-0uGp->5jZi64T!ZF>#7P)xjMJRsMjFDV6E_nd$EQ
zObPDbIUylOc12M{G&!;;ih`mFJaYn(W9D{zu&Y5pQ3#aSK+O&CEC+2g9$pgmI4CkipC
zi$V3E+c8myNe&$Rj5|Sgh)w4aX1d6DZ@QxhlgRW>!c4Ll8C_07AOiV{VOmC84GGywQH+`!F)78k+|64$}
z4*XJTyMGHhmZWG=K~SP-DVsle>G-~vOI61yX#z)z_38AeBYm^4(JQ-Mi?
zX$hke3urMAiv|;mz|ZOLC7Hw|VS2ux>zOVm#pKHKn^A#Nq0C94W4XZZ>G@Jj#=L((
zBFv6cK)YoBOkXd>6sZ6eS_2jWNwW+5VdPfeRAAC&xXUK+ce;i&lUy{!CdZ$U!j2!q
zftCV)L3)KClmvuQf>0U|N)${vG6;a3Wd-Ib2nhV0zEGM;*c%i?AS-XPK_r-T8SX%t
zx)2dr2xShTL_icXNXHta@B|0g^#9UK;yMrm9p^A*Dd`FP138Y#@d0y|l0KB9!Ssg_
z6s`?S0zaoa$uKE%{{(s4@d87ZqVV*Er}%`YH_I@Y2>t{eE{zEJ|BTa5$S~=P-g!P*
zK%jw1i4{DK0h;0$7Pv8;N0!N;{x3*}4#<71jtpQ*!2&`t=`!@O!HJCg>}9gx@qWHDWaNiZ$C43nWuYmj-&juSYu6j%isnG~3(@0MlqV%q+6
zI=dW`6XT8PUUE!9j5nswm1F8=yfIx`Vs}E2+CJE?WC(~Z(JD+HQa2bCm>o|rWC`diFeva@GqQlsL{VUL+&q1+3X`WaPWc%P
z(-l>jJQ#0G&r)TQWxO%HLzPLE>Giqk%T<|N7;j8}qspYuIB~j+8j~mEjp=D>Otwrd
z*QYO4WAbLaG5w<&lRe|a>4xe|eh{Ac^y%tMY9JN+)tLfdV(J=98yIg)zoEh8Ce(8s
zQpGb_C~!MEWIIl|KHWf*X$Iqs=_fUr7K*gofl8lXQs4rOzBu;WnO>*Gq$~L43V8BE
zi3QYq;BtI(W%?d1rVQg5F!O$ZG=Rop7F>t7g}59)oSF<-X41l}z~$J(oaOlA)bwy|
zCQrtj(-&$psW3G$P0!S1if6pBU0sK%kx>wo6__1YSb&z7d@#=vXq>)Nm&uoD`_t_l
zdQ6gx5-0)53JPczZgAqU)Mt`ryfHmgpXoGX^K?Z6CRz3-CWw@)0h2Lf^Ylt&vAG6J
zUIxb*l~};J2-FF2TyX-_0TpOrQji3-vbhCVrMMNiSR53%1YjnK8#3iEp4i@E$aIm@
z@G&?M@9_gCVrC+=Z*Q|^5@!`!0t+$?1vW>6EXOt1r>}QlieP*)ox_o-i1F$4YDXp?
z#@6WvotV_7zjI`2g$UX6u_%D{CNhnrA>)(j$<9owP!-cpI5Y7=b#DLT%(Re^
z@#*w=u1q82>aS;pml-IrYCqaN$~P;D{wg800}V*fQ1Yh+on(QX0n86QDA&N
z{jE2Xtu06elfVm5cfj!i$j_h?)=v0?G75}$0K_{00G`{28^$#Tl+G}G2WOS<;$dp
z*M!BsOlB|>zWFkVKpZY*#^KnoZW?HM^#!8>hXS+XkMj&!0QUn#3Gj2?e_G6M~x^R1XjUST@HGbeT-jzo^nrqXx>5jBEof?U
zVF1$?My4Zkw@(jZ3SbmJzMhFmv7Qxt0pAX$Y$ecwUIu~H)BgoCNok6LR$nqXfaY!m
zl$dlGF0g=BL<+oNR$z2|!I-7Q0+PAIsKhkgF@#Bi>BOn&(IHII^-N|=8$b(+8k!n7
zz$3DZpmEw0pj*#a%$PuDp&mFj0la#RX%AT4?k)i{rZqd56u=5EtY=bSRA2xP*_>F<
z1hPXwiP>=p6R5wU$ff`~&HBW8rYtk26+4)i92A%}m^Ltha}lHCiC(6xdIeSuCK)9r
zM-EU*2bDLB3QS|1=-xgn+0~RqezzHCD5G$>;fB@1iHayUg?Kf&S=HZ(#r%^sK^2`
z4;=hVW=svepc7^o9YNutz~U$ZGO`|Gazigu7O0|U0vAzWKY-#45}vvYUsyo(FO2sS
zg?9yo_Zx-R#0u4?%P8tia%SgBcXy0`~tIL94tqnC^fCA3y|QVgkAhe^Jc)
z2j#J9Fg;;%WN;VwJAGpqlf3VGCMBrL86c@mK~#f@L5WF&=>wAjlj9dAuwBTiAo8GE
zLW79`l&dtDI+($uE3DIH!|4;U5MLG%+wMOJ19@X}y-OoGf|2RRPW+Jabh1{{GftLj-CC9^=;hymb)a17_=GtN)LdjyU~p_;fimkE9OuB;430Yx%qwuF1`~L(!VT~^pTI^Y
z1#k`*0Ik&A1m_rJ2_QIatXTqEU{bmax4_{E(WT398--VY2Z;-c$OFs@OpZq&=>u*T
zNa6%Y;tZOEF2jE${kjYd;M51R3X-T`yt^nW??HI=;F3g_;XZ^5*Jw)L>Lg76|
z;wdp{GCMedt^hs304kQG1vbM10VEXwmb!tE0xhJiXA;n5Xl4a>qCgSN0*z!PCU7)^
z$~+B_k{2wFjD=PV5G7DKn8z+6yMe{Al@(`#YGH+&0xql-7KVP+)PCfvEtCC@8Qv
zsz61c=@Fi9w}A5P4oJTJ&j=Q-2d!!cJ5PxTRF3}u&B*#2^;1CdU=bSpr)@Sr0TC3i6@j1?DV)?Vwm<)L^>72+GWN>KP%494y}fOUfXg
zF2hS`P(yfMk$9jax(DQ$18|o@EY)Rr&LXf5WCcXiDuSFnJ?AGu_N1PmlJ!IJ@-K(&Ub
z22(wQ5{u&`+=*x+Mj}E^Lny|sVaXEM0X3FEmtitEcS5YxWtf7*Q(|%KhS^vTD(_JX
z!Y)=vc4+Dcn-9$-;4A`7QA#jlpMV1m#(RpwdxpewyupOzIMB4ezz!xy21O=CkcptW
zQ$SwBM>_NGl6LDljtD
zgIcMOTAk6cqrRUhOP~+pKv3TT956~ujyG0L7I3rxHEI<=ZUNUA0s@^NHmG6asG`8)
zs34%K!05Q{#lywyW(+UD^$R4~@`5Ia7(oYr>;`$wv5Cc+aR-P6${8D&G?)U+m^MK8
z^|}lfL3@>~774H>9ajE+0{n6d;mg4<0~KsBEun>(nz=qL%Mc-cS)
ziCQsS0y&S-iUH&l4W#+f1uOLq{L^&v;ZXF*tLTxOHm4>?f{b_C^2_{1SAv~
z9Y6FlWhrt`H%MYqss~M6GlJS=bF=YuH0QVgBfKmcTy<-DN7Bq`y0vb|c)L`1t&!oiW$l!Q^;SVuy7*fEm*X5LAZ;nNS(zJ^gz2vpo4tiJ`$J4-;*
zM~oUw3m6pz6c`=BJx^8zW`Tp?_}j3fo(W_+XlfnYa|Caz1@%i6SQMB94uZP9jQWfR
zb|6&k*uiAZ%%GqE(k9`Ev^|W`@dIqHSi=s`S`bi0qadxzz~HEz&Bx5Z#>fV$1;PDW
z(Eb7i1xLng0h4#2WuRcT5|b-0n*vBmi4~;lK7$)CXsA~q%aJJ?F5twVs5G51l}Q2I
z{bX_E64(j$N&~MFGcT6{qai~Ruc9)OgMv~3i-Qw`q6#GSf>uZxn;F$k;`
zR$$a*o&lP(W}d*Pq~c`X9>mMUt)T405Tu}@prW8$tE3!S%*&{ttf&OalZ@Gl;GR&H
zA}gp184b7s(31*{0na)i{w8lVvlPDp-b1tq5gOyKg9S%DK&DT0MI
zFoB1D--A}MF=i_=E3kq}c!+-#7(t8GK$+U{0B@EexDeh0F@h74_jspINMjP^>||2l
z1odJ$r>{$6QZz$&AEmN_mm`jhRty)Im>ocA0fIqN)BmS2N!D{XYJj>ljyyS8p!GN4
zRqYQzwV2}raH;P2090*pXfP=#a6lUSj(2uU7JyblPqu?fckm#E2GfJ>OiIjVOfNvX
zLF;%vfEeJyo6+$DsJPk$t`Sb~DuIe^4p7`LsOMEuHfI)40P{L{&6znM%m!Xi`sQ$y
z$Wj7@@(SK8O=boqC6FMvJ*1$lpd_##)Jy?YAfOQ^0R;{a#h}0hra(1GYG7^!X3&%iv^mPlIK3g0
zNn8Sv;y@J*k0WE2A`cG(H^X%2OeRI7?C5w4-gsgMSqv$66chyBF>`~$x?=~EB4{PK
z0;6LKZx(3D2B^kmFk@=qbyNp+7r`}W1Fsn)gMySJ3uwTrg*Qu|k-?e~lo^?=89P8t
z9|cfLeFqcdgjwc#@E91VXatw24Lg`X2?w->S0YPE8eIH<(h<8OW1#}Oqk?x4FC%DS
z4tRZWiDO_xE#jm1GhKtU3;3QIwWNr4SgjwwN}D+LE0B0(!KI%Y6f
zF~FKNtf0kX%vKDb`4>hj1|3kt4ceLnRVo4sk^+gKl>7rU&tfq8Iytn+jQMrCe`{2;BxW+$T&vF3!tpYq`;0`${v9>@R%TdEUdnN
z3~Gb1a)S~ZaRmWLEy!&L)cTn~-P9XkS3rlWZy<{62@{w=Z34#WjhRd$^(>&m0NDZ1
z7COlB;D#r9!d2kbWM)uQ0u}Zz`k53}L5vgqOo}RygbdUM6z}1_g!bw)spF(x4^RVqz{lwa@|iU1K^#WMAF!baX3&VG3NL7(E~I+`n&J@n#0(0s23|#F$m|KI
z_XZkzfVAG3%pg-0;E4{$HK60BSp=#TlpO^?Tgf5K@z3C;@Sp()P38t(B_*f-|NsBz
zk6@{H01ZikCr&_38~AWHIETWjMo>&Kxq{*VR7_)ct0prjdX+%w0MgS0#S++^;Nk?-
ziNxmGXsC0c%VC%yu7)g~f_6l{FoO$rP`M21Ybwfsl5YpEqO1a=;|}I51qR0+(1;7T
z2?MGfWX+h)fOc5QOm{3~vW*6vM+0@Xf{cQ!z+03c2F+@Ks(1~i3(Sh3DX$02Sqdy*
zg%iQemM6^M6Kxn2m=)LrOh7vlL6HSk@OJvmLMA1CHZ!I_pq@6f8Pg9Y1<~nTMNC?Z
zqT5Z2n3x$YmVufv4V+nEjW3w91RjG;ZQ+EP3XXb+X~^yponBDPByI^B+W)`+8tPL5
z9jWsJ!eq<#SSihUt#wOrrG#AfJKiC?(K#
z;)VmD<%A6f7(AtwI6%oPM}b4&AZXB7#30ZBFH=cYy#gE9d`AW|rV?&P@bGpHNL7Ur!c36U
z+?1H49ak|rf;`2X?Z{N3z$E>Ln*luekq$0}6c`*|fIY9s>IAyAlnJ!86LheO22+Y6
z8)Wvf0Mge4r4~>pUV&NOotGK3DA$px1Y{v-kEI3^gCHw|61yuevjY3{h6*N$dM400
zA{yWzU;(LQafCQui5a4SMUaaD(pg{v&%i)Tgvh}R1MNov?NDJ+U{+x9l>P&{DU-=@
z6{7+R$Qx`5ObRTHOb{&`f(#7Y3Jjo)6FCYD3d|CYOl6?VQ2=Tu{a^qkXOS!=1%Z{*
z7nU-m)Pu%=LCr^ylNl7)L4%wM?2ZfqyBHPN71%YHt}r<=W|=Wv0VT5zPSC1%b`2&O
z&_p9-or}N*CM6DmW8n7a1l)-ZG>pRJ03KA=WoTjrH4_*eXK-fKLu`?Ony`Qq6dRz;
zTbSt5GHyy0Hs|-
zHkfWx4r-F}g4*QFj>u`B6O;s7K#SfWiNA#roccK-JC-@Ysht^|a6yai5y`)U5t9Bn
z!Ezu&dKeWzy3Ck*7~wepY8oUBv1Wm?!1RaZOp^8RB;UbERGJ6Pt2TgrikaqXU}+v?
z7O2IGNc8T!tdJCnmgd=&xLkQz6}Ui&UaFoAA?tFY;$WmZ;{KAkWFk!l3C6gv2)AZ;{CKFH47#*lx$L@#}
z`3t~70-pa|0bxSY0TaA91WnDr(*Y>yESc_5&LrlvfDvbL2u>1n5J_SV_9QXgpo&Sn
zo<#wX0+zs1z!F3XfNF-M0B`~TZ69YaV_L!pEd=L)OV$;Ppi&7^v@RenO@Jza;2B_V
zVlw_4)y^~P~UF{XO=+M^n2Az@%5nj)C-(h0Jw;|osE5Y&kqT>_3L7{PT1qvHy$Ebu*Tpzc1S;}S0DYLn^KwM_Qf4ZK+bUztIT
zIeo?sUQkSf+r~dYPGE$z@64G$fOtyNm)0`rGRjZCT+8HD4;cbxFk`v_9_-!I-vl2E
zmWPf2gW6ULxIh&WxU~kVx_5vSI|-UIL59JfbTDNm`YIgx}EyxCnAU0iwmEfK%8)#x{C9{IGg0uz`gCl6Q5Cf>U#{pV^F$+9!%>)`3
znFZ?ffG0H-zd!6M!35r)#so5Y7Ap^9J!k-u!HQuPE9gik
zO(yVY6nOGU$($K94Wz*|V+VM+2r|A4s)#`oOiRJ-ybU1Zc)=|hD~7$G0aZrmDj`sh
z610Yg8MNSoS%V3*lLvedjKDW$P)8qxK@O_F!K=g$iDGCc4HWzPm_Qu@P`g}7fz_1B
zAP~It=KyH=5;O9WC}wcW9=r@nff>9c3NnKO9*|*l1P?YdIzHe5j`(rGOSS)q~a^>^TiNJ4Xq$qj`#e0-Hb$C>4Ur
z8Azc5=`@Cc=GKsw?&0h+g4BWI88i+6DoH>Skf0QR6e?Cl$fOz
zq^JLAX42OIcYb;pmDoVdOVBb721QVN6;vxTXfSm!DlvijJ)r!=<~=>Qg~?b0T*5Jc
z+G#&nz(v~+7Esd(lpH_=E3b=I%F;@tbDM2Tsf+n62<}!Ov
z|JcH0%p(Nu$2+cJ%u^Ka}TYCU@3-?*ii;ha1%%~S=ela$2B83O2LDm4dAE+kBTTT2>b)*_%om-oZvPC
zivqKN2{a;4>|jy?jSzu0!-A$TK+ytP#6CT76RU8&gaSAkHZX$bqW?2PC(FURG8qI`
zf!bu?`T=YfC@m^TDDVq3uz<$odLXqkD0M0Dn=ye#oC6Qh^JQ1y0;Nn)SsN-dO^TETCcnr`6-ITkeC!UEnX3tmkE8nn>K
z$x`G6udtTL$%3SBX>brQ=`x6bszoW#v5=r!_&}8jcnvTMWR?!HU1+^i6cwme5CmWHI0ZC<_+vX0=$m_jL=X9H7mfyAZXN?CksAB1*(>rc=3JPbteh-R4{Gr+aVYRQ3S>E6SP9-dW5xt-0YR3ZgXWmv1}I2_
zb%LCKVm*^N6N>_`BTtrs6xa|3ZUtUY6(pm;3z{R8P~Zh6M-c^HN0ls)ID>*1$h86{
z;1xBBpo5#Z73%pM*+46Cz-!pCr5<>yZ2=YFpxzB=VbcSiEXOM|TLl~?1ggQV0nM~Q
zOL}Nx<^)yDkRk(5Vy;(W;swtD6H3iyOrRVFcTF>7M-Hzfxb_DnA~Pm<&V#xP8b^|#
zas?C>;2HrG6`GH9{^Iatk@4uIAQg6AF__s?pAwC51z0djfF;j5?F$W6-&s#QY$NJYE=N8ddsAuzzRxm5(=!K
z`cnjQ1}><^108H+#w4S_qadarq`)fB!UC;7d0DtYGrgeMNbuw#=s-tEYIM8-npOf?
z1MUZc`r{Y+!CO8YIS?g1E4V^B0SZv`aE28r%%JKB)W-+M5k?x`3rfQxhz1L^R0hWa
zC|#lkKT|#S;3pvg!=2I!J>XvyTLOkU3mQA33XG0RctLqMfe9269pD`$pzr{tRM6TK
z#|dB|P@uuW3zXyM@PZ5nx1}J<{Xo%tVF!~L(+sc-cz=r-(-hEd6>vXLY`S5Ch-iH)
zxSBiwZ6|>zZ@|?fXvQDhBisWrLjlx@(*Si;K!Gg-YBqxM6sUE|o8_nj>Ux2y572on
z;3f&91{3I56-Ll_J@_mPS6)s~7mfkcRR=dWm=weW+E}>jK?M|KfeZ`08U|I+vp~z5
z6vRNgj9DFZz{BZc8cd-565tVaXi>(Z0M6TB7juJJA0X^0t;nnZs_?-T77LyVi_fv1
z6;lT@9v!e^6>dCe>K|8g0z)yV>xrtk9?|#^Q(#fx7iecu0MEVgFoMQHK?7QdmLI5u
z1$VAM=>e1r7@+MFP_6(q32uNG;M48F%|l4z@CQ?tqO<}dIGaJ%C`N$Rr)e<7DDpej
zGbpk%IWRjYuq*I`8ivr`oqz&6tbND}T4$%h#30ZC&&o^+yrBJ~ip(H-4P^HWGpN}J
z*$2f6Qhf@kfd!6P$Pq5W3gDV?1ET`3;}XU!jd~^qMG25G3m6r7m>m>^6(m5;kXGPP
z5Qe!xQs5L5s4Xa=$fv*vT1N%iD$O7utH7ur2I&a$Ir3yFii1*o2&m<&!4#m#!Q`O8
z=O_W5P>=()961y?1>7MCRzX~#lLe%@g}2@jv_iHD#F@aW#3HQ-UXInk3l(OOR%8XQ
z8e!C6>fr^=SV=3gEmwrJTG`xqL5uc4-c?`$Z=(Ra2ej1NbCUHi&>4LkMM8K=;#1%lg7$MylIR!aKmMkS9M@9vv5+xRa
z9&j(Cg%>oR#R%$UfcBYWDKaZSP8S9(nNj3WU{T~^c7O>faw-TZFgw;O$Z0S!lsMKG
z7b-D<0#=DlpOFEwW896G33QMQQ@s+KD=(7*rvfWz-3{o}9>#2IM$jf+#%x6{P(}r9
zrGhk+!5tsGZtaD;6&j?-Ze=V1E!}{*l?me7dd8A0tj=XD$x>p|WdJ*u(T$e@wA_Oc
z9aNl-{~1AQI28CC!NW3;BWsxyI6S2l1r<0H
zL1WLH3WDyD;3Nzh5dfzTPDjQpa0ecgIN&LQQG;m$uQVuGDI$leB9|j*e;NyTKd6ub
zvjSJ4Bgom@x(o~oY||~LGRf9+D{z9`!KTOwTGpr_uD}6G3ryS!oS^YEP?!^RA|)=A
z#o|P!k}M@|eMV#_g4}`TMDVl?I9#wKPC-cGqo{#FSV-E5?`=1Ql2unZa9>
zL94Eq9AE;9914OcNmHqwyHE+N7L?r?l-L~WL9xxG!NdZYhy<^MV^H8wU~**4293O6
z#x{pzJ*eFbD%MdV8+1_&rvj@Y8|Vlv$VyE}We1LHHc(tMIo30kK%5TE{z#F{0V)PS
zdwoG}R^Z4|;4TDt3VaR*sKf%TT!*%^Ss?v=Ch%%*ZUu1#R)KC7ZUr#~Nl+gWwC|%I
zUX+41a7zkIW>OFag;l*UIAwr~$R&&l>>x4NHb6wR4{r%zHV(k;bI{-`Xv__~-Vy9c
z0mz0&ACP(AO5FpzVimO85WL_Uv}%lpiCe*$8MMt3G?m2yDpkZ3m;_eA20%cI@j-Pw
zcvxCWU>az%6lmg|mk}{O!Q{%zFx_tklWP5~7SLn^XfvHk7HAp(#8Su-XkzA8U~oLc
z05YAy(IgAB5FR}9175q&A}|4*hFjH{Ws
zK?Bc>jvqh+Wz1kR%yk)l^n&V9@InlbgaDE$4BQAOqZq?DJ$Dw94CAEfU9*^E#V4^K
zsROx)32fbTf!RzV^`Py=0;@qwNEU$H2xfuS3<<0T&D4Ng2};8on3P07%lRBY8$%%z
z?F<5wSwKUj3L>D{1!Z&Q2Jn(B&@iZih=K@oIv+k=0i8ha*ulihs~`eejG_cu=Ti?V
z{&z5GFe!irU>xh+qyd^Lc(
zM+>+VnG{4EO+c+pCI$r&N1ZItybp+_k>v=BK@mq-42po7{0g89GNG@YNs(Vc#8Dwj
ziAA7^SwTdBU!a9qkx79C;(sPkiB<^mrsIr0CeSFofC4|L8iy|L1WjtJ1qUA3kpk;L
z>;}l5aggIdjT^|W1!Z$)@YED^r9NoG%ZffGg?jLE@)dpHMW2u@O-km>;Jr?cplARs
z76KV9;`j!#p>H!YcpQmQpRoh9Fprl(K?-zn2{<5HdYKeCK>ljzg)9JuZXsF#GJ(-?
z1t`eofX5S$^nq4FGbk|DgOnfWV=@Ev_Z1iwIM$kjhIv349H9xs1MNHC2-bTBrdNaM
zL?3vP1+okfVu1pi0*AnHu)-@ag&;dFfb6&f3KP%}KVJp9)*-C<-#t+DG9FB}70*~PJb}?ic2fA3?
z5tLCt%jH0BVANo;09j$9#12_mxdId`V4Fa*IRg5SEDbU23yNW&1?Lj=pvdC{nXmv7
zd5%B8#)6ligM>j_!kO0!oB)UDA5f^^FuVoSW(LI+`1k?vPCyN&AJ7Qu0Ci2F3n4ia
zm<3LPjqB-Wss}IVg7^>WSdgt9pxtCsz$UVofi}XycEO1#NGNcE*XMyY+6h1+3%vIY
z%~{}xn*my^DR2rjK>-epC$PwZE3SuFDyYCEa0*<*f+!IM7J)Y~rMw{B&@FMGU3~(3
z!EOidfdk2DFfHh30*%NzLU*o#lDPtlz-h2D@MblLvK9T1#JT|z7GP!U^=kz{tM2p}
zAsf^{3*ta)>_Oh&1M~h45Cc5=0on}%i5}3-CV{hHE5Perc|q%_7&Sm6C7^1X5w;%z
zYzr6EF_8TSplazTgBcTO+J&(myb%G^1b__5z-(Z4WQOJ<@N#@m%N7(bKejW0jOPY5
z4!ISCL2Ykg&}q+*9ar26pq*Oa5*;4WybKB=3OuO62~O=GPbnzZn}ZfWf^rnN(0T&i
zAPDaMfJ!Lv=oDz^hS3pPhJfaXAw|d+(5Mk;;Uc3Z^8+4p&{iN&VFjAYh3pv;cnZ$R
zdq7=sPzeg@b~8I(0$(}=F0elE)I+=ut~Mb+pvsgdOF_hx
z=?xP!>Okw2nLsK*A@re_$qY0!uK=D(giIUHfH(?iz8Dg7b9R8zKS(EJn4C#LN6
zA(Mg>Xud2+K?+n+aDa*@kSjq;WI>Gv0eIMg*Osyh=!52dK{W?F&4MN5LDed#P(ojo
z!Og?N4XFYZnd=oq9M>>qfqP;ipdKvZlgDY9}ah$yf*G6=vMWGoH}IPK@<;o}C)7ZNr4DF^ya7B$531l{hmwGL>;Yd4Aeda_2faPrzi-4+Rfl5se}UP+$g2DQYcLCfYP6q&*0tp*e5%oAv{T}nX^)KLV5prQs7i=w!KBB*u9
z%jCdR&oo_0O-^{a&Jrf>`W{e11+BsdZB<|@ah!0c3*7QnVDaXK^c*0`2t4YoY=*-vDp711&%nRsfX*a2G(e
zfN!D{26Y%&963FCLG!_ap5O!m>clXD=UN0n`54rNh6F7r3ryfuVv<(mcVqx99E0}v
znWPnY92pf@N)$OD#RjOe$fm&K#>=L_uK=3ftq1o7S;2K9xEIKzzyUf~7+iO9DzG}%
zD{z7P-Jrl!5&-qOLCg1;LE$c-zzynd34k`Ff_5x0g32Q%Rwf4p0ewa$C3ffrcF>?b
zbTulf4p4n80P316a&s$yu3}MSQDC3WxRpt)9^CE+l~14s2&H~yh5D6fuQJD#WGR7e
zMp0sRtcUs(Q~(QrN(L4MPoLLxUYHyd!0DRVk<%YEpU3Pd=ntAhcU*zT
zTaYcvkfaaFZ_uRQ0vqH4_i{mFUp$~(03YdKkyhjetv4xA4nd@j2J
zI9Qh}v4MK_purdgjzVxcXF;TMUO@#GeMZn`7f^T6lUGoIO#w9e4>`3A77m~VQOqS-
z;Qq7%_*_YFV6bq*Msy&ZVNm%C8z-&@Rr&+qZ$+rL>(TrT8t(z^A5&s>{LkpdD*!qJ
z2-J!H&sgZmD?o~`neq4<;puu%t%H$>iH>dX*+<2BkK*oTQ<%RXgd2l_GbQVuNXxA?^i}Nx;913d4L)xjJ4z&O%BZ8*pG?*ZH
z5_C*VKa&zWXoi3hIs39pD{?M}bOJB*gFAs5Oo(jHF0BL^*<)sLU~*7kRp11bgq)yF
zZVVuMr~9sElHdllC?TWC?9&TYGbz@C`zN3p732verYr>xPXrbUJ58MfreR;OK<^Q21ZC-#aM`3RVgYU>%dctD+rr`3Tn{CPS7DK;1Z4rRKkId
z))Rzm(S#IVNP#hdmzNO|W{Nyenh!#QMzYYVKcprVL=BRjdT^XU+g2UCph*VMq8&wO
zX~?a}4<6xEWP@Z60p#ofKI(66`JoW-A6pWPgDA`Jk;Oip&ZE
z;38IuRe>Ak34V|#I6$5Nm1`^x3j7KzprpVLHin0XyB^d|VJuVt4FGXCa+E2td4i5u
zfhuDIZOc|@&rGq9nJyv1c(5o0e%G*km3AbxAW9;Ubk1fQ`2
z3KZ0q2EPK+^u`&?!qWp7Sy<|oKu1z5u_-WvItbk0WVeG!31%o0XtDupA0xtOa6ST0
zQ-Ixo9EI$lSs<`Hs{&URXqPrS%s1Rn-*7^F!wqT)Gbo62gKqK!sWf9^0TufY_i#*4
zXlE4xEk+0V6=aqomjZ_(n*ygI6C@*nJ9m(EJ$wpE%Rzy_ttgN7)LBl(s5{((uPh(Nw2kmA8&p!#Yg2u>sLETu$WEiI+ivlzFUJ+0_
zLFuXS=`+fMrl9^q$_qXP4p1iI15J*CMxsFF*K`plCWU%rafs0hY>FI+{usD*qW~^l
zLBj>0`BTusQ1A>Y<=$fsC&PcBhBhClKmpCLu{bF3L0Tq!ptWS6Wtd>kaXT|9)q`6s
z#QO@KbdXYxD=!ld_#jJA6;7gub683g(IXVplmRuUKus`WBNWt{;R7WyHfR;br@#TK
z!uS+eLA5U=Gfh`?VUn*$7AM~0A_}}_Of$fT41mTF6%-Xj1THdzMod@~!4v~%-8rbI
zxnlKj*6b0}S
z`W2voCh$%a$7#I{0*=xG)u0`EunpCq0X)#EevrM0ZQ!sasGtHHw4EMhCwM(LkHeOn
zfwr80hX}z7i@}nBk6cnfic?e_&j!=XQ
z^+CE`(6K9U)RG#GW=!>vgaMkvf>{m?Gg(mO)WHiGoNC|&58Z==e4k&{-973UZ)s5l9ng
z6(49LvrbNyqYh}9H+bRVbgrFDQuT;kFA7YM*a2;gfi6d5f(JDdXtNI+D8@v{D19>yXx|Lc`{Iy>_!-nO1FyW1WN}bX1lOC;{v`{fEuz3Nea}uN
zIdkwdD(FB9aARNsXr`K}1a!P9Oqn!j{u#7|6ntSCC~lb?6vPpWa6pa&ISj4V;g?nv
zfb<186vRQ53@=Dg2d^TRf+DCS0*&>FLzme=+BM)_1-_CV!^nDqW->c~7X2!47Ao-|
z`WT>c4b;;35AI{Y0su6UYsNGIv|kN04w8AWK!T%WCOK-Sf>Z`K1+kA;}fotLgwX%rTQI=;R<~rg~;jV}MzKHA{&{fu&H14LoGSp;-^w2)SH|
z3(|Il1qNt^6gYWtC~$&uIkZjA3+c-$GC4AUy5fooypY`{pal`&6NeyeT+nslEDn%0
z4-=S_&=P$K|p~?
zffb|>G^~v5JT`6xh+|klD!|D#ONrT$G20u|fraX3Rp6=5Qshx!0e57%KA3iZ5T`@oh9f*Q1f;7|Y!3WA&GJv*2b
zg+UAE88tzt@IzK0gUdb0Iu&p^xr51(r9|KWqk=FCXrU)`ciw(RSW*Wqki;?hX05`@
z1!_cs7SDi3(!txXtQi_vaZlHQG=L7dn67<*Nu^!^y0k$9)a(T<={p8_xHX3%7M
z!VC#f&^F8l7IUU0ASD;TnwLO~1FsCf0Tx+$q68G#Kon>i4BWT^uiA%>%tFo}hYIgK9!HzIO*^LTn{MJL3+Cp1(@b=bvCM8zT1hppU2uE1E^#W)*
z2h?yS=pfjhQ9*>OpvMzI+o^(}lmcpy)N_GYpx!Mg`Ex?{w=%-kx`4aHpuv2k(-c7s
ze9-n~@FXT^%99m*>jgA?K*Rf>q3=!LP}XMz9n8fJ@(HLN3z=62?RlI4y(ogQUXvN*
z3}ujZ$U&B%F;dWKB2ae$)UTKdUg7^{J!sQ3Xj&b#fd;DDQ3bSeAM63p_9f7HpP(sZ
z=vpgKk*J{L#2_#YY{C)H@n)dOCR5NcvPvoepw^s{n}SM>qLPBLf=UoCXtgaQB9xc}
zrn7+3guo0I1y;~$VxZ04%!RJJ;Kk1nMFKNexWSSIKArqHpmdHV4qOolkz-Q0}oZf*rG93~>%xDuoX
z$&KvjZp7w9Ii@V!Zd}(qeZo;DG0?Qq^p!`MG#MMGUp~rYX9sHeDY8J%(gYoF$(;pC
zW}wvxjG7D^xXqa_aD#e1KbSyS2(-ZA2WX`a=qLnk1(xal$CxBRmwRO$W3sLXEsz8y
zuMgZ=0y9}~7t4I0UI+LPFGWcO2FDi2!O5T!6x2fom93E733GT9!A;>AJc@jfLroPV
z!DTn-qz(lp@HL{BVT)6hnCl%`!OL)6c^N^&Xp9iWY-UW54QY^RLeP>NJ_TM!R)JaI
zBn?>#$Oc;e3fjKg0$DN&s#p^!)wMg2SoFkG0gz&
z)?qLMpSWEQYNm96j@)JdmDP@n;Qkq-8B@a~(4p+ipq2H^jw?8F6d1D{Z?I%3DS#$-
zg}{qnK}l9YL0~r6O&5BZl-NLv6*NFv72!KsAX{|7TV(`pfX*ve!2y~GWn@)g16>Dg
z$PF4;f|Qw{Y@^F?2E2-vQ3G_=vm&^ZI?&4m8kEIMK_b#6Las1e3gC!z4yW0mvLO
zxUdpZ0A2Mv2kaEk5+t_iQ%^7{#6yJVvM52@)~tMzJj^W2%*;G2%q%P{U?vMQE3*PK
zGYbP~0^1~;kBNzinHy4`fHna$z|Io~_ly}KC$oa`1HZtm=^`hYl
zDsU_EC~!f}T?O^$uYj_L0<#jE854NDAt)`LSP!~<2~w6o3p8jL-q^vU!~r@%PM6`z
z4p72m1mEienF$1K50nN)_k|ryj!bT#mC&GIQ(#trocje?9|GEo3raN73akQ<&}i8)
z{n|+;RT1!NDvsg-y8-m
z@j(ZbGMj;phXlE~pD9a;$FV^{0c4=6BZH!#0-GXeojNhK+2+{&_1gP}~ra>ce&{#nX
zB7x%`$q>gwpzRji3NX`nxj~&YxI1_`6~JbJwts3cHPlZ6C3@)2DE!Hl51d*-!=NOk
zavrcY1*Bk65CUZeMg;{8rUjtE$_KqnAodInQ1$~=Y@qZF+AGj73AC;Vyk3r3LBVkj
zcqR=LV9*2&^P&PX$kRojdJf?|}BTWk4n#Xa?luW~c{kAr}I*
zE5R39fwhA=6QB_#u1lx
zAMDt{WX8k+o|{}3ArKc0C>h=1&<=z^rEv&O5mymoapdckIi=QF0Ttb
zpaZLUAa`kjhoe})gH{?$S9m~YqJj>%lNY!K8gu}~2>5DuP*#I1Vt3`O2OVMtIrfy8
z3QmYe7h4JH*{K^`G)P`Yc
zLy;RKDWIsL!0adiI=LHk?kl)C4jx0&%yLwRG%p<=fF_7kAxAc-m@|X7DS{RUd;m40
z6a>wf-hdfAW=t=@3?*|=qgH_tJUZRa1m5riGfMz;2vt3(8NvxWg$i*Q3;6mK@CYiX
zaNPkqA__FJ3m(e_9oY|R4?{0q;S~pMm<1n~3Mq|2=PQ89c7gfO{pO%kMIo(zSj`1G
z3B&PoL#IH!Bcnh%q}|D^0BU>V)QBje9ltkp3LxooVo*@2RZvk-bz%qxEj(0mVo*?3
zP;p`iT3b`ApsK(mt*GMIzzC|J6jTC~m=u(po-l-fOLE3~(8#Kif{N1<27yh&3XI?Z
zaZP3kC1nK`&=Kk&3Fy)L3d~uK|3RlpfEscfwaXnDOB^RGZ4=1SXJD{q0G$!a1Zq?$
zD=L8oGngC{l$r`>b&k4LpyMz>okx&~j*Xx*7GWJha1ds|0+kVzUVA|ih^U?%f5Sqs9yBxts^B5L
z0|i26fKONg4|_ZQhnWF2g4>Y`MfUvIBt1Hkt4A$?KrIbYN>+Xz
z0i+=t@a8K91=QjWbch@1Vl-X`#7-;F<}lE#mE#u{@Y#>|E;A``ECda_F$gT0u8=Du
zQje|IfG7*W>%Vaf5W&WW@YPT7;uUntk_s;;4;QGMX3${z02wF$FkN9b6N?_GH4a*W
z4(Y^0SfJ?`5DU}x=?zzyBXZmc}4?V^Qbf(W57)J)$qlceKv}Ol*Moj~B)g|nR
z6i^`rjxW&dQJ^cguyy&AKv#jm`lU)tuvB|u2UC3(mRUaVpaG;pMVa9P&l$l7O8UW$
zAuzoM*<}r~9(;d1q{$5(BVlpi;Z}pRPCz5epuq`VMRo;2P^%rZ!IK>{f+z@H$_qLM
zO{5;w++cE0Ks#m$obN#=HEA&IU{qoTwO1jlbQL6!kAaW?w-q4y5jxQZ8t;Iv5DKLZ9aD%URE#E_6M+V^K|3wM3u!^UPjK}Mx;Tyv
zvU-*oG*JR>et_J_3|c+Q1r8ujUN&%L2wBmh!0*P(0$P`n4eBg`hGn@FKuH<4*a~ty
z9kH$lZ7D=|KIm3>mMldM1+cq8^IItH2F=ECfEF}E7KD~$DS@QH3wC%}L2U&V1r|?U
zR?s$h&;k|EVGjIm;Pv>7SqMjh`<)UB^^jAvG??~)#-czSv?Dvf_b-4qz<|%w;sH;v
z(?C({3_J@^UM%3z&cp+h$M@oUV40$)XDQuuyM>2yh`9ylkpwD-K_jq>!DAqx33RZ-VI#>x^%_hXN-Ut`zJ(lhKtpk$X(J}kAQ7m5WN?I>
zNh-*|0Cu$kxY;WNy01V$K}bOjbbEmiXbf6IK?u~m1-p<#iCvcga)K#4XijGqE6Bkd
zir{6yELIF>z-|_T6vOrGjwayc4s@9?%J@03dO^8g^(Z=)Bxs46vN0MGv^fv!&%(qK{no%Eo=qyU*QU;%YX
zK*0fN?14)J=x#X3vY0)HqM#l;hy_aRh*}eLDh>FQCeWe}@a{X%sa3rb8U!2#1gaId
z5XVV^&IW=WnaKnl9Q?qO<+uQ(lu@7>bie_mVuc*5$pl?&1D+ppTmn*6&n$rJTunU3
zYC@7Z>NEyu&nzfJ!F?uZhXsE6+lBS8nt45V^c5WHpspb3=1QDC2jwF0iF8oEL)VTl
zpa%i8@&+XlPh-iG!URmfWQOQplv)%
z3<}(!#)pgoc#Wlm0yk((TSS2ywk`!U*~thhJQU2C!45|{B^7c;D!7rc02DQ#GzqEd
zp#{Z>9g$2~0+2*`W%nOf+8hA&5{w%u*CkkaTPS_t(M8IHw0XuuFjU+i}!OI(9(BK?9pOmT-d3
zP6Imwv5_x(vH^!1vlAdrufNpx(ix#8l7X0KSywQ$Lfd$rV0@VNlplS}Z*d2AF5tFo{1bCnmyp_?B0o2juas=B()3GXYDhPrba~ulnj`a#Wpt>8(!Y-OD4eUrI|o?!DT55LN=fAgSOd$oDJ#>g7z-3fDi2eEvZD(0p7a6qre0@
zz>KpVw1*gcI8GL5w;ia}1v;VwG^Pd`&jhCd%6-m(#pg^6iad@C^@=PCID8IT)(KuG
z4qY+L51Nx;04>)Cx4}V9h5DOCfgiN#jt#Ww4(??ZkSeGi6fdLrxE`_*i-!?B7Rvxy
zxBwl?ho?2rxTFF+nL@9G+yKfB0?2K7&{_@H{%2_J0-ZkzO*LoGHvE7#sDT!KfQEiF
zn69jcZm|P3L75a-+;~|bTj_*AO=8IX>994opr`@u#lmddgHO=L;wH#&0VoYa7bzeu
zLatwR6?$BhG-%m}0#k`1C;@?6$wJ^n>R1oj-vmBv34D4K$cdnBQ|yqmiQKqnRRA~c
zAwv#mEoHDzp!+2tt$Of*F!hS;;0XcH_y~?5#piq^*K;VafEKZ_DuA8MhU{!+Pz#G0
zw9|=Afjvus8FAK)Qavbf@v?xt$fCgR$;*P={uKhX?x6_;9PIE^Ak51Os^LIW)S%n}
zG9T3Xg~TJMw+5QAhaSI$hz?biV7OjEDm%!V16j-Gd*s~Ow
zc(_KOgrc%M$jcYiY$;jW^tvB
zdLl!Y6Vme|K5c;3C?oYUm>|6j@a7>;@UdK=9s~HqG|={L(Dm(%+2EuH>Pvx>1|nsE
z=lDV44Bj>b3TV(|7sRWez}~?m4GJt)&}lr7Q;9(%_243X7WDFFQ0fPpfL4OnPXKR|
zCbkG?#95HxF2_J2!4L0mK?-otIzjM;qIyQu5*)Po3>3)!A)C+Or5Q6^2P1gbsv@ia
zM=8yqX%$|WNh%1NF?BF;Ll!TCny#=dx3CRq(`9!tae$9%!xlOdc$FmJt3;V0)fxD9
z!wz0WK}bdeRaKzk6;xY+%LmY|08o2@H%oz7Bhv_)$?TvYrN9bW7zbH<$q5=#0B_;~
zP2qx9iNf*(XfK}`Qv>MaH_$pu9`N!|Aq4>iw&^#XGs)HCPzSaFY&dvJAo4aC(0B&8
z$O4@V0SaOzb_Fib5>0T-f?UE1T4D<7O@m_>wB{Z(vj|S?3ZO$PSV5~lnZeu0c=#b}
z^_f8lW_sfbCb1BZk&3Wd7witGPeE%dIYFxgAQNODuR@l9V)KJFBLigZC&aI>Uohz+
zDmO%tfgC*6j0|AwAp3)u&^B=4^ft&@;EI_Q+!g~7pe3LD;5{Xv;a5lnpuhyK+(BzS
zAzp_lPyqW}kpouj$8+V2d#ouQ1U=lP{INZwCWF>NLWGhH4F+;kR^nm{uYaZ0Hl=$8fyb}DM8^04k0Db
z@ET}Y27HkvXjL%|BS8UMk7O!xf&v}F2)ct+N`YO01GJD6v`dT=w1!cMAJl18S`I!e
z4xE_4+qGcMV^LrSDF?M|kR1tM_sOil0xmk0*g==FFh)TZe)21T4FTDu$N>(1HbwBw
zr=W3fMo`HKTI!^#2X>W2#O%kPzb8z^w2j;auVbOhW_5m*Y^r|tM*2UC^`g8~yjw`2Q;CILrr0X+up7lup<^-Oij42~1H
zvXwZPKnffic28%0%T&rZcY5_(rcI1rryuysBr@IU9g`FP)Sd7I>6Ravf<-s7fVM;b2d!gc
z03`*8!i6816d0FHKlG7Flc{0%^tT_GiW%2WPyEE>!nkqzvQJFTj2oxF`o!d}Idco>
zZk!XG3ScV~m>f@VW(gph36T)^I(^|sCXwlRpP8f>S4{8x%#GpO60zc8gTexAPa3zGuliRq`lFauJ(&5o$=K4zF$nb!tY=Ki{yuMznBzQSAjY^)8GGM%4PgEJ@z+~
z7US3H9lx238NW^61K~aY&E(AZZMya!uw2+5ru8BlS-^M6fT9UBk`IX`$-hj>jNhi)
z{spVc_{-#itd0R5^auVjImoPF0rg8Ym>O7>m_bWfTUeD?vXwxG3bA@lm-)w}&v2CR>EBh5s`-FgAdeWcXA^W;@2OQyZAI85^dqVU}S0I`t^Ci}bl;tpbjx
z7z7eQ3Hb#pNLXc&kn`H#%
zA?XJRIyV3B0Nv^{Z~7y4W;Mn+(>Xbq5#0
zV76skFkOX{S%-1X^gvE#4aSAjt2mhz80So%&B<)VxM2DdJ)aA9$Fh85d7q$IC3qbYsGFbzWvursnC><9L}REO2Y=n%*Vg
zs4HLqOD1fPNicTD2~1f6P}6ozpMH{;S&8ZL^yzPTnJpL>OjqLrJKCL(*@bb=^d1mz
z(e$-^%r;ET|EIs^V^(FHGhL9MxtMX*^j3bb(q;V2T8s;)U*u<2WSleoGe5Hv(~bGt
z%>|g57{v}v0Ift2Fi-?02&@v}76C0P
zV+7rp=?E2EEyAtC$XKt;;3$wi-B5zrp#B0k6L&o$sPS`wAzKO5_F>Rq5&%u1ZeUVm
zRA3eW-6jlL-yzTho)84hQl^_H58Z}H9&W!f)6ogaD?1GEFcHEzsa$O
z5oQSJa?%z?H2pOo?JVF^{S_EMm!^SEI)~_7f}(Q{W0n#lcu1WAbYHP1(-cOKR?y{+
zpc!EX#|7XkE(PQq859{oU2bLvrg~6Qb_X-~j$cRceN20pc^RcZeI*7Braj>0)GUrH
z*&0j?+@Rf|jwLyuDI`bc5=Ca{l4CYE(3rOZOO_4;gCm0s{*ql2WbCi
z6zGB{MMlu7Cj}Nqo@^yhX9cpK6*MjeavC#aLR*2^5xhl5z(j#HTY<@uBTHZflLE4K
zP_st?94p`uojzM!L5j^$BqvMY!1PEdW_6e*(0I!hCeYdEpd{tEf+-6sGkt*+vq(ME
zXppKY%y3m5pbQAH6eP0*Au|J2<_p5)7jToAKzdpbdO+=dBzv|XWL5|;XMtC?f>Ia6
zAI{RuDxf>d9Yw%L!-Dq@Nr0>7z3vQB}PXLkfR+Lv-y}A7#YEdmIoBo
zphO1h6@b>lD1fF+1xyqe9aTWKfySL=vXodrGd2vMBPt-L5p#prFM;oUs0Ur52indC
zx}z@(9Dj`2(4)2yK@PsMT10`tQ3hnbBNKR|83S}nH0XXSMo36Qwws**-@Lp9obJHW
zp`Z-0hS3~!z_+6YC{i4ARwtaW5CQQ{~Dx95m^^
zb21&CI(;^h*d29dPWcB@I|UqBQB8h>ZnC@%GbhvYsnZ3O;U;o-fCk|^Sh5`3rcL)j
zh)>s5W#(jCJgrl}k#oAE60^wk`O3_kOlzl2U!~0K%h)#khcdGPQ
zruQo|OLF~O-vw$Srz^2cKd8$rJiS4M*_Y|*jOpi9m=)Px&u9>EWC5GT54ZLMNKk%y
zpboPvy(-0rY9IPvrM0<%52Q|XZl%H<`TBq
ztGWandBFyPqm*qWNI(c80E$nUJs<&;$P-?!zziz*1!jO_kNX7ZqLdTNS&mm$O+To{
zEW-#9pAL#u#<$adt1(N$gg{Zq)V6xMy*9HV+oaW90*=BUt7N5@f!HXvfR5N&0d6RP
z(;GP2_N|`2L7iCwW*R8kq_2Z?iX%rGsD4D!368Q4U^^wa;DPpMx`PI@5>v~X=>
zicD>5rnhS_Pi5*{GhI%TS&?bln(2<3%!+KQ*K`Ruih;sbbb75OGl$F}kbsl`Qsw{$
z#|D^7Wf<>G->1ne3v=UpO=eE%-)p)+Cz~K?0w-;l9%1ekpdehqoaH!U?Q}yeW<|CY
zYr6y-#X(^s0*)8y10Vqud%<}KZp3s@WHH{EzE%q!+A8pf`3cf2Er4V{y8^Q#+|9z^
zsA8J74rI5)bZ{gsTL+1xP3yV@97RD6k(E9HVq-)Sk}=>&daw=@Ng~rPXfw0GLIE68
zKSA0tVhTw+IHo482U#qE$W`;!Pj}Q{R%BYX9uie6)=!_T!|cJddj0gfps3rmewr?`
z8PnPI(=BwFEtwudmA`~4f4zRXoiVc#X#Ju?w&ThrT>_5G(`${HrKU6LF*`FI|1jN6
zk6BUf!P^#4sma9c$gRlX*m|`b33vrvN|5Q3Fa|Dcn?m2d5jR1?_W%}&}UxBwr_ryfTJ8JNFi~{
zeitNgngJpLibtlE3#Mxsz)KhnctqY`&?Vrgf*g^|ju)7+6qvz%3TQD5%6Cky3#TtI
zV3uHnmaw2Wm7c$_3pDnKlng)zOM!YLP*vb^bN9mOe1-^}dFsrZ(oaD;<&o{>h1v<#
z36A-;Mbm4Lbb=#&*`h81M-611+^9N1kuH4!q)HE26)1xsT!u&u4U4BM8X;TE_2*@izFGXVnEy897T~NRvFmw7_
zW9C@Z-5iSGlOsT#v>B|hJ~pT~#R%QT&g8gqdcFy>s0=n~(Dg7JS&nO`FEC*iwVT1J
z$N=i#p&AD2i9ig4sh-pT8qrCI*#_DP#3(Rh`ezg7T*kH26HS=|>;G(Q6HsJwWZ@Cy
zcD($&8N_Dg5#V;*b-fA12A!wlxOHzkh|R{s$L+ZMd@G2}%EQa;IQJk_FAulltp`vw
z+}w@}zJl4TJY3w4uMUIRoIIS|j&JWm*&N)C?+<|4{+vAQ+>S5SfjOL@X}XQGplnud
z#|d}A7I1=c*6RCUaW2sG*6)*GM{w{kaXT(J3U)LH4NV$0BX)`G-Fn_p4HzW;HWN;4jp~w1+`oS92p(oET1f(z$mbX10eyPJYX!<74FpKl=1x?U1STjZ_Feoqy?3*59!#oo-LfQao)HNu0ZvSD!%*DdEdpoluvn(SI
z7X#>E4bTt>_xF0)TE|R|MVN#h}5&
zq6EG>9XMRiaBrA1I{6Wr_M0O>q1UB+8ZB2z(uS%C@cKhQa8G78L~o&tk_8))6I
zg(4G(GJ&)$H9)--4JHGSkPc`D$*@2IneA*N6qp@vfch+;UMYhkN0t&O@q*n3x~h{I($4|)WEenuvLwuz
z7(i!pu!2g92h-(Um^G$*x-iR&!p~-B^vqIXaAZ|raAftKUhTqsN8$je+W`(je(2bR
zBa`luFRHYzWu5^VC-rw@)?++Az0Qr`Y_b!UDBp`xZA
z@?h>`JT^VhlX*SkvFVIn%&v?lrn`GFPvbt#%*Y8E0C~WWJ^hU$n}IAmf><5-;1LA6
z9S0Oa)A_%%iHl;y2VaiBf$0w4*(4$q5b?oc#srU#V~n6cghvRt&94DEa};ZYKvbi|
z$n>33a?*@!(-Tctgt-nv51Cb9nV##+EHmBOhnZ{oi|=fF_0auOpi7h#L5oh9z$-``
zL6;JM`#i7|g(L1j{=gS^Ea13f01fN0a0`Q@#PPtX2?CCL&P)~%I3zOtzYnvH7(7Cm
z{6VvZpvBls0!ODi`Z8xR9b=pxSjT8ReU5Xh{_cx-xoD6`b`%4TNn?e;;;_RNgOw%3O;$1^c*n|?o>
z*^Ke~cCiTNBaDnYr{9ibUMqo^*h5M~%mUlDFNk8UW@OyCT_%Rvmr>#}XhS8l0t?#c
z%H`>evCNYgFHRSUWA11Cy?sd>^KwT13#TW5&T0dvZO0R*r$;3)PiFi%{apgHBjc~>
z28qlPj6b$}CNeWKGX9+Io5ZZk_-lG~5_2}+ul*B12N;8vfv`IMnEoY+c^9%6qvMb1
zdy<(?GG3kDmcnep+R4PoQa}B`aaL&)&=m@x4Ja(2$u`j1640V<&A6UEe$Fb6_^}h{bHzrj$q?Jqah%h7!+9?>lGP6S)Bt^jY@D>Gjb>}
zfY!T$ZjWaK1sc1fK$gn(9VyH+85P0na9JH6Fk~rl3e-UEYRzzD1TCy^e8G_AIDL9i
z8go7;T-o#wnam>7|D`c=FixM&p3dB_36;{wQsMy{z-h*0pup;Akmb1YQj35ipFlNe
z4>_p%*q_cUp}Gqs2w7GQny;NApa9pFp}-2ZbKj-uf6|$gnf~mWo|wU`Cy4Cj$lD>aQen9<}{GulG)5Ej0dONWiuzpAuGMu`jL1Qt+*TMs!uj0Kdy
zVV(h7X94mH#JeCq$W8|Z&>$xB1SSYAp}^wU0P(+)usL%FqXLV9usL&p0*fOopqMoj
zKs(VLH$d!#*t!Cu2QDX=)sm_DV1SsfJPFi$Z%o?!#YYygjqf`W(H@d#LC
z1tVy`Iq0-cX2(5Xkp)O1TRbbysqI=ju7T;h1jaou1ZUKpCG6z7+oB-iDK#ZIL;(?MF+{if~5lvu(%!ndNxk>h!tg%!VRIU@~(+GD|qK9M4>x{-~UJ8)MV-
znH9_m#us1;dO(Iw;mC5l0kIzvzR;4z9DMwR;{u4CZz`CT_?{puyE9#;lG(%I1G3l)
zn8{$Pzrc7WKn}UUndSHgCISlD8=P5=E!U!
zuZ1hiaRIX8B{1nJpsw~Dt}Mqj$kHpgvK)6H3v7U?U%;gRKHOsmmjd(jv?^u+-y_KC
z_Q2Gw0hxOO#ybMyo#D!Iynw9u3QYO}Ncsj>mgAl6hpU(;FbY3_$vj{MRbZ@HjxVOC
z)iApWe1YhPL^-SDo9TOMn3V#4z?8SJE3i8DuxB|oT!)7Yi-Hie*kS_Zi3_zZZjWYi0A27hgGq@4+-TBZnlN3jmRVd9T(opBW`P<&Z&*R(7jQfO!1Ux=
zW>wywzuG}PZg)`DXPUlXl7y%|xM%}iBF+MyNI%91n%qc%_K;XKmfC1z