From 8d30fc45c424ed1aaf9067436a64b0744619c250 Mon Sep 17 00:00:00 2001
From: David Rubin
Date: Fri, 5 Jul 2024 03:49:09 -0700
Subject: [PATCH] riscv: implement more operators

we can run `std.debug.print` now, with both run-time strings and integers!
---
 lib/compiler/test_runner.zig    |  16 +-
 lib/std/builtin.zig             |   9 +-
 src/Package/Module.zig          |   2 +-
 src/arch/riscv64/CodeGen.zig    | 206 ++++++++++++++++----------
 src/target.zig                  |   3 +-
 test/behavior/align.zig         |   1 -
 test/behavior/basic.zig         |   2 -
 test/behavior/call.zig          |   1 -
 test/behavior/cast.zig          |   2 -
 test/behavior/destructure.zig   |   2 -
 test/behavior/enum.zig          |   1 -
 test/behavior/fn.zig            |   1 -
 test/behavior/for.zig           |   2 -
 test/behavior/if.zig            |   3 -
 test/behavior/math.zig          |   9 +-
 test/behavior/null.zig          |   1 -
 test/behavior/optional.zig      |   3 -
 test/behavior/packed-struct.zig |   1 -
 test/behavior/struct.zig        |   1 -
 test/behavior/switch.zig        |   1 -
 test/behavior/threadlocal.zig   |   2 -
 test/behavior/while.zig         |   1 -
 22 files changed, 124 insertions(+), 146 deletions(-)

diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig
index 25f029a183..cf8d8df3f0 100644
--- a/lib/compiler/test_runner.zig
+++ b/lib/compiler/test_runner.zig
@@ -271,6 +271,7 @@ pub fn mainSimple() anyerror!void {
     };
     // is the backend capable of using std.fmt.format to print a summary at the end?
     const print_summary = switch (builtin.zig_backend) {
+        .stage2_riscv64 => true,
         else => false,
     };
 
@@ -282,11 +283,13 @@ pub fn mainSimple() anyerror!void {
     const stderr = if (comptime enable_print) std.io.getStdErr() else {};
 
     for (builtin.test_functions) |test_fn| {
-        if (enable_print) {
-            stderr.writeAll(test_fn.name) catch {};
-            stderr.writeAll("... ") catch {};
-        }
-        test_fn.func() catch |err| {
+        if (test_fn.func()) |_| {
+            if (enable_print) {
+                stderr.writeAll(test_fn.name) catch {};
+                stderr.writeAll("... ") catch {};
+                stderr.writeAll("PASS\n") catch {};
+            }
+        } else |err| if (enable_print) {
             if (enable_print) {
                 stderr.writeAll(test_fn.name) catch {};
                 stderr.writeAll("... ") catch {};
@@ -300,8 +303,7 @@ pub fn mainSimple() anyerror!void {
                 if (enable_print) stderr.writeAll("SKIP\n") catch {};
                 skipped += 1;
                 continue;
-        };
-        if (enable_print) stderr.writeAll("PASS\n") catch {};
+        }
         passed += 1;
     }
     if (enable_print and print_summary) {
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index 0ef5cffd24..a600f055df 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -775,14 +775,7 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr
     }
 
     if (builtin.zig_backend == .stage2_riscv64) {
-        asm volatile ("ecall"
-            :
-            : [number] "{a7}" (64),
-              [arg1] "{a0}" (1),
-              [arg2] "{a1}" (@intFromPtr(msg.ptr)),
-              [arg3] "{a2}" (msg.len),
-            : "memory"
-        );
+        std.debug.print("panic: {s}\n", .{msg});
         std.posix.exit(127);
     }
 
diff --git a/src/Package/Module.zig b/src/Package/Module.zig
index 02d9921016..371b8e9816 100644
--- a/src/Package/Module.zig
+++ b/src/Package/Module.zig
@@ -159,7 +159,7 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module {
         if (options.inherited.single_threaded) |x| break :b x;
         if (options.parent) |p| break :b p.single_threaded;
-        break :b target_util.defaultSingleThreaded(target);
+        break :b target_util.defaultSingleThreaded(target, zig_backend);
     };
 
     const error_tracing = b: {
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 13713ccb84..ad5bec87c6 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -51,7 +51,6 @@ const InnerError = CodeGenError || error{OutOfRegisters};
 
 pt: Zcu.PerThread,
 air: Air,
 liveness: Liveness,
-zcu: *Zcu,
 
 bin_file: *link.File,
 gpa: Allocator,
@@ -264,13 +263,13 @@ const MCValue = union(enum) {
             .register_pair,
             .memory,
             .indirect,
-            .load_frame,
             .load_symbol,
             .lea_symbol,
             => switch (off) {
                 0 => mcv,
-                else => unreachable, // not offsettable
+                else => unreachable,
             },
+            .load_frame => |frame| .{ .load_frame = .{ .index = frame.index, .off = frame.off + off } },
             .immediate => |imm| .{ .immediate = @bitCast(@as(i64, @bitCast(imm)) +% off) },
             .register => |reg| .{ .register_offset = .{ .reg = reg, .off = off } },
             .register_offset => |reg_off| .{ .register_offset = .{ .reg = reg_off.reg, .off = reg_off.off + off } },
@@ -737,7 +736,6 @@ pub fn generate(
         .air = air,
         .pt = pt,
         .mod = mod,
-        .zcu = zcu,
         .bin_file = bin_file,
         .liveness = liveness,
         .target = target,
@@ -946,7 +944,7 @@ fn formatDecl(
 }
 fn fmtDecl(func: *Func, decl_index: InternPool.DeclIndex) std.fmt.Formatter(formatDecl) {
     return .{ .data = .{
-        .zcu = func.zcu,
+        .zcu = func.pt.zcu,
         .decl_index = decl_index,
     } };
 }
@@ -1325,6 +1323,7 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
             .mul,
             .mul_wrap,
             .div_trunc,
+            .rem,
 
             .shl, .shl_exact,
             .shr, .shr_exact,
@@ -1344,7 +1343,6 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
             .ptr_add,
             .ptr_sub => try func.airPtrArithmetic(inst, tag),
 
-            .rem,
             .mod,
             .div_float,
             .div_floor,
@@ -2151,11 +2149,16 @@ fn airTrunc(func: *Func, inst: Air.Inst.Index) !void {
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     if (func.liveness.isUnused(inst)) return func.finishAir(inst, .unreach, .{ ty_op.operand, .none, .none });
-
+    // we assume no zeroext in the "Zig ABI", so it's fine to just not truncate it.
     const operand = try func.resolveInst(ty_op.operand);
-    _ = operand;
-    return func.fail("TODO implement trunc for {}", .{func.target.cpu.arch});
-    // return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+
+    // we can do it just to be safe, but this shouldn't be needed for no-runtime safety modes
+    switch (operand) {
+        .register => |reg| try func.truncateRegister(func.typeOf(ty_op.operand), reg),
+        else => {},
+    }
+
+    return func.finishAir(inst, operand, .{ ty_op.operand, .none, .none });
 }
 
 fn airIntFromBool(func: *Func, inst: Air.Inst.Index) !void {
@@ -2305,10 +2308,7 @@ fn binOp(
                 80, 128 => true,
                 else => unreachable,
             };
-            switch (air_tag) {
-                .rem, .mod => {},
-                else => if (!type_needs_libcall) break :libcall,
-            }
+            if (!type_needs_libcall) break :libcall;
             return func.fail("binOp libcall runtime-float ops", .{});
         }
 
@@ -2384,6 +2384,7 @@ fn genBinOp(
         .sub_wrap,
         .mul,
         .mul_wrap,
+        .rem,
         => {
             if (!math.isPowerOfTwo(bit_size))
                 return func.fail(
@@ -2391,6 +2392,15 @@ fn genBinOp(
                     .{ @tagName(tag), bit_size },
                 );
 
+            switch (tag) {
+                .rem,
+                => {
+                    try func.truncateRegister(lhs_ty, lhs_reg);
+                    try func.truncateRegister(rhs_ty, rhs_reg);
+                },
+                else => {},
+            }
+
             switch (lhs_ty.zigTypeTag(zcu)) {
                 .Int => {
                     const mir_tag: Mir.Inst.Tag = switch (tag) {
@@ -2409,6 +2419,10 @@ fn genBinOp(
                             32 => .mulw,
                             else => unreachable,
                         },
+                        .rem => switch (bit_size) {
+                            64 => if (is_unsigned) .remu else .rem,
+                            else => if (is_unsigned) .remuw else .remu,
+                        },
                         else => unreachable,
                     };
 
@@ -2423,14 +2437,6 @@ fn genBinOp(
                             },
                         },
                     });
-
-                    // truncate when the instruction is larger than the bit size.
-                    switch (bit_size) {
-                        8, 16 => try func.truncateRegister(lhs_ty, dst_reg),
-                        32 => {}, // addw/subw affects the first 32-bits
-                        64 => {}, // add/sub affects the entire register
-                        else => unreachable,
-                    }
                 },
                 .Float => {
                     const mir_tag: Mir.Inst.Tag = switch (tag) {
@@ -2627,23 +2633,17 @@ fn genBinOp(
         .shl, .shl_exact,
         => {
-            if (!math.isPowerOfTwo(bit_size))
-                return func.fail(
-                    "TODO: genBinOp {s} non-pow 2, found {}",
-                    .{ @tagName(tag), bit_size },
-                );
-
-            // it's important that the shift amount is exact
+            if (bit_size > 64) return func.fail("TODO: genBinOp shift > 64 bits, {}", .{bit_size});
             try func.truncateRegister(rhs_ty, rhs_reg);
 
             const mir_tag: Mir.Inst.Tag = switch (tag) {
                 .shl, .shl_exact => switch (bit_size) {
-                    8, 16, 64 => .sll,
+                    1...31, 33...64 => .sll,
                     32 => .sllw,
                     else => unreachable,
                 },
                 .shr, .shr_exact => switch (bit_size) {
-                    8, 16, 64 => .srl,
+                    1...31, 33...64 => .srl,
                     32 => .srlw,
                     else => unreachable,
                 },
@@ -2659,13 +2659,6 @@ fn genBinOp(
                     .rs2 = rhs_reg,
                 } },
             });
-
-            switch (bit_size) {
-                8, 16 => try func.truncateRegister(lhs_ty, dst_reg),
-                32 => {},
-                64 => {},
-                else => unreachable,
-            }
         },
 
         // TODO: move the isel logic out of lower and into here.
@@ -2810,10 +2803,6 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
             if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) {
                 const add_result = try func.binOp(null, .add, extra.lhs, extra.rhs);
 
-                const add_result_reg = try func.copyToTmpRegister(ty, add_result);
-                const add_result_reg_lock = func.register_manager.lockRegAssumeUnused(add_result_reg);
-                defer func.register_manager.unlockReg(add_result_reg_lock);
-
                 try func.genSetMem(
                     .{ .frame = offset.index },
                     offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, pt))),
@@ -2821,14 +2810,21 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
                     add_result,
                 );
 
+                const trunc_reg = try func.copyToTmpRegister(ty, add_result);
+                const trunc_reg_lock = func.register_manager.lockRegAssumeUnused(trunc_reg);
+                defer func.register_manager.unlockReg(trunc_reg_lock);
+
                 const overflow_reg, const overflow_lock = try func.allocReg(.int);
                 defer func.register_manager.unlockReg(overflow_lock);
 
+                // if the result isn't equal after truncating it to the given type,
+                // an overflow must have happened.
+                try func.truncateRegister(func.typeOf(extra.lhs), trunc_reg);
                 try func.genBinOp(
                     .cmp_neq,
-                    .{ .register = add_result_reg },
+                    add_result,
                     ty,
-                    .{ .register = add_result_reg },
+                    .{ .register = trunc_reg },
                     ty,
                     overflow_reg,
                 );
@@ -3022,61 +3018,34 @@ fn airMulWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
             switch (lhs_ty.zigTypeTag(zcu)) {
                 else => |x| return func.fail("TODO: airMulWithOverflow {s}", .{@tagName(x)}),
                 .Int => {
-                    assert(lhs_ty.eql(rhs_ty, zcu));
-                    const int_info = lhs_ty.intInfo(zcu);
-                    switch (int_info.bits) {
-                        1...32 => {
-                            if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) {
-                                if (int_info.signedness == .unsigned) {
-                                    switch (int_info.bits) {
-                                        1...8 => {
-                                            const max_val = std.math.pow(u16, 2, int_info.bits) - 1;
+                    if (std.debug.runtime_safety) assert(lhs_ty.eql(rhs_ty, zcu));
 
-                                            const add_reg, const add_lock = try func.promoteReg(lhs_ty, lhs);
-                                            defer if (add_lock) |lock| func.register_manager.unlockReg(lock);
+                    const trunc_reg = try func.copyToTmpRegister(lhs_ty, .{ .register = dest_reg });
+                    const trunc_reg_lock = func.register_manager.lockRegAssumeUnused(trunc_reg);
+                    defer func.register_manager.unlockReg(trunc_reg_lock);
 
-                                            const overflow_reg, const overflow_lock = try func.allocReg(.int);
-                                            defer func.register_manager.unlockReg(overflow_lock);
+                    const overflow_reg, const overflow_lock = try func.allocReg(.int);
+                    defer func.register_manager.unlockReg(overflow_lock);
 
-                                            _ = try func.addInst(.{
-                                                .tag = .andi,
-                                                .ops = .rri,
-                                                .data = .{ .i_type = .{
-                                                    .rd = overflow_reg,
-                                                    .rs1 = add_reg,
-                                                    .imm12 = Immediate.s(max_val),
-                                                } },
-                                            });
+                    // if the result isn't equal after truncating it to the given type,
+                    // an overflow must have happened.
+                    try func.truncateRegister(func.typeOf(extra.lhs), trunc_reg);
+                    try func.genBinOp(
+                        .cmp_neq,
+                        .{ .register = dest_reg },
+                        lhs_ty,
+                        .{ .register = trunc_reg },
+                        rhs_ty,
+                        overflow_reg,
+                    );
 
-                                            try func.genBinOp(
-                                                .cmp_neq,
-                                                .{ .register = overflow_reg },
-                                                lhs_ty,
-                                                .{ .register = add_reg },
-                                                lhs_ty,
-                                                overflow_reg,
-                                            );
+                    try func.genCopy(
+                        lhs_ty,
+                        result_mcv.offset(overflow_off),
+                        .{ .register = overflow_reg },
+                    );
 
-                                            try func.genCopy(
-                                                lhs_ty,
-                                                result_mcv.offset(overflow_off),
-                                                .{ .register = overflow_reg },
-                                            );
-
-                                            break :result result_mcv;
-                                        },
-
-                                        else => return func.fail("TODO: airMulWithOverflow check for size {d}", .{int_info.bits}),
-                                    }
-                                } else {
-                                    return func.fail("TODO: airMulWithOverflow calculate carry for signed addition", .{});
-                                }
-                            } else {
-                                return func.fail("TODO: airMulWithOverflow with < 8 bits or non-pow of 2", .{});
-                            }
-                        },
-                        else => return func.fail("TODO: airMulWithOverflow larger than 32-bit mul", .{}),
-                    }
+                    break :result result_mcv;
                 },
             }
         };
@@ -3317,7 +3286,17 @@ fn airWrapOptional(func: *Func, inst: Air.Inst.Index) !void {
                 Type.u8,
                 .{ .immediate = 1 },
             ),
-            .register => return func.fail("TODO: airWrapOption opt_mcv register", .{}),
+
+            .register => |opt_reg| {
+                try func.genBinOp(
+                    .shl,
+                    .{ .immediate = 1 },
+                    Type.u64,
+                    .{ .immediate = 32 },
+                    Type.u64,
+                    opt_reg,
+                );
+            },
             else => unreachable,
         }
     }
@@ -4059,7 +4038,7 @@ fn airLoad(func: *Func, inst: Air.Inst.Index) !void {
     const elem_size = elem_ty.abiSize(pt);
 
     const dst_mcv: MCValue = blk: {
-        // Pointer is 8 bytes, and if the element is more than that, we cannot reuse it.
+        // "ptr" is 8 bytes, and if the element is more than that, we cannot reuse it.
         if (elem_size <= 8 and func.reuseOperand(inst, ty_op.operand, 0, ptr)) {
             // The MCValue that holds the pointer can be re-used as the value.
             break :blk ptr;
         }
@@ -4970,7 +4949,7 @@ fn isNull(func: *Func, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
         .lea_symbol,
         .reserved_frame,
         .air_ref,
-        => return func.fail("TODO: hmm {}", .{opt_mcv}),
+        => unreachable,
 
         .register => |opt_reg| {
             if (some_info.off == 0) {
@@ -4993,9 +4972,27 @@ fn isNull(func: *Func, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
                 return return_mcv;
             }
             assert(some_info.ty.ip_index == .bool_type);
-            const opt_abi_size: u32 = @intCast(opt_ty.abiSize(pt));
-            _ = opt_abi_size;
-            return func.fail("TODO: isNull some_info.off != 0 register", .{});
+            const bit_offset: u7 = @intCast(some_info.off * 8);
+
+            try func.genBinOp(
+                .shr,
+                .{ .register = opt_reg },
+                Type.u64,
+                .{ .immediate = bit_offset },
+                Type.u8,
+                return_reg,
+            );
+            try func.truncateRegister(Type.u8, return_reg);
+            try func.genBinOp(
+                .cmp_eq,
+                .{ .register = return_reg },
+                Type.u64,
+                .{ .immediate = 0 },
+                Type.u8,
+                return_reg,
+            );
+
+            return return_mcv;
         },
 
         .load_frame => {
@@ -6556,7 +6553,8 @@ fn airAtomicRmw(func: *Func, inst: Air.Inst.Index) !void {
 }
 
 fn airAtomicLoad(func: *Func, inst: Air.Inst.Index) !void {
-    const zcu = func.pt.zcu;
+    const pt = func.pt;
+    const zcu = pt.zcu;
     const atomic_load = func.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;
     const order: std.builtin.AtomicOrder = atomic_load.order;
 
@@ -6564,6 +6562,9 @@ fn airAtomicLoad(func: *Func, inst: Air.Inst.Index) !void {
     const elem_ty = ptr_ty.childType(zcu);
     const ptr_mcv = try func.resolveInst(atomic_load.ptr);
 
+    const bit_size = elem_ty.bitSize(pt);
+    if (bit_size > 64) return func.fail("TODO: airAtomicStore > 64 bits", .{});
+
     const result_mcv = try func.allocRegOrMem(elem_ty, inst, true);
     assert(result_mcv == .register); // should be less than 8 bytes
 
@@ -6616,6 +6617,9 @@ fn airAtomicStore(func: *Func, inst: Air.Inst.Index, order: std.builtin.AtomicOr
     const val_ty = func.typeOf(bin_op.rhs);
     const val_mcv = try func.resolveInst(bin_op.rhs);
 
+    const bit_size = val_ty.bitSize(func.pt);
+    if (bit_size > 64) return func.fail("TODO: airAtomicStore > 64 bits", .{});
+
     switch (order) {
         .unordered, .monotonic => {},
         .release, .seq_cst => {
diff --git a/src/target.zig b/src/target.zig
index 6ff9e69e61..9b7e12408e 100644
--- a/src/target.zig
+++ b/src/target.zig
@@ -60,9 +60,10 @@ pub fn alwaysSingleThreaded(target: std.Target) bool {
     return false;
 }
 
-pub fn defaultSingleThreaded(target: std.Target) bool {
+pub fn defaultSingleThreaded(target: std.Target, backend: std.builtin.CompilerBackend) bool {
     switch (target.cpu.arch) {
         .wasm32, .wasm64 => return true,
+        .riscv64 => if (backend == .stage2_riscv64) return true,
         else => {},
     }
     switch (target.os.tag) {
diff --git a/test/behavior/align.zig b/test/behavior/align.zig
index 0b588ce091..33e43740f6 100644
--- a/test/behavior/align.zig
+++ b/test/behavior/align.zig
@@ -16,7 +16,6 @@ test "global variable alignment" {
 }
 
 test "large alignment of local constant" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // flaky
diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig
index 05d6549683..494240c63a 100644
--- a/test/behavior/basic.zig
+++ b/test/behavior/basic.zig
@@ -16,8 +16,6 @@ test "empty function with comments" {
 }
 
 test "truncate" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     try expect(testTruncate(0x10fd) == 0xfd);
     comptime assert(testTruncate(0x10fd) == 0xfd);
 }
diff --git a/test/behavior/call.zig b/test/behavior/call.zig
index 8636955215..b89ffbaef4 100644
--- a/test/behavior/call.zig
+++ b/test/behavior/call.zig
@@ -441,7 +441,6 @@ test "non-anytype generic parameters provide result type" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn f(comptime T: type, y: T) !void {
diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig
index 6cc881b64d..816fa02f5b 100644
--- a/test/behavior/cast.zig
+++ b/test/behavior/cast.zig
@@ -1845,7 +1845,6 @@ test "peer type resolution: three-way resolution combines error set and optional
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const E = error{Foo};
     var a: E = error.Foo;
@@ -1960,7 +1959,6 @@ test "peer type resolution: vector and tuple" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var vec: @Vector(3, i32) = .{ 1, 2, 3 };
     _ = &vec;
diff --git a/test/behavior/destructure.zig b/test/behavior/destructure.zig
index 3164d25187..43ddbb7a4d 100644
--- a/test/behavior/destructure.zig
+++ b/test/behavior/destructure.zig
@@ -23,8 +23,6 @@ test "simple destructure" {
 }
 
 test "destructure with comptime syntax" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     const S = struct {
         fn doTheTest() !void {
             {
diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig
index 7972135bfa..982adc234d 100644
--- a/test/behavior/enum.zig
+++ b/test/behavior/enum.zig
@@ -1076,7 +1076,6 @@ test "enum literal casting to optional" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     var bar: ?Bar = undefined;
     bar = .B;
diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig
index 73ef9bdbfe..c0cb29e33a 100644
--- a/test/behavior/fn.zig
+++ b/test/behavior/fn.zig
@@ -181,7 +181,6 @@ test "function with complex callconv and return type expressions" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try expect(fComplexCallconvRet(3).x == 9);
 }
diff --git a/test/behavior/for.zig b/test/behavior/for.zig
index 4f873bbbe4..bc433c578a 100644
--- a/test/behavior/for.zig
+++ b/test/behavior/for.zig
@@ -112,7 +112,6 @@ test "for with null and T peer types and inferred result location type" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest(slice: []const u8) !void {
@@ -228,7 +227,6 @@ test "else continue outer for" {
 
 test "for loop with else branch" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     {
         var x = [_]u32{ 1, 2 };
diff --git a/test/behavior/if.zig b/test/behavior/if.zig
index a82d9a5c61..2da6e84daf 100644
--- a/test/behavior/if.zig
+++ b/test/behavior/if.zig
@@ -82,7 +82,6 @@ test "const result loc, runtime if cond, else unreachable" {
 test "if copies its payload" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -147,8 +146,6 @@ test "if-else expression with runtime condition result location is inferred opti
 }
 
 test "result location with inferred type ends up being pointer to comptime_int" {
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
     var a: ?u32 = 1234;
     var b: u32 = 2000;
     _ = .{ &a, &b };
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index cd110bc80d..ec326acc5a 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -689,6 +689,8 @@ fn testSignedWrappingEval(x: i32) !void {
 }
 
 test "signed negation wrapping" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     try testSignedNegationWrappingEval(minInt(i16));
     try comptime testSignedNegationWrappingEval(minInt(i16));
 }
@@ -699,6 +701,8 @@ fn testSignedNegationWrappingEval(x: i16) !void {
 }
 
 test "unsigned negation wrapping" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     try testUnsignedNegationWrappingEval(1);
     try comptime testUnsignedNegationWrappingEval(1);
 }
@@ -725,7 +729,6 @@ fn negateWrap(comptime T: type, x: T) T {
 test "unsigned 64-bit division" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isMIPS()) {
         // https://github.com/ziglang/zig/issues/16846
@@ -838,7 +841,6 @@ test "@addWithOverflow" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testAddWithOverflow(u8, 250, 100, 94, 1);
     try testAddWithOverflow(u8, 100, 150, 250, 0);
@@ -927,7 +929,6 @@ fn testMulWithOverflow(comptime T: type, a: T, b: T, mul: T, bit: u1) !void {
 test "basic @mulWithOverflow" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try testMulWithOverflow(u8, 86, 3, 2, 1);
     try testMulWithOverflow(u8, 85, 3, 255, 0);
@@ -1330,6 +1331,8 @@ test "quad hex float literal parsing accurate" {
 }
 
 test "truncating shift left" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
     try testShlTrunc(maxInt(u16));
     try comptime testShlTrunc(maxInt(u16));
 }
diff --git a/test/behavior/null.zig b/test/behavior/null.zig
index ebc390c36a..0d32f17388 100644
--- a/test/behavior/null.zig
+++ b/test/behavior/null.zig
@@ -188,7 +188,6 @@ test "unwrap optional which is field of global var" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     struct_with_optional.field = null;
     if (struct_with_optional.field) |payload| {
diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig
index 80156d1dd6..ae9cc5ee4d 100644
--- a/test/behavior/optional.zig
+++ b/test/behavior/optional.zig
@@ -134,7 +134,6 @@ test "nested optional field in struct" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S2 = struct {
         y: u8,
@@ -287,7 +286,6 @@ test "nested orelse" {
 test "self-referential struct through a slice of optional" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         const Node = struct {
@@ -566,7 +564,6 @@ test "Optional slice passed to function" {
 test "peer type resolution in nested if expressions" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const Thing = struct { n: i32 };
     var a = false;
diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig
index 60fcd5e9f6..a6712d04ed 100644
--- a/test/behavior/packed-struct.zig
+++ b/test/behavior/packed-struct.zig
@@ -1096,7 +1096,6 @@ test "packed struct used as part of anon decl name" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = packed struct { a: u0 = 0 };
     var a: u8 = 0;
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index 97617a1fd1..689c725db3 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -1573,7 +1573,6 @@ test "no dependency loop on optional field wrapped in generic function" {
 test "optional field init with tuple" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         a: ?struct { b: u32 },
diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig
index 1275d0f433..4c0b4a88f4 100644
--- a/test/behavior/switch.zig
+++ b/test/behavior/switch.zig
@@ -516,7 +516,6 @@ test "switch with null and T peer types and inferred result location type" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest(c: u8) !void {
diff --git a/test/behavior/threadlocal.zig b/test/behavior/threadlocal.zig
index 87daebda78..4418870149 100644
--- a/test/behavior/threadlocal.zig
+++ b/test/behavior/threadlocal.zig
@@ -6,7 +6,6 @@ test "thread local variable" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_llvm) switch (builtin.cpu.arch) {
         .x86_64, .x86 => {},
         else => return error.SkipZigTest,
@@ -47,7 +46,6 @@ test "reference a global threadlocal variable" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_llvm) switch (builtin.cpu.arch) {
         .x86_64, .x86 => {},
         else => return error.SkipZigTest,
diff --git a/test/behavior/while.zig b/test/behavior/while.zig
index 71641ea265..fd288a9460 100644
--- a/test/behavior/while.zig
+++ b/test/behavior/while.zig
@@ -106,7 +106,6 @@ fn testBreakOuter() void {
 test "while copies its payload" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {