Merge pull request #13966 from r00ster91/stage1things

compiler_rt: re-enable tests and remove old workarounds
Andrew Kelley authored 2022-12-16 06:11:16 -05:00, committed by GitHub
3 changed files with 5 additions and 30 deletions


@@ -3,24 +3,11 @@ const clz = @import("count0bits.zig");
 const testing = @import("std").testing;
 
 fn test__clzsi2(a: u32, expected: i32) !void {
-    // stage1 and stage2 diverge on function pointer semantics
-    switch (builtin.zig_backend) {
-        .stage1 => {
-            // Use of `var` here is working around a stage1 bug.
-            var nakedClzsi2 = clz.__clzsi2;
-            var actualClzsi2 = @ptrCast(fn (a: i32) callconv(.C) i32, nakedClzsi2);
-            var x = @bitCast(i32, a);
-            var result = actualClzsi2(x);
-            try testing.expectEqual(expected, result);
-        },
-        else => {
-            const nakedClzsi2 = clz.__clzsi2;
-            const actualClzsi2 = @ptrCast(*const fn (a: i32) callconv(.C) i32, &nakedClzsi2);
-            const x = @bitCast(i32, a);
-            const result = actualClzsi2(x);
-            try testing.expectEqual(expected, result);
-        },
-    }
+    const nakedClzsi2 = clz.__clzsi2;
+    const actualClzsi2 = @ptrCast(*const fn (a: i32) callconv(.C) i32, &nakedClzsi2);
+    const x = @bitCast(i32, a);
+    const result = actualClzsi2(x);
+    try testing.expectEqual(expected, result);
 }
 
 test "clzsi2" {


@@ -1,5 +1,4 @@
 const std = @import("std");
-const builtin = @import("builtin");
 const testing = std.testing;
 const math = std.math;
 
@@ -811,8 +810,6 @@ test "conversion to f32" {
 }
 
 test "conversion to f80" {
-    if (builtin.zig_backend == .stage1 and builtin.cpu.arch != .x86_64)
-        return error.SkipZigTest; // https://github.com/ziglang/zig/issues/11408
     if (std.debug.runtime_safety) return error.SkipZigTest;
 
     const intToFloat = @import("./int_to_float.zig").intToFloat;
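For context, these tests drive compiler_rt's integer-to-float conversion; the language builtin @intToFloat (0.10-era syntax) performs the same kind of conversion, so a hypothetical standalone check might look like this sketch:

    const std = @import("std");

    test "sketch: widening i64 to f80" {
        const x: i64 = 1 << 40;
        // 2^40 is exactly representable in f80, so the comparison is exact.
        try std.testing.expectEqual(@as(f80, 1099511627776.0), @intToFloat(f80, x));
    }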


@@ -65,15 +65,6 @@ pub fn __mulodi4(a: i64, b: i64, overflow: *c_int) callconv(.C) i64 {
 }
 
 pub fn __muloti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
-    switch (builtin.zig_backend) {
-        .stage1, .stage2_llvm => {
-            // Workaround for https://github.com/llvm/llvm-project/issues/56403
-            // When we call the genericSmall implementation instead, LLVM optimizer
-            // optimizes __muloti4 to a call to itself.
-            return muloXi4_genericFast(i128, a, b, overflow);
-        },
-        else => {},
-    }
     if (2 * @bitSizeOf(i128) <= @bitSizeOf(usize)) {
         return muloXi4_genericFast(i128, a, b, overflow);
     } else {
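The deleted branch pinned LLVM backends to the generic-fast path so the optimizer could not lower the generic-small path back into a recursive call to __muloti4 (the linked llvm-project issue). For illustration only, the overflow condition these routines report can be expressed with Zig's @mulWithOverflow builtin (0.10-era four-argument form); mulWouldOverflow is a hypothetical helper, not the compiler_rt implementation:

    fn mulWouldOverflow(comptime T: type, a: T, b: T) bool {
        var product: T = undefined;
        // Returns true when a * b does not fit in T; product holds the
        // wrapped result either way.
        return @mulWithOverflow(T, a, b, &product);
    }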