Merge pull request #15458 from koachan/sparc64-codegen

stage2: sparc64: Yet another patchset for the selfhosted backend
Andrew Kelley
2023-04-29 10:55:50 -07:00
committed by GitHub
14 changed files with 331 additions and 12 deletions


@@ -22,6 +22,7 @@ const Type = @import("../../type.zig").Type;
const CodeGenError = codegen.CodeGenError;
const Result = @import("../../codegen.zig").Result;
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
const Endian = std.builtin.Endian;
const build_options = @import("build_options");
@@ -30,6 +31,7 @@ const abi = @import("abi.zig");
const errUnionPayloadOffset = codegen.errUnionPayloadOffset;
const errUnionErrorOffset = codegen.errUnionErrorOffset;
const Instruction = bits.Instruction;
const ASI = Instruction.ASI;
const ShiftWidth = Instruction.ShiftWidth;
const RegisterManager = abi.RegisterManager;
const RegisterLock = RegisterManager.RegisterLock;
@@ -141,6 +143,8 @@ const MCValue = union(enum) {
/// The value is one of the stack variables.
/// If the type is a pointer, it means the pointer address is in the stack at this offset.
/// Note that this stores the plain value (i.e. without the effects of the stack bias).
/// Always convert this value into machine offsets with realStackOffset() before
/// lowering into asm!
stack_offset: u32,
/// The value is a pointer to one of the stack variables (payload is stack offset).
ptr_stack_offset: u32,
@@ -595,7 +599,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.ret_load => try self.airRetLoad(inst),
.store => try self.airStore(inst, false),
.store_safe => try self.airStore(inst, true),
.struct_field_ptr => @panic("TODO try self.airStructFieldPtr(inst)"),
.struct_field_ptr => try self.airStructFieldPtr(inst),
.struct_field_val => try self.airStructFieldVal(inst),
.array_to_slice => try self.airArrayToSlice(inst),
.int_to_float => try self.airIntToFloat(inst),
@@ -613,7 +617,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.clz => try self.airClz(inst),
.ctz => try self.airCtz(inst),
.popcount => try self.airPopcount(inst),
.byte_swap => @panic("TODO try self.airByteSwap(inst)"),
.byte_swap => try self.airByteSwap(inst),
.bit_reverse => try self.airBitReverse(inst),
.tag_name => try self.airTagName(inst),
.error_name => try self.airErrorName(inst),
@@ -663,8 +667,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.slice_ptr => try self.airSlicePtr(inst),
.slice_len => try self.airSliceLen(inst),
.ptr_slice_len_ptr => @panic("TODO try self.airPtrSliceLenPtr(inst)"),
.ptr_slice_ptr_ptr => @panic("TODO try self.airPtrSlicePtrPtr(inst)"),
.ptr_slice_len_ptr => try self.airPtrSliceLenPtr(inst),
.ptr_slice_ptr_ptr => try self.airPtrSlicePtrPtr(inst),
.array_elem_val => try self.airArrayElemVal(inst),
.slice_elem_val => try self.airSliceElemVal(inst),
@@ -720,10 +724,10 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.error_set_has_value => @panic("TODO implement error_set_has_value"),
.vector_store_elem => @panic("TODO implement vector_store_elem"),
.c_va_arg => @panic("TODO implement c_va_arg"),
.c_va_copy => @panic("TODO implement c_va_copy"),
.c_va_end => @panic("TODO implement c_va_end"),
.c_va_start => @panic("TODO implement c_va_start"),
.c_va_arg => return self.fail("TODO implement c_va_arg", .{}),
.c_va_copy => return self.fail("TODO implement c_va_copy", .{}),
.c_va_end => return self.fail("TODO implement c_va_end", .{}),
.c_va_start => return self.fail("TODO implement c_va_start", .{}),
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
@@ -1198,6 +1202,90 @@ fn airBreakpoint(self: *Self) !void {
return self.finishAirBookkeeping();
}
fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
// We have a hardware byteswapper in SPARCv9; don't let mainstream compilers mislead you.
// That being said, the strategy for lowering this is:
// - If src is an immediate, comptime-swap it.
// - If src is in memory, issue an LD*A with #ASI_P_[opposite-endian].
// - If src is a register, issue an ST*A with #ASI_P_[opposite-endian]
//   to a stack slot, then follow with a normal load from said stack slot.
// This is because on some implementations ASI-tagged memory operations are non-pipelineable
// and loads tend to have longer latency than stores, so this sequence minimizes stalls.
// The result will always be either another immediate or a value stored in a register.
// TODO: Fold byteswap+store into a single ST*A and load+byteswap into a single LD*A.
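// As a concrete sketch (illustrative only, not verified emitter output): for a 32-bit
// value already in a register on a big-endian target, with the stack offset already passed
// through realStackOffset(), the register case described above is expected to lower roughly to:
//   stwa  %o0, [%sp + %o1] 0x88   ! ST*A with ASI_PRIMARY_LITTLE (0x88), the opposite-endian ASI
//   lduw  [%sp + %o1], %o0        ! plain (same-endian) reload reads the bytes back swapped
// where %o0 holds the value and %o1 holds the biased stack offset (register choices are
// arbitrary here).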
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.air.typeOf(ty_op.operand);
switch (operand_ty.zigTypeTag()) {
.Vector => return self.fail("TODO byteswap for vectors", .{}),
.Int => {
const int_info = operand_ty.intInfo(self.target.*);
if (int_info.bits == 8) break :result operand;
const abi_size = int_info.bits >> 3;
const abi_align = operand_ty.abiAlignment(self.target.*);
const opposite_endian_asi = switch (self.target.cpu.arch.endian()) {
Endian.Big => ASI.asi_primary_little,
Endian.Little => ASI.asi_primary,
};
switch (operand) {
.immediate => |imm| {
const swapped = switch (int_info.bits) {
16 => @byteSwap(@intCast(u16, imm)),
24 => @byteSwap(@intCast(u24, imm)),
32 => @byteSwap(@intCast(u32, imm)),
40 => @byteSwap(@intCast(u40, imm)),
48 => @byteSwap(@intCast(u48, imm)),
56 => @byteSwap(@intCast(u56, imm)),
64 => @byteSwap(@intCast(u64, imm)),
else => return self.fail("TODO synthesize SPARCv9 byteswap for other integer sizes", .{}),
};
break :result .{ .immediate = swapped };
},
.register => |reg| {
if (int_info.bits > 64 or @popCount(int_info.bits) != 1)
return self.fail("TODO synthesize SPARCv9 byteswap for other integer sizes", .{});
const off = try self.allocMem(inst, abi_size, abi_align);
const off_reg = try self.copyToTmpRegister(operand_ty, .{ .immediate = realStackOffset(off) });
try self.genStoreASI(reg, .sp, off_reg, abi_size, opposite_endian_asi);
try self.genLoad(reg, .sp, Register, off_reg, abi_size);
break :result .{ .register = reg };
},
.memory => {
if (int_info.bits > 64 or @popCount(int_info.bits) != 1)
return self.fail("TODO synthesize SPARCv9 byteswap for other integer sizes", .{});
const addr_reg = try self.copyToTmpRegister(operand_ty, operand);
const dst_reg = try self.register_manager.allocReg(null, gp);
try self.genLoadASI(dst_reg, addr_reg, .g0, abi_size, opposite_endian_asi);
break :result .{ .register = dst_reg };
},
.stack_offset => |off| {
if (int_info.bits > 64 or @popCount(int_info.bits) != 1)
return self.fail("TODO synthesize SPARCv9 byteswap for other integer sizes", .{});
const off_reg = try self.copyToTmpRegister(operand_ty, .{ .immediate = realStackOffset(off) });
const dst_reg = try self.register_manager.allocReg(null, gp);
try self.genLoadASI(dst_reg, .sp, off_reg, abi_size, opposite_endian_asi);
break :result .{ .register = dst_reg };
},
else => unreachable,
}
},
else => unreachable,
}
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void {
if (modifier == .always_tail) return self.fail("TODO implement tail calls for {}", .{self.target.cpu.arch});
@@ -2150,6 +2238,38 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes = @divExact(ptr_bits, 8);
const mcv = try self.resolveInst(ty_op.operand);
switch (mcv) {
.dead, .unreach, .none => unreachable,
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off - ptr_bytes };
},
else => return self.fail("TODO implement ptr_slice_len_ptr for {}", .{mcv}),
}
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const mcv = try self.resolveInst(ty_op.operand);
switch (mcv) {
.dead, .unreach, .none => unreachable,
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off };
},
else => return self.fail("TODO implement ptr_slice_len_ptr for {}", .{mcv}),
}
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const result = try self.resolveInst(un_op);
@@ -2425,6 +2545,13 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
const result = try self.structFieldPtr(inst, extra.struct_operand, extra.field_index);
return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none });
}
fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result = try self.structFieldPtr(inst, ty_op.operand, index);
@@ -3574,6 +3701,34 @@ fn genLoad(self: *Self, value_reg: Register, addr_reg: Register, comptime off_ty
}
}
fn genLoadASI(self: *Self, value_reg: Register, addr_reg: Register, off_reg: Register, abi_size: u64, asi: ASI) !void {
switch (abi_size) {
1, 2, 4, 8 => {
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .lduba,
2 => .lduha,
4 => .lduwa,
8 => .ldxa,
else => unreachable, // unexpected abi size
};
_ = try self.addInst(.{
.tag = tag,
.data = .{
.mem_asi = .{
.rd = value_reg,
.rs1 = addr_reg,
.rs2 = off_reg,
.asi = asi,
},
},
});
},
3, 5, 6, 7 => return self.fail("TODO: genLoadASI for more abi_sizes", .{}),
else => unreachable,
}
}
fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
switch (mcv) {
.dead => unreachable,
@@ -3644,7 +3799,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa });
},
.ptr_stack_offset => |off| {
const real_offset = off + abi.stack_bias + abi.stack_reserved_area;
const real_offset = realStackOffset(off);
const simm13 = math.cast(i13, real_offset) orelse
return self.fail("TODO larger stack offsets: {}", .{real_offset});
@@ -3776,7 +3931,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
try self.genLoad(reg, reg, i13, 0, ty.abiSize(self.target.*));
},
.stack_offset => |off| {
const real_offset = off + abi.stack_bias + abi.stack_reserved_area;
const real_offset = realStackOffset(off);
const simm13 = math.cast(i13, real_offset) orelse
return self.fail("TODO larger stack offsets: {}", .{real_offset});
try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(self.target.*));
@@ -3810,7 +3965,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
},
.register => |reg| {
const real_offset = stack_offset + abi.stack_bias + abi.stack_reserved_area;
const real_offset = realStackOffset(stack_offset);
const simm13 = math.cast(i13, real_offset) orelse
return self.fail("TODO larger stack offsets: {}", .{real_offset});
return self.genStore(reg, .sp, i13, simm13, abi_size);
@@ -3933,6 +4088,34 @@ fn genStore(self: *Self, value_reg: Register, addr_reg: Register, comptime off_t
}
}
fn genStoreASI(self: *Self, value_reg: Register, addr_reg: Register, off_reg: Register, abi_size: u64, asi: ASI) !void {
switch (abi_size) {
1, 2, 4, 8 => {
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .stba,
2 => .stha,
4 => .stwa,
8 => .stxa,
else => unreachable, // unexpected abi size
};
_ = try self.addInst(.{
.tag = tag,
.data = .{
.mem_asi = .{
.rd = value_reg,
.rs1 = addr_reg,
.rs2 = off_reg,
.asi = asi,
},
},
});
},
3, 5, 6, 7 => return self.fail("TODO: genStoreASI for more abi_sizes", .{}),
else => unreachable,
}
}
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
const mcv: MCValue = switch (try codegen.genTypedValue(
self.bin_file,
@@ -4245,6 +4428,17 @@ fn processDeath(self: *Self, inst: Air.Inst.Index) void {
}
}
/// Turns a stack_offset MCV into a real SPARCv9 stack offset usable for asm.
fn realStackOffset(off: u32) u32 {
return off
// SPARCv9 %sp is biased: it points abi.stack_bias bytes below the actual frame.
+ abi.stack_bias
// The first abi.stack_reserved_area bytes of each stack frame are reserved
// for ABI and hardware purposes.
+ abi.stack_reserved_area;
// Only after that does the usable portion of the stack frame begin.
}
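// Rough worked example (illustrative only; the authoritative constants live in abi.zig,
// and the values below are the conventional SPARCv9 ones, assumed here): with
// abi.stack_bias == 2047 and abi.stack_reserved_area == 176, a logical stack_offset of 8
// becomes realStackOffset(8) == 8 + 2047 + 176 == 2231, which is the simm13 actually used
// when addressing relative to %sp in genLoad/genStore.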
/// Caller must call `CallMCValues.deinit`.
fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) !CallMCValues {
const cc = fn_ty.fnCallingConvention();


@@ -91,6 +91,11 @@ pub fn emitMir(
.lduw => try emit.mirArithmetic3Op(inst),
.ldx => try emit.mirArithmetic3Op(inst),
.lduba => try emit.mirMemASI(inst),
.lduha => try emit.mirMemASI(inst),
.lduwa => try emit.mirMemASI(inst),
.ldxa => try emit.mirMemASI(inst),
.@"and" => try emit.mirArithmetic3Op(inst),
.@"or" => try emit.mirArithmetic3Op(inst),
.xor => try emit.mirArithmetic3Op(inst),
@@ -127,6 +132,11 @@ pub fn emitMir(
.stw => try emit.mirArithmetic3Op(inst),
.stx => try emit.mirArithmetic3Op(inst),
.stba => try emit.mirMemASI(inst),
.stha => try emit.mirMemASI(inst),
.stwa => try emit.mirMemASI(inst),
.stxa => try emit.mirMemASI(inst),
.sub => try emit.mirArithmetic3Op(inst),
.subcc => try emit.mirArithmetic3Op(inst),
@@ -368,6 +378,29 @@ fn mirConditionalMove(emit: *Emit, inst: Mir.Inst.Index) !void {
}
}
fn mirMemASI(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const data = emit.mir.instructions.items(.data)[inst].mem_asi;
const rd = data.rd;
const rs1 = data.rs1;
const rs2 = data.rs2;
const asi = data.asi;
switch (tag) {
.lduba => try emit.writeInstruction(Instruction.lduba(rs1, rs2, asi, rd)),
.lduha => try emit.writeInstruction(Instruction.lduha(rs1, rs2, asi, rd)),
.lduwa => try emit.writeInstruction(Instruction.lduwa(rs1, rs2, asi, rd)),
.ldxa => try emit.writeInstruction(Instruction.ldxa(rs1, rs2, asi, rd)),
.stba => try emit.writeInstruction(Instruction.stba(rs1, rs2, asi, rd)),
.stha => try emit.writeInstruction(Instruction.stha(rs1, rs2, asi, rd)),
.stwa => try emit.writeInstruction(Instruction.stwa(rs1, rs2, asi, rd)),
.stxa => try emit.writeInstruction(Instruction.stxa(rs1, rs2, asi, rd)),
else => unreachable,
}
}
fn mirMembar(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const mask = emit.mir.instructions.items(.data)[inst].membar_mask;


@@ -15,6 +15,7 @@ const bits = @import("bits.zig");
const Air = @import("../../Air.zig");
const Instruction = bits.Instruction;
const ASI = bits.Instruction.ASI;
const Register = bits.Register;
instructions: std.MultiArrayList(Inst).Slice,
@@ -70,6 +71,16 @@ pub const Inst = struct {
lduw,
ldx,
/// A.28 Load Integer from Alternate Space
/// This uses the mem_asi field.
/// Note that the ldda variant of this instruction is deprecated, so do not emit
/// it unless specifically requested (e.g. by inline assembly).
// TODO add other operations.
lduba,
lduha,
lduwa,
ldxa,
/// A.31 Logical Operations
/// This uses the arithmetic_3op field.
// TODO add other operations.
@@ -132,6 +143,16 @@ pub const Inst = struct {
stw,
stx,
/// A.55 Store Integer into Alternate Space
/// This uses the mem_asi field.
/// Note that the stda variant of this instruction is deprecated, so do not emit
/// it unless specifically requested (e.g. by inline assembly).
// TODO add other operations.
stba,
stha,
stwa,
stxa,
/// A.56 Subtract
/// This uses the arithmetic_3op field.
// TODO add other operations.
@@ -241,6 +262,15 @@ pub const Inst = struct {
inst: Index,
},
/// ASI-tagged memory operations.
/// Used by e.g. ldxa, stxa
mem_asi: struct {
rd: Register,
rs1: Register,
rs2: Register = .g0,
asi: ASI,
},
/// Membar mask, controls the barrier behavior
/// Used by e.g. membar
membar_mask: struct {


@@ -1229,6 +1229,22 @@ pub const Instruction = union(enum) {
};
}
pub fn lduba(rs1: Register, rs2: Register, asi: ASI, rd: Register) Instruction {
return format3i(0b11, 0b01_0001, rs1, rs2, rd, asi);
}
pub fn lduha(rs1: Register, rs2: Register, asi: ASI, rd: Register) Instruction {
return format3i(0b11, 0b01_0010, rs1, rs2, rd, asi);
}
pub fn lduwa(rs1: Register, rs2: Register, asi: ASI, rd: Register) Instruction {
return format3i(0b11, 0b01_0000, rs1, rs2, rd, asi);
}
pub fn ldxa(rs1: Register, rs2: Register, asi: ASI, rd: Register) Instruction {
return format3i(0b11, 0b01_1011, rs1, rs2, rd, asi);
}
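// Encoding sketch for the line above (an illustration, assuming the standard SPARCv9
// format 3 layout of op[31:30] rd[29:25] op3[24:19] rs1[18:14] i[13] imm_asi[12:5] rs2[4:0]):
//   ldxa [%g1 + %g2] 0x88, %o0
//   => op = 0b11, rd = 8 (%o0), op3 = 0b01_1011, rs1 = 1 (%g1), i = 0,
//      imm_asi = 0x88 (ASI_PRIMARY_LITTLE), rs2 = 2 (%g2)
// which, if that layout holds, assembles to roughly 0xD0D85102.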
pub fn @"and"(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction {
return switch (s2) {
Register => format3a(0b10, 0b00_0001, rs1, rs2, rd),
@@ -1417,6 +1433,22 @@ pub const Instruction = union(enum) {
};
}
pub fn stba(rs1: Register, rs2: Register, asi: ASI, rd: Register) Instruction {
return format3i(0b11, 0b01_0101, rs1, rs2, rd, asi);
}
pub fn stha(rs1: Register, rs2: Register, asi: ASI, rd: Register) Instruction {
return format3i(0b11, 0b01_0110, rs1, rs2, rd, asi);
}
pub fn stwa(rs1: Register, rs2: Register, asi: ASI, rd: Register) Instruction {
return format3i(0b11, 0b01_0100, rs1, rs2, rd, asi);
}
pub fn stxa(rs1: Register, rs2: Register, asi: ASI, rd: Register) Instruction {
return format3i(0b11, 0b01_1110, rs1, rs2, rd, asi);
}
pub fn sub(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction {
return switch (s2) {
Register => format3a(0b10, 0b00_0100, rs1, rs2, rd),


@@ -47,6 +47,7 @@ fn getArrayLen(a: []const u32) usize {
test "array concat with undefined" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
@@ -70,6 +71,7 @@ test "array concat with undefined" {
test "array concat with tuple" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const array: [2]u8 = .{ 1, 2 };
{
@@ -103,6 +105,7 @@ test "array init with mult" {
test "array literal with explicit type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const hex_mult: [4]u16 = .{ 4096, 256, 16, 1 };
@@ -203,6 +206,7 @@ test "nested arrays of strings" {
test "nested arrays of integers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const array_of_numbers = [_][2]u8{
[2]u8{ 1, 2 },


@@ -6,6 +6,7 @@ test {
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const x = X{};
try std.testing.expectEqual(@as(u16, 0), x.y.a);
try std.testing.expectEqual(false, x.y.b);


@@ -13,6 +13,7 @@ test {
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var f1: *align(16) Foo = @alignCast(16, @ptrCast(*align(1) Foo, &buffer[0]));
try expect(@typeInfo(@TypeOf(f1)).Pointer.alignment == 16);
try expect(@ptrToInt(f1) == @ptrToInt(&f1.a));


@@ -76,6 +76,7 @@ fn bigToNativeEndian(comptime T: type, v: T) T {
}
test "type pun endianness" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
comptime {
const StructOfBytes = extern struct { x: [4]u8 };
@@ -376,6 +377,8 @@ test "offset field ptr by enclosing array element size" {
test "accessing reinterpreted memory of parent object" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = extern struct {
a: f32,
b: [4]u8,


@@ -563,7 +563,7 @@ test "packed struct passed to callconv(.C) function" {
test "overaligned pointer to packed struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = packed struct { a: u32, b: u32 };
var foo: S align(4) = .{ .a = 123, .b = 456 };
const ptr: *align(4) S = &foo;
@@ -583,6 +583,7 @@ test "overaligned pointer to packed struct" {
test "packed struct initialized in bitcast" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const T = packed struct { val: u8 };
var val: u8 = 123;
@@ -595,6 +596,7 @@ test "pointer to container level packed struct field" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = packed struct(u32) {
test_bit: bool,


@@ -4,6 +4,8 @@ const expect = std.testing.expect;
const native_endian = builtin.target.cpu.arch.endian();
test "reinterpret bytes as integer with nonzero offset" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testReinterpretBytesAsInteger();
comptime try testReinterpretBytesAsInteger();
}
@@ -74,6 +76,8 @@ fn testReinterpretBytesAsExternStruct() !void {
}
test "reinterpret bytes of an extern struct (with under-aligned fields) into another" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testReinterpretExternStructAsExternStruct();
comptime try testReinterpretExternStructAsExternStruct();
}
@@ -96,6 +100,8 @@ fn testReinterpretExternStructAsExternStruct() !void {
}
test "reinterpret bytes of an extern struct into another" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testReinterpretOverAlignedExternStructAsExternStruct();
comptime try testReinterpretOverAlignedExternStructAsExternStruct();
}
@@ -191,6 +197,8 @@ const Bytes = struct {
};
test "comptime ptrcast keeps larger alignment" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
comptime {
const a: u32 = 1234;
const p = @ptrCast([*]const u8, &a);
@@ -199,6 +207,8 @@ test "comptime ptrcast keeps larger alignment" {
}
test "ptrcast of const integer has the correct object size" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const is_value = ~@intCast(isize, std.math.minInt(isize));
const is_bytes = @ptrCast([*]const u8, &is_value)[0..@sizeOf(isize)];
if (@sizeOf(isize) == 8) {


@@ -296,6 +296,7 @@ test "coerce tuple to tuple" {
test "tuple type with void field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const T = std.meta.Tuple(&[_]type{void});
const x = T{{}};
@@ -341,6 +342,7 @@ test "tuple type with void field and a runtime field" {
test "branching inside tuple literal" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
fn foo(a: anytype) !void {
@@ -355,6 +357,7 @@ test "tuple initialized with a runtime known value" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const E = union(enum) { e: []const u8 };
const W = union(enum) { w: E };
@@ -368,6 +371,7 @@ test "tuple of struct concatenation and coercion to array" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const StructWithDefault = struct { value: f32 = 42 };
const SomeStruct = struct { array: [4]StructWithDefault };
@@ -381,6 +385,7 @@ test "tuple of struct concatenation and coercion to array" {
test "nested runtime conditionals in tuple initializer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var data: u8 = 0;
const x = .{


@@ -1495,6 +1495,7 @@ test "packed union with zero-bit field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = packed struct {
nested: packed union {


@@ -95,6 +95,7 @@ fn doNothingWithFirstArg(args: anytype) void {
test "simple variadic function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -143,6 +144,7 @@ test "simple variadic function" {
test "variadic functions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO


@@ -1282,6 +1282,7 @@ test "store to vector in slice" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var v = [_]@Vector(3, f32){
.{ 1, 1, 1 },