Merge pull request #10762 from ziglang/stage2-x86_64-new-regalloc-api

stage2: handle more MCValue types in `struct_field_ptr` on x86_64, and pad out non-packed struct fields when lowering to bytes (all targets, incl. wasm32)
Authored by Jakub Konka on 2022-02-02 16:33:58 +01:00; committed by GitHub.
7 changed files with 199 additions and 73 deletions
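
The core of the change is the padding logic added to both the wasm backend's `genTypedValue` and the generic `generateSymbol` below: after each field of a non-packed struct is lowered to bytes, zero bytes are emitted up to the next field's ABI offset. A minimal sketch of that idea, assuming the caller passes the serialized field bytes together with the offsets reported by `structFieldOffset` (the helper below is illustrative and not part of this PR):

const std = @import("std");

/// Illustrative only: write `fields[i]`, then zero-pad up to `offsets[i + 1]`,
/// the ABI offset of the next field (or the struct's total size after the last
/// field), mirroring the loops added in this PR.
fn writeFieldsPadded(writer: anytype, fields: []const []const u8, offsets: []const u64) !void {
    var written: u64 = 0;
    for (fields) |field_bytes, index| {
        try writer.writeAll(field_bytes);
        written += field_bytes.len;
        const padded_field_end = offsets[index + 1];
        const padding = try std.math.cast(usize, padded_field_end - written);
        if (padding > 0) {
            try writer.writeByteNTimes(0, padding);
            written = padded_field_end;
        }
    }
}

In the diff itself the running length is taken from `self.code.items.len` (relative to `struct_begin`) and the target offset comes from `ty.structFieldOffset(index + 1, target)`.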

View File

@@ -619,7 +619,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!WValue {
.code = &value_bytes,
.symbol_index = try self.bin_file.createLocalSymbol(self.decl, ty),
};
const result = decl_gen.genTypedValue(ty, val, value_bytes.writer()) catch |err| {
const result = decl_gen.genTypedValue(ty, val) catch |err| {
// When a codegen error occurred, take ownership of the error message
if (err == error.CodegenFail) {
self.err_msg = decl_gen.err_msg;
@@ -907,14 +907,15 @@ pub const DeclGen = struct {
break :init_val payload.data.init;
} else decl.val;
if (init_val.tag() != .unreachable_value) {
return self.genTypedValue(decl.ty, init_val, self.code.writer());
return self.genTypedValue(decl.ty, init_val);
}
return Result{ .appended = {} };
}
}
/// Generates the wasm bytecode for the declaration belonging to `Context`
fn genTypedValue(self: *DeclGen, ty: Type, val: Value, writer: anytype) InnerError!Result {
fn genTypedValue(self: *DeclGen, ty: Type, val: Value) InnerError!Result {
const writer = self.code.writer();
if (val.isUndef()) {
try writer.writeByteNTimes(0xaa, @intCast(usize, ty.abiSize(self.target())));
return Result{ .appended = {} };
@@ -926,7 +927,7 @@ pub const DeclGen = struct {
.function => val.castTag(.function).?.data.owner_decl,
else => unreachable,
};
return try self.lowerDeclRef(ty, val, fn_decl, writer);
return try self.lowerDeclRef(ty, val, fn_decl);
},
.Optional => {
var opt_buf: Type.Payload.ElemType = undefined;
@@ -942,9 +943,9 @@ pub const DeclGen = struct {
if (ty.isPtrLikeOptional()) {
if (val.castTag(.opt_payload)) |payload| {
return self.genTypedValue(payload_type, payload.data, writer);
return self.genTypedValue(payload_type, payload.data);
} else if (!val.isNull()) {
return self.genTypedValue(payload_type, val, writer);
return self.genTypedValue(payload_type, val);
} else {
try writer.writeByteNTimes(0, abi_size);
return Result{ .appended = {} };
@@ -956,7 +957,6 @@ pub const DeclGen = struct {
switch (try self.genTypedValue(
payload_type,
if (val.castTag(.opt_payload)) |pl| pl.data else Value.initTag(.undef),
writer,
)) {
.appended => {},
.externally_managed => |payload| try writer.writeAll(payload),
@@ -972,7 +972,7 @@ pub const DeclGen = struct {
const elem_vals = val.castTag(.array).?.data;
const elem_ty = ty.childType();
for (elem_vals) |elem_val| {
switch (try self.genTypedValue(elem_ty, elem_val, writer)) {
switch (try self.genTypedValue(elem_ty, elem_val)) {
.appended => {},
.externally_managed => |data| try writer.writeAll(data),
}
@@ -987,20 +987,20 @@ pub const DeclGen = struct {
var index: u32 = 0;
while (index < len) : (index += 1) {
switch (try self.genTypedValue(elem_ty, array, writer)) {
switch (try self.genTypedValue(elem_ty, array)) {
.externally_managed => |data| try writer.writeAll(data),
.appended => {},
}
}
if (sentinel) |sentinel_value| {
return self.genTypedValue(elem_ty, sentinel_value, writer);
return self.genTypedValue(elem_ty, sentinel_value);
}
return Result{ .appended = {} };
},
.empty_array_sentinel => {
const elem_ty = ty.childType();
const sent_val = ty.sentinel().?;
return self.genTypedValue(elem_ty, sent_val, writer);
return self.genTypedValue(elem_ty, sent_val);
},
else => unreachable,
},
@@ -1037,25 +1037,37 @@ pub const DeclGen = struct {
const int_val = val.enumToInt(ty, &int_buffer);
var buf: Type.Payload.Bits = undefined;
const int_ty = ty.intTagType(&buf);
return self.genTypedValue(int_ty, int_val, writer);
return self.genTypedValue(int_ty, int_val);
},
.Bool => {
try writer.writeByte(@boolToInt(val.toBool()));
return Result{ .appended = {} };
},
.Struct => {
const struct_ty = ty.castTag(.@"struct").?.data;
if (struct_ty.layout == .Packed) {
const struct_obj = ty.castTag(.@"struct").?.data;
if (struct_obj.layout == .Packed) {
return self.fail("TODO: Packed structs for wasm", .{});
}
const struct_begin = self.code.items.len;
const field_vals = val.castTag(.@"struct").?.data;
for (field_vals) |field_val, index| {
const field_ty = ty.structFieldType(index);
if (!field_ty.hasRuntimeBits()) continue;
switch (try self.genTypedValue(field_ty, field_val, writer)) {
switch (try self.genTypedValue(field_ty, field_val)) {
.appended => {},
.externally_managed => |payload| try writer.writeAll(payload),
}
const unpadded_field_len = self.code.items.len - struct_begin;
// Pad struct members if required
const padded_field_end = ty.structFieldOffset(index + 1, self.target());
const padding = try std.math.cast(usize, padded_field_end - unpadded_field_len);
if (padding > 0) {
try writer.writeByteNTimes(0, padding);
}
}
return Result{ .appended = {} };
},
@@ -1064,12 +1076,12 @@ pub const DeclGen = struct {
const layout = ty.unionGetLayout(self.target());
if (layout.payload_size == 0) {
return self.genTypedValue(ty.unionTagType().?, union_val.tag, writer);
return self.genTypedValue(ty.unionTagType().?, union_val.tag);
}
// Check if we should store the tag first, in which case, do so now:
if (layout.tag_align >= layout.payload_align) {
switch (try self.genTypedValue(ty.unionTagType().?, union_val.tag, writer)) {
switch (try self.genTypedValue(ty.unionTagType().?, union_val.tag)) {
.appended => {},
.externally_managed => |payload| try writer.writeAll(payload),
}
@@ -1082,7 +1094,7 @@ pub const DeclGen = struct {
if (!field_ty.hasRuntimeBits()) {
try writer.writeByteNTimes(0xaa, @intCast(usize, layout.payload_size));
} else {
switch (try self.genTypedValue(field_ty, union_val.val, writer)) {
switch (try self.genTypedValue(field_ty, union_val.val)) {
.appended => {},
.externally_managed => |payload| try writer.writeAll(payload),
}
@@ -1098,26 +1110,26 @@ pub const DeclGen = struct {
if (layout.tag_size == 0) {
return Result{ .appended = {} };
}
return self.genTypedValue(union_ty.tag_ty, union_val.tag, writer);
return self.genTypedValue(union_ty.tag_ty, union_val.tag);
},
.Pointer => switch (val.tag()) {
.variable => {
const decl = val.castTag(.variable).?.data.owner_decl;
return self.lowerDeclRef(ty, val, decl, writer);
return self.lowerDeclRef(ty, val, decl);
},
.decl_ref => {
const decl = val.castTag(.decl_ref).?.data;
return self.lowerDeclRef(ty, val, decl, writer);
return self.lowerDeclRef(ty, val, decl);
},
.slice => {
const slice = val.castTag(.slice).?.data;
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = ty.slicePtrFieldType(&buf);
switch (try self.genTypedValue(ptr_ty, slice.ptr, writer)) {
switch (try self.genTypedValue(ptr_ty, slice.ptr)) {
.externally_managed => |data| try writer.writeAll(data),
.appended => {},
}
switch (try self.genTypedValue(Type.usize, slice.len, writer)) {
switch (try self.genTypedValue(Type.usize, slice.len)) {
.externally_managed => |data| try writer.writeAll(data),
.appended => {},
}
@@ -1135,14 +1147,14 @@ pub const DeclGen = struct {
const is_pl = val.errorUnionIsPayload();
const err_val = if (!is_pl) val else Value.initTag(.zero);
switch (try self.genTypedValue(error_ty, err_val, writer)) {
switch (try self.genTypedValue(error_ty, err_val)) {
.externally_managed => |data| try writer.writeAll(data),
.appended => {},
}
if (payload_ty.hasRuntimeBits()) {
const pl_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef);
switch (try self.genTypedValue(payload_ty, pl_val, writer)) {
switch (try self.genTypedValue(payload_ty, pl_val)) {
.externally_managed => |data| try writer.writeAll(data),
.appended => {},
}
@@ -1167,11 +1179,12 @@ pub const DeclGen = struct {
}
}
fn lowerDeclRef(self: *DeclGen, ty: Type, val: Value, decl: *Module.Decl, writer: anytype) InnerError!Result {
fn lowerDeclRef(self: *DeclGen, ty: Type, val: Value, decl: *Module.Decl) InnerError!Result {
const writer = self.code.writer();
if (ty.isSlice()) {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const slice_ty = ty.slicePtrFieldType(&buf);
switch (try self.genTypedValue(slice_ty, val, writer)) {
switch (try self.genTypedValue(slice_ty, val)) {
.appended => {},
.externally_managed => |payload| try writer.writeAll(payload),
}
@@ -1179,7 +1192,7 @@ pub const DeclGen = struct {
.base = .{ .tag = .int_u64 },
.data = val.sliceLen(),
};
return self.genTypedValue(Type.usize, Value.initPayload(&slice_len.base), writer);
return self.genTypedValue(Type.usize, Value.initPayload(&slice_len.base));
}
decl.markAlive();

View File

@@ -680,6 +680,9 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.wrap_errunion_err => try self.airWrapErrUnionErr(inst),
// zig fmt: on
}
assert(!self.register_manager.frozenRegsExist());
if (std.debug.runtime_safety) {
if (self.air_bookkeeping < old_air_bookkeeping + 1) {
std.debug.panic("in codegen.zig, handling of AIR instruction %{d} ('{}') did not do proper bookkeeping. Look for a missing call to finishAir.", .{ inst, air_tags[inst] });
@@ -809,7 +812,7 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
const stack_mcv = try self.allocRegOrMem(inst, false);
log.debug("spilling {d} to stack mcv {any}", .{ inst, stack_mcv });
const reg_mcv = self.getResolvedInstValue(inst);
assert(reg == reg_mcv.register.to64());
assert(reg.to64() == reg_mcv.register.to64());
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
try branch.inst_table.put(self.gpa, inst, stack_mcv);
try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
@@ -827,9 +830,9 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
/// Allocates a new register and copies `mcv` into it.
/// `reg_owner` is the instruction that gets associated with the register in the register table.
/// This can have a side effect of spilling instructions to the stack to free up a register.
fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue {
fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, ty: Type, mcv: MCValue) !MCValue {
const reg = try self.register_manager.allocReg(reg_owner, &.{});
try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv);
try self.genSetReg(ty, reg, mcv);
return MCValue{ .register = reg };
}
@@ -838,11 +841,12 @@ fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCVa
fn copyToNewRegisterWithExceptions(
self: *Self,
reg_owner: Air.Inst.Index,
ty: Type,
mcv: MCValue,
exceptions: []const Register,
) !MCValue {
const reg = try self.register_manager.allocReg(reg_owner, exceptions);
try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv);
try self.genSetReg(ty, reg, mcv);
return MCValue{ .register = reg };
}
@@ -892,13 +896,10 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
if (operand_abi_size > 8 or dest_abi_size > 8) {
return self.fail("TODO implement intCast for abi sizes larger than 8", .{});
}
const reg = switch (operand) {
.register => |src_reg| try self.register_manager.allocReg(inst, &.{src_reg}),
else => try self.register_manager.allocReg(inst, &.{}),
};
try self.genSetReg(dest_ty, reg, .{ .immediate = 0 });
try self.genSetReg(dest_ty, reg, operand);
break :blk .{ .register = registerAlias(reg, @intCast(u32, dest_abi_size)) };
if (operand.isRegister()) self.register_manager.freezeRegs(&.{operand.register});
defer if (operand.isRegister()) self.register_manager.unfreezeRegs(&.{operand.register});
break :blk try self.copyToNewRegister(inst, dest_ty, operand);
};
return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
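
The rewritten `airIntCast` above also shows the register-allocator discipline this branch introduces: a register that must stay live across an allocation is frozen first and unfrozen afterwards, and `genBody` now asserts that no registers are left frozen once an instruction has been handled. A minimal sketch of the idiom, reusing the declarations from this file (`Self`, `MCValue`, `register_manager`); this is a sketch, not the backend's verbatim code:

fn copyOperandToFreshReg(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCValue {
    // Pin the operand's register while a destination is allocated, so that
    // allocReg cannot spill or hand back the register we still need to read.
    if (operand.isRegister()) self.register_manager.freezeRegs(&.{operand.register});
    defer if (operand.isRegister()) self.register_manager.unfreezeRegs(&.{operand.register});

    const reg = try self.register_manager.allocReg(inst, &.{});
    try self.genSetReg(ty, reg, operand);
    return MCValue{ .register = reg };
}

This is essentially what the new `copyToNewRegister(inst, ty, mcv)` call sites do, with the freeze/unfreeze handled by the caller as in the hunk above.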
@@ -1208,7 +1209,7 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
if (self.reuseOperand(inst, ty_op.operand, 0, operand)) {
break :result operand;
}
break :result try self.copyToNewRegister(inst, operand);
break :result try self.copyToNewRegister(inst, self.air.typeOfIndex(inst), operand);
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
@@ -1479,16 +1480,11 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
const index_ty = self.air.typeOf(extra.rhs);
const index = try self.resolveInst(extra.rhs);
const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size);
const dst_mcv = blk: {
switch (ptr) {
.ptr_stack_offset => {
const reg = try self.register_manager.allocReg(inst, &.{offset_reg});
try self.genSetReg(ptr_ty, reg, ptr);
break :blk .{ .register = reg };
},
else => return self.fail("TODO implement ptr_elem_ptr when ptr is {}", .{ptr}),
}
};
self.register_manager.freezeRegs(&.{offset_reg});
defer self.register_manager.unfreezeRegs(&.{offset_reg});
const dst_mcv = try self.copyToNewRegister(inst, ptr_ty, ptr);
try self.genBinMathOpMir(.add, ptr_ty, dst_mcv, .{ .register = offset_reg });
break :result dst_mcv;
};
@@ -1795,22 +1791,62 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
}
fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
return if (self.liveness.isUnused(inst)) .dead else result: {
const mcv = try self.resolveInst(operand);
const struct_ty = self.air.typeOf(operand).childType();
const struct_size = @intCast(i32, struct_ty.abiSize(self.target.*));
const struct_field_offset = @intCast(i32, struct_ty.structFieldOffset(index, self.target.*));
const struct_field_ty = struct_ty.structFieldType(index);
const struct_field_size = @intCast(i32, struct_field_ty.abiSize(self.target.*));
if (self.liveness.isUnused(inst)) {
return MCValue.dead;
}
const mcv = try self.resolveInst(operand);
const ptr_ty = self.air.typeOf(operand);
const struct_ty = ptr_ty.childType();
const struct_size = @intCast(u32, struct_ty.abiSize(self.target.*));
const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
const struct_field_ty = struct_ty.structFieldType(index);
const struct_field_size = @intCast(u32, struct_field_ty.abiSize(self.target.*));
const dst_mcv: MCValue = result: {
switch (mcv) {
.stack_offset => {
const offset_reg = try self.copyToTmpRegister(ptr_ty, .{
.immediate = struct_field_offset,
});
self.register_manager.freezeRegs(&.{offset_reg});
defer self.register_manager.unfreezeRegs(&.{offset_reg});
const dst_mcv = try self.copyToNewRegister(inst, ptr_ty, mcv);
try self.genBinMathOpMir(.add, ptr_ty, dst_mcv, .{ .register = offset_reg });
break :result dst_mcv;
},
.ptr_stack_offset => |off| {
const ptr_stack_offset = off + struct_size - struct_field_offset - struct_field_size;
const offset_to_field = struct_size - struct_field_offset - struct_field_size;
const ptr_stack_offset = off + @intCast(i32, offset_to_field);
break :result MCValue{ .ptr_stack_offset = ptr_stack_offset };
},
.register => |reg| {
const offset_reg = try self.copyToTmpRegister(ptr_ty, .{
.immediate = struct_field_offset,
});
self.register_manager.freezeRegs(&.{offset_reg});
defer self.register_manager.unfreezeRegs(&.{offset_reg});
const can_reuse_operand = self.reuseOperand(inst, operand, 0, mcv);
const result_reg = blk: {
if (can_reuse_operand) {
break :blk reg;
} else {
self.register_manager.freezeRegs(&.{reg});
const result_reg = try self.register_manager.allocReg(inst, &.{});
try self.genSetReg(ptr_ty, result_reg, mcv);
break :blk result_reg;
}
};
defer if (!can_reuse_operand) self.register_manager.unfreezeRegs(&.{reg});
try self.genBinMathOpMir(.add, ptr_ty, .{ .register = result_reg }, .{ .register = offset_reg });
break :result MCValue{ .register = result_reg };
},
else => return self.fail("TODO implement codegen struct_field_ptr for {}", .{mcv}),
}
};
return dst_mcv;
}
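
In the `.ptr_stack_offset` case above, the field pointer is derived purely arithmetically: `offset_to_field = struct_size - struct_field_offset - struct_field_size` is the distance from the end of the field to the end of the struct. A quick hand check of that formula (illustrative only, not part of the PR): for a struct of ABI size 8 with two `u32` fields at offsets 0 and 4, the second field yields 8 - 4 - 4 = 0 and the first yields 8 - 0 - 4 = 4.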
fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
@@ -1859,13 +1895,14 @@ fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs:
// Source operand can be an immediate, 8 bits or 32 bits.
// So, if either one of the operands dies with this instruction, we can use it
// as the result MCValue.
const dst_ty = self.air.typeOfIndex(inst);
var dst_mcv: MCValue = undefined;
var src_mcv: MCValue = undefined;
if (self.reuseOperand(inst, op_lhs, 0, lhs)) {
// LHS dies; use it as the destination.
// Both operands cannot be memory.
if (lhs.isMemory() and rhs.isMemory()) {
dst_mcv = try self.copyToNewRegister(inst, lhs);
dst_mcv = try self.copyToNewRegister(inst, dst_ty, lhs);
src_mcv = rhs;
} else {
dst_mcv = lhs;
@@ -1875,7 +1912,7 @@ fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs:
// RHS dies; use it as the destination.
// Both operands cannot be memory.
if (lhs.isMemory() and rhs.isMemory()) {
dst_mcv = try self.copyToNewRegister(inst, rhs);
dst_mcv = try self.copyToNewRegister(inst, dst_ty, rhs);
src_mcv = lhs;
} else {
dst_mcv = rhs;
@@ -1887,18 +1924,18 @@ fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs:
// If the allocated register is the same as the rhs register, don't allocate that one
// and instead spill a subsequent one. Otherwise, this can result in a miscompilation
// in the presence of several binary operations performed in a single block.
try self.copyToNewRegisterWithExceptions(inst, lhs, &.{rhs.register})
try self.copyToNewRegisterWithExceptions(inst, dst_ty, lhs, &.{rhs.register})
else
try self.copyToNewRegister(inst, lhs);
try self.copyToNewRegister(inst, dst_ty, lhs);
src_mcv = rhs;
} else {
dst_mcv = if (lhs.isRegister())
// If the allocated register is the same as the rhs register, don't allocate that one
// and instead spill a subsequent one. Otherwise, this can result in a miscompilation
// in the presence of several binary operations performed in a single block.
try self.copyToNewRegisterWithExceptions(inst, rhs, &.{lhs.register})
try self.copyToNewRegisterWithExceptions(inst, dst_ty, rhs, &.{lhs.register})
else
try self.copyToNewRegister(inst, rhs);
try self.copyToNewRegister(inst, dst_ty, rhs);
src_mcv = lhs;
}
}
@@ -1917,7 +1954,6 @@ fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs:
}
// Now for step 2, we assign an MIR instruction
const dst_ty = self.air.typeOfIndex(inst);
const air_tags = self.air.instructions.items(.tag);
switch (air_tags[inst]) {
.add, .addwrap, .ptr_add => try self.genBinMathOpMir(.add, dst_ty, dst_mcv, src_mcv),
@@ -2417,7 +2453,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
.register => |reg| {
if (Register.allocIndex(reg) == null) {
// Save function return value in a callee saved register
break :result try self.copyToNewRegister(inst, info.return_value);
break :result try self.copyToNewRegister(inst, self.air.typeOfIndex(inst), info.return_value);
}
},
else => {},
@@ -2494,7 +2530,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
// Either one, but not both, can be a memory operand.
// Source operand can be an immediate, 8 bits or 32 bits.
const dst_mcv = if (lhs.isImmediate() or (lhs.isMemory() and rhs.isMemory()))
try self.copyToNewRegister(inst, lhs)
try self.copyToNewRegister(inst, ty, lhs)
else
lhs;
// This instruction supports only signed 32-bit immediates at most.

View File

@@ -373,11 +373,24 @@ pub fn generateSymbol(
},
.Struct => {
// TODO debug info
// TODO padding of struct members
const struct_obj = typed_value.ty.castTag(.@"struct").?.data;
if (struct_obj.layout == .Packed) {
return Result{
.fail = try ErrorMsg.create(
bin_file.allocator,
src_loc,
"TODO implement generateSymbol for packed struct",
.{},
),
};
}
const struct_begin = code.items.len;
const field_vals = typed_value.val.castTag(.@"struct").?.data;
for (field_vals) |field_val, index| {
const field_ty = typed_value.ty.structFieldType(index);
if (!field_ty.hasRuntimeBits()) continue;
switch (try generateSymbol(bin_file, src_loc, .{
.ty = field_ty,
.val = field_val,
@@ -388,6 +401,16 @@ pub fn generateSymbol(
},
.fail => |em| return Result{ .fail = em },
}
const unpadded_field_end = code.items.len - struct_begin;
// Pad struct members if required
const target = bin_file.options.target;
const padded_field_end = typed_value.ty.structFieldOffset(index + 1, target);
const padding = try math.cast(usize, padded_field_end - unpadded_field_end);
if (padding > 0) {
try code.writer().writeByteNTimes(0, padding);
}
}
return Result{ .appended = {} };
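
Since `generateSymbol` now mirrors the wasm path, the padding is easiest to see on a struct whose offsets are fixed by the C ABI. A small check of the offsets that drive the loop above (illustrative, not from the PR; the tail-padding line assumes a target where `u64` is 8-byte aligned, e.g. x86_64):

const std = @import("std");

const Example = extern struct { a: u8, b: u32, c: u64, d: u32 };

test "offsets that drive the padding loop (sketch)" {
    // After `a` (1 byte), the next field's offset is 4, so 3 zero bytes of padding are emitted.
    try std.testing.expectEqual(@as(usize, 4), @offsetOf(Example, "b"));
    // After `b`, the running length is already 8, so no padding is needed before `c`.
    try std.testing.expectEqual(@as(usize, 8), @offsetOf(Example, "c"));
    try std.testing.expectEqual(@as(usize, 16), @offsetOf(Example, "d"));
    // 4 bytes of tail padding after `d` bring the symbol up to the ABI size.
    try std.testing.expectEqual(@as(usize, 24), @sizeOf(Example));
}

The new `StructWithFields` behavior test below exercises the same behavior with a default-layout struct.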

View File

@@ -2367,6 +2367,7 @@ fn deinitRelocs(gpa: Allocator, table: *File.DbgInfoTypeRelocsTable) void {
}
fn updateDeclCode(self: *Elf, decl: *Module.Decl, code: []const u8, stt_bits: u8) !*elf.Elf64_Sym {
log.debug("updateDeclCode {s}{*}", .{ mem.sliceTo(decl.name, 0), decl });
const required_alignment = decl.ty.abiAlignment(self.base.options.target);
const block_list = self.getDeclBlockList(decl);

View File

@@ -34,6 +34,7 @@ test {
_ = @import("behavior/slice_sentinel_comptime.zig");
_ = @import("behavior/type.zig");
_ = @import("behavior/truncate.zig");
_ = @import("behavior/struct.zig");
if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_x86_64) {
// Tests that pass for stage1, llvm backend, C backend, wasm backend.
@@ -69,7 +70,6 @@ test {
_ = @import("behavior/ptrcast.zig");
_ = @import("behavior/ref_var_in_if_after_if_2nd_switch_prong.zig");
_ = @import("behavior/src.zig");
_ = @import("behavior/struct.zig");
_ = @import("behavior/this.zig");
_ = @import("behavior/try.zig");
_ = @import("behavior/type_info.zig");

View File

@@ -5,8 +5,6 @@ const maxInt = std.math.maxInt;
const builtin = @import("builtin");
test "int to ptr cast" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const x = @as(usize, 13);
const y = @intToPtr(*u8, x);
const z = @ptrToInt(y);
@@ -14,8 +12,6 @@ test "int to ptr cast" {
}
test "integer literal to pointer cast" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const vga_mem = @intToPtr(*u16, 0xB8000);
try expect(@ptrToInt(vga_mem) == 0xB8000);
}

View File

@@ -9,6 +9,8 @@ const maxInt = std.math.maxInt;
top_level_field: i32,
test "top level fields" {
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var instance = @This(){
.top_level_field = 1234,
};
@@ -16,6 +18,39 @@ test "top level fields" {
try expect(@as(i32, 1235) == instance.top_level_field);
}
const StructWithFields = struct {
a: u8,
b: u32,
c: u64,
d: u32,
fn first(self: *const StructWithFields) u8 {
return self.a;
}
fn second(self: *const StructWithFields) u32 {
return self.b;
}
fn third(self: *const StructWithFields) u64 {
return self.c;
}
fn fourth(self: *const StructWithFields) u32 {
return self.d;
}
};
test "non-packed struct has fields padded out to the required alignment" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const foo = StructWithFields{ .a = 5, .b = 1, .c = 10, .d = 2 };
try expect(foo.first() == 5);
try expect(foo.second() == 1);
try expect(foo.third() == 10);
try expect(foo.fourth() == 2);
}
const StructWithNoFields = struct {
fn add(a: i32, b: i32) i32 {
return a + b;
@@ -29,6 +64,8 @@ const StructFoo = struct {
};
test "structs" {
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var foo: StructFoo = undefined;
@memset(@ptrCast([*]u8, &foo), 0, @sizeOf(StructFoo));
foo.a += 1;
@@ -45,6 +82,8 @@ fn testMutation(foo: *StructFoo) void {
}
test "struct byval assign" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var foo1: StructFoo = undefined;
var foo2: StructFoo = undefined;
@@ -56,6 +95,8 @@ test "struct byval assign" {
}
test "call struct static method" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const result = StructWithNoFields.add(3, 4);
try expect(result == 7);
}
@@ -85,6 +126,8 @@ const Val = struct {
};
test "fn call of struct field" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const Foo = struct {
ptr: fn () i32,
};
@@ -114,12 +157,16 @@ const MemberFnTestFoo = struct {
};
test "call member function directly" {
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const instance = MemberFnTestFoo{ .x = 1234 };
const result = MemberFnTestFoo.member(instance);
try expect(result == 1234);
}
test "store member function in variable" {
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const instance = MemberFnTestFoo{ .x = 1234 };
const memberFn = MemberFnTestFoo.member;
const result = memberFn(instance);
@@ -127,6 +174,8 @@ test "store member function in variable" {
}
test "member functions" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const r = MemberFnRand{ .seed = 1234 };
try expect(r.getSeed() == 1234);
}
@@ -138,6 +187,8 @@ const MemberFnRand = struct {
};
test "return struct byval from function" {
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const bar = makeBar2(1234, 5678);
try expect(bar.y == 5678);
}
@@ -153,6 +204,8 @@ fn makeBar2(x: i32, y: i32) Bar {
}
test "call method with mutable reference to struct with no fields" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
fn doC(s: *const @This()) bool {
_ = s;
@@ -172,6 +225,8 @@ test "call method with mutable reference to struct with no fields" {
}
test "usingnamespace within struct scope" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
usingnamespace struct {
pub fn inner() i32 {
@@ -183,6 +238,8 @@ test "usingnamespace within struct scope" {
}
test "struct field init with catch" {
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
var x: anyerror!isize = 1;