Merge pull request #14993 from jacobly0/x86_64

x86_64: implement more operations
Jakub Konka authored this commit on 2023-03-21 11:16:29 +01:00 (committed by GitHub).
30 changed files with 2882 additions and 1537 deletions


@@ -3011,41 +3011,16 @@ fn optionalPayload(self: *Self, inst: Air.Inst.Index, mcv: MCValue, optional_ty:
return MCValue{ .register = reg };
}
const offset = @intCast(u32, optional_ty.abiSize(self.target.*) - payload_ty.abiSize(self.target.*));
switch (mcv) {
.register => |source_reg| {
.register => {
// TODO should we reuse the operand here?
const raw_reg = try self.register_manager.allocReg(inst, gp);
const dest_reg = raw_reg.toX();
const shift = @intCast(u6, offset * 8);
if (shift == 0) {
try self.genSetReg(payload_ty, dest_reg, mcv);
} else {
_ = try self.addInst(.{
.tag = if (payload_ty.isSignedInt())
Mir.Inst.Tag.asr_immediate
else
Mir.Inst.Tag.lsr_immediate,
.data = .{ .rr_shift = .{
.rd = dest_reg,
.rn = source_reg.toX(),
.shift = shift,
} },
});
}
try self.genSetReg(payload_ty, dest_reg, mcv);
return MCValue{ .register = self.registerAlias(dest_reg, payload_ty) };
},
.stack_argument_offset => |off| {
return MCValue{ .stack_argument_offset = off + offset };
},
.stack_offset => |off| {
return MCValue{ .stack_offset = off - offset };
},
.memory => |addr| {
return MCValue{ .memory = addr + offset };
},
.stack_argument_offset, .stack_offset, .memory => return mcv,
else => unreachable, // invalid MCValue for an error union
}
}
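Note the simplification: this PR assumes optionals now place the payload at offset 0 with the non-null flag stored after it, at the payload's ABI size, so stack and memory MCValues can be returned unchanged. A minimal Zig sketch of that assumed layout (the type and test are illustrative, not part of the commit):

const std = @import("std");

// Sketch of the assumed optional layout: payload first, flag after it.
fn OptionalRepr(comptime Payload: type) type {
    return extern struct {
        payload: Payload, // offset 0: stack_offset/memory MCValues need no adjustment
        is_some: u8, // offset == payload abi size: the `offset` used throughout this PR
    };
}

test "non-null flag trails the payload" {
    const R = OptionalRepr(u32);
    try std.testing.expectEqual(@as(usize, 0), @offsetOf(R, "payload"));
    try std.testing.expectEqual(@as(usize, 4), @offsetOf(R, "is_some"));
}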
@@ -3289,12 +3264,11 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
const optional_abi_size = @intCast(u32, optional_ty.abiSize(self.target.*));
const optional_abi_align = optional_ty.abiAlignment(self.target.*);
const payload_abi_size = @intCast(u32, payload_ty.abiSize(self.target.*));
const offset = optional_abi_size - payload_abi_size;
const offset = @intCast(u32, payload_ty.abiSize(self.target.*));
const stack_offset = try self.allocMem(optional_abi_size, optional_abi_align, inst);
try self.genSetStack(Type.bool, stack_offset, .{ .immediate = 1 });
try self.genSetStack(payload_ty, stack_offset - @intCast(u32, offset), operand);
try self.genSetStack(payload_ty, stack_offset, operand);
try self.genSetStack(Type.bool, stack_offset - offset, .{ .immediate = 1 });
break :result MCValue{ .stack_offset = stack_offset };
};
@@ -4834,13 +4808,49 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
}
fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
const sentinel_ty: Type = if (!operand_ty.isPtrLikeOptional()) blk: {
const sentinel: struct { ty: Type, bind: ReadArg.Bind } = if (!operand_ty.isPtrLikeOptional()) blk: {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = operand_ty.optionalChild(&buf);
break :blk if (payload_ty.hasRuntimeBitsIgnoreComptime()) Type.bool else operand_ty;
} else operand_ty;
if (!payload_ty.hasRuntimeBitsIgnoreComptime())
break :blk .{ .ty = operand_ty, .bind = operand_bind };
const offset = @intCast(u32, payload_ty.abiSize(self.target.*));
const operand_mcv = try operand_bind.resolveToMcv(self);
const new_mcv: MCValue = switch (operand_mcv) {
.register => |source_reg| new: {
// TODO should we reuse the operand here?
const raw_reg = try self.register_manager.allocReg(null, gp);
const dest_reg = raw_reg.toX();
const shift = @intCast(u6, offset * 8);
if (shift == 0) {
try self.genSetReg(payload_ty, dest_reg, operand_mcv);
} else {
_ = try self.addInst(.{
.tag = if (payload_ty.isSignedInt())
Mir.Inst.Tag.asr_immediate
else
Mir.Inst.Tag.lsr_immediate,
.data = .{ .rr_shift = .{
.rd = dest_reg,
.rn = source_reg.toX(),
.shift = shift,
} },
});
}
break :new .{ .register = self.registerAlias(dest_reg, payload_ty) };
},
.stack_argument_offset => |off| .{ .stack_argument_offset = off + offset },
.stack_offset => |off| .{ .stack_offset = off - offset },
.memory => |addr| .{ .memory = addr + offset },
else => unreachable, // invalid MCValue for an optional
};
break :blk .{ .ty = Type.bool, .bind = .{ .mcv = new_mcv } };
} else .{ .ty = operand_ty, .bind = operand_bind };
const imm_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = 0 } };
return self.cmp(operand_bind, imm_bind, sentinel_ty, .eq);
return self.cmp(sentinel.bind, imm_bind, sentinel.ty, .eq);
}
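For a register-held optional, the rewritten `isNull` shifts the payload bits away so only the flag byte remains (`shift = payload_abi_size * 8`). A worked sketch for a `?u8` packed into a register, written as plain Zig rather than AArch64 MIR (hypothetical helper):

fn isNullFlag(optional_bits: u16) u8 {
    // ?u8: payload in bits 0..8, flag in bits 8..16, so a logical shift
    // right by 8 (`lsr dest, src, #8`) isolates the flag.
    return @truncate(u8, optional_bits >> 8);
}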
fn isNonNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {


@@ -2715,20 +2715,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, ptr_child_ty: Type) InnerError
},
.opt_payload_ptr => {
const payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data;
const parent_ptr = try func.lowerParentPtr(payload_ptr.container_ptr, payload_ptr.container_ty);
var buf: Type.Payload.ElemType = undefined;
const payload_ty = payload_ptr.container_ty.optionalChild(&buf);
if (!payload_ty.hasRuntimeBitsIgnoreComptime() or payload_ty.optionalReprIsPayload()) {
return parent_ptr;
}
const abi_size = payload_ptr.container_ty.abiSize(func.target);
const offset = abi_size - payload_ty.abiSize(func.target);
return WValue{ .memory_offset = .{
.pointer = parent_ptr.memory,
.offset = @intCast(u32, offset),
} };
return func.lowerParentPtr(payload_ptr.container_ptr, payload_ptr.container_ty);
},
else => |tag| return func.fail("TODO: Implement lowerParentPtr for tag: {}", .{tag}),
}
@@ -2889,7 +2876,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
}
} else {
const is_pl = val.tag() == .opt_payload;
return WValue{ .imm32 = if (is_pl) @as(u32, 1) else 0 };
return WValue{ .imm32 = @boolToInt(is_pl) };
},
.Struct => {
const struct_obj = ty.castTag(.@"struct").?.data;
@@ -3882,7 +3869,11 @@ fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcod
// When the payload is zero-bit, we can treat the operand as a value rather than
// a pointer to the stack value
if (payload_ty.hasRuntimeBitsIgnoreComptime()) {
try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset(), .alignment = 1 });
const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse {
const module = func.bin_file.base.options.module.?;
return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(module)});
};
try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 });
}
} else if (payload_ty.isSlice()) {
switch (func.arch()) {
@@ -3911,13 +3902,11 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
if (opt_ty.optionalReprIsPayload()) break :result func.reuseOperand(ty_op.operand, operand);
const offset = opt_ty.abiSize(func.target) - payload_ty.abiSize(func.target);
if (isByRef(payload_ty, func.target)) {
break :result try func.buildPointerOffset(operand, offset, .new);
break :result try func.buildPointerOffset(operand, 0, .new);
}
const payload = try func.load(operand, payload_ty, @intCast(u32, offset));
const payload = try func.load(operand, payload_ty, 0);
break :result try payload.toLocal(func, payload_ty);
};
func.finishAir(inst, result, &.{ty_op.operand});
@@ -3936,8 +3925,7 @@ fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result func.reuseOperand(ty_op.operand, operand);
}
const offset = opt_ty.abiSize(func.target) - payload_ty.abiSize(func.target);
break :result try func.buildPointerOffset(operand, offset, .new);
break :result try func.buildPointerOffset(operand, 0, .new);
};
func.finishAir(inst, result, &.{ty_op.operand});
}
@@ -3956,16 +3944,16 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
return func.finishAir(inst, operand, &.{ty_op.operand});
}
const offset = std.math.cast(u32, opt_ty.abiSize(func.target) - payload_ty.abiSize(func.target)) orelse {
const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse {
const module = func.bin_file.base.options.module.?;
return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(module)});
};
try func.emitWValue(operand);
try func.addImm32(1);
try func.addMemArg(.i32_store8, .{ .offset = operand.offset(), .alignment = 1 });
try func.addMemArg(.i32_store8, .{ .offset = operand.offset() + offset, .alignment = 1 });
const result = try func.buildPointerOffset(operand, offset, .new);
const result = try func.buildPointerOffset(operand, 0, .new);
return func.finishAir(inst, result, &.{ty_op.operand});
}
@@ -3988,7 +3976,7 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (op_ty.optionalReprIsPayload()) {
break :result func.reuseOperand(ty_op.operand, operand);
}
const offset = std.math.cast(u32, op_ty.abiSize(func.target) - payload_ty.abiSize(func.target)) orelse {
const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse {
const module = func.bin_file.base.options.module.?;
return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(module)});
};
@@ -3997,9 +3985,9 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result_ptr = try func.allocStack(op_ty);
try func.emitWValue(result_ptr);
try func.addImm32(1);
try func.addMemArg(.i32_store8, .{ .offset = result_ptr.offset(), .alignment = 1 });
try func.addMemArg(.i32_store8, .{ .offset = result_ptr.offset() + offset, .alignment = 1 });
const payload_ptr = try func.buildPointerOffset(result_ptr, offset, .new);
const payload_ptr = try func.buildPointerOffset(result_ptr, 0, .new);
try func.store(payload_ptr, operand, payload_ty, 0);
break :result result_ptr;
};
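With the flipped layout, the wasm `airWrapOptional` now writes the payload at the start of the result frame and the non-null byte right after it at `offset = payload_abi_size`. A worked frame for a `?u32`, assuming ABI sizes of 4 and 1 (sketch only):

const std = @import("std");

test "wrapped optional frame layout" {
    var frame: [5]u8 = undefined;
    std.mem.writeIntLittle(u32, frame[0..4], 0xDEADBEEF); // payload via payload_ptr at offset 0
    frame[4] = 1; // i32.store8 of the non-null flag at offset 4
    try std.testing.expectEqual(@as(u32, 0xDEADBEEF), std.mem.readIntLittle(u32, frame[0..4]));
}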
@@ -4719,7 +4707,6 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
assert(op == .eq or op == .neq);
var buf: Type.Payload.ElemType = undefined;
const payload_ty = operand_ty.optionalChild(&buf);
const offset = @intCast(u32, operand_ty.abiSize(func.target) - payload_ty.abiSize(func.target));
// We store the final result in here that will be validated
// if the optional is truly equal.
@@ -4732,8 +4719,8 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
try func.addTag(.i32_ne); // inverse so we can exit early
try func.addLabel(.br_if, 0);
_ = try func.load(lhs, payload_ty, offset);
_ = try func.load(rhs, payload_ty, offset);
_ = try func.load(lhs, payload_ty, 0);
_ = try func.load(rhs, payload_ty, 0);
const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, func.target) });
try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
try func.addLabel(.br_if, 0);
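Dropping `offset` here relies on the same invariant: both payloads now live at offset 0, after the non-null bytes have already been compared. The semantics being lowered, as a plain Zig sketch (hypothetical helper, not the wasm codegen itself):

const std = @import("std");

fn optionalsEqual(comptime T: type, a: ?T, b: ?T) bool {
    if ((a == null) != (b == null)) return false; // null flags differ: exit early
    if (a == null) return true; // both null: payloads are irrelevant
    return a.? == b.?; // both set: compare payloads (loaded at offset 0)
}

test "optional equality" {
    try std.testing.expect(optionalsEqual(u32, 7, 7));
    try std.testing.expect(!optionalsEqual(u32, 7, null));
}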

File diff suppressed because it is too large.


@@ -73,6 +73,13 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
.adc,
.add,
.@"and",
.bsf,
.bsr,
.bswap,
.bt,
.btc,
.btr,
.bts,
.call,
.cbw,
.cwde,
@@ -81,6 +88,7 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
.cdq,
.cqo,
.cmp,
.cmpxchg,
.div,
.fisttp,
.fld,
@@ -89,35 +97,71 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
.int3,
.jmp,
.lea,
.lfence,
.lzcnt,
.mfence,
.mov,
.movbe,
.movzx,
.mul,
.neg,
.nop,
.not,
.@"or",
.pop,
.popcnt,
.push,
.rcl,
.rcr,
.ret,
.rol,
.ror,
.sal,
.sar,
.sbb,
.sfence,
.shl,
.shr,
.sub,
.syscall,
.@"test",
.tzcnt,
.ud2,
.xadd,
.xchg,
.xor,
.addss,
.cmpss,
.divss,
.maxss,
.minss,
.movss,
.mulss,
.roundss,
.subss,
.ucomiss,
.addsd,
.cmpsd,
.divsd,
.maxsd,
.minsd,
.movsd,
.mulsd,
.roundsd,
.subsd,
.ucomisd,
=> try emit.mirEncodeGeneric(tag, inst),
.cmps,
.lods,
.movs,
.scas,
.stos,
=> try emit.mirString(tag, inst),
.cmpxchgb => try emit.mirCmpxchgBytes(inst),
.jmp_reloc => try emit.mirJmpReloc(inst),
.call_extern => try emit.mirCallExtern(inst),
@@ -171,18 +215,8 @@ fn fixupRelocs(emit: *Emit) InnerError!void {
}
}
fn encode(emit: *Emit, mnemonic: Instruction.Mnemonic, ops: struct {
op1: Instruction.Operand = .none,
op2: Instruction.Operand = .none,
op3: Instruction.Operand = .none,
op4: Instruction.Operand = .none,
}) InnerError!void {
const inst = try Instruction.new(mnemonic, .{
.op1 = ops.op1,
.op2 = ops.op2,
.op3 = ops.op3,
.op4 = ops.op4,
});
fn encode(emit: *Emit, mnemonic: Instruction.Mnemonic, ops: Instruction.Init) InnerError!void {
const inst = try Instruction.new(mnemonic, ops);
return inst.encode(emit.code.writer());
}
@@ -194,6 +228,20 @@ fn mirEncodeGeneric(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerE
const ops = emit.mir.instructions.items(.ops)[inst];
const data = emit.mir.instructions.items(.data)[inst];
const prefix: Instruction.Prefix = switch (ops) {
.lock_m_sib,
.lock_m_rip,
.lock_mi_u_sib,
.lock_mi_u_rip,
.lock_mi_s_sib,
.lock_mi_s_rip,
.lock_mr_sib,
.lock_mr_rip,
.lock_moffs_rax,
=> .lock,
else => .none,
};
var op1: Instruction.Operand = .none;
var op2: Instruction.Operand = .none;
var op3: Instruction.Operand = .none;
@@ -232,35 +280,35 @@ fn mirEncodeGeneric(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerE
op2 = .{ .reg = data.rri.r2 };
op3 = .{ .imm = imm };
},
.m_sib => {
.m_sib, .lock_m_sib => {
const msib = emit.mir.extraData(Mir.MemorySib, data.payload).data;
op1 = .{ .mem = Mir.MemorySib.decode(msib) };
},
.m_rip => {
.m_rip, .lock_m_rip => {
const mrip = emit.mir.extraData(Mir.MemoryRip, data.payload).data;
op1 = .{ .mem = Mir.MemoryRip.decode(mrip) };
},
.mi_s_sib, .mi_u_sib => {
.mi_s_sib, .mi_u_sib, .lock_mi_s_sib, .lock_mi_u_sib => {
const msib = emit.mir.extraData(Mir.MemorySib, data.xi.payload).data;
const imm = switch (ops) {
.mi_s_sib => Immediate.s(@bitCast(i32, data.xi.imm)),
.mi_u_sib => Immediate.u(data.xi.imm),
.mi_s_sib, .lock_mi_s_sib => Immediate.s(@bitCast(i32, data.xi.imm)),
.mi_u_sib, .lock_mi_u_sib => Immediate.u(data.xi.imm),
else => unreachable,
};
op1 = .{ .mem = Mir.MemorySib.decode(msib) };
op2 = .{ .imm = imm };
},
.mi_u_rip, .mi_s_rip => {
.mi_u_rip, .mi_s_rip, .lock_mi_u_rip, .lock_mi_s_rip => {
const mrip = emit.mir.extraData(Mir.MemoryRip, data.xi.payload).data;
const imm = switch (ops) {
.mi_s_rip => Immediate.s(@bitCast(i32, data.xi.imm)),
.mi_u_rip => Immediate.u(data.xi.imm),
.mi_s_rip, .lock_mi_s_rip => Immediate.s(@bitCast(i32, data.xi.imm)),
.mi_u_rip, .lock_mi_u_rip => Immediate.u(data.xi.imm),
else => unreachable,
};
op1 = .{ .mem = Mir.MemoryRip.decode(mrip) };
op2 = .{ .imm = imm };
},
.rm_sib, .mr_sib => {
.rm_sib, .mr_sib, .lock_mr_sib => {
const msib = emit.mir.extraData(Mir.MemorySib, data.rx.payload).data;
const op_r = .{ .reg = data.rx.r1 };
const op_m = .{ .mem = Mir.MemorySib.decode(msib) };
@@ -269,23 +317,23 @@ fn mirEncodeGeneric(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerE
op1 = op_r;
op2 = op_m;
},
.mr_sib => {
.mr_sib, .lock_mr_sib => {
op1 = op_m;
op2 = op_r;
},
else => unreachable,
}
},
.rm_rip, .mr_rip => {
.rm_rip, .mr_rip, .lock_mr_rip => {
const mrip = emit.mir.extraData(Mir.MemoryRip, data.rx.payload).data;
const op_r = .{ .reg = data.rx.r1 };
const op_m = .{ .mem = Mir.MemoryRip.decode(mrip) };
switch (ops) {
.rm_sib => {
.rm_rip => {
op1 = op_r;
op2 = op_m;
},
.mr_sib => {
.mr_rip, .lock_mr_rip => {
op1 = op_m;
op2 = op_r;
},
@@ -299,6 +347,7 @@ fn mirEncodeGeneric(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerE
}
return emit.encode(mnemonic, .{
.prefix = prefix,
.op1 = op1,
.op2 = op2,
.op3 = op3,
@@ -306,6 +355,61 @@ fn mirEncodeGeneric(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerE
});
}
fn mirString(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerError!void {
const ops = emit.mir.instructions.items(.ops)[inst];
switch (ops) {
.string => {
const data = emit.mir.instructions.items(.data)[inst].string;
const mnemonic = switch (tag) {
inline .cmps, .lods, .movs, .scas, .stos => |comptime_tag| switch (data.width) {
inline else => |comptime_width| @field(
Instruction.Mnemonic,
@tagName(comptime_tag) ++ @tagName(comptime_width),
),
},
else => unreachable,
};
return emit.encode(mnemonic, .{ .prefix = switch (data.repeat) {
inline else => |comptime_repeat| @field(Instruction.Prefix, @tagName(comptime_repeat)),
} });
},
else => unreachable,
}
}
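`mirString` leans on `inline else` to turn the runtime `tag` and `data.width` values into comptime captures, so the mnemonic name can be spliced together from tag names. The technique in isolation (self-contained sketch with stand-in enums):

const std = @import("std");

const Tag = enum { cmps, lods, movs, scas, stos };
const Width = enum { b, w, d, q };

// `inline else` instantiates one comptime branch per tag, making
// @tagName(...) ++ @tagName(...) a comptime string concatenation.
fn stringMnemonic(tag: Tag, width: Width) []const u8 {
    return switch (tag) {
        inline else => |comptime_tag| switch (width) {
            inline else => |comptime_width| @tagName(comptime_tag) ++ @tagName(comptime_width),
        },
    };
}

test "mnemonic splicing" {
    try std.testing.expectEqualStrings("movsq", stringMnemonic(.movs, .q));
}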
fn mirCmpxchgBytes(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const ops = emit.mir.instructions.items(.ops)[inst];
const data = emit.mir.instructions.items(.data)[inst];
var op1: Instruction.Operand = .none;
switch (ops) {
.m_sib, .lock_m_sib => {
const sib = emit.mir.extraData(Mir.MemorySib, data.payload).data;
op1 = .{ .mem = Mir.MemorySib.decode(sib) };
},
.m_rip, .lock_m_rip => {
const rip = emit.mir.extraData(Mir.MemoryRip, data.payload).data;
op1 = .{ .mem = Mir.MemoryRip.decode(rip) };
},
else => unreachable,
}
const mnemonic: Instruction.Mnemonic = switch (op1.mem.bitSize()) {
64 => .cmpxchg8b,
128 => .cmpxchg16b,
else => unreachable,
};
return emit.encode(mnemonic, .{
.prefix = switch (ops) {
.m_sib, .m_rip => .none,
.lock_m_sib, .lock_m_rip => .lock,
else => unreachable,
},
.op1 = op1,
});
}
fn mirMovMoffs(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const ops = emit.mir.instructions.items(.ops)[inst];
const payload = emit.mir.instructions.items(.data)[inst].payload;
@@ -319,8 +423,13 @@ fn mirMovMoffs(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
.op2 = .{ .mem = Memory.moffs(seg, offset) },
});
},
.moffs_rax => {
.moffs_rax, .lock_moffs_rax => {
try emit.encode(.mov, .{
.prefix = switch (ops) {
.moffs_rax => .none,
.lock_moffs_rax => .lock,
else => unreachable,
},
.op1 = .{ .mem = Memory.moffs(seg, offset) },
.op2 = .{ .reg = .rax },
});
@@ -365,37 +474,70 @@ fn mirMovsx(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
}
fn mnemonicFromConditionCode(comptime basename: []const u8, cc: bits.Condition) Instruction.Mnemonic {
inline for (@typeInfo(bits.Condition).Enum.fields) |field| {
if (mem.eql(u8, field.name, @tagName(cc)))
return @field(Instruction.Mnemonic, basename ++ field.name);
} else unreachable;
return switch (cc) {
inline else => |comptime_cc| @field(Instruction.Mnemonic, basename ++ @tagName(comptime_cc)),
};
}
fn mirCmovcc(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const ops = emit.mir.instructions.items(.ops)[inst];
switch (ops) {
.rr_c => {
const data = emit.mir.instructions.items(.data)[inst].rr_c;
.rr_cc => {
const data = emit.mir.instructions.items(.data)[inst].rr_cc;
const mnemonic = mnemonicFromConditionCode("cmov", data.cc);
return emit.encode(mnemonic, .{
.op1 = .{ .reg = data.r1 },
.op2 = .{ .reg = data.r2 },
});
},
else => unreachable, // TODO
.rm_sib_cc => {
const data = emit.mir.instructions.items(.data)[inst].rx_cc;
const extra = emit.mir.extraData(Mir.MemorySib, data.payload).data;
const mnemonic = mnemonicFromConditionCode("cmov", data.cc);
return emit.encode(mnemonic, .{
.op1 = .{ .reg = data.r1 },
.op2 = .{ .mem = Mir.MemorySib.decode(extra) },
});
},
.rm_rip_cc => {
const data = emit.mir.instructions.items(.data)[inst].rx_cc;
const extra = emit.mir.extraData(Mir.MemoryRip, data.payload).data;
const mnemonic = mnemonicFromConditionCode("cmov", data.cc);
return emit.encode(mnemonic, .{
.op1 = .{ .reg = data.r1 },
.op2 = .{ .mem = Mir.MemoryRip.decode(extra) },
});
},
else => unreachable,
}
}
fn mirSetcc(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const ops = emit.mir.instructions.items(.ops)[inst];
switch (ops) {
.r_c => {
const data = emit.mir.instructions.items(.data)[inst].r_c;
.r_cc => {
const data = emit.mir.instructions.items(.data)[inst].r_cc;
const mnemonic = mnemonicFromConditionCode("set", data.cc);
return emit.encode(mnemonic, .{
.op1 = .{ .reg = data.r1 },
});
},
.m_sib_cc => {
const data = emit.mir.instructions.items(.data)[inst].x_cc;
const extra = emit.mir.extraData(Mir.MemorySib, data.payload).data;
const mnemonic = mnemonicFromConditionCode("set", data.cc);
return emit.encode(mnemonic, .{
.op1 = .{ .mem = Mir.MemorySib.decode(extra) },
});
},
.m_rip_cc => {
const data = emit.mir.instructions.items(.data)[inst].x_cc;
const extra = emit.mir.extraData(Mir.MemoryRip, data.payload).data;
const mnemonic = mnemonicFromConditionCode("set", data.cc);
return emit.encode(mnemonic, .{
.op1 = .{ .mem = Mir.MemoryRip.decode(extra) },
});
},
else => unreachable, // TODO
}
}


@@ -19,17 +19,12 @@ op1: Op,
op2: Op,
op3: Op,
op4: Op,
opc_len: u2,
opc: [3]u8,
opc_len: u3,
opc: [7]u8,
modrm_ext: u3,
mode: Mode,
pub fn findByMnemonic(mnemonic: Mnemonic, args: struct {
op1: Instruction.Operand,
op2: Instruction.Operand,
op3: Instruction.Operand,
op4: Instruction.Operand,
}) !?Encoding {
pub fn findByMnemonic(mnemonic: Mnemonic, args: Instruction.Init) !?Encoding {
const input_op1 = Op.fromOperand(args.op1);
const input_op2 = Op.fromOperand(args.op2);
const input_op3 = Op.fromOperand(args.op3);
@@ -69,18 +64,19 @@ pub fn findByMnemonic(mnemonic: Mnemonic, args: struct {
var candidates: [10]Encoding = undefined;
var count: usize = 0;
for (table) |entry| {
const enc = Encoding{
var enc = Encoding{
.mnemonic = entry[0],
.op_en = entry[1],
.op1 = entry[2],
.op2 = entry[3],
.op3 = entry[4],
.op4 = entry[5],
.opc_len = entry[6],
.opc = .{ entry[7], entry[8], entry[9] },
.modrm_ext = entry[10],
.mode = entry[11],
.opc_len = @intCast(u3, entry[6].len),
.opc = undefined,
.modrm_ext = entry[7],
.mode = entry[8],
};
std.mem.copy(u8, &enc.opc, entry[6]);
if (enc.mnemonic == mnemonic and
input_op1.isSubset(enc.op1, enc.mode) and
input_op2.isSubset(enc.op2, enc.mode) and
@@ -108,17 +104,13 @@ pub fn findByMnemonic(mnemonic: Mnemonic, args: struct {
if (count == 1) return candidates[0];
const EncodingLength = struct {
fn estimate(encoding: Encoding, params: struct {
op1: Instruction.Operand,
op2: Instruction.Operand,
op3: Instruction.Operand,
op4: Instruction.Operand,
}) usize {
fn estimate(encoding: Encoding, params: Instruction.Init) usize {
var inst = Instruction{
.op1 = params.op1,
.op2 = params.op2,
.op3 = params.op3,
.op4 = params.op4,
.prefix = params.prefix,
.encoding = encoding,
};
var cwriter = std.io.countingWriter(std.io.null_writer);
@@ -139,12 +131,7 @@ pub fn findByMnemonic(mnemonic: Mnemonic, args: struct {
else => {},
}
const len = EncodingLength.estimate(candidate, .{
.op1 = args.op1,
.op2 = args.op2,
.op3 = args.op3,
.op4 = args.op4,
});
const len = EncodingLength.estimate(candidate, args);
const current = shortest_encoding orelse {
shortest_encoding = .{ .index = i, .len = len };
continue;
@@ -184,7 +171,7 @@ pub fn findByOpcode(opc: []const u8, prefixes: struct {
if (match) {
if (prefixes.rex.w) {
switch (enc.mode) {
.fpu, .sse, .sse2, .none => {},
.fpu, .sse, .sse2, .sse4_1, .none => {},
.long, .rex => return enc,
}
} else if (prefixes.rex.present and !prefixes.rex.isSet()) {
@@ -227,7 +214,11 @@ pub fn modRmExt(encoding: Encoding) u3 {
}
pub fn operandBitSize(encoding: Encoding) u64 {
if (encoding.mode == .long) return 64;
switch (encoding.mode) {
.short => return 16,
.long => return 64,
else => {},
}
const bit_size: u64 = switch (encoding.op_en) {
.np => switch (encoding.op1) {
.o16 => 16,
@@ -316,39 +307,63 @@ pub const Mnemonic = enum {
// zig fmt: off
// General-purpose
adc, add, @"and",
call, cbw, cwde, cdqe, cwd, cdq, cqo, cmp,
bsf, bsr, bswap, bt, btc, btr, bts,
call, cbw, cdq, cdqe,
cmova, cmovae, cmovb, cmovbe, cmovc, cmove, cmovg, cmovge, cmovl, cmovle, cmovna,
cmovnae, cmovnb, cmovnbe, cmovnc, cmovne, cmovng, cmovnge, cmovnl, cmovnle, cmovno,
cmovnp, cmovns, cmovnz, cmovo, cmovp, cmovpe, cmovpo, cmovs, cmovz,
cmp,
cmps, cmpsb, cmpsd, cmpsq, cmpsw,
cmpxchg, cmpxchg8b, cmpxchg16b,
cqo, cwd, cwde,
div,
fisttp, fld,
idiv, imul, int3,
ja, jae, jb, jbe, jc, jrcxz, je, jg, jge, jl, jle, jna, jnae, jnb, jnbe,
jnc, jne, jng, jnge, jnl, jnle, jno, jnp, jns, jnz, jo, jp, jpe, jpo, js, jz,
jmp,
lea,
mov, movsx, movsxd, movzx, mul,
nop,
lea, lfence,
lods, lodsb, lodsd, lodsq, lodsw,
lzcnt,
mfence, mov, movbe,
movs, movsb, movsd, movsq, movsw,
movsx, movsxd, movzx, mul,
neg, nop, not,
@"or",
pop, push,
ret,
sal, sar, sbb, shl, shr, sub, syscall,
pop, popcnt, push,
rcl, rcr, ret, rol, ror,
sal, sar, sbb,
scas, scasb, scasd, scasq, scasw,
shl, shr, sub, syscall,
seta, setae, setb, setbe, setc, sete, setg, setge, setl, setle, setna, setnae,
setnb, setnbe, setnc, setne, setng, setnge, setnl, setnle, setno, setnp, setns,
setnz, seto, setp, setpe, setpo, sets, setz,
@"test",
sfence,
stos, stosb, stosd, stosq, stosw,
@"test", tzcnt,
ud2,
xor,
xadd, xchg, xor,
// SSE
addss,
cmpss,
divss,
maxss, minss,
movss,
mulss,
subss,
ucomiss,
// SSE2
addsd,
cmpsd,
movq, movsd,
//cmpsd,
divsd,
maxsd, minsd,
movq, //movsd,
mulsd,
subsd,
ucomisd,
// SSE4.1
roundss,
roundsd,
// zig fmt: on
};
@@ -374,7 +389,7 @@ pub const Op = enum {
cl,
r8, r16, r32, r64,
rm8, rm16, rm32, rm64,
m8, m16, m32, m64, m80,
m8, m16, m32, m64, m80, m128,
rel8, rel16, rel32,
m,
moffs,
@@ -423,6 +438,7 @@ pub const Op = enum {
32 => .m32,
64 => .m64,
80 => .m80,
128 => .m128,
else => unreachable,
};
},
@@ -460,7 +476,7 @@ pub const Op = enum {
.imm32, .imm32s, .eax, .r32, .m32, .rm32, .rel32, .xmm_m32 => 32,
.imm64, .rax, .r64, .m64, .rm64, .xmm_m64 => 64,
.m80 => 80,
.xmm => 128,
.m128, .xmm => 128,
};
}
@@ -507,7 +523,7 @@ pub const Op = enum {
// zig fmt: off
return switch (op) {
.rm8, .rm16, .rm32, .rm64,
.m8, .m16, .m32, .m64, .m80,
.m8, .m16, .m32, .m64, .m80, .m128,
.m,
.xmm_m32, .xmm_m64,
=> true,
@@ -542,7 +558,7 @@ pub const Op = enum {
else => {
if (op.isRegister() and target.isRegister()) {
switch (mode) {
.sse, .sse2 => return op.isFloatingPointRegister() and target.isFloatingPointRegister(),
.sse, .sse2, .sse4_1 => return op.isFloatingPointRegister() and target.isFloatingPointRegister(),
else => switch (target) {
.cl, .al, .ax, .eax, .rax => return op == target,
else => return op.bitSize() == target.bitSize(),
@@ -579,9 +595,11 @@ pub const Op = enum {
pub const Mode = enum {
none,
short,
fpu,
rex,
long,
sse,
sse2,
sse4_1,
};


@@ -38,6 +38,20 @@ pub const Inst = struct {
add,
/// Logical and
@"and",
/// Bit scan forward
bsf,
/// Bit scan reverse
bsr,
/// Byte swap
bswap,
/// Bit test
bt,
/// Bit test and complement
btc,
/// Bit test and reset
btr,
/// Bit test and set
bts,
/// Call
call,
/// Convert byte to word
@@ -54,6 +68,10 @@ pub const Inst = struct {
cqo,
/// Logical compare
cmp,
/// Compare and exchange
cmpxchg,
/// Compare and exchange bytes
cmpxchgb,
/// Unsigned division
div,
/// Store integer with truncation
@@ -70,30 +88,54 @@ pub const Inst = struct {
jmp,
/// Load effective address
lea,
/// Load fence
lfence,
/// Count the number of leading zero bits
lzcnt,
/// Memory fence
mfence,
/// Move
mov,
/// Move data after swapping bytes
movbe,
/// Move with sign extension
movsx,
/// Move with zero extension
movzx,
/// Multiply
mul,
/// Two's complement negation
neg,
/// No-op
nop,
/// One's complement negation
not,
/// Logical or
@"or",
/// Pop
pop,
/// Return the count of number of bits set to 1
popcnt,
/// Push
push,
/// Rotate left through carry
rcl,
/// Rotate right through carry
rcr,
/// Return
ret,
/// Rotate left
rol,
/// Rotate right
ror,
/// Arithmetic shift left
sal,
/// Arithmetic shift right
sar,
/// Integer subtraction with borrow
sbb,
/// Store fence
sfence,
/// Logical shift left
shl,
/// Logical shift right
@@ -104,28 +146,69 @@ pub const Inst = struct {
syscall,
/// Test condition
@"test",
/// Count the number of trailing zero bits
tzcnt,
/// Undefined instruction
ud2,
/// Exchange and add
xadd,
/// Exchange register/memory with register
xchg,
/// Logical exclusive-or
xor,
/// Add single precision floating point
/// Add single precision floating point values
addss,
/// Compare scalar single-precision floating-point values
cmpss,
/// Divide scalar single-precision floating-point values
divss,
/// Return maximum single-precision floating-point value
maxss,
/// Return minimum single-precision floating-point value
minss,
/// Move scalar single-precision floating-point value
movss,
/// Multiply scalar single-precision floating-point values
mulss,
/// Round scalar single-precision floating-point values
roundss,
/// Subtract scalar single-precision floating-point values
subss,
/// Unordered compare scalar single-precision floating-point values
ucomiss,
/// Add double precision floating point
/// Add double precision floating point values
addsd,
/// Compare scalar double-precision floating-point values
cmpsd,
/// Divide scalar double-precision floating-point values
divsd,
/// Return maximum double-precision floating-point value
maxsd,
/// Return minimum double-precision floating-point value
minsd,
/// Move scalar double-precision floating-point value
movsd,
/// Multiply scalar double-precision floating-point values
mulsd,
/// Round scalar double-precision floating-point values
roundsd,
/// Subtract scalar double-precision floating-point values
subsd,
/// Unordered compare scalar double-precision floating-point values
ucomisd,
/// Compare string operands
cmps,
/// Load string
lods,
/// Move data from string to string
movs,
/// Scan string
scas,
/// Store string
stos,
/// Conditional move
cmovcc,
/// Conditional jump
@@ -185,11 +268,11 @@ pub const Inst = struct {
/// Uses `rri` payload.
rri_u,
/// Register with condition code (CC).
/// Uses `r_c` payload.
r_c,
/// Uses `r_cc` payload.
r_cc,
/// Register, register with condition code (CC).
/// Uses `rr_c` payload.
rr_c,
/// Uses `rr_cc` payload.
rr_cc,
/// Register, immediate (sign-extended) operands.
/// Uses `ri` payload.
ri_s,
@@ -214,12 +297,24 @@ pub const Inst = struct {
/// Register, memory (RIP) operands.
/// Uses `rx` payload.
rm_rip,
/// Register, memory (SIB) operands with condition code (CC).
/// Uses `rx_cc` payload.
rm_sib_cc,
/// Register, memory (RIP) operands with condition code (CC).
/// Uses `rx_cc` payload.
rm_rip_cc,
/// Single memory (SIB) operand.
/// Uses `payload` with extra data of type `MemorySib`.
m_sib,
/// Single memory (RIP) operand.
/// Uses `payload` with extra data of type `MemoryRip`.
m_rip,
/// Single memory (SIB) operand with condition code (CC).
/// Uses `x_cc` with extra data of type `MemorySib`.
m_sib_cc,
/// Single memory (RIP) operand with condition code (CC).
/// Uses `x_cc` with extra data of type `MemoryRip`.
m_rip_cc,
/// Memory (SIB), immediate (unsigned) operands.
/// Uses `xi` payload with extra data of type `MemorySib`.
mi_u_sib,
@@ -244,16 +339,42 @@ pub const Inst = struct {
/// Memory moffs, rax.
/// Uses `payload` with extra data of type `MemoryMoffs`.
moffs_rax,
/// Single memory (SIB) operand with lock prefix.
/// Uses `payload` with extra data of type `MemorySib`.
lock_m_sib,
/// Single memory (RIP) operand with lock prefix.
/// Uses `payload` with extra data of type `MemoryRip`.
lock_m_rip,
/// Memory (SIB), immediate (unsigned) operands with lock prefix.
/// Uses `xi` payload with extra data of type `MemorySib`.
lock_mi_u_sib,
/// Memory (RIP), immediate (unsigned) operands with lock prefix.
/// Uses `xi` payload with extra data of type `MemoryRip`.
lock_mi_u_rip,
/// Memory (SIB), immediate (sign-extended) operands with lock prefix.
/// Uses `xi` payload with extra data of type `MemorySib`.
lock_mi_s_sib,
/// Memory (RIP), immediate (sign-extended) operands with lock prefix.
/// Uses `xi` payload with extra data of type `MemoryRip`.
lock_mi_s_rip,
/// Memory (SIB), register operands with lock prefix.
/// Uses `rx` payload with extra data of type `MemorySib`.
lock_mr_sib,
/// Memory (RIP), register operands with lock prefix.
/// Uses `rx` payload with extra data of type `MemoryRip`.
lock_mr_rip,
/// Memory moffs, rax with lock prefix.
/// Uses `payload` with extra data of type `MemoryMoffs`.
lock_moffs_rax,
/// References another Mir instruction directly.
/// Uses `inst` payload.
inst,
/// References another Mir instruction directly with condition code (CC).
/// Uses `inst_cc` payload.
inst_cc,
/// Uses `payload` payload with data of type `MemoryConditionCode`.
m_cc,
/// Uses `rx` payload with extra data of type `MemoryConditionCode`.
rm_cc,
/// String repeat and width
/// Uses `string` payload.
string,
/// Uses `reloc` payload.
reloc,
/// Linker relocation - GOT indirection.
@@ -295,13 +416,18 @@ pub const Inst = struct {
r2: Register,
imm: u32,
},
/// Condition code (CC), followed by custom payload found in extra.
x_cc: struct {
payload: u32,
cc: bits.Condition,
},
/// Register with condition code (CC).
r_c: struct {
r_cc: struct {
r1: Register,
cc: bits.Condition,
},
/// Register, register with condition code (CC).
rr_c: struct {
rr_cc: struct {
r1: Register,
r2: Register,
cc: bits.Condition,
@@ -316,11 +442,22 @@ pub const Inst = struct {
r1: Register,
payload: u32,
},
/// Register with condition code (CC), followed by custom payload found in extra.
rx_cc: struct {
r1: Register,
cc: bits.Condition,
payload: u32,
},
/// Custom payload followed by an immediate.
xi: struct {
payload: u32,
imm: u32,
},
/// String instruction prefix and width.
string: struct {
repeat: bits.StringRepeat,
width: bits.StringWidth,
},
/// Relocation for the linker where:
/// * `atom_index` is the index of the source
/// * `sym_index` is the index of the target


@@ -6,6 +6,9 @@ const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const DW = std.dwarf;
pub const StringRepeat = enum(u3) { none, rep, repe, repz, repne, repnz };
pub const StringWidth = enum(u2) { b, w, d, q };
/// EFLAGS condition codes
pub const Condition = enum(u5) {
/// above
@@ -97,8 +100,7 @@ pub const Condition = enum(u5) {
};
}
/// Returns the condition which is true iff the given condition is
/// false (if such a condition exists)
/// Returns the condition which is true iff the given condition is false
pub fn negate(cond: Condition) Condition {
return switch (cond) {
.a => .na,
@@ -127,8 +129,8 @@ pub const Condition = enum(u5) {
.nz => .z,
.o => .no,
.p => .np,
.pe => unreachable,
.po => unreachable,
.pe => .po,
.po => .pe,
.s => .ns,
.z => .nz,
};
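The `pe` and `po` arms were previously `unreachable`; since parity-even (PF = 1) and parity-odd (PF = 0) are exact complements, `negate` can now round-trip them. A sketch of the resulting behavior (assumes the file's existing `std` import):

test "parity conditions negate to each other" {
    try std.testing.expectEqual(Condition.po, Condition.pe.negate());
    try std.testing.expectEqual(Condition.pe, Condition.po.negate());
}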
@@ -470,10 +472,11 @@ pub const Memory = union(enum) {
}
pub fn sib(ptr_size: PtrSize, args: struct {
disp: i32,
disp: i32 = 0,
base: ?Register = null,
scale_index: ?ScaleIndex = null,
}) Memory {
if (args.scale_index) |si| assert(std.math.isPowerOfTwo(si.scale));
return .{ .sib = .{
.base = args.base,
.disp = args.disp,
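`disp` now defaults to 0 and scale factors are asserted to be powers of two; this is what lets the encoder tests later in this commit drop their explicit `.disp = 0` fields. A call under the new signature:

const mem = Memory.sib(.qword, .{
    .base = .rbp,
    .scale_index = .{ .scale = 1, .index = .rcx },
}); // .disp omitted: defaults to 0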


@@ -15,10 +15,21 @@ pub const Instruction = struct {
op2: Operand = .none,
op3: Operand = .none,
op4: Operand = .none,
prefix: Prefix = .none,
encoding: Encoding,
pub const Mnemonic = Encoding.Mnemonic;
pub const Prefix = enum(u3) {
none,
lock,
rep,
repe,
repz,
repne,
repnz,
};
pub const Operand = union(enum) {
none,
reg: Register,
@@ -96,19 +107,18 @@ pub const Instruction = struct {
}
};
pub fn new(mnemonic: Mnemonic, args: struct {
pub const Init = struct {
prefix: Prefix = .none,
op1: Operand = .none,
op2: Operand = .none,
op3: Operand = .none,
op4: Operand = .none,
}) !Instruction {
const encoding = (try Encoding.findByMnemonic(mnemonic, .{
.op1 = args.op1,
.op2 = args.op2,
.op3 = args.op3,
.op4 = args.op4,
})) orelse {
log.debug("no encoding found for: {s} {s} {s} {s} {s}", .{
};
pub fn new(mnemonic: Mnemonic, args: Init) !Instruction {
const encoding = (try Encoding.findByMnemonic(mnemonic, args)) orelse {
log.debug("no encoding found for: {s} {s} {s} {s} {s} {s}", .{
@tagName(args.prefix),
@tagName(mnemonic),
@tagName(Encoding.Op.fromOperand(args.op1)),
@tagName(Encoding.Op.fromOperand(args.op2)),
@@ -119,6 +129,7 @@ pub const Instruction = struct {
};
log.debug("selected encoding: {}", .{encoding});
return .{
.prefix = args.prefix,
.op1 = args.op1,
.op2 = args.op2,
.op3 = args.op3,
@@ -128,6 +139,7 @@ pub const Instruction = struct {
}
pub fn fmtPrint(inst: Instruction, writer: anytype) !void {
if (inst.prefix != .none) try writer.print("{s} ", .{@tagName(inst.prefix)});
try writer.print("{s}", .{@tagName(inst.encoding.mnemonic)});
const ops = [_]struct { Operand, Encoding.Op }{
.{ inst.op1, inst.encoding.op1 },
@@ -199,14 +211,12 @@ pub const Instruction = struct {
fn encodeOpcode(inst: Instruction, encoder: anytype) !void {
const opcode = inst.encoding.opcode();
const first = @boolToInt(inst.encoding.mandatoryPrefix() != null);
const final = opcode.len - 1;
for (opcode[first..final]) |byte| try encoder.opcode_1byte(byte);
switch (inst.encoding.op_en) {
.o, .oi => try encoder.opcode_withReg(opcode[0], inst.op1.reg.lowEnc()),
else => {
const index: usize = if (inst.encoding.mandatoryPrefix()) |_| 1 else 0;
for (opcode[index..]) |byte| {
try encoder.opcode_1byte(byte);
}
},
.o, .oi => try encoder.opcode_withReg(opcode[final], inst.op1.reg.lowEnc()),
else => try encoder.opcode_1byte(opcode[final]),
}
}
@@ -215,6 +225,14 @@ pub const Instruction = struct {
const op_en = enc.op_en;
var legacy = LegacyPrefixes{};
switch (inst.prefix) {
.none => {},
.lock => legacy.prefix_f0 = true,
.repne, .repnz => legacy.prefix_f2 = true,
.rep, .repe, .repz => legacy.prefix_f3 = true,
}
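Each new `Prefix` variant maps onto one legacy prefix byte, as the switch above shows. For reference, the implied bytes (values per the Intel SDM; the helper itself is illustrative and not part of this commit):

fn prefixByte(prefix: Prefix) ?u8 {
    return switch (prefix) {
        .none => null,
        .lock => 0xF0, // legacy.prefix_f0
        .repne, .repnz => 0xF2, // legacy.prefix_f2
        .rep, .repe, .repz => 0xF3, // legacy.prefix_f3
    };
}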
if (enc.mode == .none) {
const bit_size = enc.operandBitSize();
if (bit_size == 16) {
@@ -811,15 +829,11 @@ const TestEncode = struct {
buffer: [32]u8 = undefined,
index: usize = 0,
fn encode(enc: *TestEncode, mnemonic: Instruction.Mnemonic, args: struct {
op1: Instruction.Operand = .none,
op2: Instruction.Operand = .none,
op3: Instruction.Operand = .none,
op4: Instruction.Operand = .none,
}) !void {
fn encode(enc: *TestEncode, mnemonic: Instruction.Mnemonic, args: Instruction.Init) !void {
var stream = std.io.fixedBufferStream(&enc.buffer);
var count_writer = std.io.countingWriter(stream.writer());
const inst = try Instruction.new(mnemonic, .{
.prefix = args.prefix,
.op1 = args.op1,
.op2 = args.op2,
.op3 = args.op3,
@@ -880,10 +894,10 @@ test "lower MI encoding" {
try enc.encode(.mov, .{ .op1 = .{ .reg = .r12 }, .op2 = .{ .imm = Immediate.u(0x1000) } });
try expectEqualHexStrings("\x49\xC7\xC4\x00\x10\x00\x00", enc.code(), "mov r12, 0x1000");
try enc.encode(.mov, .{ .op1 = .{ .mem = Memory.sib(.byte, .{
.base = .r12,
.disp = 0,
}) }, .op2 = .{ .imm = Immediate.u(0x10) } });
try enc.encode(.mov, .{
.op1 = .{ .mem = Memory.sib(.byte, .{ .base = .r12 }) },
.op2 = .{ .imm = Immediate.u(0x10) },
});
try expectEqualHexStrings("\x41\xC6\x04\x24\x10", enc.code(), "mov BYTE PTR [r12], 0x10");
try enc.encode(.mov, .{ .op1 = .{ .reg = .r12 }, .op2 = .{ .imm = Immediate.u(0x1000) } });
@@ -895,10 +909,10 @@ test "lower MI encoding" {
try enc.encode(.mov, .{ .op1 = .{ .reg = .rax }, .op2 = .{ .imm = Immediate.u(0x10) } });
try expectEqualHexStrings("\x48\xc7\xc0\x10\x00\x00\x00", enc.code(), "mov rax, 0x10");
try enc.encode(.mov, .{ .op1 = .{ .mem = Memory.sib(.dword, .{
.base = .r11,
.disp = 0,
}) }, .op2 = .{ .imm = Immediate.u(0x10) } });
try enc.encode(.mov, .{
.op1 = .{ .mem = Memory.sib(.dword, .{ .base = .r11 }) },
.op2 = .{ .imm = Immediate.u(0x10) },
});
try expectEqualHexStrings("\x41\xc7\x03\x10\x00\x00\x00", enc.code(), "mov DWORD PTR [r11], 0x10");
try enc.encode(.mov, .{
@@ -1014,10 +1028,10 @@ test "lower MI encoding" {
test "lower RM encoding" {
var enc = TestEncode{};
try enc.encode(.mov, .{ .op1 = .{ .reg = .rax }, .op2 = .{ .mem = Memory.sib(.qword, .{
.base = .r11,
.disp = 0,
}) } });
try enc.encode(.mov, .{
.op1 = .{ .reg = .rax },
.op2 = .{ .mem = Memory.sib(.qword, .{ .base = .r11 }) },
});
try expectEqualHexStrings("\x49\x8b\x03", enc.code(), "mov rax, QWORD PTR [r11]");
try enc.encode(.mov, .{ .op1 = .{ .reg = .rbx }, .op2 = .{ .mem = Memory.sib(.qword, .{
@@ -1100,20 +1114,16 @@ test "lower RM encoding" {
try enc.encode(.movsx, .{ .op1 = .{ .reg = .ax }, .op2 = .{ .reg = .bl } });
try expectEqualHexStrings("\x66\x0F\xBE\xC3", enc.code(), "movsx ax, bl");
try enc.encode(.movsx, .{ .op1 = .{ .reg = .eax }, .op2 = .{ .mem = Memory.sib(.word, .{
.base = .rbp,
.disp = 0,
}) } });
try enc.encode(.movsx, .{
.op1 = .{ .reg = .eax },
.op2 = .{ .mem = Memory.sib(.word, .{ .base = .rbp }) },
});
try expectEqualHexStrings("\x0F\xBF\x45\x00", enc.code(), "movsx eax, BYTE PTR [rbp]");
try enc.encode(.movsx, .{ .op1 = .{ .reg = .eax }, .op2 = .{ .mem = Memory.sib(.byte, .{
.base = null,
.scale_index = .{
.index = .rax,
.scale = 2,
},
.disp = 0,
}) } });
try enc.encode(.movsx, .{
.op1 = .{ .reg = .eax },
.op2 = .{ .mem = Memory.sib(.byte, .{ .scale_index = .{ .index = .rax, .scale = 2 } }) },
});
try expectEqualHexStrings("\x0F\xBE\x04\x45\x00\x00\x00\x00", enc.code(), "movsx eax, BYTE PTR [rax * 2]");
try enc.encode(.movsx, .{ .op1 = .{ .reg = .ax }, .op2 = .{ .mem = Memory.rip(.byte, 0x10) } });
@@ -1140,14 +1150,13 @@ test "lower RM encoding" {
try enc.encode(.lea, .{ .op1 = .{ .reg = .ax }, .op2 = .{ .mem = Memory.rip(.byte, 0x10) } });
try expectEqualHexStrings("\x66\x8D\x05\x10\x00\x00\x00", enc.code(), "lea ax, BYTE PTR [rip + 0x10]");
try enc.encode(.lea, .{ .op1 = .{ .reg = .rsi }, .op2 = .{ .mem = Memory.sib(.qword, .{
.base = .rbp,
.scale_index = .{
.scale = 1,
.index = .rcx,
},
.disp = 0,
}) } });
try enc.encode(.lea, .{
.op1 = .{ .reg = .rsi },
.op2 = .{ .mem = Memory.sib(.qword, .{
.base = .rbp,
.scale_index = .{ .scale = 1, .index = .rcx },
}) },
});
try expectEqualHexStrings("\x48\x8D\x74\x0D\x00", enc.code(), "lea rsi, QWORD PTR [rbp + rcx*1 + 0]");
try enc.encode(.add, .{ .op1 = .{ .reg = .r11 }, .op2 = .{ .mem = Memory.sib(.qword, .{
@@ -1303,51 +1312,35 @@ test "lower M encoding" {
try enc.encode(.call, .{ .op1 = .{ .reg = .r12 } });
try expectEqualHexStrings("\x41\xFF\xD4", enc.code(), "call r12");
try enc.encode(.call, .{ .op1 = .{ .mem = Memory.sib(.qword, .{
.base = .r12,
.disp = 0,
}) } });
try enc.encode(.call, .{ .op1 = .{ .mem = Memory.sib(.qword, .{ .base = .r12 }) } });
try expectEqualHexStrings("\x41\xFF\x14\x24", enc.code(), "call QWORD PTR [r12]");
try enc.encode(.call, .{ .op1 = .{ .mem = Memory.sib(.qword, .{
.base = null,
.scale_index = .{
.index = .r11,
.scale = 2,
},
.disp = 0,
}) } });
try enc.encode(.call, .{
.op1 = .{ .mem = Memory.sib(.qword, .{
.base = null,
.scale_index = .{ .index = .r11, .scale = 2 },
}) },
});
try expectEqualHexStrings("\x42\xFF\x14\x5D\x00\x00\x00\x00", enc.code(), "call QWORD PTR [r11 * 2]");
try enc.encode(.call, .{ .op1 = .{ .mem = Memory.sib(.qword, .{
.base = null,
.scale_index = .{
.index = .r12,
.scale = 2,
},
.disp = 0,
}) } });
try enc.encode(.call, .{
.op1 = .{ .mem = Memory.sib(.qword, .{
.base = null,
.scale_index = .{ .index = .r12, .scale = 2 },
}) },
});
try expectEqualHexStrings("\x42\xFF\x14\x65\x00\x00\x00\x00", enc.code(), "call QWORD PTR [r12 * 2]");
try enc.encode(.call, .{ .op1 = .{ .mem = Memory.sib(.qword, .{
.base = .gs,
.disp = 0,
}) } });
try enc.encode(.call, .{ .op1 = .{ .mem = Memory.sib(.qword, .{ .base = .gs }) } });
try expectEqualHexStrings("\x65\xFF\x14\x25\x00\x00\x00\x00", enc.code(), "call gs:0x0");
try enc.encode(.call, .{ .op1 = .{ .imm = Immediate.s(0) } });
try expectEqualHexStrings("\xE8\x00\x00\x00\x00", enc.code(), "call 0x0");
try enc.encode(.push, .{ .op1 = .{ .mem = Memory.sib(.qword, .{
.base = .rbp,
.disp = 0,
}) } });
try enc.encode(.push, .{ .op1 = .{ .mem = Memory.sib(.qword, .{ .base = .rbp }) } });
try expectEqualHexStrings("\xFF\x75\x00", enc.code(), "push QWORD PTR [rbp]");
try enc.encode(.push, .{ .op1 = .{ .mem = Memory.sib(.word, .{
.base = .rbp,
.disp = 0,
}) } });
try enc.encode(.push, .{ .op1 = .{ .mem = Memory.sib(.word, .{ .base = .rbp }) } });
try expectEqualHexStrings("\x66\xFF\x75\x00", enc.code(), "push QWORD PTR [rbp]");
try enc.encode(.pop, .{ .op1 = .{ .mem = Memory.rip(.qword, 0) } });
@@ -1447,18 +1440,8 @@ test "lower NP encoding" {
try expectEqualHexStrings("\x0f\x05", enc.code(), "syscall");
}
fn invalidInstruction(mnemonic: Instruction.Mnemonic, args: struct {
op1: Instruction.Operand = .none,
op2: Instruction.Operand = .none,
op3: Instruction.Operand = .none,
op4: Instruction.Operand = .none,
}) !void {
const err = Instruction.new(mnemonic, .{
.op1 = args.op1,
.op2 = args.op2,
.op3 = args.op3,
.op4 = args.op4,
});
fn invalidInstruction(mnemonic: Instruction.Mnemonic, args: Instruction.Init) !void {
const err = Instruction.new(mnemonic, args);
try testing.expectError(error.InvalidInstruction, err);
}
@@ -1479,23 +1462,13 @@ test "invalid instruction" {
try invalidInstruction(.push, .{ .op1 = .{ .imm = Immediate.u(0x1000000000000000) } });
}
fn cannotEncode(mnemonic: Instruction.Mnemonic, args: struct {
op1: Instruction.Operand = .none,
op2: Instruction.Operand = .none,
op3: Instruction.Operand = .none,
op4: Instruction.Operand = .none,
}) !void {
try testing.expectError(error.CannotEncode, Instruction.new(mnemonic, .{
.op1 = args.op1,
.op2 = args.op2,
.op3 = args.op3,
.op4 = args.op4,
}));
fn cannotEncode(mnemonic: Instruction.Mnemonic, args: Instruction.Init) !void {
try testing.expectError(error.CannotEncode, Instruction.new(mnemonic, args));
}
test "cannot encode" {
try cannotEncode(.@"test", .{
.op1 = .{ .mem = Memory.sib(.byte, .{ .base = .r12, .disp = 0 }) },
.op1 = .{ .mem = Memory.sib(.byte, .{ .base = .r12 }) },
.op2 = .{ .reg = .ah },
});
try cannotEncode(.@"test", .{

File diff suppressed because it is too large.


@@ -608,7 +608,6 @@ pub fn generateSymbol(
const payload_type = typed_value.ty.optionalChild(&opt_buf);
const is_pl = !typed_value.val.isNull();
const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow;
const offset = abi_size - (math.cast(usize, payload_type.abiSize(target)) orelse return error.Overflow);
if (!payload_type.hasRuntimeBits()) {
try code.writer().writeByteNTimes(@boolToInt(is_pl), abi_size);
@@ -639,8 +638,8 @@ pub fn generateSymbol(
return Result.ok;
}
const padding = abi_size - (math.cast(usize, payload_type.abiSize(target)) orelse return error.Overflow) - 1;
const value = if (typed_value.val.castTag(.opt_payload)) |payload| payload.data else Value.initTag(.undef);
try code.writer().writeByteNTimes(@boolToInt(is_pl), offset);
switch (try generateSymbol(bin_file, src_loc, .{
.ty = payload_type,
.val = value,
@@ -648,6 +647,8 @@ pub fn generateSymbol(
.ok => {},
.fail => |em| return Result{ .fail = em },
}
try code.writer().writeByte(@boolToInt(is_pl));
try code.writer().writeByteNTimes(0, padding);
return Result.ok;
},
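The constant lowering now mirrors the runtime layout: payload bytes first, then the flag byte, then zero padding. Worked bytes for `@as(?u32, 7)` assuming abi_size = 8 on a little-endian target (illustrative only):

// padding = abi_size - payload_abi_size - 1 = 8 - 4 - 1 = 3
// emitted: 07 00 00 00  01  00 00 00
//          payload      flag padding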


@@ -305,40 +305,32 @@ pub fn RegisterManager(
pub fn getReg(self: *Self, reg: Register, inst: ?Air.Inst.Index) AllocateRegistersError!void {
const index = indexOfRegIntoTracked(reg) orelse return;
log.debug("getReg {} for inst {?}", .{ reg, inst });
self.markRegAllocated(reg);
if (inst) |tracked_inst|
if (!self.isRegFree(reg)) {
// Move the instruction that was previously there to a
// stack allocation.
const spilled_inst = self.registers[index];
self.registers[index] = tracked_inst;
try self.getFunction().spillInstruction(reg, spilled_inst);
} else {
self.getRegAssumeFree(reg, tracked_inst);
}
else {
if (!self.isRegFree(reg)) {
// Move the instruction that was previously there to a
// stack allocation.
const spilled_inst = self.registers[index];
try self.getFunction().spillInstruction(reg, spilled_inst);
self.freeReg(reg);
}
}
if (!self.isRegFree(reg)) {
self.markRegAllocated(reg);
// Move the instruction that was previously there to a
// stack allocation.
const spilled_inst = self.registers[index];
if (inst) |tracked_inst| self.registers[index] = tracked_inst;
try self.getFunction().spillInstruction(reg, spilled_inst);
if (inst == null) self.freeReg(reg);
} else self.getRegAssumeFree(reg, inst);
}
/// Allocates the specified register with the specified
/// instruction. Asserts that the register is free and no
/// spilling is necessary.
pub fn getRegAssumeFree(self: *Self, reg: Register, inst: Air.Inst.Index) void {
pub fn getRegAssumeFree(self: *Self, reg: Register, inst: ?Air.Inst.Index) void {
const index = indexOfRegIntoTracked(reg) orelse return;
log.debug("getRegAssumeFree {} for inst {}", .{ reg, inst });
log.debug("getRegAssumeFree {} for inst {?}", .{ reg, inst });
self.markRegAllocated(reg);
assert(self.isRegFree(reg));
self.registers[index] = inst;
self.markRegUsed(reg);
if (inst) |tracked_inst| {
self.registers[index] = tracked_inst;
self.markRegUsed(reg);
}
}
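After the two branches are merged, `getReg` has a single contract: spill whatever occupies the register, then either track the new instruction or leave the register free when `inst` is null. Hypothetical call sites:

// Claim rax for `inst`, spilling the previous occupant if necessary:
try self.register_manager.getReg(.rax, inst);
// Claim rax as an untracked scratch register (spill, then free):
try self.register_manager.getReg(.rax, null);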
/// Marks the specified register as free


@@ -33,7 +33,6 @@ fn testCmpxchg() !void {
test "fence" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -44,7 +43,6 @@ test "fence" {
test "atomicrmw and atomicload" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -73,7 +71,6 @@ fn testAtomicLoad(ptr: *u8) !void {
test "cmpxchg with ptr" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -162,7 +159,6 @@ test "cmpxchg on a global variable" {
test "atomic load and rmw with enum" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -180,7 +176,6 @@ test "atomic load and rmw with enum" {
test "atomic store" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -194,7 +189,6 @@ test "atomic store" {
test "atomic store comptime" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -424,7 +418,6 @@ fn testAtomicsWithType(comptime T: type, a: T, b: T) !void {
test "return @atomicStore, using it as a void value" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO


@@ -14,7 +14,6 @@ pub const CustomDraw = DeleagateWithContext(fn (?OnConfirm) void);
test "simple test" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
var c: CustomDraw = undefined;
_ = c;


@@ -8,7 +8,6 @@ test {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
list.items.len = 0;


@@ -3,7 +3,6 @@ const std = @import("std");
const S = packed struct { a: u0 = 0 };
test {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO


@@ -329,7 +329,6 @@ test "inline call preserves tail call" {
test "inline call doesn't re-evaluate non generic struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const S = struct {
fn foo(f: struct { a: u8, b: u8 }) !void {


@@ -655,7 +655,6 @@ test "@floatCast cast down" {
}
test "peer type resolution: unreachable, error set, unreachable" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1206,7 +1205,6 @@ fn cast128Float(x: u128) f128 {
test "implicit cast from *[N]T to ?[*]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: ?[*]u16 = null;


@@ -451,7 +451,6 @@ test "optional error set is the same size as error set" {
}
test "nested catch" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -731,7 +730,6 @@ test "pointer to error union payload" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var err_union: anyerror!u8 = 15;
@@ -876,6 +874,7 @@ test "field access of anyerror results in smaller error set" {
}
test "optional error union return type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const S = struct {


@@ -130,7 +130,6 @@ test "if peer expressions inferred optional type" {
}
test "if-else expression with runtime condition result location is inferred optional" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO


@@ -5,7 +5,6 @@ const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
test "@max" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -52,7 +51,6 @@ test "@max on vectors" {
}
test "@min" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO


@@ -12,7 +12,6 @@ fn foo() C!void {
}
test "merge error sets" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (foo()) {


@@ -29,7 +29,6 @@ test "optional type" {
}
test "test maybe object and get a pointer to the inner value" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -138,7 +137,6 @@ test "optional pointer to 0 bit type null value at runtime" {
}
test "if var maybe pointer" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO


@@ -91,7 +91,6 @@ test "address of unwrap optional" {
test "nested optional field in struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S2 = struct {
@@ -109,7 +108,6 @@ test "nested optional field in struct" {
test "equality compare optional with non-optional" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try test_cmp_optional_non_optional();
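
A sketch of the comparison `test_cmp_optional_non_optional` presumably performs, assuming the usual coercion of the non-optional operand:

    const std = @import("std");

    test "optional vs non-optional equality sketch" {
        var opt_ten: ?i32 = 10;
        var ten: i32 = 10;
        // The non-optional operand coerces to ?i32 before comparing.
        try std.testing.expect(opt_ten == ten);
    }
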
@@ -227,7 +225,6 @@ test "assigning to an unwrapped optional field in an inline loop" {
}
test "coerce an anon struct literal to optional struct" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -247,7 +244,6 @@ test "coerce an anon struct literal to optional struct" {
}
test "0-bit child type coerced to optional return ptr result location" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -299,7 +295,6 @@ test "0-bit child type coerced to optional" {
}
test "array of optional unaligned types" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -336,7 +331,6 @@ test "array of optional unaligned types" {
}
test "optional pointer to zero bit optional payload" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -450,7 +444,6 @@ test "Optional slice size is optimized" {
test "peer type resolution in nested if expressions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const Thing = struct { n: i32 };
var a = false;


@@ -18,7 +18,6 @@ fn testReinterpretBytesAsInteger() !void {
}
test "reinterpret an array over multiple elements, with no well-defined layout" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
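
A minimal sketch of byte reinterpretation through a pointer cast, using the two-argument `@ptrCast` form this era of the compiler used (the buffer and values are illustrative, not the test's actual data):

    const std = @import("std");
    const builtin = @import("builtin");

    test "reinterpret bytes sketch" {
        var bytes align(4) = [_]u8{ 1, 0, 0, 0 };
        // Reinterpret the four bytes as a single u32 in native byte order.
        const int = @ptrCast(*u32, &bytes).*;
        const expected: u32 = if (builtin.cpu.arch.endian() == .Little) 1 else 0x01000000;
        try std.testing.expect(int == expected);
    }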


@@ -7,7 +7,6 @@ fn retAddr() usize {
test "return address" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
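
A minimal sketch of the `@returnAddress` builtin behind the `retAddr` helper shown in this hunk:

    const std = @import("std");

    fn retAddr() usize {
        // Address of the instruction following the call in the caller.
        return @returnAddress();
    }

    test "return address sketch" {
        try std.testing.expect(retAddr() != 0);
    }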


@@ -1149,7 +1149,6 @@ test "anon init through error unions and optionals" {
}
test "anon init through optional" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1456,7 +1455,6 @@ test "struct has only one reference" {
test "no dependency loop on pointer to optional struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const S = struct {
const A = struct { b: B };
@@ -1509,7 +1507,6 @@ test "no dependency loop on optional field wrapped in generic function" {
}
test "optional field init with tuple" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
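
A sketch of initializing through an optional with an anonymous struct literal, as the "anon init through optional" test above covers (the struct here is assumed, not the test's own):

    const std = @import("std");

    const S = struct { x: u32 };

    test "anon init through optional sketch" {
        var opt: ?S = null;
        // The anonymous literal is coerced into the optional's payload type.
        opt = .{ .x = 1 };
        try std.testing.expect(opt.?.x == 1);
    }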


@@ -228,7 +228,6 @@ const SwitchProngWithVarEnum = union(enum) {
};
test "switch prong with variable" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
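
For reference, a minimal sketch of a switch prong binding a union payload, the pattern behind `SwitchProngWithVarEnum` (the union here is illustrative):

    const std = @import("std");

    const U = union(enum) {
        a: u32,
        b: bool,
    };

    test "switch prong capture sketch" {
        var u = U{ .a = 7 };
        switch (u) {
            // |x| binds the payload of the active prong.
            .a => |x| try std.testing.expect(x == 7),
            .b => unreachable,
        }
    }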


@@ -263,7 +263,6 @@ test "initializing anon struct with mixed comptime-runtime fields" {
test "tuple in tuple passed to generic function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -283,7 +282,6 @@ test "tuple in tuple passed to generic function" {
test "coerce tuple to tuple" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const T = std.meta.Tuple(&.{u8});
@@ -298,7 +296,6 @@ test "coerce tuple to tuple" {
test "tuple type with void field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const T = std.meta.Tuple(&[_]type{void});
const x = T{{}};
@@ -335,7 +332,6 @@ test "zero sized struct in tuple handled correctly" {
test "tuple type with void field and a runtime field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const T = std.meta.Tuple(&[_]type{ usize, void });
var t: T = .{ 5, {} };
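
A self-contained sketch of the tuple-with-void-field case shown above, mirroring the `std.meta.Tuple` construction from this hunk:

    const std = @import("std");

    test "tuple with void field sketch" {
        const T = std.meta.Tuple(&[_]type{ usize, void });
        var t: T = .{ 5, {} };
        // Only the usize field has runtime bits; fields index positionally.
        try std.testing.expect(t[0] == 5);
    }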


@@ -1227,7 +1227,6 @@ test "union tag is set when initiated as a temporary value at runtime" {
}
test "extern union most-aligned field is smaller" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
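
A minimal sketch of the extern-union property these tests rely on (the fields here are assumed, not the test's own): extern unions have a defined layout, so reading another field reinterprets the same bytes.

    const std = @import("std");

    const U = extern union {
        a: u32,
        b: f32,
    };

    test "extern union sketch" {
        // 0x3f800000 is the IEEE-754 bit pattern of 1.0 as f32.
        var u = U{ .a = 0x3f800000 };
        try std.testing.expect(u.b == 1.0);
    }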


@@ -341,7 +341,6 @@ test "else continue outer while" {
}
test "try terminating an infinite loop" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO