const std = @import("std");
const assert = std.debug.assert;
const codegen = @import("../../codegen.zig");
const link = @import("../../link.zig");
const log = std.log.scoped(.codegen);
const tracking_log = std.log.scoped(.tracking);
const verbose_tracking_log = std.log.scoped(.verbose_tracking);
const wip_mir_log = std.log.scoped(.wip_mir);

const Air = @import("../../Air.zig");
const Allocator = std.mem.Allocator;
const Emit = @import("Emit.zig");
const Liveness = @import("../../Liveness.zig");
const Lower = @import("Lower.zig");
const Mir = @import("Mir.zig");
const Zcu = @import("../../Zcu.zig");
const Module = @import("../../Package/Module.zig");
const InternPool = @import("../../InternPool.zig");
const Type = @import("../../Type.zig");
const Value = @import("../../Value.zig");

const abi = @import("abi.zig");
const bits = @import("bits.zig");
const encoder = @import("encoder.zig");

const Condition = bits.Condition;
const Immediate = bits.Immediate;
const Memory = bits.Memory;
const Register = bits.Register;
const RegisterManager = abi.RegisterManager;
const RegisterLock = RegisterManager.RegisterLock;
const FrameIndex = bits.FrameIndex;

const InnerError = codegen.CodeGenError || error{OutOfRegisters};
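
/// Sentinel `Air.Inst.Index` used as the `inst_tracking` key for the error
/// return trace register; `maxInt(u32)` can never collide with a real AIR
/// instruction index. (Summary comment; see its use in `generate` below.)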
const err_ret_trace_index: Air.Inst.Index = @enumFromInt(std.math.maxInt(u32));

gpa: Allocator,
pt: Zcu.PerThread,
air: Air,
liveness: Liveness,
bin_file: *link.File,
debug_output: link.File.DebugInfoOutput,
target: *const std.Target,
owner: Owner,
inline_func: InternPool.Index,
mod: *Module,
arg_index: u32,
args: []MCValue,
va_info: union {
    sysv: struct {
        gp_count: u32,
        fp_count: u32,
        overflow_arg_area: bits.FrameAddr,
        reg_save_area: bits.FrameAddr,
    },
    win64: struct {},
},
ret_mcv: InstTracking,
err_ret_trace_reg: Register,
fn_type: Type,
src_loc: Zcu.LazySrcLoc,

eflags_inst: ?Air.Inst.Index = null,

/// MIR Instructions
mir_instructions: std.MultiArrayList(Mir.Inst) = .empty,
/// MIR extra data
mir_extra: std.ArrayListUnmanaged(u32) = .empty,
mir_table: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty,

/// Line and column in the source file of the function's closing curly brace.
end_di_line: u32,
end_di_column: u32,

/// The value is an offset into the `Function` `code` from the beginning.
/// To perform the reloc, write a 32-bit signed little-endian integer
/// which is a relative jump, based on the address following the reloc.
epilogue_relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty,

reused_operands: std.StaticBitSet(Liveness.bpi - 1) = undefined,
const_tracking: ConstTrackingMap = .empty,
inst_tracking: InstTrackingMap = .empty,

// Key is the block instruction
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .empty,

register_manager: RegisterManager = .{},

/// Generation of the current scope, increments by 1 for every entered scope.
scope_generation: u32 = 0,

frame_allocs: std.MultiArrayList(FrameAlloc) = .empty,
free_frame_indices: std.AutoArrayHashMapUnmanaged(FrameIndex, void) = .empty,
frame_locs: std.MultiArrayList(Mir.FrameLoc) = .empty,

loops: std.AutoHashMapUnmanaged(Air.Inst.Index, struct {
    /// The state to restore before branching.
    state: State,
    /// The branch target.
    target: Mir.Inst.Index,
}) = .empty,
loop_switches: std.AutoHashMapUnmanaged(Air.Inst.Index, struct {
    start: u31,
    len: u11,
    min: Value,
    else_relocs: union(enum) {
        @"unreachable",
        forward: std.ArrayListUnmanaged(Mir.Inst.Index),
        backward: Mir.Inst.Index,
    },
}) = .empty,

next_temp_index: Temp.Index = @enumFromInt(0),
temp_type: [Temp.Index.max]Type = undefined,

const Owner = union(enum) {
    nav_index: InternPool.Nav.Index,
    lazy_sym: link.File.LazySymbol,

    fn getSymbolIndex(owner: Owner, ctx: *CodeGen) !u32 {
        const pt = ctx.pt;
        switch (owner) {
            .nav_index => |nav_index| if (ctx.bin_file.cast(.elf)) |elf_file| {
                return elf_file.zigObjectPtr().?.getOrCreateMetadataForNav(pt.zcu, nav_index);
            } else if (ctx.bin_file.cast(.macho)) |macho_file| {
                return macho_file.getZigObject().?.getOrCreateMetadataForNav(macho_file, nav_index);
            } else if (ctx.bin_file.cast(.coff)) |coff_file| {
                const atom = try coff_file.getOrCreateAtomForNav(nav_index);
                return coff_file.getAtom(atom).getSymbolIndex().?;
            } else if (ctx.bin_file.cast(.plan9)) |p9_file| {
                return p9_file.seeNav(pt, nav_index);
            } else unreachable,
            .lazy_sym => |lazy_sym| if (ctx.bin_file.cast(.elf)) |elf_file| {
                return elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, pt, lazy_sym) catch |err|
                    ctx.fail("{s} creating lazy symbol", .{@errorName(err)});
            } else if (ctx.bin_file.cast(.macho)) |macho_file| {
                return macho_file.getZigObject().?.getOrCreateMetadataForLazySymbol(macho_file, pt, lazy_sym) catch |err|
                    ctx.fail("{s} creating lazy symbol", .{@errorName(err)});
            } else if (ctx.bin_file.cast(.coff)) |coff_file| {
                const atom = coff_file.getOrCreateAtomForLazySymbol(pt, lazy_sym) catch |err|
                    return ctx.fail("{s} creating lazy symbol", .{@errorName(err)});
                return coff_file.getAtom(atom).getSymbolIndex().?;
            } else if (ctx.bin_file.cast(.plan9)) |p9_file| {
                return p9_file.getOrCreateAtomForLazySymbol(pt, lazy_sym) catch |err|
                    return ctx.fail("{s} creating lazy symbol", .{@errorName(err)});
            } else unreachable,
        }
    }
};

const MaskKind = enum(u1) { sign, all };
const MaskInfo = packed struct { kind: MaskKind, inverted: bool = false, scalar: Memory.Size };

pub const MCValue = union(enum) {
    /// No runtime bits. `void` types, empty structs, u0, enums with 1 tag, etc.
    /// TODO Look into deleting this tag and using `dead` instead, since every use
    /// of MCValue.none should be instead looking at the type and noticing it is 0 bits.
    none,
    /// Control flow will not allow this value to be observed.
    unreach,
    /// No more references to this value remain.
    /// The payload is the value of scope_generation at the point where the death occurred.
    dead: u32,
    /// The value is undefined.
    undef,
    /// A pointer-sized integer that fits in a register.
    /// If the type is a pointer, this is the pointer address in virtual address space.
    immediate: u64,
    /// The value resides in the EFLAGS register.
    eflags: Condition,
    /// The value is in a register.
    register: Register,
    /// The value is split across two registers.
    register_pair: [2]Register,
    /// The value is split across three registers.
    register_triple: [3]Register,
    /// The value is split across four registers.
    register_quadruple: [4]Register,
    /// The value is a constant offset from the value in a register.
    register_offset: bits.RegisterOffset,
    /// The value is a tuple { wrapped, overflow } where the wrapped value is stored in the GP register.
    register_overflow: struct { reg: Register, eflags: Condition },
    /// The value is a bool vector stored in a vector register with a different scalar type.
    register_mask: struct { reg: Register, info: MaskInfo },
    /// The value is in memory at a hard-coded address.
    /// If the type is a pointer, it means the pointer address is stored at this memory location.
    memory: u64,
    /// The value is in memory at an address not-yet-allocated by the linker.
    /// This traditionally corresponds to a relocation emitted in a relocatable object file.
    load_symbol: bits.SymbolOffset,
    /// The address of the memory location not-yet-allocated by the linker.
    lea_symbol: bits.SymbolOffset,
    /// The value is in memory at a constant offset from the address in a register.
    indirect: bits.RegisterOffset,
    /// The value is in memory.
    /// Payload is a symbol index.
    load_direct: u32,
    /// The value is a pointer to a value in memory.
    /// Payload is a symbol index.
    lea_direct: u32,
    /// The value is in memory referenced indirectly via GOT.
    /// Payload is a symbol index.
    load_got: u32,
    /// The value is a pointer to a value referenced indirectly via GOT.
    /// Payload is a symbol index.
    lea_got: u32,
    /// The value is a threadlocal variable.
    /// Payload is a symbol index.
    load_tlv: u32,
    /// The value is a pointer to a threadlocal variable.
    /// Payload is a symbol index.
    lea_tlv: u32,
    /// The value stored at an offset from a frame index.
    /// Payload is a frame address.
    load_frame: bits.FrameAddr,
    /// The address of an offset from a frame index.
    /// Payload is a frame address.
    lea_frame: bits.FrameAddr,
    /// Supports the integer_per_element ABI.
    elementwise_regs_then_frame: packed struct { regs: u3, frame_off: i29, frame_index: FrameIndex },
    /// This indicates that we have already allocated a frame index for this instruction,
    /// but it has not been spilled there yet in the current control flow.
    /// Payload is a frame index.
    reserved_frame: FrameIndex,
    air_ref: Air.Inst.Ref,

    fn isModifiable(mcv: MCValue) bool {
        return switch (mcv) {
            .none,
            .unreach,
            .dead,
            .undef,
            .immediate,
            .register_offset,
            .register_mask,
            .eflags,
            .register_overflow,
            .lea_symbol,
            .lea_direct,
            .lea_got,
            .lea_tlv,
            .lea_frame,
            .elementwise_regs_then_frame,
            .reserved_frame,
            .air_ref,
            => false,
            .register,
            .register_pair,
            .register_triple,
            .register_quadruple,
            .memory,
            .load_symbol,
            .load_got,
            .load_direct,
            .load_tlv,
            .indirect,
            => true,
            .load_frame => |frame_addr| !frame_addr.index.isNamed(),
        };
    }

    // hack around linker relocation bugs
    fn isBase(mcv: MCValue) bool {
        return switch (mcv) {
            .memory, .indirect, .load_frame => true,
            else => false,
        };
    }

    fn isMemory(mcv: MCValue) bool {
        return switch (mcv) {
            .memory, .indirect, .load_frame, .load_symbol => true,
            else => false,
        };
    }

    fn isImmediate(mcv: MCValue) bool {
        return switch (mcv) {
            .immediate => true,
            else => false,
        };
    }

    fn isRegister(mcv: MCValue) bool {
        return switch (mcv) {
            .register => true,
            .register_offset => |reg_off| return reg_off.off == 0,
            else => false,
        };
    }

    fn isRegisterOffset(mcv: MCValue) bool {
        return switch (mcv) {
            .register, .register_offset => true,
            else => false,
        };
    }

    fn getReg(mcv: MCValue) ?Register {
        return switch (mcv) {
            .register => |reg| reg,
            .register_offset, .indirect => |ro| ro.reg,
            .register_overflow => |ro| ro.reg,
            .register_mask => |rm| rm.reg,
            else => null,
        };
    }

    fn getRegs(mcv: *const MCValue) []const Register {
        return switch (mcv.*) {
            .register => |*reg| reg[0..1],
            inline .register_pair,
            .register_triple,
            .register_quadruple,
            => |*regs| regs,
            inline .register_offset,
            .indirect,
            .register_overflow,
            .register_mask,
            => |*pl| (&pl.reg)[0..1],
            else => &.{},
        };
    }

    fn getCondition(mcv: MCValue) ?Condition {
        return switch (mcv) {
            .eflags => |cc| cc,
            .register_overflow => |reg_ov| reg_ov.eflags,
            else => null,
        };
    }

    fn isAddress(mcv: MCValue) bool {
        return switch (mcv) {
            .immediate, .register, .register_offset, .lea_frame => true,
            else => false,
        };
    }
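
    // Note: for memory-based values, `address()` and `deref()` are inverses;
    // e.g. `(MCValue{ .load_frame = fa }).address()` is `.{ .lea_frame = fa }`,
    // and calling `.deref()` on that result yields the original `.load_frame`.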
    fn address(mcv: MCValue) MCValue {
        return switch (mcv) {
            .none,
            .unreach,
            .dead,
            .undef,
            .immediate,
            .eflags,
            .register,
            .register_pair,
            .register_triple,
            .register_quadruple,
            .register_offset,
            .register_overflow,
            .register_mask,
            .lea_symbol,
            .lea_direct,
            .lea_got,
            .lea_tlv,
            .lea_frame,
            .elementwise_regs_then_frame,
            .reserved_frame,
            .air_ref,
            => unreachable, // not in memory
            .memory => |addr| .{ .immediate = addr },
            .indirect => |reg_off| switch (reg_off.off) {
                0 => .{ .register = reg_off.reg },
                else => .{ .register_offset = reg_off },
            },
            .load_direct => |sym_index| .{ .lea_direct = sym_index },
            .load_got => |sym_index| .{ .lea_got = sym_index },
            .load_tlv => |sym_index| .{ .lea_tlv = sym_index },
            .load_frame => |frame_addr| .{ .lea_frame = frame_addr },
            .load_symbol => |sym_off| .{ .lea_symbol = sym_off },
        };
    }

    fn deref(mcv: MCValue) MCValue {
        return switch (mcv) {
            .none,
            .unreach,
            .dead,
            .undef,
            .eflags,
            .register_pair,
            .register_triple,
            .register_quadruple,
            .register_overflow,
            .register_mask,
            .memory,
            .indirect,
            .load_direct,
            .load_got,
            .load_tlv,
            .load_frame,
            .load_symbol,
            .elementwise_regs_then_frame,
            .reserved_frame,
            .air_ref,
            => unreachable, // not dereferenceable
            .immediate => |addr| .{ .memory = addr },
            .register => |reg| .{ .indirect = .{ .reg = reg } },
            .register_offset => |reg_off| .{ .indirect = reg_off },
            .lea_direct => |sym_index| .{ .load_direct = sym_index },
            .lea_got => |sym_index| .{ .load_got = sym_index },
            .lea_tlv => |sym_index| .{ .load_tlv = sym_index },
            .lea_frame => |frame_addr| .{ .load_frame = frame_addr },
            .lea_symbol => |sym_index| .{ .load_symbol = sym_index },
        };
    }

    fn offset(mcv: MCValue, off: i32) MCValue {
        return switch (mcv) {
            .none,
            .unreach,
            .dead,
            .undef,
            .elementwise_regs_then_frame,
            .reserved_frame,
            .air_ref,
            => unreachable, // not valid
            .eflags,
            .register_pair,
            .register_triple,
            .register_quadruple,
            .register_overflow,
            .register_mask,
            .memory,
            .indirect,
            .load_direct,
            .lea_direct,
            .load_got,
            .lea_got,
            .load_tlv,
            .lea_tlv,
            .load_frame,
            .load_symbol,
            .lea_symbol,
            => switch (off) {
                0 => mcv,
                else => unreachable, // not offsettable
            },
            .immediate => |imm| .{ .immediate = @bitCast(@as(i64, @bitCast(imm)) +% off) },
            .register => |reg| .{ .register_offset = .{ .reg = reg, .off = off } },
            .register_offset => |reg_off| .{
                .register_offset = .{ .reg = reg_off.reg, .off = reg_off.off + off },
            },
            .lea_frame => |frame_addr| .{
                .lea_frame = .{ .index = frame_addr.index, .off = frame_addr.off + off },
            },
        };
    }
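
    // Converts an in-memory MCValue into a `Memory` operand, folding the
    // caller-supplied `mod_rm` displacement into the value's own offset
    // (e.g. a `.load_frame` at offset 8 with `mod_rm.disp = 4` yields disp 12).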
    fn mem(mcv: MCValue, function: *CodeGen, mod_rm: Memory.Mod.Rm) !Memory {
        return switch (mcv) {
            .none,
            .unreach,
            .dead,
            .undef,
            .immediate,
            .eflags,
            .register,
            .register_pair,
            .register_triple,
            .register_quadruple,
            .register_offset,
            .register_overflow,
            .register_mask,
            .load_direct,
            .lea_direct,
            .load_got,
            .lea_got,
            .load_tlv,
            .lea_tlv,
            .lea_frame,
            .elementwise_regs_then_frame,
            .reserved_frame,
            .lea_symbol,
            => unreachable,
            .memory => |addr| if (std.math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr| .{
                .base = .{ .reg = .ds },
                .mod = .{ .rm = .{
                    .size = mod_rm.size,
                    .index = mod_rm.index,
                    .scale = mod_rm.scale,
                    .disp = small_addr + mod_rm.disp,
                } },
            } else .{ .base = .{ .reg = .ds }, .mod = .{ .off = addr } },
            .indirect => |reg_off| .{
                .base = .{ .reg = registerAlias(reg_off.reg, @divExact(function.target.ptrBitWidth(), 8)) },
                .mod = .{ .rm = .{
                    .size = mod_rm.size,
                    .index = mod_rm.index,
                    .scale = mod_rm.scale,
                    .disp = reg_off.off + mod_rm.disp,
                } },
            },
            .load_frame => |frame_addr| .{
                .base = .{ .frame = frame_addr.index },
                .mod = .{ .rm = .{
                    .size = mod_rm.size,
                    .index = mod_rm.index,
                    .scale = mod_rm.scale,
                    .disp = frame_addr.off + mod_rm.disp,
                } },
            },
            .load_symbol => |sym_off| {
                assert(sym_off.off == 0);
                return .{
                    .base = .{ .reloc = sym_off.sym_index },
                    .mod = .{ .rm = .{
                        .size = mod_rm.size,
                        .index = mod_rm.index,
                        .scale = mod_rm.scale,
                        .disp = sym_off.off + mod_rm.disp,
                    } },
                };
            },
            .air_ref => |ref| (try function.resolveInst(ref)).mem(function, mod_rm),
        };
    }

    pub fn format(
        mcv: MCValue,
        comptime _: []const u8,
        _: std.fmt.FormatOptions,
        writer: anytype,
    ) @TypeOf(writer).Error!void {
        switch (mcv) {
            .none, .unreach, .dead, .undef => try writer.print("({s})", .{@tagName(mcv)}),
            .immediate => |pl| try writer.print("0x{x}", .{pl}),
            .memory => |pl| try writer.print("[ds:0x{x}]", .{pl}),
            inline .eflags, .register => |pl| try writer.print("{s}", .{@tagName(pl)}),
            .register_pair => |pl| try writer.print("{s}:{s}", .{ @tagName(pl[1]), @tagName(pl[0]) }),
            .register_triple => |pl| try writer.print("{s}:{s}:{s}", .{
                @tagName(pl[2]), @tagName(pl[1]), @tagName(pl[0]),
            }),
            .register_quadruple => |pl| try writer.print("{s}:{s}:{s}:{s}", .{
                @tagName(pl[3]), @tagName(pl[2]), @tagName(pl[1]), @tagName(pl[0]),
            }),
            .register_offset => |pl| try writer.print("{s} + 0x{x}", .{ @tagName(pl.reg), pl.off }),
            .register_overflow => |pl| try writer.print("{s}:{s}", .{
                @tagName(pl.eflags),
                @tagName(pl.reg),
            }),
            .register_mask => |pl| try writer.print("mask({s},{}):{c}{s}", .{
                @tagName(pl.info.kind),
                pl.info.scalar,
                @as(u8, if (pl.info.inverted) '!' else ' '),
                @tagName(pl.reg),
            }),
            .load_symbol => |pl| try writer.print("[sym:{} + 0x{x}]", .{ pl.sym_index, pl.off }),
            .lea_symbol => |pl| try writer.print("sym:{} + 0x{x}", .{ pl.sym_index, pl.off }),
            .indirect => |pl| try writer.print("[{s} + 0x{x}]", .{ @tagName(pl.reg), pl.off }),
            .load_direct => |pl| try writer.print("[direct:{d}]", .{pl}),
            .lea_direct => |pl| try writer.print("direct:{d}", .{pl}),
            .load_got => |pl| try writer.print("[got:{d}]", .{pl}),
            .lea_got => |pl| try writer.print("got:{d}", .{pl}),
            .load_tlv => |pl| try writer.print("[tlv:{d}]", .{pl}),
            .lea_tlv => |pl| try writer.print("tlv:{d}", .{pl}),
            .load_frame => |pl| try writer.print("[{} + 0x{x}]", .{ pl.index, pl.off }),
            .elementwise_regs_then_frame => |pl| try writer.print("elementwise:{d}:[{} + 0x{x}]", .{
                pl.regs, pl.frame_index, pl.frame_off,
            }),
            .lea_frame => |pl| try writer.print("{} + 0x{x}", .{ pl.index, pl.off }),
            .reserved_frame => |pl| try writer.print("(dead:{})", .{pl}),
            .air_ref => |pl| try writer.print("(air:0x{x})", .{@intFromEnum(pl)}),
        }
    }
};

const InstTrackingMap = std.AutoArrayHashMapUnmanaged(Air.Inst.Index, InstTracking);
const ConstTrackingMap = std.AutoArrayHashMapUnmanaged(InternPool.Index, InstTracking);
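/// Tracks where a value lives. `short` is the value's current location and
/// changes as it moves between registers and memory; `long` is its long-term
/// home (typically a frame slot), which `spill` copies into and `resurrect`
/// falls back to when the short location dies.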
const InstTracking = struct {
    long: MCValue,
    short: MCValue,

    fn init(result: MCValue) InstTracking {
        return .{ .long = switch (result) {
            .none,
            .unreach,
            .undef,
            .immediate,
            .memory,
            .load_direct,
            .lea_direct,
            .load_got,
            .lea_got,
            .load_tlv,
            .lea_tlv,
            .load_frame,
            .lea_frame,
            .load_symbol,
            .lea_symbol,
            => result,
            .dead,
            .elementwise_regs_then_frame,
            .reserved_frame,
            .air_ref,
            => unreachable,
            .eflags,
            .register,
            .register_pair,
            .register_triple,
            .register_quadruple,
            .register_offset,
            .register_overflow,
            .register_mask,
            .indirect,
            => .none,
        }, .short = result };
    }

    fn getReg(self: InstTracking) ?Register {
        return self.short.getReg();
    }

    fn getRegs(self: *const InstTracking) []const Register {
        return self.short.getRegs();
    }

    fn getCondition(self: InstTracking) ?Condition {
        return self.short.getCondition();
    }

    fn spill(self: *InstTracking, cg: *CodeGen, inst: Air.Inst.Index) !void {
        if (std.meta.eql(self.long, self.short)) return; // Already spilled
        // Allocate or reuse frame index
        switch (self.long) {
            .none => self.long = try cg.allocRegOrMem(inst, false),
            .load_frame => {},
            .lea_frame => return,
            .reserved_frame => |index| self.long = .{ .load_frame = .{ .index = index } },
            else => unreachable,
        }
        tracking_log.debug("spill {} from {} to {}", .{ inst, self.short, self.long });
        try cg.genCopy(cg.typeOfIndex(inst), self.long, self.short, .{});
    }

    fn reuseFrame(self: *InstTracking) void {
        self.* = .init(switch (self.long) {
            .none => switch (self.short) {
                .dead => .none,
                else => |short| short,
            },
            .reserved_frame => |index| .{ .load_frame = .{ .index = index } },
            else => |long| long,
        });
    }

    fn trackSpill(self: *InstTracking, function: *CodeGen, inst: Air.Inst.Index) !void {
        try function.freeValue(self.short);
        self.reuseFrame();
        tracking_log.debug("{} => {} (spilled)", .{ inst, self.* });
    }

    fn verifyMaterialize(self: InstTracking, target: InstTracking) void {
        switch (self.long) {
            .none,
            .load_frame,
            .reserved_frame,
            => switch (target.long) {
                .none,
                .load_frame,
                .reserved_frame,
                => {},
                else => unreachable,
            },
            .unreach,
            .undef,
            .immediate,
            .memory,
            .load_direct,
            .lea_direct,
            .load_got,
            .lea_got,
            .load_tlv,
            .lea_tlv,
            .lea_frame,
            .load_symbol,
            .lea_symbol,
            => assert(std.meta.eql(self.long, target.long)),
            .dead,
            .eflags,
            .register,
            .register_pair,
            .register_triple,
            .register_quadruple,
            .register_offset,
            .register_overflow,
            .register_mask,
            .indirect,
            .elementwise_regs_then_frame,
            .air_ref,
            => unreachable,
        }
    }

    fn materialize(
        self: *InstTracking,
        function: *CodeGen,
        inst: Air.Inst.Index,
        target: InstTracking,
    ) !void {
        self.verifyMaterialize(target);
        try self.materializeUnsafe(function, inst, target);
    }

    fn materializeUnsafe(
        self: InstTracking,
        function: *CodeGen,
        inst: Air.Inst.Index,
        target: InstTracking,
    ) !void {
        const ty = function.typeOfIndex(inst);
        if ((self.long == .none or self.long == .reserved_frame) and target.long == .load_frame)
            try function.genCopy(ty, target.long, self.short, .{});
        try function.genCopy(ty, target.short, self.short, .{});
    }

    fn trackMaterialize(self: *InstTracking, inst: Air.Inst.Index, target: InstTracking) void {
        self.verifyMaterialize(target);
        // Don't clobber reserved frame indices
        self.long = if (target.long == .none) switch (self.long) {
            .load_frame => |addr| .{ .reserved_frame = addr.index },
            .reserved_frame => self.long,
            else => target.long,
        } else target.long;
        self.short = target.short;
        tracking_log.debug("{} => {} (materialize)", .{ inst, self.* });
    }

    fn resurrect(self: *InstTracking, function: *CodeGen, inst: Air.Inst.Index, scope_generation: u32) !void {
        switch (self.short) {
            .dead => |die_generation| if (die_generation >= scope_generation) {
                self.reuseFrame();
                try function.getValue(self.short, inst);
                tracking_log.debug("{} => {} (resurrect)", .{ inst, self.* });
            },
            else => {},
        }
    }

    fn die(self: *InstTracking, function: *CodeGen, inst: Air.Inst.Index) !void {
        if (self.short == .dead) return;
        try function.freeValue(self.short);
        if (self.long == .none) self.long = self.short;
        self.short = .{ .dead = function.scope_generation };
        tracking_log.debug("{} => {} (death)", .{ inst, self.* });
    }

    fn reuse(
        self: *InstTracking,
        function: *CodeGen,
        new_inst: ?Air.Inst.Index,
        old_inst: Air.Inst.Index,
    ) void {
        self.short = .{ .dead = function.scope_generation };
        tracking_log.debug("{?} => {} (reuse {})", .{ new_inst, self.*, old_inst });
    }
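
    // Called for values that outlive the current scope: each of this value's
    // registers is freed in the register manager, and whichever instruction
    // was still tracked in that register has its tracking trimmed to its
    // remaining register (or marked dead) instead of dying normally.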
    fn liveOut(self: *InstTracking, function: *CodeGen, inst: Air.Inst.Index) void {
        for (self.getRegs()) |reg| {
            if (function.register_manager.isRegFree(reg)) {
                tracking_log.debug("{} => {} (live-out)", .{ inst, self.* });
                continue;
            }

            const index = RegisterManager.indexOfRegIntoTracked(reg).?;
            const tracked_inst = function.register_manager.registers[index];
            const tracking = function.getResolvedInstValue(tracked_inst);

            // Disable death.
            var found_reg = false;
            var remaining_reg: Register = .none;
            for (tracking.getRegs()) |tracked_reg| if (tracked_reg.id() == reg.id()) {
                assert(!found_reg);
                found_reg = true;
            } else {
                assert(remaining_reg == .none);
                remaining_reg = tracked_reg;
            };
            assert(found_reg);
            tracking.short = switch (remaining_reg) {
                .none => .{ .dead = function.scope_generation },
                else => .{ .register = remaining_reg },
            };

            // Perform side-effects of freeValue manually.
            function.register_manager.freeReg(reg);

            tracking_log.debug("{} => {} (live-out {})", .{ inst, self.*, tracked_inst });
        }
    }

    pub fn format(
        tracking: InstTracking,
        comptime _: []const u8,
        _: std.fmt.FormatOptions,
        writer: anytype,
    ) @TypeOf(writer).Error!void {
        if (!std.meta.eql(tracking.long, tracking.short)) try writer.print("|{}| ", .{tracking.long});
        try writer.print("{}", .{tracking.short});
    }
};

const FrameAlloc = struct {
    abi_size: u31,
    spill_pad: u3,
    abi_align: InternPool.Alignment,
    ref_count: u16,

    fn init(alloc_abi: struct { size: u64, pad: u3 = 0, alignment: InternPool.Alignment }) FrameAlloc {
        return .{
            .abi_size = @intCast(alloc_abi.size),
            .spill_pad = alloc_abi.pad,
            .abi_align = alloc_abi.alignment,
            .ref_count = 0,
        };
    }
    fn initType(ty: Type, zcu: *Zcu) FrameAlloc {
        return init(.{
            .size = ty.abiSize(zcu),
            .alignment = ty.abiAlignment(zcu),
        });
    }
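    // Spill slots are rounded up: sizes below 8 bytes go to the next power of
    // two and larger sizes to a multiple of 8 (e.g. a 3-byte value gets a
    // 4-byte slot with spill_pad = 1), presumably so spills can use single
    // full-width moves.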
    fn initSpill(ty: Type, zcu: *Zcu) FrameAlloc {
        const abi_size = ty.abiSize(zcu);
        const spill_size = if (abi_size < 8)
            std.math.ceilPowerOfTwoAssert(u64, abi_size)
        else
            std.mem.alignForward(u64, abi_size, 8);
        return init(.{
            .size = spill_size,
            .pad = @intCast(spill_size - abi_size),
            .alignment = ty.abiAlignment(zcu).maxStrict(
                .fromNonzeroByteUnits(@min(spill_size, 8)),
            ),
        });
    }
};

const StackAllocation = struct {
    inst: ?Air.Inst.Index,
    /// TODO do we need size? should be determined by inst.ty.abiSize(zcu)
    size: u32,
};

const BlockData = struct {
    relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty,
    state: State,

    fn deinit(self: *BlockData, gpa: Allocator) void {
        self.relocs.deinit(gpa);
        self.* = undefined;
    }
};

const CodeGen = @This();
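
/// Entry point for function code generation: lowers the function's AIR to MIR
/// via `gen()`, then drives `Emit`/`Lower` to turn the MIR into machine code.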
pub fn generate(
    bin_file: *link.File,
    pt: Zcu.PerThread,
    src_loc: Zcu.LazySrcLoc,
    func_index: InternPool.Index,
    air: Air,
    liveness: Liveness,
    code: *std.ArrayListUnmanaged(u8),
    debug_output: link.File.DebugInfoOutput,
) codegen.CodeGenError!void {
    const zcu = pt.zcu;
    const comp = zcu.comp;
    const gpa = zcu.gpa;
    const ip = &zcu.intern_pool;
    const func = zcu.funcInfo(func_index);
    const fn_type: Type = .fromInterned(func.ty);
    const mod = zcu.navFileScope(func.owner_nav).mod;

    var function: CodeGen = .{
        .gpa = gpa,
        .pt = pt,
        .air = air,
        .liveness = liveness,
        .target = &mod.resolved_target.result,
        .mod = mod,
        .bin_file = bin_file,
        .debug_output = debug_output,
        .owner = .{ .nav_index = func.owner_nav },
        .inline_func = func_index,
        .arg_index = undefined,
        .args = undefined, // populated after `resolveCallingConventionValues`
        .va_info = undefined, // populated after `resolveCallingConventionValues`
        .ret_mcv = undefined, // populated after `resolveCallingConventionValues`
        .err_ret_trace_reg = undefined, // populated after `resolveCallingConventionValues`
        .fn_type = fn_type,
        .src_loc = src_loc,
        .end_di_line = func.rbrace_line,
        .end_di_column = func.rbrace_column,
    };
    defer {
        function.frame_allocs.deinit(gpa);
        function.free_frame_indices.deinit(gpa);
        function.frame_locs.deinit(gpa);
        function.loops.deinit(gpa);
        function.loop_switches.deinit(gpa);
        var block_it = function.blocks.valueIterator();
        while (block_it.next()) |block| block.deinit(gpa);
        function.blocks.deinit(gpa);
        function.inst_tracking.deinit(gpa);
        function.const_tracking.deinit(gpa);
        function.epilogue_relocs.deinit(gpa);
        function.mir_instructions.deinit(gpa);
        function.mir_extra.deinit(gpa);
        function.mir_table.deinit(gpa);
    }
    try function.inst_tracking.ensureTotalCapacity(gpa, Temp.Index.max);
    for (0..Temp.Index.max) |temp_index| {
        const temp: Temp.Index = @enumFromInt(temp_index);
        function.inst_tracking.putAssumeCapacityNoClobber(temp.toIndex(), .init(.none));
    }

    wip_mir_log.debug("{}:", .{fmtNav(func.owner_nav, ip)});

    try function.frame_allocs.resize(gpa, FrameIndex.named_count);
    function.frame_allocs.set(
        @intFromEnum(FrameIndex.stack_frame),
        .init(.{ .size = 0, .alignment = .@"1" }),
    );
    function.frame_allocs.set(
        @intFromEnum(FrameIndex.call_frame),
        .init(.{ .size = 0, .alignment = .@"1" }),
    );

    const fn_info = zcu.typeToFunc(fn_type).?;
    var call_info = function.resolveCallingConventionValues(fn_info, &.{}, .args_frame) catch |err| switch (err) {
        error.CodegenFail => return error.CodegenFail,
        else => |e| return e,
    };
    defer call_info.deinit(&function);

    function.args = call_info.args;
    function.ret_mcv = call_info.return_value;
    function.err_ret_trace_reg = call_info.err_ret_trace_reg;
    function.frame_allocs.set(@intFromEnum(FrameIndex.ret_addr), .init(.{
        .size = Type.usize.abiSize(zcu),
        .alignment = Type.usize.abiAlignment(zcu).min(call_info.stack_align),
    }));
    function.frame_allocs.set(@intFromEnum(FrameIndex.base_ptr), .init(.{
        .size = Type.usize.abiSize(zcu),
        .alignment = call_info.stack_align.min(
            .fromNonzeroByteUnits(function.target.stackAlignment()),
        ),
    }));
    function.frame_allocs.set(
        @intFromEnum(FrameIndex.args_frame),
        .init(.{
            .size = call_info.stack_byte_count,
            .alignment = call_info.stack_align,
        }),
    );
    function.va_info = switch (fn_info.cc) {
        else => undefined,
        .x86_64_sysv => .{ .sysv = .{
            .gp_count = call_info.gp_count,
            .fp_count = call_info.fp_count,
            .overflow_arg_area = .{ .index = .args_frame, .off = call_info.stack_byte_count },
            .reg_save_area = undefined,
        } },
        .x86_64_win => .{ .win64 = .{} },
    };
    if (call_info.err_ret_trace_reg != .none) {
        function.register_manager.getRegAssumeFree(call_info.err_ret_trace_reg, err_ret_trace_index);
        try function.inst_tracking.putNoClobber(
            gpa,
            err_ret_trace_index,
            .init(.{ .register = call_info.err_ret_trace_reg }),
        );
    }

    function.gen() catch |err| switch (err) {
        error.CodegenFail => return error.CodegenFail,
        error.OutOfRegisters => return function.fail("ran out of registers (Zig compiler bug)", .{}),
        else => |e| return e,
    };

    var mir: Mir = .{
        .instructions = function.mir_instructions.toOwnedSlice(),
        .extra = try function.mir_extra.toOwnedSlice(gpa),
        .table = try function.mir_table.toOwnedSlice(gpa),
        .frame_locs = function.frame_locs.toOwnedSlice(),
    };
    defer mir.deinit(gpa);

    var emit: Emit = .{
        .air = function.air,
        .lower = .{
            .bin_file = bin_file,
            .target = function.target,
            .allocator = gpa,
            .mir = mir,
            .cc = fn_info.cc,
            .src_loc = src_loc,
            .output_mode = comp.config.output_mode,
            .link_mode = comp.config.link_mode,
            .pic = mod.pic,
        },
        .atom_index = function.owner.getSymbolIndex(&function) catch |err| switch (err) {
            error.CodegenFail => return error.CodegenFail,
            else => |e| return e,
        },
        .debug_output = debug_output,
        .code = code,
        .prev_di_loc = .{
            .line = func.lbrace_line,
            .column = func.lbrace_column,
            .is_stmt = switch (debug_output) {
                .dwarf => |dwarf| dwarf.dwarf.debug_line.header.default_is_stmt,
                .plan9 => undefined,
                .none => undefined,
            },
        },
        .prev_di_pc = 0,
    };
    emit.emitMir() catch |err| switch (err) {
        error.LowerFail, error.EmitFail => return function.failMsg(emit.lower.err_msg.?),

        error.InvalidInstruction, error.CannotEncode => |e| return function.fail("emit MIR failed: {s} (Zig compiler bug)", .{@errorName(e)}),
        else => |e| return function.fail("emit MIR failed: {s}", .{@errorName(e)}),
    };
}
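
/// Like `generate`, but for linker-created lazy symbols: there is no AIR or
/// Liveness to lower, so the body is produced directly by `genLazy`.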
pub fn generateLazy(
    bin_file: *link.File,
    pt: Zcu.PerThread,
    src_loc: Zcu.LazySrcLoc,
    lazy_sym: link.File.LazySymbol,
    code: *std.ArrayListUnmanaged(u8),
    debug_output: link.File.DebugInfoOutput,
) codegen.CodeGenError!void {
    const comp = bin_file.comp;
    const gpa = comp.gpa;
    // This function is for generating global code, so we use the root module.
    const mod = comp.root_mod;
    var function: CodeGen = .{
        .gpa = gpa,
        .pt = pt,
        .air = undefined,
        .liveness = undefined,
        .target = &mod.resolved_target.result,
        .mod = mod,
        .bin_file = bin_file,
        .debug_output = debug_output,
        .owner = .{ .lazy_sym = lazy_sym },
        .inline_func = undefined,
        .arg_index = undefined,
        .args = undefined,
        .va_info = undefined,
        .ret_mcv = undefined,
        .err_ret_trace_reg = undefined,
        .fn_type = undefined,
        .src_loc = src_loc,
        .end_di_line = undefined, // no debug info yet
        .end_di_column = undefined, // no debug info yet
    };
    defer {
        function.mir_instructions.deinit(gpa);
        function.mir_extra.deinit(gpa);
        function.mir_table.deinit(gpa);
    }

    function.genLazy(lazy_sym) catch |err| switch (err) {
        error.CodegenFail => return error.CodegenFail,
        error.OutOfRegisters => return function.fail("ran out of registers (Zig compiler bug)", .{}),
        else => |e| return e,
    };

    var mir: Mir = .{
        .instructions = function.mir_instructions.toOwnedSlice(),
        .extra = try function.mir_extra.toOwnedSlice(gpa),
        .table = try function.mir_table.toOwnedSlice(gpa),
        .frame_locs = function.frame_locs.toOwnedSlice(),
    };
    defer mir.deinit(gpa);

    var emit: Emit = .{
        .air = function.air,
        .lower = .{
            .bin_file = bin_file,
            .target = function.target,
            .allocator = gpa,
            .mir = mir,
            .cc = .auto,
            .src_loc = src_loc,
            .output_mode = comp.config.output_mode,
            .link_mode = comp.config.link_mode,
            .pic = mod.pic,
        },
        .atom_index = function.owner.getSymbolIndex(&function) catch |err| switch (err) {
            error.CodegenFail => return error.CodegenFail,
            else => |e| return e,
        },
        .debug_output = debug_output,
        .code = code,
        .prev_di_loc = undefined, // no debug info yet
        .prev_di_pc = undefined, // no debug info yet
    };
    emit.emitMir() catch |err| switch (err) {
        error.LowerFail, error.EmitFail => return function.failMsg(emit.lower.err_msg.?),
        error.InvalidInstruction => return function.fail("failed to find a viable x86 instruction (Zig compiler bug)", .{}),
        error.CannotEncode => return function.fail("failed to encode x86 instruction (Zig compiler bug)", .{}),
        else => |e| return function.fail("failed to emit MIR: {s}", .{@errorName(e)}),
    };
}

const FormatNavData = struct {
    ip: *const InternPool,
    nav_index: InternPool.Nav.Index,
};
fn formatNav(
    data: FormatNavData,
    comptime _: []const u8,
    _: std.fmt.FormatOptions,
    writer: anytype,
) @TypeOf(writer).Error!void {
    try writer.print("{}", .{data.ip.getNav(data.nav_index).fqn.fmt(data.ip)});
}
fn fmtNav(nav_index: InternPool.Nav.Index, ip: *const InternPool) std.fmt.Formatter(formatNav) {
    return .{ .data = .{
        .ip = ip,
        .nav_index = nav_index,
    } };
}

const FormatAirData = struct {
    self: *CodeGen,
    inst: Air.Inst.Index,
};
fn formatAir(
    data: FormatAirData,
    comptime _: []const u8,
    _: std.fmt.FormatOptions,
    writer: anytype,
) @TypeOf(writer).Error!void {
    @import("../../print_air.zig").dumpInst(
        data.inst,
        data.self.pt,
        data.self.air,
        data.self.liveness,
    );
}
fn fmtAir(self: *CodeGen, inst: Air.Inst.Index) std.fmt.Formatter(formatAir) {
    return .{ .data = .{ .self = self, .inst = inst } };
}

const FormatWipMirData = struct {
    self: *CodeGen,
    inst: Mir.Inst.Index,
};
fn formatWipMir(
    data: FormatWipMirData,
    comptime _: []const u8,
    _: std.fmt.FormatOptions,
    writer: anytype,
) @TypeOf(writer).Error!void {
    const comp = data.self.bin_file.comp;
    const mod = comp.root_mod;
    var lower: Lower = .{
        .bin_file = data.self.bin_file,
        .target = data.self.target,
        .allocator = data.self.gpa,
        .mir = .{
            .instructions = data.self.mir_instructions.slice(),
            .extra = data.self.mir_extra.items,
            .table = data.self.mir_table.items,
            .frame_locs = (std.MultiArrayList(Mir.FrameLoc){}).slice(),
        },
        .cc = .auto,
        .src_loc = data.self.src_loc,
        .output_mode = comp.config.output_mode,
        .link_mode = comp.config.link_mode,
        .pic = mod.pic,
    };
    var first = true;
    for ((lower.lowerMir(data.inst) catch |err| switch (err) {
        error.LowerFail => {
            defer {
                lower.err_msg.?.deinit(data.self.gpa);
                lower.err_msg = null;
            }
            try writer.writeAll(lower.err_msg.?.msg);
            return;
        },
        error.OutOfMemory, error.InvalidInstruction, error.CannotEncode => |e| {
            try writer.writeAll(switch (e) {
                error.OutOfMemory => "Out of memory",
                error.InvalidInstruction => "CodeGen failed to find a viable instruction.",
                error.CannotEncode => "CodeGen failed to encode the instruction.",
            });
            return;
        },
        else => |e| return e,
    }).insts) |lowered_inst| {
        if (!first) try writer.writeAll("\ndebug(wip_mir): ");
        try writer.print(" | {}", .{lowered_inst});
        first = false;
    }
    if (first) {
        const ip = &data.self.pt.zcu.intern_pool;
        const mir_inst = lower.mir.instructions.get(data.inst);
        try writer.print(" | .{s}", .{@tagName(mir_inst.ops)});
        switch (mir_inst.ops) {
            else => unreachable,
            .pseudo_dbg_prologue_end_none,
            .pseudo_dbg_epilogue_begin_none,
            .pseudo_dbg_enter_block_none,
            .pseudo_dbg_leave_block_none,
            .pseudo_dbg_var_args_none,
            .pseudo_dead_none,
            => {},
            .pseudo_dbg_line_stmt_line_column, .pseudo_dbg_line_line_column => try writer.print(
                " {[line]d}, {[column]d}",
                mir_inst.data.line_column,
            ),
            .pseudo_dbg_enter_inline_func, .pseudo_dbg_leave_inline_func => try writer.print(" {}", .{
                ip.getNav(ip.indexToKey(mir_inst.data.func).func.owner_nav).name.fmt(ip),
            }),
            .pseudo_dbg_local_a => try writer.print(" {}", .{mir_inst.data.a.air_inst}),
            .pseudo_dbg_local_ai_s => try writer.print(" {}, {d}", .{
                mir_inst.data.ai.air_inst,
                @as(i32, @bitCast(mir_inst.data.ai.i)),
            }),
            .pseudo_dbg_local_ai_u => try writer.print(" {}, {d}", .{
                mir_inst.data.ai.air_inst,
                mir_inst.data.ai.i,
            }),
            .pseudo_dbg_local_ai_64 => try writer.print(" {}, {d}", .{
                mir_inst.data.ai.air_inst,
                lower.mir.extraData(Mir.Imm64, mir_inst.data.ai.i).data.decode(),
            }),
            .pseudo_dbg_local_as => {
                const mem_op: encoder.Instruction.Operand = .{ .mem = .initSib(.qword, .{
                    .base = .{ .reloc = mir_inst.data.as.sym_index },
                }) };
                try writer.print(" {}, {}", .{ mir_inst.data.as.air_inst, mem_op.fmt(.m) });
            },
            .pseudo_dbg_local_aso => {
                const sym_off = lower.mir.extraData(bits.SymbolOffset, mir_inst.data.ax.payload).data;
                const mem_op: encoder.Instruction.Operand = .{ .mem = .initSib(.qword, .{
                    .base = .{ .reloc = sym_off.sym_index },
                    .disp = sym_off.off,
                }) };
                try writer.print(" {}, {}", .{ mir_inst.data.ax.air_inst, mem_op.fmt(.m) });
            },
            .pseudo_dbg_local_aro => {
                const air_off = lower.mir.extraData(Mir.AirOffset, mir_inst.data.rx.payload).data;
                const mem_op: encoder.Instruction.Operand = .{ .mem = .initSib(.qword, .{
                    .base = .{ .reg = mir_inst.data.rx.r1 },
                    .disp = air_off.off,
                }) };
                try writer.print(" {}, {}", .{ air_off.air_inst, mem_op.fmt(.m) });
            },
            .pseudo_dbg_local_af => {
                const frame_addr = lower.mir.extraData(bits.FrameAddr, mir_inst.data.ax.payload).data;
                const mem_op: encoder.Instruction.Operand = .{ .mem = .initSib(.qword, .{
                    .base = .{ .frame = frame_addr.index },
                    .disp = frame_addr.off,
                }) };
                try writer.print(" {}, {}", .{ mir_inst.data.ax.air_inst, mem_op.fmt(.m) });
            },
            .pseudo_dbg_local_am => {
                const mem_op: encoder.Instruction.Operand = .{
                    .mem = lower.mir.extraData(Mir.Memory, mir_inst.data.ax.payload).data.decode(),
                };
                try writer.print(" {}, {}", .{ mir_inst.data.ax.air_inst, mem_op.fmt(.m) });
            },
        }
    }
}
fn fmtWipMir(self: *CodeGen, inst: Mir.Inst.Index) std.fmt.Formatter(formatWipMir) {
    return .{ .data = .{ .self = self, .inst = inst } };
}

const FormatTrackingData = struct {
    self: *CodeGen,
};
fn formatTracking(
    data: FormatTrackingData,
    comptime _: []const u8,
    _: std.fmt.FormatOptions,
    writer: anytype,
) @TypeOf(writer).Error!void {
    var it = data.self.inst_tracking.iterator();
    while (it.next()) |entry| try writer.print("\n{} = {}", .{ entry.key_ptr.*, entry.value_ptr.* });
}
fn fmtTracking(self: *CodeGen) std.fmt.Formatter(formatTracking) {
    return .{ .data = .{ .self = self } };
}

fn addInst(self: *CodeGen, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
    const gpa = self.gpa;
    try self.mir_instructions.ensureUnusedCapacity(gpa, 1);
    const result_index: Mir.Inst.Index = @intCast(self.mir_instructions.len);
    self.mir_instructions.appendAssumeCapacity(inst);
    if (inst.ops != .pseudo_dead_none) wip_mir_log.debug("{}", .{self.fmtWipMir(result_index)});
    return result_index;
}
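
// MIR "extra" data is a flat array of u32s: `addExtra` appends each field of
// the given struct in declaration order and returns the index of the first
// word, which instruction payloads then reference.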
fn addExtra(self: *CodeGen, extra: anytype) Allocator.Error!u32 {
    const fields = std.meta.fields(@TypeOf(extra));
    try self.mir_extra.ensureUnusedCapacity(self.gpa, fields.len);
    return self.addExtraAssumeCapacity(extra);
}

fn addExtraAssumeCapacity(self: *CodeGen, extra: anytype) u32 {
    const fields = std.meta.fields(@TypeOf(extra));
    const result: u32 = @intCast(self.mir_extra.items.len);
    inline for (fields) |field| {
        self.mir_extra.appendAssumeCapacity(switch (field.type) {
            u32 => @field(extra, field.name),
            i32, Mir.Memory.Info => @bitCast(@field(extra, field.name)),
            bits.FrameIndex => @intFromEnum(@field(extra, field.name)),
            else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)),
        });
    }
    return result;
}
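
// Dispatches on the shape of the 4-operand tuple: e.g. `.{ .reg, .imm, .none,
// .none }` routes to `asmRegisterImmediate`, and unsupported shapes surface
// as `error.InvalidInstruction`.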
fn asmOps(self: *CodeGen, tag: Mir.Inst.FixedTag, ops: [4]Operand) !void {
    return switch (ops[0]) {
        .none => self.asmOpOnly(tag),
        .reg => |reg0| switch (ops[1]) {
            .none => self.asmRegister(tag, reg0),
            .reg => |reg1| switch (ops[2]) {
                .none => self.asmRegisterRegister(tag, reg0, reg1),
                .reg => |reg2| switch (ops[3]) {
                    .none => self.asmRegisterRegisterRegister(tag, reg0, reg1, reg2),
                    .reg => |reg3| self.asmRegisterRegisterRegisterRegister(tag, reg0, reg1, reg2, reg3),
                    .imm => |imm3| self.asmRegisterRegisterRegisterImmediate(tag, reg0, reg1, reg2, imm3),
                    else => error.InvalidInstruction,
                },
                .mem => |mem2| switch (ops[3]) {
                    .none => self.asmRegisterRegisterMemory(tag, reg0, reg1, mem2),
                    .reg => |reg3| self.asmRegisterRegisterMemoryRegister(tag, reg0, reg1, mem2, reg3),
                    .imm => |imm3| self.asmRegisterRegisterMemoryImmediate(tag, reg0, reg1, mem2, imm3),
                    else => error.InvalidInstruction,
                },
                .imm => |imm2| switch (ops[3]) {
                    .none => self.asmRegisterRegisterImmediate(tag, reg0, reg1, imm2),
                    else => error.InvalidInstruction,
                },
                else => error.InvalidInstruction,
            },
            .mem => |mem1| switch (ops[2]) {
                .none => self.asmRegisterMemory(tag, reg0, mem1),
                .reg => |reg2| switch (ops[3]) {
                    .none => self.asmRegisterMemoryRegister(tag, reg0, mem1, reg2),
                    else => error.InvalidInstruction,
                },
                .imm => |imm2| switch (ops[3]) {
                    .none => self.asmRegisterMemoryImmediate(tag, reg0, mem1, imm2),
                    else => error.InvalidInstruction,
                },
                else => error.InvalidInstruction,
            },
            .imm => |imm1| switch (ops[2]) {
                .none => self.asmRegisterImmediate(tag, reg0, imm1),
                else => error.InvalidInstruction,
            },
            else => error.InvalidInstruction,
        },
        .mem => |mem0| switch (ops[1]) {
            .none => self.asmMemory(tag, mem0),
            .reg => |reg1| switch (ops[2]) {
                .none => self.asmMemoryRegister(tag, mem0, reg1),
                .reg => |reg2| switch (ops[3]) {
                    .none => self.asmMemoryRegisterRegister(tag, mem0, reg1, reg2),
                    else => error.InvalidInstruction,
                },
                .imm => |imm2| switch (ops[3]) {
                    .none => self.asmMemoryRegisterImmediate(tag, mem0, reg1, imm2),
                    else => error.InvalidInstruction,
                },
                else => error.InvalidInstruction,
            },
            .imm => |imm1| switch (ops[2]) {
                .none => self.asmMemoryImmediate(tag, mem0, imm1),
                else => error.InvalidInstruction,
            },
            else => error.InvalidInstruction,
        },
        .imm => |imm0| switch (ops[1]) {
            .none => self.asmImmediate(tag, imm0),
            .reg => |reg1| switch (ops[2]) {
                .none => self.asmImmediateRegister(tag, imm0, reg1),
                else => error.InvalidInstruction,
            },
            .imm => |imm1| switch (ops[2]) {
                .none => self.asmImmediateImmediate(tag, imm0, imm1),
                else => error.InvalidInstruction,
            },
            else => error.InvalidInstruction,
        },
        .inst => |inst0| switch (ops[1]) {
            .none => self.asmReloc(tag, inst0),
            else => error.InvalidInstruction,
        },
    };
}

/// A `cc` of `.z_and_np` clobbers `reg2`!
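/// Without the `cmov` CPU feature, this degrades to a conditional jump over an
/// unconditional `mov`: same result, but not branchless.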
fn asmCmovccRegisterRegister(self: *CodeGen, cc: Condition, reg1: Register, reg2: Register) !void {
    if (self.hasFeature(.cmov)) _ = try self.addInst(.{
        .tag = switch (cc) {
            else => .cmov,
            .z_and_np, .nz_or_p => .pseudo,
        },
        .ops = switch (cc) {
            else => .rr,
            .z_and_np => .pseudo_cmov_z_and_np_rr,
            .nz_or_p => .pseudo_cmov_nz_or_p_rr,
        },
        .data = .{ .rr = .{
            .fixes = switch (cc) {
                else => .fromCond(cc),
                .z_and_np, .nz_or_p => ._,
            },
            .r1 = reg1,
            .r2 = reg2,
        } },
    }) else {
        const reloc = try self.asmJccReloc(cc.negate(), undefined);
        try self.asmRegisterRegister(.{ ._, .mov }, reg1, reg2);
        self.performReloc(reloc);
    }
}

/// A `cc` of `.z_and_np` is not supported by this encoding!
fn asmCmovccRegisterMemory(self: *CodeGen, cc: Condition, reg: Register, m: Memory) !void {
    if (self.hasFeature(.cmov)) _ = try self.addInst(.{
        .tag = switch (cc) {
            else => .cmov,
            .z_and_np => unreachable,
            .nz_or_p => .pseudo,
        },
        .ops = switch (cc) {
            else => .rm,
            .z_and_np => unreachable,
            .nz_or_p => .pseudo_cmov_nz_or_p_rm,
        },
        .data = .{ .rx = .{
            .fixes = switch (cc) {
                else => .fromCond(cc),
                .z_and_np => unreachable,
                .nz_or_p => ._,
            },
            .r1 = reg,
            .payload = try self.addExtra(Mir.Memory.encode(m)),
        } },
    }) else {
        const reloc = try self.asmJccReloc(cc.negate(), undefined);
        try self.asmRegisterMemory(.{ ._, .mov }, reg, m);
        self.performReloc(reloc);
    }
}

fn asmSetccRegister(self: *CodeGen, cc: Condition, reg: Register) !void {
    _ = try self.addInst(.{
        .tag = switch (cc) {
            else => .set,
            .z_and_np, .nz_or_p => .pseudo,
        },
        .ops = switch (cc) {
            else => .r,
            .z_and_np => .pseudo_set_z_and_np_r,
            .nz_or_p => .pseudo_set_nz_or_p_r,
        },
        .data = switch (cc) {
            else => .{ .r = .{
                .fixes = .fromCond(cc),
                .r1 = reg,
            } },
            .z_and_np, .nz_or_p => .{ .rr = .{
                .r1 = reg,
                .r2 = (try self.register_manager.allocReg(null, abi.RegisterClass.gp)).to8(),
            } },
        },
    });
}

fn asmSetccMemory(self: *CodeGen, cc: Condition, m: Memory) !void {
    const payload = try self.addExtra(Mir.Memory.encode(m));
    _ = try self.addInst(.{
        .tag = switch (cc) {
            else => .set,
            .z_and_np, .nz_or_p => .pseudo,
        },
        .ops = switch (cc) {
            else => .m,
            .z_and_np => .pseudo_set_z_and_np_m,
            .nz_or_p => .pseudo_set_nz_or_p_m,
        },
        .data = switch (cc) {
            else => .{ .x = .{
                .fixes = .fromCond(cc),
                .payload = payload,
            } },
            .z_and_np, .nz_or_p => .{ .rx = .{
                .r1 = (try self.register_manager.allocReg(null, abi.RegisterClass.gp)).to8(),
                .payload = payload,
            } },
        },
    });
}

fn asmJmpReloc(self: *CodeGen, target: Mir.Inst.Index) !Mir.Inst.Index {
    return self.addInst(.{
        .tag = .j,
        .ops = .inst,
        .data = .{ .inst = .{
            .fixes = ._mp,
            .inst = target,
        } },
    });
}
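
// `.z_and_np` and `.nz_or_p` combine ZF with PF (the flag pattern produced by
// float compares) and have no single x86 condition code, so they are emitted
// as pseudo instructions for `Lower` to expand (presumably into a pair of
// conditional jumps).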
fn asmJccReloc(self: *CodeGen, cc: Condition, target: Mir.Inst.Index) !Mir.Inst.Index {
    return self.addInst(.{
        .tag = switch (cc) {
            else => .j,
            .z_and_np, .nz_or_p => .pseudo,
        },
        .ops = switch (cc) {
            else => .inst,
            .z_and_np => .pseudo_j_z_and_np_inst,
            .nz_or_p => .pseudo_j_nz_or_p_inst,
        },
        .data = .{ .inst = .{
            .fixes = switch (cc) {
                else => .fromCond(cc),
                .z_and_np, .nz_or_p => ._,
            },
            .inst = target,
        } },
    });
}

fn asmReloc(self: *CodeGen, tag: Mir.Inst.FixedTag, target: Mir.Inst.Index) !void {
    _ = try self.addInst(.{
        .tag = tag[1],
        .ops = .inst,
        .data = .{ .inst = .{
            .fixes = tag[0],
            .inst = target,
        } },
    });
}

fn asmPlaceholder(self: *CodeGen) !Mir.Inst.Index {
    return self.addInst(.{
        .tag = .pseudo,
        .ops = .pseudo_dead_none,
        .data = undefined,
    });
}

const MirTagAir = enum { dbg_local };

fn asmAir(self: *CodeGen, tag: MirTagAir, inst: Air.Inst.Index) !void {
    _ = try self.addInst(.{
        .tag = .pseudo,
        .ops = switch (tag) {
            .dbg_local => .pseudo_dbg_local_a,
        },
        .data = .{ .a = .{ .air_inst = inst } },
    });
}
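
// Debug-local immediates pick the smallest encoding: signed values use the
// `ai_s` form, unsigned values that fit in 32 bits use `ai_u`, and larger
// ones spill into extra data as a `Mir.Imm64` (`ai_64`); relocations use the
// `as`/`aso` forms depending on whether they carry an offset.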
|
|
|
|
fn asmAirImmediate(self: *CodeGen, tag: MirTagAir, inst: Air.Inst.Index, imm: Immediate) !void {
|
|
switch (imm) {
|
|
.signed => |s| _ = try self.addInst(.{
|
|
.tag = .pseudo,
|
|
.ops = switch (tag) {
|
|
.dbg_local => .pseudo_dbg_local_ai_s,
|
|
},
|
|
.data = .{ .ai = .{
|
|
.air_inst = inst,
|
|
.i = @bitCast(s),
|
|
} },
|
|
}),
|
|
.unsigned => |u| _ = if (std.math.cast(u32, u)) |small| try self.addInst(.{
|
|
.tag = .pseudo,
|
|
.ops = switch (tag) {
|
|
.dbg_local => .pseudo_dbg_local_ai_u,
|
|
},
|
|
.data = .{ .ai = .{
|
|
.air_inst = inst,
|
|
.i = small,
|
|
} },
|
|
}) else try self.addInst(.{
|
|
.tag = .pseudo,
|
|
.ops = switch (tag) {
|
|
.dbg_local => .pseudo_dbg_local_ai_64,
|
|
},
|
|
.data = .{ .ai = .{
|
|
.air_inst = inst,
|
|
.i = try self.addExtra(Mir.Imm64.encode(u)),
|
|
} },
|
|
}),
|
|
.reloc => |sym_off| _ = if (sym_off.off == 0) try self.addInst(.{
|
|
.tag = .pseudo,
|
|
.ops = switch (tag) {
|
|
.dbg_local => .pseudo_dbg_local_as,
|
|
},
|
|
.data = .{ .as = .{
|
|
.air_inst = inst,
|
|
.sym_index = sym_off.sym_index,
|
|
} },
|
|
}) else try self.addInst(.{
|
|
.tag = .pseudo,
|
|
.ops = switch (tag) {
|
|
.dbg_local => .pseudo_dbg_local_aso,
|
|
},
|
|
.data = .{ .ax = .{
|
|
.air_inst = inst,
|
|
.payload = try self.addExtra(sym_off),
|
|
} },
|
|
}),
|
|
}
|
|
}
|
|
|
|
fn asmAirRegisterImmediate(
|
|
self: *CodeGen,
|
|
tag: MirTagAir,
|
|
inst: Air.Inst.Index,
|
|
reg: Register,
|
|
imm: Immediate,
|
|
) !void {
|
|
_ = try self.addInst(.{
|
|
.tag = .pseudo,
|
|
.ops = switch (tag) {
|
|
.dbg_local => .pseudo_dbg_local_aro,
|
|
},
|
|
.data = .{ .rx = .{
|
|
.r1 = reg,
|
|
.payload = try self.addExtra(Mir.AirOffset{
|
|
.air_inst = inst,
|
|
.off = imm.signed,
|
|
}),
|
|
} },
|
|
});
|
|
}
|
|
|
|
fn asmAirFrameAddress(
|
|
self: *CodeGen,
|
|
tag: MirTagAir,
|
|
inst: Air.Inst.Index,
|
|
frame_addr: bits.FrameAddr,
|
|
) !void {
|
|
_ = try self.addInst(.{
|
|
.tag = .pseudo,
|
|
.ops = switch (tag) {
|
|
.dbg_local => .pseudo_dbg_local_af,
|
|
},
|
|
.data = .{ .ax = .{
|
|
.air_inst = inst,
|
|
.payload = try self.addExtra(frame_addr),
|
|
} },
|
|
});
|
|
}
|
|
|
|
fn asmAirMemory(self: *CodeGen, tag: MirTagAir, inst: Air.Inst.Index, m: Memory) !void {
|
|
_ = try self.addInst(.{
|
|
.tag = .pseudo,
|
|
.ops = switch (tag) {
|
|
.dbg_local => .pseudo_dbg_local_am,
|
|
},
|
|
.data = .{ .ax = .{
|
|
.air_inst = inst,
|
|
.payload = try self.addExtra(Mir.Memory.encode(m)),
|
|
} },
|
|
});
|
|
}
|
|
|
|
fn asmOpOnly(self: *CodeGen, tag: Mir.Inst.FixedTag) !void {
|
|
_ = try self.addInst(.{
|
|
.tag = tag[1],
|
|
.ops = .none,
|
|
.data = .{ .none = .{
|
|
.fixes = tag[0],
|
|
} },
|
|
});
|
|
}
|
|
|
|
fn asmPseudo(self: *CodeGen, ops: Mir.Inst.Ops) !void {
|
|
assert(std.mem.startsWith(u8, @tagName(ops), "pseudo_") and
|
|
std.mem.endsWith(u8, @tagName(ops), "_none"));
|
|
_ = try self.addInst(.{
|
|
.tag = .pseudo,
|
|
.ops = ops,
|
|
.data = undefined,
|
|
});
|
|
}
|
|
|
|
fn asmPseudoRegister(self: *CodeGen, ops: Mir.Inst.Ops, reg: Register) !void {
|
|
assert(std.mem.startsWith(u8, @tagName(ops), "pseudo_") and
|
|
std.mem.endsWith(u8, @tagName(ops), "_r"));
|
|
_ = try self.addInst(.{
|
|
.tag = .pseudo,
|
|
.ops = ops,
|
|
.data = .{ .r = .{ .r1 = reg } },
|
|
});
|
|
}
|
|
|
|
fn asmPseudoImmediate(self: *CodeGen, ops: Mir.Inst.Ops, imm: Immediate) !void {
|
|
assert(std.mem.startsWith(u8, @tagName(ops), "pseudo_") and
|
|
std.mem.endsWith(u8, @tagName(ops), "_i_s"));
|
|
_ = try self.addInst(.{
|
|
.tag = .pseudo,
|
|
.ops = ops,
|
|
.data = .{ .i = .{ .i = @bitCast(imm.signed) } },
|
|
});
|
|
}
|
|
|
|
fn asmPseudoRegisterRegister(self: *CodeGen, ops: Mir.Inst.Ops, reg1: Register, reg2: Register) !void {
|
|
assert(std.mem.startsWith(u8, @tagName(ops), "pseudo_") and
|
|
std.mem.endsWith(u8, @tagName(ops), "_rr"));
|
|
_ = try self.addInst(.{
|
|
.tag = .pseudo,
|
|
.ops = ops,
|
|
.data = .{ .rr = .{ .r1 = reg1, .r2 = reg2 } },
|
|
});
|
|
}
|
|
|
|
fn asmPseudoRegisterImmediate(self: *CodeGen, ops: Mir.Inst.Ops, reg: Register, imm: Immediate) !void {
|
|
assert(std.mem.startsWith(u8, @tagName(ops), "pseudo_") and
|
|
std.mem.endsWith(u8, @tagName(ops), "_ri_s"));
|
|
_ = try self.addInst(.{
|
|
.tag = .pseudo,
|
|
.ops = ops,
|
|
.data = .{ .ri = .{ .r1 = reg, .i = @bitCast(imm.signed) } },
|
|
});
|
|
}
|
|
|
|
fn asmRegister(self: *CodeGen, tag: Mir.Inst.FixedTag, reg: Register) !void {
|
|
_ = try self.addInst(.{
|
|
.tag = tag[1],
|
|
.ops = .r,
|
|
.data = .{ .r = .{
|
|
.fixes = tag[0],
|
|
.r1 = reg,
|
|
} },
|
|
});
|
|
}
|
|
|
|
fn asmImmediate(self: *CodeGen, tag: Mir.Inst.FixedTag, imm: Immediate) !void {
|
|
_ = try self.addInst(.{
|
|
.tag = tag[1],
|
|
.ops = switch (imm) {
|
|
.signed => .i_s,
|
|
.unsigned => .i_u,
|
|
.reloc => .rel,
|
|
},
|
|
.data = switch (imm) {
|
|
.reloc => |sym_off| reloc: {
|
|
assert(tag[0] == ._);
|
|
break :reloc .{ .reloc = sym_off };
|
|
},
|
|
.signed, .unsigned => .{ .i = .{
|
|
.fixes = tag[0],
|
|
.i = switch (imm) {
|
|
.signed => |s| @bitCast(s),
|
|
.unsigned => |u| @intCast(u),
|
|
.reloc => unreachable,
|
|
},
|
|
} },
|
|
},
|
|
});
|
|
}
|
|
|
|
fn asmImmediateRegister(self: *CodeGen, tag: Mir.Inst.FixedTag, imm: Immediate, reg: Register) !void {
|
|
_ = try self.addInst(.{
|
|
.tag = tag[1],
|
|
.ops = .ir,
|
|
.data = .{ .ri = .{
|
|
.fixes = tag[0],
|
|
.r1 = reg,
|
|
.i = @as(u8, switch (imm) {
|
|
.signed => |s| @bitCast(@as(i8, @intCast(s))),
|
|
.unsigned => |u| @intCast(u),
|
|
.reloc => unreachable,
|
|
}),
|
|
} },
|
|
});
|
|
}
|
|
|
|
fn asmImmediateImmediate(self: *CodeGen, tag: Mir.Inst.FixedTag, imm1: Immediate, imm2: Immediate) !void {
    _ = try self.addInst(.{
        .tag = tag[1],
        .ops = .ii,
        .data = .{ .ii = .{
            .fixes = tag[0],
            .i1 = switch (imm1) {
                .signed => |s| @bitCast(@as(i16, @intCast(s))),
                .unsigned => |u| @intCast(u),
                .reloc => unreachable,
            },
            .i2 = switch (imm2) {
                .signed => |s| @bitCast(@as(i8, @intCast(s))),
                .unsigned => |u| @intCast(u),
                .reloc => unreachable,
            },
        } },
    });
}

fn asmRegisterRegister(self: *CodeGen, tag: Mir.Inst.FixedTag, reg1: Register, reg2: Register) !void {
    _ = try self.addInst(.{
        .tag = tag[1],
        .ops = .rr,
        .data = .{ .rr = .{
            .fixes = tag[0],
            .r1 = reg1,
            .r2 = reg2,
        } },
    });
}

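/// Emits an instruction with a register and an immediate operand, selecting
/// `.ri_s` for signed immediates, `.ri_u` for unsigned immediates that fit in
/// 32 bits, and `.ri_64` (with the value spilled to extra data) otherwise.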
fn asmRegisterImmediate(self: *CodeGen, tag: Mir.Inst.FixedTag, reg: Register, imm: Immediate) !void {
    const ops: Mir.Inst.Ops, const i: u32 = switch (imm) {
        .signed => |s| .{ .ri_s, @bitCast(s) },
        .unsigned => |u| if (std.math.cast(u32, u)) |small|
            .{ .ri_u, small }
        else
            .{ .ri_64, try self.addExtra(Mir.Imm64.encode(imm.unsigned)) },
        .reloc => unreachable,
    };
    _ = try self.addInst(.{
        .tag = tag[1],
        .ops = ops,
        .data = .{ .ri = .{
            .fixes = tag[0],
            .r1 = reg,
            .i = i,
        } },
    });
}

fn asmRegisterRegisterRegister(
    self: *CodeGen,
    tag: Mir.Inst.FixedTag,
    reg1: Register,
    reg2: Register,
    reg3: Register,
) !void {
    _ = try self.addInst(.{
        .tag = tag[1],
        .ops = .rrr,
        .data = .{ .rrr = .{
            .fixes = tag[0],
            .r1 = reg1,
            .r2 = reg2,
            .r3 = reg3,
        } },
    });
}

fn asmRegisterRegisterRegisterRegister(
    self: *CodeGen,
    tag: Mir.Inst.FixedTag,
    reg1: Register,
    reg2: Register,
    reg3: Register,
    reg4: Register,
) !void {
    _ = try self.addInst(.{
        .tag = tag[1],
        .ops = .rrrr,
        .data = .{ .rrrr = .{
            .fixes = tag[0],
            .r1 = reg1,
            .r2 = reg2,
            .r3 = reg3,
            .r4 = reg4,
        } },
    });
}

fn asmRegisterRegisterRegisterImmediate(
    self: *CodeGen,
    tag: Mir.Inst.FixedTag,
    reg1: Register,
    reg2: Register,
    reg3: Register,
    imm: Immediate,
) !void {
    _ = try self.addInst(.{
        .tag = tag[1],
        .ops = .rrri,
        .data = .{ .rrri = .{
            .fixes = tag[0],
            .r1 = reg1,
            .r2 = reg2,
            .r3 = reg3,
            .i = switch (imm) {
                .signed => |s| @bitCast(@as(i8, @intCast(s))),
                .unsigned => |u| @intCast(u),
                .reloc => unreachable,
            },
        } },
    });
}

fn asmRegisterRegisterImmediate(
    self: *CodeGen,
    tag: Mir.Inst.FixedTag,
    reg1: Register,
    reg2: Register,
    imm: Immediate,
) !void {
    _ = try self.addInst(.{
        .tag = tag[1],
        .ops = switch (imm) {
            .signed => .rri_s,
            .unsigned => .rri_u,
            .reloc => unreachable,
        },
        .data = .{ .rri = .{
            .fixes = tag[0],
            .r1 = reg1,
            .r2 = reg2,
            .i = switch (imm) {
                .signed => |s| @bitCast(s),
                .unsigned => |u| @intCast(u),
                .reloc => unreachable,
            },
        } },
    });
}

fn asmRegisterRegisterMemory(
    self: *CodeGen,
    tag: Mir.Inst.FixedTag,
    reg1: Register,
    reg2: Register,
    m: Memory,
) !void {
    _ = try self.addInst(.{
        .tag = tag[1],
        .ops = .rrm,
        .data = .{ .rrx = .{
            .fixes = tag[0],
            .r1 = reg1,
            .r2 = reg2,
            .payload = try self.addExtra(Mir.Memory.encode(m)),
        } },
    });
}

fn asmRegisterRegisterMemoryRegister(
    self: *CodeGen,
    tag: Mir.Inst.FixedTag,
    reg1: Register,
    reg2: Register,
    m: Memory,
    reg3: Register,
) !void {
    _ = try self.addInst(.{
        .tag = tag[1],
        .ops = .rrmr,
        .data = .{ .rrrx = .{
            .fixes = tag[0],
            .r1 = reg1,
            .r2 = reg2,
            .r3 = reg3,
            .payload = try self.addExtra(Mir.Memory.encode(m)),
        } },
    });
}

fn asmMemory(self: *CodeGen, tag: Mir.Inst.FixedTag, m: Memory) !void {
    _ = try self.addInst(.{
        .tag = tag[1],
        .ops = .m,
        .data = .{ .x = .{
            .fixes = tag[0],
            .payload = try self.addExtra(Mir.Memory.encode(m)),
        } },
    });
}

fn asmRegisterMemory(self: *CodeGen, tag: Mir.Inst.FixedTag, reg: Register, m: Memory) !void {
    _ = try self.addInst(.{
        .tag = tag[1],
        .ops = .rm,
        .data = .{ .rx = .{
            .fixes = tag[0],
            .r1 = reg,
            .payload = try self.addExtra(Mir.Memory.encode(m)),
        } },
    });
}

fn asmRegisterMemoryRegister(
    self: *CodeGen,
    tag: Mir.Inst.FixedTag,
    reg1: Register,
    m: Memory,
    reg2: Register,
) !void {
    _ = try self.addInst(.{
        .tag = tag[1],
        .ops = .rmr,
        .data = .{ .rrx = .{
            .fixes = tag[0],
            .r1 = reg1,
            .r2 = reg2,
            .payload = try self.addExtra(Mir.Memory.encode(m)),
        } },
    });
}

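/// Emits an instruction with register, memory, and immediate operands.
/// Immediates that fit in 16 bits are encoded inline as `.rmi`; larger signed
/// immediates are stored in extra data as a `Mir.Imm32` placed immediately
/// before the encoded memory operand.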
fn asmRegisterMemoryImmediate(
    self: *CodeGen,
    tag: Mir.Inst.FixedTag,
    reg: Register,
    m: Memory,
    imm: Immediate,
) !void {
    if (switch (imm) {
        .signed => |s| if (std.math.cast(i16, s)) |x| @as(u16, @bitCast(x)) else null,
        .unsigned => |u| std.math.cast(u16, u),
        .reloc => unreachable,
    }) |small_imm| {
        _ = try self.addInst(.{
            .tag = tag[1],
            .ops = .rmi,
            .data = .{ .rix = .{
                .fixes = tag[0],
                .r1 = reg,
                .i = small_imm,
                .payload = try self.addExtra(Mir.Memory.encode(m)),
            } },
        });
    } else {
        const payload = try self.addExtra(Mir.Imm32{ .imm = switch (imm) {
            .signed => |s| @bitCast(s),
            .unsigned => unreachable,
            .reloc => unreachable,
        } });
        assert(payload + 1 == try self.addExtra(Mir.Memory.encode(m)));
        _ = try self.addInst(.{
            .tag = tag[1],
            .ops = switch (imm) {
                .signed => .rmi_s,
                .unsigned => .rmi_u,
                .reloc => unreachable,
            },
            .data = .{ .rx = .{
                .fixes = tag[0],
                .r1 = reg,
                .payload = payload,
            } },
        });
    }
}

fn asmRegisterRegisterMemoryImmediate(
    self: *CodeGen,
    tag: Mir.Inst.FixedTag,
    reg1: Register,
    reg2: Register,
    m: Memory,
    imm: Immediate,
) !void {
    _ = try self.addInst(.{
        .tag = tag[1],
        .ops = .rrmi,
        .data = .{ .rrix = .{
            .fixes = tag[0],
            .r1 = reg1,
            .r2 = reg2,
            .i = @intCast(imm.unsigned),
            .payload = try self.addExtra(Mir.Memory.encode(m)),
        } },
    });
}

fn asmMemoryRegister(self: *CodeGen, tag: Mir.Inst.FixedTag, m: Memory, reg: Register) !void {
    _ = try self.addInst(.{
        .tag = tag[1],
        .ops = .mr,
        .data = .{ .rx = .{
            .fixes = tag[0],
            .r1 = reg,
            .payload = try self.addExtra(Mir.Memory.encode(m)),
        } },
    });
}

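/// Emits an instruction with memory and immediate operands. Both are stored
/// in extra data, with the `Mir.Imm32` immediate placed immediately before
/// the encoded memory operand so that `.data` only needs one payload index.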
fn asmMemoryImmediate(self: *CodeGen, tag: Mir.Inst.FixedTag, m: Memory, imm: Immediate) !void {
    const payload = try self.addExtra(Mir.Imm32{ .imm = switch (imm) {
        .signed => |s| @bitCast(s),
        .unsigned => |u| @intCast(u),
        .reloc => unreachable,
    } });
    assert(payload + 1 == try self.addExtra(Mir.Memory.encode(m)));
    _ = try self.addInst(.{
        .tag = tag[1],
        .ops = switch (imm) {
            .signed => .mi_s,
            .unsigned => .mi_u,
            .reloc => unreachable,
        },
        .data = .{ .x = .{
            .fixes = tag[0],
            .payload = payload,
        } },
    });
}

fn asmMemoryRegisterRegister(
    self: *CodeGen,
    tag: Mir.Inst.FixedTag,
    m: Memory,
    reg1: Register,
    reg2: Register,
) !void {
    _ = try self.addInst(.{
        .tag = tag[1],
        .ops = .mrr,
        .data = .{ .rrx = .{
            .fixes = tag[0],
            .r1 = reg1,
            .r2 = reg2,
            .payload = try self.addExtra(Mir.Memory.encode(m)),
        } },
    });
}

fn asmMemoryRegisterImmediate(
    self: *CodeGen,
    tag: Mir.Inst.FixedTag,
    m: Memory,
    reg: Register,
    imm: Immediate,
) !void {
    _ = try self.addInst(.{
        .tag = tag[1],
        .ops = .mri,
        .data = .{ .rix = .{
            .fixes = tag[0],
            .r1 = reg,
            .i = @intCast(imm.unsigned),
            .payload = try self.addExtra(Mir.Memory.encode(m)),
        } },
    });
}

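/// Generates MIR for the entire function: emits the prologue (frame pointer
/// setup plus placeholders for callee-preserved register pushes, frame
/// alignment, and stack allocation), lowers the AIR body, then emits a single
/// shared epilogue that all return paths jump to. The placeholders are
/// backpatched after `computeFrameLayout` determines the final frame size,
/// alignment, and set of registers to save. Naked functions skip all prologue
/// and epilogue work.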
fn gen(self: *CodeGen) InnerError!void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const fn_info = zcu.typeToFunc(self.fn_type).?;
    if (fn_info.cc != .naked) {
        try self.asmRegister(.{ ._, .push }, .rbp);
        try self.asmPseudoImmediate(.pseudo_cfi_adjust_cfa_offset_i_s, .s(8));
        try self.asmPseudoRegisterImmediate(.pseudo_cfi_rel_offset_ri_s, .rbp, .s(0));
        try self.asmRegisterRegister(.{ ._, .mov }, .rbp, .rsp);
        try self.asmPseudoRegister(.pseudo_cfi_def_cfa_register_r, .rbp);
        const backpatch_push_callee_preserved_regs = try self.asmPlaceholder();
        const backpatch_frame_align = try self.asmPlaceholder();
        const backpatch_frame_align_extra = try self.asmPlaceholder();
        const backpatch_stack_alloc = try self.asmPlaceholder();
        const backpatch_stack_alloc_extra = try self.asmPlaceholder();

        switch (self.ret_mcv.long) {
            .none, .unreach => {},
            .indirect => {
                // The address at which to store the return value for the
                // caller is passed in a register that the callee is free to
                // clobber. Therefore, we purposely spill it to the stack
                // immediately.
                const frame_index = try self.allocFrameIndex(.initSpill(.usize, zcu));
                try self.genSetMem(
                    .{ .frame = frame_index },
                    0,
                    .usize,
                    self.ret_mcv.long.address().offset(-self.ret_mcv.short.indirect.off),
                    .{},
                );
                self.ret_mcv.long = .{ .load_frame = .{ .index = frame_index } };
                tracking_log.debug("spill {} to {}", .{ self.ret_mcv.long, frame_index });
            },
            else => unreachable,
        }

        if (fn_info.is_var_args) switch (fn_info.cc) {
            .x86_64_sysv => {
                const info = &self.va_info.sysv;
                const reg_save_area_fi = try self.allocFrameIndex(.init(.{
                    .size = abi.SysV.c_abi_int_param_regs.len * 8 +
                        abi.SysV.c_abi_sse_param_regs.len * 16,
                    .alignment = .@"16",
                }));
                info.reg_save_area = .{ .index = reg_save_area_fi };

                for (abi.SysV.c_abi_int_param_regs[info.gp_count..], info.gp_count..) |reg, reg_i|
                    try self.genSetMem(.{ .frame = reg_save_area_fi }, @intCast(reg_i * 8), .usize, .{ .register = reg }, .{});

                try self.asmRegisterImmediate(.{ ._, .cmp }, .al, .u(info.fp_count));
                const skip_sse_reloc = try self.asmJccReloc(.na, undefined);

                const vec_2_f64 = try pt.vectorType(.{ .len = 2, .child = .f64_type });
                for (abi.SysV.c_abi_sse_param_regs[info.fp_count..], info.fp_count..) |reg, reg_i|
                    try self.genSetMem(
                        .{ .frame = reg_save_area_fi },
                        @intCast(abi.SysV.c_abi_int_param_regs.len * 8 + reg_i * 16),
                        vec_2_f64,
                        .{ .register = reg },
                        .{},
                    );

                self.performReloc(skip_sse_reloc);
            },
            .x86_64_win => return self.fail("TODO implement gen var arg function for Win64", .{}),
            else => |cc| return self.fail("{s} does not support var args", .{@tagName(cc)}),
        };

        if (self.debug_output != .none) try self.asmPseudo(.pseudo_dbg_prologue_end_none);

        try self.genBody(self.air.getMainBody());

        const epilogue = if (self.epilogue_relocs.items.len > 0) epilogue: {
            const epilogue_relocs_last_index = self.epilogue_relocs.items.len - 1;
            for (if (self.epilogue_relocs.items[epilogue_relocs_last_index] == self.mir_instructions.len - 1) epilogue_relocs: {
                _ = self.mir_instructions.pop();
                break :epilogue_relocs self.epilogue_relocs.items[0..epilogue_relocs_last_index];
            } else self.epilogue_relocs.items) |epilogue_reloc| self.performReloc(epilogue_reloc);

            if (self.debug_output != .none) try self.asmPseudo(.pseudo_dbg_epilogue_begin_none);
            const backpatch_stack_dealloc = try self.asmPlaceholder();
            const backpatch_pop_callee_preserved_regs = try self.asmPlaceholder();
            try self.asmRegister(.{ ._, .pop }, .rbp);
            try self.asmPseudoRegisterImmediate(.pseudo_cfi_def_cfa_ri_s, .rsp, .s(8));
            try self.asmOpOnly(.{ ._, .ret });
            break :epilogue .{
                .backpatch_stack_dealloc = backpatch_stack_dealloc,
                .backpatch_pop_callee_preserved_regs = backpatch_pop_callee_preserved_regs,
            };
        } else null;

        const frame_layout = try self.computeFrameLayout(fn_info.cc);
        const need_frame_align = frame_layout.stack_mask != std.math.maxInt(u32);
        const need_stack_adjust = frame_layout.stack_adjust > 0;
        const need_save_reg = frame_layout.save_reg_list.count() > 0;
        if (need_frame_align) {
            const page_align = @as(u32, std.math.maxInt(u32)) << 12;
            self.mir_instructions.set(backpatch_frame_align, .{
                .tag = .@"and",
                .ops = .ri_s,
                .data = .{ .ri = .{
                    .r1 = .rsp,
                    .i = @max(frame_layout.stack_mask, page_align),
                } },
            });
            if (frame_layout.stack_mask < page_align) {
                self.mir_instructions.set(backpatch_frame_align_extra, .{
                    .tag = .pseudo,
                    .ops = .pseudo_probe_align_ri_s,
                    .data = .{ .ri = .{
                        .r1 = .rsp,
                        .i = ~frame_layout.stack_mask & page_align,
                    } },
                });
            }
        }
        if (need_stack_adjust) {
            const page_size: u32 = 1 << 12;
            if (frame_layout.stack_adjust <= page_size) {
                self.mir_instructions.set(backpatch_stack_alloc, .{
                    .tag = .sub,
                    .ops = .ri_s,
                    .data = .{ .ri = .{
                        .r1 = .rsp,
                        .i = frame_layout.stack_adjust,
                    } },
                });
            } else if (frame_layout.stack_adjust <
                page_size * Lower.pseudo_probe_adjust_unrolled_max_insts)
            {
                self.mir_instructions.set(backpatch_stack_alloc, .{
                    .tag = .pseudo,
                    .ops = .pseudo_probe_adjust_unrolled_ri_s,
                    .data = .{ .ri = .{
                        .r1 = .rsp,
                        .i = frame_layout.stack_adjust,
                    } },
                });
            } else {
                const scratch_reg = abi.getCAbiLinkerScratchReg(fn_info.cc);
                self.mir_instructions.set(backpatch_stack_alloc, .{
                    .tag = .pseudo,
                    .ops = .pseudo_probe_adjust_setup_rri_s,
                    .data = .{ .rri = .{
                        .r1 = .rsp,
                        .r2 = scratch_reg,
                        .i = frame_layout.stack_adjust,
                    } },
                });
                self.mir_instructions.set(backpatch_stack_alloc_extra, .{
                    .tag = .pseudo,
                    .ops = .pseudo_probe_adjust_loop_rr,
                    .data = .{ .rr = .{
                        .r1 = .rsp,
                        .r2 = scratch_reg,
                    } },
                });
            }
        }
        if (epilogue) |e| if (need_frame_align or need_stack_adjust) {
            self.mir_instructions.set(e.backpatch_stack_dealloc, switch (-frame_layout.save_reg_list.size(self.target)) {
                0 => .{
                    .tag = .mov,
                    .ops = .rr,
                    .data = .{ .rr = .{
                        .r1 = .rsp,
                        .r2 = .rbp,
                    } },
                },
                else => |disp| .{
                    .tag = .lea,
                    .ops = .rm,
                    .data = .{ .rx = .{
                        .r1 = .rsp,
                        .payload = try self.addExtra(Mir.Memory.encode(.{
                            .base = .{ .reg = .rbp },
                            .mod = .{ .rm = .{
                                .size = .qword,
                                .disp = disp,
                            } },
                        })),
                    } },
                },
            });
        };
        if (need_save_reg) {
            self.mir_instructions.set(backpatch_push_callee_preserved_regs, .{
                .tag = .pseudo,
                .ops = .pseudo_push_reg_list,
                .data = .{ .reg_list = frame_layout.save_reg_list },
            });
            if (epilogue) |e| self.mir_instructions.set(e.backpatch_pop_callee_preserved_regs, .{
                .tag = .pseudo,
                .ops = .pseudo_pop_reg_list,
                .data = .{ .reg_list = frame_layout.save_reg_list },
            });
        }
    } else {
        if (self.debug_output != .none) try self.asmPseudo(.pseudo_dbg_prologue_end_none);
        try self.genBody(self.air.getMainBody());
        if (self.debug_output != .none) try self.asmPseudo(.pseudo_dbg_epilogue_begin_none);
    }

    // Drop them off at the rbrace.
    if (self.debug_output != .none) _ = try self.addInst(.{
        .tag = .pseudo,
        .ops = .pseudo_dbg_line_line_column,
        .data = .{ .line_column = .{
            .line = self.end_di_line,
            .column = self.end_di_column,
        } },
    });
}

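/// Invariant checks run after lowering each AIR instruction: no register
/// locks may remain held, and, when runtime safety is enabled, every
/// allocated register must appear in the tracking of the instruction that
/// owns it.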
fn checkInvariantsAfterAirInst(self: *CodeGen) void {
    assert(!self.register_manager.lockedRegsExist());

    if (std.debug.runtime_safety) {
        // check consistency of tracked registers
        var it = self.register_manager.free_registers.iterator(.{ .kind = .unset });
        while (it.next()) |index| {
            const tracked_inst = self.register_manager.registers[index];
            const tracking = self.getResolvedInstValue(tracked_inst);
            for (tracking.getRegs()) |reg| {
                if (RegisterManager.indexOfRegIntoTracked(reg).? == index) break;
            } else unreachable; // tracked register not in use
        }
    }
}

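/// Like `genBody`, but brackets the body with debug-info pseudo instructions
/// marking lexical block entry and exit.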
fn genBodyBlock(self: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
    if (self.debug_output != .none) try self.asmPseudo(.pseudo_dbg_enter_block_none);
    try self.genBody(body);
    if (self.debug_output != .none) try self.asmPseudo(.pseudo_dbg_leave_block_none);
}

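/// Lowers a body of AIR instructions to MIR. Leading `arg` instructions are
/// processed in a first pass so that parameter debug info is emitted before
/// any other code; the main pass then dispatches on each instruction's tag,
/// skipping unused instructions that do not need to be lowered.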
fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
    @setEvalBranchQuota(3_600);
    const pt = cg.pt;
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    const air_tags = cg.air.instructions.items(.tag);
    const air_datas = cg.air.instructions.items(.data);
    const use_old = cg.target.ofmt == .coff;

    cg.arg_index = 0;
    for (body) |inst| switch (air_tags[@intFromEnum(inst)]) {
        .arg => {
            wip_mir_log.debug("{}", .{cg.fmtAir(inst)});
            verbose_tracking_log.debug("{}", .{cg.fmtTracking()});

            cg.reused_operands = .initEmpty();
            try cg.inst_tracking.ensureUnusedCapacity(cg.gpa, 1);

            try cg.airArg(inst);

            cg.resetTemps();
            cg.checkInvariantsAfterAirInst();
        },
        else => break,
    };

    if (cg.arg_index == 0) try cg.airDbgVarArgs();
    cg.arg_index = 0;
    for (body) |inst| {
        if (cg.liveness.isUnused(inst) and !cg.air.mustLower(inst, ip)) continue;
        wip_mir_log.debug("{}", .{cg.fmtAir(inst)});
        verbose_tracking_log.debug("{}", .{cg.fmtTracking()});

        cg.reused_operands = .initEmpty();
        try cg.inst_tracking.ensureUnusedCapacity(cg.gpa, 1);
        switch (air_tags[@intFromEnum(inst)]) {
            // zig fmt: off
            .add,
            .add_wrap,
            .sub,
            .sub_wrap,
            => |air_tag| try cg.airBinOp(inst, air_tag),

            .shr, .shr_exact => try cg.airShlShrBinOp(inst),
            .shl, .shl_exact => try cg.airShlShrBinOp(inst),

            .mul,
            .mul_wrap,
            .rem,
            .mod,
            .div_float,
            .div_trunc,
            .div_floor,
            .div_exact,
            => |air_tag| try cg.airMulDivBinOp(inst, air_tag),

            .add_sat => try cg.airAddSat(inst),
            .sub_sat => try cg.airSubSat(inst),
            .mul_sat => try cg.airMulSat(inst),
            .shl_sat => try cg.airShlSat(inst),

            .sin,
            .cos,
            .tan,
            .exp,
            .exp2,
            .log,
            .log2,
            .log10,
            .round,
            => |air_tag| try cg.airUnaryMath(inst, air_tag),

            .floor => try cg.airRound(inst, .{ .mode = .down, .precision = .inexact }),
            .ceil => try cg.airRound(inst, .{ .mode = .up, .precision = .inexact }),
            .trunc_float => try cg.airRound(inst, .{ .mode = .zero, .precision = .inexact }),
            .sqrt => try cg.airSqrt(inst),
            .neg => |air_tag| try cg.airFloatSign(inst, air_tag),

            .add_with_overflow => try cg.airAddSubWithOverflow(inst),
            .sub_with_overflow => try cg.airAddSubWithOverflow(inst),
            .mul_with_overflow => try cg.airMulWithOverflow(inst),
            .shl_with_overflow => try cg.airShlWithOverflow(inst),

            .cmp_lt_errors_len => try cg.airCmpLtErrorsLen(inst),

            .bitcast => try cg.airBitCast(inst),
            .fptrunc => try cg.airFptrunc(inst),
            .fpext => try cg.airFpext(inst),
            .intcast => try cg.airIntCast(inst),
            .trunc => try cg.airTrunc(inst),
            .is_non_null => try cg.airIsNonNull(inst),
            .is_null => try cg.airIsNull(inst),
            .is_non_err => try cg.airIsNonErr(inst),
            .is_err => try cg.airIsErr(inst),
            .float_from_int => try cg.airFloatFromInt(inst),
            .int_from_float => try cg.airIntFromFloat(inst),
            .cmpxchg_strong => try cg.airCmpxchg(inst),
            .cmpxchg_weak => try cg.airCmpxchg(inst),
            .atomic_rmw => try cg.airAtomicRmw(inst),
            .atomic_load => try cg.airAtomicLoad(inst),
            .memcpy => try cg.airMemcpy(inst),
            .memset => try cg.airMemset(inst, false),
            .memset_safe => try cg.airMemset(inst, true),
            .ctz => try cg.airCtz(inst),
            .popcount => try cg.airPopCount(inst),
            .byte_swap => try cg.airByteSwap(inst),
            .bit_reverse => try cg.airBitReverse(inst),
            .tag_name => try cg.airTagName(inst),
            .error_name => try cg.airErrorName(inst),
            .splat => try cg.airSplat(inst),
            .select => try cg.airSelect(inst),
            .shuffle => try cg.airShuffle(inst),
            .reduce => try cg.airReduce(inst),
            .aggregate_init => try cg.airAggregateInit(inst),
            .prefetch => try cg.airPrefetch(inst),
            .mul_add => try cg.airMulAdd(inst),

            .atomic_store_unordered => try cg.airAtomicStore(inst, .unordered),
            .atomic_store_monotonic => try cg.airAtomicStore(inst, .monotonic),
            .atomic_store_release => try cg.airAtomicStore(inst, .release),
            .atomic_store_seq_cst => try cg.airAtomicStore(inst, .seq_cst),

            .array_elem_val => try cg.airArrayElemVal(inst),

            .optional_payload => try cg.airOptionalPayload(inst),
            .unwrap_errunion_err => try cg.airUnwrapErrUnionErr(inst),
            .unwrap_errunion_payload => try cg.airUnwrapErrUnionPayload(inst),

            .wrap_optional => try cg.airWrapOptional(inst),
            .wrap_errunion_payload => try cg.airWrapErrUnionPayload(inst),
            .wrap_errunion_err => try cg.airWrapErrUnionErr(inst),
            // zig fmt: on

            .add_safe,
            .sub_safe,
            .mul_safe,
            => return cg.fail("TODO implement safety_checked_instructions", .{}),

            .add_optimized => try cg.airBinOp(inst, .add),
            .sub_optimized => try cg.airBinOp(inst, .sub),
            .mul_optimized => try cg.airBinOp(inst, .mul),
            .div_float_optimized => try cg.airMulDivBinOp(inst, .div_float),
            .div_trunc_optimized => try cg.airMulDivBinOp(inst, .div_trunc),
            .div_floor_optimized => try cg.airMulDivBinOp(inst, .div_floor),
            .div_exact_optimized => try cg.airMulDivBinOp(inst, .div_exact),
            .rem_optimized => try cg.airMulDivBinOp(inst, .rem),
            .mod_optimized => try cg.airMulDivBinOp(inst, .mod),
            .neg_optimized => try cg.airFloatSign(inst, .neg),
            .reduce_optimized => try cg.airReduce(inst),
            .int_from_float_optimized => try cg.airIntFromFloat(inst),

            .arg => if (cg.debug_output != .none) {
                // skip zero-bit arguments as they don't have a corresponding arg instruction
                var arg_index = cg.arg_index;
                while (cg.args[arg_index] == .none) arg_index += 1;
                cg.arg_index = arg_index + 1;

                const name = air_datas[@intFromEnum(inst)].arg.name;
                if (name != .none) try cg.genLocalDebugInfo(inst, cg.getResolvedInstValue(inst).short);
                if (cg.liveness.isUnused(inst)) try cg.processDeath(inst);

                for (cg.args[arg_index + 1 ..]) |arg| {
                    if (arg != .none) break;
                } else try cg.airDbgVarArgs();
            },
            .ptr_add => |air_tag| if (use_old) try cg.airPtrArithmetic(inst, air_tag) else {
                const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
                const bin_op = cg.air.extraData(Air.Bin, ty_pl.payload).data;
                var ops = try cg.tempsFromOperands(inst, .{ bin_op.lhs, bin_op.rhs });
                try ops[0].toSlicePtr(cg);
                var res: [1]Temp = undefined;
                if (ty_pl.ty.toType().elemType2(zcu).hasRuntimeBitsIgnoreComptime(zcu)) cg.select(&res, &.{ty_pl.ty.toType()}, &ops, comptime &.{ .{
                    .patterns = &.{
                        .{ .src = .{ .to_gpr, .simm32 } },
                    },
                    .dst_temps = .{.{ .rc = .general_purpose }},
                    .each = .{ .once = &.{
                        .{ ._, ._, .lea, .dst0p, .leaa(.none, .src0, .add_src0_elem_size_times_src1), ._, ._ },
                    } },
                }, .{
                    .dst_constraints = .{.{ .elem_size_is = 1 }},
                    .patterns = &.{
                        .{ .src = .{ .to_gpr, .to_gpr } },
                    },
                    .dst_temps = .{.{ .rc = .general_purpose }},
                    .each = .{ .once = &.{
                        .{ ._, ._, .lea, .dst0p, .leai(.none, .src0, .src1), ._, ._ },
                    } },
                }, .{
                    .dst_constraints = .{.{ .elem_size_is = 2 }},
                    .patterns = &.{
                        .{ .src = .{ .to_gpr, .to_gpr } },
                    },
                    .dst_temps = .{.{ .rc = .general_purpose }},
                    .each = .{ .once = &.{
                        .{ ._, ._, .lea, .dst0p, .leasi(.none, .src0, .@"2", .src1), ._, ._ },
                    } },
                }, .{
                    .dst_constraints = .{.{ .elem_size_is = 2 + 1 }},
                    .patterns = &.{
                        .{ .src = .{ .to_gpr, .to_gpr } },
                    },
                    .dst_temps = .{.{ .rc = .general_purpose }},
                    .each = .{ .once = &.{
                        .{ ._, ._, .lea, .dst0p, .leasi(.none, .src1, .@"2", .src1), ._, ._ },
                        .{ ._, ._, .lea, .dst0p, .leai(.none, .src0, .dst0), ._, ._ },
                    } },
                }, .{
                    .dst_constraints = .{.{ .elem_size_is = 4 }},
                    .patterns = &.{
                        .{ .src = .{ .to_gpr, .to_gpr } },
                    },
                    .dst_temps = .{.{ .rc = .general_purpose }},
                    .each = .{ .once = &.{
                        .{ ._, ._, .lea, .dst0p, .leasi(.none, .src0, .@"4", .src1), ._, ._ },
                    } },
                }, .{
                    .dst_constraints = .{.{ .elem_size_is = 4 + 1 }},
                    .patterns = &.{
                        .{ .src = .{ .to_gpr, .to_gpr } },
                    },
                    .dst_temps = .{.{ .ref = .src1 }},
                    .each = .{ .once = &.{
                        .{ ._, ._, .lea, .dst0p, .leasi(.none, .src1, .@"4", .src1), ._, ._ },
                        .{ ._, ._, .lea, .dst0p, .leai(.none, .src0, .dst0), ._, ._ },
                    } },
                }, .{
                    .required_features = .{ .@"64bit", null, null, null },
                    .dst_constraints = .{.{ .elem_size_is = 8 }},
                    .patterns = &.{
                        .{ .src = .{ .to_gpr, .to_gpr } },
                    },
                    .dst_temps = .{.{ .rc = .general_purpose }},
                    .each = .{ .once = &.{
                        .{ ._, ._, .lea, .dst0p, .leasi(.none, .src0, .@"8", .src1), ._, ._ },
                    } },
                }, .{
                    .required_features = .{ .@"64bit", null, null, null },
                    .dst_constraints = .{.{ .elem_size_is = 8 + 1 }},
                    .patterns = &.{
                        .{ .src = .{ .to_gpr, .to_gpr } },
                    },
                    .dst_temps = .{.{ .ref = .src1 }},
                    .each = .{ .once = &.{
                        .{ ._, ._, .lea, .dst0p, .leasi(.none, .src1, .@"8", .src1), ._, ._ },
                        .{ ._, ._, .lea, .dst0p, .leai(.none, .src0, .dst0), ._, ._ },
                    } },
                }, .{
                    .dst_constraints = .{.po2_elem_size},
                    .patterns = &.{
                        .{ .src = .{ .to_gpr, .to_mut_gpr } },
                    },
                    .dst_temps = .{.{ .ref = .src1 }},
                    .clobbers = .{ .eflags = true },
                    .each = .{ .once = &.{
                        .{ ._, ._l, .sh, .src1p, .sa(.none, .add_log2_src0_elem_size), ._, ._ },
                        .{ ._, ._, .lea, .dst0p, .leai(.none, .src0, .src1), ._, ._ },
                    } },
                }, .{
                    .patterns = &.{
                        .{ .src = .{ .to_gpr, .to_gpr } },
                    },
                    .dst_temps = .{.{ .rc = .general_purpose }},
                    .clobbers = .{ .eflags = true },
                    .each = .{ .once = &.{
                        .{ ._, .i_, .mul, .dst0p, .src1p, .sa(.none, .add_src0_elem_size), ._ },
                        .{ ._, ._, .lea, .dst0p, .leai(.none, .src0, .dst0), ._, ._ },
                    } },
                } }) catch |err| switch (err) {
                    error.SelectFailed => return cg.fail("failed to select {s} {} {} {}", .{
                        @tagName(air_tag),
                        cg.typeOf(bin_op.lhs).fmt(pt),
                        ops[0].tracking(cg),
                        ops[1].tracking(cg),
                    }),
                    else => |e| return e,
                } else { // hack around Sema OPV bugs
                    res[0] = ops[0];
                }
                try res[0].finish(inst, &.{ bin_op.lhs, bin_op.rhs }, &ops, cg);
            },
            .ptr_sub => |air_tag| if (use_old) try cg.airPtrArithmetic(inst, air_tag) else {
                const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
                const bin_op = cg.air.extraData(Air.Bin, ty_pl.payload).data;
                var ops = try cg.tempsFromOperands(inst, .{ bin_op.lhs, bin_op.rhs });
                try ops[0].toSlicePtr(cg);
                var res: [1]Temp = undefined;
                if (ty_pl.ty.toType().elemType2(zcu).hasRuntimeBitsIgnoreComptime(zcu)) cg.select(&res, &.{ty_pl.ty.toType()}, &ops, comptime &.{ .{
                    .patterns = &.{
                        .{ .src = .{ .to_gpr, .simm32 } },
                    },
                    .dst_temps = .{.{ .rc = .general_purpose }},
                    .each = .{ .once = &.{
                        .{ ._, ._, .lea, .dst0p, .leaa(.none, .src0, .sub_src0_elem_size_times_src1), ._, ._ },
                    } },
                }, .{
                    .dst_constraints = .{.{ .elem_size_is = 1 }},
                    .patterns = &.{
                        .{ .src = .{ .to_gpr, .to_mut_gpr } },
                    },
                    .dst_temps = .{.{ .ref = .src1 }},
                    .clobbers = .{ .eflags = true },
                    .each = .{ .once = &.{
                        .{ ._, ._, .neg, .src1p, ._, ._, ._ },
                        .{ ._, ._, .lea, .dst0p, .leai(.none, .src0, .src1), ._, ._ },
                    } },
                }, .{
                    .dst_constraints = .{.{ .elem_size_is = 2 }},
                    .patterns = &.{
                        .{ .src = .{ .to_gpr, .to_mut_gpr } },
                    },
                    .dst_temps = .{.{ .ref = .src1 }},
                    .clobbers = .{ .eflags = true },
                    .each = .{ .once = &.{
                        .{ ._, ._, .neg, .src1p, ._, ._, ._ },
                        .{ ._, ._, .lea, .dst0p, .leasi(.none, .src0, .@"2", .src1), ._, ._ },
                    } },
                }, .{
                    .dst_constraints = .{.{ .elem_size_is = 2 + 1 }},
                    .patterns = &.{
                        .{ .src = .{ .to_gpr, .to_gpr } },
                    },
                    .dst_temps = .{.{ .rc = .general_purpose }},
                    .clobbers = .{ .eflags = true },
                    .each = .{ .once = &.{
                        .{ ._, ._, .lea, .dst0p, .leasi(.none, .src1, .@"2", .src1), ._, ._ },
                        .{ ._, ._, .neg, .dst0p, ._, ._, ._ },
                        .{ ._, ._, .lea, .dst0p, .leai(.none, .src0, .dst0), ._, ._ },
                    } },
                }, .{
                    .dst_constraints = .{.{ .elem_size_is = 4 }},
                    .patterns = &.{
                        .{ .src = .{ .to_gpr, .to_mut_gpr } },
                    },
                    .dst_temps = .{.{ .ref = .src1 }},
                    .clobbers = .{ .eflags = true },
                    .each = .{ .once = &.{
                        .{ ._, ._, .neg, .src1p, ._, ._, ._ },
                        .{ ._, ._, .lea, .dst0p, .leasi(.none, .src0, .@"4", .src1), ._, ._ },
                    } },
                }, .{
                    .dst_constraints = .{.{ .elem_size_is = 4 + 1 }},
                    .patterns = &.{
                        .{ .src = .{ .to_gpr, .to_gpr } },
                    },
                    .dst_temps = .{.{ .rc = .general_purpose }},
                    .clobbers = .{ .eflags = true },
                    .each = .{ .once = &.{
                        .{ ._, ._, .lea, .dst0p, .leasi(.none, .src1, .@"4", .src1), ._, ._ },
                        .{ ._, ._, .neg, .dst0p, ._, ._, ._ },
                        .{ ._, ._, .lea, .dst0p, .leai(.none, .src0, .dst0), ._, ._ },
                    } },
                }, .{
                    .required_features = .{ .@"64bit", null, null, null },
                    .dst_constraints = .{.{ .elem_size_is = 8 }},
                    .patterns = &.{
                        .{ .src = .{ .to_gpr, .to_mut_gpr } },
                    },
                    .dst_temps = .{.{ .ref = .src1 }},
                    .clobbers = .{ .eflags = true },
                    .each = .{ .once = &.{
                        .{ ._, ._, .neg, .src1p, ._, ._, ._ },
                        .{ ._, ._, .lea, .dst0p, .leasi(.none, .src0, .@"8", .src1), ._, ._ },
                    } },
                }, .{
                    .required_features = .{ .@"64bit", null, null, null },
                    .dst_constraints = .{.{ .elem_size_is = 8 + 1 }},
                    .patterns = &.{
                        .{ .src = .{ .to_gpr, .to_gpr } },
                    },
                    .dst_temps = .{.{ .rc = .general_purpose }},
                    .clobbers = .{ .eflags = true },
                    .each = .{ .once = &.{
                        .{ ._, ._, .lea, .dst0p, .leasi(.none, .src1, .@"8", .src1), ._, ._ },
                        .{ ._, ._, .neg, .dst0p, ._, ._, ._ },
                        .{ ._, ._, .lea, .dst0p, .leai(.none, .src0, .dst0), ._, ._ },
                    } },
                }, .{
                    .dst_constraints = .{.po2_elem_size},
                    .patterns = &.{
                        .{ .src = .{ .to_gpr, .to_mut_gpr } },
                    },
                    .dst_temps = .{.{ .ref = .src1 }},
                    .clobbers = .{ .eflags = true },
                    .each = .{ .once = &.{
                        .{ ._, ._l, .sa, .src1p, .sa(.none, .add_log2_src0_elem_size), ._, ._ },
                        .{ ._, ._, .neg, .src1p, ._, ._, ._ },
                        .{ ._, ._, .lea, .dst0p, .leai(.none, .src0, .src1), ._, ._ },
                    } },
                }, .{
                    .patterns = &.{
                        .{ .src = .{ .to_gpr, .to_gpr } },
                    },
                    .dst_temps = .{.{ .rc = .general_purpose }},
                    .clobbers = .{ .eflags = true },
                    .each = .{ .once = &.{
                        .{ ._, .i_, .mul, .dst0p, .src1p, .sa(.none, .sub_src0_elem_size), ._ },
                        .{ ._, ._, .lea, .dst0p, .leai(.none, .src0, .dst0), ._, ._ },
                    } },
                } }) catch |err| switch (err) {
                    error.SelectFailed => return cg.fail("failed to select {s} {} {} {}", .{
                        @tagName(air_tag),
                        cg.typeOf(bin_op.lhs).fmt(pt),
                        ops[0].tracking(cg),
                        ops[1].tracking(cg),
                    }),
                    else => |e| return e,
                } else {
                    // hack around Sema OPV bugs
                    res[0] = ops[0];
                }
                try res[0].finish(inst, &.{ bin_op.lhs, bin_op.rhs }, &ops, cg);
            },
.max => |air_tag| if (use_old) try cg.airBinOp(inst, air_tag) else fallback: {
|
|
const bin_op = air_datas[@intFromEnum(inst)].bin_op;
|
|
const ty = cg.typeOf(bin_op.lhs);
|
|
if (ty.isVector(zcu) and cg.floatBits(ty.childType(zcu)) != null) break :fallback try cg.airBinOp(inst, air_tag);
|
|
var ops = try cg.tempsFromOperands(inst, .{ bin_op.lhs, bin_op.rhs });
|
|
var res: [1]Temp = undefined;
|
|
cg.select(&res, &.{cg.typeOf(bin_op.lhs)}, &ops, comptime &.{ .{
|
|
.required_features = .{ .cmov, null, null, null },
|
|
.src_constraints = .{ .{ .signed_int = .byte }, .{ .signed_int = .byte } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_gpr, .to_gpr } },
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .cmp, .src0b, .src1b, ._, ._ },
|
|
.{ ._, ._l, .cmov, .dst0d, .src1d, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.src_constraints = .{ .{ .signed_int = .byte }, .{ .signed_int = .byte } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_gpr, .mem } },
|
|
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_mut_gpr, .to_gpr } },
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .cmp, .src0b, .src1b, ._, ._ },
|
|
.{ ._, ._nl, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .dst0b, .src1b, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .cmov, null, null, null },
|
|
.src_constraints = .{ .{ .unsigned_int = .byte }, .{ .unsigned_int = .byte } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_gpr, .to_gpr } },
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .cmp, .src0b, .src1b, ._, ._ },
|
|
.{ ._, ._b, .cmov, .dst0d, .src1d, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.src_constraints = .{ .{ .unsigned_int = .byte }, .{ .unsigned_int = .byte } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_gpr, .mem } },
|
|
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_mut_gpr, .to_gpr } },
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .cmp, .src0b, .src1b, ._, ._ },
|
|
.{ ._, ._nb, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .dst0b, .src1b, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .cmov, null, null, null },
|
|
.src_constraints = .{ .{ .signed_int = .word }, .{ .signed_int = .word } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_gpr, .mem } },
|
|
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_mut_gpr, .to_gpr } },
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .cmp, .src0w, .src1w, ._, ._ },
|
|
.{ ._, ._l, .cmov, .dst0w, .src1w, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.src_constraints = .{ .{ .signed_int = .word }, .{ .signed_int = .word } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_gpr, .mem } },
|
|
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_mut_gpr, .to_gpr } },
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .cmp, .src0w, .src1w, ._, ._ },
|
|
.{ ._, ._nl, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .dst0w, .src1w, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .cmov, null, null, null },
|
|
.src_constraints = .{ .{ .unsigned_int = .word }, .{ .unsigned_int = .word } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_gpr, .mem } },
|
|
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_mut_gpr, .to_gpr } },
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .cmp, .src0w, .src1w, ._, ._ },
|
|
.{ ._, ._b, .cmov, .dst0w, .src1w, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.src_constraints = .{ .{ .unsigned_int = .word }, .{ .unsigned_int = .word } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_gpr, .mem } },
|
|
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_mut_gpr, .to_gpr } },
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .cmp, .src0w, .src1w, ._, ._ },
|
|
.{ ._, ._nb, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .dst0w, .src1w, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .cmov, null, null, null },
|
|
.src_constraints = .{ .{ .signed_int = .dword }, .{ .signed_int = .dword } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_gpr, .mem } },
|
|
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_mut_gpr, .to_gpr } },
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .cmp, .src0d, .src1d, ._, ._ },
|
|
.{ ._, ._l, .cmov, .dst0d, .src1d, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.src_constraints = .{ .{ .signed_int = .dword }, .{ .signed_int = .dword } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_gpr, .mem } },
|
|
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_mut_gpr, .to_gpr } },
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .cmp, .src0d, .src1d, ._, ._ },
|
|
.{ ._, ._nl, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .dst0d, .src1d, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .cmov, null, null, null },
|
|
.src_constraints = .{ .{ .unsigned_int = .dword }, .{ .unsigned_int = .dword } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_gpr, .mem } },
|
|
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_mut_gpr, .to_gpr } },
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .cmp, .src0d, .src1d, ._, ._ },
|
|
.{ ._, ._b, .cmov, .dst0d, .src1d, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.src_constraints = .{ .{ .unsigned_int = .dword }, .{ .unsigned_int = .dword } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_gpr, .mem } },
|
|
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_mut_gpr, .to_gpr } },
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .cmp, .src0d, .src1d, ._, ._ },
|
|
.{ ._, ._nb, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .dst0d, .src1d, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", .cmov, null, null },
|
|
.src_constraints = .{ .{ .signed_int = .qword }, .{ .signed_int = .qword } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_gpr, .mem } },
|
|
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_mut_gpr, .to_gpr } },
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .cmp, .src0q, .src1q, ._, ._ },
|
|
.{ ._, ._l, .cmov, .dst0q, .src1q, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", null, null, null },
|
|
.src_constraints = .{ .{ .signed_int = .qword }, .{ .signed_int = .qword } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_gpr, .mem } },
|
|
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_mut_gpr, .to_gpr } },
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .cmp, .src0q, .src1q, ._, ._ },
|
|
.{ ._, ._nl, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .dst0q, .src1q, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", .cmov, null, null },
|
|
.src_constraints = .{ .{ .unsigned_int = .qword }, .{ .unsigned_int = .qword } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_gpr, .mem } },
|
|
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_mut_gpr, .to_gpr } },
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .cmp, .src0q, .src1q, ._, ._ },
|
|
.{ ._, ._b, .cmov, .dst0q, .src1q, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", null, null, null },
|
|
.src_constraints = .{ .{ .unsigned_int = .qword }, .{ .unsigned_int = .qword } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_gpr, .mem } },
|
|
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_mut_gpr, .to_gpr } },
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .cmp, .src0q, .src1q, ._, ._ },
|
|
.{ ._, ._nb, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .dst0q, .src1q, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", .cmov, null, null },
|
|
.src_constraints = .{ .any_signed_int, .any_signed_int },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .reg = .rsi } },
|
|
.{ .type = .u64, .kind = .{ .reg = .rdi } },
|
|
.{ .type = .u64, .kind = .{ .reg = .rcx } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sia(1, .src0, .sub_size_div_8), ._, ._ },
|
|
.{ ._, ._c, .cl, ._, ._, ._, ._ },
|
|
.{ .@"0:", ._, .mov, .tmp1q, .memsiad(.src0q, .@"8", .tmp0, .add_size, -8), ._, ._ },
|
|
.{ ._, ._, .sbb, .tmp1q, .memsiad(.src1q, .@"8", .tmp0, .add_size, -8), ._, ._ },
|
|
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
|
|
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp1q, .memad(.src0q, .add_size, -8), ._, ._ },
|
|
.{ ._, ._, .sbb, .tmp1q, .memad(.src1q, .add_size, -8), ._, ._ },
|
|
.{ ._, ._, .lea, .tmp0p, .mem(.src0), ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1p, .mem(.src1), ._, ._ },
|
|
.{ ._, ._l, .cmov, .tmp0p, .tmp1p, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
|
|
.{ ._, ._, .mov, .tmp2d, .sa(.src0, .add_size_div_8), ._, ._ },
|
|
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", null, null, null },
|
|
.src_constraints = .{ .any_signed_int, .any_signed_int },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .reg = .rsi } },
|
|
.{ .type = .u64, .kind = .{ .reg = .rdi } },
|
|
.{ .type = .u64, .kind = .{ .reg = .rcx } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sia(1, .src0, .sub_size_div_8), ._, ._ },
|
|
.{ ._, ._c, .cl, ._, ._, ._, ._ },
|
|
.{ .@"0:", ._, .mov, .tmp1q, .memsiad(.src0q, .@"8", .tmp0, .add_size, -8), ._, ._ },
|
|
.{ ._, ._, .sbb, .tmp1q, .memsiad(.src1q, .@"8", .tmp0, .add_size, -8), ._, ._ },
|
|
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
|
|
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp1q, .memad(.src0q, .add_size, -8), ._, ._ },
|
|
.{ ._, ._, .sbb, .tmp1q, .memad(.src1q, .add_size, -8), ._, ._ },
|
|
.{ ._, ._, .lea, .tmp0p, .mem(.src0), ._, ._ },
|
|
.{ ._, ._nl, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp0p, .mem(.src1), ._, ._ },
|
|
.{ .@"0:", ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
|
|
.{ ._, ._, .mov, .tmp2d, .sa(.src0, .add_size_div_8), ._, ._ },
|
|
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", .cmov, null, null },
|
|
.src_constraints = .{ .any_unsigned_int, .any_unsigned_int },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .reg = .rsi } },
|
|
.{ .type = .u64, .kind = .{ .reg = .rdi } },
|
|
.{ .type = .u64, .kind = .{ .reg = .rcx } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size_div_8), ._, ._ },
|
|
.{ ._, ._c, .cl, ._, ._, ._, ._ },
|
|
.{ .@"0:", ._, .mov, .tmp1q, .memsia(.src0q, .@"8", .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .sbb, .tmp1q, .memsia(.src1q, .@"8", .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
|
|
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp0p, .mem(.src0), ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1p, .mem(.src1), ._, ._ },
|
|
.{ ._, ._b, .cmov, .tmp0p, .tmp1p, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
|
|
.{ ._, ._, .mov, .tmp2d, .sa(.src0, .add_size_div_8), ._, ._ },
|
|
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", null, null, null },
|
|
.src_constraints = .{ .any_unsigned_int, .any_unsigned_int },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .reg = .rsi } },
|
|
.{ .type = .u64, .kind = .{ .reg = .rdi } },
|
|
.{ .type = .u64, .kind = .{ .reg = .rcx } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size_div_8), ._, ._ },
|
|
.{ ._, ._c, .cl, ._, ._, ._, ._ },
|
|
.{ .@"0:", ._, .mov, .tmp1q, .memsia(.src0q, .@"8", .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .sbb, .tmp1q, .memsia(.src1q, .@"8", .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
|
|
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp0p, .mem(.src0), ._, ._ },
|
|
.{ ._, ._nb, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp0p, .mem(.src1), ._, ._ },
|
|
.{ .@"0:", ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
|
|
.{ ._, ._, .mov, .tmp2d, .sa(.src0, .add_size_div_8), ._, ._ },
|
|
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .avx, null, null, null },
|
|
.src_constraints = .{
|
|
.{ .scalar_signed_int = .{ .of = .xword, .is = .byte } },
|
|
.{ .scalar_signed_int = .{ .of = .xword, .is = .byte } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_sse, .mem } },
|
|
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_sse, .to_sse } },
|
|
},
|
|
.dst_temps = .{.{ .rc = .sse }},
|
|
.each = .{ .once = &.{
|
|
.{ ._, .vp_b, .maxs, .dst0x, .src0x, .src1x, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .sse4_1, null, null, null },
|
|
.src_constraints = .{
|
|
.{ .scalar_signed_int = .{ .of = .xword, .is = .byte } },
|
|
.{ .scalar_signed_int = .{ .of = .xword, .is = .byte } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_sse, .mem } },
|
|
.{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_mut_sse, .to_sse } },
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.each = .{ .once = &.{
|
|
.{ ._, .p_b, .maxs, .dst0x, .src1x, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .sse2, null, null, null },
|
|
.src_constraints = .{
|
|
.{ .scalar_signed_int = .{ .of = .xword, .is = .byte } },
|
|
.{ .scalar_signed_int = .{ .of = .xword, .is = .byte } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_sse, .mem } },
|
|
.{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_mut_sse, .to_sse } },
|
|
},
|
|
.dst_temps = .{.{ .rc = .sse }},
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._dqa, .mov, .dst0x, .src0x, ._, ._ },
|
|
.{ ._, .p_b, .cmpgt, .dst0x, .src1x, ._, ._ },
|
|
.{ ._, .p_, .@"and", .src0x, .dst0x, ._, ._ },
|
|
.{ ._, .p_, .andn, .dst0x, .src1x, ._, ._ },
|
|
.{ ._, .p_, .@"or", .dst0x, .src0x, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .avx2, null, null, null },
|
|
.src_constraints = .{
|
|
.{ .scalar_signed_int = .{ .of = .yword, .is = .byte } },
|
|
.{ .scalar_signed_int = .{ .of = .yword, .is = .byte } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_sse, .mem } },
|
|
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_sse, .to_sse } },
|
|
},
|
|
.dst_temps = .{.{ .rc = .sse }},
|
|
.each = .{ .once = &.{
|
|
.{ ._, .vp_b, .maxs, .dst0y, .src0y, .src1y, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .avx2, null, null, null },
|
|
.src_constraints = .{
|
|
.{ .multiple_scalar_signed_int = .{ .of = .yword, .is = .byte } },
|
|
.{ .multiple_scalar_signed_int = .{ .of = .yword, .is = .byte } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .vector_32_i8, .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ .@"0:", .v_dqa, .mov, .tmp1y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .vp_b, .maxs, .tmp1y, .tmp1y, .memia(.src1y, .tmp0, .add_size), ._ },
|
|
.{ ._, .v_dqa, .mov, .memia(.dst0y, .tmp0, .add_size), .tmp1y, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .avx, null, null, null },
|
|
.src_constraints = .{
|
|
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .byte } },
|
|
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .byte } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .vector_16_i8, .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ .@"0:", .v_dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .vp_b, .maxs, .tmp1x, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._ },
|
|
.{ ._, .v_dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .sse4_1, null, null, null },
|
|
.src_constraints = .{
|
|
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .byte } },
|
|
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .byte } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .vector_16_i8, .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ .@"0:", ._dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .p_b, .maxs, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .sse2, null, null, null },
|
|
.src_constraints = .{
|
|
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .byte } },
|
|
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .byte } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .vector_16_i8, .kind = .{ .rc = .sse } },
|
|
.{ .type = .vector_16_i8, .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ .@"0:", ._dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._dqa, .mov, .tmp2x, .tmp1x, ._, ._ },
|
|
.{ ._, .p_b, .cmpgt, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .p_, .@"and", .tmp2x, .tmp1x, ._, ._ },
|
|
.{ ._, .p_, .andn, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .p_, .@"or", .tmp1x, .tmp2x, ._, ._ },
|
|
.{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .cmov, .slow_incdec, null, null },
|
|
.src_constraints = .{
|
|
.{ .multiple_scalar_signed_int = .{ .of = .byte, .is = .byte } },
|
|
.{ .multiple_scalar_signed_int = .{ .of = .byte, .is = .byte } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .i8, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .i8, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ .@"0:", ._, .movsx, .tmp1d, .memia(.src0b, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .movsx, .tmp2d, .memia(.src1b, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .cmp, .tmp1b, .tmp2b, ._, ._ },
|
|
.{ ._, ._l, .cmov, .tmp1d, .tmp2d, ._, ._ },
|
|
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_size), .tmp1b, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .cmov, null, null, null },
|
|
.src_constraints = .{
|
|
.{ .multiple_scalar_signed_int = .{ .of = .byte, .is = .byte } },
|
|
.{ .multiple_scalar_signed_int = .{ .of = .byte, .is = .byte } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .i8, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .i8, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ .@"0:", ._, .movsx, .tmp1d, .memia(.src0b, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .movsx, .tmp2d, .memia(.src1b, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .cmp, .tmp1b, .tmp2b, ._, ._ },
|
|
.{ ._, ._l, .cmov, .tmp1d, .tmp2d, ._, ._ },
|
|
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_size), .tmp1b, ._, ._ },
|
|
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
|
|
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .slow_incdec, null, null, null },
|
|
.src_constraints = .{
|
|
.{ .multiple_scalar_signed_int = .{ .of = .byte, .is = .byte } },
|
|
.{ .multiple_scalar_signed_int = .{ .of = .byte, .is = .byte } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .i8, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ .@"0:", ._, .movsx, .tmp1d, .memia(.src0b, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .cmp, .tmp1b, .memia(.src1b, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._nl, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp1b, .memia(.src1b, .tmp0, .add_size), ._, ._ },
|
|
.{ .@"1:", ._, .mov, .memia(.dst0b, .tmp0, .add_size), .tmp1b, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.src_constraints = .{
|
|
.{ .multiple_scalar_signed_int = .{ .of = .byte, .is = .byte } },
|
|
.{ .multiple_scalar_signed_int = .{ .of = .byte, .is = .byte } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .i8, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ .@"0:", ._, .movsx, .tmp1d, .memia(.src0b, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .cmp, .tmp1b, .memia(.src1b, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._nl, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp1b, .memia(.src1b, .tmp0, .add_size), ._, ._ },
|
|
.{ .@"1:", ._, .mov, .memia(.dst0b, .tmp0, .add_size), .tmp1b, ._, ._ },
|
|
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
|
|
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .sse, .mmx, null, null },
|
|
.src_constraints = .{
|
|
.{ .scalar_unsigned_int = .{ .of = .qword, .is = .byte } },
|
|
.{ .scalar_unsigned_int = .{ .of = .qword, .is = .byte } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_mmx, .mem } },
|
|
.{ .src = .{ .mem, .to_mut_mmx }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_mut_mmx, .to_mmx } },
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.each = .{ .once = &.{
|
|
.{ ._, .p_b, .maxu, .dst0q, .src1q, ._, ._ },
|
|
} },
|
|
}, .{
    .required_features = .{ .avx, null, null, null },
    .src_constraints = .{
        .{ .scalar_unsigned_int = .{ .of = .xword, .is = .byte } },
        .{ .scalar_unsigned_int = .{ .of = .xword, .is = .byte } },
    },
    .patterns = &.{
        .{ .src = .{ .to_sse, .mem } },
        .{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
        .{ .src = .{ .to_sse, .to_sse } },
    },
    .dst_temps = .{.{ .rc = .sse }},
    .each = .{ .once = &.{
        .{ ._, .vp_b, .maxu, .dst0x, .src0x, .src1x, ._ },
    } },
}, .{
    .required_features = .{ .sse2, null, null, null },
    .src_constraints = .{
        .{ .scalar_unsigned_int = .{ .of = .xword, .is = .byte } },
        .{ .scalar_unsigned_int = .{ .of = .xword, .is = .byte } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mut_sse, .mem } },
        .{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
        .{ .src = .{ .to_mut_sse, .to_sse } },
    },
    .dst_temps = .{.{ .ref = .src0 }},
    .each = .{ .once = &.{
        .{ ._, .p_b, .maxu, .dst0x, .src1x, ._, ._ },
    } },
}, .{
    .required_features = .{ .avx2, null, null, null },
    .src_constraints = .{
        .{ .scalar_unsigned_int = .{ .of = .yword, .is = .byte } },
        .{ .scalar_unsigned_int = .{ .of = .yword, .is = .byte } },
    },
    .patterns = &.{
        .{ .src = .{ .to_sse, .mem } },
        .{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
        .{ .src = .{ .to_sse, .to_sse } },
    },
    .dst_temps = .{.{ .rc = .sse }},
    .each = .{ .once = &.{
        .{ ._, .vp_b, .maxu, .dst0y, .src0y, .src1y, ._ },
    } },
}, .{
    .required_features = .{ .avx2, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_unsigned_int = .{ .of = .yword, .is = .byte } },
        .{ .multiple_scalar_unsigned_int = .{ .of = .yword, .is = .byte } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .vector_32_u8, .kind = .{ .rc = .sse } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", .v_dqa, .mov, .tmp1y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
        .{ ._, .vp_b, .maxu, .tmp1y, .tmp1y, .memia(.src1y, .tmp0, .add_size), ._ },
        .{ ._, .v_dqa, .mov, .memia(.dst0y, .tmp0, .add_size), .tmp1y, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .avx, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .byte } },
        .{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .byte } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .vector_16_u8, .kind = .{ .rc = .sse } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", .v_dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
        .{ ._, .vp_b, .maxu, .tmp1x, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._ },
        .{ ._, .v_dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .sse2, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .byte } },
        .{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .byte } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .vector_16_u8, .kind = .{ .rc = .sse } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
        .{ ._, .p_b, .maxu, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
        .{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
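    // Unsigned byte loops mirror the signed ones, with the b/nb conditions in place of l/nl.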
    .required_features = .{ .cmov, .slow_incdec, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_unsigned_int = .{ .of = .byte, .is = .byte } },
        .{ .multiple_scalar_unsigned_int = .{ .of = .byte, .is = .byte } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .u8, .kind = .{ .rc = .general_purpose } },
        .{ .type = .u8, .kind = .{ .rc = .general_purpose } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._, .movzx, .tmp1d, .memia(.src0b, .tmp0, .add_size), ._, ._ },
        .{ ._, ._, .movzx, .tmp2d, .memia(.src1b, .tmp0, .add_size), ._, ._ },
        .{ ._, ._, .cmp, .tmp1b, .tmp2b, ._, ._ },
        .{ ._, ._b, .cmov, .tmp1d, .tmp2d, ._, ._ },
        .{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_size), .tmp1b, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .cmov, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_unsigned_int = .{ .of = .byte, .is = .byte } },
        .{ .multiple_scalar_unsigned_int = .{ .of = .byte, .is = .byte } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .u8, .kind = .{ .rc = .general_purpose } },
        .{ .type = .u8, .kind = .{ .rc = .general_purpose } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._, .movzx, .tmp1d, .memia(.src0b, .tmp0, .add_size), ._, ._ },
        .{ ._, ._, .movzx, .tmp2d, .memia(.src1b, .tmp0, .add_size), ._, ._ },
        .{ ._, ._, .cmp, .tmp1b, .tmp2b, ._, ._ },
        .{ ._, ._b, .cmov, .tmp1d, .tmp2d, ._, ._ },
        .{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_size), .tmp1b, ._, ._ },
        .{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
        .{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .slow_incdec, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_unsigned_int = .{ .of = .byte, .is = .byte } },
        .{ .multiple_scalar_unsigned_int = .{ .of = .byte, .is = .byte } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .u8, .kind = .{ .rc = .general_purpose } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._, .movzx, .tmp1d, .memia(.src0b, .tmp0, .add_size), ._, ._ },
        .{ ._, ._, .cmp, .tmp1b, .memia(.src1b, .tmp0, .add_size), ._, ._ },
        .{ ._, ._nb, .j, .@"1f", ._, ._, ._ },
        .{ ._, ._, .mov, .tmp1b, .memia(.src1b, .tmp0, .add_size), ._, ._ },
        .{ .@"1:", ._, .mov, .memia(.dst0b, .tmp0, .add_size), .tmp1b, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
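    // Baseline unsigned byte loop: branch on nb, advance with inc/jnz.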
    .src_constraints = .{
        .{ .multiple_scalar_unsigned_int = .{ .of = .byte, .is = .byte } },
        .{ .multiple_scalar_unsigned_int = .{ .of = .byte, .is = .byte } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .u8, .kind = .{ .rc = .general_purpose } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._, .movzx, .tmp1d, .memia(.src0b, .tmp0, .add_size), ._, ._ },
        .{ ._, ._, .cmp, .tmp1b, .memia(.src1b, .tmp0, .add_size), ._, ._ },
        .{ ._, ._nb, .j, .@"1f", ._, ._, ._ },
        .{ ._, ._, .mov, .tmp1b, .memia(.src1b, .tmp0, .add_size), ._, ._ },
        .{ .@"1:", ._, .mov, .memia(.dst0b, .tmp0, .add_size), .tmp1b, ._, ._ },
        .{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
        .{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .sse, .mmx, null, null },
    .src_constraints = .{
        .{ .scalar_signed_int = .{ .of = .qword, .is = .word } },
        .{ .scalar_signed_int = .{ .of = .qword, .is = .word } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mut_mmx, .mem } },
        .{ .src = .{ .mem, .to_mut_mmx }, .commute = .{ 0, 1 } },
        .{ .src = .{ .to_mut_mmx, .to_mmx } },
    },
    .dst_temps = .{.{ .ref = .src0 }},
    .each = .{ .once = &.{
        .{ ._, .p_w, .maxs, .dst0q, .src1q, ._, ._ },
    } },
}, .{
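    // Signed word max is a single pmaxsw at every vector tier, from MMX/SSE up through AVX2.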
    .required_features = .{ .avx, null, null, null },
    .src_constraints = .{
        .{ .scalar_signed_int = .{ .of = .xword, .is = .word } },
        .{ .scalar_signed_int = .{ .of = .xword, .is = .word } },
    },
    .patterns = &.{
        .{ .src = .{ .to_sse, .mem } },
        .{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
        .{ .src = .{ .to_sse, .to_sse } },
    },
    .dst_temps = .{.{ .rc = .sse }},
    .each = .{ .once = &.{
        .{ ._, .vp_w, .maxs, .dst0x, .src0x, .src1x, ._ },
    } },
}, .{
    .required_features = .{ .sse2, null, null, null },
    .src_constraints = .{
        .{ .scalar_signed_int = .{ .of = .xword, .is = .word } },
        .{ .scalar_signed_int = .{ .of = .xword, .is = .word } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mut_sse, .mem } },
        .{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
        .{ .src = .{ .to_mut_sse, .to_sse } },
    },
    .dst_temps = .{.{ .ref = .src0 }},
    .each = .{ .once = &.{
        .{ ._, .p_w, .maxs, .dst0x, .src1x, ._, ._ },
    } },
}, .{
    .required_features = .{ .avx2, null, null, null },
    .src_constraints = .{
        .{ .scalar_signed_int = .{ .of = .yword, .is = .word } },
        .{ .scalar_signed_int = .{ .of = .yword, .is = .word } },
    },
    .patterns = &.{
        .{ .src = .{ .to_sse, .mem } },
        .{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
        .{ .src = .{ .to_sse, .to_sse } },
    },
    .dst_temps = .{.{ .rc = .sse }},
    .each = .{ .once = &.{
        .{ ._, .vp_w, .maxs, .dst0y, .src0y, .src1y, ._ },
    } },
}, .{
    .required_features = .{ .avx2, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_signed_int = .{ .of = .yword, .is = .word } },
        .{ .multiple_scalar_signed_int = .{ .of = .yword, .is = .word } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .vector_16_i16, .kind = .{ .rc = .sse } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", .v_dqa, .mov, .tmp1y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
        .{ ._, .vp_w, .maxs, .tmp1y, .tmp1y, .memia(.src1y, .tmp0, .add_size), ._ },
        .{ ._, .v_dqa, .mov, .memia(.dst0y, .tmp0, .add_size), .tmp1y, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .avx, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .word } },
        .{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .word } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .vector_16_i16, .kind = .{ .rc = .sse } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", .v_dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
        .{ ._, .vp_w, .maxs, .tmp1x, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._ },
        .{ ._, .v_dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .sse2, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .word } },
        .{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .word } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .vector_16_i16, .kind = .{ .rc = .sse } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
        .{ ._, .p_w, .maxs, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
        .{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .cmov, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_signed_int = .{ .of = .word, .is = .word } },
        .{ .multiple_scalar_signed_int = .{ .of = .word, .is = .word } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .i16, .kind = .{ .rc = .general_purpose } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._, .movsx, .tmp1d, .memia(.src0w, .tmp0, .add_size), ._, ._ },
        .{ ._, ._, .cmp, .tmp1w, .memia(.src1w, .tmp0, .add_size), ._, ._ },
        .{ ._, ._l, .cmov, .tmp1w, .memia(.src1w, .tmp0, .add_size), ._, ._ },
        .{ ._, ._, .mov, .memia(.dst0w, .tmp0, .add_size), .tmp1w, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(2), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .src_constraints = .{
        .{ .multiple_scalar_signed_int = .{ .of = .word, .is = .word } },
        .{ .multiple_scalar_signed_int = .{ .of = .word, .is = .word } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .i16, .kind = .{ .rc = .general_purpose } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._, .movsx, .tmp1d, .memia(.src0w, .tmp0, .add_size), ._, ._ },
        .{ ._, ._, .cmp, .tmp1w, .memia(.src1w, .tmp0, .add_size), ._, ._ },
        .{ ._, ._nl, .j, .@"1f", ._, ._, ._ },
        .{ ._, ._, .mov, .tmp1w, .memia(.src1w, .tmp0, .add_size), ._, ._ },
        .{ .@"1:", ._, .mov, .memia(.dst0w, .tmp0, .add_size), .tmp1w, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(2), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .avx, null, null, null },
    .src_constraints = .{
        .{ .scalar_unsigned_int = .{ .of = .xword, .is = .word } },
        .{ .scalar_unsigned_int = .{ .of = .xword, .is = .word } },
    },
    .patterns = &.{
        .{ .src = .{ .to_sse, .mem } },
        .{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
        .{ .src = .{ .to_sse, .to_sse } },
    },
    .dst_temps = .{.{ .rc = .sse }},
    .each = .{ .once = &.{
        .{ ._, .vp_w, .maxu, .dst0x, .src0x, .src1x, ._ },
    } },
}, .{
    .required_features = .{ .sse4_1, null, null, null },
    .src_constraints = .{
        .{ .scalar_unsigned_int = .{ .of = .xword, .is = .word } },
        .{ .scalar_unsigned_int = .{ .of = .xword, .is = .word } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mut_sse, .mem } },
        .{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
        .{ .src = .{ .to_mut_sse, .to_sse } },
    },
    .dst_temps = .{.{ .ref = .src0 }},
    .each = .{ .once = &.{
        .{ ._, .p_w, .maxu, .dst0x, .src1x, ._, ._ },
    } },
}, .{
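    // SSE2 lacks pmaxuw: psubusw computes the saturated difference, and adding src1 back gives max(a, b) = (a -| b) + b.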
    .required_features = .{ .sse2, null, null, null },
    .src_constraints = .{
        .{ .scalar_unsigned_int = .{ .of = .xword, .is = .word } },
        .{ .scalar_unsigned_int = .{ .of = .xword, .is = .word } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mut_sse, .mem } },
        .{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
        .{ .src = .{ .to_mut_sse, .to_sse } },
    },
    .dst_temps = .{.{ .ref = .src0 }},
    .each = .{ .once = &.{
        .{ ._, .p_w, .subus, .dst0x, .src1x, ._, ._ },
        .{ ._, .p_w, .add, .dst0x, .src1x, ._, ._ },
    } },
}, .{
    .required_features = .{ .avx2, null, null, null },
    .src_constraints = .{
        .{ .scalar_unsigned_int = .{ .of = .yword, .is = .word } },
        .{ .scalar_unsigned_int = .{ .of = .yword, .is = .word } },
    },
    .patterns = &.{
        .{ .src = .{ .to_sse, .mem } },
        .{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
        .{ .src = .{ .to_sse, .to_sse } },
    },
    .dst_temps = .{.{ .rc = .sse }},
    .each = .{ .once = &.{
        .{ ._, .vp_w, .maxu, .dst0y, .src0y, .src1y, ._ },
    } },
}, .{
    .required_features = .{ .avx2, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_unsigned_int = .{ .of = .yword, .is = .word } },
        .{ .multiple_scalar_unsigned_int = .{ .of = .yword, .is = .word } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .vector_16_u16, .kind = .{ .rc = .sse } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", .v_dqa, .mov, .tmp1y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
        .{ ._, .vp_w, .maxu, .tmp1y, .tmp1y, .memia(.src1y, .tmp0, .add_size), ._ },
        .{ ._, .v_dqa, .mov, .memia(.dst0y, .tmp0, .add_size), .tmp1y, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .avx, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .word } },
        .{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .word } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .vector_8_u16, .kind = .{ .rc = .sse } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", .v_dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
        .{ ._, .vp_w, .maxu, .tmp1x, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._ },
        .{ ._, .v_dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .sse4_1, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .word } },
        .{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .word } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .vector_8_u16, .kind = .{ .rc = .sse } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
        .{ ._, .p_w, .maxu, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
        .{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .sse2, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .word } },
        .{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .word } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .vector_8_u16, .kind = .{ .rc = .sse } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
        .{ ._, .p_w, .subus, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
        .{ ._, .p_w, .add, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
        .{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .cmov, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_unsigned_int = .{ .of = .word, .is = .word } },
        .{ .multiple_scalar_unsigned_int = .{ .of = .word, .is = .word } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .u16, .kind = .{ .rc = .general_purpose } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._, .movzx, .tmp1d, .memia(.src0w, .tmp0, .add_size), ._, ._ },
        .{ ._, ._, .cmp, .tmp1w, .memia(.src1w, .tmp0, .add_size), ._, ._ },
        .{ ._, ._b, .cmov, .tmp1w, .memia(.src1w, .tmp0, .add_size), ._, ._ },
        .{ ._, ._, .mov, .memia(.dst0w, .tmp0, .add_size), .tmp1w, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(2), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .src_constraints = .{
        .{ .multiple_scalar_unsigned_int = .{ .of = .word, .is = .word } },
        .{ .multiple_scalar_unsigned_int = .{ .of = .word, .is = .word } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .u16, .kind = .{ .rc = .general_purpose } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._, .movzx, .tmp1d, .memia(.src0w, .tmp0, .add_size), ._, ._ },
        .{ ._, ._, .cmp, .tmp1w, .memia(.src1w, .tmp0, .add_size), ._, ._ },
        .{ ._, ._nb, .j, .@"1f", ._, ._, ._ },
        .{ ._, ._, .mov, .tmp1w, .memia(.src1w, .tmp0, .add_size), ._, ._ },
        .{ .@"1:", ._, .mov, .memia(.dst0w, .tmp0, .add_size), .tmp1w, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(2), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .avx, null, null, null },
    .src_constraints = .{
        .{ .scalar_signed_int = .{ .of = .xword, .is = .dword } },
        .{ .scalar_signed_int = .{ .of = .xword, .is = .dword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_sse, .mem } },
        .{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
        .{ .src = .{ .to_sse, .to_sse } },
    },
    .dst_temps = .{.{ .rc = .sse }},
    .each = .{ .once = &.{
        .{ ._, .vp_d, .maxs, .dst0x, .src0x, .src1x, ._ },
    } },
}, .{
    .required_features = .{ .sse4_1, null, null, null },
    .src_constraints = .{
        .{ .scalar_signed_int = .{ .of = .xword, .is = .dword } },
        .{ .scalar_signed_int = .{ .of = .xword, .is = .dword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mut_sse, .mem } },
        .{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
        .{ .src = .{ .to_mut_sse, .to_sse } },
    },
    .dst_temps = .{.{ .ref = .src0 }},
    .each = .{ .once = &.{
        .{ ._, .p_d, .maxs, .dst0x, .src1x, ._, ._ },
    } },
}, .{
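    // pmaxsd is SSE4.1-only: the SSE2 fallback builds the max from a pcmpgtd mask, keeping src0 where it wins (and) and src1 elsewhere (andn/or).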
    .required_features = .{ .sse2, null, null, null },
    .src_constraints = .{
        .{ .scalar_signed_int = .{ .of = .xword, .is = .dword } },
        .{ .scalar_signed_int = .{ .of = .xword, .is = .dword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mut_sse, .mem } },
        .{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
        .{ .src = .{ .to_mut_sse, .to_sse } },
    },
    .dst_temps = .{.{ .rc = .sse }},
    .each = .{ .once = &.{
        .{ ._, ._dqa, .mov, .dst0x, .src0x, ._, ._ },
        .{ ._, .p_d, .cmpgt, .dst0x, .src1x, ._, ._ },
        .{ ._, .p_, .@"and", .src0x, .dst0x, ._, ._ },
        .{ ._, .p_, .andn, .dst0x, .src1x, ._, ._ },
        .{ ._, .p_, .@"or", .dst0x, .src0x, ._, ._ },
    } },
}, .{
    .required_features = .{ .avx2, null, null, null },
    .src_constraints = .{
        .{ .scalar_signed_int = .{ .of = .yword, .is = .dword } },
        .{ .scalar_signed_int = .{ .of = .yword, .is = .dword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_sse, .mem } },
        .{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
        .{ .src = .{ .to_sse, .to_sse } },
    },
    .dst_temps = .{.{ .rc = .sse }},
    .each = .{ .once = &.{
        .{ ._, .vp_d, .maxs, .dst0y, .src0y, .src1y, ._ },
    } },
}, .{
    .required_features = .{ .avx2, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_signed_int = .{ .of = .yword, .is = .dword } },
        .{ .multiple_scalar_signed_int = .{ .of = .yword, .is = .dword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .vector_8_i32, .kind = .{ .rc = .sse } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", .v_dqa, .mov, .tmp1y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
        .{ ._, .vp_d, .maxs, .tmp1y, .tmp1y, .memia(.src1y, .tmp0, .add_size), ._ },
        .{ ._, .v_dqa, .mov, .memia(.dst0y, .tmp0, .add_size), .tmp1y, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .avx, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .dword } },
        .{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .dword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .vector_4_i32, .kind = .{ .rc = .sse } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", .v_dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
        .{ ._, .vp_d, .maxs, .tmp1x, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._ },
        .{ ._, .v_dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .sse4_1, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .dword } },
        .{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .dword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .vector_4_i32, .kind = .{ .rc = .sse } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
        .{ ._, .p_d, .maxs, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
        .{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .sse2, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .dword } },
        .{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .dword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .vector_4_i32, .kind = .{ .rc = .sse } },
        .{ .type = .vector_4_i32, .kind = .{ .rc = .sse } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
        .{ ._, ._dqa, .mov, .tmp2x, .tmp1x, ._, ._ },
        .{ ._, .p_d, .cmpgt, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
        .{ ._, .p_, .@"and", .tmp2x, .tmp1x, ._, ._ },
        .{ ._, .p_, .andn, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
        .{ ._, .p_, .@"or", .tmp1x, .tmp2x, ._, ._ },
        .{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .cmov, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_signed_int = .{ .of = .dword, .is = .dword } },
        .{ .multiple_scalar_signed_int = .{ .of = .dword, .is = .dword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .i32, .kind = .{ .rc = .general_purpose } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._, .mov, .tmp1d, .memia(.src0d, .tmp0, .add_size), ._, ._ },
        .{ ._, ._, .cmp, .tmp1d, .memia(.src1d, .tmp0, .add_size), ._, ._ },
        .{ ._, ._l, .cmov, .tmp1d, .memia(.src1d, .tmp0, .add_size), ._, ._ },
        .{ ._, ._, .mov, .memia(.dst0d, .tmp0, .add_size), .tmp1d, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(4), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .src_constraints = .{
        .{ .multiple_scalar_signed_int = .{ .of = .dword, .is = .dword } },
        .{ .multiple_scalar_signed_int = .{ .of = .dword, .is = .dword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .i32, .kind = .{ .rc = .general_purpose } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._, .mov, .tmp1d, .memia(.src0d, .tmp0, .add_size), ._, ._ },
        .{ ._, ._, .cmp, .tmp1d, .memia(.src1d, .tmp0, .add_size), ._, ._ },
        .{ ._, ._nl, .j, .@"1f", ._, ._, ._ },
        .{ ._, ._, .mov, .tmp1d, .memia(.src1d, .tmp0, .add_size), ._, ._ },
        .{ .@"1:", ._, .mov, .memia(.dst0d, .tmp0, .add_size), .tmp1d, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(4), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .avx, null, null, null },
    .src_constraints = .{
        .{ .scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
        .{ .scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_sse, .mem } },
        .{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
        .{ .src = .{ .to_sse, .to_sse } },
    },
    .dst_temps = .{.{ .rc = .sse }},
    .each = .{ .once = &.{
        .{ ._, .vp_d, .maxu, .dst0x, .src0x, .src1x, ._ },
    } },
}, .{
    .required_features = .{ .sse4_1, null, null, null },
    .src_constraints = .{
        .{ .scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
        .{ .scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mut_sse, .mem } },
        .{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
        .{ .src = .{ .to_mut_sse, .to_sse } },
    },
    .dst_temps = .{.{ .ref = .src0 }},
    .each = .{ .once = &.{
        .{ ._, .p_d, .maxu, .dst0x, .src1x, ._, ._ },
    } },
}, .{
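    // SSE2 also lacks unsigned dword compares: xor both operands with the splatted sign bit (the smin_mem constant) so pcmpgtd yields the unsigned order.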
    .required_features = .{ .sse2, null, null, null },
    .src_constraints = .{
        .{ .scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
        .{ .scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mut_sse, .mem } },
        .{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
        .{ .src = .{ .to_mut_sse, .to_sse } },
    },
    .extra_temps = .{
        .{ .type = .usize, .kind = .{ .rc = .general_purpose } },
        .{ .kind = .{ .smin_mem = .{ .ref = .src0, .vectorize_to = .xword } } },
        .{ .type = .vector_4_u32, .kind = .{ .rc = .sse } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.{ .rc = .sse }},
    .each = .{ .once = &.{
        .{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
        .{ ._, ._dqa, .mov, .dst0x, .lea(.xword, .tmp0), ._, ._ },
        .{ ._, ._dqa, .mov, .tmp2x, .dst0x, ._, ._ },
        .{ ._, .p_, .xor, .dst0x, .src0x, ._, ._ },
        .{ ._, .p_, .xor, .tmp2x, .src1x, ._, ._ },
        .{ ._, .p_d, .cmpgt, .dst0x, .tmp2x, ._, ._ },
        .{ ._, .p_, .@"and", .src0x, .dst0x, ._, ._ },
        .{ ._, .p_, .andn, .dst0x, .src1x, ._, ._ },
        .{ ._, .p_, .@"or", .dst0x, .src0x, ._, ._ },
    } },
}, .{
    .required_features = .{ .avx2, null, null, null },
    .src_constraints = .{
        .{ .scalar_unsigned_int = .{ .of = .yword, .is = .dword } },
        .{ .scalar_unsigned_int = .{ .of = .yword, .is = .dword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_sse, .mem } },
        .{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
        .{ .src = .{ .to_sse, .to_sse } },
    },
    .dst_temps = .{.{ .rc = .sse }},
    .each = .{ .once = &.{
        .{ ._, .vp_d, .maxu, .dst0y, .src0y, .src1y, ._ },
    } },
}, .{
    .required_features = .{ .avx2, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_unsigned_int = .{ .of = .yword, .is = .dword } },
        .{ .multiple_scalar_unsigned_int = .{ .of = .yword, .is = .dword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .vector_8_u32, .kind = .{ .rc = .sse } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", .v_dqa, .mov, .tmp1y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
        .{ ._, .vp_d, .maxu, .tmp1y, .tmp1y, .memia(.src1y, .tmp0, .add_size), ._ },
        .{ ._, .v_dqa, .mov, .memia(.dst0y, .tmp0, .add_size), .tmp1y, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .avx, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
        .{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .vector_4_u32, .kind = .{ .rc = .sse } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", .v_dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
        .{ ._, .vp_d, .maxu, .tmp1x, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._ },
        .{ ._, .v_dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .sse4_1, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
        .{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .vector_4_u32, .kind = .{ .rc = .sse } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
        .{ ._, .p_d, .maxu, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
        .{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .sse2, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
        .{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .kind = .{ .smin_mem = .{ .ref = .src0, .vectorize_to = .xword } } },
        .{ .type = .vector_4_u32, .kind = .{ .rc = .sse } },
        .{ .type = .vector_4_u32, .kind = .{ .rc = .sse } },
        .{ .type = .vector_4_u32, .kind = .{ .rc = .sse } },
        .{ .type = .vector_4_u32, .kind = .{ .rc = .sse } },
        .{ .type = .vector_4_u32, .kind = .{ .rc = .sse } },
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
        .{ ._, ._dqa, .mov, .tmp2x, .lea(.xword, .tmp0), ._, ._ },
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._dqa, .mov, .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
        .{ ._, ._dqa, .mov, .tmp4x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
        .{ ._, ._dqa, .mov, .tmp5x, .tmp3x, ._, ._ },
        .{ ._, ._dqa, .mov, .tmp6x, .tmp4x, ._, ._ },
        .{ ._, .p_, .xor, .tmp5x, .tmp2x, ._, ._ },
        .{ ._, .p_, .xor, .tmp6x, .tmp2x, ._, ._ },
        .{ ._, .p_d, .cmpgt, .tmp5x, .tmp6x, ._, ._ },
        .{ ._, .p_, .@"and", .tmp3x, .tmp5x, ._, ._ },
        .{ ._, .p_, .andn, .tmp5x, .tmp4x, ._, ._ },
        .{ ._, .p_, .@"or", .tmp3x, .tmp5x, ._, ._ },
        .{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp3x, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .cmov, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_unsigned_int = .{ .of = .dword, .is = .dword } },
        .{ .multiple_scalar_unsigned_int = .{ .of = .dword, .is = .dword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .u32, .kind = .{ .rc = .general_purpose } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._, .mov, .tmp1d, .memia(.src0d, .tmp0, .add_size), ._, ._ },
        .{ ._, ._, .cmp, .tmp1d, .memia(.src1d, .tmp0, .add_size), ._, ._ },
        .{ ._, ._b, .cmov, .tmp1d, .memia(.src1d, .tmp0, .add_size), ._, ._ },
        .{ ._, ._, .mov, .memia(.dst0d, .tmp0, .add_size), .tmp1d, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(4), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .src_constraints = .{
        .{ .multiple_scalar_unsigned_int = .{ .of = .dword, .is = .dword } },
        .{ .multiple_scalar_unsigned_int = .{ .of = .dword, .is = .dword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .u32, .kind = .{ .rc = .general_purpose } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._, .mov, .tmp1d, .memia(.src0d, .tmp0, .add_size), ._, ._ },
        .{ ._, ._, .cmp, .tmp1d, .memia(.src1d, .tmp0, .add_size), ._, ._ },
        .{ ._, ._nb, .j, .@"1f", ._, ._, ._ },
        .{ ._, ._, .mov, .tmp1d, .memia(.src1d, .tmp0, .add_size), ._, ._ },
        .{ .@"1:", ._, .mov, .memia(.dst0d, .tmp0, .add_size), .tmp1d, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(4), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .avx, null, null, null },
    .src_constraints = .{
        .{ .scalar_signed_int = .{ .of = .xword, .is = .qword } },
        .{ .scalar_signed_int = .{ .of = .xword, .is = .qword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_sse, .to_sse } },
    },
    .dst_temps = .{.{ .rc = .sse }},
    .each = .{ .once = &.{
        .{ ._, .vp_q, .cmpgt, .dst0x, .src1x, .src0x, ._ },
        .{ ._, .vp_b, .blendv, .dst0x, .src0x, .src1x, .dst0x },
    } },
}, .{
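    // pcmpgtq requires SSE4.2, and non-VEX pblendvb reads its mask implicitly from xmm0, hence the pinned temporary.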
    .required_features = .{ .sse4_2, null, null, null },
    .src_constraints = .{
        .{ .scalar_signed_int = .{ .of = .xword, .is = .qword } },
        .{ .scalar_signed_int = .{ .of = .xword, .is = .qword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mut_sse, .mem } },
        .{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
        .{ .src = .{ .to_mut_sse, .to_sse } },
    },
    .extra_temps = .{
        .{ .type = .vector_2_i64, .kind = .{ .reg = .xmm0 } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.{ .ref = .src0 }},
    .each = .{ .once = &.{
        .{ ._, ._dqa, .mov, .tmp0x, .src1x, ._, ._ },
        .{ ._, .p_q, .cmpgt, .tmp0x, .src0x, ._, ._ },
        .{ ._, .p_b, .blendv, .dst0x, .src1x, .tmp0x, ._ },
    } },
}, .{
    .required_features = .{ .avx2, null, null, null },
    .src_constraints = .{
        .{ .scalar_signed_int = .{ .of = .yword, .is = .qword } },
        .{ .scalar_signed_int = .{ .of = .yword, .is = .qword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_sse, .to_sse } },
    },
    .dst_temps = .{.{ .rc = .sse }},
    .each = .{ .once = &.{
        .{ ._, .vp_q, .cmpgt, .dst0y, .src1y, .src0y, ._ },
        .{ ._, .vp_b, .blendv, .dst0y, .src0y, .src1y, .dst0y },
    } },
}, .{
    .required_features = .{ .avx2, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_signed_int = .{ .of = .yword, .is = .qword } },
        .{ .multiple_scalar_signed_int = .{ .of = .yword, .is = .qword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .vector_4_i64, .kind = .{ .rc = .sse } },
        .{ .type = .vector_4_i64, .kind = .{ .rc = .sse } },
        .{ .type = .vector_4_i64, .kind = .{ .rc = .sse } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", .v_dqa, .mov, .tmp1y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
        .{ ._, .v_dqa, .mov, .tmp2y, .memia(.src1y, .tmp0, .add_size), ._, ._ },
        .{ ._, .vp_q, .cmpgt, .tmp3y, .tmp2y, .tmp1y, ._ },
        .{ ._, .vp_b, .blendv, .tmp1y, .tmp1y, .tmp2y, .tmp3y },
        .{ ._, .v_dqa, .mov, .memia(.dst0y, .tmp0, .add_size), .tmp1y, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .avx, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .qword } },
        .{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .qword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .vector_2_i64, .kind = .{ .rc = .sse } },
        .{ .type = .vector_2_i64, .kind = .{ .rc = .sse } },
        .{ .type = .vector_2_i64, .kind = .{ .rc = .sse } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", .v_dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
        .{ ._, .v_dqa, .mov, .tmp2x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
        .{ ._, .vp_q, .cmpgt, .tmp3x, .tmp2x, .tmp1x, ._ },
        .{ ._, .vp_b, .blendv, .tmp1x, .tmp1x, .tmp2x, .tmp3x },
        .{ ._, .v_dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .sse4_2, null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .qword } },
        .{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .qword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .vector_2_i64, .kind = .{ .rc = .sse } },
        .{ .type = .vector_2_i64, .kind = .{ .rc = .sse } },
        .{ .type = .vector_2_i64, .kind = .{ .reg = .xmm0 } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
        .{ ._, ._dqa, .mov, .tmp2x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
        .{ ._, ._dqa, .mov, .tmp3x, .tmp2x, ._, ._ },
        .{ ._, .p_q, .cmpgt, .tmp3x, .tmp1x, ._, ._ },
        .{ ._, .p_b, .blendv, .tmp1x, .tmp2x, .tmp3x, ._ },
        .{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .@"64bit", .cmov, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_signed_int = .{ .of = .qword, .is = .qword } },
        .{ .multiple_scalar_signed_int = .{ .of = .qword, .is = .qword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .i64, .kind = .{ .rc = .general_purpose } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._, .mov, .tmp1q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
        .{ ._, ._, .cmp, .tmp1q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
        .{ ._, ._l, .cmov, .tmp1q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
        .{ ._, ._, .mov, .memia(.dst0q, .tmp0, .add_size), .tmp1q, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
    .required_features = .{ .@"64bit", null, null, null },
    .src_constraints = .{
        .{ .multiple_scalar_signed_int = .{ .of = .qword, .is = .qword } },
        .{ .multiple_scalar_signed_int = .{ .of = .qword, .is = .qword } },
    },
    .patterns = &.{
        .{ .src = .{ .to_mem, .to_mem } },
    },
    .extra_temps = .{
        .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
        .{ .type = .i64, .kind = .{ .rc = .general_purpose } },
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
        .unused,
    },
    .dst_temps = .{.mem},
    .clobbers = .{ .eflags = true },
    .each = .{ .once = &.{
        .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
        .{ .@"0:", ._, .mov, .tmp1q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
        .{ ._, ._, .cmp, .tmp1q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
        .{ ._, ._nl, .j, .@"1f", ._, ._, ._ },
        .{ ._, ._, .mov, .tmp1q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
        .{ .@"1:", ._, .mov, .memia(.dst0q, .tmp0, .add_size), .tmp1q, ._, ._ },
        .{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
        .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
    } },
}, .{
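    // Unsigned qword max: bias both sides with the broadcast sign-bit constant (movddup/vpbroadcastq of smin_mem), then reuse the signed pcmpgtq + blend sequence.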
|
|
.required_features = .{ .avx, null, null, null },
|
|
.src_constraints = .{
|
|
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .qword } },
|
|
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .qword } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_sse, .mem } },
|
|
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_sse, .to_sse } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .smin_mem = .{ .ref = .src0, .vectorize_to = .none } } },
|
|
.{ .type = .vector_2_u64, .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.{ .rc = .sse }},
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
|
|
.{ ._, .v_, .movddup, .tmp2x, .lea(.qword, .tmp0), ._, ._ },
|
|
.{ ._, .vp_, .xor, .dst0x, .tmp2x, .src0x, ._ },
|
|
.{ ._, .vp_, .xor, .tmp2x, .tmp2x, .src1x, ._ },
|
|
.{ ._, .vp_q, .cmpgt, .dst0x, .tmp2x, .dst0x, ._ },
|
|
.{ ._, .vp_b, .blendv, .dst0x, .src0x, .src1x, .dst0x },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .sse4_2, null, null, null },
|
|
.src_constraints = .{
|
|
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .qword } },
|
|
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .qword } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_sse, .mem } },
|
|
.{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_mut_sse, .to_sse } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .smin_mem = .{ .ref = .src0, .vectorize_to = .none } } },
|
|
.{ .type = .vector_2_u64, .kind = .{ .rc = .sse } },
|
|
.{ .type = .vector_2_u64, .kind = .{ .reg = .xmm0 } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
|
|
.{ ._, ._, .movddup, .tmp2x, .lea(.qword, .tmp0), ._, ._ },
|
|
.{ ._, ._dqa, .mov, .tmp3x, .tmp2x, ._, ._ },
|
|
.{ ._, .p_, .xor, .tmp2x, .src0x, ._, ._ },
|
|
.{ ._, .p_, .xor, .tmp3x, .src1x, ._, ._ },
|
|
.{ ._, .p_q, .cmpgt, .tmp3x, .tmp2x, ._, ._ },
|
|
.{ ._, .p_b, .blendv, .dst0x, .src1x, .tmp3x, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .avx2, null, null, null },
|
|
.src_constraints = .{
|
|
.{ .scalar_unsigned_int = .{ .of = .yword, .is = .qword } },
|
|
.{ .scalar_unsigned_int = .{ .of = .yword, .is = .qword } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_sse, .mem } },
|
|
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_sse, .to_sse } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .smin_mem = .{ .ref = .src0, .vectorize_to = .none } } },
|
|
.{ .type = .vector_4_u64, .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.{ .rc = .sse }},
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
|
|
.{ ._, .vp_q, .broadcast, .tmp2y, .lea(.qword, .tmp0), ._, ._ },
|
|
.{ ._, .vp_, .xor, .dst0y, .tmp2y, .src0y, ._ },
|
|
.{ ._, .vp_, .xor, .tmp2y, .tmp2y, .src1y, ._ },
|
|
.{ ._, .vp_q, .cmpgt, .dst0y, .tmp2y, .dst0y, ._ },
|
|
.{ ._, .vp_b, .blendv, .dst0y, .src0y, .src1y, .dst0y },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .yword, .is = .qword } },
.{ .multiple_scalar_unsigned_int = .{ .of = .yword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .smin_mem = .{ .ref = .src0, .vectorize_to = .none } } },
.{ .type = .vector_4_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_4_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_4_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_4_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_4_u64, .kind = .{ .rc = .sse } },
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, .vp_q, .broadcast, .tmp2y, .lea(.qword, .tmp0), ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_dqa, .mov, .tmp3y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
.{ ._, .v_dqa, .mov, .tmp4y, .memia(.src1y, .tmp0, .add_size), ._, ._ },
.{ ._, .vp_, .xor, .tmp5y, .tmp3y, .tmp2y, ._ },
.{ ._, .vp_, .xor, .tmp6y, .tmp4y, .tmp2y, ._ },
.{ ._, .vp_q, .cmpgt, .tmp5y, .tmp6y, .tmp5y, ._ },
.{ ._, .vp_b, .blendv, .tmp3y, .tmp3y, .tmp4y, .tmp5y },
.{ ._, .v_dqa, .mov, .memia(.dst0y, .tmp0, .add_size), .tmp3y, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .qword } },
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .smin_mem = .{ .ref = .src0, .vectorize_to = .none } } },
.{ .type = .vector_2_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_2_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_2_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_2_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_2_u64, .kind = .{ .rc = .sse } },
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, .v_, .movddup, .tmp2x, .lea(.qword, .tmp0), ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_dqa, .mov, .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .v_dqa, .mov, .tmp4x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, .vp_, .xor, .tmp5x, .tmp3x, .tmp2x, ._ },
.{ ._, .vp_, .xor, .tmp6x, .tmp4x, .tmp2x, ._ },
.{ ._, .vp_q, .cmpgt, .tmp5x, .tmp6x, .tmp5x, ._ },
.{ ._, .vp_b, .blendv, .tmp3x, .tmp3x, .tmp4x, .tmp5x },
.{ ._, .v_dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp3x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
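// SSE4.2 fallback: the non-VEX pblendvb encoding uses xmm0 as an implicit mask
// register, hence the temp pinned to .reg = .xmm0 below.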
.required_features = .{ .sse4_2, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .qword } },
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .smin_mem = .{ .ref = .src0, .vectorize_to = .none } } },
.{ .type = .vector_2_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_2_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_2_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_2_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_2_u64, .kind = .{ .reg = .xmm0 } },
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, ._, .movddup, .tmp2x, .lea(.qword, .tmp0), ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._dqa, .mov, .tmp5x, .tmp2x, ._, ._ },
.{ ._, ._dqa, .mov, .tmp6x, .tmp2x, ._, ._ },
.{ ._, ._dqa, .mov, .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, ._dqa, .mov, .tmp4x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, .p_, .xor, .tmp5x, .tmp3x, ._, ._ },
.{ ._, .p_, .xor, .tmp6x, .tmp4x, ._, ._ },
.{ ._, .p_q, .cmpgt, .tmp6x, .tmp5x, ._, ._ },
.{ ._, .p_b, .blendv, .tmp3x, .tmp4x, .tmp6x, ._ },
.{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp3x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
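// Scalar GPR path for u64 elements: each pair of qwords is compared with cmp
// and the larger one kept via cmovb from memory (or a short branch when cmov
// is unavailable).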
.required_features = .{ .@"64bit", .cmov, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .qword, .is = .qword } },
.{ .multiple_scalar_unsigned_int = .{ .of = .qword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp1q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, ._b, .cmov, .tmp1q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .mov, .memia(.dst0q, .tmp0, .add_size), .tmp1q, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .qword, .is = .qword } },
.{ .multiple_scalar_unsigned_int = .{ .of = .qword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp1q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, ._nb, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp1q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ .@"1:", ._, .mov, .memia(.dst0q, .tmp0, .add_size), .tmp1q, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
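// Arbitrary-width signed integers: an sbb loop compares the operands limb by
// limb through the borrow chain, cmovl then selects a pointer to the greater
// operand, and rep movsq (rsi/rdi/rcx temps) copies it into dst0.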
.required_features = .{ .@"64bit", .cmov, null, null },
.src_constraints = .{ .any_scalar_signed_int, .any_scalar_signed_int },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .isize, .kind = .{ .reg = .rsi } },
.{ .type = .u64, .kind = .{ .reg = .rdi } },
.{ .type = .u64, .kind = .{ .reg = .rcx } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1d, .sia(-1, .none, .add_src0_elem_size_div_8), ._, ._ },
.{ ._, ._c, .cl, ._, ._, ._, ._ },
.{ .@"1:", ._, .mov, .tmp2q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .sbb, .tmp2q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .lea, .tmp0p, .lead(.none, .tmp0, 8), ._, ._ },
.{ ._, ._c, .de, .tmp1d, ._, ._, ._ },
.{ ._, ._nz, .j, .@"1b", ._, ._, ._ },
.{ ._, ._, .mov, .tmp2q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .sbb, .tmp2q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .lea, .tmp1p, .memiad(.src0, .tmp0, .add_size_sub_elem_size, 8), ._, ._ },
.{ ._, ._, .lea, .tmp2p, .memiad(.src1, .tmp0, .add_size_sub_elem_size, 8), ._, ._ },
.{ ._, ._l, .cmov, .tmp1p, .tmp2p, ._, ._ },
.{ ._, ._, .lea, .tmp2p, .memiad(.dst0, .tmp0, .add_size_sub_elem_size, 8), ._, ._ },
.{ ._, ._, .mov, .tmp3d, .sa(.none, .add_src0_elem_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .any_scalar_signed_int, .any_scalar_signed_int },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .isize, .kind = .{ .reg = .rsi } },
.{ .type = .u64, .kind = .{ .reg = .rdi } },
.{ .type = .u64, .kind = .{ .reg = .rcx } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1d, .sia(-1, .none, .add_src0_elem_size_div_8), ._, ._ },
.{ ._, ._c, .cl, ._, ._, ._, ._ },
.{ .@"1:", ._, .mov, .tmp2q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .sbb, .tmp2q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .lea, .tmp0p, .lead(.none, .tmp0, 8), ._, ._ },
.{ ._, ._c, .de, .tmp1d, ._, ._, ._ },
.{ ._, ._nz, .j, .@"1b", ._, ._, ._ },
.{ ._, ._, .mov, .tmp2q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .sbb, .tmp2q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .lea, .tmp1p, .memiad(.src0, .tmp0, .add_size_sub_elem_size, 8), ._, ._ },
.{ ._, ._nl, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .lea, .tmp1p, .memiad(.src1, .tmp0, .add_size_sub_elem_size, 8), ._, ._ },
.{ .@"1:", ._, .lea, .tmp2p, .memiad(.dst0, .tmp0, .add_size_sub_elem_size, 8), ._, ._ },
.{ ._, ._, .mov, .tmp3d, .sa(.none, .add_src0_elem_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .cmov, null, null },
.src_constraints = .{ .any_scalar_unsigned_int, .any_scalar_unsigned_int },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .isize, .kind = .{ .reg = .rsi } },
.{ .type = .u64, .kind = .{ .reg = .rdi } },
.{ .type = .u64, .kind = .{ .reg = .rcx } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1d, .sa(.none, .add_src0_elem_size_div_8), ._, ._ },
.{ ._, ._c, .cl, ._, ._, ._, ._ },
.{ .@"1:", ._, .mov, .tmp2q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .sbb, .tmp2q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .lea, .tmp0p, .lead(.none, .tmp0, 8), ._, ._ },
.{ ._, ._c, .de, .tmp1d, ._, ._, ._ },
.{ ._, ._nz, .j, .@"1b", ._, ._, ._ },
.{ ._, ._, .lea, .tmp1p, .memia(.src0, .tmp0, .add_size_sub_elem_size), ._, ._ },
.{ ._, ._, .lea, .tmp2p, .memia(.src1, .tmp0, .add_size_sub_elem_size), ._, ._ },
.{ ._, ._b, .cmov, .tmp1p, .tmp2p, ._, ._ },
.{ ._, ._, .lea, .tmp2p, .memia(.dst0, .tmp0, .add_size_sub_elem_size), ._, ._ },
.{ ._, ._, .mov, .tmp3d, .sa(.none, .add_src0_elem_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp0p, .tmp0p, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .any_scalar_unsigned_int, .any_scalar_unsigned_int },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .isize, .kind = .{ .reg = .rsi } },
.{ .type = .u64, .kind = .{ .reg = .rdi } },
.{ .type = .u64, .kind = .{ .reg = .rcx } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1d, .sa(.none, .add_src0_elem_size_div_8), ._, ._ },
.{ ._, ._c, .cl, ._, ._, ._, ._ },
.{ .@"1:", ._, .mov, .tmp2q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .sbb, .tmp2q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .lea, .tmp0p, .lead(.none, .tmp0, 8), ._, ._ },
.{ ._, ._c, .de, .tmp1d, ._, ._, ._ },
.{ ._, ._nz, .j, .@"1b", ._, ._, ._ },
.{ ._, ._, .lea, .tmp1p, .memia(.src0, .tmp0, .add_size_sub_elem_size), ._, ._ },
.{ ._, ._nb, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .lea, .tmp1p, .memia(.src1, .tmp0, .add_size_sub_elem_size), ._, ._ },
.{ .@"1:", ._, .lea, .tmp2p, .memia(.dst0, .tmp0, .add_size_sub_elem_size), ._, ._ },
.{ ._, ._, .mov, .tmp3d, .sa(.none, .add_src0_elem_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp0p, .tmp0p, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
}, .{
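// f16 @max via F16C: no native half-precision max exists, so both operands are
// widened with vcvtph2ps, combined with vmaxss plus a vcmpunordss mask that
// routes NaN lanes to the other operand, and narrowed back with vcvtps2ph.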
.required_features = .{ .f16c, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .word, .is = .word } },
.{ .scalar_float = .{ .of = .word, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .to_sse } },
},
.extra_temps = .{
.{ .kind = .{ .mut_rc = .{ .ref = .src1, .rc = .sse } } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .sse } }},
.each = .{ .once = &.{
.{ ._, .v_ps, .cvtph2, .dst0x, .src0x, ._, ._ },
.{ ._, .v_ps, .cvtph2, .tmp0x, .src1x, ._, ._ },
.{ ._, .v_ss, .cmp, .tmp1x, .dst0x, .dst0x, .vp(.unord) },
.{ ._, .v_ss, .max, .dst0x, .tmp0x, .dst0x, ._ },
.{ ._, .v_ps, .blendv, .dst0x, .dst0x, .tmp0x, .tmp1x },
.{ ._, .v_, .cvtps2ph, .dst0x, .dst0x, .rm(.{}), ._ },
} },
}, .{
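// No F16C: fall back to calling the compiler-rt __fmaxh routine with the C
// calling convention.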
.required_features = .{ .sse, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .word, .is = .word } },
.{ .scalar_float = .{ .of = .word, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .{ .to_reg = .xmm0 }, .{ .to_reg = .xmm1 } } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = "__fmaxh" } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, ._, .call, .tmp0d, ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .dword, .is = .dword } },
.{ .scalar_float = .{ .of = .dword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .to_sse } },
},
.extra_temps = .{
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .sse } }},
.each = .{ .once = &.{
.{ ._, .v_ss, .cmp, .tmp0x, .src0x, .src0x, .vp(.unord) },
.{ ._, .v_ss, .max, .dst0x, .src1x, .src0x, ._ },
.{ ._, .v_ps, .blendv, .dst0x, .dst0x, .src1x, .tmp0x },
} },
}, .{
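// SSE4.1 f32: maxss favors its second operand on NaN, so src0 (pinned to xmm0,
// the implicit blendvps mask) is compared against itself with cmpunordss and
// blendvps redirects NaN lanes to src1, yielding @max's prefer-the-non-NaN
// semantics.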
.required_features = .{ .sse4_1, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .dword, .is = .dword } },
.{ .scalar_float = .{ .of = .dword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .{ .to_reg = .xmm0 }, .to_sse } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, ._ps, .mova, .dst0x, .src1x, ._, ._ },
.{ ._, ._ss, .max, .dst0x, .src0x, ._, ._ },
.{ ._, ._ss, .cmp, .src0x, .src0x, .vp(.unord), ._ },
.{ ._, ._ps, .blendv, .dst0x, .src1x, .src0x, ._ },
} },
}, .{
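// Pre-SSE4.1 f32: without blendv, the ordered-compare mask from cmpordss
// drives an and/andn/or merge to select the non-NaN result.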
.required_features = .{ .sse, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .dword, .is = .dword } },
.{ .scalar_float = .{ .of = .dword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mut_sse, .to_sse } },
},
.extra_temps = .{
.{ .type = .vector_4_f32, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, ._ps, .mova, .tmp0x, .src1x, ._, ._ },
.{ ._, ._ss, .max, .tmp0x, .src0x, ._, ._ },
.{ ._, ._ss, .cmp, .dst0x, .src0x, .vp(.ord), ._ },
.{ ._, ._ps, .@"and", .tmp0x, .dst0x, ._, ._ },
.{ ._, ._ps, .andn, .dst0x, .src1x, ._, ._ },
.{ ._, ._ps, .@"or", .dst0x, .tmp0x, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .qword, .is = .qword } },
.{ .scalar_float = .{ .of = .qword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .to_sse } },
},
.extra_temps = .{
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .sse } }},
.each = .{ .once = &.{
.{ ._, .v_sd, .cmp, .tmp0x, .src0x, .src0x, .vp(.unord) },
.{ ._, .v_sd, .max, .dst0x, .src1x, .src0x, ._ },
.{ ._, .v_pd, .blendv, .dst0x, .dst0x, .src1x, .tmp0x },
} },
}, .{
.required_features = .{ .sse4_1, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .qword, .is = .qword } },
.{ .scalar_float = .{ .of = .qword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .{ .to_reg = .xmm0 }, .to_sse } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, ._pd, .mova, .dst0x, .src1x, ._, ._ },
.{ ._, ._sd, .max, .dst0x, .src0x, ._, ._ },
.{ ._, ._sd, .cmp, .src0x, .src0x, .vp(.unord), ._ },
.{ ._, ._pd, .blendv, .dst0x, .src1x, .src0x, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .qword, .is = .qword } },
.{ .scalar_float = .{ .of = .qword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mut_sse, .to_sse } },
},
.extra_temps = .{
.{ .type = .vector_2_f64, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, ._pd, .mova, .tmp0x, .src1x, ._, ._ },
.{ ._, ._sd, .max, .tmp0x, .src0x, ._, ._ },
.{ ._, ._sd, .cmp, .dst0x, .src0x, .vp(.ord), ._ },
.{ ._, ._pd, .@"and", .tmp0x, .dst0x, ._, ._ },
.{ ._, ._pd, .andn, .dst0x, .src1x, ._, ._ },
.{ ._, ._pd, .@"or", .dst0x, .tmp0x, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .qword, .is = .qword } },
.{ .scalar_float = .{ .of = .qword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .{ .to_reg = .xmm0 }, .{ .to_reg = .xmm1 } } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = "fmax" } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, ._, .call, .tmp0d, ._, ._, ._ },
} },
}, .{
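// f80 @max stays on the x87 stack: fucomi sets eflags directly, fcmovu first
// patches a NaN in st0 with the other operand, and fcmovnb then keeps the
// larger value.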
.required_features = .{ .x87, .cmov, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .xword, .is = .tbyte } },
.{ .scalar_float = .{ .of = .xword, .is = .tbyte } },
},
.patterns = &.{
.{ .src = .{ .to_x87, .mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .mem, .to_x87 } },
.{ .src = .{ .to_x87, .to_x87 } },
},
.extra_temps = .{
.{ .type = .f80, .kind = .{ .reg = .st7 } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .mut_rc = .{ .ref = .src1, .rc = .x87 } }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, .f_, .ld, .src0t, ._, ._, ._ },
.{ ._, .f_, .ucomi, .tmp0t, .tmp0t, ._, ._ },
.{ ._, .f_u, .cmov, .tmp0t, .src1t, ._, ._ },
.{ ._, .f_, .xch, .src1t, ._, ._, ._ },
.{ ._, .f_, .ucomi, .tmp0t, .src1t, ._, ._ },
.{ ._, .f_, .xch, .src1t, ._, ._, ._ },
.{ ._, .f_nb, .cmov, .tmp0t, .src1t, ._, ._ },
.{ ._, .f_p, .st, .dst0t, ._, ._, ._ },
} },
}, .{
.required_features = .{ .sahf, .x87, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .xword, .is = .tbyte } },
.{ .scalar_float = .{ .of = .xword, .is = .tbyte } },
},
.patterns = &.{
.{ .src = .{ .to_x87, .mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .mem, .to_x87 } },
.{ .src = .{ .to_x87, .to_x87 } },
},
.extra_temps = .{
.{ .type = .f80, .kind = .{ .reg = .st7 } },
.{ .type = .u8, .kind = .{ .reg = .ah } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .mut_rc = .{ .ref = .src1, .rc = .x87 } }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, .f_, .ld, .src0t, ._, ._, ._ },
.{ ._, .f_, .ucom, .tmp0t, ._, ._, ._ },
.{ ._, .fn_sw, .st, .tmp1w, ._, ._, ._ },
.{ ._, ._, .sahf, ._, ._, ._, ._ },
.{ ._, ._p, .j, .@"0f", ._, ._, ._ },
.{ ._, .f_, .xch, .src1t, ._, ._, ._ },
.{ ._, .f_, .ucom, .src1t, ._, ._, ._ },
.{ ._, .fn_sw, .st, .tmp1w, ._, ._, ._ },
.{ ._, .f_, .xch, .src1t, ._, ._, ._ },
.{ ._, ._, .sahf, ._, ._, ._, ._ },
.{ ._, ._b, .j, .@"1f", ._, ._, ._ },
.{ .@"0:", .f_p, .st, .tmp0t, ._, ._, ._ },
.{ ._, .f_, .ld, .src1t, ._, ._, ._ },
.{ .@"1:", .f_p, .st, .dst0t, ._, ._, ._ },
} },
}, .{
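// Without sahf (early x86-64 CPUs): classify st0 with fxam and read the status
// word via fnstsw; testing the C2/C3 condition bits for zero appears to detect
// the NaN case before the fucom-based compare, and C0 then stands in for the
// carry flag.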
.required_features = .{ .@"64bit", .x87, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .xword, .is = .tbyte } },
.{ .scalar_float = .{ .of = .xword, .is = .tbyte } },
},
.patterns = &.{
.{ .src = .{ .to_x87, .mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .mem, .to_x87 } },
.{ .src = .{ .to_x87, .to_x87 } },
},
.extra_temps = .{
.{ .type = .f80, .kind = .{ .reg = .st7 } },
.{ .type = .u8, .kind = .{ .reg = .ah } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .mut_rc = .{ .ref = .src1, .rc = .x87 } }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, .f_, .ld, .src0t, ._, ._, ._ },
.{ ._, .f_, .xam, ._, ._, ._, ._ },
.{ ._, .fn_sw, .st, .tmp1w, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1b, .si(0b0_1_000_100), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, .f_, .xch, .src1t, ._, ._, ._ },
.{ ._, .f_, .ucom, .src1t, ._, ._, ._ },
.{ ._, .fn_sw, .st, .tmp1w, ._, ._, ._ },
.{ ._, .f_, .xch, .src1t, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1b, .si(0b0_0_000_001), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ .@"0:", .f_p, .st, .tmp0t, ._, ._, ._ },
.{ ._, .f_, .ld, .src1t, ._, ._, ._ },
.{ .@"1:", .f_p, .st, .dst0t, ._, ._, ._ },
} },
}, .{
.required_features = .{ .x87, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .xword, .is = .tbyte } },
.{ .scalar_float = .{ .of = .xword, .is = .tbyte } },
},
.patterns = &.{
.{ .src = .{ .to_x87, .mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .mem, .to_x87 } },
.{ .src = .{ .to_x87, .to_x87 } },
},
.extra_temps = .{
.{ .type = .f80, .kind = .{ .reg = .st7 } },
.{ .type = .u8, .kind = .{ .reg = .ah } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .mut_rc = .{ .ref = .src1, .rc = .x87 } }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, .f_, .ld, .src0t, ._, ._, ._ },
.{ ._, .f_, .ucom, .tmp0t, ._, ._, ._ },
.{ ._, .fn_sw, .st, .tmp1w, ._, ._, ._ },
.{ ._, ._, .sahf, ._, ._, ._, ._ },
.{ ._, ._p, .j, .@"0f", ._, ._, ._ },
.{ ._, .f_, .xch, .src1t, ._, ._, ._ },
.{ ._, .f_, .ucom, .src1t, ._, ._, ._ },
.{ ._, .fn_sw, .st, .tmp1w, ._, ._, ._ },
.{ ._, .f_, .xch, .src1t, ._, ._, ._ },
.{ ._, ._, .sahf, ._, ._, ._, ._ },
.{ ._, ._b, .j, .@"1f", ._, ._, ._ },
.{ .@"0:", .f_p, .st, .tmp0t, ._, ._, ._ },
.{ ._, .f_, .ld, .src1t, ._, ._, ._ },
.{ .@"1:", .f_p, .st, .dst0t, ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .xword, .is = .xword } },
.{ .scalar_float = .{ .of = .xword, .is = .xword } },
},
.patterns = &.{
.{ .src = .{ .{ .to_reg = .xmm0 }, .{ .to_reg = .xmm1 } } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = "fmaxq" } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, ._, .call, .tmp0d, ._, ._, ._ },
} },
} }) catch |err| switch (err) {
error.SelectFailed => return cg.fail("failed to select {s} {} {} {}", .{
@tagName(air_tag),
cg.typeOf(bin_op.lhs).fmt(pt),
ops[0].tracking(cg),
ops[1].tracking(cg),
}),
else => |e| return e,
};
try res[0].finish(inst, &.{ bin_op.lhs, bin_op.rhs }, &ops, cg);
},
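// @min mirrors the @max table above with inverted condition codes (ge/ae now
// select the other operand) and the pmins/pminu instruction forms in place of
// the max variants.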
.min => |air_tag| if (use_old) try cg.airBinOp(inst, air_tag) else fallback: {
const bin_op = air_datas[@intFromEnum(inst)].bin_op;
const ty = cg.typeOf(bin_op.lhs);
if (ty.isVector(zcu) and cg.floatBits(ty.childType(zcu)) != null) break :fallback try cg.airBinOp(inst, air_tag);
var ops = try cg.tempsFromOperands(inst, .{ bin_op.lhs, bin_op.rhs });
var res: [1]Temp = undefined;
cg.select(&res, &.{cg.typeOf(bin_op.lhs)}, &ops, comptime &.{ .{
.required_features = .{ .cmov, null, null, null },
.src_constraints = .{ .{ .signed_int = .byte }, .{ .signed_int = .byte } },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0b, .src1b, ._, ._ },
.{ ._, ._ge, .cmov, .dst0d, .src1d, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .signed_int = .byte }, .{ .signed_int = .byte } },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .mem } },
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0b, .src1b, ._, ._ },
.{ ._, ._nge, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .dst0b, .src1b, ._, ._ },
} },
}, .{
.required_features = .{ .cmov, null, null, null },
.src_constraints = .{ .{ .unsigned_int = .byte }, .{ .unsigned_int = .byte } },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0b, .src1b, ._, ._ },
.{ ._, ._ae, .cmov, .dst0d, .src1d, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .unsigned_int = .byte }, .{ .unsigned_int = .byte } },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .mem } },
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0b, .src1b, ._, ._ },
.{ ._, ._nae, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .dst0b, .src1b, ._, ._ },
} },
}, .{
.required_features = .{ .cmov, null, null, null },
.src_constraints = .{ .{ .signed_int = .word }, .{ .signed_int = .word } },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .mem } },
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0w, .src1w, ._, ._ },
.{ ._, ._ge, .cmov, .dst0w, .src1w, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .signed_int = .word }, .{ .signed_int = .word } },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .mem } },
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0w, .src1w, ._, ._ },
.{ ._, ._nge, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .dst0w, .src1w, ._, ._ },
} },
}, .{
.required_features = .{ .cmov, null, null, null },
.src_constraints = .{ .{ .unsigned_int = .word }, .{ .unsigned_int = .word } },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .mem } },
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0w, .src1w, ._, ._ },
.{ ._, ._ae, .cmov, .dst0w, .src1w, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .unsigned_int = .word }, .{ .unsigned_int = .word } },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .mem } },
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0w, .src1w, ._, ._ },
.{ ._, ._nae, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .dst0w, .src1w, ._, ._ },
} },
}, .{
.required_features = .{ .cmov, null, null, null },
.src_constraints = .{ .{ .signed_int = .dword }, .{ .signed_int = .dword } },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .mem } },
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0d, .src1d, ._, ._ },
.{ ._, ._ge, .cmov, .dst0d, .src1d, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .signed_int = .dword }, .{ .signed_int = .dword } },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .mem } },
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0d, .src1d, ._, ._ },
.{ ._, ._nge, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .dst0d, .src1d, ._, ._ },
} },
}, .{
.required_features = .{ .cmov, null, null, null },
.src_constraints = .{ .{ .unsigned_int = .dword }, .{ .unsigned_int = .dword } },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .mem } },
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0d, .src1d, ._, ._ },
.{ ._, ._ae, .cmov, .dst0d, .src1d, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .unsigned_int = .dword }, .{ .unsigned_int = .dword } },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .mem } },
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0d, .src1d, ._, ._ },
.{ ._, ._nae, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .dst0d, .src1d, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .cmov, null, null },
.src_constraints = .{ .{ .signed_int = .qword }, .{ .signed_int = .qword } },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .mem } },
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0q, .src1q, ._, ._ },
.{ ._, ._ge, .cmov, .dst0q, .src1q, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .signed_int = .qword }, .{ .signed_int = .qword } },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .mem } },
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0q, .src1q, ._, ._ },
.{ ._, ._nge, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .dst0q, .src1q, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .cmov, null, null },
.src_constraints = .{ .{ .unsigned_int = .qword }, .{ .unsigned_int = .qword } },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .mem } },
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0q, .src1q, ._, ._ },
.{ ._, ._ae, .cmov, .dst0q, .src1q, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .unsigned_int = .qword }, .{ .unsigned_int = .qword } },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .mem } },
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0q, .src1q, ._, ._ },
.{ ._, ._nae, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .dst0q, .src1q, ._, ._ },
} },
}, .{
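// Arbitrary-width @min reuses the sbb limb-compare loop from @max; cmovge (or
// a branch) then selects the pointer of the smaller operand before rep movsq
// copies it out.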
.required_features = .{ .@"64bit", .cmov, null, null },
.src_constraints = .{ .any_signed_int, .any_signed_int },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .reg = .rsi } },
.{ .type = .u64, .kind = .{ .reg = .rdi } },
.{ .type = .u64, .kind = .{ .reg = .rcx } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sia(1, .src0, .sub_size_div_8), ._, ._ },
.{ ._, ._c, .cl, ._, ._, ._, ._ },
.{ .@"0:", ._, .mov, .tmp1q, .memsiad(.src0q, .@"8", .tmp0, .add_size, -8), ._, ._ },
.{ ._, ._, .sbb, .tmp1q, .memsiad(.src1q, .@"8", .tmp0, .add_size, -8), ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .mov, .tmp1q, .memad(.src0q, .add_size, -8), ._, ._ },
.{ ._, ._, .sbb, .tmp1q, .memad(.src1q, .add_size, -8), ._, ._ },
.{ ._, ._, .lea, .tmp0p, .mem(.src0), ._, ._ },
.{ ._, ._, .lea, .tmp1p, .mem(.src1), ._, ._ },
.{ ._, ._ge, .cmov, .tmp0p, .tmp1p, ._, ._ },
.{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
.{ ._, ._, .mov, .tmp2d, .sa(.src0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .any_signed_int, .any_signed_int },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .reg = .rsi } },
.{ .type = .u64, .kind = .{ .reg = .rdi } },
.{ .type = .u64, .kind = .{ .reg = .rcx } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sia(1, .src0, .sub_size_div_8), ._, ._ },
.{ ._, ._c, .cl, ._, ._, ._, ._ },
.{ .@"0:", ._, .mov, .tmp1q, .memsiad(.src0q, .@"8", .tmp0, .add_size, -8), ._, ._ },
.{ ._, ._, .sbb, .tmp1q, .memsiad(.src1q, .@"8", .tmp0, .add_size, -8), ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .mov, .tmp1q, .memad(.src0q, .add_size, -8), ._, ._ },
.{ ._, ._, .sbb, .tmp1q, .memad(.src1q, .add_size, -8), ._, ._ },
.{ ._, ._, .lea, .tmp0p, .mem(.src0), ._, ._ },
.{ ._, ._nge, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .lea, .tmp0p, .mem(.src1), ._, ._ },
.{ .@"0:", ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
.{ ._, ._, .mov, .tmp2d, .sa(.src0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .cmov, null, null },
.src_constraints = .{ .any_unsigned_int, .any_unsigned_int },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .reg = .rsi } },
.{ .type = .u64, .kind = .{ .reg = .rdi } },
.{ .type = .u64, .kind = .{ .reg = .rcx } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size_div_8), ._, ._ },
.{ ._, ._c, .cl, ._, ._, ._, ._ },
.{ .@"0:", ._, .mov, .tmp1q, .memsia(.src0q, .@"8", .tmp0, .add_size), ._, ._ },
.{ ._, ._, .sbb, .tmp1q, .memsia(.src1q, .@"8", .tmp0, .add_size), ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .lea, .tmp0p, .mem(.src0), ._, ._ },
.{ ._, ._, .lea, .tmp1p, .mem(.src1), ._, ._ },
.{ ._, ._ae, .cmov, .tmp0p, .tmp1p, ._, ._ },
.{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
.{ ._, ._, .mov, .tmp2d, .sa(.src0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .any_unsigned_int, .any_unsigned_int },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .reg = .rsi } },
.{ .type = .u64, .kind = .{ .reg = .rdi } },
.{ .type = .u64, .kind = .{ .reg = .rcx } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size_div_8), ._, ._ },
.{ ._, ._c, .cl, ._, ._, ._, ._ },
.{ .@"0:", ._, .mov, .tmp1q, .memsia(.src0q, .@"8", .tmp0, .add_size), ._, ._ },
.{ ._, ._, .sbb, .tmp1q, .memsia(.src1q, .@"8", .tmp0, .add_size), ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .lea, .tmp0p, .mem(.src0), ._, ._ },
.{ ._, ._nae, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .lea, .tmp0p, .mem(.src1), ._, ._ },
.{ .@"0:", ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
.{ ._, ._, .mov, .tmp2d, .sa(.src0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
} },
}, .{
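// Vector i8 @min maps directly onto vpminsb/pminsb where SSE4.1 or AVX is
// available.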
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .scalar_signed_int = .{ .of = .xword, .is = .byte } },
.{ .scalar_signed_int = .{ .of = .xword, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, .vp_b, .mins, .dst0x, .src0x, .src1x, ._ },
} },
}, .{
.required_features = .{ .sse4_1, null, null, null },
.src_constraints = .{
.{ .scalar_signed_int = .{ .of = .xword, .is = .byte } },
.{ .scalar_signed_int = .{ .of = .xword, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_mut_sse, .mem } },
.{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_sse, .to_sse } },
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, .p_b, .mins, .dst0x, .src1x, ._, ._ },
} },
}, .{
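// SSE2 has no pminsb: pcmpgtb builds the greater-than mask and an and/andn/or
// sequence assembles the elementwise minimum from it.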
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{
.{ .scalar_signed_int = .{ .of = .xword, .is = .byte } },
.{ .scalar_signed_int = .{ .of = .xword, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_mut_sse, .mem } },
.{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_sse, .to_sse } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, ._dqa, .mov, .dst0x, .src1x, ._, ._ },
.{ ._, .p_b, .cmpgt, .dst0x, .src0x, ._, ._ },
.{ ._, .p_, .@"and", .src0x, .dst0x, ._, ._ },
.{ ._, .p_, .andn, .dst0x, .src1x, ._, ._ },
.{ ._, .p_, .@"or", .dst0x, .src0x, ._, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{
.{ .scalar_signed_int = .{ .of = .yword, .is = .byte } },
.{ .scalar_signed_int = .{ .of = .yword, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, .vp_b, .mins, .dst0y, .src0y, .src1y, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_signed_int = .{ .of = .yword, .is = .byte } },
.{ .multiple_scalar_signed_int = .{ .of = .yword, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_32_i8, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_dqa, .mov, .tmp1y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
.{ ._, .vp_b, .mins, .tmp1y, .tmp1y, .memia(.src1y, .tmp0, .add_size), ._ },
.{ ._, .v_dqa, .mov, .memia(.dst0y, .tmp0, .add_size), .tmp1y, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .byte } },
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_16_i8, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .vp_b, .mins, .tmp1x, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._ },
.{ ._, .v_dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse4_1, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .byte } },
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_16_i8, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .p_b, .mins, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .byte } },
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_16_i8, .kind = .{ .rc = .sse } },
.{ .type = .vector_16_i8, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, ._dqa, .mov, .tmp2x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, .p_b, .cmpgt, .tmp1x, .tmp2x, ._, ._ },
.{ ._, .p_, .@"and", .tmp2x, .tmp1x, ._, ._ },
.{ ._, .p_, .andn, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .p_, .@"or", .tmp1x, .tmp2x, ._, ._ },
.{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
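// Per-element fallback for i8 vectors: sign-extend each byte with movsx,
// compare, and keep the smaller via cmovge (or a short branch); the
// slow_incdec variants step the counter with add 1 instead of inc.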
.required_features = .{ .cmov, .slow_incdec, null, null },
.src_constraints = .{
.{ .multiple_scalar_signed_int = .{ .of = .byte, .is = .byte } },
.{ .multiple_scalar_signed_int = .{ .of = .byte, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .i8, .kind = .{ .rc = .general_purpose } },
.{ .type = .i8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .movsx, .tmp1d, .memia(.src0b, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .movsx, .tmp2d, .memia(.src1b, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp1b, .tmp2b, ._, ._ },
.{ ._, ._ge, .cmov, .tmp1d, .tmp2d, ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_size), .tmp1b, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .cmov, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_signed_int = .{ .of = .byte, .is = .byte } },
.{ .multiple_scalar_signed_int = .{ .of = .byte, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .i8, .kind = .{ .rc = .general_purpose } },
.{ .type = .i8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .movsx, .tmp1d, .memia(.src0b, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .movsx, .tmp2d, .memia(.src1b, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp1b, .tmp2b, ._, ._ },
.{ ._, ._ge, .cmov, .tmp1d, .tmp2d, ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_size), .tmp1b, ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .slow_incdec, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_signed_int = .{ .of = .byte, .is = .byte } },
.{ .multiple_scalar_signed_int = .{ .of = .byte, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .i8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .movsx, .tmp1d, .memia(.src0b, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp1b, .memia(.src1b, .tmp0, .add_size), ._, ._ },
.{ ._, ._nge, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp1b, .memia(.src1b, .tmp0, .add_size), ._, ._ },
.{ .@"1:", ._, .mov, .memia(.dst0b, .tmp0, .add_size), .tmp1b, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.src_constraints = .{
.{ .multiple_scalar_signed_int = .{ .of = .byte, .is = .byte } },
.{ .multiple_scalar_signed_int = .{ .of = .byte, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .i8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .movsx, .tmp1d, .memia(.src0b, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp1b, .memia(.src1b, .tmp0, .add_size), ._, ._ },
.{ ._, ._nge, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp1b, .memia(.src1b, .tmp0, .add_size), ._, ._ },
.{ .@"1:", ._, .mov, .memia(.dst0b, .tmp0, .add_size), .tmp1b, ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
}, .{
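// u8 @min: pminub has existed since SSE/MMX, so 8-byte vectors can use the MMX
// register file directly and wider ones the SSE/AVX forms below.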
.required_features = .{ .sse, .mmx, null, null },
.src_constraints = .{
.{ .scalar_unsigned_int = .{ .of = .qword, .is = .byte } },
.{ .scalar_unsigned_int = .{ .of = .qword, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_mut_mmx, .mem } },
.{ .src = .{ .mem, .to_mut_mmx }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_mmx, .to_mmx } },
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, .p_b, .minu, .dst0q, .src1q, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .byte } },
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, .vp_b, .minu, .dst0x, .src0x, .src1x, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .byte } },
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_mut_sse, .mem } },
.{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_sse, .to_sse } },
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, .p_b, .minu, .dst0x, .src1x, ._, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{
.{ .scalar_unsigned_int = .{ .of = .yword, .is = .byte } },
.{ .scalar_unsigned_int = .{ .of = .yword, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, .vp_b, .minu, .dst0y, .src0y, .src1y, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .yword, .is = .byte } },
.{ .multiple_scalar_unsigned_int = .{ .of = .yword, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_32_u8, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_dqa, .mov, .tmp1y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
.{ ._, .vp_b, .minu, .tmp1y, .tmp1y, .memia(.src1y, .tmp0, .add_size), ._ },
.{ ._, .v_dqa, .mov, .memia(.dst0y, .tmp0, .add_size), .tmp1y, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .byte } },
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_16_u8, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .vp_b, .minu, .tmp1x, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._ },
.{ ._, .v_dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .byte } },
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_16_u8, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .p_b, .minu, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
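// Scalar u8 loops for targets without usable vector min: movzx + cmp + cmovae
// (or a branch) per element.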
.required_features = .{ .cmov, .slow_incdec, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .byte, .is = .byte } },
.{ .multiple_scalar_unsigned_int = .{ .of = .byte, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp1d, .memia(.src0b, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .movzx, .tmp2d, .memia(.src1b, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp1b, .tmp2b, ._, ._ },
.{ ._, ._ae, .cmov, .tmp1d, .tmp2d, ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_size), .tmp1b, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .cmov, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .byte, .is = .byte } },
.{ .multiple_scalar_unsigned_int = .{ .of = .byte, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp1d, .memia(.src0b, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .movzx, .tmp2d, .memia(.src1b, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp1b, .tmp2b, ._, ._ },
.{ ._, ._ae, .cmov, .tmp1d, .tmp2d, ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_size), .tmp1b, ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .slow_incdec, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .byte, .is = .byte } },
.{ .multiple_scalar_unsigned_int = .{ .of = .byte, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp1d, .memia(.src0b, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp1b, .memia(.src1b, .tmp0, .add_size), ._, ._ },
.{ ._, ._nae, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp1b, .memia(.src1b, .tmp0, .add_size), ._, ._ },
.{ .@"1:", ._, .mov, .memia(.dst0b, .tmp0, .add_size), .tmp1b, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .byte, .is = .byte } },
.{ .multiple_scalar_unsigned_int = .{ .of = .byte, .is = .byte } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp1d, .memia(.src0b, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp1b, .memia(.src1b, .tmp0, .add_size), ._, ._ },
.{ ._, ._nae, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp1b, .memia(.src1b, .tmp0, .add_size), ._, ._ },
.{ .@"1:", ._, .mov, .memia(.dst0b, .tmp0, .add_size), .tmp1b, ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse, .mmx, null, null },
.src_constraints = .{
.{ .scalar_signed_int = .{ .of = .qword, .is = .word } },
.{ .scalar_signed_int = .{ .of = .qword, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mut_mmx, .mem } },
.{ .src = .{ .mem, .to_mut_mmx }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_mmx, .to_mmx } },
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, .p_w, .mins, .dst0q, .src1q, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
|
|
.{ .scalar_signed_int = .{ .of = .xword, .is = .word } },
|
|
.{ .scalar_signed_int = .{ .of = .xword, .is = .word } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_sse, .mem } },
|
|
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_sse, .to_sse } },
|
|
},
|
|
.dst_temps = .{.{ .rc = .sse }},
|
|
.each = .{ .once = &.{
|
|
.{ ._, .vp_w, .mins, .dst0x, .src0x, .src1x, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .sse2, null, null, null },
|
|
.src_constraints = .{
|
|
.{ .scalar_signed_int = .{ .of = .xword, .is = .word } },
|
|
.{ .scalar_signed_int = .{ .of = .xword, .is = .word } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_sse, .mem } },
|
|
.{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_mut_sse, .to_sse } },
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.each = .{ .once = &.{
|
|
.{ ._, .p_w, .mins, .dst0x, .src1x, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .avx2, null, null, null },
|
|
.src_constraints = .{
|
|
.{ .scalar_signed_int = .{ .of = .yword, .is = .word } },
|
|
.{ .scalar_signed_int = .{ .of = .yword, .is = .word } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_sse, .mem } },
|
|
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
|
|
.{ .src = .{ .to_sse, .to_sse } },
|
|
},
|
|
.dst_temps = .{.{ .rc = .sse }},
|
|
.each = .{ .once = &.{
|
|
.{ ._, .vp_w, .mins, .dst0y, .src0y, .src1y, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .avx2, null, null, null },
|
|
.src_constraints = .{
|
|
.{ .multiple_scalar_signed_int = .{ .of = .yword, .is = .word } },
|
|
.{ .multiple_scalar_signed_int = .{ .of = .yword, .is = .word } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .vector_16_i16, .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ .@"0:", .v_dqa, .mov, .tmp1y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .vp_w, .mins, .tmp1y, .tmp1y, .memia(.src1y, .tmp0, .add_size), ._ },
|
|
.{ ._, .v_dqa, .mov, .memia(.dst0y, .tmp0, .add_size), .tmp1y, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .avx, null, null, null },
|
|
.src_constraints = .{
|
|
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .word } },
|
|
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .word } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .vector_8_i16, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .vp_w, .mins, .tmp1x, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._ },
.{ ._, .v_dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .word } },
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_8_i16, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .p_w, .mins, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .cmov, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_signed_int = .{ .of = .word, .is = .word } },
.{ .multiple_scalar_signed_int = .{ .of = .word, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .i16, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .movsx, .tmp1d, .memia(.src0w, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp1w, .memia(.src1w, .tmp0, .add_size), ._, ._ },
.{ ._, ._ge, .cmov, .tmp1w, .memia(.src1w, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .mov, .memia(.dst0w, .tmp0, .add_size), .tmp1w, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(2), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.src_constraints = .{
.{ .multiple_scalar_signed_int = .{ .of = .word, .is = .word } },
.{ .multiple_scalar_signed_int = .{ .of = .word, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .i16, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .movsx, .tmp1d, .memia(.src0w, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp1w, .memia(.src1w, .tmp0, .add_size), ._, ._ },
.{ ._, ._nge, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp1w, .memia(.src1w, .tmp0, .add_size), ._, ._ },
.{ .@"1:", ._, .mov, .memia(.dst0w, .tmp0, .add_size), .tmp1w, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(2), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .word } },
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, .vp_w, .minu, .dst0x, .src0x, .src1x, ._ },
} },
}, .{
.required_features = .{ .sse4_1, null, null, null },
.src_constraints = .{
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .word } },
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mut_sse, .mem } },
.{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_sse, .to_sse } },
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, .p_w, .minu, .dst0x, .src1x, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .word } },
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mut_sse, .mem } },
.{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_sse, .to_sse } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
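// SSE2 has pminsw but no pminuw: the unsigned word minimum is
// computed as a - sat_sub(a, b) using psubusw/psubw.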
.{ ._, ._dqa, .mov, .dst0x, .src0x, ._, ._ },
.{ ._, .p_w, .subus, .src0x, .src1x, ._, ._ },
.{ ._, .p_w, .sub, .dst0x, .src0x, ._, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{
.{ .scalar_unsigned_int = .{ .of = .yword, .is = .word } },
.{ .scalar_unsigned_int = .{ .of = .yword, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, .vp_w, .minu, .dst0y, .src0y, .src1y, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .yword, .is = .word } },
.{ .multiple_scalar_unsigned_int = .{ .of = .yword, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_16_u16, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_dqa, .mov, .tmp1y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
.{ ._, .vp_w, .minu, .tmp1y, .tmp1y, .memia(.src1y, .tmp0, .add_size), ._ },
.{ ._, .v_dqa, .mov, .memia(.dst0y, .tmp0, .add_size), .tmp1y, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .word } },
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_8_u16, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .vp_w, .minu, .tmp1x, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._ },
.{ ._, .v_dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse4_1, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .word } },
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_8_u16, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .p_w, .minu, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .word } },
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_8_u16, .kind = .{ .rc = .sse } },
.{ .type = .vector_8_u16, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, ._dqa, .mov, .tmp2x, .tmp1x, ._, ._ },
.{ ._, .p_w, .subus, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, .p_w, .sub, .tmp2x, .tmp1x, ._, ._ },
.{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp2x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .cmov, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .word, .is = .word } },
.{ .multiple_scalar_unsigned_int = .{ .of = .word, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u16, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp1d, .memia(.src0w, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp1w, .memia(.src1w, .tmp0, .add_size), ._, ._ },
.{ ._, ._ae, .cmov, .tmp1w, .memia(.src1w, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .mov, .memia(.dst0w, .tmp0, .add_size), .tmp1w, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(2), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .word, .is = .word } },
.{ .multiple_scalar_unsigned_int = .{ .of = .word, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u16, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp1d, .memia(.src0w, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp1w, .memia(.src1w, .tmp0, .add_size), ._, ._ },
.{ ._, ._nae, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp1w, .memia(.src1w, .tmp0, .add_size), ._, ._ },
.{ .@"1:", ._, .mov, .memia(.dst0w, .tmp0, .add_size), .tmp1w, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(2), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .scalar_signed_int = .{ .of = .xword, .is = .dword } },
.{ .scalar_signed_int = .{ .of = .xword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, .vp_d, .mins, .dst0x, .src0x, .src1x, ._ },
} },
}, .{
.required_features = .{ .sse4_1, null, null, null },
.src_constraints = .{
.{ .scalar_signed_int = .{ .of = .xword, .is = .dword } },
.{ .scalar_signed_int = .{ .of = .xword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mut_sse, .mem } },
.{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_sse, .to_sse } },
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, .p_d, .mins, .dst0x, .src1x, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{
.{ .scalar_signed_int = .{ .of = .xword, .is = .dword } },
.{ .scalar_signed_int = .{ .of = .xword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mut_sse, .mem } },
.{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_sse, .to_sse } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
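// pminsd requires SSE4.1; on plain SSE2, build a greater-than mask
// with pcmpgtd and blend the operands via pand/pandn/por.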
.{ ._, ._dqa, .mov, .dst0x, .src1x, ._, ._ },
.{ ._, .p_d, .cmpgt, .dst0x, .src0x, ._, ._ },
.{ ._, .p_, .@"and", .src0x, .dst0x, ._, ._ },
.{ ._, .p_, .andn, .dst0x, .src1x, ._, ._ },
.{ ._, .p_, .@"or", .dst0x, .src0x, ._, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{
.{ .scalar_signed_int = .{ .of = .yword, .is = .dword } },
.{ .scalar_signed_int = .{ .of = .yword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, .vp_d, .mins, .dst0y, .src0y, .src1y, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_signed_int = .{ .of = .yword, .is = .dword } },
.{ .multiple_scalar_signed_int = .{ .of = .yword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_8_i32, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_dqa, .mov, .tmp1y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
.{ ._, .vp_d, .mins, .tmp1y, .tmp1y, .memia(.src1y, .tmp0, .add_size), ._ },
.{ ._, .v_dqa, .mov, .memia(.dst0y, .tmp0, .add_size), .tmp1y, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .dword } },
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_4_i32, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .vp_d, .mins, .tmp1x, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._ },
.{ ._, .v_dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse4_1, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .dword } },
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_4_i32, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .p_d, .mins, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .dword } },
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_4_i32, .kind = .{ .rc = .sse } },
.{ .type = .vector_4_i32, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, ._dqa, .mov, .tmp2x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, .p_d, .cmpgt, .tmp1x, .tmp2x, ._, ._ },
.{ ._, .p_, .@"and", .tmp2x, .tmp1x, ._, ._ },
.{ ._, .p_, .andn, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .p_, .@"or", .tmp1x, .tmp2x, ._, ._ },
.{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .cmov, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_signed_int = .{ .of = .dword, .is = .dword } },
.{ .multiple_scalar_signed_int = .{ .of = .dword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .i32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1d, .memia(.src0d, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp1d, .memia(.src1d, .tmp0, .add_size), ._, ._ },
.{ ._, ._ge, .cmov, .tmp1d, .memia(.src1d, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .mov, .memia(.dst0d, .tmp0, .add_size), .tmp1d, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(4), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.src_constraints = .{
.{ .multiple_scalar_signed_int = .{ .of = .dword, .is = .dword } },
.{ .multiple_scalar_signed_int = .{ .of = .dword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .i32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1d, .memia(.src0d, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp1d, .memia(.src1d, .tmp0, .add_size), ._, ._ },
.{ ._, ._nge, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp1d, .memia(.src1d, .tmp0, .add_size), ._, ._ },
.{ .@"1:", ._, .mov, .memia(.dst0d, .tmp0, .add_size), .tmp1d, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(4), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, .vp_d, .minu, .dst0x, .src0x, .src1x, ._ },
} },
}, .{
.required_features = .{ .sse4_1, null, null, null },
.src_constraints = .{
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mut_sse, .mem } },
.{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_sse, .to_sse } },
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, .p_d, .minu, .dst0x, .src1x, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mut_sse, .mem } },
.{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_sse, .to_sse } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .smin_mem = .{ .ref = .src0, .vectorize_to = .xword } } },
.{ .type = .vector_4_u32, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
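// pcmpgtd is a signed compare; xoring both operands with the sign
// bit (the smin_mem constant) biases them so the signed compare
// orders them as unsigned values before the mask-and-blend.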
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, ._dqa, .mov, .dst0x, .lea(.xword, .tmp0), ._, ._ },
.{ ._, ._dqa, .mov, .tmp2x, .dst0x, ._, ._ },
.{ ._, .p_, .xor, .dst0x, .src1x, ._, ._ },
.{ ._, .p_, .xor, .tmp2x, .src0x, ._, ._ },
.{ ._, .p_d, .cmpgt, .dst0x, .tmp2x, ._, ._ },
.{ ._, .p_, .@"and", .src0x, .dst0x, ._, ._ },
.{ ._, .p_, .andn, .dst0x, .src1x, ._, ._ },
.{ ._, .p_, .@"or", .dst0x, .src0x, ._, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{
.{ .scalar_unsigned_int = .{ .of = .yword, .is = .dword } },
.{ .scalar_unsigned_int = .{ .of = .yword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, .vp_d, .minu, .dst0y, .src0y, .src1y, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .yword, .is = .dword } },
.{ .multiple_scalar_unsigned_int = .{ .of = .yword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_8_u32, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_dqa, .mov, .tmp1y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
.{ ._, .vp_d, .minu, .tmp1y, .tmp1y, .memia(.src1y, .tmp0, .add_size), ._ },
.{ ._, .v_dqa, .mov, .memia(.dst0y, .tmp0, .add_size), .tmp1y, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_4_u32, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .vp_d, .minu, .tmp1x, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._ },
.{ ._, .v_dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse4_1, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_4_u32, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .p_d, .minu, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .smin_mem = .{ .ref = .src0, .vectorize_to = .xword } } },
.{ .type = .vector_4_u32, .kind = .{ .rc = .sse } },
.{ .type = .vector_4_u32, .kind = .{ .rc = .sse } },
.{ .type = .vector_4_u32, .kind = .{ .rc = .sse } },
.{ .type = .vector_4_u32, .kind = .{ .rc = .sse } },
.{ .type = .vector_4_u32, .kind = .{ .rc = .sse } },
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, ._dqa, .mov, .tmp2x, .lea(.xword, .tmp0), ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._dqa, .mov, .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, ._dqa, .mov, .tmp4x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, ._dqa, .mov, .tmp5x, .tmp3x, ._, ._ },
.{ ._, ._dqa, .mov, .tmp6x, .tmp4x, ._, ._ },
.{ ._, .p_, .xor, .tmp5x, .tmp2x, ._, ._ },
.{ ._, .p_, .xor, .tmp6x, .tmp2x, ._, ._ },
.{ ._, .p_d, .cmpgt, .tmp5x, .tmp6x, ._, ._ },
.{ ._, .p_, .@"and", .tmp4x, .tmp5x, ._, ._ },
.{ ._, .p_, .andn, .tmp5x, .tmp3x, ._, ._ },
.{ ._, .p_, .@"or", .tmp4x, .tmp5x, ._, ._ },
.{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp4x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .cmov, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .dword, .is = .dword } },
.{ .multiple_scalar_unsigned_int = .{ .of = .dword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1d, .memia(.src0d, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp1d, .memia(.src1d, .tmp0, .add_size), ._, ._ },
.{ ._, ._ae, .cmov, .tmp1d, .memia(.src1d, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .mov, .memia(.dst0d, .tmp0, .add_size), .tmp1d, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(4), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .dword, .is = .dword } },
.{ .multiple_scalar_unsigned_int = .{ .of = .dword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1d, .memia(.src0d, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp1d, .memia(.src1d, .tmp0, .add_size), ._, ._ },
.{ ._, ._nae, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp1d, .memia(.src1d, .tmp0, .add_size), ._, ._ },
.{ .@"1:", ._, .mov, .memia(.dst0d, .tmp0, .add_size), .tmp1d, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(4), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .scalar_signed_int = .{ .of = .xword, .is = .qword } },
.{ .scalar_signed_int = .{ .of = .xword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
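// No packed signed 64-bit min exists below AVX-512, so compare with
// vpcmpgtq and select the smaller lanes through vpblendvb.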
.{ ._, .vp_q, .cmpgt, .dst0x, .src0x, .src1x, ._ },
.{ ._, .vp_b, .blendv, .dst0x, .src0x, .src1x, .dst0x },
} },
}, .{
.required_features = .{ .sse4_2, null, null, null },
.src_constraints = .{
.{ .scalar_signed_int = .{ .of = .xword, .is = .qword } },
.{ .scalar_signed_int = .{ .of = .xword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mut_sse, .mem } },
.{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_sse, .to_sse } },
},
.extra_temps = .{
.{ .type = .vector_2_i64, .kind = .{ .reg = .xmm0 } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
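// Non-VEX pblendvb takes its mask implicitly in xmm0, hence the
// fixed-register temporary holding the compare result.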
.{ ._, ._dqa, .mov, .tmp0x, .src0x, ._, ._ },
.{ ._, .p_q, .cmpgt, .tmp0x, .src1x, ._, ._ },
.{ ._, .p_b, .blendv, .dst0x, .src1x, .tmp0x, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{
.{ .scalar_signed_int = .{ .of = .yword, .is = .qword } },
.{ .scalar_signed_int = .{ .of = .yword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, .vp_q, .cmpgt, .dst0y, .src0y, .src1y, ._ },
.{ ._, .vp_b, .blendv, .dst0y, .src0y, .src1y, .dst0y },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_signed_int = .{ .of = .yword, .is = .qword } },
.{ .multiple_scalar_signed_int = .{ .of = .yword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_4_i64, .kind = .{ .rc = .sse } },
.{ .type = .vector_4_i64, .kind = .{ .rc = .sse } },
.{ .type = .vector_4_i64, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_dqa, .mov, .tmp1y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
.{ ._, .v_dqa, .mov, .tmp2y, .memia(.src1y, .tmp0, .add_size), ._, ._ },
.{ ._, .vp_q, .cmpgt, .tmp3y, .tmp1y, .tmp2y, ._ },
.{ ._, .vp_b, .blendv, .tmp1y, .tmp1y, .tmp2y, .tmp3y },
.{ ._, .v_dqa, .mov, .memia(.dst0y, .tmp0, .add_size), .tmp1y, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .qword } },
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_2_i64, .kind = .{ .rc = .sse } },
.{ .type = .vector_2_i64, .kind = .{ .rc = .sse } },
.{ .type = .vector_2_i64, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .v_dqa, .mov, .tmp2x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, .vp_q, .cmpgt, .tmp3x, .tmp1x, .tmp2x, ._ },
.{ ._, .vp_b, .blendv, .tmp1x, .tmp1x, .tmp2x, .tmp3x },
.{ ._, .v_dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse4_2, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .qword } },
.{ .multiple_scalar_signed_int = .{ .of = .xword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_2_i64, .kind = .{ .rc = .sse } },
.{ .type = .vector_2_i64, .kind = .{ .rc = .sse } },
.{ .type = .vector_2_i64, .kind = .{ .reg = .xmm0 } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._dqa, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, ._dqa, .mov, .tmp2x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, ._dqa, .mov, .tmp3x, .tmp1x, ._, ._ },
.{ ._, .p_q, .cmpgt, .tmp3x, .tmp2x, ._, ._ },
.{ ._, .p_b, .blendv, .tmp1x, .tmp2x, .tmp3x, ._ },
.{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .cmov, null, null },
.src_constraints = .{
.{ .multiple_scalar_signed_int = .{ .of = .qword, .is = .qword } },
.{ .multiple_scalar_signed_int = .{ .of = .qword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .i64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp1q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, ._ge, .cmov, .tmp1q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .mov, .memia(.dst0q, .tmp0, .add_size), .tmp1q, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{
.{ .multiple_scalar_signed_int = .{ .of = .qword, .is = .qword } },
.{ .multiple_scalar_signed_int = .{ .of = .qword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .i64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp1q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, ._nge, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp1q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ .@"1:", ._, .mov, .memia(.dst0q, .tmp0, .add_size), .tmp1q, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .qword } },
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .smin_mem = .{ .ref = .src0, .vectorize_to = .none } } },
.{ .type = .vector_2_u64, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
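// Unsigned 64-bit compares are also unavailable: movddup broadcasts
// the sign-bit constant, both operands are xored with it, and the
// signed vpcmpgtq result then drives the blend.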
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, .v_, .movddup, .tmp2x, .lea(.qword, .tmp0), ._, ._ },
.{ ._, .vp_, .xor, .dst0x, .tmp2x, .src0x, ._ },
.{ ._, .vp_, .xor, .tmp2x, .tmp2x, .src1x, ._ },
.{ ._, .vp_q, .cmpgt, .dst0x, .dst0x, .tmp2x, ._ },
.{ ._, .vp_b, .blendv, .dst0x, .src0x, .src1x, .dst0x },
} },
}, .{
.required_features = .{ .sse4_2, null, null, null },
.src_constraints = .{
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .qword } },
.{ .scalar_unsigned_int = .{ .of = .xword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mut_sse, .mem } },
.{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_sse, .to_sse } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .smin_mem = .{ .ref = .src0, .vectorize_to = .none } } },
.{ .type = .vector_2_u64, .kind = .{ .reg = .xmm0 } },
.{ .type = .vector_2_u64, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, ._, .movddup, .tmp2x, .lea(.qword, .tmp0), ._, ._ },
.{ ._, ._dqa, .mov, .tmp3x, .tmp2x, ._, ._ },
.{ ._, .p_, .xor, .tmp2x, .src0x, ._, ._ },
.{ ._, .p_, .xor, .tmp3x, .src1x, ._, ._ },
.{ ._, .p_q, .cmpgt, .tmp2x, .tmp3x, ._, ._ },
.{ ._, .p_b, .blendv, .dst0x, .src1x, .tmp2x, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{
.{ .scalar_unsigned_int = .{ .of = .yword, .is = .qword } },
.{ .scalar_unsigned_int = .{ .of = .yword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .smin_mem = .{ .ref = .src0, .vectorize_to = .none } } },
.{ .type = .vector_4_u64, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, .vp_q, .broadcast, .tmp2y, .lea(.qword, .tmp0), ._, ._ },
.{ ._, .vp_, .xor, .dst0y, .tmp2y, .src0y, ._ },
.{ ._, .vp_, .xor, .tmp2y, .tmp2y, .src1y, ._ },
.{ ._, .vp_q, .cmpgt, .dst0y, .dst0y, .tmp2y, ._ },
.{ ._, .vp_b, .blendv, .dst0y, .src0y, .src1y, .dst0y },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .yword, .is = .qword } },
.{ .multiple_scalar_unsigned_int = .{ .of = .yword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .smin_mem = .{ .ref = .src0, .vectorize_to = .none } } },
.{ .type = .vector_4_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_4_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_4_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_4_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_4_u64, .kind = .{ .rc = .sse } },
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, .vp_q, .broadcast, .tmp2y, .lea(.qword, .tmp0), ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_dqa, .mov, .tmp3y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
.{ ._, .v_dqa, .mov, .tmp4y, .memia(.src1y, .tmp0, .add_size), ._, ._ },
.{ ._, .vp_, .xor, .tmp5y, .tmp3y, .tmp2y, ._ },
.{ ._, .vp_, .xor, .tmp6y, .tmp4y, .tmp2y, ._ },
.{ ._, .vp_q, .cmpgt, .tmp5y, .tmp5y, .tmp6y, ._ },
.{ ._, .vp_b, .blendv, .tmp3y, .tmp3y, .tmp4y, .tmp5y },
.{ ._, .v_dqa, .mov, .memia(.dst0y, .tmp0, .add_size), .tmp3y, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .qword } },
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .smin_mem = .{ .ref = .src0, .vectorize_to = .none } } },
.{ .type = .vector_2_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_2_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_2_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_2_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_2_u64, .kind = .{ .rc = .sse } },
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, .v_, .movddup, .tmp2x, .lea(.qword, .tmp0), ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_dqa, .mov, .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .v_dqa, .mov, .tmp4x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, .vp_, .xor, .tmp5x, .tmp3x, .tmp2x, ._ },
.{ ._, .vp_, .xor, .tmp6x, .tmp4x, .tmp2x, ._ },
.{ ._, .vp_q, .cmpgt, .tmp5x, .tmp5x, .tmp6x, ._ },
.{ ._, .vp_b, .blendv, .tmp3x, .tmp3x, .tmp4x, .tmp5x },
.{ ._, .v_dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp3x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse4_2, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .qword } },
.{ .multiple_scalar_unsigned_int = .{ .of = .xword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .smin_mem = .{ .ref = .src0, .vectorize_to = .none } } },
.{ .type = .vector_2_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_2_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_2_u64, .kind = .{ .rc = .sse } },
.{ .type = .vector_2_u64, .kind = .{ .reg = .xmm0 } },
.{ .type = .vector_2_u64, .kind = .{ .rc = .sse } },
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, ._, .movddup, .tmp2x, .lea(.qword, .tmp0), ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._dqa, .mov, .tmp5x, .tmp2x, ._, ._ },
.{ ._, ._dqa, .mov, .tmp6x, .tmp2x, ._, ._ },
.{ ._, ._dqa, .mov, .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, ._dqa, .mov, .tmp4x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, .p_, .xor, .tmp5x, .tmp3x, ._, ._ },
.{ ._, .p_, .xor, .tmp6x, .tmp4x, ._, ._ },
.{ ._, .p_q, .cmpgt, .tmp5x, .tmp6x, ._, ._ },
.{ ._, .p_b, .blendv, .tmp3x, .tmp4x, .tmp5x, ._ },
.{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp3x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .cmov, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .qword, .is = .qword } },
.{ .multiple_scalar_unsigned_int = .{ .of = .qword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp1q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, ._ae, .cmov, .tmp1q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .mov, .memia(.dst0q, .tmp0, .add_size), .tmp1q, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{
.{ .multiple_scalar_unsigned_int = .{ .of = .qword, .is = .qword } },
.{ .multiple_scalar_unsigned_int = .{ .of = .qword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp1q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, ._nae, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp1q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ .@"1:", ._, .mov, .memia(.dst0q, .tmp0, .add_size), .tmp1q, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .cmov, null, null },
.src_constraints = .{ .any_scalar_signed_int, .any_scalar_signed_int },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .isize, .kind = .{ .reg = .rsi } },
.{ .type = .u64, .kind = .{ .reg = .rdi } },
.{ .type = .u64, .kind = .{ .reg = .rcx } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ .@"0:", ._, .mov, .tmp1d, .sia(-1, .none, .add_src0_elem_size_div_8), ._, ._ },
|
|
.{ ._, ._c, .cl, ._, ._, ._, ._ },
|
|
.{ .@"1:", ._, .mov, .tmp2q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .sbb, .tmp2q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .lea, .tmp0p, .lead(.none, .tmp0, 8), ._, ._ },
|
|
.{ ._, ._c, .de, .tmp1d, ._, ._, ._ },
|
|
.{ ._, ._nz, .j, .@"1b", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp2q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .sbb, .tmp2q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1p, .memiad(.src0, .tmp0, .add_size_sub_elem_size, 8), ._, ._ },
|
|
.{ ._, ._, .lea, .tmp2p, .memiad(.src1, .tmp0, .add_size_sub_elem_size, 8), ._, ._ },
|
|
.{ ._, ._ge, .cmov, .tmp1p, .tmp2p, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp2p, .memiad(.dst0, .tmp0, .add_size_sub_elem_size, 8), ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3d, .sa(.none, .add_src0_elem_size_div_8), ._, ._ },
|
|
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", null, null, null },
|
|
.src_constraints = .{ .any_scalar_signed_int, .any_scalar_signed_int },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .isize, .kind = .{ .reg = .rsi } },
|
|
.{ .type = .u64, .kind = .{ .reg = .rdi } },
|
|
.{ .type = .u64, .kind = .{ .reg = .rcx } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ .@"0:", ._, .mov, .tmp1d, .sia(-1, .none, .add_src0_elem_size_div_8), ._, ._ },
|
|
.{ ._, ._c, .cl, ._, ._, ._, ._ },
|
|
.{ .@"1:", ._, .mov, .tmp2q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .sbb, .tmp2q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .lea, .tmp0p, .lead(.none, .tmp0, 8), ._, ._ },
|
|
.{ ._, ._c, .de, .tmp1d, ._, ._, ._ },
|
|
.{ ._, ._nz, .j, .@"1b", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp2q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .sbb, .tmp2q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1p, .memiad(.src0, .tmp0, .add_size_sub_elem_size, 8), ._, ._ },
|
|
.{ ._, ._nge, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1p, .memiad(.src1, .tmp0, .add_size_sub_elem_size, 8), ._, ._ },
|
|
.{ .@"1:", ._, .lea, .tmp2p, .memiad(.dst0, .tmp0, .add_size_sub_elem_size, 8), ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3d, .sa(.none, .add_src0_elem_size_div_8), ._, ._ },
|
|
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", .cmov, null, null },
|
|
.src_constraints = .{ .any_scalar_unsigned_int, .any_scalar_unsigned_int },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .isize, .kind = .{ .reg = .rsi } },
|
|
.{ .type = .u64, .kind = .{ .reg = .rdi } },
|
|
.{ .type = .u64, .kind = .{ .reg = .rcx } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ .@"0:", ._, .mov, .tmp1d, .sa(.none, .add_src0_elem_size_div_8), ._, ._ },
|
|
.{ ._, ._c, .cl, ._, ._, ._, ._ },
|
|
.{ .@"1:", ._, .mov, .tmp2q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .sbb, .tmp2q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .lea, .tmp0p, .lead(.none, .tmp0, 8), ._, ._ },
|
|
.{ ._, ._c, .de, .tmp1d, ._, ._, ._ },
|
|
.{ ._, ._nz, .j, .@"1b", ._, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1p, .memia(.src0, .tmp0, .add_size_sub_elem_size), ._, ._ },
|
|
.{ ._, ._, .lea, .tmp2p, .memia(.src1, .tmp0, .add_size_sub_elem_size), ._, ._ },
|
|
.{ ._, ._ae, .cmov, .tmp1p, .tmp2p, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp2p, .memia(.dst0, .tmp0, .add_size_sub_elem_size), ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3d, .sa(.none, .add_src0_elem_size_div_8), ._, ._ },
|
|
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp0p, .tmp0p, ._, ._ },
|
|
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", null, null, null },
|
|
.src_constraints = .{ .any_scalar_unsigned_int, .any_scalar_unsigned_int },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .isize, .kind = .{ .reg = .rsi } },
|
|
.{ .type = .u64, .kind = .{ .reg = .rdi } },
|
|
.{ .type = .u64, .kind = .{ .reg = .rcx } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ .@"0:", ._, .mov, .tmp1d, .sa(.none, .add_src0_elem_size_div_8), ._, ._ },
|
|
.{ ._, ._c, .cl, ._, ._, ._, ._ },
|
|
.{ .@"1:", ._, .mov, .tmp2q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .sbb, .tmp2q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .lea, .tmp0p, .lead(.none, .tmp0, 8), ._, ._ },
|
|
.{ ._, ._c, .de, .tmp1d, ._, ._, ._ },
|
|
.{ ._, ._nz, .j, .@"1b", ._, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1p, .memia(.src0, .tmp0, .add_size_sub_elem_size), ._, ._ },
|
|
.{ ._, ._nae, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1p, .memia(.src1, .tmp0, .add_size_sub_elem_size), ._, ._ },
|
|
.{ .@"1:", ._, .lea, .tmp2p, .memia(.dst0, .tmp0, .add_size_sub_elem_size), ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3d, .sa(.none, .add_src0_elem_size_div_8), ._, ._ },
|
|
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp0p, .tmp0p, ._, ._ },
|
|
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
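// f16 with F16C: widen both operands to f32, take vminss, then blend the rhs
// back in wherever the lhs is NaN, and narrow the result with vcvtps2ph.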
.required_features = .{ .f16c, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .word, .is = .word } },
.{ .scalar_float = .{ .of = .word, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .to_sse } },
},
.extra_temps = .{
.{ .kind = .{ .mut_rc = .{ .ref = .src1, .rc = .sse } } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .sse } }},
.each = .{ .once = &.{
.{ ._, .v_ps, .cvtph2, .dst0x, .src0x, ._, ._ },
.{ ._, .v_ps, .cvtph2, .tmp0x, .src1x, ._, ._ },
.{ ._, .v_ss, .cmp, .tmp1x, .dst0x, .dst0x, .vp(.unord) },
.{ ._, .v_ss, .min, .dst0x, .tmp0x, .dst0x, ._ },
.{ ._, .v_ps, .blendv, .dst0x, .dst0x, .tmp0x, .tmp1x },
.{ ._, .v_, .cvtps2ph, .dst0x, .dst0x, .rm(.{}), ._ },
} },
}, .{
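// Without F16C, fall back to a runtime helper call (__fminh) with the
// operands placed in xmm0/xmm1.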
.required_features = .{ .sse, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .word, .is = .word } },
.{ .scalar_float = .{ .of = .word, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .{ .to_reg = .xmm0 }, .{ .to_reg = .xmm1 } } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = "__fminh" } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, ._, .call, .tmp0d, ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .dword, .is = .dword } },
.{ .scalar_float = .{ .of = .dword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .to_sse } },
},
.extra_temps = .{
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .sse } }},
.each = .{ .once = &.{
.{ ._, .v_ss, .cmp, .tmp0x, .src0x, .src0x, .vp(.unord) },
.{ ._, .v_ss, .min, .dst0x, .src1x, .src0x, ._ },
.{ ._, .v_ps, .blendv, .dst0x, .dst0x, .src1x, .tmp0x },
} },
}, .{
.required_features = .{ .sse4_1, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .dword, .is = .dword } },
.{ .scalar_float = .{ .of = .dword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .{ .to_reg = .xmm0 }, .to_sse } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, ._ps, .mova, .dst0x, .src1x, ._, ._ },
.{ ._, ._ss, .min, .dst0x, .src0x, ._, ._ },
.{ ._, ._ss, .cmp, .src0x, .src0x, .vp(.unord), ._ },
.{ ._, ._ps, .blendv, .dst0x, .src1x, .src0x, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .dword, .is = .dword } },
.{ .scalar_float = .{ .of = .dword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mut_sse, .to_sse } },
},
.extra_temps = .{
.{ .type = .vector_4_f32, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, ._ps, .mova, .tmp0x, .src1x, ._, ._ },
.{ ._, ._ss, .min, .tmp0x, .src0x, ._, ._ },
.{ ._, ._ss, .cmp, .dst0x, .src0x, .vp(.ord), ._ },
.{ ._, ._ps, .@"and", .tmp0x, .dst0x, ._, ._ },
.{ ._, ._ps, .andn, .dst0x, .src1x, ._, ._ },
.{ ._, ._ps, .@"or", .dst0x, .tmp0x, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .qword, .is = .qword } },
.{ .scalar_float = .{ .of = .qword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .to_sse } },
},
.extra_temps = .{
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .sse } }},
.each = .{ .once = &.{
.{ ._, .v_sd, .cmp, .tmp0x, .src0x, .src0x, .vp(.unord) },
.{ ._, .v_sd, .min, .dst0x, .src1x, .src0x, ._ },
.{ ._, .v_pd, .blendv, .dst0x, .dst0x, .src1x, .tmp0x },
} },
}, .{
.required_features = .{ .sse4_1, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .qword, .is = .qword } },
.{ .scalar_float = .{ .of = .qword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .{ .to_reg = .xmm0 }, .to_sse } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, ._pd, .mova, .dst0x, .src1x, ._, ._ },
.{ ._, ._sd, .min, .dst0x, .src0x, ._, ._ },
.{ ._, ._sd, .cmp, .src0x, .src0x, .vp(.unord), ._ },
.{ ._, ._pd, .blendv, .dst0x, .src1x, .src0x, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .qword, .is = .qword } },
.{ .scalar_float = .{ .of = .qword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mut_sse, .to_sse } },
},
.extra_temps = .{
.{ .type = .vector_2_f64, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, ._pd, .mova, .tmp0x, .src1x, ._, ._ },
.{ ._, ._sd, .min, .tmp0x, .src0x, ._, ._ },
.{ ._, ._sd, .cmp, .dst0x, .src0x, .vp(.ord), ._ },
.{ ._, ._pd, .@"and", .tmp0x, .dst0x, ._, ._ },
.{ ._, ._pd, .andn, .dst0x, .src1x, ._, ._ },
.{ ._, ._pd, .@"or", .dst0x, .tmp0x, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .qword, .is = .qword } },
.{ .scalar_float = .{ .of = .qword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .{ .to_reg = .xmm0 }, .{ .to_reg = .xmm1 } } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = "fmin" } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, ._, .call, .tmp0d, ._, ._, ._ },
} },
}, .{
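// f80 stays on the x87 stack: fucomi plus fcmovu/fcmovnb (or fnstsw/sahf and
// status-word tests in the fallbacks below) pick whichever operand compares
// below, substituting the rhs when the lhs is NaN.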
.required_features = .{ .x87, .cmov, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .xword, .is = .tbyte } },
.{ .scalar_float = .{ .of = .xword, .is = .tbyte } },
},
.patterns = &.{
.{ .src = .{ .to_x87, .mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .mem, .to_x87 } },
.{ .src = .{ .to_x87, .to_x87 } },
},
.extra_temps = .{
.{ .type = .f80, .kind = .{ .reg = .st7 } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .mut_rc = .{ .ref = .src1, .rc = .x87 } }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, .f_, .ld, .src0t, ._, ._, ._ },
.{ ._, .f_, .ucomi, .tmp0t, .tmp0t, ._, ._ },
.{ ._, .f_u, .cmov, .tmp0t, .src1t, ._, ._ },
.{ ._, .f_, .ucomi, .tmp0t, .src1t, ._, ._ },
.{ ._, .f_nb, .cmov, .tmp0t, .src1t, ._, ._ },
.{ ._, .f_p, .st, .dst0t, ._, ._, ._ },
} },
}, .{
.required_features = .{ .sahf, .x87, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .xword, .is = .tbyte } },
.{ .scalar_float = .{ .of = .xword, .is = .tbyte } },
},
.patterns = &.{
.{ .src = .{ .to_x87, .mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .mem, .to_x87 } },
.{ .src = .{ .to_x87, .to_x87 } },
},
.extra_temps = .{
.{ .type = .f80, .kind = .{ .reg = .st7 } },
.{ .type = .u8, .kind = .{ .reg = .ah } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .mut_rc = .{ .ref = .src1, .rc = .x87 } }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, .f_, .ld, .src0t, ._, ._, ._ },
.{ ._, .f_, .ucom, .tmp0t, ._, ._, ._ },
.{ ._, .fn_sw, .st, .tmp1w, ._, ._, ._ },
.{ ._, ._, .sahf, ._, ._, ._, ._ },
.{ ._, ._p, .j, .@"0f", ._, ._, ._ },
.{ ._, .f_, .ucom, .src1t, ._, ._, ._ },
.{ ._, .fn_sw, .st, .tmp1w, ._, ._, ._ },
.{ ._, ._, .sahf, ._, ._, ._, ._ },
.{ ._, ._b, .j, .@"1f", ._, ._, ._ },
.{ .@"0:", .f_p, .st, .tmp0t, ._, ._, ._ },
.{ ._, .f_, .ld, .src1t, ._, ._, ._ },
.{ .@"1:", .f_p, .st, .dst0t, ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .x87, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .xword, .is = .tbyte } },
.{ .scalar_float = .{ .of = .xword, .is = .tbyte } },
},
.patterns = &.{
.{ .src = .{ .to_x87, .mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .mem, .to_x87 } },
.{ .src = .{ .to_x87, .to_x87 } },
},
.extra_temps = .{
.{ .type = .f80, .kind = .{ .reg = .st7 } },
.{ .type = .u8, .kind = .{ .reg = .ah } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .mut_rc = .{ .ref = .src1, .rc = .x87 } }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, .f_, .ld, .src0t, ._, ._, ._ },
.{ ._, .f_, .xam, ._, ._, ._, ._ },
.{ ._, .fn_sw, .st, .tmp1w, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1b, .si(0b0_1_000_100), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, .f_, .ucom, .src1t, ._, ._, ._ },
.{ ._, .fn_sw, .st, .tmp1w, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1b, .si(0b0_0_000_001), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ .@"0:", .f_p, .st, .tmp0t, ._, ._, ._ },
.{ ._, .f_, .ld, .src1t, ._, ._, ._ },
.{ .@"1:", .f_p, .st, .dst0t, ._, ._, ._ },
} },
}, .{
.required_features = .{ .x87, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .xword, .is = .tbyte } },
.{ .scalar_float = .{ .of = .xword, .is = .tbyte } },
},
.patterns = &.{
.{ .src = .{ .to_x87, .mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .mem, .to_x87 } },
.{ .src = .{ .to_x87, .to_x87 } },
},
.extra_temps = .{
.{ .type = .f80, .kind = .{ .reg = .st7 } },
.{ .type = .u8, .kind = .{ .reg = .ah } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .mut_rc = .{ .ref = .src1, .rc = .x87 } }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, .f_, .ld, .src0t, ._, ._, ._ },
.{ ._, .f_, .ucom, .tmp0t, ._, ._, ._ },
.{ ._, .fn_sw, .st, .tmp1w, ._, ._, ._ },
.{ ._, ._, .sahf, ._, ._, ._, ._ },
.{ ._, ._p, .j, .@"0f", ._, ._, ._ },
.{ ._, .f_, .ucom, .src1t, ._, ._, ._ },
.{ ._, .fn_sw, .st, .tmp1w, ._, ._, ._ },
.{ ._, ._, .sahf, ._, ._, ._, ._ },
.{ ._, ._b, .j, .@"1f", ._, ._, ._ },
.{ .@"0:", .f_p, .st, .tmp0t, ._, ._, ._ },
.{ ._, .f_, .ld, .src1t, ._, ._, ._ },
.{ .@"1:", .f_p, .st, .dst0t, ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .xword, .is = .xword } },
.{ .scalar_float = .{ .of = .xword, .is = .xword } },
},
.patterns = &.{
.{ .src = .{ .{ .to_reg = .xmm0 }, .{ .to_reg = .xmm1 } } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = "fminq" } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, ._, .call, .tmp0d, ._, ._, ._ },
} },
} }) catch |err| switch (err) {
error.SelectFailed => return cg.fail("failed to select {s} {} {} {}", .{
@tagName(air_tag),
cg.typeOf(bin_op.lhs).fmt(pt),
ops[0].tracking(cg),
ops[1].tracking(cg),
}),
else => |e| return e,
};
try res[0].finish(inst, &.{ bin_op.lhs, bin_op.rhs }, &ops, cg);
},
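// .alloc: reserve a stack slot and produce its frame address.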
.alloc => if (use_old) try cg.airAlloc(inst) else {
const ty = air_datas[@intFromEnum(inst)].ty;
const slot = try cg.tempInit(ty, .{ .lea_frame = .{
.index = try cg.allocMemPtr(inst),
} });
try slot.finish(inst, &.{}, &.{}, cg);
},
.inferred_alloc, .inferred_alloc_comptime => unreachable,
.ret_ptr => if (use_old) try cg.airRetPtr(inst) else {
const ty = air_datas[@intFromEnum(inst)].ty;
var slot = switch (cg.ret_mcv.long) {
else => unreachable,
.none => try cg.tempInit(ty, .{ .lea_frame = .{
.index = try cg.allocMemPtr(inst),
} }),
.load_frame => slot: {
var slot = try cg.tempInit(ty, cg.ret_mcv.long);
try slot.toOffset(cg.ret_mcv.short.indirect.off, cg);
break :slot slot;
},
};
try slot.finish(inst, &.{}, &.{}, cg);
},
.assembly => try cg.airAsm(inst),
.bit_and, .bit_or, .xor, .bool_and, .bool_or => |air_tag| if (use_old) try cg.airBinOp(inst, air_tag) else {
const bin_op = air_datas[@intFromEnum(inst)].bin_op;
var ops = try cg.tempsFromOperands(inst, .{ bin_op.lhs, bin_op.rhs });
var res: [1]Temp = undefined;
cg.select(&res, &.{cg.typeOf(bin_op.lhs)}, &ops, switch (@as(Mir.Inst.Tag, switch (air_tag) {
else => unreachable,
.bit_and, .bool_and => .@"and",
.bit_or, .bool_or => .@"or",
.xor => .xor,
})) {
else => unreachable,
inline .@"and", .@"or", .xor => |mir_tag| comptime &.{ .{
.src_constraints = .{ .{ .size = .byte }, .{ .size = .byte } },
.patterns = &.{
.{ .src = .{ .mut_mem, .imm8 } },
.{ .src = .{ .imm8, .mut_mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .imm8 } },
.{ .src = .{ .imm8, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .mut_mem, .to_gpr } },
.{ .src = .{ .to_gpr, .mut_mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .mem } },
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, mir_tag, .dst0b, .src1b, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .size = .word }, .{ .size = .word } },
.patterns = &.{
.{ .src = .{ .mut_mem, .imm16 } },
.{ .src = .{ .imm16, .mut_mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .imm16 } },
.{ .src = .{ .imm16, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .mut_mem, .to_gpr } },
.{ .src = .{ .to_gpr, .mut_mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .mem } },
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, mir_tag, .dst0w, .src1w, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .size = .dword }, .{ .size = .dword } },
.patterns = &.{
.{ .src = .{ .mut_mem, .imm32 } },
.{ .src = .{ .imm32, .mut_mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .imm32 } },
.{ .src = .{ .imm32, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .mut_mem, .to_gpr } },
.{ .src = .{ .to_gpr, .mut_mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .mem } },
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, mir_tag, .dst0d, .src1d, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .size = .qword }, .{ .size = .qword } },
.patterns = &.{
.{ .src = .{ .mut_mem, .simm32 } },
.{ .src = .{ .simm32, .mut_mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .simm32 } },
.{ .src = .{ .simm32, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .mut_mem, .to_gpr } },
.{ .src = .{ .to_gpr, .mut_mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .mem } },
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, mir_tag, .dst0q, .src1q, ._, ._ },
} },
}, .{
.required_features = .{ .mmx, null, null, null },
.src_constraints = .{ .{ .size = .qword }, .{ .size = .qword } },
.patterns = &.{
.{ .src = .{ .to_mut_mm, .mem } },
.{ .src = .{ .mem, .to_mut_mm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_mm, .to_mm } },
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, .p_, mir_tag, .dst0q, .src1q, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .{ .size = .xword }, .{ .size = .xword } },
.patterns = &.{
.{ .src = .{ .to_xmm, .mem } },
.{ .src = .{ .mem, .to_xmm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_xmm, .to_xmm } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, .vp_, mir_tag, .dst0x, .src0x, .src1x, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{ .{ .size = .xword }, .{ .size = .xword } },
.patterns = &.{
.{ .src = .{ .to_mut_xmm, .mem } },
.{ .src = .{ .mem, .to_mut_xmm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_xmm, .to_xmm } },
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, .p_, mir_tag, .dst0x, .src1x, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{ .{ .size = .xword }, .{ .size = .xword } },
.patterns = &.{
.{ .src = .{ .to_mut_xmm, .mem } },
.{ .src = .{ .mem, .to_mut_xmm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_xmm, .to_xmm } },
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, ._ps, mir_tag, .dst0x, .src1x, ._, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{ .{ .size = .yword }, .{ .size = .yword } },
.patterns = &.{
.{ .src = .{ .to_ymm, .mem } },
.{ .src = .{ .mem, .to_ymm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_ymm, .to_ymm } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, .vp_, mir_tag, .dst0y, .src0y, .src1y, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .{ .size = .yword }, .{ .size = .yword } },
.patterns = &.{
.{ .src = .{ .to_ymm, .mem } },
.{ .src = .{ .mem, .to_ymm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_ymm, .to_ymm } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, .v_pd, mir_tag, .dst0y, .src0y, .src1y, ._ },
} },
}, .{
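// Operands wider than any register: loop over both sources in memory,
// one vector (or general-purpose) chunk per iteration.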
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{ .{ .multiple_size = .yword }, .{ .multiple_size = .yword } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_dqu, .mov, .tmp1y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
.{ ._, .vp_, mir_tag, .tmp1y, .tmp1y, .memia(.src1y, .tmp0, .add_size), ._ },
.{ ._, .v_dqu, .mov, .memia(.dst0y, .tmp0, .add_size), .tmp1y, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .{ .multiple_size = .yword }, .{ .multiple_size = .yword } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_pd, .movu, .tmp1y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
.{ ._, .v_pd, mir_tag, .tmp1y, .tmp1y, .memia(.src1y, .tmp0, .add_size), ._ },
.{ ._, .v_pd, .movu, .memia(.dst0y, .tmp0, .add_size), .tmp1y, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .{ .multiple_size = .xword }, .{ .multiple_size = .xword } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_dqu, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .vp_, mir_tag, .tmp1x, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._ },
.{ ._, .v_dqu, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{ .{ .multiple_size = .xword }, .{ .multiple_size = .xword } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._dqu, .mov, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .p_, mir_tag, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, ._dqu, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{ .{ .multiple_size = .xword }, .{ .multiple_size = .xword } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._ps, .movu, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, ._ps, mir_tag, .tmp1x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, ._ps, .movu, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .mmx, null, null, null },
.src_constraints = .{ .{ .multiple_size = .qword }, .{ .multiple_size = .qword } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .rc = .mmx } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._q, .mov, .tmp1q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, .p_, mir_tag, .tmp1q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, ._q, .mov, .memia(.dst0q, .tmp0, .add_size), .tmp1q, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .multiple_size = .qword }, .{ .multiple_size = .qword } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1p, .memia(.src0p, .tmp0, .add_size), ._, ._ },
.{ ._, ._, mir_tag, .tmp1p, .memia(.src1p, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .mov, .memia(.dst0p, .tmp0, .add_size), .tmp1p, ._, ._ },
.{ ._, ._, .add, .tmp0p, .sa(.tmp1, .add_size), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
} },
}) catch |err| switch (err) {
error.SelectFailed => return cg.fail("failed to select {s} {} {} {}", .{
@tagName(air_tag),
cg.typeOf(bin_op.lhs).fmt(pt),
ops[0].tracking(cg),
ops[1].tracking(cg),
}),
else => |e| return e,
};
try res[0].finish(inst, &.{ bin_op.lhs, bin_op.rhs }, &ops, cg);
},
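// .not: exact-width and signed ints can use the not instruction directly;
// unsigned ints with padding bits instead xor against the type's umax so the
// padding bits stay clear.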
.not => |air_tag| if (use_old) try cg.airUnOp(inst, air_tag) else {
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
var res: [1]Temp = undefined;
cg.select(&res, &.{ty_op.ty.toType()}, &ops, comptime &.{ .{
.src_constraints = .{ .{ .signed_or_exact_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .mut_mem, .none } },
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, ._, .not, .dst0b, ._, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .unsigned_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .mut_mem, .none } },
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0b, .sa(.src0, .add_umax), ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .signed_or_exact_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .mut_mem, .none } },
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, ._, .not, .dst0w, ._, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .unsigned_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .mut_mem, .none } },
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0w, .sa(.src0, .add_umax), ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .signed_or_exact_int = .dword }, .any },
.patterns = &.{
.{ .src = .{ .mut_mem, .none } },
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, ._, .not, .dst0d, ._, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .unsigned_int = .dword }, .any },
.patterns = &.{
.{ .src = .{ .mut_mem, .none } },
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0d, .sa(.src0, .add_umax), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .signed_or_exact_int = .qword }, .any },
.patterns = &.{
.{ .src = .{ .mut_mem, .none } },
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, ._, .not, .dst0q, ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .unsigned_int = .qword }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.each = .{ .once = &.{
.{ ._, ._, .mov, .dst0q, .ua(.src0, .add_umax), ._, ._ },
.{ ._, ._, .xor, .dst0q, .src0q, ._, ._ },
} },
}, .{
.required_features = .{ .mmx, null, null, null },
.src_constraints = .{ .{ .signed_or_exact_int = .qword }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_mm, .none } },
},
.dst_temps = .{.{ .rc = .mmx }},
.each = .{ .once = &.{
.{ ._, .p_d, .cmpeq, .dst0q, .dst0q, ._, ._ },
.{ ._, .p_, .xor, .dst0q, .src0q, ._, ._ },
} },
}, .{
.required_features = .{ .mmx, null, null, null },
.src_constraints = .{ .{ .unsigned_int = .qword }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_mm, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .umax_mem = .{ .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, .p_, .xor, .dst0q, .lea(.qword, .tmp0), ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .{ .signed_or_exact_int = .xword }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_xmm, .none } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, .vp_q, .cmpeq, .dst0x, .dst0x, .dst0x, ._ },
.{ ._, .vp_, .xor, .dst0x, .dst0x, .src0x, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .{ .unsigned_int = .xword }, .any },
.patterns = &.{
.{ .src = .{ .to_xmm, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .umax_mem = .{ .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, .vp_, .xor, .dst0x, .src0x, .lea(.xword, .tmp0), ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{ .{ .signed_or_exact_int = .xword }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_xmm, .none } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, .p_d, .cmpeq, .dst0x, .dst0x, ._, ._ },
.{ ._, .p_, .xor, .dst0x, .src0x, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{ .{ .unsigned_int = .xword }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_xmm, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .umax_mem = .{ .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, .p_, .xor, .dst0x, .lea(.xword, .tmp0), ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{ .{ .int = .xword }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_xmm, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .umax_mem = .{ .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, ._ps, .xor, .dst0x, .lea(.xword, .tmp0), ._, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{ .{ .signed_or_exact_int = .yword }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_ymm, .none } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, .vp_q, .cmpeq, .dst0y, .dst0y, .dst0y, ._ },
.{ ._, .vp_, .xor, .dst0y, .dst0y, .src0y, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{ .{ .unsigned_int = .yword }, .any },
.patterns = &.{
.{ .src = .{ .to_ymm, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .umax_mem = .{ .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, .vp_, .xor, .dst0y, .src0y, .lea(.yword, .tmp0), ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .{ .signed_or_exact_int = .yword }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_ymm, .none } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, .v_pd, .cmp, .dst0y, .dst0y, .dst0y, .vp(.true) },
.{ ._, .v_pd, .xor, .dst0y, .dst0y, .src0y, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .{ .unsigned_int = .yword }, .any },
.patterns = &.{
.{ .src = .{ .to_ymm, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .umax_mem = .{ .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, .v_pd, .xor, .dst0y, .src0y, .lea(.yword, .tmp0), ._ },
} },
}, .{
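// Wider-than-register .not: loop over the value in memory, complementing one
// chunk per iteration; any tail chunk or partial limb is handled after the loop.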
|
|
.required_features = .{ .avx2, null, null, null },
|
|
.src_constraints = .{ .{ .signed_or_exact_remainder_int = .{ .of = .yword, .is = .xword } }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sia(16, .src0, .sub_size), ._, ._ },
|
|
.{ ._, .vp_q, .cmpeq, .tmp1y, .tmp1y, .tmp1y, ._ },
|
|
.{ .@"0:", .vp_, .xor, .tmp2y, .tmp1y, .memiad(.src0y, .tmp0, .add_size, -16), ._ },
|
|
.{ ._, .v_dqu, .mov, .memiad(.dst0y, .tmp0, .add_size, -16), .tmp2y, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
.{ .@"0:", .vp_, .xor, .tmp2x, .tmp1x, .memad(.src0x, .add_size, -16), ._ },
|
|
.{ ._, .v_dqa, .mov, .memad(.dst0x, .add_size, -16), .tmp2x, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .avx2, null, null, null },
|
|
.src_constraints = .{ .{ .signed_or_exact_remainder_int = .{ .of = .yword, .is = .yword } }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, .vp_q, .cmpeq, .tmp1y, .tmp1y, .tmp1y, ._ },
|
|
.{ .@"0:", .vp_, .xor, .tmp2y, .tmp1y, .memia(.src0y, .tmp0, .add_size), ._ },
|
|
.{ ._, .v_dqu, .mov, .memia(.dst0y, .tmp0, .add_size), .tmp2y, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .avx, null, null, null },
|
|
.src_constraints = .{ .{ .signed_or_exact_remainder_int = .{ .of = .yword, .is = .xword } }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sia(16, .src0, .sub_size), ._, ._ },
|
|
.{ ._, .v_pd, .cmp, .tmp1y, .tmp1y, .tmp1y, .vp(.true) },
|
|
.{ .@"0:", .v_pd, .xor, .tmp2y, .tmp1y, .memiad(.src0y, .tmp0, .add_size, -16), ._ },
|
|
.{ ._, .v_pd, .movu, .memiad(.dst0y, .tmp0, .add_size, -16), .tmp2y, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
.{ .@"0:", .v_pd, .xor, .tmp2x, .tmp1x, .memad(.src0x, .add_size, -16), ._ },
|
|
.{ ._, .v_pd, .mova, .memad(.dst0x, .add_size, -16), .tmp2x, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .avx, null, null, null },
|
|
.src_constraints = .{ .{ .signed_or_exact_remainder_int = .{ .of = .yword, .is = .yword } }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, .v_pd, .cmp, .tmp1y, .tmp1y, .tmp1y, .vp(.true) },
|
|
.{ .@"0:", .v_pd, .xor, .tmp2y, .tmp1y, .memia(.src0y, .tmp0, .add_size), ._ },
|
|
.{ ._, .v_pd, .movu, .memia(.dst0y, .tmp0, .add_size), .tmp2y, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .avx, null, null, null },
|
|
.src_constraints = .{ .{ .signed_or_exact_remainder_int = .{ .of = .xword, .is = .xword } }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, .vp_q, .cmpeq, .tmp1x, .tmp1x, .tmp1x, ._ },
|
|
.{ .@"0:", .v_, .xor, .tmp2x, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._ },
|
|
.{ ._, .v_dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp2x, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .sse2, null, null, null },
|
|
.src_constraints = .{ .{ .signed_or_exact_remainder_int = .{ .of = .xword, .is = .xword } }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, .p_d, .cmpeq, .tmp1x, .tmp1x, ._, ._ },
|
|
.{ .@"0:", ._dqa, .mov, .tmp2x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .p_, .xor, .tmp2x, .tmp1x, ._, ._ },
|
|
.{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp2x, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", null, null, null },
|
|
.src_constraints = .{ .{ .signed_or_exact_remainder_int = .{ .of = .xword, .is = .xword } }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .mut_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ .@"0:", ._, .not, .memia(.dst0q, .tmp0, .add_size), ._, ._, ._ },
|
|
.{ ._, ._, .not, .memiad(.dst0q, .tmp0, .add_size, 8), ._, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", null, null, null },
|
|
.src_constraints = .{ .{ .signed_or_exact_remainder_int = .{ .of = .xword, .is = .xword } }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ .@"0:", ._, .mov, .tmp1q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .not, .tmp1q, ._, ._, ._ },
|
|
.{ ._, ._, .mov, .memia(.dst0q, .tmp0, .add_size), .tmp1q, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", null, null, null },
|
|
.src_constraints = .{ .{ .exact_remainder_int = .{ .of = .xword, .is = .dword } }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .mut_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sia(16, .src0, .sub_size), ._, ._ },
|
|
.{ .@"0:", ._, .not, .memiad(.dst0q, .tmp0, .add_size, -16), ._, ._, ._ },
|
|
.{ ._, ._, .not, .memiad(.dst0q, .tmp0, .add_size, -16 + 8), ._, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .not, .memad(.dst0d, .add_size, -16), ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", null, null, null },
|
|
.src_constraints = .{ .{ .exact_remainder_int = .{ .of = .xword, .is = .dword } }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sia(16, .src0, .sub_size), ._, ._ },
|
|
.{ .@"0:", ._, .mov, .tmp1q, .memiad(.src0q, .tmp0, .add_size, -16), ._, ._ },
|
|
.{ ._, ._, .not, .tmp1q, ._, ._, ._ },
|
|
.{ ._, ._, .mov, .memiad(.dst0q, .tmp0, .add_size, -16), .tmp1q, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp0d, .memad(.src0d, .add_size, -16), ._, ._ },
|
|
.{ ._, ._, .not, .tmp0d, ._, ._, ._ },
|
|
.{ ._, ._, .mov, .memad(.dst0d, .add_size, -16), .tmp0d, ._, ._ },
|
|
.{ ._, ._, .mov, .memad(.dst0d, .add_size, -16 + 4), .si(0), ._, ._ },
|
|
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -16 + 8), .si(0), ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", null, null, null },
|
|
.src_constraints = .{ .{ .exact_remainder_int = .{ .of = .qword, .is = .qword } }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .mut_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sia(16, .src0, .sub_size), ._, ._ },
|
|
.{ .@"0:", ._, .not, .memiad(.dst0q, .tmp0, .add_size, -16), ._, ._, ._ },
|
|
.{ ._, ._, .not, .memiad(.dst0q, .tmp0, .add_size, -16 + 8), ._, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .not, .memad(.dst0q, .add_size, -16), ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", null, null, null },
|
|
.src_constraints = .{ .{ .exact_remainder_int = .{ .of = .qword, .is = .qword } }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sia(8, .src0, .sub_size), ._, ._ },
|
|
.{ .@"0:", ._, .mov, .tmp1q, .memiad(.src0q, .tmp0, .add_size, -8), ._, ._ },
|
|
.{ ._, ._, .not, .tmp1q, ._, ._, ._ },
|
|
.{ ._, ._, .mov, .memiad(.dst0q, .tmp0, .add_size, -8), .tmp1q, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .si(0), ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", null, null, null },
|
|
.src_constraints = .{ .{ .exact_remainder_int = .{ .of = .dword, .is = .dword } }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .mut_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sia(8, .src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .not, .memiad(.dst0q, .tmp0, .add_size, -8), ._, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .not, .memad(.dst0d, .add_size, -8), ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .exact_remainder_int = .{ .of = .dword, .is = .dword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sia(8, .src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1q, .memiad(.src0q, .tmp0, .add_size, -8), ._, ._ },
.{ ._, ._, .not, .tmp1q, ._, ._, ._ },
.{ ._, ._, .mov, .memiad(.dst0q, .tmp0, .add_size, -8), .tmp1q, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .mov, .tmp0d, .memad(.src0d, .add_size, -8), ._, ._ },
.{ ._, ._, .not, .tmp0d, ._, ._, ._ },
.{ ._, ._, .mov, .memad(.dst0d, .add_size, -8), .tmp0d, ._, ._ },
.{ ._, ._, .mov, .memad(.dst0d, .add_size, -8 + 4), .si(0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .remainder_int = .{ .of = .xword, .is = .dword } }, .any },
.patterns = &.{
.{ .src = .{ .mut_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sia(16, .src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .not, .memiad(.dst0q, .tmp0, .add_size, -16), ._, ._, ._ },
.{ ._, ._, .not, .memiad(.dst0q, .tmp0, .add_size, -16 + 8), ._, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .xor, .memad(.dst0d, .add_size, -16), .sa(.src0, .add_umax), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .remainder_int = .{ .of = .xword, .is = .dword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sia(16, .src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1q, .memiad(.src0q, .tmp0, .add_size, -16), ._, ._ },
.{ ._, ._, .not, .tmp1q, ._, ._, ._ },
.{ ._, ._, .mov, .memiad(.dst0q, .tmp0, .add_size, -16), .tmp1q, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .mov, .tmp0d, .memad(.src0d, .add_size, -16), ._, ._ },
.{ ._, ._, .xor, .tmp0d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0d, .add_size, -16), .tmp0d, ._, ._ },
.{ ._, ._, .mov, .memad(.dst0d, .add_size, -16 + 4), .si(0), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -16 + 8), .si(0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .remainder_int = .{ .of = .qword, .is = .dword } }, .any },
.patterns = &.{
.{ .src = .{ .mut_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sia(8, .src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .not, .memiad(.dst0q, .tmp0, .add_size, -8), ._, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .xor, .memad(.dst0d, .add_size, -8), .sa(.src0, .add_umax), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .remainder_int = .{ .of = .qword, .is = .dword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sia(8, .src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1q, .memiad(.src0q, .tmp0, .add_size, -8), ._, ._ },
.{ ._, ._, .not, .tmp1q, ._, ._, ._ },
.{ ._, ._, .mov, .memiad(.dst0q, .tmp0, .add_size, -8), .tmp1q, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .mov, .tmp0d, .memad(.src0d, .add_size, -8), ._, ._ },
.{ ._, ._, .xor, .tmp0d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0d, .add_size, -8), .tmp0d, ._, ._ },
.{ ._, ._, .mov, .memad(.dst0d, .add_size, -8 + 4), .si(0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .remainder_int = .{ .of = .xword, .is = .qword } }, .any },
.patterns = &.{
.{ .src = .{ .mut_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sia(16, .src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .not, .memiad(.dst0q, .tmp0, .add_size, -16), ._, ._, ._ },
.{ ._, ._, .not, .memiad(.dst0q, .tmp0, .add_size, -16 + 8), ._, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .mov, .tmp0q, .ua(.src0, .add_umax), ._, ._ },
.{ ._, ._, .xor, .memad(.dst0q, .add_size, -16), .tmp0q, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .remainder_int = .{ .of = .xword, .is = .qword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sia(16, .src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1q, .memiad(.src0q, .tmp0, .add_size, -16), ._, ._ },
.{ ._, ._, .not, .tmp1q, ._, ._, ._ },
.{ ._, ._, .mov, .memiad(.dst0q, .tmp0, .add_size, -16), .tmp1q, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .mov, .tmp0q, .ua(.src0, .add_umax), ._, ._ },
.{ ._, ._, .xor, .tmp0q, .memad(.src0q, .add_size, -16), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -16), .tmp0q, ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .si(0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .remainder_int = .{ .of = .xword, .is = .xword } }, .any },
.patterns = &.{
.{ .src = .{ .mut_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sia(8, .src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .not, .memiad(.dst0q, .tmp0, .add_size, -8), ._, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .mov, .tmp0q, .ua(.src0, .add_umax), ._, ._ },
.{ ._, ._, .xor, .memad(.dst0q, .add_size, -8), .tmp0q, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .remainder_int = .{ .of = .xword, .is = .xword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sia(8, .src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1q, .memiad(.src0q, .tmp0, .add_size, -8), ._, ._ },
.{ ._, ._, .not, .tmp1q, ._, ._, ._ },
.{ ._, ._, .mov, .memiad(.dst0q, .tmp0, .add_size, -8), .tmp1q, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .mov, .tmp0q, .ua(.src0, .add_umax), ._, ._ },
.{ ._, ._, .xor, .tmp0q, .memad(.src0q, .add_size, -8), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .tmp0q, ._, ._ },
} },
}, .{
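// Vector forms: x86 has no vector `not`, so an all-ones register is
// materialized with pcmpeq against itself and then xored into the source.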
.required_features = .{ .mmx, null, null, null },
.src_constraints = .{ .{ .signed_int_or_full_vec = .qword }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_mm, .none } },
},
.dst_temps = .{.{ .rc = .mmx }},
.each = .{ .once = &.{
.{ ._, .p_d, .cmpeq, .dst0q, .dst0q, ._, ._ },
.{ ._, .p_, .xor, .dst0q, .src0q, ._, ._ },
} },
}, .{
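// For unsigned vectors with padding bits, the xor mask is instead loaded
// from a umax constant in memory, presumably so the padding bits of each
// element stay zero rather than flipping to ones.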
.required_features = .{ .mmx, null, null, null },
.src_constraints = .{ .{ .unsigned_int_vec = .qword }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_mm, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .umax_mem = .{ .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, .p_, .xor, .dst0q, .lea(.qword, .tmp0), ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .{ .signed_int_or_full_vec = .xword }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_xmm, .none } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, .vp_q, .cmpeq, .dst0x, .dst0x, .dst0x, ._ },
.{ ._, .vp_, .xor, .dst0x, .dst0x, .src0x, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .{ .unsigned_int_vec = .xword }, .any },
.patterns = &.{
.{ .src = .{ .to_xmm, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .umax_mem = .{ .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, .vp_, .xor, .dst0x, .src0x, .lea(.xword, .tmp0), ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{ .{ .signed_int_or_full_vec = .xword }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_xmm, .none } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, .p_d, .cmpeq, .dst0x, .dst0x, ._, ._ },
.{ ._, .p_, .xor, .dst0x, .src0x, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{ .{ .unsigned_int_vec = .xword }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_xmm, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .umax_mem = .{ .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, .p_, .xor, .dst0x, .lea(.xword, .tmp0), ._, ._ },
} },
}, .{
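// SSE1 only: no integer xor available, so fall back to xorps in the
// float domain.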
.required_features = .{ .sse, null, null, null },
.src_constraints = .{ .{ .vec = .xword }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_xmm, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .umax_mem = .{ .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .ref = .src0 }},
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, ._ps, .xor, .dst0x, .lea(.xword, .tmp0), ._, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{ .{ .signed_int_or_full_vec = .yword }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_ymm, .none } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, .vp_q, .cmpeq, .dst0y, .dst0y, .dst0y, ._ },
.{ ._, .vp_, .xor, .dst0y, .dst0y, .src0y, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{ .{ .unsigned_int_vec = .yword }, .any },
.patterns = &.{
.{ .src = .{ .to_ymm, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .umax_mem = .{ .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, .vp_, .xor, .dst0y, .src0y, .lea(.yword, .tmp0), ._ },
} },
}, .{
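// AVX1 without AVX2 lacks 256-bit integer ops, so the ymm all-ones is
// built with vcmppd (predicate `true`) and the flip is done with vxorpd.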
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .{ .signed_int_or_full_vec = .yword }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_ymm, .none } },
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, .v_pd, .cmp, .dst0y, .dst0y, .dst0y, .vp(.true) },
.{ ._, .v_pd, .xor, .dst0y, .dst0y, .src0y, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .{ .unsigned_int_vec = .yword }, .any },
.patterns = &.{
.{ .src = .{ .to_ymm, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .umax_mem = .{ .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .sse }},
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, .v_pd, .xor, .dst0y, .src0y, .lea(.yword, .tmp0), ._ },
} },
}, .{
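// Generic fallback: stream the value through a scratch GPR a limb at a
// time, xoring against a umax mask table in memory so arbitrary bit
// widths are handled.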
.required_features = .{ .@"64bit", null, null, null },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .umax_mem = .{ .ref = .src0 } } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_src0_unaligned_size), ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1p, .mem(.tmp3), ._, ._ },
|
|
.{ .@"0:", ._, .mov, .tmp2q, .memia(.src0q, .tmp0, .add_src0_unaligned_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2q, .leaia(.qword, .tmp1, .tmp0, .add_src0_unaligned_size), ._, ._ },
|
|
.{ ._, ._, .mov, .memia(.dst0q, .tmp0, .add_src0_unaligned_size), .tmp2q, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .umax_mem = .{ .ref = .src0 } } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_src0_unaligned_size), ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1p, .mem(.tmp3), ._, ._ },
|
|
.{ .@"0:", ._, .mov, .tmp2d, .memia(.src0d, .tmp0, .add_src0_unaligned_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2d, .leaia(.dword, .tmp1, .tmp0, .add_src0_unaligned_size), ._, ._ },
|
|
.{ ._, ._, .mov, .memia(.dst0d, .tmp0, .add_src0_unaligned_size), .tmp2d, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(4), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
} }) catch |err| switch (err) {
|
|
error.SelectFailed => return cg.fail("failed to select {s} {} {}", .{
|
|
@tagName(air_tag),
|
|
cg.typeOf(ty_op.operand).fmt(pt),
|
|
ops[0].tracking(cg),
|
|
}),
|
|
else => |e| return e,
|
|
};
|
|
try res[0].finish(inst, &.{ty_op.operand}, &ops, cg);
|
|
},
|
|
|
|
.block => {
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
const extra = cg.air.extraData(Air.Block, ty_pl.payload);
if (cg.debug_output != .none) try cg.asmPseudo(.pseudo_dbg_enter_block_none);
try cg.lowerBlock(inst, @ptrCast(cg.air.extra[extra.end..][0..extra.data.body_len]));
if (cg.debug_output != .none) try cg.asmPseudo(.pseudo_dbg_leave_block_none);
},
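// Loops snapshot the tracking state on entry; `repeat` then restores that
// state (emitting fixup instructions without touching the tracking itself)
// before jumping back to the recorded loop header.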
.loop => if (use_old) try cg.airLoop(inst) else {
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
const extra = cg.air.extraData(Air.Block, ty_pl.payload);
cg.scope_generation += 1;
try cg.loops.putNoClobber(cg.gpa, inst, .{
.state = try cg.saveState(),
.target = @intCast(cg.mir_instructions.len),
});
defer assert(cg.loops.remove(inst));
try cg.genBodyBlock(@ptrCast(cg.air.extra[extra.end..][0..extra.data.body_len]));
},
.repeat => if (use_old) try cg.airRepeat(inst) else {
const repeat = air_datas[@intFromEnum(inst)].repeat;
const loop = cg.loops.get(repeat.loop_inst).?;
try cg.restoreState(loop.state, &.{}, .{
.emit_instructions = true,
.update_tracking = false,
.resurrect = false,
.close_scope = true,
});
_ = try cg.asmJmpReloc(loop.target);
},
.br => try cg.airBr(inst),
.trap => try cg.asmOpOnly(.{ ._2, .ud }),
.breakpoint => try cg.asmOpOnly(.{ ._3, .int }),
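// Both of the following read fixed frame slots: the saved return address
// and the frame base pointer, respectively.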
.ret_addr => if (use_old) try cg.airRetAddr(inst) else {
var slot = try cg.tempInit(.usize, .{ .load_frame = .{
.index = .ret_addr,
} });
while (try slot.toRegClass(true, .general_purpose, cg)) {}
try slot.finish(inst, &.{}, &.{}, cg);
},
.frame_addr => if (use_old) try cg.airFrameAddress(inst) else {
const slot = try cg.tempInit(.usize, .{ .lea_frame = .{
.index = .base_ptr,
} });
try slot.finish(inst, &.{}, &.{}, cg);
},
.call => try cg.airCall(inst, .auto, .{ .safety = true }),
.call_always_tail => try cg.airCall(inst, .always_tail, .{ .safety = true }),
.call_never_tail => try cg.airCall(inst, .never_tail, .{ .safety = true }),
.call_never_inline => try cg.airCall(inst, .never_inline, .{ .safety = true }),

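// clz strategy: prefer lzcnt when available, correcting for bit widths
// narrower than the operation size; otherwise fall back to bsr, patching
// up the zero-input case with cmov or a branch.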
.clz => |air_tag| if (use_old) try cg.airClz(inst) else {
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
var res: [1]Temp = undefined;
cg.select(&res, &.{ty_op.ty.toType()}, &ops, comptime &.{ .{
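// 1-bit integers first: clz here is just the low result bit flipped, and
// since presumably only bit 0 of the result is observable, an inc/add of
// the low byte works as well as the xor.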
.required_features = .{ .slow_incdec, null, null, null },
.src_constraints = .{ .{ .exact_signed_int = 1 }, .any },
.patterns = &.{
.{ .src = .{ .mut_mem, .none } },
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .add, .dst0b, .si(1), ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .exact_signed_int = 1 }, .any },
.patterns = &.{
.{ .src = .{ .mut_mem, .none } },
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._c, .in, .dst0b, ._, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .exact_unsigned_int = 1 }, .any },
.patterns = &.{
.{ .src = .{ .mut_mem, .none } },
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0b, .si(1), ._, ._ },
} },
}, .{
.required_features = .{ .lzcnt, null, null, null },
.src_constraints = .{ .{ .unsigned_or_exact_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .movzx, .dst0d, .src0b, ._, ._ },
.{ ._, ._, .lzcnt, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .sub, .dst0b, .sia(32, .src0, .sub_bit_size), ._, ._ },
} },
}, .{
.required_features = .{ .lzcnt, null, null, null },
.src_constraints = .{ .{ .signed_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .movzx, .dst0d, .src0b, ._, ._ },
.{ ._, ._, .@"and", .dst0d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._, .lzcnt, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .sub, .dst0b, .sia(32, .src0, .sub_bit_size), ._, ._ },
} },
}, .{
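// The false_deps_lzcnt_tzcnt variants require the source in a mutable
// register and write the count back into it, avoiding the false output
// dependency lzcnt has on some CPUs.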
.required_features = .{ .false_deps_lzcnt_tzcnt, .lzcnt, null, null },
.src_constraints = .{ .{ .exact_int = 16 }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lzcnt, .dst0w, .src0w, ._, ._ },
} },
}, .{
.required_features = .{ .lzcnt, null, null, null },
.src_constraints = .{ .{ .exact_int = 16 }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lzcnt, .dst0w, .src0w, ._, ._ },
} },
}, .{
.required_features = .{ .lzcnt, null, null, null },
.src_constraints = .{ .{ .signed_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .@"and", .src0w, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._, .lzcnt, .dst0w, .src0w, ._, ._ },
.{ ._, ._, .sub, .dst0b, .sia(16, .src0, .sub_bit_size), ._, ._ },
} },
}, .{
.required_features = .{ .false_deps_lzcnt_tzcnt, .lzcnt, null, null },
.src_constraints = .{ .{ .unsigned_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lzcnt, .dst0w, .src0w, ._, ._ },
.{ ._, ._, .sub, .dst0b, .sia(16, .src0, .sub_bit_size), ._, ._ },
} },
}, .{
.required_features = .{ .lzcnt, null, null, null },
.src_constraints = .{ .{ .unsigned_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lzcnt, .dst0w, .src0w, ._, ._ },
.{ ._, ._, .sub, .dst0b, .sia(16, .src0, .sub_bit_size), ._, ._ },
} },
}, .{
.required_features = .{ .false_deps_lzcnt_tzcnt, .lzcnt, null, null },
.src_constraints = .{ .{ .exact_int = 32 }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lzcnt, .dst0d, .src0d, ._, ._ },
} },
}, .{
.required_features = .{ .lzcnt, null, null, null },
.src_constraints = .{ .{ .exact_int = 32 }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lzcnt, .dst0d, .src0d, ._, ._ },
} },
}, .{
.required_features = .{ .lzcnt, null, null, null },
.src_constraints = .{ .{ .signed_int = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .@"and", .src0d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._, .lzcnt, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .sub, .dst0b, .sia(32, .src0, .sub_bit_size), ._, ._ },
} },
}, .{
.required_features = .{ .false_deps_lzcnt_tzcnt, .lzcnt, null, null },
.src_constraints = .{ .{ .unsigned_int = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lzcnt, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .sub, .dst0b, .sia(32, .src0, .sub_bit_size), ._, ._ },
} },
}, .{
.required_features = .{ .lzcnt, null, null, null },
.src_constraints = .{ .{ .unsigned_int = .dword }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lzcnt, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .sub, .dst0b, .sia(32, .src0, .sub_bit_size), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .false_deps_lzcnt_tzcnt, .lzcnt, null },
.src_constraints = .{ .{ .exact_int = 64 }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lzcnt, .dst0q, .src0q, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .lzcnt, null, null },
.src_constraints = .{ .{ .exact_int = 64 }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lzcnt, .dst0q, .src0q, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .lzcnt, null, null },
.src_constraints = .{ .{ .signed_int = .qword }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .dst0q, .ua(.src0, .add_umax), ._, ._ },
.{ ._, ._, .@"and", .dst0q, .src0q, ._, ._ },
.{ ._, ._, .lzcnt, .dst0q, .dst0q, ._, ._ },
.{ ._, ._, .sub, .dst0b, .sia(64, .src0, .sub_bit_size), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .false_deps_lzcnt_tzcnt, .lzcnt, null },
.src_constraints = .{ .{ .unsigned_int = .qword }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lzcnt, .dst0q, .src0q, ._, ._ },
.{ ._, ._, .sub, .dst0b, .sia(64, .src0, .sub_bit_size), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .lzcnt, null, null },
.src_constraints = .{ .{ .unsigned_int = .qword }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lzcnt, .dst0q, .src0q, ._, ._ },
.{ ._, ._, .sub, .dst0b, .sia(64, .src0, .sub_bit_size), ._, ._ },
} },
}, .{
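// On CPUs where bsr leaves the destination unchanged (or undefined) for a
// zero input, the sentinel result is supplied either by cmov afterwards
// or by branching around the count computation.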
.required_features = .{ .cmov, .bsf_bsr_0_clobbers_result, null, null },
.src_constraints = .{ .{ .unsigned_po2_or_exact_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .movzx, .dst0d, .src0b, ._, ._ },
.{ ._, ._r, .bs, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .mov, .tmp0d, .sia(-1, .src0, .add_2_bit_size), ._, ._ },
.{ ._, ._z, .cmov, .dst0d, .tmp0d, ._, ._ },
.{ ._, ._, .xor, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
} },
}, .{
.required_features = .{ .cmov, .bsf_bsr_0_clobbers_result, null, null },
.src_constraints = .{ .{ .signed_po2_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .movzx, .dst0d, .src0b, ._, ._ },
.{ ._, ._, .@"and", .dst0d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._r, .bs, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .mov, .tmp0d, .sia(-1, .src0, .add_2_bit_size), ._, ._ },
.{ ._, ._z, .cmov, .dst0d, .tmp0d, ._, ._ },
.{ ._, ._, .xor, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
} },
}, .{
.required_features = .{ .cmov, .bsf_bsr_0_clobbers_result, null, null },
.src_constraints = .{ .{ .signed_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .movzx, .tmp0d, .src0b, ._, ._ },
.{ ._, ._, .@"and", .tmp0d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._r, .bs, .tmp0d, .tmp0d, ._, ._ },
.{ ._, ._, .mov, .dst0d, .si(0xff), ._, ._ },
.{ ._, ._z, .cmov, .tmp0d, .dst0d, ._, ._ },
.{ ._, ._, .mov, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .dst0b, .tmp0b, ._, ._ },
} },
}, .{
.required_features = .{ .cmov, .bsf_bsr_0_clobbers_result, null, null },
.src_constraints = .{ .{ .unsigned_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .movzx, .tmp0d, .src0b, ._, ._ },
.{ ._, ._r, .bs, .tmp0d, .tmp0d, ._, ._ },
.{ ._, ._, .mov, .dst0d, .si(0xff), ._, ._ },
.{ ._, ._z, .cmov, .tmp0d, .dst0d, ._, ._ },
.{ ._, ._, .mov, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .dst0b, .tmp0b, ._, ._ },
} },
}, .{
.required_features = .{ .bsf_bsr_0_clobbers_result, null, null, null },
.src_constraints = .{ .{ .unsigned_po2_or_exact_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .movzx, .dst0d, .src0b, ._, ._ },
.{ ._, ._r, .bs, .dst0d, .dst0d, ._, ._ },
.{ ._, ._nz, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .dst0b, .sia(-1, .src0, .add_2_bit_size), ._, ._ },
.{ .@"0:", ._, .xor, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
} },
}, .{
.required_features = .{ .bsf_bsr_0_clobbers_result, null, null, null },
.src_constraints = .{ .{ .signed_po2_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .movzx, .dst0d, .src0b, ._, ._ },
.{ ._, ._, .@"and", .dst0d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._r, .bs, .dst0d, .dst0d, ._, ._ },
.{ ._, ._nz, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .dst0b, .sia(-1, .src0, .add_2_bit_size), ._, ._ },
.{ .@"0:", ._, .xor, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
} },
}, .{
.required_features = .{ .bsf_bsr_0_clobbers_result, null, null, null },
.src_constraints = .{ .{ .signed_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .movzx, .tmp0d, .src0b, ._, ._ },
.{ ._, ._, .@"and", .tmp0d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._r, .bs, .tmp0d, .tmp0d, ._, ._ },
.{ ._, ._, .mov, .dst0b, .sa(.src0, .add_bit_size), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._c, .st, ._, ._, ._, ._ },
.{ ._, ._, .sbb, .dst0b, .tmp0b, ._, ._ },
} },
}, .{
.required_features = .{ .bsf_bsr_0_clobbers_result, null, null, null },
.src_constraints = .{ .{ .unsigned_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .movzx, .tmp0d, .src0b, ._, ._ },
.{ ._, ._r, .bs, .tmp0d, .tmp0d, ._, ._ },
.{ ._, ._, .mov, .dst0b, .sa(.src0, .add_bit_size), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._c, .st, ._, ._, ._, ._ },
.{ ._, ._, .sbb, .dst0b, .tmp0b, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .unsigned_po2_or_exact_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .movzx, .tmp0d, .src0b, ._, ._ },
.{ ._, ._, .mov, .dst0d, .sia(-1, .src0, .add_2_bit_size), ._, ._ },
.{ ._, ._r, .bs, .dst0d, .tmp0d, ._, ._ },
.{ ._, ._, .xor, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .signed_po2_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .movzx, .tmp0d, .src0b, ._, ._ },
.{ ._, ._, .@"and", .tmp0d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._, .mov, .dst0d, .sia(-1, .src0, .add_2_bit_size), ._, ._ },
.{ ._, ._r, .bs, .dst0d, .tmp0d, ._, ._ },
.{ ._, ._, .xor, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .signed_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .movzx, .dst0d, .src0b, ._, ._ },
.{ ._, ._, .@"and", .dst0d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._, .mov, .tmp0d, .si(0xff), ._, ._ },
.{ ._, ._r, .bs, .tmp0d, .dst0d, ._, ._ },
.{ ._, ._, .mov, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .dst0b, .tmp0b, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .unsigned_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .movzx, .dst0d, .src0b, ._, ._ },
.{ ._, ._, .mov, .tmp0d, .si(0xff), ._, ._ },
.{ ._, ._r, .bs, .tmp0d, .dst0d, ._, ._ },
.{ ._, ._, .mov, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .dst0b, .tmp0b, ._, ._ },
} },
}, .{
.required_features = .{ .cmov, .bsf_bsr_0_clobbers_result, null, null },
.src_constraints = .{ .{ .unsigned_po2_or_exact_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._r, .bs, .src0w, .src0w, ._, ._ },
.{ ._, ._, .mov, .dst0w, .sia(-1, .src0, .add_2_bit_size), ._, ._ },
.{ ._, ._nz, .cmov, .dst0w, .src0w, ._, ._ },
.{ ._, ._, .xor, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
} },
}, .{
.required_features = .{ .cmov, .bsf_bsr_0_clobbers_result, null, null },
.src_constraints = .{ .{ .signed_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .@"and", .src0w, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._r, .bs, .src0w, .src0w, ._, ._ },
.{ ._, ._, .mov, .dst0w, .si(0xff), ._, ._ },
.{ ._, ._z, .cmov, .src0w, .dst0w, ._, ._ },
.{ ._, ._, .mov, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .dst0b, .src0b, ._, ._ },
} },
}, .{
.required_features = .{ .cmov, .bsf_bsr_0_clobbers_result, null, null },
.src_constraints = .{ .{ .unsigned_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._r, .bs, .src0w, .src0w, ._, ._ },
.{ ._, ._, .mov, .dst0w, .si(0xff), ._, ._ },
.{ ._, ._z, .cmov, .src0w, .dst0w, ._, ._ },
.{ ._, ._, .mov, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .dst0b, .src0b, ._, ._ },
} },
}, .{
.required_features = .{ .bsf_bsr_0_clobbers_result, null, null, null },
.src_constraints = .{ .{ .unsigned_po2_or_exact_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._r, .bs, .dst0w, .src0w, ._, ._ },
.{ ._, ._nz, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .dst0b, .sia(-1, .src0, .add_2_bit_size), ._, ._ },
.{ .@"0:", ._, .xor, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
} },
}, .{
.required_features = .{ .bsf_bsr_0_clobbers_result, null, null, null },
.src_constraints = .{ .{ .signed_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .@"and", .src0w, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._r, .bs, .src0w, .src0w, ._, ._ },
.{ ._, ._, .mov, .dst0b, .sa(.src0, .add_bit_size), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._c, .st, ._, ._, ._, ._ },
.{ ._, ._, .sbb, .dst0b, .src0b, ._, ._ },
} },
}, .{
.required_features = .{ .bsf_bsr_0_clobbers_result, null, null, null },
.src_constraints = .{ .{ .unsigned_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._r, .bs, .src0w, .src0w, ._, ._ },
.{ ._, ._, .mov, .dst0b, .sa(.src0, .add_bit_size), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._c, .st, ._, ._, ._, ._ },
.{ ._, ._, .sbb, .dst0b, .src0b, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .unsigned_po2_or_exact_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .dst0w, .sia(-1, .src0, .add_2_bit_size), ._, ._ },
.{ ._, ._r, .bs, .dst0w, .src0w, ._, ._ },
.{ ._, ._, .xor, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .signed_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .none } },
},
.extra_temps = .{
.{ .type = .u16, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .@"and", .src0w, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._, .mov, .tmp0w, .si(0xff), ._, ._ },
.{ ._, ._r, .bs, .tmp0w, .src0w, ._, ._ },
.{ ._, ._, .mov, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .dst0b, .tmp0b, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .unsigned_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.extra_temps = .{
.{ .type = .u16, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0w, .si(0xff), ._, ._ },
.{ ._, ._r, .bs, .tmp0w, .src0w, ._, ._ },
.{ ._, ._, .mov, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .dst0b, .tmp0b, ._, ._ },
} },
}, .{
.required_features = .{ .cmov, .bsf_bsr_0_clobbers_result, null, null },
.src_constraints = .{ .{ .unsigned_po2_or_exact_int = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._r, .bs, .src0d, .src0d, ._, ._ },
.{ ._, ._, .mov, .dst0d, .sia(-1, .src0, .add_2_bit_size), ._, ._ },
.{ ._, ._nz, .cmov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .xor, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
} },
}, .{
.required_features = .{ .cmov, .bsf_bsr_0_clobbers_result, null, null },
.src_constraints = .{ .{ .signed_int = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .@"and", .src0d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._r, .bs, .src0d, .src0d, ._, ._ },
.{ ._, ._, .mov, .dst0d, .si(0xff), ._, ._ },
.{ ._, ._z, .cmov, .src0d, .dst0d, ._, ._ },
.{ ._, ._, .mov, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .dst0b, .src0b, ._, ._ },
} },
}, .{
.required_features = .{ .cmov, .bsf_bsr_0_clobbers_result, null, null },
.src_constraints = .{ .{ .unsigned_int = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._r, .bs, .src0d, .src0d, ._, ._ },
.{ ._, ._, .mov, .dst0d, .si(0xff), ._, ._ },
.{ ._, ._z, .cmov, .src0d, .dst0d, ._, ._ },
.{ ._, ._, .mov, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .dst0b, .src0b, ._, ._ },
} },
}, .{
.required_features = .{ .bsf_bsr_0_clobbers_result, null, null, null },
.src_constraints = .{ .{ .unsigned_po2_or_exact_int = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._r, .bs, .dst0d, .src0d, ._, ._ },
.{ ._, ._nz, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .dst0b, .sia(-1, .src0, .add_2_bit_size), ._, ._ },
.{ .@"0:", ._, .xor, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
} },
}, .{
.required_features = .{ .bsf_bsr_0_clobbers_result, null, null, null },
.src_constraints = .{ .{ .signed_int = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .@"and", .src0d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._r, .bs, .src0d, .src0d, ._, ._ },
.{ ._, ._, .mov, .dst0b, .sa(.src0, .add_bit_size), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._c, .st, ._, ._, ._, ._ },
.{ ._, ._, .sbb, .dst0b, .src0b, ._, ._ },
} },
}, .{
.required_features = .{ .bsf_bsr_0_clobbers_result, null, null, null },
.src_constraints = .{ .{ .unsigned_int = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._r, .bs, .src0d, .src0d, ._, ._ },
.{ ._, ._, .mov, .dst0b, .sa(.src0, .add_bit_size), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._c, .st, ._, ._, ._, ._ },
.{ ._, ._, .sbb, .dst0b, .src0b, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .unsigned_po2_or_exact_int = .dword }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .dst0d, .sia(-1, .src0, .add_2_bit_size), ._, ._ },
.{ ._, ._r, .bs, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .xor, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .signed_int = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_mut_gpr, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .@"and", .src0d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._, .mov, .tmp0d, .si(0xff), ._, ._ },
.{ ._, ._r, .bs, .tmp0d, .src0d, ._, ._ },
.{ ._, ._, .mov, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .dst0b, .tmp0b, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .unsigned_int = .dword }, .any },
.patterns = &.{
.{ .src = .{ .mem, .none } },
.{ .src = .{ .to_gpr, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0d, .si(0xff), ._, ._ },
.{ ._, ._r, .bs, .tmp0d, .src0d, ._, ._ },
.{ ._, ._, .mov, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .dst0b, .tmp0b, ._, ._ },
} },
}, .{
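// The qword variants repeat the dword scheme with 64-bit bsr; the umax
// mask is materialized in a register first since `and` cannot take a
// 64-bit immediate.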
.required_features = .{ .@"64bit", .cmov, .bsf_bsr_0_clobbers_result, null },
|
|
.src_constraints = .{ .{ .unsigned_po2_or_exact_int = .qword }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_gpr, .none } },
|
|
},
|
|
.dst_temps = .{.{ .rc = .general_purpose }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._r, .bs, .src0q, .src0q, ._, ._ },
|
|
.{ ._, ._, .mov, .dst0d, .sia(-1, .src0, .add_2_bit_size), ._, ._ },
|
|
.{ ._, ._nz, .cmov, .dst0d, .src0d, ._, ._ },
|
|
.{ ._, ._, .xor, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", .cmov, .bsf_bsr_0_clobbers_result, null },
|
|
.src_constraints = .{ .{ .signed_int = .qword }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .mem, .none } },
|
|
.{ .src = .{ .to_gpr, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.{ .rc = .general_purpose }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0q, .ua(.src0, .add_umax), ._, ._ },
|
|
.{ ._, ._, .@"and", .tmp0q, .src0q, ._, ._ },
|
|
.{ ._, ._r, .bs, .tmp0q, .tmp0q, ._, ._ },
|
|
.{ ._, ._, .mov, .dst0d, .si(0xff), ._, ._ },
|
|
.{ ._, ._z, .cmov, .tmp0d, .dst0d, ._, ._ },
|
|
.{ ._, ._, .mov, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
|
|
.{ ._, ._, .sub, .dst0b, .tmp0b, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", .cmov, .bsf_bsr_0_clobbers_result, null },
|
|
.src_constraints = .{ .{ .unsigned_int = .qword }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_gpr, .none } },
|
|
},
|
|
.dst_temps = .{.{ .rc = .general_purpose }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._r, .bs, .src0q, .src0q, ._, ._ },
|
|
.{ ._, ._, .mov, .dst0d, .si(0xff), ._, ._ },
|
|
.{ ._, ._z, .cmov, .src0d, .dst0d, ._, ._ },
|
|
.{ ._, ._, .mov, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
|
|
.{ ._, ._, .sub, .dst0b, .src0b, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", .bsf_bsr_0_clobbers_result, null, null },
|
|
.src_constraints = .{ .{ .unsigned_po2_or_exact_int = .qword }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_gpr, .none } },
|
|
},
|
|
.dst_temps = .{.{ .ref = .src0 }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._r, .bs, .dst0q, .src0q, ._, ._ },
|
|
.{ ._, ._nz, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .dst0b, .sia(-1, .src0, .add_2_bit_size), ._, ._ },
|
|
.{ .@"0:", ._, .xor, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", .bsf_bsr_0_clobbers_result, null, null },
|
|
.src_constraints = .{ .{ .signed_int = .qword }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .mem, .none } },
|
|
.{ .src = .{ .to_gpr, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.{ .rc = .general_purpose }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0q, .ua(.src0, .add_umax), ._, ._ },
|
|
.{ ._, ._, .@"and", .tmp0q, .src0q, ._, ._ },
|
|
.{ ._, ._r, .bs, .tmp0q, .tmp0q, ._, ._ },
|
|
.{ ._, ._, .mov, .dst0b, .sa(.src0, .add_bit_size), ._, ._ },
|
|
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._c, .st, ._, ._, ._, ._ },
|
|
.{ ._, ._, .sbb, .dst0b, .tmp0b, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", .bsf_bsr_0_clobbers_result, null, null },
|
|
.src_constraints = .{ .{ .unsigned_int = .qword }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mut_gpr, .none } },
|
|
},
|
|
.dst_temps = .{.{ .rc = .general_purpose }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._r, .bs, .src0q, .src0q, ._, ._ },
|
|
.{ ._, ._, .mov, .dst0b, .sa(.src0, .add_bit_size), ._, ._ },
|
|
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._c, .st, ._, ._, ._, ._ },
|
|
.{ ._, ._, .sbb, .dst0b, .src0b, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", null, null, null },
|
|
.src_constraints = .{ .{ .unsigned_po2_or_exact_int = .qword }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .mem, .none } },
|
|
.{ .src = .{ .to_gpr, .none } },
|
|
},
|
|
.dst_temps = .{.{ .rc = .general_purpose }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .dst0d, .sia(-1, .src0, .add_2_bit_size), ._, ._ },
|
|
.{ ._, ._r, .bs, .dst0q, .src0q, ._, ._ },
|
|
.{ ._, ._, .xor, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", null, null, null },
|
|
.src_constraints = .{ .{ .signed_int = .qword }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .mem, .none } },
|
|
.{ .src = .{ .to_gpr, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.{ .rc = .general_purpose }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .dst0q, .ua(.src0, .add_umax), ._, ._ },
|
|
.{ ._, ._, .@"and", .dst0q, .src0q, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp0d, .si(0xff), ._, ._ },
|
|
.{ ._, ._r, .bs, .tmp0q, .dst0q, ._, ._ },
|
|
.{ ._, ._, .mov, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
|
|
.{ ._, ._, .sub, .dst0b, .tmp0b, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", null, null, null },
|
|
.src_constraints = .{ .{ .unsigned_int = .qword }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .mem, .none } },
|
|
.{ .src = .{ .to_gpr, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.{ .rc = .general_purpose }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0d, .si(0xff), ._, ._ },
|
|
.{ ._, ._r, .bs, .tmp0q, .src0q, ._, ._ },
|
|
.{ ._, ._, .mov, .dst0b, .sia(-1, .src0, .add_bit_size), ._, ._ },
|
|
.{ ._, ._, .sub, .dst0b, .tmp0b, ._, ._ },
|
|
} },
|
|
}, .{
|
|
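// Multi-limb clz: scan limbs from the most significant end until a
// nonzero limb is found, then fold the limb offset (scaled by 8) and the
// intra-limb count into the final bit count with a single lea.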
.required_features = .{ .@"64bit", .false_deps_lzcnt_tzcnt, .lzcnt, null },
|
|
.src_constraints = .{ .{ .unsigned_or_exact_remainder_int = .{ .of = .xword, .is = .qword } }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.{ .rc = .general_purpose }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0d, .sia(-16, .src0, .add_size), ._, ._ },
|
|
.{ .@"0:", ._, .xor, .dst0d, .dst0d, ._, ._ },
|
|
.{ ._, ._, .lzcnt, .dst0q, .memi(.src0q, .tmp0), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .sub, .tmp0d, .si(8), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
|
|
.{ .@"0:", ._, .neg, .tmp0d, ._, ._, ._ },
|
|
.{ ._, ._, .lea, .dst0d, .leasiad(.none, .dst0, .@"8", .tmp0, .add_src0_bit_size, -64), ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", .lzcnt, null, null },
|
|
.src_constraints = .{ .{ .unsigned_or_exact_remainder_int = .{ .of = .xword, .is = .qword } }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.{ .rc = .general_purpose }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0d, .sia(-16, .src0, .add_size), ._, ._ },
|
|
.{ .@"0:", ._, .lzcnt, .dst0q, .memi(.src0q, .tmp0), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .sub, .tmp0d, .si(8), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
|
|
.{ .@"0:", ._, .neg, .tmp0d, ._, ._, ._ },
|
|
.{ ._, ._, .lea, .dst0d, .leasiad(.none, .dst0, .@"8", .tmp0, .add_src0_bit_size, -64), ._, ._ },
|
|
} },
|
|
}, .{
.required_features = .{ .@"64bit", .bsf_bsr_0_clobbers_result, null, null },
.src_constraints = .{ .{ .unsigned_or_exact_remainder_int = .{ .of = .xword, .is = .qword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0d, .sia(-16, .src0, .add_size), ._, ._ },
.{ .@"0:", ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._r, .bs, .dst0q, .memi(.src0q, .tmp0), ._, ._ },
.{ ._, ._nz, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .sub, .tmp0d, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .mov, .dst0d, .si(-1), ._, ._ },
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ .@"0:", ._, .lea, .dst0d, .leasiad(.none, .dst0, .@"8", .tmp0, .sub_src0_bit_size, 1), ._, ._ },
.{ ._, ._, .neg, .dst0d, ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .unsigned_or_exact_remainder_int = .{ .of = .xword, .is = .qword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0d, .sia(-16, .src0, .add_size), ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .si(-1), ._, ._ },
.{ ._, ._r, .bs, .dst0q, .memi(.src0q, .tmp0), ._, ._ },
.{ ._, ._nz, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .sub, .tmp0d, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ .@"0:", ._, .lea, .dst0d, .leasiad(.none, .dst0, .@"8", .tmp0, .sub_src0_bit_size, 1), ._, ._ },
.{ ._, ._, .neg, .dst0d, ._, ._, ._ },
} },
}, .{
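// The next four cases repeat the LZCNT/BSR limb scans for wide ints
// whose most-significant limb sits at a different offset; they differ
// from the cases above only in the initial index (-8 versus -16).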
.required_features = .{ .@"64bit", .false_deps_lzcnt_tzcnt, .lzcnt, null },
.src_constraints = .{ .{ .unsigned_or_exact_remainder_int = .{ .of = .xword, .is = .xword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0d, .sia(-8, .src0, .add_size), ._, ._ },
.{ .@"0:", ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .lzcnt, .dst0q, .memi(.src0q, .tmp0), ._, ._ },
.{ ._, ._nc, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .sub, .tmp0d, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ .@"0:", ._, .neg, .tmp0d, ._, ._, ._ },
.{ ._, ._, .lea, .dst0d, .leasiad(.none, .dst0, .@"8", .tmp0, .add_src0_bit_size, -64), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .lzcnt, null, null },
.src_constraints = .{ .{ .unsigned_or_exact_remainder_int = .{ .of = .xword, .is = .xword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0d, .sia(-8, .src0, .add_size), ._, ._ },
.{ .@"0:", ._, .lzcnt, .dst0q, .memi(.src0q, .tmp0), ._, ._ },
.{ ._, ._nc, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .sub, .tmp0d, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ .@"0:", ._, .neg, .tmp0d, ._, ._, ._ },
.{ ._, ._, .lea, .dst0d, .leasiad(.none, .dst0, .@"8", .tmp0, .add_src0_bit_size, -64), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .bsf_bsr_0_clobbers_result, null, null },
.src_constraints = .{ .{ .unsigned_or_exact_remainder_int = .{ .of = .xword, .is = .xword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0d, .sia(-8, .src0, .add_size), ._, ._ },
.{ .@"0:", ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._r, .bs, .dst0q, .memi(.src0q, .tmp0), ._, ._ },
.{ ._, ._nz, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .sub, .tmp0d, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .mov, .dst0d, .si(-1), ._, ._ },
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ .@"0:", ._, .lea, .dst0d, .leasiad(.none, .dst0, .@"8", .tmp0, .sub_src0_bit_size, 1), ._, ._ },
.{ ._, ._, .neg, .dst0d, ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .unsigned_or_exact_remainder_int = .{ .of = .xword, .is = .xword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0d, .sia(-8, .src0, .add_size), ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .si(-1), ._, ._ },
.{ ._, ._r, .bs, .dst0q, .memi(.src0q, .tmp0), ._, ._ },
.{ ._, ._nz, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .sub, .tmp0d, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ .@"0:", ._, .lea, .dst0d, .leasiad(.none, .dst0, .@"8", .tmp0, .sub_src0_bit_size, 1), ._, ._ },
.{ ._, ._, .neg, .dst0d, ._, ._, ._ },
} },
}, .{
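// `remainder_int` shapes: the most-significant limb can carry bits
// beyond the integer's width, so the first iteration masks it with
// the type's unsigned max (`.add_umax`); after that the mask is
// reloaded as all-ones so full limbs pass through unchanged.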
.required_features = .{ .@"64bit", .false_deps_lzcnt_tzcnt, .lzcnt, null },
.src_constraints = .{ .{ .remainder_int = .{ .of = .xword, .is = .qword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0d, .sia(-16, .src0, .add_size), ._, ._ },
.{ ._, ._, .mov, .tmp1q, .ua(.src0, .add_umax), ._, ._ },
.{ .@"0:", ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .@"and", .tmp1q, .memi(.src0q, .tmp0), ._, ._ },
.{ ._, ._, .lzcnt, .dst0q, .tmp1q, ._, ._ },
.{ ._, ._nc, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp1q, .si(-1), ._, ._ },
.{ ._, ._, .sub, .tmp0d, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ .@"0:", ._, .neg, .tmp0d, ._, ._, ._ },
.{ ._, ._, .lea, .dst0d, .leasiad(.none, .dst0, .@"8", .tmp0, .add_src0_bit_size, -64), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .lzcnt, null, null },
.src_constraints = .{ .{ .remainder_int = .{ .of = .xword, .is = .qword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0d, .sia(-16, .src0, .add_size), ._, ._ },
.{ ._, ._, .mov, .tmp1q, .ua(.src0, .add_umax), ._, ._ },
.{ .@"0:", ._, .@"and", .tmp1q, .memi(.src0q, .tmp0), ._, ._ },
.{ ._, ._, .lzcnt, .dst0q, .tmp1q, ._, ._ },
.{ ._, ._nc, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp1q, .si(-1), ._, ._ },
.{ ._, ._, .sub, .tmp0d, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ .@"0:", ._, .neg, .tmp0d, ._, ._, ._ },
.{ ._, ._, .lea, .dst0d, .leasiad(.none, .dst0, .@"8", .tmp0, .add_src0_bit_size, -64), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .remainder_int = .{ .of = .xword, .is = .qword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0d, .sia(-16, .src0, .add_size), ._, ._ },
.{ ._, ._, .mov, .dst0q, .ua(.src0, .add_umax), ._, ._ },
.{ .@"0:", ._, .@"and", .dst0q, .memi(.src0q, .tmp0), ._, ._ },
.{ ._, ._r, .bs, .dst0q, .dst0q, ._, ._ },
.{ ._, ._nz, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .dst0q, .si(-1), ._, ._ },
.{ ._, ._, .sub, .tmp0d, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ .@"0:", ._, .lea, .dst0d, .leasiad(.none, .dst0, .@"8", .tmp0, .sub_src0_bit_size, 1), ._, ._ },
.{ ._, ._, .neg, .dst0d, ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .false_deps_lzcnt_tzcnt, .lzcnt, null },
.src_constraints = .{ .{ .remainder_int = .{ .of = .xword, .is = .xword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0d, .sia(-8, .src0, .add_size), ._, ._ },
.{ ._, ._, .mov, .tmp1q, .ua(.src0, .add_umax), ._, ._ },
.{ .@"0:", ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .@"and", .tmp1q, .memi(.src0q, .tmp0), ._, ._ },
.{ ._, ._, .lzcnt, .dst0q, .tmp1q, ._, ._ },
.{ ._, ._nc, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp1q, .si(-1), ._, ._ },
.{ ._, ._, .sub, .tmp0d, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ .@"0:", ._, .neg, .tmp0d, ._, ._, ._ },
.{ ._, ._, .lea, .dst0d, .leasiad(.none, .dst0, .@"8", .tmp0, .add_src0_bit_size, -64), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .lzcnt, null, null },
.src_constraints = .{ .{ .remainder_int = .{ .of = .xword, .is = .xword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0d, .sia(-8, .src0, .add_size), ._, ._ },
.{ ._, ._, .mov, .tmp1q, .ua(.src0, .add_umax), ._, ._ },
.{ .@"0:", ._, .@"and", .tmp1q, .memi(.src0q, .tmp0), ._, ._ },
.{ ._, ._, .lzcnt, .dst0q, .tmp1q, ._, ._ },
.{ ._, ._nc, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp1q, .si(-1), ._, ._ },
.{ ._, ._, .sub, .tmp0d, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ .@"0:", ._, .neg, .tmp0d, ._, ._, ._ },
.{ ._, ._, .lea, .dst0d, .leasiad(.none, .dst0, .@"8", .tmp0, .add_src0_bit_size, -64), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .remainder_int = .{ .of = .xword, .is = .xword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0d, .sia(-8, .src0, .add_size), ._, ._ },
.{ ._, ._, .mov, .dst0q, .ua(.src0, .add_umax), ._, ._ },
.{ .@"0:", ._, .@"and", .dst0q, .memi(.src0q, .tmp0), ._, ._ },
.{ ._, ._r, .bs, .dst0q, .dst0q, ._, ._ },
.{ ._, ._nz, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .dst0q, .si(-1), ._, ._ },
.{ ._, ._, .sub, .tmp0d, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ .@"0:", ._, .lea, .dst0d, .leasiad(.none, .dst0, .@"8", .tmp0, .sub_src0_bit_size, 1), ._, ._ },
.{ ._, ._, .neg, .dst0d, ._, ._, ._ },
} },
}, .{
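// Element-wise ctlz over memory: loop across the elements, mask each
// with umax, count with LZCNT, and rebase the 32- or 64-bit count
// onto the element's bit width before storing one result byte.
// `slow_incdec` variants step the index with `add ..., 1`/`jnc`
// instead of `inc`/`jnz`.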
.required_features = .{ .lzcnt, .slow_incdec, null, null },
.src_constraints = .{ .{ .scalar_int_is = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp1d, .memia(.src0b, .tmp0, .add_len), ._, ._ },
.{ ._, ._, .@"and", .tmp1d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._, .lzcnt, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .sub, .tmp1b, .sia(32, .src0, .sub_bit_size), ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp1b, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .lzcnt, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp1d, .memia(.src0b, .tmp0, .add_len), ._, ._ },
.{ ._, ._, .@"and", .tmp1d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._, .lzcnt, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .sub, .tmp1b, .sia(32, .src0, .sub_bit_size), ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp1b, ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .lzcnt, .slow_incdec, null, null },
.src_constraints = .{ .{ .scalar_int_is = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp1d, .memsia(.src0w, .@"2", .tmp0, .add_2_len), ._, ._ },
.{ ._, ._, .@"and", .tmp1d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._, .lzcnt, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .sub, .tmp1b, .sia(32, .src0, .sub_bit_size), ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp1b, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .lzcnt, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp1d, .memsia(.src0w, .@"2", .tmp0, .add_2_len), ._, ._ },
.{ ._, ._, .@"and", .tmp1d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._, .lzcnt, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .sub, .tmp1b, .sia(32, .src0, .sub_bit_size), ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp1b, ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .lzcnt, .slow_incdec, null, null },
.src_constraints = .{ .{ .scalar_int_is = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1d, .memsia(.src0d, .@"4", .tmp0, .add_4_len), ._, ._ },
.{ ._, ._, .@"and", .tmp1d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._, .lzcnt, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .sub, .tmp1b, .sia(32, .src0, .sub_bit_size), ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp1b, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .lzcnt, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1d, .memsia(.src0d, .@"4", .tmp0, .add_4_len), ._, ._ },
.{ ._, ._, .@"and", .tmp1d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._, .lzcnt, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .sub, .tmp1b, .sia(32, .src0, .sub_bit_size), ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp1b, ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .lzcnt, .slow_incdec, null },
.src_constraints = .{ .{ .scalar_int_is = .qword }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1q, .ua(.src0, .add_umax), ._, ._ },
.{ ._, ._, .@"and", .tmp1q, .memsia(.src0q, .@"8", .tmp0, .add_8_len), ._, ._ },
.{ ._, ._, .lzcnt, .tmp1q, .tmp1q, ._, ._ },
.{ ._, ._, .sub, .tmp1b, .sia(64, .src0, .sub_bit_size), ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp1b, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .lzcnt, null, null },
.src_constraints = .{ .{ .scalar_int_is = .qword }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1q, .ua(.src0, .add_umax), ._, ._ },
.{ ._, ._, .@"and", .tmp1q, .memsia(.src0q, .@"8", .tmp0, .add_8_len), ._, ._ },
.{ ._, ._, .lzcnt, .tmp1q, .tmp1q, ._, ._ },
.{ ._, ._, .sub, .tmp1b, .sia(64, .src0, .sub_bit_size), ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp1b, ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
}, .{
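// Without LZCNT the element loops fall back to BSR. On CPUs where a
// zero source clobbers the BSR destination, CMOV substitutes the
// 0xff sentinel so the following (bit_size - 1) - index computation
// still produces bit_size for zero elements.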
.required_features = .{ .cmov, .bsf_bsr_0_clobbers_result, .slow_incdec, null },
.src_constraints = .{ .{ .scalar_int_is = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ ._, ._, .mov, .tmp1d, .si(0xff), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp2d, .memia(.src0b, .tmp0, .add_len), ._, ._ },
.{ ._, ._, .@"and", .tmp2d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._r, .bs, .tmp2d, .tmp2d, ._, ._ },
.{ ._, ._z, .cmov, .tmp2d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .tmp3b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .tmp3b, .tmp2b, ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp3b, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .cmov, .bsf_bsr_0_clobbers_result, null, null },
.src_constraints = .{ .{ .scalar_int_is = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ ._, ._, .mov, .tmp1d, .si(0xff), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp2d, .memia(.src0b, .tmp0, .add_len), ._, ._ },
.{ ._, ._, .@"and", .tmp2d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._r, .bs, .tmp2d, .tmp2d, ._, ._ },
.{ ._, ._z, .cmov, .tmp2d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .tmp3b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .tmp3b, .tmp2b, ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp3b, ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
}, .{
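// Without CMOV the zero case takes a branch instead: bit_size is
// loaded as the default result, and only when BSR found a set bit do
// `stc` + `sbb` compute bit_size - 1 - index in one
// subtract-with-borrow.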
.required_features = .{ .bsf_bsr_0_clobbers_result, .slow_incdec, null, null },
.src_constraints = .{ .{ .scalar_int_is = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp1d, .memia(.src0b, .tmp0, .add_len), ._, ._ },
.{ ._, ._, .@"and", .tmp1d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._r, .bs, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .tmp2b, .sa(.src0, .add_bit_size), ._, ._ },
.{ ._, ._z, .j, .@"1f", ._, ._, ._ },
.{ ._, ._c, .st, ._, ._, ._, ._ },
.{ ._, ._, .sbb, .tmp2b, .tmp1b, ._, ._ },
.{ .@"1:", ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp2b, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .bsf_bsr_0_clobbers_result, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp1d, .memia(.src0b, .tmp0, .add_len), ._, ._ },
.{ ._, ._, .@"and", .tmp1d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._r, .bs, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .tmp2b, .sa(.src0, .add_bit_size), ._, ._ },
.{ ._, ._z, .j, .@"1f", ._, ._, ._ },
.{ ._, ._c, .st, ._, ._, ._, ._ },
.{ ._, ._, .sbb, .tmp2b, .tmp1b, ._, ._ },
.{ .@"1:", ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp2b, ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .slow_incdec, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp1d, .memia(.src0b, .tmp0, .add_len), ._, ._ },
.{ ._, ._, .@"and", .tmp1d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._, .mov, .tmp2d, .si(0xff), ._, ._ },
.{ ._, ._r, .bs, .tmp2d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .tmp1b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .tmp1b, .tmp2b, ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp1b, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
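// Baseline byte cases with no feature gates: preload 0xff and rely
// on BSR leaving its destination unchanged for a zero source, so
// zero elements fall out of the same (bit_size - 1) - index math.
// The word, dword, and qword element cases below walk the same
// feature ladder for their element sizes.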
.src_constraints = .{ .{ .scalar_int_is = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp1d, .memia(.src0b, .tmp0, .add_len), ._, ._ },
.{ ._, ._, .@"and", .tmp1d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._, .mov, .tmp2d, .si(0xff), ._, ._ },
.{ ._, ._r, .bs, .tmp2d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .tmp1b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .tmp1b, .tmp2b, ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp1b, ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .cmov, .bsf_bsr_0_clobbers_result, .slow_incdec, null },
.src_constraints = .{ .{ .scalar_int_is = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ ._, ._, .mov, .tmp1d, .si(0xff), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp2d, .memsia(.src0w, .@"2", .tmp0, .add_2_len), ._, ._ },
.{ ._, ._, .@"and", .tmp2d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._r, .bs, .tmp2d, .tmp2d, ._, ._ },
.{ ._, ._z, .cmov, .tmp2d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .tmp3b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .tmp3b, .tmp2b, ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp3b, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .cmov, .bsf_bsr_0_clobbers_result, null, null },
.src_constraints = .{ .{ .scalar_int_is = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ ._, ._, .mov, .tmp1d, .si(0xff), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp2d, .memsia(.src0w, .@"2", .tmp0, .add_2_len), ._, ._ },
.{ ._, ._, .@"and", .tmp2d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._r, .bs, .tmp2d, .tmp2d, ._, ._ },
.{ ._, ._z, .cmov, .tmp2d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .tmp3b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .tmp3b, .tmp2b, ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp3b, ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .bsf_bsr_0_clobbers_result, .slow_incdec, null, null },
.src_constraints = .{ .{ .scalar_int_is = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp1d, .memsia(.src0w, .@"2", .tmp0, .add_2_len), ._, ._ },
.{ ._, ._, .@"and", .tmp1d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._r, .bs, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .tmp2b, .sa(.src0, .add_bit_size), ._, ._ },
.{ ._, ._z, .j, .@"1f", ._, ._, ._ },
.{ ._, ._c, .st, ._, ._, ._, ._ },
.{ ._, ._, .sbb, .tmp2b, .tmp1b, ._, ._ },
.{ .@"1:", ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp2b, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .bsf_bsr_0_clobbers_result, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp1d, .memsia(.src0w, .@"2", .tmp0, .add_2_len), ._, ._ },
.{ ._, ._, .@"and", .tmp1d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._r, .bs, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .tmp2b, .sa(.src0, .add_bit_size), ._, ._ },
.{ ._, ._z, .j, .@"1f", ._, ._, ._ },
.{ ._, ._c, .st, ._, ._, ._, ._ },
.{ ._, ._, .sbb, .tmp2b, .tmp1b, ._, ._ },
.{ .@"1:", ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp2b, ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .slow_incdec, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp1d, .memsia(.src0w, .@"2", .tmp0, .add_2_len), ._, ._ },
.{ ._, ._, .@"and", .tmp1d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._, .mov, .tmp2d, .si(0xff), ._, ._ },
.{ ._, ._r, .bs, .tmp2d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .tmp1b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .tmp1b, .tmp2b, ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp1b, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .scalar_int_is = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp1d, .memsia(.src0w, .@"2", .tmp0, .add_2_len), ._, ._ },
.{ ._, ._, .@"and", .tmp1d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._, .mov, .tmp2d, .si(0xff), ._, ._ },
.{ ._, ._r, .bs, .tmp2d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .tmp1b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .tmp1b, .tmp2b, ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp1b, ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .cmov, .bsf_bsr_0_clobbers_result, .slow_incdec, null },
.src_constraints = .{ .{ .scalar_int_is = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ ._, ._, .mov, .tmp1d, .si(0xff), ._, ._ },
.{ .@"0:", ._, .mov, .tmp2d, .memsia(.src0d, .@"4", .tmp0, .add_4_len), ._, ._ },
.{ ._, ._, .@"and", .tmp2d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._r, .bs, .tmp2d, .tmp2d, ._, ._ },
.{ ._, ._z, .cmov, .tmp2d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .tmp3b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .tmp3b, .tmp2b, ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp3b, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .cmov, .bsf_bsr_0_clobbers_result, null, null },
.src_constraints = .{ .{ .scalar_int_is = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ ._, ._, .mov, .tmp1d, .si(0xff), ._, ._ },
.{ .@"0:", ._, .mov, .tmp2d, .memsia(.src0d, .@"4", .tmp0, .add_4_len), ._, ._ },
.{ ._, ._, .@"and", .tmp2d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._r, .bs, .tmp2d, .tmp2d, ._, ._ },
.{ ._, ._z, .cmov, .tmp2d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .tmp3b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .tmp3b, .tmp2b, ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp3b, ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .bsf_bsr_0_clobbers_result, .slow_incdec, null, null },
.src_constraints = .{ .{ .scalar_int_is = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1d, .memsia(.src0d, .@"4", .tmp0, .add_4_len), ._, ._ },
.{ ._, ._, .@"and", .tmp1d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._r, .bs, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .tmp2b, .sa(.src0, .add_bit_size), ._, ._ },
.{ ._, ._z, .j, .@"1f", ._, ._, ._ },
.{ ._, ._c, .st, ._, ._, ._, ._ },
.{ ._, ._, .sbb, .tmp2b, .tmp1b, ._, ._ },
.{ .@"1:", ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp2b, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .bsf_bsr_0_clobbers_result, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1d, .memsia(.src0d, .@"4", .tmp0, .add_4_len), ._, ._ },
.{ ._, ._, .@"and", .tmp1d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._r, .bs, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .tmp2b, .sa(.src0, .add_bit_size), ._, ._ },
.{ ._, ._z, .j, .@"1f", ._, ._, ._ },
.{ ._, ._c, .st, ._, ._, ._, ._ },
.{ ._, ._, .sbb, .tmp2b, .tmp1b, ._, ._ },
.{ .@"1:", ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp2b, ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .slow_incdec, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1d, .memsia(.src0d, .@"4", .tmp0, .add_4_len), ._, ._ },
.{ ._, ._, .@"and", .tmp1d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._, .mov, .tmp2d, .si(0xff), ._, ._ },
.{ ._, ._r, .bs, .tmp2d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .tmp1b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .tmp1b, .tmp2b, ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp1b, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .scalar_int_is = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1d, .memsia(.src0d, .@"4", .tmp0, .add_4_len), ._, ._ },
.{ ._, ._, .@"and", .tmp1d, .sa(.src0, .add_umax), ._, ._ },
.{ ._, ._, .mov, .tmp2d, .si(0xff), ._, ._ },
.{ ._, ._r, .bs, .tmp2d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .tmp1b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .tmp1b, .tmp2b, ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp1b, ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
}, .{
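// 64-bit element variants of the BSR ladder. The umax mask is
// reloaded inside the loop because the AND overwrites it with the
// masked element value on every iteration.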
.required_features = .{ .@"64bit", .cmov, .bsf_bsr_0_clobbers_result, .slow_incdec },
.src_constraints = .{ .{ .scalar_int_is = .qword }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ ._, ._, .mov, .tmp1d, .si(0xff), ._, ._ },
.{ .@"0:", ._, .mov, .tmp2q, .ua(.src0, .add_umax), ._, ._ },
.{ ._, ._, .@"and", .tmp2q, .memsia(.src0q, .@"8", .tmp0, .add_8_len), ._, ._ },
.{ ._, ._r, .bs, .tmp2q, .tmp2q, ._, ._ },
.{ ._, ._z, .cmov, .tmp2d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .tmp3b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .tmp3b, .tmp2b, ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp3b, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .cmov, .bsf_bsr_0_clobbers_result, null },
.src_constraints = .{ .{ .scalar_int_is = .qword }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ ._, ._, .mov, .tmp1d, .si(0xff), ._, ._ },
.{ .@"0:", ._, .mov, .tmp2q, .ua(.src0, .add_umax), ._, ._ },
.{ ._, ._, .@"and", .tmp2q, .memsia(.src0q, .@"8", .tmp0, .add_8_len), ._, ._ },
.{ ._, ._r, .bs, .tmp2q, .tmp2q, ._, ._ },
.{ ._, ._z, .cmov, .tmp2d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .tmp3b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .tmp3b, .tmp2b, ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp3b, ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .bsf_bsr_0_clobbers_result, .slow_incdec, null },
.src_constraints = .{ .{ .scalar_int_is = .qword }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1q, .ua(.src0, .add_umax), ._, ._ },
.{ ._, ._, .@"and", .tmp1q, .memsia(.src0q, .@"8", .tmp0, .add_8_len), ._, ._ },
.{ ._, ._r, .bs, .tmp1q, .tmp1q, ._, ._ },
.{ ._, ._, .mov, .tmp2b, .sa(.src0, .add_bit_size), ._, ._ },
.{ ._, ._z, .j, .@"1f", ._, ._, ._ },
.{ ._, ._c, .st, ._, ._, ._, ._ },
.{ ._, ._, .sbb, .tmp2b, .tmp1b, ._, ._ },
.{ .@"1:", ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp2b, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .bsf_bsr_0_clobbers_result, null, null },
.src_constraints = .{ .{ .scalar_int_is = .qword }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1q, .ua(.src0, .add_umax), ._, ._ },
.{ ._, ._, .@"and", .tmp1q, .memsia(.src0q, .@"8", .tmp0, .add_8_len), ._, ._ },
.{ ._, ._r, .bs, .tmp1q, .tmp1q, ._, ._ },
.{ ._, ._, .mov, .tmp2b, .sa(.src0, .add_bit_size), ._, ._ },
.{ ._, ._z, .j, .@"1f", ._, ._, ._ },
.{ ._, ._c, .st, ._, ._, ._, ._ },
.{ ._, ._, .sbb, .tmp2b, .tmp1b, ._, ._ },
.{ .@"1:", ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp2b, ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .slow_incdec, null, null },
.src_constraints = .{ .{ .scalar_int_is = .qword }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1q, .ua(.src0, .add_umax), ._, ._ },
.{ ._, ._, .@"and", .tmp1q, .memsia(.src0q, .@"8", .tmp0, .add_8_len), ._, ._ },
.{ ._, ._, .mov, .tmp2d, .si(0xff), ._, ._ },
.{ ._, ._r, .bs, .tmp2q, .tmp1q, ._, ._ },
.{ ._, ._, .mov, .tmp1b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .tmp1b, .tmp2b, ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp1b, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .qword }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1q, .ua(.src0, .add_umax), ._, ._ },
.{ ._, ._, .@"and", .tmp1q, .memsia(.src0q, .@"8", .tmp0, .add_8_len), ._, ._ },
.{ ._, ._, .mov, .tmp2d, .si(0xff), ._, ._ },
.{ ._, ._r, .bs, .tmp2q, .tmp1q, ._, ._ },
.{ ._, ._, .mov, .tmp1b, .sia(-1, .src0, .add_bit_size), ._, ._ },
.{ ._, ._, .sub, .tmp1b, .tmp2b, ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp1b, ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
}, .{
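// Arrays of big ints: an outer loop walks the elements while an
// inner loop runs the limb scan from above on each element; LEA then
// advances the element pointer by the element size.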
.required_features = .{ .@"64bit", .false_deps_lzcnt_tzcnt, .lzcnt, null },
.dst_constraints = .{.{ .scalar_int_is = .byte }},
.src_constraints = .{ .{ .scalar_remainder_int = .{ .of = .xword, .is = .qword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ ._, ._, .lea, .tmp1q, .mem(.src0), ._, ._ },
.{ .@"0:", ._, .mov, .tmp2d, .sia(-16, .none, .add_src0_elem_size), ._, ._ },
.{ ._, ._, .mov, .tmp3q, .ua(.src0, .add_umax), ._, ._ },
.{ .@"1:", ._, .@"and", .tmp3q, .leai(.qword, .tmp1, .tmp2), ._, ._ },
.{ ._, ._, .xor, .tmp4d, .tmp4d, ._, ._ },
.{ ._, ._, .lzcnt, .tmp4q, .tmp3q, ._, ._ },
.{ ._, ._nc, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3q, .si(-1), ._, ._ },
.{ ._, ._, .sub, .tmp2d, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"1b", ._, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"1:", ._, .neg, .tmp2d, ._, ._, ._ },
.{ ._, ._, .lea, .tmp3d, .leasiad(.none, .tmp4, .@"8", .tmp2, .add_src0_bit_size, -64), ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp3b, ._, ._ },
.{ ._, ._, .lea, .tmp1q, .leaa(.none, .tmp1, .add_src0_elem_size), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .lzcnt, null, null },
.dst_constraints = .{.{ .scalar_int_is = .byte }},
.src_constraints = .{ .{ .scalar_remainder_int = .{ .of = .xword, .is = .qword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ ._, ._, .lea, .tmp1q, .mem(.src0), ._, ._ },
.{ .@"0:", ._, .mov, .tmp2d, .sia(-16, .none, .add_src0_elem_size), ._, ._ },
.{ ._, ._, .mov, .tmp3q, .ua(.src0, .add_umax), ._, ._ },
.{ .@"1:", ._, .@"and", .tmp3q, .leai(.qword, .tmp1, .tmp2), ._, ._ },
.{ ._, ._, .lzcnt, .tmp4q, .tmp3q, ._, ._ },
.{ ._, ._nc, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3q, .si(-1), ._, ._ },
.{ ._, ._, .sub, .tmp2d, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"1b", ._, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"1:", ._, .neg, .tmp2d, ._, ._, ._ },
.{ ._, ._, .lea, .tmp3d, .leasiad(.none, .tmp4, .@"8", .tmp2, .add_src0_bit_size, -64), ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp3b, ._, ._ },
.{ ._, ._, .lea, .tmp1q, .leaa(.none, .tmp1, .add_src0_elem_size), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.dst_constraints = .{.{ .scalar_int_is = .byte }},
.src_constraints = .{ .{ .scalar_remainder_int = .{ .of = .xword, .is = .qword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ ._, ._, .lea, .tmp1q, .mem(.src0), ._, ._ },
.{ .@"0:", ._, .mov, .tmp2d, .sia(-16, .none, .add_src0_elem_size), ._, ._ },
.{ ._, ._, .mov, .tmp3q, .ua(.src0, .add_umax), ._, ._ },
.{ .@"1:", ._, .@"and", .tmp3q, .leai(.qword, .tmp1, .tmp2), ._, ._ },
.{ ._, ._r, .bs, .tmp3q, .tmp3q, ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3q, .si(-1), ._, ._ },
.{ ._, ._, .sub, .tmp2d, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"1b", ._, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"1:", ._, .lea, .tmp3d, .leasiad(.none, .tmp3, .@"8", .tmp2, .sub_src0_bit_size, 1), ._, ._ },
.{ ._, ._, .neg, .tmp3b, ._, ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp3b, ._, ._ },
.{ ._, ._, .lea, .tmp1q, .leaa(.none, .tmp1, .add_src0_elem_size), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .false_deps_lzcnt_tzcnt, .lzcnt, null },
.dst_constraints = .{.{ .scalar_int_is = .byte }},
.src_constraints = .{ .{ .scalar_remainder_int = .{ .of = .xword, .is = .xword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ ._, ._, .lea, .tmp1q, .mem(.src0), ._, ._ },
.{ .@"0:", ._, .mov, .tmp2d, .sia(-8, .none, .add_src0_elem_size), ._, ._ },
.{ ._, ._, .mov, .tmp3q, .ua(.src0, .add_umax), ._, ._ },
.{ .@"1:", ._, .@"and", .tmp3q, .leai(.qword, .tmp1, .tmp2), ._, ._ },
.{ ._, ._, .xor, .tmp4d, .tmp4d, ._, ._ },
.{ ._, ._, .lzcnt, .tmp4q, .tmp3q, ._, ._ },
.{ ._, ._nc, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3q, .si(-1), ._, ._ },
.{ ._, ._, .sub, .tmp2d, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"1b", ._, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"1:", ._, .neg, .tmp2d, ._, ._, ._ },
.{ ._, ._, .lea, .tmp3d, .leasiad(.none, .tmp4, .@"8", .tmp2, .add_src0_bit_size, -64), ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp3b, ._, ._ },
.{ ._, ._, .lea, .tmp1q, .leaa(.none, .tmp1, .add_src0_elem_size), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .lzcnt, null, null },
.dst_constraints = .{.{ .scalar_int_is = .byte }},
.src_constraints = .{ .{ .scalar_remainder_int = .{ .of = .xword, .is = .xword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ ._, ._, .lea, .tmp1q, .mem(.src0), ._, ._ },
.{ .@"0:", ._, .mov, .tmp2d, .sia(-8, .none, .add_src0_elem_size), ._, ._ },
.{ ._, ._, .mov, .tmp3q, .ua(.src0, .add_umax), ._, ._ },
.{ .@"1:", ._, .@"and", .tmp3q, .leai(.qword, .tmp1, .tmp2), ._, ._ },
.{ ._, ._, .lzcnt, .tmp4q, .tmp3q, ._, ._ },
.{ ._, ._nc, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3q, .si(-1), ._, ._ },
.{ ._, ._, .sub, .tmp2d, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"1b", ._, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"1:", ._, .neg, .tmp2d, ._, ._, ._ },
.{ ._, ._, .lea, .tmp3d, .leasiad(.none, .tmp4, .@"8", .tmp2, .add_src0_bit_size, -64), ._, ._ },
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp3b, ._, ._ },
.{ ._, ._, .lea, .tmp1q, .leaa(.none, .tmp1, .add_src0_elem_size), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.dst_constraints = .{.{ .scalar_int_is = .byte }},
.src_constraints = .{ .{ .scalar_remainder_int = .{ .of = .xword, .is = .xword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
.{ ._, ._, .lea, .tmp1q, .mem(.src0), ._, ._ },
.{ .@"0:", ._, .mov, .tmp2d, .sia(-8, .none, .add_src0_elem_size), ._, ._ },
.{ ._, ._, .mov, .tmp3q, .ua(.src0, .add_umax), ._, ._ },
.{ .@"1:", ._, .@"and", .tmp3q, .leai(.qword, .tmp1, .tmp2), ._, ._ },
.{ ._, ._r, .bs, .tmp3q, .tmp3q, ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3q, .si(-1), ._, ._ },
.{ ._, ._, .sub, .tmp2d, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"1b", ._, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"1:", ._, .lea, .tmp3d, .leasiad(.none, .tmp3, .@"8", .tmp2, .sub_src0_bit_size, 1), ._, ._ },
.{ ._, ._, .neg, .tmp3b, ._, ._, ._ },
|
|
.{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_len), .tmp3b, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1q, .leaa(.none, .tmp1, .add_src0_elem_size), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", .false_deps_lzcnt_tzcnt, .lzcnt, null },
|
|
.dst_constraints = .{.{ .scalar_int_is = .word }},
|
|
.src_constraints = .{ .{ .scalar_remainder_int = .{ .of = .xword, .is = .qword } }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1q, .mem(.src0), ._, ._ },
|
|
.{ .@"0:", ._, .mov, .tmp2d, .sia(-16, .none, .add_src0_elem_size), ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3q, .ua(.src0, .add_umax), ._, ._ },
|
|
.{ .@"1:", ._, .@"and", .tmp3q, .leai(.qword, .tmp1, .tmp2), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp4d, .tmp4d, ._, ._ },
|
|
.{ ._, ._, .lzcnt, .tmp4q, .tmp3q, ._, ._ },
|
|
.{ ._, ._nc, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3q, .si(-1), ._, ._ },
|
|
.{ ._, ._, .sub, .tmp2d, .si(8), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"1b", ._, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
|
|
.{ .@"1:", ._, .neg, .tmp2d, ._, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp3d, .leasiad(.none, .tmp4, .@"8", .tmp2, .add_src0_bit_size, -64), ._, ._ },
|
|
.{ ._, ._, .mov, .memsia(.dst0w, .@"2", .tmp0, .add_2_len), .tmp3w, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1q, .leaa(.none, .tmp1, .add_src0_elem_size), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", .lzcnt, null, null },
|
|
.dst_constraints = .{.{ .scalar_int_is = .word }},
|
|
.src_constraints = .{ .{ .scalar_remainder_int = .{ .of = .xword, .is = .qword } }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1q, .mem(.src0), ._, ._ },
|
|
.{ .@"0:", ._, .mov, .tmp2d, .sia(-16, .none, .add_src0_elem_size), ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3q, .ua(.src0, .add_umax), ._, ._ },
|
|
.{ .@"1:", ._, .@"and", .tmp3q, .leai(.qword, .tmp1, .tmp2), ._, ._ },
|
|
.{ ._, ._, .lzcnt, .tmp4q, .tmp3q, ._, ._ },
|
|
.{ ._, ._nc, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3q, .si(-1), ._, ._ },
|
|
.{ ._, ._, .sub, .tmp2d, .si(8), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"1b", ._, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
|
|
.{ .@"1:", ._, .neg, .tmp2d, ._, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp3d, .leasiad(.none, .tmp4, .@"8", .tmp2, .add_src0_bit_size, -64), ._, ._ },
|
|
.{ ._, ._, .mov, .memsia(.dst0w, .@"2", .tmp0, .add_2_len), .tmp3w, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1q, .leaa(.none, .tmp1, .add_src0_elem_size), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", null, null, null },
|
|
.dst_constraints = .{.{ .scalar_int_is = .word }},
|
|
.src_constraints = .{ .{ .scalar_remainder_int = .{ .of = .xword, .is = .qword } }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1q, .mem(.src0), ._, ._ },
|
|
.{ .@"0:", ._, .mov, .tmp2d, .sia(-16, .none, .add_src0_elem_size), ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3q, .ua(.src0, .add_umax), ._, ._ },
|
|
.{ .@"1:", ._, .@"and", .tmp3q, .leai(.qword, .tmp1, .tmp2), ._, ._ },
|
|
.{ ._, ._r, .bs, .tmp3q, .tmp3q, ._, ._ },
|
|
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3q, .si(-1), ._, ._ },
|
|
.{ ._, ._, .sub, .tmp2d, .si(8), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"1b", ._, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
|
|
.{ .@"1:", ._, .lea, .tmp3d, .leasiad(.none, .tmp3, .@"8", .tmp2, .sub_src0_bit_size, 1), ._, ._ },
|
|
.{ ._, ._, .neg, .tmp3d, ._, ._, ._ },
|
|
.{ ._, ._, .mov, .memsia(.dst0w, .@"2", .tmp0, .add_2_len), .tmp3w, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1q, .leaa(.none, .tmp1, .add_src0_elem_size), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", .false_deps_lzcnt_tzcnt, .lzcnt, null },
|
|
.dst_constraints = .{.{ .scalar_int_is = .word }},
|
|
.src_constraints = .{ .{ .scalar_remainder_int = .{ .of = .xword, .is = .xword } }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1q, .mem(.src0), ._, ._ },
|
|
.{ .@"0:", ._, .mov, .tmp2d, .sia(-8, .none, .add_src0_elem_size), ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3q, .ua(.src0, .add_umax), ._, ._ },
|
|
.{ .@"1:", ._, .@"and", .tmp3q, .leai(.qword, .tmp1, .tmp2), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp4d, .tmp4d, ._, ._ },
|
|
.{ ._, ._, .lzcnt, .tmp4q, .tmp3q, ._, ._ },
|
|
.{ ._, ._nc, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3q, .si(-1), ._, ._ },
|
|
.{ ._, ._, .sub, .tmp2d, .si(8), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"1b", ._, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
|
|
.{ .@"1:", ._, .neg, .tmp2d, ._, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp3d, .leasiad(.none, .tmp4, .@"8", .tmp2, .add_src0_bit_size, -64), ._, ._ },
|
|
.{ ._, ._, .mov, .memsia(.dst0w, .@"2", .tmp0, .add_2_len), .tmp3w, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1q, .leaa(.none, .tmp1, .add_src0_elem_size), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", .lzcnt, null, null },
|
|
.dst_constraints = .{.{ .scalar_int_is = .word }},
|
|
.src_constraints = .{ .{ .scalar_remainder_int = .{ .of = .xword, .is = .xword } }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1q, .mem(.src0), ._, ._ },
|
|
.{ .@"0:", ._, .mov, .tmp2d, .sia(-8, .none, .add_src0_elem_size), ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3q, .ua(.src0, .add_umax), ._, ._ },
|
|
.{ .@"1:", ._, .@"and", .tmp3q, .leai(.qword, .tmp1, .tmp2), ._, ._ },
|
|
.{ ._, ._, .lzcnt, .tmp4q, .tmp3q, ._, ._ },
|
|
.{ ._, ._nc, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3q, .si(-1), ._, ._ },
|
|
.{ ._, ._, .sub, .tmp2d, .si(8), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"1b", ._, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
|
|
.{ .@"1:", ._, .neg, .tmp2d, ._, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp3d, .leasiad(.none, .tmp4, .@"8", .tmp2, .add_src0_bit_size, -64), ._, ._ },
|
|
.{ ._, ._, .mov, .memsia(.dst0w, .@"2", .tmp0, .add_2_len), .tmp3w, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1q, .leaa(.none, .tmp1, .add_src0_elem_size), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", null, null, null },
|
|
.dst_constraints = .{.{ .scalar_int_is = .word }},
|
|
.src_constraints = .{ .{ .scalar_remainder_int = .{ .of = .xword, .is = .xword } }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_len), ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1q, .mem(.src0), ._, ._ },
|
|
.{ .@"0:", ._, .mov, .tmp2d, .sia(-8, .none, .add_src0_elem_size), ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3q, .ua(.src0, .add_umax), ._, ._ },
|
|
.{ .@"1:", ._, .@"and", .tmp3q, .leai(.qword, .tmp1, .tmp2), ._, ._ },
|
|
.{ ._, ._r, .bs, .tmp3q, .tmp3q, ._, ._ },
|
|
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3q, .si(-1), ._, ._ },
|
|
.{ ._, ._, .sub, .tmp2d, .si(8), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"1b", ._, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
|
|
.{ .@"1:", ._, .lea, .tmp3d, .leasiad(.none, .tmp3, .@"8", .tmp2, .sub_src0_bit_size, 1), ._, ._ },
|
|
.{ ._, ._, .neg, .tmp3d, ._, ._, ._ },
|
|
.{ ._, ._, .mov, .memsia(.dst0w, .@"2", .tmp0, .add_2_len), .tmp3w, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1q, .leaa(.none, .tmp1, .add_src0_elem_size), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
} }) catch |err| switch (err) {
error.SelectFailed => return cg.fail("failed to select {s} {} {}", .{
@tagName(air_tag),
cg.typeOf(ty_op.operand).fmt(pt),
ops[0].tracking(cg),
}),
else => |e| return e,
};
try res[0].finish(inst, &.{ty_op.operand}, &ops, cg);
},

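// Vector compare lowering: this select-based path only implements eq/neq
// (pcmpeq*/vpcmpeq* for SIMD operands, XOR tricks for packed bool vectors);
// every other comparison operator falls back to cg.airCmpVector.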
.cmp_vector, .cmp_vector_optimized => |air_tag| if (use_old) try cg.airCmpVector(inst) else fallback: {
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
const extra = cg.air.extraData(Air.VectorCmp, ty_pl.payload).data;
switch (extra.compareOperator()) {
.eq, .neq => {},
else => break :fallback try cg.airCmpVector(inst),
}
var ops = try cg.tempsFromOperands(inst, .{ extra.lhs, extra.rhs });
var res: [1]Temp = undefined;
switch (extra.compareOperator()) {
.lt => unreachable,
.lte => unreachable,
.eq, .neq => |cmp_op| cg.select(&res, &.{ty_pl.ty.toType()}, &ops, switch (@as(Condition, switch (cmp_op) {
else => unreachable,
.eq => .e,
.neq => .ne,
})) {
else => unreachable,
inline .e, .ne => |cc| comptime &.{ .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .byte }, .{ .scalar_int_is = .byte } },
.patterns = &.{
.{ .src = .{ .to_ymm, .mem } },
.{ .src = .{ .mem, .to_ymm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_ymm, .to_ymm } },
},
.dst_temps = .{.{ .mut_rc_mask = .{ .ref = .src0, .rc = .sse, .info = .{
.kind = .all,
.inverted = switch (cc) {
else => unreachable,
.e => false,
.ne => true,
},
.scalar = .byte,
} } }},
.each = .{ .once = &.{
.{ ._, .vp_b, .cmpeq, .dst0y, .src0y, .src1y, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .word }, .{ .scalar_int_is = .word } },
.patterns = &.{
.{ .src = .{ .to_ymm, .mem } },
.{ .src = .{ .mem, .to_ymm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_ymm, .to_ymm } },
},
.dst_temps = .{.{ .mut_rc_mask = .{ .ref = .src0, .rc = .sse, .info = .{
.kind = .all,
.inverted = switch (cc) {
else => unreachable,
.e => false,
.ne => true,
},
.scalar = .word,
} } }},
.each = .{ .once = &.{
.{ ._, .vp_w, .cmpeq, .dst0y, .src0y, .src1y, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .dword }, .{ .scalar_int_is = .dword } },
.patterns = &.{
.{ .src = .{ .to_ymm, .mem } },
.{ .src = .{ .mem, .to_ymm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_ymm, .to_ymm } },
},
.dst_temps = .{.{ .mut_rc_mask = .{ .ref = .src0, .rc = .sse, .info = .{
.kind = .all,
.inverted = switch (cc) {
else => unreachable,
.e => false,
.ne => true,
},
.scalar = .dword,
} } }},
.each = .{ .once = &.{
.{ ._, .vp_d, .cmpeq, .dst0y, .src0y, .src1y, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .qword }, .{ .scalar_int_is = .qword } },
.patterns = &.{
.{ .src = .{ .to_ymm, .mem } },
.{ .src = .{ .mem, .to_ymm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_ymm, .to_ymm } },
},
.dst_temps = .{.{ .mut_rc_mask = .{ .ref = .src0, .rc = .sse, .info = .{
.kind = .all,
.inverted = switch (cc) {
else => unreachable,
.e => false,
.ne => true,
},
.scalar = .qword,
} } }},
.each = .{ .once = &.{
.{ ._, .vp_q, .cmpeq, .dst0y, .src0y, .src1y, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .byte }, .{ .scalar_int_is = .byte } },
.patterns = &.{
.{ .src = .{ .to_xmm, .mem } },
.{ .src = .{ .mem, .to_xmm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_xmm, .to_xmm } },
},
.dst_temps = .{.{ .mut_rc_mask = .{ .ref = .src0, .rc = .sse, .info = .{
.kind = .all,
.inverted = switch (cc) {
else => unreachable,
.e => false,
.ne => true,
},
.scalar = .byte,
} } }},
.each = .{ .once = &.{
.{ ._, .vp_b, .cmpeq, .dst0x, .src0x, .src1x, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .word }, .{ .scalar_int_is = .word } },
.patterns = &.{
.{ .src = .{ .to_xmm, .mem } },
.{ .src = .{ .mem, .to_xmm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_xmm, .to_xmm } },
},
.dst_temps = .{.{ .mut_rc_mask = .{ .ref = .src0, .rc = .sse, .info = .{
.kind = .all,
.inverted = switch (cc) {
else => unreachable,
.e => false,
.ne => true,
},
.scalar = .word,
} } }},
.each = .{ .once = &.{
.{ ._, .vp_w, .cmpeq, .dst0x, .src0x, .src1x, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .dword }, .{ .scalar_int_is = .dword } },
.patterns = &.{
.{ .src = .{ .to_xmm, .mem } },
.{ .src = .{ .mem, .to_xmm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_xmm, .to_xmm } },
},
.dst_temps = .{.{ .mut_rc_mask = .{ .ref = .src0, .rc = .sse, .info = .{
.kind = .all,
.inverted = switch (cc) {
else => unreachable,
.e => false,
.ne => true,
},
.scalar = .dword,
} } }},
.each = .{ .once = &.{
.{ ._, .vp_d, .cmpeq, .dst0x, .src0x, .src1x, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .qword }, .{ .scalar_int_is = .qword } },
.patterns = &.{
.{ .src = .{ .to_xmm, .mem } },
.{ .src = .{ .mem, .to_xmm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_xmm, .to_xmm } },
},
.dst_temps = .{.{ .mut_rc_mask = .{ .ref = .src0, .rc = .sse, .info = .{
.kind = .all,
.inverted = switch (cc) {
else => unreachable,
.e => false,
.ne => true,
},
.scalar = .qword,
} } }},
.each = .{ .once = &.{
.{ ._, .vp_q, .cmpeq, .dst0x, .src0x, .src1x, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .byte }, .{ .scalar_int_is = .byte } },
.patterns = &.{
.{ .src = .{ .to_mut_xmm, .mem } },
.{ .src = .{ .mem, .to_mut_xmm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_xmm, .to_xmm } },
},
.dst_temps = .{.{ .ref_mask = .{ .ref = .src0, .info = .{
.kind = .all,
.inverted = switch (cc) {
else => unreachable,
.e => false,
.ne => true,
},
.scalar = .byte,
} } }},
.each = .{ .once = &.{
.{ ._, .p_b, .cmpeq, .dst0x, .src1x, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .word }, .{ .scalar_int_is = .word } },
.patterns = &.{
.{ .src = .{ .to_mut_xmm, .mem } },
.{ .src = .{ .mem, .to_mut_xmm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_xmm, .to_xmm } },
},
.dst_temps = .{.{ .ref_mask = .{ .ref = .src0, .info = .{
.kind = .all,
.inverted = switch (cc) {
else => unreachable,
.e => false,
.ne => true,
},
.scalar = .word,
} } }},
.each = .{ .once = &.{
.{ ._, .p_w, .cmpeq, .dst0x, .src1x, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .dword }, .{ .scalar_int_is = .dword } },
.patterns = &.{
.{ .src = .{ .to_mut_xmm, .mem } },
.{ .src = .{ .mem, .to_mut_xmm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_xmm, .to_xmm } },
},
.dst_temps = .{.{ .ref_mask = .{ .ref = .src0, .info = .{
.kind = .all,
.inverted = switch (cc) {
else => unreachable,
.e => false,
.ne => true,
},
.scalar = .dword,
} } }},
.each = .{ .once = &.{
.{ ._, .p_d, .cmpeq, .dst0x, .src1x, ._, ._ },
} },
}, .{
.required_features = .{ .sse4_1, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .qword }, .{ .scalar_int_is = .qword } },
.patterns = &.{
.{ .src = .{ .to_mut_xmm, .mem } },
.{ .src = .{ .mem, .to_mut_xmm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_xmm, .to_xmm } },
},
.dst_temps = .{.{ .ref_mask = .{ .ref = .src0, .info = .{
.kind = .all,
.inverted = switch (cc) {
else => unreachable,
.e => false,
.ne => true,
},
.scalar = .qword,
} } }},
.each = .{ .once = &.{
.{ ._, .p_q, .cmpeq, .dst0x, .src1x, ._, ._ },
} },
}, .{
.required_features = .{ .mmx, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .byte }, .{ .scalar_int_is = .byte } },
.patterns = &.{
.{ .src = .{ .to_mut_mm, .mem } },
.{ .src = .{ .mem, .to_mut_mm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_mm, .to_mm } },
},
.dst_temps = .{.{ .ref_mask = .{ .ref = .src0, .info = .{
.kind = .all,
.inverted = switch (cc) {
else => unreachable,
.e => false,
.ne => true,
},
.scalar = .byte,
} } }},
.each = .{ .once = &.{
.{ ._, .p_b, .cmpeq, .dst0q, .src1q, ._, ._ },
} },
}, .{
.required_features = .{ .mmx, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .word }, .{ .scalar_int_is = .word } },
.patterns = &.{
.{ .src = .{ .to_mut_mm, .mem } },
.{ .src = .{ .mem, .to_mut_mm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_mm, .to_mm } },
},
.dst_temps = .{.{ .ref_mask = .{ .ref = .src0, .info = .{
.kind = .all,
.inverted = switch (cc) {
else => unreachable,
.e => false,
.ne => true,
},
.scalar = .word,
} } }},
.each = .{ .once = &.{
.{ ._, .p_w, .cmpeq, .dst0q, .src1q, ._, ._ },
} },
}, .{
.required_features = .{ .mmx, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .dword }, .{ .scalar_int_is = .dword } },
.patterns = &.{
.{ .src = .{ .to_mut_mm, .mem } },
.{ .src = .{ .mem, .to_mut_mm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_mm, .to_mm } },
},
.dst_temps = .{.{ .ref_mask = .{ .ref = .src0, .info = .{
.kind = .all,
.inverted = switch (cc) {
else => unreachable,
.e => false,
.ne => true,
},
.scalar = .dword,
} } }},
.each = .{ .once = &.{
.{ ._, .p_d, .cmpeq, .dst0q, .src1q, ._, ._ },
} },
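// Packed bool vectors compare bitwise on their mask representation:
// eq is ~(src0 ^ src1) and ne is src0 ^ src1, so no SIMD unit is needed.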
}, .{
.src_constraints = .{ .{ .bool_vec = .byte }, .{ .bool_vec = .byte } },
.patterns = &.{
.{ .src = .{ .mut_mem, .imm8 } },
.{ .src = .{ .imm8, .mut_mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .imm8 } },
.{ .src = .{ .imm8, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .mut_mem, .to_gpr } },
.{ .src = .{ .to_gpr, .mut_mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .mem } },
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = switch (cc) {
else => unreachable,
.e => &.{
.{ ._, ._, .xor, .dst0b, .src1b, ._, ._ },
.{ ._, ._, .not, .dst0b, ._, ._, ._ },
},
.ne => &.{
.{ ._, ._, .xor, .dst0b, .src1b, ._, ._ },
},
} },
}, .{
.src_constraints = .{ .{ .bool_vec = .word }, .{ .bool_vec = .word } },
.patterns = &.{
.{ .src = .{ .mut_mem, .imm16 } },
.{ .src = .{ .imm16, .mut_mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .imm16 } },
.{ .src = .{ .imm16, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .mut_mem, .to_gpr } },
.{ .src = .{ .to_gpr, .mut_mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .mem } },
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = switch (cc) {
else => unreachable,
.e => &.{
.{ ._, ._, .xor, .dst0w, .src1w, ._, ._ },
.{ ._, ._, .not, .dst0w, ._, ._, ._ },
},
.ne => &.{
.{ ._, ._, .xor, .dst0w, .src1w, ._, ._ },
},
} },
}, .{
.src_constraints = .{ .{ .bool_vec = .dword }, .{ .bool_vec = .dword } },
.patterns = &.{
.{ .src = .{ .mut_mem, .imm32 } },
.{ .src = .{ .imm32, .mut_mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .imm32 } },
.{ .src = .{ .imm32, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .mut_mem, .to_gpr } },
.{ .src = .{ .to_gpr, .mut_mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .mem } },
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = switch (cc) {
else => unreachable,
.e => &.{
.{ ._, ._, .xor, .dst0d, .src1d, ._, ._ },
.{ ._, ._, .not, .dst0d, ._, ._, ._ },
},
.ne => &.{
.{ ._, ._, .xor, .dst0d, .src1d, ._, ._ },
},
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .bool_vec = .qword }, .{ .bool_vec = .qword } },
.patterns = &.{
.{ .src = .{ .mut_mem, .simm32 } },
.{ .src = .{ .simm32, .mut_mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .simm32 } },
.{ .src = .{ .simm32, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .mut_mem, .to_gpr } },
.{ .src = .{ .to_gpr, .mut_mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .mem } },
.{ .src = .{ .mem, .to_mut_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_gpr, .to_gpr } },
},
.dst_temps = .{.{ .ref = .src0 }},
.clobbers = .{ .eflags = true },
.each = .{ .once = switch (cc) {
else => unreachable,
.e => &.{
.{ ._, ._, .xor, .dst0q, .src1q, ._, ._ },
.{ ._, ._, .not, .dst0q, ._, ._, ._ },
},
.ne => &.{
.{ ._, ._, .xor, .dst0q, .src1q, ._, ._ },
},
} },
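// Generic fallback for bool vectors of any length: walk both operands in
// pointer-sized limbs, combining with XOR (plus NOT for eq) per limb.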
}, .{
.src_constraints = .{ .any_bool_vec, .any_bool_vec },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = switch (cc) {
else => unreachable,
.e => &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1p, .memia(.src0p, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .xor, .tmp1p, .memia(.src1p, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .not, .tmp1p, ._, ._, ._ },
.{ ._, ._, .mov, .memia(.dst0p, .tmp0, .add_size), .tmp1p, ._, ._ },
.{ ._, ._, .add, .tmp0p, .sa(.tmp1, .add_size), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
},
.ne => &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._, .mov, .tmp1p, .memia(.src0p, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .xor, .tmp1p, .memia(.src1p, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .mov, .memia(.dst0p, .tmp0, .add_size), .tmp1p, ._, ._ },
.{ ._, ._, .add, .tmp0p, .sa(.tmp1, .add_size), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
},
} },
} },
}, .{
|
|
.required_features = .{ .avx2, null, null, null },
|
|
.src_constraints = .{ .{ .scalar_int_is = .byte }, .{ .scalar_int_is = .byte } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = switch (cc) {
|
|
else => unreachable,
|
|
.e => &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ .@"0:", .v_dqu, .mov, .tmp3y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .vp_b, .cmpeq, .tmp3y, .tmp3y, .memia(.src1y, .tmp0, .add_size), ._ },
|
|
.{ ._, .vp_b, .movmsk, .tmp2d, .tmp3y, ._, ._ },
|
|
.{ ._, ._, .mov, .memi(.dst0d, .tmp1), .tmp2d, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 4), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
},
|
|
.ne => &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ .@"0:", .v_dqu, .mov, .tmp3y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .vp_b, .cmpeq, .tmp3y, .tmp3y, .memia(.src1y, .tmp0, .add_size), ._ },
|
|
.{ ._, .vp_b, .movmsk, .tmp2d, .tmp3y, ._, ._ },
|
|
.{ ._, ._, .not, .tmp2d, ._, ._, ._ },
|
|
.{ ._, ._, .mov, .memi(.dst0d, .tmp1), .tmp2d, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 4), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
},
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .avx2, null, null, null },
|
|
.src_constraints = .{ .{ .scalar_int_is = .word }, .{ .scalar_int_is = .word } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u16, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = switch (cc) {
|
|
else => unreachable,
|
|
.e => &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ .@"0:", .v_dqu, .mov, .tmp3y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .vp_w, .cmpeq, .tmp3y, .tmp3y, .memia(.src1y, .tmp0, .add_size), ._ },
|
|
.{ ._, .vp_b, .ackssw, .tmp3y, .tmp3y, .tmp3y, ._ },
|
|
.{ ._, .vp_b, .movmsk, .tmp2d, .tmp3y, ._, ._ },
|
|
.{ ._, ._, .mov, .memi(.dst0w, .tmp1), .tmp2w, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 2), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
},
|
|
.ne => &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ .@"0:", .v_dqu, .mov, .tmp3y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .vp_w, .cmpeq, .tmp3y, .tmp3y, .memia(.src1y, .tmp0, .add_size), ._ },
|
|
.{ ._, .vp_b, .ackssw, .tmp3y, .tmp3y, .tmp3y, ._ },
|
|
.{ ._, .vp_b, .movmsk, .tmp2d, .tmp3y, ._, ._ },
|
|
.{ ._, ._, .not, .tmp2d, ._, ._, ._ },
|
|
.{ ._, ._, .mov, .memi(.dst0w, .tmp1), .tmp2w, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 2), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
},
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .avx2, null, null, null },
|
|
.src_constraints = .{ .{ .scalar_int_is = .dword }, .{ .scalar_int_is = .dword } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = switch (cc) {
|
|
else => unreachable,
|
|
.e => &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ .@"0:", .v_dqu, .mov, .tmp3y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .vp_d, .cmpeq, .tmp3y, .tmp3y, .memia(.src1y, .tmp0, .add_size), ._ },
|
|
.{ ._, .v_ps, .movmsk, .tmp2d, .tmp3y, ._, ._ },
|
|
.{ ._, ._, .mov, .memi(.dst0b, .tmp1), .tmp2b, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 1), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
},
|
|
.ne => &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ .@"0:", .v_dqu, .mov, .tmp3y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .vp_d, .cmpeq, .tmp3y, .tmp3y, .memia(.src1y, .tmp0, .add_size), ._ },
|
|
.{ ._, .v_ps, .movmsk, .tmp2d, .tmp3y, ._, ._ },
|
|
.{ ._, ._, .not, .tmp2b, ._, ._, ._ },
|
|
.{ ._, ._, .mov, .memi(.dst0b, .tmp1), .tmp2b, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 1), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
},
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .avx2, null, null, null },
|
|
.src_constraints = .{ .{ .scalar_int_is = .qword }, .{ .scalar_int_is = .qword } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .reg = .rcx } },
|
|
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = switch (cc) {
|
|
else => unreachable,
|
|
.e => &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
|
|
.{ .@"0:", .v_dqu, .mov, .tmp4y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .vp_q, .cmpeq, .tmp4y, .tmp4y, .memia(.src1y, .tmp0, .add_size), ._ },
|
|
.{ ._, .v_pd, .movmsk, .tmp3d, .tmp4y, ._, ._ },
|
|
.{ ._, ._l, .ro, .tmp3b, .tmp1b, ._, ._ },
|
|
.{ ._, ._, .@"or", .tmp2b, .tmp3b, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 4), ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
|
|
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp3d, .si(3), ._, ._ },
|
|
.{ ._, ._, .mov, .memid(.dst0b, .tmp3, -1), .tmp2b, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
|
|
.{ .@"1:", ._, .add, .tmp0p, .si(32), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
|
|
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp3d, .si(3), ._, ._ },
|
|
.{ ._, ._, .mov, .memi(.dst0b, .tmp3), .tmp2b, ._, ._ },
|
|
},
|
|
.ne => &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
|
|
.{ .@"0:", .v_dqu, .mov, .tmp4y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .vp_q, .cmpeq, .tmp4y, .tmp4y, .memia(.src1y, .tmp0, .add_size), ._ },
|
|
.{ ._, .v_pd, .movmsk, .tmp3d, .tmp4y, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp3b, .si(0b1111), ._, ._ },
|
|
.{ ._, ._l, .ro, .tmp3b, .tmp1b, ._, ._ },
|
|
.{ ._, ._, .@"or", .tmp2b, .tmp3b, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 4), ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
|
|
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp3d, .si(3), ._, ._ },
|
|
.{ ._, ._, .mov, .memid(.dst0b, .tmp3, -1), .tmp2b, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
|
|
.{ .@"1:", ._, .add, .tmp0p, .si(32), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
|
|
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp3d, .si(3), ._, ._ },
|
|
.{ ._, ._, .mov, .memi(.dst0b, .tmp3), .tmp2b, ._, ._ },
|
|
},
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .avx, null, null, null },
|
|
.src_constraints = .{ .{ .scalar_int_is = .byte }, .{ .scalar_int_is = .byte } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u16, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = switch (cc) {
|
|
else => unreachable,
|
|
.e => &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ .@"0:", .v_dqu, .mov, .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .vp_b, .cmpeq, .tmp3x, .tmp3x, .memia(.src1x, .tmp0, .add_size), ._ },
|
|
.{ ._, .vp_b, .movmsk, .tmp2d, .tmp3x, ._, ._ },
|
|
.{ ._, ._, .mov, .memi(.dst0w, .tmp1), .tmp2w, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 2), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
},
|
|
.ne => &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ .@"0:", .v_dqu, .mov, .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .vp_b, .cmpeq, .tmp3x, .tmp3x, .memia(.src1x, .tmp0, .add_size), ._ },
|
|
.{ ._, .vp_b, .movmsk, .tmp2d, .tmp3x, ._, ._ },
|
|
.{ ._, ._, .not, .tmp2d, ._, ._, ._ },
|
|
.{ ._, ._, .mov, .memi(.dst0w, .tmp1), .tmp2w, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 2), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
},
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .avx, null, null, null },
|
|
.src_constraints = .{ .{ .scalar_int_is = .word }, .{ .scalar_int_is = .word } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = switch (cc) {
|
|
else => unreachable,
|
|
.e => &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ .@"0:", .v_dqu, .mov, .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .vp_w, .cmpeq, .tmp3x, .tmp3x, .memia(.src1x, .tmp0, .add_size), ._ },
|
|
.{ ._, .vp_b, .ackssw, .tmp3x, .tmp3x, .tmp3x, ._ },
|
|
.{ ._, .vp_b, .movmsk, .tmp2d, .tmp3x, ._, ._ },
|
|
.{ ._, ._, .mov, .memi(.dst0b, .tmp1), .tmp2b, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 1), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
},
|
|
.ne => &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ .@"0:", .v_dqu, .mov, .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .vp_w, .cmpeq, .tmp3x, .tmp3x, .memia(.src1x, .tmp0, .add_size), ._ },
|
|
.{ ._, .vp_b, .ackssw, .tmp3x, .tmp3x, .tmp3x, ._ },
|
|
.{ ._, .vp_b, .movmsk, .tmp2d, .tmp3x, ._, ._ },
|
|
.{ ._, ._, .not, .tmp2b, ._, ._, ._ },
|
|
.{ ._, ._, .mov, .memi(.dst0b, .tmp1), .tmp2b, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 1), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
},
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .avx, null, null, null },
|
|
.src_constraints = .{ .{ .scalar_int_is = .dword }, .{ .scalar_int_is = .dword } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .reg = .rcx } },
|
|
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = switch (cc) {
|
|
else => unreachable,
|
|
.e => &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
|
|
.{ .@"0:", .v_dqu, .mov, .tmp4x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .vp_d, .cmpeq, .tmp4x, .tmp4x, .memia(.src1x, .tmp0, .add_size), ._ },
|
|
.{ ._, .v_ps, .movmsk, .tmp3d, .tmp4x, ._, ._ },
|
|
.{ ._, ._l, .ro, .tmp3b, .tmp1b, ._, ._ },
|
|
.{ ._, ._, .@"or", .tmp2b, .tmp3b, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 4), ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
|
|
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp3d, .si(3), ._, ._ },
|
|
.{ ._, ._, .mov, .memid(.dst0b, .tmp3, -1), .tmp2b, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
|
|
.{ .@"1:", ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
|
|
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp3d, .si(3), ._, ._ },
|
|
.{ ._, ._, .mov, .memi(.dst0b, .tmp3), .tmp2b, ._, ._ },
|
|
},
|
|
.ne => &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
|
|
.{ .@"0:", .v_dqu, .mov, .tmp4x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .vp_d, .cmpeq, .tmp4x, .tmp4x, .memia(.src1x, .tmp0, .add_size), ._ },
|
|
.{ ._, .v_ps, .movmsk, .tmp3d, .tmp4x, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp3b, .si(0b1111), ._, ._ },
|
|
.{ ._, ._l, .ro, .tmp3b, .tmp1b, ._, ._ },
|
|
.{ ._, ._, .@"or", .tmp2b, .tmp3b, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 4), ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
|
|
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp3d, .si(3), ._, ._ },
|
|
.{ ._, ._, .mov, .memid(.dst0b, .tmp3, -1), .tmp2b, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
|
|
.{ .@"1:", ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
|
|
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp3d, .si(3), ._, ._ },
|
|
.{ ._, ._, .mov, .memi(.dst0b, .tmp3), .tmp2b, ._, ._ },
|
|
},
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .avx, null, null, null },
|
|
.src_constraints = .{ .{ .scalar_int_is = .qword }, .{ .scalar_int_is = .qword } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .reg = .rcx } },
|
|
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = switch (cc) {
|
|
else => unreachable,
|
|
.e => &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
|
|
.{ .@"0:", .v_dqu, .mov, .tmp4x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .vp_q, .cmpeq, .tmp4x, .tmp4x, .memia(.src1x, .tmp0, .add_size), ._ },
|
|
.{ ._, .v_pd, .movmsk, .tmp3d, .tmp4x, ._, ._ },
|
|
.{ ._, ._l, .ro, .tmp3b, .tmp1b, ._, ._ },
|
|
.{ ._, ._, .@"or", .tmp2b, .tmp3b, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 2), ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
|
|
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp3d, .si(3), ._, ._ },
|
|
.{ ._, ._, .mov, .memid(.dst0b, .tmp3, -1), .tmp2b, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
|
|
.{ .@"1:", ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
|
|
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp3d, .si(3), ._, ._ },
|
|
.{ ._, ._, .mov, .memi(.dst0b, .tmp3), .tmp2b, ._, ._ },
|
|
},
|
|
.ne => &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
|
|
.{ .@"0:", .v_dqu, .mov, .tmp4x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .vp_q, .cmpeq, .tmp4x, .tmp4x, .memia(.src1x, .tmp0, .add_size), ._ },
|
|
.{ ._, .v_pd, .movmsk, .tmp3d, .tmp4x, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp3b, .si(0b11), ._, ._ },
|
|
.{ ._, ._l, .ro, .tmp3b, .tmp1b, ._, ._ },
|
|
.{ ._, ._, .@"or", .tmp2b, .tmp3b, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 2), ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
|
|
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp3d, .si(3), ._, ._ },
|
|
.{ ._, ._, .mov, .memid(.dst0b, .tmp3, -1), .tmp2b, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
|
|
.{ .@"1:", ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
|
|
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp3d, .si(3), ._, ._ },
|
|
.{ ._, ._, .mov, .memi(.dst0b, .tmp3), .tmp2b, ._, ._ },
|
|
},
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .sse2, null, null, null },
|
|
.src_constraints = .{ .{ .scalar_int_is = .byte }, .{ .scalar_int_is = .byte } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u16, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = switch (cc) {
|
|
else => unreachable,
|
|
.e => &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ .@"0:", ._dqu, .mov, .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .p_b, .cmpeq, .tmp3x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .p_b, .movmsk, .tmp2d, .tmp3x, ._, ._ },
|
|
.{ ._, ._, .mov, .memi(.dst0w, .tmp1), .tmp2w, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 2), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
},
|
|
.ne => &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ .@"0:", ._dqu, .mov, .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .p_b, .cmpeq, .tmp3x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .p_b, .movmsk, .tmp2d, .tmp3x, ._, ._ },
|
|
.{ ._, ._, .not, .tmp2d, ._, ._, ._ },
|
|
.{ ._, ._, .mov, .memi(.dst0w, .tmp1), .tmp2w, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 2), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
},
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .sse2, null, null, null },
|
|
.src_constraints = .{ .{ .scalar_int_is = .word }, .{ .scalar_int_is = .word } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = switch (cc) {
|
|
else => unreachable,
|
|
.e => &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ .@"0:", ._dqu, .mov, .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .p_w, .cmpeq, .tmp3x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .p_b, .ackssw, .tmp3x, .tmp3x, ._, ._ },
|
|
.{ ._, .p_b, .movmsk, .tmp2d, .tmp3x, ._, ._ },
|
|
.{ ._, ._, .mov, .memi(.dst0b, .tmp1), .tmp2b, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 1), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
},
|
|
.ne => &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ .@"0:", ._dqu, .mov, .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .p_w, .cmpeq, .tmp3x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .p_b, .ackssw, .tmp3x, .tmp3x, ._, ._ },
|
|
.{ ._, .p_b, .movmsk, .tmp2d, .tmp3x, ._, ._ },
|
|
.{ ._, ._, .not, .tmp2b, ._, ._, ._ },
|
|
.{ ._, ._, .mov, .memi(.dst0b, .tmp1), .tmp2b, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 1), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
},
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .sse2, null, null, null },
|
|
.src_constraints = .{ .{ .scalar_int_is = .dword }, .{ .scalar_int_is = .dword } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .reg = .rcx } },
|
|
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = switch (cc) {
|
|
else => unreachable,
|
|
.e => &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
|
|
.{ .@"0:", ._dqu, .mov, .tmp4x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .p_d, .cmpeq, .tmp4x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._ps, .movmsk, .tmp3d, .tmp4x, ._, ._ },
|
|
.{ ._, ._l, .ro, .tmp3b, .tmp1b, ._, ._ },
|
|
.{ ._, ._, .@"or", .tmp2b, .tmp3b, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 4), ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
|
|
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp3d, .si(3), ._, ._ },
|
|
.{ ._, ._, .mov, .memid(.dst0b, .tmp3, -1), .tmp2b, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
|
|
.{ .@"1:", ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
|
|
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp3d, .si(3), ._, ._ },
|
|
.{ ._, ._, .mov, .memi(.dst0b, .tmp3), .tmp2b, ._, ._ },
|
|
},
|
|
.ne => &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
|
|
.{ .@"0:", ._dqu, .mov, .tmp4x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .p_d, .cmpeq, .tmp4x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._ps, .movmsk, .tmp3d, .tmp4x, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp3b, .si(0b1111), ._, ._ },
|
|
.{ ._, ._l, .ro, .tmp3b, .tmp1b, ._, ._ },
|
|
.{ ._, ._, .@"or", .tmp2b, .tmp3b, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 4), ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
|
|
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp3d, .si(3), ._, ._ },
|
|
.{ ._, ._, .mov, .memid(.dst0b, .tmp3, -1), .tmp2b, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
|
|
.{ .@"1:", ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
|
|
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp3d, .si(3), ._, ._ },
|
|
.{ ._, ._, .mov, .memi(.dst0b, .tmp3), .tmp2b, ._, ._ },
|
|
},
|
|
} },
|
|
}, .{
.required_features = .{ .sse4_1, null, null, null },
.src_constraints = .{ .{ .scalar_int_is = .qword }, .{ .scalar_int_is = .qword } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .reg = .rcx } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = switch (cc) {
else => unreachable,
.e => &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
.{ .@"0:", ._dqu, .mov, .tmp4x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .p_q, .cmpeq, .tmp4x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, ._pd, .movmsk, .tmp3d, .tmp4x, ._, ._ },
.{ ._, ._l, .ro, .tmp3b, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .tmp2b, .tmp3b, ._, ._ },
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 2), ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp3d, .si(3), ._, ._ },
.{ ._, ._, .mov, .memid(.dst0b, .tmp3, -1), .tmp2b, ._, ._ },
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
.{ .@"1:", ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp3d, .si(3), ._, ._ },
.{ ._, ._, .mov, .memi(.dst0b, .tmp3), .tmp2b, ._, ._ },
},
.ne => &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
.{ .@"0:", ._dqu, .mov, .tmp4x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .p_q, .cmpeq, .tmp4x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, ._pd, .movmsk, .tmp3d, .tmp4x, ._, ._ },
.{ ._, ._, .xor, .tmp3b, .si(0b11), ._, ._ },
.{ ._, ._l, .ro, .tmp3b, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .tmp2b, .tmp3b, ._, ._ },
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 2), ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp3d, .si(3), ._, ._ },
.{ ._, ._, .mov, .memid(.dst0b, .tmp3, -1), .tmp2b, ._, ._ },
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
.{ .@"1:", ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp3d, .si(3), ._, ._ },
.{ ._, ._, .mov, .memi(.dst0b, .tmp3), .tmp2b, ._, ._ },
},
} },
}, .{
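// MMX fallback for byte elements: pcmpeqb compares 8 bytes at a time and
// pmovmskb yields one full result byte per iteration, so each chunk can be
// stored directly without partial-byte accumulation.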
.required_features = .{ .sse, .mmx, null, null },
.src_constraints = .{ .{ .scalar_int_is = .byte }, .{ .scalar_int_is = .byte } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .rc = .mmx } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = switch (cc) {
else => unreachable,
.e => &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ .@"0:", ._q, .mov, .tmp3q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, .p_b, .cmpeq, .tmp3q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, .p_b, .movmsk, .tmp2d, .tmp3q, ._, ._ },
.{ ._, ._, .mov, .memi(.dst0b, .tmp1), .tmp2b, ._, ._ },
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 1), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
},
.ne => &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ .@"0:", ._q, .mov, .tmp3q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, .p_b, .cmpeq, .tmp3q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, .p_b, .movmsk, .tmp2d, .tmp3q, ._, ._ },
.{ ._, ._, .not, .tmp2b, ._, ._, ._ },
.{ ._, ._, .mov, .memi(.dst0b, .tmp1), .tmp2b, ._, ._ },
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 1), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
},
} },
}, .{
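// MMX fallback for word elements: pcmpeqw produces word masks that
// packsswb narrows to bytes (the high half packed from the zeroed tmp3),
// so pmovmskb yields 4 valid mask bits per 8-byte chunk.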
.required_features = .{ .sse, .mmx, null, null },
.src_constraints = .{ .{ .scalar_int_is = .word }, .{ .scalar_int_is = .word } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .reg = .rcx } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .rc = .mmx } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .rc = .mmx } },
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = switch (cc) {
else => unreachable,
.e => &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
.{ ._, .p_, .xor, .tmp3q, .tmp3q, ._, ._ },
.{ .@"0:", ._q, .mov, .tmp5q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, .p_w, .cmpeq, .tmp5q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, .p_b, .ackssw, .tmp5q, .tmp3q, ._, ._ },
.{ ._, .p_b, .movmsk, .tmp4d, .tmp5q, ._, ._ },
.{ ._, ._l, .ro, .tmp4b, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .tmp2b, .tmp4b, ._, ._ },
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 4), ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp4d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp4d, .si(3), ._, ._ },
.{ ._, ._, .mov, .memid(.dst0b, .tmp4, -1), .tmp2b, ._, ._ },
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
.{ .@"1:", ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp4d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp4d, .si(3), ._, ._ },
.{ ._, ._, .mov, .memi(.dst0b, .tmp4), .tmp2b, ._, ._ },
},
.ne => &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
.{ ._, .p_, .xor, .tmp3q, .tmp3q, ._, ._ },
.{ .@"0:", ._q, .mov, .tmp5q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, .p_w, .cmpeq, .tmp5q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, .p_b, .ackssw, .tmp5q, .tmp3q, ._, ._ },
.{ ._, .p_b, .movmsk, .tmp4d, .tmp5q, ._, ._ },
.{ ._, ._, .xor, .tmp4b, .si(0b1111), ._, ._ },
.{ ._, ._l, .ro, .tmp4b, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .tmp2b, .tmp4b, ._, ._ },
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 4), ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp4d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp4d, .si(3), ._, ._ },
.{ ._, ._, .mov, .memid(.dst0b, .tmp4, -1), .tmp2b, ._, ._ },
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
.{ .@"1:", ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp4d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp4d, .si(3), ._, ._ },
.{ ._, ._, .mov, .memi(.dst0b, .tmp4), .tmp2b, ._, ._ },
},
} },
}, .{
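// MMX fallback for dword elements: packssdw then packsswb narrow the
// dword compare masks down to bytes, giving 2 valid mask bits per
// 8-byte chunk.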
.required_features = .{ .sse, .mmx, null, null },
.src_constraints = .{ .{ .scalar_int_is = .dword }, .{ .scalar_int_is = .dword } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .reg = .rcx } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .rc = .mmx } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .rc = .mmx } },
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = switch (cc) {
else => unreachable,
.e => &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
.{ ._, .p_, .xor, .tmp3q, .tmp3q, ._, ._ },
.{ .@"0:", ._q, .mov, .tmp5q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, .p_d, .cmpeq, .tmp5q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, .p_w, .ackssd, .tmp5q, .tmp3q, ._, ._ },
.{ ._, .p_b, .ackssw, .tmp5q, .tmp3q, ._, ._ },
.{ ._, .p_b, .movmsk, .tmp4d, .tmp5q, ._, ._ },
.{ ._, ._l, .ro, .tmp4b, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .tmp2b, .tmp4b, ._, ._ },
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 2), ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp4d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp4d, .si(3), ._, ._ },
.{ ._, ._, .mov, .memid(.dst0b, .tmp4, -1), .tmp2b, ._, ._ },
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
.{ .@"1:", ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp4d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp4d, .si(3), ._, ._ },
.{ ._, ._, .mov, .memi(.dst0b, .tmp4), .tmp2b, ._, ._ },
},
.ne => &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
.{ ._, .p_, .xor, .tmp3q, .tmp3q, ._, ._ },
.{ .@"0:", ._q, .mov, .tmp5q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, .p_d, .cmpeq, .tmp5q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, .p_w, .ackssd, .tmp5q, .tmp3q, ._, ._ },
.{ ._, .p_b, .ackssw, .tmp5q, .tmp3q, ._, ._ },
.{ ._, .p_b, .movmsk, .tmp4d, .tmp5q, ._, ._ },
.{ ._, ._, .xor, .tmp4b, .si(0b11), ._, ._ },
.{ ._, ._l, .ro, .tmp4b, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .tmp2b, .tmp4b, ._, ._ },
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 2), ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp4d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp4d, .si(3), ._, ._ },
.{ ._, ._, .mov, .memid(.dst0b, .tmp4, -1), .tmp2b, ._, ._ },
.{ ._, ._, .xor, .tmp2b, .tmp2b, ._, ._ },
.{ .@"1:", ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp4d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp4d, .si(3), ._, ._ },
.{ ._, ._, .mov, .memi(.dst0b, .tmp4), .tmp2b, ._, ._ },
},
} },
}, .{
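// Scalar fallbacks for byte-sized bool-vector results: compare one element
// at a time, materialize the result with set<cc>, and shift it into the
// destination register by the running element index held in cl.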
.dst_constraints = .{.{ .bool_vec = .byte }},
.src_constraints = .{ .{ .scalar_int_is = .byte }, .{ .scalar_int_is = .byte } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0b, .dst0b, ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1b, .tmp1b, ._, ._ },
.{ .@"0:", ._, .mov, .tmp2b, .memia(.src0b, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp2b, .memia(.src1b, .tmp0, .add_size), ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp2b, ._, ._, ._ },
.{ ._, ._l, .sh, .tmp2b, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .dst0b, .tmp2b, ._, ._ },
.{ ._, ._, .add, .tmp1b, .si(1), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.dst_constraints = .{.{ .bool_vec = .byte }},
.src_constraints = .{ .{ .scalar_int_is = .word }, .{ .scalar_int_is = .word } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u16, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0b, .dst0b, ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1b, .tmp1b, ._, ._ },
.{ .@"0:", ._, .mov, .tmp2w, .memia(.src0w, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp2w, .memia(.src1w, .tmp0, .add_size), ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp2b, ._, ._, ._ },
.{ ._, ._l, .sh, .tmp2d, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .dst0d, .tmp2d, ._, ._ },
.{ ._, ._, .add, .tmp1b, .si(1), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(2), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.dst_constraints = .{.{ .bool_vec = .byte }},
.src_constraints = .{ .{ .scalar_int_is = .dword }, .{ .scalar_int_is = .dword } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0b, .dst0b, ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1b, .tmp1b, ._, ._ },
.{ .@"0:", ._, .mov, .tmp2d, .memia(.src0d, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp2d, .memia(.src1d, .tmp0, .add_size), ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp2b, ._, ._, ._ },
.{ ._, ._l, .sh, .tmp2b, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .dst0b, .tmp2b, ._, ._ },
.{ ._, ._, .add, .tmp1b, .si(1), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(4), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.dst_constraints = .{.{ .bool_vec = .byte }},
.src_constraints = .{ .{ .scalar_int_is = .qword }, .{ .scalar_int_is = .qword } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0b, .dst0b, ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1b, .tmp1b, ._, ._ },
.{ .@"0:", ._, .mov, .tmp2q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp2q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp2b, ._, ._, ._ },
.{ ._, ._l, .sh, .tmp2b, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .dst0b, .tmp2b, ._, ._ },
.{ ._, ._, .add, .tmp1b, .si(1), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.dst_constraints = .{.{ .bool_vec = .byte }},
.src_constraints = .{ .any_scalar_int, .any_scalar_int },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0b, .dst0b, ._, ._ },
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ ._, ._, .xor, .tmp1b, .tmp1b, ._, ._ },
.{ .@"0:", ._, .mov, .tmp2d, .sa(.src0p, .add_elem_limbs), ._, ._ },
.{ ._, ._, .xor, .tmp3d, .tmp3d, ._, ._ },
.{ .@"1:", ._, .mov, .tmp4p, .memi(.src0p, .tmp0), ._, ._ },
.{ ._, ._, .xor, .tmp4p, .memi(.src1p, .tmp0), ._, ._ },
.{ ._, ._, .@"or", .tmp3p, .tmp4p, ._, ._ },
.{ ._, ._, .add, .tmp0p, .sa(.tmp4, .add_size), ._, ._ },
.{ ._, ._, .sub, .tmp2d, .si(1), ._, ._ },
.{ ._, ._b, .j, .@"1b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp3p, .tmp3p, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp2b, ._, ._, ._ },
.{ ._, ._l, .sh, .tmp2b, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .dst0b, .tmp2b, ._, ._ },
.{ ._, ._, .add, .tmp1b, .si(1), ._, ._ },
.{ ._, ._, .cmp, .tmp1b, .sa(.dst0, .add_len), ._, ._ },
.{ ._, ._b, .j, .@"0b", ._, ._, ._ },
} },
}, .{
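// The same scalar setcc/shl/or scheme, widened to a dword bool-vector
// destination; the multi-limb variant XORs corresponding limbs together
// and tests the accumulated difference for the whole element.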
.dst_constraints = .{.{ .bool_vec = .dword }},
.src_constraints = .{ .{ .scalar_int_is = .byte }, .{ .scalar_int_is = .byte } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1b, .tmp1b, ._, ._ },
.{ .@"0:", ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ ._, ._, .mov, .tmp3b, .memia(.src0b, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp3b, .memia(.src1b, .tmp0, .add_size), ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp2b, ._, ._, ._ },
.{ ._, ._l, .sh, .tmp2d, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .dst0d, .tmp2d, ._, ._ },
.{ ._, ._, .add, .tmp1b, .si(1), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.dst_constraints = .{.{ .bool_vec = .dword }},
.src_constraints = .{ .{ .scalar_int_is = .word }, .{ .scalar_int_is = .word } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u16, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1b, .tmp1b, ._, ._ },
.{ .@"0:", ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ ._, ._, .mov, .tmp3w, .memia(.src0w, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp3w, .memia(.src1w, .tmp0, .add_size), ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp2b, ._, ._, ._ },
.{ ._, ._l, .sh, .tmp2d, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .dst0d, .tmp2d, ._, ._ },
.{ ._, ._, .add, .tmp1b, .si(1), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(2), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.dst_constraints = .{.{ .bool_vec = .dword }},
.src_constraints = .{ .{ .scalar_int_is = .dword }, .{ .scalar_int_is = .dword } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1b, .tmp1b, ._, ._ },
.{ .@"0:", ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .memia(.src0d, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp3d, .memia(.src1d, .tmp0, .add_size), ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp2b, ._, ._, ._ },
.{ ._, ._l, .sh, .tmp2d, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .dst0d, .tmp2d, ._, ._ },
.{ ._, ._, .add, .tmp1b, .si(1), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(4), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.dst_constraints = .{.{ .bool_vec = .dword }},
.src_constraints = .{ .{ .scalar_int_is = .qword }, .{ .scalar_int_is = .qword } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1b, .tmp1b, ._, ._ },
.{ .@"0:", ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ ._, ._, .mov, .tmp3q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp3q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp2b, ._, ._, ._ },
.{ ._, ._l, .sh, .tmp2d, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .dst0d, .tmp2d, ._, ._ },
.{ ._, ._, .add, .tmp1b, .si(1), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.dst_constraints = .{.{ .bool_vec = .dword }},
.src_constraints = .{ .any_scalar_int, .any_scalar_int },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ ._, ._, .xor, .tmp1b, .tmp1b, ._, ._ },
.{ .@"0:", ._, .mov, .tmp2d, .sa(.src0p, .add_elem_limbs), ._, ._ },
.{ ._, ._, .xor, .tmp3d, .tmp3d, ._, ._ },
.{ .@"1:", ._, .mov, .tmp4p, .memi(.src0p, .tmp0), ._, ._ },
.{ ._, ._, .xor, .tmp4p, .memi(.src1p, .tmp0), ._, ._ },
.{ ._, ._, .@"or", .tmp3p, .tmp4p, ._, ._ },
.{ ._, ._, .add, .tmp0p, .sa(.tmp4, .add_size), ._, ._ },
.{ ._, ._, .sub, .tmp2d, .si(1), ._, ._ },
.{ ._, ._b, .j, .@"1b", ._, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ ._, ._, .@"test", .tmp3p, .tmp3p, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp2b, ._, ._, ._ },
.{ ._, ._l, .sh, .tmp2d, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .dst0d, .tmp2d, ._, ._ },
.{ ._, ._, .add, .tmp1b, .si(1), ._, ._ },
.{ ._, ._, .cmp, .tmp1b, .sa(.dst0, .add_len), ._, ._ },
.{ ._, ._b, .j, .@"0b", ._, ._, ._ },
} },
}, .{
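// 64-bit-only variants of the same scheme, accumulating into a qword
// bool-vector destination.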
.required_features = .{ .@"64bit", null, null, null },
.dst_constraints = .{.{ .bool_vec = .qword }},
.src_constraints = .{ .{ .scalar_int_is = .byte }, .{ .scalar_int_is = .byte } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1b, .tmp1b, ._, ._ },
.{ .@"0:", ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ ._, ._, .mov, .tmp3b, .memia(.src0b, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp3b, .memia(.src1b, .tmp0, .add_size), ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp2b, ._, ._, ._ },
.{ ._, ._l, .sh, .tmp2q, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .dst0q, .tmp2q, ._, ._ },
.{ ._, ._, .add, .tmp1b, .si(1), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.dst_constraints = .{.{ .bool_vec = .qword }},
.src_constraints = .{ .{ .scalar_int_is = .word }, .{ .scalar_int_is = .word } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.{ .type = .u16, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1b, .tmp1b, ._, ._ },
.{ .@"0:", ._, .xor, .tmp2d, .tmp2d, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3w, .memia(.src0w, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .cmp, .tmp3w, .memia(.src1w, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .fromCond(cc), .set, .tmp2b, ._, ._, ._ },
|
|
.{ ._, ._l, .sh, .tmp2q, .tmp1b, ._, ._ },
|
|
.{ ._, ._, .@"or", .dst0q, .tmp2q, ._, ._ },
|
|
.{ ._, ._, .add, .tmp1b, .si(1), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(2), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", null, null, null },
|
|
.dst_constraints = .{.{ .bool_vec = .qword }},
|
|
.src_constraints = .{ .{ .scalar_int_is = .dword }, .{ .scalar_int_is = .dword } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u8, .kind = .{ .reg = .cl } },
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.{ .rc = .general_purpose }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1b, .tmp1b, ._, ._ },
|
|
.{ .@"0:", ._, .xor, .tmp2d, .tmp2d, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp3d, .memia(.src0d, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .cmp, .tmp3d, .memia(.src1d, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .fromCond(cc), .set, .tmp2b, ._, ._, ._ },
|
|
.{ ._, ._l, .sh, .tmp2q, .tmp1b, ._, ._ },
|
|
.{ ._, ._, .@"or", .dst0q, .tmp2q, ._, ._ },
|
|
.{ ._, ._, .add, .tmp1b, .si(1), ._, ._ },
|
|
.{ ._, ._, .add, .tmp0p, .si(4), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", null, null, null },
|
|
.dst_constraints = .{.{ .bool_vec = .qword }},
|
|
.src_constraints = .{ .{ .scalar_int_is = .qword }, .{ .scalar_int_is = .qword } },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u8, .kind = .{ .reg = .cl } },
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.{ .rc = .general_purpose }},
|
|
.clobbers = .{ .eflags = true },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1b, .tmp1b, ._, ._ },
|
|
.{ .@"0:", ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ ._, ._, .mov, .tmp3q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp3q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp2b, ._, ._, ._ },
.{ ._, ._l, .sh, .tmp2q, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .dst0q, .tmp2q, ._, ._ },
.{ ._, ._, .add, .tmp1b, .si(1), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.dst_constraints = .{.{ .bool_vec = .qword }},
.src_constraints = .{ .any_scalar_int, .any_scalar_int },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ ._, ._, .xor, .tmp1b, .tmp1b, ._, ._ },
.{ .@"0:", ._, .mov, .tmp2d, .sa(.src0p, .add_elem_limbs), ._, ._ },
.{ ._, ._, .xor, .tmp3d, .tmp3d, ._, ._ },
.{ .@"1:", ._, .mov, .tmp4p, .memi(.src0p, .tmp0), ._, ._ },
.{ ._, ._, .xor, .tmp4p, .memi(.src1p, .tmp0), ._, ._ },
.{ ._, ._, .@"or", .tmp3p, .tmp4p, ._, ._ },
.{ ._, ._, .add, .tmp0p, .sa(.tmp4, .add_size), ._, ._ },
.{ ._, ._, .sub, .tmp2d, .si(1), ._, ._ },
.{ ._, ._b, .j, .@"1b", ._, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ ._, ._, .@"test", .tmp3p, .tmp3p, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp2b, ._, ._, ._ },
.{ ._, ._l, .sh, .tmp2q, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .dst0q, .tmp2q, ._, ._ },
.{ ._, ._, .add, .tmp1b, .si(1), ._, ._ },
.{ ._, ._, .cmp, .tmp1b, .sa(.dst0, .add_len), ._, ._ },
.{ ._, ._b, .j, .@"0b", ._, ._, ._ },
} },
}, .{
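// Memory-destination fallback: setcc bits accumulate in a pointer-sized
// register and are flushed to the destination bool vector whenever the bit
// index crosses a pointer-size boundary, with a final partial flush after
// the loop exits.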
.src_constraints = .{ .{ .scalar_int_is = .byte }, .{ .scalar_int_is = .byte } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .reg = .ecx } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"0:", ._, .xor, .tmp3d, .tmp3d, ._, ._ },
.{ ._, ._, .mov, .tmp4b, .memia(.src0b, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .cmp, .tmp4b, .memia(.src1b, .tmp0, .add_size), ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp3b, ._, ._, ._ },
.{ ._, ._l, .sh, .tmp3p, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .tmp2p, .tmp3p, ._, ._ },
.{ ._, ._, .add, .tmp1d, .si(1), ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .sia(-1, .none, .add_ptr_bit_size), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp3d, .si(3), ._, ._ },
.{ ._, ._, .mov, .memia(.dst0p, .tmp3, .sub_ptr_size), .tmp2p, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"1:", ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .sia(-1, .none, .add_ptr_bit_size), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp3d, .si(3), ._, ._ },
.{ ._, ._, .mov, .memi(.dst0p, .tmp3), .tmp2p, ._, ._ },
} },
}, .{
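// f16 comparisons with F16C: widen both operands to f32 via vcvtph2ps,
// then compare with vcmpss/vcmpps using the eq/neq predicate selected
// from cc.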
.required_features = .{ .f16c, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .word, .is = .word } },
.{ .scalar_float = .{ .of = .word, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .to_sse } },
},
.extra_temps = .{
.{ .kind = .{ .mut_rc = .{ .ref = .src1, .rc = .sse } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .mut_rc_mask = .{
.ref = .src0,
.rc = .sse,
.info = .{ .kind = .all, .scalar = .dword },
} }},
.each = .{ .once = &.{
.{ ._, .v_ps, .cvtph2, .dst0x, .src0x, ._, ._ },
.{ ._, .v_ps, .cvtph2, .tmp0x, .src1x, ._, ._ },
.{ ._, .v_ss, .cmp, .dst0x, .dst0x, .tmp0x, .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}) },
} },
}, .{
.required_features = .{ .f16c, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .qword, .is = .word } },
.{ .scalar_float = .{ .of = .qword, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .mem, .mem } },
.{ .src = .{ .sse, .mem } },
.{ .src = .{ .mem, .sse } },
.{ .src = .{ .to_sse, .to_sse } },
},
.extra_temps = .{
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .mut_rc_mask = .{
.ref = .src0,
.rc = .sse,
.info = .{ .kind = .all, .scalar = .dword },
} }},
.each = .{ .once = &.{
.{ ._, .v_ps, .cvtph2, .dst0x, .src0q, ._, ._ },
.{ ._, .v_ps, .cvtph2, .tmp0x, .src1q, ._, ._ },
.{ ._, .v_ps, .cmp, .dst0x, .dst0x, .tmp0x, .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}) },
} },
}, .{
.required_features = .{ .f16c, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .xword, .is = .word } },
.{ .scalar_float = .{ .of = .xword, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .mem, .mem } },
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse } },
.{ .src = .{ .to_sse, .to_sse } },
},
.extra_temps = .{
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .mut_rc_mask = .{
.ref = .src0,
.rc = .sse,
.info = .{ .kind = .all, .scalar = .dword },
} }},
.each = .{ .once = &.{
.{ ._, .v_ps, .cvtph2, .dst0y, .src0x, ._, ._ },
.{ ._, .v_ps, .cvtph2, .tmp0y, .src1x, ._, ._ },
.{ ._, .v_ps, .cmp, .dst0y, .dst0y, .tmp0y, .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}) },
} },
}, .{
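// Native f32/f64 compares: scalar and packed cmpss/cmpsd/cmpps/cmppd
// forms, preferring the non-destructive AVX three-operand encodings and
// falling back to the destructive SSE two-operand forms (hence
// .to_mut_sse); operands may be commuted since eq/neq are symmetric.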
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .dword, .is = .dword } },
.{ .scalar_float = .{ .of = .dword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.dst_temps = .{.{ .mut_rc_mask = .{
.ref = .src0,
.rc = .sse,
.info = .{ .kind = .all, .scalar = .dword },
} }},
.each = .{ .once = &.{
.{ ._, .v_ss, .cmp, .dst0x, .src0x, .src1x, .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}) },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .dword, .is = .dword } },
.{ .scalar_float = .{ .of = .dword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mut_sse, .mem } },
.{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_sse, .to_sse } },
},
.dst_temps = .{.{ .ref_mask = .{
.ref = .src0,
.info = .{ .kind = .all, .scalar = .dword },
} }},
.each = .{ .once = &.{
.{ ._, ._ss, .cmp, .dst0x, .src1x, .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}), ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .xword, .is = .dword } },
.{ .scalar_float = .{ .of = .xword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.dst_temps = .{.{ .mut_rc_mask = .{
.ref = .src0,
.rc = .sse,
.info = .{ .kind = .all, .scalar = .dword },
} }},
.each = .{ .once = &.{
.{ ._, .v_ps, .cmp, .dst0x, .src0x, .src1x, .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}) },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .xword, .is = .dword } },
.{ .scalar_float = .{ .of = .xword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mut_sse, .mem } },
.{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_sse, .to_sse } },
},
.dst_temps = .{.{ .ref_mask = .{
.ref = .src0,
.info = .{ .kind = .all, .scalar = .dword },
} }},
.each = .{ .once = &.{
.{ ._, ._ps, .cmp, .dst0x, .src1x, .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}), ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .yword, .is = .dword } },
.{ .scalar_float = .{ .of = .yword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.dst_temps = .{.{ .mut_rc_mask = .{
.ref = .src0,
.rc = .sse,
.info = .{ .kind = .all, .scalar = .dword },
} }},
.each = .{ .once = &.{
.{ ._, .v_ps, .cmp, .dst0y, .src0y, .src1y, .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}) },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .qword, .is = .qword } },
.{ .scalar_float = .{ .of = .qword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.dst_temps = .{.{ .mut_rc_mask = .{
.ref = .src0,
.rc = .sse,
.info = .{ .kind = .all, .scalar = .qword },
} }},
.each = .{ .once = &.{
.{ ._, .v_sd, .cmp, .dst0x, .src0x, .src1x, .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}) },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .qword, .is = .qword } },
.{ .scalar_float = .{ .of = .qword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mut_sse, .mem } },
.{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_sse, .to_sse } },
},
.dst_temps = .{.{ .ref_mask = .{
.ref = .src0,
.info = .{ .kind = .all, .scalar = .qword },
} }},
.each = .{ .once = &.{
.{ ._, ._sd, .cmp, .dst0x, .src1x, .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}), ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .xword, .is = .qword } },
.{ .scalar_float = .{ .of = .xword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.dst_temps = .{.{ .mut_rc_mask = .{
.ref = .src0,
.rc = .sse,
.info = .{ .kind = .all, .scalar = .qword },
} }},
.each = .{ .once = &.{
.{ ._, .v_pd, .cmp, .dst0x, .src0x, .src1x, .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}) },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .xword, .is = .qword } },
.{ .scalar_float = .{ .of = .xword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mut_sse, .mem } },
.{ .src = .{ .mem, .to_mut_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_sse, .to_sse } },
},
.dst_temps = .{.{ .ref_mask = .{
.ref = .src0,
.info = .{ .kind = .all, .scalar = .qword },
} }},
.each = .{ .once = &.{
.{ ._, ._pd, .cmp, .dst0x, .src1x, .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}), ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .scalar_float = .{ .of = .yword, .is = .qword } },
.{ .scalar_float = .{ .of = .yword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.dst_temps = .{.{ .mut_rc_mask = .{
.ref = .src0,
.rc = .sse,
.info = .{ .kind = .all, .scalar = .qword },
} }},
.each = .{ .once = &.{
.{ ._, .v_pd, .cmp, .dst0y, .src0y, .src1y, .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}) },
} },
}, .{
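// Looped F16C form for longer f16 vectors: each 16-byte source chunk is
// widened to 8 f32 lanes, vcmpps produces lane masks, and vmovmskps stores
// one result byte per chunk (the lea-increment variant first for
// slow_incdec targets, then an inc variant).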
.required_features = .{ .f16c, .slow_incdec, null, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .xword, .is = .word } },
.{ .multiple_scalar_float = .{ .of = .xword, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_8_f16, .kind = .{ .rc = .sse } },
.{ .type = .vector_8_f16, .kind = .{ .rc = .sse } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
.{ .@"0:", .v_ps, .cvtph2, .tmp2y, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .v_ps, .cvtph2, .tmp3y, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, .v_ps, .cmp, .tmp2y, .tmp2y, .tmp3y, .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}) },
.{ ._, .v_ps, .movmsk, .tmp4d, .tmp2y, ._, ._ },
.{ ._, ._, .mov, .lea(.byte, .tmp1), .tmp4b, ._, ._ },
.{ ._, ._, .lea, .tmp1p, .lead(.none, .tmp1, 1), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .f16c, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .xword, .is = .word } },
.{ .multiple_scalar_float = .{ .of = .xword, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_8_f16, .kind = .{ .rc = .sse } },
.{ .type = .vector_8_f16, .kind = .{ .rc = .sse } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
.{ .@"0:", .v_ps, .cvtph2, .tmp2y, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .v_ps, .cvtph2, .tmp3y, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, .v_ps, .cmp, .tmp2y, .tmp2y, .tmp3y, .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}) },
.{ ._, .v_ps, .movmsk, .tmp4d, .tmp2y, ._, ._ },
.{ ._, ._, .mov, .lea(.byte, .tmp1), .tmp4b, ._, ._ },
.{ ._, ._c, .in, .tmp1p, ._, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
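// Without F16C, f16 compares go through the compiler-rt soft-float
// routines (__eqhf2/__nehf2): each element pair is loaded into xmm0/xmm1,
// the call's i32 result in eax is tested, and the setcc bit is shifted
// into the destination by the element index.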
.required_features = .{ .avx, .slow_incdec, null, null },
.dst_constraints = .{.{ .bool_vec = .dword }},
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .f16, .kind = .{ .reg = .xmm0 } },
.{ .type = .f16, .kind = .{ .reg = .xmm1 } },
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
else => unreachable,
.e => "__eqhf2",
.ne => "__nehf2",
} } } },
.{ .type = .i32, .kind = .{ .reg = .eax } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u32, .kind = .{ .reg = .edx } },
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ .@"0:", .vp_, .xor, .tmp2x, .tmp2x, .tmp2x, ._ },
.{ ._, .vp_w, .insr, .tmp1x, .tmp2x, .memsi(.src0w, .@"2", .tmp0), .ui(0) },
.{ ._, .vp_w, .insr, .tmp2x, .tmp2x, .memsi(.src1w, .@"2", .tmp0), .ui(0) },
.{ ._, ._, .call, .tmp3d, ._, ._, ._ },
.{ ._, ._, .xor, .tmp6d, .tmp6d, ._, ._ },
.{ ._, ._, .@"test", .tmp4d, .tmp4d, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp6b, ._, ._, ._ },
.{ ._, ._, .mov, .tmp5d, .tmp0d, ._, ._ },
.{ ._, ._l, .sh, .tmp6d, .tmp5b, ._, ._ },
.{ ._, ._, .@"or", .dst0d, .tmp6d, ._, ._ },
.{ ._, ._, .add, .tmp0d, .si(1), ._, ._ },
.{ ._, ._, .cmp, .tmp0d, .sa(.src0, .add_len), ._, ._ },
.{ ._, ._b, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.dst_constraints = .{.{ .bool_vec = .dword }},
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .f16, .kind = .{ .reg = .xmm0 } },
.{ .type = .f16, .kind = .{ .reg = .xmm1 } },
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
else => unreachable,
.e => "__eqhf2",
.ne => "__nehf2",
} } } },
.{ .type = .i32, .kind = .{ .reg = .eax } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u32, .kind = .{ .reg = .edx } },
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ .@"0:", .vp_, .xor, .tmp2x, .tmp2x, .tmp2x, ._ },
.{ ._, .vp_w, .insr, .tmp1x, .tmp2x, .memsi(.src0w, .@"2", .tmp0), .ui(0) },
.{ ._, .vp_w, .insr, .tmp2x, .tmp2x, .memsi(.src1w, .@"2", .tmp0), .ui(0) },
.{ ._, ._, .call, .tmp3d, ._, ._, ._ },
.{ ._, ._, .xor, .tmp6d, .tmp6d, ._, ._ },
.{ ._, ._, .@"test", .tmp4d, .tmp4d, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp6b, ._, ._, ._ },
.{ ._, ._, .mov, .tmp5d, .tmp0d, ._, ._ },
.{ ._, ._l, .sh, .tmp6d, .tmp5b, ._, ._ },
.{ ._, ._, .@"or", .dst0d, .tmp6d, ._, ._ },
.{ ._, ._c, .in, .tmp0d, ._, ._, ._ },
.{ ._, ._, .cmp, .tmp0d, .sa(.src0, .add_len), ._, ._ },
.{ ._, ._b, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, .slow_incdec, null, null },
.dst_constraints = .{.{ .bool_vec = .dword }},
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .f16, .kind = .{ .reg = .xmm0 } },
.{ .type = .f16, .kind = .{ .reg = .xmm1 } },
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
else => unreachable,
.e => "__eqhf2",
.ne => "__nehf2",
} } } },
.{ .type = .i32, .kind = .{ .reg = .eax } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u32, .kind = .{ .reg = .edx } },
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ .@"0:", .p_, .xor, .tmp1x, .tmp1x, ._, ._ },
.{ ._, .p_, .xor, .tmp2x, .tmp2x, ._, ._ },
.{ ._, .p_w, .insr, .tmp1x, .memsi(.src0w, .@"2", .tmp0), .ui(0), ._ },
.{ ._, .p_w, .insr, .tmp2x, .memsi(.src1w, .@"2", .tmp0), .ui(0), ._ },
.{ ._, ._, .call, .tmp3d, ._, ._, ._ },
.{ ._, ._, .xor, .tmp6d, .tmp6d, ._, ._ },
.{ ._, ._, .@"test", .tmp4d, .tmp4d, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp6b, ._, ._, ._ },
.{ ._, ._, .mov, .tmp5d, .tmp0d, ._, ._ },
.{ ._, ._l, .sh, .tmp6d, .tmp5b, ._, ._ },
.{ ._, ._, .@"or", .dst0d, .tmp6d, ._, ._ },
.{ ._, ._, .add, .tmp0d, .si(1), ._, ._ },
.{ ._, ._, .cmp, .tmp0d, .sa(.src0, .add_len), ._, ._ },
.{ ._, ._b, .j, .@"0b", ._, ._, ._ },
} },
}, .{
|
|
.required_features = .{ .sse2, null, null, null },
|
|
.dst_constraints = .{.{ .bool_vec = .dword }},
|
|
.src_constraints = .{
|
|
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
|
|
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.call_frame = .{ .alignment = .@"16" },
|
|
.extra_temps = .{
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .f16, .kind = .{ .reg = .xmm0 } },
|
|
.{ .type = .f16, .kind = .{ .reg = .xmm1 } },
|
|
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
|
|
else => unreachable,
|
|
.e => "__eqhf2",
|
|
.ne => "__nehf2",
|
|
} } } },
|
|
.{ .type = .i32, .kind = .{ .reg = .eax } },
|
|
.{ .type = .u8, .kind = .{ .reg = .cl } },
|
|
.{ .type = .u32, .kind = .{ .reg = .edx } },
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.{ .rc = .general_purpose }},
|
|
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
|
|
.{ .@"0:", .p_, .xor, .tmp1x, .tmp1x, ._, ._ },
|
|
.{ ._, .p_, .xor, .tmp2x, .tmp2x, ._, ._ },
|
|
.{ ._, .p_w, .insr, .tmp1x, .memsi(.src0w, .@"2", .tmp0), .ui(0), ._ },
|
|
.{ ._, .p_w, .insr, .tmp2x, .memsi(.src1w, .@"2", .tmp0), .ui(0), ._ },
|
|
.{ ._, ._, .call, .tmp3d, ._, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp6d, .tmp6d, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp4d, .tmp4d, ._, ._ },
|
|
.{ ._, .fromCond(cc), .set, .tmp6b, ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp5d, .tmp0d, ._, ._ },
|
|
.{ ._, ._l, .sh, .tmp6d, .tmp5b, ._, ._ },
|
|
.{ ._, ._, .@"or", .dst0d, .tmp6d, ._, ._ },
|
|
.{ ._, ._c, .in, .tmp0d, ._, ._, ._ },
|
|
.{ ._, ._, .cmp, .tmp0d, .sa(.src0, .add_len), ._, ._ },
|
|
.{ ._, ._b, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .sse, .slow_incdec, null, null },
|
|
.dst_constraints = .{.{ .bool_vec = .dword }},
|
|
.src_constraints = .{
|
|
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
|
|
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.call_frame = .{ .alignment = .@"16" },
|
|
.extra_temps = .{
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .f16, .kind = .{ .reg = .xmm0 } },
|
|
.{ .type = .f16, .kind = .{ .reg = .xmm1 } },
|
|
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
|
|
else => unreachable,
|
|
.e => "__eqhf2",
|
|
.ne => "__nehf2",
|
|
} } } },
|
|
.{ .type = .i32, .kind = .{ .reg = .eax } },
|
|
.{ .type = .u8, .kind = .{ .reg = .cl } },
|
|
.{ .type = .u32, .kind = .{ .reg = .edx } },
|
|
.{ .type = .vector_8_f16, .kind = .mem },
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.{ .rc = .general_purpose }},
|
|
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
|
|
.{ .@"0:", ._, .movzx, .tmp4d, .memsi(.src0w, .@"2", .tmp0), ._, ._ },
|
|
.{ ._, ._, .mov, .mem(.tmp7d), .tmp4d, ._, ._ },
|
|
.{ ._, ._ps, .mova, .tmp1x, .mem(.tmp7x), ._, ._ },
|
|
.{ ._, ._, .movzx, .tmp4d, .memsi(.src1w, .@"2", .tmp0), ._, ._ },
|
|
.{ ._, ._, .mov, .mem(.tmp7d), .tmp4d, ._, ._ },
|
|
.{ ._, ._ps, .mova, .tmp2x, .mem(.tmp7x), ._, ._ },
|
|
.{ ._, ._, .call, .tmp3d, ._, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp6d, .tmp6d, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp4d, .tmp4d, ._, ._ },
|
|
.{ ._, .fromCond(cc), .set, .tmp6b, ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp5d, .tmp0d, ._, ._ },
|
|
.{ ._, ._l, .sh, .tmp6d, .tmp5b, ._, ._ },
|
|
.{ ._, ._, .@"or", .dst0d, .tmp6d, ._, ._ },
|
|
.{ ._, ._, .add, .tmp0d, .si(1), ._, ._ },
|
|
.{ ._, ._, .cmp, .tmp0d, .sa(.src0, .add_len), ._, ._ },
|
|
.{ ._, ._b, .j, .@"0b", ._, ._, ._ },
|
|
} },
|
|
}, .{
.required_features = .{ .sse, null, null, null },
.dst_constraints = .{.{ .bool_vec = .dword }},
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .f16, .kind = .{ .reg = .xmm0 } },
.{ .type = .f16, .kind = .{ .reg = .xmm1 } },
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
else => unreachable,
.e => "__eqhf2",
.ne => "__nehf2",
} } } },
.{ .type = .i32, .kind = .{ .reg = .eax } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u32, .kind = .{ .reg = .edx } },
.{ .type = .vector_8_f16, .kind = .mem },
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ .@"0:", ._, .movzx, .tmp4d, .memsi(.src0w, .@"2", .tmp0), ._, ._ },
.{ ._, ._, .mov, .mem(.tmp7d), .tmp4d, ._, ._ },
.{ ._, ._ps, .mova, .tmp1x, .mem(.tmp7x), ._, ._ },
.{ ._, ._, .movzx, .tmp4d, .memsi(.src1w, .@"2", .tmp0), ._, ._ },
.{ ._, ._, .mov, .mem(.tmp7d), .tmp4d, ._, ._ },
.{ ._, ._ps, .mova, .tmp2x, .mem(.tmp7x), ._, ._ },
.{ ._, ._, .call, .tmp3d, ._, ._, ._ },
.{ ._, ._, .xor, .tmp6d, .tmp6d, ._, ._ },
.{ ._, ._, .@"test", .tmp4d, .tmp4d, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp6b, ._, ._, ._ },
.{ ._, ._, .mov, .tmp5d, .tmp0d, ._, ._ },
.{ ._, ._l, .sh, .tmp6d, .tmp5b, ._, ._ },
.{ ._, ._, .@"or", .dst0d, .tmp6d, ._, ._ },
.{ ._, ._c, .in, .tmp0d, ._, ._, ._ },
.{ ._, ._, .cmp, .tmp0d, .sa(.src0, .add_len), ._, ._ },
.{ ._, ._b, .j, .@"0b", ._, ._, ._ },
} },
}, .{
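// 64-bit AVX variant for f16 vectors with an in-memory result: lane bits collect
// in a 64-bit register that is flushed to the destination after every 64 lanes.
// This entry targets CPUs with slow inc/dec, so the index is bumped with lea.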
.required_features = .{ .@"64bit", .avx, .slow_incdec, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.{ .type = .f16, .kind = .{ .reg = .xmm0 } },
.{ .type = .f16, .kind = .{ .reg = .xmm1 } },
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
else => unreachable,
.e => "__eqhf2",
.ne => "__nehf2",
} } } },
.{ .type = .i32, .kind = .{ .reg = .eax } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u64, .kind = .{ .reg = .rdx } },
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.each = .{ .once = &.{
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ .@"0:", .vp_, .xor, .tmp3x, .tmp3x, .tmp3x, ._ },
.{ ._, .vp_w, .insr, .tmp2x, .tmp3x, .memsi(.src0w, .@"2", .tmp0), .ui(0) },
.{ ._, .vp_w, .insr, .tmp3x, .tmp3x, .memsi(.src1w, .@"2", .tmp0), .ui(0) },
.{ ._, ._, .call, .tmp4d, ._, ._, ._ },
.{ ._, ._, .xor, .tmp7d, .tmp7d, ._, ._ },
.{ ._, ._, .@"test", .tmp5d, .tmp5d, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp7b, ._, ._, ._ },
.{ ._, ._, .mov, .tmp6d, .tmp0d, ._, ._ },
.{ ._, ._l, .sh, .tmp7q, .tmp6b, ._, ._ },
.{ ._, ._, .@"or", .tmp1q, .tmp7q, ._, ._ },
.{ ._, ._, .lea, .tmp0d, .lead(.none, .tmp0, 1), ._, ._ },
.{ ._, ._, .@"test", .tmp0d, .si(0b111111), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp5d, .tmp0d, ._, ._ },
.{ ._, ._r, .sh, .tmp5d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memid(.dst0q, .tmp5, -8), .tmp1q, ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ .@"1:", ._, .cmp, .tmp0d, .sa(.src0, .add_len), ._, ._ },
.{ ._, ._b, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp0d, .si(0b111111), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp5d, .tmp0d, ._, ._ },
.{ ._, ._r, .sh, .tmp5d, .ui(6), ._, ._ },
.{ ._, ._, .mov, .memsi(.dst0q, .@"8", .tmp5), .tmp1q, ._, ._ },
} },
}, .{
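// Same strategy as above, using inc where inc/dec is not slow.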
.required_features = .{ .@"64bit", .avx, null, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.{ .type = .f16, .kind = .{ .reg = .xmm0 } },
.{ .type = .f16, .kind = .{ .reg = .xmm1 } },
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
else => unreachable,
.e => "__eqhf2",
.ne => "__nehf2",
} } } },
.{ .type = .i32, .kind = .{ .reg = .eax } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u64, .kind = .{ .reg = .rdx } },
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.each = .{ .once = &.{
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ .@"0:", .vp_, .xor, .tmp3x, .tmp3x, .tmp3x, ._ },
.{ ._, .vp_w, .insr, .tmp2x, .tmp3x, .memsi(.src0w, .@"2", .tmp0), .ui(0) },
.{ ._, .vp_w, .insr, .tmp3x, .tmp3x, .memsi(.src1w, .@"2", .tmp0), .ui(0) },
.{ ._, ._, .call, .tmp4d, ._, ._, ._ },
.{ ._, ._, .xor, .tmp7d, .tmp7d, ._, ._ },
.{ ._, ._, .@"test", .tmp5d, .tmp5d, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp7b, ._, ._, ._ },
.{ ._, ._, .mov, .tmp6d, .tmp0d, ._, ._ },
.{ ._, ._l, .sh, .tmp7q, .tmp6b, ._, ._ },
.{ ._, ._, .@"or", .tmp1q, .tmp7q, ._, ._ },
.{ ._, ._c, .in, .tmp0d, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp0d, .si(0b111111), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp5d, .tmp0d, ._, ._ },
.{ ._, ._r, .sh, .tmp5d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memid(.dst0q, .tmp5, -8), .tmp1q, ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ .@"1:", ._, .cmp, .tmp0d, .sa(.src0, .add_len), ._, ._ },
.{ ._, ._b, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp0d, .si(0b111111), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp5d, .tmp0d, ._, ._ },
.{ ._, ._r, .sh, .tmp5d, .ui(6), ._, ._ },
.{ ._, ._, .mov, .memsi(.dst0q, .@"8", .tmp5), .tmp1q, ._, ._ },
} },
}, .{
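// SSE2 version: each f16 operand is zero-extended into xmm0/xmm1 with pxor +
// pinsrw before the libcall.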
.required_features = .{ .@"64bit", .sse2, .slow_incdec, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.{ .type = .f16, .kind = .{ .reg = .xmm0 } },
.{ .type = .f16, .kind = .{ .reg = .xmm1 } },
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
else => unreachable,
.e => "__eqhf2",
.ne => "__nehf2",
} } } },
.{ .type = .i32, .kind = .{ .reg = .eax } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u64, .kind = .{ .reg = .rdx } },
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.each = .{ .once = &.{
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ .@"0:", .p_, .xor, .tmp2x, .tmp2x, ._, ._ },
.{ ._, .p_w, .insr, .tmp2x, .memsi(.src0w, .@"2", .tmp0), .ui(0), ._ },
.{ ._, .p_, .xor, .tmp3x, .tmp3x, ._, ._ },
.{ ._, .p_w, .insr, .tmp3x, .memsi(.src1w, .@"2", .tmp0), .ui(0), ._ },
.{ ._, ._, .call, .tmp4d, ._, ._, ._ },
.{ ._, ._, .xor, .tmp7d, .tmp7d, ._, ._ },
.{ ._, ._, .@"test", .tmp5d, .tmp5d, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp7b, ._, ._, ._ },
.{ ._, ._, .mov, .tmp6d, .tmp0d, ._, ._ },
.{ ._, ._l, .sh, .tmp7q, .tmp6b, ._, ._ },
.{ ._, ._, .@"or", .tmp1q, .tmp7q, ._, ._ },
.{ ._, ._, .lea, .tmp0d, .lead(.none, .tmp0, 1), ._, ._ },
.{ ._, ._, .@"test", .tmp0d, .si(0b111111), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp5d, .tmp0d, ._, ._ },
.{ ._, ._r, .sh, .tmp5d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memid(.dst0q, .tmp5, -8), .tmp1q, ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ .@"1:", ._, .cmp, .tmp0d, .sa(.src0, .add_len), ._, ._ },
.{ ._, ._b, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp0d, .si(0b111111), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp5d, .tmp0d, ._, ._ },
.{ ._, ._r, .sh, .tmp5d, .ui(6), ._, ._ },
.{ ._, ._, .mov, .memsi(.dst0q, .@"8", .tmp5), .tmp1q, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .sse2, null, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.{ .type = .f16, .kind = .{ .reg = .xmm0 } },
.{ .type = .f16, .kind = .{ .reg = .xmm1 } },
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
else => unreachable,
.e => "__eqhf2",
.ne => "__nehf2",
} } } },
.{ .type = .i32, .kind = .{ .reg = .eax } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u64, .kind = .{ .reg = .rdx } },
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.each = .{ .once = &.{
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ .@"0:", .p_, .xor, .tmp2x, .tmp2x, ._, ._ },
.{ ._, .p_w, .insr, .tmp2x, .memsi(.src0w, .@"2", .tmp0), .ui(0), ._ },
.{ ._, .p_, .xor, .tmp3x, .tmp3x, ._, ._ },
.{ ._, .p_w, .insr, .tmp3x, .memsi(.src1w, .@"2", .tmp0), .ui(0), ._ },
.{ ._, ._, .call, .tmp4d, ._, ._, ._ },
.{ ._, ._, .xor, .tmp7d, .tmp7d, ._, ._ },
.{ ._, ._, .@"test", .tmp5d, .tmp5d, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp7b, ._, ._, ._ },
.{ ._, ._, .mov, .tmp6d, .tmp0d, ._, ._ },
.{ ._, ._l, .sh, .tmp7q, .tmp6b, ._, ._ },
.{ ._, ._, .@"or", .tmp1q, .tmp7q, ._, ._ },
.{ ._, ._c, .in, .tmp0d, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp0d, .si(0b111111), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp5d, .tmp0d, ._, ._ },
.{ ._, ._r, .sh, .tmp5d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memid(.dst0q, .tmp5, -8), .tmp1q, ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ .@"1:", ._, .cmp, .tmp0d, .sa(.src0, .add_len), ._, ._ },
.{ ._, ._b, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp0d, .si(0b111111), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp5d, .tmp0d, ._, ._ },
.{ ._, ._r, .sh, .tmp5d, .ui(6), ._, ._ },
.{ ._, ._, .mov, .memsi(.dst0q, .@"8", .tmp5), .tmp1q, ._, ._ },
} },
}, .{
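// Plain SSE lacks pinsrw on xmm registers, so the operands are staged through a
// 16-byte stack slot and loaded with movaps.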
.required_features = .{ .@"64bit", .sse, .slow_incdec, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.{ .type = .f16, .kind = .{ .reg = .xmm0 } },
.{ .type = .f16, .kind = .{ .reg = .xmm1 } },
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
else => unreachable,
.e => "__eqhf2",
.ne => "__nehf2",
} } } },
.{ .type = .i32, .kind = .{ .reg = .eax } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u64, .kind = .{ .reg = .rdx } },
.{ .type = .vector_8_f16, .kind = .mem },
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.each = .{ .once = &.{
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ .@"0:", ._, .movzx, .tmp5d, .memsi(.src0w, .@"2", .tmp0), ._, ._ },
.{ ._, ._, .mov, .mem(.tmp8d), .tmp5d, ._, ._ },
.{ ._, ._ps, .mova, .tmp2x, .mem(.tmp8x), ._, ._ },
.{ ._, ._, .movzx, .tmp5d, .memsi(.src1w, .@"2", .tmp0), ._, ._ },
.{ ._, ._, .mov, .mem(.tmp8d), .tmp5d, ._, ._ },
.{ ._, ._ps, .mova, .tmp3x, .mem(.tmp8x), ._, ._ },
.{ ._, ._, .call, .tmp4d, ._, ._, ._ },
.{ ._, ._, .xor, .tmp7d, .tmp7d, ._, ._ },
.{ ._, ._, .@"test", .tmp5d, .tmp5d, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp7b, ._, ._, ._ },
.{ ._, ._, .mov, .tmp6d, .tmp0d, ._, ._ },
.{ ._, ._l, .sh, .tmp7q, .tmp6b, ._, ._ },
.{ ._, ._, .@"or", .tmp1q, .tmp7q, ._, ._ },
.{ ._, ._, .lea, .tmp0d, .lead(.none, .tmp0, 1), ._, ._ },
.{ ._, ._, .@"test", .tmp0d, .si(0b111111), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp5d, .tmp0d, ._, ._ },
.{ ._, ._r, .sh, .tmp5d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memid(.dst0q, .tmp5, -8), .tmp1q, ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ .@"1:", ._, .cmp, .tmp0d, .sa(.src0, .add_len), ._, ._ },
.{ ._, ._b, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp0d, .si(0b111111), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp5d, .tmp0d, ._, ._ },
.{ ._, ._r, .sh, .tmp5d, .ui(6), ._, ._ },
.{ ._, ._, .mov, .memsi(.dst0q, .@"8", .tmp5), .tmp1q, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .sse, null, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
.{ .multiple_scalar_float = .{ .of = .word, .is = .word } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.{ .type = .f16, .kind = .{ .reg = .xmm0 } },
.{ .type = .f16, .kind = .{ .reg = .xmm1 } },
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
else => unreachable,
.e => "__eqhf2",
.ne => "__nehf2",
} } } },
.{ .type = .i32, .kind = .{ .reg = .eax } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u64, .kind = .{ .reg = .rdx } },
.{ .type = .vector_8_f16, .kind = .mem },
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.each = .{ .once = &.{
.{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ .@"0:", ._, .movzx, .tmp5d, .memsi(.src0w, .@"2", .tmp0), ._, ._ },
.{ ._, ._, .mov, .mem(.tmp8d), .tmp5d, ._, ._ },
.{ ._, ._ps, .mova, .tmp2x, .mem(.tmp8x), ._, ._ },
.{ ._, ._, .movzx, .tmp5d, .memsi(.src1w, .@"2", .tmp0), ._, ._ },
.{ ._, ._, .mov, .mem(.tmp8d), .tmp5d, ._, ._ },
.{ ._, ._ps, .mova, .tmp3x, .mem(.tmp8x), ._, ._ },
.{ ._, ._, .call, .tmp4d, ._, ._, ._ },
.{ ._, ._, .xor, .tmp7d, .tmp7d, ._, ._ },
.{ ._, ._, .@"test", .tmp5d, .tmp5d, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp7b, ._, ._, ._ },
.{ ._, ._, .mov, .tmp6d, .tmp0d, ._, ._ },
.{ ._, ._l, .sh, .tmp7q, .tmp6b, ._, ._ },
.{ ._, ._, .@"or", .tmp1q, .tmp7q, ._, ._ },
.{ ._, ._c, .in, .tmp0d, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp0d, .si(0b111111), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp5d, .tmp0d, ._, ._ },
.{ ._, ._r, .sh, .tmp5d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memid(.dst0q, .tmp5, -8), .tmp1q, ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ .@"1:", ._, .cmp, .tmp0d, .sa(.src0, .add_len), ._, ._ },
.{ ._, ._b, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp0d, .si(0b111111), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp5d, .tmp0d, ._, ._ },
.{ ._, ._r, .sh, .tmp5d, .ui(6), ._, ._ },
.{ ._, ._, .mov, .memsi(.dst0q, .@"8", .tmp5), .tmp1q, ._, ._ },
} },
}, .{
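// f32 vectors with AVX: vcmpps produces a lane mask directly and vmovmskps
// extracts 8 result bits per yword, stored to the destination a byte at a time.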
.required_features = .{ .avx, .slow_incdec, null, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .yword, .is = .dword } },
.{ .multiple_scalar_float = .{ .of = .yword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_8_f32, .kind = .{ .rc = .sse } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
.{ .@"0:", .v_ps, .mova, .tmp2y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
.{ ._, .v_ps, .cmp, .tmp2y, .tmp2y, .memia(.src1y, .tmp0, .add_size), .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}) },
.{ ._, .v_ps, .movmsk, .tmp3d, .tmp2y, ._, ._ },
.{ ._, ._, .mov, .lea(.byte, .tmp1), .tmp3b, ._, ._ },
.{ ._, ._, .lea, .tmp1p, .lead(.none, .tmp1, 1), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .yword, .is = .dword } },
.{ .multiple_scalar_float = .{ .of = .yword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_8_f32, .kind = .{ .rc = .sse } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
.{ .@"0:", .v_ps, .mova, .tmp2y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
.{ ._, .v_ps, .cmp, .tmp2y, .tmp2y, .memia(.src1y, .tmp0, .add_size), .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}) },
.{ ._, .v_ps, .movmsk, .tmp3d, .tmp2y, ._, ._ },
.{ ._, ._, .mov, .lea(.byte, .tmp1), .tmp3b, ._, ._ },
.{ ._, ._c, .in, .tmp1q, ._, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
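// SSE2 f32 path: the two xword cmpps results are narrowed with packssdw and
// packsswb so a single pmovmskb extracts the whole mask byte.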
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .yword, .is = .dword } },
.{ .multiple_scalar_float = .{ .of = .yword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_4_f32, .kind = .{ .rc = .sse } },
.{ .type = .vector_4_f32, .kind = .{ .rc = .sse } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
.{ .@"0:", ._ps, .mova, .tmp2x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, ._ps, .mova, .tmp3x, .memiad(.src0x, .tmp0, .add_size, 16), ._, ._ },
.{ ._, ._ps, .cmp, .tmp2x, .memia(.src1x, .tmp0, .add_size), .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}), ._ },
.{ ._, ._ps, .cmp, .tmp3x, .memiad(.src1x, .tmp0, .add_size, 16), .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}), ._ },
.{ ._, .p_w, .ackssd, .tmp2x, .tmp3x, ._, ._ },
.{ ._, .p_b, .ackssw, .tmp2x, .tmp2x, ._, ._ },
.{ ._, .p_b, .movmsk, .tmp4d, .tmp2x, ._, ._ },
.{ ._, ._, .mov, .lea(.byte, .tmp1), .tmp4b, ._, ._ },
.{ ._, ._, .lea, .tmp1p, .lead(.none, .tmp1, 1), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
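// SSE1 f32 path: movmskps yields 4 bits per xword; two nibbles are merged with
// shl/or to form each destination byte.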
.required_features = .{ .sse, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .yword, .is = .dword } },
.{ .multiple_scalar_float = .{ .of = .yword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_4_f32, .kind = .{ .rc = .sse } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
.{ .@"0:", ._ps, .mova, .tmp2x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, ._ps, .cmp, .tmp2x, .memia(.src1x, .tmp0, .add_size), .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}), ._ },
.{ ._, ._ps, .movmsk, .tmp3d, .tmp2x, ._, ._ },
.{ ._, ._ps, .mova, .tmp2x, .memiad(.src0x, .tmp0, .add_size, 16), ._, ._ },
.{ ._, ._ps, .cmp, .tmp2x, .memiad(.src1x, .tmp0, .add_size, 16), .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}), ._ },
.{ ._, ._ps, .movmsk, .tmp4d, .tmp2x, ._, ._ },
.{ ._, ._l, .sh, .tmp4b, .ui(4), ._, ._ },
.{ ._, ._, .@"or", .tmp3b, .tmp4b, ._, ._ },
.{ ._, ._, .mov, .lea(.byte, .tmp1), .tmp3b, ._, ._ },
.{ ._, ._, .lea, .tmp1p, .lead(.none, .tmp1, 1), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
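// Handles the xword-multiple sizes left over from the yword pattern above: the
// loop is rotated (entered at label 1) so the final 4-bit nibble is stored on
// exit.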
.required_features = .{ .sse, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .xword, .is = .dword } },
.{ .multiple_scalar_float = .{ .of = .xword, .is = .dword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_4_f32, .kind = .{ .rc = .sse } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
.{ ._, ._mp, .j, .@"1f", ._, ._, ._ },
.{ .@"0:", ._ps, .mova, .tmp2x, .memiad(.src0x, .tmp0, .add_size, -16), ._, ._ },
.{ ._, ._ps, .cmp, .tmp2x, .memiad(.src1x, .tmp0, .add_size, -16), .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}), ._ },
.{ ._, ._ps, .movmsk, .tmp4d, .tmp2x, ._, ._ },
.{ ._, ._l, .sh, .tmp4b, .ui(4), ._, ._ },
.{ ._, ._, .@"or", .tmp3b, .tmp4b, ._, ._ },
.{ ._, ._, .mov, .lea(.byte, .tmp1), .tmp3b, ._, ._ },
.{ ._, ._, .lea, .tmp1p, .lead(.none, .tmp1, 1), ._, ._ },
.{ .@"1:", ._ps, .mova, .tmp2x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, ._ps, .cmp, .tmp2x, .memia(.src1x, .tmp0, .add_size), .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}), ._ },
.{ ._, ._ps, .movmsk, .tmp3d, .tmp2x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .mov, .lea(.byte, .tmp1), .tmp3b, ._, ._ },
} },
}, .{
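// f64 with AVX: vcmppd + vmovmskpd give 4 bits per yword, so each pass over two
// ywords fills one mask byte.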
.required_features = .{ .avx, .slow_incdec, null, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .zword, .is = .qword } },
.{ .multiple_scalar_float = .{ .of = .zword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_4_f64, .kind = .{ .rc = .sse } },
.{ .type = .vector_4_f64, .kind = .{ .rc = .sse } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
.{ .@"0:", .v_pd, .mova, .tmp2y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
.{ ._, .v_pd, .mova, .tmp3y, .memiad(.src0y, .tmp0, .add_size, 32), ._, ._ },
.{ ._, .v_pd, .cmp, .tmp2y, .tmp2y, .memia(.src1y, .tmp0, .add_size), .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}) },
.{ ._, .v_pd, .cmp, .tmp3y, .tmp3y, .memiad(.src1y, .tmp0, .add_size, 32), .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}) },
.{ ._, .v_pd, .movmsk, .tmp4d, .tmp2y, ._, ._ },
.{ ._, .v_pd, .movmsk, .tmp5d, .tmp3y, ._, ._ },
.{ ._, ._l, .sh, .tmp5b, .ui(4), ._, ._ },
.{ ._, ._, .@"or", .tmp4b, .tmp5b, ._, ._ },
.{ ._, ._, .mov, .lea(.byte, .tmp1), .tmp4b, ._, ._ },
.{ ._, ._, .lea, .tmp1p, .lead(.none, .tmp1, 1), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(64), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .zword, .is = .qword } },
.{ .multiple_scalar_float = .{ .of = .zword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_4_f64, .kind = .{ .rc = .sse } },
.{ .type = .vector_4_f64, .kind = .{ .rc = .sse } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
.{ .@"0:", .v_pd, .mova, .tmp2y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
.{ ._, .v_pd, .mova, .tmp3y, .memiad(.src0y, .tmp0, .add_size, 32), ._, ._ },
.{ ._, .v_pd, .cmp, .tmp2y, .tmp2y, .memia(.src1y, .tmp0, .add_size), .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}) },
.{ ._, .v_pd, .cmp, .tmp3y, .tmp3y, .memiad(.src1y, .tmp0, .add_size, 32), .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}) },
.{ ._, .v_pd, .movmsk, .tmp4d, .tmp2y, ._, ._ },
.{ ._, .v_pd, .movmsk, .tmp5d, .tmp3y, ._, ._ },
.{ ._, ._l, .sh, .tmp5b, .ui(4), ._, ._ },
.{ ._, ._, .@"or", .tmp4b, .tmp5b, ._, ._ },
.{ ._, ._, .mov, .lea(.byte, .tmp1), .tmp4b, ._, ._ },
.{ ._, ._c, .in, .tmp1q, ._, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(64), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
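// yword-multiple f64 sizes: the 4 bits from each vcmppd are rotated into
// position with rol cl and flushed once a full byte accumulates.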
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .yword, .is = .qword } },
.{ .multiple_scalar_float = .{ .of = .yword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .reg = .rcx } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_4_f64, .kind = .{ .rc = .sse } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"0:", .v_pd, .mova, .tmp3y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
.{ ._, .v_pd, .cmp, .tmp3y, .tmp3y, .memia(.src1y, .tmp0, .add_size), .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}) },
.{ ._, .v_pd, .movmsk, .tmp4d, .tmp3y, ._, ._ },
.{ ._, ._l, .ro, .tmp4b, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .tmp2b, .tmp4b, ._, ._ },
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 4), ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp4d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp4d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memid(.dst0b, .tmp4, -1), .tmp2b, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"1:", ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp4d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp4d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memi(.dst0b, .tmp4), .tmp2b, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .xword, .is = .qword } },
.{ .multiple_scalar_float = .{ .of = .xword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .reg = .rcx } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .vector_2_f64, .kind = .{ .rc = .sse } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"0:", ._pd, .mova, .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, ._pd, .cmp, .tmp3x, .memia(.src1x, .tmp0, .add_size), .vp(switch (cc) {
else => unreachable,
.e => .eq,
.ne => .neq,
}), ._ },
.{ ._, ._pd, .movmsk, .tmp4d, .tmp3x, ._, ._ },
.{ ._, ._l, .ro, .tmp4b, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .tmp2b, .tmp4b, ._, ._ },
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 2), ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp4d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp4d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memid(.dst0b, .tmp4, -1), .tmp2b, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"1:", ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp4d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp4d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memi(.dst0b, .tmp4), .tmp2b, ._, ._ },
} },
}, .{
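// x87 fallback: fucomip sets ZF/PF, and a parity check is merged in so that
// unordered operands compare as not-equal (setnp for eq, setp for ne).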
.required_features = .{ .x87, .cmov, .slow_incdec, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .qword, .is = .qword } },
.{ .multiple_scalar_float = .{ .of = .qword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .reg = .rcx } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .type = .f80, .kind = .{ .reg = .st6 } },
.{ .type = .f80, .kind = .{ .reg = .st7 } },
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"0:", ._, .xor, .tmp3d, .tmp3d, ._, ._ },
.{ ._, ._, .xor, .tmp4d, .tmp4d, ._, ._ },
.{ ._, .f_, .ld, .memia(.src1q, .tmp0, .add_size), ._, ._, ._ },
.{ ._, .f_, .ld, .memia(.src0q, .tmp0, .add_size), ._, ._, ._ },
.{ ._, .f_p, .ucomi, .tmp5t, .tmp6t, ._, ._ },
.{ ._, .f_p, .st, .tmp6t, ._, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp3b, ._, ._, ._ },
.{ ._, switch (cc) {
else => unreachable,
.e => ._np,
.ne => ._p,
}, .set, .tmp4b, ._, ._, ._ },
.{ ._, ._, switch (cc) {
else => unreachable,
.e => .@"and",
.ne => .@"or",
}, .tmp3b, .tmp4b, ._, ._ },
.{ ._, ._l, .ro, .tmp3b, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .tmp2b, .tmp3b, ._, ._ },
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 1), ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp3d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memid(.dst0b, .tmp3, -1), .tmp2b, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"1:", ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp3d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memi(.dst0b, .tmp3), .tmp2b, ._, ._ },
} },
}, .{
.required_features = .{ .x87, .cmov, null, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .qword, .is = .qword } },
.{ .multiple_scalar_float = .{ .of = .qword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .reg = .rcx } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .type = .f80, .kind = .{ .reg = .st6 } },
.{ .type = .f80, .kind = .{ .reg = .st7 } },
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"0:", ._, .xor, .tmp3d, .tmp3d, ._, ._ },
.{ ._, ._, .xor, .tmp4d, .tmp4d, ._, ._ },
.{ ._, .f_, .ld, .memia(.src1q, .tmp0, .add_size), ._, ._, ._ },
.{ ._, .f_, .ld, .memia(.src0q, .tmp0, .add_size), ._, ._, ._ },
.{ ._, .f_p, .ucomi, .tmp5t, .tmp6t, ._, ._ },
.{ ._, .f_p, .st, .tmp6t, ._, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp3b, ._, ._, ._ },
.{ ._, switch (cc) {
else => unreachable,
.e => ._np,
.ne => ._p,
}, .set, .tmp4b, ._, ._, ._ },
.{ ._, ._, switch (cc) {
else => unreachable,
.e => .@"and",
.ne => .@"or",
}, .tmp3b, .tmp4b, ._, ._ },
.{ ._, ._l, .ro, .tmp3b, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .tmp2b, .tmp3b, ._, ._ },
.{ ._, ._c, .in, .tmp1d, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp3d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memid(.dst0b, .tmp3, -1), .tmp2b, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"1:", ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp3d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memi(.dst0b, .tmp3), .tmp2b, ._, ._ },
} },
}, .{
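// x87 without cmov: the FPU status word is fetched with fnstsw and the C3/C2
// condition bits are tested directly in ah.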
.required_features = .{ .x87, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .qword, .is = .qword } },
.{ .multiple_scalar_float = .{ .of = .qword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .reg = .rcx } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .type = .f80, .kind = .{ .reg = .st6 } },
.{ .type = .f80, .kind = .{ .reg = .st7 } },
.{ .type = .u8, .kind = .{ .reg = .ah } },
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"0:", ._, .xor, .tmp3d, .tmp3d, ._, ._ },
.{ ._, .f_, .ld, .memia(.src1q, .tmp0, .add_size), ._, ._, ._ },
.{ ._, .f_, .ld, .memia(.src0q, .tmp0, .add_size), ._, ._, ._ },
.{ ._, .f_pp, .ucom, ._, ._, ._, ._ },
.{ ._, .fn_sw, .st, .tmp6w, ._, ._, ._ },
.{ ._, ._, .xor, .tmp6b, .si(0b0_1_000_000), ._, ._ },
.{ ._, ._, .@"test", .tmp6b, .si(0b0_1_000_100), ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp3b, ._, ._, ._ },
.{ ._, ._l, .ro, .tmp3b, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .tmp2b, .tmp3b, ._, ._ },
.{ ._, ._c, .in, .tmp1d, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp3d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memid(.dst0b, .tmp3, -1), .tmp2b, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"1:", ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp3d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memi(.dst0b, .tmp3), .tmp2b, ._, ._ },
} },
}, .{
.required_features = .{ .x87, .cmov, .slow_incdec, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .xword, .is = .tbyte } },
.{ .multiple_scalar_float = .{ .of = .xword, .is = .tbyte } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .reg = .rcx } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .type = .f80, .kind = .{ .reg = .st6 } },
.{ .type = .f80, .kind = .{ .reg = .st7 } },
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"0:", ._, .xor, .tmp3d, .tmp3d, ._, ._ },
.{ ._, ._, .xor, .tmp4d, .tmp4d, ._, ._ },
.{ ._, .f_, .ld, .memia(.src1t, .tmp0, .add_size), ._, ._, ._ },
.{ ._, .f_, .ld, .memia(.src0t, .tmp0, .add_size), ._, ._, ._ },
.{ ._, .f_p, .ucomi, .tmp5t, .tmp6t, ._, ._ },
.{ ._, .f_p, .st, .tmp6t, ._, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp3b, ._, ._, ._ },
.{ ._, switch (cc) {
else => unreachable,
.e => ._np,
.ne => ._p,
}, .set, .tmp4b, ._, ._, ._ },
.{ ._, ._, switch (cc) {
else => unreachable,
.e => .@"and",
.ne => .@"or",
}, .tmp3b, .tmp4b, ._, ._ },
.{ ._, ._l, .ro, .tmp3b, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .tmp2b, .tmp3b, ._, ._ },
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 1), ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp3d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memid(.dst0b, .tmp3, -1), .tmp2b, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"1:", ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp3d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memi(.dst0b, .tmp3), .tmp2b, ._, ._ },
} },
}, .{
.required_features = .{ .x87, .cmov, null, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .xword, .is = .tbyte } },
.{ .multiple_scalar_float = .{ .of = .xword, .is = .tbyte } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .reg = .rcx } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .type = .f80, .kind = .{ .reg = .st6 } },
.{ .type = .f80, .kind = .{ .reg = .st7 } },
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"0:", ._, .xor, .tmp3d, .tmp3d, ._, ._ },
.{ ._, ._, .xor, .tmp4d, .tmp4d, ._, ._ },
.{ ._, .f_, .ld, .memia(.src1t, .tmp0, .add_size), ._, ._, ._ },
.{ ._, .f_, .ld, .memia(.src0t, .tmp0, .add_size), ._, ._, ._ },
.{ ._, .f_p, .ucomi, .tmp5t, .tmp6t, ._, ._ },
.{ ._, .f_p, .st, .tmp6t, ._, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp3b, ._, ._, ._ },
.{ ._, switch (cc) {
else => unreachable,
.e => ._np,
.ne => ._p,
}, .set, .tmp4b, ._, ._, ._ },
.{ ._, ._, switch (cc) {
else => unreachable,
.e => .@"and",
.ne => .@"or",
}, .tmp3b, .tmp4b, ._, ._ },
.{ ._, ._l, .ro, .tmp3b, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .tmp2b, .tmp3b, ._, ._ },
.{ ._, ._c, .in, .tmp1d, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp3d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memid(.dst0b, .tmp3, -1), .tmp2b, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"1:", ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp3d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memi(.dst0b, .tmp3), .tmp2b, ._, ._ },
} },
}, .{
.required_features = .{ .x87, null, null, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .xword, .is = .tbyte } },
.{ .multiple_scalar_float = .{ .of = .xword, .is = .tbyte } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .reg = .rcx } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .type = .f80, .kind = .{ .reg = .st6 } },
.{ .type = .f80, .kind = .{ .reg = .st7 } },
.{ .type = .u8, .kind = .{ .reg = .ah } },
.unused,
.unused,
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"0:", ._, .xor, .tmp3d, .tmp3d, ._, ._ },
.{ ._, .f_, .ld, .memia(.src1t, .tmp0, .add_size), ._, ._, ._ },
.{ ._, .f_, .ld, .memia(.src0t, .tmp0, .add_size), ._, ._, ._ },
.{ ._, .f_pp, .ucom, ._, ._, ._, ._ },
.{ ._, .fn_sw, .st, .tmp6w, ._, ._, ._ },
.{ ._, ._, .xor, .tmp6b, .si(0b0_1_000_000), ._, ._ },
.{ ._, ._, .@"test", .tmp6b, .si(0b0_1_000_100), ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp3b, ._, ._, ._ },
.{ ._, ._l, .ro, .tmp3b, .tmp1b, ._, ._ },
.{ ._, ._, .@"or", .tmp2b, .tmp3b, ._, ._ },
.{ ._, ._c, .in, .tmp1d, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp3d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memid(.dst0b, .tmp3, -1), .tmp2b, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"1:", ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp3d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memi(.dst0b, .tmp3), .tmp2b, ._, ._ },
} },
}, .{
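// f128 comparisons go through the __eqtf2/__netf2 libcalls, one xword-sized
// lane at a time, accumulating the result mask in a dword register.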
.required_features = .{ .avx, .slow_incdec, null, null },
.dst_constraints = .{.{ .bool_vec = .dword }},
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .f128, .kind = .{ .reg = .xmm0 } },
.{ .type = .f128, .kind = .{ .reg = .xmm1 } },
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
else => unreachable,
.e => "__eqtf2",
.ne => "__netf2",
} } } },
.{ .type = .i32, .kind = .{ .reg = .eax } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u32, .kind = .{ .reg = .edx } },
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ .@"0:", .v_dqa, .mov, .tmp2x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .v_dqa, .mov, .tmp3x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .call, .tmp4d, ._, ._, ._ },
.{ ._, ._, .xor, .tmp7d, .tmp7d, ._, ._ },
.{ ._, ._, .@"test", .tmp5d, .tmp5d, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp7b, ._, ._, ._ },
.{ ._, ._, .mov, .tmp6d, .tmp1d, ._, ._ },
.{ ._, ._l, .sh, .tmp7d, .tmp6b, ._, ._ },
.{ ._, ._, .@"or", .dst0d, .tmp7d, ._, ._ },
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 1), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.dst_constraints = .{.{ .bool_vec = .dword }},
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .f128, .kind = .{ .reg = .xmm0 } },
.{ .type = .f128, .kind = .{ .reg = .xmm1 } },
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
else => unreachable,
.e => "__eqtf2",
.ne => "__netf2",
} } } },
.{ .type = .i32, .kind = .{ .reg = .eax } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u32, .kind = .{ .reg = .edx } },
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ .@"0:", .v_dqa, .mov, .tmp2x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .v_dqa, .mov, .tmp3x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .call, .tmp4d, ._, ._, ._ },
.{ ._, ._, .xor, .tmp7d, .tmp7d, ._, ._ },
.{ ._, ._, .@"test", .tmp5d, .tmp5d, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp7b, ._, ._, ._ },
.{ ._, ._, .mov, .tmp6d, .tmp1d, ._, ._ },
.{ ._, ._l, .sh, .tmp7d, .tmp6b, ._, ._ },
.{ ._, ._, .@"or", .dst0d, .tmp7d, ._, ._ },
.{ ._, ._c, .in, .tmp1d, ._, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, .slow_incdec, null, null },
.dst_constraints = .{.{ .bool_vec = .dword }},
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .f128, .kind = .{ .reg = .xmm0 } },
.{ .type = .f128, .kind = .{ .reg = .xmm1 } },
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
else => unreachable,
.e => "__eqtf2",
.ne => "__netf2",
} } } },
.{ .type = .i32, .kind = .{ .reg = .eax } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u32, .kind = .{ .reg = .edx } },
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ .@"0:", ._dqa, .mov, .tmp2x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, ._dqa, .mov, .tmp3x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .call, .tmp4d, ._, ._, ._ },
.{ ._, ._, .xor, .tmp7d, .tmp7d, ._, ._ },
.{ ._, ._, .@"test", .tmp5d, .tmp5d, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp7b, ._, ._, ._ },
.{ ._, ._, .mov, .tmp6d, .tmp1d, ._, ._ },
.{ ._, ._l, .sh, .tmp7d, .tmp6b, ._, ._ },
.{ ._, ._, .@"or", .dst0d, .tmp7d, ._, ._ },
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 1), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.dst_constraints = .{.{ .bool_vec = .dword }},
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .f128, .kind = .{ .reg = .xmm0 } },
.{ .type = .f128, .kind = .{ .reg = .xmm1 } },
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
else => unreachable,
.e => "__eqtf2",
.ne => "__netf2",
} } } },
.{ .type = .i32, .kind = .{ .reg = .eax } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u32, .kind = .{ .reg = .edx } },
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ .@"0:", ._dqa, .mov, .tmp2x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, ._dqa, .mov, .tmp3x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .call, .tmp4d, ._, ._, ._ },
.{ ._, ._, .xor, .tmp7d, .tmp7d, ._, ._ },
.{ ._, ._, .@"test", .tmp5d, .tmp5d, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp7b, ._, ._, ._ },
.{ ._, ._, .mov, .tmp6d, .tmp1d, ._, ._ },
.{ ._, ._l, .sh, .tmp7d, .tmp6b, ._, ._ },
.{ ._, ._, .@"or", .dst0d, .tmp7d, ._, ._ },
.{ ._, ._c, .in, .tmp1d, ._, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse, .slow_incdec, null, null },
.dst_constraints = .{.{ .bool_vec = .dword }},
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .f128, .kind = .{ .reg = .xmm0 } },
.{ .type = .f128, .kind = .{ .reg = .xmm1 } },
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
else => unreachable,
.e => "__eqtf2",
.ne => "__netf2",
} } } },
.{ .type = .i32, .kind = .{ .reg = .eax } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u32, .kind = .{ .reg = .edx } },
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ .@"0:", ._ps, .mova, .tmp2x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, ._ps, .mova, .tmp3x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .call, .tmp4d, ._, ._, ._ },
.{ ._, ._, .xor, .tmp7d, .tmp7d, ._, ._ },
.{ ._, ._, .@"test", .tmp5d, .tmp5d, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp7b, ._, ._, ._ },
.{ ._, ._, .mov, .tmp6d, .tmp1d, ._, ._ },
.{ ._, ._l, .sh, .tmp7d, .tmp6b, ._, ._ },
.{ ._, ._, .@"or", .dst0d, .tmp7d, ._, ._ },
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 1), ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.dst_constraints = .{.{ .bool_vec = .dword }},
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .f128, .kind = .{ .reg = .xmm0 } },
.{ .type = .f128, .kind = .{ .reg = .xmm1 } },
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
else => unreachable,
.e => "__eqtf2",
.ne => "__netf2",
} } } },
.{ .type = .i32, .kind = .{ .reg = .eax } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u32, .kind = .{ .reg = .edx } },
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.each = .{ .once = &.{
.{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ .@"0:", ._ps, .mova, .tmp2x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, ._ps, .mova, .tmp3x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .call, .tmp4d, ._, ._, ._ },
.{ ._, ._, .xor, .tmp7d, .tmp7d, ._, ._ },
.{ ._, ._, .@"test", .tmp5d, .tmp5d, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp7b, ._, ._, ._ },
.{ ._, ._, .mov, .tmp6d, .tmp1d, ._, ._ },
.{ ._, ._l, .sh, .tmp7d, .tmp6b, ._, ._ },
.{ ._, ._, .@"or", .dst0d, .tmp7d, ._, ._ },
.{ ._, ._c, .in, .tmp1d, ._, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
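// 64-bit f128 variants with an in-memory destination: like the f16 patterns
// above, the mask is flushed to memory after every 64 lanes.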
.required_features = .{ .@"64bit", .avx, .slow_incdec, null },
.src_constraints = .{
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
.{ .type = .f128, .kind = .{ .reg = .xmm0 } },
.{ .type = .f128, .kind = .{ .reg = .xmm1 } },
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
else => unreachable,
.e => "__eqtf2",
.ne => "__netf2",
} } } },
.{ .type = .i32, .kind = .{ .reg = .eax } },
.{ .type = .u8, .kind = .{ .reg = .cl } },
.{ .type = .u64, .kind = .{ .reg = .rdx } },
},
.dst_temps = .{.mem},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"0:", .v_dqa, .mov, .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .v_dqa, .mov, .tmp4x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .call, .tmp5d, ._, ._, ._ },
.{ ._, ._, .xor, .tmp8d, .tmp8d, ._, ._ },
.{ ._, ._, .@"test", .tmp6d, .tmp6d, ._, ._ },
.{ ._, .fromCond(cc), .set, .tmp8b, ._, ._, ._ },
.{ ._, ._, .mov, .tmp7d, .tmp1d, ._, ._ },
.{ ._, ._l, .sh, .tmp8q, .tmp7b, ._, ._ },
.{ ._, ._, .@"or", .tmp2q, .tmp8q, ._, ._ },
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 1), ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111111), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp6d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp6d, .ui(3), ._, ._ },
.{ ._, ._, .mov, .memid(.dst0q, .tmp6, -8), .tmp2q, ._, ._ },
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
.{ .@"1:", ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .si(0b111111), ._, ._ },
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .mov, .tmp6d, .tmp1d, ._, ._ },
.{ ._, ._r, .sh, .tmp6d, .si(9), ._, ._ },
|
|
.{ ._, ._, .mov, .memsi(.dst0q, .@"8", .tmp6), .tmp2q, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", .avx, null, null },
|
|
.src_constraints = .{
|
|
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
|
|
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.call_frame = .{ .alignment = .@"16" },
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .f128, .kind = .{ .reg = .xmm0 } },
|
|
.{ .type = .f128, .kind = .{ .reg = .xmm1 } },
|
|
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
|
|
else => unreachable,
|
|
.e => "__eqtf2",
|
|
.ne => "__netf2",
|
|
} } } },
|
|
.{ .type = .i32, .kind = .{ .reg = .eax } },
|
|
.{ .type = .u8, .kind = .{ .reg = .cl } },
|
|
.{ .type = .u64, .kind = .{ .reg = .rdx } },
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
|
|
.{ .@"0:", .v_dqa, .mov, .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, .v_dqa, .mov, .tmp4x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .call, .tmp5d, ._, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp8d, .tmp8d, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp6d, .tmp6d, ._, ._ },
|
|
.{ ._, .fromCond(cc), .set, .tmp8b, ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp7d, .tmp1d, ._, ._ },
|
|
.{ ._, ._l, .sh, .tmp8q, .tmp7b, ._, ._ },
|
|
.{ ._, ._, .@"or", .tmp2q, .tmp8q, ._, ._ },
|
|
.{ ._, ._c, .in, .tmp1d, ._, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111111), ._, ._ },
|
|
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp6d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp6d, .ui(3), ._, ._ },
|
|
.{ ._, ._, .mov, .memid(.dst0q, .tmp6, -8), .tmp2q, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
|
|
.{ .@"1:", ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111111), ._, ._ },
|
|
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp6d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp6d, .si(9), ._, ._ },
|
|
.{ ._, ._, .mov, .memsi(.dst0q, .@"8", .tmp6), .tmp2q, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", .sse2, .slow_incdec, null },
|
|
.src_constraints = .{
|
|
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
|
|
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.call_frame = .{ .alignment = .@"16" },
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .f128, .kind = .{ .reg = .xmm0 } },
|
|
.{ .type = .f128, .kind = .{ .reg = .xmm1 } },
|
|
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
|
|
else => unreachable,
|
|
.e => "__eqtf2",
|
|
.ne => "__netf2",
|
|
} } } },
|
|
.{ .type = .i32, .kind = .{ .reg = .eax } },
|
|
.{ .type = .u8, .kind = .{ .reg = .cl } },
|
|
.{ .type = .u64, .kind = .{ .reg = .rdx } },
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
|
|
.{ .@"0:", ._dqa, .mov, .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._dqa, .mov, .tmp4x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .call, .tmp5d, ._, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp8d, .tmp8d, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp6d, .tmp6d, ._, ._ },
|
|
.{ ._, .fromCond(cc), .set, .tmp8b, ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp7d, .tmp1d, ._, ._ },
|
|
.{ ._, ._l, .sh, .tmp8q, .tmp7b, ._, ._ },
|
|
.{ ._, ._, .@"or", .tmp2q, .tmp8q, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 1), ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111111), ._, ._ },
|
|
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp6d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp6d, .ui(3), ._, ._ },
|
|
.{ ._, ._, .mov, .memid(.dst0q, .tmp6, -8), .tmp2q, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
|
|
.{ .@"1:", ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111111), ._, ._ },
|
|
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp6d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp6d, .si(9), ._, ._ },
|
|
.{ ._, ._, .mov, .memsi(.dst0q, .@"8", .tmp6), .tmp2q, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", .sse2, null, null },
|
|
.src_constraints = .{
|
|
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
|
|
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.call_frame = .{ .alignment = .@"16" },
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .f128, .kind = .{ .reg = .xmm0 } },
|
|
.{ .type = .f128, .kind = .{ .reg = .xmm1 } },
|
|
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
|
|
else => unreachable,
|
|
.e => "__eqtf2",
|
|
.ne => "__netf2",
|
|
} } } },
|
|
.{ .type = .i32, .kind = .{ .reg = .eax } },
|
|
.{ .type = .u8, .kind = .{ .reg = .cl } },
|
|
.{ .type = .u64, .kind = .{ .reg = .rdx } },
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
|
|
.{ .@"0:", ._dqa, .mov, .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._dqa, .mov, .tmp4x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .call, .tmp5d, ._, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp8d, .tmp8d, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp6d, .tmp6d, ._, ._ },
|
|
.{ ._, .fromCond(cc), .set, .tmp8b, ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp7d, .tmp1d, ._, ._ },
|
|
.{ ._, ._l, .sh, .tmp8q, .tmp7b, ._, ._ },
|
|
.{ ._, ._, .@"or", .tmp2q, .tmp8q, ._, ._ },
|
|
.{ ._, ._c, .in, .tmp1d, ._, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111111), ._, ._ },
|
|
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp6d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp6d, .ui(3), ._, ._ },
|
|
.{ ._, ._, .mov, .memid(.dst0q, .tmp6, -8), .tmp2q, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
|
|
.{ .@"1:", ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111111), ._, ._ },
|
|
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp6d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp6d, .si(9), ._, ._ },
|
|
.{ ._, ._, .mov, .memsi(.dst0q, .@"8", .tmp6), .tmp2q, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", .sse, .slow_incdec, null },
|
|
.src_constraints = .{
|
|
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
|
|
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.call_frame = .{ .alignment = .@"16" },
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .f128, .kind = .{ .reg = .xmm0 } },
|
|
.{ .type = .f128, .kind = .{ .reg = .xmm1 } },
|
|
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
|
|
else => unreachable,
|
|
.e => "__eqtf2",
|
|
.ne => "__netf2",
|
|
} } } },
|
|
.{ .type = .i32, .kind = .{ .reg = .eax } },
|
|
.{ .type = .u8, .kind = .{ .reg = .cl } },
|
|
.{ .type = .u64, .kind = .{ .reg = .rdx } },
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
|
|
.{ .@"0:", ._ps, .mova, .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._ps, .mova, .tmp4x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .call, .tmp5d, ._, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp8d, .tmp8d, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp6d, .tmp6d, ._, ._ },
|
|
.{ ._, .fromCond(cc), .set, .tmp8b, ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp7d, .tmp1d, ._, ._ },
|
|
.{ ._, ._l, .sh, .tmp8q, .tmp7b, ._, ._ },
|
|
.{ ._, ._, .@"or", .tmp2q, .tmp8q, ._, ._ },
|
|
.{ ._, ._, .lea, .tmp1d, .lead(.none, .tmp1, 1), ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111111), ._, ._ },
|
|
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp6d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp6d, .ui(3), ._, ._ },
|
|
.{ ._, ._, .mov, .memid(.dst0q, .tmp6, -8), .tmp2q, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
|
|
.{ .@"1:", ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111111), ._, ._ },
|
|
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp6d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp6d, .si(9), ._, ._ },
|
|
.{ ._, ._, .mov, .memsi(.dst0q, .@"8", .tmp6), .tmp2q, ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", .sse, null, null },
|
|
.src_constraints = .{
|
|
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
|
|
.{ .multiple_scalar_float = .{ .of = .xword, .is = .xword } },
|
|
},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .to_mem } },
|
|
},
|
|
.call_frame = .{ .alignment = .@"16" },
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .u64, .kind = .{ .rc = .general_purpose } },
|
|
.{ .type = .f128, .kind = .{ .reg = .xmm0 } },
|
|
.{ .type = .f128, .kind = .{ .reg = .xmm1 } },
|
|
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
|
|
else => unreachable,
|
|
.e => "__eqtf2",
|
|
.ne => "__netf2",
|
|
} } } },
|
|
.{ .type = .i32, .kind = .{ .reg = .eax } },
|
|
.{ .type = .u8, .kind = .{ .reg = .cl } },
|
|
.{ .type = .u64, .kind = .{ .reg = .rdx } },
|
|
},
|
|
.dst_temps = .{.mem},
|
|
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
|
|
.{ ._, ._, .xor, .tmp1d, .tmp1d, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
|
|
.{ .@"0:", ._ps, .mova, .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._ps, .mova, .tmp4x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
|
|
.{ ._, ._, .call, .tmp5d, ._, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp8d, .tmp8d, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp6d, .tmp6d, ._, ._ },
|
|
.{ ._, .fromCond(cc), .set, .tmp8b, ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp7d, .tmp1d, ._, ._ },
|
|
.{ ._, ._l, .sh, .tmp8q, .tmp7b, ._, ._ },
|
|
.{ ._, ._, .@"or", .tmp2q, .tmp8q, ._, ._ },
|
|
.{ ._, ._c, .in, .tmp1d, ._, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111111), ._, ._ },
|
|
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp6d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp6d, .ui(3), ._, ._ },
|
|
.{ ._, ._, .mov, .memid(.dst0q, .tmp6, -8), .tmp2q, ._, ._ },
|
|
.{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
|
|
.{ .@"1:", ._, .add, .tmp0p, .si(16), ._, ._ },
|
|
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
|
|
.{ ._, ._, .@"test", .tmp1d, .si(0b111111), ._, ._ },
|
|
.{ ._, ._z, .j, .@"0f", ._, ._, ._ },
|
|
.{ ._, ._, .mov, .tmp6d, .tmp1d, ._, ._ },
|
|
.{ ._, ._r, .sh, .tmp6d, .si(9), ._, ._ },
|
|
.{ ._, ._, .mov, .memsi(.dst0q, .@"8", .tmp6), .tmp2q, ._, ._ },
|
|
} },
|
|
} },
|
|
        }) catch |err| switch (err) {
            error.SelectFailed => return cg.fail("failed to select {s} {s} {} {} {}", .{
                @tagName(air_tag),
                @tagName(extra.compareOperator()),
                cg.typeOf(extra.lhs).fmt(pt),
                ops[0].tracking(cg),
                ops[1].tracking(cg),
            }),
            else => |e| return e,
        },
        .gte => unreachable,
        .gt => unreachable,
    }
    try res[0].finish(inst, &.{ extra.lhs, extra.rhs }, &ops, cg);
},

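// Integer abs entries pick, by operand size and CPU features: a branchless
// xor/sub/cmovs sequence when CMOV is available, a test/jns branch around `neg`
// otherwise, SSSE3/AVX `pabs*` for small integer vectors, and the memory loops
// further down for wide or multi-element operands; float abs clears the sign
// bit with an AND mask, or uses x87 `fabs` for 80-bit floats.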
.abs => |air_tag| if (use_old) try cg.airAbs(inst) else {
    const ty_op = air_datas[@intFromEnum(inst)].ty_op;
    var ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
    var res: [1]Temp = undefined;
    cg.select(&res, &.{ty_op.ty.toType()}, &ops, comptime &.{ .{
        .required_features = .{ .cmov, null, null, null },
        .src_constraints = .{ .{ .int = .byte }, .any },
        .patterns = &.{
            .{ .src = .{ .to_gpr, .none } },
        },
        .dst_temps = .{.{ .rc = .general_purpose }},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
            .{ ._, ._, .sub, .dst0b, .src0b, ._, ._ },
            .{ ._, ._s, .cmov, .dst0d, .src0d, ._, ._ },
        } },
    }, .{
        .src_constraints = .{ .{ .int = .byte }, .any },
        .patterns = &.{
            .{ .src = .{ .mut_mem, .none } },
        },
        .dst_temps = .{.{ .ref = .src0 }},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .cmp, .src0b, .si(0), ._, ._ },
            .{ ._, ._ge, .j, .@"0f", ._, ._, ._ },
            .{ ._, ._, .neg, .dst0b, ._, ._, ._ },
        } },
    }, .{
        .src_constraints = .{ .{ .int = .byte }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mut_gpr, .none } },
        },
        .dst_temps = .{.{ .ref = .src0 }},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .@"test", .src0b, .src0b, ._, ._ },
            .{ ._, ._ns, .j, .@"0f", ._, ._, ._ },
            .{ ._, ._, .neg, .dst0b, ._, ._, ._ },
        } },
    }, .{
        .required_features = .{ .cmov, null, null, null },
        .src_constraints = .{ .{ .int = .word }, .any },
        .patterns = &.{
            .{ .src = .{ .mem, .none } },
        },
        .dst_temps = .{.{ .rc = .general_purpose }},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
            .{ ._, ._, .sub, .dst0w, .src0w, ._, ._ },
            .{ ._, ._s, .cmov, .dst0w, .src0w, ._, ._ },
        } },
    }, .{
        .required_features = .{ .cmov, null, null, null },
        .src_constraints = .{ .{ .int = .word }, .any },
        .patterns = &.{
            .{ .src = .{ .to_gpr, .none } },
        },
        .dst_temps = .{.{ .rc = .general_purpose }},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
            .{ ._, ._, .sub, .dst0w, .src0w, ._, ._ },
            .{ ._, ._s, .cmov, .dst0d, .src0d, ._, ._ },
        } },
    }, .{
        .src_constraints = .{ .{ .int = .word }, .any },
        .patterns = &.{
            .{ .src = .{ .mut_mem, .none } },
        },
        .dst_temps = .{.{ .ref = .src0 }},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .cmp, .src0w, .si(0), ._, ._ },
            .{ ._, ._ge, .j, .@"0f", ._, ._, ._ },
            .{ ._, ._, .neg, .dst0d, ._, ._, ._ },
        } },
    }, .{
        .src_constraints = .{ .{ .int = .word }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mut_gpr, .none } },
        },
        .dst_temps = .{.{ .ref = .src0 }},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .@"test", .src0w, .src0w, ._, ._ },
            .{ ._, ._ns, .j, .@"0f", ._, ._, ._ },
            .{ ._, ._, .neg, .dst0d, ._, ._, ._ },
        } },
    }, .{
        .required_features = .{ .cmov, null, null, null },
        .src_constraints = .{ .{ .int = .dword }, .any },
        .patterns = &.{
            .{ .src = .{ .mem, .none } },
            .{ .src = .{ .to_gpr, .none } },
        },
        .dst_temps = .{.{ .rc = .general_purpose }},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
            .{ ._, ._, .sub, .dst0d, .src0d, ._, ._ },
            .{ ._, ._s, .cmov, .dst0d, .src0d, ._, ._ },
        } },
    }, .{
        .src_constraints = .{ .{ .int = .dword }, .any },
        .patterns = &.{
            .{ .src = .{ .mut_mem, .none } },
        },
        .dst_temps = .{.{ .ref = .src0 }},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .cmp, .src0d, .si(0), ._, ._ },
            .{ ._, ._ge, .j, .@"0f", ._, ._, ._ },
            .{ ._, ._, .neg, .dst0d, ._, ._, ._ },
        } },
    }, .{
        .src_constraints = .{ .{ .int = .dword }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mut_gpr, .none } },
        },
        .dst_temps = .{.{ .ref = .src0 }},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .@"test", .src0d, .src0d, ._, ._ },
            .{ ._, ._ns, .j, .@"0f", ._, ._, ._ },
            .{ ._, ._, .neg, .dst0d, ._, ._, ._ },
        } },
    }, .{
        .required_features = .{ .@"64bit", .cmov, null, null },
        .src_constraints = .{ .{ .int = .qword }, .any },
        .patterns = &.{
            .{ .src = .{ .mem, .none } },
            .{ .src = .{ .to_gpr, .none } },
        },
        .dst_temps = .{.{ .rc = .general_purpose }},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .xor, .dst0d, .dst0d, ._, ._ },
            .{ ._, ._, .sub, .dst0q, .src0q, ._, ._ },
            .{ ._, ._s, .cmov, .dst0q, .src0q, ._, ._ },
        } },
    }, .{
        .required_features = .{ .@"64bit", null, null, null },
        .src_constraints = .{ .{ .int = .qword }, .any },
        .patterns = &.{
            .{ .src = .{ .mut_mem, .none } },
        },
        .dst_temps = .{.{ .ref = .src0 }},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .cmp, .src0q, .si(0), ._, ._ },
            .{ ._, ._ge, .j, .@"0f", ._, ._, ._ },
            .{ ._, ._, .neg, .dst0q, ._, ._, ._ },
        } },
    }, .{
        .required_features = .{ .@"64bit", null, null, null },
        .src_constraints = .{ .{ .int = .qword }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mut_gpr, .none } },
        },
        .dst_temps = .{.{ .ref = .src0 }},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .@"test", .src0q, .src0q, ._, ._ },
            .{ ._, ._ns, .j, .@"0f", ._, ._, ._ },
            .{ ._, ._, .neg, .dst0q, ._, ._, ._ },
        } },
    }, .{
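        // Arbitrary-width integer abs: broadcast the sign of the top limb into a
        // mask with `sar 63`, then compute (limb ^ mask) - mask across all limbs,
        // carrying the borrow between iterations in tmp2b via shr/setc.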
        .required_features = .{ .@"64bit", null, null, null },
        .src_constraints = .{ .any_int, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .type = .i64, .kind = .{ .rc = .general_purpose } },
            .{ .type = .u8, .kind = .{ .rc = .general_purpose } },
            .{ .type = .i64, .kind = .{ .rc = .general_purpose } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ ._, ._, .mov, .tmp1q, .memad(.src0q, .add_size, -8), ._, ._ },
            .{ ._, ._r, .sa, .tmp1q, .si(63), ._, ._ },
            .{ ._, ._, .xor, .tmp2d, .tmp2d, ._, ._ },
            .{ .@"0:", ._, .mov, .tmp3q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
            .{ ._, ._, .xor, .tmp3q, .tmp1q, ._, ._ },
            .{ ._, ._r, .sh, .tmp2b, .si(1), ._, ._ },
            .{ ._, ._, .sbb, .tmp3q, .tmp1q, ._, ._ },
            .{ ._, ._c, .set, .tmp2b, ._, ._, ._ },
            .{ ._, ._, .mov, .memia(.dst0q, .tmp0, .add_size), .tmp3q, ._, ._ },
            .{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
            .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
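        // Single-register vector abs: SSSE3 `pabsb`/`pabsw`/`pabsd` on MMX or SSE
        // registers, with the AVX (`vpabs*` on xmm) and AVX2 (ymm) forms below.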
        .required_features = .{ .mmx, .ssse3, null, null },
        .src_constraints = .{ .{ .scalar_int = .{ .of = .qword, .is = .byte } }, .any },
        .patterns = &.{
            .{ .src = .{ .mem, .none } },
            .{ .src = .{ .to_mm, .none } },
        },
        .dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .mmx } }},
        .each = .{ .once = &.{
            .{ ._, .p_b, .abs, .dst0q, .src0q, ._, ._ },
        } },
    }, .{
        .required_features = .{ .mmx, .ssse3, null, null },
        .src_constraints = .{ .{ .scalar_int = .{ .of = .qword, .is = .word } }, .any },
        .patterns = &.{
            .{ .src = .{ .mem, .none } },
            .{ .src = .{ .to_mm, .none } },
        },
        .dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .mmx } }},
        .each = .{ .once = &.{
            .{ ._, .p_w, .abs, .dst0q, .src0q, ._, ._ },
        } },
    }, .{
        .required_features = .{ .mmx, .ssse3, null, null },
        .src_constraints = .{ .{ .scalar_int = .{ .of = .qword, .is = .dword } }, .any },
        .patterns = &.{
            .{ .src = .{ .mem, .none } },
            .{ .src = .{ .to_mm, .none } },
        },
        .dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .mmx } }},
        .each = .{ .once = &.{
            .{ ._, .p_d, .abs, .dst0q, .src0q, ._, ._ },
        } },
    }, .{
        .required_features = .{ .ssse3, null, null, null },
        .src_constraints = .{ .{ .scalar_int = .{ .of = .xword, .is = .byte } }, .any },
        .patterns = &.{
            .{ .src = .{ .mem, .none } },
            .{ .src = .{ .to_sse, .none } },
        },
        .dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .sse } }},
        .each = .{ .once = &.{
            .{ ._, .p_b, .abs, .dst0x, .src0x, ._, ._ },
        } },
    }, .{
        .required_features = .{ .ssse3, null, null, null },
        .src_constraints = .{ .{ .scalar_int = .{ .of = .xword, .is = .word } }, .any },
        .patterns = &.{
            .{ .src = .{ .mem, .none } },
            .{ .src = .{ .to_sse, .none } },
        },
        .dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .sse } }},
        .each = .{ .once = &.{
            .{ ._, .p_w, .abs, .dst0x, .src0x, ._, ._ },
        } },
    }, .{
        .required_features = .{ .ssse3, null, null, null },
        .src_constraints = .{ .{ .scalar_int = .{ .of = .xword, .is = .dword } }, .any },
        .patterns = &.{
            .{ .src = .{ .mem, .none } },
            .{ .src = .{ .to_sse, .none } },
        },
        .dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .sse } }},
        .each = .{ .once = &.{
            .{ ._, .p_d, .abs, .dst0x, .src0x, ._, ._ },
        } },
    }, .{
        .required_features = .{ .avx, null, null, null },
        .src_constraints = .{ .{ .scalar_int = .{ .of = .xword, .is = .byte } }, .any },
        .patterns = &.{
            .{ .src = .{ .mem, .none } },
            .{ .src = .{ .to_sse, .none } },
        },
        .dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .sse } }},
        .each = .{ .once = &.{
            .{ ._, .vp_b, .abs, .dst0x, .src0x, ._, ._ },
        } },
    }, .{
        .required_features = .{ .avx, null, null, null },
        .src_constraints = .{ .{ .scalar_int = .{ .of = .xword, .is = .word } }, .any },
        .patterns = &.{
            .{ .src = .{ .mem, .none } },
            .{ .src = .{ .to_sse, .none } },
        },
        .dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .sse } }},
        .each = .{ .once = &.{
            .{ ._, .vp_w, .abs, .dst0x, .src0x, ._, ._ },
        } },
    }, .{
        .required_features = .{ .avx, null, null, null },
        .src_constraints = .{ .{ .scalar_int = .{ .of = .xword, .is = .dword } }, .any },
        .patterns = &.{
            .{ .src = .{ .mem, .none } },
            .{ .src = .{ .to_sse, .none } },
        },
        .dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .sse } }},
        .each = .{ .once = &.{
            .{ ._, .vp_d, .abs, .dst0x, .src0x, ._, ._ },
        } },
    }, .{
        .required_features = .{ .avx2, null, null, null },
        .src_constraints = .{ .{ .scalar_int = .{ .of = .yword, .is = .byte } }, .any },
        .patterns = &.{
            .{ .src = .{ .mem, .none } },
            .{ .src = .{ .to_sse, .none } },
        },
        .dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .sse } }},
        .each = .{ .once = &.{
            .{ ._, .vp_b, .abs, .dst0y, .src0y, ._, ._ },
        } },
    }, .{
        .required_features = .{ .avx2, null, null, null },
        .src_constraints = .{ .{ .scalar_int = .{ .of = .yword, .is = .word } }, .any },
        .patterns = &.{
            .{ .src = .{ .mem, .none } },
            .{ .src = .{ .to_sse, .none } },
        },
        .dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .sse } }},
        .each = .{ .once = &.{
            .{ ._, .vp_w, .abs, .dst0y, .src0y, ._, ._ },
        } },
    }, .{
        .required_features = .{ .avx2, null, null, null },
        .src_constraints = .{ .{ .scalar_int = .{ .of = .yword, .is = .dword } }, .any },
        .patterns = &.{
            .{ .src = .{ .mem, .none } },
            .{ .src = .{ .to_sse, .none } },
        },
        .dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .sse } }},
        .each = .{ .once = &.{
            .{ ._, .vp_d, .abs, .dst0y, .src0y, ._, ._ },
        } },
    }, .{
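        // Vectors too large for one register: loop over memory, applying `pabs*`
        // 32 bytes at a time with AVX2, or 16 with the SSE/MMX variants below.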
        .required_features = .{ .avx2, null, null, null },
        .src_constraints = .{ .{ .multiple_scalar_int = .{ .of = .yword, .is = .byte } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .rc = .sse } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ .@"0:", .vp_b, .abs, .tmp1y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
            .{ ._, .v_dqa, .mov, .memia(.dst0y, .tmp0, .add_size), .tmp1y, ._, ._ },
            .{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
            .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
        .required_features = .{ .avx2, null, null, null },
        .src_constraints = .{ .{ .multiple_scalar_int = .{ .of = .yword, .is = .word } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .rc = .sse } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ .@"0:", .vp_w, .abs, .tmp1y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
            .{ ._, .v_dqa, .mov, .memia(.dst0y, .tmp0, .add_size), .tmp1y, ._, ._ },
            .{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
            .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
        .required_features = .{ .avx2, null, null, null },
        .src_constraints = .{ .{ .multiple_scalar_int = .{ .of = .yword, .is = .dword } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .rc = .sse } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ .@"0:", .vp_d, .abs, .tmp1y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
            .{ ._, .v_dqa, .mov, .memia(.dst0y, .tmp0, .add_size), .tmp1y, ._, ._ },
            .{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
            .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
        .required_features = .{ .avx, null, null, null },
        .src_constraints = .{ .{ .multiple_scalar_int = .{ .of = .xword, .is = .byte } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .rc = .sse } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ .@"0:", .vp_b, .abs, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
            .{ ._, .v_dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
            .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
            .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
        .required_features = .{ .avx, null, null, null },
        .src_constraints = .{ .{ .multiple_scalar_int = .{ .of = .xword, .is = .word } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .rc = .sse } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ .@"0:", .vp_w, .abs, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
            .{ ._, .v_dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
            .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
            .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
        .required_features = .{ .avx, null, null, null },
        .src_constraints = .{ .{ .multiple_scalar_int = .{ .of = .xword, .is = .dword } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .rc = .sse } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ .@"0:", .vp_d, .abs, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
            .{ ._, .v_dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
            .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
            .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
        .required_features = .{ .ssse3, null, null, null },
        .src_constraints = .{ .{ .multiple_scalar_int = .{ .of = .xword, .is = .byte } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .rc = .sse } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ .@"0:", .p_b, .abs, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
            .{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
            .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
            .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
        .required_features = .{ .ssse3, null, null, null },
        .src_constraints = .{ .{ .multiple_scalar_int = .{ .of = .xword, .is = .word } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .rc = .sse } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ .@"0:", .p_w, .abs, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
            .{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
            .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
            .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
        .required_features = .{ .ssse3, null, null, null },
        .src_constraints = .{ .{ .multiple_scalar_int = .{ .of = .xword, .is = .dword } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .rc = .sse } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ .@"0:", .p_d, .abs, .tmp1x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
            .{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp1x, ._, ._ },
            .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
            .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
        .required_features = .{ .mmx, .ssse3, null, null },
        .src_constraints = .{ .{ .multiple_scalar_int = .{ .of = .qword, .is = .byte } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .rc = .sse } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ .@"0:", .p_b, .abs, .tmp1q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
            .{ ._, ._q, .mov, .memia(.dst0q, .tmp0, .add_size), .tmp1q, ._, ._ },
            .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
            .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
        .required_features = .{ .mmx, .ssse3, null, null },
        .src_constraints = .{ .{ .multiple_scalar_int = .{ .of = .qword, .is = .word } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .rc = .sse } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ .@"0:", .p_w, .abs, .tmp1q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
            .{ ._, ._q, .mov, .memia(.dst0q, .tmp0, .add_size), .tmp1q, ._, ._ },
            .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
            .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
        .required_features = .{ .mmx, .ssse3, null, null },
        .src_constraints = .{ .{ .multiple_scalar_int = .{ .of = .qword, .is = .dword } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .rc = .sse } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ .@"0:", .p_d, .abs, .tmp1q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
            .{ ._, ._q, .mov, .memia(.dst0q, .tmp0, .add_size), .tmp1q, ._, ._ },
            .{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
            .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
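        // No usable SIMD abs for this element type: loop element-by-element in a
        // GPR, reusing the cmov or test/jns+neg sequences from the scalar entries.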
        .required_features = .{ .cmov, .slow_incdec, null, null },
        .src_constraints = .{ .{ .multiple_scalar_int = .{ .of = .byte, .is = .byte } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .type = .u8, .kind = .{ .rc = .general_purpose } },
            .{ .type = .u8, .kind = .{ .rc = .general_purpose } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ .@"0:", ._, .xor, .tmp1d, .tmp1d, ._, ._ },
            .{ ._, ._, .movsx, .tmp2d, .memia(.src0b, .tmp0, .add_size), ._, ._ },
            .{ ._, ._, .sub, .tmp1b, .tmp2b, ._, ._ },
            .{ ._, ._s, .cmov, .tmp1d, .tmp2d, ._, ._ },
            .{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_size), .tmp1b, ._, ._ },
            .{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
            .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
        .required_features = .{ .cmov, null, null, null },
        .src_constraints = .{ .{ .multiple_scalar_int = .{ .of = .byte, .is = .byte } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .type = .u8, .kind = .{ .rc = .general_purpose } },
            .{ .type = .u8, .kind = .{ .rc = .general_purpose } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ .@"0:", ._, .xor, .tmp1d, .tmp1d, ._, ._ },
            .{ ._, ._, .movsx, .tmp2d, .memia(.src0b, .tmp0, .add_size), ._, ._ },
            .{ ._, ._, .sub, .tmp1b, .tmp2b, ._, ._ },
            .{ ._, ._s, .cmov, .tmp1d, .tmp2d, ._, ._ },
            .{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_size), .tmp1b, ._, ._ },
            .{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
            .{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
        .required_features = .{ .slow_incdec, null, null, null },
        .src_constraints = .{ .{ .multiple_scalar_int = .{ .of = .byte, .is = .byte } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .type = .u8, .kind = .{ .rc = .general_purpose } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ .@"0:", ._, .movsx, .tmp1d, .memia(.src0b, .tmp0, .add_size), ._, ._ },
            .{ ._, ._, .@"test", .tmp1b, .tmp1b, ._, ._ },
            .{ ._, ._ns, .j, .@"1f", ._, ._, ._ },
            .{ ._, ._, .neg, .tmp1b, ._, ._, ._ },
            .{ .@"1:", ._, .mov, .memia(.dst0b, .tmp0, .add_size), .tmp1b, ._, ._ },
            .{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
            .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
        .src_constraints = .{ .{ .multiple_scalar_int = .{ .of = .byte, .is = .byte } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .type = .u8, .kind = .{ .rc = .general_purpose } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ .@"0:", ._, .movsx, .tmp1d, .memia(.src0b, .tmp0, .add_size), ._, ._ },
            .{ ._, ._, .@"test", .tmp1b, .tmp1b, ._, ._ },
            .{ ._, ._ns, .j, .@"1f", ._, ._, ._ },
            .{ ._, ._, .neg, .tmp1b, ._, ._, ._ },
            .{ .@"1:", ._, .mov, .memia(.dst0b, .tmp0, .add_size), .tmp1b, ._, ._ },
            .{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
            .{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
        .required_features = .{ .cmov, null, null, null },
        .src_constraints = .{ .{ .multiple_scalar_int = .{ .of = .word, .is = .word } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .type = .u16, .kind = .{ .rc = .general_purpose } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ .@"0:", ._, .xor, .tmp1d, .tmp1d, ._, ._ },
            .{ ._, ._, .sub, .tmp1w, .memia(.src0w, .tmp0, .add_size), ._, ._ },
            .{ ._, ._s, .cmov, .tmp1w, .memia(.src0w, .tmp0, .add_size), ._, ._ },
            .{ ._, ._, .mov, .memia(.dst0w, .tmp0, .add_size), .tmp1w, ._, ._ },
            .{ ._, ._, .add, .tmp0p, .si(2), ._, ._ },
            .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
        .src_constraints = .{ .{ .multiple_scalar_int = .{ .of = .word, .is = .word } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .type = .u16, .kind = .{ .rc = .general_purpose } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ .@"0:", ._, .movsx, .tmp1d, .memia(.src0w, .tmp0, .add_size), ._, ._ },
            .{ ._, ._, .@"test", .tmp1d, .tmp1d, ._, ._ },
            .{ ._, ._ns, .j, .@"1f", ._, ._, ._ },
            .{ ._, ._, .neg, .tmp1d, ._, ._, ._ },
            .{ .@"1:", ._, .mov, .memia(.dst0w, .tmp0, .add_size), .tmp1w, ._, ._ },
            .{ ._, ._, .add, .tmp0p, .si(2), ._, ._ },
            .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
        .required_features = .{ .cmov, null, null, null },
        .src_constraints = .{ .{ .multiple_scalar_int = .{ .of = .dword, .is = .dword } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .type = .u32, .kind = .{ .rc = .general_purpose } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ .@"0:", ._, .xor, .tmp1d, .tmp1d, ._, ._ },
            .{ ._, ._, .sub, .tmp1d, .memia(.src0d, .tmp0, .add_size), ._, ._ },
            .{ ._, ._s, .cmov, .tmp1d, .memia(.src0d, .tmp0, .add_size), ._, ._ },
            .{ ._, ._, .mov, .memia(.dst0d, .tmp0, .add_size), .tmp1d, ._, ._ },
            .{ ._, ._, .add, .tmp0p, .si(4), ._, ._ },
            .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
        .src_constraints = .{ .{ .multiple_scalar_int = .{ .of = .dword, .is = .dword } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .type = .u32, .kind = .{ .rc = .general_purpose } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ .@"0:", ._, .mov, .tmp1d, .memia(.src0d, .tmp0, .add_size), ._, ._ },
            .{ ._, ._, .@"test", .tmp1d, .tmp1d, ._, ._ },
            .{ ._, ._ns, .j, .@"1f", ._, ._, ._ },
            .{ ._, ._, .neg, .tmp1d, ._, ._, ._ },
            .{ .@"1:", ._, .mov, .memia(.dst0d, .tmp0, .add_size), .tmp1d, ._, ._ },
            .{ ._, ._, .add, .tmp0p, .si(4), ._, ._ },
            .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
        .required_features = .{ .@"64bit", .cmov, null, null },
        .src_constraints = .{ .{ .multiple_scalar_int = .{ .of = .qword, .is = .qword } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .type = .u64, .kind = .{ .rc = .general_purpose } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ .@"0:", ._, .xor, .tmp1d, .tmp1d, ._, ._ },
            .{ ._, ._, .sub, .tmp1q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
            .{ ._, ._s, .cmov, .tmp1q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
            .{ ._, ._, .mov, .memia(.dst0q, .tmp0, .add_size), .tmp1q, ._, ._ },
            .{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
            .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
        .required_features = .{ .@"64bit", null, null, null },
        .src_constraints = .{ .{ .multiple_scalar_int = .{ .of = .qword, .is = .qword } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .type = .u64, .kind = .{ .rc = .general_purpose } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
            .{ .@"0:", ._, .mov, .tmp1q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
            .{ ._, ._, .@"test", .tmp1q, .tmp1q, ._, ._ },
            .{ ._, ._ns, .j, .@"1f", ._, ._, ._ },
            .{ ._, ._, .neg, .tmp1q, ._, ._, ._ },
            .{ .@"1:", ._, .mov, .memia(.dst0q, .tmp0, .add_size), .tmp1q, ._, ._ },
            .{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
            .{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
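        // Vector of arbitrary-width scalars: an outer loop walks the elements and
        // an inner loop applies the limb-wise xor/sbb abs to each one, 8 bytes at
        // a time.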
        .required_features = .{ .@"64bit", null, null, null },
        .src_constraints = .{ .any_scalar_int, .any },
        .patterns = &.{
            .{ .src = .{ .to_mem, .none } },
        },
        .extra_temps = .{
            .{ .type = .usize, .kind = .{ .rc = .general_purpose } },
            .{ .type = .isize, .kind = .{ .rc = .general_purpose } },
            .{ .type = .i64, .kind = .{ .rc = .general_purpose } },
            .{ .type = .u8, .kind = .{ .rc = .general_purpose } },
            .{ .type = .i64, .kind = .{ .rc = .general_purpose } },
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.mem},
        .clobbers = .{ .eflags = true },
        .each = .{ .once = &.{
            .{ ._, ._, .xor, .tmp0d, .tmp0d, ._, ._ },
            .{ .@"0:", ._, .mov, .tmp1d, .sa(.none, .add_src0_elem_size), ._, ._ },
            .{ ._, ._, .mov, .tmp2q, .memiad(.src0q, .tmp0, .add_src0_elem_size, -8), ._, ._ },
            .{ ._, ._r, .sa, .tmp2q, .si(63), ._, ._ },
            .{ ._, ._, .xor, .tmp3d, .tmp3d, ._, ._ },
            .{ .@"1:", ._, .mov, .tmp4q, .memi(.src0q, .tmp0), ._, ._ },
            .{ ._, ._, .xor, .tmp4q, .tmp2q, ._, ._ },
            .{ ._, ._r, .sh, .tmp3b, .si(1), ._, ._ },
            .{ ._, ._, .sbb, .tmp4q, .tmp2q, ._, ._ },
            .{ ._, ._c, .set, .tmp3b, ._, ._, ._ },
            .{ ._, ._, .mov, .memi(.dst0q, .tmp0), .tmp4q, ._, ._ },
            .{ ._, ._, .lea, .tmp0d, .lead(.none, .tmp0, 8), ._, ._ },
            .{ ._, ._, .sub, .tmp1d, .si(8), ._, ._ },
            .{ ._, ._a, .j, .@"1b", ._, ._, ._ },
            .{ ._, ._, .cmp, .tmp0d, .sa(.none, .add_src0_unaligned_size), ._, ._ },
            .{ ._, ._b, .j, .@"0b", ._, ._, ._ },
        } },
    }, .{
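        // Float abs clears the sign bit: the `smax_mem` temp appears to materialize
        // a memory constant with every sign bit clear (the scalar's signed max
        // pattern), which is then ANDed in with `andps`/`andpd`/`pand`.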
        .required_features = .{ .avx, null, null, null },
        .src_constraints = .{ .{ .scalar_float = .{ .of = .xword, .is = .dword } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_sse, .none } },
        },
        .extra_temps = .{
            .{ .type = .usize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .smax_mem = .{ .ref = .src0, .vectorize_to = .xword } } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .sse } }},
        .each = .{ .once = &.{
            .{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
            .{ ._, .v_ps, .@"and", .dst0x, .src0x, .lea(.xword, .tmp0), ._ },
        } },
    }, .{
        .required_features = .{ .sse, null, null, null },
        .src_constraints = .{ .{ .scalar_float = .{ .of = .xword, .is = .dword } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mut_sse, .none } },
        },
        .extra_temps = .{
            .{ .type = .usize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .smax_mem = .{ .ref = .src0, .vectorize_to = .xword } } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.{ .ref = .src0 }},
        .each = .{ .once = &.{
            .{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
            .{ ._, ._ps, .@"and", .dst0x, .lea(.xword, .tmp0), ._, ._ },
        } },
    }, .{
        .required_features = .{ .avx, null, null, null },
        .src_constraints = .{ .{ .scalar_float = .{ .of = .yword, .is = .dword } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_sse, .none } },
        },
        .extra_temps = .{
            .{ .type = .usize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .smax_mem = .{ .ref = .src0, .vectorize_to = .yword } } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .sse } }},
        .each = .{ .once = &.{
            .{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
            .{ ._, .v_ps, .@"and", .dst0y, .src0y, .lea(.yword, .tmp0), ._ },
        } },
    }, .{
        .required_features = .{ .avx, null, null, null },
        .src_constraints = .{ .{ .scalar_float = .{ .of = .xword, .is = .qword } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_sse, .none } },
        },
        .extra_temps = .{
            .{ .type = .usize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .smax_mem = .{ .ref = .src0, .vectorize_to = .xword } } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .sse } }},
        .each = .{ .once = &.{
            .{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
            .{ ._, .v_pd, .@"and", .dst0x, .src0x, .lea(.xword, .tmp0), ._ },
        } },
    }, .{
        .required_features = .{ .sse2, null, null, null },
        .src_constraints = .{ .{ .scalar_float = .{ .of = .xword, .is = .qword } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mut_sse, .none } },
        },
        .extra_temps = .{
            .{ .type = .usize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .smax_mem = .{ .ref = .src0, .vectorize_to = .xword } } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.{ .ref = .src0 }},
        .each = .{ .once = &.{
            .{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
            .{ ._, ._pd, .@"and", .dst0x, .lea(.xword, .tmp0), ._, ._ },
        } },
    }, .{
        .required_features = .{ .avx, null, null, null },
        .src_constraints = .{ .{ .scalar_float = .{ .of = .yword, .is = .qword } }, .any },
        .patterns = &.{
            .{ .src = .{ .to_sse, .none } },
        },
        .extra_temps = .{
            .{ .type = .usize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .smax_mem = .{ .ref = .src0, .vectorize_to = .yword } } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .sse } }},
        .each = .{ .once = &.{
            .{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
            .{ ._, .v_pd, .@"and", .dst0y, .src0y, .lea(.yword, .tmp0), ._ },
        } },
    }, .{
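        // 80-bit floats have no SSE representation; go through the x87 stack and
        // use `fabs` instead.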
        .required_features = .{ .x87, null, null, null },
        .src_constraints = .{ .{ .scalar_float = .{ .of = .xword, .is = .tbyte } }, .any },
        .patterns = &.{
            .{ .src = .{ .mem, .none } },
            .{ .src = .{ .to_x87, .none } },
        },
        .extra_temps = .{
            .{ .type = .f80, .kind = .{ .reg = .st7 } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .x87 } }},
        .each = .{ .once = &.{
            .{ ._, .f_, .ld, .src0t, ._, ._, ._ },
            .{ ._, .f_, .abs, ._, ._, ._, ._ },
            .{ ._, .f_p, .st, .dst0t, ._, ._, ._ },
        } },
    }, .{
        .required_features = .{ .avx, null, null, null },
        .src_constraints = .{ .{ .scalar_any_float = .xword }, .any },
        .patterns = &.{
            .{ .src = .{ .to_sse, .none } },
        },
        .extra_temps = .{
            .{ .type = .usize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .smax_mem = .{ .ref = .src0, .vectorize_to = .xword } } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .sse } }},
        .each = .{ .once = &.{
            .{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
            .{ ._, .vp_, .@"and", .dst0x, .src0x, .lea(.xword, .tmp0), ._ },
        } },
    }, .{
        .required_features = .{ .sse2, null, null, null },
        .src_constraints = .{ .{ .scalar_any_float = .xword }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mut_sse, .none } },
        },
        .extra_temps = .{
            .{ .type = .usize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .smax_mem = .{ .ref = .src0, .vectorize_to = .xword } } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.{ .ref = .src0 }},
        .each = .{ .once = &.{
            .{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
            .{ ._, .p_, .@"and", .dst0x, .lea(.xword, .tmp0), ._, ._ },
        } },
    }, .{
        .required_features = .{ .sse, null, null, null },
        .src_constraints = .{ .{ .scalar_any_float = .xword }, .any },
        .patterns = &.{
            .{ .src = .{ .to_mut_sse, .none } },
        },
        .extra_temps = .{
            .{ .type = .usize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .smax_mem = .{ .ref = .src0, .vectorize_to = .xword } } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.{ .ref = .src0 }},
        .each = .{ .once = &.{
            .{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
            .{ ._, ._ps, .@"and", .dst0x, .lea(.xword, .tmp0), ._, ._ },
        } },
    }, .{
        .required_features = .{ .avx2, null, null, null },
        .src_constraints = .{ .{ .scalar_any_float = .yword }, .any },
        .patterns = &.{
            .{ .src = .{ .to_sse, .none } },
        },
        .extra_temps = .{
            .{ .type = .usize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .smax_mem = .{ .ref = .src0, .vectorize_to = .yword } } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .sse } }},
        .each = .{ .once = &.{
            .{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
            .{ ._, .vp_, .@"and", .dst0y, .src0y, .lea(.yword, .tmp0), ._ },
        } },
    }, .{
        .required_features = .{ .avx, null, null, null },
        .src_constraints = .{ .{ .scalar_any_float = .yword }, .any },
        .patterns = &.{
            .{ .src = .{ .to_sse, .none } },
        },
        .extra_temps = .{
            .{ .type = .usize, .kind = .{ .rc = .general_purpose } },
            .{ .kind = .{ .smax_mem = .{ .ref = .src0, .vectorize_to = .yword } } },
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
            .unused,
        },
        .dst_temps = .{.{ .mut_rc = .{ .ref = .src0, .rc = .sse } }},
        .each = .{ .once = &.{
            .{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
            .{ ._, .v_pd, .@"and", .dst0y, .src0y, .lea(.yword, .tmp0), ._ },
        } },
    }, .{
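        // Memory-loop float abs: load the sign-clearing mask once, then AND it
        // against the source 16 or 32 bytes per iteration.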
.required_features = .{ .avx, null, null, null },
|
|
.src_constraints = .{ .{ .multiple_scalar_float = .{ .of = .yword, .is = .dword } }, .any },
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_mem, .none } },
|
|
},
|
|
.extra_temps = .{
|
|
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
|
|
.{ .kind = .{ .smax_mem = .{ .ref = .src0, .vectorize_to = .yword } } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.{ .kind = .{ .rc = .sse } },
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
.unused,
|
|
},
|
|
.dst_temps = .{.mem},
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, .v_ps, .mova, .tmp2y, .lea(.yword, .tmp0), ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_ps, .@"and", .tmp3y, .tmp2y, .memia(.src0y, .tmp0, .add_size), ._ },
.{ ._, .v_ps, .mova, .memia(.dst0y, .tmp0, .add_size), .tmp3y, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{ .{ .multiple_scalar_float = .{ .of = .xword, .is = .dword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .smax_mem = .{ .ref = .src0, .vectorize_to = .xword } } },
.{ .kind = .{ .rc = .sse } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, ._ps, .mova, .tmp2x, .lea(.xword, .tmp0), ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._ps, .mova, .tmp3x, .tmp2x, ._, ._ },
.{ ._, ._ps, .@"and", .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, ._ps, .mova, .memia(.dst0x, .tmp0, .add_size), .tmp3x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .{ .multiple_scalar_float = .{ .of = .yword, .is = .qword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .smax_mem = .{ .ref = .src0, .vectorize_to = .yword } } },
.{ .kind = .{ .rc = .sse } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, .v_pd, .mova, .tmp2y, .lea(.yword, .tmp0), ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_pd, .@"and", .tmp3y, .tmp2y, .memia(.src0y, .tmp0, .add_size), ._ },
.{ ._, .v_pd, .mova, .memia(.dst0y, .tmp0, .add_size), .tmp3y, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{ .{ .multiple_scalar_float = .{ .of = .xword, .is = .qword } }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .smax_mem = .{ .ref = .src0, .vectorize_to = .xword } } },
.{ .kind = .{ .rc = .sse } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, ._pd, .mova, .tmp2x, .lea(.xword, .tmp0), ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._pd, .mova, .tmp3x, .tmp2x, ._, ._ },
.{ ._, ._pd, .@"and", .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, ._pd, .mova, .memia(.dst0x, .tmp0, .add_size), .tmp3x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{ .{ .multiple_scalar_any_float = .yword }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .smax_mem = .{ .ref = .src0, .vectorize_to = .yword } } },
.{ .kind = .{ .rc = .sse } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, .v_dqa, .mov, .tmp2y, .lea(.yword, .tmp0), ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .vp_, .@"and", .tmp3y, .tmp2y, .memia(.src0y, .tmp0, .add_size), ._ },
.{ ._, .v_dqa, .mov, .memia(.dst0y, .tmp0, .add_size), .tmp3y, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .{ .multiple_scalar_any_float = .yword }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .smax_mem = .{ .ref = .src0, .vectorize_to = .yword } } },
.{ .kind = .{ .rc = .sse } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, .v_pd, .mova, .tmp2y, .lea(.yword, .tmp0), ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", .v_pd, .@"and", .tmp3y, .tmp2y, .memia(.src0y, .tmp0, .add_size), ._ },
.{ ._, .v_pd, .mova, .memia(.dst0y, .tmp0, .add_size), .tmp3y, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{ .{ .multiple_scalar_any_float = .xword }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .smax_mem = .{ .ref = .src0, .vectorize_to = .xword } } },
.{ .kind = .{ .rc = .sse } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, ._dqa, .mov, .tmp2x, .lea(.xword, .tmp0), ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._dqa, .mov, .tmp3x, .tmp2x, ._, ._ },
.{ ._, .p_, .@"and", .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, ._dqa, .mov, .memia(.dst0x, .tmp0, .add_size), .tmp3x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{ .{ .multiple_scalar_any_float = .xword }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .none } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .smax_mem = .{ .ref = .src0, .vectorize_to = .xword } } },
.{ .kind = .{ .rc = .sse } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.mem},
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .mem(.tmp1), ._, ._ },
.{ ._, ._ps, .mova, .tmp2x, .lea(.xword, .tmp0), ._, ._ },
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ .@"0:", ._ps, .mova, .tmp3x, .tmp2x, ._, ._ },
.{ ._, ._ps, .@"and", .tmp3x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, ._ps, .mova, .memia(.dst0x, .tmp0, .add_size), .tmp3x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
} }) catch |err| switch (err) {
error.SelectFailed => return cg.fail("failed to select {s} {} {}", .{
@tagName(air_tag),
cg.typeOf(ty_op.operand).fmt(pt),
ops[0].tracking(cg),
}),
else => |e| return e,
};
try res[0].finish(inst, &.{ty_op.operand}, &ops, cg);
},

.cmp_lt,
.cmp_lt_optimized,
.cmp_lte,
.cmp_lte_optimized,
.cmp_gte,
.cmp_gte_optimized,
.cmp_gt,
.cmp_gt_optimized,
=> |air_tag| if (use_old) try cg.airCmp(inst, switch (air_tag) {
else => unreachable,
.cmp_lt, .cmp_lt_optimized => .lt,
.cmp_lte, .cmp_lte_optimized => .lte,
.cmp_gte, .cmp_gte_optimized => .gte,
.cmp_gt, .cmp_gt_optimized => .gt,
}) else fallback: {
const bin_op = air_datas[@intFromEnum(inst)].bin_op;
const scalar_ty = cg.typeOf(bin_op.lhs).scalarType(zcu);
if (scalar_ty.isRuntimeFloat()) break :fallback try cg.airCmp(inst, switch (air_tag) {
else => unreachable,
.cmp_lt, .cmp_lt_optimized => .lt,
.cmp_lte, .cmp_lte_optimized => .lte,
.cmp_gte, .cmp_gte_optimized => .gte,
.cmp_gt, .cmp_gt_optimized => .gt,
});
const signedness = if (scalar_ty.isAbiInt(zcu))
scalar_ty.intInfo(zcu).signedness
else
.unsigned;
var ops = try cg.tempsFromOperands(inst, .{ bin_op.lhs, bin_op.rhs });
var res: [1]Temp = undefined;
cg.select(&res, &.{.bool}, &ops, switch (@as(Condition, switch (signedness) {
.signed => switch (air_tag) {
else => unreachable,
.cmp_lt, .cmp_lt_optimized => .l,
.cmp_lte, .cmp_lte_optimized => .le,
.cmp_gte, .cmp_gte_optimized => .ge,
.cmp_gt, .cmp_gt_optimized => .g,
},
.unsigned => switch (air_tag) {
else => unreachable,
.cmp_lt, .cmp_lt_optimized => .b,
.cmp_lte, .cmp_lte_optimized => .be,
.cmp_gte, .cmp_gte_optimized => .ae,
.cmp_gt, .cmp_gt_optimized => .a,
},
})) {
else => unreachable,
inline .l, .le, .ge, .g, .b, .be, .ae, .a => |cc| comptime &.{ .{
.src_constraints = .{ .{ .int = .byte }, .{ .int = .byte } },
.patterns = &.{
.{ .src = .{ .imm8, .mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .imm8, .to_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .mem, .to_gpr }, .commute = .{ 0, 1 } },
},
.dst_temps = .{.{ .cc = cc.commute() }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0b, .src1b, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .int = .byte }, .{ .int = .byte } },
.patterns = &.{
.{ .src = .{ .mem, .imm8 } },
.{ .src = .{ .to_gpr, .imm8 } },
.{ .src = .{ .to_gpr, .mem } },
.{ .src = .{ .to_gpr, .to_gpr } },
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0b, .src1b, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .int = .word }, .{ .int = .word } },
.patterns = &.{
.{ .src = .{ .imm16, .mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .imm16, .to_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .mem, .to_gpr }, .commute = .{ 0, 1 } },
},
.dst_temps = .{.{ .cc = cc.commute() }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0w, .src1w, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .int = .word }, .{ .int = .word } },
.patterns = &.{
.{ .src = .{ .mem, .imm16 } },
.{ .src = .{ .to_gpr, .imm16 } },
.{ .src = .{ .to_gpr, .mem } },
.{ .src = .{ .to_gpr, .to_gpr } },
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0w, .src1w, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .int = .dword }, .{ .int = .dword } },
.patterns = &.{
.{ .src = .{ .imm32, .mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .imm32, .to_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .mem, .to_gpr }, .commute = .{ 0, 1 } },
},
.dst_temps = .{.{ .cc = cc.commute() }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0d, .src1d, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .int = .dword }, .{ .int = .dword } },
.patterns = &.{
.{ .src = .{ .mem, .imm32 } },
.{ .src = .{ .to_gpr, .imm32 } },
.{ .src = .{ .to_gpr, .mem } },
.{ .src = .{ .to_gpr, .to_gpr } },
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0d, .src1d, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .int = .qword }, .{ .int = .qword } },
.patterns = &.{
.{ .src = .{ .simm32, .mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .simm32, .to_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .mem, .to_gpr }, .commute = .{ 0, 1 } },
},
.dst_temps = .{.{ .cc = cc.commute() }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0q, .src1q, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .int = .qword }, .{ .int = .qword } },
.patterns = &.{
.{ .src = .{ .mem, .simm32 } },
.{ .src = .{ .to_gpr, .simm32 } },
.{ .src = .{ .to_gpr, .mem } },
.{ .src = .{ .to_gpr, .to_gpr } },
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0q, .src1q, ._, ._ },
} },
}, .{
.src_constraints = .{ .any_int, .any_int },
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem }, .commute = switch (cc) {
else => unreachable,
.l, .ge, .b, .ae => .{ 0, 0 },
.le, .g, .be, .a => .{ 0, 1 },
} },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .rc = .general_purpose }},
.clobbers = .{ .eflags = true },
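// Annotation: wide integers are compared limb by limb from the least
// significant end. The `shr` reloads the borrow saved by the previous
// iteration into CF, `sbb` subtracts the next pair of limbs with that borrow,
// and `setc` stashes the new borrow; after the most significant limb, the
// flags encode the full-width ordered comparison.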
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1b, .tmp1b, ._, ._ },
.{ .@"0:", ._r, .sh, .tmp1b, .si(1), ._, ._ },
.{ ._, ._, .mov, .tmp1p, .memia(.src0p, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .sbb, .tmp1p, .memia(.src1p, .tmp0, .add_size), ._, ._ },
.{ ._, ._c, .set, .tmp1b, ._, ._, ._ },
.{ ._, .fromCond(switch (cc) {
else => unreachable,
.l, .ge, .b, .ae => cc,
.le, .g, .be, .a => cc.commute(),
}), .set, .dst0b, ._, ._, ._ },
.{ ._, ._, .add, .tmp0p, .sa(.tmp1, .add_size), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
} },
}) catch |err| switch (err) {
error.SelectFailed => return cg.fail("failed to select {s} {} {} {}", .{
@tagName(air_tag),
cg.typeOf(bin_op.lhs).fmt(pt),
ops[0].tracking(cg),
ops[1].tracking(cg),
}),
else => |e| return e,
};
try res[0].finish(inst, &.{ bin_op.lhs, bin_op.rhs }, &ops, cg);
},
.cmp_eq,
.cmp_eq_optimized,
.cmp_neq,
.cmp_neq_optimized,
=> |air_tag| if (use_old) try cg.airCmp(inst, switch (air_tag) {
else => unreachable,
.cmp_eq, .cmp_eq_optimized => .eq,
.cmp_neq, .cmp_neq_optimized => .neq,
}) else fallback: {
const bin_op = air_datas[@intFromEnum(inst)].bin_op;
const scalar_ty = cg.typeOf(bin_op.lhs).scalarType(zcu);
if (cg.intInfo(scalar_ty) == null and cg.floatBits(scalar_ty) == null) break :fallback try cg.airCmp(inst, switch (air_tag) {
else => unreachable,
.cmp_eq, .cmp_eq_optimized => .eq,
.cmp_neq, .cmp_neq_optimized => .neq,
});
var ops = try cg.tempsFromOperands(inst, .{ bin_op.lhs, bin_op.rhs });
var res: [1]Temp = undefined;
cg.select(&res, &.{.bool}, &ops, switch (@as(Condition, switch (air_tag) {
else => unreachable,
.cmp_eq, .cmp_eq_optimized => .e,
.cmp_neq, .cmp_neq_optimized => .ne,
})) {
else => unreachable,
inline .e, .ne => |cc| comptime &.{ .{
.src_constraints = .{ .{ .int = .byte }, .{ .int = .byte } },
.patterns = &.{
.{ .src = .{ .mem, .imm8 } },
.{ .src = .{ .imm8, .mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_gpr, .imm8 } },
.{ .src = .{ .imm8, .to_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_gpr, .mem } },
.{ .src = .{ .mem, .to_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_gpr, .to_gpr } },
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0b, .src1b, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .int = .word }, .{ .int = .word } },
.patterns = &.{
.{ .src = .{ .mem, .imm16 } },
.{ .src = .{ .imm16, .mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_gpr, .imm16 } },
.{ .src = .{ .imm16, .to_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_gpr, .mem } },
.{ .src = .{ .mem, .to_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_gpr, .to_gpr } },
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0w, .src1w, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .int = .dword }, .{ .int = .dword } },
.patterns = &.{
.{ .src = .{ .mem, .imm32 } },
.{ .src = .{ .imm32, .mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_gpr, .imm32 } },
.{ .src = .{ .imm32, .to_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_gpr, .mem } },
.{ .src = .{ .mem, .to_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_gpr, .to_gpr } },
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0d, .src1d, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .{ .int = .qword }, .{ .int = .qword } },
.patterns = &.{
.{ .src = .{ .mem, .simm32 } },
.{ .src = .{ .simm32, .mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_gpr, .simm32 } },
.{ .src = .{ .simm32, .to_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_gpr, .mem } },
.{ .src = .{ .mem, .to_gpr }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_gpr, .to_gpr } },
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .cmp, .src0q, .src1q, ._, ._ },
} },
}, .{
.required_features = .{ .sse, .mmx, null, null },
.src_constraints = .{ .{ .int = .qword }, .{ .int = .qword } },
.patterns = &.{
.{ .src = .{ .to_mut_mm, .mem } },
.{ .src = .{ .mem, .to_mut_mm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_mm, .to_mm } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .rc = .mmx } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, .p_, .xor, .tmp1q, .tmp1q, ._, ._ },
.{ ._, .p_, .xor, .src0q, .src1q, ._, ._ },
.{ ._, .p_b, .cmpeq, .tmp1q, .src0q, ._, ._ },
.{ ._, .p_b, .movmsk, .tmp0d, .tmp1q, ._, ._ },
.{ ._, ._, .xor, .tmp0d, .si(0xff), ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .{ .int = .xword }, .{ .int = .xword } },
.patterns = &.{
.{ .src = .{ .to_xmm, .mem } },
.{ .src = .{ .mem, .to_xmm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_xmm, .to_xmm } },
},
.extra_temps = .{
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, .vp_, .xor, .tmp0x, .src0x, .src1x, ._ },
.{ ._, .vp_, .@"test", .tmp0x, .tmp0x, ._, ._ },
} },
}, .{
.required_features = .{ .sse4_1, null, null, null },
.src_constraints = .{ .{ .int = .xword }, .{ .int = .xword } },
.patterns = &.{
.{ .src = .{ .to_mut_xmm, .mem } },
.{ .src = .{ .mem, .to_mut_xmm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_xmm, .to_xmm } },
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, .p_, .xor, .src0x, .src1x, ._, ._ },
.{ ._, .p_, .@"test", .src0x, .src0x, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{ .{ .int = .xword }, .{ .int = .xword } },
.patterns = &.{
.{ .src = .{ .to_mut_xmm, .mem } },
.{ .src = .{ .mem, .to_mut_xmm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_mut_xmm, .to_xmm } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
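// Annotation: without ptest, equality goes through pcmpeqb: xor the operands,
// compare the 16 bytes against zero, collect the byte mask with pmovmskb, and
// flip it with 0xffff so ZF ends up set exactly when every byte matched.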
.each = .{ .once = &.{
.{ ._, .p_, .xor, .tmp1x, .tmp1x, ._, ._ },
.{ ._, .p_, .xor, .src0x, .src1x, ._, ._ },
.{ ._, .p_b, .cmpeq, .tmp1x, .src0x, ._, ._ },
.{ ._, .p_b, .movmsk, .tmp0d, .tmp1x, ._, ._ },
.{ ._, ._, .xor, .tmp0d, .si(0xffff), ._, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{ .{ .int = .yword }, .{ .int = .yword } },
.patterns = &.{
.{ .src = .{ .to_ymm, .mem } },
.{ .src = .{ .mem, .to_ymm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_ymm, .to_ymm } },
},
.extra_temps = .{
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, .vp_, .xor, .tmp0y, .src0y, .src1y, ._ },
.{ ._, .vp_, .@"test", .tmp0y, .tmp0y, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .{ .int = .yword }, .{ .int = .yword } },
.patterns = &.{
.{ .src = .{ .to_ymm, .mem } },
.{ .src = .{ .mem, .to_ymm }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_ymm, .to_ymm } },
},
.extra_temps = .{
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, .v_pd, .xor, .tmp0y, .src0y, .src1y, ._ },
.{ ._, .vp_, .@"test", .tmp0y, .tmp0y, ._, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{
.{ .remainder_int = .{ .of = .yword, .is = .xword } },
.{ .remainder_int = .{ .of = .yword, .is = .xword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .rc = .sse } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sia(16, .src0, .sub_size), ._, ._ },
.{ ._, .vp_, .xor, .tmp1y, .tmp1y, .tmp1y, ._ },
.{ .@"0:", .v_dqu, .mov, .tmp2y, .memiad(.src0y, .tmp0, .add_size, -16), ._, ._ },
.{ ._, .vp_, .xor, .tmp2y, .tmp2y, .memiad(.src1y, .tmp0, .add_size, -16), ._ },
.{ ._, .vp_, .@"or", .tmp1y, .tmp1y, .tmp2y, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, .v_dqa, .mov, .tmp2x, .memad(.src0x, .add_size, -16), ._, ._ },
.{ ._, .vp_, .xor, .tmp2x, .tmp2x, .memad(.src1x, .add_size, -16), ._ },
.{ ._, .vp_, .@"or", .tmp1y, .tmp1y, .tmp2y, ._ },
.{ ._, .vp_, .@"test", .tmp1y, .tmp1y, ._, ._ },
} },
}, .{
.required_features = .{ .avx2, null, null, null },
.src_constraints = .{
.{ .remainder_int = .{ .of = .yword, .is = .yword } },
.{ .remainder_int = .{ .of = .yword, .is = .yword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .rc = .sse } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, .vp_, .xor, .tmp1y, .tmp1y, .tmp1y, ._ },
.{ .@"0:", .v_dqu, .mov, .tmp2y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
.{ ._, .vp_, .xor, .tmp2y, .tmp2y, .memia(.src1y, .tmp0, .add_size), ._ },
.{ ._, .vp_, .@"or", .tmp1y, .tmp1y, .tmp2y, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, .vp_, .@"test", .tmp1y, .tmp1y, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .remainder_int = .{ .of = .yword, .is = .xword } },
.{ .remainder_int = .{ .of = .yword, .is = .xword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .rc = .sse } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sia(16, .src0, .sub_size), ._, ._ },
.{ ._, .v_pd, .xor, .tmp1y, .tmp1y, .tmp1y, ._ },
.{ .@"0:", .v_pd, .movu, .tmp2y, .memiad(.src0y, .tmp0, .add_size, -16), ._, ._ },
.{ ._, .v_pd, .xor, .tmp2y, .tmp2y, .memiad(.src1y, .tmp0, .add_size, -16), ._ },
.{ ._, .v_pd, .@"or", .tmp1y, .tmp1y, .tmp2y, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, .v_pd, .mova, .tmp2x, .memad(.src0x, .add_size, -16), ._, ._ },
.{ ._, .v_pd, .xor, .tmp2x, .tmp2x, .memad(.src1x, .add_size, -16), ._ },
.{ ._, .v_pd, .@"or", .tmp1y, .tmp1y, .tmp2y, ._ },
.{ ._, .vp_, .@"test", .tmp1y, .tmp1y, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .remainder_int = .{ .of = .yword, .is = .yword } },
.{ .remainder_int = .{ .of = .yword, .is = .yword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .rc = .sse } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, .v_pd, .xor, .tmp1y, .tmp1y, .tmp1y, ._ },
.{ .@"0:", .v_pd, .movu, .tmp2y, .memia(.src0y, .tmp0, .add_size), ._, ._ },
.{ ._, .v_pd, .xor, .tmp2y, .tmp2y, .memia(.src1y, .tmp0, .add_size), ._ },
.{ ._, .v_pd, .@"or", .tmp1y, .tmp1y, .tmp2y, ._ },
.{ ._, ._, .add, .tmp0p, .si(32), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, .vp_, .@"test", .tmp1y, .tmp1y, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{
.{ .remainder_int = .{ .of = .xword, .is = .xword } },
.{ .remainder_int = .{ .of = .xword, .is = .xword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .rc = .sse } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, .vp_, .xor, .tmp1x, .tmp1x, .tmp1x, ._ },
.{ .@"0:", .v_dqu, .mov, .tmp2x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .vp_, .xor, .tmp2x, .tmp2x, .memia(.src1x, .tmp0, .add_size), ._ },
.{ ._, .vp_, .@"or", .tmp1x, .tmp1x, .tmp2x, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, .vp_, .@"test", .tmp1x, .tmp1x, ._, ._ },
} },
}, .{
.required_features = .{ .sse4_1, null, null, null },
.src_constraints = .{
.{ .remainder_int = .{ .of = .xword, .is = .xword } },
.{ .remainder_int = .{ .of = .xword, .is = .xword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .rc = .sse } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, .p_, .xor, .tmp1x, .tmp1x, ._, ._ },
.{ .@"0:", ._dqu, .mov, .tmp2x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .p_, .xor, .tmp2x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, .p_, .@"or", .tmp1x, .tmp2x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, .p_, .@"test", .tmp1x, .tmp1x, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{
.{ .remainder_int = .{ .of = .xword, .is = .xword } },
.{ .remainder_int = .{ .of = .xword, .is = .xword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .rc = .sse } },
.{ .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, .p_, .xor, .tmp1x, .tmp1x, ._, ._ },
.{ .@"0:", ._dqu, .mov, .tmp2x, .memia(.src0x, .tmp0, .add_size), ._, ._ },
.{ ._, .p_, .xor, .tmp2x, .memia(.src1x, .tmp0, .add_size), ._, ._ },
.{ ._, .p_, .@"or", .tmp1x, .tmp2x, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(16), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, .p_, .xor, .tmp2x, .tmp2x, ._, ._ },
.{ ._, .p_b, .cmpeq, .tmp1x, .tmp2x, ._, ._ },
.{ ._, .p_b, .movmsk, .tmp0d, .tmp1x, ._, ._ },
.{ ._, ._, .cmp, .tmp0d, .si(0xffff), ._, ._ },
} },
}, .{
.required_features = .{ .sse, .mmx, null, null },
.src_constraints = .{
.{ .remainder_int = .{ .of = .qword, .is = .qword } },
.{ .remainder_int = .{ .of = .qword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .kind = .{ .rc = .mmx } },
.{ .kind = .{ .rc = .mmx } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, .p_, .xor, .tmp1q, .tmp1q, ._, ._ },
.{ .@"0:", ._q, .mov, .tmp2q, .memia(.src0q, .tmp0, .add_size), ._, ._ },
.{ ._, .p_, .xor, .tmp2q, .memia(.src1q, .tmp0, .add_size), ._, ._ },
.{ ._, .p_, .@"or", .tmp1q, .tmp2q, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(8), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, .p_, .xor, .tmp2q, .tmp2q, ._, ._ },
.{ ._, .p_b, .cmpeq, .tmp1q, .tmp2q, ._, ._ },
.{ ._, .p_b, .movmsk, .tmp0d, .tmp1q, ._, ._ },
.{ ._, ._, .cmp, .tmp0d, .si(0xff), ._, ._ },
} },
}, .{
.src_constraints = .{
.{ .remainder_int = .{ .of = .qword, .is = .qword } },
.{ .remainder_int = .{ .of = .qword, .is = .qword } },
},
.patterns = &.{
.{ .src = .{ .to_mem, .to_mem } },
},
.extra_temps = .{
.{ .type = .isize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_size), ._, ._ },
.{ ._, ._, .xor, .tmp1p, .tmp1p, ._, ._ },
.{ .@"0:", ._, .mov, .tmp2p, .memia(.src0p, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .xor, .tmp2p, .memia(.src1p, .tmp0, .add_size), ._, ._ },
.{ ._, ._, .@"or", .tmp1p, .tmp2p, ._, ._ },
.{ ._, ._, .add, .tmp0p, .sa(.tmp2, .add_size), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1p, .tmp1p, ._, ._ },
} },
}, .{
.required_features = .{ .f16c, null, null, null },
.src_constraints = .{ .{ .float = .word }, .{ .float = .word } },
.patterns = &.{
.{ .src = .{ .to_sse, .to_sse } },
},
.extra_temps = .{
.{ .kind = .{ .mut_rc = .{ .ref = .src0, .rc = .sse } } },
.{ .kind = .{ .mut_rc = .{ .ref = .src1, .rc = .sse } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
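// Annotation: ucomis{s,d} reports an unordered (NaN) result through PF, so
// float equality is ZF set with PF clear (.z_and_np) and inequality is the
// complement (.nz_or_p).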
.dst_temps = .{.{ .cc = switch (cc) {
else => unreachable,
.e => .z_and_np,
.ne => .nz_or_p,
} }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, .v_ps, .cvtph2, .tmp0x, .src0x, ._, ._ },
.{ ._, .v_ps, .cvtph2, .tmp1x, .src1x, ._, ._ },
.{ ._, .v_ss, .ucomi, .tmp0x, .tmp1x, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{ .{ .float = .word }, .{ .float = .word } },
.patterns = &.{
.{ .src = .{ .{ .to_reg = .xmm0 }, .{ .to_reg = .xmm1 } } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
else => unreachable,
.e => "__eqhf2",
.ne => "__nehf2",
} } } },
.{ .type = .i32, .kind = .{ .reg = .eax } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.each = .{ .once = &.{
.{ ._, ._, .call, .tmp0d, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .tmp1d, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .{ .float = .dword }, .{ .float = .dword } },
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.extra_temps = .{
.{ .type = .f16, .kind = .{ .rc = .sse } },
.{ .type = .f16, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = switch (cc) {
else => unreachable,
.e => .z_and_np,
.ne => .nz_or_p,
} }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, .v_ss, .ucomi, .src0x, .src1d, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{ .{ .float = .dword }, .{ .float = .dword } },
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.extra_temps = .{
.{ .type = .f16, .kind = .{ .rc = .sse } },
.{ .type = .f16, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = switch (cc) {
else => unreachable,
.e => .z_and_np,
.ne => .nz_or_p,
} }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._ss, .ucomi, .src0x, .src1d, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{ .{ .float = .dword }, .{ .float = .word } },
.patterns = &.{
.{ .src = .{ .{ .to_reg = .xmm0 }, .{ .to_reg = .xmm1 } } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
else => unreachable,
.e => "__eqsf2",
.ne => "__nesf2",
} } } },
.{ .type = .i32, .kind = .{ .reg = .eax } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.each = .{ .once = &.{
.{ ._, ._, .call, .tmp0d, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .tmp1d, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .{ .float = .qword }, .{ .float = .qword } },
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.extra_temps = .{
.{ .type = .f16, .kind = .{ .rc = .sse } },
.{ .type = .f16, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = switch (cc) {
else => unreachable,
.e => .z_and_np,
.ne => .nz_or_p,
} }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, .v_sd, .ucomi, .src0x, .src1q, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{ .{ .float = .qword }, .{ .float = .qword } },
.patterns = &.{
.{ .src = .{ .to_sse, .mem } },
.{ .src = .{ .mem, .to_sse }, .commute = .{ 0, 1 } },
.{ .src = .{ .to_sse, .to_sse } },
},
.extra_temps = .{
.{ .type = .f16, .kind = .{ .rc = .sse } },
.{ .type = .f16, .kind = .{ .rc = .sse } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = switch (cc) {
else => unreachable,
.e => .z_and_np,
.ne => .nz_or_p,
} }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._sd, .ucomi, .src0x, .src1q, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{ .{ .float = .qword }, .{ .float = .qword } },
.patterns = &.{
.{ .src = .{ .{ .to_reg = .xmm0 }, .{ .to_reg = .xmm1 } } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
else => unreachable,
.e => "__eqdf2",
.ne => "__nedf2",
} } } },
.{ .type = .i32, .kind = .{ .reg = .eax } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.each = .{ .once = &.{
.{ ._, ._, .call, .tmp0d, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .tmp1d, ._, ._ },
} },
}, .{
.required_features = .{ .x87, .cmov, null, null },
.src_constraints = .{ .{ .float = .tbyte }, .{ .float = .tbyte } },
.patterns = &.{
.{ .src = .{ .to_x87, .mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .mem, .to_x87 } },
.{ .src = .{ .to_x87, .to_x87 } },
},
.extra_temps = .{
.{ .type = .f80, .kind = .{ .reg = .st7 } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = switch (cc) {
else => unreachable,
.e => .z_and_np,
.ne => .nz_or_p,
} }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, .f_, .ld, .src0t, ._, ._, ._ },
.{ ._, .f_p, .ucomi, .tmp0t, .src1t, ._, ._ },
} },
}, .{
.required_features = .{ .sahf, .x87, null, null },
.src_constraints = .{ .{ .float = .tbyte }, .{ .float = .tbyte } },
.patterns = &.{
.{ .src = .{ .mem, .mem } },
},
.extra_temps = .{
.{ .type = .f80, .kind = .{ .reg = .st6 } },
.{ .type = .f80, .kind = .{ .reg = .st7 } },
.{ .type = .u8, .kind = .{ .reg = .ah } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = switch (cc) {
else => unreachable,
.e => .z_and_np,
.ne => .nz_or_p,
} }},
.clobbers = .{ .eflags = true },
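// Annotation: the x87 compare leaves its result in the FPU condition bits
// C0/C2/C3; `fnstsw` stores the status word and `sahf` copies AH into EFLAGS
// (C0 -> CF, C2 -> PF, C3 -> ZF), so the usual condition codes apply.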
.each = .{ .once = &.{
.{ ._, .f_, .ld, .src1t, ._, ._, ._ },
.{ ._, .f_, .ld, .src0t, ._, ._, ._ },
.{ ._, .f_pp, .ucom, ._, ._, ._, ._ },
.{ ._, .fn_sw, .st, .tmp2w, ._, ._, ._ },
.{ ._, ._, .sahf, ._, ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .x87, null, null },
.src_constraints = .{ .{ .float = .tbyte }, .{ .float = .tbyte } },
.patterns = &.{
.{ .src = .{ .mem, .mem } },
},
.extra_temps = .{
.{ .type = .f80, .kind = .{ .reg = .st6 } },
.{ .type = .f80, .kind = .{ .reg = .st7 } },
.{ .type = .u8, .kind = .{ .reg = .ah } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
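// Annotation: without sahf, the status word is tested directly in AH:
// 0b0_1_000_000 is C3 (AH bit 6) and 0b0_1_000_100 is C3|C2, so the xor flips
// C3 and the test yields ZF=1 exactly for the ordered-equal pattern
// (C3=1, C2=0).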
.each = .{ .once = &.{
.{ ._, .f_, .ld, .src1t, ._, ._, ._ },
.{ ._, .f_, .ld, .src0t, ._, ._, ._ },
.{ ._, .f_pp, .ucom, ._, ._, ._, ._ },
.{ ._, .fn_sw, .st, .tmp2w, ._, ._, ._ },
.{ ._, ._, .xor, .tmp2b, .si(0b0_1_000_000), ._, ._ },
.{ ._, ._, .@"test", .tmp2b, .si(0b0_1_000_100), ._, ._ },
} },
}, .{
.required_features = .{ .x87, null, null, null },
.src_constraints = .{ .{ .float = .tbyte }, .{ .float = .tbyte } },
.patterns = &.{
.{ .src = .{ .mem, .mem } },
},
.extra_temps = .{
.{ .type = .f80, .kind = .{ .reg = .st6 } },
.{ .type = .f80, .kind = .{ .reg = .st7 } },
.{ .type = .u8, .kind = .{ .reg = .ah } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = switch (cc) {
else => unreachable,
.e => .z_and_np,
.ne => .nz_or_p,
} }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, .f_, .ld, .src1t, ._, ._, ._ },
.{ ._, .f_, .ld, .src0t, ._, ._, ._ },
.{ ._, .f_pp, .ucom, ._, ._, ._, ._ },
.{ ._, .fn_sw, .st, .tmp2w, ._, ._, ._ },
.{ ._, ._, .sahf, ._, ._, ._, ._ },
} },
}, .{
.required_features = .{ .sahf, .x87, null, null },
.src_constraints = .{ .{ .float = .tbyte }, .{ .float = .tbyte } },
.patterns = &.{
.{ .src = .{ .to_x87, .mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .mem, .to_x87 } },
.{ .src = .{ .to_x87, .to_x87 } },
},
.extra_temps = .{
.{ .type = .f80, .kind = .{ .reg = .st6 } },
.{ .type = .f80, .kind = .{ .reg = .st7 } },
.{ .type = .u16, .kind = .{ .reg = .ah } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = switch (cc) {
else => unreachable,
.e => .z_and_np,
.ne => .nz_or_p,
} }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, .f_, .ld, .src0t, ._, ._, ._ },
.{ ._, .f_p, .ucom, .src1t, ._, ._, ._ },
.{ ._, .fn_sw, .st, .tmp2w, ._, ._, ._ },
.{ ._, ._, .sahf, ._, ._, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .x87, null, null },
.src_constraints = .{ .{ .float = .tbyte }, .{ .float = .tbyte } },
.patterns = &.{
.{ .src = .{ .to_x87, .mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .mem, .to_x87 } },
.{ .src = .{ .to_x87, .to_x87 } },
},
.extra_temps = .{
.{ .type = .f80, .kind = .{ .reg = .st6 } },
.{ .type = .f80, .kind = .{ .reg = .st7 } },
.{ .type = .u8, .kind = .{ .reg = .ah } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, .f_, .ld, .src0t, ._, ._, ._ },
.{ ._, .f_p, .ucom, .src1t, ._, ._, ._ },
.{ ._, .fn_sw, .st, .tmp2w, ._, ._, ._ },
.{ ._, ._, .xor, .tmp2b, .si(0b0_1_000_000), ._, ._ },
.{ ._, ._, .@"test", .tmp2b, .si(0b0_1_000_100), ._, ._ },
} },
}, .{
.required_features = .{ .x87, null, null, null },
.src_constraints = .{ .{ .float = .tbyte }, .{ .float = .tbyte } },
.patterns = &.{
.{ .src = .{ .to_x87, .mem }, .commute = .{ 0, 1 } },
.{ .src = .{ .mem, .to_x87 } },
.{ .src = .{ .to_x87, .to_x87 } },
},
.extra_temps = .{
.{ .type = .f80, .kind = .{ .reg = .st6 } },
.{ .type = .f80, .kind = .{ .reg = .st7 } },
.{ .type = .u16, .kind = .{ .reg = .ah } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = switch (cc) {
else => unreachable,
.e => .z_and_np,
.ne => .nz_or_p,
} }},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, .f_, .ld, .src0t, ._, ._, ._ },
.{ ._, .f_p, .ucom, .src1t, ._, ._, ._ },
.{ ._, .fn_sw, .st, .tmp2w, ._, ._, ._ },
.{ ._, ._, .sahf, ._, ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{ .{ .float = .xword }, .{ .float = .xword } },
.patterns = &.{
.{ .src = .{ .{ .to_reg = .xmm0 }, .{ .to_reg = .xmm1 } } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .symbol = &.{ .name = switch (cc) {
else => unreachable,
.e => "__eqtf2",
.ne => "__netf2",
} } } },
.{ .type = .i32, .kind = .{ .reg = .eax } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{.{ .cc = cc }},
.clobbers = .{ .eflags = true, .caller_preserved = .ccc },
.each = .{ .once = &.{
.{ ._, ._, .call, .tmp0d, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp1d, .tmp1d, ._, ._ },
} },
} },
}) catch |err| switch (err) {
error.SelectFailed => return cg.fail("failed to select {s} {} {} {}", .{
@tagName(air_tag),
cg.typeOf(bin_op.lhs).fmt(pt),
ops[0].tracking(cg),
ops[1].tracking(cg),
}),
else => |e| return e,
};
try res[0].finish(inst, &.{ bin_op.lhs, bin_op.rhs }, &ops, cg);
},

.cond_br => try cg.airCondBr(inst),
.switch_br => try cg.airSwitchBr(inst),
.loop_switch_br => try cg.airLoopSwitchBr(inst),
.switch_dispatch => try cg.airSwitchDispatch(inst),
.@"try", .try_cold => try cg.airTry(inst),
.try_ptr, .try_ptr_cold => try cg.airTryPtr(inst),
.dbg_stmt => if (cg.debug_output != .none) {
const dbg_stmt = air_datas[@intFromEnum(inst)].dbg_stmt;
_ = try cg.addInst(.{
.tag = .pseudo,
.ops = .pseudo_dbg_line_stmt_line_column,
.data = .{ .line_column = .{
.line = dbg_stmt.line,
.column = dbg_stmt.column,
} },
});
},
.dbg_empty_stmt => if (cg.debug_output != .none) {
if (cg.mir_instructions.len > 0) {
const prev_mir_op = &cg.mir_instructions.items(.ops)[cg.mir_instructions.len - 1];
if (prev_mir_op.* == .pseudo_dbg_line_line_column)
prev_mir_op.* = .pseudo_dbg_line_stmt_line_column;
}
try cg.asmOpOnly(.{ ._, .nop });
},
.dbg_inline_block => {
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
const extra = cg.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
const old_inline_func = cg.inline_func;
defer cg.inline_func = old_inline_func;
cg.inline_func = extra.data.func;
if (cg.debug_output != .none) _ = try cg.addInst(.{
.tag = .pseudo,
.ops = .pseudo_dbg_enter_inline_func,
.data = .{ .func = extra.data.func },
});
try cg.lowerBlock(inst, @ptrCast(cg.air.extra[extra.end..][0..extra.data.body_len]));
if (cg.debug_output != .none) _ = try cg.addInst(.{
.tag = .pseudo,
.ops = .pseudo_dbg_leave_inline_func,
.data = .{ .func = old_inline_func },
});
},
.dbg_var_ptr, .dbg_var_val, .dbg_arg_inline => if (use_old) try cg.airDbgVar(inst) else if (cg.debug_output != .none) {
const pl_op = air_datas[@intFromEnum(inst)].pl_op;
var ops = try cg.tempsFromOperands(inst, .{pl_op.operand});
var mcv = ops[0].tracking(cg).short;
switch (mcv) {
else => {},
.eflags => |cc| switch (cc) {
else => {},
// These values would self-destruct. Maybe we should make them use their
// Turing-complete DWARF expression interpreters for once?
.z_and_np, .nz_or_p => {
try cg.spillEflagsIfOccupied();
mcv = ops[0].tracking(cg).short;
},
},
}
try cg.genLocalDebugInfo(inst, ops[0].tracking(cg).short);
try ops[0].die(cg);
},
.is_null_ptr => if (use_old) try cg.airIsNullPtr(inst) else {
const un_op = air_datas[@intFromEnum(inst)].un_op;
const opt_ty = cg.typeOf(un_op).childType(zcu);
const opt_repr_is_pl = opt_ty.optionalReprIsPayload(zcu);
const opt_child_ty = opt_ty.optionalChild(zcu);
const opt_child_abi_size: u31 = @intCast(opt_child_ty.abiSize(zcu));
var ops = try cg.tempsFromOperands(inst, .{un_op});
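// Annotation: when the optional is not payload-representable, its "has value"
// byte sits immediately after the payload, so rebase the pointer to that byte
// before the comparison below.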
if (!opt_repr_is_pl) try ops[0].toOffset(opt_child_abi_size, cg);
while (try ops[0].toLea(cg)) {}
try cg.asmMemoryImmediate(
.{ ._, .cmp },
try ops[0].tracking(cg).short.deref().mem(cg, .{ .size = if (!opt_repr_is_pl)
.byte
else if (opt_child_ty.isSlice(zcu))
.qword
else
.fromSize(opt_child_abi_size) }),
.u(0),
);
const is_null = try cg.tempInit(.bool, .{ .eflags = .e });
try is_null.finish(inst, &.{un_op}, &ops, cg);
},
.is_non_null_ptr => if (use_old) try cg.airIsNonNullPtr(inst) else {
const un_op = air_datas[@intFromEnum(inst)].un_op;
const opt_ty = cg.typeOf(un_op).childType(zcu);
const opt_repr_is_pl = opt_ty.optionalReprIsPayload(zcu);
const opt_child_ty = opt_ty.optionalChild(zcu);
const opt_child_abi_size: u31 = @intCast(opt_child_ty.abiSize(zcu));
var ops = try cg.tempsFromOperands(inst, .{un_op});
if (!opt_repr_is_pl) try ops[0].toOffset(opt_child_abi_size, cg);
while (try ops[0].toLea(cg)) {}
try cg.asmMemoryImmediate(
.{ ._, .cmp },
try ops[0].tracking(cg).short.deref().mem(cg, .{ .size = if (!opt_repr_is_pl)
.byte
else if (opt_child_ty.isSlice(zcu))
.qword
else
.fromSize(opt_child_abi_size) }),
.u(0),
);
const is_non_null = try cg.tempInit(.bool, .{ .eflags = .ne });
try is_non_null.finish(inst, &.{un_op}, &ops, cg);
},
.is_err_ptr => if (use_old) try cg.airIsErrPtr(inst) else {
const un_op = air_datas[@intFromEnum(inst)].un_op;
const eu_ty = cg.typeOf(un_op).childType(zcu);
const eu_err_ty = eu_ty.errorUnionSet(zcu);
const eu_pl_ty = eu_ty.errorUnionPayload(zcu);
const eu_err_off: i32 = @intCast(codegen.errUnionErrorOffset(eu_pl_ty, zcu));
var ops = try cg.tempsFromOperands(inst, .{un_op});
try ops[0].toOffset(eu_err_off, cg);
while (try ops[0].toLea(cg)) {}
try cg.asmMemoryImmediate(
.{ ._, .cmp },
try ops[0].tracking(cg).short.deref().mem(cg, .{ .size = cg.memSize(eu_err_ty) }),
.u(0),
);
const is_err = try cg.tempInit(.bool, .{ .eflags = .ne });
try is_err.finish(inst, &.{un_op}, &ops, cg);
},
.is_non_err_ptr => if (use_old) try cg.airIsNonErrPtr(inst) else {
const un_op = air_datas[@intFromEnum(inst)].un_op;
const eu_ty = cg.typeOf(un_op).childType(zcu);
const eu_err_ty = eu_ty.errorUnionSet(zcu);
const eu_pl_ty = eu_ty.errorUnionPayload(zcu);
const eu_err_off: i32 = @intCast(codegen.errUnionErrorOffset(eu_pl_ty, zcu));
var ops = try cg.tempsFromOperands(inst, .{un_op});
try ops[0].toOffset(eu_err_off, cg);
while (try ops[0].toLea(cg)) {}
try cg.asmMemoryImmediate(
.{ ._, .cmp },
try ops[0].tracking(cg).short.deref().mem(cg, .{ .size = cg.memSize(eu_err_ty) }),
.u(0),
);
const is_non_err = try cg.tempInit(.bool, .{ .eflags = .e });
try is_non_err.finish(inst, &.{un_op}, &ops, cg);
},
.load => if (use_old) try cg.airLoad(inst) else fallback: {
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
const val_ty = ty_op.ty.toType();
const ptr_ty = cg.typeOf(ty_op.operand);
const ptr_info = ptr_ty.ptrInfo(zcu);
if (ptr_info.packed_offset.host_size > 0 and
(ptr_info.flags.vector_index == .none or val_ty.toIntern() == .bool_type))
break :fallback try cg.airLoad(inst);
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
const res = try ops[0].load(val_ty, .{
.disp = switch (ptr_info.flags.vector_index) {
.none => 0,
.runtime => unreachable,
else => |vector_index| @intCast(val_ty.abiSize(zcu) * @intFromEnum(vector_index)),
},
}, cg);
try res.finish(inst, &.{ty_op.operand}, &ops, cg);
},
.int_from_ptr => if (use_old) try cg.airIntFromPtr(inst) else {
const un_op = air_datas[@intFromEnum(inst)].un_op;
var ops = try cg.tempsFromOperands(inst, .{un_op});
try ops[0].toSlicePtr(cg);
try ops[0].finish(inst, &.{un_op}, &ops, cg);
},
.int_from_bool => if (use_old) try cg.airIntFromBool(inst) else {
const un_op = air_datas[@intFromEnum(inst)].un_op;
const ops = try cg.tempsFromOperands(inst, .{un_op});
try ops[0].finish(inst, &.{un_op}, &ops, cg);
},
.ret => try cg.airRet(inst, false),
.ret_safe => try cg.airRet(inst, true),
.ret_load => try cg.airRetLoad(inst),
.store, .store_safe => |air_tag| if (use_old) try cg.airStore(inst, switch (air_tag) {
else => unreachable,
.store => false,
.store_safe => true,
}) else fallback: {
const bin_op = air_datas[@intFromEnum(inst)].bin_op;
const ptr_ty = cg.typeOf(bin_op.lhs);
const ptr_info = ptr_ty.ptrInfo(zcu);
const val_ty = cg.typeOf(bin_op.rhs);
if (ptr_info.packed_offset.host_size > 0 and
(ptr_info.flags.vector_index == .none or val_ty.toIntern() == .bool_type))
break :fallback try cg.airStore(inst, switch (air_tag) {
else => unreachable,
.store => false,
.store_safe => true,
});
var ops = try cg.tempsFromOperands(inst, .{ bin_op.lhs, bin_op.rhs });
try ops[0].store(&ops[1], .{
.disp = switch (ptr_info.flags.vector_index) {
.none => 0,
.runtime => unreachable,
else => |vector_index| @intCast(val_ty.abiSize(zcu) * @intFromEnum(vector_index)),
},
.safe = switch (air_tag) {
else => unreachable,
.store => false,
.store_safe => true,
},
}, cg);
for (ops) |op| try op.die(cg);
},
.unreach => {},
.optional_payload_ptr => if (use_old) try cg.airOptionalPayloadPtr(inst) else {
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
const ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
try ops[0].finish(inst, &.{ty_op.operand}, &ops, cg);
},
.optional_payload_ptr_set => if (use_old) try cg.airOptionalPayloadPtrSet(inst) else {
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
const opt_ty = cg.typeOf(ty_op.operand).childType(zcu);
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
if (!opt_ty.optionalReprIsPayload(zcu)) {
const opt_child_ty = opt_ty.optionalChild(zcu);
const opt_child_abi_size: i32 = @intCast(opt_child_ty.abiSize(zcu));
try ops[0].toOffset(opt_child_abi_size, cg);
var has_value = try cg.tempInit(.bool, .{ .immediate = 1 });
try ops[0].store(&has_value, .{}, cg);
try has_value.die(cg);
try ops[0].toOffset(-opt_child_abi_size, cg);
}
try ops[0].finish(inst, &.{ty_op.operand}, &ops, cg);
},
.unwrap_errunion_payload_ptr => if (use_old) try cg.airUnwrapErrUnionPayloadPtr(inst) else {
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
const eu_ty = cg.typeOf(ty_op.operand).childType(zcu);
const eu_pl_ty = eu_ty.errorUnionPayload(zcu);
const eu_pl_off: i32 = @intCast(codegen.errUnionPayloadOffset(eu_pl_ty, zcu));
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
try ops[0].toOffset(eu_pl_off, cg);
try ops[0].finish(inst, &.{ty_op.operand}, &ops, cg);
},
.unwrap_errunion_err_ptr => if (use_old) try cg.airUnwrapErrUnionErrPtr(inst) else {
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
const eu_ty = cg.typeOf(ty_op.operand).childType(zcu);
const eu_pl_ty = eu_ty.errorUnionPayload(zcu);
const eu_err_off: i32 = @intCast(codegen.errUnionErrorOffset(eu_pl_ty, zcu));
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
try ops[0].toOffset(eu_err_off, cg);
const err = try ops[0].load(eu_ty.errorUnionSet(zcu), .{}, cg);
try err.finish(inst, &.{ty_op.operand}, &ops, cg);
},
.errunion_payload_ptr_set => if (use_old) try cg.airErrUnionPayloadPtrSet(inst) else {
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
const eu_ty = cg.typeOf(ty_op.operand).childType(zcu);
const eu_err_ty = eu_ty.errorUnionSet(zcu);
const eu_pl_ty = eu_ty.errorUnionPayload(zcu);
const eu_err_off: i32 = @intCast(codegen.errUnionErrorOffset(eu_pl_ty, zcu));
const eu_pl_off: i32 = @intCast(codegen.errUnionPayloadOffset(eu_pl_ty, zcu));
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
|
|
try ops[0].toOffset(eu_err_off, cg);
|
|
var no_err = try cg.tempInit(eu_err_ty, .{ .immediate = 0 });
|
|
try ops[0].store(&no_err, .{}, cg);
|
|
try no_err.die(cg);
|
|
try ops[0].toOffset(eu_pl_off - eu_err_off, cg);
|
|
try ops[0].finish(inst, &.{ty_op.operand}, &ops, cg);
|
|
},
|
|
.struct_field_ptr => if (use_old) try cg.airStructFieldPtr(inst) else {
|
|
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
|
|
const extra = cg.air.extraData(Air.StructField, ty_pl.payload).data;
|
|
var ops = try cg.tempsFromOperands(inst, .{extra.struct_operand});
|
|
try ops[0].toOffset(cg.fieldOffset(
|
|
cg.typeOf(extra.struct_operand),
|
|
ty_pl.ty.toType(),
|
|
extra.field_index,
|
|
), cg);
|
|
try ops[0].finish(inst, &.{extra.struct_operand}, &ops, cg);
|
|
},
|
|
.struct_field_ptr_index_0,
|
|
.struct_field_ptr_index_1,
|
|
.struct_field_ptr_index_2,
|
|
.struct_field_ptr_index_3,
|
|
=> |air_tag| if (use_old) try cg.airStructFieldPtrIndex(inst, switch (air_tag) {
|
|
else => unreachable,
|
|
.struct_field_ptr_index_0 => 0,
|
|
.struct_field_ptr_index_1 => 1,
|
|
.struct_field_ptr_index_2 => 2,
|
|
.struct_field_ptr_index_3 => 3,
|
|
}) else {
|
|
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
|
|
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
|
|
try ops[0].toOffset(cg.fieldOffset(
|
|
cg.typeOf(ty_op.operand),
|
|
ty_op.ty.toType(),
|
|
switch (air_tag) {
|
|
else => unreachable,
|
|
.struct_field_ptr_index_0 => 0,
|
|
.struct_field_ptr_index_1 => 1,
|
|
.struct_field_ptr_index_2 => 2,
|
|
.struct_field_ptr_index_3 => 3,
|
|
},
|
|
), cg);
|
|
try ops[0].finish(inst, &.{ty_op.operand}, &ops, cg);
|
|
},
|
|
.struct_field_val => if (use_old) try cg.airStructFieldVal(inst) else fallback: {
|
|
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
|
|
const extra = cg.air.extraData(Air.StructField, ty_pl.payload).data;
|
|
const agg_ty = cg.typeOf(extra.struct_operand);
|
|
const field_ty = ty_pl.ty.toType();
|
|
const field_off: u31 = switch (agg_ty.containerLayout(zcu)) {
|
|
.auto, .@"extern" => @intCast(agg_ty.structFieldOffset(extra.field_index, zcu)),
|
|
.@"packed" => break :fallback try cg.airStructFieldVal(inst),
|
|
};
|
|
var ops = try cg.tempsFromOperands(inst, .{extra.struct_operand});
|
|
// hack around Sema OPV bugs
|
|
var res = if (field_ty.hasRuntimeBitsIgnoreComptime(zcu))
|
|
try ops[0].read(field_ty, .{ .disp = field_off }, cg)
|
|
else
|
|
try cg.tempInit(field_ty, .none);
|
|
try res.finish(inst, &.{extra.struct_operand}, &ops, cg);
|
|
},
|
|
.set_union_tag => if (use_old) try cg.airSetUnionTag(inst) else {
|
|
const bin_op = air_datas[@intFromEnum(inst)].bin_op;
|
|
const union_ty = cg.typeOf(bin_op.lhs).childType(zcu);
|
|
const union_layout = union_ty.unionGetLayout(zcu);
|
|
var ops = try cg.tempsFromOperands(inst, .{ bin_op.lhs, bin_op.rhs });
|
|
// hack around Sema OPV bugs
|
|
if (union_layout.tag_size > 0) try ops[0].store(&ops[1], .{
|
|
.disp = @intCast(union_layout.tagOffset()),
|
|
}, cg);
|
|
const res = try cg.tempInit(.void, .none);
|
|
try res.finish(inst, &.{ bin_op.lhs, bin_op.rhs }, &ops, cg);
|
|
},
|
|
.get_union_tag => if (use_old) try cg.airGetUnionTag(inst) else {
|
|
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
|
|
const union_ty = cg.typeOf(ty_op.operand);
|
|
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
|
|
const union_layout = union_ty.unionGetLayout(zcu);
|
|
assert(union_layout.tag_size > 0);
|
|
const res = try ops[0].read(ty_op.ty.toType(), .{
|
|
.disp = @intCast(union_layout.tagOffset()),
|
|
}, cg);
|
|
try res.finish(inst, &.{ty_op.operand}, &ops, cg);
|
|
},
|
|
.slice => if (use_old) try cg.airSlice(inst) else {
|
|
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
|
|
const bin_op = cg.air.extraData(Air.Bin, ty_pl.payload).data;
|
|
var ops = try cg.tempsFromOperands(inst, .{ bin_op.lhs, bin_op.rhs });
|
|
try ops[0].toPair(&ops[1], cg);
|
|
try ops[0].finish(inst, &.{ bin_op.lhs, bin_op.rhs }, &ops, cg);
|
|
},
|
|
.slice_len => if (use_old) try cg.airSliceLen(inst) else {
|
|
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
|
|
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
|
|
try ops[0].toSliceLen(cg);
|
|
try ops[0].finish(inst, &.{ty_op.operand}, &ops, cg);
|
|
},
|
|
.slice_ptr => if (use_old) try cg.airSlicePtr(inst) else {
|
|
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
|
|
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
|
|
try ops[0].toSlicePtr(cg);
|
|
try ops[0].finish(inst, &.{ty_op.operand}, &ops, cg);
|
|
},
|
|
.ptr_slice_len_ptr => if (use_old) try cg.airPtrSliceLenPtr(inst) else {
|
|
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
|
|
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
|
|
try ops[0].toOffset(8, cg);
|
|
try ops[0].finish(inst, &.{ty_op.operand}, &ops, cg);
|
|
},
|
|
.ptr_slice_ptr_ptr => if (use_old) try cg.airPtrSlicePtrPtr(inst) else {
|
|
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
|
|
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
|
|
try ops[0].toOffset(0, cg);
|
|
try ops[0].finish(inst, &.{ty_op.operand}, &ops, cg);
|
|
},
|
|
.slice_elem_val, .ptr_elem_val => |air_tag| if (use_old) switch (air_tag) {
|
|
else => unreachable,
|
|
.slice_elem_val => try cg.airSliceElemVal(inst),
|
|
.ptr_elem_val => try cg.airPtrElemVal(inst),
|
|
} else {
|
|
const bin_op = air_datas[@intFromEnum(inst)].bin_op;
|
|
const res_ty = cg.typeOf(bin_op.lhs).elemType2(zcu);
|
|
var ops = try cg.tempsFromOperands(inst, .{ bin_op.lhs, bin_op.rhs });
|
|
try ops[0].toSlicePtr(cg);
|
|
var res: [1]Temp = undefined;
|
|
if (res_ty.hasRuntimeBitsIgnoreComptime(zcu)) cg.select(&res, &.{res_ty}, &ops, comptime &.{ .{
|
|
.dst_constraints = .{.{ .int = .byte }},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_gpr, .simm32 } },
|
|
},
|
|
.dst_temps = .{.{ .rc = .general_purpose }},
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .movzx, .dst0d, .leaa(.byte, .src0, .add_src0_elem_size_times_src1), ._, ._ },
|
|
} },
|
|
}, .{
|
|
.dst_constraints = .{.{ .int = .byte }},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_gpr, .to_gpr } },
|
|
},
|
|
.dst_temps = .{.{ .rc = .general_purpose }},
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .movzx, .dst0d, .leai(.byte, .src0, .src1), ._, ._ },
|
|
} },
|
|
}, .{
|
|
.dst_constraints = .{.{ .int = .word }},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_gpr, .simm32 } },
|
|
},
|
|
.dst_temps = .{.{ .rc = .general_purpose }},
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .movzx, .dst0d, .leaa(.word, .src0, .add_src0_elem_size_times_src1), ._, ._ },
|
|
} },
|
|
}, .{
|
|
.dst_constraints = .{.{ .int = .word }},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_gpr, .to_gpr } },
|
|
},
|
|
.dst_temps = .{.{ .rc = .general_purpose }},
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .movzx, .dst0d, .leasi(.word, .src0, .@"2", .src1), ._, ._ },
|
|
} },
|
|
}, .{
|
|
.dst_constraints = .{.{ .int = .dword }},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_gpr, .simm32 } },
|
|
},
|
|
.dst_temps = .{.{ .rc = .general_purpose }},
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .dst0d, .leaa(.dword, .src0, .add_src0_elem_size_times_src1), ._, ._ },
|
|
} },
|
|
}, .{
|
|
.dst_constraints = .{.{ .int = .dword }},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_gpr, .to_gpr } },
|
|
},
|
|
.dst_temps = .{.{ .rc = .general_purpose }},
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .dst0d, .leasi(.dword, .src0, .@"4", .src1), ._, ._ },
|
|
} },
|
|
}, .{
|
|
.dst_constraints = .{.{ .int = .qword }},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_gpr, .simm32 } },
|
|
},
|
|
.dst_temps = .{.{ .rc = .general_purpose }},
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .dst0q, .leaa(.qword, .src0, .add_src0_elem_size_times_src1), ._, ._ },
|
|
} },
|
|
}, .{
|
|
.required_features = .{ .@"64bit", null, null, null },
|
|
.dst_constraints = .{.{ .int = .qword }},
|
|
.patterns = &.{
|
|
.{ .src = .{ .to_gpr, .to_gpr } },
|
|
},
|
|
.dst_temps = .{.{ .rc = .general_purpose }},
|
|
.each = .{ .once = &.{
|
|
.{ ._, ._, .mov, .dst0q, .leasi(.qword, .src0, .@"8", .src1), ._, ._ },
|
|
} },
|
|
} }) catch |err| switch (err) {
|
|
error.SelectFailed => {
|
|
const elem_size = res_ty.abiSize(zcu);
|
|
while (true) for (&ops) |*op| {
|
|
if (try op.toRegClass(true, .general_purpose, cg)) break;
|
|
} else break;
|
|
const lhs_reg = ops[0].unwrap(cg).temp.tracking(cg).short.register.to64();
|
|
const rhs_reg = ops[1].unwrap(cg).temp.tracking(cg).short.register.to64();
|
|
if (!std.math.isPowerOfTwo(elem_size)) {
|
|
try cg.spillEflagsIfOccupied();
|
|
try cg.asmRegisterRegisterImmediate(
|
|
.{ .i_, .mul },
|
|
rhs_reg,
|
|
rhs_reg,
|
|
.u(elem_size),
|
|
);
|
|
try cg.asmRegisterMemory(.{ ._, .lea }, lhs_reg, .{
|
|
.base = .{ .reg = lhs_reg },
|
|
.mod = .{ .rm = .{ .size = .qword, .index = rhs_reg } },
|
|
});
|
|
} else if (elem_size > 8) {
|
|
try cg.spillEflagsIfOccupied();
|
|
try cg.asmRegisterImmediate(
|
|
.{ ._l, .sh },
|
|
rhs_reg,
|
|
.u(std.math.log2_int(u64, elem_size)),
|
|
);
|
|
try cg.asmRegisterMemory(.{ ._, .lea }, lhs_reg, .{
|
|
.base = .{ .reg = lhs_reg },
|
|
.mod = .{ .rm = .{ .size = .qword, .index = rhs_reg } },
|
|
});
|
|
} else try cg.asmRegisterMemory(.{ ._, .lea }, lhs_reg, .{
|
|
.base = .{ .reg = lhs_reg },
|
|
.mod = .{ .rm = .{
|
|
.size = .qword,
|
|
.index = rhs_reg,
|
|
.scale = .fromFactor(@intCast(elem_size)),
|
|
} },
|
|
});
|
|
res[0] = try ops[0].load(res_ty, .{}, cg);
|
|
},
|
|
else => |e| return e,
|
|
} else {
|
|
// hack around Sema OPV bugs
|
|
res[0] = try cg.tempInit(res_ty, .none);
|
|
}
|
|
try res[0].finish(inst, &.{ bin_op.lhs, bin_op.rhs }, &ops, cg);
|
|
},
|
|
.slice_elem_ptr, .ptr_elem_ptr => |air_tag| if (use_old) switch (air_tag) {
|
|
else => unreachable,
|
|
.slice_elem_ptr => try cg.airSliceElemPtr(inst),
|
|
.ptr_elem_ptr => try cg.airPtrElemPtr(inst),
|
|
} else {
|
|
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
|
|
const bin_op = cg.air.extraData(Air.Bin, ty_pl.payload).data;
|
|
var ops = try cg.tempsFromOperands(inst, .{ bin_op.lhs, bin_op.rhs });
|
|
try ops[0].toSlicePtr(cg);
|
|
const dst_ty = ty_pl.ty.toType();
|
|
if (dst_ty.ptrInfo(zcu).flags.vector_index == .none) zero_offset: {
|
|
const elem_size = dst_ty.childType(zcu).abiSize(zcu);
|
|
// hack around Sema OPV bugs
|
|
if (elem_size == 0) break :zero_offset;
|
|
while (true) for (&ops) |*op| {
|
|
if (try op.toRegClass(true, .general_purpose, cg)) break;
|
|
} else break;
|
|
const lhs_reg = ops[0].unwrap(cg).temp.tracking(cg).short.register.to64();
|
|
const rhs_reg = ops[1].unwrap(cg).temp.tracking(cg).short.register.to64();
|
|
if (!std.math.isPowerOfTwo(elem_size)) {
|
|
try cg.spillEflagsIfOccupied();
|
|
try cg.asmRegisterRegisterImmediate(
|
|
.{ .i_, .mul },
|
|
rhs_reg,
|
|
rhs_reg,
|
|
.u(elem_size),
|
|
);
|
|
try cg.asmRegisterMemory(.{ ._, .lea }, lhs_reg, .{
|
|
.base = .{ .reg = lhs_reg },
|
|
.mod = .{ .rm = .{ .size = .qword, .index = rhs_reg } },
|
|
});
|
|
} else if (elem_size > 8) {
|
|
try cg.spillEflagsIfOccupied();
|
|
try cg.asmRegisterImmediate(
|
|
.{ ._l, .sh },
|
|
rhs_reg,
|
|
.u(std.math.log2_int(u64, elem_size)),
|
|
);
|
|
try cg.asmRegisterMemory(.{ ._, .lea }, lhs_reg, .{
|
|
.base = .{ .reg = lhs_reg },
|
|
.mod = .{ .rm = .{ .size = .qword, .index = rhs_reg } },
|
|
});
|
|
} else try cg.asmRegisterMemory(.{ ._, .lea }, lhs_reg, .{
|
|
.base = .{ .reg = lhs_reg },
|
|
.mod = .{ .rm = .{
|
|
.size = .qword,
|
|
.index = rhs_reg,
|
|
.scale = .fromFactor(@intCast(elem_size)),
|
|
} },
|
|
});
|
|
}
|
|
try ops[0].finish(inst, &.{ bin_op.lhs, bin_op.rhs }, &ops, cg);
|
|
},
|
|
.array_to_slice => if (use_old) try cg.airArrayToSlice(inst) else {
|
|
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
|
|
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
|
|
var len = try cg.tempInit(.usize, .{
|
|
.immediate = cg.typeOf(ty_op.operand).childType(zcu).arrayLen(zcu),
|
|
});
|
|
try ops[0].toPair(&len, cg);
|
|
try ops[0].finish(inst, &.{ty_op.operand}, &ops, cg);
|
|
},
|
|
.error_set_has_value => return cg.fail("TODO implement error_set_has_value", .{}),
|
|
.union_init => if (use_old) try cg.airUnionInit(inst) else {
|
|
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
|
|
const extra = cg.air.extraData(Air.UnionInit, ty_pl.payload).data;
|
|
const union_ty = ty_pl.ty.toType();
|
|
var ops = try cg.tempsFromOperands(inst, .{extra.init});
|
|
var res = try cg.tempAllocMem(union_ty);
|
|
const union_layout = union_ty.unionGetLayout(zcu);
|
|
if (union_layout.tag_size > 0) {
|
|
var tag_temp = try cg.tempFromValue(try pt.enumValueFieldIndex(
|
|
union_ty.unionTagTypeSafety(zcu).?,
|
|
extra.field_index,
|
|
));
|
|
try res.write(&tag_temp, .{
|
|
.disp = @intCast(union_layout.tagOffset()),
|
|
}, cg);
|
|
try tag_temp.die(cg);
|
|
}
|
|
try res.write(&ops[0], .{
|
|
.disp = @intCast(union_layout.payloadOffset()),
|
|
}, cg);
|
|
try res.finish(inst, &.{extra.init}, &ops, cg);
|
|
},
|
|
.field_parent_ptr => if (use_old) try cg.airFieldParentPtr(inst) else {
|
|
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
|
|
const extra = cg.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
|
|
var ops = try cg.tempsFromOperands(inst, .{extra.field_ptr});
|
|
try ops[0].toOffset(-cg.fieldOffset(
|
|
ty_pl.ty.toType(),
|
|
cg.typeOf(extra.field_ptr),
|
|
extra.field_index,
|
|
), cg);
|
|
try ops[0].finish(inst, &.{extra.field_ptr}, &ops, cg);
|
|
},
|
|
|
|
.is_named_enum_value => return cg.fail("TODO implement is_named_enum_value", .{}),
|
|
|
|
.wasm_memory_size => unreachable,
|
|
.wasm_memory_grow => unreachable,
|
|
|
|
.err_return_trace => {
|
|
const ert: Temp = .{ .index = err_ret_trace_index };
|
|
try ert.finish(inst, &.{}, &.{}, cg);
|
|
},
|
|
.set_err_return_trace => {
|
|
const un_op = air_datas[@intFromEnum(inst)].un_op;
|
|
var ops = try cg.tempsFromOperands(inst, .{un_op});
|
|
switch (ops[0].unwrap(cg)) {
|
|
.ref => {
|
|
const result = try cg.allocRegOrMem(err_ret_trace_index, true);
|
|
try cg.genCopy(.usize, result, ops[0].tracking(cg).short, .{});
|
|
tracking_log.debug("{} => {} (birth)", .{ err_ret_trace_index, result });
|
|
cg.inst_tracking.putAssumeCapacityNoClobber(err_ret_trace_index, .init(result));
|
|
},
|
|
.temp => |temp_index| {
|
|
const temp_tracking = temp_index.tracking(cg);
|
|
tracking_log.debug("{} => {} (birth)", .{ err_ret_trace_index, temp_tracking.short });
|
|
cg.inst_tracking.putAssumeCapacityNoClobber(err_ret_trace_index, temp_tracking.*);
|
|
assert(cg.reuseTemp(err_ret_trace_index, temp_index.toIndex(), temp_tracking));
|
|
},
|
|
.err_ret_trace => unreachable,
|
|
}
|
|
},
|
|
|
|
.addrspace_cast => {
|
|
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
|
|
const ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
|
|
try ops[0].finish(inst, &.{ty_op.operand}, &ops, cg);
|
|
},
|
|
|
|
.save_err_return_trace_index => {
|
|
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
|
|
const agg_ty = ty_pl.ty.toType();
|
|
assert(agg_ty.containerLayout(zcu) != .@"packed");
|
|
var ert: Temp = .{ .index = err_ret_trace_index };
|
|
var res = try ert.load(.usize, .{ .disp = @intCast(agg_ty.structFieldOffset(ty_pl.payload, zcu)) }, cg);
|
|
try ert.die(cg);
|
|
try res.finish(inst, &.{}, &.{}, cg);
|
|
},
|
|
|
|
.vector_store_elem => return cg.fail("TODO implement vector_store_elem", .{}),
|
|
|
|
.c_va_arg => try cg.airVaArg(inst),
|
|
.c_va_copy => try cg.airVaCopy(inst),
|
|
.c_va_end => try cg.airVaEnd(inst),
|
|
.c_va_start => try cg.airVaStart(inst),
|
|
|
|
.work_item_id => unreachable,
|
|
.work_group_size => unreachable,
|
|
.work_group_id => unreachable,
|
|
}
|
|
cg.resetTemps();
|
|
cg.checkInvariantsAfterAirInst();
|
|
}
|
|
verbose_tracking_log.debug("{}", .{cg.fmtTracking()});
|
|
}
|
|
|
|
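/// Emits the machine code for a lazily-generated symbol. Currently this
/// handles enum types, whose lazy symbol is the `@tagName` helper: it compares
/// the tag against each field value, stores the matching name's pointer and
/// length through the return register, and hits `ud2` on an invalid tag.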
fn genLazy(self: *CodeGen, lazy_sym: link.File.LazySymbol) InnerError!void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    switch (Type.fromInterned(lazy_sym.ty).zigTypeTag(zcu)) {
        .@"enum" => {
            const enum_ty: Type = .fromInterned(lazy_sym.ty);
            wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(pt)});

            const param_regs = abi.getCAbiIntParamRegs(.auto);
            const param_locks = self.register_manager.lockRegsAssumeUnused(2, param_regs[0..2].*);
            defer for (param_locks) |lock| self.register_manager.unlockReg(lock);

            const ret_reg = param_regs[0];
            const enum_mcv = MCValue{ .register = param_regs[1] };

            const epilogue_relocs = try self.gpa.alloc(Mir.Inst.Index, enum_ty.enumFieldCount(zcu));
            defer self.gpa.free(epilogue_relocs);

            const data_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
            const data_lock = self.register_manager.lockRegAssumeUnused(data_reg);
            defer self.register_manager.unlockReg(data_lock);
            try self.genLazySymbolRef(.lea, data_reg, .{ .kind = .const_data, .ty = enum_ty.toIntern() });

            var data_off: i32 = 0;
            const tag_names = enum_ty.enumFields(zcu);
            for (epilogue_relocs, 0..) |*epilogue_reloc, tag_index| {
                const tag_name_len = tag_names.get(ip)[tag_index].length(ip);
                const tag_val = try pt.enumValueFieldIndex(enum_ty, @intCast(tag_index));
                const tag_mcv = try self.genTypedValue(tag_val);
                try self.genBinOpMir(.{ ._, .cmp }, enum_ty, enum_mcv, tag_mcv);
                const skip_reloc = try self.asmJccReloc(.ne, undefined);

                try self.genSetMem(
                    .{ .reg = ret_reg },
                    0,
                    .usize,
                    .{ .register_offset = .{ .reg = data_reg, .off = data_off } },
                    .{},
                );
                try self.genSetMem(.{ .reg = ret_reg }, 8, .usize, .{ .immediate = tag_name_len }, .{});

                epilogue_reloc.* = try self.asmJmpReloc(undefined);
                self.performReloc(skip_reloc);

                data_off += @intCast(tag_name_len + 1);
            }

            try self.asmOpOnly(.{ ._2, .ud });

            for (epilogue_relocs) |reloc| self.performReloc(reloc);
            try self.asmOpOnly(.{ ._, .ret });
        },
        else => return self.fail(
            "TODO implement {s} for {}",
            .{ @tagName(lazy_sym.kind), Type.fromInterned(lazy_sym.ty).fmt(pt) },
        ),
    }
}

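/// Marks every register backing `value` as allocated to `inst`, and records
/// ownership of the eflags when the value lives there.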
fn getValue(self: *CodeGen, value: MCValue, inst: ?Air.Inst.Index) !void {
    for (value.getRegs()) |reg| try self.register_manager.getReg(reg, inst);
    switch (value) {
        else => {},
        .eflags, .register_overflow => self.eflags_inst = inst,
    }
}

fn getValueIfFree(self: *CodeGen, value: MCValue, inst: ?Air.Inst.Index) void {
    for (value.getRegs()) |reg| if (self.register_manager.isRegFree(reg))
        self.register_manager.getRegAssumeFree(reg, inst);
}

fn freeReg(self: *CodeGen, reg: Register) !void {
    self.register_manager.freeReg(reg);
    if (reg.class() == .x87) try self.asmRegister(.{ .f_, .free }, reg);
}

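/// Releases the machine state backing `value`: any registers it occupies are
/// returned to the register manager, and eflags tracking is cleared.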
fn freeValue(self: *CodeGen, value: MCValue) !void {
    switch (value) {
        .register => |reg| try self.freeReg(reg),
        inline .register_pair,
        .register_triple,
        .register_quadruple,
        => |regs| for (regs) |reg| try self.freeReg(reg),
        .register_offset, .indirect => |reg_off| try self.freeReg(reg_off.reg),
        .register_overflow => |reg_ov| {
            try self.freeReg(reg_ov.reg);
            self.eflags_inst = null;
        },
        .register_mask => |reg_mask| try self.freeReg(reg_mask.reg),
        .eflags => self.eflags_inst = null,
        else => {}, // TODO process stack allocation death
    }
}

fn feed(self: *CodeGen, bt: *Liveness.BigTomb, operand: Air.Inst.Ref) !void {
    if (bt.feed()) if (operand.toIndex()) |inst| try self.processDeath(inst);
}

/// Asserts there is already capacity to insert into top branch inst_table.
fn processDeath(self: *CodeGen, inst: Air.Inst.Index) !void {
    try self.inst_tracking.getPtr(inst).?.die(self, inst);
}

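/// Records `result` as the tracked value of `inst`, or asserts that an unused
/// instruction produced no live result.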
fn finishAirResult(self: *CodeGen, inst: Air.Inst.Index, result: MCValue) void {
    if (self.liveness.isUnused(inst) and self.air.instructions.items(.tag)[@intFromEnum(inst)] != .arg) switch (result) {
        .none, .dead, .unreach => {},
        else => unreachable, // Why didn't the result die?
    } else {
        tracking_log.debug("{} => {} (birth)", .{ inst, result });
        self.inst_tracking.putAssumeCapacityNoClobber(inst, .init(result));
        // In some cases, an operand may be reused as the result.
        // If that operand died and was a register, it was freed by
        // processDeath, so we have to "re-allocate" the register.
        self.getValueIfFree(result, inst);
    }
}

fn finishAir(
    self: *CodeGen,
    inst: Air.Inst.Index,
    result: MCValue,
    operands: [Liveness.bpi - 1]Air.Inst.Ref,
) !void {
    const tomb_bits = self.liveness.getTombBits(inst);
    for (0.., operands) |op_index, op| {
        if (tomb_bits & @as(Liveness.Bpi, 1) << @intCast(op_index) == 0) continue;
        if (self.reused_operands.isSet(op_index)) continue;
        try self.processDeath(op.toIndexAllowNone() orelse continue);
    }
    self.finishAirResult(inst, result);
}

const FrameLayout = struct {
    stack_mask: u32,
    stack_adjust: u32,
    save_reg_list: Mir.RegisterList,
};

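/// Assigns the next location in the frame being laid out to `frame_index`,
/// optionally forwarding `offset` to the allocation's alignment first.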
fn setFrameLoc(
    self: *CodeGen,
    frame_index: FrameIndex,
    base: Register,
    offset: *i32,
    comptime aligned: bool,
) void {
    const frame_i = @intFromEnum(frame_index);
    if (aligned) {
        const alignment = self.frame_allocs.items(.abi_align)[frame_i];
        offset.* = @intCast(alignment.forward(@intCast(offset.*)));
    }
    self.frame_locs.set(frame_i, .{ .base = base, .disp = offset.* });
    offset.* += self.frame_allocs.items(.abi_size)[frame_i];
}

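/// Finalizes the stack frame: orders spill allocations by alignment, decides
/// which callee-preserved registers must be saved in the prologue, and
/// resolves every `FrameIndex` to a concrete rbp- or rsp-relative location.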
fn computeFrameLayout(self: *CodeGen, cc: std.builtin.CallingConvention.Tag) !FrameLayout {
    const frame_allocs_len = self.frame_allocs.len;
    try self.frame_locs.resize(self.gpa, frame_allocs_len);
    const stack_frame_order = try self.gpa.alloc(FrameIndex, frame_allocs_len - FrameIndex.named_count);
    defer self.gpa.free(stack_frame_order);

    const frame_size = self.frame_allocs.items(.abi_size);
    const frame_align = self.frame_allocs.items(.abi_align);
    const frame_offset = self.frame_locs.items(.disp);

    for (stack_frame_order, FrameIndex.named_count..) |*frame_order, frame_index|
        frame_order.* = @enumFromInt(frame_index);
    {
        const SortContext = struct {
            frame_align: @TypeOf(frame_align),
            pub fn lessThan(context: @This(), lhs: FrameIndex, rhs: FrameIndex) bool {
                return context.frame_align[@intFromEnum(lhs)].compare(.gt, context.frame_align[@intFromEnum(rhs)]);
            }
        };
        const sort_context = SortContext{ .frame_align = frame_align };
        std.mem.sort(FrameIndex, stack_frame_order, sort_context, SortContext.lessThan);
    }

    const call_frame_align = frame_align[@intFromEnum(FrameIndex.call_frame)];
    const stack_frame_align = frame_align[@intFromEnum(FrameIndex.stack_frame)];
    const args_frame_align = frame_align[@intFromEnum(FrameIndex.args_frame)];
    const needed_align = call_frame_align.max(stack_frame_align);
    const need_align_stack = needed_align.compare(.gt, args_frame_align);

    // Create list of registers to save in the prologue.
    // TODO handle register classes
    var save_reg_list: Mir.RegisterList = .empty;
    const callee_preserved_regs = abi.getCalleePreservedRegs(cc);
    for (callee_preserved_regs) |reg| {
        if (self.register_manager.isRegAllocated(reg)) {
            save_reg_list.push(callee_preserved_regs, reg);
        }
    }

    var rbp_offset: i32 = 0;
    self.setFrameLoc(.base_ptr, .rbp, &rbp_offset, false);
    self.setFrameLoc(.ret_addr, .rbp, &rbp_offset, false);
    self.setFrameLoc(.args_frame, .rbp, &rbp_offset, false);
    const stack_frame_align_offset = if (need_align_stack)
        0
    else
        save_reg_list.size(self.target) + frame_offset[@intFromEnum(FrameIndex.args_frame)];

    var rsp_offset: i32 = 0;
    self.setFrameLoc(.call_frame, .rsp, &rsp_offset, true);
    self.setFrameLoc(.stack_frame, .rsp, &rsp_offset, true);
    for (stack_frame_order) |frame_index| self.setFrameLoc(frame_index, .rsp, &rsp_offset, true);
    rsp_offset += stack_frame_align_offset;
    rsp_offset = @intCast(needed_align.forward(@intCast(rsp_offset)));
    rsp_offset -= stack_frame_align_offset;
    frame_size[@intFromEnum(FrameIndex.call_frame)] =
        @intCast(rsp_offset - frame_offset[@intFromEnum(FrameIndex.stack_frame)]);

    return .{
        .stack_mask = @as(u32, std.math.maxInt(u32)) << @intCast(if (need_align_stack) @intFromEnum(needed_align) else 0),
        .stack_adjust = @intCast(rsp_offset - frame_offset[@intFromEnum(FrameIndex.call_frame)]),
        .save_reg_list = save_reg_list,
    };
}

fn getFrameAddrAlignment(self: *CodeGen, frame_addr: bits.FrameAddr) InternPool.Alignment {
    const alloc_align = self.frame_allocs.get(@intFromEnum(frame_addr.index)).abi_align;
    return @enumFromInt(@min(@intFromEnum(alloc_align), @ctz(frame_addr.off)));
}

fn getFrameAddrSize(self: *CodeGen, frame_addr: bits.FrameAddr) u32 {
    return self.frame_allocs.get(@intFromEnum(frame_addr.index)).abi_size - @as(u31, @intCast(frame_addr.off));
}

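/// Allocates a stack frame slot for `alloc`, reusing a previously freed index
/// of the same size when one is available.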
fn allocFrameIndex(self: *CodeGen, alloc: FrameAlloc) !FrameIndex {
    const frame_allocs_slice = self.frame_allocs.slice();
    const frame_size = frame_allocs_slice.items(.abi_size);
    const frame_align = frame_allocs_slice.items(.abi_align);

    const stack_frame_align = &frame_align[@intFromEnum(FrameIndex.stack_frame)];
    stack_frame_align.* = stack_frame_align.max(alloc.abi_align);

    for (self.free_frame_indices.keys(), 0..) |frame_index, free_i| {
        const abi_size = frame_size[@intFromEnum(frame_index)];
        if (abi_size != alloc.abi_size) continue;
        const abi_align = &frame_align[@intFromEnum(frame_index)];
        abi_align.* = abi_align.max(alloc.abi_align);

        _ = self.free_frame_indices.swapRemoveAt(free_i);
        return frame_index;
    }
    const frame_index: FrameIndex = @enumFromInt(self.frame_allocs.len);
    try self.frame_allocs.append(self.gpa, alloc);
    return frame_index;
}

/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *CodeGen, inst: Air.Inst.Index) !FrameIndex {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ptr_ty = self.typeOfIndex(inst);
    const val_ty = ptr_ty.childType(zcu);
    return self.allocFrameIndex(.init(.{
        .size = std.math.cast(u32, val_ty.abiSize(zcu)) orelse {
            return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(pt)});
        },
        .alignment = ptr_ty.ptrAlignment(zcu).max(.@"1"),
    }));
}

fn allocRegOrMem(self: *CodeGen, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
    return self.allocRegOrMemAdvanced(self.typeOfIndex(inst), inst, reg_ok);
}

fn allocTempRegOrMem(self: *CodeGen, elem_ty: Type, reg_ok: bool) !MCValue {
    return self.allocRegOrMemAdvanced(elem_ty, null, reg_ok);
}

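/// Allocates either a register (when `reg_ok` is set and the type fits in
/// one) or a stack frame slot for a value of type `ty`.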
fn allocRegOrMemAdvanced(self: *CodeGen, ty: Type, inst: ?Air.Inst.Index, reg_ok: bool) !MCValue {
    const pt = self.pt;
    const zcu = pt.zcu;
    const abi_size = std.math.cast(u32, ty.abiSize(zcu)) orelse {
        return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(pt)});
    };

    if (reg_ok) need_mem: {
        if (std.math.isPowerOfTwo(abi_size) and abi_size <= @as(u32, switch (ty.zigTypeTag(zcu)) {
            .float => switch (ty.floatBits(self.target.*)) {
                16, 32, 64, 128 => 16,
                80 => break :need_mem,
                else => unreachable,
            },
            .vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
                .float => switch (ty.childType(zcu).floatBits(self.target.*)) {
                    16, 32, 64, 128 => self.vectorSize(.float),
                    80 => break :need_mem,
                    else => unreachable,
                },
                else => self.vectorSize(.int),
            },
            else => 8,
        })) {
            if (self.register_manager.tryAllocReg(inst, self.regSetForType(ty))) |reg| {
                return MCValue{ .register = registerAlias(reg, abi_size) };
            }
        }
    }

    const frame_index = try self.allocFrameIndex(.initSpill(ty, zcu));
    return .{ .load_frame = .{ .index = frame_index } };
}

fn regClassForType(self: *CodeGen, ty: Type) Register.Class {
    const pt = self.pt;
    const zcu = pt.zcu;
    return switch (ty.zigTypeTag(zcu)) {
        .float => switch (ty.floatBits(self.target.*)) {
            80 => .x87,
            else => .sse,
        },
        .vector => switch (ty.childType(zcu).toIntern()) {
            .bool_type => .general_purpose,
            else => .sse,
        },
        else => .general_purpose,
    };
}

fn regSetForRegClass(rc: Register.Class) RegisterManager.RegisterBitSet {
    return switch (rc) {
        .general_purpose => abi.RegisterClass.gp,
        .segment, .ip, .cr, .dr => unreachable,
        .x87 => abi.RegisterClass.x87,
        .mmx => @panic("TODO"),
        .sse => abi.RegisterClass.sse,
    };
}

fn regSetForType(self: *CodeGen, ty: Type) RegisterManager.RegisterBitSet {
    return regSetForRegClass(self.regClassForType(ty));
}

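/// Returns the widest usable vector register size in bytes: 32 with AVX2
/// (AVX for floats), 16 with SSE, and 8 otherwise.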
fn vectorSize(cg: *CodeGen, kind: enum { int, float }) u6 {
    return if (cg.hasFeature(switch (kind) {
        .int => .avx2,
        .float => .avx,
    })) 32 else if (cg.hasFeature(.sse)) 16 else 8;
}

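/// Returns a vector type filling one vector register, with `ty`'s scalar as
/// the element type, for processing `ty` one register-sized limb at a time.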
fn limbType(cg: *CodeGen, ty: Type) !Type {
    const pt = cg.pt;
    const zcu = pt.zcu;
    const vector_size = cg.vectorSize(if (ty.isRuntimeFloat()) .float else .int);
    const scalar_ty, const scalar_size = scalar: {
        const scalar_ty = ty.scalarType(zcu);
        const scalar_size = scalar_ty.abiSize(zcu);
        if (scalar_size <= vector_size) break :scalar .{ scalar_ty, scalar_size };
        // assumption: a limb scalar always fits in a vector register
        unreachable;
    };
    return pt.vectorType(.{
        .len = @intCast(@divExact(vector_size, scalar_size)),
        .child = scalar_ty.toIntern(),
    });
}

const State = struct {
    registers: RegisterManager.TrackedRegisters,
    reg_tracking: [RegisterManager.RegisterBitSet.bit_length]InstTracking,
    free_registers: RegisterManager.RegisterBitSet,
    inst_tracking_len: u32,
    scope_generation: u32,
};

fn initRetroactiveState(self: *CodeGen) State {
    var state: State = undefined;
    state.inst_tracking_len = @intCast(self.inst_tracking.count());
    state.scope_generation = self.scope_generation;
    return state;
}

fn saveRetroactiveState(self: *CodeGen, state: *State) !void {
    try self.spillEflagsIfOccupied();
    const free_registers = self.register_manager.free_registers;
    var it = free_registers.iterator(.{ .kind = .unset });
    while (it.next()) |index| {
        const tracked_inst = self.register_manager.registers[index];
        state.registers[index] = tracked_inst;
        state.reg_tracking[index] = self.inst_tracking.get(tracked_inst).?;
    }
    state.free_registers = free_registers;
}

fn saveState(self: *CodeGen) !State {
    var state = self.initRetroactiveState();
    try self.saveRetroactiveState(&state);
    return state;
}

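/// Rewinds register and instruction tracking to a previously captured
/// `State`, optionally emitting the spills and moves needed to make the
/// machine state match, processing `deaths`, and closing the scope.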
fn restoreState(self: *CodeGen, state: State, deaths: []const Air.Inst.Index, comptime opts: struct {
    emit_instructions: bool,
    update_tracking: bool,
    resurrect: bool,
    close_scope: bool,
}) !void {
    if (opts.close_scope) {
        for (
            self.inst_tracking.keys()[state.inst_tracking_len..],
            self.inst_tracking.values()[state.inst_tracking_len..],
        ) |inst, *tracking| try tracking.die(self, inst);
        self.inst_tracking.shrinkRetainingCapacity(state.inst_tracking_len);
    }

    if (opts.resurrect) for (
        self.inst_tracking.keys()[Temp.Index.max..state.inst_tracking_len],
        self.inst_tracking.values()[Temp.Index.max..state.inst_tracking_len],
    ) |inst, *tracking| try tracking.resurrect(self, inst, state.scope_generation);
    for (deaths) |death| try self.processDeath(death);

    const ExpectedContents = [@typeInfo(RegisterManager.TrackedRegisters).array.len]RegisterLock;
    var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) =
        if (opts.update_tracking) {} else std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);

    var reg_locks = if (opts.update_tracking) {} else try std.ArrayList(RegisterLock).initCapacity(
        stack.get(),
        @typeInfo(ExpectedContents).array.len,
    );
    defer if (!opts.update_tracking) {
        for (reg_locks.items) |lock| self.register_manager.unlockReg(lock);
        reg_locks.deinit();
    };

    for (
        0..,
        self.register_manager.registers,
        state.registers,
        state.reg_tracking,
    ) |reg_i, current_slot, target_slot, reg_tracking| {
        const reg_index: RegisterManager.TrackedIndex = @intCast(reg_i);
        const current_maybe_inst = if (self.register_manager.isRegIndexFree(reg_index)) null else current_slot;
        const target_maybe_inst = if (state.free_registers.isSet(reg_index)) null else target_slot;
        if (std.debug.runtime_safety) if (target_maybe_inst) |target_inst|
            assert(self.inst_tracking.getIndex(target_inst).? < state.inst_tracking_len);
        if (opts.emit_instructions and current_maybe_inst != target_maybe_inst) {
            if (current_maybe_inst) |current_inst|
                try self.inst_tracking.getPtr(current_inst).?.spill(self, current_inst);
            if (target_maybe_inst) |target_inst|
                try self.inst_tracking.getPtr(target_inst).?.materialize(self, target_inst, reg_tracking);
        }
        if (opts.update_tracking) {
            if (current_maybe_inst) |current_inst| {
                try self.inst_tracking.getPtr(current_inst).?.trackSpill(self, current_inst);
                self.register_manager.freeRegIndex(reg_index);
            }
            if (target_maybe_inst) |target_inst| {
                self.register_manager.getRegIndexAssumeFree(reg_index, target_inst);
                self.inst_tracking.getPtr(target_inst).?.trackMaterialize(target_inst, reg_tracking);
            }
        } else if (target_maybe_inst) |_|
            try reg_locks.append(self.register_manager.lockRegIndexAssumeUnused(reg_index));
    }
    if (opts.emit_instructions) if (self.eflags_inst) |inst|
        try self.inst_tracking.getPtr(inst).?.spill(self, inst);
    if (opts.update_tracking) if (self.eflags_inst) |inst| {
        self.eflags_inst = null;
        try self.inst_tracking.getPtr(inst).?.trackSpill(self, inst);
    };

    if (opts.update_tracking and std.debug.runtime_safety) {
        assert(self.eflags_inst == null);
        assert(self.register_manager.free_registers.eql(state.free_registers));
        var used_reg_it = state.free_registers.iterator(.{ .kind = .unset });
        while (used_reg_it.next()) |index|
            assert(self.register_manager.registers[index] == state.registers[index]);
    }
}

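/// Spills `inst` out of `reg` to memory so that the register can be reused;
/// asserts that the register actually belongs to that instruction.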
pub fn spillInstruction(self: *CodeGen, reg: Register, inst: Air.Inst.Index) !void {
    const tracking = self.inst_tracking.getPtr(inst) orelse return;
    for (tracking.getRegs()) |tracked_reg| {
        if (tracked_reg.id() == reg.id()) break;
    } else unreachable; // spilled reg not tracked with spilled instruction
    try tracking.spill(self, inst);
    try tracking.trackSpill(self, inst);
}

pub fn spillEflagsIfOccupied(self: *CodeGen) !void {
    if (self.eflags_inst) |inst| {
        self.eflags_inst = null;
        const tracking = self.inst_tracking.getPtr(inst).?;
        assert(tracking.getCondition() != null);
        try tracking.spill(self, inst);
        try tracking.trackSpill(self, inst);
    }
}

pub fn spillCallerPreservedRegs(self: *CodeGen, cc: std.builtin.CallingConvention.Tag, ignore_reg: Register) !void {
    switch (cc) {
        inline .auto, .x86_64_sysv, .x86_64_win => |tag| inline for (comptime abi.getCallerPreservedRegs(tag)) |reg|
            if (reg != ignore_reg) try self.register_manager.getKnownReg(reg, null),
        else => unreachable,
    }
}

pub fn spillRegisters(self: *CodeGen, comptime registers: []const Register) !void {
    inline for (registers) |reg| try self.register_manager.getKnownReg(reg, null);
}

/// Copies a value to a register without tracking the register. The register is not considered
/// allocated. A second call to `copyToTmpRegister` may return the same register.
/// This can have a side effect of spilling instructions to the stack to free up a register.
fn copyToTmpRegister(self: *CodeGen, ty: Type, mcv: MCValue) !Register {
    const reg = try self.register_manager.allocReg(null, self.regSetForType(ty));
    try self.genSetReg(reg, ty, mcv, .{});
    return reg;
}

/// Allocates a new register and copies `mcv` into it.
/// `reg_owner` is the instruction that gets associated with the register in the register table.
/// This can have a side effect of spilling instructions to the stack to free up a register.
/// WARNING make sure that the allocated register matches the returned MCValue from an instruction!
fn copyToRegisterWithInstTracking(
    self: *CodeGen,
    reg_owner: Air.Inst.Index,
    ty: Type,
    mcv: MCValue,
) !MCValue {
    const reg: Register = try self.register_manager.allocReg(reg_owner, self.regSetForType(ty));
    try self.genSetReg(reg, ty, mcv, .{});
    return MCValue{ .register = reg };
}

fn airAlloc(self: *CodeGen, inst: Air.Inst.Index) !void {
    const result = MCValue{ .lea_frame = .{ .index = try self.allocMemPtr(inst) } };
    return self.finishAir(inst, result, .{ .none, .none, .none });
}

fn airRetPtr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const result: MCValue = switch (self.ret_mcv.long) {
        else => unreachable,
        .none => .{ .lea_frame = .{ .index = try self.allocMemPtr(inst) } },
        .load_frame => .{ .register_offset = .{
            .reg = (try self.copyToRegisterWithInstTracking(
                inst,
                self.typeOfIndex(inst),
                self.ret_mcv.long,
            )).register,
            .off = self.ret_mcv.short.indirect.off,
        } },
    };
    return self.finishAir(inst, result, .{ .none, .none, .none });
}

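/// Lowers a float truncation. Narrowings with direct hardware support use
/// `cvtsd2ss`/`cvtps2ph`; everything else is routed to the matching
/// `__trunc?f?f2` compiler-rt call.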
fn airFptrunc(self: *CodeGen, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const dst_ty = self.typeOfIndex(inst);
    const dst_bits = dst_ty.floatBits(self.target.*);
    const src_ty = self.typeOf(ty_op.operand);
    const src_bits = src_ty.floatBits(self.target.*);

    const result = result: {
        if (switch (dst_bits) {
            16 => switch (src_bits) {
                32 => !self.hasFeature(.f16c),
                64, 80, 128 => true,
                else => unreachable,
            },
            32 => switch (src_bits) {
                64 => false,
                80, 128 => true,
                else => unreachable,
            },
            64 => switch (src_bits) {
                80, 128 => true,
                else => unreachable,
            },
            80 => switch (src_bits) {
                128 => true,
                else => unreachable,
            },
            else => unreachable,
        }) {
            var callee_buf: ["__trunc?f?f2".len]u8 = undefined;
            break :result try self.genCall(.{ .lib = .{
                .return_type = self.floatCompilerRtAbiType(dst_ty, src_ty).toIntern(),
                .param_types = &.{self.floatCompilerRtAbiType(src_ty, dst_ty).toIntern()},
                .callee = std.fmt.bufPrint(&callee_buf, "__trunc{c}f{c}f2", .{
                    floatCompilerRtAbiName(src_bits),
                    floatCompilerRtAbiName(dst_bits),
                }) catch unreachable,
            } }, &.{src_ty}, &.{.{ .air_ref = ty_op.operand }}, .{});
        }

        const src_mcv = try self.resolveInst(ty_op.operand);
        const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
            src_mcv
        else
            try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
        const dst_reg = dst_mcv.getReg().?.to128();
        const dst_lock = self.register_manager.lockReg(dst_reg);
        defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);

        if (dst_bits == 16) {
            assert(self.hasFeature(.f16c));
            switch (src_bits) {
                32 => {
                    const mat_src_reg = if (src_mcv.isRegister())
                        src_mcv.getReg().?
                    else
                        try self.copyToTmpRegister(src_ty, src_mcv);
                    try self.asmRegisterRegisterImmediate(
                        .{ .v_, .cvtps2ph },
                        dst_reg,
                        mat_src_reg.to128(),
                        bits.RoundMode.imm(.{}),
                    );
                },
                else => unreachable,
            }
        } else {
            assert(src_bits == 64 and dst_bits == 32);
            if (self.hasFeature(.avx)) if (src_mcv.isBase()) try self.asmRegisterRegisterMemory(
                .{ .v_ss, .cvtsd2 },
                dst_reg,
                dst_reg,
                try src_mcv.mem(self, .{ .size = .qword }),
            ) else try self.asmRegisterRegisterRegister(
                .{ .v_ss, .cvtsd2 },
                dst_reg,
                dst_reg,
                (if (src_mcv.isRegister())
                    src_mcv.getReg().?
                else
                    try self.copyToTmpRegister(src_ty, src_mcv)).to128(),
            ) else if (src_mcv.isBase()) try self.asmRegisterMemory(
                .{ ._ss, .cvtsd2 },
                dst_reg,
                try src_mcv.mem(self, .{ .size = .qword }),
            ) else try self.asmRegisterRegister(
                .{ ._ss, .cvtsd2 },
                dst_reg,
                (if (src_mcv.isRegister())
                    src_mcv.getReg().?
                else
                    try self.copyToTmpRegister(src_ty, src_mcv)).to128(),
            );
        }
        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

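/// Lowers a float widening. f16 sources require F16C (`vcvtph2ps`); f32 to
/// f64 uses `cvtss2sd`/`cvtps2pd`; unsupported combinations call the
/// `__extend?f?f2` compiler-rt routines.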
fn airFpext(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const dst_ty = self.typeOfIndex(inst);
    const dst_scalar_ty = dst_ty.scalarType(zcu);
    const dst_bits = dst_scalar_ty.floatBits(self.target.*);
    const src_ty = self.typeOf(ty_op.operand);
    const src_scalar_ty = src_ty.scalarType(zcu);
    const src_bits = src_scalar_ty.floatBits(self.target.*);

    const result = result: {
        if (switch (src_bits) {
            16 => switch (dst_bits) {
                32, 64 => !self.hasFeature(.f16c),
                80, 128 => true,
                else => unreachable,
            },
            32 => switch (dst_bits) {
                64 => false,
                80, 128 => true,
                else => unreachable,
            },
            64 => switch (dst_bits) {
                80, 128 => true,
                else => unreachable,
            },
            80 => switch (dst_bits) {
                128 => true,
                else => unreachable,
            },
            else => unreachable,
        }) {
            if (dst_ty.isVector(zcu)) break :result null;
            var callee_buf: ["__extend?f?f2".len]u8 = undefined;
            break :result try self.genCall(.{ .lib = .{
                .return_type = self.floatCompilerRtAbiType(dst_scalar_ty, src_scalar_ty).toIntern(),
                .param_types = &.{self.floatCompilerRtAbiType(src_scalar_ty, dst_scalar_ty).toIntern()},
                .callee = std.fmt.bufPrint(&callee_buf, "__extend{c}f{c}f2", .{
                    floatCompilerRtAbiName(src_bits),
                    floatCompilerRtAbiName(dst_bits),
                }) catch unreachable,
            } }, &.{src_scalar_ty}, &.{.{ .air_ref = ty_op.operand }}, .{});
        }

        const src_abi_size: u32 = @intCast(src_ty.abiSize(zcu));
        const src_mcv = try self.resolveInst(ty_op.operand);
        const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
            src_mcv
        else
            try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
        const dst_reg = dst_mcv.getReg().?;
        const dst_alias = registerAlias(dst_reg, @intCast(@max(dst_ty.abiSize(zcu), 16)));
        const dst_lock = self.register_manager.lockReg(dst_reg);
        defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);

        const vec_len = if (dst_ty.isVector(zcu)) dst_ty.vectorLen(zcu) else 1;
        if (src_bits == 16) {
            assert(self.hasFeature(.f16c));
            const mat_src_reg = if (src_mcv.isRegister())
                src_mcv.getReg().?
            else
                try self.copyToTmpRegister(src_ty, src_mcv);
            try self.asmRegisterRegister(
                .{ .v_ps, .cvtph2 },
                dst_alias,
                registerAlias(mat_src_reg, src_abi_size),
            );
            switch (dst_bits) {
                32 => {},
                64 => try self.asmRegisterRegisterRegister(
                    .{ .v_sd, .cvtss2 },
                    dst_alias,
                    dst_alias,
                    dst_alias,
                ),
                else => unreachable,
            }
        } else {
            assert(src_bits == 32 and dst_bits == 64);
            if (self.hasFeature(.avx)) switch (vec_len) {
                1 => if (src_mcv.isBase()) try self.asmRegisterRegisterMemory(
                    .{ .v_sd, .cvtss2 },
                    dst_alias,
                    dst_alias,
                    try src_mcv.mem(self, .{ .size = self.memSize(src_ty) }),
                ) else try self.asmRegisterRegisterRegister(
                    .{ .v_sd, .cvtss2 },
                    dst_alias,
                    dst_alias,
                    registerAlias(if (src_mcv.isRegister())
                        src_mcv.getReg().?
                    else
                        try self.copyToTmpRegister(src_ty, src_mcv), src_abi_size),
                ),
                2...4 => if (src_mcv.isBase()) try self.asmRegisterMemory(
                    .{ .v_pd, .cvtps2 },
                    dst_alias,
                    try src_mcv.mem(self, .{ .size = self.memSize(src_ty) }),
                ) else try self.asmRegisterRegister(
                    .{ .v_pd, .cvtps2 },
                    dst_alias,
                    registerAlias(if (src_mcv.isRegister())
                        src_mcv.getReg().?
                    else
                        try self.copyToTmpRegister(src_ty, src_mcv), src_abi_size),
                ),
                else => break :result null,
            } else if (src_mcv.isBase()) try self.asmRegisterMemory(
                switch (vec_len) {
                    1 => .{ ._sd, .cvtss2 },
                    2 => .{ ._pd, .cvtps2 },
                    else => break :result null,
                },
                dst_alias,
                try src_mcv.mem(self, .{ .size = self.memSize(src_ty) }),
            ) else try self.asmRegisterRegister(
                switch (vec_len) {
                    1 => .{ ._sd, .cvtss2 },
                    2 => .{ ._pd, .cvtps2 },
                    else => break :result null,
                },
                dst_alias,
                registerAlias(if (src_mcv.isRegister())
                    src_mcv.getReg().?
                else
                    try self.copyToTmpRegister(src_ty, src_mcv), src_abi_size),
            );
        }
        break :result dst_mcv;
    } orelse return self.fail("TODO implement airFpext from {} to {}", .{
        src_ty.fmt(pt), dst_ty.fmt(pt),
    });
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

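/// Lowers an integer cast. Vector casts map to pack/unpack or `pmov[sz]x`
/// instructions where possible; scalar casts reuse the operand when the limb
/// counts match, otherwise copy, truncate the high limb, and sign- or
/// zero-extend the remaining limbs.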
fn airIntCast(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const src_ty = self.typeOf(ty_op.operand);
    const dst_ty = self.typeOfIndex(inst);

    const result = @as(?MCValue, result: {
        const src_abi_size: u31 = @intCast(src_ty.abiSize(zcu));
        const dst_abi_size: u31 = @intCast(dst_ty.abiSize(zcu));

        const src_int_info = src_ty.intInfo(zcu);
        const dst_int_info = dst_ty.intInfo(zcu);
        const extend = switch (src_int_info.signedness) {
            .signed => dst_int_info,
            .unsigned => src_int_info,
        }.signedness;

        const src_mcv = try self.resolveInst(ty_op.operand);
        if (dst_ty.isVector(zcu)) {
            const max_abi_size = @max(dst_abi_size, src_abi_size);
            const has_avx = self.hasFeature(.avx);

            const dst_elem_abi_size = dst_ty.childType(zcu).abiSize(zcu);
            const src_elem_abi_size = src_ty.childType(zcu).abiSize(zcu);
            switch (std.math.order(dst_elem_abi_size, src_elem_abi_size)) {
                .lt => {
                    if (max_abi_size > self.vectorSize(.int)) break :result null;
                    const mir_tag: Mir.Inst.FixedTag = switch (dst_elem_abi_size) {
                        else => break :result null,
                        1 => switch (src_elem_abi_size) {
                            else => break :result null,
                            2 => switch (dst_int_info.signedness) {
                                .signed => if (has_avx) .{ .vp_b, .ackssw } else .{ .p_b, .ackssw },
                                .unsigned => if (has_avx) .{ .vp_b, .ackusw } else .{ .p_b, .ackusw },
                            },
                        },
                        2 => switch (src_elem_abi_size) {
                            else => break :result null,
                            4 => switch (dst_int_info.signedness) {
                                .signed => if (has_avx) .{ .vp_w, .ackssd } else .{ .p_w, .ackssd },
                                .unsigned => if (has_avx)
                                    .{ .vp_w, .ackusd }
                                else if (self.hasFeature(.sse4_1))
                                    .{ .p_w, .ackusd }
                                else
                                    break :result null,
                            },
                        },
                    };

                    const dst_mcv: MCValue = if (src_mcv.isRegister() and
                        self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
                        src_mcv
                    else if (has_avx and src_mcv.isRegister())
                        .{ .register = try self.register_manager.allocReg(inst, abi.RegisterClass.sse) }
                    else
                        try self.copyToRegisterWithInstTracking(inst, src_ty, src_mcv);
                    const dst_reg = dst_mcv.getReg().?;
                    const dst_alias = registerAlias(dst_reg, dst_abi_size);

                    if (has_avx) try self.asmRegisterRegisterRegister(
                        mir_tag,
                        dst_alias,
                        registerAlias(if (src_mcv.isRegister())
                            src_mcv.getReg().?
                        else
                            dst_reg, src_abi_size),
                        dst_alias,
                    ) else try self.asmRegisterRegister(
                        mir_tag,
                        dst_alias,
                        dst_alias,
                    );
                    break :result dst_mcv;
                },
                .eq => if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
                    break :result src_mcv
                else {
                    const dst_mcv = try self.allocRegOrMem(inst, true);
                    try self.genCopy(dst_ty, dst_mcv, src_mcv, .{});
                    break :result dst_mcv;
                },
                .gt => if (self.hasFeature(.sse4_1)) {
                    if (max_abi_size > self.vectorSize(.int)) break :result null;
                    const mir_tag: Mir.Inst.FixedTag = .{ switch (dst_elem_abi_size) {
                        else => break :result null,
                        2 => if (has_avx) .vp_w else .p_w,
                        4 => if (has_avx) .vp_d else .p_d,
                        8 => if (has_avx) .vp_q else .p_q,
                    }, switch (src_elem_abi_size) {
                        else => break :result null,
                        1 => switch (extend) {
                            .signed => .movsxb,
                            .unsigned => .movzxb,
                        },
                        2 => switch (extend) {
                            .signed => .movsxw,
                            .unsigned => .movzxw,
                        },
                        4 => switch (extend) {
                            .signed => .movsxd,
                            .unsigned => .movzxd,
                        },
                    } };

                    const dst_mcv: MCValue = if (src_mcv.isRegister() and
                        self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
                        src_mcv
                    else
                        .{ .register = try self.register_manager.allocReg(inst, abi.RegisterClass.sse) };
                    const dst_reg = dst_mcv.getReg().?;
                    const dst_alias = registerAlias(dst_reg, dst_abi_size);

                    if (src_mcv.isBase()) try self.asmRegisterMemory(
                        mir_tag,
                        dst_alias,
                        try src_mcv.mem(self, .{ .size = self.memSize(src_ty) }),
                    ) else try self.asmRegisterRegister(
                        mir_tag,
                        dst_alias,
                        registerAlias(if (src_mcv.isRegister())
                            src_mcv.getReg().?
                        else
                            try self.copyToTmpRegister(src_ty, src_mcv), src_abi_size),
                    );
                    break :result dst_mcv;
                } else {
                    const mir_tag: Mir.Inst.FixedTag = switch (dst_elem_abi_size) {
                        else => break :result null,
                        2 => switch (src_elem_abi_size) {
                            else => break :result null,
                            1 => .{ .p_, .unpcklbw },
                        },
                        4 => switch (src_elem_abi_size) {
                            else => break :result null,
                            2 => .{ .p_, .unpcklwd },
                        },
                        8 => switch (src_elem_abi_size) {
                            else => break :result null,
                            2 => .{ .p_, .unpckldq },
                        },
                    };

                    const dst_mcv: MCValue = if (src_mcv.isRegister() and
                        self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
                        src_mcv
                    else
                        try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
                    const dst_reg = dst_mcv.getReg().?;

                    const ext_reg = try self.register_manager.allocReg(null, abi.RegisterClass.sse);
                    const ext_alias = registerAlias(ext_reg, src_abi_size);
                    const ext_lock = self.register_manager.lockRegAssumeUnused(ext_reg);
                    defer self.register_manager.unlockReg(ext_lock);

                    try self.asmRegisterRegister(.{ .p_, .xor }, ext_alias, ext_alias);
                    switch (extend) {
                        .signed => try self.asmRegisterRegister(
                            .{ switch (src_elem_abi_size) {
                                else => unreachable,
                                1 => .p_b,
                                2 => .p_w,
                                4 => .p_d,
                            }, .cmpgt },
                            ext_alias,
                            registerAlias(dst_reg, src_abi_size),
                        ),
                        .unsigned => {},
                    }
                    try self.asmRegisterRegister(
                        mir_tag,
                        registerAlias(dst_reg, dst_abi_size),
                        registerAlias(ext_reg, dst_abi_size),
                    );
                    break :result dst_mcv;
                },
            } // every case above breaks out of :result
        }

        const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty;

        const src_storage_bits: u16 = switch (src_mcv) {
            .register, .register_offset => 64,
            .register_pair => 128,
            .load_frame => |frame_addr| @intCast(self.getFrameAddrSize(frame_addr) * 8),
            else => src_int_info.bits,
        };

        const dst_mcv = if ((if (src_mcv.getReg()) |src_reg| src_reg.class() == .general_purpose else src_abi_size > 8) and
            dst_int_info.bits <= src_storage_bits and
            std.math.divCeil(u16, dst_int_info.bits, 64) catch unreachable ==
                std.math.divCeil(u32, src_storage_bits, 64) catch unreachable and
            self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: {
            const dst_mcv = try self.allocRegOrMem(inst, true);
            try self.genCopy(min_ty, dst_mcv, src_mcv, .{});
            break :dst dst_mcv;
        };

        if (dst_int_info.bits <= src_int_info.bits) break :result if (dst_mcv.isRegister())
            .{ .register = registerAlias(dst_mcv.getReg().?, dst_abi_size) }
        else
            dst_mcv;

        if (dst_mcv.isRegister()) {
            try self.truncateRegister(src_ty, dst_mcv.getReg().?);
            break :result .{ .register = registerAlias(dst_mcv.getReg().?, dst_abi_size) };
        }

        const src_limbs_len = std.math.divCeil(u31, src_abi_size, 8) catch unreachable;
        const dst_limbs_len = @divExact(dst_abi_size, 8);

        const high_mcv: MCValue = if (dst_mcv.isBase())
            dst_mcv.address().offset((src_limbs_len - 1) * 8).deref()
        else
            .{ .register = dst_mcv.register_pair[1] };
        const high_reg = if (high_mcv.isRegister())
            high_mcv.getReg().?
        else
            try self.copyToTmpRegister(switch (src_int_info.signedness) {
                .signed => .isize,
                .unsigned => .usize,
            }, high_mcv);
        const high_lock = self.register_manager.lockRegAssumeUnused(high_reg);
        defer self.register_manager.unlockReg(high_lock);

        const high_bits = src_int_info.bits % 64;
        if (high_bits > 0) {
            try self.truncateRegister(src_ty, high_reg);
            const high_ty: Type = if (dst_int_info.bits >= 64) .usize else dst_ty;
            try self.genCopy(high_ty, high_mcv, .{ .register = high_reg }, .{});
        }

        if (dst_limbs_len > src_limbs_len) try self.genInlineMemset(
            dst_mcv.address().offset(src_limbs_len * 8),
            switch (extend) {
                .signed => extend: {
                    const extend_mcv = MCValue{ .register = high_reg };
                    try self.genShiftBinOpMir(.{ ._r, .sa }, .isize, extend_mcv, .u8, .{ .immediate = 63 });
                    break :extend extend_mcv;
                },
                .unsigned => .{ .immediate = 0 },
            },
            .{ .immediate = (dst_limbs_len - src_limbs_len) * 8 },
            .{},
        );

        break :result dst_mcv;
    }) orelse return self.fail("TODO implement airIntCast from {} to {}", .{
        src_ty.fmt(pt), dst_ty.fmt(pt),
    });
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

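/// Lowers an integer truncation. Vector truncations mask and pack lanes;
/// scalar results only need their extra high bits cleared when the
/// destination is not a power-of-two size.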
fn airTrunc(self: *CodeGen, inst: Air.Inst.Index) !void {
|
|
const pt = self.pt;
|
|
const zcu = pt.zcu;
|
|
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
|
|
|
|
const dst_ty = self.typeOfIndex(inst);
|
|
const dst_abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
|
|
const src_ty = self.typeOf(ty_op.operand);
|
|
const src_abi_size: u32 = @intCast(src_ty.abiSize(zcu));
|
|
|
|
const result = result: {
|
|
const src_mcv = try self.resolveInst(ty_op.operand);
|
|
const src_lock =
|
|
if (src_mcv.getReg()) |reg| self.register_manager.lockRegAssumeUnused(reg) else null;
|
|
defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
const dst_mcv = if (src_mcv.isRegister() and src_mcv.getReg().?.class() == self.regClassForType(dst_ty) and
|
|
self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
|
|
src_mcv
|
|
else if (dst_abi_size <= 8)
|
|
try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv)
|
|
else if (dst_abi_size <= 16 and !dst_ty.isVector(zcu)) dst: {
|
|
const dst_regs =
|
|
try self.register_manager.allocRegs(2, .{ inst, inst }, abi.RegisterClass.gp);
|
|
const dst_mcv: MCValue = .{ .register_pair = dst_regs };
|
|
const dst_locks = self.register_manager.lockRegsAssumeUnused(2, dst_regs);
|
|
defer for (dst_locks) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
try self.genCopy(dst_ty, dst_mcv, src_mcv, .{});
|
|
break :dst dst_mcv;
|
|
} else dst: {
|
|
const dst_mcv = try self.allocRegOrMemAdvanced(src_ty, inst, true);
|
|
try self.genCopy(src_ty, dst_mcv, src_mcv, .{});
|
|
break :dst dst_mcv;
|
|
};
|
|
|
|
if (dst_ty.zigTypeTag(zcu) == .vector) {
|
|
assert(src_ty.zigTypeTag(zcu) == .vector and dst_ty.vectorLen(zcu) == src_ty.vectorLen(zcu));
|
|
const dst_elem_ty = dst_ty.childType(zcu);
|
|
const dst_elem_abi_size: u32 = @intCast(dst_elem_ty.abiSize(zcu));
|
|
const src_elem_ty = src_ty.childType(zcu);
|
|
const src_elem_abi_size: u32 = @intCast(src_elem_ty.abiSize(zcu));
|
|
|
|
const mir_tag = @as(?Mir.Inst.FixedTag, switch (dst_elem_abi_size) {
|
|
1 => switch (src_elem_abi_size) {
|
|
2 => switch (dst_ty.vectorLen(zcu)) {
|
|
1...8 => if (self.hasFeature(.avx)) .{ .vp_b, .ackusw } else .{ .p_b, .ackusw },
|
|
9...16 => if (self.hasFeature(.avx2)) .{ .vp_b, .ackusw } else null,
|
|
else => null,
|
|
},
|
|
else => null,
|
|
},
|
|
2 => switch (src_elem_abi_size) {
|
|
4 => switch (dst_ty.vectorLen(zcu)) {
|
|
1...4 => if (self.hasFeature(.avx))
|
|
.{ .vp_w, .ackusd }
|
|
else if (self.hasFeature(.sse4_1))
|
|
.{ .p_w, .ackusd }
|
|
else
|
|
null,
|
|
5...8 => if (self.hasFeature(.avx2)) .{ .vp_w, .ackusd } else null,
|
|
else => null,
|
|
},
|
|
else => null,
|
|
},
|
|
else => null,
|
|
}) orelse return self.fail("TODO implement airTrunc for {}", .{dst_ty.fmt(pt)});
|
|
|
|
const dst_info = dst_elem_ty.intInfo(zcu);
|
|
const src_info = src_elem_ty.intInfo(zcu);
|
|
|
|
const mask_val = try pt.intValue(src_elem_ty, @as(u64, std.math.maxInt(u64)) >> @intCast(64 - dst_info.bits));
|
|
|
|
const splat_ty = try pt.vectorType(.{
|
|
.len = @intCast(@divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)),
|
|
.child = src_elem_ty.ip_index,
|
|
});
|
|
const splat_abi_size: u32 = @intCast(splat_ty.abiSize(zcu));
|
|
|
|
const splat_val = try pt.intern(.{ .aggregate = .{
|
|
.ty = splat_ty.ip_index,
|
|
.storage = .{ .repeated_elem = mask_val.ip_index },
|
|
} });
|
|
|
|
const splat_mcv = try self.genTypedValue(.fromInterned(splat_val));
|
|
const splat_addr_mcv: MCValue = switch (splat_mcv) {
|
|
.memory, .indirect, .load_frame => splat_mcv.address(),
|
|
else => .{ .register = try self.copyToTmpRegister(.usize, splat_mcv.address()) },
|
|
};
|
|
|
|
const dst_reg = dst_mcv.getReg().?;
|
|
const dst_alias = registerAlias(dst_reg, src_abi_size);
|
|
if (self.hasFeature(.avx)) {
|
|
try self.asmRegisterRegisterMemory(
|
|
.{ .vp_, .@"and" },
|
|
dst_alias,
|
|
dst_alias,
|
|
try splat_addr_mcv.deref().mem(self, .{ .size = .fromSize(splat_abi_size) }),
|
|
);
|
|
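// A 256-bit source holds both pack inputs: extract the high 128-bit lane and
// pack it against the low lane. For a 128-bit source, packing the register
// with itself leaves the narrowed result in the low half.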
if (src_abi_size > 16) {
|
|
const temp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.sse);
|
|
const temp_lock = self.register_manager.lockRegAssumeUnused(temp_reg);
|
|
defer self.register_manager.unlockReg(temp_lock);
|
|
|
|
try self.asmRegisterRegisterImmediate(
|
|
.{ if (self.hasFeature(.avx2)) .v_i128 else .v_f128, .extract },
|
|
registerAlias(temp_reg, dst_abi_size),
|
|
dst_alias,
|
|
.u(1),
|
|
);
|
|
try self.asmRegisterRegisterRegister(
|
|
mir_tag,
|
|
registerAlias(dst_reg, dst_abi_size),
|
|
registerAlias(dst_reg, dst_abi_size),
|
|
registerAlias(temp_reg, dst_abi_size),
|
|
);
|
|
} else try self.asmRegisterRegisterRegister(mir_tag, dst_alias, dst_alias, dst_alias);
|
|
} else {
|
|
try self.asmRegisterMemory(
|
|
.{ .p_, .@"and" },
|
|
dst_alias,
|
|
try splat_addr_mcv.deref().mem(self, .{ .size = .fromSize(splat_abi_size) }),
|
|
);
|
|
try self.asmRegisterRegister(mir_tag, dst_alias, dst_alias);
|
|
}
|
|
break :result dst_mcv;
|
|
}
|
|
|
|
// when truncating a `u16` to `u5`, for example, those top 3 bits in the result
// have to be removed. this only happens if the dst is not a power-of-two size.
|
|
if (dst_abi_size <= 8) {
|
|
if (self.regExtraBits(dst_ty) > 0) {
|
|
try self.truncateRegister(dst_ty, dst_mcv.register.to64());
|
|
}
|
|
} else if (dst_abi_size <= 16) {
|
|
const dst_info = dst_ty.intInfo(zcu);
|
|
const high_ty = try pt.intType(dst_info.signedness, dst_info.bits - 64);
|
|
if (self.regExtraBits(high_ty) > 0) {
|
|
try self.truncateRegister(high_ty, dst_mcv.register_pair[1].to64());
|
|
}
|
|
}
|
|
|
|
break :result dst_mcv;
|
|
};
|
|
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
|
|
}
|
|
|
|
fn airIntFromBool(self: *CodeGen, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const ty = self.typeOfIndex(inst);

const operand = try self.resolveInst(un_op);
const dst_mcv = if (self.reuseOperand(inst, un_op, 0, operand))
operand
else
try self.copyToRegisterWithInstTracking(inst, ty, operand);

return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none });
}
|
|
|
|
fn airSlice(self: *CodeGen, inst: Air.Inst.Index) !void {
const zcu = self.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;

const slice_ty = self.typeOfIndex(inst);
const frame_index = try self.allocFrameIndex(.initSpill(slice_ty, zcu));

const ptr_ty = self.typeOf(bin_op.lhs);
try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, .{ .air_ref = bin_op.lhs }, .{});

const len_ty = self.typeOf(bin_op.rhs);
try self.genSetMem(
.{ .frame = frame_index },
@intCast(ptr_ty.abiSize(zcu)),
len_ty,
.{ .air_ref = bin_op.rhs },
.{},
);

const result = MCValue{ .load_frame = .{ .index = frame_index } };
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
|
|
|
|
fn airUnOp(self: *CodeGen, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const dst_mcv = try self.genUnOp(inst, tag, ty_op.operand);
return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
|
|
|
|
fn airBinOp(self: *CodeGen, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
|
|
const pt = self.pt;
|
|
const zcu = pt.zcu;
|
|
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
|
|
const dst_mcv = try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs);
|
|
|
|
const dst_ty = self.typeOfIndex(inst);
|
|
if (dst_ty.isAbiInt(zcu)) {
|
|
const abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
|
|
const bit_size: u32 = @intCast(dst_ty.bitSize(zcu));
|
|
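// An integer that doesn't fill its ABI size must be wrapped to its bit width:
// re-truncate the result in the register directly, or go through a temporary
// register for the top limb of a memory operand.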
if (abi_size * 8 > bit_size) {
|
|
const dst_lock = switch (dst_mcv) {
|
|
.register => |dst_reg| self.register_manager.lockRegAssumeUnused(dst_reg),
|
|
else => null,
|
|
};
|
|
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
if (dst_mcv.isRegister()) {
|
|
try self.truncateRegister(dst_ty, dst_mcv.getReg().?);
|
|
} else {
|
|
const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
|
|
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
|
|
defer self.register_manager.unlockReg(tmp_lock);
|
|
|
|
const hi_ty = try pt.intType(.unsigned, @intCast((dst_ty.bitSize(zcu) - 1) % 64 + 1));
|
|
const hi_mcv = dst_mcv.address().offset(@intCast(bit_size / 64 * 8)).deref();
|
|
try self.genSetReg(tmp_reg, hi_ty, hi_mcv, .{});
|
|
try self.truncateRegister(dst_ty, tmp_reg);
|
|
try self.genCopy(hi_ty, hi_mcv, .{ .register = tmp_reg }, .{});
|
|
}
|
|
}
|
|
}
|
|
return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none });
|
|
}
|
|
|
|
fn airPtrArithmetic(self: *CodeGen, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const dst_mcv = try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs);
return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none });
}
|
|
|
|
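/// Returns how many bits of `dst_air` can actually be meaningful: looks through
/// an `intcast` from a narrower type (adjusting for signedness changes) and
/// through integer constants, falling back to the type's full bit width.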
fn activeIntBits(self: *CodeGen, dst_air: Air.Inst.Ref) u16 {
const pt = self.pt;
const zcu = pt.zcu;
const air_tag = self.air.instructions.items(.tag);
const air_data = self.air.instructions.items(.data);

const dst_ty = self.typeOf(dst_air);
const dst_info = dst_ty.intInfo(zcu);
if (dst_air.toIndex()) |inst| {
switch (air_tag[@intFromEnum(inst)]) {
.intcast => {
const src_ty = self.typeOf(air_data[@intFromEnum(inst)].ty_op.operand);
const src_info = src_ty.intInfo(zcu);
return @min(switch (src_info.signedness) {
.signed => switch (dst_info.signedness) {
.signed => src_info.bits,
.unsigned => src_info.bits - 1,
},
.unsigned => switch (dst_info.signedness) {
.signed => src_info.bits + 1,
.unsigned => src_info.bits,
},
}, dst_info.bits);
},
else => {},
}
} else if (dst_air.toInterned()) |ip_index| {
var space: Value.BigIntSpace = undefined;
const src_int = Value.fromInterned(ip_index).toBigInt(&space, zcu);
return @as(u16, @intCast(src_int.bitCountTwosComp())) +
@intFromBool(src_int.positive and dst_info.signedness == .signed);
}
return dst_info.bits;
}
|
|
|
|
fn airMulDivBinOp(self: *CodeGen, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
|
|
const pt = self.pt;
|
|
const zcu = pt.zcu;
|
|
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
|
|
const result = result: {
|
|
const dst_ty = self.typeOfIndex(inst);
|
|
switch (dst_ty.zigTypeTag(zcu)) {
|
|
.float, .vector => break :result try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs),
|
|
else => {},
|
|
}
|
|
const dst_abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
|
|
|
|
const dst_info = dst_ty.intInfo(zcu);
|
|
const src_ty = try pt.intType(dst_info.signedness, switch (tag) {
|
|
else => unreachable,
|
|
.mul, .mul_wrap => @max(
|
|
self.activeIntBits(bin_op.lhs),
|
|
self.activeIntBits(bin_op.rhs),
|
|
dst_info.bits / 2,
|
|
),
|
|
.div_trunc, .div_floor, .div_exact, .rem, .mod => dst_info.bits,
|
|
});
|
|
const src_abi_size: u32 = @intCast(src_ty.abiSize(zcu));
|
|
|
|
if (dst_abi_size == 16 and src_abi_size == 16) switch (tag) {
|
|
else => unreachable,
|
|
.mul, .mul_wrap => {},
|
|
.div_trunc, .div_floor, .div_exact, .rem, .mod => {
|
|
const signed = dst_ty.isSignedInt(zcu);
|
|
var callee_buf: ["__udiv?i3".len]u8 = undefined; // sized for names like "__udivti3"/"__umodti3"
|
|
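// Signed @divFloor goes through compiler-rt in two calls: __mod?i3 first, and
// if the operand signs differ (tested by xoring the high limbs) while the
// remainder is non-zero, a flag is spilled to this frame slot so the truncated
// __div?i3 quotient can be decremented afterwards.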
const signed_div_floor_state: struct {
|
|
frame_index: FrameIndex,
|
|
state: State,
|
|
reloc: Mir.Inst.Index,
|
|
} = if (signed and tag == .div_floor) state: {
|
|
const frame_index = try self.allocFrameIndex(.initType(.usize, zcu));
|
|
try self.asmMemoryImmediate(
|
|
.{ ._, .mov },
|
|
.{ .base = .{ .frame = frame_index }, .mod = .{ .rm = .{ .size = .qword } } },
|
|
.u(0),
|
|
);
|
|
|
|
const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
|
|
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
|
|
defer self.register_manager.unlockReg(tmp_lock);
|
|
|
|
const lhs_mcv = try self.resolveInst(bin_op.lhs);
|
|
const mat_lhs_mcv = switch (lhs_mcv) {
|
|
.load_symbol => mat_lhs_mcv: {
|
|
// TODO clean this up!
|
|
const addr_reg = try self.copyToTmpRegister(.usize, lhs_mcv.address());
|
|
break :mat_lhs_mcv MCValue{ .indirect = .{ .reg = addr_reg } };
|
|
},
|
|
else => lhs_mcv,
|
|
};
|
|
const mat_lhs_lock = switch (mat_lhs_mcv) {
|
|
.indirect => |reg_off| self.register_manager.lockReg(reg_off.reg),
|
|
else => null,
|
|
};
|
|
defer if (mat_lhs_lock) |lock| self.register_manager.unlockReg(lock);
|
|
if (mat_lhs_mcv.isBase()) try self.asmRegisterMemory(
|
|
.{ ._, .mov },
|
|
tmp_reg,
|
|
try mat_lhs_mcv.address().offset(8).deref().mem(self, .{ .size = .qword }),
|
|
) else try self.asmRegisterRegister(
|
|
.{ ._, .mov },
|
|
tmp_reg,
|
|
mat_lhs_mcv.register_pair[1],
|
|
);
|
|
|
|
const rhs_mcv = try self.resolveInst(bin_op.rhs);
|
|
const mat_rhs_mcv = switch (rhs_mcv) {
|
|
.load_symbol => mat_rhs_mcv: {
|
|
// TODO clean this up!
|
|
const addr_reg = try self.copyToTmpRegister(.usize, rhs_mcv.address());
|
|
break :mat_rhs_mcv MCValue{ .indirect = .{ .reg = addr_reg } };
|
|
},
|
|
else => rhs_mcv,
|
|
};
|
|
const mat_rhs_lock = switch (mat_rhs_mcv) {
|
|
.indirect => |reg_off| self.register_manager.lockReg(reg_off.reg),
|
|
else => null,
|
|
};
|
|
defer if (mat_rhs_lock) |lock| self.register_manager.unlockReg(lock);
|
|
if (mat_rhs_mcv.isBase()) try self.asmRegisterMemory(
|
|
.{ ._, .xor },
|
|
tmp_reg,
|
|
try mat_rhs_mcv.address().offset(8).deref().mem(self, .{ .size = .qword }),
|
|
) else try self.asmRegisterRegister(
|
|
.{ ._, .xor },
|
|
tmp_reg,
|
|
mat_rhs_mcv.register_pair[1],
|
|
);
|
|
const state = try self.saveState();
|
|
const reloc = try self.asmJccReloc(.ns, undefined);
|
|
|
|
break :state .{ .frame_index = frame_index, .state = state, .reloc = reloc };
|
|
} else undefined;
|
|
const call_mcv = try self.genCall(
|
|
.{ .lib = .{
|
|
.return_type = dst_ty.toIntern(),
|
|
.param_types = &.{ src_ty.toIntern(), src_ty.toIntern() },
|
|
.callee = std.fmt.bufPrint(&callee_buf, "__{s}{s}{c}i3", .{
|
|
if (signed) "" else "u",
|
|
switch (tag) {
|
|
.div_trunc, .div_exact => "div",
|
|
.div_floor => if (signed) "mod" else "div",
|
|
.rem, .mod => "mod",
|
|
else => unreachable,
|
|
},
|
|
intCompilerRtAbiName(@intCast(dst_ty.bitSize(zcu))),
|
|
}) catch unreachable,
|
|
} },
|
|
&.{ src_ty, src_ty },
|
|
&.{ .{ .air_ref = bin_op.lhs }, .{ .air_ref = bin_op.rhs } },
|
|
.{},
|
|
);
|
|
break :result if (signed) switch (tag) {
|
|
.div_floor => {
|
|
try self.asmRegisterRegister(
|
|
.{ ._, .@"or" },
|
|
call_mcv.register_pair[0],
|
|
call_mcv.register_pair[1],
|
|
);
|
|
try self.asmSetccMemory(.nz, .{
|
|
.base = .{ .frame = signed_div_floor_state.frame_index },
|
|
.mod = .{ .rm = .{ .size = .byte } },
|
|
});
|
|
try self.restoreState(signed_div_floor_state.state, &.{}, .{
|
|
.emit_instructions = true,
|
|
.update_tracking = true,
|
|
.resurrect = true,
|
|
.close_scope = true,
|
|
});
|
|
self.performReloc(signed_div_floor_state.reloc);
|
|
const dst_mcv = try self.genCall(
|
|
.{ .lib = .{
|
|
.return_type = dst_ty.toIntern(),
|
|
.param_types = &.{ src_ty.toIntern(), src_ty.toIntern() },
|
|
.callee = std.fmt.bufPrint(&callee_buf, "__div{c}i3", .{
|
|
intCompilerRtAbiName(@intCast(dst_ty.bitSize(zcu))),
|
|
}) catch unreachable,
|
|
} },
|
|
&.{ src_ty, src_ty },
|
|
&.{ .{ .air_ref = bin_op.lhs }, .{ .air_ref = bin_op.rhs } },
|
|
.{},
|
|
);
|
|
try self.asmRegisterMemory(
|
|
.{ ._, .sub },
|
|
dst_mcv.register_pair[0],
|
|
.{
|
|
.base = .{ .frame = signed_div_floor_state.frame_index },
|
|
.mod = .{ .rm = .{ .size = .qword } },
|
|
},
|
|
);
|
|
try self.asmRegisterImmediate(.{ ._, .sbb }, dst_mcv.register_pair[1], .u(0));
|
|
try self.freeValue(
|
|
.{ .load_frame = .{ .index = signed_div_floor_state.frame_index } },
|
|
);
|
|
break :result dst_mcv;
|
|
},
|
|
.mod => {
|
|
const dst_regs = call_mcv.register_pair;
|
|
const dst_locks = self.register_manager.lockRegsAssumeUnused(2, dst_regs);
|
|
defer for (dst_locks) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
const tmp_regs =
|
|
try self.register_manager.allocRegs(2, @splat(null), abi.RegisterClass.gp);
|
|
const tmp_locks = self.register_manager.lockRegsAssumeUnused(2, tmp_regs);
|
|
defer for (tmp_locks) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
const rhs_mcv = try self.resolveInst(bin_op.rhs);
|
|
const mat_rhs_mcv = switch (rhs_mcv) {
|
|
.load_symbol => mat_rhs_mcv: {
|
|
// TODO clean this up!
|
|
const addr_reg = try self.copyToTmpRegister(.usize, rhs_mcv.address());
|
|
break :mat_rhs_mcv MCValue{ .indirect = .{ .reg = addr_reg } };
|
|
},
|
|
else => rhs_mcv,
|
|
};
|
|
const mat_rhs_lock = switch (mat_rhs_mcv) {
|
|
.indirect => |reg_off| self.register_manager.lockReg(reg_off.reg),
|
|
else => null,
|
|
};
|
|
defer if (mat_rhs_lock) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
for (tmp_regs, dst_regs) |tmp_reg, dst_reg|
|
|
try self.asmRegisterRegister(.{ ._, .mov }, tmp_reg, dst_reg);
|
|
if (mat_rhs_mcv.isBase()) {
|
|
try self.asmRegisterMemory(
|
|
.{ ._, .add },
|
|
tmp_regs[0],
|
|
try mat_rhs_mcv.mem(self, .{ .size = .qword }),
|
|
);
|
|
try self.asmRegisterMemory(
|
|
.{ ._, .adc },
|
|
tmp_regs[1],
|
|
try mat_rhs_mcv.address().offset(8).deref().mem(self, .{ .size = .qword }),
|
|
);
|
|
} else for (
|
|
[_]Mir.Inst.Tag{ .add, .adc },
|
|
tmp_regs,
|
|
mat_rhs_mcv.register_pair,
|
|
) |op, tmp_reg, rhs_reg|
|
|
try self.asmRegisterRegister(.{ ._, op }, tmp_reg, rhs_reg);
|
|
try self.asmRegisterRegister(.{ ._, .@"test" }, dst_regs[1], dst_regs[1]);
|
|
for (dst_regs, tmp_regs) |dst_reg, tmp_reg|
|
|
try self.asmCmovccRegisterRegister(.s, dst_reg, tmp_reg);
|
|
break :result call_mcv;
|
|
},
|
|
else => call_mcv,
|
|
} else call_mcv;
|
|
},
|
|
};
|
|
|
|
try self.spillEflagsIfOccupied();
|
|
try self.spillRegisters(&.{ .rax, .rcx, .rdx });
|
|
const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rax, .rcx, .rdx });
|
|
defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
const lhs_mcv = try self.resolveInst(bin_op.lhs);
|
|
const rhs_mcv = try self.resolveInst(bin_op.rhs);
|
|
break :result try self.genMulDivBinOp(tag, inst, dst_ty, src_ty, lhs_mcv, rhs_mcv);
|
|
};
|
|
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
|
|
}
|
|
|
|
fn airAddSat(self: *CodeGen, inst: Air.Inst.Index) !void {
|
|
const pt = self.pt;
|
|
const zcu = pt.zcu;
|
|
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
|
|
const ty = self.typeOf(bin_op.lhs);
|
|
if (ty.zigTypeTag(zcu) == .vector or ty.abiSize(zcu) > 8) return self.fail(
|
|
"TODO implement airAddSat for {}",
|
|
.{ty.fmt(pt)},
|
|
);
|
|
|
|
const lhs_mcv = try self.resolveInst(bin_op.lhs);
|
|
const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv))
|
|
lhs_mcv
|
|
else
|
|
try self.copyToRegisterWithInstTracking(inst, ty, lhs_mcv);
|
|
const dst_reg = dst_mcv.register;
|
|
const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
|
|
defer self.register_manager.unlockReg(dst_lock);
|
|
|
|
const rhs_mcv = try self.resolveInst(bin_op.rhs);
|
|
const rhs_lock = switch (rhs_mcv) {
|
|
.register => |reg| self.register_manager.lockRegAssumeUnused(reg),
|
|
else => null,
|
|
};
|
|
defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
const limit_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
|
|
const limit_mcv = MCValue{ .register = limit_reg };
|
|
const limit_lock = self.register_manager.lockRegAssumeUnused(limit_reg);
|
|
defer self.register_manager.unlockReg(limit_lock);
|
|
|
|
const reg_bits = self.regBitSize(ty);
|
|
const reg_extra_bits = self.regExtraBits(ty);
|
|
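// Signed saturation: limit = int_max ^ (lhs >> (bits - 1)), i.e. int_max when
// lhs is non-negative and int_min otherwise; it is cmov'd in on signed
// overflow. Unsigned saturation: limit is the type's max value, selected on
// carry (or on an above-limit compare when the type has unused register bits).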
const cc: Condition = if (ty.isSignedInt(zcu)) cc: {
|
|
if (reg_extra_bits > 0) {
|
|
try self.genShiftBinOpMir(.{ ._l, .sa }, ty, dst_mcv, .u8, .{ .immediate = reg_extra_bits });
|
|
}
|
|
try self.genSetReg(limit_reg, ty, dst_mcv, .{});
|
|
try self.genShiftBinOpMir(.{ ._r, .sa }, ty, limit_mcv, .u8, .{ .immediate = reg_bits - 1 });
|
|
try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{
|
|
.immediate = (@as(u64, 1) << @intCast(reg_bits - 1)) - 1,
|
|
});
|
|
if (reg_extra_bits > 0) {
|
|
const shifted_rhs_reg = try self.copyToTmpRegister(ty, rhs_mcv);
|
|
const shifted_rhs_mcv = MCValue{ .register = shifted_rhs_reg };
|
|
const shifted_rhs_lock = self.register_manager.lockRegAssumeUnused(shifted_rhs_reg);
|
|
defer self.register_manager.unlockReg(shifted_rhs_lock);
|
|
|
|
try self.genShiftBinOpMir(.{ ._l, .sa }, ty, shifted_rhs_mcv, .u8, .{ .immediate = reg_extra_bits });
|
|
try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, shifted_rhs_mcv);
|
|
} else try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, rhs_mcv);
|
|
break :cc .o;
|
|
} else cc: {
|
|
try self.genSetReg(limit_reg, ty, .{
|
|
.immediate = @as(u64, std.math.maxInt(u64)) >> @intCast(64 - ty.bitSize(zcu)),
|
|
}, .{});
|
|
|
|
try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, rhs_mcv);
|
|
if (reg_extra_bits > 0) {
|
|
try self.genBinOpMir(.{ ._, .cmp }, ty, dst_mcv, limit_mcv);
|
|
break :cc .a;
|
|
}
|
|
break :cc .c;
|
|
};
|
|
|
|
const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(zcu))), 2);
|
|
try self.asmCmovccRegisterRegister(
|
|
cc,
|
|
registerAlias(dst_reg, cmov_abi_size),
|
|
registerAlias(limit_reg, cmov_abi_size),
|
|
);
|
|
|
|
if (reg_extra_bits > 0 and ty.isSignedInt(zcu))
|
|
try self.genShiftBinOpMir(.{ ._r, .sa }, ty, dst_mcv, .u8, .{ .immediate = reg_extra_bits });
|
|
|
|
return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none });
|
|
}
|
|
|
|
fn airSubSat(self: *CodeGen, inst: Air.Inst.Index) !void {
|
|
const pt = self.pt;
|
|
const zcu = pt.zcu;
|
|
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
|
|
const ty = self.typeOf(bin_op.lhs);
|
|
if (ty.zigTypeTag(zcu) == .vector or ty.abiSize(zcu) > 8) return self.fail(
|
|
"TODO implement airSubSat for {}",
|
|
.{ty.fmt(pt)},
|
|
);
|
|
|
|
const lhs_mcv = try self.resolveInst(bin_op.lhs);
|
|
const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv))
|
|
lhs_mcv
|
|
else
|
|
try self.copyToRegisterWithInstTracking(inst, ty, lhs_mcv);
|
|
const dst_reg = dst_mcv.register;
|
|
const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
|
|
defer self.register_manager.unlockReg(dst_lock);
|
|
|
|
const rhs_mcv = try self.resolveInst(bin_op.rhs);
|
|
const rhs_lock = switch (rhs_mcv) {
|
|
.register => |reg| self.register_manager.lockRegAssumeUnused(reg),
|
|
else => null,
|
|
};
|
|
defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
const limit_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
|
|
const limit_mcv = MCValue{ .register = limit_reg };
|
|
const limit_lock = self.register_manager.lockRegAssumeUnused(limit_reg);
|
|
defer self.register_manager.unlockReg(limit_lock);
|
|
|
|
const reg_bits = self.regBitSize(ty);
|
|
const reg_extra_bits = self.regExtraBits(ty);
|
|
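// Same saturation scheme as airAddSat: signed overflow selects int_max or
// int_min depending on the sign of lhs; unsigned subtraction saturates to the
// zero held in limit_reg when the sub borrows.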
const cc: Condition = if (ty.isSignedInt(zcu)) cc: {
|
|
if (reg_extra_bits > 0) {
|
|
try self.genShiftBinOpMir(.{ ._l, .sa }, ty, dst_mcv, .u8, .{ .immediate = reg_extra_bits });
|
|
}
|
|
try self.genSetReg(limit_reg, ty, dst_mcv, .{});
|
|
try self.genShiftBinOpMir(.{ ._r, .sa }, ty, limit_mcv, .u8, .{ .immediate = reg_bits - 1 });
|
|
try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{
|
|
.immediate = (@as(u64, 1) << @intCast(reg_bits - 1)) - 1,
|
|
});
|
|
if (reg_extra_bits > 0) {
|
|
const shifted_rhs_reg = try self.copyToTmpRegister(ty, rhs_mcv);
|
|
const shifted_rhs_mcv = MCValue{ .register = shifted_rhs_reg };
|
|
const shifted_rhs_lock = self.register_manager.lockRegAssumeUnused(shifted_rhs_reg);
|
|
defer self.register_manager.unlockReg(shifted_rhs_lock);
|
|
|
|
try self.genShiftBinOpMir(.{ ._l, .sa }, ty, shifted_rhs_mcv, .u8, .{ .immediate = reg_extra_bits });
|
|
try self.genBinOpMir(.{ ._, .sub }, ty, dst_mcv, shifted_rhs_mcv);
|
|
} else try self.genBinOpMir(.{ ._, .sub }, ty, dst_mcv, rhs_mcv);
|
|
break :cc .o;
|
|
} else cc: {
|
|
try self.genSetReg(limit_reg, ty, .{ .immediate = 0 }, .{});
|
|
try self.genBinOpMir(.{ ._, .sub }, ty, dst_mcv, rhs_mcv);
|
|
break :cc .c;
|
|
};
|
|
|
|
const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(zcu))), 2);
|
|
try self.asmCmovccRegisterRegister(
|
|
cc,
|
|
registerAlias(dst_reg, cmov_abi_size),
|
|
registerAlias(limit_reg, cmov_abi_size),
|
|
);
|
|
|
|
if (reg_extra_bits > 0 and ty.isSignedInt(zcu))
|
|
try self.genShiftBinOpMir(.{ ._r, .sa }, ty, dst_mcv, .u8, .{ .immediate = reg_extra_bits });
|
|
|
|
return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none });
|
|
}
|
|
|
|
fn airMulSat(self: *CodeGen, inst: Air.Inst.Index) !void {
|
|
const pt = self.pt;
|
|
const zcu = pt.zcu;
|
|
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
|
|
const ty = self.typeOf(bin_op.lhs);
|
|
|
|
const result = result: {
|
|
if (ty.toIntern() == .i128_type) {
|
|
const ptr_c_int = try pt.singleMutPtrType(.c_int);
|
|
const overflow = try self.allocTempRegOrMem(.c_int, false);
|
|
|
|
const dst_mcv = try self.genCall(.{ .lib = .{
|
|
.return_type = .i128_type,
|
|
.param_types = &.{ .i128_type, .i128_type, ptr_c_int.toIntern() },
|
|
.callee = "__muloti4",
|
|
} }, &.{ .i128, .i128, ptr_c_int }, &.{
|
|
.{ .air_ref = bin_op.lhs },
|
|
.{ .air_ref = bin_op.rhs },
|
|
overflow.address(),
|
|
}, .{});
|
|
const dst_locks = self.register_manager.lockRegsAssumeUnused(2, dst_mcv.register_pair);
|
|
defer for (dst_locks) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
|
|
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
|
|
defer self.register_manager.unlockReg(tmp_lock);
|
|
|
|
const lhs_mcv = try self.resolveInst(bin_op.lhs);
|
|
const mat_lhs_mcv = switch (lhs_mcv) {
|
|
.load_symbol => mat_lhs_mcv: {
|
|
// TODO clean this up!
|
|
const addr_reg = try self.copyToTmpRegister(.usize, lhs_mcv.address());
|
|
break :mat_lhs_mcv MCValue{ .indirect = .{ .reg = addr_reg } };
|
|
},
|
|
else => lhs_mcv,
|
|
};
|
|
const mat_lhs_lock = switch (mat_lhs_mcv) {
|
|
.indirect => |reg_off| self.register_manager.lockReg(reg_off.reg),
|
|
else => null,
|
|
};
|
|
defer if (mat_lhs_lock) |lock| self.register_manager.unlockReg(lock);
|
|
if (mat_lhs_mcv.isBase()) try self.asmRegisterMemory(
|
|
.{ ._, .mov },
|
|
tmp_reg,
|
|
try mat_lhs_mcv.address().offset(8).deref().mem(self, .{ .size = .qword }),
|
|
) else try self.asmRegisterRegister(
|
|
.{ ._, .mov },
|
|
tmp_reg,
|
|
mat_lhs_mcv.register_pair[1],
|
|
);
|
|
|
|
const rhs_mcv = try self.resolveInst(bin_op.rhs);
|
|
const mat_rhs_mcv = switch (rhs_mcv) {
|
|
.load_symbol => mat_rhs_mcv: {
|
|
// TODO clean this up!
|
|
const addr_reg = try self.copyToTmpRegister(.usize, rhs_mcv.address());
|
|
break :mat_rhs_mcv MCValue{ .indirect = .{ .reg = addr_reg } };
|
|
},
|
|
else => rhs_mcv,
|
|
};
|
|
const mat_rhs_lock = switch (mat_rhs_mcv) {
|
|
.indirect => |reg_off| self.register_manager.lockReg(reg_off.reg),
|
|
else => null,
|
|
};
|
|
defer if (mat_rhs_lock) |lock| self.register_manager.unlockReg(lock);
|
|
if (mat_rhs_mcv.isBase()) try self.asmRegisterMemory(
|
|
.{ ._, .xor },
|
|
tmp_reg,
|
|
try mat_rhs_mcv.address().offset(8).deref().mem(self, .{ .size = .qword }),
|
|
) else try self.asmRegisterRegister(
|
|
.{ ._, .xor },
|
|
tmp_reg,
|
|
mat_rhs_mcv.register_pair[1],
|
|
);
|
|
|
|
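// Select the saturated bound from the operand signs: not((lhs_hi ^ rhs_hi) >> 63)
// is all-ones when the signs match and zero when they differ; after btc of bit
// 63, the low/high pair forms maxInt(i128) or minInt(i128), which is cmov'd in
// when __muloti4 reported overflow.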
try self.asmRegisterImmediate(.{ ._r, .sa }, tmp_reg, .u(63));
|
|
try self.asmRegister(.{ ._, .not }, tmp_reg);
|
|
try self.asmMemoryImmediate(.{ ._, .cmp }, try overflow.mem(self, .{ .size = .dword }), .s(0));
|
|
try self.freeValue(overflow);
|
|
try self.asmCmovccRegisterRegister(.ne, dst_mcv.register_pair[0], tmp_reg);
|
|
try self.asmRegisterImmediate(.{ ._c, .bt }, tmp_reg, .u(63));
|
|
try self.asmCmovccRegisterRegister(.ne, dst_mcv.register_pair[1], tmp_reg);
|
|
break :result dst_mcv;
|
|
}
|
|
|
|
if (ty.zigTypeTag(zcu) == .vector or ty.abiSize(zcu) > 8) return self.fail(
|
|
"TODO implement airMulSat for {}",
|
|
.{ty.fmt(pt)},
|
|
);
|
|
|
|
try self.spillRegisters(&.{ .rax, .rcx, .rdx });
|
|
const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rax, .rcx, .rdx });
|
|
defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
const lhs_mcv = try self.resolveInst(bin_op.lhs);
|
|
const lhs_lock = switch (lhs_mcv) {
|
|
.register => |reg| self.register_manager.lockRegAssumeUnused(reg),
|
|
else => null,
|
|
};
|
|
defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
const rhs_mcv = try self.resolveInst(bin_op.rhs);
|
|
const rhs_lock = switch (rhs_mcv) {
|
|
.register => |reg| self.register_manager.lockReg(reg),
|
|
else => null,
|
|
};
|
|
defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
const limit_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
|
|
const limit_mcv = MCValue{ .register = limit_reg };
|
|
const limit_lock = self.register_manager.lockRegAssumeUnused(limit_reg);
|
|
defer self.register_manager.unlockReg(limit_lock);
|
|
|
|
const reg_bits = self.regBitSize(ty);
|
|
const cc: Condition = if (ty.isSignedInt(zcu)) cc: {
|
|
try self.genSetReg(limit_reg, ty, lhs_mcv, .{});
|
|
try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, rhs_mcv);
|
|
try self.genShiftBinOpMir(.{ ._r, .sa }, ty, limit_mcv, .u8, .{ .immediate = reg_bits - 1 });
|
|
try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{
|
|
.immediate = (@as(u64, 1) << @intCast(reg_bits - 1)) - 1,
|
|
});
|
|
break :cc .o;
|
|
} else cc: {
|
|
try self.genSetReg(limit_reg, ty, .{
|
|
.immediate = @as(u64, std.math.maxInt(u64)) >> @intCast(64 - reg_bits),
|
|
}, .{});
|
|
break :cc .c;
|
|
};
|
|
|
|
const dst_mcv = try self.genMulDivBinOp(.mul, inst, ty, ty, lhs_mcv, rhs_mcv);
|
|
const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(zcu))), 2);
|
|
try self.asmCmovccRegisterRegister(
|
|
cc,
|
|
registerAlias(dst_mcv.register, cmov_abi_size),
|
|
registerAlias(limit_reg, cmov_abi_size),
|
|
);
|
|
break :result dst_mcv;
|
|
};
|
|
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
|
|
}
|
|
|
|
fn airAddSubWithOverflow(self: *CodeGen, inst: Air.Inst.Index) !void {
|
|
const pt = self.pt;
|
|
const zcu = pt.zcu;
|
|
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
|
|
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
|
|
const result: MCValue = result: {
|
|
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
|
|
const ty = self.typeOf(bin_op.lhs);
|
|
switch (ty.zigTypeTag(zcu)) {
|
|
.vector => return self.fail("TODO implement add/sub with overflow for Vector type", .{}),
|
|
.int => {
|
|
try self.spillEflagsIfOccupied();
|
|
try self.spillRegisters(&.{ .rcx, .rdi, .rsi });
|
|
const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rcx, .rdi, .rsi });
|
|
defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
const partial_mcv = try self.genBinOp(null, switch (tag) {
|
|
.add_with_overflow => .add,
|
|
.sub_with_overflow => .sub,
|
|
else => unreachable,
|
|
}, bin_op.lhs, bin_op.rhs);
|
|
const int_info = ty.intInfo(zcu);
|
|
const cc: Condition = switch (int_info.signedness) {
|
|
.unsigned => .c,
|
|
.signed => .o,
|
|
};
|
|
|
|
const tuple_ty = self.typeOfIndex(inst);
|
|
if (int_info.bits >= 8 and std.math.isPowerOfTwo(int_info.bits)) {
|
|
switch (partial_mcv) {
|
|
.register => |reg| {
|
|
self.eflags_inst = inst;
|
|
break :result .{ .register_overflow = .{ .reg = reg, .eflags = cc } };
|
|
},
|
|
else => {},
|
|
}
|
|
|
|
const frame_index = try self.allocFrameIndex(.initSpill(tuple_ty, zcu));
|
|
try self.genSetMem(
|
|
.{ .frame = frame_index },
|
|
@intCast(tuple_ty.structFieldOffset(1, zcu)),
|
|
.u1,
|
|
.{ .eflags = cc },
|
|
.{},
|
|
);
|
|
try self.genSetMem(
|
|
.{ .frame = frame_index },
|
|
@intCast(tuple_ty.structFieldOffset(0, zcu)),
|
|
ty,
|
|
partial_mcv,
|
|
.{},
|
|
);
|
|
break :result .{ .load_frame = .{ .index = frame_index } };
|
|
}
|
|
|
|
const frame_index = try self.allocFrameIndex(.initSpill(tuple_ty, zcu));
|
|
try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc);
|
|
break :result .{ .load_frame = .{ .index = frame_index } };
|
|
},
|
|
else => unreachable,
|
|
}
|
|
};
|
|
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
|
|
}
|
|
|
|
fn airShlWithOverflow(self: *CodeGen, inst: Air.Inst.Index) !void {
|
|
const pt = self.pt;
|
|
const zcu = pt.zcu;
|
|
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
|
|
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
|
|
const result: MCValue = result: {
|
|
const lhs_ty = self.typeOf(bin_op.lhs);
|
|
const rhs_ty = self.typeOf(bin_op.rhs);
|
|
switch (lhs_ty.zigTypeTag(zcu)) {
|
|
.vector => return self.fail("TODO implement shl with overflow for Vector type", .{}),
|
|
.int => {
|
|
try self.spillEflagsIfOccupied();
|
|
try self.spillRegisters(&.{ .rcx, .rdi, .rsi });
|
|
const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rcx, .rdi, .rsi });
|
|
defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
const lhs = try self.resolveInst(bin_op.lhs);
|
|
const rhs = try self.resolveInst(bin_op.rhs);
|
|
|
|
const int_info = lhs_ty.intInfo(zcu);
|
|
|
|
const partial_mcv = try self.genShiftBinOp(.shl, null, lhs, rhs, lhs_ty, rhs_ty);
|
|
const partial_lock = switch (partial_mcv) {
|
|
.register => |reg| self.register_manager.lockRegAssumeUnused(reg),
|
|
else => null,
|
|
};
|
|
defer if (partial_lock) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
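// Overflow check: shift the result back and compare with the original lhs;
// any difference means set bits were shifted out.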
const tmp_mcv = try self.genShiftBinOp(.shr, null, partial_mcv, rhs, lhs_ty, rhs_ty);
|
|
const tmp_lock = switch (tmp_mcv) {
|
|
.register => |reg| self.register_manager.lockRegAssumeUnused(reg),
|
|
else => null,
|
|
};
|
|
defer if (tmp_lock) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
try self.genBinOpMir(.{ ._, .cmp }, lhs_ty, tmp_mcv, lhs);
|
|
const cc = Condition.ne;
|
|
|
|
const tuple_ty = self.typeOfIndex(inst);
|
|
if (int_info.bits >= 8 and std.math.isPowerOfTwo(int_info.bits)) {
|
|
switch (partial_mcv) {
|
|
.register => |reg| {
|
|
self.eflags_inst = inst;
|
|
break :result .{ .register_overflow = .{ .reg = reg, .eflags = cc } };
|
|
},
|
|
else => {},
|
|
}
|
|
|
|
const frame_index = try self.allocFrameIndex(.initSpill(tuple_ty, zcu));
|
|
try self.genSetMem(
|
|
.{ .frame = frame_index },
|
|
@intCast(tuple_ty.structFieldOffset(1, zcu)),
|
|
tuple_ty.fieldType(1, zcu),
|
|
.{ .eflags = cc },
|
|
.{},
|
|
);
|
|
try self.genSetMem(
|
|
.{ .frame = frame_index },
|
|
@intCast(tuple_ty.structFieldOffset(0, zcu)),
|
|
tuple_ty.fieldType(0, zcu),
|
|
partial_mcv,
|
|
.{},
|
|
);
|
|
break :result .{ .load_frame = .{ .index = frame_index } };
|
|
}
|
|
|
|
const frame_index =
|
|
try self.allocFrameIndex(.initSpill(tuple_ty, zcu));
|
|
try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc);
|
|
break :result .{ .load_frame = .{ .index = frame_index } };
|
|
},
|
|
else => unreachable,
|
|
}
|
|
};
|
|
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
|
|
}
|
|
|
|
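/// Stores a (value, overflow) tuple at `frame_index`: writes the wrapped value,
/// re-truncates its highest limb and compares against the untruncated limb to
/// detect bits lost to truncation, or-ing that into `overflow_cc` when given.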
fn genSetFrameTruncatedOverflowCompare(
|
|
self: *CodeGen,
|
|
tuple_ty: Type,
|
|
frame_index: FrameIndex,
|
|
src_mcv: MCValue,
|
|
overflow_cc: ?Condition,
|
|
) !void {
|
|
const pt = self.pt;
|
|
const zcu = pt.zcu;
|
|
const src_lock = switch (src_mcv) {
|
|
.register => |reg| self.register_manager.lockReg(reg),
|
|
else => null,
|
|
};
|
|
defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
const ty = tuple_ty.fieldType(0, zcu);
|
|
const ty_size = ty.abiSize(zcu);
|
|
const int_info = ty.intInfo(zcu);
|
|
|
|
const hi_bits = (int_info.bits - 1) % 64 + 1;
|
|
const hi_ty = try pt.intType(int_info.signedness, hi_bits);
|
|
|
|
const limb_bits: u16 = @intCast(if (int_info.bits <= 64) self.regBitSize(ty) else 64);
|
|
const limb_ty = try pt.intType(int_info.signedness, limb_bits);
|
|
|
|
const rest_ty = try pt.intType(.unsigned, int_info.bits - hi_bits);
|
|
|
|
const temp_regs =
|
|
try self.register_manager.allocRegs(3, @splat(null), abi.RegisterClass.gp);
|
|
const temp_locks = self.register_manager.lockRegsAssumeUnused(3, temp_regs);
|
|
defer for (temp_locks) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
const overflow_reg = temp_regs[0];
|
|
if (overflow_cc) |cc| try self.asmSetccRegister(cc, overflow_reg.to8());
|
|
|
|
const scratch_reg = temp_regs[1];
|
|
const hi_limb_off = if (int_info.bits <= 64) 0 else (int_info.bits - 1) / 64 * 8;
|
|
const hi_limb_mcv = if (hi_limb_off > 0)
|
|
src_mcv.address().offset(int_info.bits / 64 * 8).deref()
|
|
else
|
|
src_mcv;
|
|
try self.genSetReg(scratch_reg, limb_ty, hi_limb_mcv, .{});
|
|
try self.truncateRegister(hi_ty, scratch_reg);
|
|
try self.genBinOpMir(.{ ._, .cmp }, limb_ty, .{ .register = scratch_reg }, hi_limb_mcv);
|
|
|
|
const eq_reg = temp_regs[2];
|
|
if (overflow_cc) |_| {
|
|
try self.asmSetccRegister(.ne, eq_reg.to8());
|
|
try self.genBinOpMir(.{ ._, .@"or" }, .u8, .{ .register = overflow_reg }, .{ .register = eq_reg });
|
|
}
|
|
try self.genSetMem(
|
|
.{ .frame = frame_index },
|
|
@intCast(tuple_ty.structFieldOffset(1, zcu)),
|
|
tuple_ty.fieldType(1, zcu),
|
|
if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ .eflags = .ne },
|
|
.{},
|
|
);
|
|
|
|
const payload_off: i32 = @intCast(tuple_ty.structFieldOffset(0, zcu));
|
|
if (hi_limb_off > 0) try self.genSetMem(
|
|
.{ .frame = frame_index },
|
|
payload_off,
|
|
rest_ty,
|
|
src_mcv,
|
|
.{},
|
|
);
|
|
try self.genSetMem(
|
|
.{ .frame = frame_index },
|
|
payload_off + hi_limb_off,
|
|
limb_ty,
|
|
.{ .register = scratch_reg },
|
|
.{},
|
|
);
|
|
var ext_off: i32 = hi_limb_off + 8;
|
|
if (ext_off < ty_size) {
|
|
switch (int_info.signedness) {
|
|
.signed => try self.asmRegisterImmediate(.{ ._r, .sa }, scratch_reg.to64(), .s(63)),
|
|
.unsigned => try self.asmRegisterRegister(.{ ._, .xor }, scratch_reg.to32(), scratch_reg.to32()),
|
|
}
|
|
while (ext_off < ty_size) : (ext_off += 8) try self.genSetMem(
|
|
.{ .frame = frame_index },
|
|
payload_off + ext_off,
|
|
limb_ty,
|
|
.{ .register = scratch_reg },
|
|
.{},
|
|
);
|
|
}
|
|
}
|
|
|
|
fn airMulWithOverflow(self: *CodeGen, inst: Air.Inst.Index) !void {
|
|
const pt = self.pt;
|
|
const zcu = pt.zcu;
|
|
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
|
|
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
|
|
const tuple_ty = self.typeOfIndex(inst);
|
|
const dst_ty = self.typeOf(bin_op.lhs);
|
|
const result: MCValue = switch (dst_ty.zigTypeTag(zcu)) {
|
|
.vector => return self.fail("TODO implement airMulWithOverflow for {}", .{dst_ty.fmt(pt)}),
|
|
.int => result: {
|
|
const dst_info = dst_ty.intInfo(zcu);
|
|
if (dst_info.bits > 128 and dst_info.signedness == .unsigned) {
|
|
const slow_inc = self.hasFeature(.slow_incdec);
|
|
const abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
|
|
const limb_len = std.math.divCeil(u32, abi_size, 8) catch unreachable;
|
|
|
|
try self.spillRegisters(&.{ .rax, .rcx, .rdx });
|
|
const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rax, .rcx, .rdx });
|
|
defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
const dst_mcv = try self.allocRegOrMem(inst, false);
|
|
try self.genInlineMemset(
|
|
dst_mcv.address(),
|
|
.{ .immediate = 0 },
|
|
.{ .immediate = tuple_ty.abiSize(zcu) },
|
|
.{},
|
|
);
|
|
const lhs_mcv = try self.resolveInst(bin_op.lhs);
|
|
const rhs_mcv = try self.resolveInst(bin_op.rhs);
|
|
|
|
const temp_regs =
|
|
try self.register_manager.allocRegs(4, @splat(null), abi.RegisterClass.gp);
|
|
const temp_locks = self.register_manager.lockRegsAssumeUnused(4, temp_regs);
|
|
defer for (temp_locks) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
try self.asmRegisterRegister(.{ ._, .xor }, temp_regs[0].to32(), temp_regs[0].to32());
|
|
|
|
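// Schoolbook multiplication over 64-bit limbs: for every rhs limb, multiply by
// each lhs limb (rdx:rax = 64x64 -> 128) and accumulate into the destination
// at the shifted limb position.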
const outer_loop: Mir.Inst.Index = @intCast(self.mir_instructions.len);
|
|
try self.asmRegisterMemory(.{ ._, .mov }, temp_regs[1].to64(), .{
|
|
.base = .{ .frame = rhs_mcv.load_frame.index },
|
|
.mod = .{ .rm = .{
|
|
.size = .qword,
|
|
.index = temp_regs[0].to64(),
|
|
.scale = .@"8",
|
|
.disp = rhs_mcv.load_frame.off,
|
|
} },
|
|
});
|
|
try self.asmRegisterRegister(.{ ._, .@"test" }, temp_regs[1].to64(), temp_regs[1].to64());
|
|
const skip_inner = try self.asmJccReloc(.z, undefined);
|
|
|
|
try self.asmRegisterRegister(.{ ._, .xor }, temp_regs[2].to32(), temp_regs[2].to32());
|
|
try self.asmRegisterRegister(.{ ._, .mov }, temp_regs[3].to32(), temp_regs[0].to32());
|
|
try self.asmRegisterRegister(.{ ._, .xor }, .ecx, .ecx);
|
|
try self.asmRegisterRegister(.{ ._, .xor }, .edx, .edx);
|
|
|
|
const inner_loop: Mir.Inst.Index = @intCast(self.mir_instructions.len);
|
|
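// Two independent carry chains are kept live across the loop in cl and ch:
// `shr` reloads the saved carry into CF ahead of the `adc`, and `setc` saves
// it back afterwards.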
try self.asmRegisterImmediate(.{ ._r, .sh }, .cl, .u(1));
|
|
try self.asmMemoryRegister(.{ ._, .adc }, .{
|
|
.base = .{ .frame = dst_mcv.load_frame.index },
|
|
.mod = .{ .rm = .{
|
|
.size = .qword,
|
|
.index = temp_regs[3].to64(),
|
|
.scale = .@"8",
|
|
.disp = dst_mcv.load_frame.off +
|
|
@as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))),
|
|
} },
|
|
}, .rdx);
|
|
try self.asmSetccRegister(.c, .cl);
|
|
|
|
try self.asmRegisterMemory(.{ ._, .mov }, .rax, .{
|
|
.base = .{ .frame = lhs_mcv.load_frame.index },
|
|
.mod = .{ .rm = .{
|
|
.size = .qword,
|
|
.index = temp_regs[2].to64(),
|
|
.scale = .@"8",
|
|
.disp = lhs_mcv.load_frame.off,
|
|
} },
|
|
});
|
|
try self.asmRegister(.{ ._, .mul }, temp_regs[1].to64());
|
|
|
|
try self.asmRegisterImmediate(.{ ._r, .sh }, .ch, .u(1));
|
|
try self.asmMemoryRegister(.{ ._, .adc }, .{
|
|
.base = .{ .frame = dst_mcv.load_frame.index },
|
|
.mod = .{ .rm = .{
|
|
.size = .qword,
|
|
.index = temp_regs[3].to64(),
|
|
.scale = .@"8",
|
|
.disp = dst_mcv.load_frame.off +
|
|
@as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))),
|
|
} },
|
|
}, .rax);
|
|
try self.asmSetccRegister(.c, .ch);
|
|
|
|
if (slow_inc) {
|
|
try self.asmRegisterImmediate(.{ ._, .add }, temp_regs[2].to32(), .u(1));
|
|
try self.asmRegisterImmediate(.{ ._, .add }, temp_regs[3].to32(), .u(1));
|
|
} else {
|
|
try self.asmRegister(.{ ._c, .in }, temp_regs[2].to32());
|
|
try self.asmRegister(.{ ._c, .in }, temp_regs[3].to32());
|
|
}
|
|
try self.asmRegisterImmediate(.{ ._, .cmp }, temp_regs[3].to32(), .u(limb_len));
|
|
_ = try self.asmJccReloc(.b, inner_loop);
|
|
|
|
try self.asmRegisterRegister(.{ ._, .@"or" }, .rdx, .rcx);
|
|
const overflow = try self.asmJccReloc(.nz, undefined);
|
|
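// Leftover carries (rdx/rcx) or any non-zero lhs limbs past the range covered
// by this rhs limb mean the full product doesn't fit: set the overflow byte.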
const overflow_loop: Mir.Inst.Index = @intCast(self.mir_instructions.len);
|
|
try self.asmRegisterImmediate(.{ ._, .cmp }, temp_regs[2].to32(), .u(limb_len));
|
|
const no_overflow = try self.asmJccReloc(.nb, undefined);
|
|
if (slow_inc) {
|
|
try self.asmRegisterImmediate(.{ ._, .add }, temp_regs[2].to32(), .u(1));
|
|
} else {
|
|
try self.asmRegister(.{ ._c, .in }, temp_regs[2].to32());
|
|
}
|
|
try self.asmMemoryImmediate(.{ ._, .cmp }, .{
|
|
.base = .{ .frame = lhs_mcv.load_frame.index },
|
|
.mod = .{ .rm = .{
|
|
.size = .qword,
|
|
.index = temp_regs[2].to64(),
|
|
.scale = .@"8",
|
|
.disp = lhs_mcv.load_frame.off - 8,
|
|
} },
|
|
}, .u(0));
|
|
_ = try self.asmJccReloc(.z, overflow_loop);
|
|
self.performReloc(overflow);
|
|
try self.asmMemoryImmediate(.{ ._, .mov }, .{
|
|
.base = .{ .frame = dst_mcv.load_frame.index },
|
|
.mod = .{ .rm = .{
|
|
.size = .byte,
|
|
.disp = dst_mcv.load_frame.off +
|
|
@as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))),
|
|
} },
|
|
}, .u(1));
|
|
self.performReloc(no_overflow);
|
|
|
|
self.performReloc(skip_inner);
|
|
if (slow_inc) {
|
|
try self.asmRegisterImmediate(.{ ._, .add }, temp_regs[0].to32(), .u(1));
|
|
} else {
|
|
try self.asmRegister(.{ ._c, .in }, temp_regs[0].to32());
|
|
}
|
|
try self.asmRegisterImmediate(.{ ._, .cmp }, temp_regs[0].to32(), .u(limb_len));
|
|
_ = try self.asmJccReloc(.b, outer_loop);
|
|
|
|
break :result dst_mcv;
|
|
}
|
|
|
|
const lhs_active_bits = self.activeIntBits(bin_op.lhs);
|
|
const rhs_active_bits = self.activeIntBits(bin_op.rhs);
|
|
const src_bits = @max(lhs_active_bits, rhs_active_bits, dst_info.bits / 2);
|
|
const src_ty = try pt.intType(dst_info.signedness, src_bits);
|
|
if (src_bits > 64 and src_bits <= 128 and
|
|
dst_info.bits > 64 and dst_info.bits <= 128) switch (dst_info.signedness) {
|
|
.signed => {
|
|
const ptr_c_int = try pt.singleMutPtrType(.c_int);
|
|
const overflow = try self.allocTempRegOrMem(.c_int, false);
|
|
const result = try self.genCall(.{ .lib = .{
|
|
.return_type = .i128_type,
|
|
.param_types = &.{ .i128_type, .i128_type, ptr_c_int.toIntern() },
|
|
.callee = "__muloti4",
|
|
} }, &.{ .i128, .i128, ptr_c_int }, &.{
|
|
.{ .air_ref = bin_op.lhs },
|
|
.{ .air_ref = bin_op.rhs },
|
|
overflow.address(),
|
|
}, .{});
|
|
|
|
const dst_mcv = try self.allocRegOrMem(inst, false);
|
|
try self.genSetMem(
|
|
.{ .frame = dst_mcv.load_frame.index },
|
|
@intCast(tuple_ty.structFieldOffset(0, zcu)),
|
|
tuple_ty.fieldType(0, zcu),
|
|
result,
|
|
.{},
|
|
);
|
|
try self.asmMemoryImmediate(
|
|
.{ ._, .cmp },
|
|
try overflow.mem(self, .{ .size = self.memSize(.c_int) }),
|
|
.s(0),
|
|
);
|
|
try self.genSetMem(
|
|
.{ .frame = dst_mcv.load_frame.index },
|
|
@intCast(tuple_ty.structFieldOffset(1, zcu)),
|
|
tuple_ty.fieldType(1, zcu),
|
|
.{ .eflags = .ne },
|
|
.{},
|
|
);
|
|
try self.freeValue(overflow);
|
|
break :result dst_mcv;
|
|
},
|
|
.unsigned => {
|
|
try self.spillEflagsIfOccupied();
|
|
try self.spillRegisters(&.{ .rax, .rdx });
|
|
const reg_locks = self.register_manager.lockRegsAssumeUnused(2, .{ .rax, .rdx });
|
|
defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
const tmp_regs =
|
|
try self.register_manager.allocRegs(4, @splat(null), abi.RegisterClass.gp);
|
|
const tmp_locks = self.register_manager.lockRegsAssumeUnused(4, tmp_regs);
|
|
defer for (tmp_locks) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
const lhs_mcv = try self.resolveInst(bin_op.lhs);
|
|
const rhs_mcv = try self.resolveInst(bin_op.rhs);
|
|
const mat_lhs_mcv = mat_lhs_mcv: switch (lhs_mcv) {
|
|
.register => |lhs_reg| switch (lhs_reg.class()) {
|
|
else => lhs_mcv,
|
|
.sse => {
|
|
const mat_lhs_mcv: MCValue = .{
|
|
.register_pair = try self.register_manager.allocRegs(2, @splat(null), abi.RegisterClass.gp),
|
|
};
|
|
try self.genCopy(dst_ty, mat_lhs_mcv, lhs_mcv, .{});
|
|
break :mat_lhs_mcv mat_lhs_mcv;
|
|
},
|
|
},
|
|
.load_symbol => {
|
|
// TODO clean this up!
|
|
const addr_reg = try self.copyToTmpRegister(.usize, lhs_mcv.address());
|
|
break :mat_lhs_mcv MCValue{ .indirect = .{ .reg = addr_reg } };
|
|
},
|
|
else => lhs_mcv,
|
|
};
|
|
const mat_lhs_locks: [2]?RegisterLock = switch (mat_lhs_mcv) {
|
|
.register_pair => |mat_lhs_regs| self.register_manager.lockRegs(2, mat_lhs_regs),
|
|
.indirect => |reg_off| .{ self.register_manager.lockReg(reg_off.reg), null },
|
|
else => @splat(null),
|
|
};
|
|
defer for (mat_lhs_locks) |mat_lhs_lock| if (mat_lhs_lock) |lock| self.register_manager.unlockReg(lock);
|
|
const mat_rhs_mcv = mat_rhs_mcv: switch (rhs_mcv) {
|
|
.register => |rhs_reg| switch (rhs_reg.class()) {
|
|
else => rhs_mcv,
|
|
.sse => {
|
|
const mat_rhs_mcv: MCValue = .{
|
|
.register_pair = try self.register_manager.allocRegs(2, @splat(null), abi.RegisterClass.gp),
|
|
};
|
|
try self.genCopy(dst_ty, mat_rhs_mcv, rhs_mcv, .{});
|
|
break :mat_rhs_mcv mat_rhs_mcv;
|
|
},
|
|
},
|
|
.load_symbol => {
|
|
// TODO clean this up!
|
|
const addr_reg = try self.copyToTmpRegister(.usize, rhs_mcv.address());
|
|
break :mat_rhs_mcv MCValue{ .indirect = .{ .reg = addr_reg } };
|
|
},
|
|
else => rhs_mcv,
|
|
};
|
|
const mat_rhs_locks: [2]?RegisterLock = switch (mat_rhs_mcv) {
|
|
.register_pair => |mat_rhs_regs| self.register_manager.lockRegs(2, mat_rhs_regs),
|
|
.indirect => |reg_off| .{ self.register_manager.lockReg(reg_off.reg), null },
|
|
else => @splat(null),
|
|
};
|
|
defer for (mat_rhs_locks) |mat_rhs_lock| if (mat_rhs_lock) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
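// 128x128 -> 128 with overflow detection, decomposed into 64-bit halves:
// result = a0*b0 + ((a0*b1 + a1*b0) << 64). It overflows when both high
// halves are non-zero (a1*b1 != 0), when a cross multiply overflows 64 bits,
// or when adding a cross product into the high word carries out.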
if (mat_lhs_mcv.isBase()) try self.asmRegisterMemory(
|
|
.{ ._, .mov },
|
|
.rax,
|
|
try mat_lhs_mcv.mem(self, .{ .size = .qword }),
|
|
) else try self.asmRegisterRegister(
|
|
.{ ._, .mov },
|
|
.rax,
|
|
mat_lhs_mcv.register_pair[0],
|
|
);
|
|
if (mat_rhs_mcv.isBase()) try self.asmRegisterMemory(
|
|
.{ ._, .mov },
|
|
tmp_regs[0],
|
|
try mat_rhs_mcv.address().offset(8).deref().mem(self, .{ .size = .qword }),
|
|
) else try self.asmRegisterRegister(
|
|
.{ ._, .mov },
|
|
tmp_regs[0],
|
|
mat_rhs_mcv.register_pair[1],
|
|
);
|
|
try self.asmRegisterRegister(.{ ._, .@"test" }, tmp_regs[0], tmp_regs[0]);
|
|
try self.asmSetccRegister(.nz, tmp_regs[1].to8());
|
|
try self.asmRegisterRegister(.{ .i_, .mul }, tmp_regs[0], .rax);
|
|
try self.asmSetccRegister(.o, tmp_regs[2].to8());
|
|
if (mat_rhs_mcv.isBase())
|
|
try self.asmMemory(.{ ._, .mul }, try mat_rhs_mcv.mem(self, .{ .size = .qword }))
|
|
else
|
|
try self.asmRegister(.{ ._, .mul }, mat_rhs_mcv.register_pair[0]);
|
|
try self.asmRegisterRegister(.{ ._, .add }, .rdx, tmp_regs[0]);
|
|
try self.asmSetccRegister(.c, tmp_regs[3].to8());
|
|
try self.asmRegisterRegister(.{ ._, .@"or" }, tmp_regs[2].to8(), tmp_regs[3].to8());
|
|
if (mat_lhs_mcv.isBase()) try self.asmRegisterMemory(
|
|
.{ ._, .mov },
|
|
tmp_regs[0],
|
|
try mat_lhs_mcv.address().offset(8).deref().mem(self, .{ .size = .qword }),
|
|
) else try self.asmRegisterRegister(
|
|
.{ ._, .mov },
|
|
tmp_regs[0],
|
|
mat_lhs_mcv.register_pair[1],
|
|
);
|
|
try self.asmRegisterRegister(.{ ._, .@"test" }, tmp_regs[0], tmp_regs[0]);
|
|
try self.asmSetccRegister(.nz, tmp_regs[3].to8());
|
|
try self.asmRegisterRegister(
|
|
.{ ._, .@"and" },
|
|
tmp_regs[1].to8(),
|
|
tmp_regs[3].to8(),
|
|
);
|
|
try self.asmRegisterRegister(.{ ._, .@"or" }, tmp_regs[1].to8(), tmp_regs[2].to8());
|
|
if (mat_rhs_mcv.isBase()) try self.asmRegisterMemory(
|
|
.{ .i_, .mul },
|
|
tmp_regs[0],
|
|
try mat_rhs_mcv.mem(self, .{ .size = .qword }),
|
|
) else try self.asmRegisterRegister(
|
|
.{ .i_, .mul },
|
|
tmp_regs[0],
|
|
mat_rhs_mcv.register_pair[0],
|
|
);
|
|
try self.asmSetccRegister(.o, tmp_regs[2].to8());
|
|
try self.asmRegisterRegister(.{ ._, .@"or" }, tmp_regs[1].to8(), tmp_regs[2].to8());
|
|
try self.asmRegisterRegister(.{ ._, .add }, .rdx, tmp_regs[0]);
|
|
try self.asmSetccRegister(.c, tmp_regs[2].to8());
|
|
try self.asmRegisterRegister(.{ ._, .@"or" }, tmp_regs[1].to8(), tmp_regs[2].to8());
|
|
|
|
const dst_mcv = try self.allocRegOrMem(inst, false);
|
|
try self.genSetMem(
|
|
.{ .frame = dst_mcv.load_frame.index },
|
|
@intCast(tuple_ty.structFieldOffset(0, zcu)),
|
|
tuple_ty.fieldType(0, zcu),
|
|
.{ .register_pair = .{ .rax, .rdx } },
|
|
.{},
|
|
);
|
|
try self.genSetMem(
|
|
.{ .frame = dst_mcv.load_frame.index },
|
|
@intCast(tuple_ty.structFieldOffset(1, zcu)),
|
|
tuple_ty.fieldType(1, zcu),
|
|
.{ .register = tmp_regs[1] },
|
|
.{},
|
|
);
|
|
break :result dst_mcv;
|
|
},
|
|
};
|
|
|
|
try self.spillEflagsIfOccupied();
|
|
try self.spillRegisters(&.{ .rax, .rcx, .rdx, .rdi, .rsi });
|
|
const reg_locks = self.register_manager.lockRegsAssumeUnused(5, .{ .rax, .rcx, .rdx, .rdi, .rsi });
|
|
defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
const cc: Condition = switch (dst_info.signedness) {
|
|
.unsigned => .c,
|
|
.signed => .o,
|
|
};
|
|
|
|
const lhs = try self.resolveInst(bin_op.lhs);
|
|
const rhs = try self.resolveInst(bin_op.rhs);
|
|
|
|
const extra_bits = if (dst_info.bits <= 64)
|
|
self.regExtraBits(dst_ty)
|
|
else
|
|
dst_info.bits % 64;
|
|
const partial_mcv = try self.genMulDivBinOp(.mul, null, dst_ty, src_ty, lhs, rhs);
|
|
|
|
switch (partial_mcv) {
|
|
.register => |reg| if (extra_bits == 0) {
|
|
self.eflags_inst = inst;
|
|
break :result .{ .register_overflow = .{ .reg = reg, .eflags = cc } };
|
|
} else {
|
|
const frame_index = try self.allocFrameIndex(.initSpill(tuple_ty, zcu));
|
|
try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc);
|
|
break :result .{ .load_frame = .{ .index = frame_index } };
|
|
},
|
|
else => {
|
|
// For now, this is the only supported multiply that doesn't fit in a register.
|
|
if (dst_info.bits > 128 or src_bits != 64)
|
|
return self.fail("TODO implement airWithOverflow from {} to {}", .{
|
|
src_ty.fmt(pt), dst_ty.fmt(pt),
|
|
});
|
|
|
|
const frame_index = try self.allocFrameIndex(.initSpill(tuple_ty, zcu));
|
|
if (dst_info.bits >= lhs_active_bits + rhs_active_bits) {
|
|
try self.genSetMem(
|
|
.{ .frame = frame_index },
|
|
@intCast(tuple_ty.structFieldOffset(0, zcu)),
|
|
tuple_ty.fieldType(0, zcu),
|
|
partial_mcv,
|
|
.{},
|
|
);
|
|
try self.genSetMem(
|
|
.{ .frame = frame_index },
|
|
@intCast(tuple_ty.structFieldOffset(1, zcu)),
|
|
tuple_ty.fieldType(1, zcu),
|
|
.{ .immediate = 0 }, // cc being set is impossible
|
|
.{},
|
|
);
|
|
} else try self.genSetFrameTruncatedOverflowCompare(
|
|
tuple_ty,
|
|
frame_index,
|
|
partial_mcv,
|
|
null,
|
|
);
|
|
break :result .{ .load_frame = .{ .index = frame_index } };
|
|
},
|
|
}
|
|
},
|
|
else => unreachable,
|
|
};
|
|
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
|
|
}
|
|
|
|
/// Generates signed or unsigned integer multiplication/division.
|
|
/// Clobbers .rax and .rdx registers.
|
|
/// Quotient is saved in .rax and remainder in .rdx.
|
|
fn genIntMulDivOpMir(self: *CodeGen, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue, rhs: MCValue) !void {
|
|
const pt = self.pt;
|
|
const abi_size: u32 = @intCast(ty.abiSize(pt.zcu));
|
|
const bit_size: u32 = @intCast(self.regBitSize(ty));
|
|
if (abi_size > 8) {
|
|
return self.fail("TODO implement genIntMulDivOpMir for ABI size larger than 8", .{});
|
|
}
|
|
|
|
try self.genSetReg(.rax, ty, lhs, .{});
|
|
switch (tag[1]) {
|
|
else => unreachable,
|
|
.mul => {},
|
|
.div => switch (tag[0]) {
// unsigned division: zero the high half of the dividend (ah, or
// (e|r)dx; `xor edx, edx` also clears the upper 32 bits of rdx).
._ => {
|
|
const hi_reg: Register =
|
|
switch (bit_size) {
|
|
8 => .ah,
|
|
16, 32, 64 => .edx,
|
|
else => unreachable,
|
|
};
|
|
try self.asmRegisterRegister(.{ ._, .xor }, hi_reg, hi_reg);
|
|
},
|
|
// signed division: sign-extend the dividend into the high half.
.i_ => try self.asmOpOnly(.{ ._, switch (bit_size) {
|
|
8 => .cbw,
|
|
16 => .cwd,
|
|
32 => .cdq,
|
|
64 => .cqo,
|
|
else => unreachable,
|
|
} }),
|
|
else => unreachable,
|
|
},
|
|
}
|
|
|
|
const mat_rhs: MCValue = switch (rhs) {
|
|
.register, .indirect, .load_frame => rhs,
|
|
else => .{ .register = try self.copyToTmpRegister(ty, rhs) },
|
|
};
|
|
switch (mat_rhs) {
|
|
.register => |reg| try self.asmRegister(tag, registerAlias(reg, abi_size)),
|
|
.memory, .indirect, .load_frame => try self.asmMemory(
|
|
tag,
|
|
try mat_rhs.mem(self, .{ .size = .fromSize(abi_size) }),
|
|
),
|
|
else => unreachable,
|
|
}
|
|
// 8-bit div leaves the remainder in ah; move it to dl so the remainder is always in .rdx.
if (tag[1] == .div and bit_size == 8) try self.asmRegisterRegister(.{ ._, .mov }, .dl, .ah);
|
|
}
|
|
|
|
/// Always returns a register.
|
|
/// Clobbers .rax and .rdx registers.
|
|
fn genInlineIntDivFloor(self: *CodeGen, ty: Type, lhs: MCValue, rhs: MCValue) !MCValue {
|
|
const pt = self.pt;
|
|
const zcu = pt.zcu;
|
|
const abi_size: u32 = @intCast(ty.abiSize(zcu));
|
|
const int_info = ty.intInfo(zcu);
|
|
const dividend = switch (lhs) {
|
|
.register => |reg| reg,
|
|
else => try self.copyToTmpRegister(ty, lhs),
|
|
};
|
|
const dividend_lock = self.register_manager.lockReg(dividend);
|
|
defer if (dividend_lock) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
const divisor = switch (rhs) {
|
|
.register => |reg| reg,
|
|
else => try self.copyToTmpRegister(ty, rhs),
|
|
};
|
|
const divisor_lock = self.register_manager.lockReg(divisor);
|
|
defer if (divisor_lock) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
try self.genIntMulDivOpMir(
|
|
switch (int_info.signedness) {
|
|
.signed => .{ .i_, .div },
|
|
.unsigned => .{ ._, .div },
|
|
},
|
|
ty,
|
|
.{ .register = dividend },
|
|
.{ .register = divisor },
|
|
);
|
|
|
|
// `div`/`idiv` left the truncated quotient in rax and the remainder in rdx.
// Compute the floor adjustment: (dividend ^ divisor) >> (bits - 1) is all-ones
// exactly when the operands have opposite signs.
try self.asmRegisterRegister(
.{ ._, .xor },
registerAlias(divisor, abi_size),
registerAlias(dividend, abi_size),
);
try self.asmRegisterImmediate(
.{ ._r, .sa },
registerAlias(divisor, abi_size),
.u(int_info.bits - 1),
);
// If the remainder is zero the division was exact, so no adjustment is needed.
try self.asmRegisterRegister(
.{ ._, .@"test" },
registerAlias(.rdx, abi_size),
registerAlias(.rdx, abi_size),
);
try self.asmCmovccRegisterRegister(
.z,
registerAlias(divisor, @max(abi_size, 2)),
registerAlias(.rdx, @max(abi_size, 2)),
);
// quotient + (-1 when signs differ and the remainder is non-zero) == @divFloor.
try self.genBinOpMir(.{ ._, .add }, ty, .{ .register = divisor }, .{ .register = .rax });
return MCValue{ .register = divisor };
|
|
}
|
|
|
|
fn airShlShrBinOp(self: *CodeGen, inst: Air.Inst.Index) !void {
|
|
const pt = self.pt;
|
|
const zcu = pt.zcu;
|
|
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
|
|
|
|
const air_tags = self.air.instructions.items(.tag);
|
|
const tag = air_tags[@intFromEnum(inst)];
|
|
const lhs_ty = self.typeOf(bin_op.lhs);
|
|
const rhs_ty = self.typeOf(bin_op.rhs);
|
|
const result: MCValue = result: {
|
|
switch (lhs_ty.zigTypeTag(zcu)) {
|
|
.int => {
|
|
try self.spillRegisters(&.{.rcx});
|
|
try self.register_manager.getKnownReg(.rcx, null);
|
|
const lhs_mcv = try self.resolveInst(bin_op.lhs);
|
|
const rhs_mcv = try self.resolveInst(bin_op.rhs);
|
|
|
|
const dst_mcv = try self.genShiftBinOp(tag, inst, lhs_mcv, rhs_mcv, lhs_ty, rhs_ty);
|
|
switch (tag) {
|
|
.shr, .shr_exact, .shl_exact => {},
|
|
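// shl can shift set bits into the unused high bits of a non-power-of-two
// integer, so truncate the top limb of the result back to the type's width.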
.shl => switch (dst_mcv) {
|
|
.register => |dst_reg| try self.truncateRegister(lhs_ty, dst_reg),
|
|
.register_pair => |dst_regs| try self.truncateRegister(lhs_ty, dst_regs[1]),
|
|
.load_frame => |frame_addr| {
|
|
const tmp_reg =
|
|
try self.register_manager.allocReg(null, abi.RegisterClass.gp);
|
|
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
|
|
defer self.register_manager.unlockReg(tmp_lock);
|
|
|
|
const lhs_bits: u31 = @intCast(lhs_ty.bitSize(zcu));
|
|
const tmp_ty: Type = if (lhs_bits > 64) .usize else lhs_ty;
|
|
const off = frame_addr.off + (lhs_bits - 1) / 64 * 8;
|
|
try self.genSetReg(
|
|
tmp_reg,
|
|
tmp_ty,
|
|
.{ .load_frame = .{ .index = frame_addr.index, .off = off } },
|
|
.{},
|
|
);
|
|
try self.truncateRegister(lhs_ty, tmp_reg);
|
|
try self.genSetMem(
|
|
.{ .frame = frame_addr.index },
|
|
off,
|
|
tmp_ty,
|
|
.{ .register = tmp_reg },
|
|
.{},
|
|
);
|
|
},
|
|
else => {},
|
|
},
|
|
else => unreachable,
|
|
}
|
|
break :result dst_mcv;
|
|
},
|
|
            .vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
                .int => if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.childType(zcu).intInfo(zcu).bits) {
                    else => null,
                    16 => switch (lhs_ty.vectorLen(zcu)) {
                        else => null,
                        1...8 => switch (tag) {
                            else => unreachable,
                            .shr, .shr_exact => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                                .signed => if (self.hasFeature(.avx))
                                    .{ .vp_w, .sra }
                                else
                                    .{ .p_w, .sra },
                                .unsigned => if (self.hasFeature(.avx))
                                    .{ .vp_w, .srl }
                                else
                                    .{ .p_w, .srl },
                            },
                            .shl, .shl_exact => if (self.hasFeature(.avx))
                                .{ .vp_w, .sll }
                            else
                                .{ .p_w, .sll },
                        },
                        9...16 => switch (tag) {
                            else => unreachable,
                            .shr, .shr_exact => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                                .signed => if (self.hasFeature(.avx2)) .{ .vp_w, .sra } else null,
                                .unsigned => if (self.hasFeature(.avx2)) .{ .vp_w, .srl } else null,
                            },
                            .shl, .shl_exact => if (self.hasFeature(.avx2)) .{ .vp_w, .sll } else null,
                        },
                    },
                    32 => switch (lhs_ty.vectorLen(zcu)) {
                        else => null,
                        1...4 => switch (tag) {
                            else => unreachable,
                            .shr, .shr_exact => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                                .signed => if (self.hasFeature(.avx))
                                    .{ .vp_d, .sra }
                                else
                                    .{ .p_d, .sra },
                                .unsigned => if (self.hasFeature(.avx))
                                    .{ .vp_d, .srl }
                                else
                                    .{ .p_d, .srl },
                            },
                            .shl, .shl_exact => if (self.hasFeature(.avx))
                                .{ .vp_d, .sll }
                            else
                                .{ .p_d, .sll },
                        },
                        5...8 => switch (tag) {
                            else => unreachable,
                            .shr, .shr_exact => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                                .signed => if (self.hasFeature(.avx2)) .{ .vp_d, .sra } else null,
                                .unsigned => if (self.hasFeature(.avx2)) .{ .vp_d, .srl } else null,
                            },
                            .shl, .shl_exact => if (self.hasFeature(.avx2)) .{ .vp_d, .sll } else null,
                        },
                    },
                    64 => switch (lhs_ty.vectorLen(zcu)) {
                        else => null,
                        1...2 => switch (tag) {
                            else => unreachable,
                            .shr, .shr_exact => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                                .signed => if (self.hasFeature(.avx))
                                    .{ .vp_q, .sra }
                                else
                                    .{ .p_q, .sra },
                                .unsigned => if (self.hasFeature(.avx))
                                    .{ .vp_q, .srl }
                                else
                                    .{ .p_q, .srl },
                            },
                            .shl, .shl_exact => if (self.hasFeature(.avx))
                                .{ .vp_q, .sll }
                            else
                                .{ .p_q, .sll },
                        },
                        3...4 => switch (tag) {
                            else => unreachable,
                            .shr, .shr_exact => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                                .signed => if (self.hasFeature(.avx2)) .{ .vp_q, .sra } else null,
                                .unsigned => if (self.hasFeature(.avx2)) .{ .vp_q, .srl } else null,
                            },
                            .shl, .shl_exact => if (self.hasFeature(.avx2)) .{ .vp_q, .sll } else null,
                        },
                    },
                })) |mir_tag| if (try self.air.value(bin_op.rhs, pt)) |rhs_val| {
                    switch (zcu.intern_pool.indexToKey(rhs_val.toIntern())) {
                        .aggregate => |rhs_aggregate| switch (rhs_aggregate.storage) {
                            .repeated_elem => |rhs_elem| {
                                const abi_size: u32 = @intCast(lhs_ty.abiSize(zcu));

                                const lhs_mcv = try self.resolveInst(bin_op.lhs);
                                const dst_reg, const lhs_reg = if (lhs_mcv.isRegister() and
                                    self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv))
                                    .{lhs_mcv.getReg().?} ** 2
                                else if (lhs_mcv.isRegister() and self.hasFeature(.avx)) .{
                                    try self.register_manager.allocReg(inst, abi.RegisterClass.sse),
                                    lhs_mcv.getReg().?,
                                } else .{(try self.copyToRegisterWithInstTracking(
                                    inst,
                                    lhs_ty,
                                    lhs_mcv,
                                )).register} ** 2;
                                const reg_locks =
                                    self.register_manager.lockRegs(2, .{ dst_reg, lhs_reg });
                                defer for (reg_locks) |reg_lock| if (reg_lock) |lock|
                                    self.register_manager.unlockReg(lock);

                                const shift_imm: Immediate =
                                    .u(@intCast(Value.fromInterned(rhs_elem).toUnsignedInt(zcu)));
                                if (self.hasFeature(.avx)) try self.asmRegisterRegisterImmediate(
                                    mir_tag,
                                    registerAlias(dst_reg, abi_size),
                                    registerAlias(lhs_reg, abi_size),
                                    shift_imm,
                                ) else {
                                    assert(dst_reg.id() == lhs_reg.id());
                                    try self.asmRegisterImmediate(
                                        mir_tag,
                                        registerAlias(dst_reg, abi_size),
                                        shift_imm,
                                    );
                                }
                                break :result .{ .register = dst_reg };
                            },
                            else => {},
                        },
                        else => {},
                    }
                } else if (bin_op.rhs.toIndex()) |rhs_inst| switch (air_tags[@intFromEnum(rhs_inst)]) {
                    .splat => {
                        const abi_size: u32 = @intCast(lhs_ty.abiSize(zcu));

                        const lhs_mcv = try self.resolveInst(bin_op.lhs);
                        const dst_reg, const lhs_reg = if (lhs_mcv.isRegister() and
                            self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv))
                            .{lhs_mcv.getReg().?} ** 2
                        else if (lhs_mcv.isRegister() and self.hasFeature(.avx)) .{
                            try self.register_manager.allocReg(inst, abi.RegisterClass.sse),
                            lhs_mcv.getReg().?,
                        } else .{(try self.copyToRegisterWithInstTracking(
                            inst,
                            lhs_ty,
                            lhs_mcv,
                        )).register} ** 2;
                        const reg_locks = self.register_manager.lockRegs(2, .{ dst_reg, lhs_reg });
                        defer for (reg_locks) |reg_lock| if (reg_lock) |lock|
                            self.register_manager.unlockReg(lock);

                        const shift_reg =
                            try self.copyToTmpRegister(rhs_ty, .{ .air_ref = bin_op.rhs });
                        const shift_lock = self.register_manager.lockRegAssumeUnused(shift_reg);
                        defer self.register_manager.unlockReg(shift_lock);

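                        // Packed shifts read their count from the low 64 bits
                        // of the xmm operand, so mask the splatted count
                        // vector down to its first element before shifting.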
                        const mask_ty = try pt.vectorType(.{ .len = 16, .child = .u8_type });
                        const mask_mcv = try self.genTypedValue(.fromInterned(try pt.intern(.{ .aggregate = .{
                            .ty = mask_ty.toIntern(),
                            .storage = .{ .elems = &([1]InternPool.Index{
                                (try rhs_ty.childType(zcu).maxIntScalar(pt, .u8)).toIntern(),
                            } ++ [1]InternPool.Index{
                                (try pt.intValue(.u8, 0)).toIntern(),
                            } ** 15) },
                        } })));
                        const mask_addr_reg = try self.copyToTmpRegister(.usize, mask_mcv.address());
                        const mask_addr_lock = self.register_manager.lockRegAssumeUnused(mask_addr_reg);
                        defer self.register_manager.unlockReg(mask_addr_lock);

                        if (self.hasFeature(.avx)) {
                            try self.asmRegisterRegisterMemory(
                                .{ .vp_, .@"and" },
                                shift_reg.to128(),
                                shift_reg.to128(),
                                .{
                                    .base = .{ .reg = mask_addr_reg },
                                    .mod = .{ .rm = .{ .size = .xword } },
                                },
                            );
                            try self.asmRegisterRegisterRegister(
                                mir_tag,
                                registerAlias(dst_reg, abi_size),
                                registerAlias(lhs_reg, abi_size),
                                shift_reg.to128(),
                            );
                        } else {
                            try self.asmRegisterMemory(
                                .{ .p_, .@"and" },
                                shift_reg.to128(),
                                .{
                                    .base = .{ .reg = mask_addr_reg },
                                    .mod = .{ .rm = .{ .size = .xword } },
                                },
                            );
                            assert(dst_reg.id() == lhs_reg.id());
                            try self.asmRegisterRegister(
                                mir_tag,
                                registerAlias(dst_reg, abi_size),
                                shift_reg.to128(),
                            );
                        }
                        break :result .{ .register = dst_reg };
                    },
                    else => {},
                },
                else => {},
            },
            else => {},
        }
        return self.fail("TODO implement airShlShrBinOp for {}", .{lhs_ty.fmt(pt)});
    };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airShlSat(self: *CodeGen, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
    _ = bin_op;
    return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch});
    //return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airOptionalPayload(self: *CodeGen, inst: Air.Inst.Index) !void {
    const zcu = self.pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const result: MCValue = result: {
        const pl_ty = self.typeOfIndex(inst);
        if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;

        const opt_mcv = try self.resolveInst(ty_op.operand);
        if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv)) {
            const pl_mcv: MCValue = switch (opt_mcv) {
                .register_overflow => |ro| pl: {
                    self.eflags_inst = null; // actually stop tracking the overflow part
                    break :pl .{ .register = ro.reg };
                },
                else => opt_mcv,
            };
            switch (pl_mcv) {
                .register => |pl_reg| try self.truncateRegister(pl_ty, pl_reg),
                else => {},
            }
            break :result pl_mcv;
        }

        const pl_mcv = try self.allocRegOrMem(inst, true);
        try self.genCopy(pl_ty, pl_mcv, switch (opt_mcv) {
            else => opt_mcv,
            .register_overflow => |ro| .{ .register = ro.reg },
        }, .{});
        break :result pl_mcv;
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

fn airOptionalPayloadPtr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;

    const dst_ty = self.typeOfIndex(inst);
    const opt_mcv = try self.resolveInst(ty_op.operand);

    const dst_mcv = if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv))
        opt_mcv
    else
        try self.copyToRegisterWithInstTracking(inst, dst_ty, opt_mcv);
    return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}

fn airOptionalPayloadPtrSet(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const result = result: {
        const dst_ty = self.typeOfIndex(inst);
        const src_ty = self.typeOf(ty_op.operand);
        const opt_ty = src_ty.childType(zcu);
        const src_mcv = try self.resolveInst(ty_op.operand);

        if (opt_ty.optionalReprIsPayload(zcu)) {
            break :result if (self.liveness.isUnused(inst))
                .unreach
            else if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
                src_mcv
            else
                try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
        }

        const dst_mcv: MCValue = if (src_mcv.isRegister() and
            self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
            src_mcv
        else if (self.liveness.isUnused(inst))
            .{ .register = try self.copyToTmpRegister(dst_ty, src_mcv) }
        else
            try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);

        const pl_ty = dst_ty.childType(zcu);
        const pl_abi_size: i32 = @intCast(pl_ty.abiSize(zcu));
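        // Mark the optional as non-null by writing 1 to the `has_value` byte,
        // which is stored immediately after the payload.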
        try self.genSetMem(
            .{ .reg = dst_mcv.getReg().? },
            pl_abi_size,
            .bool,
            .{ .immediate = 1 },
            .{},
        );
        break :result if (self.liveness.isUnused(inst)) .unreach else dst_mcv;
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

fn airUnwrapErrUnionErr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const err_union_ty = self.typeOf(ty_op.operand);
    const err_ty = err_union_ty.errorUnionSet(zcu);
    const payload_ty = err_union_ty.errorUnionPayload(zcu);
    const operand = try self.resolveInst(ty_op.operand);

    const result: MCValue = result: {
        if (err_ty.errorSetIsEmpty(zcu)) {
            break :result MCValue{ .immediate = 0 };
        }

        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
            break :result operand;
        }

        const err_off = codegen.errUnionErrorOffset(payload_ty, zcu);
        switch (operand) {
            .register => |reg| {
                // TODO reuse operand
                const eu_lock = self.register_manager.lockReg(reg);
                defer if (eu_lock) |lock| self.register_manager.unlockReg(lock);

                const result = try self.copyToRegisterWithInstTracking(inst, err_union_ty, operand);
                if (err_off > 0) try self.genShiftBinOpMir(
                    .{ ._r, .sh },
                    err_union_ty,
                    result,
                    .u8,
                    .{ .immediate = @as(u6, @intCast(err_off * 8)) },
                ) else try self.truncateRegister(.anyerror, result.register);
                break :result result;
            },
            .load_frame => |frame_addr| break :result .{ .load_frame = .{
                .index = frame_addr.index,
                .off = frame_addr.off + @as(i32, @intCast(err_off)),
            } },
            else => return self.fail("TODO implement unwrap_err_err for {}", .{operand}),
        }
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

fn airUnwrapErrUnionPayload(self: *CodeGen, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const operand_ty = self.typeOf(ty_op.operand);
    const operand = try self.resolveInst(ty_op.operand);
    const result = try self.genUnwrapErrUnionPayloadMir(inst, operand_ty, operand);
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

// *(E!T) -> E
fn airUnwrapErrUnionErrPtr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;

    const src_ty = self.typeOf(ty_op.operand);
    const src_mcv = try self.resolveInst(ty_op.operand);
    const src_reg = switch (src_mcv) {
        .register => |reg| reg,
        else => try self.copyToTmpRegister(src_ty, src_mcv),
    };
    const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
    defer self.register_manager.unlockReg(src_lock);

    const dst_reg = try self.register_manager.allocReg(inst, abi.RegisterClass.gp);
    const dst_mcv = MCValue{ .register = dst_reg };
    const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
    defer self.register_manager.unlockReg(dst_lock);

    const eu_ty = src_ty.childType(zcu);
    const pl_ty = eu_ty.errorUnionPayload(zcu);
    const err_ty = eu_ty.errorUnionSet(zcu);
    const err_off: i32 = @intCast(codegen.errUnionErrorOffset(pl_ty, zcu));
    const err_abi_size: u32 = @intCast(err_ty.abiSize(zcu));
    try self.asmRegisterMemory(
        .{ ._, .mov },
        registerAlias(dst_reg, err_abi_size),
        .{
            .base = .{ .reg = src_reg },
            .mod = .{ .rm = .{
                .size = .fromSize(err_abi_size),
                .disp = err_off,
            } },
        },
    );

    return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}

// *(E!T) -> *T
fn airUnwrapErrUnionPayloadPtr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const operand_ty = self.typeOf(ty_op.operand);
    const operand = try self.resolveInst(ty_op.operand);
    const result = try self.genUnwrapErrUnionPayloadPtrMir(inst, operand_ty, operand);
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

fn airErrUnionPayloadPtrSet(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const result: MCValue = result: {
        const src_ty = self.typeOf(ty_op.operand);
        const src_mcv = try self.resolveInst(ty_op.operand);
        const src_reg = switch (src_mcv) {
            .register => |reg| reg,
            else => try self.copyToTmpRegister(src_ty, src_mcv),
        };
        const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
        defer self.register_manager.unlockReg(src_lock);

        const eu_ty = src_ty.childType(zcu);
        const pl_ty = eu_ty.errorUnionPayload(zcu);
        const err_ty = eu_ty.errorUnionSet(zcu);
        const err_off: i32 = @intCast(codegen.errUnionErrorOffset(pl_ty, zcu));
        const err_abi_size: u32 = @intCast(err_ty.abiSize(zcu));
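        // Clear the error field to zero, marking the union as holding a payload.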
        try self.asmMemoryImmediate(
            .{ ._, .mov },
            .{
                .base = .{ .reg = src_reg },
                .mod = .{ .rm = .{
                    .size = .fromSize(err_abi_size),
                    .disp = err_off,
                } },
            },
            .u(0),
        );

        if (self.liveness.isUnused(inst)) break :result .unreach;

        const dst_ty = self.typeOfIndex(inst);
        const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
            src_reg
        else
            try self.register_manager.allocReg(inst, abi.RegisterClass.gp);
        const dst_lock = self.register_manager.lockReg(dst_reg);
        defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);

        const pl_off: i32 = @intCast(codegen.errUnionPayloadOffset(pl_ty, zcu));
        const dst_abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
        try self.asmRegisterMemory(
            .{ ._, .lea },
            registerAlias(dst_reg, dst_abi_size),
            .{
                .base = .{ .reg = src_reg },
                .mod = .{ .rm = .{ .size = .qword, .disp = pl_off } },
            },
        );
        break :result .{ .register = dst_reg };
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

fn genUnwrapErrUnionPayloadMir(
    self: *CodeGen,
    maybe_inst: ?Air.Inst.Index,
    err_union_ty: Type,
    err_union: MCValue,
) !MCValue {
    const pt = self.pt;
    const zcu = pt.zcu;
    const payload_ty = err_union_ty.errorUnionPayload(zcu);

    const result: MCValue = result: {
        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;

        const payload_off: u31 = @intCast(codegen.errUnionPayloadOffset(payload_ty, zcu));
        switch (err_union) {
            .load_frame => |frame_addr| break :result .{ .load_frame = .{
                .index = frame_addr.index,
                .off = frame_addr.off + payload_off,
            } },
            .register => |reg| {
                // TODO reuse operand
                const eu_lock = self.register_manager.lockReg(reg);
                defer if (eu_lock) |lock| self.register_manager.unlockReg(lock);

                const payload_in_gp = self.regSetForType(payload_ty).supersetOf(abi.RegisterClass.gp);
                const result_mcv: MCValue = if (payload_in_gp and maybe_inst != null)
                    try self.copyToRegisterWithInstTracking(maybe_inst.?, err_union_ty, err_union)
                else
                    .{ .register = try self.copyToTmpRegister(err_union_ty, err_union) };
                if (payload_off > 0) try self.genShiftBinOpMir(
                    .{ ._r, .sh },
                    err_union_ty,
                    result_mcv,
                    .u8,
                    .{ .immediate = @as(u6, @intCast(payload_off * 8)) },
                ) else try self.truncateRegister(payload_ty, result_mcv.register);
                break :result if (payload_in_gp)
                    result_mcv
                else if (maybe_inst) |inst|
                    try self.copyToRegisterWithInstTracking(inst, payload_ty, result_mcv)
                else
                    .{ .register = try self.copyToTmpRegister(payload_ty, result_mcv) };
            },
            else => return self.fail("TODO implement genUnwrapErrUnionPayloadMir for {}", .{err_union}),
        }
    };

    return result;
}

fn genUnwrapErrUnionPayloadPtrMir(
    self: *CodeGen,
    maybe_inst: ?Air.Inst.Index,
    ptr_ty: Type,
    ptr_mcv: MCValue,
) !MCValue {
    const pt = self.pt;
    const zcu = pt.zcu;
    const err_union_ty = ptr_ty.childType(zcu);
    const payload_ty = err_union_ty.errorUnionPayload(zcu);

    const result: MCValue = result: {
        const payload_off = codegen.errUnionPayloadOffset(payload_ty, zcu);
        const result_mcv: MCValue = if (maybe_inst) |inst|
            try self.copyToRegisterWithInstTracking(inst, ptr_ty, ptr_mcv)
        else
            .{ .register = try self.copyToTmpRegister(ptr_ty, ptr_mcv) };
        try self.genBinOpMir(.{ ._, .add }, ptr_ty, result_mcv, .{ .immediate = payload_off });
        break :result result_mcv;
    };

    return result;
}

fn airWrapOptional(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const result: MCValue = result: {
        const pl_ty = self.typeOf(ty_op.operand);
        if (!pl_ty.hasRuntimeBits(zcu)) break :result .{ .immediate = 1 };

        const opt_ty = self.typeOfIndex(inst);
        const pl_mcv = try self.resolveInst(ty_op.operand);
        const same_repr = opt_ty.optionalReprIsPayload(zcu);
        if (same_repr and self.reuseOperand(inst, ty_op.operand, 0, pl_mcv)) break :result pl_mcv;

        const pl_lock: ?RegisterLock = switch (pl_mcv) {
            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
            else => null,
        };
        defer if (pl_lock) |lock| self.register_manager.unlockReg(lock);

        const opt_mcv = try self.allocRegOrMem(inst, true);
        try self.genCopy(pl_ty, opt_mcv, pl_mcv, .{});

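        // When the optional uses a separate null flag, set it: in a register
        // the flag is the bit just above the payload (`bts`); in memory it is
        // the byte just after the payload.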
        if (!same_repr) {
            const pl_abi_size: i32 = @intCast(pl_ty.abiSize(zcu));
            switch (opt_mcv) {
                else => unreachable,

                .register => |opt_reg| {
                    try self.truncateRegister(pl_ty, opt_reg);
                    try self.asmRegisterImmediate(
                        .{ ._s, .bt },
                        opt_reg,
                        .u(@as(u6, @intCast(pl_abi_size * 8))),
                    );
                },

                .load_frame => |frame_addr| try self.asmMemoryImmediate(
                    .{ ._, .mov },
                    .{
                        .base = .{ .frame = frame_addr.index },
                        .mod = .{ .rm = .{
                            .size = .byte,
                            .disp = frame_addr.off + pl_abi_size,
                        } },
                    },
                    .u(1),
                ),
            }
        }
        break :result opt_mcv;
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

/// T to E!T
fn airWrapErrUnionPayload(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;

    const eu_ty = ty_op.ty.toType();
    const pl_ty = eu_ty.errorUnionPayload(zcu);
    const err_ty = eu_ty.errorUnionSet(zcu);
    const operand = try self.resolveInst(ty_op.operand);

    const result: MCValue = result: {
        if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .{ .immediate = 0 };

        const frame_index = try self.allocFrameIndex(.initSpill(eu_ty, zcu));
        const pl_off: i32 = @intCast(codegen.errUnionPayloadOffset(pl_ty, zcu));
        const err_off: i32 = @intCast(codegen.errUnionErrorOffset(pl_ty, zcu));
        try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand, .{});
        try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 }, .{});
        break :result .{ .load_frame = .{ .index = frame_index } };
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

/// E to E!T
fn airWrapErrUnionErr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;

    const eu_ty = ty_op.ty.toType();
    const pl_ty = eu_ty.errorUnionPayload(zcu);
    const err_ty = eu_ty.errorUnionSet(zcu);

    const result: MCValue = result: {
        if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result try self.resolveInst(ty_op.operand);

        const frame_index = try self.allocFrameIndex(.initSpill(eu_ty, zcu));
        const pl_off: i32 = @intCast(codegen.errUnionPayloadOffset(pl_ty, zcu));
        const err_off: i32 = @intCast(codegen.errUnionErrorOffset(pl_ty, zcu));
        try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .undef, .{});
        const operand = try self.resolveInst(ty_op.operand);
        try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand, .{});
        break :result .{ .load_frame = .{ .index = frame_index } };
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

fn airSlicePtr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const result = result: {
        const src_mcv = try self.resolveInst(ty_op.operand);
        const ptr_mcv: MCValue = switch (src_mcv) {
            .register_pair => |regs| .{ .register = regs[0] },
            else => src_mcv,
        };
        if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) {
            switch (src_mcv) {
                .register_pair => |regs| try self.freeValue(.{ .register = regs[1] }),
                else => {},
            }
            break :result ptr_mcv;
        }

        const dst_mcv = try self.allocRegOrMem(inst, true);
        try self.genCopy(self.typeOfIndex(inst), dst_mcv, ptr_mcv, .{});
        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

fn airSliceLen(self: *CodeGen, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const result = result: {
        const src_mcv = try self.resolveInst(ty_op.operand);
        const len_mcv: MCValue = switch (src_mcv) {
            .register_pair => |regs| .{ .register = regs[1] },
            .load_frame => |frame_addr| .{ .load_frame = .{
                .index = frame_addr.index,
                .off = frame_addr.off + 8,
            } },
            else => return self.fail("TODO implement slice_len for {}", .{src_mcv}),
        };
        if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) {
            switch (src_mcv) {
                .register_pair => |regs| try self.freeValue(.{ .register = regs[0] }),
                .load_frame => {},
                else => unreachable,
            }
            break :result len_mcv;
        }

        const dst_mcv = try self.allocRegOrMem(inst, true);
        try self.genCopy(self.typeOfIndex(inst), dst_mcv, len_mcv, .{});
        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

fn airPtrSliceLenPtr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;

    const src_ty = self.typeOf(ty_op.operand);
    const src_mcv = try self.resolveInst(ty_op.operand);
    const src_reg = switch (src_mcv) {
        .register => |reg| reg,
        else => try self.copyToTmpRegister(src_ty, src_mcv),
    };
    const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
    defer self.register_manager.unlockReg(src_lock);

    const dst_ty = self.typeOfIndex(inst);
    const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
        src_reg
    else
        try self.register_manager.allocReg(inst, abi.RegisterClass.gp);
    const dst_mcv = MCValue{ .register = dst_reg };
    const dst_lock = self.register_manager.lockReg(dst_reg);
    defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);

    const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt.zcu));
    try self.asmRegisterMemory(
        .{ ._, .lea },
        registerAlias(dst_reg, dst_abi_size),
        .{
            .base = .{ .reg = src_reg },
            .mod = .{ .rm = .{ .size = .qword, .disp = 8 } },
        },
    );

    return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}

fn airPtrSlicePtrPtr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;

    const dst_ty = self.typeOfIndex(inst);
    const opt_mcv = try self.resolveInst(ty_op.operand);

    const dst_mcv = if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv))
        opt_mcv
    else
        try self.copyToRegisterWithInstTracking(inst, dst_ty, opt_mcv);
    return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}

fn elemOffset(self: *CodeGen, index_ty: Type, index: MCValue, elem_size: u64) !Register {
    const reg: Register = blk: {
        switch (index) {
            .immediate => |imm| {
                // Optimisation: if index MCValue is an immediate, we can multiply in `comptime`
                // and set the register directly to the scaled offset as an immediate.
                const reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
                try self.genSetReg(reg, index_ty, .{ .immediate = imm * elem_size }, .{});
                break :blk reg;
            },
            else => {
                const reg = try self.copyToTmpRegister(index_ty, index);
                try self.genIntMulComplexOpMir(index_ty, .{ .register = reg }, .{ .immediate = elem_size });
                break :blk reg;
            },
        }
    };
    return reg;
}

fn genSliceElemPtr(self: *CodeGen, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
    const pt = self.pt;
    const zcu = pt.zcu;
    const slice_ty = self.typeOf(lhs);
    const slice_mcv = try self.resolveInst(lhs);
    const slice_mcv_lock: ?RegisterLock = switch (slice_mcv) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (slice_mcv_lock) |lock| self.register_manager.unlockReg(lock);

    const elem_ty = slice_ty.childType(zcu);
    const elem_size = elem_ty.abiSize(zcu);
    const slice_ptr_field_type = slice_ty.slicePtrFieldType(zcu);

    const index_ty = self.typeOf(rhs);
    const index_mcv = try self.resolveInst(rhs);
    const index_mcv_lock: ?RegisterLock = switch (index_mcv) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (index_mcv_lock) |lock| self.register_manager.unlockReg(lock);

    const offset_reg = try self.elemOffset(index_ty, index_mcv, elem_size);
    const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
    defer self.register_manager.unlockReg(offset_reg_lock);

    const addr_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
    try self.genSetReg(addr_reg, .usize, slice_mcv, .{});
    // TODO we could allocate the result register here, but we would need to
    // keep the addr register (and potentially the offset register) alive.
    try self.genBinOpMir(.{ ._, .add }, slice_ptr_field_type, .{ .register = addr_reg }, .{
        .register = offset_reg,
    });
    return MCValue{ .register = addr_reg.to64() };
}

fn airSliceElemVal(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;

    const result: MCValue = result: {
        const elem_ty = self.typeOfIndex(inst);
        if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;

        const slice_ty = self.typeOf(bin_op.lhs);
        const slice_ptr_field_type = slice_ty.slicePtrFieldType(zcu);
        const elem_ptr = try self.genSliceElemPtr(bin_op.lhs, bin_op.rhs);
        const dst_mcv = try self.allocRegOrMem(inst, false);
        try self.load(dst_mcv, slice_ptr_field_type, elem_ptr);
        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airSliceElemPtr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
    const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
    const dst_mcv = try self.genSliceElemPtr(extra.lhs, extra.rhs);
    return self.finishAir(inst, dst_mcv, .{ extra.lhs, extra.rhs, .none });
}

fn airArrayElemVal(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;

    const result: MCValue = result: {
        const array_ty = self.typeOf(bin_op.lhs);
        const elem_ty = array_ty.childType(zcu);

        const array_mcv = try self.resolveInst(bin_op.lhs);
        const array_lock: ?RegisterLock = switch (array_mcv) {
            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
            else => null,
        };
        defer if (array_lock) |lock| self.register_manager.unlockReg(lock);

        const index_ty = self.typeOf(bin_op.rhs);
        const index_mcv = try self.resolveInst(bin_op.rhs);
        const index_lock = switch (index_mcv) {
            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
            else => null,
        };
        defer if (index_lock) |lock| self.register_manager.unlockReg(lock);

        try self.spillEflagsIfOccupied();
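        // Packed bool vectors are addressed bitwise: `bt` tests the selected
        // bit (in a register, or in memory for spilled, sse, or global
        // sources) and `setcc` below materializes the carry flag as the
        // result byte.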
        if (array_ty.isVector(zcu) and elem_ty.bitSize(zcu) == 1) {
            const array_mat_mcv: MCValue = switch (array_mcv) {
                else => array_mcv,
                .register_mask => .{ .register = try self.copyToTmpRegister(array_ty, array_mcv) },
            };
            const array_mat_lock = switch (array_mat_mcv) {
                .register => |reg| self.register_manager.lockReg(reg),
                else => null,
            };
            defer if (array_mat_lock) |lock| self.register_manager.unlockReg(lock);

            switch (array_mat_mcv) {
                .register => |array_reg| switch (array_reg.class()) {
                    .general_purpose => switch (index_mcv) {
                        .immediate => |index_imm| try self.asmRegisterImmediate(
                            .{ ._, .bt },
                            array_reg.to64(),
                            .u(index_imm),
                        ),
                        else => try self.asmRegisterRegister(
                            .{ ._, .bt },
                            array_reg.to64(),
                            switch (index_mcv) {
                                .register => |index_reg| index_reg,
                                else => try self.copyToTmpRegister(index_ty, index_mcv),
                            }.to64(),
                        ),
                    },
                    .sse => {
                        const frame_index = try self.allocFrameIndex(.initType(array_ty, zcu));
                        try self.genSetMem(.{ .frame = frame_index }, 0, array_ty, array_mat_mcv, .{});
                        switch (index_mcv) {
                            .immediate => |index_imm| try self.asmMemoryImmediate(
                                .{ ._, .bt },
                                .{
                                    .base = .{ .frame = frame_index },
                                    .mod = .{ .rm = .{
                                        .size = .qword,
                                        .disp = @intCast(index_imm / 64 * 8),
                                    } },
                                },
                                .u(index_imm % 64),
                            ),
                            else => try self.asmMemoryRegister(
                                .{ ._, .bt },
                                .{
                                    .base = .{ .frame = frame_index },
                                    .mod = .{ .rm = .{ .size = .qword } },
                                },
                                switch (index_mcv) {
                                    .register => |index_reg| index_reg,
                                    else => try self.copyToTmpRegister(index_ty, index_mcv),
                                }.to64(),
                            ),
                        }
                    },
                    else => unreachable,
                },
                .load_frame => switch (index_mcv) {
                    .immediate => |index_imm| try self.asmMemoryImmediate(
                        .{ ._, .bt },
                        try array_mat_mcv.mem(self, .{
                            .size = .qword,
                            .disp = @intCast(index_imm / 64 * 8),
                        }),
                        .u(index_imm % 64),
                    ),
                    else => try self.asmMemoryRegister(
                        .{ ._, .bt },
                        try array_mat_mcv.mem(self, .{ .size = .qword }),
                        switch (index_mcv) {
                            .register => |index_reg| index_reg,
                            else => try self.copyToTmpRegister(index_ty, index_mcv),
                        }.to64(),
                    ),
                },
                .memory, .load_symbol, .load_direct, .load_got, .load_tlv => switch (index_mcv) {
                    .immediate => |index_imm| try self.asmMemoryImmediate(
                        .{ ._, .bt },
                        .{
                            .base = .{
                                .reg = try self.copyToTmpRegister(.usize, array_mat_mcv.address()),
                            },
                            .mod = .{ .rm = .{
                                .size = .qword,
                                .disp = @intCast(index_imm / 64 * 8),
                            } },
                        },
                        .u(index_imm % 64),
                    ),
                    else => try self.asmMemoryRegister(
                        .{ ._, .bt },
                        .{
                            .base = .{
                                .reg = try self.copyToTmpRegister(.usize, array_mat_mcv.address()),
                            },
                            .mod = .{ .rm = .{ .size = .qword } },
                        },
                        switch (index_mcv) {
                            .register => |index_reg| index_reg,
                            else => try self.copyToTmpRegister(index_ty, index_mcv),
                        }.to64(),
                    ),
                },
                else => return self.fail("TODO airArrayElemVal for {s} of {}", .{
                    @tagName(array_mat_mcv), array_ty.fmt(pt),
                }),
            }

            const dst_reg = try self.register_manager.allocReg(inst, abi.RegisterClass.gp);
            try self.asmSetccRegister(.c, dst_reg.to8());
            break :result .{ .register = dst_reg };
        }

        const elem_abi_size = elem_ty.abiSize(zcu);
        const addr_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
        const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
        defer self.register_manager.unlockReg(addr_lock);

        switch (array_mcv) {
            .register => {
                const frame_index = try self.allocFrameIndex(.initType(array_ty, zcu));
                try self.genSetMem(.{ .frame = frame_index }, 0, array_ty, array_mcv, .{});
                try self.asmRegisterMemory(
                    .{ ._, .lea },
                    addr_reg,
                    .{ .base = .{ .frame = frame_index }, .mod = .{ .rm = .{ .size = .qword } } },
                );
            },
            .load_frame => |frame_addr| try self.asmRegisterMemory(
                .{ ._, .lea },
                addr_reg,
                .{
                    .base = .{ .frame = frame_addr.index },
                    .mod = .{ .rm = .{ .size = .qword, .disp = frame_addr.off } },
                },
            ),
            .memory,
            .load_symbol,
            .load_direct,
            .load_got,
            .load_tlv,
            => try self.genSetReg(addr_reg, .usize, array_mcv.address(), .{}),
            .lea_symbol, .lea_direct, .lea_tlv => unreachable,
            else => return self.fail("TODO airArrayElemVal_val for {s} of {}", .{
                @tagName(array_mcv), array_ty.fmt(pt),
            }),
        }

        const offset_reg = try self.elemOffset(index_ty, index_mcv, elem_abi_size);
        const offset_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
        defer self.register_manager.unlockReg(offset_lock);

        // TODO we could allocate the result register here, but we would need
        // to keep the addr register (and potentially the offset register) alive.
        const dst_mcv = try self.allocRegOrMem(inst, false);
        try self.genBinOpMir(.{ ._, .add }, .usize, .{ .register = addr_reg }, .{ .register = offset_reg });
        try self.genCopy(elem_ty, dst_mcv, .{ .indirect = .{ .reg = addr_reg } }, .{});
        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airPtrElemVal(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
    const ptr_ty = self.typeOf(bin_op.lhs);

    // This is identical to the `airPtrElemPtr` codegen, except that an
    // additional `mov` is needed at the end to load the actual value.

    const result = result: {
        const elem_ty = ptr_ty.elemType2(zcu);
        if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;

        const elem_abi_size: u32 = @intCast(elem_ty.abiSize(zcu));
        const index_ty = self.typeOf(bin_op.rhs);
        const index_mcv = try self.resolveInst(bin_op.rhs);
        const index_lock = switch (index_mcv) {
            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
            else => null,
        };
        defer if (index_lock) |lock| self.register_manager.unlockReg(lock);

        const offset_reg = try self.elemOffset(index_ty, index_mcv, elem_abi_size);
        const offset_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
        defer self.register_manager.unlockReg(offset_lock);

        const ptr_mcv = try self.resolveInst(bin_op.lhs);
        const elem_ptr_reg = if (ptr_mcv.isRegister() and self.liveness.operandDies(inst, 0))
            ptr_mcv.register
        else
            try self.copyToTmpRegister(ptr_ty, ptr_mcv);
        const elem_ptr_lock = self.register_manager.lockRegAssumeUnused(elem_ptr_reg);
        defer self.register_manager.unlockReg(elem_ptr_lock);
        try self.asmRegisterRegister(
            .{ ._, .add },
            elem_ptr_reg,
            offset_reg,
        );

        const dst_mcv = try self.allocRegOrMem(inst, true);
        const dst_lock = switch (dst_mcv) {
            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
            else => null,
        };
        defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
        try self.load(dst_mcv, ptr_ty, .{ .register = elem_ptr_reg });
        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airPtrElemPtr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
    const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;

    const result = result: {
        const elem_ptr_ty = self.typeOfIndex(inst);
        const base_ptr_ty = self.typeOf(extra.lhs);

        const base_ptr_mcv = try self.resolveInst(extra.lhs);
        const base_ptr_lock: ?RegisterLock = switch (base_ptr_mcv) {
            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
            else => null,
        };
        defer if (base_ptr_lock) |lock| self.register_manager.unlockReg(lock);

        if (elem_ptr_ty.ptrInfo(zcu).flags.vector_index != .none) {
            break :result if (self.reuseOperand(inst, extra.lhs, 0, base_ptr_mcv))
                base_ptr_mcv
            else
                try self.copyToRegisterWithInstTracking(inst, elem_ptr_ty, base_ptr_mcv);
        }

        const elem_ty = base_ptr_ty.elemType2(zcu);
        const elem_abi_size = elem_ty.abiSize(zcu);
        const index_ty = self.typeOf(extra.rhs);
        const index_mcv = try self.resolveInst(extra.rhs);
        const index_lock: ?RegisterLock = switch (index_mcv) {
            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
            else => null,
        };
        defer if (index_lock) |lock| self.register_manager.unlockReg(lock);

        const offset_reg = try self.elemOffset(index_ty, index_mcv, elem_abi_size);
        const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
        defer self.register_manager.unlockReg(offset_reg_lock);

        const dst_mcv = try self.copyToRegisterWithInstTracking(inst, elem_ptr_ty, base_ptr_mcv);
        try self.genBinOpMir(.{ ._, .add }, elem_ptr_ty, dst_mcv, .{ .register = offset_reg });

        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}

fn airSetUnionTag(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
    const ptr_union_ty = self.typeOf(bin_op.lhs);
    const union_ty = ptr_union_ty.childType(zcu);
    const tag_ty = self.typeOf(bin_op.rhs);
    const layout = union_ty.unionGetLayout(zcu);

    if (layout.tag_size == 0) {
        return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none });
    }

    const ptr = try self.resolveInst(bin_op.lhs);
    const ptr_lock: ?RegisterLock = switch (ptr) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);

    const tag = try self.resolveInst(bin_op.rhs);
    const tag_lock: ?RegisterLock = switch (tag) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (tag_lock) |lock| self.register_manager.unlockReg(lock);

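    // When the payload is aligned more strictly than the tag, the layout
    // places the tag after the payload, so advance the pointer by
    // `payload_size` before storing.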
    const adjusted_ptr: MCValue = if (layout.payload_size > 0 and layout.tag_align.compare(.lt, layout.payload_align)) blk: {
        // TODO reusing the operand
        const reg = try self.copyToTmpRegister(ptr_union_ty, ptr);
        try self.genBinOpMir(
            .{ ._, .add },
            ptr_union_ty,
            .{ .register = reg },
            .{ .immediate = layout.payload_size },
        );
        break :blk MCValue{ .register = reg };
    } else ptr;

    const ptr_tag_ty = try pt.adjustPtrTypeChild(ptr_union_ty, tag_ty);
    try self.store(ptr_tag_ty, adjusted_ptr, tag, .{});

    return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airGetUnionTag(self: *CodeGen, inst: Air.Inst.Index) !void {
    const zcu = self.pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;

    const tag_ty = self.typeOfIndex(inst);
    const union_ty = self.typeOf(ty_op.operand);
    const layout = union_ty.unionGetLayout(zcu);

    if (layout.tag_size == 0) {
        return self.finishAir(inst, .none, .{ ty_op.operand, .none, .none });
    }

    // TODO reusing the operand
    const operand = try self.resolveInst(ty_op.operand);
    const operand_lock: ?RegisterLock = switch (operand) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);

    const tag_abi_size = tag_ty.abiSize(zcu);
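    // Extract the tag: from a frame address the tag is loaded directly at
    // `tagOffset()`; from a register it is shifted down by the tag offset in
    // bits.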
    const dst_mcv: MCValue = blk: {
        switch (operand) {
            .load_frame => |frame_addr| {
                if (tag_abi_size <= 8) {
                    const off: i32 = @intCast(layout.tagOffset());
                    break :blk try self.copyToRegisterWithInstTracking(inst, tag_ty, .{
                        .load_frame = .{ .index = frame_addr.index, .off = frame_addr.off + off },
                    });
                }

                return self.fail(
                    "TODO implement get_union_tag for ABI larger than 8 bytes and operand {}",
                    .{operand},
                );
            },
            .register => {
                const shift: u6 = @intCast(layout.tagOffset() * 8);
                const result = try self.copyToRegisterWithInstTracking(inst, union_ty, operand);
                try self.genShiftBinOpMir(.{ ._r, .sh }, .usize, result, .u8, .{ .immediate = shift });
                break :blk MCValue{
                    .register = registerAlias(result.register, @intCast(layout.tag_size)),
                };
            },
            else => return self.fail("TODO implement get_union_tag for {}", .{operand}),
        }
    };

    return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}

fn airClz(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const result = result: {
        try self.spillEflagsIfOccupied();

        const dst_ty = self.typeOfIndex(inst);
        const src_ty = self.typeOf(ty_op.operand);
        if (src_ty.zigTypeTag(zcu) == .vector) return self.fail("TODO implement airClz for {}", .{
            src_ty.fmt(pt),
        });

        const src_mcv = try self.resolveInst(ty_op.operand);
        const mat_src_mcv = switch (src_mcv) {
            .immediate => MCValue{ .register = try self.copyToTmpRegister(src_ty, src_mcv) },
            else => src_mcv,
        };
        const mat_src_lock = switch (mat_src_mcv) {
            .register => |reg| self.register_manager.lockReg(reg),
            else => null,
        };
        defer if (mat_src_lock) |lock| self.register_manager.unlockReg(lock);

        const dst_reg = try self.register_manager.allocReg(inst, abi.RegisterClass.gp);
        const dst_mcv = MCValue{ .register = dst_reg };
        const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
        defer self.register_manager.unlockReg(dst_lock);

        const abi_size: u31 = @intCast(src_ty.abiSize(zcu));
        const src_bits: u31 = @intCast(src_ty.bitSize(zcu));
        const has_lzcnt = self.hasFeature(.lzcnt);
        if (src_bits > @as(u32, if (has_lzcnt) 128 else 64)) {
            const src_frame_addr: bits.FrameAddr = src_frame_addr: switch (src_mcv) {
                .load_frame => |src_frame_addr| src_frame_addr,
                else => {
                    const src_frame_addr = try self.allocFrameIndex(.initSpill(src_ty, zcu));
                    try self.genSetMem(.{ .frame = src_frame_addr }, 0, src_ty, src_mcv, .{});
                    break :src_frame_addr .{ .index = src_frame_addr };
                },
            };

            const limbs_len = std.math.divCeil(u32, abi_size, 8) catch unreachable;
            const extra_bits = abi_size * 8 - src_bits;

            const index_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
            const index_lock = self.register_manager.lockRegAssumeUnused(index_reg);
            defer self.register_manager.unlockReg(index_lock);

            try self.asmRegisterImmediate(.{ ._, .mov }, index_reg.to32(), .u(limbs_len));
            switch (extra_bits) {
                1 => try self.asmRegisterRegister(.{ ._, .xor }, dst_reg.to32(), dst_reg.to32()),
                else => try self.asmRegisterImmediate(
                    .{ ._, .mov },
                    dst_reg.to32(),
                    .s(@as(i32, extra_bits) - 1),
                ),
            }
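            // Scan 64-bit limbs from the most significant end: `bsr` on the
            // first non-zero limb yields its highest set bit, which is
            // combined with the limb index (scaled to bits via `shl 6`) and
            // subtracted from `src_bits - 1` to give the leading-zero count.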
            const loop: Mir.Inst.Index = @intCast(self.mir_instructions.len);
            try self.asmRegisterRegister(.{ ._, .@"test" }, index_reg.to32(), index_reg.to32());
            const zero = try self.asmJccReloc(.z, undefined);
            if (self.hasFeature(.slow_incdec)) {
                try self.asmRegisterImmediate(.{ ._, .sub }, index_reg.to32(), .u(1));
            } else {
                try self.asmRegister(.{ ._c, .de }, index_reg.to32());
            }
            try self.asmMemoryImmediate(.{ ._, .cmp }, .{
                .base = .{ .frame = src_frame_addr.index },
                .mod = .{ .rm = .{
                    .size = .qword,
                    .index = index_reg.to64(),
                    .scale = .@"8",
                    .disp = src_frame_addr.off,
                } },
            }, .u(0));
            _ = try self.asmJccReloc(.e, loop);
            try self.asmRegisterMemory(.{ ._r, .bs }, dst_reg.to64(), .{
                .base = .{ .frame = src_frame_addr.index },
                .mod = .{ .rm = .{
                    .size = .qword,
                    .index = index_reg.to64(),
                    .scale = .@"8",
                    .disp = src_frame_addr.off,
                } },
            });
            self.performReloc(zero);
            try self.asmRegisterImmediate(.{ ._l, .sh }, index_reg.to32(), .u(6));
            try self.asmRegisterRegister(.{ ._, .add }, index_reg.to32(), dst_reg.to32());
            try self.asmRegisterImmediate(.{ ._, .mov }, dst_reg.to32(), .u(src_bits - 1));
            try self.asmRegisterRegister(.{ ._, .sub }, dst_reg.to32(), index_reg.to32());
            break :result dst_mcv;
        }

        if (has_lzcnt) {
            if (src_bits <= 8) {
                const wide_reg = try self.copyToTmpRegister(src_ty, mat_src_mcv);
                try self.truncateRegister(src_ty, wide_reg);
                try self.genBinOpMir(.{ ._, .lzcnt }, .u32, dst_mcv, .{ .register = wide_reg });
                try self.genBinOpMir(
                    .{ ._, .sub },
                    dst_ty,
                    dst_mcv,
                    .{ .immediate = 32 - src_bits },
                );
            } else if (src_bits <= 64) {
                try self.genBinOpMir(.{ ._, .lzcnt }, src_ty, dst_mcv, mat_src_mcv);
                const extra_bits = self.regExtraBits(src_ty);
                if (extra_bits > 0) {
                    try self.genBinOpMir(.{ ._, .sub }, dst_ty, dst_mcv, .{ .immediate = extra_bits });
                }
            } else {
                assert(src_bits <= 128);
                const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
                const tmp_mcv = MCValue{ .register = tmp_reg };
                const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
                defer self.register_manager.unlockReg(tmp_lock);

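                // `lzcnt` sets CF when its source is all zeroes, so count the
                // low half plus 64, then count the high half; `cmovnc` keeps
                // the high-half count whenever the high limb was non-zero.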
                try self.genBinOpMir(.{ ._, .lzcnt }, .u64, dst_mcv, if (mat_src_mcv.isBase())
                    mat_src_mcv
                else
                    .{ .register = mat_src_mcv.register_pair[0] });
                try self.genBinOpMir(.{ ._, .add }, dst_ty, dst_mcv, .{ .immediate = 64 });
                try self.genBinOpMir(.{ ._, .lzcnt }, .u64, tmp_mcv, if (mat_src_mcv.isBase())
                    mat_src_mcv.address().offset(8).deref()
                else
                    .{ .register = mat_src_mcv.register_pair[1] });
                try self.asmCmovccRegisterRegister(.nc, dst_reg.to32(), tmp_reg.to32());

                if (src_bits < 128) try self.genBinOpMir(
                    .{ ._, .sub },
                    dst_ty,
                    dst_mcv,
                    .{ .immediate = 128 - src_bits },
                );
            }
            break :result dst_mcv;
        }

        assert(src_bits <= 64);
        const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(zcu))), 2);
        if (std.math.isPowerOfTwo(src_bits)) {
            const imm_reg = try self.copyToTmpRegister(dst_ty, .{
                .immediate = src_bits ^ (src_bits - 1),
            });
            const imm_lock = self.register_manager.lockRegAssumeUnused(imm_reg);
            defer self.register_manager.unlockReg(imm_lock);

            if (src_bits <= 8) {
                const wide_reg = try self.copyToTmpRegister(src_ty, mat_src_mcv);
                const wide_lock = self.register_manager.lockRegAssumeUnused(wide_reg);
                defer self.register_manager.unlockReg(wide_lock);

                try self.truncateRegister(src_ty, wide_reg);
                try self.genBinOpMir(.{ ._r, .bs }, .u16, dst_mcv, .{ .register = wide_reg });
            } else try self.genBinOpMir(.{ ._r, .bs }, src_ty, dst_mcv, mat_src_mcv);

            try self.asmCmovccRegisterRegister(
                .z,
                registerAlias(dst_reg, cmov_abi_size),
                registerAlias(imm_reg, cmov_abi_size),
            );

            try self.genBinOpMir(.{ ._, .xor }, dst_ty, dst_mcv, .{ .immediate = src_bits - 1 });
        } else {
            const imm_reg = try self.copyToTmpRegister(dst_ty, .{
                .immediate = @as(u64, std.math.maxInt(u64)) >> @intCast(64 - self.regBitSize(dst_ty)),
            });
            const imm_lock = self.register_manager.lockRegAssumeUnused(imm_reg);
            defer self.register_manager.unlockReg(imm_lock);

            const wide_reg = try self.copyToTmpRegister(src_ty, mat_src_mcv);
            const wide_lock = self.register_manager.lockRegAssumeUnused(wide_reg);
            defer self.register_manager.unlockReg(wide_lock);

            try self.truncateRegister(src_ty, wide_reg);
            try self.genBinOpMir(
                .{ ._r, .bs },
                if (src_bits <= 8) .u16 else src_ty,
                dst_mcv,
                .{ .register = wide_reg },
            );

            try self.asmCmovccRegisterRegister(
                .nz,
                registerAlias(imm_reg, cmov_abi_size),
                registerAlias(dst_reg, cmov_abi_size),
            );

            try self.genSetReg(dst_reg, dst_ty, .{ .immediate = src_bits - 1 }, .{});
            try self.genBinOpMir(.{ ._, .sub }, dst_ty, dst_mcv, .{ .register = imm_reg });
        }
        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

fn airCtz(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const result = result: {
        try self.spillEflagsIfOccupied();

        const dst_ty = self.typeOfIndex(inst);
        const src_ty = self.typeOf(ty_op.operand);
        if (src_ty.zigTypeTag(zcu) == .vector) return self.fail("TODO implement airCtz for {}", .{
            src_ty.fmt(pt),
        });

        const src_mcv = try self.resolveInst(ty_op.operand);
        const mat_src_mcv = switch (src_mcv) {
            .immediate => MCValue{ .register = try self.copyToTmpRegister(src_ty, src_mcv) },
            else => src_mcv,
        };
        const mat_src_lock = switch (mat_src_mcv) {
            .register => |reg| self.register_manager.lockReg(reg),
            else => null,
        };
        defer if (mat_src_lock) |lock| self.register_manager.unlockReg(lock);

        const dst_reg = try self.register_manager.allocReg(inst, abi.RegisterClass.gp);
        const dst_mcv = MCValue{ .register = dst_reg };
        const dst_lock = self.register_manager.lockReg(dst_reg);
        defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);

        const abi_size: u31 = @intCast(src_ty.abiSize(zcu));
        const src_bits: u31 = @intCast(src_ty.bitSize(zcu));
        const has_bmi = self.hasFeature(.bmi);
        if (src_bits > @as(u32, if (has_bmi) 128 else 64)) {
            const src_frame_addr: bits.FrameAddr = src_frame_addr: switch (src_mcv) {
                .load_frame => |src_frame_addr| src_frame_addr,
                else => {
                    const src_frame_addr = try self.allocFrameIndex(.initSpill(src_ty, zcu));
                    try self.genSetMem(.{ .frame = src_frame_addr }, 0, src_ty, src_mcv, .{});
                    break :src_frame_addr .{ .index = src_frame_addr };
                },
            };

            const limbs_len = std.math.divCeil(u32, abi_size, 8) catch unreachable;
            const extra_bits = abi_size * 8 - src_bits;

            const index_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
            const index_lock = self.register_manager.lockRegAssumeUnused(index_reg);
            defer self.register_manager.unlockReg(index_lock);

            try self.asmRegisterImmediate(.{ ._, .mov }, index_reg.to32(), .s(-1));
            switch (extra_bits) {
                0 => try self.asmRegisterRegister(.{ ._, .xor }, dst_reg.to32(), dst_reg.to32()),
                1 => try self.asmRegisterRegister(.{ ._, .mov }, dst_reg.to32(), dst_reg.to32()),
                else => try self.asmRegisterImmediate(
                    .{ ._, .mov },
                    dst_reg.to32(),
                    .s(-@as(i32, extra_bits)),
                ),
            }
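            // Scan 64-bit limbs from the least significant end: `bsf` on the
            // first non-zero limb yields its lowest set bit, which is added
            // to the limb index (scaled to bits via `shl 6`) to give the
            // trailing-zero count.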
            const loop: Mir.Inst.Index = @intCast(self.mir_instructions.len);
            if (self.hasFeature(.slow_incdec)) {
                try self.asmRegisterImmediate(.{ ._, .add }, index_reg.to32(), .u(1));
            } else {
                try self.asmRegister(.{ ._c, .in }, index_reg.to32());
            }
            try self.asmRegisterImmediate(.{ ._, .cmp }, index_reg.to32(), .u(limbs_len));
            const zero = try self.asmJccReloc(.nb, undefined);
            try self.asmMemoryImmediate(.{ ._, .cmp }, .{
                .base = .{ .frame = src_frame_addr.index },
                .mod = .{ .rm = .{
                    .size = .qword,
                    .index = index_reg.to64(),
                    .scale = .@"8",
                    .disp = src_frame_addr.off,
                } },
            }, .u(0));
            _ = try self.asmJccReloc(.e, loop);
            try self.asmRegisterMemory(.{ ._f, .bs }, dst_reg.to64(), .{
                .base = .{ .frame = src_frame_addr.index },
                .mod = .{ .rm = .{
                    .size = .qword,
                    .index = index_reg.to64(),
                    .scale = .@"8",
                    .disp = src_frame_addr.off,
                } },
            });
            self.performReloc(zero);
            try self.asmRegisterImmediate(.{ ._l, .sh }, index_reg.to32(), .u(6));
            try self.asmRegisterRegister(.{ ._, .add }, dst_reg.to32(), index_reg.to32());
            break :result dst_mcv;
        }

        const wide_ty: Type = if (src_bits <= 8) .u16 else src_ty;
        if (has_bmi) {
            if (src_bits <= 64) {
                const extra_bits = self.regExtraBits(src_ty) + @as(u64, if (src_bits <= 8) 8 else 0);
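                // `tzcnt` of zero returns the operand width, so force every
                // padding bit above `src_bits` to one; a zero input then
                // reports exactly `src_bits` trailing zeros.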
const masked_mcv = if (extra_bits > 0) masked: {
|
|
const tmp_mcv = tmp: {
|
|
if (src_mcv.isImmediate() or self.liveness.operandDies(inst, 0))
|
|
break :tmp src_mcv;
|
|
try self.genSetReg(dst_reg, wide_ty, src_mcv, .{});
|
|
break :tmp dst_mcv;
|
|
};
|
|
try self.genBinOpMir(
|
|
.{ ._, .@"or" },
|
|
wide_ty,
|
|
tmp_mcv,
|
|
.{ .immediate = (@as(u64, std.math.maxInt(u64)) >> @intCast(64 - extra_bits)) <<
|
|
@intCast(src_bits) },
|
|
);
|
|
break :masked tmp_mcv;
|
|
} else mat_src_mcv;
|
|
try self.genBinOpMir(.{ ._, .tzcnt }, wide_ty, dst_mcv, masked_mcv);
|
|
} else {
|
|
assert(src_bits <= 128);
|
|
const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
|
|
const tmp_mcv = MCValue{ .register = tmp_reg };
|
|
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
|
|
defer self.register_manager.unlockReg(tmp_lock);
|
|
|
|
const lo_mat_src_mcv: MCValue = if (mat_src_mcv.isBase())
|
|
mat_src_mcv
|
|
else
|
|
.{ .register = mat_src_mcv.register_pair[0] };
|
|
const hi_mat_src_mcv: MCValue = if (mat_src_mcv.isBase())
|
|
mat_src_mcv.address().offset(8).deref()
|
|
else
|
|
.{ .register = mat_src_mcv.register_pair[1] };
|
|
const masked_mcv = if (src_bits < 128) masked: {
|
|
try self.genCopy(.u64, dst_mcv, hi_mat_src_mcv, .{});
|
|
try self.genBinOpMir(
|
|
.{ ._, .@"or" },
|
|
.u64,
|
|
dst_mcv,
|
|
.{ .immediate = @as(u64, std.math.maxInt(u64)) << @intCast(src_bits - 64) },
|
|
);
|
|
break :masked dst_mcv;
|
|
} else hi_mat_src_mcv;
|
|
try self.genBinOpMir(.{ ._, .tzcnt }, .u64, dst_mcv, masked_mcv);
|
|
try self.genBinOpMir(.{ ._, .add }, dst_ty, dst_mcv, .{ .immediate = 64 });
|
|
try self.genBinOpMir(.{ ._, .tzcnt }, .u64, tmp_mcv, lo_mat_src_mcv);
|
|
try self.asmCmovccRegisterRegister(.nc, dst_reg.to32(), tmp_reg.to32());
|
|
}
|
|
break :result dst_mcv;
|
|
}
|
|
|
|
assert(src_bits <= 64);
|
|
const width_reg = try self.copyToTmpRegister(dst_ty, .{ .immediate = src_bits });
|
|
const width_lock = self.register_manager.lockRegAssumeUnused(width_reg);
|
|
defer self.register_manager.unlockReg(width_lock);
|
|
|
|
if (src_bits <= 8 or !std.math.isPowerOfTwo(src_bits)) {
|
|
const wide_reg = try self.copyToTmpRegister(src_ty, mat_src_mcv);
|
|
const wide_lock = self.register_manager.lockRegAssumeUnused(wide_reg);
|
|
defer self.register_manager.unlockReg(wide_lock);
|
|
|
|
try self.truncateRegister(src_ty, wide_reg);
|
|
try self.genBinOpMir(.{ ._f, .bs }, wide_ty, dst_mcv, .{ .register = wide_reg });
|
|
} else try self.genBinOpMir(.{ ._f, .bs }, src_ty, dst_mcv, mat_src_mcv);
|
|
|
|
const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(zcu))), 2);
|
|
try self.asmCmovccRegisterRegister(
|
|
.z,
|
|
registerAlias(dst_reg, cmov_abi_size),
|
|
registerAlias(width_reg, cmov_abi_size),
|
|
);
|
|
break :result dst_mcv;
|
|
};
|
|
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
|
|
}
|
|
|
|
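/// Lowers the AIR `popcount` instruction. Operands up to 8 bytes go through
/// `genPopCount` directly; 9-16 byte integers count each 64-bit limb
/// separately and add the two counts. Vector and wider operands are not
/// implemented yet.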
fn airPopCount(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const result: MCValue = result: {
        try self.spillEflagsIfOccupied();

        const src_ty = self.typeOf(ty_op.operand);
        const src_abi_size: u32 = @intCast(src_ty.abiSize(zcu));
        if (src_ty.zigTypeTag(zcu) == .vector or src_abi_size > 16)
            return self.fail("TODO implement airPopCount for {}", .{src_ty.fmt(pt)});
        const src_mcv = try self.resolveInst(ty_op.operand);

        const mat_src_mcv = switch (src_mcv) {
            .immediate => MCValue{ .register = try self.copyToTmpRegister(src_ty, src_mcv) },
            else => src_mcv,
        };
        const mat_src_lock = switch (mat_src_mcv) {
            .register => |reg| self.register_manager.lockReg(reg),
            else => null,
        };
        defer if (mat_src_lock) |lock| self.register_manager.unlockReg(lock);

        if (src_abi_size <= 8) {
            const dst_contains_src =
                src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv);
            const dst_reg = if (dst_contains_src)
                src_mcv.getReg().?
            else
                try self.register_manager.allocReg(inst, abi.RegisterClass.gp);
            const dst_lock = self.register_manager.lockReg(dst_reg);
            defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);

            try self.genPopCount(dst_reg, src_ty, mat_src_mcv, dst_contains_src);
            break :result .{ .register = dst_reg };
        }

        assert(src_abi_size > 8 and src_abi_size <= 16);
        const tmp_regs = try self.register_manager.allocRegs(2, .{ inst, null }, abi.RegisterClass.gp);
        const tmp_locks = self.register_manager.lockRegsAssumeUnused(2, tmp_regs);
        defer for (tmp_locks) |lock| self.register_manager.unlockReg(lock);

        try self.genPopCount(tmp_regs[0], .usize, if (mat_src_mcv.isBase())
            mat_src_mcv
        else
            .{ .register = mat_src_mcv.register_pair[0] }, false);
        const src_info = src_ty.intInfo(zcu);
        const hi_ty = try pt.intType(src_info.signedness, (src_info.bits - 1) % 64 + 1);
        try self.genPopCount(tmp_regs[1], hi_ty, if (mat_src_mcv.isBase())
            mat_src_mcv.address().offset(8).deref()
        else
            .{ .register = mat_src_mcv.register_pair[1] }, false);
        try self.asmRegisterRegister(.{ ._, .add }, tmp_regs[0].to8(), tmp_regs[1].to8());
        break :result .{ .register = tmp_regs[0] };
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

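/// Emits a population count of `src_mcv` into `dst_reg`. When the `popcnt`
/// feature is available a single instruction suffices; otherwise this falls
/// back to the classic SWAR (SIMD-within-a-register) reduction: 2-bit pair
/// counts, then 4-bit counts, then a `0x01...01` multiply that sums all the
/// byte counts into the top byte, which is shifted down at the end.
///
/// Worked example on one byte, x = 0b1101_0110 (5 set bits):
///   t1 = x - ((x >> 1) & 0x55)            = 0b1001_0101 (pair counts 2,1,1,1)
///   t2 = (t1 & 0x33) + ((t1 >> 2) & 0x33) = 0b0011_0010 (nibble counts 3,2)
///   t3 = (t2 + (t2 >> 4)) & 0x0f          = 5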
fn genPopCount(
    self: *CodeGen,
    dst_reg: Register,
    src_ty: Type,
    src_mcv: MCValue,
    dst_contains_src: bool,
) !void {
    const pt = self.pt;

    const src_abi_size: u32 = @intCast(src_ty.abiSize(pt.zcu));
    if (self.hasFeature(.popcnt)) return self.genBinOpMir(
        .{ ._, .popcnt },
        if (src_abi_size > 1) src_ty else .u32,
        .{ .register = dst_reg },
        if (src_abi_size > 1) src_mcv else src: {
            if (!dst_contains_src) try self.genSetReg(dst_reg, src_ty, src_mcv, .{});
            try self.truncateRegister(try src_ty.toUnsigned(pt), dst_reg);
            break :src .{ .register = dst_reg };
        },
    );

    const mask = @as(u64, std.math.maxInt(u64)) >> @intCast(64 - src_abi_size * 8);
    // Repeating bit patterns sized to the operand: 0x55...55, 0x33...33,
    // 0x0f...0f, and 0x01...01 respectively.
    const imm_0_1: Immediate = .u(mask / 0b1_1);
    const imm_00_11: Immediate = .u(mask / 0b01_01);
    const imm_0000_1111: Immediate = .u(mask / 0b0001_0001);
    const imm_0000_0001: Immediate = .u(mask / 0b1111_1111);

    const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
    const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
    defer self.register_manager.unlockReg(tmp_lock);

    const dst = registerAlias(dst_reg, src_abi_size);
    const tmp = registerAlias(tmp_reg, src_abi_size);
    const imm = if (src_abi_size > 4)
        try self.register_manager.allocReg(null, abi.RegisterClass.gp)
    else
        undefined;

    if (!dst_contains_src) try self.genSetReg(dst, src_ty, src_mcv, .{});
    // dst = operand
    try self.asmRegisterRegister(.{ ._, .mov }, tmp, dst);
    // tmp = operand
    try self.asmRegisterImmediate(.{ ._r, .sh }, tmp, .u(1));
    // tmp = operand >> 1
    if (src_abi_size > 4) {
        try self.asmRegisterImmediate(.{ ._, .mov }, imm, imm_0_1);
        try self.asmRegisterRegister(.{ ._, .@"and" }, tmp, imm);
    } else try self.asmRegisterImmediate(.{ ._, .@"and" }, tmp, imm_0_1);
    // tmp = (operand >> 1) & 0x55...55
    try self.asmRegisterRegister(.{ ._, .sub }, dst, tmp);
    // dst = temp1 = operand - ((operand >> 1) & 0x55...55)
    try self.asmRegisterRegister(.{ ._, .mov }, tmp, dst);
    // tmp = temp1
    try self.asmRegisterImmediate(.{ ._r, .sh }, dst, .u(2));
    // dst = temp1 >> 2
    if (src_abi_size > 4) {
        try self.asmRegisterImmediate(.{ ._, .mov }, imm, imm_00_11);
        try self.asmRegisterRegister(.{ ._, .@"and" }, tmp, imm);
        try self.asmRegisterRegister(.{ ._, .@"and" }, dst, imm);
    } else {
        try self.asmRegisterImmediate(.{ ._, .@"and" }, tmp, imm_00_11);
        try self.asmRegisterImmediate(.{ ._, .@"and" }, dst, imm_00_11);
    }
    // tmp = temp1 & 0x33...33
    // dst = (temp1 >> 2) & 0x33...33
    try self.asmRegisterRegister(.{ ._, .add }, tmp, dst);
    // tmp = temp2 = (temp1 & 0x33...33) + ((temp1 >> 2) & 0x33...33)
    try self.asmRegisterRegister(.{ ._, .mov }, dst, tmp);
    // dst = temp2
    try self.asmRegisterImmediate(.{ ._r, .sh }, tmp, .u(4));
    // tmp = temp2 >> 4
    try self.asmRegisterRegister(.{ ._, .add }, dst, tmp);
    // dst = temp2 + (temp2 >> 4)
    if (src_abi_size > 4) {
        try self.asmRegisterImmediate(.{ ._, .mov }, imm, imm_0000_1111);
        try self.asmRegisterImmediate(.{ ._, .mov }, tmp, imm_0000_0001);
        try self.asmRegisterRegister(.{ ._, .@"and" }, dst, imm);
        try self.asmRegisterRegister(.{ .i_, .mul }, dst, tmp);
    } else {
        try self.asmRegisterImmediate(.{ ._, .@"and" }, dst, imm_0000_1111);
        if (src_abi_size > 1) {
            try self.asmRegisterRegisterImmediate(.{ .i_, .mul }, dst, dst, imm_0000_0001);
        }
    }
    // dst = temp3 = (temp2 + (temp2 >> 4)) & 0x0f...0f
    // dst = temp3 * 0x01...01
    if (src_abi_size > 1) {
        try self.asmRegisterImmediate(.{ ._r, .sh }, dst, .u((src_abi_size - 1) * 8));
    }
    // dst = (temp3 * 0x01...01) >> (bits - 8)
}

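/// Emits a byte swap of `src_mcv` and returns the location of the result.
/// Single bytes pass through unchanged, 2-byte values use a rotate by 8,
/// 3-8 byte values use `bswap`, and 9-16 byte values `bswap` each 64-bit
/// limb and exchange the pair. Larger integers are swapped in a loop that
/// walks two indices inward from both ends of the destination frame (after
/// the operand has been copied into it), using `movbe` instead of
/// `mov`+`bswap` when available.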
fn genByteSwap(
    self: *CodeGen,
    inst: Air.Inst.Index,
    src_ty: Type,
    src_mcv: MCValue,
    mem_ok: bool,
) !MCValue {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const has_movbe = self.hasFeature(.movbe);

    if (src_ty.zigTypeTag(zcu) == .vector) return self.fail(
        "TODO implement genByteSwap for {}",
        .{src_ty.fmt(pt)},
    );

    const src_lock = switch (src_mcv) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (src_lock) |lock| self.register_manager.unlockReg(lock);

    const abi_size: u32 = @intCast(src_ty.abiSize(zcu));
    switch (abi_size) {
        0 => unreachable,
        1 => return if ((mem_ok or src_mcv.isRegister()) and
            self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
            src_mcv
        else
            try self.copyToRegisterWithInstTracking(inst, src_ty, src_mcv),
        2 => if ((mem_ok or src_mcv.isRegister()) and
            self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
        {
            try self.genBinOpMir(.{ ._l, .ro }, src_ty, src_mcv, .{ .immediate = 8 });
            return src_mcv;
        },
        3...8 => if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) {
            try self.genUnOpMir(.{ ._, .bswap }, src_ty, src_mcv);
            return src_mcv;
        },
        9...16 => {
            const mat_src_mcv: MCValue = mat_src_mcv: switch (src_mcv) {
                .register => {
                    const frame_index = try self.allocFrameIndex(.initSpill(src_ty, zcu));
                    try self.genSetMem(.{ .frame = frame_index }, 0, src_ty, src_mcv, .{});
                    break :mat_src_mcv .{ .load_frame = .{ .index = frame_index } };
                },
                .register_pair => |src_regs| if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) {
                    for (src_regs) |src_reg| try self.asmRegister(.{ ._, .bswap }, src_reg.to64());
                    return .{ .register_pair = .{ src_regs[1], src_regs[0] } };
                } else src_mcv,
                else => src_mcv,
            };

            const dst_regs =
                try self.register_manager.allocRegs(2, .{ inst, inst }, abi.RegisterClass.gp);
            const dst_locks = self.register_manager.lockRegsAssumeUnused(2, dst_regs);
            defer for (dst_locks) |lock| self.register_manager.unlockReg(lock);

            for (dst_regs, 0..) |dst_reg, limb_index| {
                if (mat_src_mcv.isBase()) {
                    try self.asmRegisterMemory(
                        .{ ._, if (has_movbe) .movbe else .mov },
                        dst_reg.to64(),
                        try mat_src_mcv.address().offset(@intCast(limb_index * 8)).deref().mem(self, .{ .size = .qword }),
                    );
                    if (!has_movbe) try self.asmRegister(.{ ._, .bswap }, dst_reg.to64());
                } else {
                    try self.asmRegisterRegister(
                        .{ ._, .mov },
                        dst_reg.to64(),
                        mat_src_mcv.register_pair[limb_index].to64(),
                    );
                    try self.asmRegister(.{ ._, .bswap }, dst_reg.to64());
                }
            }
            return .{ .register_pair = .{ dst_regs[1], dst_regs[0] } };
        },
        else => {
            const limbs_len = std.math.divCeil(u32, abi_size, 8) catch unreachable;

            const temp_regs =
                try self.register_manager.allocRegs(4, @splat(null), abi.RegisterClass.gp);
            const temp_locks = self.register_manager.lockRegsAssumeUnused(4, temp_regs);
            defer for (temp_locks) |lock| self.register_manager.unlockReg(lock);

            const dst_mcv = try self.allocRegOrMem(inst, false);
            // The loop below swaps the destination frame in place, so the
            // operand must be copied into it first.
            try self.genCopy(src_ty, dst_mcv, src_mcv, .{});
            try self.asmRegisterRegister(.{ ._, .xor }, temp_regs[0].to32(), temp_regs[0].to32());
            try self.asmRegisterImmediate(.{ ._, .mov }, temp_regs[1].to32(), .u(limbs_len - 1));

            const loop: Mir.Inst.Index = @intCast(self.mir_instructions.len);
            try self.asmRegisterMemory(
                .{ ._, if (has_movbe) .movbe else .mov },
                temp_regs[2].to64(),
                .{
                    .base = .{ .frame = dst_mcv.load_frame.index },
                    .mod = .{ .rm = .{
                        .size = .qword,
                        .index = temp_regs[0].to64(),
                        .scale = .@"8",
                        .disp = dst_mcv.load_frame.off,
                    } },
                },
            );
            try self.asmRegisterMemory(
                .{ ._, if (has_movbe) .movbe else .mov },
                temp_regs[3].to64(),
                .{
                    .base = .{ .frame = dst_mcv.load_frame.index },
                    .mod = .{ .rm = .{
                        .size = .qword,
                        .index = temp_regs[1].to64(),
                        .scale = .@"8",
                        .disp = dst_mcv.load_frame.off,
                    } },
                },
            );
            if (!has_movbe) {
                try self.asmRegister(.{ ._, .bswap }, temp_regs[2].to64());
                try self.asmRegister(.{ ._, .bswap }, temp_regs[3].to64());
            }
            try self.asmMemoryRegister(.{ ._, .mov }, .{
                .base = .{ .frame = dst_mcv.load_frame.index },
                .mod = .{ .rm = .{
                    .size = .qword,
                    .index = temp_regs[0].to64(),
                    .scale = .@"8",
                    .disp = dst_mcv.load_frame.off,
                } },
            }, temp_regs[3].to64());
            try self.asmMemoryRegister(.{ ._, .mov }, .{
                .base = .{ .frame = dst_mcv.load_frame.index },
                .mod = .{ .rm = .{
                    .size = .qword,
                    .index = temp_regs[1].to64(),
                    .scale = .@"8",
                    .disp = dst_mcv.load_frame.off,
                } },
            }, temp_regs[2].to64());
            if (self.hasFeature(.slow_incdec)) {
                try self.asmRegisterImmediate(.{ ._, .add }, temp_regs[0].to32(), .u(1));
                try self.asmRegisterImmediate(.{ ._, .sub }, temp_regs[1].to32(), .u(1));
            } else {
                try self.asmRegister(.{ ._c, .in }, temp_regs[0].to32());
                try self.asmRegister(.{ ._c, .de }, temp_regs[1].to32());
            }
            try self.asmRegisterRegister(.{ ._, .cmp }, temp_regs[0].to32(), temp_regs[1].to32());
            _ = try self.asmJccReloc(.be, loop);
            return dst_mcv;
        },
    }

    const dst_mcv: MCValue = if (mem_ok and has_movbe and src_mcv.isRegister())
        try self.allocRegOrMem(inst, true)
    else
        .{ .register = try self.register_manager.allocReg(inst, abi.RegisterClass.gp) };
    if (dst_mcv.getReg()) |dst_reg| {
        const dst_lock = self.register_manager.lockRegAssumeUnused(dst_mcv.register);
        defer self.register_manager.unlockReg(dst_lock);

        try self.genSetReg(dst_reg, src_ty, src_mcv, .{});
        switch (abi_size) {
            else => unreachable,
            2 => try self.genBinOpMir(.{ ._l, .ro }, src_ty, dst_mcv, .{ .immediate = 8 }),
            3...8 => try self.genUnOpMir(.{ ._, .bswap }, src_ty, dst_mcv),
        }
    } else try self.genBinOpMir(.{ ._, .movbe }, src_ty, dst_mcv, src_mcv);
    return dst_mcv;
}

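/// Lowers AIR `byte_swap`: swaps the bytes via `genByteSwap`, then shifts
/// the result right (arithmetically for signed integers) by the number of
/// ABI padding bits so integers whose bit size is not a multiple of 8 land
/// in the low bits.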
fn airByteSwap(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;

    const src_ty = self.typeOf(ty_op.operand);
    const src_bits: u32 = @intCast(src_ty.bitSize(zcu));
    const src_mcv = try self.resolveInst(ty_op.operand);

    const dst_mcv = try self.genByteSwap(inst, src_ty, src_mcv, true);
    try self.genShiftBinOpMir(
        .{ ._r, switch (if (src_ty.isAbiInt(zcu)) src_ty.intInfo(zcu).signedness else .unsigned) {
            .signed => .sa,
            .unsigned => .sh,
        } },
        src_ty,
        dst_mcv,
        if (src_bits > 256) .u16 else .u8,
        .{ .immediate = src_ty.abiSize(zcu) * 8 - src_bits },
    );
    return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}

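/// Lowers AIR `bit_reverse`. The bytes are first reversed via `genByteSwap`;
/// the bits within each limb are then reversed in three passes: adjacent
/// nibbles are swapped via shift/or, adjacent 2-bit groups via `lea` with
/// scale 4, and adjacent bits via `lea` with scale 2. Any ABI padding bits
/// are finally shifted out.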
fn airBitReverse(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;

    const src_ty = self.typeOf(ty_op.operand);
    const abi_size: u32 = @intCast(src_ty.abiSize(zcu));
    const bit_size: u32 = @intCast(src_ty.bitSize(zcu));
    const src_mcv = try self.resolveInst(ty_op.operand);

    const dst_mcv = try self.genByteSwap(inst, src_ty, src_mcv, false);
    const dst_locks: [2]?RegisterLock = switch (dst_mcv) {
        .register => |dst_reg| .{ self.register_manager.lockReg(dst_reg), null },
        .register_pair => |dst_regs| self.register_manager.lockRegs(2, dst_regs),
        else => unreachable,
    };
    defer for (dst_locks) |dst_lock| if (dst_lock) |lock| self.register_manager.unlockReg(lock);

    const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
    const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
    defer self.register_manager.unlockReg(tmp_lock);

    const limb_abi_size: u32 = @min(abi_size, 8);
    const tmp = registerAlias(tmp_reg, limb_abi_size);
    const imm = if (limb_abi_size > 4)
        try self.register_manager.allocReg(null, abi.RegisterClass.gp)
    else
        undefined;

    const mask = @as(u64, std.math.maxInt(u64)) >> @intCast(64 - limb_abi_size * 8);
    const imm_0000_1111: Immediate = .u(mask / 0b0001_0001);
    const imm_00_11: Immediate = .u(mask / 0b01_01);
    const imm_0_1: Immediate = .u(mask / 0b1_1);

    for (dst_mcv.getRegs()) |dst_reg| {
        const dst = registerAlias(dst_reg, limb_abi_size);

        // dst = temp1 = bswap(operand)
        try self.asmRegisterRegister(.{ ._, .mov }, tmp, dst);
        // tmp = temp1
        try self.asmRegisterImmediate(.{ ._r, .sh }, dst, .u(4));
        // dst = temp1 >> 4
        if (limb_abi_size > 4) {
            try self.asmRegisterImmediate(.{ ._, .mov }, imm, imm_0000_1111);
            try self.asmRegisterRegister(.{ ._, .@"and" }, tmp, imm);
            try self.asmRegisterRegister(.{ ._, .@"and" }, dst, imm);
        } else {
            try self.asmRegisterImmediate(.{ ._, .@"and" }, tmp, imm_0000_1111);
            try self.asmRegisterImmediate(.{ ._, .@"and" }, dst, imm_0000_1111);
        }
        // tmp = temp1 & 0x0F...0F
        // dst = (temp1 >> 4) & 0x0F...0F
        try self.asmRegisterImmediate(.{ ._l, .sh }, tmp, .u(4));
        // tmp = (temp1 & 0x0F...0F) << 4
        try self.asmRegisterRegister(.{ ._, .@"or" }, dst, tmp);
        // dst = temp2 = ((temp1 >> 4) & 0x0F...0F) | ((temp1 & 0x0F...0F) << 4)
        try self.asmRegisterRegister(.{ ._, .mov }, tmp, dst);
        // tmp = temp2
        try self.asmRegisterImmediate(.{ ._r, .sh }, dst, .u(2));
        // dst = temp2 >> 2
        if (limb_abi_size > 4) {
            try self.asmRegisterImmediate(.{ ._, .mov }, imm, imm_00_11);
            try self.asmRegisterRegister(.{ ._, .@"and" }, tmp, imm);
            try self.asmRegisterRegister(.{ ._, .@"and" }, dst, imm);
        } else {
            try self.asmRegisterImmediate(.{ ._, .@"and" }, tmp, imm_00_11);
            try self.asmRegisterImmediate(.{ ._, .@"and" }, dst, imm_00_11);
        }
        // tmp = temp2 & 0x33...33
        // dst = (temp2 >> 2) & 0x33...33
        try self.asmRegisterMemory(
            .{ ._, .lea },
            if (limb_abi_size > 4) tmp.to64() else tmp.to32(),
            .{
                .base = .{ .reg = dst.to64() },
                .mod = .{ .rm = .{
                    .size = .qword,
                    .index = tmp.to64(),
                    .scale = .@"4",
                } },
            },
        );
        // tmp = temp3 = ((temp2 >> 2) & 0x33...33) + ((temp2 & 0x33...33) << 2)
        try self.asmRegisterRegister(.{ ._, .mov }, dst, tmp);
        // dst = temp3
        try self.asmRegisterImmediate(.{ ._r, .sh }, tmp, .u(1));
        // tmp = temp3 >> 1
        if (limb_abi_size > 4) {
            try self.asmRegisterImmediate(.{ ._, .mov }, imm, imm_0_1);
            try self.asmRegisterRegister(.{ ._, .@"and" }, dst, imm);
            try self.asmRegisterRegister(.{ ._, .@"and" }, tmp, imm);
        } else {
            try self.asmRegisterImmediate(.{ ._, .@"and" }, dst, imm_0_1);
            try self.asmRegisterImmediate(.{ ._, .@"and" }, tmp, imm_0_1);
        }
        // dst = temp3 & 0x55...55
        // tmp = (temp3 >> 1) & 0x55...55
        try self.asmRegisterMemory(
            .{ ._, .lea },
            if (limb_abi_size > 4) dst.to64() else dst.to32(),
            .{
                .base = .{ .reg = tmp.to64() },
                .mod = .{ .rm = .{
                    .size = .qword,
                    .index = dst.to64(),
                    .scale = .@"2",
                } },
            },
        );
        // dst = ((temp3 >> 1) & 0x55...55) + ((temp3 & 0x55...55) << 1)
    }

    const extra_bits = abi_size * 8 - bit_size;
    const signedness: std.builtin.Signedness =
        if (src_ty.isAbiInt(zcu)) src_ty.intInfo(zcu).signedness else .unsigned;
    if (extra_bits > 0) try self.genShiftBinOpMir(switch (signedness) {
        .signed => .{ ._r, .sa },
        .unsigned => .{ ._r, .sh },
    }, src_ty, dst_mcv, .u8, .{ .immediate = extra_bits });

    return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}

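/// Implements float negation and absolute value by operating on the sign
/// bit directly: `neg` XORs the value with a splat of the minimum signed
/// integer pattern (sign bits set) and `abs` ANDs it with a splat of the
/// maximum signed integer pattern (sign bits clear). f80 scalars use the
/// x87 `fchs`/`fabs` instructions instead.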
fn floatSign(self: *CodeGen, inst: Air.Inst.Index, tag: Air.Inst.Tag, operand: Air.Inst.Ref, ty: Type) !void {
    const pt = self.pt;
    const zcu = pt.zcu;

    const result = result: {
        const scalar_bits = ty.scalarType(zcu).floatBits(self.target.*);
        if (scalar_bits == 80) {
            if (ty.zigTypeTag(zcu) != .float) return self.fail("TODO implement floatSign for {}", .{
                ty.fmt(pt),
            });

            const src_mcv = try self.resolveInst(operand);
            const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null;
            defer if (src_lock) |lock| self.register_manager.unlockReg(lock);

            const dst_mcv: MCValue = .{ .register = .st0 };
            if (!std.meta.eql(src_mcv, dst_mcv) or !self.reuseOperand(inst, operand, 0, src_mcv))
                try self.register_manager.getKnownReg(.st0, inst);

            try self.genCopy(ty, dst_mcv, src_mcv, .{});
            switch (tag) {
                .neg => try self.asmOpOnly(.{ .f_, .chs }),
                .abs => try self.asmOpOnly(.{ .f_, .abs }),
                else => unreachable,
            }
            break :result dst_mcv;
        }

        const abi_size: u32 = switch (ty.abiSize(zcu)) {
            1...16 => 16,
            17...32 => 32,
            else => return self.fail("TODO implement floatSign for {}", .{
                ty.fmt(pt),
            }),
        };

        const src_mcv = try self.resolveInst(operand);
        const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null;
        defer if (src_lock) |lock| self.register_manager.unlockReg(lock);

        const dst_mcv: MCValue = if (src_mcv.isRegister() and
            self.reuseOperand(inst, operand, 0, src_mcv))
            src_mcv
        else if (self.hasFeature(.avx))
            .{ .register = try self.register_manager.allocReg(inst, abi.RegisterClass.sse) }
        else
            try self.copyToRegisterWithInstTracking(inst, ty, src_mcv);
        const dst_reg = dst_mcv.getReg().?;
        const dst_lock = self.register_manager.lockReg(dst_reg);
        defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);

        const vec_ty = try pt.vectorType(.{
            .len = @divExact(abi_size * 8, scalar_bits),
            .child = (try pt.intType(.signed, scalar_bits)).ip_index,
        });

        const sign_mcv = try self.genTypedValue(switch (tag) {
            .neg => try vec_ty.minInt(pt, vec_ty),
            .abs => try vec_ty.maxInt(pt, vec_ty),
            else => unreachable,
        });
        const sign_mem: Memory = if (sign_mcv.isBase())
            try sign_mcv.mem(self, .{ .size = .fromSize(abi_size) })
        else
            .{
                .base = .{ .reg = try self.copyToTmpRegister(.usize, sign_mcv.address()) },
                .mod = .{ .rm = .{ .size = .fromSize(abi_size) } },
            };

        if (self.hasFeature(.avx)) try self.asmRegisterRegisterMemory(
            switch (scalar_bits) {
                16, 128 => if (abi_size <= 16 or self.hasFeature(.avx2)) switch (tag) {
                    .neg => .{ .vp_, .xor },
                    .abs => .{ .vp_, .@"and" },
                    else => unreachable,
                } else switch (tag) {
                    .neg => .{ .v_ps, .xor },
                    .abs => .{ .v_ps, .@"and" },
                    else => unreachable,
                },
                32 => switch (tag) {
                    .neg => .{ .v_ps, .xor },
                    .abs => .{ .v_ps, .@"and" },
                    else => unreachable,
                },
                64 => switch (tag) {
                    .neg => .{ .v_pd, .xor },
                    .abs => .{ .v_pd, .@"and" },
                    else => unreachable,
                },
                80 => return self.fail("TODO implement floatSign for {}", .{ty.fmt(pt)}),
                else => unreachable,
            },
            registerAlias(dst_reg, abi_size),
            registerAlias(if (src_mcv.isRegister())
                src_mcv.getReg().?
            else
                try self.copyToTmpRegister(ty, src_mcv), abi_size),
            sign_mem,
        ) else try self.asmRegisterMemory(
            switch (scalar_bits) {
                16, 128 => switch (tag) {
                    .neg => .{ .p_, .xor },
                    .abs => .{ .p_, .@"and" },
                    else => unreachable,
                },
                32 => switch (tag) {
                    .neg => .{ ._ps, .xor },
                    .abs => .{ ._ps, .@"and" },
                    else => unreachable,
                },
                64 => switch (tag) {
                    .neg => .{ ._pd, .xor },
                    .abs => .{ ._pd, .@"and" },
                    else => unreachable,
                },
                80 => return self.fail("TODO implement floatSign for {}", .{ty.fmt(pt)}),
                else => unreachable,
            },
            registerAlias(dst_reg, abi_size),
            sign_mem,
        );
        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ operand, .none, .none });
}

fn airFloatSign(self: *CodeGen, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
    const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
    const ty = self.typeOf(un_op);
    return self.floatSign(inst, tag, un_op, ty);
}

fn airRound(self: *CodeGen, inst: Air.Inst.Index, mode: bits.RoundMode) !void {
    const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
    const ty = self.typeOf(un_op);

    const result = result: {
        switch (try self.genRoundLibcall(ty, .{ .air_ref = un_op }, mode)) {
            .none => {},
            else => |dst_mcv| break :result dst_mcv,
        }

        const src_mcv = try self.resolveInst(un_op);
        const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv))
            src_mcv
        else
            try self.copyToRegisterWithInstTracking(inst, ty, src_mcv);
        const dst_reg = dst_mcv.getReg().?;
        const dst_lock = self.register_manager.lockReg(dst_reg);
        defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
        try self.genRound(ty, dst_reg, src_mcv, mode);
        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

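/// Returns the MIR tag of a hardware rounding instruction that covers `ty`,
/// or null when a libcall is required (no SSE4.1, an f16/f80/f128 scalar,
/// or a vector wider than the available feature set supports).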
fn getRoundTag(self: *CodeGen, ty: Type) ?Mir.Inst.FixedTag {
    const pt = self.pt;
    const zcu = pt.zcu;
    return if (self.hasFeature(.sse4_1)) switch (ty.zigTypeTag(zcu)) {
        .float => switch (ty.floatBits(self.target.*)) {
            32 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round },
            64 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round },
            16, 80, 128 => null,
            else => unreachable,
        },
        .vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
            .float => switch (ty.childType(zcu).floatBits(self.target.*)) {
                32 => switch (ty.vectorLen(zcu)) {
                    1 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round },
                    2...4 => if (self.hasFeature(.avx)) .{ .v_ps, .round } else .{ ._ps, .round },
                    5...8 => if (self.hasFeature(.avx)) .{ .v_ps, .round } else null,
                    else => null,
                },
                64 => switch (ty.vectorLen(zcu)) {
                    1 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round },
                    2 => if (self.hasFeature(.avx)) .{ .v_pd, .round } else .{ ._pd, .round },
                    3...4 => if (self.hasFeature(.avx)) .{ .v_pd, .round } else null,
                    else => null,
                },
                16, 80, 128 => null,
                else => unreachable,
            },
            else => null,
        },
        else => unreachable,
    } else null;
}

fn genRoundLibcall(self: *CodeGen, ty: Type, src_mcv: MCValue, mode: bits.RoundMode) !MCValue {
    const pt = self.pt;
    const zcu = pt.zcu;
    if (self.getRoundTag(ty)) |_| return .none;

    if (ty.zigTypeTag(zcu) != .float)
        return self.fail("TODO implement genRound for {}", .{ty.fmt(pt)});

    var callee_buf: ["__trunc?".len]u8 = undefined;
    return try self.genCall(.{ .lib = .{
        .return_type = ty.toIntern(),
        .param_types = &.{ty.toIntern()},
        .callee = std.fmt.bufPrint(&callee_buf, "{s}{s}{s}", .{
            floatLibcAbiPrefix(ty),
            switch (mode.mode) {
                .down => "floor",
                .up => "ceil",
                .zero => "trunc",
                else => unreachable,
            },
            floatLibcAbiSuffix(ty),
        }) catch unreachable,
    } }, &.{ty}, &.{src_mcv}, .{});
}

fn genRound(self: *CodeGen, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: bits.RoundMode) !void {
    const pt = self.pt;
    const mir_tag = self.getRoundTag(ty) orelse {
        const result = try self.genRoundLibcall(ty, src_mcv, mode);
        return self.genSetReg(dst_reg, ty, result, .{});
    };
    const abi_size: u32 = @intCast(ty.abiSize(pt.zcu));
    const dst_alias = registerAlias(dst_reg, abi_size);
    switch (mir_tag[0]) {
        .v_ss, .v_sd => if (src_mcv.isBase()) try self.asmRegisterRegisterMemoryImmediate(
            mir_tag,
            dst_alias,
            dst_alias,
            try src_mcv.mem(self, .{ .size = .fromSize(abi_size) }),
            mode.imm(),
        ) else try self.asmRegisterRegisterRegisterImmediate(
            mir_tag,
            dst_alias,
            dst_alias,
            registerAlias(if (src_mcv.isRegister())
                src_mcv.getReg().?
            else
                try self.copyToTmpRegister(ty, src_mcv), abi_size),
            mode.imm(),
        ),
        else => if (src_mcv.isBase()) try self.asmRegisterMemoryImmediate(
            mir_tag,
            dst_alias,
            try src_mcv.mem(self, .{ .size = .fromSize(abi_size) }),
            mode.imm(),
        ) else try self.asmRegisterRegisterImmediate(
            mir_tag,
            dst_alias,
            registerAlias(if (src_mcv.isRegister())
                src_mcv.getReg().?
            else
                try self.copyToTmpRegister(ty, src_mcv), abi_size),
            mode.imm(),
        ),
    }
}

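/// Lowers AIR `abs`. Integers of 1-8 bytes use `neg` plus `cmovl` to pick
/// the non-negative value; 9-16 byte integers use the sign-mask xor/sub
/// idiom; larger integers conditionally negate limb by limb, carrying the
/// borrow in a byte register. Floats defer to `floatSign`, and integer
/// vectors use `pabsb`/`pabsw`/`pabsd` when available.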
fn airAbs(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const ty = self.typeOf(ty_op.operand);

    const result: MCValue = result: {
        const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(zcu)) {
            else => null,
            .int => switch (ty.abiSize(zcu)) {
                0 => unreachable,
                1...8 => {
                    try self.spillEflagsIfOccupied();
                    const src_mcv = try self.resolveInst(ty_op.operand);
                    const dst_mcv = try self.copyToRegisterWithInstTracking(inst, ty, src_mcv);

                    try self.genUnOpMir(.{ ._, .neg }, ty, dst_mcv);

                    const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(zcu))), 2);
                    switch (src_mcv) {
                        .register => |val_reg| try self.asmCmovccRegisterRegister(
                            .l,
                            registerAlias(dst_mcv.register, cmov_abi_size),
                            registerAlias(val_reg, cmov_abi_size),
                        ),
                        .memory, .indirect, .load_frame => try self.asmCmovccRegisterMemory(
                            .l,
                            registerAlias(dst_mcv.register, cmov_abi_size),
                            try src_mcv.mem(self, .{ .size = .fromSize(cmov_abi_size) }),
                        ),
                        else => {
                            const val_reg = try self.copyToTmpRegister(ty, src_mcv);
                            try self.asmCmovccRegisterRegister(
                                .l,
                                registerAlias(dst_mcv.register, cmov_abi_size),
                                registerAlias(val_reg, cmov_abi_size),
                            );
                        },
                    }
                    break :result dst_mcv;
                },
                9...16 => {
                    try self.spillEflagsIfOccupied();
                    const src_mcv = try self.resolveInst(ty_op.operand);
                    const dst_mcv = if (src_mcv == .register_pair and
                        self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: {
                        const dst_regs = try self.register_manager.allocRegs(
                            2,
                            .{ inst, inst },
                            abi.RegisterClass.gp,
                        );
                        const dst_mcv: MCValue = .{ .register_pair = dst_regs };
                        const dst_locks = self.register_manager.lockRegsAssumeUnused(2, dst_regs);
                        defer for (dst_locks) |lock| self.register_manager.unlockReg(lock);

                        try self.genCopy(ty, dst_mcv, src_mcv, .{});
                        break :dst dst_mcv;
                    };
                    const dst_regs = dst_mcv.register_pair;
                    const dst_locks = self.register_manager.lockRegs(2, dst_regs);
                    defer for (dst_locks) |dst_lock| if (dst_lock) |lock|
                        self.register_manager.unlockReg(lock);

                    const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
                    const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
                    defer self.register_manager.unlockReg(tmp_lock);

                    try self.asmRegisterRegister(.{ ._, .mov }, tmp_reg, dst_regs[1]);
                    try self.asmRegisterImmediate(.{ ._r, .sa }, tmp_reg, .u(63));
                    try self.asmRegisterRegister(.{ ._, .xor }, dst_regs[0], tmp_reg);
                    try self.asmRegisterRegister(.{ ._, .xor }, dst_regs[1], tmp_reg);
                    try self.asmRegisterRegister(.{ ._, .sub }, dst_regs[0], tmp_reg);
                    try self.asmRegisterRegister(.{ ._, .sbb }, dst_regs[1], tmp_reg);

                    break :result dst_mcv;
                },
                else => {
                    const abi_size: u31 = @intCast(ty.abiSize(zcu));
                    const limb_len = std.math.divCeil(u31, abi_size, 8) catch unreachable;

                    const tmp_regs =
                        try self.register_manager.allocRegs(3, @splat(null), abi.RegisterClass.gp);
                    const tmp_locks = self.register_manager.lockRegsAssumeUnused(3, tmp_regs);
                    defer for (tmp_locks) |lock| self.register_manager.unlockReg(lock);

                    try self.spillEflagsIfOccupied();
                    const src_mcv = try self.resolveInst(ty_op.operand);
                    const dst_mcv = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
                        src_mcv
                    else dst: {
                        // The negation loop below works on the destination frame
                        // in place, so the operand must be copied into it first.
                        const new_mcv = try self.allocRegOrMem(inst, false);
                        try self.genCopy(ty, new_mcv, src_mcv, .{});
                        break :dst new_mcv;
                    };

                    try self.asmMemoryImmediate(
                        .{ ._, .cmp },
                        try dst_mcv.address().offset((limb_len - 1) * 8).deref().mem(self, .{ .size = .qword }),
                        .u(0),
                    );
                    const positive = try self.asmJccReloc(.ns, undefined);

                    try self.asmRegisterRegister(.{ ._, .xor }, tmp_regs[0].to32(), tmp_regs[0].to32());
                    try self.asmRegisterRegister(.{ ._, .xor }, tmp_regs[1].to8(), tmp_regs[1].to8());

                    const neg_loop: Mir.Inst.Index = @intCast(self.mir_instructions.len);
                    try self.asmRegisterRegister(.{ ._, .xor }, tmp_regs[2].to32(), tmp_regs[2].to32());
                    try self.asmRegisterImmediate(.{ ._r, .sh }, tmp_regs[1].to8(), .u(1));
                    try self.asmRegisterMemory(.{ ._, .sbb }, tmp_regs[2].to64(), .{
                        .base = .{ .frame = dst_mcv.load_frame.index },
                        .mod = .{ .rm = .{
                            .size = .qword,
                            .index = tmp_regs[0].to64(),
                            .scale = .@"8",
                            .disp = dst_mcv.load_frame.off,
                        } },
                    });
                    try self.asmSetccRegister(.c, tmp_regs[1].to8());
                    try self.asmMemoryRegister(.{ ._, .mov }, .{
                        .base = .{ .frame = dst_mcv.load_frame.index },
                        .mod = .{ .rm = .{
                            .size = .qword,
                            .index = tmp_regs[0].to64(),
                            .scale = .@"8",
                            .disp = dst_mcv.load_frame.off,
                        } },
                    }, tmp_regs[2].to64());

                    if (self.hasFeature(.slow_incdec)) {
                        try self.asmRegisterImmediate(.{ ._, .add }, tmp_regs[0].to32(), .u(1));
                    } else {
                        try self.asmRegister(.{ ._c, .in }, tmp_regs[0].to32());
                    }
                    try self.asmRegisterImmediate(.{ ._, .cmp }, tmp_regs[0].to32(), .u(limb_len));
                    _ = try self.asmJccReloc(.b, neg_loop);

                    self.performReloc(positive);
                    break :result dst_mcv;
                },
            },
            .float => return self.floatSign(inst, .abs, ty_op.operand, ty),
            .vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
                else => null,
                .int => switch (ty.childType(zcu).intInfo(zcu).bits) {
                    else => null,
                    8 => switch (ty.vectorLen(zcu)) {
                        else => null,
                        1...16 => if (self.hasFeature(.avx))
                            .{ .vp_b, .abs }
                        else if (self.hasFeature(.ssse3))
                            .{ .p_b, .abs }
                        else
                            null,
                        17...32 => if (self.hasFeature(.avx2)) .{ .vp_b, .abs } else null,
                    },
                    16 => switch (ty.vectorLen(zcu)) {
                        else => null,
                        1...8 => if (self.hasFeature(.avx))
                            .{ .vp_w, .abs }
                        else if (self.hasFeature(.ssse3))
                            .{ .p_w, .abs }
                        else
                            null,
                        9...16 => if (self.hasFeature(.avx2)) .{ .vp_w, .abs } else null,
                    },
                    32 => switch (ty.vectorLen(zcu)) {
                        else => null,
                        1...4 => if (self.hasFeature(.avx))
                            .{ .vp_d, .abs }
                        else if (self.hasFeature(.ssse3))
                            .{ .p_d, .abs }
                        else
                            null,
                        5...8 => if (self.hasFeature(.avx2)) .{ .vp_d, .abs } else null,
                    },
                },
                .float => return self.floatSign(inst, .abs, ty_op.operand, ty),
            },
        }) orelse return self.fail("TODO implement airAbs for {}", .{ty.fmt(pt)});

        const abi_size: u32 = @intCast(ty.abiSize(zcu));
        const src_mcv = try self.resolveInst(ty_op.operand);
        const dst_reg = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
            src_mcv.getReg().?
        else
            try self.register_manager.allocReg(inst, self.regSetForType(ty));
        const dst_alias = registerAlias(dst_reg, abi_size);
        if (src_mcv.isBase()) try self.asmRegisterMemory(
            mir_tag,
            dst_alias,
            try src_mcv.mem(self, .{ .size = self.memSize(ty) }),
        ) else try self.asmRegisterRegister(
            mir_tag,
            dst_alias,
            registerAlias(if (src_mcv.isRegister())
                src_mcv.getReg().?
            else
                try self.copyToTmpRegister(ty, src_mcv), abi_size),
        );
        break :result .{ .register = dst_reg };
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

fn airSqrt(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
    const ty = self.typeOf(un_op);
    const abi_size: u32 = @intCast(ty.abiSize(zcu));

    const result: MCValue = result: {
        switch (ty.zigTypeTag(zcu)) {
            .float => {
                const float_bits = ty.floatBits(self.target.*);
                if (switch (float_bits) {
                    16 => !self.hasFeature(.f16c),
                    32, 64 => false,
                    80, 128 => true,
                    else => unreachable,
                }) {
                    var callee_buf: ["__sqrt?".len]u8 = undefined;
                    break :result try self.genCall(.{ .lib = .{
                        .return_type = ty.toIntern(),
                        .param_types = &.{ty.toIntern()},
                        .callee = std.fmt.bufPrint(&callee_buf, "{s}sqrt{s}", .{
                            floatLibcAbiPrefix(ty),
                            floatLibcAbiSuffix(ty),
                        }) catch unreachable,
                    } }, &.{ty}, &.{.{ .air_ref = un_op }}, .{});
                }
            },
            else => {},
        }

        const src_mcv = try self.resolveInst(un_op);
        const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv))
            src_mcv
        else
            try self.copyToRegisterWithInstTracking(inst, ty, src_mcv);
        const dst_reg = registerAlias(dst_mcv.getReg().?, abi_size);
        const dst_lock = self.register_manager.lockReg(dst_reg);
        defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);

        const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(zcu)) {
            .float => switch (ty.floatBits(self.target.*)) {
                16 => {
                    assert(self.hasFeature(.f16c));
                    const mat_src_reg = if (src_mcv.isRegister())
                        src_mcv.getReg().?
                    else
                        try self.copyToTmpRegister(ty, src_mcv);
                    try self.asmRegisterRegister(.{ .v_ps, .cvtph2 }, dst_reg, mat_src_reg.to128());
                    try self.asmRegisterRegisterRegister(.{ .v_ss, .sqrt }, dst_reg, dst_reg, dst_reg);
                    try self.asmRegisterRegisterImmediate(
                        .{ .v_, .cvtps2ph },
                        dst_reg,
                        dst_reg,
                        bits.RoundMode.imm(.{}),
                    );
                    break :result dst_mcv;
                },
                32 => if (self.hasFeature(.avx)) .{ .v_ss, .sqrt } else .{ ._ss, .sqrt },
                64 => if (self.hasFeature(.avx)) .{ .v_sd, .sqrt } else .{ ._sd, .sqrt },
                else => unreachable,
            },
            .vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
                .float => switch (ty.childType(zcu).floatBits(self.target.*)) {
                    16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen(zcu)) {
                        1 => {
                            try self.asmRegisterRegister(
                                .{ .v_ps, .cvtph2 },
                                dst_reg,
                                (if (src_mcv.isRegister())
                                    src_mcv.getReg().?
                                else
                                    try self.copyToTmpRegister(ty, src_mcv)).to128(),
                            );
                            try self.asmRegisterRegisterRegister(
                                .{ .v_ss, .sqrt },
                                dst_reg,
                                dst_reg,
                                dst_reg,
                            );
                            try self.asmRegisterRegisterImmediate(
                                .{ .v_, .cvtps2ph },
                                dst_reg,
                                dst_reg,
                                bits.RoundMode.imm(.{}),
                            );
                            break :result dst_mcv;
                        },
                        2...8 => {
                            const wide_reg = registerAlias(dst_reg, abi_size * 2);
                            if (src_mcv.isBase()) try self.asmRegisterMemory(
                                .{ .v_ps, .cvtph2 },
                                wide_reg,
                                try src_mcv.mem(self, .{ .size = .fromSize(
                                    @intCast(@divExact(wide_reg.bitSize(), 16)),
                                ) }),
                            ) else try self.asmRegisterRegister(
                                .{ .v_ps, .cvtph2 },
                                wide_reg,
                                (if (src_mcv.isRegister())
                                    src_mcv.getReg().?
                                else
                                    try self.copyToTmpRegister(ty, src_mcv)).to128(),
                            );
                            try self.asmRegisterRegister(.{ .v_ps, .sqrt }, wide_reg, wide_reg);
                            try self.asmRegisterRegisterImmediate(
                                .{ .v_, .cvtps2ph },
                                dst_reg,
                                wide_reg,
                                bits.RoundMode.imm(.{}),
                            );
                            break :result dst_mcv;
                        },
                        else => null,
                    } else null,
                    32 => switch (ty.vectorLen(zcu)) {
                        1 => if (self.hasFeature(.avx)) .{ .v_ss, .sqrt } else .{ ._ss, .sqrt },
                        2...4 => if (self.hasFeature(.avx)) .{ .v_ps, .sqrt } else .{ ._ps, .sqrt },
                        5...8 => if (self.hasFeature(.avx)) .{ .v_ps, .sqrt } else null,
                        else => null,
                    },
                    64 => switch (ty.vectorLen(zcu)) {
                        1 => if (self.hasFeature(.avx)) .{ .v_sd, .sqrt } else .{ ._sd, .sqrt },
                        2 => if (self.hasFeature(.avx)) .{ .v_pd, .sqrt } else .{ ._pd, .sqrt },
                        3...4 => if (self.hasFeature(.avx)) .{ .v_pd, .sqrt } else null,
                        else => null,
                    },
                    80, 128 => null,
                    else => unreachable,
                },
                else => unreachable,
            },
            else => unreachable,
        }) orelse return self.fail("TODO implement airSqrt for {}", .{ty.fmt(pt)});
        switch (mir_tag[0]) {
            .v_ss, .v_sd => if (src_mcv.isBase()) try self.asmRegisterRegisterMemory(
                mir_tag,
                dst_reg,
                dst_reg,
                try src_mcv.mem(self, .{ .size = .fromSize(abi_size) }),
            ) else try self.asmRegisterRegisterRegister(
                mir_tag,
                dst_reg,
                dst_reg,
                registerAlias(if (src_mcv.isRegister())
                    src_mcv.getReg().?
                else
                    try self.copyToTmpRegister(ty, src_mcv), abi_size),
            ),
            else => if (src_mcv.isBase()) try self.asmRegisterMemory(
                mir_tag,
                dst_reg,
                try src_mcv.mem(self, .{ .size = .fromSize(abi_size) }),
            ) else try self.asmRegisterRegister(
                mir_tag,
                dst_reg,
                registerAlias(if (src_mcv.isRegister())
                    src_mcv.getReg().?
                else
                    try self.copyToTmpRegister(ty, src_mcv), abi_size),
            ),
        }
        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

fn airUnaryMath(self: *CodeGen, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
    const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
    const ty = self.typeOf(un_op);
    var callee_buf: ["__round?".len]u8 = undefined;
    const result = try self.genCall(.{ .lib = .{
        .return_type = ty.toIntern(),
        .param_types = &.{ty.toIntern()},
        .callee = std.fmt.bufPrint(&callee_buf, "{s}{s}{s}", .{
            floatLibcAbiPrefix(ty),
            switch (tag) {
                .sin,
                .cos,
                .tan,
                .exp,
                .exp2,
                .log,
                .log2,
                .log10,
                .round,
                => @tagName(tag),
                else => unreachable,
            },
            floatLibcAbiSuffix(ty),
        }) catch unreachable,
    } }, &.{ty}, &.{.{ .air_ref = un_op }}, .{});
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

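/// Attempts to reuse a dying operand's storage (registers or an unnamed
/// stack slot) as the result location of `inst`, avoiding a copy. On
/// success the storage is re-associated with the new instruction and the
/// operand is marked reused so operand-death processing does not free it.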
fn reuseOperand(
    self: *CodeGen,
    inst: Air.Inst.Index,
    operand: Air.Inst.Ref,
    op_index: Liveness.OperandInt,
    mcv: MCValue,
) bool {
    return self.reuseOperandAdvanced(inst, operand, op_index, mcv, inst);
}

fn reuseOperandAdvanced(
    self: *CodeGen,
    inst: Air.Inst.Index,
    operand: Air.Inst.Ref,
    op_index: Liveness.OperandInt,
    mcv: MCValue,
    maybe_tracked_inst: ?Air.Inst.Index,
) bool {
    if (!self.liveness.operandDies(inst, op_index))
        return false;

    switch (mcv) {
        .register, .register_pair, .register_overflow, .register_mask => for (mcv.getRegs()) |reg| {
            // If it's in the registers table, need to associate the register(s) with the
            // new instruction.
            if (maybe_tracked_inst) |tracked_inst| {
                if (!self.register_manager.isRegFree(reg)) {
                    if (RegisterManager.indexOfRegIntoTracked(reg)) |index| {
                        self.register_manager.registers[index] = tracked_inst;
                    }
                }
            } else self.register_manager.freeReg(reg);
        },
        .load_frame => |frame_addr| if (frame_addr.index.isNamed()) return false,
        else => return false,
    }
    switch (mcv) {
        .eflags, .register_overflow => self.eflags_inst = maybe_tracked_inst,
        else => {},
    }

    // Prevent the operand deaths processing code from deallocating it.
    self.reused_operands.set(op_index);
    const op_inst = operand.toIndex().?;
    self.getResolvedInstValue(op_inst).reuse(self, maybe_tracked_inst, op_inst);

    return true;
}

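/// Loads a value through a pointer into a packed container (non-zero bit
/// offset and/or host size). Byte-aligned bit offsets reduce to a plain
/// `load` followed by a truncate; otherwise the value is shifted out of one
/// limb with `shr`, or out of a two-limb straddle with `shrd`, and then
/// truncated to its bit width.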
fn packedLoad(self: *CodeGen, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void {
    const pt = self.pt;
    const zcu = pt.zcu;

    const ptr_info = ptr_ty.ptrInfo(zcu);
    const val_ty: Type = .fromInterned(ptr_info.child);
    if (!val_ty.hasRuntimeBitsIgnoreComptime(zcu)) return;
    const val_abi_size: u32 = @intCast(val_ty.abiSize(zcu));

    const val_bit_size: u32 = @intCast(val_ty.bitSize(zcu));
    const ptr_bit_off = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
        .none => 0,
        .runtime => unreachable,
        else => |vector_index| @intFromEnum(vector_index) * val_bit_size,
    };
    if (ptr_bit_off % 8 == 0) {
        {
            const mat_ptr_mcv: MCValue = switch (ptr_mcv) {
                .immediate, .register, .register_offset, .lea_frame => ptr_mcv,
                else => .{ .register = try self.copyToTmpRegister(ptr_ty, ptr_mcv) },
            };
            const mat_ptr_lock = switch (mat_ptr_mcv) {
                .register => |mat_ptr_reg| self.register_manager.lockReg(mat_ptr_reg),
                else => null,
            };
            defer if (mat_ptr_lock) |lock| self.register_manager.unlockReg(lock);

            try self.load(dst_mcv, ptr_ty, mat_ptr_mcv.offset(@intCast(@divExact(ptr_bit_off, 8))));
        }

        if (val_abi_size * 8 > val_bit_size) {
            if (dst_mcv.isRegister()) {
                try self.truncateRegister(val_ty, dst_mcv.getReg().?);
            } else {
                const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
                const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
                defer self.register_manager.unlockReg(tmp_lock);

                const hi_mcv = dst_mcv.address().offset(@intCast(val_bit_size / 64 * 8)).deref();
                try self.genSetReg(tmp_reg, .usize, hi_mcv, .{});
                try self.truncateRegister(val_ty, tmp_reg);
                try self.genCopy(.usize, hi_mcv, .{ .register = tmp_reg }, .{});
            }
        }
        return;
    }

    if (val_abi_size > 8) return self.fail("TODO implement packed load of {}", .{val_ty.fmt(pt)});

    const limb_abi_size: u31 = @min(val_abi_size, 8);
    const limb_abi_bits = limb_abi_size * 8;
    const val_byte_off: i32 = @intCast(ptr_bit_off / limb_abi_bits * limb_abi_size);
    const val_bit_off = ptr_bit_off % limb_abi_bits;
    const val_extra_bits = self.regExtraBits(val_ty);

    const ptr_reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv);
    const ptr_lock = self.register_manager.lockRegAssumeUnused(ptr_reg);
    defer self.register_manager.unlockReg(ptr_lock);

    const dst_reg = switch (dst_mcv) {
        .register => |reg| reg,
        else => try self.register_manager.allocReg(null, abi.RegisterClass.gp),
    };
    const dst_lock = self.register_manager.lockReg(dst_reg);
    defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);

    const load_abi_size =
        if (val_bit_off < val_extra_bits) val_abi_size else val_abi_size * 2;
    if (load_abi_size <= 8) {
        const load_reg = registerAlias(dst_reg, load_abi_size);
        try self.asmRegisterMemory(.{ ._, .mov }, load_reg, .{
            .base = .{ .reg = ptr_reg },
            .mod = .{ .rm = .{
                .size = .fromSize(load_abi_size),
                .disp = val_byte_off,
            } },
        });
        try self.spillEflagsIfOccupied();
        try self.asmRegisterImmediate(.{ ._r, .sh }, load_reg, .u(val_bit_off));
    } else {
        const tmp_reg =
            registerAlias(try self.register_manager.allocReg(null, abi.RegisterClass.gp), val_abi_size);
        const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
        defer self.register_manager.unlockReg(tmp_lock);

        const dst_alias = registerAlias(dst_reg, val_abi_size);
        try self.asmRegisterMemory(.{ ._, .mov }, dst_alias, .{
            .base = .{ .reg = ptr_reg },
            .mod = .{ .rm = .{
                .size = .fromSize(val_abi_size),
                .disp = val_byte_off,
            } },
        });
        try self.asmRegisterMemory(.{ ._, .mov }, tmp_reg, .{
            .base = .{ .reg = ptr_reg },
            .mod = .{ .rm = .{
                .size = .fromSize(val_abi_size),
                .disp = val_byte_off + limb_abi_size,
            } },
        });
        try self.spillEflagsIfOccupied();
        try self.asmRegisterRegisterImmediate(.{ ._rd, .sh }, dst_alias, tmp_reg, .u(val_bit_off));
    }

    if (val_extra_bits > 0) try self.truncateRegister(val_ty, dst_reg);
    try self.genCopy(val_ty, dst_mcv, .{ .register = dst_reg }, .{});
}

fn load(self: *CodeGen, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const dst_ty = ptr_ty.childType(zcu);
    if (!dst_ty.hasRuntimeBitsIgnoreComptime(zcu)) return;
    switch (ptr_mcv) {
        .none,
        .unreach,
        .dead,
        .undef,
        .eflags,
        .register_pair,
        .register_triple,
        .register_quadruple,
        .register_overflow,
        .register_mask,
        .elementwise_regs_then_frame,
        .reserved_frame,
        => unreachable, // not a valid pointer
        .immediate,
        .register,
        .register_offset,
        .lea_symbol,
        .lea_direct,
        .lea_got,
        .lea_tlv,
        .lea_frame,
        => try self.genCopy(dst_ty, dst_mcv, ptr_mcv.deref(), .{}),
        .memory,
        .indirect,
        .load_symbol,
        .load_direct,
        .load_got,
        .load_tlv,
        .load_frame,
        => {
            const addr_reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv);
            const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
            defer self.register_manager.unlockReg(addr_lock);

            try self.genCopy(dst_ty, dst_mcv, .{ .indirect = .{ .reg = addr_reg } }, .{});
        },
        .air_ref => |ptr_ref| try self.load(dst_mcv, ptr_ty, try self.resolveInst(ptr_ref)),
    }
}

fn airLoad(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const elem_ty = self.typeOfIndex(inst);
    const result: MCValue = result: {
        if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;

        try self.spillRegisters(&.{ .rdi, .rsi, .rcx });
        const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rdi, .rsi, .rcx });
        defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);

        const ptr_ty = self.typeOf(ty_op.operand);
        const elem_size = elem_ty.abiSize(zcu);

        const elem_rs = self.regSetForType(elem_ty);
        const ptr_rs = self.regSetForType(ptr_ty);

        const ptr_mcv = try self.resolveInst(ty_op.operand);
        const dst_mcv = if (elem_size <= 8 and std.math.isPowerOfTwo(elem_size) and
            elem_rs.supersetOf(ptr_rs) and self.reuseOperand(inst, ty_op.operand, 0, ptr_mcv))
            // The MCValue that holds the pointer can be re-used as the value.
            ptr_mcv
        else
            try self.allocRegOrMem(inst, true);

        const ptr_info = ptr_ty.ptrInfo(zcu);
        if (ptr_info.flags.vector_index != .none or ptr_info.packed_offset.host_size > 0) {
            try self.packedLoad(dst_mcv, ptr_ty, ptr_mcv);
        } else {
            try self.load(dst_mcv, ptr_ty, ptr_mcv);
        }

        if (elem_ty.isAbiInt(zcu) and elem_size * 8 > elem_ty.bitSize(zcu)) {
            const high_mcv: MCValue = switch (dst_mcv) {
                .register => |dst_reg| .{ .register = dst_reg },
                .register_pair => |dst_regs| .{ .register = dst_regs[1] },
                else => dst_mcv.address().offset(@intCast((elem_size - 1) / 8 * 8)).deref(),
            };
            const high_reg = if (high_mcv.isRegister())
                high_mcv.getReg().?
            else
                try self.copyToTmpRegister(.usize, high_mcv);
            const high_lock = self.register_manager.lockReg(high_reg);
            defer if (high_lock) |lock| self.register_manager.unlockReg(lock);

            try self.truncateRegister(elem_ty, high_reg);
            if (!high_mcv.isRegister()) try self.genCopy(
                if (elem_size <= 8) elem_ty else .usize,
                high_mcv,
                .{ .register = high_reg },
                .{},
            );
        }
        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

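/// Stores a value through a pointer into a packed container using a
/// read-modify-write of each affected limb: the destination bits are
/// cleared by ANDing with `part_mask_not`, and the source part is shifted
/// into position, masked with `part_mask`, and ORed in.
///
/// Example for a 3-bit field at bit offset 5 within an 8-bit limb:
///   part_mask     = 0b1110_0000
///   part_mask_not = 0b0001_1111
///   limb = (limb & part_mask_not) | ((src << 5) & part_mask)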
fn packedStore(self: *CodeGen, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerError!void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ptr_info = ptr_ty.ptrInfo(zcu);
    const src_ty: Type = .fromInterned(ptr_info.child);
    if (!src_ty.hasRuntimeBitsIgnoreComptime(zcu)) return;

    const limb_abi_size: u16 = @min(ptr_info.packed_offset.host_size, 8);
    const limb_abi_bits = limb_abi_size * 8;
    const limb_ty = try pt.intType(.unsigned, limb_abi_bits);

    const src_bit_size = src_ty.bitSize(zcu);
    const ptr_bit_off = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
        .none => 0,
        .runtime => unreachable,
        else => |vector_index| @intFromEnum(vector_index) * src_bit_size,
    };
    const src_byte_off: i32 = @intCast(ptr_bit_off / limb_abi_bits * limb_abi_size);
    const src_bit_off = ptr_bit_off % limb_abi_bits;

    const ptr_reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv);
    const ptr_lock = self.register_manager.lockRegAssumeUnused(ptr_reg);
    defer self.register_manager.unlockReg(ptr_lock);

    const mat_src_mcv: MCValue = mat_src_mcv: switch (src_mcv) {
        .register => if (src_bit_size > 64) {
            const frame_index = try self.allocFrameIndex(.initSpill(src_ty, self.pt.zcu));
            try self.genSetMem(.{ .frame = frame_index }, 0, src_ty, src_mcv, .{});
            break :mat_src_mcv .{ .load_frame = .{ .index = frame_index } };
        } else src_mcv,
        else => src_mcv,
    };

    var limb_i: u16 = 0;
    while (limb_i * limb_abi_bits < src_bit_off + src_bit_size) : (limb_i += 1) {
        const part_bit_off = if (limb_i == 0) src_bit_off else 0;
        const part_bit_size =
            @min(src_bit_off + src_bit_size - limb_i * limb_abi_bits, limb_abi_bits) - part_bit_off;
        const limb_mem: Memory = .{
            .base = .{ .reg = ptr_reg },
            .mod = .{ .rm = .{
                .size = .fromSize(limb_abi_size),
                .disp = src_byte_off + limb_i * limb_abi_size,
            } },
        };

        const part_mask = (@as(u64, std.math.maxInt(u64)) >> @intCast(64 - part_bit_size)) <<
            @intCast(part_bit_off);
        const part_mask_not = part_mask ^ (@as(u64, std.math.maxInt(u64)) >> @intCast(64 - limb_abi_bits));
        if (limb_abi_size <= 4) {
            try self.asmMemoryImmediate(.{ ._, .@"and" }, limb_mem, .u(part_mask_not));
        } else if (std.math.cast(i32, @as(i64, @bitCast(part_mask_not)))) |small| {
            try self.asmMemoryImmediate(.{ ._, .@"and" }, limb_mem, .s(small));
        } else {
            const part_mask_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
            try self.asmRegisterImmediate(.{ ._, .mov }, part_mask_reg, .u(part_mask_not));
            try self.asmMemoryRegister(.{ ._, .@"and" }, limb_mem, part_mask_reg);
        }

        if (src_bit_size <= 64) {
            const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
            const tmp_mcv = MCValue{ .register = tmp_reg };
            const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
            defer self.register_manager.unlockReg(tmp_lock);

            try self.genSetReg(tmp_reg, limb_ty, mat_src_mcv, .{});
            switch (limb_i) {
                0 => try self.genShiftBinOpMir(
                    .{ ._l, .sh },
                    limb_ty,
                    tmp_mcv,
                    .u8,
                    .{ .immediate = src_bit_off },
                ),
                1 => try self.genShiftBinOpMir(
                    .{ ._r, .sh },
                    limb_ty,
                    tmp_mcv,
                    .u8,
                    .{ .immediate = limb_abi_bits - src_bit_off },
                ),
                else => unreachable,
            }
            try self.genBinOpMir(.{ ._, .@"and" }, limb_ty, tmp_mcv, .{ .immediate = part_mask });
            try self.asmMemoryRegister(
                .{ ._, .@"or" },
                limb_mem,
                registerAlias(tmp_reg, limb_abi_size),
            );
        } else if (src_bit_size <= 128 and src_bit_off == 0) {
            const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
            const tmp_mcv = MCValue{ .register = tmp_reg };
            const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
            defer self.register_manager.unlockReg(tmp_lock);

            try self.genSetReg(tmp_reg, limb_ty, switch (limb_i) {
                0 => mat_src_mcv,
                else => mat_src_mcv.address().offset(limb_i * limb_abi_size).deref(),
            }, .{});
            try self.genBinOpMir(.{ ._, .@"and" }, limb_ty, tmp_mcv, .{ .immediate = part_mask });
            try self.asmMemoryRegister(
                .{ ._, .@"or" },
                limb_mem,
                registerAlias(tmp_reg, limb_abi_size),
            );
        } else return self.fail("TODO: implement packed store of {}", .{src_ty.fmt(pt)});
    }
}

fn store(
    self: *CodeGen,
    ptr_ty: Type,
    ptr_mcv: MCValue,
    src_mcv: MCValue,
    opts: CopyOptions,
) InnerError!void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const src_ty = ptr_ty.childType(zcu);
    if (!src_ty.hasRuntimeBitsIgnoreComptime(zcu)) return;
    switch (ptr_mcv) {
        .none,
        .unreach,
        .dead,
        .undef,
        .eflags,
        .register_pair,
        .register_triple,
        .register_quadruple,
        .register_overflow,
        .register_mask,
        .elementwise_regs_then_frame,
        .reserved_frame,
        => unreachable, // not a valid pointer
        .immediate,
        .register,
        .register_offset,
        .lea_symbol,
        .lea_direct,
        .lea_got,
        .lea_tlv,
        .lea_frame,
        => try self.genCopy(src_ty, ptr_mcv.deref(), src_mcv, opts),
        .memory,
        .indirect,
        .load_symbol,
        .load_direct,
        .load_got,
        .load_tlv,
        .load_frame,
        => {
            const addr_reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv);
            const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
            defer self.register_manager.unlockReg(addr_lock);

            try self.genCopy(src_ty, .{ .indirect = .{ .reg = addr_reg } }, src_mcv, opts);
        },
        .air_ref => |ptr_ref| try self.store(ptr_ty, try self.resolveInst(ptr_ref), src_mcv, opts),
    }
}

fn airStore(self: *CodeGen, inst: Air.Inst.Index, safety: bool) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;

    result: {
        if (!safety and (try self.resolveInst(bin_op.rhs)) == .undef) break :result;

        try self.spillRegisters(&.{ .rdi, .rsi, .rcx });
        const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rdi, .rsi, .rcx });
        defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);

        const src_mcv = try self.resolveInst(bin_op.rhs);
        const ptr_mcv = try self.resolveInst(bin_op.lhs);
        const ptr_ty = self.typeOf(bin_op.lhs);

        const ptr_info = ptr_ty.ptrInfo(zcu);
        if (ptr_info.flags.vector_index != .none or ptr_info.packed_offset.host_size > 0) {
            try self.packedStore(ptr_ty, ptr_mcv, src_mcv);
        } else {
            try self.store(ptr_ty, ptr_mcv, src_mcv, .{ .safety = safety });
        }
    }
    return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airStructFieldPtr(self: *CodeGen, inst: Air.Inst.Index) !void {
|
|
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
|
|
const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
|
|
const result = try self.fieldPtr(inst, extra.struct_operand, extra.field_index);
|
|
return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none });
|
|
}
|
|
|
|
fn airStructFieldPtrIndex(self: *CodeGen, inst: Air.Inst.Index, field_index: u8) !void {
|
|
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
|
|
const result = try self.fieldPtr(inst, ty_op.operand, field_index);
|
|
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
|
|
}
|
|
|
|
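/// Computes a pointer to a field as the aggregate pointer plus `fieldOffset`,
/// reusing the operand's register when the operand can be reused.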
fn fieldPtr(self: *CodeGen, inst: Air.Inst.Index, operand: Air.Inst.Ref, field_index: u32) !MCValue {
    const ptr_field_ty = self.typeOfIndex(inst);

    const src_mcv = try self.resolveInst(operand);
    const dst_mcv = if (switch (src_mcv) {
        .immediate, .lea_frame => true,
        .register, .register_offset => self.reuseOperand(inst, operand, 0, src_mcv),
        else => false,
    }) src_mcv else try self.copyToRegisterWithInstTracking(inst, ptr_field_ty, src_mcv);
    return dst_mcv.offset(self.fieldOffset(self.typeOf(operand), ptr_field_ty, field_index));
}

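/// Returns the byte offset of a field relative to a pointer to the aggregate.
/// For auto/extern layout this is the plain field offset. For packed layout it
/// is derived from bit offsets: the aggregate pointer's own packed bit offset,
/// plus the field's bit offset within the struct, minus the bit offset encoded
/// in the field pointer's type; the remainder must come out byte-aligned,
/// hence the `@divExact`. Illustrative example: for
/// `packed struct { a: u3, b: u13 }`, a pointer to `b` keeps byte offset 0 and
/// instead carries `bit_offset = 3` in its pointer type, so this returns 0.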
fn fieldOffset(self: *CodeGen, ptr_agg_ty: Type, ptr_field_ty: Type, field_index: u32) i32 {
    const pt = self.pt;
    const zcu = pt.zcu;
    const agg_ty = ptr_agg_ty.childType(zcu);
    return switch (agg_ty.containerLayout(zcu)) {
        .auto, .@"extern" => @intCast(agg_ty.structFieldOffset(field_index, zcu)),
        .@"packed" => @divExact(@as(i32, ptr_agg_ty.ptrInfo(zcu).packed_offset.bit_offset) +
            (if (zcu.typeToStruct(agg_ty)) |loaded_struct| pt.structPackedFieldBitOffset(loaded_struct, field_index) else 0) -
            ptr_field_ty.ptrInfo(zcu).packed_offset.bit_offset, 8),
    };
}

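/// Extracts a field value out of a struct operand. The operand may live in a
/// single register, a register pair, a register+eflags overflow pairing, or a
/// stack frame slot; in each case the field's bits are shifted down to bit 0
/// and then truncated to the field's width when extra bits remain.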
fn airStructFieldVal(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
    const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
    const result: MCValue = result: {
        const operand = extra.struct_operand;
        const index = extra.field_index;

        const container_ty = self.typeOf(operand);
        const container_rc = self.regSetForType(container_ty);
        const field_ty = container_ty.fieldType(index, zcu);
        if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
        const field_rc = self.regSetForType(field_ty);
        const field_is_gp = field_rc.supersetOf(abi.RegisterClass.gp);

        const src_mcv = try self.resolveInst(operand);
        const field_off: u32 = switch (container_ty.containerLayout(zcu)) {
            .auto, .@"extern" => @intCast(container_ty.structFieldOffset(extra.field_index, zcu) * 8),
            .@"packed" => if (zcu.typeToStruct(container_ty)) |loaded_struct|
                pt.structPackedFieldBitOffset(loaded_struct, extra.field_index)
            else
                0,
        };

        switch (src_mcv) {
            .register => |src_reg| {
                const src_reg_lock = self.register_manager.lockRegAssumeUnused(src_reg);
                defer self.register_manager.unlockReg(src_reg_lock);

                const src_in_field_rc =
                    field_rc.isSet(RegisterManager.indexOfRegIntoTracked(src_reg).?);
                const dst_reg = if (src_in_field_rc and self.reuseOperand(inst, operand, 0, src_mcv))
                    src_reg
                else if (field_off == 0)
                    (try self.copyToRegisterWithInstTracking(inst, field_ty, src_mcv)).register
                else
                    try self.copyToTmpRegister(.usize, .{ .register = src_reg });
                const dst_mcv: MCValue = .{ .register = dst_reg };
                const dst_lock = self.register_manager.lockReg(dst_reg);
                defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);

                if (field_off > 0) {
                    try self.spillEflagsIfOccupied();
                    try self.genShiftBinOpMir(.{ ._r, .sh }, .usize, dst_mcv, .u8, .{ .immediate = field_off });
                }
                if (abi.RegisterClass.gp.isSet(RegisterManager.indexOfRegIntoTracked(dst_reg).?) and
                    container_ty.abiSize(zcu) * 8 > field_ty.bitSize(zcu))
                    try self.truncateRegister(field_ty, dst_reg);

                break :result if (field_off == 0 or field_rc.supersetOf(abi.RegisterClass.gp))
                    dst_mcv
                else
                    try self.copyToRegisterWithInstTracking(inst, field_ty, dst_mcv);
            },
            .register_pair => |src_regs| {
                const src_regs_lock = self.register_manager.lockRegsAssumeUnused(2, src_regs);
                defer for (src_regs_lock) |lock| self.register_manager.unlockReg(lock);

                const field_bit_size: u32 = @intCast(field_ty.bitSize(zcu));
                const src_reg = if (field_off + field_bit_size <= 64)
                    src_regs[0]
                else if (field_off >= 64)
                    src_regs[1]
                else {
                    const dst_regs: [2]Register = if (field_rc.supersetOf(container_rc) and
                        self.reuseOperand(inst, operand, 0, src_mcv)) src_regs else dst: {
                        const dst_regs =
                            try self.register_manager.allocRegs(2, @splat(null), field_rc);
                        const dst_locks = self.register_manager.lockRegsAssumeUnused(2, dst_regs);
                        defer for (dst_locks) |lock| self.register_manager.unlockReg(lock);

                        try self.genCopy(container_ty, .{ .register_pair = dst_regs }, src_mcv, .{});
                        break :dst dst_regs;
                    };
                    const dst_mcv = MCValue{ .register_pair = dst_regs };
                    const dst_locks = self.register_manager.lockRegs(2, dst_regs);
                    defer for (dst_locks) |dst_lock| if (dst_lock) |lock|
                        self.register_manager.unlockReg(lock);

                    if (field_off > 0) {
                        try self.spillEflagsIfOccupied();
                        try self.genShiftBinOpMir(.{ ._r, .sh }, .u128, dst_mcv, .u8, .{ .immediate = field_off });
                    }

                    if (field_bit_size <= 64) {
                        if (self.regExtraBits(field_ty) > 0)
                            try self.truncateRegister(field_ty, dst_regs[0]);
                        break :result if (field_rc.supersetOf(abi.RegisterClass.gp))
                            .{ .register = dst_regs[0] }
                        else
                            try self.copyToRegisterWithInstTracking(inst, field_ty, .{
                                .register = dst_regs[0],
                            });
                    }

                    if (field_bit_size < 128) try self.truncateRegister(
                        try pt.intType(.unsigned, @intCast(field_bit_size - 64)),
                        dst_regs[1],
                    );
                    break :result if (field_rc.supersetOf(abi.RegisterClass.gp))
                        dst_mcv
                    else
                        try self.copyToRegisterWithInstTracking(inst, field_ty, dst_mcv);
                };

                const dst_reg = try self.copyToTmpRegister(.usize, .{ .register = src_reg });
                const dst_mcv = MCValue{ .register = dst_reg };
                const dst_lock = self.register_manager.lockReg(dst_reg);
                defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);

                if (field_off % 64 > 0) {
                    try self.spillEflagsIfOccupied();
                    try self.genShiftBinOpMir(.{ ._r, .sh }, .usize, dst_mcv, .u8, .{ .immediate = field_off % 64 });
                }
                if (self.regExtraBits(field_ty) > 0) try self.truncateRegister(field_ty, dst_reg);

                break :result if (field_rc.supersetOf(abi.RegisterClass.gp))
                    dst_mcv
                else
                    try self.copyToRegisterWithInstTracking(inst, field_ty, dst_mcv);
            },
            .register_overflow => |ro| {
                switch (index) {
                    // Get wrapped value for overflow operation.
                    0 => if (self.reuseOperand(inst, extra.struct_operand, 0, src_mcv)) {
                        self.eflags_inst = null; // actually stop tracking the overflow part
                        break :result .{ .register = ro.reg };
                    } else break :result try self.copyToRegisterWithInstTracking(inst, .usize, .{ .register = ro.reg }),
                    // Get overflow bit.
                    1 => if (self.reuseOperandAdvanced(inst, extra.struct_operand, 0, src_mcv, null)) {
                        self.eflags_inst = inst; // actually keep tracking the overflow part
                        break :result .{ .eflags = ro.eflags };
                    } else {
                        const dst_reg = try self.register_manager.allocReg(inst, abi.RegisterClass.gp);
                        try self.asmSetccRegister(ro.eflags, dst_reg.to8());
                        break :result .{ .register = dst_reg.to8() };
                    },
                    else => unreachable,
                }
            },
            .load_frame => |frame_addr| {
                const field_abi_size: u32 = @intCast(field_ty.abiSize(zcu));
                if (field_off % 8 == 0) {
                    const field_byte_off = @divExact(field_off, 8);
                    const off_mcv = src_mcv.address().offset(@intCast(field_byte_off)).deref();
                    const field_bit_size = field_ty.bitSize(zcu);

                    if (field_abi_size <= 8) {
                        const int_ty = try pt.intType(
                            if (field_ty.isAbiInt(zcu)) field_ty.intInfo(zcu).signedness else .unsigned,
                            @intCast(field_bit_size),
                        );

                        const dst_reg = try self.register_manager.allocReg(
                            if (field_is_gp) inst else null,
                            abi.RegisterClass.gp,
                        );
                        const dst_mcv = MCValue{ .register = dst_reg };
                        const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
                        defer self.register_manager.unlockReg(dst_lock);

                        try self.genCopy(int_ty, dst_mcv, off_mcv, .{});
                        if (self.regExtraBits(field_ty) > 0) try self.truncateRegister(int_ty, dst_reg);
                        break :result if (field_is_gp)
                            dst_mcv
                        else
                            try self.copyToRegisterWithInstTracking(inst, field_ty, dst_mcv);
                    }

                    const container_abi_size: u32 = @intCast(container_ty.abiSize(zcu));
                    const dst_mcv = if (field_byte_off + field_abi_size <= container_abi_size and
                        self.reuseOperand(inst, operand, 0, src_mcv))
                        off_mcv
                    else dst: {
                        const dst_mcv = try self.allocRegOrMem(inst, true);
                        try self.genCopy(field_ty, dst_mcv, off_mcv, .{});
                        break :dst dst_mcv;
                    };
                    if (field_abi_size * 8 > field_bit_size and dst_mcv.isBase()) {
                        const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
                        const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
                        defer self.register_manager.unlockReg(tmp_lock);

                        const hi_mcv =
                            dst_mcv.address().offset(@intCast(field_bit_size / 64 * 8)).deref();
                        try self.genSetReg(tmp_reg, .usize, hi_mcv, .{});
                        try self.truncateRegister(field_ty, tmp_reg);
                        try self.genCopy(.usize, hi_mcv, .{ .register = tmp_reg }, .{});
                    }
                    break :result dst_mcv;
                }

                const limb_abi_size: u31 = @min(field_abi_size, 8);
                const limb_abi_bits = limb_abi_size * 8;
                const field_byte_off: i32 = @intCast(field_off / limb_abi_bits * limb_abi_size);
                const field_bit_off = field_off % limb_abi_bits;

                if (field_abi_size > 8) {
                    return self.fail("TODO implement struct_field_val with large packed field", .{});
                }

                const dst_reg = try self.register_manager.allocReg(
                    if (field_is_gp) inst else null,
                    abi.RegisterClass.gp,
                );
                const field_extra_bits = self.regExtraBits(field_ty);
                const load_abi_size =
                    if (field_bit_off < field_extra_bits) field_abi_size else field_abi_size * 2;
                if (load_abi_size <= 8) {
                    const load_reg = registerAlias(dst_reg, load_abi_size);
                    try self.asmRegisterMemory(.{ ._, .mov }, load_reg, .{
                        .base = .{ .frame = frame_addr.index },
                        .mod = .{ .rm = .{
                            .size = .fromSize(load_abi_size),
                            .disp = frame_addr.off + field_byte_off,
                        } },
                    });
                    try self.spillEflagsIfOccupied();
                    try self.asmRegisterImmediate(.{ ._r, .sh }, load_reg, .u(field_bit_off));
                } else {
                    const tmp_reg = registerAlias(
                        try self.register_manager.allocReg(null, abi.RegisterClass.gp),
                        field_abi_size,
                    );
                    const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
                    defer self.register_manager.unlockReg(tmp_lock);

                    const dst_alias = registerAlias(dst_reg, field_abi_size);
                    try self.asmRegisterMemory(
                        .{ ._, .mov },
                        dst_alias,
                        .{
                            .base = .{ .frame = frame_addr.index },
                            .mod = .{ .rm = .{
                                .size = .fromSize(field_abi_size),
                                .disp = frame_addr.off + field_byte_off,
                            } },
                        },
                    );
                    try self.asmRegisterMemory(.{ ._, .mov }, tmp_reg, .{
                        .base = .{ .frame = frame_addr.index },
                        .mod = .{ .rm = .{
                            .size = .fromSize(field_abi_size),
                            .disp = frame_addr.off + field_byte_off + limb_abi_size,
                        } },
                    });
                    try self.spillEflagsIfOccupied();
                    try self.asmRegisterRegisterImmediate(
                        .{ ._rd, .sh },
                        dst_alias,
                        tmp_reg,
                        .u(field_bit_off),
                    );
                }

                if (field_extra_bits > 0) try self.truncateRegister(field_ty, dst_reg);

                const dst_mcv = MCValue{ .register = dst_reg };
                break :result if (field_is_gp)
                    dst_mcv
                else
                    try self.copyToRegisterWithInstTracking(inst, field_ty, dst_mcv);
            },
            else => return self.fail("TODO implement airStructFieldVal for {}", .{src_mcv}),
        }
    };
    return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none });
}

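/// The inverse of `fieldPtr`: recovers a pointer to the parent aggregate by
/// subtracting the same `fieldOffset` from the field pointer.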
fn airFieldParentPtr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
    const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;

    const ptr_agg_ty = self.typeOfIndex(inst);
    const src_mcv = try self.resolveInst(extra.field_ptr);
    const dst_mcv = if (src_mcv.isRegisterOffset() and
        self.reuseOperand(inst, extra.field_ptr, 0, src_mcv))
        src_mcv
    else
        try self.copyToRegisterWithInstTracking(inst, ptr_agg_ty, src_mcv);
    const result = dst_mcv.offset(-self.fieldOffset(ptr_agg_ty, self.typeOf(extra.field_ptr), extra.field_index));
    return self.finishAir(inst, result, .{ extra.field_ptr, .none, .none });
}

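/// Lowers the unary `not` and `neg` AIR ops. `not` of an eflags value is just
/// the negated condition; otherwise `not` is applied limb by limb, using
/// `xor` against a width mask for unsigned integers whose top limb has unused
/// bits so those bits stay zero. `neg` re-truncates afterwards when the bit
/// size is narrower than the ABI size.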
fn genUnOp(self: *CodeGen, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue {
    const pt = self.pt;
    const zcu = pt.zcu;
    const src_ty = self.typeOf(src_air);
    if (src_ty.zigTypeTag(zcu) == .vector)
        return self.fail("TODO implement genUnOp for {}", .{src_ty.fmt(pt)});

    var src_mcv = try self.resolveInst(src_air);
    switch (src_mcv) {
        .eflags => |cc| switch (tag) {
            .not => {
                if (maybe_inst) |inst| if (self.reuseOperand(inst, src_air, 0, src_mcv))
                    return .{ .eflags = cc.negate() };
                try self.spillEflagsIfOccupied();
                src_mcv = try self.resolveInst(src_air);
            },
            else => {},
        },
        else => {},
    }

    const src_lock = switch (src_mcv) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (src_lock) |lock| self.register_manager.unlockReg(lock);

    const dst_mcv: MCValue = dst: {
        if (maybe_inst) |inst| if (self.reuseOperand(inst, src_air, 0, src_mcv)) break :dst src_mcv;

        const dst_mcv = try self.allocRegOrMemAdvanced(src_ty, maybe_inst, true);
        try self.genCopy(src_ty, dst_mcv, src_mcv, .{});
        break :dst dst_mcv;
    };
    const dst_lock = switch (dst_mcv) {
        .register => |reg| self.register_manager.lockReg(reg),
        else => null,
    };
    defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);

    const abi_size: u16 = @intCast(src_ty.abiSize(zcu));
    switch (tag) {
        .not => {
            const limb_abi_size: u16 = @min(abi_size, 8);
            const int_info: InternPool.Key.IntType = if (src_ty.ip_index == .bool_type)
                .{ .signedness = .unsigned, .bits = 1 }
            else
                src_ty.intInfo(zcu);
            var byte_off: i32 = 0;
            while (byte_off * 8 < int_info.bits) : (byte_off += limb_abi_size) {
                const limb_bits: u16 = @intCast(@min(switch (int_info.signedness) {
                    .signed => abi_size * 8,
                    .unsigned => int_info.bits,
                } - byte_off * 8, limb_abi_size * 8));
                const limb_ty = try pt.intType(int_info.signedness, limb_bits);
                const limb_mcv = switch (byte_off) {
                    0 => dst_mcv,
                    else => dst_mcv.address().offset(byte_off).deref(),
                };

                if (int_info.signedness == .unsigned and self.regExtraBits(limb_ty) > 0) {
                    const mask = @as(u64, std.math.maxInt(u64)) >> @intCast(64 - limb_bits);
                    try self.genBinOpMir(.{ ._, .xor }, limb_ty, limb_mcv, .{ .immediate = mask });
                } else try self.genUnOpMir(.{ ._, .not }, limb_ty, limb_mcv);
            }
        },
        .neg => {
            try self.genUnOpMir(.{ ._, .neg }, src_ty, dst_mcv);
            const bit_size = src_ty.intInfo(zcu).bits;
            if (abi_size * 8 > bit_size) {
                if (dst_mcv.isRegister()) {
                    try self.truncateRegister(src_ty, dst_mcv.getReg().?);
                } else {
                    const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
                    const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
                    defer self.register_manager.unlockReg(tmp_lock);

                    const hi_mcv = dst_mcv.address().offset(@intCast(bit_size / 64 * 8)).deref();
                    try self.genSetReg(tmp_reg, .usize, hi_mcv, .{});
                    try self.truncateRegister(src_ty, tmp_reg);
                    try self.genCopy(.usize, hi_mcv, .{ .register = tmp_reg }, .{});
                }
            }
        },
        else => unreachable,
    }
    return dst_mcv;
}

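/// Applies a single unary read-modify-write MIR instruction to a destination
/// of at most 8 bytes, materializing an address register first when the
/// destination is not directly addressable.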
fn genUnOpMir(self: *CodeGen, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv: MCValue) !void {
    const pt = self.pt;
    const abi_size: u32 = @intCast(dst_ty.abiSize(pt.zcu));
    if (abi_size > 8) return self.fail("TODO implement {} for {}", .{ mir_tag, dst_ty.fmt(pt) });
    switch (dst_mcv) {
        .none,
        .unreach,
        .dead,
        .undef,
        .immediate,
        .register_offset,
        .eflags,
        .register_overflow,
        .register_mask,
        .lea_symbol,
        .lea_direct,
        .lea_got,
        .lea_tlv,
        .lea_frame,
        .elementwise_regs_then_frame,
        .reserved_frame,
        .air_ref,
        => unreachable, // unmodifiable destination
        .register => |dst_reg| try self.asmRegister(mir_tag, registerAlias(dst_reg, abi_size)),
        .register_pair, .register_triple, .register_quadruple => unreachable, // unimplemented
        .memory, .load_symbol, .load_got, .load_direct, .load_tlv => {
            const addr_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
            const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
            defer self.register_manager.unlockReg(addr_reg_lock);

            try self.genSetReg(addr_reg, .usize, dst_mcv.address(), .{});
            try self.asmMemory(mir_tag, .{ .base = .{ .reg = addr_reg }, .mod = .{ .rm = .{
                .size = .fromSize(abi_size),
            } } });
        },
        .indirect, .load_frame => try self.asmMemory(
            mir_tag,
            try dst_mcv.mem(self, .{ .size = .fromSize(abi_size) }),
        ),
    }
}

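/// Integers wider than 16 bytes are shifted with a limb loop: shld/shrd moves
/// bits between adjacent 64-bit limbs, the limb at the vacated end gets the
/// plain shift, and limbs skipped over by a whole-limb shift amount are filled
/// with zeroes (or with the sign word for arithmetic right shifts). 9-16 byte
/// values use the same shld/shrd pattern without a loop, with cmov selecting
/// the >= 64 case. Illustrative sketch for `u128 << cl` in { lo, hi } regs:
///   shld hi, lo, cl    // hi = hi << cl | lo >> (64 - cl)
///   shl  lo, cl
///   cmp  cl, 64        // shld/shl use cl mod 64, so fix up cl >= 64
///   cmovae hi, lo
///   cmovae lo, zeroed_tmp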
/// Clobbers .rcx for non-immediate shift value.
fn genShiftBinOpMir(
    self: *CodeGen,
    tag: Mir.Inst.FixedTag,
    lhs_ty: Type,
    lhs_mcv: MCValue,
    rhs_ty: Type,
    rhs_mcv: MCValue,
) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const abi_size: u32 = @intCast(lhs_ty.abiSize(zcu));
    const shift_abi_size: u32 = @intCast(rhs_ty.abiSize(zcu));
    try self.spillEflagsIfOccupied();

    if (abi_size > 16) {
        const limbs_len = std.math.divCeil(u32, abi_size, 8) catch unreachable;
        assert(shift_abi_size >= 1 and shift_abi_size <= 2);

        const rcx_lock: ?RegisterLock = switch (rhs_mcv) {
            .immediate => |shift_imm| switch (shift_imm) {
                0 => return,
                else => null,
            },
            else => lock: {
                if (switch (rhs_mcv) {
                    .register => |rhs_reg| rhs_reg.id() != Register.rcx.id(),
                    else => true,
                }) {
                    self.register_manager.getRegAssumeFree(.rcx, null);
                    try self.genSetReg(.rcx, rhs_ty, rhs_mcv, .{});
                }
                break :lock self.register_manager.lockReg(.rcx);
            },
        };
        defer if (rcx_lock) |lock| self.register_manager.unlockReg(lock);

        const temp_regs = try self.register_manager.allocRegs(4, @splat(null), abi.RegisterClass.gp);
        const temp_locks = self.register_manager.lockRegsAssumeUnused(4, temp_regs);
        defer for (temp_locks) |lock| self.register_manager.unlockReg(lock);

        switch (tag[0]) {
            ._l => {
                try self.asmRegisterImmediate(.{ ._, .mov }, temp_regs[1].to32(), .u(limbs_len - 1));
                switch (rhs_mcv) {
                    .immediate => |shift_imm| try self.asmRegisterImmediate(
                        .{ ._, .mov },
                        temp_regs[0].to32(),
                        .u(limbs_len - (shift_imm >> 6) - 1),
                    ),
                    else => {
                        try self.asmRegisterRegister(
                            .{ ._, .movzx },
                            temp_regs[2].to32(),
                            registerAlias(.rcx, shift_abi_size),
                        );
                        try self.asmRegisterImmediate(.{ ._, .@"and" }, .cl, .u(std.math.maxInt(u6)));
                        try self.asmRegisterImmediate(.{ ._r, .sh }, temp_regs[2].to32(), .u(6));
                        try self.asmRegisterRegister(
                            .{ ._, .mov },
                            temp_regs[0].to32(),
                            temp_regs[1].to32(),
                        );
                        try self.asmRegisterRegister(
                            .{ ._, .sub },
                            temp_regs[0].to32(),
                            temp_regs[2].to32(),
                        );
                    },
                }
            },
            ._r => {
                try self.asmRegisterRegister(.{ ._, .xor }, temp_regs[1].to32(), temp_regs[1].to32());
                switch (rhs_mcv) {
                    .immediate => |shift_imm| try self.asmRegisterImmediate(
                        .{ ._, .mov },
                        temp_regs[0].to32(),
                        .u(shift_imm >> 6),
                    ),
                    else => {
                        try self.asmRegisterRegister(
                            .{ ._, .movzx },
                            temp_regs[0].to32(),
                            registerAlias(.rcx, shift_abi_size),
                        );
                        try self.asmRegisterImmediate(.{ ._, .@"and" }, .cl, .u(std.math.maxInt(u6)));
                        try self.asmRegisterImmediate(.{ ._r, .sh }, temp_regs[0].to32(), .u(6));
                    },
                }
            },
            else => unreachable,
        }

        const slow_inc_dec = self.hasFeature(.slow_incdec);
        if (switch (rhs_mcv) {
            .immediate => |shift_imm| shift_imm >> 6 < limbs_len - 1,
            else => true,
        }) {
            try self.asmRegisterMemory(.{ ._, .mov }, temp_regs[2].to64(), .{
                .base = .{ .frame = lhs_mcv.load_frame.index },
                .mod = .{ .rm = .{
                    .size = .qword,
                    .index = temp_regs[0].to64(),
                    .scale = .@"8",
                    .disp = lhs_mcv.load_frame.off,
                } },
            });
            const skip = switch (rhs_mcv) {
                .immediate => undefined,
                else => switch (tag[0]) {
                    ._l => try self.asmJccReloc(.z, undefined),
                    ._r => skip: {
                        try self.asmRegisterImmediate(
                            .{ ._, .cmp },
                            temp_regs[0].to32(),
                            .u(limbs_len - 1),
                        );
                        break :skip try self.asmJccReloc(.nb, undefined);
                    },
                    else => unreachable,
                },
            };
            const loop: Mir.Inst.Index = @intCast(self.mir_instructions.len);
            try self.asmRegisterMemory(.{ ._, .mov }, temp_regs[3].to64(), .{
                .base = .{ .frame = lhs_mcv.load_frame.index },
                .mod = .{ .rm = .{
                    .size = .qword,
                    .index = temp_regs[0].to64(),
                    .scale = .@"8",
                    .disp = switch (tag[0]) {
                        ._l => lhs_mcv.load_frame.off - 8,
                        ._r => lhs_mcv.load_frame.off + 8,
                        else => unreachable,
                    },
                } },
            });
            switch (rhs_mcv) {
                .immediate => |shift_imm| try self.asmRegisterRegisterImmediate(
                    .{ switch (tag[0]) {
                        ._l => ._ld,
                        ._r => ._rd,
                        else => unreachable,
                    }, .sh },
                    temp_regs[2].to64(),
                    temp_regs[3].to64(),
                    .u(shift_imm & std.math.maxInt(u6)),
                ),
                else => try self.asmRegisterRegisterRegister(.{ switch (tag[0]) {
                    ._l => ._ld,
                    ._r => ._rd,
                    else => unreachable,
                }, .sh }, temp_regs[2].to64(), temp_regs[3].to64(), .cl),
            }
            try self.asmMemoryRegister(.{ ._, .mov }, .{
                .base = .{ .frame = lhs_mcv.load_frame.index },
                .mod = .{ .rm = .{
                    .size = .qword,
                    .index = temp_regs[1].to64(),
                    .scale = .@"8",
                    .disp = lhs_mcv.load_frame.off,
                } },
            }, temp_regs[2].to64());
            try self.asmRegisterRegister(.{ ._, .mov }, temp_regs[2].to64(), temp_regs[3].to64());
            switch (tag[0]) {
                ._l => {
                    if (slow_inc_dec) {
                        try self.asmRegisterImmediate(.{ ._, .sub }, temp_regs[1].to32(), .u(1));
                        try self.asmRegisterImmediate(.{ ._, .sub }, temp_regs[0].to32(), .u(1));
                    } else {
                        try self.asmRegister(.{ ._c, .de }, temp_regs[1].to32());
                        try self.asmRegister(.{ ._c, .de }, temp_regs[0].to32());
                    }
                    _ = try self.asmJccReloc(.nz, loop);
                },
                ._r => {
                    if (slow_inc_dec) {
                        try self.asmRegisterImmediate(.{ ._, .add }, temp_regs[1].to32(), .u(1));
                        try self.asmRegisterImmediate(.{ ._, .add }, temp_regs[0].to32(), .u(1));
                    } else {
                        try self.asmRegister(.{ ._c, .in }, temp_regs[1].to32());
                        try self.asmRegister(.{ ._c, .in }, temp_regs[0].to32());
                    }
                    try self.asmRegisterImmediate(
                        .{ ._, .cmp },
                        temp_regs[0].to32(),
                        .u(limbs_len - 1),
                    );
                    _ = try self.asmJccReloc(.b, loop);
                },
                else => unreachable,
            }
            switch (rhs_mcv) {
                .immediate => {},
                else => self.performReloc(skip),
            }
        }
        switch (rhs_mcv) {
            .immediate => |shift_imm| try self.asmRegisterImmediate(
                tag,
                temp_regs[2].to64(),
                .u(shift_imm & std.math.maxInt(u6)),
            ),
            else => try self.asmRegisterRegister(tag, temp_regs[2].to64(), .cl),
        }
        try self.asmMemoryRegister(.{ ._, .mov }, .{
            .base = .{ .frame = lhs_mcv.load_frame.index },
            .mod = .{ .rm = .{
                .size = .qword,
                .index = temp_regs[1].to64(),
                .scale = .@"8",
                .disp = lhs_mcv.load_frame.off,
            } },
        }, temp_regs[2].to64());
        if (tag[0] == ._r and tag[1] == .sa) try self.asmRegisterImmediate(
            tag,
            temp_regs[2].to64(),
            .u(63),
        );
        if (switch (rhs_mcv) {
            .immediate => |shift_imm| shift_imm >> 6 > 0,
            else => true,
        }) {
            const skip = switch (rhs_mcv) {
                .immediate => undefined,
                else => switch (tag[0]) {
                    ._l => skip: {
                        try self.asmRegisterRegister(
                            .{ ._, .@"test" },
                            temp_regs[1].to32(),
                            temp_regs[1].to32(),
                        );
                        break :skip try self.asmJccReloc(.z, undefined);
                    },
                    ._r => skip: {
                        try self.asmRegisterImmediate(
                            .{ ._, .cmp },
                            temp_regs[1].to32(),
                            .u(limbs_len - 1),
                        );
                        break :skip try self.asmJccReloc(.nb, undefined);
                    },
                    else => unreachable,
                },
            };
            const loop: Mir.Inst.Index = @intCast(self.mir_instructions.len);
            switch (tag[0]) {
                ._l => if (slow_inc_dec) {
                    try self.asmRegisterImmediate(.{ ._, .sub }, temp_regs[1].to32(), .u(1));
                } else {
                    try self.asmRegister(.{ ._c, .de }, temp_regs[1].to32());
                },
                ._r => if (slow_inc_dec) {
                    try self.asmRegisterImmediate(.{ ._, .add }, temp_regs[1].to32(), .u(1));
                } else {
                    try self.asmRegister(.{ ._c, .in }, temp_regs[1].to32());
                },
                else => unreachable,
            }
            if (tag[0] == ._r and tag[1] == .sa) try self.asmMemoryRegister(.{ ._, .mov }, .{
                .base = .{ .frame = lhs_mcv.load_frame.index },
                .mod = .{ .rm = .{
                    .size = .qword,
                    .index = temp_regs[1].to64(),
                    .scale = .@"8",
                    .disp = lhs_mcv.load_frame.off,
                } },
            }, temp_regs[2].to64()) else try self.asmMemoryImmediate(.{ ._, .mov }, .{
                .base = .{ .frame = lhs_mcv.load_frame.index },
                .mod = .{ .rm = .{
                    .size = .qword,
                    .index = temp_regs[1].to64(),
                    .scale = .@"8",
                    .disp = lhs_mcv.load_frame.off,
                } },
            }, .u(0));
            switch (tag[0]) {
                ._l => _ = try self.asmJccReloc(.nz, loop),
                ._r => {
                    try self.asmRegisterImmediate(
                        .{ ._, .cmp },
                        temp_regs[1].to32(),
                        .u(limbs_len - 1),
                    );
                    _ = try self.asmJccReloc(.b, loop);
                },
                else => unreachable,
            }
            switch (rhs_mcv) {
                .immediate => {},
                else => self.performReloc(skip),
            }
        }
        return;
    }

    assert(shift_abi_size == 1);
    const shift_mcv: MCValue = shift: {
        switch (rhs_mcv) {
            .immediate => |shift_imm| switch (shift_imm) {
                0 => return,
                else => break :shift rhs_mcv,
            },
            .register => |rhs_reg| if (rhs_reg.id() == Register.rcx.id())
                break :shift rhs_mcv,
            else => {},
        }
        self.register_manager.getRegAssumeFree(.rcx, null);
        try self.genSetReg(.cl, rhs_ty, rhs_mcv, .{});
        break :shift .{ .register = .rcx };
    };
    if (abi_size > 8) {
        const info: struct { indices: [2]u31, double_tag: Mir.Inst.FixedTag } = switch (tag[0]) {
            ._l => .{ .indices = .{ 0, 1 }, .double_tag = .{ ._ld, .sh } },
            ._r => .{ .indices = .{ 1, 0 }, .double_tag = .{ ._rd, .sh } },
            else => unreachable,
        };
        switch (lhs_mcv) {
            .register_pair => |lhs_regs| switch (shift_mcv) {
                .immediate => |shift_imm| if (shift_imm > 0 and shift_imm < 64) {
                    try self.asmRegisterRegisterImmediate(
                        info.double_tag,
                        lhs_regs[info.indices[1]],
                        lhs_regs[info.indices[0]],
                        .u(shift_imm),
                    );
                    try self.asmRegisterImmediate(
                        tag,
                        lhs_regs[info.indices[0]],
                        .u(shift_imm),
                    );
                    return;
                } else {
                    assert(shift_imm < 128);
                    try self.asmRegisterRegister(
                        .{ ._, .mov },
                        lhs_regs[info.indices[1]],
                        lhs_regs[info.indices[0]],
                    );
                    if (tag[0] == ._r and tag[1] == .sa) try self.asmRegisterImmediate(
                        tag,
                        lhs_regs[info.indices[0]],
                        .u(63),
                    ) else try self.asmRegisterRegister(
                        .{ ._, .xor },
                        lhs_regs[info.indices[0]],
                        lhs_regs[info.indices[0]],
                    );
                    if (shift_imm > 64) try self.asmRegisterImmediate(
                        tag,
                        lhs_regs[info.indices[1]],
                        .u(shift_imm - 64),
                    );
                    return;
                },
                .register => |shift_reg| {
                    const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
                    const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
                    defer self.register_manager.unlockReg(tmp_lock);

                    if (tag[0] == ._r and tag[1] == .sa) {
                        try self.asmRegisterRegister(.{ ._, .mov }, tmp_reg, lhs_regs[info.indices[0]]);
                        try self.asmRegisterImmediate(tag, tmp_reg, .u(63));
                    } else try self.asmRegisterRegister(
                        .{ ._, .xor },
                        tmp_reg.to32(),
                        tmp_reg.to32(),
                    );
                    try self.asmRegisterRegisterRegister(
                        info.double_tag,
                        lhs_regs[info.indices[1]],
                        lhs_regs[info.indices[0]],
                        registerAlias(shift_reg, 1),
                    );
                    try self.asmRegisterRegister(
                        tag,
                        lhs_regs[info.indices[0]],
                        registerAlias(shift_reg, 1),
                    );
                    try self.asmRegisterImmediate(.{ ._, .cmp }, registerAlias(shift_reg, 1), .u(64));
                    try self.asmCmovccRegisterRegister(
                        .ae,
                        lhs_regs[info.indices[1]],
                        lhs_regs[info.indices[0]],
                    );
                    try self.asmCmovccRegisterRegister(.ae, lhs_regs[info.indices[0]], tmp_reg);
                    return;
                },
                else => {},
            },
            .load_frame => |dst_frame_addr| {
                const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
                const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
                defer self.register_manager.unlockReg(tmp_lock);

                switch (shift_mcv) {
                    .immediate => |shift_imm| if (shift_imm > 0 and shift_imm < 64) {
                        try self.asmRegisterMemory(
                            .{ ._, .mov },
                            tmp_reg,
                            .{
                                .base = .{ .frame = dst_frame_addr.index },
                                .mod = .{ .rm = .{
                                    .size = .qword,
                                    .disp = dst_frame_addr.off + info.indices[0] * 8,
                                } },
                            },
                        );
                        try self.asmMemoryRegisterImmediate(
                            info.double_tag,
                            .{
                                .base = .{ .frame = dst_frame_addr.index },
                                .mod = .{ .rm = .{
                                    .size = .qword,
                                    .disp = dst_frame_addr.off + info.indices[1] * 8,
                                } },
                            },
                            tmp_reg,
                            .u(shift_imm),
                        );
                        try self.asmMemoryImmediate(
                            tag,
                            .{
                                .base = .{ .frame = dst_frame_addr.index },
                                .mod = .{ .rm = .{
                                    .size = .qword,
                                    .disp = dst_frame_addr.off + info.indices[0] * 8,
                                } },
                            },
                            .u(shift_imm),
                        );
                        return;
                    } else {
                        assert(shift_imm < 128);
                        try self.asmRegisterMemory(
                            .{ ._, .mov },
                            tmp_reg,
                            .{
                                .base = .{ .frame = dst_frame_addr.index },
                                .mod = .{ .rm = .{
                                    .size = .qword,
                                    .disp = dst_frame_addr.off + info.indices[0] * 8,
                                } },
                            },
                        );
                        if (shift_imm > 64) try self.asmRegisterImmediate(
                            tag,
                            tmp_reg,
                            .u(shift_imm - 64),
                        );
                        try self.asmMemoryRegister(
                            .{ ._, .mov },
                            .{
                                .base = .{ .frame = dst_frame_addr.index },
                                .mod = .{ .rm = .{
                                    .size = .qword,
                                    .disp = dst_frame_addr.off + info.indices[1] * 8,
                                } },
                            },
                            tmp_reg,
                        );
                        if (tag[0] == ._r and tag[1] == .sa) try self.asmMemoryImmediate(
                            tag,
                            .{
                                .base = .{ .frame = dst_frame_addr.index },
                                .mod = .{ .rm = .{
                                    .size = .qword,
                                    .disp = dst_frame_addr.off + info.indices[0] * 8,
                                } },
                            },
                            .u(63),
                        ) else {
                            try self.asmRegisterRegister(.{ ._, .xor }, tmp_reg.to32(), tmp_reg.to32());
                            try self.asmMemoryRegister(
                                .{ ._, .mov },
                                .{
                                    .base = .{ .frame = dst_frame_addr.index },
                                    .mod = .{ .rm = .{
                                        .size = .qword,
                                        .disp = dst_frame_addr.off + info.indices[0] * 8,
                                    } },
                                },
                                tmp_reg,
                            );
                        }
                        return;
                    },
                    .register => |shift_reg| {
                        const first_reg =
                            try self.register_manager.allocReg(null, abi.RegisterClass.gp);
                        const first_lock = self.register_manager.lockRegAssumeUnused(first_reg);
                        defer self.register_manager.unlockReg(first_lock);

                        const second_reg =
                            try self.register_manager.allocReg(null, abi.RegisterClass.gp);
                        const second_lock = self.register_manager.lockRegAssumeUnused(second_reg);
                        defer self.register_manager.unlockReg(second_lock);

                        try self.asmRegisterMemory(
                            .{ ._, .mov },
                            first_reg,
                            .{
                                .base = .{ .frame = dst_frame_addr.index },
                                .mod = .{ .rm = .{
                                    .size = .qword,
                                    .disp = dst_frame_addr.off + info.indices[0] * 8,
                                } },
                            },
                        );
                        try self.asmRegisterMemory(
                            .{ ._, .mov },
                            second_reg,
                            .{
                                .base = .{ .frame = dst_frame_addr.index },
                                .mod = .{ .rm = .{
                                    .size = .qword,
                                    .disp = dst_frame_addr.off + info.indices[1] * 8,
                                } },
                            },
                        );
                        if (tag[0] == ._r and tag[1] == .sa) {
                            try self.asmRegisterRegister(.{ ._, .mov }, tmp_reg, first_reg);
                            try self.asmRegisterImmediate(tag, tmp_reg, .u(63));
                        } else try self.asmRegisterRegister(
                            .{ ._, .xor },
                            tmp_reg.to32(),
                            tmp_reg.to32(),
                        );
                        try self.asmRegisterRegisterRegister(
                            info.double_tag,
                            second_reg,
                            first_reg,
                            registerAlias(shift_reg, 1),
                        );
                        try self.asmRegisterRegister(tag, first_reg, registerAlias(shift_reg, 1));
                        try self.asmRegisterImmediate(
                            .{ ._, .cmp },
                            registerAlias(shift_reg, 1),
                            .u(64),
                        );
                        try self.asmCmovccRegisterRegister(.ae, second_reg, first_reg);
                        try self.asmCmovccRegisterRegister(.ae, first_reg, tmp_reg);
                        try self.asmMemoryRegister(
                            .{ ._, .mov },
                            .{
                                .base = .{ .frame = dst_frame_addr.index },
                                .mod = .{ .rm = .{
                                    .size = .qword,
                                    .disp = dst_frame_addr.off + info.indices[1] * 8,
                                } },
                            },
                            second_reg,
                        );
                        try self.asmMemoryRegister(
                            .{ ._, .mov },
                            .{
                                .base = .{ .frame = dst_frame_addr.index },
                                .mod = .{ .rm = .{
                                    .size = .qword,
                                    .disp = dst_frame_addr.off + info.indices[0] * 8,
                                } },
                            },
                            first_reg,
                        );
                        return;
                    },
                    else => {},
                }
            },
            else => {},
        }
    } else switch (lhs_mcv) {
        .register => |lhs_reg| switch (shift_mcv) {
            .immediate => |shift_imm| return self.asmRegisterImmediate(
                tag,
                registerAlias(lhs_reg, abi_size),
                .u(shift_imm),
            ),
            .register => |shift_reg| return self.asmRegisterRegister(
                tag,
                registerAlias(lhs_reg, abi_size),
                registerAlias(shift_reg, 1),
            ),
            else => {},
        },
        .memory, .indirect, .load_frame => {
            const lhs_mem: Memory = switch (lhs_mcv) {
                .memory => |addr| .{
                    .base = .{ .reg = .ds },
                    .mod = .{ .rm = .{
                        .size = .fromSize(abi_size),
                        .disp = std.math.cast(i32, @as(i64, @bitCast(addr))) orelse
                            return self.fail("TODO genShiftBinOpMir between {s} and {s}", .{
                                @tagName(lhs_mcv),
                                @tagName(shift_mcv),
                            }),
                    } },
                },
                .indirect => |reg_off| .{
                    .base = .{ .reg = reg_off.reg },
                    .mod = .{ .rm = .{
                        .size = .fromSize(abi_size),
                        .disp = reg_off.off,
                    } },
                },
                .load_frame => |frame_addr| .{
                    .base = .{ .frame = frame_addr.index },
                    .mod = .{ .rm = .{
                        .size = .fromSize(abi_size),
                        .disp = frame_addr.off,
                    } },
                },
                else => unreachable,
            };
            switch (shift_mcv) {
                .immediate => |shift_imm| return self.asmMemoryImmediate(tag, lhs_mem, .u(shift_imm)),
                .register => |shift_reg| return self.asmMemoryRegister(
                    tag,
                    lhs_mem,
                    registerAlias(shift_reg, 1),
                ),
                else => {},
            }
        },
        else => {},
    }
    return self.fail("TODO genShiftBinOpMir between {s} and {s}", .{
        @tagName(lhs_mcv),
        @tagName(shift_mcv),
    });
}

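/// Maps AIR shl/shr (exact or not) onto sal/shl/sar/shr according to the
/// operand's signedness, copying the lhs into a reusable destination first
/// and materializing non-GP lhs values into a temporary location.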
/// Result is always a register.
/// Clobbers .rcx for non-immediate rhs, therefore care is needed to spill .rcx upfront.
/// Asserts .rcx is free.
fn genShiftBinOp(
    self: *CodeGen,
    air_tag: Air.Inst.Tag,
    maybe_inst: ?Air.Inst.Index,
    lhs_mcv: MCValue,
    rhs_mcv: MCValue,
    lhs_ty: Type,
    rhs_ty: Type,
) !MCValue {
    const pt = self.pt;
    const zcu = pt.zcu;
    if (lhs_ty.zigTypeTag(zcu) == .vector) return self.fail("TODO implement genShiftBinOp for {}", .{
        lhs_ty.fmt(pt),
    });

    try self.register_manager.getKnownReg(.rcx, null);
    const rcx_lock = self.register_manager.lockReg(.rcx);
    defer if (rcx_lock) |lock| self.register_manager.unlockReg(lock);

    const mat_lhs_mcv: MCValue, const can_reuse_lhs = switch (lhs_mcv) {
        .register => |lhs_reg| switch (lhs_reg.class()) {
            .general_purpose => .{ lhs_mcv, true },
            else => lhs: {
                const mat_lhs_mcv = try self.allocTempRegOrMem(lhs_ty, true);
                try self.genCopy(lhs_ty, mat_lhs_mcv, lhs_mcv, .{});
                break :lhs .{ mat_lhs_mcv, false };
            },
        },
        else => .{ lhs_mcv, true },
    };
    const lhs_lock = switch (mat_lhs_mcv) {
        .register => |reg| self.register_manager.lockReg(reg),
        else => null,
    };
    defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);

    const rhs_lock = switch (rhs_mcv) {
        .register => |reg| self.register_manager.lockReg(reg),
        else => null,
    };
    defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);

    const dst_mcv: MCValue = dst: {
        if (can_reuse_lhs) if (maybe_inst) |inst| {
            const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
            if (self.reuseOperand(inst, bin_op.lhs, 0, mat_lhs_mcv)) break :dst mat_lhs_mcv;
        };
        const dst_mcv = try self.allocRegOrMemAdvanced(lhs_ty, maybe_inst, true);
        try self.genCopy(lhs_ty, dst_mcv, mat_lhs_mcv, .{});
        break :dst dst_mcv;
    };

    const signedness = lhs_ty.intInfo(zcu).signedness;
    try self.genShiftBinOpMir(switch (air_tag) {
        .shl, .shl_exact => switch (signedness) {
            .signed => .{ ._l, .sa },
            .unsigned => .{ ._l, .sh },
        },
        .shr, .shr_exact => switch (signedness) {
            .signed => .{ ._r, .sa },
            .unsigned => .{ ._r, .sh },
        },
        else => unreachable,
    }, lhs_ty, dst_mcv, rhs_ty, rhs_mcv);
    return dst_mcv;
}

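/// A 16-byte multiply is expanded inline as schoolbook multiplication on
/// 64-bit halves: `mul` provides lo(a)*lo(b) in rdx:rax, and the two cross
/// products lo(a)*hi(b) and hi(a)*lo(b) are added into rdx; any carry out of
/// 128 bits is discarded, which matches wrapping semantics. Wider multiplies
/// loop over limbs with add-with-carry, and wide unsigned division is
/// delegated to the compiler-rt routines __udivei4/__umodei4.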
/// Result is always a register.
/// Clobbers .rax and .rdx therefore care is needed to spill .rax and .rdx upfront.
/// Asserts .rax and .rdx are free.
fn genMulDivBinOp(
    self: *CodeGen,
    tag: Air.Inst.Tag,
    maybe_inst: ?Air.Inst.Index,
    dst_ty: Type,
    src_ty: Type,
    lhs_mcv: MCValue,
    rhs_mcv: MCValue,
) !MCValue {
    const pt = self.pt;
    const zcu = pt.zcu;
    if (dst_ty.zigTypeTag(zcu) == .vector or dst_ty.zigTypeTag(zcu) == .float) return self.fail(
        "TODO implement genMulDivBinOp for {s} from {} to {}",
        .{ @tagName(tag), src_ty.fmt(pt), dst_ty.fmt(pt) },
    );
    const dst_abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
    const src_abi_size: u32 = @intCast(src_ty.abiSize(zcu));

    assert(self.register_manager.isRegFree(.rax));
    assert(self.register_manager.isRegFree(.rcx));
    assert(self.register_manager.isRegFree(.rdx));
    assert(self.eflags_inst == null);

    if (dst_abi_size == 16 and src_abi_size == 16) {
        assert(tag == .mul or tag == .mul_wrap);
        const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx });
        defer for (reg_locks) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock);

        const mat_lhs_mcv = switch (lhs_mcv) {
            .load_symbol => mat_lhs_mcv: {
                // TODO clean this up!
                const addr_reg = try self.copyToTmpRegister(.usize, lhs_mcv.address());
                break :mat_lhs_mcv MCValue{ .indirect = .{ .reg = addr_reg } };
            },
            else => lhs_mcv,
        };
        const mat_lhs_lock = switch (mat_lhs_mcv) {
            .indirect => |reg_off| self.register_manager.lockReg(reg_off.reg),
            else => null,
        };
        defer if (mat_lhs_lock) |lock| self.register_manager.unlockReg(lock);
        const mat_rhs_mcv = switch (rhs_mcv) {
            .load_symbol => mat_rhs_mcv: {
                // TODO clean this up!
                const addr_reg = try self.copyToTmpRegister(.usize, rhs_mcv.address());
                break :mat_rhs_mcv MCValue{ .indirect = .{ .reg = addr_reg } };
            },
            else => rhs_mcv,
        };
        const mat_rhs_lock = switch (mat_rhs_mcv) {
            .indirect => |reg_off| self.register_manager.lockReg(reg_off.reg),
            else => null,
        };
        defer if (mat_rhs_lock) |lock| self.register_manager.unlockReg(lock);

        const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
        const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
        defer self.register_manager.unlockReg(tmp_lock);

        if (mat_lhs_mcv.isBase())
            try self.asmRegisterMemory(.{ ._, .mov }, .rax, try mat_lhs_mcv.mem(self, .{ .size = .qword }))
        else
            try self.asmRegisterRegister(.{ ._, .mov }, .rax, mat_lhs_mcv.register_pair[0]);
        if (mat_rhs_mcv.isBase()) try self.asmRegisterMemory(
            .{ ._, .mov },
            tmp_reg,
            try mat_rhs_mcv.address().offset(8).deref().mem(self, .{ .size = .qword }),
        ) else try self.asmRegisterRegister(.{ ._, .mov }, tmp_reg, mat_rhs_mcv.register_pair[1]);
        try self.asmRegisterRegister(.{ .i_, .mul }, tmp_reg, .rax);
        if (mat_rhs_mcv.isBase())
            try self.asmMemory(.{ ._, .mul }, try mat_rhs_mcv.mem(self, .{ .size = .qword }))
        else
            try self.asmRegister(.{ ._, .mul }, mat_rhs_mcv.register_pair[0]);
        try self.asmRegisterRegister(.{ ._, .add }, .rdx, tmp_reg);
        if (mat_lhs_mcv.isBase()) try self.asmRegisterMemory(
            .{ ._, .mov },
            tmp_reg,
            try mat_lhs_mcv.address().offset(8).deref().mem(self, .{ .size = .qword }),
        ) else try self.asmRegisterRegister(.{ ._, .mov }, tmp_reg, mat_lhs_mcv.register_pair[1]);
        if (mat_rhs_mcv.isBase())
            try self.asmRegisterMemory(.{ .i_, .mul }, tmp_reg, try mat_rhs_mcv.mem(self, .{ .size = .qword }))
        else
            try self.asmRegisterRegister(.{ .i_, .mul }, tmp_reg, mat_rhs_mcv.register_pair[0]);
        try self.asmRegisterRegister(.{ ._, .add }, .rdx, tmp_reg);
        return .{ .register_pair = .{ .rax, .rdx } };
    }

    if (switch (tag) {
        else => unreachable,
        .mul, .mul_wrap => dst_abi_size != src_abi_size and dst_abi_size != src_abi_size * 2,
        .div_trunc, .div_floor, .div_exact, .rem, .mod => dst_abi_size != src_abi_size,
    } or src_abi_size > 8) {
        const src_info = src_ty.intInfo(zcu);
        switch (tag) {
            .mul, .mul_wrap => {
                const slow_inc = self.hasFeature(.slow_incdec);
                const limb_len = std.math.divCeil(u32, src_abi_size, 8) catch unreachable;

                try self.spillRegisters(&.{ .rax, .rcx, .rdx });
                const reg_locks = self.register_manager.lockRegs(3, .{ .rax, .rcx, .rdx });
                defer for (reg_locks) |reg_lock| if (reg_lock) |lock|
                    self.register_manager.unlockReg(lock);

                const dst_mcv = try self.allocRegOrMemAdvanced(dst_ty, maybe_inst, false);
                try self.genInlineMemset(
                    dst_mcv.address(),
                    .{ .immediate = 0 },
                    .{ .immediate = src_abi_size },
                    .{},
                );

                const temp_regs =
                    try self.register_manager.allocRegs(4, @splat(null), abi.RegisterClass.gp);
                const temp_locks = self.register_manager.lockRegsAssumeUnused(4, temp_regs);
                defer for (temp_locks) |lock| self.register_manager.unlockReg(lock);

                try self.asmRegisterRegister(.{ ._, .xor }, temp_regs[0].to32(), temp_regs[0].to32());

                const outer_loop: Mir.Inst.Index = @intCast(self.mir_instructions.len);
                try self.asmRegisterMemory(.{ ._, .mov }, temp_regs[1].to64(), .{
                    .base = .{ .frame = rhs_mcv.load_frame.index },
                    .mod = .{ .rm = .{
                        .size = .qword,
                        .index = temp_regs[0].to64(),
                        .scale = .@"8",
                        .disp = rhs_mcv.load_frame.off,
                    } },
                });
                try self.asmRegisterRegister(.{ ._, .@"test" }, temp_regs[1].to64(), temp_regs[1].to64());
                const skip_inner = try self.asmJccReloc(.z, undefined);

                try self.asmRegisterRegister(.{ ._, .xor }, temp_regs[2].to32(), temp_regs[2].to32());
                try self.asmRegisterRegister(.{ ._, .mov }, temp_regs[3].to32(), temp_regs[0].to32());
                try self.asmRegisterRegister(.{ ._, .xor }, .ecx, .ecx);
                try self.asmRegisterRegister(.{ ._, .xor }, .edx, .edx);

                const inner_loop: Mir.Inst.Index = @intCast(self.mir_instructions.len);
                try self.asmRegisterImmediate(.{ ._r, .sh }, .cl, .u(1));
                try self.asmMemoryRegister(.{ ._, .adc }, .{
                    .base = .{ .frame = dst_mcv.load_frame.index },
                    .mod = .{ .rm = .{
                        .size = .qword,
                        .index = temp_regs[3].to64(),
                        .scale = .@"8",
                        .disp = dst_mcv.load_frame.off,
                    } },
                }, .rdx);
                try self.asmSetccRegister(.c, .cl);

                try self.asmRegisterMemory(.{ ._, .mov }, .rax, .{
                    .base = .{ .frame = lhs_mcv.load_frame.index },
                    .mod = .{ .rm = .{
                        .size = .qword,
                        .index = temp_regs[2].to64(),
                        .scale = .@"8",
                        .disp = lhs_mcv.load_frame.off,
                    } },
                });
                try self.asmRegister(.{ ._, .mul }, temp_regs[1].to64());

                try self.asmRegisterImmediate(.{ ._r, .sh }, .ch, .u(1));
                try self.asmMemoryRegister(.{ ._, .adc }, .{
                    .base = .{ .frame = dst_mcv.load_frame.index },
                    .mod = .{ .rm = .{
                        .size = .qword,
                        .index = temp_regs[3].to64(),
                        .scale = .@"8",
                        .disp = dst_mcv.load_frame.off,
                    } },
                }, .rax);
                try self.asmSetccRegister(.c, .ch);

                if (slow_inc) {
                    try self.asmRegisterImmediate(.{ ._, .add }, temp_regs[2].to32(), .u(1));
                    try self.asmRegisterImmediate(.{ ._, .add }, temp_regs[3].to32(), .u(1));
                } else {
                    try self.asmRegister(.{ ._c, .in }, temp_regs[2].to32());
                    try self.asmRegister(.{ ._c, .in }, temp_regs[3].to32());
                }
                try self.asmRegisterImmediate(.{ ._, .cmp }, temp_regs[3].to32(), .u(limb_len));
                _ = try self.asmJccReloc(.b, inner_loop);

                self.performReloc(skip_inner);
                if (slow_inc) {
                    try self.asmRegisterImmediate(.{ ._, .add }, temp_regs[0].to32(), .u(1));
                } else {
                    try self.asmRegister(.{ ._c, .in }, temp_regs[0].to32());
                }
                try self.asmRegisterImmediate(.{ ._, .cmp }, temp_regs[0].to32(), .u(limb_len));
                _ = try self.asmJccReloc(.b, outer_loop);

                return dst_mcv;
            },
            .div_trunc, .div_floor, .div_exact, .rem, .mod => switch (src_info.signedness) {
                .signed => {},
                .unsigned => {
                    const dst_mcv = try self.allocRegOrMemAdvanced(dst_ty, maybe_inst, false);
                    const manyptr_u32_ty = try pt.ptrType(.{
                        .child = .u32_type,
                        .flags = .{
                            .size = .many,
                        },
                    });
                    const manyptr_const_u32_ty = try pt.ptrType(.{
                        .child = .u32_type,
                        .flags = .{
                            .size = .many,
                            .is_const = true,
                        },
                    });
                    _ = try self.genCall(.{ .lib = .{
                        .return_type = .void_type,
                        .param_types = &.{
                            manyptr_u32_ty.toIntern(),
                            manyptr_const_u32_ty.toIntern(),
                            manyptr_const_u32_ty.toIntern(),
                            .usize_type,
                        },
                        .callee = switch (tag) {
                            .div_trunc,
                            .div_floor,
                            .div_exact,
                            => "__udivei4",
                            .rem,
                            .mod,
                            => "__umodei4",
                            else => unreachable,
                        },
                    } }, &.{
                        manyptr_u32_ty,
                        manyptr_const_u32_ty,
                        manyptr_const_u32_ty,
                        .usize,
                    }, &.{
                        dst_mcv.address(),
                        lhs_mcv.address(),
                        rhs_mcv.address(),
                        .{ .immediate = 8 * src_abi_size },
                    }, .{});
                    return dst_mcv;
                },
            },
            else => {},
        }
        return self.fail(
            "TODO implement genMulDivBinOp for {s} from {} to {}",
            .{ @tagName(tag), src_ty.fmt(pt), dst_ty.fmt(pt) },
        );
    }
    const ty = if (dst_abi_size <= 8) dst_ty else src_ty;
    const abi_size = if (dst_abi_size <= 8) dst_abi_size else src_abi_size;

    const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx });
    defer for (reg_locks) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock);

    const int_info = ty.intInfo(zcu);
    const signedness = int_info.signedness;
    switch (tag) {
        .mul,
        .mul_wrap,
        .rem,
        .div_trunc,
        .div_exact,
        => {
            const track_inst_rax = switch (tag) {
                .mul, .mul_wrap => if (dst_abi_size <= 8) maybe_inst else null,
                .div_exact, .div_trunc => maybe_inst,
                else => null,
            };
            const track_inst_rdx = switch (tag) {
                .rem => maybe_inst,
                else => null,
            };
            try self.register_manager.getKnownReg(.rax, track_inst_rax);
            try self.register_manager.getKnownReg(.rdx, track_inst_rdx);

            try self.genIntMulDivOpMir(switch (signedness) {
                .signed => switch (tag) {
                    .mul, .mul_wrap => .{ .i_, .mul },
                    .div_trunc, .div_exact, .rem => .{ .i_, .div },
                    else => unreachable,
                },
                .unsigned => switch (tag) {
                    .mul, .mul_wrap => .{ ._, .mul },
                    .div_trunc, .div_exact, .rem => .{ ._, .div },
                    else => unreachable,
                },
            }, ty, lhs_mcv, rhs_mcv);

            switch (tag) {
                .mul, .rem, .div_trunc, .div_exact => {},
                .mul_wrap => if (dst_ty.intInfo(zcu).bits < 8 * dst_abi_size) try self.truncateRegister(
                    dst_ty,
                    if (dst_abi_size <= 8) .rax else .rdx,
                ),
                else => unreachable,
            }

            if (dst_abi_size <= 8) return .{ .register = registerAlias(switch (tag) {
                .mul, .mul_wrap, .div_trunc, .div_exact => .rax,
                .rem => .rdx,
                else => unreachable,
            }, dst_abi_size) };

            const dst_mcv = try self.allocRegOrMemAdvanced(dst_ty, maybe_inst, false);
            try self.asmMemoryRegister(.{ ._, .mov }, .{
                .base = .{ .frame = dst_mcv.load_frame.index },
                .mod = .{ .rm = .{
                    .size = .qword,
                    .disp = dst_mcv.load_frame.off,
                } },
            }, .rax);
            try self.asmMemoryRegister(.{ ._, .mov }, .{
                .base = .{ .frame = dst_mcv.load_frame.index },
                .mod = .{ .rm = .{
                    .size = .qword,
                    .disp = dst_mcv.load_frame.off + 8,
                } },
            }, .rdx);
            return dst_mcv;
        },

        .mod => {
            try self.register_manager.getKnownReg(.rax, null);
            try self.register_manager.getKnownReg(
                .rdx,
                if (signedness == .unsigned) maybe_inst else null,
            );

            switch (signedness) {
                .signed => {
                    const lhs_lock = switch (lhs_mcv) {
                        .register => |reg| self.register_manager.lockReg(reg),
                        else => null,
                    };
                    defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
                    const rhs_lock = switch (rhs_mcv) {
                        .register => |reg| self.register_manager.lockReg(reg),
                        else => null,
                    };
                    defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);

                    // hack around hazard between rhs and div_floor by copying rhs to another register
                    const rhs_copy = try self.copyToTmpRegister(ty, rhs_mcv);
                    const rhs_copy_lock = self.register_manager.lockRegAssumeUnused(rhs_copy);
                    defer self.register_manager.unlockReg(rhs_copy_lock);

                    const div_floor = try self.genInlineIntDivFloor(ty, lhs_mcv, rhs_mcv);
                    try self.genIntMulComplexOpMir(ty, div_floor, .{ .register = rhs_copy });
                    const div_floor_lock = self.register_manager.lockReg(div_floor.register);
                    defer if (div_floor_lock) |lock| self.register_manager.unlockReg(lock);

                    const result: MCValue = if (maybe_inst) |inst|
                        try self.copyToRegisterWithInstTracking(inst, ty, lhs_mcv)
                    else
                        .{ .register = try self.copyToTmpRegister(ty, lhs_mcv) };
                    try self.genBinOpMir(.{ ._, .sub }, ty, result, div_floor);

                    return result;
                },
                .unsigned => {
                    try self.genIntMulDivOpMir(.{ ._, .div }, ty, lhs_mcv, rhs_mcv);
                    return .{ .register = registerAlias(.rdx, abi_size) };
                },
            }
        },

        .div_floor => {
            try self.register_manager.getKnownReg(
                .rax,
                if (signedness == .unsigned) maybe_inst else null,
            );
            try self.register_manager.getKnownReg(.rdx, null);

            const lhs_lock: ?RegisterLock = switch (lhs_mcv) {
                .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
                else => null,
            };
            defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);

            const actual_rhs_mcv: MCValue = blk: {
                switch (signedness) {
                    .signed => {
                        const rhs_lock: ?RegisterLock = switch (rhs_mcv) {
                            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
                            else => null,
                        };
                        defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);

                        if (maybe_inst) |inst| {
                            break :blk try self.copyToRegisterWithInstTracking(inst, ty, rhs_mcv);
                        }
                        break :blk MCValue{ .register = try self.copyToTmpRegister(ty, rhs_mcv) };
                    },
                    .unsigned => break :blk rhs_mcv,
                }
            };
            const rhs_lock: ?RegisterLock = switch (actual_rhs_mcv) {
                .register => |reg| self.register_manager.lockReg(reg),
                else => null,
            };
            defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);

            switch (signedness) {
                .signed => return try self.genInlineIntDivFloor(ty, lhs_mcv, actual_rhs_mcv),
                .unsigned => {
                    try self.genIntMulDivOpMir(.{ ._, .div }, ty, lhs_mcv, actual_rhs_mcv);
                    return .{ .register = registerAlias(.rax, abi_size) };
                },
            }
        },

        else => unreachable,
    }
}

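/// Lowers a generic binary AIR operation. Runtime-float cases the target has
/// no native instructions for (f16 without f16c, f80, f128) and all float
/// rem/mod are routed through compiler-rt/libc; float `mod` is rebuilt from
/// `fmod` by adding the rhs and taking `fmod` once more, which folds a
/// negative remainder into the positive range. For float min/max without AVX,
/// xmm0 may be reserved as the mask register, presumably for the implicit
/// xmm0 operand of the SSE4.1 blendv instructions.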
fn genBinOp(
|
|
self: *CodeGen,
|
|
maybe_inst: ?Air.Inst.Index,
|
|
air_tag: Air.Inst.Tag,
|
|
lhs_air: Air.Inst.Ref,
|
|
rhs_air: Air.Inst.Ref,
|
|
) !MCValue {
|
|
const pt = self.pt;
|
|
const zcu = pt.zcu;
|
|
const lhs_ty = self.typeOf(lhs_air);
|
|
const rhs_ty = self.typeOf(rhs_air);
|
|
const abi_size: u32 = @intCast(lhs_ty.abiSize(zcu));
|
|
|
|
if (lhs_ty.isRuntimeFloat()) libcall: {
|
|
const float_bits = lhs_ty.floatBits(self.target.*);
|
|
const type_needs_libcall = switch (float_bits) {
|
|
16 => !self.hasFeature(.f16c),
|
|
32, 64 => false,
|
|
80, 128 => true,
|
|
else => unreachable,
|
|
};
|
|
switch (air_tag) {
|
|
.rem, .mod => {},
|
|
else => if (!type_needs_libcall) break :libcall,
|
|
}
|
|
var callee_buf: ["__mod?f3".len]u8 = undefined;
|
|
const callee = switch (air_tag) {
|
|
.add,
|
|
.sub,
|
|
.mul,
|
|
.div_float,
|
|
.div_trunc,
|
|
.div_floor,
|
|
.div_exact,
|
|
=> std.fmt.bufPrint(&callee_buf, "__{s}{c}f3", .{
|
|
@tagName(air_tag)[0..3],
|
|
floatCompilerRtAbiName(float_bits),
|
|
}),
|
|
.rem, .mod, .min, .max => std.fmt.bufPrint(&callee_buf, "{s}f{s}{s}", .{
|
|
floatLibcAbiPrefix(lhs_ty),
|
|
switch (air_tag) {
|
|
.rem, .mod => "mod",
|
|
.min => "min",
|
|
.max => "max",
|
|
else => unreachable,
|
|
},
|
|
floatLibcAbiSuffix(lhs_ty),
|
|
}),
|
|
else => return self.fail("TODO implement genBinOp for {s} {}", .{
|
|
@tagName(air_tag), lhs_ty.fmt(pt),
|
|
}),
|
|
} catch unreachable;
|
|
const result = try self.genCall(.{ .lib = .{
|
|
.return_type = lhs_ty.toIntern(),
|
|
.param_types = &.{ lhs_ty.toIntern(), rhs_ty.toIntern() },
|
|
.callee = callee,
|
|
} }, &.{ lhs_ty, rhs_ty }, &.{ .{ .air_ref = lhs_air }, .{ .air_ref = rhs_air } }, .{});
|
|
return switch (air_tag) {
|
|
.mod => result: {
|
|
const adjusted: MCValue = if (type_needs_libcall) adjusted: {
|
|
var add_callee_buf: ["__add?f3".len]u8 = undefined;
|
|
break :adjusted try self.genCall(.{ .lib = .{
|
|
.return_type = lhs_ty.toIntern(),
|
|
.param_types = &.{
|
|
lhs_ty.toIntern(),
|
|
rhs_ty.toIntern(),
|
|
},
|
|
.callee = std.fmt.bufPrint(&add_callee_buf, "__add{c}f3", .{
|
|
floatCompilerRtAbiName(float_bits),
|
|
}) catch unreachable,
|
|
} }, &.{ lhs_ty, rhs_ty }, &.{ result, .{ .air_ref = rhs_air } }, .{});
|
|
} else switch (float_bits) {
|
|
16, 32, 64 => adjusted: {
|
|
const dst_reg = switch (result) {
|
|
.register => |reg| reg,
|
|
else => if (maybe_inst) |inst|
|
|
(try self.copyToRegisterWithInstTracking(inst, lhs_ty, result)).register
|
|
else
|
|
try self.copyToTmpRegister(lhs_ty, result),
|
|
};
|
|
const dst_lock = self.register_manager.lockReg(dst_reg);
|
|
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
|
|
|
|
const rhs_mcv = try self.resolveInst(rhs_air);
|
|
const src_mcv: MCValue = if (float_bits == 16) src: {
|
|
assert(self.hasFeature(.f16c));
|
|
const tmp_reg = (try self.register_manager.allocReg(
|
|
null,
|
|
abi.RegisterClass.sse,
|
|
)).to128();
|
|
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
|
|
defer self.register_manager.unlockReg(tmp_lock);
|
|
|
|
if (rhs_mcv.isBase()) try self.asmRegisterRegisterMemoryImmediate(
|
|
.{ .vp_w, .insr },
|
|
dst_reg,
|
|
dst_reg,
|
|
try rhs_mcv.mem(self, .{ .size = .word }),
|
|
.u(1),
|
|
) else try self.asmRegisterRegisterRegister(
|
|
.{ .vp_, .unpcklwd },
|
|
dst_reg,
|
|
dst_reg,
|
|
(if (rhs_mcv.isRegister())
|
|
rhs_mcv.getReg().?
|
|
else
|
|
try self.copyToTmpRegister(rhs_ty, rhs_mcv)).to128(),
|
|
);
|
|
try self.asmRegisterRegister(.{ .v_ps, .cvtph2 }, dst_reg, dst_reg);
|
|
break :src .{ .register = tmp_reg };
} else rhs_mcv;

if (self.hasFeature(.avx)) {
const mir_tag: Mir.Inst.FixedTag = switch (float_bits) {
16, 32 => .{ .v_ss, .add },
64 => .{ .v_sd, .add },
else => unreachable,
};
if (src_mcv.isBase()) try self.asmRegisterRegisterMemory(
mir_tag,
dst_reg,
dst_reg,
try src_mcv.mem(self, .{ .size = .fromBitSize(float_bits) }),
) else try self.asmRegisterRegisterRegister(
mir_tag,
dst_reg,
dst_reg,
(if (src_mcv.isRegister())
src_mcv.getReg().?
else
try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(),
);
} else {
const mir_tag: Mir.Inst.FixedTag = switch (float_bits) {
32 => .{ ._ss, .add },
64 => .{ ._sd, .add },
else => unreachable,
};
if (src_mcv.isBase()) try self.asmRegisterMemory(
mir_tag,
dst_reg,
try src_mcv.mem(self, .{ .size = .fromBitSize(float_bits) }),
) else try self.asmRegisterRegister(
mir_tag,
dst_reg,
(if (src_mcv.isRegister())
src_mcv.getReg().?
else
try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(),
);
}

if (float_bits == 16) try self.asmRegisterRegisterImmediate(
.{ .v_, .cvtps2ph },
dst_reg,
dst_reg,
bits.RoundMode.imm(.{}),
);
break :adjusted .{ .register = dst_reg };
},
80, 128 => return self.fail("TODO implement genBinOp for {s} of {}", .{
@tagName(air_tag), lhs_ty.fmt(pt),
}),
else => unreachable,
};
break :result try self.genCall(.{ .lib = .{
.return_type = lhs_ty.toIntern(),
.param_types = &.{ lhs_ty.toIntern(), rhs_ty.toIntern() },
.callee = callee,
} }, &.{ lhs_ty, rhs_ty }, &.{ adjusted, .{ .air_ref = rhs_air } }, .{});
},
.div_trunc, .div_floor => try self.genRoundLibcall(lhs_ty, result, .{
.mode = switch (air_tag) {
.div_trunc => .zero,
.div_floor => .down,
else => unreachable,
},
.precision = .inexact,
}),
else => result,
};
}

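// Inline path: floats and non-bool vectors go through SSE/AVX registers,
// everything else through general-purpose instructions.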
const sse_op = switch (lhs_ty.zigTypeTag(zcu)) {
else => false,
.float => true,
.vector => switch (lhs_ty.childType(zcu).toIntern()) {
.bool_type, .u1_type => false,
else => true,
},
};
if (sse_op and ((lhs_ty.scalarType(zcu).isRuntimeFloat() and
lhs_ty.scalarType(zcu).floatBits(self.target.*) == 80) or
lhs_ty.abiSize(zcu) > self.vectorSize(.float)))
return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(air_tag), lhs_ty.fmt(pt) });

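// Float min/max needs a scratch register for a NaN mask. Without AVX,
// SSE4.1 blendv reads its mask implicitly from xmm0, so the mask must
// live there in that case.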
const maybe_mask_reg = switch (air_tag) {
else => null,
.rem, .mod => unreachable,
.max, .min => if (lhs_ty.scalarType(zcu).isRuntimeFloat()) registerAlias(
if (!self.hasFeature(.avx) and self.hasFeature(.sse4_1)) mask: {
try self.register_manager.getKnownReg(.xmm0, null);
break :mask .xmm0;
} else try self.register_manager.allocReg(null, abi.RegisterClass.sse),
abi_size,
) else null,
};
const mask_lock =
if (maybe_mask_reg) |mask_reg| self.register_manager.lockRegAssumeUnused(mask_reg) else null;
defer if (mask_lock) |lock| self.register_manager.unlockReg(lock);

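// The ISA only supplies one direction per comparison (pcmpgt for
// integers, lt/le predicates for floats), so the missing directions are
// obtained by swapping the operands up front.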
const ordered_air: [2]Air.Inst.Ref = if (lhs_ty.isVector(zcu) and
switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
.bool => false,
.int => switch (air_tag) {
.cmp_lt, .cmp_gte => true,
else => false,
},
.float => switch (air_tag) {
.cmp_gte, .cmp_gt => true,
else => false,
},
else => unreachable,
}) .{ rhs_air, lhs_air } else .{ lhs_air, rhs_air };

if (lhs_ty.isAbiInt(zcu)) for (ordered_air) |op_air| {
switch (try self.resolveInst(op_air)) {
.register => |op_reg| switch (op_reg.class()) {
.sse => try self.register_manager.getReg(op_reg, null),
else => {},
},
else => {},
}
};

const lhs_mcv = try self.resolveInst(ordered_air[0]);
var rhs_mcv = try self.resolveInst(ordered_air[1]);
switch (lhs_mcv) {
.immediate => |imm| switch (imm) {
0 => switch (air_tag) {
.sub, .sub_wrap => return self.genUnOp(maybe_inst, .neg, ordered_air[1]),
else => {},
},
else => {},
},
else => {},
}

const is_commutative = switch (air_tag) {
.add,
.add_wrap,
.mul,
.bool_or,
.bit_or,
.bool_and,
.bit_and,
.xor,
.min,
.max,
.cmp_eq,
.cmp_neq,
=> true,

else => false,
};

const lhs_locks: [2]?RegisterLock = switch (lhs_mcv) {
.register => |lhs_reg| .{ self.register_manager.lockRegAssumeUnused(lhs_reg), null },
.register_pair => |lhs_regs| locks: {
const locks = self.register_manager.lockRegsAssumeUnused(2, lhs_regs);
break :locks .{ locks[0], locks[1] };
},
else => @splat(null),
};
defer for (lhs_locks) |lhs_lock| if (lhs_lock) |lock| self.register_manager.unlockReg(lock);

const rhs_locks: [2]?RegisterLock = switch (rhs_mcv) {
.register => |rhs_reg| .{ self.register_manager.lockReg(rhs_reg), null },
.register_pair => |rhs_regs| self.register_manager.lockRegs(2, rhs_regs),
else => @splat(null),
};
defer for (rhs_locks) |rhs_lock| if (rhs_lock) |lock| self.register_manager.unlockReg(lock);

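// Prefer reusing an operand's location as the destination; commutative
// operations may also reuse the rhs, flipping the operands.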
var flipped = false;
var copied_to_dst = true;
const dst_mcv: MCValue = dst: {
const tracked_inst = switch (air_tag) {
else => maybe_inst,
.cmp_lt, .cmp_lte, .cmp_eq, .cmp_gte, .cmp_gt, .cmp_neq => null,
};
if (maybe_inst) |inst| {
if ((!sse_op or lhs_mcv.isRegister()) and
self.reuseOperandAdvanced(inst, ordered_air[0], 0, lhs_mcv, tracked_inst))
break :dst lhs_mcv;
if (is_commutative and (!sse_op or rhs_mcv.isRegister()) and
self.reuseOperandAdvanced(inst, ordered_air[1], 1, rhs_mcv, tracked_inst))
{
flipped = true;
break :dst rhs_mcv;
}
}
const dst_mcv = try self.allocRegOrMemAdvanced(lhs_ty, tracked_inst, true);
if (sse_op and lhs_mcv.isRegister() and self.hasFeature(.avx))
copied_to_dst = false
else
try self.genCopy(lhs_ty, dst_mcv, lhs_mcv, .{});
rhs_mcv = try self.resolveInst(ordered_air[1]);
break :dst dst_mcv;
};
const dst_locks: [2]?RegisterLock = switch (dst_mcv) {
.register => |dst_reg| .{ self.register_manager.lockReg(dst_reg), null },
.register_pair => |dst_regs| self.register_manager.lockRegs(2, dst_regs),
else => @splat(null),
};
defer for (dst_locks) |dst_lock| if (dst_lock) |lock| self.register_manager.unlockReg(lock);

const unmat_src_mcv = if (flipped) lhs_mcv else rhs_mcv;
const src_mcv: MCValue = if (maybe_mask_reg) |mask_reg|
if (self.hasFeature(.avx) and unmat_src_mcv.isRegister() and maybe_inst != null and
self.liveness.operandDies(maybe_inst.?, if (flipped) 0 else 1)) unmat_src_mcv else src: {
try self.genSetReg(mask_reg, rhs_ty, unmat_src_mcv, .{});
break :src .{ .register = mask_reg };
}
else
unmat_src_mcv;
const src_locks: [2]?RegisterLock = switch (src_mcv) {
.register => |src_reg| .{ self.register_manager.lockReg(src_reg), null },
.register_pair => |src_regs| self.register_manager.lockRegs(2, src_regs),
else => @splat(null),
};
defer for (src_locks) |src_lock| if (src_lock) |lock| self.register_manager.unlockReg(lock);

if (!sse_op) {
switch (air_tag) {
.add,
.add_wrap,
=> try self.genBinOpMir(.{ ._, .add }, lhs_ty, dst_mcv, src_mcv),

.sub,
.sub_wrap,
=> try self.genBinOpMir(.{ ._, .sub }, lhs_ty, dst_mcv, src_mcv),

.ptr_add,
.ptr_sub,
=> {
const tmp_reg = try self.copyToTmpRegister(rhs_ty, src_mcv);
const tmp_mcv = MCValue{ .register = tmp_reg };
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_lock);

const elem_size = lhs_ty.elemType2(zcu).abiSize(zcu);
try self.genIntMulComplexOpMir(rhs_ty, tmp_mcv, .{ .immediate = elem_size });
try self.genBinOpMir(
switch (air_tag) {
.ptr_add => .{ ._, .add },
.ptr_sub => .{ ._, .sub },
else => unreachable,
},
lhs_ty,
dst_mcv,
tmp_mcv,
);
},

.bool_or,
.bit_or,
=> try self.genBinOpMir(.{ ._, .@"or" }, lhs_ty, dst_mcv, src_mcv),

.bool_and,
.bit_and,
=> try self.genBinOpMir(.{ ._, .@"and" }, lhs_ty, dst_mcv, src_mcv),

.xor => try self.genBinOpMir(.{ ._, .xor }, lhs_ty, dst_mcv, src_mcv),

.min,
.max,
=> {
const resolved_src_mcv = switch (src_mcv) {
else => src_mcv,
.air_ref => |src_ref| try self.resolveInst(src_ref),
};

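// 128-bit min/max: cmp + sbb performs the full-width comparison through
// the carry chain, then a pair of cmovcc instructions selects the
// winning limbs.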
if (abi_size > 8) {
const dst_regs = switch (dst_mcv) {
.register_pair => |dst_regs| dst_regs,
else => dst: {
const dst_regs = try self.register_manager.allocRegs(2, @splat(null), abi.RegisterClass.gp);
const dst_regs_locks = self.register_manager.lockRegsAssumeUnused(2, dst_regs);
defer for (dst_regs_locks) |lock| self.register_manager.unlockReg(lock);

try self.genCopy(lhs_ty, .{ .register_pair = dst_regs }, dst_mcv, .{});
break :dst dst_regs;
},
};
const dst_regs_locks = self.register_manager.lockRegs(2, dst_regs);
defer for (dst_regs_locks) |dst_lock| if (dst_lock) |lock|
self.register_manager.unlockReg(lock);

const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_lock);

const signed = lhs_ty.isSignedInt(zcu);
const cc: Condition = switch (air_tag) {
.min => if (signed) .nl else .nb,
.max => if (signed) .nge else .nae,
else => unreachable,
};

try self.asmRegisterRegister(.{ ._, .mov }, tmp_reg, dst_regs[1]);
if (src_mcv.isBase()) {
try self.asmRegisterMemory(
.{ ._, .cmp },
dst_regs[0],
try src_mcv.mem(self, .{ .size = .qword }),
);
try self.asmRegisterMemory(
.{ ._, .sbb },
tmp_reg,
try src_mcv.address().offset(8).deref().mem(self, .{ .size = .qword }),
);
try self.asmCmovccRegisterMemory(
cc,
dst_regs[0],
try src_mcv.mem(self, .{ .size = .qword }),
);
try self.asmCmovccRegisterMemory(
cc,
dst_regs[1],
try src_mcv.address().offset(8).deref().mem(self, .{ .size = .qword }),
);
} else {
try self.asmRegisterRegister(
.{ ._, .cmp },
dst_regs[0],
src_mcv.register_pair[0],
);
try self.asmRegisterRegister(
.{ ._, .sbb },
tmp_reg,
src_mcv.register_pair[1],
);
try self.asmCmovccRegisterRegister(cc, dst_regs[0], src_mcv.register_pair[0]);
try self.asmCmovccRegisterRegister(cc, dst_regs[1], src_mcv.register_pair[1]);
}
try self.genCopy(lhs_ty, dst_mcv, .{ .register_pair = dst_regs }, .{});
} else {
const mat_src_mcv: MCValue = if (switch (resolved_src_mcv) {
.immediate,
.eflags,
.register_offset,
.load_symbol,
.lea_symbol,
.load_direct,
.lea_direct,
.load_got,
.lea_got,
.load_tlv,
.lea_tlv,
.lea_frame,
=> true,
.memory => |addr| std.math.cast(i32, @as(i64, @bitCast(addr))) == null,
else => false,
.register_pair,
.register_overflow,
=> unreachable,
})
.{ .register = try self.copyToTmpRegister(rhs_ty, resolved_src_mcv) }
else
resolved_src_mcv;
const mat_mcv_lock = switch (mat_src_mcv) {
.register => |reg| self.register_manager.lockReg(reg),
else => null,
};
defer if (mat_mcv_lock) |lock| self.register_manager.unlockReg(lock);

try self.genBinOpMir(.{ ._, .cmp }, lhs_ty, dst_mcv, mat_src_mcv);

const int_info = lhs_ty.intInfo(zcu);
const cc: Condition = switch (int_info.signedness) {
.unsigned => switch (air_tag) {
.min => .a,
.max => .b,
else => unreachable,
},
.signed => switch (air_tag) {
.min => .g,
.max => .l,
else => unreachable,
},
};

const cmov_abi_size = @max(@as(u32, @intCast(lhs_ty.abiSize(zcu))), 2);
const tmp_reg = switch (dst_mcv) {
.register => |reg| reg,
else => try self.copyToTmpRegister(lhs_ty, dst_mcv),
};
const tmp_lock = self.register_manager.lockReg(tmp_reg);
defer if (tmp_lock) |lock| self.register_manager.unlockReg(lock);
switch (mat_src_mcv) {
.none,
.unreach,
.dead,
.undef,
.immediate,
.eflags,
.register_pair,
.register_triple,
.register_quadruple,
.register_offset,
.register_overflow,
.register_mask,
.load_symbol,
.lea_symbol,
.load_direct,
.lea_direct,
.load_got,
.lea_got,
.load_tlv,
.lea_tlv,
.lea_frame,
.elementwise_regs_then_frame,
.reserved_frame,
.air_ref,
=> unreachable,
.register => |src_reg| try self.asmCmovccRegisterRegister(
cc,
registerAlias(tmp_reg, cmov_abi_size),
registerAlias(src_reg, cmov_abi_size),
),
.memory, .indirect, .load_frame => try self.asmCmovccRegisterMemory(
cc,
registerAlias(tmp_reg, cmov_abi_size),
switch (mat_src_mcv) {
.memory => |addr| .{
.base = .{ .reg = .ds },
.mod = .{ .rm = .{
.size = .fromSize(cmov_abi_size),
.disp = @intCast(@as(i64, @bitCast(addr))),
} },
},
.indirect => |reg_off| .{
.base = .{ .reg = reg_off.reg },
.mod = .{ .rm = .{
.size = .fromSize(cmov_abi_size),
.disp = reg_off.off,
} },
},
.load_frame => |frame_addr| .{
.base = .{ .frame = frame_addr.index },
.mod = .{ .rm = .{
.size = .fromSize(cmov_abi_size),
.disp = frame_addr.off,
} },
},
else => unreachable,
},
),
}
try self.genCopy(lhs_ty, dst_mcv, .{ .register = tmp_reg }, .{});
}
},

.cmp_eq, .cmp_neq => {
assert(lhs_ty.isVector(zcu) and lhs_ty.childType(zcu).toIntern() == .bool_type);
try self.genBinOpMir(.{ ._, .xor }, lhs_ty, dst_mcv, src_mcv);
switch (air_tag) {
.cmp_eq => try self.genUnOpMir(.{ ._, .not }, lhs_ty, dst_mcv),
.cmp_neq => {},
else => unreachable,
}
},

else => return self.fail("TODO implement genBinOp for {s} {}", .{
@tagName(air_tag), lhs_ty.fmt(pt),
}),
}
return dst_mcv;
}

const dst_reg = registerAlias(dst_mcv.getReg().?, abi_size);
const mir_tag = @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
else => unreachable,
.float => switch (lhs_ty.floatBits(self.target.*)) {
16 => {
assert(self.hasFeature(.f16c));
const lhs_reg = if (copied_to_dst) dst_reg else registerAlias(lhs_mcv.getReg().?, abi_size);

const tmp_reg = (try self.register_manager.allocReg(null, abi.RegisterClass.sse)).to128();
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_lock);

if (src_mcv.isBase()) try self.asmRegisterRegisterMemoryImmediate(
.{ .vp_w, .insr },
dst_reg,
lhs_reg,
try src_mcv.mem(self, .{ .size = .word }),
.u(1),
) else try self.asmRegisterRegisterRegister(
.{ .vp_, .unpcklwd },
dst_reg,
lhs_reg,
(if (src_mcv.isRegister())
src_mcv.getReg().?
else
try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(),
);
try self.asmRegisterRegister(.{ .v_ps, .cvtph2 }, dst_reg, dst_reg);
try self.asmRegisterRegister(.{ .v_, .movshdup }, tmp_reg, dst_reg);
try self.asmRegisterRegisterRegister(
switch (air_tag) {
.add => .{ .v_ss, .add },
.sub => .{ .v_ss, .sub },
.mul => .{ .v_ss, .mul },
.div_float, .div_trunc, .div_floor, .div_exact => .{ .v_ss, .div },
.max => .{ .v_ss, .max },
.min => .{ .v_ss, .min },
else => unreachable,
},
dst_reg,
dst_reg,
tmp_reg,
);
switch (air_tag) {
.div_trunc, .div_floor => try self.asmRegisterRegisterRegisterImmediate(
.{ .v_ss, .round },
dst_reg,
dst_reg,
dst_reg,
bits.RoundMode.imm(.{
.mode = switch (air_tag) {
.div_trunc => .zero,
.div_floor => .down,
else => unreachable,
},
.precision = .inexact,
}),
),
else => {},
}
try self.asmRegisterRegisterImmediate(
.{ .v_, .cvtps2ph },
dst_reg,
dst_reg,
bits.RoundMode.imm(.{}),
);
return dst_mcv;
},
32 => switch (air_tag) {
.add => if (self.hasFeature(.avx)) .{ .v_ss, .add } else .{ ._ss, .add },
.sub => if (self.hasFeature(.avx)) .{ .v_ss, .sub } else .{ ._ss, .sub },
.mul => if (self.hasFeature(.avx)) .{ .v_ss, .mul } else .{ ._ss, .mul },
.div_float,
.div_trunc,
.div_floor,
.div_exact,
=> if (self.hasFeature(.avx)) .{ .v_ss, .div } else .{ ._ss, .div },
.max => if (self.hasFeature(.avx)) .{ .v_ss, .max } else .{ ._ss, .max },
.min => if (self.hasFeature(.avx)) .{ .v_ss, .min } else .{ ._ss, .min },
else => unreachable,
},
64 => switch (air_tag) {
.add => if (self.hasFeature(.avx)) .{ .v_sd, .add } else .{ ._sd, .add },
.sub => if (self.hasFeature(.avx)) .{ .v_sd, .sub } else .{ ._sd, .sub },
.mul => if (self.hasFeature(.avx)) .{ .v_sd, .mul } else .{ ._sd, .mul },
.div_float,
.div_trunc,
.div_floor,
.div_exact,
=> if (self.hasFeature(.avx)) .{ .v_sd, .div } else .{ ._sd, .div },
.max => if (self.hasFeature(.avx)) .{ .v_sd, .max } else .{ ._sd, .max },
.min => if (self.hasFeature(.avx)) .{ .v_sd, .min } else .{ ._sd, .min },
else => unreachable,
},
80, 128 => null,
else => unreachable,
},
.vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
else => null,
.int => switch (lhs_ty.childType(zcu).intInfo(zcu).bits) {
8 => switch (lhs_ty.vectorLen(zcu)) {
1...16 => switch (air_tag) {
.add,
.add_wrap,
=> if (self.hasFeature(.avx)) .{ .vp_b, .add } else .{ .p_b, .add },
.sub,
.sub_wrap,
=> if (self.hasFeature(.avx)) .{ .vp_b, .sub } else .{ .p_b, .sub },
.bit_and => if (self.hasFeature(.avx))
.{ .vp_, .@"and" }
else
.{ .p_, .@"and" },
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
.xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
.min => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_b, .mins }
else if (self.hasFeature(.sse4_1))
.{ .p_b, .mins }
else
null,
.unsigned => if (self.hasFeature(.avx))
.{ .vp_b, .minu }
else if (self.hasFeature(.sse4_1))
.{ .p_b, .minu }
else
null,
},
.max => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_b, .maxs }
else if (self.hasFeature(.sse4_1))
.{ .p_b, .maxs }
else
null,
.unsigned => if (self.hasFeature(.avx))
.{ .vp_b, .maxu }
else if (self.hasFeature(.sse4_1))
.{ .p_b, .maxu }
else
null,
},
.cmp_lt,
.cmp_lte,
.cmp_gte,
.cmp_gt,
=> switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_b, .cmpgt }
else
.{ .p_b, .cmpgt },
.unsigned => null,
},
.cmp_eq,
.cmp_neq,
=> if (self.hasFeature(.avx)) .{ .vp_b, .cmpeq } else .{ .p_b, .cmpeq },
else => null,
},
17...32 => switch (air_tag) {
.add,
.add_wrap,
=> if (self.hasFeature(.avx2)) .{ .vp_b, .add } else null,
.sub,
.sub_wrap,
=> if (self.hasFeature(.avx2)) .{ .vp_b, .sub } else null,
.bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
.bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
.xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
.min => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_b, .mins } else null,
.unsigned => if (self.hasFeature(.avx)) .{ .vp_b, .minu } else null,
},
.max => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_b, .maxs } else null,
.unsigned => if (self.hasFeature(.avx2)) .{ .vp_b, .maxu } else null,
},
.cmp_lt,
.cmp_lte,
.cmp_gte,
.cmp_gt,
=> switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx)) .{ .vp_b, .cmpgt } else null,
.unsigned => null,
},
.cmp_eq,
.cmp_neq,
=> if (self.hasFeature(.avx)) .{ .vp_b, .cmpeq } else null,
else => null,
},
else => null,
},
16 => switch (lhs_ty.vectorLen(zcu)) {
1...8 => switch (air_tag) {
.add,
.add_wrap,
=> if (self.hasFeature(.avx)) .{ .vp_w, .add } else .{ .p_w, .add },
.sub,
.sub_wrap,
=> if (self.hasFeature(.avx)) .{ .vp_w, .sub } else .{ .p_w, .sub },
.mul,
.mul_wrap,
=> if (self.hasFeature(.avx)) .{ .vp_w, .mull } else .{ .p_w, .mull },
.bit_and => if (self.hasFeature(.avx))
.{ .vp_, .@"and" }
else
.{ .p_, .@"and" },
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
.xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
.min => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_w, .mins }
else
.{ .p_w, .mins },
.unsigned => if (self.hasFeature(.avx))
.{ .vp_w, .minu }
else
.{ .p_w, .minu },
},
.max => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_w, .maxs }
else
.{ .p_w, .maxs },
.unsigned => if (self.hasFeature(.avx))
.{ .vp_w, .maxu }
else
.{ .p_w, .maxu },
},
.cmp_lt,
.cmp_lte,
.cmp_gte,
.cmp_gt,
=> switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_w, .cmpgt }
else
.{ .p_w, .cmpgt },
.unsigned => null,
},
.cmp_eq,
.cmp_neq,
=> if (self.hasFeature(.avx)) .{ .vp_w, .cmpeq } else .{ .p_w, .cmpeq },
else => null,
},
9...16 => switch (air_tag) {
.add,
.add_wrap,
=> if (self.hasFeature(.avx2)) .{ .vp_w, .add } else null,
.sub,
.sub_wrap,
=> if (self.hasFeature(.avx2)) .{ .vp_w, .sub } else null,
.mul,
.mul_wrap,
=> if (self.hasFeature(.avx2)) .{ .vp_w, .mull } else null,
.bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
.bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
.xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
.min => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_w, .mins } else null,
.unsigned => if (self.hasFeature(.avx)) .{ .vp_w, .minu } else null,
},
.max => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_w, .maxs } else null,
.unsigned => if (self.hasFeature(.avx2)) .{ .vp_w, .maxu } else null,
},
.cmp_lt,
.cmp_lte,
.cmp_gte,
.cmp_gt,
=> switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx)) .{ .vp_w, .cmpgt } else null,
.unsigned => null,
},
.cmp_eq,
.cmp_neq,
=> if (self.hasFeature(.avx)) .{ .vp_w, .cmpeq } else null,
else => null,
},
else => null,
},
32 => switch (lhs_ty.vectorLen(zcu)) {
1...4 => switch (air_tag) {
.add,
.add_wrap,
=> if (self.hasFeature(.avx)) .{ .vp_d, .add } else .{ .p_d, .add },
.sub,
.sub_wrap,
=> if (self.hasFeature(.avx)) .{ .vp_d, .sub } else .{ .p_d, .sub },
.mul,
.mul_wrap,
=> if (self.hasFeature(.avx))
.{ .vp_d, .mull }
else if (self.hasFeature(.sse4_1))
.{ .p_d, .mull }
else
null,
.bit_and => if (self.hasFeature(.avx))
.{ .vp_, .@"and" }
else
.{ .p_, .@"and" },
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
.xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
.min => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_d, .mins }
else if (self.hasFeature(.sse4_1))
.{ .p_d, .mins }
else
null,
.unsigned => if (self.hasFeature(.avx))
.{ .vp_d, .minu }
else if (self.hasFeature(.sse4_1))
.{ .p_d, .minu }
else
null,
},
.max => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_d, .maxs }
else if (self.hasFeature(.sse4_1))
.{ .p_d, .maxs }
else
null,
.unsigned => if (self.hasFeature(.avx))
.{ .vp_d, .maxu }
else if (self.hasFeature(.sse4_1))
.{ .p_d, .maxu }
else
null,
},
.cmp_lt,
.cmp_lte,
.cmp_gte,
.cmp_gt,
=> switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_d, .cmpgt }
else
.{ .p_d, .cmpgt },
.unsigned => null,
},
.cmp_eq,
.cmp_neq,
=> if (self.hasFeature(.avx)) .{ .vp_d, .cmpeq } else .{ .p_d, .cmpeq },
else => null,
},
5...8 => switch (air_tag) {
.add,
.add_wrap,
=> if (self.hasFeature(.avx2)) .{ .vp_d, .add } else null,
.sub,
.sub_wrap,
=> if (self.hasFeature(.avx2)) .{ .vp_d, .sub } else null,
.mul,
.mul_wrap,
=> if (self.hasFeature(.avx2)) .{ .vp_d, .mull } else null,
.bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
.bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
.xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
.min => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_d, .mins } else null,
.unsigned => if (self.hasFeature(.avx)) .{ .vp_d, .minu } else null,
},
.max => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_d, .maxs } else null,
.unsigned => if (self.hasFeature(.avx2)) .{ .vp_d, .maxu } else null,
},
.cmp_lt,
.cmp_lte,
.cmp_gte,
.cmp_gt,
=> switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx)) .{ .vp_d, .cmpgt } else null,
.unsigned => null,
},
.cmp_eq,
.cmp_neq,
=> if (self.hasFeature(.avx)) .{ .vp_d, .cmpeq } else null,
else => null,
},
else => null,
},
64 => switch (lhs_ty.vectorLen(zcu)) {
1...2 => switch (air_tag) {
.add,
.add_wrap,
=> if (self.hasFeature(.avx)) .{ .vp_q, .add } else .{ .p_q, .add },
.sub,
.sub_wrap,
=> if (self.hasFeature(.avx)) .{ .vp_q, .sub } else .{ .p_q, .sub },
.bit_and => if (self.hasFeature(.avx))
.{ .vp_, .@"and" }
else
.{ .p_, .@"and" },
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
.xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
.cmp_lt,
.cmp_lte,
.cmp_gte,
.cmp_gt,
=> switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_q, .cmpgt }
else if (self.hasFeature(.sse4_2))
.{ .p_q, .cmpgt }
else
null,
.unsigned => null,
},
.cmp_eq,
.cmp_neq,
=> if (self.hasFeature(.avx))
.{ .vp_q, .cmpeq }
else if (self.hasFeature(.sse4_1))
.{ .p_q, .cmpeq }
else
null,
else => null,
},
3...4 => switch (air_tag) {
.add,
.add_wrap,
=> if (self.hasFeature(.avx2)) .{ .vp_q, .add } else null,
.sub,
.sub_wrap,
=> if (self.hasFeature(.avx2)) .{ .vp_q, .sub } else null,
.bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
.bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
.xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
.cmp_eq,
.cmp_neq,
=> if (self.hasFeature(.avx2)) .{ .vp_q, .cmpeq } else null,
.cmp_lt,
.cmp_lte,
.cmp_gt,
.cmp_gte,
=> switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_q, .cmpgt } else null,
.unsigned => null,
},
else => null,
},
else => null,
},
else => null,
},
.float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
16 => tag: {
assert(self.hasFeature(.f16c));
const lhs_reg = if (copied_to_dst) dst_reg else registerAlias(lhs_mcv.getReg().?, abi_size);
switch (lhs_ty.vectorLen(zcu)) {
1 => {
const tmp_reg =
(try self.register_manager.allocReg(null, abi.RegisterClass.sse)).to128();
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_lock);

if (src_mcv.isBase()) try self.asmRegisterRegisterMemoryImmediate(
.{ .vp_w, .insr },
dst_reg,
lhs_reg,
try src_mcv.mem(self, .{ .size = .word }),
.u(1),
) else try self.asmRegisterRegisterRegister(
.{ .vp_, .unpcklwd },
dst_reg,
lhs_reg,
(if (src_mcv.isRegister())
src_mcv.getReg().?
else
try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(),
);
try self.asmRegisterRegister(.{ .v_ps, .cvtph2 }, dst_reg, dst_reg);
try self.asmRegisterRegister(.{ .v_, .movshdup }, tmp_reg, dst_reg);
try self.asmRegisterRegisterRegister(
switch (air_tag) {
.add => .{ .v_ss, .add },
.sub => .{ .v_ss, .sub },
.mul => .{ .v_ss, .mul },
.div_float, .div_trunc, .div_floor, .div_exact => .{ .v_ss, .div },
.max => .{ .v_ss, .max },
.min => .{ .v_ss, .min },
else => unreachable,
},
dst_reg,
dst_reg,
tmp_reg,
);
try self.asmRegisterRegisterImmediate(
.{ .v_, .cvtps2ph },
dst_reg,
dst_reg,
bits.RoundMode.imm(.{}),
);
return dst_mcv;
},
2 => {
const tmp_reg = (try self.register_manager.allocReg(
null,
abi.RegisterClass.sse,
)).to128();
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_lock);

if (src_mcv.isBase()) try self.asmRegisterRegisterMemoryImmediate(
.{ .vp_d, .insr },
dst_reg,
lhs_reg,
try src_mcv.mem(self, .{ .size = .dword }),
.u(1),
) else try self.asmRegisterRegisterRegister(
.{ .v_ps, .unpckl },
dst_reg,
lhs_reg,
(if (src_mcv.isRegister())
src_mcv.getReg().?
else
try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(),
);
try self.asmRegisterRegister(.{ .v_ps, .cvtph2 }, dst_reg, dst_reg);
try self.asmRegisterRegisterRegister(
.{ .v_ps, .movhl },
tmp_reg,
dst_reg,
dst_reg,
);
try self.asmRegisterRegisterRegister(
switch (air_tag) {
.add => .{ .v_ps, .add },
.sub => .{ .v_ps, .sub },
.mul => .{ .v_ps, .mul },
.div_float, .div_trunc, .div_floor, .div_exact => .{ .v_ps, .div },
.max => .{ .v_ps, .max },
.min => .{ .v_ps, .min },
else => unreachable,
},
dst_reg,
dst_reg,
tmp_reg,
);
try self.asmRegisterRegisterImmediate(
.{ .v_, .cvtps2ph },
dst_reg,
dst_reg,
bits.RoundMode.imm(.{}),
);
return dst_mcv;
},
3...4 => {
const tmp_reg = (try self.register_manager.allocReg(
null,
abi.RegisterClass.sse,
)).to128();
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_lock);

try self.asmRegisterRegister(.{ .v_ps, .cvtph2 }, dst_reg, lhs_reg);
if (src_mcv.isBase()) try self.asmRegisterMemory(
.{ .v_ps, .cvtph2 },
tmp_reg,
try src_mcv.mem(self, .{ .size = .qword }),
) else try self.asmRegisterRegister(
.{ .v_ps, .cvtph2 },
tmp_reg,
(if (src_mcv.isRegister())
src_mcv.getReg().?
else
try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(),
);
try self.asmRegisterRegisterRegister(
switch (air_tag) {
.add => .{ .v_ps, .add },
.sub => .{ .v_ps, .sub },
.mul => .{ .v_ps, .mul },
.div_float, .div_trunc, .div_floor, .div_exact => .{ .v_ps, .div },
.max => .{ .v_ps, .max },
.min => .{ .v_ps, .min },
else => unreachable,
},
dst_reg,
dst_reg,
tmp_reg,
);
try self.asmRegisterRegisterImmediate(
.{ .v_, .cvtps2ph },
dst_reg,
dst_reg,
bits.RoundMode.imm(.{}),
);
return dst_mcv;
},
5...8 => {
const tmp_reg = (try self.register_manager.allocReg(
null,
abi.RegisterClass.sse,
)).to256();
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_lock);

try self.asmRegisterRegister(.{ .v_ps, .cvtph2 }, dst_reg.to256(), lhs_reg);
if (src_mcv.isBase()) try self.asmRegisterMemory(
.{ .v_ps, .cvtph2 },
tmp_reg,
try src_mcv.mem(self, .{ .size = .xword }),
) else try self.asmRegisterRegister(
.{ .v_ps, .cvtph2 },
tmp_reg,
(if (src_mcv.isRegister())
src_mcv.getReg().?
else
try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(),
);
try self.asmRegisterRegisterRegister(
switch (air_tag) {
.add => .{ .v_ps, .add },
.sub => .{ .v_ps, .sub },
.mul => .{ .v_ps, .mul },
.div_float, .div_trunc, .div_floor, .div_exact => .{ .v_ps, .div },
.max => .{ .v_ps, .max },
.min => .{ .v_ps, .min },
else => unreachable,
},
dst_reg.to256(),
dst_reg.to256(),
tmp_reg,
);
try self.asmRegisterRegisterImmediate(
.{ .v_, .cvtps2ph },
dst_reg,
dst_reg.to256(),
bits.RoundMode.imm(.{}),
);
return dst_mcv;
},
else => break :tag null,
}
},
32 => switch (lhs_ty.vectorLen(zcu)) {
1 => switch (air_tag) {
.add => if (self.hasFeature(.avx)) .{ .v_ss, .add } else .{ ._ss, .add },
.sub => if (self.hasFeature(.avx)) .{ .v_ss, .sub } else .{ ._ss, .sub },
.mul => if (self.hasFeature(.avx)) .{ .v_ss, .mul } else .{ ._ss, .mul },
.div_float,
.div_trunc,
.div_floor,
.div_exact,
=> if (self.hasFeature(.avx)) .{ .v_ss, .div } else .{ ._ss, .div },
.max => if (self.hasFeature(.avx)) .{ .v_ss, .max } else .{ ._ss, .max },
.min => if (self.hasFeature(.avx)) .{ .v_ss, .min } else .{ ._ss, .min },
.cmp_lt,
.cmp_lte,
.cmp_eq,
.cmp_gte,
.cmp_gt,
.cmp_neq,
=> if (self.hasFeature(.avx)) .{ .v_ss, .cmp } else .{ ._ss, .cmp },
else => unreachable,
},
2...4 => switch (air_tag) {
.add => if (self.hasFeature(.avx)) .{ .v_ps, .add } else .{ ._ps, .add },
.sub => if (self.hasFeature(.avx)) .{ .v_ps, .sub } else .{ ._ps, .sub },
.mul => if (self.hasFeature(.avx)) .{ .v_ps, .mul } else .{ ._ps, .mul },
.div_float,
.div_trunc,
.div_floor,
.div_exact,
=> if (self.hasFeature(.avx)) .{ .v_ps, .div } else .{ ._ps, .div },
.max => if (self.hasFeature(.avx)) .{ .v_ps, .max } else .{ ._ps, .max },
.min => if (self.hasFeature(.avx)) .{ .v_ps, .min } else .{ ._ps, .min },
.cmp_lt,
.cmp_lte,
.cmp_eq,
.cmp_gte,
.cmp_gt,
.cmp_neq,
=> if (self.hasFeature(.avx)) .{ .v_ps, .cmp } else .{ ._ps, .cmp },
else => unreachable,
},
5...8 => if (self.hasFeature(.avx)) switch (air_tag) {
.add => .{ .v_ps, .add },
.sub => .{ .v_ps, .sub },
.mul => .{ .v_ps, .mul },
.div_float, .div_trunc, .div_floor, .div_exact => .{ .v_ps, .div },
.max => .{ .v_ps, .max },
.min => .{ .v_ps, .min },
.cmp_lt, .cmp_lte, .cmp_eq, .cmp_gte, .cmp_gt, .cmp_neq => .{ .v_ps, .cmp },
else => unreachable,
} else null,
else => null,
},
64 => switch (lhs_ty.vectorLen(zcu)) {
1 => switch (air_tag) {
.add => if (self.hasFeature(.avx)) .{ .v_sd, .add } else .{ ._sd, .add },
.sub => if (self.hasFeature(.avx)) .{ .v_sd, .sub } else .{ ._sd, .sub },
.mul => if (self.hasFeature(.avx)) .{ .v_sd, .mul } else .{ ._sd, .mul },
.div_float,
.div_trunc,
.div_floor,
.div_exact,
=> if (self.hasFeature(.avx)) .{ .v_sd, .div } else .{ ._sd, .div },
.max => if (self.hasFeature(.avx)) .{ .v_sd, .max } else .{ ._sd, .max },
.min => if (self.hasFeature(.avx)) .{ .v_sd, .min } else .{ ._sd, .min },
.cmp_lt,
.cmp_lte,
.cmp_eq,
.cmp_gte,
.cmp_gt,
.cmp_neq,
=> if (self.hasFeature(.avx)) .{ .v_sd, .cmp } else .{ ._sd, .cmp },
else => unreachable,
},
2 => switch (air_tag) {
.add => if (self.hasFeature(.avx)) .{ .v_pd, .add } else .{ ._pd, .add },
.sub => if (self.hasFeature(.avx)) .{ .v_pd, .sub } else .{ ._pd, .sub },
.mul => if (self.hasFeature(.avx)) .{ .v_pd, .mul } else .{ ._pd, .mul },
.div_float,
.div_trunc,
.div_floor,
.div_exact,
=> if (self.hasFeature(.avx)) .{ .v_pd, .div } else .{ ._pd, .div },
.max => if (self.hasFeature(.avx)) .{ .v_pd, .max } else .{ ._pd, .max },
.min => if (self.hasFeature(.avx)) .{ .v_pd, .min } else .{ ._pd, .min },
.cmp_lt,
.cmp_lte,
.cmp_eq,
.cmp_gte,
.cmp_gt,
.cmp_neq,
=> if (self.hasFeature(.avx)) .{ .v_pd, .cmp } else .{ ._pd, .cmp },
else => unreachable,
},
3...4 => if (self.hasFeature(.avx)) switch (air_tag) {
.add => .{ .v_pd, .add },
.sub => .{ .v_pd, .sub },
.mul => .{ .v_pd, .mul },
.div_float, .div_trunc, .div_floor, .div_exact => .{ .v_pd, .div },
.max => .{ .v_pd, .max },
.cmp_lt, .cmp_lte, .cmp_eq, .cmp_gte, .cmp_gt, .cmp_neq => .{ .v_pd, .cmp },
.min => .{ .v_pd, .min },
else => unreachable,
} else null,
else => null,
},
80, 128 => null,
else => unreachable,
},
},
}) orelse return self.fail("TODO implement genBinOp for {s} {}", .{
@tagName(air_tag), lhs_ty.fmt(pt),
});

const lhs_copy_reg = if (maybe_mask_reg) |_| registerAlias(
if (copied_to_dst) try self.copyToTmpRegister(lhs_ty, dst_mcv) else lhs_mcv.getReg().?,
abi_size,
) else null;
const lhs_copy_lock = if (lhs_copy_reg) |reg| self.register_manager.lockReg(reg) else null;
defer if (lhs_copy_lock) |lock| self.register_manager.unlockReg(lock);

switch (mir_tag[1]) {
else => if (self.hasFeature(.avx)) {
const lhs_reg = if (copied_to_dst) dst_reg else registerAlias(lhs_mcv.getReg().?, abi_size);
if (src_mcv.isBase()) try self.asmRegisterRegisterMemory(
mir_tag,
dst_reg,
lhs_reg,
try src_mcv.mem(self, .{ .size = switch (lhs_ty.zigTypeTag(zcu)) {
else => .fromSize(abi_size),
.vector => .fromBitSize(dst_reg.bitSize()),
} }),
) else try self.asmRegisterRegisterRegister(
mir_tag,
dst_reg,
lhs_reg,
registerAlias(if (src_mcv.isRegister())
src_mcv.getReg().?
else
try self.copyToTmpRegister(rhs_ty, src_mcv), abi_size),
);
} else {
assert(copied_to_dst);
if (src_mcv.isBase()) try self.asmRegisterMemory(
mir_tag,
dst_reg,
try src_mcv.mem(self, .{ .size = switch (lhs_ty.zigTypeTag(zcu)) {
else => .fromSize(abi_size),
.vector => .fromBitSize(dst_reg.bitSize()),
} }),
) else try self.asmRegisterRegister(
mir_tag,
dst_reg,
registerAlias(if (src_mcv.isRegister())
src_mcv.getReg().?
else
try self.copyToTmpRegister(rhs_ty, src_mcv), abi_size),
);
},
.cmp => {
const imm: Immediate = .u(switch (air_tag) {
.cmp_eq => 0,
.cmp_lt, .cmp_gt => 1,
.cmp_lte, .cmp_gte => 2,
.cmp_neq => 4,
else => unreachable,
});
if (self.hasFeature(.avx)) {
const lhs_reg =
if (copied_to_dst) dst_reg else registerAlias(lhs_mcv.getReg().?, abi_size);
if (src_mcv.isBase()) try self.asmRegisterRegisterMemoryImmediate(
mir_tag,
dst_reg,
lhs_reg,
try src_mcv.mem(self, .{ .size = switch (lhs_ty.zigTypeTag(zcu)) {
else => .fromSize(abi_size),
.vector => .fromBitSize(dst_reg.bitSize()),
} }),
imm,
) else try self.asmRegisterRegisterRegisterImmediate(
mir_tag,
dst_reg,
lhs_reg,
registerAlias(if (src_mcv.isRegister())
src_mcv.getReg().?
else
try self.copyToTmpRegister(rhs_ty, src_mcv), abi_size),
imm,
);
} else {
assert(copied_to_dst);
if (src_mcv.isBase()) try self.asmRegisterMemoryImmediate(
mir_tag,
dst_reg,
try src_mcv.mem(self, .{ .size = switch (lhs_ty.zigTypeTag(zcu)) {
else => .fromSize(abi_size),
.vector => .fromBitSize(dst_reg.bitSize()),
} }),
imm,
) else try self.asmRegisterRegisterImmediate(
mir_tag,
dst_reg,
registerAlias(if (src_mcv.isRegister())
src_mcv.getReg().?
else
try self.copyToTmpRegister(rhs_ty, src_mcv), abi_size),
imm,
);
}
},
}

switch (air_tag) {
.add, .add_wrap, .sub, .sub_wrap, .mul, .mul_wrap, .div_float, .div_exact => {},
.div_trunc, .div_floor => try self.genRound(lhs_ty, dst_reg, .{ .register = dst_reg }, .{
.mode = switch (air_tag) {
.div_trunc => .zero,
.div_floor => .down,
else => unreachable,
},
.precision = .inexact,
}),
.bit_and, .bit_or, .xor => {},
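// Float min/max must return the non-NaN operand, but (min|max)ss returns
// the second operand whenever either input is NaN. Build a mask of lanes
// where the rhs is NaN (unordered with itself) and blend the saved lhs
// back into those lanes.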
.max, .min => if (maybe_mask_reg) |mask_reg| if (self.hasFeature(.avx)) {
const rhs_copy_reg = registerAlias(src_mcv.getReg().?, abi_size);

try self.asmRegisterRegisterRegisterImmediate(
@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
.float => switch (lhs_ty.floatBits(self.target.*)) {
32 => .{ .v_ss, .cmp },
64 => .{ .v_sd, .cmp },
16, 80, 128 => null,
else => unreachable,
},
.vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
.float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
32 => switch (lhs_ty.vectorLen(zcu)) {
1 => .{ .v_ss, .cmp },
2...8 => .{ .v_ps, .cmp },
else => null,
},
64 => switch (lhs_ty.vectorLen(zcu)) {
1 => .{ .v_sd, .cmp },
2...4 => .{ .v_pd, .cmp },
else => null,
},
16, 80, 128 => null,
else => unreachable,
},
else => unreachable,
},
else => unreachable,
}) orelse return self.fail("TODO implement genBinOp for {s} {}", .{
@tagName(air_tag), lhs_ty.fmt(pt),
}),
mask_reg,
rhs_copy_reg,
rhs_copy_reg,
bits.VexFloatPredicate.imm(.unord),
);
try self.asmRegisterRegisterRegisterRegister(
@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
.float => switch (lhs_ty.floatBits(self.target.*)) {
32 => .{ .v_ps, .blendv },
64 => .{ .v_pd, .blendv },
16, 80, 128 => null,
else => unreachable,
},
.vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
.float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
32 => switch (lhs_ty.vectorLen(zcu)) {
1...8 => .{ .v_ps, .blendv },
else => null,
},
64 => switch (lhs_ty.vectorLen(zcu)) {
1...4 => .{ .v_pd, .blendv },
else => null,
},
16, 80, 128 => null,
else => unreachable,
},
else => unreachable,
},
else => unreachable,
}) orelse return self.fail("TODO implement genBinOp for {s} {}", .{
@tagName(air_tag), lhs_ty.fmt(pt),
}),
dst_reg,
dst_reg,
lhs_copy_reg.?,
mask_reg,
);
} else {
const has_blend = self.hasFeature(.sse4_1);
try self.asmRegisterRegisterImmediate(
@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
.float => switch (lhs_ty.floatBits(self.target.*)) {
32 => .{ ._ss, .cmp },
64 => .{ ._sd, .cmp },
16, 80, 128 => null,
else => unreachable,
},
.vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
.float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
32 => switch (lhs_ty.vectorLen(zcu)) {
1 => .{ ._ss, .cmp },
2...4 => .{ ._ps, .cmp },
else => null,
},
64 => switch (lhs_ty.vectorLen(zcu)) {
1 => .{ ._sd, .cmp },
2 => .{ ._pd, .cmp },
else => null,
},
16, 80, 128 => null,
else => unreachable,
},
else => unreachable,
},
else => unreachable,
}) orelse return self.fail("TODO implement genBinOp for {s} {}", .{
@tagName(air_tag), lhs_ty.fmt(pt),
}),
mask_reg,
mask_reg,
bits.SseFloatPredicate.imm(if (has_blend) .unord else .ord),
);
if (has_blend) try self.asmRegisterRegisterRegister(
@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
.float => switch (lhs_ty.floatBits(self.target.*)) {
32 => .{ ._ps, .blendv },
64 => .{ ._pd, .blendv },
16, 80, 128 => null,
else => unreachable,
},
.vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
.float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
32 => switch (lhs_ty.vectorLen(zcu)) {
1...4 => .{ ._ps, .blendv },
else => null,
},
64 => switch (lhs_ty.vectorLen(zcu)) {
1...2 => .{ ._pd, .blendv },
else => null,
},
16, 80, 128 => null,
else => unreachable,
},
else => unreachable,
},
else => unreachable,
}) orelse return self.fail("TODO implement genBinOp for {s} {}", .{
@tagName(air_tag), lhs_ty.fmt(pt),
}),
dst_reg,
lhs_copy_reg.?,
mask_reg,
) else {
const mir_fixes = @as(?Mir.Inst.Fixes, switch (lhs_ty.zigTypeTag(zcu)) {
.float => switch (lhs_ty.floatBits(self.target.*)) {
32 => ._ps,
64 => ._pd,
16, 80, 128 => null,
else => unreachable,
},
.vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
.float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
32 => switch (lhs_ty.vectorLen(zcu)) {
1...4 => ._ps,
else => null,
},
64 => switch (lhs_ty.vectorLen(zcu)) {
1...2 => ._pd,
else => null,
},
16, 80, 128 => null,
else => unreachable,
},
else => unreachable,
},
else => unreachable,
}) orelse return self.fail("TODO implement genBinOp for {s} {}", .{
@tagName(air_tag), lhs_ty.fmt(pt),
});
try self.asmRegisterRegister(.{ mir_fixes, .@"and" }, dst_reg, mask_reg);
try self.asmRegisterRegister(.{ mir_fixes, .andn }, mask_reg, lhs_copy_reg.?);
try self.asmRegisterRegister(.{ mir_fixes, .@"or" }, dst_reg, mask_reg);
}
},
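// Vector comparisons leave a per-lane mask in the SSE register. Integer
// lte/gte/neq are derived from gt/eq by xoring with all-ones, and the
// lane mask is then compressed to a bit mask in a GP register via
// (p)movmsk, packing words down to bytes first since no word-sized
// movmsk exists.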
.cmp_lt, .cmp_lte, .cmp_eq, .cmp_gte, .cmp_gt, .cmp_neq => {
switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
.int => switch (air_tag) {
.cmp_lt,
.cmp_eq,
.cmp_gt,
=> {},
.cmp_lte,
.cmp_gte,
.cmp_neq,
=> {
const unsigned_ty = try lhs_ty.toUnsigned(pt);
const not_mcv = try self.genTypedValue(try unsigned_ty.maxInt(pt, unsigned_ty));
const not_mem: Memory = if (not_mcv.isBase())
try not_mcv.mem(self, .{ .size = .fromSize(abi_size) })
else
.{ .base = .{
.reg = try self.copyToTmpRegister(.usize, not_mcv.address()),
}, .mod = .{ .rm = .{ .size = .fromSize(abi_size) } } };
switch (mir_tag[0]) {
.vp_b, .vp_d, .vp_q, .vp_w => try self.asmRegisterRegisterMemory(
.{ .vp_, .xor },
dst_reg,
dst_reg,
not_mem,
),
.p_b, .p_d, .p_q, .p_w => try self.asmRegisterMemory(
.{ .p_, .xor },
dst_reg,
not_mem,
),
else => unreachable,
}
},
else => unreachable,
},
.float => {},
else => unreachable,
}

const gp_reg = try self.register_manager.allocReg(maybe_inst, abi.RegisterClass.gp);
const gp_lock = self.register_manager.lockRegAssumeUnused(gp_reg);
defer self.register_manager.unlockReg(gp_lock);

try self.asmRegisterRegister(switch (mir_tag[0]) {
._pd, ._sd, .p_q => .{ ._pd, .movmsk },
._ps, ._ss, .p_d => .{ ._ps, .movmsk },
.p_b => .{ .p_b, .movmsk },
.p_w => movmsk: {
try self.asmRegisterRegister(.{ .p_b, .ackssw }, dst_reg, dst_reg);
break :movmsk .{ .p_b, .movmsk };
},
.v_pd, .v_sd, .vp_q => .{ .v_pd, .movmsk },
.v_ps, .v_ss, .vp_d => .{ .v_ps, .movmsk },
.vp_b => .{ .vp_b, .movmsk },
.vp_w => movmsk: {
try self.asmRegisterRegisterRegister(
.{ .vp_b, .ackssw },
dst_reg,
dst_reg,
dst_reg,
);
break :movmsk .{ .vp_b, .movmsk };
},
else => unreachable,
}, gp_reg.to32(), dst_reg);
return .{ .register = gp_reg };
},
else => unreachable,
}

return dst_mcv;
}

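/// Emits `dst_mcv = dst_mcv op src_mcv` (or a compare) as MIR, splitting
/// integers wider than 64 bits into limbs.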
fn genBinOpMir(
self: *CodeGen,
mir_tag: Mir.Inst.FixedTag,
ty: Type,
dst_mcv: MCValue,
src_mcv: MCValue,
) !void {
const pt = self.pt;
const zcu = pt.zcu;
const abi_size: u32 = @intCast(ty.abiSize(zcu));
try self.spillEflagsIfOccupied();
switch (dst_mcv) {
.none,
.unreach,
.dead,
.undef,
.immediate,
.eflags,
.register_overflow,
.register_mask,
.lea_direct,
.lea_got,
.lea_tlv,
.lea_frame,
.lea_symbol,
.elementwise_regs_then_frame,
.reserved_frame,
.air_ref,
=> unreachable, // unmodifiable destination
.register, .register_pair, .register_triple, .register_quadruple, .register_offset => {
switch (dst_mcv) {
.register, .register_pair, .register_triple, .register_quadruple => {},
.register_offset => |ro| assert(ro.off == 0),
else => unreachable,
}
for (dst_mcv.getRegs(), 0..) |dst_reg, dst_reg_i| {
const dst_reg_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock);

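// Wide integers are processed in 8-byte limbs: the first limb uses the
// requested instruction and later limbs continue the carry chain
// (add -> adc, sub/cmp -> sbb); bitwise ops simply repeat per limb.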
const mir_limb_tag: Mir.Inst.FixedTag = switch (dst_reg_i) {
0 => mir_tag,
1 => switch (mir_tag[1]) {
.add => .{ ._, .adc },
.sub, .cmp => .{ ._, .sbb },
.@"or", .@"and", .xor => mir_tag,
else => return self.fail("TODO genBinOpMir implement large ABI for {s}", .{
@tagName(mir_tag[1]),
}),
},
else => unreachable,
};
const off: u4 = @intCast(dst_reg_i * 8);
const limb_abi_size = @min(abi_size - off, 8);
const dst_alias = registerAlias(dst_reg, limb_abi_size);
switch (src_mcv) {
.none,
.unreach,
.dead,
.undef,
.register_overflow,
.register_mask,
.elementwise_regs_then_frame,
.reserved_frame,
=> unreachable,
.register,
.register_pair,
.register_triple,
.register_quadruple,
=> try self.asmRegisterRegister(
mir_limb_tag,
dst_alias,
registerAlias(src_mcv.getRegs()[dst_reg_i], limb_abi_size),
),
.immediate => |imm| {
assert(off == 0);
switch (self.regBitSize(ty)) {
8 => try self.asmRegisterImmediate(
mir_limb_tag,
dst_alias,
if (std.math.cast(i8, @as(i64, @bitCast(imm)))) |small|
.s(small)
else
.u(@as(u8, @intCast(imm))),
),
16 => try self.asmRegisterImmediate(
mir_limb_tag,
dst_alias,
if (std.math.cast(i16, @as(i64, @bitCast(imm)))) |small|
.s(small)
else
.u(@as(u16, @intCast(imm))),
),
32 => try self.asmRegisterImmediate(
mir_limb_tag,
dst_alias,
if (std.math.cast(i32, @as(i64, @bitCast(imm)))) |small|
.s(small)
else
.u(@as(u32, @intCast(imm))),
),
64 => if (std.math.cast(i32, @as(i64, @bitCast(imm)))) |small|
try self.asmRegisterImmediate(mir_limb_tag, dst_alias, .s(small))
else
try self.asmRegisterRegister(mir_limb_tag, dst_alias, registerAlias(
try self.copyToTmpRegister(ty, src_mcv),
limb_abi_size,
)),
else => unreachable,
}
},
.eflags,
.register_offset,
.memory,
.indirect,
.load_symbol,
.lea_symbol,
.load_direct,
.lea_direct,
.load_got,
.lea_got,
.load_tlv,
.lea_tlv,
.load_frame,
.lea_frame,
=> {
direct: {
try self.asmRegisterMemory(mir_limb_tag, dst_alias, switch (src_mcv) {
.memory => |addr| .{
.base = .{ .reg = .ds },
.mod = .{ .rm = .{
.size = .fromSize(limb_abi_size),
.disp = std.math.cast(i32, addr + off) orelse break :direct,
} },
},
.indirect => |reg_off| .{
.base = .{ .reg = reg_off.reg },
.mod = .{ .rm = .{
.size = .fromSize(limb_abi_size),
.disp = reg_off.off + off,
} },
},
.load_frame => |frame_addr| .{
.base = .{ .frame = frame_addr.index },
.mod = .{ .rm = .{
.size = .fromSize(limb_abi_size),
.disp = frame_addr.off + off,
} },
},
else => break :direct,
});
continue;
}

switch (src_mcv) {
.eflags,
.register_offset,
.lea_symbol,
.lea_direct,
.lea_got,
.lea_tlv,
.lea_frame,
=> {
assert(off == 0);
const reg = try self.copyToTmpRegister(ty, src_mcv);
return self.genBinOpMir(
mir_limb_tag,
ty,
dst_mcv,
.{ .register = reg },
);
},
.memory,
.load_symbol,
.load_direct,
.load_got,
.load_tlv,
=> {
const ptr_ty = try pt.singleConstPtrType(ty);
const addr_reg = try self.copyToTmpRegister(ptr_ty, src_mcv.address());
return self.genBinOpMir(mir_limb_tag, ty, dst_mcv, .{
.indirect = .{ .reg = addr_reg, .off = off },
});
},
else => unreachable,
}
},
.air_ref => |src_ref| return self.genBinOpMir(
mir_tag,
ty,
dst_mcv,
try self.resolveInst(src_ref),
),
}
}
},
.memory, .indirect, .load_symbol, .load_got, .load_direct, .load_tlv, .load_frame => {
const OpInfo = ?struct { addr_reg: Register, addr_lock: RegisterLock };
const limb_abi_size: u32 = @min(abi_size, 8);

const dst_info: OpInfo = switch (dst_mcv) {
else => unreachable,
.memory, .load_symbol, .load_got, .load_direct, .load_tlv => dst: {
const dst_addr_reg =
(try self.register_manager.allocReg(null, abi.RegisterClass.gp)).to64();
const dst_addr_lock = self.register_manager.lockRegAssumeUnused(dst_addr_reg);
errdefer self.register_manager.unlockReg(dst_addr_lock);

try self.genSetReg(dst_addr_reg, .usize, dst_mcv.address(), .{});
break :dst .{ .addr_reg = dst_addr_reg, .addr_lock = dst_addr_lock };
},
.load_frame => null,
};
defer if (dst_info) |info| self.register_manager.unlockReg(info.addr_lock);

const resolved_src_mcv = switch (src_mcv) {
else => src_mcv,
.air_ref => |src_ref| try self.resolveInst(src_ref),
};
const src_info: OpInfo = switch (resolved_src_mcv) {
.none,
.unreach,
.dead,
.undef,
.register_overflow,
.register_mask,
.elementwise_regs_then_frame,
.reserved_frame,
.air_ref,
=> unreachable,
.immediate,
.eflags,
.register,
.register_pair,
.register_triple,
.register_quadruple,
.register_offset,
.indirect,
.lea_direct,
.lea_got,
.lea_tlv,
.load_frame,
.lea_frame,
.lea_symbol,
=> null,
.memory, .load_symbol, .load_got, .load_direct, .load_tlv => src: {
switch (resolved_src_mcv) {
.memory => |addr| if (std.math.cast(i32, @as(i64, @bitCast(addr))) != null and
std.math.cast(i32, @as(i64, @bitCast(addr)) + abi_size - limb_abi_size) != null)
break :src null,
.load_symbol, .load_got, .load_direct, .load_tlv => {},
else => unreachable,
}

const src_addr_reg =
(try self.register_manager.allocReg(null, abi.RegisterClass.gp)).to64();
const src_addr_lock = self.register_manager.lockRegAssumeUnused(src_addr_reg);
errdefer self.register_manager.unlockReg(src_addr_lock);

try self.genSetReg(src_addr_reg, .usize, resolved_src_mcv.address(), .{});
break :src .{ .addr_reg = src_addr_reg, .addr_lock = src_addr_lock };
},
};
defer if (src_info) |info| self.register_manager.unlockReg(info.addr_lock);

const ty_signedness =
if (ty.isAbiInt(zcu)) ty.intInfo(zcu).signedness else .unsigned;
const limb_ty: Type = if (abi_size <= 8) ty else switch (ty_signedness) {
.signed => .isize,
.unsigned => .usize,
|
|
};
|
|
            var limb_i: usize = 0;
            var off: i32 = 0;
            while (off < abi_size) : ({
                limb_i += 1;
                off += 8;
            }) {
                const mir_limb_tag: Mir.Inst.FixedTag = switch (limb_i) {
                    0 => mir_tag,
                    else => switch (mir_tag[1]) {
                        .add => .{ ._, .adc },
                        .sub, .cmp => .{ ._, .sbb },
                        .@"or", .@"and", .xor => mir_tag,
                        else => return self.fail("TODO genBinOpMir implement large ABI for {s}", .{
                            @tagName(mir_tag[1]),
                        }),
                    },
                };
                const dst_limb_mem: Memory = switch (dst_mcv) {
                    .memory,
                    .load_symbol,
                    .load_got,
                    .load_direct,
                    .load_tlv,
                    => .{
                        .base = .{ .reg = dst_info.?.addr_reg },
                        .mod = .{ .rm = .{
                            .size = .fromSize(limb_abi_size),
                            .disp = off,
                        } },
                    },
                    .indirect => |reg_off| .{
                        .base = .{ .reg = reg_off.reg },
                        .mod = .{ .rm = .{
                            .size = .fromSize(limb_abi_size),
                            .disp = reg_off.off + off,
                        } },
                    },
                    .load_frame => |frame_addr| .{
                        .base = .{ .frame = frame_addr.index },
                        .mod = .{ .rm = .{
                            .size = .fromSize(limb_abi_size),
                            .disp = frame_addr.off + off,
                        } },
                    },
                    else => unreachable,
                };
                switch (resolved_src_mcv) {
                    .none,
                    .unreach,
                    .dead,
                    .undef,
                    .register_overflow,
                    .register_mask,
                    .elementwise_regs_then_frame,
                    .reserved_frame,
                    .air_ref,
                    => unreachable,
                    .immediate => |src_imm| {
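                        // Immediates are at most 64 bits wide, so every limb
                        // past the first holds only the sign extension:
                        // all-ones for a negative signed value, zero otherwise.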
                        const imm: u64 = switch (limb_i) {
                            0 => src_imm,
                            else => switch (ty_signedness) {
                                .signed => @bitCast(@as(i64, @bitCast(src_imm)) >> 63),
                                .unsigned => 0,
                            },
                        };
                        switch (self.regBitSize(limb_ty)) {
                            8 => try self.asmMemoryImmediate(
                                mir_limb_tag,
                                dst_limb_mem,
                                if (std.math.cast(i8, @as(i64, @bitCast(imm)))) |small|
                                    .s(small)
                                else
                                    .u(@as(u8, @intCast(imm))),
                            ),
                            16 => try self.asmMemoryImmediate(
                                mir_limb_tag,
                                dst_limb_mem,
                                if (std.math.cast(i16, @as(i64, @bitCast(imm)))) |small|
                                    .s(small)
                                else
                                    .u(@as(u16, @intCast(imm))),
                            ),
                            32 => try self.asmMemoryImmediate(
                                mir_limb_tag,
                                dst_limb_mem,
                                if (std.math.cast(i32, @as(i64, @bitCast(imm)))) |small|
                                    .s(small)
                                else
                                    .u(@as(u32, @intCast(imm))),
                            ),
                            64 => if (std.math.cast(i32, @as(i64, @bitCast(imm)))) |small|
                                try self.asmMemoryImmediate(mir_limb_tag, dst_limb_mem, .s(small))
                            else
                                try self.asmMemoryRegister(
                                    mir_limb_tag,
                                    dst_limb_mem,
                                    registerAlias(
                                        try self.copyToTmpRegister(limb_ty, .{ .immediate = imm }),
                                        limb_abi_size,
                                    ),
                                ),
                            else => unreachable,
                        }
                    },
                    .register,
                    .register_pair,
                    .register_triple,
                    .register_quadruple,
                    .register_offset,
                    .eflags,
                    .memory,
                    .indirect,
                    .load_symbol,
                    .lea_symbol,
                    .load_direct,
                    .lea_direct,
                    .load_got,
                    .lea_got,
                    .load_tlv,
                    .lea_tlv,
                    .load_frame,
                    .lea_frame,
                    => {
                        const src_limb_mcv: MCValue = if (src_info) |info| .{
                            .indirect = .{ .reg = info.addr_reg, .off = off },
                        } else switch (resolved_src_mcv) {
                            .register, .register_pair, .register_triple, .register_quadruple => .{
                                .register = resolved_src_mcv.getRegs()[limb_i],
                            },
                            .eflags,
                            .register_offset,
                            .lea_symbol,
                            .lea_direct,
                            .lea_got,
                            .lea_tlv,
                            .lea_frame,
                            => switch (limb_i) {
                                0 => resolved_src_mcv,
                                else => .{ .immediate = 0 },
                            },
                            .memory => |addr| .{ .memory = @bitCast(@as(i64, @bitCast(addr)) + off) },
                            .indirect => |reg_off| .{ .indirect = .{
                                .reg = reg_off.reg,
                                .off = reg_off.off + off,
                            } },
                            .load_frame => |frame_addr| .{ .load_frame = .{
                                .index = frame_addr.index,
                                .off = frame_addr.off + off,
                            } },
                            else => unreachable,
                        };
                        const src_limb_reg = if (src_limb_mcv.isRegister())
                            src_limb_mcv.getReg().?
                        else
                            try self.copyToTmpRegister(limb_ty, src_limb_mcv);
                        try self.asmMemoryRegister(
                            mir_limb_tag,
                            dst_limb_mem,
                            registerAlias(src_limb_reg, limb_abi_size),
                        );
                    },
                }
            }
        },
    }
}

/// Performs multi-operand integer multiplication between dst_mcv and src_mcv, storing the result in dst_mcv.
/// Byte-sized operands are not multiplied directly; they are first zero-extended to 32 bits.
fn genIntMulComplexOpMir(self: *CodeGen, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError!void {
    const pt = self.pt;
    const abi_size: u32 = @intCast(dst_ty.abiSize(pt.zcu));
    try self.spillEflagsIfOccupied();
    switch (dst_mcv) {
        .none,
        .unreach,
        .dead,
        .undef,
        .immediate,
        .eflags,
        .register_offset,
        .register_overflow,
        .register_mask,
        .lea_symbol,
        .lea_direct,
        .lea_got,
        .lea_tlv,
        .lea_frame,
        .elementwise_regs_then_frame,
        .reserved_frame,
        .air_ref,
        => unreachable, // unmodifiable destination
        .register => |dst_reg| {
            const alias_size = switch (abi_size) {
                1 => 4,
                else => abi_size,
            };
            const dst_alias = registerAlias(dst_reg, alias_size);
            const dst_lock = self.register_manager.lockReg(dst_reg);
            defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
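
            // imul has no 8-bit two-operand form, so byte-sized values are
            // zero-extended with movzx and multiplied as 32-bit values; only
            // the low byte of the product is meaningful to the caller.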
            switch (abi_size) {
                1 => try self.asmRegisterRegister(.{ ._, .movzx }, dst_reg.to32(), dst_reg.to8()),
                else => {},
            }

            const resolved_src_mcv = switch (src_mcv) {
                else => src_mcv,
                .air_ref => |src_ref| try self.resolveInst(src_ref),
            };
            switch (resolved_src_mcv) {
                .none,
                .unreach,
                .dead,
                .undef,
                .register_pair,
                .register_triple,
                .register_quadruple,
                .register_overflow,
                .register_mask,
                .elementwise_regs_then_frame,
                .reserved_frame,
                .air_ref,
                => unreachable,
                .register => |src_reg| {
                    switch (abi_size) {
                        1 => try self.asmRegisterRegister(.{ ._, .movzx }, src_reg.to32(), src_reg.to8()),
                        else => {},
                    }
                    try self.asmRegisterRegister(
                        .{ .i_, .mul },
                        dst_alias,
                        registerAlias(src_reg, alias_size),
                    );
                },
                .immediate => |imm| {
                    if (std.math.cast(i32, @as(i64, @bitCast(imm)))) |small| {
                        try self.asmRegisterRegisterImmediate(.{ .i_, .mul }, dst_alias, dst_alias, .s(small));
                    } else {
                        const src_reg = try self.copyToTmpRegister(dst_ty, resolved_src_mcv);
                        return self.genIntMulComplexOpMir(dst_ty, dst_mcv, MCValue{ .register = src_reg });
                    }
                },
                .register_offset,
                .eflags,
                .load_symbol,
                .lea_symbol,
                .load_direct,
                .lea_direct,
                .load_got,
                .lea_got,
                .load_tlv,
                .lea_tlv,
                .lea_frame,
                => {
                    const src_reg = try self.copyToTmpRegister(dst_ty, resolved_src_mcv);
                    switch (abi_size) {
                        1 => try self.asmRegisterRegister(.{ ._, .movzx }, src_reg.to32(), src_reg.to8()),
                        else => {},
                    }
                    try self.asmRegisterRegister(.{ .i_, .mul }, dst_alias, registerAlias(src_reg, alias_size));
                },
                .memory, .indirect, .load_frame => switch (abi_size) {
                    1 => {
                        const src_reg = try self.copyToTmpRegister(dst_ty, resolved_src_mcv);
                        try self.asmRegisterRegister(.{ ._, .movzx }, src_reg.to32(), src_reg.to8());
                        try self.asmRegisterRegister(.{ .i_, .mul }, dst_alias, registerAlias(src_reg, alias_size));
                    },
                    else => try self.asmRegisterMemory(
                        .{ .i_, .mul },
                        dst_alias,
                        switch (resolved_src_mcv) {
                            .memory => |addr| .{
                                .base = .{ .reg = .ds },
                                .mod = .{ .rm = .{
                                    .size = .fromSize(abi_size),
                                    .disp = std.math.cast(i32, @as(i64, @bitCast(addr))) orelse
                                        return self.asmRegisterRegister(
                                            .{ .i_, .mul },
                                            dst_alias,
                                            registerAlias(
                                                try self.copyToTmpRegister(dst_ty, resolved_src_mcv),
                                                abi_size,
                                            ),
                                        ),
                                } },
                            },
                            .indirect => |reg_off| .{
                                .base = .{ .reg = reg_off.reg },
                                .mod = .{ .rm = .{
                                    .size = .fromSize(abi_size),
                                    .disp = reg_off.off,
                                } },
                            },
                            .load_frame => |frame_addr| .{
                                .base = .{ .frame = frame_addr.index },
                                .mod = .{ .rm = .{
                                    .size = .fromSize(abi_size),
                                    .disp = frame_addr.off,
                                } },
                            },
                            else => unreachable,
                        },
                    ),
                },
            }
        },
        .register_pair, .register_triple, .register_quadruple => unreachable, // unimplemented
        .memory, .indirect, .load_symbol, .load_direct, .load_got, .load_tlv, .load_frame => {
            const tmp_reg = try self.copyToTmpRegister(dst_ty, dst_mcv);
            const tmp_mcv = MCValue{ .register = tmp_reg };
            const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
            defer self.register_manager.unlockReg(tmp_lock);

            try self.genIntMulComplexOpMir(dst_ty, tmp_mcv, src_mcv);
            try self.genCopy(dst_ty, dst_mcv, tmp_mcv, .{});
        },
    }
}

fn airArg(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    // skip zero-bit arguments as they don't have a corresponding arg instruction
    var arg_index = self.arg_index;
    while (self.args[arg_index] == .none) arg_index += 1;
    self.arg_index = arg_index + 1;

    const result: MCValue = if (self.debug_output == .none and self.liveness.isUnused(inst)) .unreach else result: {
        const arg_ty = self.typeOfIndex(inst);
        const src_mcv = self.args[arg_index];
        switch (src_mcv) {
            .register, .register_pair, .load_frame => {
                for (src_mcv.getRegs()) |reg| self.register_manager.getRegAssumeFree(reg, inst);
                break :result src_mcv;
            },
            .indirect => |reg_off| {
                self.register_manager.getRegAssumeFree(reg_off.reg, inst);
                const dst_mcv = try self.allocRegOrMem(inst, false);
                try self.genCopy(arg_ty, dst_mcv, src_mcv, .{});
                break :result dst_mcv;
            },
            .elementwise_regs_then_frame => |regs_frame_addr| {
                try self.spillEflagsIfOccupied();
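
                // A packed bool-vector argument arrives with its leading
                // elements one per integer register and the rest spilled
                // elementwise on the stack (one byte per eightbyte). Fold the
                // per-register bits into a single byte (element i at bit i,
                // accumulated into the last register), zero the rest of the
                // result, then walk the spilled elements and set bits with bts.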
                const fn_info = zcu.typeToFunc(self.fn_type).?;
                const param_int_regs = abi.getCAbiIntParamRegs(fn_info.cc);
                var prev_reg: Register = undefined;
                for (
                    param_int_regs[param_int_regs.len - regs_frame_addr.regs ..],
                    0..,
                ) |dst_reg, elem_index| {
                    assert(self.register_manager.isRegFree(dst_reg));
                    if (elem_index > 0) {
                        try self.asmRegisterImmediate(.{ ._l, .sh }, dst_reg.to8(), .u(elem_index));
                        try self.asmRegisterRegister(
                            .{ ._, .@"or" },
                            dst_reg.to8(),
                            prev_reg.to8(),
                        );
                    }
                    prev_reg = dst_reg;
                }

                const prev_lock = if (regs_frame_addr.regs > 0)
                    self.register_manager.lockRegAssumeUnused(prev_reg)
                else
                    null;
                defer if (prev_lock) |lock| self.register_manager.unlockReg(lock);

                const dst_mcv = try self.allocRegOrMem(inst, false);
                if (regs_frame_addr.regs > 0) try self.asmMemoryRegister(
                    .{ ._, .mov },
                    try dst_mcv.mem(self, .{ .size = .byte }),
                    prev_reg.to8(),
                );
                try self.genInlineMemset(
                    dst_mcv.address().offset(@intFromBool(regs_frame_addr.regs > 0)),
                    .{ .immediate = 0 },
                    .{ .immediate = arg_ty.abiSize(zcu) - @intFromBool(regs_frame_addr.regs > 0) },
                    .{},
                );

                const index_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
                const index_lock = self.register_manager.lockRegAssumeUnused(index_reg);
                defer self.register_manager.unlockReg(index_lock);

                try self.asmRegisterImmediate(
                    .{ ._, .mov },
                    index_reg.to32(),
                    .u(regs_frame_addr.regs),
                );
                const loop: Mir.Inst.Index = @intCast(self.mir_instructions.len);
                try self.asmMemoryImmediate(.{ ._, .cmp }, .{
                    .base = .{ .frame = regs_frame_addr.frame_index },
                    .mod = .{ .rm = .{
                        .size = .byte,
                        .index = index_reg.to64(),
                        .scale = .@"8",
                        .disp = regs_frame_addr.frame_off - @as(u6, regs_frame_addr.regs) * 8,
                    } },
                }, Immediate.u(0));
                const unset = try self.asmJccReloc(.e, undefined);
                try self.asmMemoryRegister(
                    .{ ._s, .bt },
                    try dst_mcv.mem(self, .{ .size = .dword }),
                    index_reg.to32(),
                );
                self.performReloc(unset);
                if (self.hasFeature(.slow_incdec)) {
                    try self.asmRegisterImmediate(.{ ._, .add }, index_reg.to32(), .u(1));
                } else {
                    try self.asmRegister(.{ ._c, .in }, index_reg.to32());
                }
                try self.asmRegisterImmediate(
                    .{ ._, .cmp },
                    index_reg.to32(),
                    .u(arg_ty.vectorLen(zcu)),
                );
                _ = try self.asmJccReloc(.b, loop);

                break :result dst_mcv;
            },
            else => return self.fail("TODO implement arg for {}", .{src_mcv}),
        }
    };
    return self.finishAir(inst, result, .{ .none, .none, .none });
}

fn airDbgVarArgs(self: *CodeGen) !void {
    if (self.debug_output == .none) return;
    if (!self.pt.zcu.typeToFunc(self.fn_type).?.is_var_args) return;
    try self.asmPseudo(.pseudo_dbg_var_args_none);
}

fn genLocalDebugInfo(
    self: *CodeGen,
    inst: Air.Inst.Index,
    mcv: MCValue,
) !void {
    if (self.debug_output == .none) return;
    switch (self.air.instructions.items(.tag)[@intFromEnum(inst)]) {
        else => unreachable,
        .arg, .dbg_arg_inline, .dbg_var_val => |tag| {
            switch (mcv) {
                .none => try self.asmAir(.dbg_local, inst),
                .unreach, .dead, .elementwise_regs_then_frame, .reserved_frame, .air_ref => unreachable,
                .immediate => |imm| try self.asmAirImmediate(.dbg_local, inst, .u(imm)),
                .lea_frame => |frame_addr| try self.asmAirFrameAddress(.dbg_local, inst, frame_addr),
                .lea_symbol => |sym_off| try self.asmAirImmediate(.dbg_local, inst, .rel(sym_off)),
                else => {
                    const ty = switch (tag) {
                        else => unreachable,
                        .arg => self.typeOfIndex(inst),
                        .dbg_arg_inline, .dbg_var_val => self.typeOf(
                            self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op.operand,
                        ),
                    };
                    const frame_index = try self.allocFrameIndex(.initSpill(ty, self.pt.zcu));
                    try self.genSetMem(.{ .frame = frame_index }, 0, ty, mcv, .{});
                    try self.asmAirMemory(.dbg_local, inst, .{
                        .base = .{ .frame = frame_index },
                        .mod = .{ .rm = .{ .size = .qword } },
                    });
                },
            }
        },
        .dbg_var_ptr => switch (mcv) {
            else => unreachable,
            .unreach, .dead, .elementwise_regs_then_frame, .reserved_frame, .air_ref => unreachable,
            .lea_frame => |frame_addr| try self.asmAirMemory(.dbg_local, inst, .{
                .base = .{ .frame = frame_addr.index },
                .mod = .{ .rm = .{
                    .size = .qword,
                    .disp = frame_addr.off,
                } },
            }),
            .lea_symbol => |sym_off| try self.asmAirMemory(.dbg_local, inst, .{
                .base = .{ .reloc = sym_off.sym_index },
                .mod = .{ .rm = .{
                    .size = .qword,
                    .disp = sym_off.off,
                } },
            }),
            .lea_direct, .lea_got, .lea_tlv => |sym_index| try self.asmAirMemory(.dbg_local, inst, .{
                .base = .{ .reloc = sym_index },
                .mod = .{ .rm = .{ .size = .qword } },
            }),
        },
    }
}

fn airRetAddr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const dst_mcv = try self.allocRegOrMem(inst, true);
    try self.genCopy(.usize, dst_mcv, .{ .load_frame = .{ .index = .ret_addr } }, .{});
    return self.finishAir(inst, dst_mcv, .{ .none, .none, .none });
}

fn airFrameAddress(self: *CodeGen, inst: Air.Inst.Index) !void {
    const dst_mcv = try self.allocRegOrMem(inst, true);
    try self.genCopy(.usize, dst_mcv, .{ .lea_frame = .{ .index = .base_ptr } }, .{});
    return self.finishAir(inst, dst_mcv, .{ .none, .none, .none });
}

fn airCall(self: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier, opts: CopyOptions) !void {
    if (modifier == .always_tail) return self.fail("TODO implement tail calls for x86_64", .{});

    const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
    const extra = self.air.extraData(Air.Call, pl_op.payload);
    const arg_refs: []const Air.Inst.Ref =
        @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]);

    const ExpectedContents = extern struct {
        tys: [16][@sizeOf(Type)]u8 align(@alignOf(Type)),
        vals: [16][@sizeOf(MCValue)]u8 align(@alignOf(MCValue)),
    };
    var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) =
        std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);
    const allocator = stack.get();

    const arg_tys = try allocator.alloc(Type, arg_refs.len);
    defer allocator.free(arg_tys);
    for (arg_tys, arg_refs) |*arg_ty, arg_ref| arg_ty.* = self.typeOf(arg_ref);

    const arg_vals = try allocator.alloc(MCValue, arg_refs.len);
    defer allocator.free(arg_vals);
    for (arg_vals, arg_refs) |*arg_val, arg_ref| arg_val.* = .{ .air_ref = arg_ref };

    const ret = try self.genCall(.{ .air = pl_op.operand }, arg_tys, arg_vals, opts);

    var bt = self.liveness.iterateBigTomb(inst);
    try self.feed(&bt, pl_op.operand);
    for (arg_refs) |arg_ref| try self.feed(&bt, arg_ref);

    const result = if (self.liveness.isUnused(inst)) .unreach else ret;
    return self.finishAirResult(inst, result);
}

fn genCall(self: *CodeGen, info: union(enum) {
    air: Air.Inst.Ref,
    lib: struct {
        return_type: InternPool.Index,
        param_types: []const InternPool.Index,
        lib: ?[]const u8 = null,
        callee: []const u8,
    },
}, arg_types: []const Type, args: []const MCValue, opts: CopyOptions) !MCValue {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;

    const fn_ty = switch (info) {
        .air => |callee| fn_info: {
            const callee_ty = self.typeOf(callee);
            break :fn_info switch (callee_ty.zigTypeTag(zcu)) {
                .@"fn" => callee_ty,
                .pointer => callee_ty.childType(zcu),
                else => unreachable,
            };
        },
        .lib => |lib| try pt.funcType(.{
            .param_types = lib.param_types,
            .return_type = lib.return_type,
            .cc = self.target.cCallingConvention().?,
        }),
    };
    const fn_info = zcu.typeToFunc(fn_ty).?;

    const ExpectedContents = extern struct {
        var_args: [16][@sizeOf(Type)]u8 align(@alignOf(Type)),
        frame_indices: [16]FrameIndex,
        reg_locks: [16][@sizeOf(?RegisterLock)]u8 align(@alignOf(?RegisterLock)),
    };
    var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) =
        std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);
    const allocator = stack.get();

    const var_args = try allocator.alloc(Type, args.len - fn_info.param_types.len);
    defer allocator.free(var_args);
    for (var_args, arg_types[fn_info.param_types.len..]) |*var_arg, arg_ty| var_arg.* = arg_ty;

    const frame_indices = try allocator.alloc(FrameIndex, args.len);
    defer allocator.free(frame_indices);

    var reg_locks: std.ArrayList(?RegisterLock) = .init(allocator);
    defer reg_locks.deinit();
    try reg_locks.ensureTotalCapacity(16);
    defer for (reg_locks.items) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock);

    var call_info = try self.resolveCallingConventionValues(fn_info, var_args, .call_frame);
    defer call_info.deinit(self);

    // We need a properly aligned and sized call frame to be able to call this function.
    {
        const needed_call_frame: FrameAlloc = .init(.{
            .size = call_info.stack_byte_count,
            .alignment = call_info.stack_align,
        });
        const frame_allocs_slice = self.frame_allocs.slice();
        const stack_frame_size =
            &frame_allocs_slice.items(.abi_size)[@intFromEnum(FrameIndex.call_frame)];
        stack_frame_size.* = @max(stack_frame_size.*, needed_call_frame.abi_size);
        const stack_frame_align =
            &frame_allocs_slice.items(.abi_align)[@intFromEnum(FrameIndex.call_frame)];
        stack_frame_align.* = stack_frame_align.max(needed_call_frame.abi_align);
    }

    try self.spillEflagsIfOccupied();
    try self.spillCallerPreservedRegs(fn_info.cc, call_info.err_ret_trace_reg);

    // set stack arguments first because this can clobber registers
    // also clobber spill arguments as we go
    switch (call_info.return_value.long) {
        .none, .unreach => {},
        .indirect => |reg_off| try self.register_manager.getReg(reg_off.reg, null),
        else => unreachable,
    }
    for (call_info.args, arg_types, args, frame_indices) |dst_arg, arg_ty, src_arg, *frame_index|
        switch (dst_arg) {
            .none => {},
            .register => |reg| {
                try self.register_manager.getReg(reg, null);
                try reg_locks.append(self.register_manager.lockReg(reg));
            },
            .register_pair => |regs| {
                for (regs) |reg| try self.register_manager.getReg(reg, null);
                try reg_locks.appendSlice(&self.register_manager.lockRegs(2, regs));
            },
            .indirect => |reg_off| {
                frame_index.* = try self.allocFrameIndex(.initType(arg_ty, zcu));
                try self.genSetMem(.{ .frame = frame_index.* }, 0, arg_ty, src_arg, opts);
                try self.register_manager.getReg(reg_off.reg, null);
                try reg_locks.append(self.register_manager.lockReg(reg_off.reg));
            },
            .load_frame => {
                try self.genCopy(arg_ty, dst_arg, src_arg, opts);
                try self.freeValue(src_arg);
            },
            .elementwise_regs_then_frame => |regs_frame_addr| {
                const index_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
                const index_lock = self.register_manager.lockRegAssumeUnused(index_reg);
                defer self.register_manager.unlockReg(index_lock);

                const src_mem: Memory = if (src_arg.isBase()) try src_arg.mem(self, .{ .size = .dword }) else .{
                    .base = .{ .reg = try self.copyToTmpRegister(.usize, switch (src_arg) {
                        else => src_arg,
                        .air_ref => |src_ref| try self.resolveInst(src_ref),
                    }.address()) },
                    .mod = .{ .rm = .{ .size = .dword } },
                };
                const src_lock = switch (src_mem.base) {
                    .reg => |src_reg| self.register_manager.lockReg(src_reg),
                    else => null,
                };
                defer if (src_lock) |lock| self.register_manager.unlockReg(lock);

                try self.asmRegisterImmediate(
                    .{ ._, .mov },
                    index_reg.to32(),
                    .u(regs_frame_addr.regs),
                );
                const loop: Mir.Inst.Index = @intCast(self.mir_instructions.len);
                try self.asmMemoryRegister(.{ ._, .bt }, src_mem, index_reg.to32());
                try self.asmSetccMemory(.c, .{
                    .base = .{ .frame = regs_frame_addr.frame_index },
                    .mod = .{ .rm = .{
                        .size = .byte,
                        .index = index_reg.to64(),
                        .scale = .@"8",
                        .disp = regs_frame_addr.frame_off - @as(u6, regs_frame_addr.regs) * 8,
                    } },
                });
                if (self.hasFeature(.slow_incdec)) {
                    try self.asmRegisterImmediate(.{ ._, .add }, index_reg.to32(), .u(1));
                } else {
                    try self.asmRegister(.{ ._c, .in }, index_reg.to32());
                }
                try self.asmRegisterImmediate(
                    .{ ._, .cmp },
                    index_reg.to32(),
                    .u(arg_ty.vectorLen(zcu)),
                );
                _ = try self.asmJccReloc(.b, loop);

                const param_int_regs = abi.getCAbiIntParamRegs(fn_info.cc);
                for (param_int_regs[param_int_regs.len - regs_frame_addr.regs ..]) |dst_reg| {
                    try self.register_manager.getReg(dst_reg, null);
                    try reg_locks.append(self.register_manager.lockReg(dst_reg));
                }
            },
            else => unreachable,
        };
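
    // If this function tracks an error return trace, materialize its pointer
    // into the callee's dedicated register before the call, unless it already
    // lives there.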
    if (call_info.err_ret_trace_reg != .none) {
        if (self.inst_tracking.getPtr(err_ret_trace_index)) |err_ret_trace| {
            if (switch (err_ret_trace.short) {
                .register => |reg| call_info.err_ret_trace_reg != reg,
                else => true,
            }) {
                try self.register_manager.getReg(call_info.err_ret_trace_reg, err_ret_trace_index);
                try reg_locks.append(self.register_manager.lockReg(call_info.err_ret_trace_reg));

                try self.genSetReg(call_info.err_ret_trace_reg, .usize, err_ret_trace.short, .{});
                err_ret_trace.trackMaterialize(err_ret_trace_index, .{
                    .long = err_ret_trace.long,
                    .short = .{ .register = call_info.err_ret_trace_reg },
                });
            }
        }
    }

    // now we are free to set register arguments
    switch (call_info.return_value.long) {
        .none, .unreach => {},
        .indirect => |reg_off| {
            const ret_ty: Type = .fromInterned(fn_info.return_type);
            const frame_index = try self.allocFrameIndex(.initSpill(ret_ty, zcu));
            try self.genSetReg(reg_off.reg, .usize, .{
                .lea_frame = .{ .index = frame_index, .off = -reg_off.off },
            }, .{});
            call_info.return_value.short = .{ .load_frame = .{ .index = frame_index } };
            try reg_locks.append(self.register_manager.lockReg(reg_off.reg));
        },
        else => unreachable,
    }

    for (call_info.args, arg_types, args, frame_indices) |dst_arg, arg_ty, src_arg, frame_index|
        switch (dst_arg) {
            .none, .load_frame => {},
            .register => |dst_reg| switch (fn_info.cc) {
                else => try self.genSetReg(registerAlias(
                    dst_reg,
                    @intCast(arg_ty.abiSize(zcu)),
                ), arg_ty, src_arg, opts),
                .x86_64_sysv, .x86_64_win => {
                    const promoted_ty = self.promoteInt(arg_ty);
                    const promoted_abi_size: u32 = @intCast(promoted_ty.abiSize(zcu));
                    const dst_alias = registerAlias(dst_reg, promoted_abi_size);
                    try self.genSetReg(dst_alias, promoted_ty, src_arg, opts);
                    if (promoted_ty.toIntern() != arg_ty.toIntern())
                        try self.truncateRegister(arg_ty, dst_alias);
                },
            },
            .register_pair => try self.genCopy(arg_ty, dst_arg, src_arg, opts),
            .indirect => |reg_off| try self.genSetReg(reg_off.reg, .usize, .{
                .lea_frame = .{ .index = frame_index, .off = -reg_off.off },
            }, .{}),
            .elementwise_regs_then_frame => |regs_frame_addr| {
                const src_mem: Memory = if (src_arg.isBase()) try src_arg.mem(self, .{ .size = .dword }) else .{
                    .base = .{ .reg = try self.copyToTmpRegister(
                        .usize,
                        switch (src_arg) {
                            else => src_arg,
                            .air_ref => |src_ref| try self.resolveInst(src_ref),
                        }.address(),
                    ) },
                    .mod = .{ .rm = .{ .size = .dword } },
                };
                const src_lock = switch (src_mem.base) {
                    .reg => |src_reg| self.register_manager.lockReg(src_reg),
                    else => null,
                };
                defer if (src_lock) |lock| self.register_manager.unlockReg(lock);

                const param_int_regs = abi.getCAbiIntParamRegs(fn_info.cc);
                for (
                    param_int_regs[param_int_regs.len - regs_frame_addr.regs ..],
                    0..,
                ) |dst_reg, elem_index| {
                    try self.asmRegisterRegister(.{ ._, .xor }, dst_reg.to32(), dst_reg.to32());
                    try self.asmMemoryImmediate(.{ ._, .bt }, src_mem, .u(elem_index));
                    try self.asmSetccRegister(.c, dst_reg.to8());
                }
            },
            else => unreachable,
        };
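
    // The SysV varargs convention expects AL to hold an upper bound on the
    // number of vector registers used by the variadic arguments.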
    if (fn_info.is_var_args) try self.asmRegisterImmediate(.{ ._, .mov }, .al, .u(call_info.fp_count));

    // Due to incremental compilation, how function calls are generated depends
    // on linking.
    switch (info) {
        .air => |callee| if (try self.air.value(callee, pt)) |func_value| {
            const func_key = ip.indexToKey(func_value.ip_index);
            switch (switch (func_key) {
                else => func_key,
                .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
                    .nav => |nav| ip.indexToKey(zcu.navValue(nav).toIntern()),
                    else => func_key,
                } else func_key,
            }) {
                .func => |func| {
                    if (self.bin_file.cast(.elf)) |elf_file| {
                        const zo = elf_file.zigObjectPtr().?;
                        const sym_index = try zo.getOrCreateMetadataForNav(zcu, func.owner_nav);
                        try self.asmImmediate(.{ ._, .call }, .rel(.{ .sym_index = sym_index }));
                    } else if (self.bin_file.cast(.coff)) |coff_file| {
                        const atom = try coff_file.getOrCreateAtomForNav(func.owner_nav);
                        const sym_index = coff_file.getAtom(atom).getSymbolIndex().?;
                        const scratch_reg = abi.getCAbiLinkerScratchReg(fn_info.cc);
                        try self.genSetReg(scratch_reg, .usize, .{ .lea_got = sym_index }, .{});
                        try self.asmRegister(.{ ._, .call }, scratch_reg);
                    } else if (self.bin_file.cast(.macho)) |macho_file| {
                        const zo = macho_file.getZigObject().?;
                        const sym_index = try zo.getOrCreateMetadataForNav(macho_file, func.owner_nav);
                        const sym = zo.symbols.items[sym_index];
                        try self.asmImmediate(.{ ._, .call }, .rel(.{ .sym_index = sym.nlist_idx }));
                    } else if (self.bin_file.cast(.plan9)) |p9| {
                        const atom_index = try p9.seeNav(pt, func.owner_nav);
                        const atom = p9.getAtom(atom_index);
                        try self.asmMemory(.{ ._, .call }, .{
                            .base = .{ .reg = .ds },
                            .mod = .{ .rm = .{
                                .size = .qword,
                                .disp = @intCast(atom.getOffsetTableAddress(p9)),
                            } },
                        });
                    } else unreachable;
                },
                .@"extern" => |@"extern"| if (self.bin_file.cast(.elf)) |elf_file| {
                    const target_sym_index = try elf_file.getGlobalSymbol(
                        @"extern".name.toSlice(ip),
                        @"extern".lib_name.toSlice(ip),
                    );
                    try self.asmImmediate(.{ ._, .call }, .rel(.{ .sym_index = target_sym_index }));
                } else if (self.bin_file.cast(.macho)) |macho_file| {
                    const target_sym_index = try macho_file.getGlobalSymbol(
                        @"extern".name.toSlice(ip),
                        @"extern".lib_name.toSlice(ip),
                    );
                    try self.asmImmediate(.{ ._, .call }, .rel(.{ .sym_index = target_sym_index }));
                } else try self.genExternSymbolRef(
                    .call,
                    @"extern".lib_name.toSlice(ip),
                    @"extern".name.toSlice(ip),
                ),
                else => return self.fail("TODO implement calling bitcasted functions", .{}),
            }
        } else {
            assert(self.typeOf(callee).zigTypeTag(zcu) == .pointer);
            const scratch_reg = abi.getCAbiLinkerScratchReg(fn_info.cc);
            try self.genSetReg(scratch_reg, .usize, .{ .air_ref = callee }, .{});
            try self.asmRegister(.{ ._, .call }, scratch_reg);
        },
        .lib => |lib| if (self.bin_file.cast(.elf)) |elf_file| {
            const target_sym_index = try elf_file.getGlobalSymbol(lib.callee, lib.lib);
            try self.asmImmediate(.{ ._, .call }, .rel(.{ .sym_index = target_sym_index }));
        } else if (self.bin_file.cast(.macho)) |macho_file| {
            const target_sym_index = try macho_file.getGlobalSymbol(lib.callee, lib.lib);
            try self.asmImmediate(.{ ._, .call }, .rel(.{ .sym_index = target_sym_index }));
        } else try self.genExternSymbolRef(.call, lib.lib, lib.callee),
    }
    return call_info.return_value.short;
}

fn airRet(self: *CodeGen, inst: Air.Inst.Index, safety: bool) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;

    const ret_ty = self.fn_type.fnReturnType(zcu);
    switch (self.ret_mcv.short) {
        .none => {},
        .register => |reg| {
            const reg_lock = self.register_manager.lockRegAssumeUnused(reg);
            defer self.register_manager.unlockReg(reg_lock);
            try self.genCopy(ret_ty, self.ret_mcv.short, .{ .air_ref = un_op }, .{ .safety = safety });
        },
        inline .register_pair, .register_triple, .register_quadruple => |regs| {
            const reg_locks = self.register_manager.lockRegsAssumeUnused(regs.len, regs);
            defer for (reg_locks) |reg_lock| self.register_manager.unlockReg(reg_lock);
            try self.genCopy(ret_ty, self.ret_mcv.short, .{ .air_ref = un_op }, .{ .safety = safety });
        },
        .indirect => |reg_off| {
            try self.register_manager.getReg(reg_off.reg, null);
            const lock = self.register_manager.lockRegAssumeUnused(reg_off.reg);
            defer self.register_manager.unlockReg(lock);

            try self.genSetReg(reg_off.reg, .usize, self.ret_mcv.long, .{});
            try self.genSetMem(
                .{ .reg = reg_off.reg },
                reg_off.off,
                ret_ty,
                .{ .air_ref = un_op },
                .{ .safety = safety },
            );
        },
        else => unreachable,
    }
    self.ret_mcv.liveOut(self, inst);

    if (self.err_ret_trace_reg != .none) {
        if (self.inst_tracking.getPtr(err_ret_trace_index)) |err_ret_trace| {
            if (switch (err_ret_trace.short) {
                .register => |reg| self.err_ret_trace_reg != reg,
                else => true,
            }) try self.genSetReg(self.err_ret_trace_reg, .usize, err_ret_trace.short, .{});
            err_ret_trace.liveOut(self, err_ret_trace_index);
        }
    }

    try self.finishAir(inst, .unreach, .{ un_op, .none, .none });

    // TODO optimization opportunity: figure out when we can emit this as a 2 byte instruction
    // which is available if the jump is 127 bytes or less forward.
    const jmp_reloc = try self.asmJmpReloc(undefined);
    try self.epilogue_relocs.append(self.gpa, jmp_reloc);
}

fn airRetLoad(self: *CodeGen, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
    const ptr = try self.resolveInst(un_op);

    const ptr_ty = self.typeOf(un_op);
    switch (self.ret_mcv.short) {
        .none => {},
        .register, .register_pair => try self.load(self.ret_mcv.short, ptr_ty, ptr),
        .indirect => |reg_off| try self.genSetReg(reg_off.reg, ptr_ty, ptr, .{}),
        else => unreachable,
    }
    self.ret_mcv.liveOut(self, inst);

    if (self.err_ret_trace_reg != .none) {
        if (self.inst_tracking.getPtr(err_ret_trace_index)) |err_ret_trace| {
            if (switch (err_ret_trace.short) {
                .register => |reg| self.err_ret_trace_reg != reg,
                else => true,
            }) try self.genSetReg(self.err_ret_trace_reg, .usize, err_ret_trace.short, .{});
            err_ret_trace.liveOut(self, err_ret_trace_index);
        }
    }

    try self.finishAir(inst, .unreach, .{ un_op, .none, .none });

    // TODO optimization opportunity: figure out when we can emit this as a 2 byte instruction
    // which is available if the jump is 127 bytes or less forward.
    const jmp_reloc = try self.asmJmpReloc(undefined);
    try self.epilogue_relocs.append(self.gpa, jmp_reloc);
}

fn airCmp(self: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
    var ty = self.typeOf(bin_op.lhs);
    var null_compare: ?Mir.Inst.Index = null;

    const result: Condition = result: {
        try self.spillEflagsIfOccupied();

        const lhs_mcv = try self.resolveInst(bin_op.lhs);
        const lhs_locks: [2]?RegisterLock = switch (lhs_mcv) {
            .register => |lhs_reg| .{ self.register_manager.lockRegAssumeUnused(lhs_reg), null },
            .register_pair => |lhs_regs| locks: {
                const locks = self.register_manager.lockRegsAssumeUnused(2, lhs_regs);
                break :locks .{ locks[0], locks[1] };
            },
            .register_offset => |lhs_ro| .{
                self.register_manager.lockRegAssumeUnused(lhs_ro.reg),
                null,
            },
            else => @splat(null),
        };
        defer for (lhs_locks) |lhs_lock| if (lhs_lock) |lock| self.register_manager.unlockReg(lock);

        const rhs_mcv = try self.resolveInst(bin_op.rhs);
        const rhs_locks: [2]?RegisterLock = switch (rhs_mcv) {
            .register => |rhs_reg| .{ self.register_manager.lockReg(rhs_reg), null },
            .register_pair => |rhs_regs| self.register_manager.lockRegs(2, rhs_regs),
            .register_offset => |rhs_ro| .{ self.register_manager.lockReg(rhs_ro.reg), null },
            else => @splat(null),
        };
        defer for (rhs_locks) |rhs_lock| if (rhs_lock) |lock| self.register_manager.unlockReg(lock);

        switch (ty.zigTypeTag(zcu)) {
            .float => {
                const float_bits = ty.floatBits(self.target.*);
                if (!switch (float_bits) {
                    16 => self.hasFeature(.f16c),
                    32 => self.hasFeature(.sse),
                    64 => self.hasFeature(.sse2),
                    80, 128 => false,
                    else => unreachable,
                }) {
                    var callee_buf: ["__???f2".len]u8 = undefined;
                    const ret = try self.genCall(.{ .lib = .{
                        .return_type = .i32_type,
                        .param_types = &.{ ty.toIntern(), ty.toIntern() },
                        .callee = std.fmt.bufPrint(&callee_buf, "__{s}{c}f2", .{
                            switch (op) {
                                .eq => "eq",
                                .neq => "ne",
                                .lt => "lt",
                                .lte => "le",
                                .gt => "gt",
                                .gte => "ge",
                            },
                            floatCompilerRtAbiName(float_bits),
                        }) catch unreachable,
                    } }, &.{ ty, ty }, &.{ .{ .air_ref = bin_op.lhs }, .{ .air_ref = bin_op.rhs } }, .{});
                    try self.genBinOpMir(.{ ._, .@"test" }, .i32, ret, ret);
                    break :result switch (op) {
                        .eq => .e,
                        .neq => .ne,
                        .lt => .l,
                        .lte => .le,
                        .gt => .g,
                        .gte => .ge,
                    };
                }
            },
            .optional => if (!ty.optionalReprIsPayload(zcu)) {
                const opt_ty = ty;
                const opt_abi_size: u31 = @intCast(opt_ty.abiSize(zcu));
                ty = opt_ty.optionalChild(zcu);
                const payload_abi_size: u31 = @intCast(ty.abiSize(zcu));

                const temp_lhs_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
                const temp_lhs_lock = self.register_manager.lockRegAssumeUnused(temp_lhs_reg);
                defer self.register_manager.unlockReg(temp_lhs_lock);

                if (lhs_mcv.isBase()) try self.asmRegisterMemory(
                    .{ ._, .mov },
                    temp_lhs_reg.to8(),
                    try lhs_mcv.address().offset(payload_abi_size).deref().mem(self, .{ .size = .byte }),
                ) else {
                    try self.genSetReg(temp_lhs_reg, opt_ty, lhs_mcv, .{});
                    try self.asmRegisterImmediate(
                        .{ ._r, .sh },
                        registerAlias(temp_lhs_reg, opt_abi_size),
                        .u(payload_abi_size * 8),
                    );
                }
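
                // Compare the "has value" bits first: if either side is null,
                // those bits alone decide the answer; only when both sides are
                // non-null does control fall through to the payload compare,
                // with null_compare jumping over it otherwise.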
                const payload_compare = payload_compare: {
                    if (rhs_mcv.isBase()) {
                        const rhs_mem =
                            try rhs_mcv.address().offset(payload_abi_size).deref().mem(self, .{ .size = .byte });
                        try self.asmMemoryRegister(.{ ._, .@"test" }, rhs_mem, temp_lhs_reg.to8());
                        const payload_compare = try self.asmJccReloc(.nz, undefined);
                        try self.asmRegisterMemory(.{ ._, .cmp }, temp_lhs_reg.to8(), rhs_mem);
                        break :payload_compare payload_compare;
                    }

                    const temp_rhs_reg = try self.copyToTmpRegister(opt_ty, rhs_mcv);
                    const temp_rhs_lock = self.register_manager.lockRegAssumeUnused(temp_rhs_reg);
                    defer self.register_manager.unlockReg(temp_rhs_lock);

                    try self.asmRegisterImmediate(
                        .{ ._r, .sh },
                        registerAlias(temp_rhs_reg, opt_abi_size),
                        .u(payload_abi_size * 8),
                    );
                    try self.asmRegisterRegister(
                        .{ ._, .@"test" },
                        temp_lhs_reg.to8(),
                        temp_rhs_reg.to8(),
                    );
                    const payload_compare = try self.asmJccReloc(.nz, undefined);
                    try self.asmRegisterRegister(
                        .{ ._, .cmp },
                        temp_lhs_reg.to8(),
                        temp_rhs_reg.to8(),
                    );
                    break :payload_compare payload_compare;
                };
                null_compare = try self.asmJmpReloc(undefined);
                self.performReloc(payload_compare);
            },
            else => {},
        }

        switch (ty.zigTypeTag(zcu)) {
            else => {
                const abi_size: u16 = @intCast(ty.abiSize(zcu));
                const may_flip: enum {
                    may_flip,
                    must_flip,
                    must_not_flip,
                } = if (abi_size > 8) switch (op) {
                    .lt, .gte => .must_not_flip,
                    .lte, .gt => .must_flip,
                    .eq, .neq => .may_flip,
                } else .may_flip;

                const flipped = switch (may_flip) {
                    .may_flip => !lhs_mcv.isRegister() and !lhs_mcv.isBase(),
                    .must_flip => true,
                    .must_not_flip => false,
                };
                const unmat_dst_mcv = if (flipped) rhs_mcv else lhs_mcv;
                const dst_mcv = if (unmat_dst_mcv.isRegister() or
                    (abi_size <= 8 and unmat_dst_mcv.isBase())) unmat_dst_mcv else dst: {
                    const dst_mcv = try self.allocTempRegOrMem(ty, true);
                    try self.genCopy(ty, dst_mcv, unmat_dst_mcv, .{});
                    break :dst dst_mcv;
                };
                const dst_lock =
                    if (dst_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null;
                defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);

                const src_mcv = try self.resolveInst(if (flipped) bin_op.lhs else bin_op.rhs);
                const src_lock =
                    if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null;
                defer if (src_lock) |lock| self.register_manager.unlockReg(lock);

                break :result .fromCompareOperator(
                    if (ty.isAbiInt(zcu)) ty.intInfo(zcu).signedness else .unsigned,
                    result_op: {
                        const flipped_op = if (flipped) op.reverse() else op;
                        if (abi_size > 8) switch (flipped_op) {
                            .lt, .gte => {},
                            .lte, .gt => unreachable,
                            .eq, .neq => {
                                const OpInfo = ?struct { addr_reg: Register, addr_lock: RegisterLock };

                                const resolved_dst_mcv = switch (dst_mcv) {
                                    else => dst_mcv,
                                    .air_ref => |dst_ref| try self.resolveInst(dst_ref),
                                };
                                const dst_info: OpInfo = switch (resolved_dst_mcv) {
                                    .none,
                                    .unreach,
                                    .dead,
                                    .undef,
                                    .immediate,
                                    .eflags,
                                    .register_offset,
                                    .register_overflow,
                                    .register_mask,
                                    .indirect,
                                    .lea_direct,
                                    .lea_got,
                                    .lea_tlv,
                                    .lea_frame,
                                    .lea_symbol,
                                    .elementwise_regs_then_frame,
                                    .reserved_frame,
                                    .air_ref,
                                    => unreachable,
                                    .register, .register_pair, .register_triple, .register_quadruple, .load_frame => null,
                                    .memory, .load_symbol, .load_got, .load_direct, .load_tlv => dst: {
                                        switch (resolved_dst_mcv) {
                                            .memory => |addr| if (std.math.cast(
                                                i32,
                                                @as(i64, @bitCast(addr)),
                                            ) != null and std.math.cast(
                                                i32,
                                                @as(i64, @bitCast(addr)) + abi_size - 8,
                                            ) != null) break :dst null,
                                            .load_symbol, .load_got, .load_direct, .load_tlv => {},
                                            else => unreachable,
                                        }

                                        const dst_addr_reg = (try self.register_manager.allocReg(
                                            null,
                                            abi.RegisterClass.gp,
                                        )).to64();
                                        const dst_addr_lock =
                                            self.register_manager.lockRegAssumeUnused(dst_addr_reg);
                                        errdefer self.register_manager.unlockReg(dst_addr_lock);

                                        try self.genSetReg(dst_addr_reg, .usize, resolved_dst_mcv.address(), .{});
                                        break :dst .{
                                            .addr_reg = dst_addr_reg,
                                            .addr_lock = dst_addr_lock,
                                        };
                                    },
                                };
                                defer if (dst_info) |info| self.register_manager.unlockReg(info.addr_lock);

                                const resolved_src_mcv = switch (src_mcv) {
                                    else => src_mcv,
                                    .air_ref => |src_ref| try self.resolveInst(src_ref),
                                };
                                const src_info: OpInfo = switch (resolved_src_mcv) {
                                    .none,
                                    .unreach,
                                    .dead,
                                    .undef,
                                    .immediate,
                                    .eflags,
                                    .register,
                                    .register_offset,
                                    .register_overflow,
                                    .register_mask,
                                    .indirect,
                                    .lea_symbol,
                                    .lea_direct,
                                    .lea_got,
                                    .lea_tlv,
                                    .lea_frame,
                                    .elementwise_regs_then_frame,
                                    .reserved_frame,
                                    .air_ref,
                                    => unreachable,
                                    .register_pair, .register_triple, .register_quadruple, .load_frame => null,
                                    .memory, .load_symbol, .load_got, .load_direct, .load_tlv => src: {
                                        switch (resolved_src_mcv) {
                                            .memory => |addr| if (std.math.cast(
                                                i32,
                                                @as(i64, @bitCast(addr)),
                                            ) != null and std.math.cast(
                                                i32,
                                                @as(i64, @bitCast(addr)) + abi_size - 8,
                                            ) != null) break :src null,
                                            .load_symbol, .load_got, .load_direct, .load_tlv => {},
                                            else => unreachable,
                                        }

                                        const src_addr_reg = (try self.register_manager.allocReg(
                                            null,
                                            abi.RegisterClass.gp,
                                        )).to64();
                                        const src_addr_lock =
                                            self.register_manager.lockRegAssumeUnused(src_addr_reg);
                                        errdefer self.register_manager.unlockReg(src_addr_lock);

                                        try self.genSetReg(src_addr_reg, .usize, resolved_src_mcv.address(), .{});
                                        break :src .{
                                            .addr_reg = src_addr_reg,
                                            .addr_lock = src_addr_lock,
                                        };
                                    },
                                };
                                defer if (src_info) |info|
                                    self.register_manager.unlockReg(info.addr_lock);
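
                                // Wide equality is computed limb by limb: XOR
                                // each pair of 64-bit limbs into a scratch
                                // register and OR the results into an
                                // accumulator; after the loop, ZF is set iff
                                // every limb matched.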
                                const regs = try self.register_manager.allocRegs(2, @splat(null), abi.RegisterClass.gp);
                                const acc_reg = regs[0].to64();
                                const locks = self.register_manager.lockRegsAssumeUnused(2, regs);
                                defer for (locks) |lock| self.register_manager.unlockReg(lock);

                                const limbs_len = std.math.divCeil(u16, abi_size, 8) catch unreachable;
                                var limb_i: u16 = 0;
                                while (limb_i < limbs_len) : (limb_i += 1) {
                                    const off = limb_i * 8;
                                    const tmp_reg = regs[@min(limb_i, 1)].to64();

                                    try self.genSetReg(tmp_reg, .usize, if (dst_info) |info| .{
                                        .indirect = .{ .reg = info.addr_reg, .off = off },
                                    } else switch (resolved_dst_mcv) {
                                        inline .register_pair,
                                        .register_triple,
                                        .register_quadruple,
                                        => |dst_regs| .{ .register = dst_regs[limb_i] },
                                        .memory => |dst_addr| .{
                                            .memory = @bitCast(@as(i64, @bitCast(dst_addr)) + off),
                                        },
                                        .indirect => |reg_off| .{ .indirect = .{
                                            .reg = reg_off.reg,
                                            .off = reg_off.off + off,
                                        } },
                                        .load_frame => |frame_addr| .{ .load_frame = .{
                                            .index = frame_addr.index,
                                            .off = frame_addr.off + off,
                                        } },
                                        else => unreachable,
                                    }, .{});

                                    try self.genBinOpMir(
                                        .{ ._, .xor },
                                        .usize,
                                        .{ .register = tmp_reg },
                                        if (src_info) |info| .{
                                            .indirect = .{ .reg = info.addr_reg, .off = off },
                                        } else switch (resolved_src_mcv) {
                                            inline .register_pair,
                                            .register_triple,
                                            .register_quadruple,
                                            => |src_regs| .{ .register = src_regs[limb_i] },
                                            .memory => |src_addr| .{
                                                .memory = @bitCast(@as(i64, @bitCast(src_addr)) + off),
                                            },
                                            .indirect => |reg_off| .{ .indirect = .{
                                                .reg = reg_off.reg,
                                                .off = reg_off.off + off,
                                            } },
                                            .load_frame => |frame_addr| .{ .load_frame = .{
                                                .index = frame_addr.index,
                                                .off = frame_addr.off + off,
                                            } },
                                            else => unreachable,
                                        },
                                    );

                                    if (limb_i > 0)
                                        try self.asmRegisterRegister(.{ ._, .@"or" }, acc_reg, tmp_reg);
                                }
                                assert(limbs_len >= 2); // use flags from or
                                break :result_op flipped_op;
                            },
                        };
                        try self.genBinOpMir(.{ ._, .cmp }, ty, dst_mcv, src_mcv);
                        break :result_op flipped_op;
                    },
                );
            },
            .float => {
                const flipped = switch (op) {
                    .lt, .lte => true,
                    .eq, .gte, .gt, .neq => false,
                };

                const dst_mcv = if (flipped) rhs_mcv else lhs_mcv;
                const dst_reg = if (dst_mcv.isRegister())
                    dst_mcv.getReg().?
                else
                    try self.copyToTmpRegister(ty, dst_mcv);
                const dst_lock = self.register_manager.lockReg(dst_reg);
                defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
                const src_mcv = if (flipped) lhs_mcv else rhs_mcv;

                switch (ty.floatBits(self.target.*)) {
                    16 => {
                        assert(self.hasFeature(.f16c));
                        const tmp1_reg =
                            (try self.register_manager.allocReg(null, abi.RegisterClass.sse)).to128();
                        const tmp1_mcv = MCValue{ .register = tmp1_reg };
                        const tmp1_lock = self.register_manager.lockRegAssumeUnused(tmp1_reg);
                        defer self.register_manager.unlockReg(tmp1_lock);

                        const tmp2_reg =
                            (try self.register_manager.allocReg(null, abi.RegisterClass.sse)).to128();
                        const tmp2_mcv = MCValue{ .register = tmp2_reg };
                        const tmp2_lock = self.register_manager.lockRegAssumeUnused(tmp2_reg);
                        defer self.register_manager.unlockReg(tmp2_lock);
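
                        // f16 has no native compare: gather both half operands
                        // into one xmm register (lanes 0 and 1), widen with
                        // vcvtph2ps, copy lane 1 down to lane 0 of a second
                        // register with vmovshdup, then compare the two f32
                        // values with ucomiss.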
                        if (src_mcv.isBase()) try self.asmRegisterRegisterMemoryImmediate(
                            .{ .vp_w, .insr },
                            tmp1_reg,
                            dst_reg.to128(),
                            try src_mcv.mem(self, .{ .size = .word }),
                            .u(1),
                        ) else try self.asmRegisterRegisterRegister(
                            .{ .vp_, .unpcklwd },
                            tmp1_reg,
                            dst_reg.to128(),
                            (if (src_mcv.isRegister())
                                src_mcv.getReg().?
                            else
                                try self.copyToTmpRegister(ty, src_mcv)).to128(),
                        );
                        try self.asmRegisterRegister(.{ .v_ps, .cvtph2 }, tmp1_reg, tmp1_reg);
                        try self.asmRegisterRegister(.{ .v_, .movshdup }, tmp2_reg, tmp1_reg);
                        try self.genBinOpMir(.{ ._ss, .ucomi }, ty, tmp1_mcv, tmp2_mcv);
                    },
                    32 => try self.genBinOpMir(
                        .{ ._ss, .ucomi },
                        ty,
                        .{ .register = dst_reg },
                        src_mcv,
                    ),
                    64 => try self.genBinOpMir(
                        .{ ._sd, .ucomi },
                        ty,
                        .{ .register = dst_reg },
                        src_mcv,
                    ),
                    else => unreachable,
                }
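
                // ucomi sets PF for unordered (NaN) operands, so equality must
                // additionally require "no parity" and inequality must accept
                // "parity"; the ordering comparisons use the unsigned flags.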
                break :result switch (if (flipped) op.reverse() else op) {
                    .lt, .lte => unreachable, // required to have been canonicalized to gt(e)
                    .gt => .a,
                    .gte => .ae,
                    .eq => .z_and_np,
                    .neq => .nz_or_p,
                };
            },
        }
    };

    if (null_compare) |reloc| self.performReloc(reloc);
    self.eflags_inst = inst;
    return self.finishAir(inst, .{ .eflags = result }, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airCmpVector(self: *CodeGen, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
    const extra = self.air.extraData(Air.VectorCmp, ty_pl.payload).data;
    const dst_mcv = try self.genBinOp(
        inst,
        .fromCmpOp(extra.compareOperator(), false),
        extra.lhs,
        extra.rhs,
    );
    return self.finishAir(inst, dst_mcv, .{ extra.lhs, extra.rhs, .none });
}

fn airCmpLtErrorsLen(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;

    const addr_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
    const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
    defer self.register_manager.unlockReg(addr_lock);
    const anyerror_lazy_sym: link.File.LazySymbol = .{ .kind = .const_data, .ty = .anyerror_type };
    try self.genLazySymbolRef(.lea, addr_reg, anyerror_lazy_sym);

    try self.spillEflagsIfOccupied();

    const op_ty = self.typeOf(un_op);
    const op_abi_size: u32 = @intCast(op_ty.abiSize(zcu));
    const op_mcv = try self.resolveInst(un_op);
    const dst_reg = switch (op_mcv) {
        .register => |reg| reg,
        else => try self.copyToTmpRegister(op_ty, op_mcv),
    };
    try self.asmRegisterMemory(
        .{ ._, .cmp },
        registerAlias(dst_reg, op_abi_size),
        .{
            .base = .{ .reg = addr_reg },
            .mod = .{ .rm = .{ .size = .fromSize(op_abi_size) } },
        },
    );

    self.eflags_inst = inst;
    return self.finishAir(inst, .{ .eflags = .b }, .{ un_op, .none, .none });
}

fn airTry(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
    const extra = self.air.extraData(Air.Try, pl_op.payload);
    const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]);
    const operand_ty = self.typeOf(pl_op.operand);
    const result = try self.genTry(inst, pl_op.operand, body, operand_ty, false);
    return self.finishAir(inst, result, .{ .none, .none, .none });
}

fn airTryPtr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
    const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
    const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]);
    const operand_ty = self.typeOf(extra.data.ptr);
    const result = try self.genTry(inst, extra.data.ptr, body, operand_ty, true);
    return self.finishAir(inst, result, .{ .none, .none, .none });
}

fn genTry(
    self: *CodeGen,
    inst: Air.Inst.Index,
    operand: Air.Inst.Ref,
    body: []const Air.Inst.Index,
    operand_ty: Type,
    operand_is_ptr: bool,
) !MCValue {
    const liveness_cond_br = self.liveness.getCondBr(inst);

    const operand_mcv = try self.resolveInst(operand);
    const is_err_mcv = if (operand_is_ptr)
        try self.isErrPtr(null, operand_ty, operand_mcv)
    else
        try self.isErr(null, operand_ty, operand_mcv);

    const reloc = try self.genCondBrMir(.anyerror, is_err_mcv);

    if (self.liveness.operandDies(inst, 0)) {
        if (operand.toIndex()) |operand_inst| try self.processDeath(operand_inst);
    }

    self.scope_generation += 1;
    const state = try self.saveState();

    for (liveness_cond_br.else_deaths) |death| try self.processDeath(death);
    try self.genBodyBlock(body);
    try self.restoreState(state, &.{}, .{
        .emit_instructions = false,
        .update_tracking = true,
        .resurrect = true,
        .close_scope = true,
    });

    self.performReloc(reloc);

    for (liveness_cond_br.then_deaths) |death| try self.processDeath(death);

    const result = if (self.liveness.isUnused(inst))
        .unreach
    else if (operand_is_ptr)
        try self.genUnwrapErrUnionPayloadPtrMir(inst, operand_ty, operand_mcv)
    else
        try self.genUnwrapErrUnionPayloadMir(inst, operand_ty, operand_mcv);
    return result;
}

fn airDbgVar(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
    try self.genLocalDebugInfo(inst, try self.resolveInst(pl_op.operand));
    return self.finishAir(inst, .unreach, .{ pl_op.operand, .none, .none });
}

fn genCondBrMir(self: *CodeGen, ty: Type, mcv: MCValue) !Mir.Inst.Index {
    const pt = self.pt;
    const abi_size = ty.abiSize(pt.zcu);
    switch (mcv) {
        .eflags => |cc| {
            // Here we map the opposites since the jump is to the false branch.
            return self.asmJccReloc(cc.negate(), undefined);
        },
        .register => |reg| {
            try self.spillEflagsIfOccupied();
            try self.asmRegisterImmediate(.{ ._, .@"test" }, reg.to8(), .u(1));
            return self.asmJccReloc(.z, undefined);
        },
        .immediate,
        .load_frame,
        => {
            try self.spillEflagsIfOccupied();
            if (abi_size <= 8) {
                const reg = try self.copyToTmpRegister(ty, mcv);
                return self.genCondBrMir(ty, .{ .register = reg });
            }
            return self.fail("TODO implement condbr when condition is {} with abi larger than 8 bytes", .{mcv});
        },
        else => return self.fail("TODO implement condbr when condition is {s}", .{@tagName(mcv)}),
    }
}

fn airCondBr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
    const cond = try self.resolveInst(pl_op.operand);
    const cond_ty = self.typeOf(pl_op.operand);
    const extra = self.air.extraData(Air.CondBr, pl_op.payload);
    const then_body: []const Air.Inst.Index =
        @ptrCast(self.air.extra[extra.end..][0..extra.data.then_body_len]);
    const else_body: []const Air.Inst.Index =
        @ptrCast(self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]);
    const liveness_cond_br = self.liveness.getCondBr(inst);

    // If the condition dies here in this condbr instruction, process
    // that death now instead of later as this has an effect on
    // whether it needs to be spilled in the branches
    if (self.liveness.operandDies(inst, 0)) {
        if (pl_op.operand.toIndex()) |op_inst| try self.processDeath(op_inst);
    }

    self.scope_generation += 1;
    const state = try self.saveState();
    const reloc = try self.genCondBrMir(cond_ty, cond);

    for (liveness_cond_br.then_deaths) |death| try self.processDeath(death);
    try self.genBodyBlock(then_body);
    try self.restoreState(state, &.{}, .{
        .emit_instructions = false,
        .update_tracking = true,
        .resurrect = true,
        .close_scope = true,
    });

    self.performReloc(reloc);

    for (liveness_cond_br.else_deaths) |death| try self.processDeath(death);
    try self.genBodyBlock(else_body);
    try self.restoreState(state, &.{}, .{
        .emit_instructions = false,
        .update_tracking = true,
        .resurrect = true,
        .close_scope = true,
    });

    // We already took care of pl_op.operand earlier, so there's nothing left to do.
}

fn isNull(self: *CodeGen, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MCValue {
    const pt = self.pt;
    const zcu = pt.zcu;
    switch (opt_mcv) {
        .register_overflow => |ro| return .{ .eflags = ro.eflags.negate() },
        else => {},
    }

    try self.spillEflagsIfOccupied();

    const pl_ty = opt_ty.optionalChild(zcu);
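
    // When the optional is represented by its payload (e.g. a pointer), null
    // is the all-zero payload at offset 0; otherwise a bool "some" flag is
    // stored right after the payload, and that byte is what gets tested.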
    const some_info: struct { off: u31, ty: Type } = if (opt_ty.optionalReprIsPayload(zcu))
        .{ .off = 0, .ty = if (pl_ty.isSlice(zcu)) pl_ty.slicePtrFieldType(zcu) else pl_ty }
    else
        .{ .off = @intCast(pl_ty.abiSize(zcu)), .ty = .bool };

    self.eflags_inst = inst;
    switch (opt_mcv) {
        .none,
        .unreach,
        .dead,
        .undef,
        .immediate,
        .eflags,
        .register_triple,
        .register_quadruple,
        .register_offset,
        .register_overflow,
        .register_mask,
        .lea_direct,
        .lea_got,
        .lea_tlv,
        .lea_symbol,
        .elementwise_regs_then_frame,
        .reserved_frame,
        .air_ref,
        => unreachable,

        .lea_frame => {
            self.eflags_inst = null;
            return .{ .immediate = @intFromBool(false) };
        },

        .register => |opt_reg| {
            if (some_info.off == 0) {
                const some_abi_size: u32 = @intCast(some_info.ty.abiSize(zcu));
                const alias_reg = registerAlias(opt_reg, some_abi_size);
                assert(some_abi_size * 8 == alias_reg.bitSize());
                try self.asmRegisterRegister(.{ ._, .@"test" }, alias_reg, alias_reg);
                return .{ .eflags = .z };
            }
            assert(some_info.ty.ip_index == .bool_type);
            const opt_abi_size: u32 = @intCast(opt_ty.abiSize(zcu));
            try self.asmRegisterImmediate(
                .{ ._, .bt },
                registerAlias(opt_reg, opt_abi_size),
                .u(@as(u6, @intCast(some_info.off * 8))),
            );
            return .{ .eflags = .nc };
        },

        .register_pair => |opt_regs| {
            if (some_info.off == 0) {
                const some_abi_size: u32 = @intCast(some_info.ty.abiSize(zcu));
                const alias_reg = registerAlias(opt_regs[0], some_abi_size);
                assert(some_abi_size * 8 == alias_reg.bitSize());
                try self.asmRegisterRegister(.{ ._, .@"test" }, alias_reg, alias_reg);
                return .{ .eflags = .z };
            }
            assert(some_info.ty.ip_index == .bool_type);
            const opt_abi_size: u32 = @intCast(opt_ty.abiSize(zcu));
            try self.asmRegisterImmediate(
                .{ ._, .bt },
                registerAlias(opt_regs[some_info.off / 8], opt_abi_size),
                .u(@as(u6, @truncate(some_info.off * 8))),
            );
            return .{ .eflags = .nc };
        },

        .memory,
        .load_symbol,
        .load_got,
        .load_direct,
        .load_tlv,
        => {
            const addr_reg = (try self.register_manager.allocReg(null, abi.RegisterClass.gp)).to64();
            const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
            defer self.register_manager.unlockReg(addr_reg_lock);

            try self.genSetReg(addr_reg, .usize, opt_mcv.address(), .{});
            const some_abi_size: u32 = @intCast(some_info.ty.abiSize(zcu));
            try self.asmMemoryImmediate(
                .{ ._, .cmp },
                .{
                    .base = .{ .reg = addr_reg },
                    .mod = .{ .rm = .{
                        .size = .fromSize(some_abi_size),
                        .disp = some_info.off,
                    } },
                },
                .u(0),
            );
            return .{ .eflags = .e };
        },

        .indirect, .load_frame => {
            const some_abi_size: u32 = @intCast(some_info.ty.abiSize(zcu));
            try self.asmMemoryImmediate(
                .{ ._, .cmp },
                switch (opt_mcv) {
                    .indirect => |reg_off| .{
                        .base = .{ .reg = reg_off.reg },
                        .mod = .{ .rm = .{
                            .size = .fromSize(some_abi_size),
                            .disp = reg_off.off + some_info.off,
                        } },
                    },
                    .load_frame => |frame_addr| .{
                        .base = .{ .frame = frame_addr.index },
                        .mod = .{ .rm = .{
                            .size = .fromSize(some_abi_size),
                            .disp = frame_addr.off + some_info.off,
                        } },
                    },
                    else => unreachable,
                },
                .u(0),
            );
            return .{ .eflags = .e };
        },
    }
}
|
|
|
|
fn isNullPtr(self: *CodeGen, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) !MCValue {
    const pt = self.pt;
    const zcu = pt.zcu;
    const opt_ty = ptr_ty.childType(zcu);
    const pl_ty = opt_ty.optionalChild(zcu);

    try self.spillEflagsIfOccupied();

    const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(zcu))
        .{ .off = 0, .ty = if (pl_ty.isSlice(zcu)) pl_ty.slicePtrFieldType(zcu) else pl_ty }
    else
        .{ .off = @intCast(pl_ty.abiSize(zcu)), .ty = .bool };

    const ptr_reg = switch (ptr_mcv) {
        .register => |reg| reg,
        else => try self.copyToTmpRegister(ptr_ty, ptr_mcv),
    };
    const ptr_lock = self.register_manager.lockReg(ptr_reg);
    defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);

    const some_abi_size: u32 = @intCast(some_info.ty.abiSize(zcu));
    try self.asmMemoryImmediate(
        .{ ._, .cmp },
        .{
            .base = .{ .reg = ptr_reg },
            .mod = .{ .rm = .{
                .size = .fromSize(some_abi_size),
                .disp = some_info.off,
            } },
        },
        .u(0),
    );

    self.eflags_inst = inst;
    return .{ .eflags = .e };
}

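/// Lowers an is-error check of an error union to a compare of the error code
/// against zero. Returns `.eflags = .a` (error values are nonzero), or
/// `.immediate = 0` when the error set is empty and the check is always false.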
fn isErr(self: *CodeGen, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MCValue {
    const pt = self.pt;
    const zcu = pt.zcu;
    const err_ty = eu_ty.errorUnionSet(zcu);
    if (err_ty.errorSetIsEmpty(zcu)) return MCValue{ .immediate = 0 }; // always false

    try self.spillEflagsIfOccupied();

    const err_off: u31 = @intCast(codegen.errUnionErrorOffset(eu_ty.errorUnionPayload(zcu), zcu));
    switch (eu_mcv) {
        .register => |reg| {
            const eu_lock = self.register_manager.lockReg(reg);
            defer if (eu_lock) |lock| self.register_manager.unlockReg(lock);

            const tmp_reg = try self.copyToTmpRegister(eu_ty, eu_mcv);
            if (err_off > 0) {
                try self.genShiftBinOpMir(
                    .{ ._r, .sh },
                    eu_ty,
                    .{ .register = tmp_reg },
                    .u8,
                    .{ .immediate = @as(u6, @intCast(err_off * 8)) },
                );
            } else {
                try self.truncateRegister(.anyerror, tmp_reg);
            }
            try self.genBinOpMir(.{ ._, .cmp }, .anyerror, .{ .register = tmp_reg }, .{ .immediate = 0 });
        },
        .load_frame => |frame_addr| try self.genBinOpMir(
            .{ ._, .cmp },
            .anyerror,
            .{ .load_frame = .{
                .index = frame_addr.index,
                .off = frame_addr.off + err_off,
            } },
            .{ .immediate = 0 },
        ),
        else => return self.fail("TODO implement isErr for {}", .{eu_mcv}),
    }

    if (maybe_inst) |inst| self.eflags_inst = inst;
    return MCValue{ .eflags = .a };
}

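/// Like `isErr`, but for an error union reached through a pointer: compares the
/// error code in memory without loading the whole error union.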
fn isErrPtr(self: *CodeGen, maybe_inst: ?Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) !MCValue {
    const pt = self.pt;
    const zcu = pt.zcu;
    const eu_ty = ptr_ty.childType(zcu);
    const err_ty = eu_ty.errorUnionSet(zcu);
    if (err_ty.errorSetIsEmpty(zcu)) return MCValue{ .immediate = 0 }; // always false

    try self.spillEflagsIfOccupied();

    const ptr_reg = switch (ptr_mcv) {
        .register => |reg| reg,
        else => try self.copyToTmpRegister(ptr_ty, ptr_mcv),
    };
    const ptr_lock = self.register_manager.lockReg(ptr_reg);
    defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);

    const err_off: u31 = @intCast(codegen.errUnionErrorOffset(eu_ty.errorUnionPayload(zcu), zcu));
    try self.asmMemoryImmediate(
        .{ ._, .cmp },
        .{
            .base = .{ .reg = ptr_reg },
            .mod = .{ .rm = .{
                .size = self.memSize(.anyerror),
                .disp = err_off,
            } },
        },
        .u(0),
    );

    if (maybe_inst) |inst| self.eflags_inst = inst;
    return MCValue{ .eflags = .a };
}

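/// Negates the result of `isErr`; the eflags condition is inverted and an
/// always-false immediate becomes always-true.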
fn isNonErr(self: *CodeGen, inst: Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MCValue {
    const is_err_res = try self.isErr(inst, eu_ty, eu_mcv);
    switch (is_err_res) {
        .eflags => |cc| {
            assert(cc == .a);
            return MCValue{ .eflags = cc.negate() };
        },
        .immediate => |imm| {
            assert(imm == 0);
            return MCValue{ .immediate = @intFromBool(imm == 0) };
        },
        else => unreachable,
    }
}

fn isNonErrPtr(self: *CodeGen, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) !MCValue {
    const is_err_res = try self.isErrPtr(inst, ptr_ty, ptr_mcv);
    switch (is_err_res) {
        .eflags => |cc| {
            assert(cc == .a);
            return MCValue{ .eflags = cc.negate() };
        },
        .immediate => |imm| {
            assert(imm == 0);
            return MCValue{ .immediate = @intFromBool(imm == 0) };
        },
        else => unreachable,
    }
}

fn airIsNull(self: *CodeGen, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
    const operand = try self.resolveInst(un_op);
    const ty = self.typeOf(un_op);
    const result = try self.isNull(inst, ty, operand);
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

fn airIsNullPtr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
    const operand = try self.resolveInst(un_op);
    const ty = self.typeOf(un_op);
    const result = try self.isNullPtr(inst, ty, operand);
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

fn airIsNonNull(self: *CodeGen, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
    const operand = try self.resolveInst(un_op);
    const ty = self.typeOf(un_op);
    const result: MCValue = switch (try self.isNull(inst, ty, operand)) {
        .immediate => |imm| .{ .immediate = @intFromBool(imm == 0) },
        .eflags => |cc| .{ .eflags = cc.negate() },
        else => unreachable,
    };
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

fn airIsNonNullPtr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
    const operand = try self.resolveInst(un_op);
    const ty = self.typeOf(un_op);
    const result: MCValue = switch (try self.isNullPtr(inst, ty, operand)) {
        .eflags => |cc| .{ .eflags = cc.negate() },
        else => unreachable,
    };
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

fn airIsErr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
    const operand = try self.resolveInst(un_op);
    const ty = self.typeOf(un_op);
    const result = try self.isErr(inst, ty, operand);
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

fn airIsErrPtr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
    const operand = try self.resolveInst(un_op);
    const ty = self.typeOf(un_op);
    const result = try self.isErrPtr(inst, ty, operand);
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

fn airIsNonErr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
    const operand = try self.resolveInst(un_op);
    const ty = self.typeOf(un_op);
    const result = try self.isNonErr(inst, ty, operand);
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

fn airIsNonErrPtr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
    const operand = try self.resolveInst(un_op);
    const ty = self.typeOf(un_op);
    const result = try self.isNonErrPtr(inst, ty, operand);
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

fn airLoop(self: *CodeGen, inst: Air.Inst.Index) !void {
    // A loop is a setup to be able to jump back to the beginning.
    const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
    const loop = self.air.extraData(Air.Block, ty_pl.payload);
    const body: []const Air.Inst.Index = @ptrCast(self.air.extra[loop.end..][0..loop.data.body_len]);

    self.scope_generation += 1;
    const state = try self.saveState();

    try self.loops.putNoClobber(self.gpa, inst, .{
        .state = state,
        .target = @intCast(self.mir_instructions.len),
    });
    defer assert(self.loops.remove(inst));

    try self.genBodyBlock(body);
}

fn lowerBlock(self: *CodeGen, inst: Air.Inst.Index, body: []const Air.Inst.Index) !void {
    // A block is a setup to be able to jump to the end.
    const inst_tracking_i = self.inst_tracking.count();
    self.inst_tracking.putAssumeCapacityNoClobber(inst, .init(.unreach));

    self.scope_generation += 1;
    try self.blocks.putNoClobber(self.gpa, inst, .{ .state = self.initRetroactiveState() });
    const liveness = self.liveness.getBlock(inst);

    try self.genBody(body);

    var block_data = self.blocks.fetchRemove(inst).?;
    defer block_data.value.deinit(self.gpa);
    if (block_data.value.relocs.items.len > 0) {
        try self.restoreState(block_data.value.state, liveness.deaths, .{
            .emit_instructions = false,
            .update_tracking = true,
            .resurrect = true,
            .close_scope = true,
        });
        const block_relocs_last_index = block_data.value.relocs.items.len - 1;
        for (if (block_data.value.relocs.items[block_relocs_last_index] == self.mir_instructions.len - 1) block_relocs: {
            _ = self.mir_instructions.pop();
            break :block_relocs block_data.value.relocs.items[0..block_relocs_last_index];
        } else block_data.value.relocs.items) |block_reloc| self.performReloc(block_reloc);
    }

    if (std.debug.runtime_safety) assert(self.inst_tracking.getIndex(inst).? == inst_tracking_i);
    const tracking = &self.inst_tracking.values()[inst_tracking_i];
    if (self.liveness.isUnused(inst)) try tracking.die(self, inst);
    self.getValueIfFree(tracking.short, inst);
}

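/// Lowers a switch either as a jump table (ELF, non-PIC, when the prong values
/// are dense enough) or as a chain of compares and conditional jumps.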
fn lowerSwitchBr(
    self: *CodeGen,
    inst: Air.Inst.Index,
    switch_br: Air.UnwrappedSwitch,
    condition: MCValue,
    condition_dies: bool,
    is_loop: bool,
) !void {
    const zcu = self.pt.zcu;
    const condition_ty = self.typeOf(switch_br.operand);

    const ExpectedContents = extern struct {
        liveness_deaths: [1 << 8 | 1]Air.Inst.Index,
        bigint_limbs: [std.math.big.int.calcTwosCompLimbCount(1 << 8)]std.math.big.Limb,
        relocs: [1 << 6]Mir.Inst.Index,
    };
    var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) =
        std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);
    const allocator = stack.get();

    self.scope_generation += 1;
    const state = try self.saveState();

    const liveness = try self.liveness.getSwitchBr(allocator, inst, switch_br.cases_len + 1);
    defer allocator.free(liveness.deaths);

    if (!self.mod.pic and self.target.ofmt == .elf) table: {
        var prong_items: u32 = 0;
        var min: ?Value = null;
        var max: ?Value = null;
        {
            var cases_it = switch_br.iterateCases();
            while (cases_it.next()) |case| {
                prong_items += @intCast(case.items.len + case.ranges.len);
                for (case.items) |item| {
                    const val = Value.fromInterned(item.toInterned().?);
                    if (min == null or val.compareHetero(.lt, min.?, zcu)) min = val;
                    if (max == null or val.compareHetero(.gt, max.?, zcu)) max = val;
                }
                for (case.ranges) |range| {
                    const low = Value.fromInterned(range[0].toInterned().?);
                    if (min == null or low.compareHetero(.lt, min.?, zcu)) min = low;
                    const high = Value.fromInterned(range[1].toInterned().?);
                    if (max == null or high.compareHetero(.gt, max.?, zcu)) max = high;
                }
            }
        }
        // This condition also triggers for switches with no non-else prongs and switches on bool.
        if (prong_items < 1 << 2 or prong_items > 1 << 8) break :table;

        var min_space: Value.BigIntSpace = undefined;
        const min_bigint = min.?.toBigInt(&min_space, zcu);
        var max_space: Value.BigIntSpace = undefined;
        const max_bigint = max.?.toBigInt(&max_space, zcu);
        const limbs = try allocator.alloc(
            std.math.big.Limb,
            @max(min_bigint.limbs.len, max_bigint.limbs.len) + 1,
        );
        defer allocator.free(limbs);
        const table_len = table_len: {
            var table_len_bigint: std.math.big.int.Mutable = .{ .limbs = limbs, .positive = undefined, .len = undefined };
            table_len_bigint.sub(max_bigint, min_bigint);
            assert(table_len_bigint.positive); // min <= max
            break :table_len @as(u11, table_len_bigint.toConst().to(u10) catch break :table) + 1; // no more than a 1024 entry table
        };
        assert(prong_items <= table_len); // each prong item introduces at least one unique integer to the range
        if (prong_items < table_len >> 2) break :table; // no more than 75% waste

        const condition_index = if (condition_dies and condition.isModifiable()) condition else condition_index: {
            const condition_index = try self.allocTempRegOrMem(condition_ty, true);
            try self.genCopy(condition_ty, condition_index, condition, .{});
            break :condition_index condition_index;
        };
        try self.spillEflagsIfOccupied();
        if (min.?.orderAgainstZero(zcu).compare(.neq)) try self.genBinOpMir(
            .{ ._, .sub },
            condition_ty,
            condition_index,
            .{ .air_ref = Air.internedToRef(min.?.toIntern()) },
        );
        const else_reloc = if (switch_br.else_body_len > 0) else_reloc: {
            try self.genBinOpMir(.{ ._, .cmp }, condition_ty, condition_index, .{ .immediate = table_len - 1 });
            break :else_reloc try self.asmJccReloc(.a, undefined);
        } else undefined;
        const table_start: u31 = @intCast(self.mir_table.items.len);
        {
            const condition_index_reg = if (condition_index.isRegister())
                condition_index.getReg().?
            else
                try self.copyToTmpRegister(.usize, condition_index);
            const condition_index_lock = self.register_manager.lockReg(condition_index_reg);
            defer if (condition_index_lock) |lock| self.register_manager.unlockReg(lock);
            try self.truncateRegister(condition_ty, condition_index_reg);
            const ptr_size = @divExact(self.target.ptrBitWidth(), 8);
            try self.asmMemory(.{ ._mp, .j }, .{
                .base = .table,
                .mod = .{ .rm = .{
                    .size = .ptr,
                    .index = registerAlias(condition_index_reg, ptr_size),
                    .scale = .fromFactor(@intCast(ptr_size)),
                    .disp = table_start * ptr_size,
                } },
            });
        }
        const else_reloc_marker: u32 = 0;
        assert(self.mir_instructions.len > else_reloc_marker);
        try self.mir_table.appendNTimes(self.gpa, else_reloc_marker, table_len);
        if (is_loop) try self.loop_switches.putNoClobber(self.gpa, inst, .{
            .start = table_start,
            .len = table_len,
            .min = min.?,
            .else_relocs = if (switch_br.else_body_len > 0) .{ .forward = .empty } else .@"unreachable",
        });
        defer if (is_loop) {
            var loop_switch_data = self.loop_switches.fetchRemove(inst).?.value;
            switch (loop_switch_data.else_relocs) {
                .@"unreachable", .backward => {},
                .forward => |*else_relocs| else_relocs.deinit(self.gpa),
            }
        };
        var cases_it = switch_br.iterateCases();
        while (cases_it.next()) |case| {
            {
                const table = self.mir_table.items[table_start..][0..table_len];
                for (case.items) |item| {
                    const val = Value.fromInterned(item.toInterned().?);
                    var val_space: Value.BigIntSpace = undefined;
                    const val_bigint = val.toBigInt(&val_space, zcu);
                    var index_bigint: std.math.big.int.Mutable = .{ .limbs = limbs, .positive = undefined, .len = undefined };
                    index_bigint.sub(val_bigint, min_bigint);
                    table[index_bigint.toConst().to(u10) catch unreachable] = @intCast(self.mir_instructions.len);
                }
                for (case.ranges) |range| {
                    var low_space: Value.BigIntSpace = undefined;
                    const low_bigint = Value.fromInterned(range[0].toInterned().?).toBigInt(&low_space, zcu);
                    var high_space: Value.BigIntSpace = undefined;
                    const high_bigint = Value.fromInterned(range[1].toInterned().?).toBigInt(&high_space, zcu);
                    var index_bigint: std.math.big.int.Mutable = .{ .limbs = limbs, .positive = undefined, .len = undefined };
                    index_bigint.sub(low_bigint, min_bigint);
                    const start = index_bigint.toConst().to(u10) catch unreachable;
                    index_bigint.sub(high_bigint, min_bigint);
                    const end = @as(u11, index_bigint.toConst().to(u10) catch unreachable) + 1;
                    @memset(table[start..end], @intCast(self.mir_instructions.len));
                }
            }

            for (liveness.deaths[case.idx]) |operand| try self.processDeath(operand);

            try self.genBodyBlock(case.body);
            try self.restoreState(state, &.{}, .{
                .emit_instructions = false,
                .update_tracking = true,
                .resurrect = true,
                .close_scope = true,
            });
        }
        if (switch_br.else_body_len > 0) {
            const else_body = cases_it.elseBody();

            const else_deaths = liveness.deaths.len - 1;
            for (liveness.deaths[else_deaths]) |operand| try self.processDeath(operand);

            self.performReloc(else_reloc);
            if (is_loop) {
                const loop_switch_data = self.loop_switches.getPtr(inst).?;
                for (loop_switch_data.else_relocs.forward.items) |reloc| self.performReloc(reloc);
                loop_switch_data.else_relocs.forward.deinit(self.gpa);
                loop_switch_data.else_relocs = .{ .backward = @intCast(self.mir_instructions.len) };
            }
            for (self.mir_table.items[table_start..][0..table_len]) |*entry| if (entry.* == else_reloc_marker) {
                entry.* = @intCast(self.mir_instructions.len);
            };

            try self.genBodyBlock(else_body);
            try self.restoreState(state, &.{}, .{
                .emit_instructions = false,
                .update_tracking = true,
                .resurrect = true,
                .close_scope = true,
            });
        }
        return;
    }

    const signedness = if (condition_ty.isAbiInt(zcu)) condition_ty.intInfo(zcu).signedness else .unsigned;
    var cases_it = switch_br.iterateCases();
    while (cases_it.next()) |case| {
        const relocs = try allocator.alloc(Mir.Inst.Index, case.items.len + case.ranges.len);
        defer allocator.free(relocs);

        try self.spillEflagsIfOccupied();
        for (case.items, relocs[0..case.items.len]) |item, *reloc| {
            const item_mcv = try self.resolveInst(item);
            const cc: Condition = switch (condition) {
                .eflags => |cc| switch (item_mcv.immediate) {
                    0 => cc.negate(),
                    1 => cc,
                    else => unreachable,
                },
                else => cc: {
                    try self.genBinOpMir(.{ ._, .cmp }, condition_ty, condition, item_mcv);
                    break :cc .e;
                },
            };
            reloc.* = try self.asmJccReloc(cc, undefined);
        }

        for (case.ranges, relocs[case.items.len..]) |range, *reloc| {
            const min_mcv = try self.resolveInst(range[0]);
            const max_mcv = try self.resolveInst(range[1]);
            // `null` means always false.
            const lt_min: ?Condition = switch (condition) {
                .eflags => |cc| switch (min_mcv.immediate) {
                    0 => null, // condition never <0
                    1 => cc.negate(),
                    else => unreachable,
                },
                else => cc: {
                    try self.genBinOpMir(.{ ._, .cmp }, condition_ty, condition, min_mcv);
                    break :cc switch (signedness) {
                        .unsigned => .b,
                        .signed => .l,
                    };
                },
            };
            const lt_min_reloc = if (lt_min) |cc| r: {
                break :r try self.asmJccReloc(cc, undefined);
            } else null;
            // `null` means always true.
            const lte_max: ?Condition = switch (condition) {
                .eflags => |cc| switch (max_mcv.immediate) {
                    0 => cc.negate(),
                    1 => null, // condition always >=1
                    else => unreachable,
                },
                else => cc: {
                    try self.genBinOpMir(.{ ._, .cmp }, condition_ty, condition, max_mcv);
                    break :cc switch (signedness) {
                        .unsigned => .be,
                        .signed => .le,
                    };
                },
            };
            // "Success" case is in `reloc`....
            if (lte_max) |cc| {
                reloc.* = try self.asmJccReloc(cc, undefined);
            } else {
                reloc.* = try self.asmJmpReloc(undefined);
            }
            // ...and "fail" case falls through to next checks.
            if (lt_min_reloc) |r| self.performReloc(r);
        }

        // The jump to skip this case if the conditions all failed.
        const skip_case_reloc = try self.asmJmpReloc(undefined);

        for (liveness.deaths[case.idx]) |operand| try self.processDeath(operand);

        // Relocate all success cases to the body we're about to generate.
        for (relocs) |reloc| self.performReloc(reloc);
        try self.genBodyBlock(case.body);
        try self.restoreState(state, &.{}, .{
            .emit_instructions = false,
            .update_tracking = true,
            .resurrect = true,
            .close_scope = true,
        });

        // Relocate the "skip" branch to fall through to the next case.
        self.performReloc(skip_case_reloc);
    }
    if (switch_br.else_body_len > 0) {
        const else_body = cases_it.elseBody();

        const else_deaths = liveness.deaths.len - 1;
        for (liveness.deaths[else_deaths]) |operand| try self.processDeath(operand);

        try self.genBodyBlock(else_body);
        try self.restoreState(state, &.{}, .{
            .emit_instructions = false,
            .update_tracking = true,
            .resurrect = true,
            .close_scope = true,
        });
    }
}

fn airSwitchBr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const switch_br = self.air.unwrapSwitch(inst);
    const condition = try self.resolveInst(switch_br.operand);

    // If the condition dies here in this switch instruction, process
    // that death now instead of later as this has an effect on
    // whether it needs to be spilled in the branches
    const condition_dies = self.liveness.operandDies(inst, 0);
    if (condition_dies) {
        if (switch_br.operand.toIndex()) |op_inst| try self.processDeath(op_inst);
    }
    try self.lowerSwitchBr(inst, switch_br, condition, condition_dies, false);

    // We already took care of pl_op.operand earlier, so there's nothing left to do
}

fn airLoopSwitchBr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const switch_br = self.air.unwrapSwitch(inst);
    const condition = try self.resolveInst(switch_br.operand);

    const mat_cond = if (condition.isModifiable() and
        self.reuseOperand(inst, switch_br.operand, 0, condition))
        condition
    else mat_cond: {
        const mat_cond = try self.allocRegOrMem(inst, true);
        try self.genCopy(self.typeOf(switch_br.operand), mat_cond, condition, .{});
        break :mat_cond mat_cond;
    };
    self.inst_tracking.putAssumeCapacityNoClobber(inst, .init(mat_cond));

    // If the condition dies here in this switch instruction, process
    // that death now instead of later as this has an effect on
    // whether it needs to be spilled in the branches
    if (self.liveness.operandDies(inst, 0)) {
        if (switch_br.operand.toIndex()) |op_inst| try self.processDeath(op_inst);
    }

    // Ensure a register is available for dispatch.
    if (!mat_cond.isRegister()) _ = try self.register_manager.allocReg(null, abi.RegisterClass.gp);

    self.scope_generation += 1;
    const state = try self.saveState();

    try self.loops.putNoClobber(self.gpa, inst, .{
        .state = state,
        .target = @intCast(self.mir_instructions.len),
    });
    defer assert(self.loops.remove(inst));

    // Stop tracking block result without forgetting tracking info
    try self.freeValue(mat_cond);

    try self.lowerSwitchBr(inst, switch_br, mat_cond, true, true);

    try self.processDeath(inst);
}

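/// Lowers a dispatch to a loop-based switch: copies the new condition into the
/// block result location, restores the loop state, and either re-enters the
/// jump table or jumps back to the loop target.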
fn airSwitchDispatch(self: *CodeGen, inst: Air.Inst.Index) !void {
    const br = self.air.instructions.items(.data)[@intFromEnum(inst)].br;

    const block_ty = self.typeOfIndex(br.block_inst);
    const loop_data = self.loops.getPtr(br.block_inst).?;
    const block_tracking = self.inst_tracking.getPtr(br.block_inst).?;
    {
        try self.getValue(block_tracking.short, null);
        const src_mcv = try self.resolveInst(br.operand);

        if (self.reuseOperandAdvanced(inst, br.operand, 0, src_mcv, br.block_inst)) {
            try self.getValue(block_tracking.short, br.block_inst);
            // .long = .none to avoid merging operand and block result stack frames.
            const current_tracking: InstTracking = .{ .long = .none, .short = src_mcv };
            try current_tracking.materializeUnsafe(self, br.block_inst, block_tracking.*);
            for (current_tracking.getRegs()) |src_reg| self.register_manager.freeReg(src_reg);
        } else {
            try self.getValue(block_tracking.short, br.block_inst);
            try self.genCopy(block_ty, block_tracking.short, try self.resolveInst(br.operand), .{});
        }
    }

    // Process operand death so that it is properly accounted for in the State below.
    if (self.liveness.operandDies(inst, 0)) {
        if (br.operand.toIndex()) |op_inst| try self.processDeath(op_inst);
    }

    try self.restoreState(loop_data.state, &.{}, .{
        .emit_instructions = true,
        .update_tracking = false,
        .resurrect = false,
        .close_scope = false,
    });

    if (self.loop_switches.getPtr(br.block_inst)) |table| {
        const condition_ty = self.typeOf(br.operand);
        const condition_mcv = block_tracking.short;
        try self.spillEflagsIfOccupied();
        if (table.min.orderAgainstZero(self.pt.zcu).compare(.neq)) try self.genBinOpMir(
            .{ ._, .sub },
            condition_ty,
            condition_mcv,
            .{ .air_ref = Air.internedToRef(table.min.toIntern()) },
        );
        switch (table.else_relocs) {
            .@"unreachable" => {},
            .forward => |*else_relocs| {
                try self.genBinOpMir(.{ ._, .cmp }, condition_ty, condition_mcv, .{ .immediate = table.len - 1 });
                try else_relocs.append(self.gpa, try self.asmJccReloc(.a, undefined));
            },
            .backward => |else_reloc| {
                try self.genBinOpMir(.{ ._, .cmp }, condition_ty, condition_mcv, .{ .immediate = table.len - 1 });
                _ = try self.asmJccReloc(.a, else_reloc);
            },
        }
        {
            const condition_index_reg = if (condition_mcv.isRegister()) condition_mcv.getReg().? else cond: {
                const condition_index_reg =
                    RegisterManager.regAtTrackedIndex(@intCast(loop_data.state.free_registers.findFirstSet().?));
                try self.genSetReg(condition_index_reg, condition_ty, condition_mcv, .{});
                break :cond condition_index_reg;
            };
            const condition_index_lock = self.register_manager.lockReg(condition_index_reg);
            defer if (condition_index_lock) |lock| self.register_manager.unlockReg(lock);
            try self.truncateRegister(condition_ty, condition_index_reg);
            const ptr_size = @divExact(self.target.ptrBitWidth(), 8);
            try self.asmMemory(.{ ._mp, .j }, .{
                .base = .table,
                .mod = .{ .rm = .{
                    .size = .ptr,
                    .index = registerAlias(condition_index_reg, ptr_size),
                    .scale = .fromFactor(@intCast(ptr_size)),
                    .disp = @intCast(table.start * ptr_size),
                } },
            });
        }

        return self.finishAir(inst, .none, .{ br.operand, .none, .none });
    }
    // No jump table: jump straight back to the dispatch target of the loop.
    _ = try self.asmJmpReloc(loop_data.target);

    // Stop tracking block result without forgetting tracking info
    try self.freeValue(block_tracking.short);
}

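/// Patches a previously emitted jump placeholder so that it targets the next
/// MIR instruction to be emitted.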
fn performReloc(self: *CodeGen, reloc: Mir.Inst.Index) void {
    const next_inst: u32 = @intCast(self.mir_instructions.len);
    switch (self.mir_instructions.items(.tag)[reloc]) {
        .j => {},
        .pseudo => switch (self.mir_instructions.items(.ops)[reloc]) {
            .pseudo_j_z_and_np_inst, .pseudo_j_nz_or_p_inst => {},
            else => unreachable,
        },
        else => unreachable,
    }
    self.mir_instructions.items(.data)[reloc].inst.inst = next_inst;
}

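/// Lowers a `br`: materializes the operand as the block result (reusing it when
/// possible), reconciles register and stack state with the block entry, and
/// emits a forward jump to be relocated when the block ends.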
fn airBr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const zcu = self.pt.zcu;
    const br = self.air.instructions.items(.data)[@intFromEnum(inst)].br;

    const block_ty = self.typeOfIndex(br.block_inst);
    const block_unused =
        !block_ty.hasRuntimeBitsIgnoreComptime(zcu) or self.liveness.isUnused(br.block_inst);
    const block_tracking = self.inst_tracking.getPtr(br.block_inst).?;
    const block_data = self.blocks.getPtr(br.block_inst).?;
    const first_br = block_data.relocs.items.len == 0;
    const block_result = result: {
        if (block_unused) break :result .none;

        if (!first_br) try self.getValue(block_tracking.short, null);
        const src_mcv = try self.resolveInst(br.operand);

        if (self.reuseOperandAdvanced(inst, br.operand, 0, src_mcv, br.block_inst)) {
            if (first_br) break :result src_mcv;

            try self.getValue(block_tracking.short, br.block_inst);
            // .long = .none to avoid merging operand and block result stack frames.
            const current_tracking: InstTracking = .{ .long = .none, .short = src_mcv };
            try current_tracking.materializeUnsafe(self, br.block_inst, block_tracking.*);
            for (current_tracking.getRegs()) |src_reg| self.register_manager.freeReg(src_reg);
            break :result block_tracking.short;
        }

        const dst_mcv = if (first_br) try self.allocRegOrMem(br.block_inst, true) else dst: {
            try self.getValue(block_tracking.short, br.block_inst);
            break :dst block_tracking.short;
        };
        try self.genCopy(block_ty, dst_mcv, try self.resolveInst(br.operand), .{});
        break :result dst_mcv;
    };

    // Process operand death so that it is properly accounted for in the State below.
    if (self.liveness.operandDies(inst, 0)) {
        if (br.operand.toIndex()) |op_inst| try self.processDeath(op_inst);
    }

    if (first_br) {
        block_tracking.* = .init(block_result);
        try self.saveRetroactiveState(&block_data.state);
    } else try self.restoreState(block_data.state, &.{}, .{
        .emit_instructions = true,
        .update_tracking = false,
        .resurrect = false,
        .close_scope = false,
    });

    // Emit a jump with a relocation. It will be patched up after the block ends.
    // Leave the jump offset undefined
    const jmp_reloc = try self.asmJmpReloc(undefined);
    try block_data.relocs.append(self.gpa, jmp_reloc);

    // Stop tracking block result without forgetting tracking info
    try self.freeValue(block_tracking.short);
}

fn airRepeat(self: *CodeGen, inst: Air.Inst.Index) !void {
    const loop_inst = self.air.instructions.items(.data)[@intFromEnum(inst)].repeat.loop_inst;
    const repeat_info = self.loops.get(loop_inst).?;
    try self.restoreState(repeat_info.state, &.{}, .{
        .emit_instructions = true,
        .update_tracking = false,
        .resurrect = false,
        .close_scope = true,
    });
    _ = try self.asmJmpReloc(repeat_info.target);
}

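/// Lowers inline assembly: allocates outputs, inputs, and clobbers according to
/// their constraints, then parses the AT&T-flavored source line by line into MIR
/// instructions, resolving `%[name]` operands and local labels along the way.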
fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
|
|
const pt = self.pt;
|
|
const zcu = pt.zcu;
|
|
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
|
|
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
|
|
const clobbers_len: u31 = @truncate(extra.data.flags);
|
|
var extra_i: usize = extra.end;
|
|
const outputs: []const Air.Inst.Ref =
|
|
@ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]);
|
|
extra_i += outputs.len;
|
|
const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]);
|
|
extra_i += inputs.len;
|
|
|
|
var result: MCValue = .none;
|
|
var args: std.ArrayList(MCValue) = .init(self.gpa);
|
|
try args.ensureTotalCapacity(outputs.len + inputs.len);
|
|
defer {
|
|
for (args.items) |arg| if (arg.getReg()) |reg| self.register_manager.unlockReg(.{
|
|
.tracked_index = RegisterManager.indexOfRegIntoTracked(reg) orelse continue,
|
|
});
|
|
args.deinit();
|
|
}
|
|
var arg_map: std.StringHashMap(u8) = .init(self.gpa);
|
|
try arg_map.ensureTotalCapacity(@intCast(outputs.len + inputs.len));
|
|
defer arg_map.deinit();
|
|
|
|
var outputs_extra_i = extra_i;
|
|
for (outputs) |output| {
|
|
const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
|
|
const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
|
|
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
|
|
// This equation accounts for the fact that even if we have exactly 4 bytes
|
|
// for the string, we still use the next u32 for the null terminator.
|
|
extra_i += (constraint.len + name.len + (2 + 3)) / 4;
|
|
|
|
const maybe_inst = switch (output) {
|
|
.none => inst,
|
|
else => null,
|
|
};
|
|
const ty = switch (output) {
|
|
.none => self.typeOfIndex(inst),
|
|
else => self.typeOf(output).childType(zcu),
|
|
};
|
|
const is_read = switch (constraint[0]) {
|
|
'=' => false,
|
|
'+' => read: {
|
|
if (output == .none) return self.fail(
|
|
"read-write constraint unsupported for asm result: '{s}'",
|
|
.{constraint},
|
|
);
|
|
break :read true;
|
|
},
|
|
else => return self.fail("invalid constraint: '{s}'", .{constraint}),
|
|
};
|
|
const is_early_clobber = constraint[1] == '&';
|
|
const rest = constraint[@as(usize, 1) + @intFromBool(is_early_clobber) ..];
|
|
const arg_mcv: MCValue = arg_mcv: {
|
|
const arg_maybe_reg: ?Register = if (std.mem.eql(u8, rest, "r") or
|
|
std.mem.eql(u8, rest, "f") or std.mem.eql(u8, rest, "x"))
|
|
registerAlias(
|
|
self.register_manager.tryAllocReg(maybe_inst, switch (rest[0]) {
|
|
'r' => abi.RegisterClass.gp,
|
|
'f' => abi.RegisterClass.x87,
|
|
'x' => abi.RegisterClass.sse,
|
|
else => unreachable,
|
|
}) orelse return self.fail("ran out of registers lowering inline asm", .{}),
|
|
@intCast(ty.abiSize(zcu)),
|
|
)
|
|
else if (std.mem.eql(u8, rest, "m"))
|
|
if (output != .none) null else return self.fail(
|
|
"memory constraint unsupported for asm result: '{s}'",
|
|
.{constraint},
|
|
)
|
|
else if (std.mem.eql(u8, rest, "g") or
|
|
std.mem.eql(u8, rest, "rm") or std.mem.eql(u8, rest, "mr") or
|
|
std.mem.eql(u8, rest, "r,m") or std.mem.eql(u8, rest, "m,r"))
|
|
self.register_manager.tryAllocReg(maybe_inst, abi.RegisterClass.gp) orelse
|
|
if (output != .none)
|
|
null
|
|
else
|
|
return self.fail("ran out of registers lowering inline asm", .{})
|
|
else if (std.mem.startsWith(u8, rest, "{") and std.mem.endsWith(u8, rest, "}"))
|
|
parseRegName(rest["{".len .. rest.len - "}".len]) orelse
|
|
return self.fail("invalid register constraint: '{s}'", .{constraint})
|
|
else if (rest.len == 1 and std.ascii.isDigit(rest[0])) {
|
|
const index = std.fmt.charToDigit(rest[0], 10) catch unreachable;
|
|
if (index >= args.items.len) return self.fail("constraint out of bounds: '{s}'", .{
|
|
constraint,
|
|
});
|
|
break :arg_mcv args.items[index];
|
|
} else return self.fail("invalid constraint: '{s}'", .{constraint});
|
|
break :arg_mcv if (arg_maybe_reg) |reg| .{ .register = reg } else arg: {
|
|
const ptr_mcv = try self.resolveInst(output);
|
|
switch (ptr_mcv) {
|
|
.immediate => |addr| if (std.math.cast(i32, @as(i64, @bitCast(addr)))) |_|
|
|
break :arg ptr_mcv.deref(),
|
|
.register, .register_offset, .lea_frame => break :arg ptr_mcv.deref(),
|
|
else => {},
|
|
}
|
|
break :arg .{ .indirect = .{ .reg = try self.copyToTmpRegister(.usize, ptr_mcv) } };
|
|
};
|
|
};
|
|
if (arg_mcv.getReg()) |reg| if (RegisterManager.indexOfRegIntoTracked(reg)) |tracked_index| {
|
|
try self.register_manager.getRegIndex(tracked_index, if (output == .none) inst else null);
|
|
_ = self.register_manager.lockRegIndexAssumeUnused(tracked_index);
|
|
};
|
|
if (!std.mem.eql(u8, name, "_"))
|
|
arg_map.putAssumeCapacityNoClobber(name, @intCast(args.items.len));
|
|
args.appendAssumeCapacity(arg_mcv);
|
|
if (output == .none) result = arg_mcv;
|
|
if (is_read) try self.load(arg_mcv, self.typeOf(output), .{ .air_ref = output });
|
|
}
|
|
|
|
for (inputs) |input| {
|
|
const input_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
|
|
const constraint = std.mem.sliceTo(input_bytes, 0);
|
|
const name = std.mem.sliceTo(input_bytes[constraint.len + 1 ..], 0);
|
|
// This equation accounts for the fact that even if we have exactly 4 bytes
|
|
// for the string, we still use the next u32 for the null terminator.
|
|
extra_i += (constraint.len + name.len + (2 + 3)) / 4;
|
|
|
|
const ty = self.typeOf(input);
|
|
const input_mcv = try self.resolveInst(input);
|
|
const arg_mcv: MCValue = if (std.mem.eql(u8, constraint, "r") or
|
|
std.mem.eql(u8, constraint, "f") or std.mem.eql(u8, constraint, "x"))
|
|
arg: {
|
|
const rc = switch (constraint[0]) {
|
|
'r' => abi.RegisterClass.gp,
|
|
'f' => abi.RegisterClass.x87,
|
|
'x' => abi.RegisterClass.sse,
|
|
else => unreachable,
|
|
};
|
|
if (input_mcv.isRegister() and
|
|
rc.isSet(RegisterManager.indexOfRegIntoTracked(input_mcv.getReg().?).?))
|
|
break :arg input_mcv;
|
|
const reg = try self.register_manager.allocReg(null, rc);
|
|
try self.genSetReg(reg, ty, input_mcv, .{});
|
|
break :arg .{ .register = registerAlias(reg, @intCast(ty.abiSize(zcu))) };
|
|
} else if (std.mem.eql(u8, constraint, "i") or std.mem.eql(u8, constraint, "n"))
|
|
switch (input_mcv) {
|
|
.immediate => |imm| .{ .immediate = imm },
|
|
else => return self.fail("immediate operand requires comptime value: '{s}'", .{
|
|
constraint,
|
|
}),
|
|
}
|
|
else if (std.mem.eql(u8, constraint, "m")) arg: {
|
|
switch (input_mcv) {
|
|
.memory => |addr| if (std.math.cast(i32, @as(i64, @bitCast(addr)))) |_|
|
|
break :arg input_mcv,
|
|
.indirect, .load_frame => break :arg input_mcv,
|
|
.load_symbol, .load_direct, .load_got, .load_tlv => {},
|
|
else => {
|
|
const temp_mcv = try self.allocTempRegOrMem(ty, false);
|
|
try self.genCopy(ty, temp_mcv, input_mcv, .{});
|
|
break :arg temp_mcv;
|
|
},
|
|
}
|
|
const addr_reg = self.register_manager.tryAllocReg(null, abi.RegisterClass.gp) orelse {
|
|
const temp_mcv = try self.allocTempRegOrMem(ty, false);
|
|
try self.genCopy(ty, temp_mcv, input_mcv, .{});
|
|
break :arg temp_mcv;
|
|
};
|
|
try self.genSetReg(addr_reg, .usize, input_mcv.address(), .{});
|
|
break :arg .{ .indirect = .{ .reg = addr_reg } };
|
|
} else if (std.mem.eql(u8, constraint, "g") or
|
|
std.mem.eql(u8, constraint, "rm") or std.mem.eql(u8, constraint, "mr") or
|
|
std.mem.eql(u8, constraint, "r,m") or std.mem.eql(u8, constraint, "m,r"))
|
|
arg: {
|
|
switch (input_mcv) {
|
|
.register, .indirect, .load_frame => break :arg input_mcv,
|
|
.memory => |addr| if (std.math.cast(i32, @as(i64, @bitCast(addr)))) |_|
|
|
break :arg input_mcv,
|
|
else => {},
|
|
}
|
|
const temp_mcv = try self.allocTempRegOrMem(ty, true);
|
|
try self.genCopy(ty, temp_mcv, input_mcv, .{});
|
|
break :arg temp_mcv;
|
|
} else if (std.mem.eql(u8, constraint, "X"))
|
|
input_mcv
|
|
else if (std.mem.startsWith(u8, constraint, "{") and std.mem.endsWith(u8, constraint, "}")) arg: {
|
|
const reg = parseRegName(constraint["{".len .. constraint.len - "}".len]) orelse
|
|
return self.fail("invalid register constraint: '{s}'", .{constraint});
|
|
try self.register_manager.getReg(reg, null);
|
|
try self.genSetReg(reg, ty, input_mcv, .{});
|
|
break :arg .{ .register = reg };
|
|
} else if (constraint.len == 1 and std.ascii.isDigit(constraint[0])) arg: {
|
|
const index = std.fmt.charToDigit(constraint[0], 10) catch unreachable;
|
|
if (index >= args.items.len) return self.fail("constraint out of bounds: '{s}'", .{constraint});
|
|
try self.genCopy(ty, args.items[index], input_mcv, .{});
|
|
break :arg args.items[index];
|
|
} else return self.fail("invalid constraint: '{s}'", .{constraint});
|
|
if (arg_mcv.getReg()) |reg| if (RegisterManager.indexOfRegIntoTracked(reg)) |_| {
|
|
_ = self.register_manager.lockReg(reg);
|
|
};
|
|
if (!std.mem.eql(u8, name, "_"))
|
|
arg_map.putAssumeCapacityNoClobber(name, @intCast(args.items.len));
|
|
args.appendAssumeCapacity(arg_mcv);
|
|
}
|
|
|
|
{
|
|
var clobber_i: u32 = 0;
|
|
while (clobber_i < clobbers_len) : (clobber_i += 1) {
|
|
const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
|
|
// This equation accounts for the fact that even if we have exactly 4 bytes
|
|
// for the string, we still use the next u32 for the null terminator.
|
|
extra_i += clobber.len / 4 + 1;
|
|
|
|
if (std.mem.eql(u8, clobber, "") or std.mem.eql(u8, clobber, "memory")) {
|
|
// ok, sure
|
|
} else if (std.mem.eql(u8, clobber, "cc") or
|
|
std.mem.eql(u8, clobber, "flags") or
|
|
std.mem.eql(u8, clobber, "eflags") or
|
|
std.mem.eql(u8, clobber, "rflags"))
|
|
{
|
|
try self.spillEflagsIfOccupied();
|
|
} else {
|
|
try self.register_manager.getReg(parseRegName(clobber) orelse
|
|
return self.fail("invalid clobber: '{s}'", .{clobber}), null);
|
|
}
|
|
}
|
|
}
|
|
|
|
const Label = struct {
|
|
target: Mir.Inst.Index = undefined,
|
|
pending_relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty,
|
|
|
|
const Kind = enum { definition, reference };
|
|
|
|
fn isValid(kind: Kind, name: []const u8) bool {
|
|
for (name, 0..) |c, i| switch (c) {
|
|
else => return false,
|
|
'$' => if (i == 0) return false,
|
|
'.' => {},
|
|
'0'...'9' => if (i == 0) switch (kind) {
|
|
.definition => if (name.len != 1) return false,
|
|
.reference => {
|
|
if (name.len != 2) return false;
|
|
switch (name[1]) {
|
|
else => return false,
|
|
'B', 'F', 'b', 'f' => {},
|
|
}
|
|
},
|
|
},
|
|
'@', 'A'...'Z', '_', 'a'...'z' => {},
|
|
};
|
|
return name.len > 0;
|
|
}
|
|
};
|
|
var labels: std.StringHashMapUnmanaged(Label) = .empty;
|
|
defer {
|
|
var label_it = labels.valueIterator();
|
|
while (label_it.next()) |label| label.pending_relocs.deinit(self.gpa);
|
|
labels.deinit(self.gpa);
|
|
}
|
|
|
|
const asm_source = std.mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len];
|
|
var line_it = std.mem.tokenizeAny(u8, asm_source, "\n\r;");
|
|
next_line: while (line_it.next()) |line| {
|
|
var mnem_it = std.mem.tokenizeAny(u8, line, " \t");
|
|
var prefix: encoder.Instruction.Prefix = .none;
|
|
const mnem_str = while (mnem_it.next()) |mnem_str| {
|
|
if (mnem_str[0] == '#') continue :next_line;
|
|
if (std.mem.startsWith(u8, mnem_str, "//")) continue :next_line;
|
|
if (std.meta.stringToEnum(encoder.Instruction.Prefix, mnem_str)) |pre| {
|
|
if (prefix != .none) return self.fail("extra prefix: '{s}'", .{mnem_str});
|
|
prefix = pre;
|
|
continue;
|
|
}
|
|
if (!std.mem.endsWith(u8, mnem_str, ":")) break mnem_str;
|
|
const label_name = mnem_str[0 .. mnem_str.len - ":".len];
|
|
if (!Label.isValid(.definition, label_name))
|
|
return self.fail("invalid label: '{s}'", .{label_name});
|
|
const label_gop = try labels.getOrPut(self.gpa, label_name);
|
|
if (!label_gop.found_existing) label_gop.value_ptr.* = .{} else {
|
|
const anon = std.ascii.isDigit(label_name[0]);
|
|
if (!anon and label_gop.value_ptr.pending_relocs.items.len == 0)
|
|
return self.fail("redefined label: '{s}'", .{label_name});
|
|
for (label_gop.value_ptr.pending_relocs.items) |pending_reloc|
|
|
self.performReloc(pending_reloc);
|
|
if (anon)
|
|
label_gop.value_ptr.pending_relocs.clearRetainingCapacity()
|
|
else
|
|
label_gop.value_ptr.pending_relocs.clearAndFree(self.gpa);
|
|
}
|
|
label_gop.value_ptr.target = @intCast(self.mir_instructions.len);
|
|
} else continue;
|
|
if (mnem_str[0] == '.') {
|
|
if (prefix != .none) return self.fail("prefixed directive: '{s} {s}'", .{ @tagName(prefix), mnem_str });
|
|
prefix = .directive;
|
|
}
|
|
|
|
var mnem_size: struct {
|
|
used: bool,
|
|
size: ?Memory.Size,
|
|
fn use(size: *@This()) ?Memory.Size {
|
|
size.used = true;
|
|
return size.size;
|
|
}
|
|
} = .{
|
|
.used = false,
|
|
.size = if (prefix == .directive)
|
|
null
|
|
else if (std.mem.endsWith(u8, mnem_str, "b"))
|
|
.byte
|
|
else if (std.mem.endsWith(u8, mnem_str, "w"))
|
|
.word
|
|
else if (std.mem.endsWith(u8, mnem_str, "l"))
|
|
.dword
|
|
else if (std.mem.endsWith(u8, mnem_str, "q") and
|
|
(std.mem.indexOfScalar(u8, "vp", mnem_str[0]) == null or !std.mem.endsWith(u8, mnem_str, "dq")))
|
|
.qword
|
|
else if (std.mem.endsWith(u8, mnem_str, "t"))
|
|
.tbyte
|
|
else
|
|
null,
|
|
};
|
|
var mnem_tag = while (true) break std.meta.stringToEnum(
|
|
encoder.Instruction.Mnemonic,
|
|
mnem_str[0 .. mnem_str.len - @intFromBool(mnem_size.size != null)],
|
|
) orelse if (mnem_size.size) |_| {
|
|
mnem_size.size = null;
|
|
continue;
|
|
} else return self.fail("invalid mnemonic: '{s}'", .{mnem_str});
|
|
if (@as(?Memory.Size, switch (mnem_tag) {
|
|
.clflush => .byte,
|
|
.fldcw, .fnstcw, .fstcw, .fnstsw, .fstsw => .word,
|
|
.fldenv, .fnstenv, .fstenv => .none,
|
|
.frstor, .fsave, .fnsave, .fxrstor, .fxrstor64, .fxsave, .fxsave64 => .none,
|
|
.invlpg => .none,
|
|
.invpcid => .xword,
|
|
.ldmxcsr, .stmxcsr, .vldmxcsr, .vstmxcsr => .dword,
|
|
else => null,
|
|
})) |fixed_mnem_size| {
|
|
if (mnem_size.size) |size| if (size != fixed_mnem_size)
|
|
return self.fail("invalid size: '{s}'", .{mnem_str});
|
|
mnem_size.size = fixed_mnem_size;
|
|
}
|
|
|
|
var ops: [4]Operand = @splat(.none);
|
|
var ops_len: usize = 0;
|
|
|
|
var last_op = false;
|
|
var op_it = std.mem.splitScalar(u8, mnem_it.rest(), ',');
|
|
next_op: for (&ops) |*op| {
|
|
const op_str = while (!last_op) {
|
|
const full_str = op_it.next() orelse break :next_op;
|
|
const code_str = if (std.mem.indexOfScalar(u8, full_str, '#') orelse
|
|
std.mem.indexOf(u8, full_str, "//")) |comment|
|
|
code: {
|
|
last_op = true;
|
|
break :code full_str[0..comment];
|
|
} else full_str;
|
|
const trim_str = std.mem.trim(u8, code_str, " \t*");
|
|
if (trim_str.len > 0) break trim_str;
|
|
} else break;
|
|
if (std.mem.startsWith(u8, op_str, "%%")) {
|
|
const colon = std.mem.indexOfScalarPos(u8, op_str, "%%".len + 2, ':');
|
|
const reg = parseRegName(op_str["%%".len .. colon orelse op_str.len]) orelse
|
|
return self.fail("invalid register: '{s}'", .{op_str});
|
|
if (colon) |colon_pos| {
|
|
const disp = std.fmt.parseInt(i32, op_str[colon_pos + ":".len ..], 0) catch
|
|
return self.fail("invalid displacement: '{s}'", .{op_str});
|
|
op.* = .{ .mem = .{
|
|
.base = .{ .reg = reg },
|
|
.mod = .{ .rm = .{
|
|
.size = mnem_size.use() orelse
|
|
return self.fail("unknown size: '{s}'", .{op_str}),
|
|
.disp = disp,
|
|
} },
|
|
} };
|
|
} else {
|
|
if (mnem_size.use()) |size| if (reg.bitSize() != size.bitSize(self.target))
|
|
return self.fail("invalid register size: '{s}'", .{op_str});
|
|
op.* = .{ .reg = reg };
|
|
}
|
|
} else if (std.mem.startsWith(u8, op_str, "%[") and std.mem.endsWith(u8, op_str, "]")) {
|
|
const colon = std.mem.indexOfScalarPos(u8, op_str, "%[".len, ':');
|
|
const modifier = if (colon) |colon_pos|
|
|
op_str[colon_pos + ":".len .. op_str.len - "]".len]
|
|
else
|
|
"";
|
|
op.* = switch (args.items[
|
|
arg_map.get(op_str["%[".len .. colon orelse op_str.len - "]".len]) orelse
|
|
return self.fail("no matching constraint: '{s}'", .{op_str})
|
|
]) {
|
|
.immediate => |imm| if (std.mem.eql(u8, modifier, "") or std.mem.eql(u8, modifier, "c"))
|
|
.{ .imm = .u(imm) }
|
|
else
|
|
return self.fail("invalid modifier: '{s}'", .{modifier}),
|
|
.register => |reg| if (std.mem.eql(u8, modifier, ""))
|
|
.{ .reg = if (mnem_size.use()) |size|
|
|
registerAlias(reg, @intCast(@divExact(size.bitSize(self.target), 8)))
|
|
else
|
|
reg }
|
|
else
|
|
return self.fail("invalid modifier: '{s}'", .{modifier}),
|
|
.memory => |addr| if (std.mem.eql(u8, modifier, "") or std.mem.eql(u8, modifier, "P"))
|
|
.{ .mem = .{
|
|
.base = .{ .reg = .ds },
|
|
.mod = .{ .rm = .{
|
|
.size = mnem_size.use() orelse
|
|
return self.fail("unknown size: '{s}'", .{op_str}),
|
|
.disp = @intCast(@as(i64, @bitCast(addr))),
|
|
} },
|
|
} }
|
|
else
|
|
return self.fail("invalid modifier: '{s}'", .{modifier}),
|
|
.indirect => |reg_off| if (std.mem.eql(u8, modifier, ""))
|
|
.{ .mem = .{
|
|
.base = .{ .reg = reg_off.reg },
|
|
.mod = .{ .rm = .{
|
|
.size = mnem_size.use() orelse
|
|
return self.fail("unknown size: '{s}'", .{op_str}),
|
|
.disp = reg_off.off,
|
|
} },
|
|
} }
|
|
else
|
|
return self.fail("invalid modifier: '{s}'", .{modifier}),
|
|
.load_frame => |frame_addr| if (std.mem.eql(u8, modifier, ""))
|
|
.{ .mem = .{
|
|
.base = .{ .frame = frame_addr.index },
|
|
.mod = .{ .rm = .{
|
|
.size = mnem_size.use() orelse
|
|
return self.fail("unknown size: '{s}'", .{op_str}),
|
|
.disp = frame_addr.off,
|
|
} },
|
|
} }
|
|
else
|
|
return self.fail("invalid modifier: '{s}'", .{modifier}),
|
|
.lea_got => |sym_index| if (std.mem.eql(u8, modifier, "P"))
|
|
.{ .reg = try self.copyToTmpRegister(.usize, .{ .lea_got = sym_index }) }
|
|
else
|
|
return self.fail("invalid modifier: '{s}'", .{modifier}),
|
|
.lea_symbol => |sym_off| if (std.mem.eql(u8, modifier, "P"))
|
|
.{ .reg = try self.copyToTmpRegister(.usize, .{ .lea_symbol = sym_off }) }
|
|
else
|
|
return self.fail("invalid modifier: '{s}'", .{modifier}),
|
|
else => return self.fail("invalid constraint: '{s}'", .{op_str}),
|
|
};
|
|
} else if (std.mem.startsWith(u8, op_str, "$")) {
|
|
op.* = if (std.fmt.parseInt(u64, op_str["$".len..], 0)) |u|
|
|
.{ .imm = .u(u) }
|
|
else |_| if (std.fmt.parseInt(i32, op_str["$".len..], 0)) |s|
|
|
.{ .imm = .s(s) }
|
|
else |_|
|
|
return self.fail("invalid immediate: '{s}'", .{op_str});
|
|
} else if (std.mem.endsWith(u8, op_str, ")")) {
|
|
const open = std.mem.indexOfScalar(u8, op_str, '(') orelse
|
|
return self.fail("invalid operand: '{s}'", .{op_str});
|
|
var sib_it = std.mem.splitScalar(u8, op_str[open + "(".len .. op_str.len - ")".len], ',');
|
|
const base_str = sib_it.next() orelse
|
|
return self.fail("invalid memory operand: '{s}'", .{op_str});
|
|
if (base_str.len > 0 and !std.mem.startsWith(u8, base_str, "%%"))
|
|
return self.fail("invalid memory operand: '{s}'", .{op_str});
|
|
const index_str = sib_it.next() orelse "";
|
|
if (index_str.len > 0 and !std.mem.startsWith(u8, base_str, "%%"))
|
|
return self.fail("invalid memory operand: '{s}'", .{op_str});
|
|
const scale_str = sib_it.next() orelse "";
|
|
if (index_str.len == 0 and scale_str.len > 0)
|
|
return self.fail("invalid memory operand: '{s}'", .{op_str});
|
|
const scale: Memory.Scale = if (scale_str.len > 0)
|
|
switch (std.fmt.parseInt(u4, scale_str, 10) catch
|
|
return self.fail("invalid scale: '{s}'", .{op_str})) {
|
|
1 => .@"1",
|
|
2 => .@"2",
|
|
4 => .@"4",
|
|
8 => .@"8",
|
|
else => return self.fail("invalid scale: '{s}'", .{op_str}),
|
|
}
|
|
else
|
|
.@"1";
|
|
if (sib_it.next()) |_| return self.fail("invalid memory operand: '{s}'", .{op_str});
|
|
op.* = if (std.mem.eql(u8, base_str, "%%dx") and index_str.len == 0) .{ .reg = .dx } else .{ .mem = .{
|
|
.base = if (base_str.len > 0)
|
|
.{ .reg = parseRegName(base_str["%%".len..]) orelse
|
|
return self.fail("invalid base register: '{s}'", .{base_str}) }
|
|
else
|
|
.none,
|
|
.mod = .{ .rm = .{
|
|
.size = mnem_size.use() orelse return self.fail("unknown size: '{s}'", .{op_str}),
|
|
.index = if (index_str.len > 0)
|
|
parseRegName(index_str["%%".len..]) orelse
|
|
return self.fail("invalid index register: '{s}'", .{op_str})
|
|
else
|
|
.none,
|
|
.scale = scale,
|
|
.disp = if (std.mem.startsWith(u8, op_str[0..open], "%[") and
|
|
std.mem.endsWith(u8, op_str[0..open], "]"))
|
|
disp: {
|
|
const colon = std.mem.indexOfScalarPos(u8, op_str[0..open], "%[".len, ':');
|
|
const modifier = if (colon) |colon_pos|
|
|
op_str[colon_pos + ":".len .. open - "]".len]
|
|
else
|
|
"";
|
|
break :disp switch (args.items[
|
|
arg_map.get(op_str["%[".len .. colon orelse open - "]".len]) orelse
|
|
return self.fail("no matching constraint: '{s}'", .{op_str})
|
|
]) {
|
|
.immediate => |imm| if (std.mem.eql(u8, modifier, "") or
|
|
std.mem.eql(u8, modifier, "c"))
|
|
std.math.cast(i32, @as(i64, @bitCast(imm))) orelse
|
|
return self.fail("invalid displacement: '{s}'", .{op_str})
|
|
else
|
|
return self.fail("invalid modifier: '{s}'", .{modifier}),
|
|
else => return self.fail("invalid constraint: '{s}'", .{op_str}),
|
|
};
|
|
} else if (open > 0)
|
|
std.fmt.parseInt(i32, op_str[0..open], 0) catch
|
|
return self.fail("invalid displacement: '{s}'", .{op_str})
|
|
else
|
|
0,
|
|
} },
|
|
} };
|
|
} else if (Label.isValid(.reference, op_str)) {
|
|
const anon = std.ascii.isDigit(op_str[0]);
|
|
const label_gop = try labels.getOrPut(self.gpa, op_str[0..if (anon) 1 else op_str.len]);
|
|
if (!label_gop.found_existing) label_gop.value_ptr.* = .{};
|
|
if (anon and (op_str[1] == 'b' or op_str[1] == 'B') and !label_gop.found_existing)
|
|
return self.fail("undefined label: '{s}'", .{op_str});
|
|
const pending_relocs = &label_gop.value_ptr.pending_relocs;
|
|
if (if (anon)
|
|
op_str[1] == 'f' or op_str[1] == 'F'
|
|
else
|
|
!label_gop.found_existing or pending_relocs.items.len > 0)
|
|
try pending_relocs.append(self.gpa, @intCast(self.mir_instructions.len));
|
|
op.* = .{ .inst = label_gop.value_ptr.target };
|
|
} else return self.fail("invalid operand: '{s}'", .{op_str});
|
|
ops_len += 1;
|
|
} else if (op_it.next()) |op_str| return self.fail("extra operand: '{s}'", .{op_str});
|
|
|
|
// convert from att syntax to intel syntax
|
|
std.mem.reverse(Operand, ops[0..ops_len]);
|
|
if (!mnem_size.used) if (mnem_size.size) |size| {
|
|
comptime var max_mnem_len: usize = 0;
|
|
inline for (@typeInfo(encoder.Instruction.Mnemonic).@"enum".fields) |mnem|
|
|
max_mnem_len = @max(mnem.name.len, max_mnem_len);
|
|
var intel_mnem_buf: [max_mnem_len + 1]u8 = undefined;
|
|
const intel_mnem_str = std.fmt.bufPrint(&intel_mnem_buf, "{s}{c}", .{
|
|
@tagName(mnem_tag),
|
|
@as(u8, switch (size) {
|
|
.byte => 'b',
|
|
.word => 'w',
|
|
.dword => 'd',
|
|
.qword => 'q',
|
|
.tbyte => 't',
|
|
else => unreachable,
|
|
}),
|
|
}) catch unreachable;
|
|
if (std.meta.stringToEnum(encoder.Instruction.Mnemonic, intel_mnem_str)) |intel_mnem_tag| mnem_tag = intel_mnem_tag;
|
|
};
|
|
const mnem_name = @tagName(mnem_tag);
|
|
const mnem_fixed_tag: Mir.Inst.FixedTag = if (prefix == .directive)
|
|
.{ ._, .pseudo }
|
|
else for (std.enums.values(Mir.Inst.Fixes)) |fixes| {
|
|
const fixes_name = @tagName(fixes);
|
|
const space_i = std.mem.indexOfScalar(u8, fixes_name, ' ');
|
|
const fixes_prefix = if (space_i) |i|
|
|
                std.meta.stringToEnum(encoder.Instruction.Prefix, fixes_name[0..i]).?
            else
                .none;
            if (fixes_prefix != prefix) continue;
            const pattern = fixes_name[if (space_i) |i| i + " ".len else 0..];
            const wildcard_i = std.mem.indexOfScalar(u8, pattern, '_').?;
            const mnem_prefix = pattern[0..wildcard_i];
            const mnem_suffix = pattern[wildcard_i + "_".len ..];
            if (!std.mem.startsWith(u8, mnem_name, mnem_prefix)) continue;
            if (!std.mem.endsWith(u8, mnem_name, mnem_suffix)) continue;
            break .{ fixes, std.meta.stringToEnum(
                Mir.Inst.Tag,
                mnem_name[mnem_prefix.len .. mnem_name.len - mnem_suffix.len],
            ) orelse continue };
        } else {
            assert(prefix != .none); // no combination of fixes produced a known mnemonic
            return self.fail("invalid prefix for mnemonic: '{s} {s}'", .{
                @tagName(prefix), mnem_name,
            });
        };

        (if (prefix == .directive) switch (mnem_tag) {
            .@".cfi_def_cfa" => if (ops[0] == .reg and ops[1] == .imm and ops[2] == .none)
                self.asmPseudoRegisterImmediate(.pseudo_cfi_def_cfa_ri_s, ops[0].reg, ops[1].imm)
            else
                error.InvalidInstruction,
            .@".cfi_def_cfa_register" => if (ops[0] == .reg and ops[1] == .none)
                self.asmPseudoRegister(.pseudo_cfi_def_cfa_register_r, ops[0].reg)
            else
                error.InvalidInstruction,
            .@".cfi_def_cfa_offset" => if (ops[0] == .imm and ops[1] == .none)
                self.asmPseudoImmediate(.pseudo_cfi_def_cfa_offset_i_s, ops[0].imm)
            else
                error.InvalidInstruction,
            .@".cfi_adjust_cfa_offset" => if (ops[0] == .imm and ops[1] == .none)
                self.asmPseudoImmediate(.pseudo_cfi_adjust_cfa_offset_i_s, ops[0].imm)
            else
                error.InvalidInstruction,
            .@".cfi_offset" => if (ops[0] == .reg and ops[1] == .imm and ops[2] == .none)
                self.asmPseudoRegisterImmediate(.pseudo_cfi_offset_ri_s, ops[0].reg, ops[1].imm)
            else
                error.InvalidInstruction,
            .@".cfi_val_offset" => if (ops[0] == .reg and ops[1] == .imm and ops[2] == .none)
                self.asmPseudoRegisterImmediate(.pseudo_cfi_val_offset_ri_s, ops[0].reg, ops[1].imm)
            else
                error.InvalidInstruction,
            .@".cfi_rel_offset" => if (ops[0] == .reg and ops[1] == .imm and ops[2] == .none)
                self.asmPseudoRegisterImmediate(.pseudo_cfi_rel_offset_ri_s, ops[0].reg, ops[1].imm)
            else
                error.InvalidInstruction,
            .@".cfi_register" => if (ops[0] == .reg and ops[1] == .reg and ops[2] == .none)
                self.asmPseudoRegisterRegister(.pseudo_cfi_register_rr, ops[0].reg, ops[1].reg)
            else
                error.InvalidInstruction,
            .@".cfi_restore" => if (ops[0] == .reg and ops[1] == .none)
                self.asmPseudoRegister(.pseudo_cfi_restore_r, ops[0].reg)
            else
                error.InvalidInstruction,
            .@".cfi_undefined" => if (ops[0] == .reg and ops[1] == .none)
                self.asmPseudoRegister(.pseudo_cfi_undefined_r, ops[0].reg)
            else
                error.InvalidInstruction,
            .@".cfi_same_value" => if (ops[0] == .reg and ops[1] == .none)
                self.asmPseudoRegister(.pseudo_cfi_same_value_r, ops[0].reg)
            else
                error.InvalidInstruction,
            .@".cfi_remember_state" => if (ops[0] == .none)
                self.asmPseudo(.pseudo_cfi_remember_state_none)
            else
                error.InvalidInstruction,
            .@".cfi_restore_state" => if (ops[0] == .none)
                self.asmPseudo(.pseudo_cfi_restore_state_none)
            else
                error.InvalidInstruction,
            .@".cfi_escape" => error.InvalidInstruction,
            else => unreachable,
        } else self.asmOps(mnem_fixed_tag, ops)) catch |err| switch (err) {
            error.InvalidInstruction => return self.fail(
                "invalid instruction: '{s} {s} {s} {s} {s}'",
                .{
                    mnem_str,
                    @tagName(ops[0]),
                    @tagName(ops[1]),
                    @tagName(ops[2]),
                    @tagName(ops[3]),
                },
            ),
            else => |e| return e,
        };
    }

    var label_it = labels.iterator();
    while (label_it.next()) |label| if (label.value_ptr.pending_relocs.items.len > 0)
        return self.fail("undefined label: '{s}'", .{label.key_ptr.*});

    for (outputs, args.items[0..outputs.len]) |output, arg_mcv| {
        const extra_bytes = std.mem.sliceAsBytes(self.air.extra[outputs_extra_i..]);
        const constraint =
            std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[outputs_extra_i..]), 0);
        const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
        // This equation accounts for the fact that even if we have exactly 4 bytes
        // for the string, we still use the next u32 for the null terminator.
        outputs_extra_i += (constraint.len + name.len + (2 + 3)) / 4;

        if (output == .none) continue;
        if (arg_mcv != .register) continue;
        if (constraint.len == 2 and std.ascii.isDigit(constraint[1])) continue;
        try self.store(self.typeOf(output), .{ .air_ref = output }, arg_mcv, .{});
    }

    simple: {
        var buf: [Liveness.bpi - 1]Air.Inst.Ref = @splat(.none);
        var buf_index: usize = 0;
        for (outputs) |output| {
            if (output == .none) continue;

            if (buf_index >= buf.len) break :simple;
            buf[buf_index] = output;
            buf_index += 1;
        }
        if (buf_index + inputs.len > buf.len) break :simple;
        @memcpy(buf[buf_index..][0..inputs.len], inputs);
        return self.finishAir(inst, result, buf);
    }
    var bt = self.liveness.iterateBigTomb(inst);
    for (outputs) |output| if (output != .none) try self.feed(&bt, output);
    for (inputs) |input| try self.feed(&bt, input);
    return self.finishAirResult(inst, result);
}
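/// Describes how a value moves between a register and memory for a given
/// register class: a plain move instruction, an x87 load/store pair, or an
/// SSE/AVX insert/extract instruction pair.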
const MoveStrategy = union(enum) {
    move: Mir.Inst.FixedTag,
    x87_load_store,
    insert_extract: InsertExtract,
    vex_insert_extract: InsertExtract,

    const InsertExtract = struct {
        insert: Mir.Inst.FixedTag,
        extract: Mir.Inst.FixedTag,
    };

    pub fn read(strat: MoveStrategy, self: *CodeGen, dst_reg: Register, src_mem: Memory) !void {
        switch (strat) {
            .move => |tag| try self.asmRegisterMemory(tag, switch (tag[1]) {
                else => dst_reg,
                .lea => if (dst_reg.bitSize() >= 32) dst_reg else dst_reg.to32(),
            }, src_mem),
            .x87_load_store => {
                try self.asmMemory(.{ .f_, .ld }, src_mem);
                assert(dst_reg != .st7);
                try self.asmRegister(.{ .f_p, .st }, @enumFromInt(@intFromEnum(dst_reg) + 1));
            },
            .insert_extract => |ie| if (ie.insert[0] != .p_w or self.hasFeature(.sse2))
                try self.asmRegisterMemoryImmediate(ie.insert, dst_reg, src_mem, .u(0))
            else {
                const tmp_frame_index = try self.allocFrameIndex(.init(.{
                    .size = 16,
                    .alignment = .@"16",
                }));
                const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
                try self.asmRegisterMemory(.{ ._, .movzx }, tmp_reg.to32(), src_mem);
                try self.asmMemoryRegister(.{ ._, .mov }, .{
                    .base = .{ .frame = tmp_frame_index },
                    .mod = .{ .rm = .{ .size = .word } },
                }, tmp_reg.to16());
                try self.asmRegisterMemory(.{ ._ps, .mova }, dst_reg.to128(), .{
                    .base = .{ .frame = tmp_frame_index },
                    .mod = .{ .rm = .{ .size = .xword } },
                });
            },
            .vex_insert_extract => |ie| try self.asmRegisterRegisterMemoryImmediate(
                ie.insert,
                dst_reg,
                dst_reg,
                src_mem,
                .u(0),
            ),
        }
    }
    pub fn write(strat: MoveStrategy, self: *CodeGen, dst_mem: Memory, src_reg: Register) !void {
        switch (strat) {
            .move => |tag| try self.asmMemoryRegister(tag, dst_mem, src_reg),
            .x87_load_store => {
                try self.asmRegister(.{ .f_, .ld }, src_reg);
                try self.asmMemory(.{ .f_p, .st }, dst_mem);
            },
            .insert_extract, .vex_insert_extract => |ie| if (ie.extract[0] != .p_w or self.hasFeature(.sse4_1))
                try self.asmMemoryRegisterImmediate(ie.extract, dst_mem, src_reg, .u(0))
            else if (self.hasFeature(.sse2)) {
                const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
                try self.asmRegisterRegisterImmediate(ie.extract, tmp_reg.to32(), src_reg.to128(), .u(0));
                try self.asmMemoryRegister(.{ ._, .mov }, dst_mem, tmp_reg.to16());
            } else {
                const tmp_frame_index = try self.allocFrameIndex(.init(.{
                    .size = 16,
                    .alignment = .@"16",
                }));
                try self.asmMemoryRegister(.{ ._ps, .mova }, .{
                    .base = .{ .frame = tmp_frame_index },
                    .mod = .{ .rm = .{ .size = .xword } },
                }, src_reg.to128());
                const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
                try self.asmRegisterMemory(.{ ._, .movzx }, tmp_reg.to32(), .{
                    .base = .{ .frame = tmp_frame_index },
                    .mod = .{ .rm = .{ .size = .word } },
                });
                try self.asmMemoryRegister(.{ ._, .mov }, dst_mem, tmp_reg.to16());
            },
        }
    }
};
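/// Picks the `MoveStrategy` used to transfer a value of type `ty` between
/// memory and a register of class `class`, taking available CPU features
/// (AVX/SSE) and the alignment of the memory operand into account.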
fn moveStrategy(self: *CodeGen, ty: Type, class: Register.Class, aligned: bool) !MoveStrategy {
    const pt = self.pt;
    const zcu = pt.zcu;
    switch (class) {
        .general_purpose, .segment => return .{ .move = .{ ._, .mov } },
        .x87 => return .x87_load_store,
        .mmx => {},
        .sse => switch (ty.zigTypeTag(zcu)) {
            else => {
                const classes = std.mem.sliceTo(&abi.classifySystemV(ty, zcu, self.target.*, .other), .none);
                assert(std.mem.indexOfNone(abi.Class, classes, &.{
                    .integer, .sse, .sseup, .memory, .float, .float_combine,
                }) == null);
                const abi_size = ty.abiSize(zcu);
                if (abi_size < 4 or
                    std.mem.indexOfScalar(abi.Class, classes, .integer) != null) switch (abi_size) {
                    1 => if (self.hasFeature(.avx)) return .{ .vex_insert_extract = .{
                        .insert = .{ .vp_b, .insr },
                        .extract = .{ .vp_b, .extr },
                    } } else if (self.hasFeature(.sse4_2)) return .{ .insert_extract = .{
                        .insert = .{ .p_b, .insr },
                        .extract = .{ .p_b, .extr },
                    } },
                    2 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{
                        .insert = .{ .vp_w, .insr },
                        .extract = .{ .vp_w, .extr },
                    } } else .{ .insert_extract = .{
                        .insert = .{ .p_w, .insr },
                        .extract = .{ .p_w, .extr },
                    } },
                    3...4 => return .{ .move = if (self.hasFeature(.avx))
                        .{ .v_d, .mov }
                    else
                        .{ ._d, .mov } },
                    5...8 => return .{ .move = if (self.hasFeature(.avx))
                        .{ .v_q, .mov }
                    else
                        .{ ._q, .mov } },
                    9...16 => return .{ .move = if (self.hasFeature(.avx))
                        .{ if (aligned) .v_dqa else .v_dqu, .mov }
                    else if (self.hasFeature(.sse2))
                        .{ if (aligned) ._dqa else ._dqu, .mov }
                    else
                        .{ ._ps, if (aligned) .mova else .movu } },
                    17...32 => if (self.hasFeature(.avx))
                        return .{ .move = .{ if (aligned) .v_dqa else .v_dqu, .mov } },
                    else => {},
                } else switch (abi_size) {
                    4 => return .{ .move = if (self.hasFeature(.avx))
                        .{ .v_ss, .mov }
                    else
                        .{ ._ss, .mov } },
                    5...8 => return .{ .move = if (self.hasFeature(.avx))
                        .{ .v_sd, .mov }
                    else if (self.hasFeature(.sse2))
                        .{ ._sd, .mov }
                    else
                        .{ ._ps, .movl } },
                    9...16 => return .{ .move = if (self.hasFeature(.avx))
                        .{ .v_pd, if (aligned) .mova else .movu }
                    else if (self.hasFeature(.sse2))
                        .{ ._pd, if (aligned) .mova else .movu }
                    else
                        .{ ._ps, if (aligned) .mova else .movu } },
                    17...32 => if (self.hasFeature(.avx))
                        return .{ .move = .{ .v_pd, if (aligned) .mova else .movu } },
                    else => {},
                }
            },
            .float => switch (ty.floatBits(self.target.*)) {
                16 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{
                    .insert = .{ .vp_w, .insr },
                    .extract = .{ .vp_w, .extr },
                } } else .{ .insert_extract = .{
                    .insert = .{ .p_w, .insr },
                    .extract = .{ .p_w, .extr },
                } },
                32 => return .{ .move = if (self.hasFeature(.avx))
                    .{ .v_ss, .mov }
                else
                    .{ ._ss, .mov } },
                64 => return .{ .move = if (self.hasFeature(.avx))
                    .{ .v_sd, .mov }
                else if (self.hasFeature(.sse2))
                    .{ ._sd, .mov }
                else
                    .{ ._ps, .movl } },
                128 => return .{ .move = if (self.hasFeature(.avx))
                    .{ if (aligned) .v_dqa else .v_dqu, .mov }
                else if (self.hasFeature(.sse2))
                    .{ if (aligned) ._dqa else ._dqu, .mov }
                else
                    .{ ._ps, if (aligned) .mova else .movu } },
                else => {},
            },
            .vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
                .bool => switch (ty.vectorLen(zcu)) {
                    33...64 => return .{ .move = if (self.hasFeature(.avx))
                        .{ .v_q, .mov }
                    else
                        .{ ._q, .mov } },
                    else => {},
                },
                .int => switch (ty.childType(zcu).intInfo(zcu).bits) {
                    1...8 => switch (ty.vectorLen(zcu)) {
                        1...16 => return .{ .move = if (self.hasFeature(.avx))
                            .{ if (aligned) .v_dqa else .v_dqu, .mov }
                        else if (self.hasFeature(.sse2))
                            .{ if (aligned) ._dqa else ._dqu, .mov }
                        else
                            .{ ._ps, if (aligned) .mova else .movu } },
                        17...32 => if (self.hasFeature(.avx))
                            return .{ .move = .{ if (aligned) .v_dqa else .v_dqu, .mov } },
                        else => {},
                    },
                    9...16 => switch (ty.vectorLen(zcu)) {
                        1...8 => return .{ .move = if (self.hasFeature(.avx))
                            .{ if (aligned) .v_dqa else .v_dqu, .mov }
                        else if (self.hasFeature(.sse2))
                            .{ if (aligned) ._dqa else ._dqu, .mov }
                        else
                            .{ ._ps, if (aligned) .mova else .movu } },
                        9...16 => if (self.hasFeature(.avx))
                            return .{ .move = .{ if (aligned) .v_dqa else .v_dqu, .mov } },
                        else => {},
                    },
                    17...32 => switch (ty.vectorLen(zcu)) {
                        1...4 => return .{ .move = if (self.hasFeature(.avx))
                            .{ if (aligned) .v_dqa else .v_dqu, .mov }
                        else if (self.hasFeature(.sse2))
                            .{ if (aligned) ._dqa else ._dqu, .mov }
                        else
                            .{ ._ps, if (aligned) .mova else .movu } },
                        5...8 => if (self.hasFeature(.avx))
                            return .{ .move = .{ if (aligned) .v_dqa else .v_dqu, .mov } },
                        else => {},
                    },
                    33...64 => switch (ty.vectorLen(zcu)) {
                        1...2 => return .{ .move = if (self.hasFeature(.avx))
                            .{ if (aligned) .v_dqa else .v_dqu, .mov }
                        else if (self.hasFeature(.sse2))
                            .{ if (aligned) ._dqa else ._dqu, .mov }
                        else
                            .{ ._ps, if (aligned) .mova else .movu } },
                        3...4 => if (self.hasFeature(.avx))
                            return .{ .move = .{ if (aligned) .v_dqa else .v_dqu, .mov } },
                        else => {},
                    },
                    65...128 => switch (ty.vectorLen(zcu)) {
                        1 => return .{ .move = if (self.hasFeature(.avx))
                            .{ if (aligned) .v_dqa else .v_dqu, .mov }
                        else if (self.hasFeature(.sse2))
                            .{ if (aligned) ._dqa else ._dqu, .mov }
                        else
                            .{ ._ps, if (aligned) .mova else .movu } },
                        2 => if (self.hasFeature(.avx))
                            return .{ .move = .{ if (aligned) .v_dqa else .v_dqu, .mov } },
                        else => {},
                    },
                    129...256 => switch (ty.vectorLen(zcu)) {
                        1 => if (self.hasFeature(.avx))
                            return .{ .move = .{ if (aligned) .v_dqa else .v_dqu, .mov } },
                        else => {},
                    },
                    else => {},
                },
                .pointer, .optional => if (ty.childType(zcu).isPtrAtRuntime(zcu))
                    switch (ty.vectorLen(zcu)) {
                        1...2 => return .{ .move = if (self.hasFeature(.avx))
                            .{ if (aligned) .v_dqa else .v_dqu, .mov }
                        else if (self.hasFeature(.sse2))
                            .{ if (aligned) ._dqa else ._dqu, .mov }
                        else
                            .{ ._ps, if (aligned) .mova else .movu } },
                        3...4 => if (self.hasFeature(.avx))
                            return .{ .move = .{ if (aligned) .v_dqa else .v_dqu, .mov } },
                        else => {},
                    }
                else
                    unreachable,
                .float => switch (ty.childType(zcu).floatBits(self.target.*)) {
                    16 => switch (ty.vectorLen(zcu)) {
                        1...8 => return .{ .move = if (self.hasFeature(.avx))
                            .{ if (aligned) .v_dqa else .v_dqu, .mov }
                        else if (self.hasFeature(.sse2))
                            .{ if (aligned) ._dqa else ._dqu, .mov }
                        else
                            .{ ._ps, if (aligned) .mova else .movu } },
                        9...16 => if (self.hasFeature(.avx))
                            return .{ .move = .{ if (aligned) .v_dqa else .v_dqu, .mov } },
                        else => {},
                    },
                    32 => switch (ty.vectorLen(zcu)) {
                        1...4 => return .{ .move = if (self.hasFeature(.avx))
                            .{ .v_ps, if (aligned) .mova else .movu }
                        else
                            .{ ._ps, if (aligned) .mova else .movu } },
                        5...8 => if (self.hasFeature(.avx))
                            return .{ .move = .{ .v_ps, if (aligned) .mova else .movu } },
                        else => {},
                    },
                    64 => switch (ty.vectorLen(zcu)) {
                        1...2 => return .{ .move = if (self.hasFeature(.avx))
                            .{ .v_pd, if (aligned) .mova else .movu }
                        else if (self.hasFeature(.sse2))
                            .{ ._pd, if (aligned) .mova else .movu }
                        else
                            .{ ._ps, if (aligned) .mova else .movu } },
                        3...4 => if (self.hasFeature(.avx))
                            return .{ .move = .{ .v_pd, if (aligned) .mova else .movu } },
                        else => {},
                    },
                    80, 128 => switch (ty.vectorLen(zcu)) {
                        1 => return .{ .move = if (self.hasFeature(.avx))
                            .{ if (aligned) .v_dqa else .v_dqu, .mov }
                        else if (self.hasFeature(.sse2))
                            .{ if (aligned) ._dqa else ._dqu, .mov }
                        else
                            .{ ._ps, if (aligned) .mova else .movu } },
                        2 => if (self.hasFeature(.avx))
                            return .{ .move = .{ if (aligned) .v_dqa else .v_dqu, .mov } },
                        else => {},
                    },
                    else => {},
                },
                else => {},
            },
        },
        .ip, .cr, .dr => {},
    }
    return self.fail("TODO moveStrategy for {}", .{ty.fmt(pt)});
}
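/// Options shared by the copy helpers below. `safety` requests that
/// undefined values be filled with the 0xAA debug pattern instead of
/// being left as garbage.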
const CopyOptions = struct {
    safety: bool = false,
};
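/// Emits code to copy a value of type `ty` from `src_mcv` into `dst_mcv`.
/// Dispatches on the destination: registers go through `genSetReg`, memory
/// destinations through `genSetMem`; multi-register destinations are split
/// into parts, with a two-pass scheme that defers parts whose destination
/// register would clobber a still-needed source register.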
fn genCopy(self: *CodeGen, ty: Type, dst_mcv: MCValue, src_mcv: MCValue, opts: CopyOptions) InnerError!void {
    const pt = self.pt;

    const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null;
    defer if (src_lock) |lock| self.register_manager.unlockReg(lock);

    switch (dst_mcv) {
        .none,
        .unreach,
        .dead,
        .undef,
        .immediate,
        .eflags,
        .register_overflow,
        .register_mask,
        .lea_direct,
        .lea_got,
        .lea_tlv,
        .lea_frame,
        .lea_symbol,
        .elementwise_regs_then_frame,
        .reserved_frame,
        .air_ref,
        => unreachable, // unmodifiable destination
        .register => |reg| try self.genSetReg(reg, ty, src_mcv, opts),
        .register_offset => |dst_reg_off| try self.genSetReg(dst_reg_off.reg, ty, switch (src_mcv) {
            .none,
            .unreach,
            .dead,
            .undef,
            .register_overflow,
            .elementwise_regs_then_frame,
            .reserved_frame,
            => unreachable,
            .immediate,
            .register,
            .register_offset,
            .lea_frame,
            => src_mcv.offset(-dst_reg_off.off),
            else => .{ .register_offset = .{
                .reg = try self.copyToTmpRegister(ty, src_mcv),
                .off = -dst_reg_off.off,
            } },
        }, opts),
        inline .register_pair, .register_triple, .register_quadruple => |dst_regs, dst_tag| {
            const src_info: ?struct { addr_reg: Register, addr_lock: RegisterLock } = src_info: switch (src_mcv) {
                .undef, .memory, .indirect, .load_frame => null,
                .register => |src_reg| switch (dst_regs[0].class()) {
                    .general_purpose => switch (src_reg.class()) {
                        else => unreachable,
                        .sse => if (ty.abiSize(pt.zcu) <= 16) {
                            if (self.hasFeature(.avx)) {
                                try self.asmRegisterRegister(.{ .v_q, .mov }, dst_regs[0].to64(), src_reg.to128());
                                try self.asmRegisterRegisterImmediate(.{ .vp_q, .extr }, dst_regs[1].to64(), src_reg.to128(), .u(1));
                            } else if (self.hasFeature(.sse4_1)) {
                                try self.asmRegisterRegister(.{ ._q, .mov }, dst_regs[0].to64(), src_reg.to128());
                                try self.asmRegisterRegisterImmediate(.{ .p_q, .extr }, dst_regs[1].to64(), src_reg.to128(), .u(1));
                            } else {
                                const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.sse);
                                const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
                                defer self.register_manager.unlockReg(tmp_lock);

                                try self.asmRegisterRegister(.{ ._q, .mov }, dst_regs[0].to64(), src_reg.to128());
                                try self.asmRegisterRegister(.{ ._ps, .movhl }, tmp_reg.to128(), src_reg.to128());
                                try self.asmRegisterRegister(.{ ._q, .mov }, dst_regs[1].to64(), tmp_reg.to128());
                            }
                            return;
                        } else unreachable,
                    },
                    else => unreachable,
                },
                dst_tag => |src_regs| {
                    var hazard_regs = src_regs;
                    for (dst_regs, &hazard_regs, 1..) |dst_reg, src_reg, hazard_index| {
                        const dst_id = dst_reg.id();
                        if (dst_id == src_reg.id()) continue;
                        var mir_tag: Mir.Inst.FixedTag = .{ ._, .mov };
                        for (hazard_regs[hazard_index..]) |*hazard_reg| {
                            if (dst_id != hazard_reg.id()) continue;
                            mir_tag = .{ ._g, .xch };
                            hazard_reg.* = src_reg;
                        }
                        try self.asmRegisterRegister(mir_tag, dst_reg.to64(), src_reg.to64());
                    }
                    return;
                },
                .load_symbol, .load_direct, .load_got, .load_tlv => {
                    const src_addr_reg =
                        (try self.register_manager.allocReg(null, abi.RegisterClass.gp)).to64();
                    const src_addr_lock = self.register_manager.lockRegAssumeUnused(src_addr_reg);
                    errdefer self.register_manager.unlockReg(src_addr_lock);

                    try self.genSetReg(src_addr_reg, .usize, src_mcv.address(), opts);
                    break :src_info .{ .addr_reg = src_addr_reg, .addr_lock = src_addr_lock };
                },
                .air_ref => |src_ref| return self.genCopy(ty, dst_mcv, try self.resolveInst(src_ref), opts),
                else => return self.fail("TODO implement genCopy for {s} of {}", .{
                    @tagName(src_mcv), ty.fmt(pt),
                }),
            };
            defer if (src_info) |info| self.register_manager.unlockReg(info.addr_lock);

            for ([_]bool{ false, true }) |emit_hazard| {
                var hazard_count: u3 = 0;
                var part_disp: i32 = 0;
                for (dst_regs, try self.splitType(dst_regs.len, ty), 0..) |dst_reg, dst_ty, part_i| {
                    defer part_disp += @intCast(dst_ty.abiSize(pt.zcu));
                    const is_hazard = if (src_mcv.getReg()) |src_reg|
                        dst_reg.id() == src_reg.id()
                    else if (src_info) |info|
                        dst_reg.id() == info.addr_reg.id()
                    else
                        false;
                    if (is_hazard) hazard_count += 1;
                    if (is_hazard != emit_hazard) continue;
                    try self.genSetReg(dst_reg, dst_ty, switch (src_mcv) {
                        .undef => if (opts.safety and part_i > 0) .{ .register = dst_regs[0] } else .undef,
                        dst_tag => |src_regs| .{ .register = src_regs[part_i] },
                        .memory, .indirect, .load_frame => src_mcv.address().offset(part_disp).deref(),
                        .load_symbol, .load_direct, .load_got, .load_tlv => .{ .indirect = .{
                            .reg = src_info.?.addr_reg,
                            .off = part_disp,
                        } },
                        else => unreachable,
                    }, opts);
                }
                switch (hazard_count) {
                    0 => break,
                    1 => continue,
                    else => unreachable,
                }
            }
        },
        .indirect => |reg_off| try self.genSetMem(
            .{ .reg = reg_off.reg },
            reg_off.off,
            ty,
            src_mcv,
            opts,
        ),
        .memory, .load_symbol, .load_direct, .load_got, .load_tlv => {
            switch (dst_mcv) {
                .memory => |addr| if (std.math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr|
                    return self.genSetMem(.{ .reg = .ds }, small_addr, ty, src_mcv, opts),
                .load_symbol, .load_direct, .load_got, .load_tlv => {},
                else => unreachable,
            }

            const addr_reg = try self.copyToTmpRegister(.usize, dst_mcv.address());
            const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
            defer self.register_manager.unlockReg(addr_lock);

            try self.genSetMem(.{ .reg = addr_reg }, 0, ty, src_mcv, opts);
        },
        .load_frame => |frame_addr| try self.genSetMem(
            .{ .frame = frame_addr.index },
            frame_addr.off,
            ty,
            src_mcv,
            opts,
        ),
    }
}
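/// Emits code to load a value of type `ty` described by `src_mcv` into
/// `dst_reg`, handling moves between general-purpose, segment, x87, and
/// SSE registers as well as loads from immediates, memory, frame slots,
/// and linker relocations.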
fn genSetReg(
    self: *CodeGen,
    dst_reg: Register,
    ty: Type,
    src_mcv: MCValue,
    opts: CopyOptions,
) InnerError!void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const abi_size: u32 = @intCast(ty.abiSize(zcu));
    if (ty.bitSize(zcu) > dst_reg.bitSize())
        return self.fail("genSetReg called with a value larger than dst_reg", .{});
    switch (src_mcv) {
        .none,
        .unreach,
        .dead,
        .register_overflow,
        .elementwise_regs_then_frame,
        .reserved_frame,
        => unreachable,
        .undef => if (opts.safety) switch (dst_reg.class()) {
            .general_purpose => switch (abi_size) {
                1 => try self.asmRegisterImmediate(.{ ._, .mov }, dst_reg.to8(), .u(0xAA)),
                2 => try self.asmRegisterImmediate(.{ ._, .mov }, dst_reg.to16(), .u(0xAAAA)),
                3...4 => try self.asmRegisterImmediate(
                    .{ ._, .mov },
                    dst_reg.to32(),
                    .s(@as(i32, @bitCast(@as(u32, 0xAAAAAAAA)))),
                ),
                5...8 => try self.asmRegisterImmediate(
                    .{ ._, .mov },
                    dst_reg.to64(),
                    .u(0xAAAAAAAAAAAAAAAA),
                ),
                else => unreachable,
            },
            .segment, .x87, .mmx, .sse => {
                const full_ty = try pt.vectorType(.{
                    .len = self.vectorSize(.float),
                    .child = .u8_type,
                });
                try self.genSetReg(dst_reg, full_ty, try self.genTypedValue(
                    .fromInterned(try pt.intern(.{ .aggregate = .{
                        .ty = full_ty.toIntern(),
                        .storage = .{ .repeated_elem = (try pt.intValue(.u8, 0xaa)).toIntern() },
                    } })),
                ), opts);
            },
            .ip, .cr, .dr => unreachable,
        },
        .eflags => |cc| try self.asmSetccRegister(cc, dst_reg.to8()),
        .immediate => |imm| {
            if (imm == 0) {
                // 32-bit moves zero-extend to 64-bit, so xoring the 32-bit
                // register is the fastest way to zero a register.
                try self.spillEflagsIfOccupied();
                try self.asmRegisterRegister(.{ ._, .xor }, dst_reg.to32(), dst_reg.to32());
            } else if (abi_size > 4 and std.math.cast(u32, imm) != null) {
                // 32-bit moves zero-extend to 64-bit.
                try self.asmRegisterImmediate(.{ ._, .mov }, dst_reg.to32(), .u(imm));
            } else if (abi_size <= 4 and @as(i64, @bitCast(imm)) < 0) {
                try self.asmRegisterImmediate(
                    .{ ._, .mov },
                    registerAlias(dst_reg, abi_size),
                    .s(@intCast(@as(i64, @bitCast(imm)))),
                );
            } else {
                try self.asmRegisterImmediate(
                    .{ ._, .mov },
                    registerAlias(dst_reg, abi_size),
                    .u(imm),
                );
            }
        },
        .register => |src_reg| if (dst_reg.id() != src_reg.id()) switch (dst_reg.class()) {
            .general_purpose => switch (src_reg.class()) {
                .general_purpose => try self.asmRegisterRegister(
                    .{ ._, .mov },
                    registerAlias(dst_reg, abi_size),
                    registerAlias(src_reg, abi_size),
                ),
                .segment => try self.asmRegisterRegister(
                    .{ ._, .mov },
                    registerAlias(dst_reg, abi_size),
                    src_reg,
                ),
                .x87, .mmx, .ip, .cr, .dr => unreachable,
                .sse => if (self.hasFeature(.sse2)) try self.asmRegisterRegister(
                    switch (abi_size) {
                        1...4 => if (self.hasFeature(.avx)) .{ .v_d, .mov } else .{ ._d, .mov },
                        5...8 => if (self.hasFeature(.avx)) .{ .v_q, .mov } else .{ ._q, .mov },
                        else => unreachable,
                    },
                    registerAlias(dst_reg, @max(abi_size, 4)),
                    src_reg.to128(),
                ) else {
                    const frame_size = std.math.ceilPowerOfTwoAssert(u32, @max(abi_size, 4));
                    const frame_index = try self.allocFrameIndex(.init(.{
                        .size = frame_size,
                        .alignment = .fromNonzeroByteUnits(frame_size),
                    }));
                    try self.asmMemoryRegister(switch (frame_size) {
                        4 => .{ ._ss, .mov },
                        8 => .{ ._ps, .movl },
                        16 => .{ ._ps, .mov },
                        else => unreachable,
                    }, .{
                        .base = .{ .frame = frame_index },
                        .mod = .{ .rm = .{ .size = .fromSize(frame_size) } },
                    }, src_reg.to128());
                    try self.asmRegisterMemory(.{ ._, .mov }, registerAlias(dst_reg, abi_size), .{
                        .base = .{ .frame = frame_index },
                        .mod = .{ .rm = .{ .size = .fromSize(abi_size) } },
                    });
                },
            },
            .segment => try self.asmRegisterRegister(
                .{ ._, .mov },
                dst_reg,
                switch (src_reg.class()) {
                    .general_purpose, .segment => registerAlias(src_reg, abi_size),
                    .x87, .mmx, .ip, .cr, .dr => unreachable,
                    .sse => try self.copyToTmpRegister(ty, src_mcv),
                },
            ),
            .x87 => switch (src_reg.class()) {
                .general_purpose, .segment => unreachable,
                .x87 => switch (src_reg) {
                    .st0 => try self.asmRegister(.{ .f_, .st }, dst_reg),
                    .st1, .st2, .st3, .st4, .st5, .st6 => {
                        try self.asmRegister(.{ .f_, .ld }, src_reg);
                        assert(dst_reg != .st7);
                        try self.asmRegister(.{ .f_p, .st }, @enumFromInt(@intFromEnum(dst_reg) + 1));
                    },
                    else => unreachable,
                },
                .mmx, .sse, .ip, .cr, .dr => unreachable,
            },
            .mmx => unreachable,
            .sse => switch (src_reg.class()) {
                .general_purpose => if (self.hasFeature(.sse2)) try self.asmRegisterRegister(
                    switch (abi_size) {
                        1...4 => if (self.hasFeature(.avx)) .{ .v_d, .mov } else .{ ._d, .mov },
                        5...8 => if (self.hasFeature(.avx)) .{ .v_q, .mov } else .{ ._q, .mov },
                        else => unreachable,
                    },
                    dst_reg.to128(),
                    registerAlias(src_reg, @max(abi_size, 4)),
                ) else {
                    const frame_size = std.math.ceilPowerOfTwoAssert(u32, @max(abi_size, 4));
                    const frame_index = try self.allocFrameIndex(.init(.{
                        .size = frame_size,
                        .alignment = .fromNonzeroByteUnits(frame_size),
                    }));
                    try self.asmMemoryRegister(.{ ._, .mov }, .{
                        .base = .{ .frame = frame_index },
                        .mod = .{ .rm = .{ .size = .fromSize(abi_size) } },
                    }, registerAlias(src_reg, abi_size));
                    try self.asmRegisterMemory(switch (frame_size) {
                        4 => .{ ._ss, .mov },
                        8 => .{ ._ps, .movl },
                        16 => .{ ._ps, .mov },
                        else => unreachable,
                    }, dst_reg.to128(), .{
                        .base = .{ .frame = frame_index },
                        .mod = .{ .rm = .{ .size = .fromSize(frame_size) } },
                    });
                },
                .segment => try self.genSetReg(
                    dst_reg,
                    ty,
                    .{ .register = try self.copyToTmpRegister(ty, src_mcv) },
                    opts,
                ),
                .x87, .mmx, .ip, .cr, .dr => unreachable,
                .sse => try self.asmRegisterRegister(
                    @as(?Mir.Inst.FixedTag, switch (ty.scalarType(zcu).zigTypeTag(zcu)) {
                        else => switch (abi_size) {
                            1...16 => if (self.hasFeature(.avx))
                                .{ .v_dqa, .mov }
                            else if (self.hasFeature(.sse2))
                                .{ ._dqa, .mov }
                            else
                                .{ ._ps, .mova },
                            17...32 => if (self.hasFeature(.avx)) .{ .v_dqa, .mov } else null,
                            else => null,
                        },
                        .float => switch (ty.scalarType(zcu).floatBits(self.target.*)) {
                            16, 128 => switch (abi_size) {
                                2...16 => if (self.hasFeature(.avx))
                                    .{ .v_dqa, .mov }
                                else if (self.hasFeature(.sse2))
                                    .{ ._dqa, .mov }
                                else
                                    .{ ._ps, .mova },
                                17...32 => if (self.hasFeature(.avx)) .{ .v_dqa, .mov } else null,
                                else => null,
                            },
                            32 => if (self.hasFeature(.avx)) .{ .v_ps, .mova } else .{ ._ps, .mova },
                            64 => if (self.hasFeature(.avx))
                                .{ .v_pd, .mova }
                            else if (self.hasFeature(.sse2))
                                .{ ._pd, .mova }
                            else
                                .{ ._ps, .mova },
                            80 => null,
                            else => unreachable,
                        },
                    }) orelse return self.fail("TODO implement genSetReg for {}", .{ty.fmt(pt)}),
                    registerAlias(dst_reg, abi_size),
                    registerAlias(src_reg, abi_size),
                ),
            },
            .ip, .cr, .dr => unreachable,
        },
        inline .register_pair,
        .register_triple,
        .register_quadruple,
        => |src_regs| switch (dst_reg.class()) {
            .general_purpose => switch (src_regs[0].class()) {
                .general_purpose => try self.genSetReg(dst_reg, ty, .{ .register = src_regs[0] }, opts),
                else => unreachable,
            },
            .sse => switch (src_regs[0].class()) {
                .general_purpose => if (abi_size <= 16) {
                    if (self.hasFeature(.avx)) {
                        try self.asmRegisterRegister(.{ .v_q, .mov }, dst_reg.to128(), src_regs[0].to64());
                        try self.asmRegisterRegisterRegisterImmediate(
                            .{ .vp_q, .insr },
                            dst_reg.to128(),
                            dst_reg.to128(),
                            src_regs[1].to64(),
                            .u(1),
                        );
                    } else if (self.hasFeature(.sse4_1)) {
                        try self.asmRegisterRegister(.{ ._q, .mov }, dst_reg.to128(), src_regs[0].to64());
                        try self.asmRegisterRegisterImmediate(.{ .p_q, .insr }, dst_reg.to128(), src_regs[1].to64(), .u(1));
                    } else {
                        const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.sse);
                        const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
                        defer self.register_manager.unlockReg(tmp_lock);

                        try self.asmRegisterRegister(.{ ._q, .mov }, dst_reg.to128(), src_regs[0].to64());
                        try self.asmRegisterRegister(.{ ._q, .mov }, tmp_reg.to128(), src_regs[1].to64());
                        try self.asmRegisterRegister(.{ ._ps, .movlh }, dst_reg.to128(), tmp_reg.to128());
                    }
                } else unreachable,
                else => unreachable,
            },
            else => unreachable,
        },
        .register_offset,
        .indirect,
        .load_frame,
        .lea_frame,
        => try @as(MoveStrategy, switch (src_mcv) {
            .register_offset => |reg_off| switch (reg_off.off) {
                0 => return self.genSetReg(dst_reg, ty, .{ .register = reg_off.reg }, opts),
                else => .{ .move = .{ ._, .lea } },
            },
            .indirect => try self.moveStrategy(ty, dst_reg.class(), false),
            .load_frame => |frame_addr| try self.moveStrategy(
                ty,
                dst_reg.class(),
                self.getFrameAddrAlignment(frame_addr).compare(.gte, .fromLog2Units(
                    std.math.log2_int_ceil(u10, @divExact(dst_reg.bitSize(), 8)),
                )),
            ),
            .lea_frame => .{ .move = .{ ._, .lea } },
            else => unreachable,
        }).read(self, registerAlias(dst_reg, abi_size), switch (src_mcv) {
            .register_offset, .indirect => |reg_off| .{
                .base = .{ .reg = reg_off.reg.to64() },
                .mod = .{ .rm = .{
                    .size = self.memSize(ty),
                    .disp = reg_off.off,
                } },
            },
            .load_frame, .lea_frame => |frame_addr| .{
                .base = .{ .frame = frame_addr.index },
                .mod = .{ .rm = .{
                    .size = self.memSize(ty),
                    .disp = frame_addr.off,
                } },
            },
            else => unreachable,
        }),
        .register_mask => |src_reg_mask| {
            assert(src_reg_mask.reg.class() == .sse);
            const has_avx = self.hasFeature(.avx);
            const bits_reg = switch (dst_reg.class()) {
                .general_purpose => dst_reg,
                else => try self.register_manager.allocReg(null, abi.RegisterClass.gp),
            };
            const bits_lock = self.register_manager.lockReg(bits_reg);
            defer if (bits_lock) |lock| self.register_manager.unlockReg(lock);

            const pack_reg = switch (src_reg_mask.info.scalar) {
                else => src_reg_mask.reg,
                .word => try self.register_manager.allocReg(null, abi.RegisterClass.sse),
            };
            const pack_lock = self.register_manager.lockReg(pack_reg);
            defer if (pack_lock) |lock| self.register_manager.unlockReg(lock);

            var mask_size: u32 = @intCast(ty.vectorLen(zcu) * @divExact(src_reg_mask.info.scalar.bitSize(self.target), 8));
            switch (src_reg_mask.info.scalar) {
                else => {},
                .word => {
                    const src_alias = registerAlias(src_reg_mask.reg, mask_size);
                    const pack_alias = registerAlias(pack_reg, mask_size);
                    if (has_avx) {
                        try self.asmRegisterRegisterRegister(.{ .vp_b, .ackssw }, pack_alias, src_alias, src_alias);
                    } else {
                        try self.asmRegisterRegister(.{ ._dqa, .mov }, pack_alias, src_alias);
                        try self.asmRegisterRegister(.{ .p_b, .ackssw }, pack_alias, pack_alias);
                    }
                    mask_size = std.math.divCeil(u32, mask_size, 2) catch unreachable;
                },
            }
            try self.asmRegisterRegister(.{ switch (src_reg_mask.info.scalar) {
                .byte, .word => if (has_avx) .vp_b else .p_b,
                .dword => if (has_avx) .v_ps else ._ps,
                .qword => if (has_avx) .v_pd else ._pd,
                else => unreachable,
            }, .movmsk }, bits_reg.to32(), registerAlias(pack_reg, mask_size));
            if (src_reg_mask.info.inverted) try self.asmRegister(.{ ._, .not }, registerAlias(bits_reg, abi_size));
            try self.genSetReg(dst_reg, ty, .{ .register = bits_reg }, .{});
        },
        .memory, .load_symbol, .load_direct, .load_got, .load_tlv => {
            switch (src_mcv) {
                .memory => |addr| if (std.math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr|
                    return (try self.moveStrategy(
                        ty,
                        dst_reg.class(),
                        ty.abiAlignment(zcu).check(@as(u32, @bitCast(small_addr))),
                    )).read(self, registerAlias(dst_reg, abi_size), .{
                        .base = .{ .reg = .ds },
                        .mod = .{ .rm = .{
                            .size = self.memSize(ty),
                            .disp = small_addr,
                        } },
                    }),
                .load_symbol => |sym_off| switch (dst_reg.class()) {
                    .general_purpose => {
                        assert(sym_off.off == 0);
                        try self.asmRegisterMemory(.{ ._, .mov }, registerAlias(dst_reg, abi_size), .{
                            .base = .{ .reloc = sym_off.sym_index },
                            .mod = .{ .rm = .{
                                .size = self.memSize(ty),
                                .disp = sym_off.off,
                            } },
                        });
                        return;
                    },
                    .segment, .mmx, .ip, .cr, .dr => unreachable,
                    .x87, .sse => {},
                },
                .load_direct => |sym_index| switch (dst_reg.class()) {
                    .general_purpose => {
                        _ = try self.addInst(.{
                            .tag = .mov,
                            .ops = .direct_reloc,
                            .data = .{ .rx = .{
                                .r1 = registerAlias(dst_reg, abi_size),
                                .payload = try self.addExtra(bits.SymbolOffset{ .sym_index = sym_index }),
                            } },
                        });
                        return;
                    },
                    .segment, .mmx, .ip, .cr, .dr => unreachable,
                    .x87, .sse => {},
                },
                .load_got, .load_tlv => {},
                else => unreachable,
            }

            const addr_reg = try self.copyToTmpRegister(.usize, src_mcv.address());
            const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
            defer self.register_manager.unlockReg(addr_lock);

            try (try self.moveStrategy(ty, dst_reg.class(), false)).read(self, registerAlias(dst_reg, abi_size), .{
                .base = .{ .reg = addr_reg.to64() },
                .mod = .{ .rm = .{ .size = self.memSize(ty) } },
            });
        },
        .lea_symbol => |sym_off| switch (self.bin_file.tag) {
            .elf, .macho => try self.asmRegisterMemory(
                .{ ._, .lea },
                dst_reg.to64(),
                .{
                    .base = .{ .reloc = sym_off.sym_index },
                    .mod = .{ .rm = .{
                        .size = .qword,
                        .disp = sym_off.off,
                    } },
                },
            ),
            else => return self.fail("TODO emit symbol sequence on {s}", .{
                @tagName(self.bin_file.tag),
            }),
        },
        .lea_direct, .lea_got => |sym_index| _ = try self.addInst(.{
            .tag = switch (src_mcv) {
                .lea_direct => .lea,
                .lea_got => .mov,
                else => unreachable,
            },
            .ops = switch (src_mcv) {
                .lea_direct => .direct_reloc,
                .lea_got => .got_reloc,
                else => unreachable,
            },
            .data = .{ .rx = .{
                .r1 = dst_reg.to64(),
                .payload = try self.addExtra(bits.SymbolOffset{ .sym_index = sym_index }),
            } },
        }),
        .lea_tlv => unreachable, // TODO: remove this
        .air_ref => |src_ref| try self.genSetReg(dst_reg, ty, try self.resolveInst(src_ref), opts),
    }
}
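/// Emits code to store a value of type `ty` described by `src_mcv` into
/// memory at `base` plus `disp`, splitting large immediates and
/// multi-register values into parts and spilling through a temporary
/// frame slot when a register cannot be stored directly.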
fn genSetMem(
    self: *CodeGen,
    base: Memory.Base,
    disp: i32,
    ty: Type,
    src_mcv: MCValue,
    opts: CopyOptions,
) InnerError!void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const abi_size: u32 = @intCast(ty.abiSize(zcu));
    const dst_ptr_mcv: MCValue = switch (base) {
        .none => .{ .immediate = @bitCast(@as(i64, disp)) },
        .reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } },
        .frame => |base_frame_index| .{ .lea_frame = .{ .index = base_frame_index, .off = disp } },
        .table => unreachable,
        .reloc => |sym_index| .{ .lea_symbol = .{ .sym_index = sym_index, .off = disp } },
    };
    switch (src_mcv) {
        .none,
        .unreach,
        .dead,
        .elementwise_regs_then_frame,
        .reserved_frame,
        => unreachable,
        .undef => if (opts.safety) try self.genInlineMemset(
            dst_ptr_mcv,
            src_mcv,
            .{ .immediate = abi_size },
            opts,
        ),
        .immediate => |imm| switch (abi_size) {
            1, 2, 4 => {
                const immediate: Immediate = switch (if (ty.isAbiInt(zcu))
                    ty.intInfo(zcu).signedness
                else
                    .unsigned) {
                    .signed => .s(@truncate(@as(i64, @bitCast(imm)))),
                    .unsigned => .u(@as(u32, @intCast(imm))),
                };
                try self.asmMemoryImmediate(
                    .{ ._, .mov },
                    .{ .base = base, .mod = .{ .rm = .{
                        .size = .fromSize(abi_size),
                        .disp = disp,
                    } } },
                    immediate,
                );
            },
            3, 5...7 => unreachable,
            else => if (std.math.cast(i32, @as(i64, @bitCast(imm)))) |small| {
                try self.asmMemoryImmediate(
                    .{ ._, .mov },
                    .{ .base = base, .mod = .{ .rm = .{
                        .size = .fromSize(abi_size),
                        .disp = disp,
                    } } },
                    .s(small),
                );
            } else {
                var offset: i32 = 0;
                while (offset < abi_size) : (offset += 4) try self.asmMemoryImmediate(
                    .{ ._, .mov },
                    .{ .base = base, .mod = .{ .rm = .{
                        .size = .dword,
                        .disp = disp + offset,
                    } } },
                    if (ty.isSignedInt(zcu)) .s(
                        @truncate(@as(i64, @bitCast(imm)) >> (std.math.cast(u6, offset * 8) orelse 63)),
                    ) else .u(
                        @as(u32, @truncate(if (std.math.cast(u6, offset * 8)) |shift| imm >> shift else 0)),
                    ),
                );
            },
        },
        .eflags => |cc| try self.asmSetccMemory(cc, .{ .base = base, .mod = .{
            .rm = .{ .size = .byte, .disp = disp },
        } }),
        .register => |src_reg| {
            const mem_size = switch (base) {
                .frame => |base_fi| mem_size: {
                    assert(disp >= 0);
                    const frame_abi_size = self.frame_allocs.items(.abi_size)[@intFromEnum(base_fi)];
                    const frame_spill_pad = self.frame_allocs.items(.spill_pad)[@intFromEnum(base_fi)];
                    assert(frame_abi_size - frame_spill_pad - disp >= abi_size);
                    break :mem_size if (frame_abi_size - frame_spill_pad - disp == abi_size)
                        frame_abi_size
                    else
                        abi_size;
                },
                else => abi_size,
            };
            const src_alias = registerAlias(src_reg, abi_size);
            const src_size: u32 = @intCast(switch (src_alias.class()) {
                .general_purpose, .segment, .x87, .ip, .cr, .dr => @divExact(src_alias.bitSize(), 8),
                .mmx, .sse => abi_size,
            });
            const src_align: InternPool.Alignment = .fromNonzeroByteUnits(
                std.math.ceilPowerOfTwoAssert(u32, src_size),
            );
            if (src_size > mem_size) {
                const frame_index = try self.allocFrameIndex(.init(.{
                    .size = src_size,
                    .alignment = src_align,
                }));
                const frame_mcv: MCValue = .{ .load_frame = .{ .index = frame_index } };
                try (try self.moveStrategy(ty, src_alias.class(), true)).write(
                    self,
                    .{ .base = .{ .frame = frame_index }, .mod = .{ .rm = .{
                        .size = .fromSize(src_size),
                    } } },
                    src_alias,
                );
                try self.genSetMem(base, disp, ty, frame_mcv, opts);
                try self.freeValue(frame_mcv);
            } else try (try self.moveStrategy(ty, src_alias.class(), switch (base) {
                .none => src_align.check(@as(u32, @bitCast(disp))),
                .reg => |reg| switch (reg) {
                    .es, .cs, .ss, .ds => src_align.check(@as(u32, @bitCast(disp))),
                    else => false,
                },
                .frame => |frame_index| self.getFrameAddrAlignment(.{
                    .index = frame_index,
                    .off = disp,
                }).compare(.gte, src_align),
                .table => unreachable,
                .reloc => false,
            })).write(
                self,
                .{ .base = base, .mod = .{ .rm = .{
                    .size = .fromBitSize(@min(
                        self.memSize(ty).bitSize(self.target),
                        src_alias.bitSize(),
                    )),
                    .disp = disp,
                } } },
                src_alias,
            );
        },
        inline .register_pair, .register_triple, .register_quadruple => |src_regs| {
            var part_disp: i32 = disp;
            for (try self.splitType(src_regs.len, ty), src_regs) |src_ty, src_reg| {
                try self.genSetMem(base, part_disp, src_ty, .{ .register = src_reg }, opts);
                part_disp += @intCast(src_ty.abiSize(zcu));
            }
        },
        .register_overflow => |ro| switch (ty.zigTypeTag(zcu)) {
            .@"struct" => {
                try self.genSetMem(
                    base,
                    disp + @as(i32, @intCast(ty.structFieldOffset(0, zcu))),
                    ty.fieldType(0, zcu),
                    .{ .register = ro.reg },
                    opts,
                );
                try self.genSetMem(
                    base,
                    disp + @as(i32, @intCast(ty.structFieldOffset(1, zcu))),
                    ty.fieldType(1, zcu),
                    .{ .eflags = ro.eflags },
                    opts,
                );
            },
            .optional => {
                assert(!ty.optionalReprIsPayload(zcu));
                const child_ty = ty.optionalChild(zcu);
                try self.genSetMem(base, disp, child_ty, .{ .register = ro.reg }, opts);
                try self.genSetMem(
                    base,
                    disp + @as(i32, @intCast(child_ty.abiSize(zcu))),
                    .bool,
                    .{ .eflags = ro.eflags },
                    opts,
                );
            },
            else => return self.fail("TODO implement genSetMem for {s} of {}", .{
                @tagName(src_mcv), ty.fmt(pt),
            }),
        },
        .register_offset => |reg_off| {
            const src_reg = self.copyToTmpRegister(ty, src_mcv) catch |err| switch (err) {
                error.OutOfRegisters => {
                    const src_reg = registerAlias(reg_off.reg, abi_size);
                    try self.asmRegisterMemory(.{ ._, .lea }, src_reg, .{
                        .base = .{ .reg = src_reg },
                        .mod = .{ .rm = .{
                            .size = .qword,
                            .disp = reg_off.off,
                        } },
                    });
                    try self.genSetMem(base, disp, ty, .{ .register = reg_off.reg }, opts);
                    return self.asmRegisterMemory(.{ ._, .lea }, src_reg, .{
                        .base = .{ .reg = src_reg },
                        .mod = .{ .rm = .{
                            .size = .qword,
                            .disp = -reg_off.off,
                        } },
                    });
                },
                else => |e| return e,
            };
            const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
            defer self.register_manager.unlockReg(src_lock);

            try self.genSetMem(base, disp, ty, .{ .register = src_reg }, opts);
        },
        .register_mask => {
            const src_reg = try self.copyToTmpRegister(ty, src_mcv);
            const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
            defer self.register_manager.unlockReg(src_lock);

            try self.genSetMem(base, disp, ty, .{ .register = src_reg }, opts);
        },
        .memory,
        .indirect,
        .load_direct,
        .lea_direct,
        .load_got,
        .lea_got,
        .load_tlv,
        .lea_tlv,
        .load_frame,
        .lea_frame,
        .load_symbol,
        .lea_symbol,
        => switch (abi_size) {
            0 => {},
            1, 2, 4, 8 => {
                const src_reg = try self.copyToTmpRegister(ty, src_mcv);
                const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
                defer self.register_manager.unlockReg(src_lock);

                try self.genSetMem(base, disp, ty, .{ .register = src_reg }, opts);
            },
            else => try self.genInlineMemcpy(dst_ptr_mcv, src_mcv.address(), .{ .immediate = abi_size }, .{ .no_alias = true }),
        },
        .air_ref => |src_ref| try self.genSetMem(base, disp, ty, try self.resolveInst(src_ref), opts),
    }
}
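/// Emits an inline memory copy of `len` bytes from `src_ptr` to `dst_ptr`.
/// Small constant lengths that fit in a register use a single load/store
/// pair when a scratch register is available; everything else falls back
/// to `rep movsb` via rsi/rdi/rcx.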
fn genInlineMemcpy(self: *CodeGen, dst_ptr: MCValue, src_ptr: MCValue, len: MCValue, opts: struct {
    no_alias: bool,
}) InnerError!void {
    if (opts.no_alias and dst_ptr.isAddress() and src_ptr.isAddress()) switch (len) {
        else => {},
        .immediate => |len_imm| switch (len_imm) {
            else => {},
            1 => if (self.register_manager.tryAllocReg(null, abi.RegisterClass.gp)) |reg| {
                try self.asmRegisterMemory(.{ ._, .mov }, reg.to8(), try src_ptr.deref().mem(self, .{ .size = .byte }));
                try self.asmMemoryRegister(.{ ._, .mov }, try dst_ptr.deref().mem(self, .{ .size = .byte }), reg.to8());
                return;
            },
            2 => if (self.register_manager.tryAllocReg(null, abi.RegisterClass.gp)) |reg| {
                try self.asmRegisterMemory(.{ ._, .mov }, reg.to16(), try src_ptr.deref().mem(self, .{ .size = .word }));
                try self.asmMemoryRegister(.{ ._, .mov }, try dst_ptr.deref().mem(self, .{ .size = .word }), reg.to16());
                return;
            },
            4 => if (self.register_manager.tryAllocReg(null, abi.RegisterClass.gp)) |reg| {
                try self.asmRegisterMemory(.{ ._, .mov }, reg.to32(), try src_ptr.deref().mem(self, .{ .size = .dword }));
                try self.asmMemoryRegister(.{ ._, .mov }, try dst_ptr.deref().mem(self, .{ .size = .dword }), reg.to32());
                return;
            },
            8 => if (self.target.cpu.arch == .x86_64) {
                if (self.register_manager.tryAllocReg(null, abi.RegisterClass.gp)) |reg| {
                    try self.asmRegisterMemory(.{ ._, .mov }, reg.to64(), try src_ptr.deref().mem(self, .{ .size = .qword }));
                    try self.asmMemoryRegister(.{ ._, .mov }, try dst_ptr.deref().mem(self, .{ .size = .qword }), reg.to64());
                    return;
                }
            },
            16 => if (self.hasFeature(.avx)) {
                if (self.register_manager.tryAllocReg(null, abi.RegisterClass.sse)) |reg| {
                    try self.asmRegisterMemory(.{ .v_dqu, .mov }, reg.to128(), try src_ptr.deref().mem(self, .{ .size = .xword }));
                    try self.asmMemoryRegister(.{ .v_dqu, .mov }, try dst_ptr.deref().mem(self, .{ .size = .xword }), reg.to128());
                    return;
                }
            } else if (self.hasFeature(.sse2)) {
                if (self.register_manager.tryAllocReg(null, abi.RegisterClass.sse)) |reg| {
                    try self.asmRegisterMemory(.{ ._dqu, .mov }, reg.to128(), try src_ptr.deref().mem(self, .{ .size = .xword }));
                    try self.asmMemoryRegister(.{ ._dqu, .mov }, try dst_ptr.deref().mem(self, .{ .size = .xword }), reg.to128());
                    return;
                }
            } else if (self.hasFeature(.sse)) {
                if (self.register_manager.tryAllocReg(null, abi.RegisterClass.sse)) |reg| {
                    try self.asmRegisterMemory(.{ ._ps, .movu }, reg.to128(), try src_ptr.deref().mem(self, .{ .size = .xword }));
                    try self.asmMemoryRegister(.{ ._ps, .movu }, try dst_ptr.deref().mem(self, .{ .size = .xword }), reg.to128());
                    return;
                }
            },
            32 => if (self.hasFeature(.avx)) {
                if (self.register_manager.tryAllocReg(null, abi.RegisterClass.sse)) |reg| {
                    try self.asmRegisterMemory(.{ .v_dqu, .mov }, reg.to256(), try src_ptr.deref().mem(self, .{ .size = .yword }));
                    try self.asmMemoryRegister(.{ .v_dqu, .mov }, try dst_ptr.deref().mem(self, .{ .size = .yword }), reg.to256());
                    return;
                }
            },
        },
    };
    try self.spillRegisters(&.{ .rsi, .rdi, .rcx });
    try self.genSetReg(.rsi, .usize, src_ptr, .{});
    try self.genSetReg(.rdi, .usize, dst_ptr, .{});
    try self.genSetReg(.rcx, .usize, len, .{});
    try self.asmOpOnly(.{ .@"rep _sb", .mov });
}
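/// Emits an inline memory fill of `len` bytes at `dst_ptr` with the byte
/// `value`, using `rep stosb` via rdi/al/rcx.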
fn genInlineMemset(
    self: *CodeGen,
    dst_ptr: MCValue,
    value: MCValue,
    len: MCValue,
    opts: CopyOptions,
) InnerError!void {
    try self.spillRegisters(&.{ .rdi, .al, .rcx });
    try self.genSetReg(.rdi, .usize, dst_ptr, .{});
    try self.genSetReg(.al, .u8, value, opts);
    try self.genSetReg(.rcx, .usize, len, .{});
    try self.asmOpOnly(.{ .@"rep _sb", .sto });
}
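/// Emits a reference to (or call of) the external symbol `callee`,
/// optionally from library `lib`. Currently only implemented for COFF,
/// where the address is loaded through an import relocation.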
fn genExternSymbolRef(
    self: *CodeGen,
    comptime tag: Mir.Inst.Tag,
    lib: ?[]const u8,
    callee: []const u8,
) InnerError!void {
    if (self.bin_file.cast(.coff)) |coff_file| {
        const global_index = try coff_file.getGlobalSymbol(callee, lib);
        const scratch_reg = abi.getCAbiLinkerScratchReg(self.fn_type.fnCallingConvention(self.pt.zcu));
        _ = try self.addInst(.{
            .tag = .mov,
            .ops = .import_reloc,
            .data = .{ .rx = .{
                .r1 = scratch_reg,
                .payload = try self.addExtra(bits.SymbolOffset{
                    .sym_index = link.File.Coff.global_symbol_bit | global_index,
                }),
            } },
        });
        switch (tag) {
            .mov => {},
            .call => try self.asmRegister(.{ ._, .call }, scratch_reg),
            else => unreachable,
        }
    } else return self.fail("TODO implement calling extern functions", .{});
}
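/// Emits a load of (or call through) the address of a lazily generated
/// symbol, dispatching on the output object format (ELF, Plan 9, COFF,
/// Mach-O) and, for ELF, on whether position-independent code is enabled.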
fn genLazySymbolRef(
    self: *CodeGen,
    comptime tag: Mir.Inst.Tag,
    reg: Register,
    lazy_sym: link.File.LazySymbol,
) InnerError!void {
    const pt = self.pt;
    if (self.bin_file.cast(.elf)) |elf_file| {
        const zo = elf_file.zigObjectPtr().?;
        const sym_index = zo.getOrCreateMetadataForLazySymbol(elf_file, pt, lazy_sym) catch |err|
            return self.fail("{s} creating lazy symbol", .{@errorName(err)});
        if (self.mod.pic) {
            switch (tag) {
                .lea, .call => try self.genSetReg(reg, .usize, .{
                    .lea_symbol = .{ .sym_index = sym_index },
                }, .{}),
                .mov => try self.genSetReg(reg, .usize, .{
                    .load_symbol = .{ .sym_index = sym_index },
                }, .{}),
                else => unreachable,
            }
            switch (tag) {
                .lea, .mov => {},
                .call => try self.asmRegister(.{ ._, .call }, reg),
                else => unreachable,
            }
        } else switch (tag) {
            .lea, .mov => try self.asmRegisterMemory(.{ ._, tag }, reg.to64(), .{
                .base = .{ .reloc = sym_index },
                .mod = .{ .rm = .{ .size = .qword } },
            }),
            .call => try self.asmImmediate(.{ ._, .call }, .rel(.{ .sym_index = sym_index })),
            else => unreachable,
        }
    } else if (self.bin_file.cast(.plan9)) |p9_file| {
        const atom_index = p9_file.getOrCreateAtomForLazySymbol(pt, lazy_sym) catch |err|
            return self.fail("{s} creating lazy symbol", .{@errorName(err)});
        var atom = p9_file.getAtom(atom_index);
        _ = atom.getOrCreateOffsetTableEntry(p9_file);
        const got_addr = atom.getOffsetTableAddress(p9_file);
        const got_mem: Memory = .{
            .base = .{ .reg = .ds },
            .mod = .{ .rm = .{
                .size = .qword,
                .disp = @intCast(got_addr),
            } },
        };
        switch (tag) {
            .lea, .mov => try self.asmRegisterMemory(.{ ._, .mov }, reg.to64(), got_mem),
            .call => try self.asmMemory(.{ ._, .call }, got_mem),
            else => unreachable,
        }
        switch (tag) {
            .lea, .call => {},
            .mov => try self.asmRegisterMemory(
                .{ ._, tag },
                reg.to64(),
                .initSib(.qword, .{ .base = .{ .reg = reg.to64() } }),
            ),
            else => unreachable,
        }
    } else if (self.bin_file.cast(.coff)) |coff_file| {
        const atom_index = coff_file.getOrCreateAtomForLazySymbol(pt, lazy_sym) catch |err|
            return self.fail("{s} creating lazy symbol", .{@errorName(err)});
        const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
        switch (tag) {
            .lea, .call => try self.genSetReg(reg, .usize, .{ .lea_got = sym_index }, .{}),
            .mov => try self.genSetReg(reg, .usize, .{ .load_got = sym_index }, .{}),
            else => unreachable,
        }
        switch (tag) {
            .lea, .mov => {},
            .call => try self.asmRegister(.{ ._, .call }, reg),
            else => unreachable,
        }
    } else if (self.bin_file.cast(.macho)) |macho_file| {
        const zo = macho_file.getZigObject().?;
        const sym_index = zo.getOrCreateMetadataForLazySymbol(macho_file, pt, lazy_sym) catch |err|
            return self.fail("{s} creating lazy symbol", .{@errorName(err)});
        const sym = zo.symbols.items[sym_index];
        switch (tag) {
            .lea, .call => try self.genSetReg(reg, .usize, .{
                .lea_symbol = .{ .sym_index = sym.nlist_idx },
            }, .{}),
            .mov => try self.genSetReg(reg, .usize, .{
                .load_symbol = .{ .sym_index = sym.nlist_idx },
            }, .{}),
            else => unreachable,
        }
        switch (tag) {
            .lea, .mov => {},
            .call => try self.asmRegister(.{ ._, .call }, reg),
            else => unreachable,
        }
    } else {
        return self.fail("TODO implement genLazySymbol for x86_64 {s}", .{@tagName(self.bin_file.tag)});
    }
}
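/// Lowers the `int_from_ptr` AIR instruction: the pointer bits are reused
/// or copied unchanged into the result.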
fn airIntFromPtr(self: *CodeGen, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
    const result = result: {
        // TODO: handle case where the operand is a slice not a raw pointer
        const src_mcv = try self.resolveInst(un_op);
        if (self.reuseOperand(inst, un_op, 0, src_mcv)) break :result src_mcv;

        const dst_mcv = try self.allocRegOrMem(inst, true);
        const dst_ty = self.typeOfIndex(inst);
        try self.genCopy(dst_ty, dst_mcv, src_mcv, .{});
        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}
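/// Lowers the `bitcast` AIR instruction. Reuses the operand when the
/// destination register class, size, and alignment allow it; otherwise
/// copies, then truncates or sign/zero-extends the most significant limb
/// when the destination's bit size is smaller than its ABI size.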
fn airBitCast(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const dst_ty = self.typeOfIndex(inst);
    const src_ty = self.typeOf(ty_op.operand);

    const result = result: {
        const src_mcv = try self.resolveInst(ty_op.operand);
        if (dst_ty.isPtrAtRuntime(zcu) and src_ty.isPtrAtRuntime(zcu)) switch (src_mcv) {
            .lea_frame => break :result src_mcv,
            else => if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv,
        };

        const dst_rc = self.regSetForType(dst_ty);
        const src_rc = self.regSetForType(src_ty);

        const src_lock = if (src_mcv.getReg()) |src_reg| self.register_manager.lockReg(src_reg) else null;
        defer if (src_lock) |lock| self.register_manager.unlockReg(lock);

        const dst_mcv = if (src_mcv != .register_mask and
            (if (src_mcv.getReg()) |src_reg| src_reg.class() == .general_purpose else true) and
            dst_rc.supersetOf(src_rc) and dst_ty.abiSize(zcu) <= src_ty.abiSize(zcu) and
            dst_ty.abiAlignment(zcu).order(src_ty.abiAlignment(zcu)).compare(.lte) and
            self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: {
            const dst_mcv = try self.allocRegOrMem(inst, true);
            try self.genCopy(switch (src_mcv) {
                else => switch (std.math.order(dst_ty.abiSize(zcu), src_ty.abiSize(zcu))) {
                    .lt => dst_ty,
                    .eq => if (!dst_mcv.isBase() or src_mcv.isBase()) dst_ty else src_ty,
                    .gt => src_ty,
                },
                .register_mask => src_ty,
            }, dst_mcv, src_mcv, .{});
            break :dst dst_mcv;
        };

        if (dst_ty.isRuntimeFloat()) break :result dst_mcv;

        if (dst_ty.isAbiInt(zcu) and src_ty.isAbiInt(zcu) and
            dst_ty.intInfo(zcu).signedness == src_ty.intInfo(zcu).signedness) break :result dst_mcv;

        const abi_size = dst_ty.abiSize(zcu);
        const bit_size = dst_ty.bitSize(zcu);
        if (abi_size * 8 <= bit_size or dst_ty.isVector(zcu)) break :result dst_mcv;

        const dst_limbs_len = std.math.divCeil(u31, @intCast(bit_size), 64) catch unreachable;
        const high_mcv: MCValue = switch (dst_mcv) {
            .register => |dst_reg| .{ .register = dst_reg },
            .register_pair => |dst_regs| .{ .register = dst_regs[1] },
            else => dst_mcv.address().offset((dst_limbs_len - 1) * 8).deref(),
        };
        const high_reg = if (high_mcv.isRegister())
            high_mcv.getReg().?
        else
            try self.copyToTmpRegister(.usize, high_mcv);
        const high_lock = self.register_manager.lockReg(high_reg);
        defer if (high_lock) |lock| self.register_manager.unlockReg(lock);
        try self.truncateRegister(dst_ty, high_reg);
        if (!high_mcv.isRegister()) try self.genCopy(
            if (abi_size <= 8) dst_ty else .usize,
            high_mcv,
            .{ .register = high_reg },
            .{},
        );
        var offset = dst_limbs_len * 8;
        if (offset < abi_size) {
            const dst_signedness: std.builtin.Signedness = if (dst_ty.isAbiInt(zcu))
                dst_ty.intInfo(zcu).signedness
            else
                .unsigned;
            const ext_mcv: MCValue = ext_mcv: switch (dst_signedness) {
                .signed => {
                    try self.asmRegisterImmediate(.{ ._r, .sa }, high_reg, .u(63));
                    break :ext_mcv .{ .register = high_reg };
                },
                .unsigned => .{ .immediate = 0 },
            };
            while (offset < abi_size) : (offset += 8) {
                const limb_mcv: MCValue = switch (dst_mcv) {
                    .register => |dst_reg| .{ .register = dst_reg },
                    .register_pair => |dst_regs| .{ .register = dst_regs[@divExact(offset, 8)] },
                    else => dst_mcv.address().offset(offset).deref(),
                };
                const limb_lock = if (limb_mcv.isRegister())
                    self.register_manager.lockReg(limb_mcv.getReg().?)
                else
                    null;
                defer if (limb_lock) |lock| self.register_manager.unlockReg(lock);
                try self.genCopy(.usize, limb_mcv, ext_mcv, .{});
            }
        }
        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
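/// Lowers the `array_to_slice` AIR instruction by spilling a slice
/// (pointer plus comptime-known length) to a stack frame slot.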
fn airArrayToSlice(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;

    const slice_ty = self.typeOfIndex(inst);
    const ptr_ty = self.typeOf(ty_op.operand);
    const ptr = try self.resolveInst(ty_op.operand);
    const array_ty = ptr_ty.childType(zcu);
    const array_len = array_ty.arrayLen(zcu);

    const frame_index = try self.allocFrameIndex(.initSpill(slice_ty, zcu));
    try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr, .{});
    try self.genSetMem(
        .{ .frame = frame_index },
        @intCast(ptr_ty.abiSize(zcu)),
        .usize,
        .{ .immediate = array_len },
        .{},
    );

    const result = MCValue{ .load_frame = .{ .index = frame_index } };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
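/// Lowers the `float_from_int` AIR instruction. Uses cvtsi2ss/cvtsi2sd
/// (or the AVX forms) when the source fits in a general-purpose register;
/// otherwise calls the matching compiler-rt `__float*i*f` routine.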
fn airFloatFromInt(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;

    const dst_ty = self.typeOfIndex(inst);
    const dst_bits = dst_ty.floatBits(self.target.*);

    const src_ty = self.typeOf(ty_op.operand);
    const src_bits: u32 = @intCast(src_ty.bitSize(zcu));
    const src_signedness =
        if (src_ty.isAbiInt(zcu)) src_ty.intInfo(zcu).signedness else .unsigned;
    const src_size = std.math.divCeil(u32, @max(switch (src_signedness) {
        .signed => src_bits,
        .unsigned => src_bits + 1,
    }, 32), 8) catch unreachable;

    const result = result: {
        if (switch (dst_bits) {
            16, 80, 128 => true,
            32, 64 => src_size > 8,
            else => unreachable,
        }) {
            if (src_bits > 128) return self.fail("TODO implement airFloatFromInt from {} to {}", .{
                src_ty.fmt(pt), dst_ty.fmt(pt),
            });

            var callee_buf: ["__floatun?i?f".len]u8 = undefined;
            break :result try self.genCall(.{ .lib = .{
                .return_type = dst_ty.toIntern(),
                .param_types = &.{src_ty.toIntern()},
                .callee = std.fmt.bufPrint(&callee_buf, "__float{s}{c}i{c}f", .{
                    switch (src_signedness) {
                        .signed => "",
                        .unsigned => "un",
                    },
                    intCompilerRtAbiName(src_bits),
                    floatCompilerRtAbiName(dst_bits),
                }) catch unreachable,
            } }, &.{src_ty}, &.{.{ .air_ref = ty_op.operand }}, .{});
        }

        const src_mcv = try self.resolveInst(ty_op.operand);
        const src_reg = if (src_mcv.isRegister())
            src_mcv.getReg().?
        else
            try self.copyToTmpRegister(src_ty, src_mcv);
        const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
        defer self.register_manager.unlockReg(src_lock);

        if (src_bits < src_size * 8) try self.truncateRegister(src_ty, src_reg);

        const dst_reg = try self.register_manager.allocReg(inst, self.regSetForType(dst_ty));
        const dst_mcv = MCValue{ .register = dst_reg };
        const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
        defer self.register_manager.unlockReg(dst_lock);

        const mir_tag = @as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag(zcu)) {
            .float => switch (dst_ty.floatBits(self.target.*)) {
                32 => if (self.hasFeature(.avx)) .{ .v_ss, .cvtsi2 } else .{ ._ss, .cvtsi2 },
                64 => if (self.hasFeature(.avx)) .{ .v_sd, .cvtsi2 } else .{ ._sd, .cvtsi2 },
                16, 80, 128 => null,
                else => unreachable,
            },
            else => null,
        }) orelse return self.fail("TODO implement airFloatFromInt from {} to {}", .{
            src_ty.fmt(pt), dst_ty.fmt(pt),
        });
        const dst_alias = dst_reg.to128();
        const src_alias = registerAlias(src_reg, src_size);
        switch (mir_tag[0]) {
            .v_ss, .v_sd => try self.asmRegisterRegisterRegister(mir_tag, dst_alias, dst_alias, src_alias),
            else => try self.asmRegisterRegister(mir_tag, dst_alias, src_alias),
        }

        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
|
|
|
|
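// Float-to-int conversion via the truncating cvttss2si/cvttsd2si
// instructions; f16/f80/f128 sources and results wider than 8 bytes go
// through the compiler-rt `__fix*` family instead.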
fn airIntFromFloat(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;

    const dst_ty = self.typeOfIndex(inst);
    const dst_bits: u32 = @intCast(dst_ty.bitSize(zcu));
    const dst_signedness =
        if (dst_ty.isAbiInt(zcu)) dst_ty.intInfo(zcu).signedness else .unsigned;
    const dst_size = std.math.divCeil(u32, @max(switch (dst_signedness) {
        .signed => dst_bits,
        .unsigned => dst_bits + 1,
    }, 32), 8) catch unreachable;

    const src_ty = self.typeOf(ty_op.operand);
    const src_bits = src_ty.floatBits(self.target.*);

    const result = result: {
        if (switch (src_bits) {
            16, 80, 128 => true,
            32, 64 => dst_size > 8,
            else => unreachable,
        }) {
            if (dst_bits > 128) return self.fail("TODO implement airIntFromFloat from {} to {}", .{
                src_ty.fmt(pt), dst_ty.fmt(pt),
            });

            var callee_buf: ["__fixuns?f?i".len]u8 = undefined;
            break :result try self.genCall(.{ .lib = .{
                .return_type = dst_ty.toIntern(),
                .param_types = &.{src_ty.toIntern()},
                .callee = std.fmt.bufPrint(&callee_buf, "__fix{s}{c}f{c}i", .{
                    switch (dst_signedness) {
                        .signed => "",
                        .unsigned => "uns",
                    },
                    floatCompilerRtAbiName(src_bits),
                    intCompilerRtAbiName(dst_bits),
                }) catch unreachable,
            } }, &.{src_ty}, &.{.{ .air_ref = ty_op.operand }}, .{});
        }

        const src_mcv = try self.resolveInst(ty_op.operand);
        const src_reg = if (src_mcv.isRegister())
            src_mcv.getReg().?
        else
            try self.copyToTmpRegister(src_ty, src_mcv);
        const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
        defer self.register_manager.unlockReg(src_lock);

        const dst_reg = try self.register_manager.allocReg(inst, self.regSetForType(dst_ty));
        const dst_mcv = MCValue{ .register = dst_reg };
        const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
        defer self.register_manager.unlockReg(dst_lock);

        try self.asmRegisterRegister(
            switch (src_bits) {
                32 => if (self.hasFeature(.avx)) .{ .v_, .cvttss2si } else .{ ._, .cvttss2si },
                64 => if (self.hasFeature(.avx)) .{ .v_, .cvttsd2si } else .{ ._, .cvttsd2si },
                else => unreachable,
            },
            registerAlias(dst_reg, dst_size),
            src_reg.to128(),
        );

        if (dst_bits < dst_size * 8) try self.truncateRegister(dst_ty, dst_reg);

        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

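// Values of 8 bytes or less use `lock cmpxchg` with the expected value in
// rax; 16-byte values use `lock cmpxchg16b`, which takes the expected value
// in rdx:rax and the replacement in rcx:rbx.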
fn airCmpxchg(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
    const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data;

    const ptr_ty = self.typeOf(extra.ptr);
    const val_ty = self.typeOf(extra.expected_value);
    const val_abi_size: u32 = @intCast(val_ty.abiSize(pt.zcu));

    try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx });
    const regs_lock = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdx, .rbx, .rcx });
    defer for (regs_lock) |lock| self.register_manager.unlockReg(lock);

    const exp_mcv = try self.resolveInst(extra.expected_value);
    if (val_abi_size > 8) {
        const exp_addr_mcv: MCValue = switch (exp_mcv) {
            .memory, .indirect, .load_frame => exp_mcv.address(),
            else => .{ .register = try self.copyToTmpRegister(.usize, exp_mcv.address()) },
        };
        const exp_addr_lock =
            if (exp_addr_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null;
        defer if (exp_addr_lock) |lock| self.register_manager.unlockReg(lock);

        try self.genSetReg(.rax, .usize, exp_addr_mcv.deref(), .{});
        try self.genSetReg(.rdx, .usize, exp_addr_mcv.offset(8).deref(), .{});
    } else try self.genSetReg(.rax, val_ty, exp_mcv, .{});

    const new_mcv = try self.resolveInst(extra.new_value);
    const new_reg = if (val_abi_size > 8) new: {
        const new_addr_mcv: MCValue = switch (new_mcv) {
            .memory, .indirect, .load_frame => new_mcv.address(),
            else => .{ .register = try self.copyToTmpRegister(.usize, new_mcv.address()) },
        };
        const new_addr_lock =
            if (new_addr_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null;
        defer if (new_addr_lock) |lock| self.register_manager.unlockReg(lock);

        try self.genSetReg(.rbx, .usize, new_addr_mcv.deref(), .{});
        try self.genSetReg(.rcx, .usize, new_addr_mcv.offset(8).deref(), .{});
        break :new null;
    } else try self.copyToTmpRegister(val_ty, new_mcv);
    const new_lock = if (new_reg) |reg| self.register_manager.lockRegAssumeUnused(reg) else null;
    defer if (new_lock) |lock| self.register_manager.unlockReg(lock);

    const ptr_mcv = try self.resolveInst(extra.ptr);
    const mem_size: Memory.Size = .fromSize(val_abi_size);
    const ptr_mem: Memory = switch (ptr_mcv) {
        .immediate, .register, .register_offset, .lea_frame => try ptr_mcv.deref().mem(self, .{ .size = mem_size }),
        else => .{
            .base = .{ .reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv) },
            .mod = .{ .rm = .{ .size = mem_size } },
        },
    };
    switch (ptr_mem.mod) {
        .rm => {},
        .off => return self.fail("TODO airCmpxchg with {s}", .{@tagName(ptr_mcv)}),
    }
    const ptr_lock = switch (ptr_mem.base) {
        .none, .frame, .reloc => null,
        .reg => |reg| self.register_manager.lockReg(reg),
        .table => unreachable,
    };
    defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);

    try self.spillEflagsIfOccupied();
    if (val_abi_size <= 8) try self.asmMemoryRegister(
        .{ .@"lock _", .cmpxchg },
        ptr_mem,
        registerAlias(new_reg.?, val_abi_size),
    ) else try self.asmMemory(.{ .@"lock _16b", .cmpxchg }, ptr_mem);

    const result: MCValue = result: {
        if (self.liveness.isUnused(inst)) break :result .unreach;

        if (val_abi_size <= 8) {
            self.eflags_inst = inst;
            break :result .{ .register_overflow = .{ .reg = .rax, .eflags = .ne } };
        }

        const dst_mcv = try self.allocRegOrMem(inst, false);
        try self.genCopy(.usize, dst_mcv, .{ .register = .rax }, .{});
        try self.genCopy(.usize, dst_mcv.address().offset(8).deref(), .{ .register = .rdx }, .{});
        try self.genCopy(.bool, dst_mcv.address().offset(16).deref(), .{ .eflags = .ne }, .{});
        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ extra.ptr, extra.expected_value, extra.new_value });
}

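// Shared lowering for atomic rmw/store operations. Three strategies:
// `.lock` emits a single lock-prefixed instruction (mov/xchg/xadd/add/...),
// `.loop` emits a `lock cmpxchg`/`lock cmpxchg16b` retry loop (also used for
// float operands, which are combined through an SSE register), and
// `.libcall` is reserved for operands wider than 16 bytes (still TODO).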
fn atomicOp(
    self: *CodeGen,
    ptr_mcv: MCValue,
    val_mcv: MCValue,
    ptr_ty: Type,
    val_ty: Type,
    unused: bool,
    rmw_op: ?std.builtin.AtomicRmwOp,
    order: std.builtin.AtomicOrder,
) InnerError!MCValue {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ptr_lock = switch (ptr_mcv) {
        .register => |reg| self.register_manager.lockReg(reg),
        else => null,
    };
    defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);

    const val_lock = switch (val_mcv) {
        .register => |reg| self.register_manager.lockReg(reg),
        else => null,
    };
    defer if (val_lock) |lock| self.register_manager.unlockReg(lock);

    const val_abi_size: u32 = @intCast(val_ty.abiSize(zcu));
    const mem_size: Memory.Size = .fromSize(val_abi_size);
    const ptr_mem: Memory = switch (ptr_mcv) {
        .immediate, .register, .register_offset, .lea_frame => try ptr_mcv.deref().mem(self, .{ .size = mem_size }),
        else => .{
            .base = .{ .reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv) },
            .mod = .{ .rm = .{ .size = mem_size } },
        },
    };
    switch (ptr_mem.mod) {
        .rm => {},
        .off => return self.fail("TODO atomicOp with {s}", .{@tagName(ptr_mcv)}),
    }
    const mem_lock = switch (ptr_mem.base) {
        .none, .frame, .reloc => null,
        .reg => |reg| self.register_manager.lockReg(reg),
        .table => unreachable,
    };
    defer if (mem_lock) |lock| self.register_manager.unlockReg(lock);

    const use_sse = rmw_op orelse .Xchg != .Xchg and val_ty.isRuntimeFloat();
    const strat: enum { lock, loop, libcall } = if (use_sse) .loop else switch (rmw_op orelse .Xchg) {
        .Xchg,
        .Add,
        .Sub,
        => if (val_abi_size <= 8) .lock else if (val_abi_size <= 16) .loop else .libcall,
        .And,
        .Or,
        .Xor,
        => if (val_abi_size <= 8 and unused) .lock else if (val_abi_size <= 16) .loop else .libcall,
        .Nand,
        .Max,
        .Min,
        => if (val_abi_size <= 16) .loop else .libcall,
    };
    switch (strat) {
        .lock => {
            const mir_tag: Mir.Inst.FixedTag = if (rmw_op) |op| switch (op) {
                .Xchg => if (unused) .{ ._, .mov } else .{ ._g, .xch },
                .Add => .{ .@"lock _", if (unused) .add else .xadd },
                .Sub => .{ .@"lock _", if (unused) .sub else .xadd },
                .And => .{ .@"lock _", .@"and" },
                .Or => .{ .@"lock _", .@"or" },
                .Xor => .{ .@"lock _", .xor },
                else => unreachable,
            } else switch (order) {
                .unordered, .monotonic, .release, .acq_rel => .{ ._, .mov },
                .acquire => unreachable,
                .seq_cst => .{ ._g, .xch },
            };

            const dst_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
            const dst_mcv = MCValue{ .register = dst_reg };
            const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
            defer self.register_manager.unlockReg(dst_lock);

            try self.genSetReg(dst_reg, val_ty, val_mcv, .{});
            if (rmw_op == std.builtin.AtomicRmwOp.Sub and mir_tag[1] == .xadd) {
                try self.genUnOpMir(.{ ._, .neg }, val_ty, dst_mcv);
            }
            try self.asmMemoryRegister(mir_tag, ptr_mem, registerAlias(dst_reg, val_abi_size));

            return if (unused) .unreach else dst_mcv;
        },
        .loop => _ = if (val_abi_size <= 8) {
            const sse_reg: Register = if (use_sse)
                try self.register_manager.allocReg(null, abi.RegisterClass.sse)
            else
                undefined;
            const sse_lock =
                if (use_sse) self.register_manager.lockRegAssumeUnused(sse_reg) else undefined;
            defer if (use_sse) self.register_manager.unlockReg(sse_lock);

            const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
            const tmp_mcv = MCValue{ .register = tmp_reg };
            const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
            defer self.register_manager.unlockReg(tmp_lock);

            try self.asmRegisterMemory(.{ ._, .mov }, registerAlias(.rax, val_abi_size), ptr_mem);
            const loop: Mir.Inst.Index = @intCast(self.mir_instructions.len);
            if (!use_sse and rmw_op orelse .Xchg != .Xchg) {
                try self.genSetReg(tmp_reg, val_ty, .{ .register = .rax }, .{});
            }
            if (rmw_op) |op| if (use_sse) {
                const mir_tag = @as(?Mir.Inst.FixedTag, switch (op) {
                    .Add => switch (val_ty.floatBits(self.target.*)) {
                        32 => if (self.hasFeature(.avx)) .{ .v_ss, .add } else .{ ._ss, .add },
                        64 => if (self.hasFeature(.avx)) .{ .v_sd, .add } else .{ ._sd, .add },
                        else => null,
                    },
                    .Sub => switch (val_ty.floatBits(self.target.*)) {
                        32 => if (self.hasFeature(.avx)) .{ .v_ss, .sub } else .{ ._ss, .sub },
                        64 => if (self.hasFeature(.avx)) .{ .v_sd, .sub } else .{ ._sd, .sub },
                        else => null,
                    },
                    .Min => switch (val_ty.floatBits(self.target.*)) {
                        32 => if (self.hasFeature(.avx)) .{ .v_ss, .min } else .{ ._ss, .min },
                        64 => if (self.hasFeature(.avx)) .{ .v_sd, .min } else .{ ._sd, .min },
                        else => null,
                    },
                    .Max => switch (val_ty.floatBits(self.target.*)) {
                        32 => if (self.hasFeature(.avx)) .{ .v_ss, .max } else .{ ._ss, .max },
                        64 => if (self.hasFeature(.avx)) .{ .v_sd, .max } else .{ ._sd, .max },
                        else => null,
                    },
                    else => unreachable,
                }) orelse return self.fail("TODO implement atomicOp of {s} for {}", .{
                    @tagName(op), val_ty.fmt(pt),
                });
                try self.genSetReg(sse_reg, val_ty, .{ .register = .rax }, .{});
                switch (mir_tag[0]) {
                    .v_ss, .v_sd => if (val_mcv.isBase()) try self.asmRegisterRegisterMemory(
                        mir_tag,
                        sse_reg.to128(),
                        sse_reg.to128(),
                        try val_mcv.mem(self, .{ .size = self.memSize(val_ty) }),
                    ) else try self.asmRegisterRegisterRegister(
                        mir_tag,
                        sse_reg.to128(),
                        sse_reg.to128(),
                        (if (val_mcv.isRegister())
                            val_mcv.getReg().?
                        else
                            try self.copyToTmpRegister(val_ty, val_mcv)).to128(),
                    ),
                    ._ss, ._sd => if (val_mcv.isBase()) try self.asmRegisterMemory(
                        mir_tag,
                        sse_reg.to128(),
                        try val_mcv.mem(self, .{ .size = self.memSize(val_ty) }),
                    ) else try self.asmRegisterRegister(
                        mir_tag,
                        sse_reg.to128(),
                        (if (val_mcv.isRegister())
                            val_mcv.getReg().?
                        else
                            try self.copyToTmpRegister(val_ty, val_mcv)).to128(),
                    ),
                    else => unreachable,
                }
                try self.genSetReg(tmp_reg, val_ty, .{ .register = sse_reg }, .{});
            } else switch (op) {
                .Xchg => try self.genSetReg(tmp_reg, val_ty, val_mcv, .{}),
                .Add => try self.genBinOpMir(.{ ._, .add }, val_ty, tmp_mcv, val_mcv),
                .Sub => try self.genBinOpMir(.{ ._, .sub }, val_ty, tmp_mcv, val_mcv),
                .And => try self.genBinOpMir(.{ ._, .@"and" }, val_ty, tmp_mcv, val_mcv),
                .Nand => {
                    try self.genBinOpMir(.{ ._, .@"and" }, val_ty, tmp_mcv, val_mcv);
                    try self.genUnOpMir(.{ ._, .not }, val_ty, tmp_mcv);
                },
                .Or => try self.genBinOpMir(.{ ._, .@"or" }, val_ty, tmp_mcv, val_mcv),
                .Xor => try self.genBinOpMir(.{ ._, .xor }, val_ty, tmp_mcv, val_mcv),
                .Min, .Max => {
                    const cc: Condition = switch (if (val_ty.isAbiInt(zcu))
                        val_ty.intInfo(zcu).signedness
                    else
                        .unsigned) {
                        .unsigned => switch (op) {
                            .Min => .a,
                            .Max => .b,
                            else => unreachable,
                        },
                        .signed => switch (op) {
                            .Min => .g,
                            .Max => .l,
                            else => unreachable,
                        },
                    };

                    const cmov_abi_size = @max(val_abi_size, 2);
                    switch (val_mcv) {
                        .register => |val_reg| {
                            try self.genBinOpMir(.{ ._, .cmp }, val_ty, tmp_mcv, val_mcv);
                            try self.asmCmovccRegisterRegister(
                                cc,
                                registerAlias(tmp_reg, cmov_abi_size),
                                registerAlias(val_reg, cmov_abi_size),
                            );
                        },
                        .memory, .indirect, .load_frame => {
                            try self.genBinOpMir(.{ ._, .cmp }, val_ty, tmp_mcv, val_mcv);
                            try self.asmCmovccRegisterMemory(
                                cc,
                                registerAlias(tmp_reg, cmov_abi_size),
                                try val_mcv.mem(self, .{ .size = .fromSize(cmov_abi_size) }),
                            );
                        },
                        else => {
                            const mat_reg = try self.copyToTmpRegister(val_ty, val_mcv);
                            const mat_lock = self.register_manager.lockRegAssumeUnused(mat_reg);
                            defer self.register_manager.unlockReg(mat_lock);

                            try self.genBinOpMir(
                                .{ ._, .cmp },
                                val_ty,
                                tmp_mcv,
                                .{ .register = mat_reg },
                            );
                            try self.asmCmovccRegisterRegister(
                                cc,
                                registerAlias(tmp_reg, cmov_abi_size),
                                registerAlias(mat_reg, cmov_abi_size),
                            );
                        },
                    }
                },
            };
            try self.asmMemoryRegister(
                .{ .@"lock _", .cmpxchg },
                ptr_mem,
                registerAlias(tmp_reg, val_abi_size),
            );
            _ = try self.asmJccReloc(.ne, loop);
            return if (unused) .unreach else .{ .register = .rax };
        } else {
            try self.asmRegisterMemory(.{ ._, .mov }, .rax, .{
                .base = ptr_mem.base,
                .mod = .{ .rm = .{
                    .size = .qword,
                    .index = ptr_mem.mod.rm.index,
                    .scale = ptr_mem.mod.rm.scale,
                    .disp = ptr_mem.mod.rm.disp + 0,
                } },
            });
            try self.asmRegisterMemory(.{ ._, .mov }, .rdx, .{
                .base = ptr_mem.base,
                .mod = .{ .rm = .{
                    .size = .qword,
                    .index = ptr_mem.mod.rm.index,
                    .scale = ptr_mem.mod.rm.scale,
                    .disp = ptr_mem.mod.rm.disp + 8,
                } },
            });
            const loop: Mir.Inst.Index = @intCast(self.mir_instructions.len);
            const val_mem_mcv: MCValue = switch (val_mcv) {
                .memory, .indirect, .load_frame => val_mcv,
                else => .{ .indirect = .{
                    .reg = try self.copyToTmpRegister(.usize, val_mcv.address()),
                } },
            };
            const val_lo_mem = try val_mem_mcv.mem(self, .{ .size = .qword });
            const val_hi_mem = try val_mem_mcv.address().offset(8).deref().mem(self, .{ .size = .qword });
            if (rmw_op != std.builtin.AtomicRmwOp.Xchg) {
                try self.asmRegisterRegister(.{ ._, .mov }, .rbx, .rax);
                try self.asmRegisterRegister(.{ ._, .mov }, .rcx, .rdx);
            }
            if (rmw_op) |op| switch (op) {
                .Xchg => {
                    try self.asmRegisterMemory(.{ ._, .mov }, .rbx, val_lo_mem);
                    try self.asmRegisterMemory(.{ ._, .mov }, .rcx, val_hi_mem);
                },
                .Add => {
                    try self.asmRegisterMemory(.{ ._, .add }, .rbx, val_lo_mem);
                    try self.asmRegisterMemory(.{ ._, .adc }, .rcx, val_hi_mem);
                },
                .Sub => {
                    try self.asmRegisterMemory(.{ ._, .sub }, .rbx, val_lo_mem);
                    try self.asmRegisterMemory(.{ ._, .sbb }, .rcx, val_hi_mem);
                },
                .And => {
                    try self.asmRegisterMemory(.{ ._, .@"and" }, .rbx, val_lo_mem);
                    try self.asmRegisterMemory(.{ ._, .@"and" }, .rcx, val_hi_mem);
                },
                .Nand => {
                    try self.asmRegisterMemory(.{ ._, .@"and" }, .rbx, val_lo_mem);
                    try self.asmRegisterMemory(.{ ._, .@"and" }, .rcx, val_hi_mem);
                    try self.asmRegister(.{ ._, .not }, .rbx);
                    try self.asmRegister(.{ ._, .not }, .rcx);
                },
                .Or => {
                    try self.asmRegisterMemory(.{ ._, .@"or" }, .rbx, val_lo_mem);
                    try self.asmRegisterMemory(.{ ._, .@"or" }, .rcx, val_hi_mem);
                },
                .Xor => {
                    try self.asmRegisterMemory(.{ ._, .xor }, .rbx, val_lo_mem);
                    try self.asmRegisterMemory(.{ ._, .xor }, .rcx, val_hi_mem);
                },
                .Min, .Max => {
                    const cc: Condition = switch (if (val_ty.isAbiInt(zcu))
                        val_ty.intInfo(zcu).signedness
                    else
                        .unsigned) {
                        .unsigned => switch (op) {
                            .Min => .a,
                            .Max => .b,
                            else => unreachable,
                        },
                        .signed => switch (op) {
                            .Min => .g,
                            .Max => .l,
                            else => unreachable,
                        },
                    };

                    const tmp_reg = try self.copyToTmpRegister(.usize, .{ .register = .rcx });
                    const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
                    defer self.register_manager.unlockReg(tmp_lock);

                    try self.asmRegisterMemory(.{ ._, .cmp }, .rbx, val_lo_mem);
                    try self.asmRegisterMemory(.{ ._, .sbb }, tmp_reg, val_hi_mem);
                    try self.asmCmovccRegisterMemory(cc, .rbx, val_lo_mem);
                    try self.asmCmovccRegisterMemory(cc, .rcx, val_hi_mem);
                },
            };
            try self.asmMemory(.{ .@"lock _16b", .cmpxchg }, ptr_mem);
            _ = try self.asmJccReloc(.ne, loop);

            if (unused) return .unreach;
            const dst_mcv = try self.allocTempRegOrMem(val_ty, false);
            try self.asmMemoryRegister(.{ ._, .mov }, .{
                .base = .{ .frame = dst_mcv.load_frame.index },
                .mod = .{ .rm = .{
                    .size = .qword,
                    .disp = dst_mcv.load_frame.off + 0,
                } },
            }, .rax);
            try self.asmMemoryRegister(.{ ._, .mov }, .{
                .base = .{ .frame = dst_mcv.load_frame.index },
                .mod = .{ .rm = .{
                    .size = .qword,
                    .disp = dst_mcv.load_frame.off + 8,
                } },
            }, .rdx);
            return dst_mcv;
        },
        .libcall => return self.fail("TODO implement x86 atomic libcall", .{}),
    }
}

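// rax/rdx/rbx/rcx are spilled up front because the cmpxchg16b loop in
// atomicOp clobbers all four.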
fn airAtomicRmw(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
    const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data;

    try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx });
    const regs_lock = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdx, .rbx, .rcx });
    defer for (regs_lock) |lock| self.register_manager.unlockReg(lock);

    const unused = self.liveness.isUnused(inst);

    const ptr_ty = self.typeOf(pl_op.operand);
    const ptr_mcv = try self.resolveInst(pl_op.operand);

    const val_ty = self.typeOf(extra.operand);
    const val_mcv = try self.resolveInst(extra.operand);

    const result =
        try self.atomicOp(ptr_mcv, val_mcv, ptr_ty, val_ty, unused, extra.op(), extra.ordering());
    return self.finishAir(inst, result, .{ pl_op.operand, extra.operand, .none });
}

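// On x86_64 an aligned load of 8 bytes or less is already atomic, and the TSO
// memory model gives every load acquire semantics, so a plain load suffices
// here regardless of the requested ordering.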
fn airAtomicLoad(self: *CodeGen, inst: Air.Inst.Index) !void {
    const atomic_load = self.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;

    const ptr_ty = self.typeOf(atomic_load.ptr);
    const ptr_mcv = try self.resolveInst(atomic_load.ptr);
    const ptr_lock = switch (ptr_mcv) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);

    const dst_mcv =
        if (self.reuseOperand(inst, atomic_load.ptr, 0, ptr_mcv))
            ptr_mcv
        else
            try self.allocRegOrMem(inst, true);

    try self.load(dst_mcv, ptr_ty, ptr_mcv);
    return self.finishAir(inst, dst_mcv, .{ atomic_load.ptr, .none, .none });
}

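// Delegates to atomicOp with no rmw op: release and weaker orderings lower to
// a plain mov (sufficient under TSO), while seq_cst lowers to xchg to get the
// required full barrier.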
fn airAtomicStore(self: *CodeGen, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void {
    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;

    const ptr_ty = self.typeOf(bin_op.lhs);
    const ptr_mcv = try self.resolveInst(bin_op.lhs);

    const val_ty = self.typeOf(bin_op.rhs);
    const val_mcv = try self.resolveInst(bin_op.rhs);

    const result = try self.atomicOp(ptr_mcv, val_mcv, ptr_ty, val_ty, true, null, order);
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

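// Byte-sized elements are handled by genInlineMemset directly; larger
// elements store the first element and then replicate it with an overlapping
// forward copy (genInlineMemcpy with `no_alias = false`).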
fn airMemset(self: *CodeGen, inst: Air.Inst.Index, safety: bool) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;

    result: {
        if (!safety and (try self.resolveInst(bin_op.rhs)) == .undef) break :result;

        try self.spillRegisters(&.{ .rax, .rdi, .rsi, .rcx });
        const reg_locks = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdi, .rsi, .rcx });
        defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);

        const dst = try self.resolveInst(bin_op.lhs);
        const dst_ty = self.typeOf(bin_op.lhs);
        const dst_locks: [2]?RegisterLock = switch (dst) {
            .register => |dst_reg| .{ self.register_manager.lockRegAssumeUnused(dst_reg), null },
            .register_pair => |dst_regs| .{
                self.register_manager.lockRegAssumeUnused(dst_regs[0]),
                self.register_manager.lockRegAssumeUnused(dst_regs[1]),
            },
            else => @splat(null),
        };
        defer for (dst_locks) |dst_lock| if (dst_lock) |lock| self.register_manager.unlockReg(lock);

        const src_val = try self.resolveInst(bin_op.rhs);
        const elem_ty = self.typeOf(bin_op.rhs);
        const src_val_lock: ?RegisterLock = switch (src_val) {
            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
            else => null,
        };
        defer if (src_val_lock) |lock| self.register_manager.unlockReg(lock);

        const elem_abi_size: u31 = @intCast(elem_ty.abiSize(zcu));

        if (elem_abi_size == 1) {
            const dst_ptr: MCValue = switch (dst_ty.ptrSize(zcu)) {
                .slice => switch (dst) {
                    .register_pair => |dst_regs| .{ .register = dst_regs[0] },
                    else => dst,
                },
                .one => dst,
                .c, .many => unreachable,
            };
            const len: MCValue = switch (dst_ty.ptrSize(zcu)) {
                .slice => switch (dst) {
                    .register_pair => |dst_regs| .{ .register = dst_regs[1] },
                    else => dst.address().offset(8).deref(),
                },
                .one => .{ .immediate = dst_ty.childType(zcu).arrayLen(zcu) },
                .c, .many => unreachable,
            };
            const len_lock: ?RegisterLock = switch (len) {
                .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
                else => null,
            };
            defer if (len_lock) |lock| self.register_manager.unlockReg(lock);

            try self.genInlineMemset(dst_ptr, src_val, len, .{ .safety = safety });
            break :result;
        }

        // Store the first element, and then rely on memcpy copying forwards.
        // Length zero requires a runtime check - so we handle arrays specially
        // here to elide it.
        switch (dst_ty.ptrSize(zcu)) {
            .slice => {
                const slice_ptr_ty = dst_ty.slicePtrFieldType(zcu);

                const dst_ptr: MCValue = switch (dst) {
                    .register_pair => |dst_regs| .{ .register = dst_regs[0] },
                    else => dst,
                };
                const len: MCValue = switch (dst) {
                    .register_pair => |dst_regs| .{ .register = dst_regs[1] },
                    else => dst.address().offset(8).deref(),
                };

                // Used to store the number of elements for comparison.
                // After comparison, updated to store number of bytes needed to copy.
                const len_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
                const len_mcv: MCValue = .{ .register = len_reg };
                const len_lock = self.register_manager.lockRegAssumeUnused(len_reg);
                defer self.register_manager.unlockReg(len_lock);

                try self.genSetReg(len_reg, .usize, len, .{});
                try self.asmRegisterRegister(.{ ._, .@"test" }, len_reg, len_reg);

                const skip_reloc = try self.asmJccReloc(.z, undefined);
                try self.store(slice_ptr_ty, dst_ptr, src_val, .{ .safety = safety });

                const second_elem_ptr_reg =
                    try self.register_manager.allocReg(null, abi.RegisterClass.gp);
                const second_elem_ptr_mcv: MCValue = .{ .register = second_elem_ptr_reg };
                const second_elem_ptr_lock =
                    self.register_manager.lockRegAssumeUnused(second_elem_ptr_reg);
                defer self.register_manager.unlockReg(second_elem_ptr_lock);

                try self.genSetReg(second_elem_ptr_reg, .usize, .{ .register_offset = .{
                    .reg = try self.copyToTmpRegister(.usize, dst_ptr),
                    .off = elem_abi_size,
                } }, .{});

                try self.genBinOpMir(.{ ._, .sub }, .usize, len_mcv, .{ .immediate = 1 });
                try self.asmRegisterRegisterImmediate(
                    .{ .i_, .mul },
                    len_reg,
                    len_reg,
                    .s(elem_abi_size),
                );
                try self.genInlineMemcpy(second_elem_ptr_mcv, dst_ptr, len_mcv, .{ .no_alias = false });

                self.performReloc(skip_reloc);
            },
            .one => {
                const elem_ptr_ty = try pt.singleMutPtrType(elem_ty);

                const len = dst_ty.childType(zcu).arrayLen(zcu);

                assert(len != 0); // prevented by Sema
                try self.store(elem_ptr_ty, dst, src_val, .{ .safety = safety });

                const second_elem_ptr_reg =
                    try self.register_manager.allocReg(null, abi.RegisterClass.gp);
                const second_elem_ptr_mcv: MCValue = .{ .register = second_elem_ptr_reg };
                const second_elem_ptr_lock =
                    self.register_manager.lockRegAssumeUnused(second_elem_ptr_reg);
                defer self.register_manager.unlockReg(second_elem_ptr_lock);

                try self.genSetReg(second_elem_ptr_reg, .usize, .{ .register_offset = .{
                    .reg = try self.copyToTmpRegister(.usize, dst),
                    .off = elem_abi_size,
                } }, .{});

                const bytes_to_copy: MCValue = .{ .immediate = elem_abi_size * (len - 1) };
                try self.genInlineMemcpy(second_elem_ptr_mcv, dst, bytes_to_copy, .{ .no_alias = false });
            },
            .c, .many => unreachable,
        }
    }
    return self.finishAir(inst, .unreach, .{ bin_op.lhs, bin_op.rhs, .none });
}

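// The byte count is computed as element count times element size: with an
// imul against the slice length for slice destinations, or folded to a
// constant for pointers to arrays.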
fn airMemcpy(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;

    try self.spillRegisters(&.{ .rdi, .rsi, .rcx });
    const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rdi, .rsi, .rcx });
    defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);

    const dst = try self.resolveInst(bin_op.lhs);
    const dst_ty = self.typeOf(bin_op.lhs);
    const dst_locks: [2]?RegisterLock = switch (dst) {
        .register => |dst_reg| .{ self.register_manager.lockRegAssumeUnused(dst_reg), null },
        .register_pair => |dst_regs| .{
            self.register_manager.lockRegAssumeUnused(dst_regs[0]),
            self.register_manager.lockReg(dst_regs[1]),
        },
        else => @splat(null),
    };
    defer for (dst_locks) |dst_lock| if (dst_lock) |lock| self.register_manager.unlockReg(lock);

    const src = try self.resolveInst(bin_op.rhs);
    const src_locks: [2]?RegisterLock = switch (src) {
        .register => |src_reg| .{ self.register_manager.lockReg(src_reg), null },
        .register_pair => |src_regs| .{
            self.register_manager.lockRegAssumeUnused(src_regs[0]),
            self.register_manager.lockRegAssumeUnused(src_regs[1]),
        },
        else => @splat(null),
    };
    defer for (src_locks) |src_lock| if (src_lock) |lock| self.register_manager.unlockReg(lock);

    const len: MCValue = switch (dst_ty.ptrSize(zcu)) {
        .slice => len: {
            const len_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
            const len_lock = self.register_manager.lockRegAssumeUnused(len_reg);
            defer self.register_manager.unlockReg(len_lock);

            switch (dst) {
                .register_pair => |dst_regs| try self.asmRegisterRegisterImmediate(
                    .{ .i_, .mul },
                    len_reg,
                    dst_regs[1],
                    .s(@intCast(dst_ty.childType(zcu).abiSize(zcu))),
                ),
                else => try self.asmRegisterMemoryImmediate(
                    .{ .i_, .mul },
                    len_reg,
                    try dst.address().offset(8).deref().mem(self, .{ .size = .qword }),
                    .s(@intCast(dst_ty.childType(zcu).abiSize(zcu))),
                ),
            }
            break :len .{ .register = len_reg };
        },
        .one => len: {
            const array_ty = dst_ty.childType(zcu);
            break :len .{ .immediate = array_ty.arrayLen(zcu) * array_ty.childType(zcu).abiSize(zcu) };
        },
        .c, .many => unreachable,
    };
    const len_lock: ?RegisterLock = switch (len) {
        .register => |reg| self.register_manager.lockReg(reg),
        else => null,
    };
    defer if (len_lock) |lock| self.register_manager.unlockReg(lock);

    const dst_ptr: MCValue = switch (dst) {
        .register_pair => |dst_regs| .{ .register = dst_regs[0] },
        else => dst,
    };
    const src_ptr: MCValue = switch (src) {
        .register_pair => |src_regs| .{ .register = src_regs[0] },
        else => src,
    };

    try self.genInlineMemcpy(dst_ptr, src_ptr, len, .{ .no_alias = true });

    return self.finishAir(inst, .unreach, .{ bin_op.lhs, bin_op.rhs, .none });
}

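// Calls a lazily generated naming helper for the enum type: the first C ABI
// integer parameter register receives a pointer to the result slice, the
// second the enum value.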
fn airTagName(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
    const inst_ty = self.typeOfIndex(inst);
    const enum_ty = self.typeOf(un_op);

    // We need a properly aligned and sized call frame to be able to call this function.
    {
        const needed_call_frame: FrameAlloc = .init(.{
            .size = inst_ty.abiSize(zcu),
            .alignment = inst_ty.abiAlignment(zcu),
        });
        const frame_allocs_slice = self.frame_allocs.slice();
        const stack_frame_size =
            &frame_allocs_slice.items(.abi_size)[@intFromEnum(FrameIndex.call_frame)];
        stack_frame_size.* = @max(stack_frame_size.*, needed_call_frame.abi_size);
        const stack_frame_align =
            &frame_allocs_slice.items(.abi_align)[@intFromEnum(FrameIndex.call_frame)];
        stack_frame_align.* = stack_frame_align.max(needed_call_frame.abi_align);
    }

    const err_ret_trace_reg = if (zcu.comp.config.any_error_tracing) err_ret_trace_reg: {
        const param_gpr = abi.getCAbiIntParamRegs(.auto);
        break :err_ret_trace_reg param_gpr[param_gpr.len - 1];
    } else .none;

    try self.spillEflagsIfOccupied();
    try self.spillCallerPreservedRegs(.auto, err_ret_trace_reg);

    const param_regs = abi.getCAbiIntParamRegs(.auto);

    const dst_mcv = try self.allocRegOrMem(inst, false);
    try self.genSetReg(param_regs[0], .usize, dst_mcv.address(), .{});

    const operand = try self.resolveInst(un_op);
    try self.genSetReg(param_regs[1], enum_ty, operand, .{});

    const enum_lazy_sym: link.File.LazySymbol = .{ .kind = .code, .ty = enum_ty.toIntern() };
    try self.genLazySymbolRef(.call, abi.getCAbiLinkerScratchReg(self.fn_type.fnCallingConvention(zcu)), enum_lazy_sym);

    return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none });
}

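// The `anyerror` lazy symbol provides a table of 32-bit byte offsets indexed
// by error value; consecutive entries delimit each NUL-terminated name, so
// the name pointer is base + start and the length is end - start - 1
// (dropping the NUL terminator).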
fn airErrorName(self: *CodeGen, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;

    const err_ty = self.typeOf(un_op);
    const err_mcv = try self.resolveInst(un_op);
    const err_reg = try self.copyToTmpRegister(err_ty, err_mcv);
    const err_lock = self.register_manager.lockRegAssumeUnused(err_reg);
    defer self.register_manager.unlockReg(err_lock);

    const addr_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
    const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
    defer self.register_manager.unlockReg(addr_lock);
    const anyerror_lazy_sym: link.File.LazySymbol = .{ .kind = .const_data, .ty = .anyerror_type };
    try self.genLazySymbolRef(.lea, addr_reg, anyerror_lazy_sym);

    const start_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
    const start_lock = self.register_manager.lockRegAssumeUnused(start_reg);
    defer self.register_manager.unlockReg(start_lock);

    const end_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
    const end_lock = self.register_manager.lockRegAssumeUnused(end_reg);
    defer self.register_manager.unlockReg(end_lock);

    try self.truncateRegister(err_ty, err_reg.to32());

    try self.asmRegisterMemory(
        .{ ._, .mov },
        start_reg.to32(),
        .{
            .base = .{ .reg = addr_reg.to64() },
            .mod = .{ .rm = .{
                .size = .dword,
                .index = err_reg.to64(),
                .scale = .@"4",
                .disp = (1 - 1) * 4,
            } },
        },
    );
    try self.asmRegisterMemory(
        .{ ._, .mov },
        end_reg.to32(),
        .{
            .base = .{ .reg = addr_reg.to64() },
            .mod = .{ .rm = .{
                .size = .dword,
                .index = err_reg.to64(),
                .scale = .@"4",
                .disp = (2 - 1) * 4,
            } },
        },
    );
    try self.asmRegisterRegister(.{ ._, .sub }, end_reg.to32(), start_reg.to32());
    try self.asmRegisterMemory(
        .{ ._, .lea },
        start_reg.to64(),
        .{
            .base = .{ .reg = addr_reg.to64() },
            .mod = .{ .rm = .{
                .size = .dword,
                .index = start_reg.to64(),
            } },
        },
    );
    try self.asmRegisterMemory(
        .{ ._, .lea },
        end_reg.to32(),
        .{
            .base = .{ .reg = end_reg.to64() },
            .mod = .{ .rm = .{
                .size = .byte,
                .disp = -1,
            } },
        },
    );

    const dst_mcv = try self.allocRegOrMem(inst, false);
    try self.asmMemoryRegister(
        .{ ._, .mov },
        .{
            .base = .{ .frame = dst_mcv.load_frame.index },
            .mod = .{ .rm = .{
                .size = .qword,
                .disp = dst_mcv.load_frame.off,
            } },
        },
        start_reg.to64(),
    );
    try self.asmMemoryRegister(
        .{ ._, .mov },
        .{
            .base = .{ .frame = dst_mcv.load_frame.index },
            .mod = .{ .rm = .{
                .size = .qword,
                .disp = dst_mcv.load_frame.off + 8,
            } },
        },
        end_reg.to64(),
    );

    return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none });
}

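// Vector splat. Booleans become a 0/all-ones scalar mask selected by cmov;
// integers use vpbroadcast when AVX2 is available and otherwise a
// punpcklbw/pshuflw/pshufd widening ladder; floats use broadcastss/sd,
// shufps, movddup, or vinsertf128 depending on width and features.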
fn airSplat(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const vector_ty = self.typeOfIndex(inst);
    const vector_len = vector_ty.vectorLen(zcu);
    const dst_rc = self.regSetForType(vector_ty);
    const scalar_ty = self.typeOf(ty_op.operand);

    const result: MCValue = result: {
        switch (scalar_ty.zigTypeTag(zcu)) {
            else => {},
            .bool => {
                const regs =
                    try self.register_manager.allocRegs(2, .{ inst, null }, abi.RegisterClass.gp);
                const reg_locks = self.register_manager.lockRegsAssumeUnused(2, regs);
                defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);

                try self.genSetReg(regs[0], vector_ty, .{ .immediate = 0 }, .{});
                try self.genSetReg(
                    regs[1],
                    vector_ty,
                    .{ .immediate = @as(u64, std.math.maxInt(u64)) >> @intCast(64 - vector_len) },
                    .{},
                );
                const src_mcv = try self.resolveInst(ty_op.operand);
                const abi_size = @max(std.math.divCeil(u32, vector_len, 8) catch unreachable, 4);
                try self.asmCmovccRegisterRegister(
                    switch (src_mcv) {
                        .eflags => |cc| cc,
                        .register => |src_reg| cc: {
                            try self.asmRegisterImmediate(.{ ._, .@"test" }, src_reg.to8(), .u(1));
                            break :cc .nz;
                        },
                        else => cc: {
                            try self.asmMemoryImmediate(
                                .{ ._, .@"test" },
                                try src_mcv.mem(self, .{ .size = .byte }),
                                .u(1),
                            );
                            break :cc .nz;
                        },
                    },
                    registerAlias(regs[0], abi_size),
                    registerAlias(regs[1], abi_size),
                );
                break :result .{ .register = regs[0] };
            },
            .int => if (self.hasFeature(.avx2)) avx2: {
                const mir_tag = @as(?Mir.Inst.FixedTag, switch (scalar_ty.intInfo(zcu).bits) {
                    else => null,
                    1...8 => switch (vector_len) {
                        else => null,
                        1...32 => .{ .vp_b, .broadcast },
                    },
                    9...16 => switch (vector_len) {
                        else => null,
                        1...16 => .{ .vp_w, .broadcast },
                    },
                    17...32 => switch (vector_len) {
                        else => null,
                        1...8 => .{ .vp_d, .broadcast },
                    },
                    33...64 => switch (vector_len) {
                        else => null,
                        1...4 => .{ .vp_q, .broadcast },
                    },
                    65...128 => switch (vector_len) {
                        else => null,
                        1...2 => .{ .v_i128, .broadcast },
                    },
                }) orelse break :avx2;

                const dst_reg = try self.register_manager.allocReg(inst, abi.RegisterClass.sse);
                const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
                defer self.register_manager.unlockReg(dst_lock);

                const src_mcv = try self.resolveInst(ty_op.operand);
                if (src_mcv.isBase()) try self.asmRegisterMemory(
                    mir_tag,
                    registerAlias(dst_reg, @intCast(vector_ty.abiSize(zcu))),
                    try src_mcv.mem(self, .{ .size = self.memSize(scalar_ty) }),
                ) else {
                    if (mir_tag[0] == .v_i128) break :avx2;
                    try self.genSetReg(dst_reg, scalar_ty, src_mcv, .{});
                    try self.asmRegisterRegister(
                        mir_tag,
                        registerAlias(dst_reg, @intCast(vector_ty.abiSize(zcu))),
                        registerAlias(dst_reg, @intCast(scalar_ty.abiSize(zcu))),
                    );
                }
                break :result .{ .register = dst_reg };
            } else {
                const dst_reg = try self.register_manager.allocReg(inst, abi.RegisterClass.sse);
                const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
                defer self.register_manager.unlockReg(dst_lock);

                try self.genSetReg(dst_reg, scalar_ty, .{ .air_ref = ty_op.operand }, .{});
                if (vector_len == 1) break :result .{ .register = dst_reg };

                const dst_alias = registerAlias(dst_reg, @intCast(vector_ty.abiSize(zcu)));
                const scalar_bits = scalar_ty.intInfo(zcu).bits;
                if (switch (scalar_bits) {
                    1...8 => true,
                    9...128 => false,
                    else => unreachable,
                }) if (self.hasFeature(.avx)) try self.asmRegisterRegisterRegister(
                    .{ .vp_, .unpcklbw },
                    dst_alias,
                    dst_alias,
                    dst_alias,
                ) else try self.asmRegisterRegister(
                    .{ .p_, .unpcklbw },
                    dst_alias,
                    dst_alias,
                );
                if (switch (scalar_bits) {
                    1...8 => vector_len > 2,
                    9...16 => true,
                    17...128 => false,
                    else => unreachable,
                }) try self.asmRegisterRegisterImmediate(
                    .{ if (self.hasFeature(.avx)) .vp_w else .p_w, .shufl },
                    dst_alias,
                    dst_alias,
                    .u(0b00_00_00_00),
                );
                if (switch (scalar_bits) {
                    1...8 => vector_len > 4,
                    9...16 => vector_len > 2,
                    17...64 => true,
                    65...128 => false,
                    else => unreachable,
                }) try self.asmRegisterRegisterImmediate(
                    .{ if (self.hasFeature(.avx)) .vp_d else .p_d, .shuf },
                    dst_alias,
                    dst_alias,
                    .u(if (scalar_bits <= 64) 0b00_00_00_00 else 0b01_00_01_00),
                );
                break :result .{ .register = dst_reg };
            },
            .float => switch (scalar_ty.floatBits(self.target.*)) {
                32 => switch (vector_len) {
                    1 => {
                        const src_mcv = try self.resolveInst(ty_op.operand);
                        if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv;
                        const dst_reg = try self.register_manager.allocReg(inst, dst_rc);
                        try self.genSetReg(dst_reg, scalar_ty, src_mcv, .{});
                        break :result .{ .register = dst_reg };
                    },
                    2...4 => {
                        const src_mcv = try self.resolveInst(ty_op.operand);
                        if (self.hasFeature(.avx)) {
                            const dst_reg = try self.register_manager.allocReg(inst, dst_rc);
                            if (src_mcv.isBase()) try self.asmRegisterMemory(
                                .{ .v_ss, .broadcast },
                                dst_reg.to128(),
                                try src_mcv.mem(self, .{ .size = .dword }),
                            ) else {
                                const src_reg = if (src_mcv.isRegister())
                                    src_mcv.getReg().?
                                else
                                    try self.copyToTmpRegister(scalar_ty, src_mcv);
                                try self.asmRegisterRegisterRegisterImmediate(
                                    .{ .v_ps, .shuf },
                                    dst_reg.to128(),
                                    src_reg.to128(),
                                    src_reg.to128(),
                                    .u(0),
                                );
                            }
                            break :result .{ .register = dst_reg };
                        } else {
                            const dst_mcv = if (src_mcv.isRegister() and
                                self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
                                src_mcv
                            else
                                try self.copyToRegisterWithInstTracking(inst, scalar_ty, src_mcv);
                            const dst_reg = dst_mcv.getReg().?;
                            try self.asmRegisterRegisterImmediate(
                                .{ ._ps, .shuf },
                                dst_reg.to128(),
                                dst_reg.to128(),
                                .u(0),
                            );
                            break :result dst_mcv;
                        }
                    },
                    5...8 => if (self.hasFeature(.avx)) {
                        const src_mcv = try self.resolveInst(ty_op.operand);
                        const dst_reg = try self.register_manager.allocReg(inst, dst_rc);
                        if (src_mcv.isBase()) try self.asmRegisterMemory(
                            .{ .v_ss, .broadcast },
                            dst_reg.to256(),
                            try src_mcv.mem(self, .{ .size = .dword }),
                        ) else {
                            const src_reg = if (src_mcv.isRegister())
                                src_mcv.getReg().?
                            else
                                try self.copyToTmpRegister(scalar_ty, src_mcv);
                            if (self.hasFeature(.avx2)) try self.asmRegisterRegister(
                                .{ .v_ss, .broadcast },
                                dst_reg.to256(),
                                src_reg.to128(),
                            ) else {
                                try self.asmRegisterRegisterRegisterImmediate(
                                    .{ .v_ps, .shuf },
                                    dst_reg.to128(),
                                    src_reg.to128(),
                                    src_reg.to128(),
                                    .u(0),
                                );
                                try self.asmRegisterRegisterRegisterImmediate(
                                    .{ .v_f128, .insert },
                                    dst_reg.to256(),
                                    dst_reg.to256(),
                                    dst_reg.to128(),
                                    .u(1),
                                );
                            }
                        }
                        break :result .{ .register = dst_reg };
                    },
                    else => {},
                },
                64 => switch (vector_len) {
                    1 => {
                        const src_mcv = try self.resolveInst(ty_op.operand);
                        if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv;
                        const dst_reg = try self.register_manager.allocReg(inst, dst_rc);
                        try self.genSetReg(dst_reg, scalar_ty, src_mcv, .{});
                        break :result .{ .register = dst_reg };
                    },
                    2 => {
                        const src_mcv = try self.resolveInst(ty_op.operand);
                        const dst_reg = try self.register_manager.allocReg(inst, dst_rc);
                        if (self.hasFeature(.sse3)) {
                            if (src_mcv.isBase()) try self.asmRegisterMemory(
                                if (self.hasFeature(.avx)) .{ .v_, .movddup } else .{ ._, .movddup },
                                dst_reg.to128(),
                                try src_mcv.mem(self, .{ .size = .qword }),
                            ) else try self.asmRegisterRegister(
                                if (self.hasFeature(.avx)) .{ .v_, .movddup } else .{ ._, .movddup },
                                dst_reg.to128(),
                                (if (src_mcv.isRegister())
                                    src_mcv.getReg().?
                                else
                                    try self.copyToTmpRegister(scalar_ty, src_mcv)).to128(),
                            );
                            break :result .{ .register = dst_reg };
                        } else try self.asmRegisterRegister(
                            .{ ._ps, .movlh },
                            dst_reg.to128(),
                            (if (src_mcv.isRegister())
                                src_mcv.getReg().?
                            else
                                try self.copyToTmpRegister(scalar_ty, src_mcv)).to128(),
                        );
                    },
                    3...4 => if (self.hasFeature(.avx)) {
                        const src_mcv = try self.resolveInst(ty_op.operand);
                        const dst_reg = try self.register_manager.allocReg(inst, dst_rc);
                        if (src_mcv.isBase()) try self.asmRegisterMemory(
                            .{ .v_sd, .broadcast },
                            dst_reg.to256(),
                            try src_mcv.mem(self, .{ .size = .qword }),
                        ) else {
                            const src_reg = if (src_mcv.isRegister())
                                src_mcv.getReg().?
                            else
                                try self.copyToTmpRegister(scalar_ty, src_mcv);
                            if (self.hasFeature(.avx2)) try self.asmRegisterRegister(
                                .{ .v_sd, .broadcast },
                                dst_reg.to256(),
                                src_reg.to128(),
                            ) else {
                                try self.asmRegisterRegister(
                                    .{ .v_, .movddup },
                                    dst_reg.to128(),
                                    src_reg.to128(),
                                );
                                try self.asmRegisterRegisterRegisterImmediate(
                                    .{ .v_f128, .insert },
                                    dst_reg.to256(),
                                    dst_reg.to256(),
                                    dst_reg.to128(),
                                    .u(1),
                                );
                            }
                        }
                        break :result .{ .register = dst_reg };
                    },
                    else => {},
                },
                128 => switch (vector_len) {
                    1 => {
                        const src_mcv = try self.resolveInst(ty_op.operand);
                        if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv;
                        const dst_reg = try self.register_manager.allocReg(inst, dst_rc);
                        try self.genSetReg(dst_reg, scalar_ty, src_mcv, .{});
                        break :result .{ .register = dst_reg };
                    },
                    2 => if (self.hasFeature(.avx)) {
                        const src_mcv = try self.resolveInst(ty_op.operand);
                        const dst_reg = try self.register_manager.allocReg(inst, dst_rc);
                        if (src_mcv.isBase()) try self.asmRegisterMemory(
                            .{ .v_f128, .broadcast },
                            dst_reg.to256(),
                            try src_mcv.mem(self, .{ .size = .xword }),
                        ) else {
                            const src_reg = if (src_mcv.isRegister())
                                src_mcv.getReg().?
                            else
                                try self.copyToTmpRegister(scalar_ty, src_mcv);
                            try self.asmRegisterRegisterRegisterImmediate(
                                .{ .v_f128, .insert },
                                dst_reg.to256(),
                                src_reg.to256(),
                                src_reg.to128(),
                                .u(1),
                            );
                        }
                        break :result .{ .register = dst_reg };
                    },
                    else => {},
                },
                16, 80 => {},
                else => unreachable,
            },
        }
        return self.fail("TODO implement airSplat for {}", .{vector_ty.fmt(pt)});
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

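// Lowers @select by materializing the packed bool predicate as a full
// per-element mask, then blending: [v]pblendvb/blendvps/blendvpd when
// SSE4.1/AVX is available, otherwise the classic and/andn/or sequence.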
fn airSelect(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
    const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
    const ty = self.typeOfIndex(inst);
    const vec_len = ty.vectorLen(zcu);
    const elem_ty = ty.childType(zcu);
    const elem_abi_size: u32 = @intCast(elem_ty.abiSize(zcu));
    const abi_size: u32 = @intCast(ty.abiSize(zcu));
    const pred_ty = self.typeOf(pl_op.operand);

    const result = result: {
        const has_blend = self.hasFeature(.sse4_1);
        const has_avx = self.hasFeature(.avx);
        const need_xmm0 = has_blend and !has_avx;
        const pred_mcv = try self.resolveInst(pl_op.operand);
        const mask_reg = mask: {
            switch (pred_mcv) {
                .register => |pred_reg| switch (pred_reg.class()) {
                    .general_purpose => {},
                    .sse => if (need_xmm0 and pred_reg.id() != comptime Register.xmm0.id()) {
                        try self.register_manager.getKnownReg(.xmm0, null);
                        try self.genSetReg(.xmm0, pred_ty, pred_mcv, .{});
                        break :mask .xmm0;
                    } else break :mask if (has_blend)
                        pred_reg
                    else
                        try self.copyToTmpRegister(pred_ty, pred_mcv),
                    else => unreachable,
                },
                else => {},
            }
            const mask_reg: Register = if (need_xmm0) mask_reg: {
                try self.register_manager.getKnownReg(.xmm0, null);
                break :mask_reg .xmm0;
            } else try self.register_manager.allocReg(null, abi.RegisterClass.sse);
            const mask_alias = registerAlias(mask_reg, abi_size);
            const mask_lock = self.register_manager.lockRegAssumeUnused(mask_reg);
            defer self.register_manager.unlockReg(mask_lock);

            const pred_fits_in_elem = vec_len <= elem_abi_size;
            if (self.hasFeature(.avx2) and abi_size <= 32) {
                if (pred_mcv.isRegister()) broadcast: {
                    try self.asmRegisterRegister(
                        .{ .v_d, .mov },
                        mask_reg.to128(),
                        pred_mcv.getReg().?.to32(),
                    );
                    if (pred_fits_in_elem and vec_len > 1) try self.asmRegisterRegister(
                        .{ switch (elem_abi_size) {
                            1 => .vp_b,
                            2 => .vp_w,
                            3...4 => .vp_d,
                            5...8 => .vp_q,
                            9...16 => {
                                try self.asmRegisterRegisterRegisterImmediate(
                                    .{ .v_f128, .insert },
                                    mask_alias,
                                    mask_alias,
                                    mask_reg.to128(),
                                    .u(1),
                                );
                                break :broadcast;
                            },
                            17...32 => break :broadcast,
                            else => unreachable,
                        }, .broadcast },
                        mask_alias,
                        mask_reg.to128(),
                    );
                } else try self.asmRegisterMemory(
                    .{ switch (vec_len) {
                        1...8 => .vp_b,
                        9...16 => .vp_w,
                        17...32 => .vp_d,
                        else => unreachable,
                    }, .broadcast },
                    mask_alias,
                    if (pred_mcv.isBase()) try pred_mcv.mem(self, .{ .size = .byte }) else .{
                        .base = .{ .reg = (try self.copyToTmpRegister(
                            .usize,
                            pred_mcv.address(),
                        )).to64() },
                        .mod = .{ .rm = .{ .size = .byte } },
                    },
                );
            } else if (abi_size <= 16) broadcast: {
                try self.asmRegisterRegister(
                    .{ if (has_avx) .v_d else ._d, .mov },
                    mask_alias,
                    (if (pred_mcv.isRegister())
                        pred_mcv.getReg().?
                    else
                        try self.copyToTmpRegister(pred_ty, pred_mcv.address())).to32(),
                );
                if (!pred_fits_in_elem or vec_len == 1) break :broadcast;
                if (elem_abi_size <= 1) {
                    if (has_avx) try self.asmRegisterRegisterRegister(
                        .{ .vp_, .unpcklbw },
                        mask_alias,
                        mask_alias,
                        mask_alias,
                    ) else try self.asmRegisterRegister(
                        .{ .p_, .unpcklbw },
                        mask_alias,
                        mask_alias,
                    );
                    if (abi_size <= 2) break :broadcast;
                }
                if (elem_abi_size <= 2) {
                    try self.asmRegisterRegisterImmediate(
                        .{ if (has_avx) .vp_w else .p_w, .shufl },
                        mask_alias,
                        mask_alias,
                        .u(0b00_00_00_00),
                    );
                    if (abi_size <= 8) break :broadcast;
                }
                try self.asmRegisterRegisterImmediate(
                    .{ if (has_avx) .vp_d else .p_d, .shuf },
                    mask_alias,
                    mask_alias,
                    .u(switch (elem_abi_size) {
                        1...2, 5...8 => 0b01_00_01_00,
                        3...4 => 0b00_00_00_00,
                        else => unreachable,
                    }),
                );
            } else return self.fail("TODO implement airSelect for {}", .{ty.fmt(pt)});
            const elem_bits: u16 = @intCast(elem_abi_size * 8);
            const mask_elem_ty = try pt.intType(.unsigned, elem_bits);
            const mask_ty = try pt.vectorType(.{ .len = vec_len, .child = mask_elem_ty.toIntern() });
            if (!pred_fits_in_elem) if (self.hasFeature(.ssse3)) {
                var mask_elems: [32]InternPool.Index = undefined;
                for (mask_elems[0..vec_len], 0..) |*elem, bit| elem.* = try pt.intern(.{ .int = .{
                    .ty = mask_elem_ty.toIntern(),
                    .storage = .{ .u64 = bit / elem_bits },
                } });
                const mask_mcv = try self.genTypedValue(.fromInterned(try pt.intern(.{ .aggregate = .{
                    .ty = mask_ty.toIntern(),
                    .storage = .{ .elems = mask_elems[0..vec_len] },
                } })));
                const mask_mem: Memory = .{
                    .base = .{ .reg = try self.copyToTmpRegister(.usize, mask_mcv.address()) },
                    .mod = .{ .rm = .{ .size = self.memSize(ty) } },
                };
                if (has_avx) try self.asmRegisterRegisterMemory(
                    .{ .vp_b, .shuf },
                    mask_alias,
                    mask_alias,
                    mask_mem,
                ) else try self.asmRegisterMemory(
                    .{ .p_b, .shuf },
                    mask_alias,
                    mask_mem,
                );
            } else return self.fail("TODO implement airSelect for {}", .{ty.fmt(pt)});
            {
                var mask_elems: [32]InternPool.Index = undefined;
                for (mask_elems[0..vec_len], 0..) |*elem, bit| elem.* = try pt.intern(.{ .int = .{
                    .ty = mask_elem_ty.toIntern(),
                    .storage = .{ .u64 = @as(u32, 1) << @intCast(bit & (elem_bits - 1)) },
                } });
                const mask_mcv = try self.genTypedValue(.fromInterned(try pt.intern(.{ .aggregate = .{
                    .ty = mask_ty.toIntern(),
                    .storage = .{ .elems = mask_elems[0..vec_len] },
                } })));
                const mask_mem: Memory = .{
                    .base = .{ .reg = try self.copyToTmpRegister(.usize, mask_mcv.address()) },
                    .mod = .{ .rm = .{ .size = self.memSize(ty) } },
                };
                if (has_avx) {
                    try self.asmRegisterRegisterMemory(
                        .{ .vp_, .@"and" },
                        mask_alias,
                        mask_alias,
                        mask_mem,
                    );
                    try self.asmRegisterRegisterMemory(
                        .{ .vp_d, .cmpeq },
                        mask_alias,
                        mask_alias,
                        mask_mem,
                    );
                } else {
                    try self.asmRegisterMemory(
                        .{ .p_, .@"and" },
                        mask_alias,
                        mask_mem,
                    );
                    try self.asmRegisterMemory(
                        .{ .p_d, .cmpeq },
                        mask_alias,
                        mask_mem,
                    );
                }
            }
            break :mask mask_reg;
        };
        const mask_alias = registerAlias(mask_reg, abi_size);
        const mask_lock = self.register_manager.lockRegAssumeUnused(mask_reg);
        defer self.register_manager.unlockReg(mask_lock);

        const lhs_mcv = try self.resolveInst(extra.lhs);
        const lhs_lock = switch (lhs_mcv) {
            .register => |lhs_reg| self.register_manager.lockRegAssumeUnused(lhs_reg),
            else => null,
        };
        defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);

        const rhs_mcv = try self.resolveInst(extra.rhs);
        const rhs_lock = switch (rhs_mcv) {
            .register => |rhs_reg| self.register_manager.lockReg(rhs_reg),
            else => null,
        };
        defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);

        const reuse_mcv = if (has_blend) rhs_mcv else lhs_mcv;
        const dst_mcv: MCValue = if (reuse_mcv.isRegister() and self.reuseOperand(
            inst,
            if (has_blend) extra.rhs else extra.lhs,
            @intFromBool(has_blend),
            reuse_mcv,
        )) reuse_mcv else if (has_avx)
            .{ .register = try self.register_manager.allocReg(inst, abi.RegisterClass.sse) }
        else
            try self.copyToRegisterWithInstTracking(inst, ty, reuse_mcv);
        const dst_reg = dst_mcv.getReg().?;
        const dst_alias = registerAlias(dst_reg, abi_size);
        const dst_lock = self.register_manager.lockReg(dst_reg);
        defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);

        const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.childType(zcu).zigTypeTag(zcu)) {
            else => null,
            .int => switch (abi_size) {
                0 => unreachable,
                1...16 => if (has_avx)
                    .{ .vp_b, .blendv }
                else if (has_blend)
                    .{ .p_b, .blendv }
                else
                    .{ .p_, undefined },
                17...32 => if (self.hasFeature(.avx2))
                    .{ .vp_b, .blendv }
                else
                    null,
                else => null,
            },
            .float => switch (ty.childType(zcu).floatBits(self.target.*)) {
                else => unreachable,
                16, 80, 128 => null,
                32 => switch (vec_len) {
                    0 => unreachable,
                    1...4 => if (has_avx) .{ .v_ps, .blendv } else .{ ._ps, .blendv },
                    5...8 => if (has_avx) .{ .v_ps, .blendv } else null,
                    else => null,
                },
                64 => switch (vec_len) {
                    0 => unreachable,
                    1...2 => if (has_avx) .{ .v_pd, .blendv } else .{ ._pd, .blendv },
                    3...4 => if (has_avx) .{ .v_pd, .blendv } else null,
                    else => null,
                },
            },
        }) orelse return self.fail("TODO implement airSelect for {}", .{ty.fmt(pt)});
        if (has_avx) {
            const rhs_alias = if (rhs_mcv.isRegister())
                registerAlias(rhs_mcv.getReg().?, abi_size)
            else rhs: {
                try self.genSetReg(dst_reg, ty, rhs_mcv, .{});
                break :rhs dst_alias;
            };
            if (lhs_mcv.isBase()) try self.asmRegisterRegisterMemoryRegister(
                mir_tag,
                dst_alias,
                rhs_alias,
                try lhs_mcv.mem(self, .{ .size = self.memSize(ty) }),
                mask_alias,
            ) else try self.asmRegisterRegisterRegisterRegister(
                mir_tag,
                dst_alias,
                rhs_alias,
                registerAlias(if (lhs_mcv.isRegister())
                    lhs_mcv.getReg().?
                else
                    try self.copyToTmpRegister(ty, lhs_mcv), abi_size),
                mask_alias,
            );
        } else if (has_blend) if (lhs_mcv.isBase()) try self.asmRegisterMemoryRegister(
            mir_tag,
            dst_alias,
            try lhs_mcv.mem(self, .{ .size = self.memSize(ty) }),
            mask_alias,
        ) else try self.asmRegisterRegisterRegister(
            mir_tag,
            dst_alias,
            registerAlias(if (lhs_mcv.isRegister())
                lhs_mcv.getReg().?
            else
                try self.copyToTmpRegister(ty, lhs_mcv), abi_size),
            mask_alias,
        ) else {
            const mir_fixes = @as(?Mir.Inst.Fixes, switch (elem_ty.zigTypeTag(zcu)) {
                else => null,
                .int => .p_,
                .float => switch (elem_ty.floatBits(self.target.*)) {
                    32 => ._ps,
                    64 => ._pd,
                    16, 80, 128 => null,
                    else => unreachable,
                },
            }) orelse return self.fail("TODO implement airSelect for {}", .{ty.fmt(pt)});
            try self.asmRegisterRegister(.{ mir_fixes, .@"and" }, dst_alias, mask_alias);
            if (rhs_mcv.isBase()) try self.asmRegisterMemory(
                .{ mir_fixes, .andn },
                mask_alias,
                try rhs_mcv.mem(self, .{ .size = .fromSize(abi_size) }),
            ) else try self.asmRegisterRegister(
                .{ mir_fixes, .andn },
                mask_alias,
                if (rhs_mcv.isRegister())
                    rhs_mcv.getReg().?
                else
                    try self.copyToTmpRegister(ty, rhs_mcv),
            );
            try self.asmRegisterRegister(.{ mir_fixes, .@"or" }, dst_alias, mask_alias);
        }
        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ pl_op.operand, extra.lhs, extra.rhs });
}

fn airShuffle(self: *CodeGen, inst: Air.Inst.Index) !void {
|
|
const pt = self.pt;
|
|
const zcu = pt.zcu;
|
|
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
|
|
const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
|
|
|
|
const dst_ty = self.typeOfIndex(inst);
|
|
const elem_ty = dst_ty.childType(zcu);
|
|
const elem_abi_size: u16 = @intCast(elem_ty.abiSize(zcu));
|
|
const dst_abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
|
|
const lhs_ty = self.typeOf(extra.a);
|
|
const lhs_abi_size: u32 = @intCast(lhs_ty.abiSize(zcu));
|
|
const rhs_ty = self.typeOf(extra.b);
|
|
const rhs_abi_size: u32 = @intCast(rhs_ty.abiSize(zcu));
|
|
const max_abi_size = @max(dst_abi_size, lhs_abi_size, rhs_abi_size);
|
|
|
|
const ExpectedContents = [32]?i32;
|
|
var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) =
|
|
std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);
|
|
const allocator = stack.get();
|
|
|
|
const mask_elems = try allocator.alloc(?i32, extra.mask_len);
|
|
defer allocator.free(mask_elems);
|
|
for (mask_elems, 0..) |*mask_elem, elem_index| {
|
|
const mask_elem_val =
|
|
Value.fromInterned(extra.mask).elemValue(pt, elem_index) catch unreachable;
|
|
mask_elem.* = if (mask_elem_val.isUndef(zcu))
|
|
null
|
|
else
|
|
@intCast(mask_elem_val.toSignedInt(zcu));
|
|
}
|
|
|
|
const has_avx = self.hasFeature(.avx);
|
|
const result = @as(?MCValue, result: {
|
|
for (mask_elems) |mask_elem| {
|
|
if (mask_elem) |_| break;
|
|
} else break :result try self.allocRegOrMem(inst, true);
|
|
|
|
for (mask_elems, 0..) |mask_elem, elem_index| {
|
|
if (mask_elem orelse continue != elem_index) break;
|
|
} else {
|
|
const lhs_mcv = try self.resolveInst(extra.a);
|
|
if (self.reuseOperand(inst, extra.a, 0, lhs_mcv)) break :result lhs_mcv;
|
|
const dst_mcv = try self.allocRegOrMem(inst, true);
|
|
try self.genCopy(dst_ty, dst_mcv, lhs_mcv, .{});
|
|
break :result dst_mcv;
|
|
}
|
|
|
|
for (mask_elems, 0..) |mask_elem, elem_index| {
|
|
if (~(mask_elem orelse continue) != elem_index) break;
|
|
} else {
|
|
const rhs_mcv = try self.resolveInst(extra.b);
|
|
if (self.reuseOperand(inst, extra.b, 1, rhs_mcv)) break :result rhs_mcv;
|
|
const dst_mcv = try self.allocRegOrMem(inst, true);
|
|
try self.genCopy(dst_ty, dst_mcv, rhs_mcv, .{});
|
|
break :result dst_mcv;
|
|
}
|
|
|
|
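        // Try to recognize the mask as an unpckl/unpckh pattern, which
        // interleaves elements from the low (unpckl) or high (unpckh) halves
        // of the two sources.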
        for ([_]Mir.Inst.Tag{ .unpckl, .unpckh }) |variant| unpck: {
            if (elem_abi_size > 8) break :unpck;
            if (dst_abi_size > self.vectorSize(if (elem_abi_size >= 4) .float else .int)) break :unpck;

            var sources: [2]?u1 = @splat(null);
            for (mask_elems, 0..) |maybe_mask_elem, elem_index| {
                const mask_elem = maybe_mask_elem orelse continue;
                const mask_elem_index =
                    std.math.cast(u5, if (mask_elem < 0) ~mask_elem else mask_elem) orelse break :unpck;
                const elem_byte = (elem_index >> 1) * elem_abi_size;
                if (mask_elem_index * elem_abi_size != (elem_byte & 0b0111) | @as(u4, switch (variant) {
                    .unpckl => 0b0000,
                    .unpckh => 0b1000,
                    else => unreachable,
                }) | (elem_byte << 1 & 0b10000)) break :unpck;

                const source = @intFromBool(mask_elem < 0);
                if (sources[elem_index & 0b00001]) |prev_source| {
                    if (source != prev_source) break :unpck;
                } else sources[elem_index & 0b00001] = source;
            }
            if (sources[0] orelse break :unpck == sources[1] orelse break :unpck) break :unpck;

            const operands = [2]Air.Inst.Ref{ extra.a, extra.b };
            const operand_tys = [2]Type{ lhs_ty, rhs_ty };
            const lhs_mcv = try self.resolveInst(operands[sources[0].?]);
            const rhs_mcv = try self.resolveInst(operands[sources[1].?]);

            const dst_mcv: MCValue = if (lhs_mcv.isRegister() and
                self.reuseOperand(inst, operands[sources[0].?], sources[0].?, lhs_mcv))
                lhs_mcv
            else if (has_avx and lhs_mcv.isRegister())
                .{ .register = try self.register_manager.allocReg(inst, abi.RegisterClass.sse) }
            else
                try self.copyToRegisterWithInstTracking(inst, operand_tys[sources[0].?], lhs_mcv);
            const dst_reg = dst_mcv.getReg().?;
            const dst_alias = registerAlias(dst_reg, max_abi_size);

            const mir_tag: Mir.Inst.FixedTag = if ((elem_abi_size >= 4 and elem_ty.isRuntimeFloat()) or
                (dst_abi_size > 16 and !self.hasFeature(.avx2))) .{ switch (elem_abi_size) {
                4 => if (has_avx) .v_ps else ._ps,
                8 => if (has_avx) .v_pd else ._pd,
                else => unreachable,
            }, variant } else .{ if (has_avx) .vp_ else .p_, switch (variant) {
                .unpckl => switch (elem_abi_size) {
                    1 => .unpcklbw,
                    2 => .unpcklwd,
                    4 => .unpckldq,
                    8 => .unpcklqdq,
                    else => unreachable,
                },
                .unpckh => switch (elem_abi_size) {
                    1 => .unpckhbw,
                    2 => .unpckhwd,
                    4 => .unpckhdq,
                    8 => .unpckhqdq,
                    else => unreachable,
                },
                else => unreachable,
            } };
            if (has_avx) if (rhs_mcv.isBase()) try self.asmRegisterRegisterMemory(
                mir_tag,
                dst_alias,
                registerAlias(lhs_mcv.getReg() orelse dst_reg, max_abi_size),
                try rhs_mcv.mem(self, .{ .size = .fromSize(max_abi_size) }),
            ) else try self.asmRegisterRegisterRegister(
                mir_tag,
                dst_alias,
                registerAlias(lhs_mcv.getReg() orelse dst_reg, max_abi_size),
                registerAlias(if (rhs_mcv.isRegister())
                    rhs_mcv.getReg().?
                else
                    try self.copyToTmpRegister(operand_tys[sources[1].?], rhs_mcv), max_abi_size),
            ) else if (rhs_mcv.isBase()) try self.asmRegisterMemory(
                mir_tag,
                dst_alias,
                try rhs_mcv.mem(self, .{ .size = .fromSize(max_abi_size) }),
            ) else try self.asmRegisterRegister(
                mir_tag,
                dst_alias,
                registerAlias(if (rhs_mcv.isRegister())
                    rhs_mcv.getReg().?
                else
                    try self.copyToTmpRegister(operand_tys[sources[1].?], rhs_mcv), max_abi_size),
            );
            break :result dst_mcv;
        }

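        // pshufd shuffles dwords within a single source: each 2-bit field of
        // the immediate picks the source lane for one destination lane.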
        pshufd: {
            if (elem_abi_size != 4) break :pshufd;
            if (max_abi_size > self.vectorSize(.float)) break :pshufd;

            var control: u8 = 0b00_00_00_00;
            var sources: [1]?u1 = @splat(null);
            for (mask_elems, 0..) |maybe_mask_elem, elem_index| {
                const mask_elem = maybe_mask_elem orelse continue;
                const mask_elem_index: u3 = @intCast(if (mask_elem < 0) ~mask_elem else mask_elem);
                if (mask_elem_index & 0b100 != elem_index & 0b100) break :pshufd;

                const source = @intFromBool(mask_elem < 0);
                if (sources[0]) |prev_source| {
                    if (source != prev_source) break :pshufd;
                } else sources[0] = source; // `sources` has a single slot; indexing it by
                // `(elem_index & 0b010) >> 1` (as the two-source paths do) could go out of bounds.

                const select_bit: u3 = @intCast((elem_index & 0b011) << 1);
                const select_mask = @as(u8, @intCast(mask_elem_index & 0b011)) << select_bit;
                if (elem_index & 0b100 == 0)
                    control |= select_mask
                else if (control & @as(u8, 0b11) << select_bit != select_mask) break :pshufd;
            }

            const operands = [2]Air.Inst.Ref{ extra.a, extra.b };
            const operand_tys = [2]Type{ lhs_ty, rhs_ty };
            const src_mcv = try self.resolveInst(operands[sources[0] orelse break :pshufd]);

            const dst_reg = if (src_mcv.isRegister() and
                self.reuseOperand(inst, operands[sources[0].?], sources[0].?, src_mcv))
                src_mcv.getReg().?
            else
                try self.register_manager.allocReg(inst, abi.RegisterClass.sse);
            const dst_alias = registerAlias(dst_reg, max_abi_size);

            if (src_mcv.isBase()) try self.asmRegisterMemoryImmediate(
                .{ if (has_avx) .vp_d else .p_d, .shuf },
                dst_alias,
                try src_mcv.mem(self, .{ .size = .fromSize(max_abi_size) }),
                .u(control),
            ) else try self.asmRegisterRegisterImmediate(
                .{ if (has_avx) .vp_d else .p_d, .shuf },
                dst_alias,
                registerAlias(if (src_mcv.isRegister())
                    src_mcv.getReg().?
                else
                    try self.copyToTmpRegister(operand_tys[sources[0].?], src_mcv), max_abi_size),
                .u(control),
            );
            break :result .{ .register = dst_reg };
        }

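        // shufps fills the low two lanes of each destination half from the
        // first source and the high two from the second, each lane chosen by
        // a 2-bit field of the immediate.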
        shufps: {
            if (elem_abi_size != 4) break :shufps;
            if (max_abi_size > self.vectorSize(.float)) break :shufps;

            var control: u8 = 0b00_00_00_00;
            var sources: [2]?u1 = @splat(null);
            for (mask_elems, 0..) |maybe_mask_elem, elem_index| {
                const mask_elem = maybe_mask_elem orelse continue;
                const mask_elem_index: u3 = @intCast(if (mask_elem < 0) ~mask_elem else mask_elem);
                if (mask_elem_index & 0b100 != elem_index & 0b100) break :shufps;

                const source = @intFromBool(mask_elem < 0);
                if (sources[(elem_index & 0b010) >> 1]) |prev_source| {
                    if (source != prev_source) break :shufps;
                } else sources[(elem_index & 0b010) >> 1] = source;

                const select_bit: u3 = @intCast((elem_index & 0b011) << 1);
                const select_mask = @as(u8, @intCast(mask_elem_index & 0b011)) << select_bit;
                if (elem_index & 0b100 == 0)
                    control |= select_mask
                else if (control & @as(u8, 0b11) << select_bit != select_mask) break :shufps;
            }
            if (sources[0] orelse break :shufps == sources[1] orelse break :shufps) break :shufps;

            const operands = [2]Air.Inst.Ref{ extra.a, extra.b };
            const operand_tys = [2]Type{ lhs_ty, rhs_ty };
            const lhs_mcv = try self.resolveInst(operands[sources[0].?]);
            const rhs_mcv = try self.resolveInst(operands[sources[1].?]);

            const dst_mcv: MCValue = if (lhs_mcv.isRegister() and
                self.reuseOperand(inst, operands[sources[0].?], sources[0].?, lhs_mcv))
                lhs_mcv
            else if (has_avx and lhs_mcv.isRegister())
                .{ .register = try self.register_manager.allocReg(inst, abi.RegisterClass.sse) }
            else
                try self.copyToRegisterWithInstTracking(inst, operand_tys[sources[0].?], lhs_mcv);
            const dst_reg = dst_mcv.getReg().?;
            const dst_alias = registerAlias(dst_reg, max_abi_size);

            if (has_avx) if (rhs_mcv.isBase()) try self.asmRegisterRegisterMemoryImmediate(
                .{ .v_ps, .shuf },
                dst_alias,
                registerAlias(lhs_mcv.getReg() orelse dst_reg, max_abi_size),
                try rhs_mcv.mem(self, .{ .size = .fromSize(max_abi_size) }),
                .u(control),
            ) else try self.asmRegisterRegisterRegisterImmediate(
                .{ .v_ps, .shuf },
                dst_alias,
                registerAlias(lhs_mcv.getReg() orelse dst_reg, max_abi_size),
                registerAlias(if (rhs_mcv.isRegister())
                    rhs_mcv.getReg().?
                else
                    try self.copyToTmpRegister(operand_tys[sources[1].?], rhs_mcv), max_abi_size),
                .u(control),
            ) else if (rhs_mcv.isBase()) try self.asmRegisterMemoryImmediate(
                .{ ._ps, .shuf },
                dst_alias,
                try rhs_mcv.mem(self, .{ .size = .fromSize(max_abi_size) }),
                .u(control),
            ) else try self.asmRegisterRegisterImmediate(
                .{ ._ps, .shuf },
                dst_alias,
                registerAlias(if (rhs_mcv.isRegister())
                    rhs_mcv.getReg().?
                else
                    try self.copyToTmpRegister(operand_tys[sources[1].?], rhs_mcv), max_abi_size),
                .u(control),
            );
            break :result dst_mcv;
        }

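        // shufpd selects one qword per destination lane: each immediate bit
        // picks the low or high element, alternating between the two sources.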
        shufpd: {
            if (elem_abi_size != 8) break :shufpd;
            if (max_abi_size > self.vectorSize(.float)) break :shufpd;

            var control: u4 = 0b0_0_0_0;
            var sources: [2]?u1 = @splat(null);
            for (mask_elems, 0..) |maybe_mask_elem, elem_index| {
                const mask_elem = maybe_mask_elem orelse continue;
                const mask_elem_index: u2 = @intCast(if (mask_elem < 0) ~mask_elem else mask_elem);
                if (mask_elem_index & 0b10 != elem_index & 0b10) break :shufpd;

                const source = @intFromBool(mask_elem < 0);
                if (sources[elem_index & 0b01]) |prev_source| {
                    if (source != prev_source) break :shufpd;
                } else sources[elem_index & 0b01] = source;

                control |= @as(u4, @intCast(mask_elem_index & 0b01)) << @intCast(elem_index);
            }
            if (sources[0] orelse break :shufpd == sources[1] orelse break :shufpd) break :shufpd;

            const operands: [2]Air.Inst.Ref = .{ extra.a, extra.b };
            const operand_tys: [2]Type = .{ lhs_ty, rhs_ty };
            const lhs_mcv = try self.resolveInst(operands[sources[0].?]);
            const rhs_mcv = try self.resolveInst(operands[sources[1].?]);

            const dst_mcv: MCValue = if (lhs_mcv.isRegister() and
                self.reuseOperand(inst, operands[sources[0].?], sources[0].?, lhs_mcv))
                lhs_mcv
            else if (has_avx and lhs_mcv.isRegister())
                .{ .register = try self.register_manager.allocReg(inst, abi.RegisterClass.sse) }
            else
                try self.copyToRegisterWithInstTracking(inst, operand_tys[sources[0].?], lhs_mcv);
            const dst_reg = dst_mcv.getReg().?;
            const dst_alias = registerAlias(dst_reg, max_abi_size);

            if (has_avx) if (rhs_mcv.isBase()) try self.asmRegisterRegisterMemoryImmediate(
                .{ .v_pd, .shuf },
                dst_alias,
                registerAlias(lhs_mcv.getReg() orelse dst_reg, max_abi_size),
                try rhs_mcv.mem(self, .{ .size = .fromSize(max_abi_size) }),
                .u(control),
            ) else try self.asmRegisterRegisterRegisterImmediate(
                .{ .v_pd, .shuf },
                dst_alias,
                registerAlias(lhs_mcv.getReg() orelse dst_reg, max_abi_size),
                registerAlias(if (rhs_mcv.isRegister())
                    rhs_mcv.getReg().?
                else
                    try self.copyToTmpRegister(operand_tys[sources[1].?], rhs_mcv), max_abi_size),
                .u(control),
            ) else if (rhs_mcv.isBase()) try self.asmRegisterMemoryImmediate(
                .{ ._pd, .shuf },
                dst_alias,
                try rhs_mcv.mem(self, .{ .size = .fromSize(max_abi_size) }),
                .u(control),
            ) else try self.asmRegisterRegisterImmediate(
                .{ ._pd, .shuf },
                dst_alias,
                registerAlias(if (rhs_mcv.isRegister())
                    rhs_mcv.getReg().?
                else
                    try self.copyToTmpRegister(operand_tys[sources[1].?], rhs_mcv), max_abi_size),
                .u(control),
            );
            break :result dst_mcv;
        }

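        // A mask whose element i is always i or ~i keeps every lane in place
        // and only alternates the source, which maps onto the immediate-form
        // blends (pblendw/vpblendd/blendps/blendpd).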
        blend: {
            if (elem_abi_size < 2) break :blend;
            if (dst_abi_size > self.vectorSize(.float)) break :blend;
            if (!self.hasFeature(.sse4_1)) break :blend;

            var control: u8 = 0b0_0_0_0_0_0_0_0;
            for (mask_elems, 0..) |maybe_mask_elem, elem_index| {
                const mask_elem = maybe_mask_elem orelse continue;
                const mask_elem_index =
                    std.math.cast(u4, if (mask_elem < 0) ~mask_elem else mask_elem) orelse break :blend;
                if (mask_elem_index != elem_index) break :blend;

                const select_mask = @as(u8, @intFromBool(mask_elem < 0)) << @truncate(elem_index);
                if (elem_index & 0b1000 == 0)
                    control |= select_mask
                else if (control & @as(u8, 0b1) << @truncate(elem_index) != select_mask) break :blend;
            }

            if (!elem_ty.isRuntimeFloat() and self.hasFeature(.avx2)) vpblendd: {
                const expanded_control = switch (elem_abi_size) {
                    4 => control,
                    8 => @as(u8, if (control & 0b0001 != 0) 0b00_00_00_11 else 0b00_00_00_00) |
                        @as(u8, if (control & 0b0010 != 0) 0b00_00_11_00 else 0b00_00_00_00) |
                        @as(u8, if (control & 0b0100 != 0) 0b00_11_00_00 else 0b00_00_00_00) |
                        @as(u8, if (control & 0b1000 != 0) 0b11_00_00_00 else 0b00_00_00_00),
                    else => break :vpblendd,
                };

                const lhs_mcv = try self.resolveInst(extra.a);
                const lhs_reg = if (lhs_mcv.isRegister())
                    lhs_mcv.getReg().?
                else
                    try self.copyToTmpRegister(dst_ty, lhs_mcv);
                const lhs_lock = self.register_manager.lockReg(lhs_reg);
                defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);

                const rhs_mcv = try self.resolveInst(extra.b);
                const dst_reg = try self.register_manager.allocReg(inst, abi.RegisterClass.sse);
                if (rhs_mcv.isBase()) try self.asmRegisterRegisterMemoryImmediate(
                    .{ .vp_d, .blend },
                    registerAlias(dst_reg, dst_abi_size),
                    registerAlias(lhs_reg, dst_abi_size),
                    try rhs_mcv.mem(self, .{ .size = .fromSize(dst_abi_size) }),
                    .u(expanded_control),
                ) else try self.asmRegisterRegisterRegisterImmediate(
                    .{ .vp_d, .blend },
                    registerAlias(dst_reg, dst_abi_size),
                    registerAlias(lhs_reg, dst_abi_size),
                    registerAlias(if (rhs_mcv.isRegister())
                        rhs_mcv.getReg().?
                    else
                        try self.copyToTmpRegister(dst_ty, rhs_mcv), dst_abi_size),
                    .u(expanded_control),
                );
                break :result .{ .register = dst_reg };
            }

            if (!elem_ty.isRuntimeFloat() or elem_abi_size == 2) pblendw: {
                const expanded_control = switch (elem_abi_size) {
                    2 => control,
                    4 => if (dst_abi_size <= 16 or
                        @as(u4, @intCast(control >> 4)) == @as(u4, @truncate(control >> 0)))
                        @as(u8, if (control & 0b0001 != 0) 0b00_00_00_11 else 0b00_00_00_00) |
                            @as(u8, if (control & 0b0010 != 0) 0b00_00_11_00 else 0b00_00_00_00) |
                            @as(u8, if (control & 0b0100 != 0) 0b00_11_00_00 else 0b00_00_00_00) |
                            @as(u8, if (control & 0b1000 != 0) 0b11_00_00_00 else 0b00_00_00_00)
                    else
                        break :pblendw,
                    8 => if (dst_abi_size <= 16 or
                        @as(u2, @intCast(control >> 2)) == @as(u2, @truncate(control >> 0)))
                        @as(u8, if (control & 0b01 != 0) 0b0000_1111 else 0b0000_0000) |
                            @as(u8, if (control & 0b10 != 0) 0b1111_0000 else 0b0000_0000)
                    else
                        break :pblendw,
                    16 => break :pblendw,
                    else => unreachable,
                };

                const lhs_mcv = try self.resolveInst(extra.a);
                const rhs_mcv = try self.resolveInst(extra.b);

                const dst_mcv: MCValue = if (lhs_mcv.isRegister() and
                    self.reuseOperand(inst, extra.a, 0, lhs_mcv))
                    lhs_mcv
                else if (has_avx and lhs_mcv.isRegister())
                    .{ .register = try self.register_manager.allocReg(inst, abi.RegisterClass.sse) }
                else
                    try self.copyToRegisterWithInstTracking(inst, dst_ty, lhs_mcv);
                const dst_reg = dst_mcv.getReg().?;

                if (has_avx) if (rhs_mcv.isBase()) try self.asmRegisterRegisterMemoryImmediate(
                    .{ .vp_w, .blend },
                    registerAlias(dst_reg, dst_abi_size),
                    registerAlias(if (lhs_mcv.isRegister())
                        lhs_mcv.getReg().?
                    else
                        dst_reg, dst_abi_size),
                    try rhs_mcv.mem(self, .{ .size = .fromSize(dst_abi_size) }),
                    .u(expanded_control),
                ) else try self.asmRegisterRegisterRegisterImmediate(
                    .{ .vp_w, .blend },
                    registerAlias(dst_reg, dst_abi_size),
                    registerAlias(if (lhs_mcv.isRegister())
                        lhs_mcv.getReg().?
                    else
                        dst_reg, dst_abi_size),
                    registerAlias(if (rhs_mcv.isRegister())
                        rhs_mcv.getReg().?
                    else
                        try self.copyToTmpRegister(dst_ty, rhs_mcv), dst_abi_size),
                    .u(expanded_control),
                ) else if (rhs_mcv.isBase()) try self.asmRegisterMemoryImmediate(
                    .{ .p_w, .blend },
                    registerAlias(dst_reg, dst_abi_size),
                    try rhs_mcv.mem(self, .{ .size = .fromSize(dst_abi_size) }),
                    .u(expanded_control),
                ) else try self.asmRegisterRegisterImmediate(
                    .{ .p_w, .blend },
                    registerAlias(dst_reg, dst_abi_size),
                    registerAlias(if (rhs_mcv.isRegister())
                        rhs_mcv.getReg().?
                    else
                        try self.copyToTmpRegister(dst_ty, rhs_mcv), dst_abi_size),
                    .u(expanded_control),
                );
                break :result .{ .register = dst_reg };
            }

            const expanded_control = switch (elem_abi_size) {
                4, 8 => control,
                16 => @as(u4, if (control & 0b01 != 0) 0b00_11 else 0b00_00) |
                    @as(u4, if (control & 0b10 != 0) 0b11_00 else 0b00_00),
                else => unreachable,
            };

            const lhs_mcv = try self.resolveInst(extra.a);
            const rhs_mcv = try self.resolveInst(extra.b);

            const dst_mcv: MCValue = if (lhs_mcv.isRegister() and
                self.reuseOperand(inst, extra.a, 0, lhs_mcv))
                lhs_mcv
            else if (has_avx and lhs_mcv.isRegister())
                .{ .register = try self.register_manager.allocReg(inst, abi.RegisterClass.sse) }
            else
                try self.copyToRegisterWithInstTracking(inst, dst_ty, lhs_mcv);
            const dst_reg = dst_mcv.getReg().?;

            if (has_avx) if (rhs_mcv.isBase()) try self.asmRegisterRegisterMemoryImmediate(
                switch (elem_abi_size) {
                    4 => .{ .v_ps, .blend },
                    8, 16 => .{ .v_pd, .blend },
                    else => unreachable,
                },
                registerAlias(dst_reg, dst_abi_size),
                registerAlias(if (lhs_mcv.isRegister())
                    lhs_mcv.getReg().?
                else
                    dst_reg, dst_abi_size),
                try rhs_mcv.mem(self, .{ .size = .fromSize(dst_abi_size) }),
                .u(expanded_control),
            ) else try self.asmRegisterRegisterRegisterImmediate(
                switch (elem_abi_size) {
                    4 => .{ .v_ps, .blend },
                    8, 16 => .{ .v_pd, .blend },
                    else => unreachable,
                },
                registerAlias(dst_reg, dst_abi_size),
                registerAlias(if (lhs_mcv.isRegister())
                    lhs_mcv.getReg().?
                else
                    dst_reg, dst_abi_size),
                registerAlias(if (rhs_mcv.isRegister())
                    rhs_mcv.getReg().?
                else
                    try self.copyToTmpRegister(dst_ty, rhs_mcv), dst_abi_size),
                .u(expanded_control),
            ) else if (rhs_mcv.isBase()) try self.asmRegisterMemoryImmediate(
                switch (elem_abi_size) {
                    4 => .{ ._ps, .blend },
                    8, 16 => .{ ._pd, .blend },
                    else => unreachable,
                },
                registerAlias(dst_reg, dst_abi_size),
                try rhs_mcv.mem(self, .{ .size = .fromSize(dst_abi_size) }),
                .u(expanded_control),
            ) else try self.asmRegisterRegisterImmediate(
                switch (elem_abi_size) {
                    4 => .{ ._ps, .blend },
                    8, 16 => .{ ._pd, .blend },
                    else => unreachable,
                },
                registerAlias(dst_reg, dst_abi_size),
                registerAlias(if (rhs_mcv.isRegister())
                    rhs_mcv.getReg().?
                else
                    try self.copyToTmpRegister(dst_ty, rhs_mcv), dst_abi_size),
                .u(expanded_control),
            );
            break :result .{ .register = dst_reg };
        }

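        // Fall back to a variable blend: materialize a constant vector whose
        // selected lanes are all-ones and blend with it. Note the non-AVX
        // blendv encodings implicitly take the mask in xmm0; without SSE4.1
        // the same select is emulated with AND/ANDN/OR below.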
        blendv: {
            if (dst_abi_size > self.vectorSize(if (elem_abi_size >= 4) .float else .int)) break :blendv;

            const select_mask_elem_ty = try pt.intType(.unsigned, elem_abi_size * 8);
            const select_mask_ty = try pt.vectorType(.{
                .len = @intCast(mask_elems.len),
                .child = select_mask_elem_ty.toIntern(),
            });
            var select_mask_elems: [32]InternPool.Index = undefined;
            for (
                select_mask_elems[0..mask_elems.len],
                mask_elems,
                0..,
            ) |*select_mask_elem, maybe_mask_elem, elem_index| {
                const mask_elem = maybe_mask_elem orelse continue;
                const mask_elem_index =
                    std.math.cast(u5, if (mask_elem < 0) ~mask_elem else mask_elem) orelse break :blendv;
                if (mask_elem_index != elem_index) break :blendv;

                select_mask_elem.* = (if (mask_elem < 0)
                    try select_mask_elem_ty.maxIntScalar(pt, select_mask_elem_ty)
                else
                    try select_mask_elem_ty.minIntScalar(pt, select_mask_elem_ty)).toIntern();
            }
            const select_mask_mcv = try self.genTypedValue(.fromInterned(try pt.intern(.{ .aggregate = .{
                .ty = select_mask_ty.toIntern(),
                .storage = .{ .elems = select_mask_elems[0..mask_elems.len] },
            } })));

            if (self.hasFeature(.sse4_1)) {
                const mir_tag: Mir.Inst.FixedTag = .{
                    if ((elem_abi_size >= 4 and elem_ty.isRuntimeFloat()) or
                        (dst_abi_size > 16 and !self.hasFeature(.avx2))) switch (elem_abi_size) {
                        4 => if (has_avx) .v_ps else ._ps,
                        8 => if (has_avx) .v_pd else ._pd,
                        else => unreachable,
                    } else if (has_avx) .vp_b else .p_b,
                    .blendv,
                };

                const select_mask_reg = if (!has_avx) reg: {
                    try self.register_manager.getKnownReg(.xmm0, null);
                    try self.genSetReg(.xmm0, select_mask_elem_ty, select_mask_mcv, .{});
                    break :reg .xmm0;
                } else try self.copyToTmpRegister(select_mask_ty, select_mask_mcv);
                const select_mask_alias = registerAlias(select_mask_reg, dst_abi_size);
                const select_mask_lock = self.register_manager.lockRegAssumeUnused(select_mask_reg);
                defer self.register_manager.unlockReg(select_mask_lock);

                const lhs_mcv = try self.resolveInst(extra.a);
                const rhs_mcv = try self.resolveInst(extra.b);

                const dst_mcv: MCValue = if (lhs_mcv.isRegister() and
                    self.reuseOperand(inst, extra.a, 0, lhs_mcv))
                    lhs_mcv
                else if (has_avx and lhs_mcv.isRegister())
                    .{ .register = try self.register_manager.allocReg(inst, abi.RegisterClass.sse) }
                else
                    try self.copyToRegisterWithInstTracking(inst, dst_ty, lhs_mcv);
                const dst_reg = dst_mcv.getReg().?;
                const dst_alias = registerAlias(dst_reg, dst_abi_size);

                if (has_avx) if (rhs_mcv.isBase()) try self.asmRegisterRegisterMemoryRegister(
                    mir_tag,
                    dst_alias,
                    if (lhs_mcv.isRegister())
                        registerAlias(lhs_mcv.getReg().?, dst_abi_size)
                    else
                        dst_alias,
                    try rhs_mcv.mem(self, .{ .size = .fromSize(dst_abi_size) }),
                    select_mask_alias,
                ) else try self.asmRegisterRegisterRegisterRegister(
                    mir_tag,
                    dst_alias,
                    if (lhs_mcv.isRegister())
                        registerAlias(lhs_mcv.getReg().?, dst_abi_size)
                    else
                        dst_alias,
                    registerAlias(if (rhs_mcv.isRegister())
                        rhs_mcv.getReg().?
                    else
                        try self.copyToTmpRegister(dst_ty, rhs_mcv), dst_abi_size),
                    select_mask_alias,
                ) else if (rhs_mcv.isBase()) try self.asmRegisterMemoryRegister(
                    mir_tag,
                    dst_alias,
                    try rhs_mcv.mem(self, .{ .size = .fromSize(dst_abi_size) }),
                    select_mask_alias,
                ) else try self.asmRegisterRegisterRegister(
                    mir_tag,
                    dst_alias,
                    registerAlias(if (rhs_mcv.isRegister())
                        rhs_mcv.getReg().?
                    else
                        try self.copyToTmpRegister(dst_ty, rhs_mcv), dst_abi_size),
                    select_mask_alias,
                );
                break :result dst_mcv;
            }

            const lhs_mcv = try self.resolveInst(extra.a);
            const rhs_mcv = try self.resolveInst(extra.b);

            const dst_mcv: MCValue = if (rhs_mcv.isRegister() and
                self.reuseOperand(inst, extra.b, 1, rhs_mcv))
                rhs_mcv
            else
                try self.copyToRegisterWithInstTracking(inst, dst_ty, rhs_mcv);
            const dst_reg = dst_mcv.getReg().?;
            const dst_alias = registerAlias(dst_reg, dst_abi_size);

            const mask_reg = try self.copyToTmpRegister(select_mask_ty, select_mask_mcv);
            const mask_alias = registerAlias(mask_reg, dst_abi_size);
            const mask_lock = self.register_manager.lockRegAssumeUnused(mask_reg);
            defer self.register_manager.unlockReg(mask_lock);

            const mir_fixes: Mir.Inst.Fixes = if (elem_ty.isRuntimeFloat())
                switch (elem_ty.floatBits(self.target.*)) {
                    16, 80, 128 => .p_,
                    32 => ._ps,
                    64 => ._pd,
                    else => unreachable,
                }
            else
                .p_;
            try self.asmRegisterRegister(.{ mir_fixes, .@"and" }, dst_alias, mask_alias);
            if (lhs_mcv.isBase()) try self.asmRegisterMemory(
                .{ mir_fixes, .andn },
                mask_alias,
                try lhs_mcv.mem(self, .{ .size = .fromSize(dst_abi_size) }),
            ) else try self.asmRegisterRegister(
                .{ mir_fixes, .andn },
                mask_alias,
                if (lhs_mcv.isRegister())
                    lhs_mcv.getReg().?
                else
                    try self.copyToTmpRegister(dst_ty, lhs_mcv),
            );
            try self.asmRegisterRegister(.{ mir_fixes, .@"or" }, dst_alias, mask_alias);
            break :result dst_mcv;
        }

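        // General case via pshufb, a byte-granularity table lookup: each mask
        // byte selects a source byte, and a set high bit (0x80) zeroes the
        // result byte, so shuffle both sources and OR the halves together.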
        pshufb: {
            if (max_abi_size > 16) break :pshufb;
            if (!self.hasFeature(.ssse3)) break :pshufb;

            const temp_regs =
                try self.register_manager.allocRegs(2, .{ inst, null }, abi.RegisterClass.sse);
            const temp_locks = self.register_manager.lockRegsAssumeUnused(2, temp_regs);
            defer for (temp_locks) |lock| self.register_manager.unlockReg(lock);

            const lhs_temp_alias = registerAlias(temp_regs[0], max_abi_size);
            try self.genSetReg(temp_regs[0], lhs_ty, .{ .air_ref = extra.a }, .{});

            const rhs_temp_alias = registerAlias(temp_regs[1], max_abi_size);
            try self.genSetReg(temp_regs[1], rhs_ty, .{ .air_ref = extra.b }, .{});

            var lhs_mask_elems: [16]InternPool.Index = undefined;
            for (lhs_mask_elems[0..max_abi_size], 0..) |*lhs_mask_elem, byte_index| {
                const elem_index = byte_index / elem_abi_size;
                lhs_mask_elem.* = try pt.intern(.{ .int = .{
                    .ty = .u8_type,
                    .storage = .{ .u64 = if (elem_index >= mask_elems.len) 0b1_00_00000 else elem: {
                        const mask_elem = mask_elems[elem_index] orelse break :elem 0b1_00_00000;
                        if (mask_elem < 0) break :elem 0b1_00_00000;
                        const mask_elem_index: u31 = @intCast(mask_elem);
                        const byte_off: u32 = @intCast(byte_index % elem_abi_size);
                        break :elem @intCast(mask_elem_index * elem_abi_size + byte_off);
                    } },
                } });
            }
            const lhs_mask_ty = try pt.vectorType(.{ .len = max_abi_size, .child = .u8_type });
            const lhs_mask_mcv = try self.genTypedValue(.fromInterned(try pt.intern(.{ .aggregate = .{
                .ty = lhs_mask_ty.toIntern(),
                .storage = .{ .elems = lhs_mask_elems[0..max_abi_size] },
            } })));
            const lhs_mask_mem: Memory = .{
                .base = .{ .reg = try self.copyToTmpRegister(.usize, lhs_mask_mcv.address()) },
                .mod = .{ .rm = .{ .size = .fromSize(@max(max_abi_size, 16)) } },
            };
            if (has_avx) try self.asmRegisterRegisterMemory(
                .{ .vp_b, .shuf },
                lhs_temp_alias,
                lhs_temp_alias,
                lhs_mask_mem,
            ) else try self.asmRegisterMemory(
                .{ .p_b, .shuf },
                lhs_temp_alias,
                lhs_mask_mem,
            );

            var rhs_mask_elems: [16]InternPool.Index = undefined;
            for (rhs_mask_elems[0..max_abi_size], 0..) |*rhs_mask_elem, byte_index| {
                const elem_index = byte_index / elem_abi_size;
                rhs_mask_elem.* = try pt.intern(.{ .int = .{
                    .ty = .u8_type,
                    .storage = .{ .u64 = if (elem_index >= mask_elems.len) 0b1_00_00000 else elem: {
                        const mask_elem = mask_elems[elem_index] orelse break :elem 0b1_00_00000;
                        if (mask_elem >= 0) break :elem 0b1_00_00000;
                        const mask_elem_index: u31 = @intCast(~mask_elem);
                        const byte_off: u32 = @intCast(byte_index % elem_abi_size);
                        break :elem @intCast(mask_elem_index * elem_abi_size + byte_off);
                    } },
                } });
            }
            const rhs_mask_ty = try pt.vectorType(.{ .len = max_abi_size, .child = .u8_type });
            const rhs_mask_mcv = try self.genTypedValue(.fromInterned(try pt.intern(.{ .aggregate = .{
                .ty = rhs_mask_ty.toIntern(),
                .storage = .{ .elems = rhs_mask_elems[0..max_abi_size] },
            } })));
            const rhs_mask_mem: Memory = .{
                .base = .{ .reg = try self.copyToTmpRegister(.usize, rhs_mask_mcv.address()) },
                .mod = .{ .rm = .{ .size = .fromSize(@max(max_abi_size, 16)) } },
            };
            if (has_avx) try self.asmRegisterRegisterMemory(
                .{ .vp_b, .shuf },
                rhs_temp_alias,
                rhs_temp_alias,
                rhs_mask_mem,
            ) else try self.asmRegisterMemory(
                .{ .p_b, .shuf },
                rhs_temp_alias,
                rhs_mask_mem,
            );

            if (has_avx) try self.asmRegisterRegisterRegister(
                .{ switch (elem_ty.zigTypeTag(zcu)) {
                    else => break :result null,
                    .int => .vp_,
                    .float => switch (elem_ty.floatBits(self.target.*)) {
                        32 => .v_ps,
                        64 => .v_pd,
                        16, 80, 128 => break :result null,
                        else => unreachable,
                    },
                }, .@"or" },
                lhs_temp_alias,
                lhs_temp_alias,
                rhs_temp_alias,
            ) else try self.asmRegisterRegister(
                .{ switch (elem_ty.zigTypeTag(zcu)) {
                    else => break :result null,
                    .int => .p_,
                    .float => switch (elem_ty.floatBits(self.target.*)) {
                        32 => ._ps,
                        64 => ._pd,
                        16, 80, 128 => break :result null,
                        else => unreachable,
                    },
                }, .@"or" },
                lhs_temp_alias,
                rhs_temp_alias,
            );
            break :result .{ .register = temp_regs[0] };
        }

        break :result null;
    }) orelse return self.fail("TODO implement airShuffle from {} and {} to {} with {}", .{
        lhs_ty.fmt(pt),
        rhs_ty.fmt(pt),
        dst_ty.fmt(pt),
        Value.fromInterned(extra.mask).fmtValue(pt),
    });
    return self.finishAir(inst, result, .{ extra.a, extra.b, .none });
}

fn airReduce(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce;

    const result: MCValue = result: {
        const operand_ty = self.typeOf(reduce.operand);
        if (operand_ty.isVector(zcu) and operand_ty.childType(zcu).toIntern() == .bool_type) {
            try self.spillEflagsIfOccupied();

            const abi_size: u32 = @intCast(operand_ty.abiSize(zcu));
            const operand_mcv = try self.resolveInst(reduce.operand);
            const mask_len = operand_ty.vectorLen(zcu);
            const mask_len_minus_one = (std.math.cast(u6, mask_len - 1) orelse {
                const acc_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
                const acc_lock = self.register_manager.lockRegAssumeUnused(acc_reg);
                defer self.register_manager.unlockReg(acc_lock);
                var limb_offset: i31 = 0;
                while (limb_offset < abi_size) : (limb_offset += 8) {
                    try self.asmRegisterMemory(
                        .{ ._, if (limb_offset == 0) .mov else switch (reduce.operation) {
                            .Or => .@"or",
                            .And => .@"and",
                            else => return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(pt)}),
                        } },
                        acc_reg.to64(),
                        try operand_mcv.mem(self, .{
                            .size = .qword,
                            .disp = limb_offset,
                        }),
                    );
                }
                switch (reduce.operation) {
                    .Or => {
                        try self.asmRegisterRegister(.{ ._, .@"test" }, acc_reg.to64(), acc_reg.to64());
                        break :result .{ .eflags = .nz };
                    },
                    .And => {
                        try self.asmRegisterImmediate(.{ ._, .cmp }, acc_reg.to64(), .s(-1));
                        break :result .{ .eflags = .z };
                    },
                    else => unreachable,
                }
            });
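            // Bool vectors are bit-packed, so the reduce operates on the
            // packed integer; maxInt(u64) >> ~(mask_len - 1) keeps exactly
            // the low mask_len bits.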
            const mask = @as(u64, std.math.maxInt(u64)) >> ~mask_len_minus_one;
            switch (reduce.operation) {
                .Or => {
                    if (operand_mcv.isBase()) try self.asmMemoryImmediate(
                        .{ ._, .@"test" },
                        try operand_mcv.mem(self, .{ .size = .fromSize(abi_size) }),
                        if (mask_len < abi_size * 8)
                            .u(mask)
                        else
                            .s(-1),
                    ) else {
                        const operand_reg = registerAlias(operand_reg: {
                            if (operand_mcv.isRegister()) {
                                const operand_reg = operand_mcv.getReg().?;
                                if (operand_reg.class() == .general_purpose) break :operand_reg operand_reg;
                            }
                            break :operand_reg try self.copyToTmpRegister(operand_ty, operand_mcv);
                        }, abi_size);
                        const operand_lock = self.register_manager.lockReg(operand_reg);
                        defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);

                        if (mask_len < abi_size * 8) try self.asmRegisterImmediate(
                            .{ ._, .@"test" },
                            operand_reg,
                            .u(mask),
                        ) else try self.asmRegisterRegister(
                            .{ ._, .@"test" },
                            operand_reg,
                            operand_reg,
                        );
                    }
                    break :result .{ .eflags = .nz };
                },
                .And => {
                    const tmp_reg = registerAlias(
                        try self.copyToTmpRegister(operand_ty, operand_mcv),
                        abi_size,
                    );
                    const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
                    defer self.register_manager.unlockReg(tmp_lock);

                    try self.asmRegister(.{ ._, .not }, tmp_reg);
                    if (mask_len < abi_size * 8)
                        try self.asmRegisterImmediate(.{ ._, .@"test" }, tmp_reg, .u(mask))
                    else
                        try self.asmRegisterRegister(.{ ._, .@"test" }, tmp_reg, tmp_reg);
                    break :result .{ .eflags = .z };
                },
                else => return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(pt)}),
            }
        }
        return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(pt)});
    };
    return self.finishAir(inst, result, .{ reduce.operand, .none, .none });
}

fn airAggregateInit(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const result_ty = self.typeOfIndex(inst);
    const len: usize = @intCast(result_ty.arrayLen(zcu));
    const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
    const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]);
    const result: MCValue = result: {
        switch (result_ty.zigTypeTag(zcu)) {
            .@"struct" => {
                const frame_index = try self.allocFrameIndex(.initSpill(result_ty, zcu));
                if (result_ty.containerLayout(zcu) == .@"packed") {
                    const loaded_struct = zcu.intern_pool.loadStructType(result_ty.toIntern());
                    try self.genInlineMemset(
                        .{ .lea_frame = .{ .index = frame_index } },
                        .{ .immediate = 0 },
                        .{ .immediate = result_ty.abiSize(zcu) },
                        .{},
                    );
                    for (elements, 0..) |elem, elem_i_usize| {
                        const elem_i: u32 = @intCast(elem_i_usize);
                        if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue;

                        const elem_ty = result_ty.fieldType(elem_i, zcu);
                        const elem_bit_size: u32 = @intCast(elem_ty.bitSize(zcu));
                        if (elem_bit_size > 64) {
                            return self.fail(
                                "TODO airAggregateInit implement packed structs with large fields",
                                .{},
                            );
                        }
                        const elem_abi_size: u32 = @intCast(elem_ty.abiSize(zcu));
                        const elem_abi_bits = elem_abi_size * 8;
                        const elem_off = pt.structPackedFieldBitOffset(loaded_struct, elem_i);
                        const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size);
                        const elem_bit_off = elem_off % elem_abi_bits;
                        const elem_mcv = try self.resolveInst(elem);
                        const mat_elem_mcv = switch (elem_mcv) {
                            .load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index },
                            else => elem_mcv,
                        };
                        const elem_lock = switch (mat_elem_mcv) {
                            .register => |reg| self.register_manager.lockReg(reg),
                            .immediate => |imm| lock: {
                                if (imm == 0) continue;
                                break :lock null;
                            },
                            else => null,
                        };
                        defer if (elem_lock) |lock| self.register_manager.unlockReg(lock);

                        const elem_extra_bits = self.regExtraBits(elem_ty);
                        {
                            const temp_reg = try self.copyToTmpRegister(elem_ty, mat_elem_mcv);
                            const temp_alias = registerAlias(temp_reg, elem_abi_size);
                            const temp_lock = self.register_manager.lockRegAssumeUnused(temp_reg);
                            defer self.register_manager.unlockReg(temp_lock);

                            if (elem_bit_off < elem_extra_bits) {
                                try self.truncateRegister(elem_ty, temp_alias);
                            }
                            if (elem_bit_off > 0) try self.genShiftBinOpMir(
                                .{ ._l, .sh },
                                elem_ty,
                                .{ .register = temp_alias },
                                .u8,
                                .{ .immediate = elem_bit_off },
                            );
                            try self.genBinOpMir(
                                .{ ._, .@"or" },
                                elem_ty,
                                .{ .load_frame = .{ .index = frame_index, .off = elem_byte_off } },
                                .{ .register = temp_alias },
                            );
                        }
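                        // A field that straddles a limb boundary also needs
                        // its high bits shifted down and ORed into the next
                        // limb.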
                        if (elem_bit_off > elem_extra_bits) {
                            const temp_reg = try self.copyToTmpRegister(elem_ty, mat_elem_mcv);
                            const temp_alias = registerAlias(temp_reg, elem_abi_size);
                            const temp_lock = self.register_manager.lockRegAssumeUnused(temp_reg);
                            defer self.register_manager.unlockReg(temp_lock);

                            if (elem_extra_bits > 0) {
                                try self.truncateRegister(elem_ty, temp_alias);
                            }
                            try self.genShiftBinOpMir(
                                .{ ._r, .sh },
                                elem_ty,
                                .{ .register = temp_reg },
                                .u8,
                                .{ .immediate = elem_abi_bits - elem_bit_off },
                            );
                            try self.genBinOpMir(
                                .{ ._, .@"or" },
                                elem_ty,
                                .{ .load_frame = .{
                                    .index = frame_index,
                                    .off = elem_byte_off + @as(i32, @intCast(elem_abi_size)),
                                } },
                                .{ .register = temp_alias },
                            );
                        }
                    }
                } else for (elements, 0..) |elem, elem_i| {
                    if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue;

                    const elem_ty = result_ty.fieldType(elem_i, zcu);
                    const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, zcu));
                    const elem_mcv = try self.resolveInst(elem);
                    const mat_elem_mcv = switch (elem_mcv) {
                        .load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index },
                        else => elem_mcv,
                    };
                    try self.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, mat_elem_mcv, .{});
                }
                break :result .{ .load_frame = .{ .index = frame_index } };
            },
            .array, .vector => {
                const elem_ty = result_ty.childType(zcu);
                if (result_ty.isVector(zcu) and elem_ty.toIntern() == .bool_type) {
                    const result_size: u32 = @intCast(result_ty.abiSize(zcu));
                    const dst_reg = try self.register_manager.allocReg(inst, abi.RegisterClass.gp);
                    try self.asmRegisterRegister(
                        .{ ._, .xor },
                        registerAlias(dst_reg, @min(result_size, 4)),
                        registerAlias(dst_reg, @min(result_size, 4)),
                    );

                    for (elements, 0..) |elem, elem_i| {
                        const elem_reg = try self.copyToTmpRegister(elem_ty, .{ .air_ref = elem });
                        const elem_lock = self.register_manager.lockRegAssumeUnused(elem_reg);
                        defer self.register_manager.unlockReg(elem_lock);

                        try self.asmRegisterImmediate(
                            .{ ._, .@"and" },
                            registerAlias(elem_reg, @min(result_size, 4)),
                            .u(1),
                        );
                        if (elem_i > 0) try self.asmRegisterImmediate(
                            .{ ._l, .sh },
                            registerAlias(elem_reg, result_size),
                            .u(@intCast(elem_i)),
                        );
                        try self.asmRegisterRegister(
                            .{ ._, .@"or" },
                            registerAlias(dst_reg, result_size),
                            registerAlias(elem_reg, result_size),
                        );
                    }
                    break :result .{ .register = dst_reg };
                } else {
                    const frame_index = try self.allocFrameIndex(.initSpill(result_ty, zcu));
                    const elem_size: u32 = @intCast(elem_ty.abiSize(zcu));

                    for (elements, 0..) |elem, elem_i| {
                        const elem_mcv = try self.resolveInst(elem);
                        const mat_elem_mcv = switch (elem_mcv) {
                            .load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index },
                            else => elem_mcv,
                        };
                        const elem_off: i32 = @intCast(elem_size * elem_i);
                        try self.genSetMem(
                            .{ .frame = frame_index },
                            elem_off,
                            elem_ty,
                            mat_elem_mcv,
                            .{},
                        );
                    }
                    if (result_ty.sentinel(zcu)) |sentinel| try self.genSetMem(
                        .{ .frame = frame_index },
                        @intCast(elem_size * elements.len),
                        elem_ty,
                        try self.genTypedValue(sentinel),
                        .{},
                    );
                    break :result .{ .load_frame = .{ .index = frame_index } };
                }
            },
            else => unreachable,
        }
    };

    if (elements.len <= Liveness.bpi - 1) {
        var buf: [Liveness.bpi - 1]Air.Inst.Ref = @splat(.none);
        @memcpy(buf[0..elements.len], elements);
        return self.finishAir(inst, result, buf);
    }
    var bt = self.liveness.iterateBigTomb(inst);
    for (elements) |elem| try self.feed(&bt, elem);
    return self.finishAirResult(inst, result);
}

fn airUnionInit(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
    const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
    const result: MCValue = result: {
        const union_ty = self.typeOfIndex(inst);
        const layout = union_ty.unionGetLayout(zcu);

        const src_ty = self.typeOf(extra.init);
        const src_mcv = try self.resolveInst(extra.init);
        if (layout.tag_size == 0) {
            if (layout.abi_size <= src_ty.abiSize(zcu) and
                self.reuseOperand(inst, extra.init, 0, src_mcv)) break :result src_mcv;

            const dst_mcv = try self.allocRegOrMem(inst, true);
            try self.genCopy(src_ty, dst_mcv, src_mcv, .{});
            break :result dst_mcv;
        }

        const dst_mcv = try self.allocRegOrMem(inst, false);

        const loaded_union = zcu.typeToUnion(union_ty).?;
        const field_name = loaded_union.loadTagType(ip).names.get(ip)[extra.field_index];
        const tag_ty: Type = .fromInterned(loaded_union.enum_tag_ty);
        const field_index = tag_ty.enumFieldIndex(field_name, zcu).?;
        const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index);
        const tag_int_val = try tag_val.intFromEnum(tag_ty, pt);
        const tag_int = tag_int_val.toUnsignedInt(zcu);
        const tag_off: i32 = @intCast(layout.tagOffset());
        try self.genCopy(
            tag_ty,
            dst_mcv.address().offset(tag_off).deref(),
            .{ .immediate = tag_int },
            .{},
        );

        const pl_off: i32 = @intCast(layout.payloadOffset());
        try self.genCopy(src_ty, dst_mcv.address().offset(pl_off).deref(), src_mcv, .{});

        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ extra.init, .none, .none });
}

fn airPrefetch(self: *CodeGen, inst: Air.Inst.Index) !void {
    const prefetch = self.air.instructions.items(.data)[@intFromEnum(inst)].prefetch;
    return self.finishAir(inst, .unreach, .{ prefetch.ptr, .none, .none });
}

fn airMulAdd(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
    const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
    const ty = self.typeOfIndex(inst);

    const ops = [3]Air.Inst.Ref{ extra.lhs, extra.rhs, pl_op.operand };
    const result = result: {
        if (switch (ty.scalarType(zcu).floatBits(self.target.*)) {
            16, 80, 128 => true,
            32, 64 => !self.hasFeature(.fma),
            else => unreachable,
        }) {
            if (ty.zigTypeTag(zcu) != .float) return self.fail("TODO implement airMulAdd for {}", .{
                ty.fmt(pt),
            });

            var callee_buf: ["__fma?".len]u8 = undefined;
            break :result try self.genCall(.{ .lib = .{
                .return_type = ty.toIntern(),
                .param_types = &.{ ty.toIntern(), ty.toIntern(), ty.toIntern() },
                .callee = std.fmt.bufPrint(&callee_buf, "{s}fma{s}", .{
                    floatLibcAbiPrefix(ty),
                    floatLibcAbiSuffix(ty),
                }) catch unreachable,
            } }, &.{ ty, ty, ty }, &.{
                .{ .air_ref = extra.lhs }, .{ .air_ref = extra.rhs }, .{ .air_ref = pl_op.operand },
            }, .{});
        }

        var mcvs: [3]MCValue = undefined;
        var locks: [3]?RegisterManager.RegisterLock = @splat(null);
        defer for (locks) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock);
        var order: [3]u2 = @splat(0);
        var unused: std.StaticBitSet(3) = .initFull();
        for (ops, &mcvs, &locks, 0..) |op, *mcv, *lock, op_i| {
            const op_index: u2 = @intCast(op_i);
            mcv.* = try self.resolveInst(op);
            if (unused.isSet(0) and mcv.isRegister() and self.reuseOperand(inst, op, op_index, mcv.*)) {
                order[op_index] = 1;
                unused.unset(0);
            } else if (unused.isSet(2) and mcv.isBase()) {
                order[op_index] = 3;
                unused.unset(2);
            }
            switch (mcv.*) {
                .register => |reg| lock.* = self.register_manager.lockReg(reg),
                else => {},
            }
        }
        for (&order, &mcvs, &locks) |*mop_index, *mcv, *lock| {
            if (mop_index.* != 0) continue;
            mop_index.* = 1 + @as(u2, @intCast(unused.toggleFirstSet().?));
            if (mop_index.* > 1 and mcv.isRegister()) continue;
            const reg = try self.copyToTmpRegister(ty, mcv.*);
            mcv.* = .{ .register = reg };
            if (lock.*) |old_lock| self.register_manager.unlockReg(old_lock);
            lock.* = self.register_manager.lockRegAssumeUnused(reg);
        }

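        // vfmadd132/213/231 all compute a*b+c and differ only in which of the
        // three operand slots act as multiplicands and addend; the form is
        // chosen from `order` so the reused register lands in mop1 and a
        // memory operand, if any, lands in mop3.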
        const mir_tag = @as(?Mir.Inst.FixedTag, if (std.mem.eql(u2, &order, &.{ 1, 3, 2 }) or
            std.mem.eql(u2, &order, &.{ 3, 1, 2 }))
            switch (ty.zigTypeTag(zcu)) {
                .float => switch (ty.floatBits(self.target.*)) {
                    32 => .{ .v_ss, .fmadd132 },
                    64 => .{ .v_sd, .fmadd132 },
                    16, 80, 128 => null,
                    else => unreachable,
                },
                .vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
                    .float => switch (ty.childType(zcu).floatBits(self.target.*)) {
                        32 => switch (ty.vectorLen(zcu)) {
                            1 => .{ .v_ss, .fmadd132 },
                            2...8 => .{ .v_ps, .fmadd132 },
                            else => null,
                        },
                        64 => switch (ty.vectorLen(zcu)) {
                            1 => .{ .v_sd, .fmadd132 },
                            2...4 => .{ .v_pd, .fmadd132 },
                            else => null,
                        },
                        16, 80, 128 => null,
                        else => unreachable,
                    },
                    else => unreachable,
                },
                else => unreachable,
            }
        else if (std.mem.eql(u2, &order, &.{ 2, 1, 3 }) or std.mem.eql(u2, &order, &.{ 1, 2, 3 }))
            switch (ty.zigTypeTag(zcu)) {
                .float => switch (ty.floatBits(self.target.*)) {
                    32 => .{ .v_ss, .fmadd213 },
                    64 => .{ .v_sd, .fmadd213 },
                    16, 80, 128 => null,
                    else => unreachable,
                },
                .vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
                    .float => switch (ty.childType(zcu).floatBits(self.target.*)) {
                        32 => switch (ty.vectorLen(zcu)) {
                            1 => .{ .v_ss, .fmadd213 },
                            2...8 => .{ .v_ps, .fmadd213 },
                            else => null,
                        },
                        64 => switch (ty.vectorLen(zcu)) {
                            1 => .{ .v_sd, .fmadd213 },
                            2...4 => .{ .v_pd, .fmadd213 },
                            else => null,
                        },
                        16, 80, 128 => null,
                        else => unreachable,
                    },
                    else => unreachable,
                },
                else => unreachable,
            }
        else if (std.mem.eql(u2, &order, &.{ 2, 3, 1 }) or std.mem.eql(u2, &order, &.{ 3, 2, 1 }))
            switch (ty.zigTypeTag(zcu)) {
                .float => switch (ty.floatBits(self.target.*)) {
                    32 => .{ .v_ss, .fmadd231 },
                    64 => .{ .v_sd, .fmadd231 },
                    16, 80, 128 => null,
                    else => unreachable,
                },
                .vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
                    .float => switch (ty.childType(zcu).floatBits(self.target.*)) {
                        32 => switch (ty.vectorLen(zcu)) {
                            1 => .{ .v_ss, .fmadd231 },
                            2...8 => .{ .v_ps, .fmadd231 },
                            else => null,
                        },
                        64 => switch (ty.vectorLen(zcu)) {
                            1 => .{ .v_sd, .fmadd231 },
                            2...4 => .{ .v_pd, .fmadd231 },
                            else => null,
                        },
                        16, 80, 128 => null,
                        else => unreachable,
                    },
                    else => unreachable,
                },
                else => unreachable,
            }
        else
            unreachable) orelse return self.fail("TODO implement airMulAdd for {}", .{ty.fmt(pt)});

        var mops: [3]MCValue = undefined;
        for (order, mcvs) |mop_index, mcv| mops[mop_index - 1] = mcv;

        const abi_size: u32 = @intCast(ty.abiSize(zcu));
        const mop1_reg = registerAlias(mops[0].getReg().?, abi_size);
        const mop2_reg = registerAlias(mops[1].getReg().?, abi_size);
        if (mops[2].isRegister()) try self.asmRegisterRegisterRegister(
            mir_tag,
            mop1_reg,
            mop2_reg,
            registerAlias(mops[2].getReg().?, abi_size),
        ) else try self.asmRegisterRegisterMemory(
            mir_tag,
            mop1_reg,
            mop2_reg,
            try mops[2].mem(self, .{ .size = .fromSize(abi_size) }),
        );
        break :result mops[0];
    };
    return self.finishAir(inst, result, ops);
}

fn airVaStart(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const va_list_ty = self.air.instructions.items(.data)[@intFromEnum(inst)].ty;
    const ptr_anyopaque_ty = try pt.singleMutPtrType(.anyopaque);

    const result: MCValue = switch (self.fn_type.fnCallingConvention(zcu)) {
        .x86_64_sysv => result: {
            const info = self.va_info.sysv;
            const dst_fi = try self.allocFrameIndex(.initSpill(va_list_ty, zcu));
            var field_off: u31 = 0;
            // gp_offset: c_uint,
            try self.genSetMem(
                .{ .frame = dst_fi },
                field_off,
                .c_uint,
                .{ .immediate = info.gp_count * 8 },
                .{},
            );
            field_off += @intCast(Type.c_uint.abiSize(zcu));
            // fp_offset: c_uint,
            try self.genSetMem(
                .{ .frame = dst_fi },
                field_off,
                .c_uint,
                .{ .immediate = abi.SysV.c_abi_int_param_regs.len * 8 + info.fp_count * 16 },
                .{},
            );
            field_off += @intCast(Type.c_uint.abiSize(zcu));
            // overflow_arg_area: *anyopaque,
            try self.genSetMem(
                .{ .frame = dst_fi },
                field_off,
                ptr_anyopaque_ty,
                .{ .lea_frame = info.overflow_arg_area },
                .{},
            );
            field_off += @intCast(ptr_anyopaque_ty.abiSize(zcu));
            // reg_save_area: *anyopaque,
            try self.genSetMem(
                .{ .frame = dst_fi },
                field_off,
                ptr_anyopaque_ty,
                .{ .lea_frame = info.reg_save_area },
                .{},
            );
            field_off += @intCast(ptr_anyopaque_ty.abiSize(zcu));
            break :result .{ .load_frame = .{ .index = dst_fi } };
        },
        .x86_64_win => return self.fail("TODO implement c_va_start for Win64", .{}),
        else => |cc| return self.fail("{s} does not support var args", .{@tagName(cc)}),
    };
    return self.finishAir(inst, result, .{ .none, .none, .none });
}

fn airVaArg(self: *CodeGen, inst: Air.Inst.Index) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const ty = self.typeOfIndex(inst);
    const promote_ty = self.promoteVarArg(ty);
    const ptr_anyopaque_ty = try pt.singleMutPtrType(.anyopaque);
    const unused = self.liveness.isUnused(inst);

    const result: MCValue = switch (self.fn_type.fnCallingConvention(zcu)) {
        .x86_64_sysv => result: {
            try self.spillEflagsIfOccupied();

            const tmp_regs =
                try self.register_manager.allocRegs(2, @splat(null), abi.RegisterClass.gp);
            const offset_reg = tmp_regs[0].to32();
            const addr_reg = tmp_regs[1].to64();
            const tmp_locks = self.register_manager.lockRegsAssumeUnused(2, tmp_regs);
            defer for (tmp_locks) |lock| self.register_manager.unlockReg(lock);

            const promote_mcv = try self.allocTempRegOrMem(promote_ty, true);
            const promote_lock = switch (promote_mcv) {
                .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
                else => null,
            };
            defer if (promote_lock) |lock| self.register_manager.unlockReg(lock);

            const ptr_arg_list_reg =
                try self.copyToTmpRegister(self.typeOf(ty_op.operand), .{ .air_ref = ty_op.operand });
            const ptr_arg_list_lock = self.register_manager.lockRegAssumeUnused(ptr_arg_list_reg);
            defer self.register_manager.unlockReg(ptr_arg_list_lock);

            const gp_offset: MCValue = .{ .indirect = .{ .reg = ptr_arg_list_reg, .off = 0 } };
            const fp_offset: MCValue = .{ .indirect = .{ .reg = ptr_arg_list_reg, .off = 4 } };
            const overflow_arg_area: MCValue = .{ .indirect = .{ .reg = ptr_arg_list_reg, .off = 8 } };
            const reg_save_area: MCValue = .{ .indirect = .{ .reg = ptr_arg_list_reg, .off = 16 } };

            const classes = std.mem.sliceTo(&abi.classifySystemV(promote_ty, zcu, self.target.*, .arg), .none);
            switch (classes[0]) {
                .integer => {
                    assert(classes.len == 1);

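                    // SysV va_arg: while gp_offset < 6*8 the next integer
                    // argument is in the register save area; otherwise it is
                    // loaded from the overflow area, whose pointer is then
                    // advanced past the argument.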
try self.genSetReg(offset_reg, .c_uint, gp_offset, .{});
|
|
try self.asmRegisterImmediate(.{ ._, .cmp }, offset_reg, .u(
|
|
abi.SysV.c_abi_int_param_regs.len * 8,
|
|
));
|
|
const mem_reloc = try self.asmJccReloc(.ae, undefined);
|
|
|
|
try self.genSetReg(addr_reg, ptr_anyopaque_ty, reg_save_area, .{});
|
|
if (!unused) try self.asmRegisterMemory(.{ ._, .lea }, addr_reg, .{
                        .base = .{ .reg = addr_reg },
                        .mod = .{ .rm = .{
                            .size = .qword,
                            .index = offset_reg.to64(),
                        } },
                    });
                    try self.asmRegisterMemory(.{ ._, .lea }, offset_reg, .{
                        .base = .{ .reg = offset_reg.to64() },
                        .mod = .{ .rm = .{
                            .size = .qword,
                            .disp = 8,
                        } },
                    });
                    try self.genCopy(.c_uint, gp_offset, .{ .register = offset_reg }, .{});
                    const done_reloc = try self.asmJmpReloc(undefined);

                    self.performReloc(mem_reloc);
                    try self.genSetReg(addr_reg, ptr_anyopaque_ty, overflow_arg_area, .{});
                    try self.asmRegisterMemory(.{ ._, .lea }, offset_reg.to64(), .{
                        .base = .{ .reg = addr_reg },
                        .mod = .{ .rm = .{
                            .size = .qword,
                            .disp = @intCast(@max(promote_ty.abiSize(zcu), 8)),
                        } },
                    });
                    try self.genCopy(
                        ptr_anyopaque_ty,
                        overflow_arg_area,
                        .{ .register = offset_reg.to64() },
                        .{},
                    );

                    self.performReloc(done_reloc);
                    if (!unused) try self.genCopy(promote_ty, promote_mcv, .{
                        .indirect = .{ .reg = addr_reg },
                    }, .{});
                },
                .sse => {
                    assert(classes.len == 1);

                    try self.genSetReg(offset_reg, .c_uint, fp_offset, .{});
                    try self.asmRegisterImmediate(.{ ._, .cmp }, offset_reg, .u(
                        abi.SysV.c_abi_int_param_regs.len * 8 + abi.SysV.c_abi_sse_param_regs.len * 16,
                    ));
                    const mem_reloc = try self.asmJccReloc(.ae, undefined);

                    try self.genSetReg(addr_reg, ptr_anyopaque_ty, reg_save_area, .{});
                    if (!unused) try self.asmRegisterMemory(.{ ._, .lea }, addr_reg, .{
                        .base = .{ .reg = addr_reg },
                        .mod = .{ .rm = .{
                            .size = .qword,
                            .index = offset_reg.to64(),
                        } },
                    });
                    try self.asmRegisterMemory(.{ ._, .lea }, offset_reg, .{
                        .base = .{ .reg = offset_reg.to64() },
                        .mod = .{ .rm = .{
                            .size = .qword,
                            .disp = 16,
                        } },
                    });
                    try self.genCopy(.c_uint, fp_offset, .{ .register = offset_reg }, .{});
                    const done_reloc = try self.asmJmpReloc(undefined);

                    self.performReloc(mem_reloc);
                    try self.genSetReg(addr_reg, ptr_anyopaque_ty, overflow_arg_area, .{});
                    try self.asmRegisterMemory(.{ ._, .lea }, offset_reg.to64(), .{
                        .base = .{ .reg = addr_reg },
                        .mod = .{ .rm = .{
                            .size = .qword,
                            .disp = @intCast(@max(promote_ty.abiSize(zcu), 8)),
                        } },
                    });
                    try self.genCopy(
                        ptr_anyopaque_ty,
                        overflow_arg_area,
                        .{ .register = offset_reg.to64() },
                        .{},
                    );

                    self.performReloc(done_reloc);
                    if (!unused) try self.genCopy(promote_ty, promote_mcv, .{
                        .indirect = .{ .reg = addr_reg },
                    }, .{});
                },
                .memory => {
                    assert(classes.len == 1);
                    unreachable;
                },
                else => return self.fail("TODO implement c_va_arg for {} on SysV", .{promote_ty.fmt(pt)}),
            }

            if (unused) break :result .unreach;
            if (ty.toIntern() == promote_ty.toIntern()) break :result promote_mcv;

            if (!promote_ty.isRuntimeFloat()) {
                const dst_mcv = try self.allocRegOrMem(inst, true);
                try self.genCopy(ty, dst_mcv, promote_mcv, .{});
                break :result dst_mcv;
            }

            assert(ty.toIntern() == .f32_type and promote_ty.toIntern() == .f64_type);
            const dst_mcv = if (promote_mcv.isRegister())
                promote_mcv
            else
                try self.copyToRegisterWithInstTracking(inst, ty, promote_mcv);
            const dst_reg = dst_mcv.getReg().?.to128();
            const dst_lock = self.register_manager.lockReg(dst_reg);
            defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);

            if (self.hasFeature(.avx)) if (promote_mcv.isBase()) try self.asmRegisterRegisterMemory(
                .{ .v_ss, .cvtsd2 },
                dst_reg,
                dst_reg,
                try promote_mcv.mem(self, .{ .size = .qword }),
            ) else try self.asmRegisterRegisterRegister(
                .{ .v_ss, .cvtsd2 },
                dst_reg,
                dst_reg,
                (if (promote_mcv.isRegister())
                    promote_mcv.getReg().?
                else
                    try self.copyToTmpRegister(promote_ty, promote_mcv)).to128(),
            ) else if (promote_mcv.isBase()) try self.asmRegisterMemory(
                .{ ._ss, .cvtsd2 },
                dst_reg,
                try promote_mcv.mem(self, .{ .size = .qword }),
            ) else try self.asmRegisterRegister(
                .{ ._ss, .cvtsd2 },
                dst_reg,
                (if (promote_mcv.isRegister())
                    promote_mcv.getReg().?
                else
                    try self.copyToTmpRegister(promote_ty, promote_mcv)).to128(),
            );
            break :result promote_mcv;
        },
        .x86_64_win => return self.fail("TODO implement c_va_arg for Win64", .{}),
        else => |cc| return self.fail("{s} does not support var args", .{@tagName(cc)}),
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
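
// The branches above mirror the SysV AMD64 va_arg algorithm: gp_offset and
// fp_offset index into the register save area and are checked against its
// bounds (6 GPRs * 8 bytes, plus 8 SSE slots * 16 bytes for floats).
// In-range arguments load from reg_save_area and bump the offset by 8 (GP)
// or 16 (SSE); out-of-range arguments load from overflow_arg_area, which
// advances by the promoted argument size rounded up to at least 8 bytes.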

fn airVaCopy(self: *CodeGen, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    const ptr_va_list_ty = self.typeOf(ty_op.operand);

    const dst_mcv = try self.allocRegOrMem(inst, true);
    try self.load(dst_mcv, ptr_va_list_ty, .{ .air_ref = ty_op.operand });
    return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}

fn airVaEnd(self: *CodeGen, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
    return self.finishAir(inst, .unreach, .{ un_op, .none, .none });
}

fn resolveInst(self: *CodeGen, ref: Air.Inst.Ref) InnerError!MCValue {
    const zcu = self.pt.zcu;
    const ty = self.typeOf(ref);

    // If the type has no codegen bits, no need to store it.
    if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return .none;

    const mcv = if (ref.toIndex()) |inst| mcv: {
        break :mcv self.inst_tracking.getPtr(inst).?.short;
    } else mcv: {
        const ip_index = ref.toInterned().?;
        const gop = try self.const_tracking.getOrPut(self.gpa, ip_index);
        if (!gop.found_existing) gop.value_ptr.* = .init(init: {
            const const_mcv = try self.genTypedValue(.fromInterned(ip_index));
            switch (const_mcv) {
                .lea_tlv => |tlv_sym| switch (self.bin_file.tag) {
                    .elf, .macho => {
                        if (self.mod.pic) {
                            try self.spillRegisters(&.{ .rdi, .rax });
                        } else {
                            try self.spillRegisters(&.{.rax});
                        }
                        const frame_index = try self.allocFrameIndex(.init(.{
                            .size = 8,
                            .alignment = .@"8",
                        }));
                        try self.genSetMem(
                            .{ .frame = frame_index },
                            0,
                            .usize,
                            .{ .lea_symbol = .{ .sym_index = tlv_sym } },
                            .{},
                        );
                        break :init .{ .load_frame = .{ .index = frame_index } };
                    },
                    else => break :init const_mcv,
                },
                else => break :init const_mcv,
            }
        });
        break :mcv gop.value_ptr.short;
    };

    switch (mcv) {
        .none, .unreach, .dead => unreachable,
        else => return mcv,
    }
}

fn getResolvedInstValue(self: *CodeGen, inst: Air.Inst.Index) *InstTracking {
    const tracking = self.inst_tracking.getPtr(inst).?;
    return switch (tracking.short) {
        .none, .unreach, .dead => unreachable,
        else => tracking,
    };
}

/// If the MCValue is an immediate, and it does not fit within this type,
/// we put it in a register.
/// A potential opportunity for future optimization here would be keeping track
/// of the fact that the instruction is available both as an immediate
/// and as a register.
fn limitImmediateType(self: *CodeGen, operand: Air.Inst.Ref, comptime T: type) !MCValue {
    const mcv = try self.resolveInst(operand);
    const ti = @typeInfo(T).int;
    switch (mcv) {
        .immediate => |imm| {
            // This immediate is unsigned.
            const U = std.meta.Int(.unsigned, ti.bits - @intFromBool(ti.signedness == .signed));
            if (imm >= std.math.maxInt(U)) {
                return MCValue{ .register = try self.copyToTmpRegister(.usize, mcv) };
            }
        },
        else => {},
    }
    return mcv;
}
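
// For example, with T = i32 the unsigned payload type U is u31, so an
// .immediate of 0x7fff_ffff or above (which would no longer fit in a
// sign-extended 32-bit operand) is first copied into a temporary register.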

fn genResult(self: *CodeGen, res: codegen.GenResult) InnerError!MCValue {
    return switch (res) {
        .mcv => |mcv| switch (mcv) {
            .none => .none,
            .undef => .undef,
            .immediate => |imm| .{ .immediate = imm },
            .memory => |addr| .{ .memory = addr },
            .load_symbol => |sym_index| .{ .load_symbol = .{ .sym_index = sym_index } },
            .lea_symbol => |sym_index| .{ .lea_symbol = .{ .sym_index = sym_index } },
            .load_direct => |sym_index| .{ .load_direct = sym_index },
            .lea_direct => |sym_index| .{ .lea_direct = sym_index },
            .load_got => |sym_index| .{ .lea_got = sym_index },
            .load_tlv => |sym_index| .{ .lea_tlv = sym_index },
        },
        .fail => |msg| return self.failMsg(msg),
    };
}

fn genTypedValue(self: *CodeGen, val: Value) InnerError!MCValue {
    return self.genResult(try codegen.genTypedValue(self.bin_file, self.pt, self.src_loc, val, self.target.*));
}

fn lowerUav(self: *CodeGen, val: Value) InnerError!MCValue {
    return self.genResult(try self.bin_file.lowerUav(self.pt, val.toIntern(), .none, self.src_loc));
}

const CallMCValues = struct {
    args: []MCValue,
    return_value: InstTracking,
    stack_byte_count: u31,
    stack_align: InternPool.Alignment,
    gp_count: u32,
    fp_count: u32,
    err_ret_trace_reg: Register,

    fn deinit(self: *CallMCValues, func: *CodeGen) void {
        func.gpa.free(self.args);
        self.* = undefined;
    }
};

/// Caller must call `CallMCValues.deinit`.
fn resolveCallingConventionValues(
    self: *CodeGen,
    fn_info: InternPool.Key.FuncType,
    var_args: []const Type,
    stack_frame_base: FrameIndex,
) !CallMCValues {
    const pt = self.pt;
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    const cc = fn_info.cc;
    const param_types = try self.gpa.alloc(Type, fn_info.param_types.len + var_args.len);
    defer self.gpa.free(param_types);

    for (param_types[0..fn_info.param_types.len], fn_info.param_types.get(ip)) |*dest, src|
        dest.* = .fromInterned(src);
    for (param_types[fn_info.param_types.len..], var_args) |*param_ty, arg_ty|
        param_ty.* = self.promoteVarArg(arg_ty);

    var result: CallMCValues = .{
        .args = try self.gpa.alloc(MCValue, param_types.len),
        // These undefined values must be populated before returning from this function.
        .return_value = undefined,
        .stack_byte_count = 0,
        .stack_align = undefined,
        .gp_count = 0,
        .fp_count = 0,
        .err_ret_trace_reg = .none,
    };
    errdefer self.gpa.free(result.args);

    const ret_ty: Type = .fromInterned(fn_info.return_type);
    switch (cc) {
        .naked => {
            assert(result.args.len == 0);
            result.return_value = .init(.unreach);
            result.stack_align = switch (self.target.cpu.arch) {
                else => unreachable,
                .x86 => .@"4",
                .x86_64 => .@"8",
            };
        },
        .x86_64_sysv, .x86_64_win => |cc_opts| {
            var ret_int_reg_i: u32 = 0;
            var ret_sse_reg_i: u32 = 0;
            var param_int_reg_i: u32 = 0;
            var param_sse_reg_i: u32 = 0;
            result.stack_align = .fromByteUnits(cc_opts.incoming_stack_alignment orelse 16);

            switch (cc) {
                .x86_64_sysv => {},
                .x86_64_win => result.stack_byte_count += @intCast(4 * 8),
                else => unreachable,
            }

            // Return values
            if (ret_ty.isNoReturn(zcu)) {
                result.return_value = .init(.unreach);
            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                // TODO: is this even possible for C calling convention?
                result.return_value = .init(.none);
            } else {
                var ret_tracking: [4]InstTracking = undefined;
                var ret_tracking_i: usize = 0;

                const classes = switch (cc) {
                    .x86_64_sysv => std.mem.sliceTo(&abi.classifySystemV(ret_ty, zcu, self.target.*, .ret), .none),
                    .x86_64_win => &.{abi.classifyWindows(ret_ty, zcu)},
                    else => unreachable,
                };
                for (classes) |class| switch (class) {
                    .integer => {
                        const ret_int_reg = registerAlias(
                            abi.getCAbiIntReturnRegs(cc)[ret_int_reg_i],
                            @intCast(@min(ret_ty.abiSize(zcu), 8)),
                        );
                        ret_int_reg_i += 1;

                        ret_tracking[ret_tracking_i] = .init(.{ .register = ret_int_reg });
                        ret_tracking_i += 1;
                    },
                    .sse, .float, .float_combine, .win_i128 => {
                        const ret_sse_regs = abi.getCAbiSseReturnRegs(cc);
                        const abi_size: u32 = @intCast(ret_ty.abiSize(zcu));
                        const reg_size = @min(abi_size, self.vectorSize(.float));
                        var byte_offset: u32 = 0;
                        while (byte_offset < abi_size) : (byte_offset += reg_size) {
                            const ret_sse_reg = registerAlias(ret_sse_regs[ret_sse_reg_i], reg_size);
                            ret_sse_reg_i += 1;

                            ret_tracking[ret_tracking_i] = .init(.{ .register = ret_sse_reg });
                            ret_tracking_i += 1;
                        }
                    },
                    .sseup => assert(ret_tracking[ret_tracking_i - 1].short.register.class() == .sse),
                    .x87 => {
                        ret_tracking[ret_tracking_i] = .init(.{ .register = abi.getCAbiX87ReturnRegs(cc)[0] });
                        ret_tracking_i += 1;
                    },
                    .x87up => assert(ret_tracking[ret_tracking_i - 1].short.register.class() == .x87),
                    .complex_x87 => {
                        ret_tracking[ret_tracking_i] = .init(.{ .register_pair = abi.getCAbiX87ReturnRegs(cc)[0..2].* });
                        ret_tracking_i += 1;
                    },
                    .memory => {
                        const ret_int_reg = abi.getCAbiIntReturnRegs(cc)[ret_int_reg_i].to64();
                        ret_int_reg_i += 1;
                        const ret_indirect_reg = abi.getCAbiIntParamRegs(cc)[param_int_reg_i];
                        param_int_reg_i += 1;

                        ret_tracking[ret_tracking_i] = .{
                            .short = .{ .indirect = .{ .reg = ret_int_reg } },
                            .long = .{ .indirect = .{ .reg = ret_indirect_reg } },
                        };
                        ret_tracking_i += 1;
                    },
                    .none, .integer_per_element => unreachable,
                };
                result.return_value = switch (ret_tracking_i) {
                    else => unreachable,
                    1 => ret_tracking[0],
                    2 => .init(.{ .register_pair = .{
                        ret_tracking[0].short.register,
                        ret_tracking[1].short.register,
                    } }),
                    3 => .init(.{ .register_triple = .{
                        ret_tracking[0].short.register,
                        ret_tracking[1].short.register,
                        ret_tracking[2].short.register,
                    } }),
                    4 => .init(.{ .register_quadruple = .{
                        ret_tracking[0].short.register,
                        ret_tracking[1].short.register,
                        ret_tracking[2].short.register,
                        ret_tracking[3].short.register,
                    } }),
                };
            }

            // Input params
            for (param_types, result.args) |ty, *arg| {
                assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
                switch (cc) {
                    .x86_64_sysv => {},
                    .x86_64_win => {
                        param_int_reg_i = @max(param_int_reg_i, param_sse_reg_i);
                        param_sse_reg_i = param_int_reg_i;
                    },
                    else => unreachable,
                }

                var arg_mcv: [4]MCValue = undefined;
                var arg_mcv_i: usize = 0;

                const classes = switch (cc) {
                    .x86_64_sysv => std.mem.sliceTo(&abi.classifySystemV(ty, zcu, self.target.*, .arg), .none),
                    .x86_64_win => &.{abi.classifyWindows(ty, zcu)},
                    else => unreachable,
                };
                classes: for (classes) |class| switch (class) {
                    .integer => {
                        const param_int_regs = abi.getCAbiIntParamRegs(cc);
                        if (param_int_reg_i >= param_int_regs.len) break;

                        const param_int_reg =
                            registerAlias(param_int_regs[param_int_reg_i], @intCast(@min(ty.abiSize(zcu), 8)));
                        param_int_reg_i += 1;

                        arg_mcv[arg_mcv_i] = .{ .register = param_int_reg };
                        arg_mcv_i += 1;
                    },
                    .sse, .float, .float_combine => {
                        const param_sse_regs = abi.getCAbiSseParamRegs(cc, self.target);
                        const abi_size: u32 = @intCast(ty.abiSize(zcu));
                        const reg_size = @min(abi_size, self.vectorSize(.float));
                        var byte_offset: u32 = 0;
                        while (byte_offset < abi_size) : (byte_offset += reg_size) {
                            if (param_sse_reg_i >= param_sse_regs.len) break :classes;

                            const param_sse_reg = registerAlias(param_sse_regs[param_sse_reg_i], reg_size);
                            param_sse_reg_i += 1;

                            arg_mcv[arg_mcv_i] = .{ .register = param_sse_reg };
                            arg_mcv_i += 1;
                        }
                    },
                    .sseup => assert(arg_mcv[arg_mcv_i - 1].register.class() == .sse),
                    .x87, .x87up, .complex_x87, .memory, .win_i128 => switch (cc) {
                        .x86_64_sysv => switch (class) {
                            .x87, .x87up, .complex_x87, .memory => break,
                            else => unreachable,
                        },
                        .x86_64_win => if (ty.abiSize(zcu) > 8) {
                            const param_int_reg = abi.getCAbiIntParamRegs(cc)[param_int_reg_i].to64();
                            param_int_reg_i += 1;

                            arg_mcv[arg_mcv_i] = .{ .indirect = .{ .reg = param_int_reg } };
                            arg_mcv_i += 1;
                        } else break,
                        else => unreachable,
                    },
                    .none => unreachable,
                    .integer_per_element => {
                        const param_int_regs_len: u32 =
                            @intCast(abi.getCAbiIntParamRegs(cc).len);
                        const remaining_param_int_regs: u3 =
                            @intCast(param_int_regs_len - param_int_reg_i);
                        param_int_reg_i = param_int_regs_len;

                        const frame_elem_align = 8;
                        const frame_elems_len = ty.vectorLen(zcu) - remaining_param_int_regs;
                        const frame_elem_size = std.mem.alignForward(
                            u64,
                            ty.childType(zcu).abiSize(zcu),
                            frame_elem_align,
                        );
                        const frame_size: u31 = @intCast(frame_elems_len * frame_elem_size);

                        result.stack_byte_count =
                            std.mem.alignForward(u31, result.stack_byte_count, frame_elem_align);
                        arg_mcv[arg_mcv_i] = .{ .elementwise_regs_then_frame = .{
                            .regs = remaining_param_int_regs,
                            .frame_off = @intCast(result.stack_byte_count),
                            .frame_index = stack_frame_base,
                        } };
                        arg_mcv_i += 1;
                        result.stack_byte_count += frame_size;
                    },
                } else {
                    arg.* = switch (arg_mcv_i) {
                        else => unreachable,
                        1 => arg_mcv[0],
                        2 => .{ .register_pair = .{
                            arg_mcv[0].register,
                            arg_mcv[1].register,
                        } },
                        3 => .{ .register_triple = .{
                            arg_mcv[0].register,
                            arg_mcv[1].register,
                            arg_mcv[2].register,
                        } },
                        4 => .{ .register_quadruple = .{
                            arg_mcv[0].register,
                            arg_mcv[1].register,
                            arg_mcv[2].register,
                            arg_mcv[3].register,
                        } },
                    };
                    continue;
                }

                const param_align = ty.abiAlignment(zcu).max(.@"8");
                result.stack_byte_count = @intCast(param_align.forward(result.stack_byte_count));
                result.stack_align = result.stack_align.max(param_align);
                arg.* = .{ .load_frame = .{
                    .index = stack_frame_base,
                    .off = result.stack_byte_count,
                } };
                result.stack_byte_count += @intCast(ty.abiSize(zcu));
            }
            assert(param_int_reg_i <= 6);
            result.gp_count = param_int_reg_i;
            assert(param_sse_reg_i <= 16);
            result.fp_count = param_sse_reg_i;
        },
        .auto => {
            result.stack_align = abi.zigcc.stack_align orelse .fromByteUnits(self.vectorSize(.float));

            var param_gpr = abi.getCAbiIntParamRegs(cc);
            var param_x87 = abi.getCAbiX87ParamRegs(cc);
            var param_sse = abi.getCAbiSseParamRegs(cc, self.target);

            if (zcu.comp.config.any_error_tracing) {
                result.err_ret_trace_reg = param_gpr[param_gpr.len - 1];
                param_gpr = param_gpr[0 .. param_gpr.len - 1];
            }

            // Return values
            result.return_value = if (ret_ty.isNoReturn(zcu))
                .init(.unreach)
            else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu))
                .init(.none)
            else return_value: {
                const ret_gpr = abi.getCAbiIntReturnRegs(cc);
                const ret_size: u31 = @intCast(ret_ty.abiSize(zcu));
                if (abi.zigcc.return_in_regs) switch (self.regClassForType(ret_ty)) {
                    .general_purpose => if (ret_size <= @as(u4, switch (self.target.cpu.arch) {
                        else => unreachable,
                        .x86 => 4,
                        .x86_64 => 8,
                    }))
                        break :return_value .init(.{ .register = registerAlias(ret_gpr[0], ret_size) })
                    else if (ret_gpr.len >= 2 and ret_ty.isSliceAtRuntime(zcu))
                        break :return_value .init(.{ .register_pair = ret_gpr[0..2].* }),
                    .segment, .mmx, .ip, .cr, .dr => unreachable,
                    .x87 => break :return_value .init(.{ .register = .st0 }),
                    .sse => if (ret_size <= self.vectorSize(.float)) break :return_value .init(.{
                        .register = registerAlias(abi.getCAbiSseReturnRegs(cc)[0], @max(ret_size, 16)),
                    }),
                };
                const ret_indirect_reg = param_gpr[0];
                param_gpr = param_gpr[1..];
                break :return_value .{
                    .short = .{ .indirect = .{ .reg = ret_gpr[0] } },
                    .long = .{ .indirect = .{ .reg = ret_indirect_reg } },
                };
            };

            // Input params
            for (param_types, result.args) |param_ty, *arg| {
                if (!param_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                    arg.* = .none;
                    continue;
                }
                const param_size: u31 = @intCast(param_ty.abiSize(zcu));
                if (abi.zigcc.params_in_regs) switch (self.regClassForType(param_ty)) {
                    .general_purpose => if (param_gpr.len >= 1 and param_size <= @as(u4, switch (self.target.cpu.arch) {
                        else => unreachable,
                        .x86 => 4,
                        .x86_64 => 8,
                    })) {
                        arg.* = .{ .register = registerAlias(param_gpr[0], param_size) };
                        param_gpr = param_gpr[1..];
                        continue;
                    } else if (param_gpr.len >= 2 and param_ty.isSliceAtRuntime(zcu)) {
                        arg.* = .{ .register_pair = param_gpr[0..2].* };
                        param_gpr = param_gpr[2..];
                        continue;
                    },
                    .segment, .mmx, .ip, .cr, .dr => unreachable,
                    .x87 => if (param_x87.len >= 1) {
                        arg.* = .{ .register = param_x87[0] };
                        param_x87 = param_x87[1..];
                        continue;
                    },
                    .sse => if (param_sse.len >= 1 and param_size <= self.vectorSize(.float)) {
                        arg.* = .{
                            .register = registerAlias(param_sse[0], @max(param_size, 16)),
                        };
                        param_sse = param_sse[1..];
                        continue;
                    },
                };
                const param_align = param_ty.abiAlignment(zcu);
                result.stack_byte_count = @intCast(param_align.forward(result.stack_byte_count));
                result.stack_align = result.stack_align.max(param_align);
                arg.* = .{ .load_frame = .{
                    .index = stack_frame_base,
                    .off = result.stack_byte_count,
                } };
                result.stack_byte_count += param_size;
            }
        },
        else => return self.fail("TODO implement function parameters and return values for {} on x86_64", .{cc}),
    }

    result.stack_byte_count = @intCast(result.stack_align.forward(result.stack_byte_count));
    return result;
}

fn fail(self: *CodeGen, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
    @branchHint(.cold);
    const zcu = self.pt.zcu;
    switch (self.owner) {
        .nav_index => |i| return zcu.codegenFail(i, format, args),
        .lazy_sym => |s| return zcu.codegenFailType(s.ty, format, args),
    }
    return error.CodegenFail;
}

fn failMsg(self: *CodeGen, msg: *Zcu.ErrorMsg) error{ OutOfMemory, CodegenFail } {
    @branchHint(.cold);
    const zcu = self.pt.zcu;
    switch (self.owner) {
        .nav_index => |i| return zcu.codegenFailMsg(i, msg),
        .lazy_sym => |s| return zcu.codegenFailTypeMsg(s.ty, msg),
    }
    return error.CodegenFail;
}

fn parseRegName(name: []const u8) ?Register {
    if (std.mem.startsWith(u8, name, "db")) return @enumFromInt(
        @intFromEnum(Register.dr0) + (std.fmt.parseInt(u4, name["db".len..], 0) catch return null),
    );
    return std.meta.stringToEnum(Register, name);
}
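
// e.g. "rax" resolves via stringToEnum to .rax, while the special-cased
// "db" spelling resolves "db0" to .dr0, "db3" to .dr3, and so on.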

/// Returns register wide enough to hold at least `size_bytes`.
fn registerAlias(reg: Register, size_bytes: u32) Register {
    return switch (reg.class()) {
        .general_purpose => if (size_bytes == 0)
            unreachable // should be comptime-known
        else if (size_bytes <= 1)
            reg.to8()
        else if (size_bytes <= 2)
            reg.to16()
        else if (size_bytes <= 4)
            reg.to32()
        else if (size_bytes <= 8)
            reg.to64()
        else
            unreachable,
        .segment => if (size_bytes <= 2)
            reg
        else
            unreachable,
        .x87 => if (size_bytes >= 10 and size_bytes <= 16)
            reg
        else
            unreachable,
        .mmx => if (size_bytes <= 8)
            reg
        else
            unreachable,
        .sse => if (size_bytes <= 16)
            reg.to128()
        else if (size_bytes <= 32)
            reg.to256()
        else
            unreachable,
        .ip => if (size_bytes <= 2)
            .ip
        else if (size_bytes <= 4)
            .eip
        else if (size_bytes <= 8)
            .rip
        else
            unreachable,
        .cr => if (size_bytes <= 8)
            reg
        else
            unreachable,
        .dr => if (size_bytes <= 8)
            reg
        else
            unreachable,
    };
}
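
// e.g. registerAlias(.rax, 1) is .al, registerAlias(.rax, 4) is .eax, and an
// sse register requested at more than 16 bytes widens to its .ymm form.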

fn memSize(self: *CodeGen, ty: Type) Memory.Size {
    const zcu = self.pt.zcu;
    return switch (ty.zigTypeTag(zcu)) {
        .float => .fromBitSize(ty.floatBits(self.target.*)),
        else => .fromSize(@intCast(ty.abiSize(zcu))),
    };
}
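
// Floats use their bit size rather than their ABI size, so e.g. an f80
// accesses memory as an 80-bit x87 operand even though its ABI size is
// 16 bytes.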

fn splitType(self: *CodeGen, comptime parts_len: usize, ty: Type) ![parts_len]Type {
    const pt = self.pt;
    const zcu = pt.zcu;
    var parts: [parts_len]Type = undefined;
    if (ty.isVector(zcu)) if (std.math.divExact(u32, ty.vectorLen(zcu), parts_len)) |vec_len| return .{
        try pt.vectorType(.{ .len = vec_len, .child = ty.scalarType(zcu).toIntern() }),
    } ** parts_len else |err| switch (err) {
        error.DivisionByZero => unreachable,
        error.UnexpectedRemainder => {},
    };
    const classes = std.mem.sliceTo(&abi.classifySystemV(ty, zcu, self.target.*, .other), .none);
    if (classes.len == parts_len) for (&parts, classes, 0..) |*part, class, part_i| {
        part.* = switch (class) {
            .integer => if (part_i < parts_len - 1)
                .u64
            else part: {
                const elem_size = ty.abiAlignment(zcu).minStrict(.@"8").toByteUnits().?;
                const elem_ty = try pt.intType(.unsigned, @intCast(elem_size * 8));
                break :part switch (@divExact(ty.abiSize(zcu) - part_i * 8, elem_size)) {
                    1 => elem_ty,
                    else => |array_len| try pt.arrayType(.{ .len = array_len, .child = elem_ty.toIntern() }),
                };
            },
            .float => .f32,
            .float_combine => try pt.arrayType(.{ .len = 2, .child = .f32_type }),
            .sse => .f64,
            else => break,
        };
    } else {
        var part_sizes: u64 = 0;
        for (parts) |part| part_sizes += part.abiSize(zcu);
        if (part_sizes == ty.abiSize(zcu)) return parts;
    };
    return self.fail("TODO implement splitType({d}, {})", .{ parts_len, ty.fmt(pt) });
}
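
// A sketch of the common case: a 16-byte extern struct such as
// struct { a: u64, b: u64 } classifies as two .integer eightbytes, so
// splitType(2, ...) yields .{ .u64, .u64 }.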

/// Truncates the value in the register in place.
/// Clobbers any remaining bits.
fn truncateRegister(self: *CodeGen, ty: Type, reg: Register) !void {
    const pt = self.pt;
    const zcu = pt.zcu;
    const int_info: InternPool.Key.IntType = if (ty.isAbiInt(zcu)) ty.intInfo(zcu) else .{
        .signedness = .unsigned,
        .bits = @intCast(ty.bitSize(zcu)),
    };
    const shift = std.math.cast(u6, 64 - int_info.bits % 64) orelse return;
    try self.spillEflagsIfOccupied();
    switch (int_info.signedness) {
        .signed => {
            try self.genShiftBinOpMir(.{ ._l, .sa }, .isize, .{ .register = reg }, .u8, .{ .immediate = shift });
            try self.genShiftBinOpMir(.{ ._r, .sa }, .isize, .{ .register = reg }, .u8, .{ .immediate = shift });
        },
        .unsigned => {
            const mask = ~@as(u64, 0) >> shift;
            if (int_info.bits <= 32) {
                try self.genBinOpMir(.{ ._, .@"and" }, .u32, .{ .register = reg }, .{ .immediate = mask });
            } else {
                const tmp_reg = try self.copyToTmpRegister(.usize, .{ .immediate = mask });
                try self.genBinOpMir(.{ ._, .@"and" }, .usize, .{ .register = reg }, .{ .register = tmp_reg });
            }
        },
    }
}
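
// e.g. truncating to i12 shifts left by 52 and arithmetic-shifts right by 52
// to sign-extend bit 11 through the rest of the register, while u12 masks
// with 0xfff; register-sized integers (bits % 64 == 0) need no work, which
// is the `orelse return` above.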

fn regBitSize(self: *CodeGen, ty: Type) u64 {
    const zcu = self.pt.zcu;
    const abi_size = ty.abiSize(zcu);
    return switch (ty.zigTypeTag(zcu)) {
        else => switch (abi_size) {
            1 => 8,
            2 => 16,
            3...4 => 32,
            5...8 => 64,
            else => unreachable,
        },
        .float => switch (abi_size) {
            1...16 => 128,
            17...32 => 256,
            else => unreachable,
        },
    };
}

fn regExtraBits(self: *CodeGen, ty: Type) u64 {
    return self.regBitSize(ty) - ty.bitSize(self.pt.zcu);
}

fn hasFeature(cg: *CodeGen, feature: std.Target.x86.Feature) bool {
    return switch (feature) {
        .@"64bit" => switch (cg.target.cpu.arch) {
            else => unreachable,
            .x86 => false,
            .x86_64 => true,
        },
        .false_deps_getmant,
        .false_deps_lzcnt_tzcnt,
        .false_deps_mulc,
        .false_deps_mullq,
        .false_deps_perm,
        .false_deps_popcnt,
        .false_deps_range,
        .slow_3ops_lea,
        .slow_incdec,
        .slow_lea,
        .slow_pmaddwd,
        .slow_pmulld,
        .slow_shld,
        .slow_two_mem_ops,
        .slow_unaligned_mem_16,
        .slow_unaligned_mem_32,
        => switch (cg.mod.optimize_mode) {
            .Debug, .ReleaseSafe, .ReleaseFast => null,
            .ReleaseSmall => false,
        },
        .fast_11bytenop,
        .fast_15bytenop,
        .fast_7bytenop,
        .fast_bextr,
        .fast_dpwssd,
        .fast_gather,
        .fast_hops,
        .fast_imm16,
        .fast_lzcnt,
        .fast_movbe,
        .fast_scalar_fsqrt,
        .fast_scalar_shift_masks,
        .fast_shld_rotate,
        .fast_variable_crosslane_shuffle,
        .fast_variable_perlane_shuffle,
        .fast_vector_fsqrt,
        .fast_vector_shift_masks,
        => switch (cg.mod.optimize_mode) {
            .Debug, .ReleaseSafe, .ReleaseFast => null,
            .ReleaseSmall => true,
        },
        .mmx => false,
        else => null,
    } orelse std.Target.x86.featureSetHas(cg.target.cpu.features, feature);
}
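
// In ReleaseSmall the slow_* tuning flags are treated as unset and the
// fast_* flags as set, biasing instruction selection toward the shorter
// encodings regardless of the actual CPU; in other modes (and for every
// other feature) the query falls through to the target's real feature set.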

fn typeOf(self: *CodeGen, inst: Air.Inst.Ref) Type {
    const pt = self.pt;
    const zcu = pt.zcu;
    return self.air.typeOf(inst, &zcu.intern_pool);
}

fn typeOfIndex(self: *CodeGen, inst: Air.Inst.Index) Type {
    const temp: Temp = .{ .index = inst };
    return temp.typeOf(self);
}

fn intCompilerRtAbiName(int_bits: u32) u8 {
    return switch (int_bits) {
        1...32 => 's',
        33...64 => 'd',
        65...128 => 't',
        else => unreachable,
    };
}
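
// These letters select compiler-rt's si/di/ti integer suffixes, e.g.
// __divsi3 for 32-bit, __divdi3 for 64-bit, and __divti3 for 128-bit
// division helpers.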

fn floatCompilerRtAbiName(float_bits: u32) u8 {
    return switch (float_bits) {
        16 => 'h',
        32 => 's',
        64 => 'd',
        80 => 'x',
        128 => 't',
        else => unreachable,
    };
}
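
// These select compiler-rt's float suffixes ('h' = f16, 's' = f32,
// 'd' = f64, 'x' = f80, 't' = f128), as in names like __addsf3, __adddf3,
// __addxf3, and __addtf3.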

fn floatCompilerRtAbiType(self: *CodeGen, ty: Type, other_ty: Type) Type {
    if (ty.toIntern() == .f16_type and
        (other_ty.toIntern() == .f32_type or other_ty.toIntern() == .f64_type) and
        self.target.isDarwin()) return .u16;
    return ty;
}

fn floatLibcAbiPrefix(ty: Type) []const u8 {
    return switch (ty.toIntern()) {
        .f16_type, .f80_type => "__",
        .f32_type, .f64_type, .f128_type, .c_longdouble_type => "",
        else => unreachable,
    };
}

fn floatLibcAbiSuffix(ty: Type) []const u8 {
    return switch (ty.toIntern()) {
        .f16_type => "h",
        .f32_type => "f",
        .f64_type => "",
        .f80_type => "x",
        .f128_type => "q",
        .c_longdouble_type => "l",
        else => unreachable,
    };
}

fn promoteInt(self: *CodeGen, ty: Type) Type {
    const pt = self.pt;
    const zcu = pt.zcu;
    const int_info: InternPool.Key.IntType = switch (ty.toIntern()) {
        .bool_type => .{ .signedness = .unsigned, .bits = 1 },
        else => if (ty.isAbiInt(zcu)) ty.intInfo(zcu) else return ty,
    };
    for ([_]Type{
        .c_int, .c_uint,
        .c_long, .c_ulong,
        .c_longlong, .c_ulonglong,
    }) |promote_ty| {
        const promote_info = promote_ty.intInfo(zcu);
        if (int_info.signedness == .signed and promote_info.signedness == .unsigned) continue;
        if (int_info.bits + @intFromBool(int_info.signedness == .unsigned and
            promote_info.signedness == .signed) <= promote_info.bits) return promote_ty;
    }
    return ty;
}
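
// This mirrors C's integer promotion rules: on targets where c_int is 32
// bits, bool, u8, and i16 all promote to c_int, u32 promotes to c_uint
// (c_int cannot represent it), and anything wider than c_ulonglong is
// returned unchanged.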

fn promoteVarArg(self: *CodeGen, ty: Type) Type {
    if (!ty.isRuntimeFloat()) return self.promoteInt(ty);
    switch (ty.floatBits(self.target.*)) {
        32, 64 => return .f64,
        else => |float_bits| {
            assert(float_bits == self.target.cTypeBitSize(.longdouble));
            return .c_longdouble;
        },
    }
}
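
// Matches C's default argument promotions for varargs: f32 widens to f64,
// and any other float width must already be the target's long double
// (asserted above), passed as c_longdouble.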

fn unalignedSize(cg: *CodeGen, ty: Type) u64 {
    const zcu = cg.pt.zcu;
    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
        .vector_type => |vector_type| Type.fromInterned(vector_type.child).abiSize(zcu) * vector_type.len,
        else => ty.abiSize(zcu),
    };
}

fn intInfo(cg: *CodeGen, ty: Type) ?std.builtin.Type.Int {
    const zcu = cg.pt.zcu;
    const ip = &zcu.intern_pool;
    var ty_index = ty.ip_index;
    while (true) switch (ip.indexToKey(ty_index)) {
        .int_type => |int_type| return int_type,
        .ptr_type => |ptr_type| return switch (ptr_type.flags.size) {
            .one, .many, .c => .{ .signedness = .unsigned, .bits = cg.target.ptrBitWidth() },
            .slice => null,
        },
        .opt_type => |opt_child| return if (!Type.fromInterned(opt_child).hasRuntimeBitsIgnoreComptime(zcu))
            .{ .signedness = .unsigned, .bits = 1 }
        else switch (ip.indexToKey(opt_child)) {
            .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
                .one, .many => switch (ptr_type.flags.is_allowzero) {
                    false => .{ .signedness = .unsigned, .bits = cg.target.ptrBitWidth() },
                    true => null,
                },
                .slice, .c => null,
            },
            else => null,
        },
        .error_union_type => |error_union_type| return if (!Type.fromInterned(error_union_type.payload_type)
            .hasRuntimeBitsIgnoreComptime(zcu)) .{ .signedness = .unsigned, .bits = zcu.errorSetBits() } else null,
        .simple_type => |simple_type| return switch (simple_type) {
            .bool => .{ .signedness = .unsigned, .bits = 1 },
            .anyerror => .{ .signedness = .unsigned, .bits = zcu.errorSetBits() },
            .isize => .{ .signedness = .signed, .bits = cg.target.ptrBitWidth() },
            .usize => .{ .signedness = .unsigned, .bits = cg.target.ptrBitWidth() },
            .c_char => .{ .signedness = cg.target.charSignedness(), .bits = cg.target.cTypeBitSize(.char) },
            .c_short => .{ .signedness = .signed, .bits = cg.target.cTypeBitSize(.short) },
            .c_ushort => .{ .signedness = .unsigned, .bits = cg.target.cTypeBitSize(.short) },
            .c_int => .{ .signedness = .signed, .bits = cg.target.cTypeBitSize(.int) },
            .c_uint => .{ .signedness = .unsigned, .bits = cg.target.cTypeBitSize(.int) },
            .c_long => .{ .signedness = .signed, .bits = cg.target.cTypeBitSize(.long) },
            .c_ulong => .{ .signedness = .unsigned, .bits = cg.target.cTypeBitSize(.long) },
            .c_longlong => .{ .signedness = .signed, .bits = cg.target.cTypeBitSize(.longlong) },
            .c_ulonglong => .{ .signedness = .unsigned, .bits = cg.target.cTypeBitSize(.longlong) },
            .f16, .f32, .f64, .f80, .f128, .c_longdouble => null,
            .anyopaque,
            .void,
            .type,
            .comptime_int,
            .comptime_float,
            .noreturn,
            .null,
            .undefined,
            .enum_literal,
            .adhoc_inferred_error_set,
            .generic_poison,
            => unreachable,
        },
        .struct_type => {
            const loaded_struct = ip.loadStructType(ty_index);
            switch (loaded_struct.layout) {
                .auto, .@"extern" => return null,
                .@"packed" => ty_index = loaded_struct.backingIntTypeUnordered(ip),
            }
        },
        .union_type => return switch (ip.loadUnionType(ty_index).flagsUnordered(ip).layout) {
            .auto, .@"extern" => null,
            .@"packed" => .{ .signedness = .unsigned, .bits = @intCast(ty.bitSize(zcu)) },
        },
        .enum_type => ty_index = ip.loadEnumType(ty_index).tag_ty,
        .error_set_type, .inferred_error_set_type => return .{ .signedness = .unsigned, .bits = zcu.errorSetBits() },
        else => return null,
    };
}

fn floatBits(cg: *CodeGen, ty: Type) ?u16 {
    return if (ty.isRuntimeFloat()) ty.floatBits(cg.target.*) else null;
}

const Temp = struct {
    index: Air.Inst.Index,

    fn unwrap(temp: Temp, cg: *CodeGen) union(enum) {
        ref: Air.Inst.Ref,
        temp: Index,
        err_ret_trace,
    } {
        switch (temp.index.unwrap()) {
            .ref => |ref| return .{ .ref = ref },
            .target => |target_index| {
                if (temp.index == err_ret_trace_index) return .err_ret_trace;
                const temp_index: Index = @enumFromInt(target_index);
                assert(temp_index.isValid(cg));
                return .{ .temp = temp_index };
            },
        }
    }

    fn typeOf(temp: Temp, cg: *CodeGen) Type {
        return switch (temp.unwrap(cg)) {
            .ref => switch (cg.air.instructions.items(.tag)[@intFromEnum(temp.index)]) {
                .loop_switch_br => cg.typeOf(cg.air.unwrapSwitch(temp.index).operand),
                else => cg.air.typeOfIndex(temp.index, &cg.pt.zcu.intern_pool),
            },
            .temp => |temp_index| temp_index.typeOf(cg),
            .err_ret_trace => .usize,
        };
    }

    fn isMut(temp: Temp, cg: *CodeGen) bool {
        return switch (temp.unwrap(cg)) {
            .ref, .err_ret_trace => false,
            .temp => |temp_index| switch (temp_index.tracking(cg).short) {
                .none,
                .unreach,
                .dead,
                .undef,
                .immediate,
                .eflags,
                .register_offset,
                .register_mask,
                .memory,
                .load_symbol,
                .lea_symbol,
                .indirect,
                .load_direct,
                .lea_direct,
                .load_got,
                .lea_got,
                .load_tlv,
                .lea_tlv,
                .lea_frame,
                .elementwise_regs_then_frame,
                .reserved_frame,
                .air_ref,
                => false,
                .register,
                .register_pair,
                .register_triple,
                .register_quadruple,
                .register_overflow,
                => true,
                .load_frame => |frame_addr| !frame_addr.index.isNamed(),
            },
        };
    }

    fn tracking(temp: Temp, cg: *CodeGen) InstTracking {
        return cg.inst_tracking.get(temp.index).?;
    }

    fn getOffset(temp: Temp, off: i32, cg: *CodeGen) !Temp {
        const new_temp_index = cg.next_temp_index;
        cg.temp_type[@intFromEnum(new_temp_index)] = .usize;
        cg.next_temp_index = @enumFromInt(@intFromEnum(new_temp_index) + 1);
        switch (temp.tracking(cg).short) {
            else => |mcv| std.debug.panic("{s}: {}\n", .{ @src().fn_name, mcv }),
            .register => |reg| {
                const new_reg =
                    try cg.register_manager.allocReg(new_temp_index.toIndex(), abi.RegisterClass.gp);
                new_temp_index.tracking(cg).* = .init(.{ .register = new_reg });
                try cg.asmRegisterMemory(.{ ._, .lea }, new_reg.to64(), .{
                    .base = .{ .reg = reg.to64() },
                    .mod = .{ .rm = .{
                        .size = .qword,
                        .disp = off,
                    } },
                });
            },
            .register_offset => |reg_off| {
                const new_reg =
                    try cg.register_manager.allocReg(new_temp_index.toIndex(), abi.RegisterClass.gp);
                new_temp_index.tracking(cg).* = .init(.{ .register = new_reg });
                try cg.asmRegisterMemory(.{ ._, .lea }, new_reg.to64(), .{
                    .base = .{ .reg = reg_off.reg.to64() },
                    .mod = .{ .rm = .{
                        .size = .qword,
                        .disp = reg_off.off + off,
                    } },
                });
            },
            .lea_symbol => |sym_off| new_temp_index.tracking(cg).* = .init(.{ .lea_symbol = .{
                .sym_index = sym_off.sym_index,
                .off = sym_off.off + off,
            } }),
            .load_frame => |frame_addr| {
                const new_reg =
                    try cg.register_manager.allocReg(new_temp_index.toIndex(), abi.RegisterClass.gp);
                new_temp_index.tracking(cg).* = .init(.{ .register_offset = .{
                    .reg = new_reg,
                    .off = off,
                } });
                try cg.asmRegisterMemory(.{ ._, .mov }, new_reg.to64(), .{
                    .base = .{ .frame = frame_addr.index },
                    .mod = .{ .rm = .{
                        .size = .qword,
                        .disp = frame_addr.off,
                    } },
                });
            },
            .lea_frame => |frame_addr| new_temp_index.tracking(cg).* = .init(.{ .lea_frame = .{
                .index = frame_addr.index,
                .off = frame_addr.off + off,
            } }),
        }
        return .{ .index = new_temp_index.toIndex() };
    }

    fn toOffset(temp: *Temp, off: i32, cg: *CodeGen) !void {
        if (off == 0) return;
        switch (temp.unwrap(cg)) {
            .ref, .err_ret_trace => {},
            .temp => |temp_index| {
                const temp_tracking = temp_index.tracking(cg);
                switch (temp_tracking.short) {
                    else => {},
                    .register => |reg| {
                        try cg.freeValue(temp_tracking.long);
                        temp_tracking.* = .init(.{ .register_offset = .{
                            .reg = reg,
                            .off = off,
                        } });
                        return;
                    },
                    .register_offset => |reg_off| {
                        try cg.freeValue(temp_tracking.long);
                        temp_tracking.* = .init(.{ .register_offset = .{
                            .reg = reg_off.reg,
                            .off = reg_off.off + off,
                        } });
                        return;
                    },
                    .lea_symbol => |sym_off| {
                        assert(std.meta.eql(temp_tracking.long.lea_symbol, sym_off));
                        temp_tracking.* = .init(.{ .lea_symbol = .{
                            .sym_index = sym_off.sym_index,
                            .off = sym_off.off + off,
                        } });
                        return;
                    },
                    .lea_frame => |frame_addr| {
                        assert(std.meta.eql(temp_tracking.long.lea_frame, frame_addr));
                        temp_tracking.* = .init(.{ .lea_frame = .{
                            .index = frame_addr.index,
                            .off = frame_addr.off + off,
                        } });
                        return;
                    },
                }
            },
        }
        const new_temp = try temp.getOffset(off, cg);
        try temp.die(cg);
        temp.* = new_temp;
    }

    fn getLimb(temp: Temp, limb_ty: Type, limb_index: u28, cg: *CodeGen) !Temp {
        const new_temp_index = cg.next_temp_index;
        cg.temp_type[@intFromEnum(new_temp_index)] = limb_ty;
        switch (temp.tracking(cg).short) {
            else => |mcv| std.debug.panic("{s}: {}\n", .{ @src().fn_name, mcv }),
            .immediate => |imm| {
                assert(limb_index == 0);
                new_temp_index.tracking(cg).* = .init(.{ .immediate = imm });
            },
            .register => |reg| {
                assert(limb_index == 0);
                const new_reg =
                    try cg.register_manager.allocReg(new_temp_index.toIndex(), abi.RegisterClass.gp);
                new_temp_index.tracking(cg).* = .init(.{ .register = new_reg });
                try cg.asmRegisterRegister(.{ ._, .mov }, new_reg.to64(), reg.to64());
            },
            .register_pair => |regs| {
                const new_reg =
                    try cg.register_manager.allocReg(new_temp_index.toIndex(), abi.RegisterClass.gp);
                new_temp_index.tracking(cg).* = .init(.{ .register = new_reg });
                try cg.asmRegisterRegister(.{ ._, .mov }, new_reg.to64(), regs[limb_index].to64());
            },
            .register_offset => |reg_off| {
                assert(limb_index == 0);
                const new_reg =
                    try cg.register_manager.allocReg(new_temp_index.toIndex(), abi.RegisterClass.gp);
                new_temp_index.tracking(cg).* = .init(.{ .register = new_reg });
                try cg.asmRegisterMemory(.{ ._, .lea }, new_reg.to64(), .{
                    .base = .{ .reg = reg_off.reg.to64() },
                    .mod = .{ .rm = .{
                        .size = .qword,
                        .disp = reg_off.off + @as(u31, limb_index) * 8,
                    } },
                });
            },
            .load_symbol => |sym_off| {
                const new_reg =
                    try cg.register_manager.allocReg(new_temp_index.toIndex(), abi.RegisterClass.gp);
                new_temp_index.tracking(cg).* = .init(.{ .register = new_reg });
                try cg.asmRegisterMemory(.{ ._, .mov }, new_reg.to64(), .{
                    .base = .{ .reloc = sym_off.sym_index },
                    .mod = .{ .rm = .{
                        .size = .qword,
                        .disp = sym_off.off + @as(u31, limb_index) * 8,
                    } },
                });
            },
            .lea_symbol => |sym_off| {
                assert(limb_index == 0);
                new_temp_index.tracking(cg).* = .init(.{ .lea_symbol = sym_off });
            },
            .load_frame => |frame_addr| {
                const new_reg =
                    try cg.register_manager.allocReg(new_temp_index.toIndex(), abi.RegisterClass.gp);
                new_temp_index.tracking(cg).* = .init(.{ .register = new_reg });
                try cg.asmRegisterMemory(.{ ._, .mov }, new_reg.to64(), .{
                    .base = .{ .frame = frame_addr.index },
                    .mod = .{ .rm = .{
                        .size = .qword,
                        .disp = frame_addr.off + @as(u31, limb_index) * 8,
                    } },
                });
            },
            .lea_frame => |frame_addr| {
                assert(limb_index == 0);
                new_temp_index.tracking(cg).* = .init(.{ .lea_frame = frame_addr });
            },
        }
        cg.next_temp_index = @enumFromInt(@intFromEnum(new_temp_index) + 1);
        return .{ .index = new_temp_index.toIndex() };
    }

    fn toLimb(temp: *Temp, limb_ty: Type, limb_index: u28, cg: *CodeGen) !void {
        switch (temp.unwrap(cg)) {
            .ref => {},
            .temp => |temp_index| {
                const temp_tracking = temp_index.tracking(cg);
                switch (temp_tracking.short) {
                    else => {},
                    .register, .lea_symbol, .lea_frame => {
                        assert(limb_index == 0);
                        cg.temp_type[@intFromEnum(temp_index)] = limb_ty;
                        return;
                    },
                    .register_pair => |regs| {
                        switch (temp_tracking.long) {
                            .none, .reserved_frame => {},
                            else => temp_tracking.long =
                                temp_tracking.long.address().offset(@as(u31, limb_index) * 8).deref(),
                        }
                        for (regs, 0..) |reg, reg_index| if (reg_index != limb_index)
                            cg.register_manager.freeReg(reg);
                        temp_tracking.* = .init(.{ .register = regs[limb_index] });
                        cg.temp_type[@intFromEnum(temp_index)] = limb_ty;
                        return;
                    },
                    .load_symbol => |sym_off| {
                        assert(std.meta.eql(temp_tracking.long.load_symbol, sym_off));
                        temp_tracking.* = .init(.{ .load_symbol = .{
                            .sym_index = sym_off.sym_index,
                            .off = sym_off.off + @as(u31, limb_index) * 8,
                        } });
                        cg.temp_type[@intFromEnum(temp_index)] = limb_ty;
                        return;
                    },
                    .load_frame => |frame_addr| if (!frame_addr.index.isNamed()) {
                        assert(std.meta.eql(temp_tracking.long.load_frame, frame_addr));
                        temp_tracking.* = .init(.{ .load_frame = .{
                            .index = frame_addr.index,
                            .off = frame_addr.off + @as(u31, limb_index) * 8,
                        } });
                        cg.temp_type[@intFromEnum(temp_index)] = limb_ty;
                        return;
                    },
                }
            },
            .err_ret_trace => unreachable,
        }
        const new_temp = try temp.getLimb(limb_ty, limb_index, cg);
        try temp.die(cg);
        temp.* = new_temp;
    }

    fn toSlicePtr(temp: *Temp, cg: *CodeGen) !void {
        const temp_ty = temp.typeOf(cg);
        if (temp_ty.isSlice(cg.pt.zcu)) try temp.toLimb(temp_ty.slicePtrFieldType(cg.pt.zcu), 0, cg);
    }

    fn toSliceLen(temp: *Temp, cg: *CodeGen) !void {
        try temp.toLimb(.usize, 1, cg);
    }
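
    // In these helpers a "limb" is one 8-byte chunk of a wider value: limb 0
    // of a slice is its pointer and limb 1 its length, which is what
    // toSlicePtr and toSliceLen select.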
|
|
|
|
fn toReg(temp: *Temp, new_reg: Register, cg: *CodeGen) !bool {
|
|
const val, const ty: Type = val_ty: switch (temp.unwrap(cg)) {
|
|
.ref => |ref| .{ temp.tracking(cg).short, cg.typeOf(ref) },
|
|
.temp => |temp_index| {
|
|
const temp_tracking = temp_index.tracking(cg);
|
|
if (temp_tracking.short == .register and
|
|
temp_tracking.short.register == new_reg) return false;
|
|
break :val_ty .{ temp_tracking.short, temp_index.typeOf(cg) };
|
|
},
|
|
.err_ret_trace => .{ temp.tracking(cg).short, .usize },
|
|
};
|
|
const new_temp_index = cg.next_temp_index;
|
|
try cg.register_manager.getReg(new_reg, new_temp_index.toIndex());
|
|
cg.temp_type[@intFromEnum(new_temp_index)] = ty;
|
|
try cg.genSetReg(new_reg, ty, val, .{});
|
|
new_temp_index.tracking(cg).* = .init(.{ .register = new_reg });
|
|
try temp.die(cg);
|
|
cg.next_temp_index = @enumFromInt(@intFromEnum(new_temp_index) + 1);
|
|
temp.* = .{ .index = new_temp_index.toIndex() };
|
|
return true;
|
|
}
|
|
|
|
fn toRegClass(temp: *Temp, mut: bool, rc: Register.Class, cg: *CodeGen) !bool {
|
|
const val = temp.tracking(cg).short;
|
|
if (!mut or temp.isMut(cg)) switch (val) {
|
|
else => {},
|
|
.register => |reg| if (reg.class() == rc) return false,
|
|
.register_offset => |reg_off| if (reg_off.reg.class() == rc and reg_off.off == 0) return false,
|
|
};
|
|
const ty = temp.typeOf(cg);
|
|
const new_temp_index = cg.next_temp_index;
|
|
cg.temp_type[@intFromEnum(new_temp_index)] = ty;
|
|
const new_reg = try cg.register_manager.allocReg(new_temp_index.toIndex(), regSetForRegClass(rc));
|
|
try cg.genSetReg(new_reg, ty, val, .{});
|
|
new_temp_index.tracking(cg).* = .init(.{ .register = new_reg });
|
|
try temp.die(cg);
|
|
cg.next_temp_index = @enumFromInt(@intFromEnum(new_temp_index) + 1);
|
|
temp.* = .{ .index = new_temp_index.toIndex() };
|
|
return true;
|
|
}
|
|
|
|
fn toPair(first_temp: *Temp, second_temp: *Temp, cg: *CodeGen) !void {
|
|
while (true) for ([_]*Temp{ first_temp, second_temp }) |part_temp| {
|
|
if (try part_temp.toRegClass(true, .general_purpose, cg)) break;
|
|
} else break;
|
|
const first_temp_tracking = first_temp.unwrap(cg).temp.tracking(cg);
|
|
const second_temp_tracking = second_temp.unwrap(cg).temp.tracking(cg);
|
|
const result: MCValue = .{ .register_pair = .{
|
|
first_temp_tracking.short.register,
|
|
second_temp_tracking.short.register,
|
|
} };
|
|
const result_temp_index = cg.next_temp_index;
|
|
const result_temp: Temp = .{ .index = result_temp_index.toIndex() };
|
|
assert(cg.reuseTemp(result_temp.index, first_temp.index, first_temp_tracking));
|
|
assert(cg.reuseTemp(result_temp.index, second_temp.index, second_temp_tracking));
|
|
cg.temp_type[@intFromEnum(result_temp_index)] = .slice_const_u8;
|
|
result_temp_index.tracking(cg).* = .init(result);
|
|
first_temp.* = result_temp;
|
|
second_temp.* = result_temp;
|
|
}
|
|
|
|
fn asMask(temp: Temp, info: MaskInfo, cg: *CodeGen) void {
|
|
assert(info.scalar != .none);
|
|
const mcv = &temp.unwrap(cg).temp.tracking(cg).short;
|
|
const reg = mcv.register;
|
|
mcv.* = .{ .register_mask = .{ .reg = reg, .info = info } };
|
|
}
|
|
|
|
fn toLea(temp: *Temp, cg: *CodeGen) !bool {
|
|
switch (temp.tracking(cg).short) {
|
|
.none,
|
|
.unreach,
|
|
.dead,
|
|
.undef,
|
|
.eflags,
|
|
.register_pair,
|
|
.register_triple,
|
|
.register_quadruple,
|
|
.register_overflow,
|
|
.register_mask,
|
|
.elementwise_regs_then_frame,
|
|
.reserved_frame,
|
|
.air_ref,
|
|
=> unreachable, // not a valid pointer
|
|
.immediate,
|
|
.register,
|
|
.register_offset,
|
|
.lea_direct,
|
|
.lea_got,
|
|
.lea_tlv,
|
|
.lea_frame,
|
|
=> return false,
|
|
.memory,
|
|
.indirect,
|
|
.load_symbol,
|
|
.load_direct,
|
|
.load_got,
|
|
.load_tlv,
|
|
.load_frame,
|
|
=> return temp.toRegClass(true, .general_purpose, cg),
|
|
.lea_symbol => |sym_off| {
|
|
const off = sym_off.off;
|
|
if (off == 0) return false;
|
|
try temp.toOffset(-off, cg);
|
|
while (try temp.toRegClass(true, .general_purpose, cg)) {}
|
|
try temp.toOffset(off, cg);
|
|
return true;
|
|
},
|
|
}
|
|
}
|
|
|
|
fn toMemory(temp: *Temp, cg: *CodeGen) !bool {
|
|
const temp_tracking = temp.tracking(cg);
|
|
if (temp_tracking.short.isMemory()) return false;
|
|
const new_temp_index = cg.next_temp_index;
|
|
const ty = temp.typeOf(cg);
|
|
cg.temp_type[@intFromEnum(new_temp_index)] = ty;
|
|
const new_frame_index = try cg.allocFrameIndex(.initSpill(ty, cg.pt.zcu));
|
|
try cg.genSetMem(.{ .frame = new_frame_index }, 0, ty, temp_tracking.short, .{});
|
|
new_temp_index.tracking(cg).* = .init(.{ .load_frame = .{ .index = new_frame_index } });
|
|
try temp.die(cg);
|
|
cg.next_temp_index = @enumFromInt(@intFromEnum(new_temp_index) + 1);
|
|
temp.* = .{ .index = new_temp_index.toIndex() };
|
|
return true;
|
|
}
|
|
|
|
// hack around linker relocation bugs
|
|
fn toBase(temp: *Temp, cg: *CodeGen) !bool {
|
|
const temp_tracking = temp.tracking(cg);
|
|
if (temp_tracking.short.isBase()) return false;
|
|
if (try temp.toMemory(cg)) return true;
|
|
const new_temp_index = cg.next_temp_index;
|
|
cg.temp_type[@intFromEnum(new_temp_index)] = temp.typeOf(cg);
|
|
const new_reg =
|
|
try cg.register_manager.allocReg(new_temp_index.toIndex(), abi.RegisterClass.gp);
|
|
try cg.genSetReg(new_reg, .usize, temp_tracking.short.address(), .{});
|
|
new_temp_index.tracking(cg).* = .init(.{ .indirect = .{ .reg = new_reg } });
|
|
try temp.die(cg);
|
|
cg.next_temp_index = @enumFromInt(@intFromEnum(new_temp_index) + 1);
|
|
temp.* = .{ .index = new_temp_index.toIndex() };
|
|
return true;
|
|
}
|
|
|
|
const AccessOptions = struct {
|
|
disp: i32 = 0,
|
|
safe: bool = false,
|
|
};
|
|
|
|
fn load(ptr: *Temp, val_ty: Type, opts: AccessOptions, cg: *CodeGen) !Temp {
|
|
const val = try cg.tempAlloc(val_ty);
|
|
try ptr.toOffset(opts.disp, cg);
|
|
while (try ptr.toLea(cg)) {}
|
|
const val_mcv = val.tracking(cg).short;
|
|
switch (val_mcv) {
|
|
else => |mcv| std.debug.panic("{s}: {}\n", .{ @src().fn_name, mcv }),
|
|
.register => |val_reg| try ptr.loadReg(val_ty, registerAlias(
|
|
val_reg,
|
|
@intCast(val_ty.abiSize(cg.pt.zcu)),
|
|
), cg),
|
|
inline .register_pair,
|
|
.register_triple,
|
|
.register_quadruple,
|
|
=> |val_regs| for (val_regs) |val_reg| {
|
|
try ptr.loadReg(val_ty, val_reg, cg);
|
|
try ptr.toOffset(@divExact(val_reg.bitSize(), 8), cg);
|
|
while (try ptr.toLea(cg)) {}
|
|
},
|
|
.register_offset => |val_reg_off| switch (val_reg_off.off) {
|
|
0 => try ptr.loadReg(val_ty, registerAlias(
|
|
val_reg_off.reg,
|
|
@intCast(val_ty.abiSize(cg.pt.zcu)),
|
|
), cg),
|
|
else => unreachable,
|
|
},
|
|
.memory, .indirect, .load_frame, .load_symbol => {
|
|
var val_ptr = try cg.tempInit(.usize, val_mcv.address());
|
|
var len = try cg.tempInit(.usize, .{ .immediate = val_ty.abiSize(cg.pt.zcu) });
|
|
try val_ptr.memcpy(ptr, &len, cg);
|
|
try val_ptr.die(cg);
|
|
try len.die(cg);
|
|
},
|
|
}
|
|
return val;
|
|
}
|
|
|
|
fn store(ptr: *Temp, val: *Temp, opts: AccessOptions, cg: *CodeGen) !void {
|
|
const val_ty = val.typeOf(cg);
|
|
try ptr.toOffset(opts.disp, cg);
|
|
while (try ptr.toLea(cg)) {}
|
|
val_to_gpr: while (true) : (while (try ptr.toLea(cg) or
|
|
try val.toRegClass(false, .general_purpose, cg))
|
|
{}) {
|
|
const val_mcv = val.tracking(cg).short;
|
|
switch (val_mcv) {
|
|
else => |mcv| std.debug.panic("{s}: {}\n", .{ @src().fn_name, mcv }),
|
|
.undef => if (opts.safe) {
|
|
var pat = try cg.tempInit(.u8, .{ .immediate = 0xaa });
|
|
var len = try cg.tempInit(.usize, .{ .immediate = val_ty.abiSize(cg.pt.zcu) });
|
|
try ptr.memset(&pat, &len, cg);
|
|
try pat.die(cg);
|
|
try len.die(cg);
|
|
},
|
|
.immediate => |val_imm| {
|
|
const val_op: Immediate = if (std.math.cast(u31, val_imm)) |val_uimm31|
|
|
.u(val_uimm31)
|
|
else if (std.math.cast(i32, @as(i64, @bitCast(val_imm)))) |val_simm32|
|
|
.s(val_simm32)
|
|
else
|
|
continue :val_to_gpr;
|
|
// hack around linker relocation bugs
|
|
switch (ptr.tracking(cg).short) {
|
|
else => {},
|
|
.lea_symbol => while (try ptr.toRegClass(false, .general_purpose, cg)) {},
|
|
}
|
|
try cg.asmMemoryImmediate(
|
|
.{ ._, .mov },
|
|
try ptr.tracking(cg).short.deref().mem(cg, .{
|
|
.size = cg.memSize(val_ty),
|
|
}),
|
|
val_op,
|
|
);
|
|
},
|
|
.eflags => |cc| {
|
|
// hack around linker relocation bugs
|
|
switch (ptr.tracking(cg).short) {
|
|
else => {},
|
|
.lea_symbol => while (try ptr.toRegClass(false, .general_purpose, cg)) {},
|
|
}
|
|
try cg.asmSetccMemory(
|
|
cc,
|
|
try ptr.tracking(cg).short.deref().mem(cg, .{ .size = .byte }),
|
|
);
|
|
},
|
|
.register => |val_reg| try ptr.storeRegs(val_ty, &.{registerAlias(
|
|
val_reg,
|
|
@intCast(val_ty.abiSize(cg.pt.zcu)),
|
|
)}, cg),
|
|
inline .register_pair,
|
|
.register_triple,
|
|
.register_quadruple,
|
|
=> |val_regs| try ptr.storeRegs(val_ty, &val_regs, cg),
|
|
.register_offset => |val_reg_off| switch (val_reg_off.off) {
|
|
0 => try ptr.storeRegs(val_ty, &.{registerAlias(
|
|
val_reg_off.reg,
|
|
@intCast(val_ty.abiSize(cg.pt.zcu)),
|
|
)}, cg),
|
|
else => continue :val_to_gpr,
|
|
},
|
|
.register_overflow => |val_reg_ov| {
|
|
const ip = &cg.pt.zcu.intern_pool;
|
|
const first_ty: Type = .fromInterned(first_ty: switch (ip.indexToKey(val_ty.toIntern())) {
|
|
.tuple_type => |tuple_type| {
|
|
const tuple_field_types = tuple_type.types.get(ip);
|
|
assert(tuple_field_types.len == 2 and tuple_field_types[1] == .u1_type);
|
|
break :first_ty tuple_field_types[0];
|
|
},
|
|
.opt_type => |opt_child| {
|
|
assert(!val_ty.optionalReprIsPayload(cg.pt.zcu));
|
|
break :first_ty opt_child;
|
|
},
|
|
else => std.debug.panic("{s}: {}\n", .{ @src().fn_name, val_ty.fmt(cg.pt) }),
|
|
});
|
|
const first_size: u31 = @intCast(first_ty.abiSize(cg.pt.zcu));
|
|
try ptr.storeRegs(first_ty, &.{registerAlias(val_reg_ov.reg, first_size)}, cg);
|
|
try ptr.toOffset(first_size, cg);
|
|
try cg.asmSetccMemory(
|
|
val_reg_ov.eflags,
|
|
try ptr.tracking(cg).short.deref().mem(cg, .{ .size = .byte }),
|
|
);
|
|
},
|
|
.lea_frame, .lea_symbol => continue :val_to_gpr,
|
|
.memory, .indirect, .load_frame, .load_symbol => {
|
|
var val_ptr = try cg.tempInit(.usize, val_mcv.address());
|
|
var len = try cg.tempInit(.usize, .{ .immediate = val_ty.abiSize(cg.pt.zcu) });
|
|
try ptr.memcpy(&val_ptr, &len, cg);
|
|
try val_ptr.die(cg);
|
|
try len.die(cg);
|
|
},
|
|
}
|
|
break;
|
|
}
|
|
}
|
|
|
|
fn read(src: *Temp, val_ty: Type, opts: AccessOptions, cg: *CodeGen) !Temp {
|
|
var val = try cg.tempAlloc(val_ty);
|
|
while (try src.toBase(cg)) {}
|
|
const val_mcv = val.tracking(cg).short;
|
|
switch (val_mcv) {
|
|
else => |mcv| std.debug.panic("{s}: {}\n", .{ @src().fn_name, mcv }),
|
|
.register => |val_reg| try src.readReg(opts.disp, val_ty, registerAlias(
|
|
val_reg,
|
|
@intCast(val_ty.abiSize(cg.pt.zcu)),
|
|
), cg),
|
|
inline .register_pair, .register_triple, .register_quadruple => |val_regs| {
|
|
var disp = opts.disp;
|
|
for (val_regs) |val_reg| {
|
|
try src.readReg(disp, val_ty, val_reg, cg);
|
|
disp += @divExact(val_reg.bitSize(), 8);
|
|
}
|
|
},
|
|
.register_offset => |val_reg_off| switch (val_reg_off.off) {
|
|
0 => try src.readReg(opts.disp, val_ty, registerAlias(
|
|
val_reg_off.reg,
|
|
@intCast(val_ty.abiSize(cg.pt.zcu)),
|
|
), cg),
|
|
else => unreachable,
|
|
},
|
|
.memory, .indirect, .load_frame, .load_symbol => {
|
|
var val_ptr = try cg.tempInit(.usize, val_mcv.address());
|
|
var src_ptr =
|
|
try cg.tempInit(.usize, src.tracking(cg).short.address().offset(opts.disp));
|
|
var len = try cg.tempInit(.usize, .{ .immediate = val_ty.abiSize(cg.pt.zcu) });
|
|
try val_ptr.memcpy(&src_ptr, &len, cg);
|
|
try val_ptr.die(cg);
|
|
try src_ptr.die(cg);
|
|
try len.die(cg);
|
|
},
|
|
}
|
|
return val;
|
|
}
|
|
|
|
fn write(dst: *Temp, val: *Temp, opts: AccessOptions, cg: *CodeGen) !void {
|
|
const val_ty = val.typeOf(cg);
|
|
while (try dst.toBase(cg)) {}
|
|
val_to_gpr: while (true) : (while (try dst.toBase(cg) or
|
|
try val.toRegClass(false, .general_purpose, cg))
|
|
{}) {
|
|
const val_mcv = val.tracking(cg).short;
|
|
switch (val_mcv) {
|
|
else => |mcv| std.debug.panic("{s}: {}\n", .{ @src().fn_name, mcv }),
|
|
.undef => if (opts.safe) {
|
|
var dst_ptr = try cg.tempInit(.usize, dst.tracking(cg).short.address().offset(opts.disp));
|
|
var pat = try cg.tempInit(.u8, .{ .immediate = 0xaa });
|
|
var len = try cg.tempInit(.usize, .{ .immediate = val_ty.abiSize(cg.pt.zcu) });
|
|
try dst_ptr.memset(&pat, &len, cg);
|
|
try dst_ptr.die(cg);
|
|
try pat.die(cg);
|
|
try len.die(cg);
|
|
},
|
|
.immediate => |val_imm| {
|
|
const val_op: Immediate = if (std.math.cast(u31, val_imm)) |val_uimm31|
|
|
.u(val_uimm31)
|
|
else if (std.math.cast(i32, @as(i64, @bitCast(val_imm)))) |val_simm32|
|
|
.s(val_simm32)
|
|
else
|
|
continue :val_to_gpr;
|
|
try cg.asmMemoryImmediate(
|
|
.{ ._, .mov },
|
|
try dst.tracking(cg).short.mem(cg, .{
|
|
.size = cg.memSize(val_ty),
|
|
.disp = opts.disp,
|
|
}),
|
|
val_op,
|
|
);
|
|
},
|
|
.eflags => |cc| try cg.asmSetccMemory(
|
|
cc,
|
|
try dst.tracking(cg).short.mem(cg, .{
|
|
.size = .byte,
|
|
.disp = opts.disp,
|
|
}),
|
|
),
|
|
.register => |val_reg| try dst.writeRegs(opts.disp, val_ty, &.{registerAlias(
|
|
val_reg,
|
|
@intCast(val_ty.abiSize(cg.pt.zcu)),
|
|
)}, cg),
|
|
inline .register_pair,
|
|
.register_triple,
|
|
.register_quadruple,
|
|
=> |val_regs| try dst.writeRegs(opts.disp, val_ty, &val_regs, cg),
|
|
.register_offset => |val_reg_off| switch (val_reg_off.off) {
|
|
0 => try dst.writeRegs(opts.disp, val_ty, &.{registerAlias(
|
|
val_reg_off.reg,
|
|
@intCast(val_ty.abiSize(cg.pt.zcu)),
|
|
)}, cg),
|
|
else => continue :val_to_gpr,
|
|
},
|
|
.register_overflow => |val_reg_ov| {
|
|
const ip = &cg.pt.zcu.intern_pool;
|
|
const first_ty: Type = .fromInterned(first_ty: switch (ip.indexToKey(val_ty.toIntern())) {
|
|
.tuple_type => |tuple_type| {
|
|
const tuple_field_types = tuple_type.types.get(ip);
|
|
assert(tuple_field_types.len == 2 and tuple_field_types[1] == .u1_type);
|
|
break :first_ty tuple_field_types[0];
|
|
},
|
|
.opt_type => |opt_child| {
|
|
assert(!val_ty.optionalReprIsPayload(cg.pt.zcu));
|
|
break :first_ty opt_child;
|
|
},
|
|
else => std.debug.panic("{s}: {}\n", .{ @src().fn_name, val_ty.fmt(cg.pt) }),
|
|
});
|
|
const first_size: u31 = @intCast(first_ty.abiSize(cg.pt.zcu));
|
|
try dst.writeRegs(opts.disp, first_ty, &.{registerAlias(val_reg_ov.reg, first_size)}, cg);
|
|
try cg.asmSetccMemory(
|
|
val_reg_ov.eflags,
|
|
try dst.tracking(cg).short.mem(cg, .{
|
|
.size = .byte,
|
|
.disp = opts.disp + first_size,
|
|
}),
|
|
);
|
|
},
|
|
.lea_frame, .lea_symbol => continue :val_to_gpr,
|
|
.memory, .indirect, .load_frame, .load_symbol => {
|
|
var dst_ptr =
|
|
try cg.tempInit(.usize, dst.tracking(cg).short.address().offset(opts.disp));
|
|
var val_ptr = try cg.tempInit(.usize, val_mcv.address());
|
|
var len = try cg.tempInit(.usize, .{ .immediate = val_ty.abiSize(cg.pt.zcu) });
|
|
try dst_ptr.memcpy(&val_ptr, &len, cg);
|
|
try dst_ptr.die(cg);
|
|
try val_ptr.die(cg);
|
|
try len.die(cg);
|
|
},
|
|
}
|
|
break;
|
|
}
|
|
}
|
|
|
|
fn loadReg(ptr: *Temp, dst_ty: Type, dst_reg: Register, cg: *CodeGen) !void {
|
|
const dst_rc = dst_reg.class();
|
|
const strat = try cg.moveStrategy(dst_ty, dst_rc, false);
|
|
// hack around linker relocation bugs
|
|
switch (ptr.tracking(cg).short) {
|
|
else => {},
|
|
.lea_symbol => |sym_off| if (dst_rc != .general_purpose or sym_off.off != 0)
|
|
while (try ptr.toRegClass(false, .general_purpose, cg)) {},
|
|
}
|
|
try strat.read(cg, dst_reg, try ptr.tracking(cg).short.deref().mem(cg, .{
|
|
.size = .fromBitSize(@min(8 * dst_ty.abiSize(cg.pt.zcu), dst_reg.bitSize())),
|
|
}));
|
|
}
|
|
|
|
    fn storeRegs(ptr: *Temp, src_ty: Type, src_regs: []const Register, cg: *CodeGen) !void {
        var part_disp: u31 = 0;
        var deferred_disp: u31 = 0;
        var src_abi_size: u32 = @intCast(src_ty.abiSize(cg.pt.zcu));
        for (src_regs) |src_reg| {
            const src_rc = src_reg.class();
            const part_bit_size = @min(8 * src_abi_size, src_reg.bitSize());
            const part_size = @divExact(part_bit_size, 8);
            if (src_rc == .x87 or std.math.isPowerOfTwo(part_size)) {
                // hack around linker relocation bugs
                switch (ptr.tracking(cg).short) {
                    else => {},
                    .lea_symbol => while (try ptr.toRegClass(false, .general_purpose, cg)) {},
                }
                const strat = try cg.moveStrategy(src_ty, src_rc, false);
                try strat.write(cg, try ptr.tracking(cg).short.deref().mem(cg, .{
                    .size = .fromBitSize(part_bit_size),
                    .disp = part_disp,
                }), registerAlias(src_reg, part_size));
            } else {
                const frame_size = std.math.ceilPowerOfTwoAssert(u32, part_size);
                const frame_index = try cg.allocFrameIndex(.init(.{
                    .size = frame_size,
                    .alignment = .fromNonzeroByteUnits(frame_size),
                }));
                const strat = try cg.moveStrategy(src_ty, src_rc, true);
                try strat.write(cg, .{
                    .base = .{ .frame = frame_index },
                    .mod = .{ .rm = .{ .size = .fromSize(frame_size) } },
                }, registerAlias(src_reg, frame_size));
                try ptr.toOffset(deferred_disp, cg);
                deferred_disp = 0;
                var src_ptr = try cg.tempInit(.usize, .{ .lea_frame = .{ .index = frame_index } });
                var len = try cg.tempInit(.usize, .{ .immediate = src_abi_size });
                try ptr.memcpy(&src_ptr, &len, cg);
                try src_ptr.die(cg);
                try len.die(cg);
            }
            part_disp += part_size;
            deferred_disp += part_size;
            src_abi_size -= part_size;
        }
    }

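    /// Reads the value stored `disp` bytes past the memory tracked by `src`
    /// directly into `dst_reg`.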
    fn readReg(src: Temp, disp: i32, dst_ty: Type, dst_reg: Register, cg: *CodeGen) !void {
        const strat = try cg.moveStrategy(dst_ty, dst_reg.class(), false);
        try strat.read(cg, dst_reg, try src.tracking(cg).short.mem(cg, .{
            .size = .fromBitSize(@min(8 * dst_ty.abiSize(cg.pt.zcu), dst_reg.bitSize())),
            .disp = disp,
        }));
    }

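    /// Writes the parts of a value held in `src_regs` to the memory tracked
    /// by `dst`, starting at `disp`. Like `storeRegs`, odd-sized parts take
    /// a detour through a stack frame slot followed by a `memcpy`.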
    fn writeRegs(dst: Temp, disp: i32, src_ty: Type, src_regs: []const Register, cg: *CodeGen) !void {
        var part_disp = disp;
        var src_abi_size: u32 = @intCast(src_ty.abiSize(cg.pt.zcu));
        for (src_regs) |src_reg| {
            const src_rc = src_reg.class();
            const part_bit_size = @min(8 * src_abi_size, src_reg.bitSize());
            const part_size = @divExact(part_bit_size, 8);
            if (src_rc == .x87 or std.math.isPowerOfTwo(part_size)) {
                const strat = try cg.moveStrategy(src_ty, src_rc, false);
                try strat.write(cg, try dst.tracking(cg).short.mem(cg, .{
                    .size = .fromBitSize(part_bit_size),
                    .disp = part_disp,
                }), registerAlias(src_reg, part_size));
            } else {
                const frame_size = std.math.ceilPowerOfTwoAssert(u32, part_size);
                const frame_index = try cg.allocFrameIndex(.init(.{
                    .size = frame_size,
                    .alignment = .fromNonzeroByteUnits(frame_size),
                }));
                const strat = try cg.moveStrategy(src_ty, src_rc, true);
                try strat.write(cg, .{
                    .base = .{ .frame = frame_index },
                    .mod = .{ .rm = .{ .size = .fromSize(frame_size) } },
                }, registerAlias(src_reg, frame_size));
                var dst_ptr = try cg.tempInit(.usize, dst.tracking(cg).short.address());
                try dst_ptr.toOffset(part_disp, cg);
                var src_ptr = try cg.tempInit(.usize, .{ .lea_frame = .{ .index = frame_index } });
                var len = try cg.tempInit(.usize, .{ .immediate = src_abi_size });
                try dst_ptr.memcpy(&src_ptr, &len, cg);
                try dst_ptr.die(cg);
                try src_ptr.die(cg);
                try len.die(cg);
            }
            part_disp += part_size;
            src_abi_size -= part_size;
        }
    }

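    /// Copies `len` bytes from `src` to `dst` by pinning the operands to
    /// rdi/rsi/rcx and emitting `rep movsb`. All three temporaries are
    /// clobbered by the string instruction.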
    fn memcpy(dst: *Temp, src: *Temp, len: *Temp, cg: *CodeGen) !void {
        while (true) for ([_]*Temp{ dst, src, len }, [_]Register{ .rdi, .rsi, .rcx }) |temp, reg| {
            if (try temp.toReg(reg, cg)) break;
        } else break;
        try cg.asmOpOnly(.{ .@"rep _sb", .mov });
    }

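    /// Fills `len` bytes at `dst` with the byte held in `val` by pinning the
    /// operands to rdi/rax/rcx and emitting `rep stosb`. All three
    /// temporaries are clobbered by the string instruction.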
    fn memset(dst: *Temp, val: *Temp, len: *Temp, cg: *CodeGen) !void {
        while (true) for ([_]*Temp{ dst, val, len }, [_]Register{ .rdi, .rax, .rcx }) |temp, reg| {
            if (try temp.toReg(reg, cg)) break;
        } else break;
        try cg.asmOpOnly(.{ .@"rep _sb", .sto });
    }

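    /// Finalizes `temp` as the result of `inst`: kills the other operand
    /// temporaries, processes operand deaths according to liveness, and then
    /// either copies the result into a freshly allocated location or reuses
    /// the temporary's tracking for the instruction.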
    fn finish(
        temp: Temp,
        inst: Air.Inst.Index,
        op_refs: []const Air.Inst.Ref,
        op_temps: []const Temp,
        cg: *CodeGen,
    ) !void {
        const tomb_bits = cg.liveness.getTombBits(inst);
        for (0.., op_refs, op_temps) |op_index, op_ref, op_temp| {
            if (op_temp.index != temp.index) try op_temp.die(cg);
            if (tomb_bits & @as(Liveness.Bpi, 1) << @intCast(op_index) == 0) continue;
            if (cg.reused_operands.isSet(op_index)) continue;
            try cg.processDeath(op_ref.toIndexAllowNone() orelse continue);
        }
        if (cg.liveness.isUnused(inst)) try temp.die(cg) else switch (temp.unwrap(cg)) {
            .ref, .err_ret_trace => {
                const result = try cg.allocRegOrMem(inst, true);
                try cg.genCopy(cg.typeOfIndex(inst), result, temp.tracking(cg).short, .{});
                tracking_log.debug("{} => {} (birth)", .{ inst, result });
                cg.inst_tracking.putAssumeCapacityNoClobber(inst, .init(result));
            },
            .temp => |temp_index| {
                const temp_tracking = temp_index.tracking(cg);
                tracking_log.debug("{} => {} (birth)", .{ inst, temp_tracking.short });
                cg.inst_tracking.putAssumeCapacityNoClobber(inst, temp_tracking.*);
                assert(cg.reuseTemp(inst, temp_index.toIndex(), temp_tracking));
            },
        }
    }

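    /// Releases the resources tracked by `temp`. Temporaries backed by AIR
    /// instructions or the error return trace are left alone.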
    fn die(temp: Temp, cg: *CodeGen) !void {
        switch (temp.unwrap(cg)) {
            .ref, .err_ret_trace => {},
            .temp => |temp_index| try temp_index.tracking(cg).die(cg, temp_index.toIndex()),
        }
    }

    const Index = enum(u4) {
        _,

        fn toIndex(index: Index) Air.Inst.Index {
            return .fromTargetIndex(@intFromEnum(index));
        }

        fn fromIndex(index: Air.Inst.Index) Index {
            return @enumFromInt(index.toTargetIndex());
        }

        fn tracking(index: Index, cg: *CodeGen) *InstTracking {
            return &cg.inst_tracking.values()[@intFromEnum(index)];
        }

        fn isValid(index: Index, cg: *CodeGen) bool {
            return index.tracking(cg).short != .dead;
        }

        fn typeOf(index: Index, cg: *CodeGen) Type {
            assert(index.isValid(cg));
            return cg.temp_type[@intFromEnum(index)];
        }

        const max = std.math.maxInt(@typeInfo(Index).@"enum".tag_type);
        const Set = std.StaticBitSet(max);
        const SafetySet = if (std.debug.runtime_safety) Set else struct {
            inline fn initEmpty() @This() {
                return .{};
            }

            inline fn isSet(_: @This(), index: usize) bool {
                assert(index < max);
                return true;
            }

            inline fn set(_: @This(), index: usize) void {
                assert(index < max);
            }

            inline fn eql(_: @This(), _: @This()) bool {
                return true;
            }
        };
    };
};

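/// Asserts that every temporary allocated since the last reset has died and
/// rewinds the temporary allocator.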
fn resetTemps(cg: *CodeGen) void {
    for (0..@intFromEnum(cg.next_temp_index)) |temp_index| {
        const temp: Temp.Index = @enumFromInt(temp_index);
        assert(!temp.isValid(cg));
        cg.temp_type[temp_index] = undefined;
    }
    cg.next_temp_index = @enumFromInt(0);
}

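/// Transfers ownership of `old_inst`'s tracked value to `new_inst`,
/// retargeting any registers and eflags it occupies. Returns false if the
/// value lives in a named frame slot and therefore cannot be reused.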
fn reuseTemp(
    cg: *CodeGen,
    new_inst: Air.Inst.Index,
    old_inst: Air.Inst.Index,
    tracking: *InstTracking,
) bool {
    switch (tracking.short) {
        .register,
        .register_pair,
        .register_offset,
        .register_overflow,
        .register_mask,
        .indirect,
        => for (tracking.short.getRegs()) |tracked_reg| {
            if (RegisterManager.indexOfRegIntoTracked(tracked_reg)) |tracked_index| {
                cg.register_manager.registers[tracked_index] = new_inst;
            }
        },
        .load_frame => |frame_addr| if (frame_addr.index.isNamed()) return false,
        else => {},
    }
    switch (tracking.short) {
        .eflags, .register_overflow => cg.eflags_inst = new_inst,
        else => {},
    }
    tracking.reuse(cg, new_inst, old_inst);
    return true;
}

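/// Allocates a new temporary of type `ty`, backed by a register when
/// possible and by a frame slot otherwise.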
fn tempAlloc(cg: *CodeGen, ty: Type) !Temp {
    const temp_index = cg.next_temp_index;
    temp_index.tracking(cg).* = .init(
        try cg.allocRegOrMemAdvanced(ty, temp_index.toIndex(), true),
    );
    cg.temp_type[@intFromEnum(temp_index)] = ty;
    cg.next_temp_index = @enumFromInt(@intFromEnum(temp_index) + 1);
    return .{ .index = temp_index.toIndex() };
}

fn tempAllocReg(cg: *CodeGen, ty: Type, rs: RegisterManager.RegisterBitSet) !Temp {
    const temp_index = cg.next_temp_index;
    temp_index.tracking(cg).* = .init(
        .{ .register = try cg.register_manager.allocReg(temp_index.toIndex(), rs) },
    );
    cg.temp_type[@intFromEnum(temp_index)] = ty;
    cg.next_temp_index = @enumFromInt(@intFromEnum(temp_index) + 1);
    return .{ .index = temp_index.toIndex() };
}

fn tempAllocRegPair(cg: *CodeGen, ty: Type, rs: RegisterManager.RegisterBitSet) !Temp {
    const temp_index = cg.next_temp_index;
    temp_index.tracking(cg).* = .init(
        .{ .register_pair = try cg.register_manager.allocRegs(2, temp_index.toIndex(), rs) },
    );
    cg.temp_type[@intFromEnum(temp_index)] = ty;
    cg.next_temp_index = @enumFromInt(@intFromEnum(temp_index) + 1);
    return .{ .index = temp_index.toIndex() };
}

fn tempAllocMem(cg: *CodeGen, ty: Type) !Temp {
    const temp_index = cg.next_temp_index;
    temp_index.tracking(cg).* = .init(
        try cg.allocRegOrMemAdvanced(ty, temp_index.toIndex(), false),
    );
    cg.temp_type[@intFromEnum(temp_index)] = ty;
    cg.next_temp_index = @enumFromInt(@intFromEnum(temp_index) + 1);
    return .{ .index = temp_index.toIndex() };
}

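/// Creates a new temporary of type `ty` tracking the given `value` and takes
/// ownership of any registers the value occupies.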
fn tempInit(cg: *CodeGen, ty: Type, value: MCValue) !Temp {
    const temp_index = cg.next_temp_index;
    temp_index.tracking(cg).* = .init(value);
    cg.temp_type[@intFromEnum(temp_index)] = ty;
    try cg.getValue(value, temp_index.toIndex());
    cg.next_temp_index = @enumFromInt(@intFromEnum(temp_index) + 1);
    return .{ .index = temp_index.toIndex() };
}

fn tempFromValue(cg: *CodeGen, value: Value) !Temp {
    return cg.tempInit(value.typeOf(cg.pt.zcu), try cg.genTypedValue(value));
}

fn tempMemFromValue(cg: *CodeGen, value: Value) !Temp {
    return cg.tempInit(value.typeOf(cg.pt.zcu), try cg.lowerUav(value));
}

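/// Materializes an AIR operand as a temporary. Surviving constant operands
/// are cached in `const_tracking` (on ELF/Mach-O, a thread-local address is
/// first stored into a frame slot); a dying instruction operand has its
/// tracking reused in place when possible.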
fn tempFromOperand(
    cg: *CodeGen,
    inst: Air.Inst.Index,
    op_index: Liveness.OperandInt,
    op_ref: Air.Inst.Ref,
    ignore_death: bool,
) !Temp {
    const zcu = cg.pt.zcu;
    const ip = &zcu.intern_pool;

    if (ignore_death or !cg.liveness.operandDies(inst, op_index)) {
        if (op_ref.toIndex()) |op_inst| return .{ .index = op_inst };
        const val = op_ref.toInterned().?;
        const gop = try cg.const_tracking.getOrPut(cg.gpa, val);
        if (!gop.found_existing) gop.value_ptr.* = .init(init: {
            const const_mcv = try cg.genTypedValue(.fromInterned(val));
            switch (const_mcv) {
                .lea_tlv => |tlv_sym| switch (cg.bin_file.tag) {
                    .elf, .macho => {
                        if (cg.mod.pic) {
                            try cg.spillRegisters(&.{ .rdi, .rax });
                        } else {
                            try cg.spillRegisters(&.{.rax});
                        }
                        const frame_index = try cg.allocFrameIndex(.init(.{
                            .size = 8,
                            .alignment = .@"8",
                        }));
                        try cg.genSetMem(
                            .{ .frame = frame_index },
                            0,
                            .usize,
                            .{ .lea_symbol = .{ .sym_index = tlv_sym } },
                            .{},
                        );
                        break :init .{ .load_frame = .{ .index = frame_index } };
                    },
                    else => break :init const_mcv,
                },
                else => break :init const_mcv,
            }
        });
        return cg.tempInit(.fromInterned(ip.typeOf(val)), gop.value_ptr.short);
    }

    const temp_index = cg.next_temp_index;
    const temp: Temp = .{ .index = temp_index.toIndex() };
    const op_inst = op_ref.toIndex().?;
    const tracking = cg.getResolvedInstValue(op_inst);
    temp_index.tracking(cg).* = tracking.*;
    if (!cg.reuseTemp(temp.index, op_inst, tracking)) return .{ .index = op_ref.toIndex().? };
    cg.temp_type[@intFromEnum(temp_index)] = cg.typeOf(op_ref);
    cg.next_temp_index = @enumFromInt(@intFromEnum(temp_index) + 1);
    return temp;
}

inline fn tempsFromOperands(cg: *CodeGen, inst: Air.Inst.Index, op_refs: anytype) ![op_refs.len]Temp {
    var temps: [op_refs.len]Temp = undefined;
    inline for (&temps, 0.., op_refs) |*temp, op_index, op_ref| {
        temp.* = try cg.tempFromOperand(inst, op_index, op_ref, inline for (0..op_index) |prev_op_index| {
            if (op_ref == op_refs[prev_op_index]) break true;
        } else false);
    }
    return temps;
}

const Operand = union(enum) {
    none,
    reg: Register,
    mem: Memory,
    imm: Immediate,
    inst: Mir.Inst.Index,
};

const Select = struct {
    cg: *CodeGen,
    temps: [@intFromEnum(Select.Operand.Ref.none)]Temp,
    labels: [@intFromEnum(Label._)]struct {
        backward: ?Mir.Inst.Index,
        forward: [1]?Mir.Inst.Index,
    },
    top: u3,

    fn emitLabel(s: *Select, label_index: Label) void {
        if (label_index == ._) return;
        const label = &s.labels[@intFromEnum(label_index)];
        for (&label.forward) |*reloc| {
            if (reloc.*) |r| s.cg.performReloc(r);
            reloc.* = null;
        }
        label.backward = @intCast(s.cg.mir_instructions.len);
    }

    fn emit(s: *Select, inst: Instruction) !void {
        s.emitLabel(inst[0]);
        const mir_tag: Mir.Inst.FixedTag = .{ inst[1], inst[2] };
        var mir_ops: [4]CodeGen.Operand = undefined;
        inline for (&mir_ops, 3..) |*mir_op, inst_index| mir_op.* = try inst[inst_index].lower(s);
        s.cg.asmOps(mir_tag, mir_ops) catch |err| switch (err) {
            error.InvalidInstruction => {
                const fixes = @tagName(mir_tag[0]);
                const fixes_blank = std.mem.indexOfScalar(u8, fixes, '_').?;
                return s.cg.fail(
                    "invalid instruction: '{s}{s}{s} {s} {s} {s} {s}'",
                    .{
                        fixes[0..fixes_blank],
                        @tagName(mir_tag[1]),
                        fixes[fixes_blank + 1 ..],
                        @tagName(mir_ops[0]),
                        @tagName(mir_ops[1]),
                        @tagName(mir_ops[2]),
                        @tagName(mir_ops[3]),
                    },
                );
            },
            else => |e| return e,
        };
        switch (mir_tag[0]) {
            .f_ => switch (mir_tag[1]) {
                .@"2xm1",
                .abs,
                .add,
                .chs,
                .clex,
                .com,
                .comi,
                .cos,
                .div,
                .divr,
                .free,
                .mul,
                .nop,
                .prem,
                .rndint,
                .scale,
                .sin,
                .sqrt,
                .st,
                .sub,
                .subr,
                .tst,
                .ucom,
                .ucomi,
                .wait,
                .xam,
                .xch,
                => {},
                .init, .save => s.top = 0,
                .ld, .ptan, .sincos, .xtract => s.top -%= 1,
                .patan, .yl2x => s.top +%= 1,
                .rstor => unreachable,
                else => unreachable,
            },
            .f_1 => switch (mir_tag[1]) {
                .ld => s.top -%= 1,
                .prem => {},
                else => unreachable,
            },
            .f_b, .f_be, .f_e, .f_nb, .f_nbe, .f_ne, .f_nu, .f_u => switch (mir_tag[1]) {
                .cmov => {},
                else => unreachable,
            },
            .f_cw, .f_env, .f_sw => switch (mir_tag[1]) {
                .ld, .st => {},
                else => unreachable,
            },
            .f_p1 => switch (mir_tag[1]) {
                .yl2x => s.top +%= 1,
                else => unreachable,
            },
            .fb_ => switch (mir_tag[1]) {
                .ld => s.top -%= 1,
                else => unreachable,
            },
            .fb_p => switch (mir_tag[1]) {
                .st => s.top +%= 1,
                else => unreachable,
            },
            .fi_ => switch (mir_tag[1]) {
                .add, .com, .div, .divr, .mul, .st, .stt, .sub, .subr => {},
                .ld => s.top -%= 1,
                else => unreachable,
            },
            .fi_p => switch (mir_tag[1]) {
                .com, .st => s.top +%= 1,
                else => unreachable,
            },
            .fn_ => switch (mir_tag[1]) {
                .clex => {},
                .init, .save => s.top = 0,
                else => unreachable,
            },
            .fn_cw, .fn_env, .fn_sw => switch (mir_tag[1]) {
                .st => {},
                else => unreachable,
            },
            .f_cstp => switch (mir_tag[1]) {
                .de => s.top -%= 1,
                .in => s.top +%= 1,
                else => unreachable,
            },
            .f_l2e, .f_l2t, .f_lg2, .f_ln2, .f_pi, .f_z => switch (mir_tag[1]) {
                .ld => s.top -%= 1,
                else => unreachable,
            },
            .f_p => switch (mir_tag[1]) {
                .add, .com, .comi, .div, .divr, .mul, .st, .sub, .subr, .ucom, .ucomi => s.top +%= 1,
                else => {
                    const fixes = @tagName(mir_tag[0]);
                    const fixes_blank = std.mem.indexOfScalar(u8, fixes, '_').?;
                    std.debug.panic("{s}: {s}{s}{s}\n", .{
                        @src().fn_name,
                        fixes[0..fixes_blank],
                        @tagName(mir_tag[1]),
                        fixes[fixes_blank + 1 ..],
                    });
                },
            },
            .f_pp => switch (mir_tag[1]) {
                .com, .ucom => s.top +%= 2,
                else => unreachable,
            },
            .fx_ => switch (mir_tag[1]) {
                .rstor => unreachable,
                .save => {},
                else => unreachable,
            },
            else => {},
        }
    }

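    /// Translates an abstract x87 register to the physical st(i) operand
    /// implied by the current stack top tracked in `s.top`; registers of
    /// other classes pass through unchanged.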
    fn lowerReg(s: *const Select, reg: Register) Register {
        if (reg.class() != .x87) return reg;
        return @enumFromInt(@intFromEnum(Register.st0) + (@as(u3, @intCast(reg.enc())) -% s.top));
    }

    const Case = struct {
        required_features: [4]?std.Target.x86.Feature = @splat(null),
        dst_constraints: [@intFromEnum(Select.Operand.Ref.src0) - @intFromEnum(Select.Operand.Ref.dst0)]Constraint = @splat(.any),
        src_constraints: [@intFromEnum(Select.Operand.Ref.none) - @intFromEnum(Select.Operand.Ref.src0)]Constraint = @splat(.any),
        patterns: []const Select.Pattern,
        call_frame: packed struct(u16) { size: u10 = 0, alignment: InternPool.Alignment } = .{ .size = 0, .alignment = .none },
        extra_temps: [@intFromEnum(Select.Operand.Ref.dst0) - @intFromEnum(Select.Operand.Ref.tmp0)]TempSpec = @splat(.unused),
        dst_temps: [@intFromEnum(Select.Operand.Ref.src0) - @intFromEnum(Select.Operand.Ref.dst0)]TempSpec.Kind = @splat(.unused),
        clobbers: packed struct {
            eflags: bool = false,
            caller_preserved: enum(u2) { none, ccc, zigcc } = .none,
        } = .{},
        each: union(enum) {
            once: []const Instruction,
        },
    };

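    /// A predicate on an operand's type, used by `Case` to decide whether an
    /// instruction selection pattern applies.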
    const Constraint = union(enum) {
        any,
        any_bool_vec,
        any_int,
        any_signed_int,
        any_unsigned_int,
        any_scalar_int,
        any_scalar_signed_int,
        any_scalar_unsigned_int,
        any_float,
        po2_any,
        bool_vec: Memory.Size,
        vec: Memory.Size,
        signed_int_vec: Memory.Size,
        signed_int_or_full_vec: Memory.Size,
        unsigned_int_vec: Memory.Size,
        size: Memory.Size,
        multiple_size: Memory.Size,
        int: Memory.Size,
        scalar_int_is: Memory.Size,
        scalar_signed_int_is: Memory.Size,
        scalar_unsigned_int_is: Memory.Size,
        scalar_int: OfIsSizes,
        scalar_signed_int: OfIsSizes,
        scalar_unsigned_int: OfIsSizes,
        multiple_scalar_int: OfIsSizes,
        multiple_scalar_signed_int: OfIsSizes,
        multiple_scalar_unsigned_int: OfIsSizes,
        scalar_remainder_int: OfIsSizes,
        float: Memory.Size,
        scalar_any_float: Memory.Size,
        scalar_float: OfIsSizes,
        multiple_scalar_any_float: Memory.Size,
        multiple_scalar_float: OfIsSizes,
        exact_int: u16,
        exact_signed_int: u16,
        exact_unsigned_int: u16,
        signed_or_exact_int: Memory.Size,
        unsigned_or_exact_int: Memory.Size,
        po2_int: Memory.Size,
        signed_po2_int: Memory.Size,
        unsigned_po2_or_exact_int: Memory.Size,
        remainder_int: OfIsSizes,
        exact_remainder_int: OfIsSizes,
        signed_or_exact_remainder_int: OfIsSizes,
        unsigned_or_exact_remainder_int: OfIsSizes,
        signed_int: Memory.Size,
        unsigned_int: Memory.Size,
        elem_size_is: u8,
        po2_elem_size,
        elem_int: Memory.Size,

        const OfIsSizes = struct { of: Memory.Size, is: Memory.Size };

        fn accepts(constraint: Constraint, ty: Type, cg: *CodeGen) bool {
            const zcu = cg.pt.zcu;
            return switch (constraint) {
                .any => true,
                .any_bool_vec => ty.isVector(zcu) and ty.childType(zcu).toIntern() == .bool_type,
                .any_int => cg.intInfo(ty) != null,
                .any_signed_int => if (cg.intInfo(ty)) |int_info| int_info.signedness == .signed else false,
                .any_unsigned_int => if (cg.intInfo(ty)) |int_info| int_info.signedness == .unsigned else false,
                .any_scalar_int => cg.intInfo(ty.scalarType(zcu)) != null,
                .any_scalar_signed_int => if (cg.intInfo(ty.scalarType(zcu))) |int_info| int_info.signedness == .signed else false,
                .any_scalar_unsigned_int => if (cg.intInfo(ty.scalarType(zcu))) |int_info| int_info.signedness == .unsigned else false,
                .any_float => ty.isRuntimeFloat(),
                .po2_any => std.math.isPowerOfTwo(ty.abiSize(zcu)),
                .bool_vec => |size| ty.isVector(zcu) and ty.scalarType(zcu).toIntern() == .bool_type and
                    size.bitSize(cg.target) >= ty.vectorLen(zcu),
                .vec => |size| ty.isVector(zcu) and ty.scalarType(zcu).toIntern() != .bool_type and
                    size.bitSize(cg.target) >= ty.abiSize(zcu),
                .signed_int_vec => |size| ty.isVector(zcu) and @divExact(size.bitSize(cg.target), 8) >= ty.abiSize(zcu) and
                    if (cg.intInfo(ty.childType(zcu))) |int_info| int_info.signedness == .signed else false,
                .signed_int_or_full_vec => |size| ty.isVector(zcu) and @divExact(size.bitSize(cg.target), 8) >= ty.abiSize(zcu) and
                    if (cg.intInfo(ty.childType(zcu))) |int_info| switch (int_info.signedness) {
                        .signed => true,
                        .unsigned => int_info.bits >= 8 and std.math.isPowerOfTwo(int_info.bits),
                    } else false,
                .unsigned_int_vec => |size| ty.isVector(zcu) and @divExact(size.bitSize(cg.target), 8) >= ty.abiSize(zcu) and
                    if (cg.intInfo(ty.childType(zcu))) |int_info| int_info.signedness == .unsigned else false,
                .size => |size| @divExact(size.bitSize(cg.target), 8) >= ty.abiSize(zcu),
                .multiple_size => |size| ty.abiSize(zcu) % @divExact(size.bitSize(cg.target), 8) == 0,
                .int => |size| if (cg.intInfo(ty)) |int_info| size.bitSize(cg.target) >= int_info.bits else false,
                .scalar_int_is => |size| if (cg.intInfo(ty.scalarType(zcu))) |int_info|
                    size.bitSize(cg.target) >= int_info.bits
                else
                    false,
                .scalar_signed_int_is => |size| if (cg.intInfo(ty.scalarType(zcu))) |int_info| switch (int_info.signedness) {
                    .signed => size.bitSize(cg.target) >= int_info.bits,
                    .unsigned => false,
                } else false,
                .scalar_unsigned_int_is => |size| if (cg.intInfo(ty.scalarType(zcu))) |int_info| switch (int_info.signedness) {
                    .signed => false,
                    .unsigned => size.bitSize(cg.target) >= int_info.bits,
                } else false,
                .scalar_int => |of_is| @divExact(of_is.of.bitSize(cg.target), 8) >= cg.unalignedSize(ty) and
                    if (cg.intInfo(ty.scalarType(zcu))) |int_info| of_is.is.bitSize(cg.target) >= int_info.bits else false,
                .scalar_signed_int => |of_is| @divExact(of_is.of.bitSize(cg.target), 8) >= cg.unalignedSize(ty) and
                    if (cg.intInfo(ty.scalarType(zcu))) |int_info| int_info.signedness == .signed and
                        of_is.is.bitSize(cg.target) >= int_info.bits else false,
                .scalar_unsigned_int => |of_is| @divExact(of_is.of.bitSize(cg.target), 8) >= cg.unalignedSize(ty) and
                    if (cg.intInfo(ty.scalarType(zcu))) |int_info| int_info.signedness == .unsigned and
                        of_is.is.bitSize(cg.target) >= int_info.bits else false,
                .multiple_scalar_int => |of_is| ty.abiSize(zcu) % @divExact(of_is.of.bitSize(cg.target), 8) == 0 and
                    if (cg.intInfo(ty.scalarType(zcu))) |int_info| of_is.is.bitSize(cg.target) >= int_info.bits else false,
                .multiple_scalar_signed_int => |of_is| ty.abiSize(zcu) % @divExact(of_is.of.bitSize(cg.target), 8) == 0 and
                    if (cg.intInfo(ty.scalarType(zcu))) |int_info| int_info.signedness == .signed and
                        of_is.is.bitSize(cg.target) >= int_info.bits else false,
                .multiple_scalar_unsigned_int => |of_is| ty.abiSize(zcu) % @divExact(of_is.of.bitSize(cg.target), 8) == 0 and
                    if (cg.intInfo(ty.scalarType(zcu))) |int_info| int_info.signedness == .unsigned and
                        of_is.is.bitSize(cg.target) >= int_info.bits else false,
                .scalar_remainder_int => |of_is| if (cg.intInfo(ty.scalarType(zcu))) |int_info|
                    of_is.is.bitSize(cg.target) >= (int_info.bits - 1) % of_is.of.bitSize(cg.target) + 1
                else
                    false,
                .float => |size| if (cg.floatBits(ty)) |float_bits| size.bitSize(cg.target) == float_bits else false,
                .scalar_any_float => |size| @divExact(size.bitSize(cg.target), 8) >= ty.abiSize(zcu) and
                    cg.floatBits(ty.scalarType(zcu)) != null,
                .scalar_float => |of_is| @divExact(of_is.of.bitSize(cg.target), 8) >= cg.unalignedSize(ty) and
                    if (cg.floatBits(ty.scalarType(zcu))) |float_bits| of_is.is.bitSize(cg.target) == float_bits else false,
                .multiple_scalar_any_float => |size| ty.abiSize(zcu) % @divExact(size.bitSize(cg.target), 8) == 0 and
                    cg.floatBits(ty.scalarType(zcu)) != null,
                .multiple_scalar_float => |of_is| ty.abiSize(zcu) % @divExact(of_is.of.bitSize(cg.target), 8) == 0 and
                    if (cg.floatBits(ty.scalarType(zcu))) |float_bits| of_is.is.bitSize(cg.target) == float_bits else false,
                .exact_int => |bit_size| if (cg.intInfo(ty)) |int_info| bit_size == int_info.bits else false,
                .exact_signed_int => |bit_size| if (cg.intInfo(ty)) |int_info| switch (int_info.signedness) {
                    .signed => bit_size == int_info.bits,
                    .unsigned => false,
                } else false,
                .exact_unsigned_int => |bit_size| if (cg.intInfo(ty)) |int_info| switch (int_info.signedness) {
                    .signed => false,
                    .unsigned => bit_size == int_info.bits,
                } else false,
                .signed_or_exact_int => |size| if (cg.intInfo(ty)) |int_info| switch (int_info.signedness) {
                    .signed => size.bitSize(cg.target) >= int_info.bits,
                    .unsigned => size.bitSize(cg.target) == int_info.bits,
                } else false,
                .unsigned_or_exact_int => |size| if (cg.intInfo(ty)) |int_info| switch (int_info.signedness) {
                    .signed => size.bitSize(cg.target) == int_info.bits,
                    .unsigned => size.bitSize(cg.target) >= int_info.bits,
                } else false,
                .po2_int => |size| if (cg.intInfo(ty)) |int_info|
                    std.math.isPowerOfTwo(int_info.bits) and size.bitSize(cg.target) >= int_info.bits
                else
                    false,
                .signed_po2_int => |size| if (cg.intInfo(ty)) |int_info| switch (int_info.signedness) {
                    .signed => std.math.isPowerOfTwo(int_info.bits) and size.bitSize(cg.target) >= int_info.bits,
                    .unsigned => false,
                } else false,
                .unsigned_po2_or_exact_int => |size| if (cg.intInfo(ty)) |int_info| switch (int_info.signedness) {
                    .signed => size.bitSize(cg.target) == int_info.bits,
                    .unsigned => std.math.isPowerOfTwo(int_info.bits) and size.bitSize(cg.target) >= int_info.bits,
                } else false,
                .remainder_int => |of_is| if (cg.intInfo(ty)) |int_info|
                    of_is.is.bitSize(cg.target) >= (int_info.bits - 1) % of_is.of.bitSize(cg.target) + 1
                else
                    false,
                .exact_remainder_int => |of_is| if (cg.intInfo(ty)) |int_info|
                    of_is.is.bitSize(cg.target) == (int_info.bits - 1) % of_is.of.bitSize(cg.target) + 1
                else
                    false,
                .signed_or_exact_remainder_int => |of_is| if (cg.intInfo(ty)) |int_info| switch (int_info.signedness) {
                    .signed => of_is.is.bitSize(cg.target) >= (int_info.bits - 1) % of_is.of.bitSize(cg.target) + 1,
                    .unsigned => of_is.is.bitSize(cg.target) == (int_info.bits - 1) % of_is.of.bitSize(cg.target) + 1,
                } else false,
                .unsigned_or_exact_remainder_int => |of_is| if (cg.intInfo(ty)) |int_info| switch (int_info.signedness) {
                    .signed => of_is.is.bitSize(cg.target) == (int_info.bits - 1) % of_is.of.bitSize(cg.target) + 1,
                    .unsigned => of_is.is.bitSize(cg.target) >= (int_info.bits - 1) % of_is.of.bitSize(cg.target) + 1,
                } else false,
                .signed_int => |size| if (cg.intInfo(ty)) |int_info| switch (int_info.signedness) {
                    .signed => size.bitSize(cg.target) >= int_info.bits,
                    .unsigned => false,
                } else false,
                .unsigned_int => |size| if (cg.intInfo(ty)) |int_info| switch (int_info.signedness) {
                    .signed => false,
                    .unsigned => size.bitSize(cg.target) >= int_info.bits,
                } else false,
                .elem_size_is => |size| size == ty.elemType2(zcu).abiSize(zcu),
                .po2_elem_size => std.math.isPowerOfTwo(ty.elemType2(zcu).abiSize(zcu)),
                .elem_int => |size| if (cg.intInfo(ty.elemType2(zcu))) |elem_int_info|
                    size.bitSize(cg.target) >= elem_int_info.bits
                else
                    false,
            };
        }
    };

    const Pattern = struct {
        src: [2]Src,
        commute: struct { u8, u8 } = .{ 0, 0 },

        const Src = union(enum) {
            none,
            any,
            imm8,
            imm16,
            imm32,
            simm32,
            to_reg: Register,
            mem,
            to_mem,
            mut_mem,
            to_mut_mem,
            gpr,
            to_gpr,
            mut_gpr,
            to_mut_gpr,
            x87,
            to_x87,
            mut_x87,
            to_mut_x87,
            mmx,
            to_mmx,
            mut_mmx,
            to_mut_mmx,
            mm,
            to_mm,
            mut_mm,
            to_mut_mm,
            sse,
            to_sse,
            mut_sse,
            to_mut_sse,
            xmm,
            to_xmm,
            mut_xmm,
            to_mut_xmm,
            ymm,
            to_ymm,
            mut_ymm,
            to_mut_ymm,

            fn matches(src: Src, temp: Temp, cg: *CodeGen) bool {
                return switch (src) {
                    .none => unreachable,
                    .any => true,
                    .imm8 => switch (temp.tracking(cg).short) {
                        .immediate => |imm| std.math.cast(u8, imm) != null,
                        else => false,
                    },
                    .imm16 => switch (temp.tracking(cg).short) {
                        .immediate => |imm| std.math.cast(u16, imm) != null,
                        else => false,
                    },
                    .imm32 => switch (temp.tracking(cg).short) {
                        .immediate => |imm| std.math.cast(u32, imm) != null,
                        else => false,
                    },
                    .simm32 => switch (temp.tracking(cg).short) {
                        .immediate => |imm| std.math.cast(i32, @as(i64, @bitCast(imm))) != null,
                        else => false,
                    },
                    .mem => temp.tracking(cg).short.isMemory(),
                    .to_mem, .to_mut_mem => true,
                    .mut_mem => temp.isMut(cg) and temp.tracking(cg).short.isMemory(),
                    .to_reg => true,
                    .gpr => temp.typeOf(cg).abiSize(cg.pt.zcu) <= 8 and switch (temp.tracking(cg).short) {
                        .register => |reg| reg.class() == .general_purpose,
                        .register_offset => |reg_off| reg_off.reg.class() == .general_purpose and reg_off.off == 0,
                        else => false,
                    },
                    .mut_gpr => temp.isMut(cg) and temp.typeOf(cg).abiSize(cg.pt.zcu) <= 8 and switch (temp.tracking(cg).short) {
                        .register => |reg| reg.class() == .general_purpose,
                        .register_offset => |reg_off| reg_off.reg.class() == .general_purpose and reg_off.off == 0,
                        else => false,
                    },
                    .to_gpr, .to_mut_gpr => temp.typeOf(cg).abiSize(cg.pt.zcu) <= 8,
                    .x87 => switch (temp.tracking(cg).short) {
                        .register => |reg| reg.class() == .x87,
                        .register_offset => |reg_off| reg_off.reg.class() == .x87 and reg_off.off == 0,
                        else => false,
                    },
                    .mut_x87 => temp.isMut(cg) and switch (temp.tracking(cg).short) {
                        .register => |reg| reg.class() == .x87,
                        .register_offset => |reg_off| reg_off.reg.class() == .x87 and reg_off.off == 0,
                        else => false,
                    },
                    .to_x87, .to_mut_x87 => true,
                    .mmx => switch (temp.tracking(cg).short) {
                        .register => |reg| reg.class() == .mmx,
                        .register_offset => |reg_off| reg_off.reg.class() == .mmx and reg_off.off == 0,
                        else => false,
                    },
                    .mut_mmx => temp.isMut(cg) and switch (temp.tracking(cg).short) {
                        .register => |reg| reg.class() == .mmx,
                        .register_offset => |reg_off| reg_off.reg.class() == .mmx and reg_off.off == 0,
                        else => false,
                    },
                    .to_mmx, .to_mut_mmx => true,
                    .mm => temp.typeOf(cg).abiSize(cg.pt.zcu) == 8 and switch (temp.tracking(cg).short) {
                        .register => |reg| reg.class() == .mmx,
                        .register_offset => |reg_off| reg_off.reg.class() == .mmx and reg_off.off == 0,
                        else => false,
                    },
                    .mut_mm => temp.isMut(cg) and temp.typeOf(cg).abiSize(cg.pt.zcu) == 8 and switch (temp.tracking(cg).short) {
                        .register => |reg| reg.class() == .mmx,
                        .register_offset => |reg_off| reg_off.reg.class() == .mmx and reg_off.off == 0,
                        else => false,
                    },
                    .to_mm, .to_mut_mm => temp.typeOf(cg).abiSize(cg.pt.zcu) == 8,
                    .sse => switch (temp.tracking(cg).short) {
                        .register => |reg| reg.class() == .sse,
                        .register_offset => |reg_off| reg_off.reg.class() == .sse and reg_off.off == 0,
                        else => false,
                    },
                    .mut_sse => temp.isMut(cg) and switch (temp.tracking(cg).short) {
                        .register => |reg| reg.class() == .sse,
                        .register_offset => |reg_off| reg_off.reg.class() == .sse and reg_off.off == 0,
                        else => false,
                    },
                    .to_sse, .to_mut_sse => true,
                    .xmm => temp.typeOf(cg).abiSize(cg.pt.zcu) == 16 and switch (temp.tracking(cg).short) {
                        .register => |reg| reg.class() == .sse,
                        .register_offset => |reg_off| reg_off.reg.class() == .sse and reg_off.off == 0,
                        else => false,
                    },
                    .mut_xmm => temp.isMut(cg) and temp.typeOf(cg).abiSize(cg.pt.zcu) == 16 and switch (temp.tracking(cg).short) {
                        .register => |reg| reg.class() == .sse,
                        .register_offset => |reg_off| reg_off.reg.class() == .sse and reg_off.off == 0,
                        else => false,
                    },
                    .to_xmm, .to_mut_xmm => temp.typeOf(cg).abiSize(cg.pt.zcu) == 16,
                    .ymm => temp.typeOf(cg).abiSize(cg.pt.zcu) == 32 and switch (temp.tracking(cg).short) {
                        .register => |reg| reg.class() == .sse,
                        .register_offset => |reg_off| reg_off.reg.class() == .sse and reg_off.off == 0,
                        else => false,
                    },
                    .mut_ymm => temp.isMut(cg) and temp.typeOf(cg).abiSize(cg.pt.zcu) == 32 and switch (temp.tracking(cg).short) {
                        .register => |reg| reg.class() == .sse,
                        .register_offset => |reg_off| reg_off.reg.class() == .sse and reg_off.off == 0,
                        else => false,
                    },
                    .to_ymm, .to_mut_ymm => temp.typeOf(cg).abiSize(cg.pt.zcu) == 32,
                };
            }

            fn convert(src: Src, temp: *Temp, cg: *CodeGen) !bool {
                return switch (src) {
                    .none => unreachable,
                    .any, .imm8, .imm16, .imm32, .simm32 => false,
                    .mem, .to_mem, .mut_mem, .to_mut_mem => try temp.toBase(cg),
                    .to_reg => |reg| try temp.toReg(reg, cg),
                    .gpr, .to_gpr => try temp.toRegClass(false, .general_purpose, cg),
                    .mut_gpr, .to_mut_gpr => try temp.toRegClass(true, .general_purpose, cg),
                    .x87, .to_x87 => try temp.toRegClass(false, .x87, cg),
                    .mut_x87, .to_mut_x87 => try temp.toRegClass(true, .x87, cg),
                    .mmx, .to_mmx, .mm, .to_mm => try temp.toRegClass(false, .mmx, cg),
                    .mut_mmx, .to_mut_mmx, .mut_mm, .to_mut_mm => try temp.toRegClass(true, .mmx, cg),
                    .sse, .to_sse, .xmm, .to_xmm, .ymm, .to_ymm => try temp.toRegClass(false, .sse, cg),
                    .mut_sse, .to_mut_sse, .mut_xmm, .to_mut_xmm, .mut_ymm, .to_mut_ymm => try temp.toRegClass(true, .sse, cg),
                };
            }
        };
    };

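    /// Describes how to obtain a temporary needed by a `Case`: reuse an
    /// existing operand, allocate a register or memory, or materialize a
    /// constant such as a signed/unsigned min/max splat in memory.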
    const TempSpec = struct {
        type: Type = .noreturn,
        kind: Kind,

        const unused: TempSpec = .{ .kind = .unused };

        const Kind = union(enum) {
            unused,
            any,
            cc: Condition,
            ref: Select.Operand.Ref,
            reg: Register,
            rc: Register.Class,
            mut_rc: struct { ref: Select.Operand.Ref, rc: Register.Class },
            ref_mask: struct { ref: Select.Operand.Ref, info: MaskInfo },
            rc_mask: struct { rc: Register.Class, info: MaskInfo },
            mut_rc_mask: struct { ref: Select.Operand.Ref, rc: Register.Class, info: MaskInfo },
            mem,
            smin_mem: ConstInfo,
            smax_mem: ConstInfo,
            umin_mem: ConstInfo,
            umax_mem: ConstInfo,
            symbol: *const struct { lib: ?[]const u8 = null, name: []const u8 },

            const ConstInfo = struct { ref: Select.Operand.Ref, vectorize_to: ?Memory.Size = null };

            fn finish(kind: Kind, temp: Temp, s: *const Select) void {
                switch (kind) {
                    else => {},
                    inline .rc_mask, .mut_rc_mask, .ref_mask => |mask| temp.asMask(mask.info, s.cg),
                }
            }

            fn pass(kind: Kind) u2 {
                return switch (kind) {
                    .unused => 0,
                    .reg => 1,
                    else => 2,
                };
            }
        };

        fn create(spec: TempSpec, s: *Select) !struct { Temp, bool } {
            const cg = s.cg;
            return switch (spec.kind) {
                .unused => unreachable,
                .any => .{ try cg.tempAlloc(spec.type), true },
                .cc => |cc| .{ try cg.tempInit(spec.type, .{ .eflags = cc }), true },
                .ref => |ref| .{ ref.deref(s), false },
                .reg => |reg| .{ try cg.tempInit(spec.type, .{ .register = reg }), true },
                .rc => |rc| .{ try cg.tempAllocReg(spec.type, regSetForRegClass(rc)), true },
                .mut_rc => |ref_rc| {
                    const temp = ref_rc.ref.deref(s);
                    if (temp.isMut(cg)) switch (temp.tracking(cg).short) {
                        .register => |reg| if (reg.class() == ref_rc.rc) return .{ temp, false },
                        .register_offset => |reg_off| if (reg_off.off == 0 and reg_off.reg.class() == ref_rc.rc) return .{ temp, false },
                        else => {},
                    };
                    return .{ try cg.tempAllocReg(spec.type, regSetForRegClass(ref_rc.rc)), true };
                },
                .ref_mask => |ref_mask| .{ ref_mask.ref.deref(s), false },
                .rc_mask => |rc_mask| .{ try cg.tempAllocReg(spec.type, regSetForRegClass(rc_mask.rc)), true },
                .mut_rc_mask => |ref_rc_mask| {
                    const temp = ref_rc_mask.ref.deref(s);
                    if (temp.isMut(cg)) switch (temp.tracking(cg).short) {
                        .register => |reg| if (reg.class() == ref_rc_mask.rc) return .{ temp, false },
                        .register_offset => |reg_off| if (reg_off.off == 0 and reg_off.reg.class() == ref_rc_mask.rc) return .{ temp, false },
                        else => {},
                    };
                    return .{ try cg.tempAllocReg(spec.type, regSetForRegClass(ref_rc_mask.rc)), true };
                },
                .mem => .{ try cg.tempAllocMem(spec.type), true },
                .smin_mem, .smax_mem, .umin_mem, .umax_mem => |const_info| {
                    const pt = cg.pt;
                    const zcu = pt.zcu;
                    const ip = &zcu.intern_pool;
                    const ty = const_info.ref.deref(s).typeOf(cg);
                    const vector_len, const scalar_ty: Type = switch (ip.indexToKey(ty.toIntern())) {
                        else => .{ null, ty },
                        .vector_type => |vector_type| .{ vector_type.len, .fromInterned(vector_type.child) },
                    };
                    const res_vector_len: ?u32 = if (const_info.vectorize_to) |vectorize_to| switch (vectorize_to) {
                        .none => null,
                        else => @intCast(@divExact(@divExact(vectorize_to.bitSize(cg.target), 8), scalar_ty.abiSize(pt.zcu))),
                    } else vector_len;
                    const res_scalar_ty, const res_scalar_val: Value = res_scalar: switch (scalar_ty.toIntern()) {
                        .bool_type => .{
                            scalar_ty,
                            .fromInterned(switch (spec.kind) {
                                else => unreachable,
                                .smin_mem, .umax_mem => .bool_true,
                                .smax_mem, .umin_mem => .bool_false,
                            }),
                        },
                        else => {
                            const scalar_info: std.builtin.Type.Int = cg.intInfo(scalar_ty) orelse .{
                                .signedness = .signed,
                                .bits = cg.floatBits(scalar_ty).?,
                            };
                            const scalar_int_ty = try pt.intType(scalar_info.signedness, scalar_info.bits);
                            if (scalar_info.bits <= 64) {
                                const int_val: i64 = switch (spec.kind) {
                                    else => unreachable,
                                    .smin_mem => std.math.minInt(i64),
                                    .smax_mem => std.math.maxInt(i64),
                                    .umin_mem => 0,
                                    .umax_mem => -1,
                                };
                                const shift: u6 = @intCast(64 - scalar_info.bits);
                                break :res_scalar .{ scalar_int_ty, switch (scalar_info.signedness) {
                                    .signed => try pt.intValue_i64(scalar_int_ty, int_val >> shift),
                                    .unsigned => try pt.intValue_u64(scalar_int_ty, @as(u64, @bitCast(int_val)) >> shift),
                                } };
                            }
                            var big_int: std.math.big.int.Managed = try .init(cg.gpa);
                            defer big_int.deinit();
                            try big_int.setTwosCompIntLimit(switch (spec.kind) {
                                else => unreachable,
                                .smin_mem, .umin_mem => .min,
                                .smax_mem, .umax_mem => .max,
                            }, switch (spec.kind) {
                                else => unreachable,
                                .smin_mem, .smax_mem => .signed,
                                .umin_mem, .umax_mem => .unsigned,
                            }, scalar_info.bits);
                            try big_int.truncate(&big_int, scalar_info.signedness, scalar_info.bits);
                            break :res_scalar .{ scalar_int_ty, try pt.intValue_big(scalar_int_ty, big_int.toConst()) };
                        },
                    };
                    const res_val: Value = if (res_vector_len) |len| .fromInterned(try pt.intern(.{ .aggregate = .{
                        .ty = (try pt.vectorType(.{
                            .len = len,
                            .child = res_scalar_ty.toIntern(),
                        })).toIntern(),
                        .storage = .{ .repeated_elem = res_scalar_val.toIntern() },
                    } })) else res_scalar_val;
                    return .{ try cg.tempMemFromValue(res_val), true };
                },
                .symbol => |symbol| .{ try cg.tempInit(spec.type, .{ .lea_symbol = .{
                    .sym_index = if (cg.bin_file.cast(.elf)) |elf_file|
                        try elf_file.getGlobalSymbol(symbol.name, symbol.lib)
                    else if (cg.bin_file.cast(.macho)) |macho_file|
                        try macho_file.getGlobalSymbol(symbol.name, symbol.lib)
                    else
                        return cg.fail("external symbols unimplemented for {s}", .{@tagName(cg.bin_file.tag)}),
                } }), true },
            };
        }
    };

    const Instruction = struct {
        Label,
        Mir.Inst.Fixes,
        Mir.Inst.Tag,
        Select.Operand,
        Select.Operand,
        Select.Operand,
        Select.Operand,
    };
    const Label = enum { @"0:", @"1:", @"_" };
    const Operand = struct {
        tag: Tag,
        base: Ref.Sized = .none,
        index: packed struct(u6) {
            ref: Ref,
            scale: Memory.Scale,
        } = .{ .ref = .none, .scale = .@"1" },
        adjust: Adjust = .none,
        imm: i32 = 0,

        const Tag = enum {
            none,
            backward_label,
            forward_label,
            ref,
            simm,
            uimm,
            lea,
            mem,
        };
        const Adjust = packed struct(u8) {
            sign: enum(u1) { neg, pos },
            lhs: enum(u4) {
                none,
                ptr_size,
                ptr_bit_size,
                size,
                size_sub_elem_size,
                src0_unaligned_size,
                bit_size,
                src0_bit_size,
                len,
                elem_limbs,
                src0_elem_size,
                src0_elem_size_times_src1,
                log2_src0_elem_size,
                smin,
                smax,
                umax,
            },
            op: enum(u1) { mul, div },
            rhs: Memory.Scale,

            const none: Adjust = .{ .sign = .pos, .lhs = .none, .op = .mul, .rhs = .@"1" };
            const sub_ptr_size: Adjust = .{ .sign = .neg, .lhs = .ptr_size, .op = .mul, .rhs = .@"1" };
            const add_ptr_bit_size: Adjust = .{ .sign = .pos, .lhs = .ptr_bit_size, .op = .mul, .rhs = .@"1" };
            const add_size: Adjust = .{ .sign = .pos, .lhs = .size, .op = .mul, .rhs = .@"1" };
            const add_size_div_8: Adjust = .{ .sign = .pos, .lhs = .size, .op = .div, .rhs = .@"8" };
            const sub_size_div_8: Adjust = .{ .sign = .neg, .lhs = .size, .op = .div, .rhs = .@"8" };
            const sub_size: Adjust = .{ .sign = .neg, .lhs = .size, .op = .mul, .rhs = .@"1" };
            const add_size_sub_elem_size: Adjust = .{ .sign = .pos, .lhs = .size_sub_elem_size, .op = .mul, .rhs = .@"1" };
            const add_src0_unaligned_size: Adjust = .{ .sign = .pos, .lhs = .src0_unaligned_size, .op = .mul, .rhs = .@"1" };
            const sub_src0_unaligned_size: Adjust = .{ .sign = .neg, .lhs = .src0_unaligned_size, .op = .mul, .rhs = .@"1" };
            const add_2_bit_size: Adjust = .{ .sign = .pos, .lhs = .bit_size, .op = .mul, .rhs = .@"2" };
            const add_bit_size: Adjust = .{ .sign = .pos, .lhs = .bit_size, .op = .mul, .rhs = .@"1" };
            const sub_bit_size: Adjust = .{ .sign = .neg, .lhs = .bit_size, .op = .mul, .rhs = .@"1" };
            const add_src0_bit_size: Adjust = .{ .sign = .pos, .lhs = .src0_bit_size, .op = .mul, .rhs = .@"1" };
            const sub_src0_bit_size: Adjust = .{ .sign = .neg, .lhs = .src0_bit_size, .op = .mul, .rhs = .@"1" };
            const add_8_len: Adjust = .{ .sign = .pos, .lhs = .len, .op = .mul, .rhs = .@"8" };
            const add_4_len: Adjust = .{ .sign = .pos, .lhs = .len, .op = .mul, .rhs = .@"4" };
            const add_3_len: Adjust = .{ .sign = .pos, .lhs = .len, .op = .mul, .rhs = .@"3" };
            const add_2_len: Adjust = .{ .sign = .pos, .lhs = .len, .op = .mul, .rhs = .@"2" };
            const add_len: Adjust = .{ .sign = .pos, .lhs = .len, .op = .mul, .rhs = .@"1" };
            const sub_len: Adjust = .{ .sign = .neg, .lhs = .len, .op = .mul, .rhs = .@"1" };
            const add_src0_elem_size: Adjust = .{ .sign = .pos, .lhs = .src0_elem_size, .op = .mul, .rhs = .@"1" };
            const add_2_src0_elem_size: Adjust = .{ .sign = .pos, .lhs = .src0_elem_size, .op = .mul, .rhs = .@"2" };
            const add_4_src0_elem_size: Adjust = .{ .sign = .pos, .lhs = .src0_elem_size, .op = .mul, .rhs = .@"4" };
            const add_8_src0_elem_size: Adjust = .{ .sign = .pos, .lhs = .src0_elem_size, .op = .mul, .rhs = .@"8" };
            const add_src0_elem_size_div_8: Adjust = .{ .sign = .pos, .lhs = .src0_elem_size, .op = .div, .rhs = .@"8" };
            const sub_src0_elem_size: Adjust = .{ .sign = .neg, .lhs = .src0_elem_size, .op = .mul, .rhs = .@"1" };
            const add_src0_elem_size_times_src1: Adjust = .{ .sign = .pos, .lhs = .src0_elem_size_times_src1, .op = .mul, .rhs = .@"1" };
            const sub_src0_elem_size_times_src1: Adjust = .{ .sign = .neg, .lhs = .src0_elem_size_times_src1, .op = .mul, .rhs = .@"1" };
            const add_log2_src0_elem_size: Adjust = .{ .sign = .pos, .lhs = .log2_src0_elem_size, .op = .mul, .rhs = .@"1" };
            const add_elem_limbs: Adjust = .{ .sign = .pos, .lhs = .elem_limbs, .op = .mul, .rhs = .@"1" };
            const add_umax: Adjust = .{ .sign = .pos, .lhs = .umax, .op = .mul, .rhs = .@"1" };
        };
        const Ref = enum(u4) {
            tmp0,
            tmp1,
            tmp2,
            tmp3,
            tmp4,
            tmp5,
            tmp6,
            tmp7,
            tmp8,
            dst0,
            src0,
            src1,
            none,

            const Sized = packed struct(u8) {
                ref: Ref,
                size: Memory.Size,

                const none: Sized = .{ .ref = .none, .size = .none };

                const tmp0: Sized = .{ .ref = .tmp0, .size = .none };
                const tmp0b: Sized = .{ .ref = .tmp0, .size = .byte };
                const tmp0w: Sized = .{ .ref = .tmp0, .size = .word };
                const tmp0d: Sized = .{ .ref = .tmp0, .size = .dword };
                const tmp0p: Sized = .{ .ref = .tmp0, .size = .ptr };
                const tmp0q: Sized = .{ .ref = .tmp0, .size = .qword };
                const tmp0t: Sized = .{ .ref = .tmp0, .size = .tbyte };
                const tmp0x: Sized = .{ .ref = .tmp0, .size = .xword };
                const tmp0y: Sized = .{ .ref = .tmp0, .size = .yword };

                const tmp1: Sized = .{ .ref = .tmp1, .size = .none };
                const tmp1b: Sized = .{ .ref = .tmp1, .size = .byte };
                const tmp1w: Sized = .{ .ref = .tmp1, .size = .word };
                const tmp1d: Sized = .{ .ref = .tmp1, .size = .dword };
                const tmp1p: Sized = .{ .ref = .tmp1, .size = .ptr };
                const tmp1q: Sized = .{ .ref = .tmp1, .size = .qword };
                const tmp1t: Sized = .{ .ref = .tmp1, .size = .tbyte };
                const tmp1x: Sized = .{ .ref = .tmp1, .size = .xword };
                const tmp1y: Sized = .{ .ref = .tmp1, .size = .yword };

                const tmp2: Sized = .{ .ref = .tmp2, .size = .none };
                const tmp2b: Sized = .{ .ref = .tmp2, .size = .byte };
                const tmp2w: Sized = .{ .ref = .tmp2, .size = .word };
                const tmp2d: Sized = .{ .ref = .tmp2, .size = .dword };
                const tmp2p: Sized = .{ .ref = .tmp2, .size = .ptr };
                const tmp2q: Sized = .{ .ref = .tmp2, .size = .qword };
                const tmp2t: Sized = .{ .ref = .tmp2, .size = .tbyte };
                const tmp2x: Sized = .{ .ref = .tmp2, .size = .xword };
                const tmp2y: Sized = .{ .ref = .tmp2, .size = .yword };

                const tmp3: Sized = .{ .ref = .tmp3, .size = .none };
                const tmp3b: Sized = .{ .ref = .tmp3, .size = .byte };
                const tmp3w: Sized = .{ .ref = .tmp3, .size = .word };
                const tmp3d: Sized = .{ .ref = .tmp3, .size = .dword };
                const tmp3p: Sized = .{ .ref = .tmp3, .size = .ptr };
                const tmp3q: Sized = .{ .ref = .tmp3, .size = .qword };
                const tmp3t: Sized = .{ .ref = .tmp3, .size = .tbyte };
                const tmp3x: Sized = .{ .ref = .tmp3, .size = .xword };
                const tmp3y: Sized = .{ .ref = .tmp3, .size = .yword };

                const tmp4: Sized = .{ .ref = .tmp4, .size = .none };
                const tmp4b: Sized = .{ .ref = .tmp4, .size = .byte };
                const tmp4w: Sized = .{ .ref = .tmp4, .size = .word };
                const tmp4d: Sized = .{ .ref = .tmp4, .size = .dword };
                const tmp4p: Sized = .{ .ref = .tmp4, .size = .ptr };
                const tmp4q: Sized = .{ .ref = .tmp4, .size = .qword };
                const tmp4t: Sized = .{ .ref = .tmp4, .size = .tbyte };
                const tmp4x: Sized = .{ .ref = .tmp4, .size = .xword };
                const tmp4y: Sized = .{ .ref = .tmp4, .size = .yword };

                const tmp5: Sized = .{ .ref = .tmp5, .size = .none };
                const tmp5b: Sized = .{ .ref = .tmp5, .size = .byte };
                const tmp5w: Sized = .{ .ref = .tmp5, .size = .word };
                const tmp5d: Sized = .{ .ref = .tmp5, .size = .dword };
                const tmp5p: Sized = .{ .ref = .tmp5, .size = .ptr };
                const tmp5q: Sized = .{ .ref = .tmp5, .size = .qword };
                const tmp5t: Sized = .{ .ref = .tmp5, .size = .tbyte };
                const tmp5x: Sized = .{ .ref = .tmp5, .size = .xword };
                const tmp5y: Sized = .{ .ref = .tmp5, .size = .yword };

                const tmp6: Sized = .{ .ref = .tmp6, .size = .none };
                const tmp6b: Sized = .{ .ref = .tmp6, .size = .byte };
                const tmp6w: Sized = .{ .ref = .tmp6, .size = .word };
                const tmp6d: Sized = .{ .ref = .tmp6, .size = .dword };
                const tmp6p: Sized = .{ .ref = .tmp6, .size = .ptr };
                const tmp6q: Sized = .{ .ref = .tmp6, .size = .qword };
                const tmp6t: Sized = .{ .ref = .tmp6, .size = .tbyte };
                const tmp6x: Sized = .{ .ref = .tmp6, .size = .xword };
                const tmp6y: Sized = .{ .ref = .tmp6, .size = .yword };

                const tmp7: Sized = .{ .ref = .tmp7, .size = .none };
                const tmp7b: Sized = .{ .ref = .tmp7, .size = .byte };
                const tmp7w: Sized = .{ .ref = .tmp7, .size = .word };
                const tmp7d: Sized = .{ .ref = .tmp7, .size = .dword };
                const tmp7p: Sized = .{ .ref = .tmp7, .size = .ptr };
                const tmp7q: Sized = .{ .ref = .tmp7, .size = .qword };
                const tmp7t: Sized = .{ .ref = .tmp7, .size = .tbyte };
                const tmp7x: Sized = .{ .ref = .tmp7, .size = .xword };
                const tmp7y: Sized = .{ .ref = .tmp7, .size = .yword };

                const tmp8: Sized = .{ .ref = .tmp8, .size = .none };
                const tmp8b: Sized = .{ .ref = .tmp8, .size = .byte };
                const tmp8w: Sized = .{ .ref = .tmp8, .size = .word };
                const tmp8d: Sized = .{ .ref = .tmp8, .size = .dword };
                const tmp8p: Sized = .{ .ref = .tmp8, .size = .ptr };
                const tmp8q: Sized = .{ .ref = .tmp8, .size = .qword };
                const tmp8t: Sized = .{ .ref = .tmp8, .size = .tbyte };
                const tmp8x: Sized = .{ .ref = .tmp8, .size = .xword };
                const tmp8y: Sized = .{ .ref = .tmp8, .size = .yword };

                const dst0: Sized = .{ .ref = .dst0, .size = .none };
                const dst0b: Sized = .{ .ref = .dst0, .size = .byte };
                const dst0w: Sized = .{ .ref = .dst0, .size = .word };
                const dst0d: Sized = .{ .ref = .dst0, .size = .dword };
                const dst0p: Sized = .{ .ref = .dst0, .size = .ptr };
                const dst0q: Sized = .{ .ref = .dst0, .size = .qword };
                const dst0t: Sized = .{ .ref = .dst0, .size = .tbyte };
                const dst0x: Sized = .{ .ref = .dst0, .size = .xword };
                const dst0y: Sized = .{ .ref = .dst0, .size = .yword };

                const src0: Sized = .{ .ref = .src0, .size = .none };
                const src0b: Sized = .{ .ref = .src0, .size = .byte };
                const src0w: Sized = .{ .ref = .src0, .size = .word };
                const src0d: Sized = .{ .ref = .src0, .size = .dword };
                const src0p: Sized = .{ .ref = .src0, .size = .ptr };
                const src0q: Sized = .{ .ref = .src0, .size = .qword };
                const src0t: Sized = .{ .ref = .src0, .size = .tbyte };
                const src0x: Sized = .{ .ref = .src0, .size = .xword };
                const src0y: Sized = .{ .ref = .src0, .size = .yword };

                const src1: Sized = .{ .ref = .src1, .size = .none };
                const src1b: Sized = .{ .ref = .src1, .size = .byte };
                const src1w: Sized = .{ .ref = .src1, .size = .word };
                const src1d: Sized = .{ .ref = .src1, .size = .dword };
                const src1p: Sized = .{ .ref = .src1, .size = .ptr };
                const src1q: Sized = .{ .ref = .src1, .size = .qword };
                const src1t: Sized = .{ .ref = .src1, .size = .tbyte };
                const src1x: Sized = .{ .ref = .src1, .size = .xword };
                const src1y: Sized = .{ .ref = .src1, .size = .yword };
            };

            fn deref(ref: Ref, s: *const Select) Temp {
                return s.temps[@intFromEnum(ref)];
            }
        };

const @"_": Select.Operand = .{ .tag = .none };
|
|
|
|
const @"0b": Select.Operand = .{ .tag = .backward_label, .base = .{ .ref = .tmp0, .size = .none } };
|
|
const @"0f": Select.Operand = .{ .tag = .forward_label, .base = .{ .ref = .tmp0, .size = .none } };
|
|
const @"1b": Select.Operand = .{ .tag = .backward_label, .base = .{ .ref = .tmp1, .size = .none } };
|
|
const @"1f": Select.Operand = .{ .tag = .forward_label, .base = .{ .ref = .tmp1, .size = .none } };
|
|
|
|
const tmp0b: Select.Operand = .{ .tag = .ref, .base = .tmp0b };
|
|
const tmp0w: Select.Operand = .{ .tag = .ref, .base = .tmp0w };
|
|
const tmp0d: Select.Operand = .{ .tag = .ref, .base = .tmp0d };
|
|
const tmp0p: Select.Operand = .{ .tag = .ref, .base = .tmp0p };
|
|
const tmp0q: Select.Operand = .{ .tag = .ref, .base = .tmp0q };
|
|
const tmp0t: Select.Operand = .{ .tag = .ref, .base = .tmp0t };
|
|
const tmp0x: Select.Operand = .{ .tag = .ref, .base = .tmp0x };
|
|
const tmp0y: Select.Operand = .{ .tag = .ref, .base = .tmp0y };
|
|
|
|
const tmp1b: Select.Operand = .{ .tag = .ref, .base = .tmp1b };
|
|
const tmp1w: Select.Operand = .{ .tag = .ref, .base = .tmp1w };
|
|
const tmp1d: Select.Operand = .{ .tag = .ref, .base = .tmp1d };
|
|
const tmp1p: Select.Operand = .{ .tag = .ref, .base = .tmp1p };
|
|
const tmp1q: Select.Operand = .{ .tag = .ref, .base = .tmp1q };
|
|
const tmp1t: Select.Operand = .{ .tag = .ref, .base = .tmp1t };
|
|
const tmp1x: Select.Operand = .{ .tag = .ref, .base = .tmp1x };
|
|
const tmp1y: Select.Operand = .{ .tag = .ref, .base = .tmp1y };
|
|
|
|
const tmp2b: Select.Operand = .{ .tag = .ref, .base = .tmp2b };
|
|
const tmp2w: Select.Operand = .{ .tag = .ref, .base = .tmp2w };
|
|
const tmp2d: Select.Operand = .{ .tag = .ref, .base = .tmp2d };
|
|
const tmp2p: Select.Operand = .{ .tag = .ref, .base = .tmp2p };
|
|
const tmp2q: Select.Operand = .{ .tag = .ref, .base = .tmp2q };
|
|
const tmp2t: Select.Operand = .{ .tag = .ref, .base = .tmp2t };
|
|
const tmp2x: Select.Operand = .{ .tag = .ref, .base = .tmp2x };
|
|
const tmp2y: Select.Operand = .{ .tag = .ref, .base = .tmp2y };
|
|
|
|
const tmp3b: Select.Operand = .{ .tag = .ref, .base = .tmp3b };
|
|
const tmp3w: Select.Operand = .{ .tag = .ref, .base = .tmp3w };
|
|
const tmp3d: Select.Operand = .{ .tag = .ref, .base = .tmp3d };
|
|
const tmp3p: Select.Operand = .{ .tag = .ref, .base = .tmp3p };
|
|
const tmp3q: Select.Operand = .{ .tag = .ref, .base = .tmp3q };
|
|
const tmp3t: Select.Operand = .{ .tag = .ref, .base = .tmp3t };
|
|
const tmp3x: Select.Operand = .{ .tag = .ref, .base = .tmp3x };
|
|
const tmp3y: Select.Operand = .{ .tag = .ref, .base = .tmp3y };
|
|
|
|
const tmp4b: Select.Operand = .{ .tag = .ref, .base = .tmp4b };
const tmp4w: Select.Operand = .{ .tag = .ref, .base = .tmp4w };
const tmp4d: Select.Operand = .{ .tag = .ref, .base = .tmp4d };
const tmp4p: Select.Operand = .{ .tag = .ref, .base = .tmp4p };
const tmp4q: Select.Operand = .{ .tag = .ref, .base = .tmp4q };
const tmp4t: Select.Operand = .{ .tag = .ref, .base = .tmp4t };
const tmp4x: Select.Operand = .{ .tag = .ref, .base = .tmp4x };
const tmp4y: Select.Operand = .{ .tag = .ref, .base = .tmp4y };

const tmp5b: Select.Operand = .{ .tag = .ref, .base = .tmp5b };
const tmp5w: Select.Operand = .{ .tag = .ref, .base = .tmp5w };
const tmp5d: Select.Operand = .{ .tag = .ref, .base = .tmp5d };
const tmp5p: Select.Operand = .{ .tag = .ref, .base = .tmp5p };
const tmp5q: Select.Operand = .{ .tag = .ref, .base = .tmp5q };
const tmp5t: Select.Operand = .{ .tag = .ref, .base = .tmp5t };
const tmp5x: Select.Operand = .{ .tag = .ref, .base = .tmp5x };
const tmp5y: Select.Operand = .{ .tag = .ref, .base = .tmp5y };

const tmp6b: Select.Operand = .{ .tag = .ref, .base = .tmp6b };
const tmp6w: Select.Operand = .{ .tag = .ref, .base = .tmp6w };
const tmp6d: Select.Operand = .{ .tag = .ref, .base = .tmp6d };
const tmp6p: Select.Operand = .{ .tag = .ref, .base = .tmp6p };
const tmp6q: Select.Operand = .{ .tag = .ref, .base = .tmp6q };
const tmp6t: Select.Operand = .{ .tag = .ref, .base = .tmp6t };
const tmp6x: Select.Operand = .{ .tag = .ref, .base = .tmp6x };
const tmp6y: Select.Operand = .{ .tag = .ref, .base = .tmp6y };

const tmp7b: Select.Operand = .{ .tag = .ref, .base = .tmp7b };
const tmp7w: Select.Operand = .{ .tag = .ref, .base = .tmp7w };
const tmp7d: Select.Operand = .{ .tag = .ref, .base = .tmp7d };
const tmp7p: Select.Operand = .{ .tag = .ref, .base = .tmp7p };
const tmp7q: Select.Operand = .{ .tag = .ref, .base = .tmp7q };
const tmp7t: Select.Operand = .{ .tag = .ref, .base = .tmp7t };
const tmp7x: Select.Operand = .{ .tag = .ref, .base = .tmp7x };
const tmp7y: Select.Operand = .{ .tag = .ref, .base = .tmp7y };

const tmp8b: Select.Operand = .{ .tag = .ref, .base = .tmp8b };
const tmp8w: Select.Operand = .{ .tag = .ref, .base = .tmp8w };
const tmp8d: Select.Operand = .{ .tag = .ref, .base = .tmp8d };
const tmp8p: Select.Operand = .{ .tag = .ref, .base = .tmp8p };
const tmp8q: Select.Operand = .{ .tag = .ref, .base = .tmp8q };
const tmp8t: Select.Operand = .{ .tag = .ref, .base = .tmp8t };
const tmp8x: Select.Operand = .{ .tag = .ref, .base = .tmp8x };
const tmp8y: Select.Operand = .{ .tag = .ref, .base = .tmp8y };

const dst0b: Select.Operand = .{ .tag = .ref, .base = .dst0b };
const dst0w: Select.Operand = .{ .tag = .ref, .base = .dst0w };
const dst0d: Select.Operand = .{ .tag = .ref, .base = .dst0d };
const dst0p: Select.Operand = .{ .tag = .ref, .base = .dst0p };
const dst0q: Select.Operand = .{ .tag = .ref, .base = .dst0q };
const dst0t: Select.Operand = .{ .tag = .ref, .base = .dst0t };
const dst0x: Select.Operand = .{ .tag = .ref, .base = .dst0x };
const dst0y: Select.Operand = .{ .tag = .ref, .base = .dst0y };

const src0b: Select.Operand = .{ .tag = .ref, .base = .src0b };
const src0w: Select.Operand = .{ .tag = .ref, .base = .src0w };
const src0d: Select.Operand = .{ .tag = .ref, .base = .src0d };
const src0p: Select.Operand = .{ .tag = .ref, .base = .src0p };
const src0q: Select.Operand = .{ .tag = .ref, .base = .src0q };
const src0t: Select.Operand = .{ .tag = .ref, .base = .src0t };
const src0x: Select.Operand = .{ .tag = .ref, .base = .src0x };
const src0y: Select.Operand = .{ .tag = .ref, .base = .src0y };

const src1b: Select.Operand = .{ .tag = .ref, .base = .src1b };
const src1w: Select.Operand = .{ .tag = .ref, .base = .src1w };
const src1d: Select.Operand = .{ .tag = .ref, .base = .src1d };
const src1p: Select.Operand = .{ .tag = .ref, .base = .src1p };
const src1q: Select.Operand = .{ .tag = .ref, .base = .src1q };
const src1t: Select.Operand = .{ .tag = .ref, .base = .src1t };
const src1x: Select.Operand = .{ .tag = .ref, .base = .src1x };
const src1y: Select.Operand = .{ .tag = .ref, .base = .src1y };
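
// Size-suffix convention for the refs above: b/w/d/q = byte/word/dword/qword,
// t = tbyte, x/y = 128/256-bit vector; `p` is presumably the pointer-width
// view (not directly evident from this section).
//
// Immediate operand constructors: `si`/`ui` wrap a literal signed/unsigned
// value, `sa`/`ua` derive the value from a referenced operand via an `Adjust`
// (resolved in `adjustedImm` below), and `sia`/`uia` combine both forms.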
fn si(imm: i32) Select.Operand {
    return .{ .tag = .simm, .imm = imm };
}
fn sa(base: Ref.Sized, adjust: Adjust) Select.Operand {
    return .{ .tag = .simm, .base = base, .adjust = adjust };
}
fn sia(imm: i32, base: Ref.Sized, adjust: Adjust) Select.Operand {
    return .{ .tag = .simm, .base = base, .adjust = adjust, .imm = imm };
}
fn ui(imm: u32) Select.Operand {
    return .{ .tag = .uimm, .imm = @bitCast(imm) };
}
fn ua(base: Ref.Sized, adjust: Adjust) Select.Operand {
    return .{ .tag = .uimm, .base = base, .adjust = adjust };
}
fn uia(imm: u32, base: Ref.Sized, adjust: Adjust) Select.Operand {
    return .{ .tag = .uimm, .base = base, .adjust = adjust, .imm = @bitCast(imm) };
}

fn rm(mode: bits.RoundMode) Select.Operand {
    return .{ .tag = .uimm, .imm = @intCast(mode.imm().unsigned) };
}
fn sp(pred: bits.SseFloatPredicate) Select.Operand {
    return .{ .tag = .uimm, .imm = @intCast(pred.imm().unsigned) };
}
fn vp(pred: bits.VexFloatPredicate) Select.Operand {
    return .{ .tag = .uimm, .imm = @intCast(pred.imm().unsigned) };
}
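
// Address-operand constructors; the suffix spells out the optional parts:
// `a` = Adjust, `d` = literal displacement, `i` = index ref (implicit scale
// of 1), `si` = explicitly scaled index, and combinations thereof. The
// `lea*` forms require the base ref to be tracked in a register, while the
// `mem*` forms accept any tracked value that can produce a memory operand.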
fn lea(size: Memory.Size, base: Ref) Select.Operand {
    return .{
        .tag = .lea,
        .base = .{ .ref = base, .size = size },
    };
}
fn leaa(size: Memory.Size, base: Ref, adjust: Adjust) Select.Operand {
    return .{
        .tag = .lea,
        .base = .{ .ref = base, .size = size },
        .adjust = adjust,
    };
}
fn lead(size: Memory.Size, base: Ref, disp: i32) Select.Operand {
    return .{
        .tag = .lea,
        .base = .{ .ref = base, .size = size },
        .imm = disp,
    };
}
fn leai(size: Memory.Size, base: Ref, index: Ref) Select.Operand {
    return .{
        .tag = .lea,
        .base = .{ .ref = base, .size = size },
        .index = .{ .ref = index, .scale = .@"1" },
    };
}
fn leaia(size: Memory.Size, base: Ref, index: Ref, adjust: Adjust) Select.Operand {
    return .{
        .tag = .lea,
        .base = .{ .ref = base, .size = size },
        .index = .{ .ref = index, .scale = .@"1" },
        .adjust = adjust,
    };
}
fn leaid(size: Memory.Size, base: Ref, index: Ref, disp: i32) Select.Operand {
    return .{
        .tag = .lea,
        .base = .{ .ref = base, .size = size },
        .index = .{ .ref = index, .scale = .@"1" },
        .imm = disp,
    };
}
fn leasi(size: Memory.Size, base: Ref, scale: Memory.Scale, index: Ref) Select.Operand {
    return .{
        .tag = .lea,
        .base = .{ .ref = base, .size = size },
        .index = .{ .ref = index, .scale = scale },
    };
}
fn leasid(size: Memory.Size, base: Ref, scale: Memory.Scale, index: Ref, disp: i32) Select.Operand {
    return .{
        .tag = .lea,
        .base = .{ .ref = base, .size = size },
        .index = .{ .ref = index, .scale = scale },
        .imm = disp,
    };
}
fn leasiad(size: Memory.Size, base: Ref, scale: Memory.Scale, index: Ref, adjust: Adjust, disp: i32) Select.Operand {
    return .{
        .tag = .lea,
        .base = .{ .ref = base, .size = size },
        .index = .{ .ref = index, .scale = scale },
        .adjust = adjust,
        .imm = disp,
    };
}

fn mem(base: Ref.Sized) Select.Operand {
    return .{
        .tag = .mem,
        .base = base,
    };
}
fn memd(base: Ref.Sized, disp: i32) Select.Operand {
    return .{
        .tag = .mem,
        .base = base,
        .imm = disp,
    };
}
fn mema(base: Ref.Sized, adjust: Adjust) Select.Operand {
    return .{
        .tag = .mem,
        .base = base,
        .adjust = adjust,
    };
}
fn memad(base: Ref.Sized, adjust: Adjust, disp: i32) Select.Operand {
    return .{
        .tag = .mem,
        .base = base,
        .adjust = adjust,
        .imm = disp,
    };
}
fn memi(base: Ref.Sized, index: Ref) Select.Operand {
    return .{
        .tag = .mem,
        .base = base,
        .index = .{ .ref = index, .scale = .@"1" },
    };
}
fn memia(base: Ref.Sized, index: Ref, adjust: Adjust) Select.Operand {
    return .{
        .tag = .mem,
        .base = base,
        .index = .{ .ref = index, .scale = .@"1" },
        .adjust = adjust,
    };
}
fn memiad(base: Ref.Sized, index: Ref, adjust: Adjust, disp: i32) Select.Operand {
    return .{
        .tag = .mem,
        .base = base,
        .index = .{ .ref = index, .scale = .@"1" },
        .adjust = adjust,
        .imm = disp,
    };
}
fn memid(base: Ref.Sized, index: Ref, disp: i32) Select.Operand {
    return .{
        .tag = .mem,
        .base = base,
        .index = .{ .ref = index, .scale = .@"1" },
        .imm = disp,
    };
}
fn memsi(base: Ref.Sized, scale: Memory.Scale, index: Ref) Select.Operand {
    return .{
        .tag = .mem,
        .base = base,
        .index = .{ .ref = index, .scale = scale },
    };
}
fn memsia(base: Ref.Sized, scale: Memory.Scale, index: Ref, adjust: Adjust) Select.Operand {
    return .{
        .tag = .mem,
        .base = base,
        .index = .{ .ref = index, .scale = scale },
        .adjust = adjust,
    };
}
fn memsid(base: Ref.Sized, scale: Memory.Scale, index: Ref, disp: i32) Select.Operand {
    return .{
        .tag = .mem,
        .base = base,
        .index = .{ .ref = index, .scale = scale },
        .imm = disp,
    };
}
fn memsiad(base: Ref.Sized, scale: Memory.Scale, index: Ref, adjust: Adjust, disp: i32) Select.Operand {
    return .{
        .tag = .mem,
        .base = base,
        .index = .{ .ref = index, .scale = scale },
        .adjust = adjust,
        .imm = disp,
    };
}
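
// An `Adjust` resolves below to `imm ± (lhs << log2(rhs))` for `.mul` (or an
// exact right shift for `.div`), where `lhs` is a target- or type-derived
// quantity of the referenced operand: pointer size, ABI size, bit size,
// vector length, limb count, or a signed/unsigned integer bound.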
fn adjustedImm(op: Select.Operand, comptime SignedImm: type, s: *const Select) SignedImm {
    const UnsignedImm = @Type(.{
        .int = .{ .signedness = .unsigned, .bits = @typeInfo(SignedImm).int.bits },
    });
    const lhs: SignedImm = lhs: switch (op.adjust.lhs) {
        .none => 0,
        .ptr_size => @divExact(s.cg.target.ptrBitWidth(), 8),
        .ptr_bit_size => s.cg.target.ptrBitWidth(),
        .size => @intCast(op.base.ref.deref(s).typeOf(s.cg).abiSize(s.cg.pt.zcu)),
        .size_sub_elem_size => {
            const ty = op.base.ref.deref(s).typeOf(s.cg);
            break :lhs @intCast(ty.abiSize(s.cg.pt.zcu) - ty.elemType2(s.cg.pt.zcu).abiSize(s.cg.pt.zcu));
        },
        .src0_unaligned_size => @intCast(s.cg.unalignedSize(Select.Operand.Ref.src0.deref(s).typeOf(s.cg))),
        .bit_size => @intCast(op.base.ref.deref(s).typeOf(s.cg).scalarType(s.cg.pt.zcu).bitSize(s.cg.pt.zcu)),
        .src0_bit_size => @intCast(Select.Operand.Ref.src0.deref(s).typeOf(s.cg).scalarType(s.cg.pt.zcu).bitSize(s.cg.pt.zcu)),
        .len => @intCast(op.base.ref.deref(s).typeOf(s.cg).vectorLen(s.cg.pt.zcu)),
        .elem_limbs => @intCast(@divExact(
            op.base.ref.deref(s).typeOf(s.cg).scalarType(s.cg.pt.zcu).abiSize(s.cg.pt.zcu),
            @divExact(op.base.size.bitSize(s.cg.target), 8),
        )),
        .src0_elem_size => @intCast(Select.Operand.Ref.src0.deref(s).typeOf(s.cg).elemType2(s.cg.pt.zcu).abiSize(s.cg.pt.zcu)),
        .src0_elem_size_times_src1 => @intCast(Select.Operand.Ref.src0.deref(s).typeOf(s.cg).elemType2(s.cg.pt.zcu).abiSize(s.cg.pt.zcu) *
            Select.Operand.Ref.src1.deref(s).tracking(s.cg).short.immediate),
        .log2_src0_elem_size => @intCast(std.math.log2(Select.Operand.Ref.src0.deref(s).typeOf(s.cg).elemType2(s.cg.pt.zcu).abiSize(s.cg.pt.zcu))),
        .smin => @as(SignedImm, std.math.minInt(SignedImm)) >> @truncate(
            -%op.base.ref.deref(s).typeOf(s.cg).scalarType(s.cg.pt.zcu).bitSize(s.cg.pt.zcu),
        ),
        .smax => @as(SignedImm, std.math.maxInt(SignedImm)) >> @truncate(
            -%op.base.ref.deref(s).typeOf(s.cg).scalarType(s.cg.pt.zcu).bitSize(s.cg.pt.zcu),
        ),
        .umax => @bitCast(@as(UnsignedImm, std.math.maxInt(UnsignedImm)) >> @truncate(
            -%op.base.ref.deref(s).typeOf(s.cg).scalarType(s.cg.pt.zcu).bitSize(s.cg.pt.zcu),
        )),
    };
    const rhs = op.adjust.rhs.toLog2();
    const res = res: switch (op.adjust.op) {
        .mul => {
            const res = @shlWithOverflow(lhs, rhs);
            assert(res[1] == 0);
            break :res res[0];
        },
        .div => @shrExact(lhs, rhs),
    };
    return switch (op.adjust.sign) {
        .neg => op.imm - res,
        .pos => op.imm + res,
    };
}
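
// Convert a template operand into a concrete `CodeGen.Operand`, consulting
// the current tracking state of any referenced temp: immediates are narrowed
// to the signed form when they fit the operand size, registers are aliased
// to the requested width, and other tracked values become memory operands.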
fn lower(op: Select.Operand, s: *Select) !CodeGen.Operand {
    return switch (op.tag) {
        .none => .none,
        .backward_label => .{ .inst = s.labels[@intFromEnum(op.base.ref)].backward.? },
        .forward_label => for (&s.labels[@intFromEnum(op.base.ref)].forward) |*label| {
            if (label.*) |_| continue;
            label.* = @intCast(s.cg.mir_instructions.len);
            break .{ .inst = undefined };
        } else unreachable,
        .ref => switch (op.base.ref.deref(s).tracking(s.cg).short) {
            .immediate => |imm| .{ .imm = switch (op.base.size) {
                .byte => if (std.math.cast(i8, @as(i64, @bitCast(imm)))) |simm| .s(simm) else .u(@as(u8, @intCast(imm))),
                .word => if (std.math.cast(i16, @as(i64, @bitCast(imm)))) |simm| .s(simm) else .u(@as(u16, @intCast(imm))),
                .dword => if (std.math.cast(i32, @as(i64, @bitCast(imm)))) |simm| .s(simm) else .u(@as(u32, @intCast(imm))),
                .qword => if (std.math.cast(i32, @as(i64, @bitCast(imm)))) |simm| .s(simm) else .u(imm),
                else => unreachable,
            } },
            else => |mcv| .{ .mem = try mcv.mem(s.cg, .{ .size = op.base.size }) },
            .register => |reg| .{ .reg = s.lowerReg(registerAlias(reg, @intCast(@divExact(op.base.size.bitSize(s.cg.target), 8)))) },
            .lea_symbol => |sym_off| .{ .imm = .rel(sym_off) },
        },
        .simm => .{ .imm = .s(op.adjustedImm(i32, s)) },
        .uimm => .{ .imm = .u(@bitCast(op.adjustedImm(i64, s))) },
        .lea => .{ .mem = .{
            .base = .{ .reg = registerAlias(op.base.ref.deref(s).tracking(s.cg).short.register, @divExact(s.cg.target.ptrBitWidth(), 8)) },
            .mod = .{ .rm = .{
                .size = op.base.size,
                .index = switch (op.index.ref) {
                    else => |ref| registerAlias(ref.deref(s).tracking(s.cg).short.register, @divExact(s.cg.target.ptrBitWidth(), 8)),
                    .none => .none,
                },
                .scale = op.index.scale,
                .disp = op.adjustedImm(i32, s),
            } },
        } },
        .mem => .{ .mem = try op.base.ref.deref(s).tracking(s.cg).short.mem(s.cg, .{
            .size = op.base.size,
            .index = switch (op.index.ref) {
                else => |ref| registerAlias(ref.deref(s).tracking(s.cg).short.register, @divExact(s.cg.target.ptrBitWidth(), 8)),
                .none => .none,
            },
            .scale = op.index.scale,
            .disp = op.adjustedImm(i32, s),
        }) },
    };
}
};
};
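
/// Instruction-selection driver: walks `cases` in order, skipping any case
/// whose required CPU features, destination constraints, or source
/// constraints are not satisfied, then tries each operand `pattern` of the
/// case. The first pattern that matches (after any permitted source
/// conversions) has its temps allocated and its MIR body emitted; if no
/// case applies, selection fails with `error.SelectFailed`.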
fn select(
    cg: *CodeGen,
    dst_temps: []Temp,
    dst_tys: []const Type,
    src_temps: []Temp,
    cases: []const Select.Case,
) !void {
    @setEvalBranchQuota(33_600);
    cases: for (cases) |case| {
        for (case.required_features) |required_feature| if (required_feature) |feature| if (!cg.hasFeature(feature)) continue :cases;
        for (case.dst_constraints[0..dst_temps.len], dst_tys) |dst_constraint, dst_ty| if (!dst_constraint.accepts(dst_ty, cg)) continue :cases;
        for (case.src_constraints[0..src_temps.len], src_temps) |src_constraint, src_temp| if (!src_constraint.accepts(src_temp.typeOf(cg), cg)) continue :cases;
        if (std.debug.runtime_safety) {
            for (case.dst_constraints[dst_temps.len..]) |dst_constraint| assert(dst_constraint == .any);
            for (case.src_constraints[src_temps.len..]) |src_constraint| assert(src_constraint == .any);
        }
        patterns: for (case.patterns) |pattern| {
            for (pattern.src[0..src_temps.len], src_temps) |src_pattern, src_temp| if (!src_pattern.matches(src_temp, cg)) continue :patterns;
            if (std.debug.runtime_safety) for (pattern.src[src_temps.len..]) |src_pattern| assert(src_pattern == .none);

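            // Reserve call-frame space up front if this case's emitted body
            // needs it.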
            if (case.call_frame.alignment != .none) {
                const frame_allocs_slice = cg.frame_allocs.slice();
                const stack_frame_size =
                    &frame_allocs_slice.items(.abi_size)[@intFromEnum(FrameIndex.call_frame)];
                stack_frame_size.* = @max(stack_frame_size.*, case.call_frame.size);
                const stack_frame_align =
                    &frame_allocs_slice.items(.abi_align)[@intFromEnum(FrameIndex.call_frame)];
                stack_frame_align.* = stack_frame_align.max(case.call_frame.alignment);
            }

            var s: Select = .{
                .cg = cg,
                .temps = undefined,
                .labels = @splat(.{ .forward = @splat(null), .backward = null }),
                .top = 0,
            };
            const tmp_slots = s.temps[@intFromEnum(Select.Operand.Ref.tmp0)..@intFromEnum(Select.Operand.Ref.dst0)];
            const dst_slots = s.temps[@intFromEnum(Select.Operand.Ref.dst0)..@intFromEnum(Select.Operand.Ref.src0)];
            const src_slots = s.temps[@intFromEnum(Select.Operand.Ref.src0)..@intFromEnum(Select.Operand.Ref.none)];

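            // Claim and lock every caller-preserved register of the clobbered
            // calling convention (minus the error-return-trace register,
            // which stays live) so the emitted body may clobber them freely;
            // the matching block after emission unlocks them again.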
            caller_preserved: {
                const cc = switch (case.clobbers.caller_preserved) {
                    .none => break :caller_preserved,
                    .ccc => cg.target.cCallingConvention().?,
                    .zigcc => .auto,
                };
                assert(case.clobbers.eflags);
                const err_ret_trace_reg = if (cc == .auto and cg.pt.zcu.comp.config.any_error_tracing) err_ret_trace_reg: {
                    const param_gpr = abi.getCAbiIntParamRegs(.auto);
                    break :err_ret_trace_reg param_gpr[param_gpr.len - 1];
                } else .none;
                switch (cc) {
                    else => unreachable,
                    inline .x86_64_sysv, .x86_64_win, .auto => |_, tag| inline for (comptime abi.getCallerPreservedRegs(tag)) |reg| skip: {
                        if (reg == err_ret_trace_reg) break :skip;
                        const tracked_index = RegisterManager.indexOfKnownRegIntoTracked(reg) orelse break :skip;
                        try cg.register_manager.getRegIndex(tracked_index, null);
                        assert(cg.register_manager.lockRegIndexAssumeUnused(tracked_index).tracked_index == tracked_index);
                    },
                }
            }

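            // Materialize operands: copy sources into their slots with the
            // pattern's commutation applied, create pass-1 destinations and
            // extra temps, convert sources until the pattern is satisfied,
            // then create pass-2 destinations.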
            @memcpy(src_slots[0..src_temps.len], src_temps);
            std.mem.swap(Temp, &src_slots[pattern.commute[0]], &src_slots[pattern.commute[1]]);
            for (dst_temps, dst_tys, case.dst_temps[0..dst_temps.len]) |*dst_temp, dst_ty, dst_kind| {
                if (dst_kind.pass() != 1) continue;
                dst_temp.*, _ = try Select.TempSpec.create(.{ .type = dst_ty, .kind = dst_kind }, &s);
            }
            var tmp_owned: [tmp_slots.len]bool = @splat(false);
            for (1..3) |pass| for (tmp_slots, &tmp_owned, case.extra_temps) |*slot, *owned, spec| {
                if (spec.kind.pass() != pass) continue;
                slot.*, owned.* = try spec.create(&s);
            };

            while (true) for (pattern.src[0..src_temps.len], src_temps) |src_pattern, *src_temp| {
                if (try src_pattern.convert(src_temp, cg)) break;
            } else break;
            @memcpy(src_slots[0..src_temps.len], src_temps);
            std.mem.swap(Temp, &src_slots[pattern.commute[0]], &src_slots[pattern.commute[1]]);

            if (case.clobbers.eflags) try cg.spillEflagsIfOccupied();

            for (dst_temps, dst_tys, case.dst_temps[0..dst_temps.len]) |*dst_temp, dst_ty, dst_kind| {
                if (dst_kind.pass() != 2) continue;
                dst_temp.*, _ = try Select.TempSpec.create(.{ .type = dst_ty, .kind = dst_kind }, &s);
            }
            @memcpy(dst_slots[0..dst_temps.len], dst_temps);

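            // Emit the MIR body for the matched case and resolve the trailing
            // `0:` label; `s.top` must return to zero once emission completes.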
            switch (case.each) {
                .once => |body| {
                    for (body) |inst| try s.emit(inst);
                    s.emitLabel(.@"0:");
                },
            }
            assert(s.top == 0);

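            // Mirror of the caller-preserved block above: unlock the
            // registers that were locked before emission.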
            caller_preserved: {
                const cc = switch (case.clobbers.caller_preserved) {
                    .none => break :caller_preserved,
                    .ccc => cg.target.cCallingConvention().?,
                    .zigcc => .auto,
                };
                const err_ret_trace_reg = if (cc == .auto and cg.pt.zcu.comp.config.any_error_tracing) err_ret_trace_reg: {
                    const param_gpr = abi.getCAbiIntParamRegs(.auto);
                    break :err_ret_trace_reg param_gpr[param_gpr.len - 1];
                } else .none;
                switch (cc) {
                    else => unreachable,
                    inline .x86_64_sysv, .x86_64_win, .auto => |_, tag| inline for (comptime abi.getCallerPreservedRegs(tag)) |reg| skip: {
                        if (reg == err_ret_trace_reg) break :skip;
                        cg.register_manager.unlockReg(.{ .tracked_index = RegisterManager.indexOfKnownRegIntoTracked(reg) orelse break :skip });
                    },
                }
            }
            for (dst_temps, case.dst_temps[0..dst_temps.len]) |dst_temp, dst_kind| dst_kind.finish(dst_temp, &s);
            for (tmp_owned, tmp_slots) |owned, temp| if (owned) try temp.die(cg);
            return;
        }
    }
    return error.SelectFailed;
}