x86_64: optimize mir tag usage

This moves all pseudo-instructions to a single `Mir.Inst.Tag` tag and
prepares to start coalescing similar mnemonics. 239 tags left in use.
This commit is contained in:
Jacob Young
2023-05-08 06:50:18 -04:00
parent 6778da4516
commit 6c14eb2863
6 changed files with 853 additions and 878 deletions

View File

@@ -973,14 +973,14 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
try self.mir_instructions.ensureUnusedCapacity(gpa, 1);
const result_index = @intCast(Mir.Inst.Index, self.mir_instructions.len);
self.mir_instructions.appendAssumeCapacity(inst);
switch (inst.tag) {
else => wip_mir_log.debug("{}", .{self.fmtWipMir(result_index)}),
.dbg_line,
.dbg_prologue_end,
.dbg_epilogue_begin,
.dead,
=> {},
}
if (inst.tag != .pseudo or switch (inst.ops) {
else => true,
.pseudo_dbg_prologue_end_none,
.pseudo_dbg_line_line_column,
.pseudo_dbg_epilogue_begin_none,
.pseudo_dead_none,
=> false,
}) wip_mir_log.debug("{}", .{self.fmtWipMir(result_index)});
return result_index;
}
@@ -1003,98 +1003,166 @@ fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
return result;
}
/// A `cc` of `.z_and_np` clobbers `reg2`!
fn asmCmovccRegisterRegister(self: *Self, reg1: Register, reg2: Register, cc: bits.Condition) !void {
_ = try self.addInst(.{
.tag = switch (cc) {
else => .cmov,
.z_and_np, .nz_or_p => .pseudo,
},
.ops = switch (cc) {
else => .rr,
.z_and_np => .pseudo_cmov_z_and_np_rr,
.nz_or_p => .pseudo_cmov_nz_or_p_rr,
},
.data = .{ .rr = .{
.fixes = switch (cc) {
else => Mir.Inst.Fixes.fromCondition(cc),
.z_and_np, .nz_or_p => ._,
},
.r1 = reg1,
.r2 = reg2,
} },
});
}
/// A `cc` of `.z_and_np` is not supported by this encoding!
fn asmCmovccRegisterMemory(self: *Self, reg: Register, m: Memory, cc: bits.Condition) !void {
_ = try self.addInst(.{
.tag = switch (cc) {
else => .cmov,
.z_and_np => unreachable,
.nz_or_p => .pseudo,
},
.ops = switch (cc) {
else => switch (m) {
.sib => .rm_sib,
.rip => .rm_rip,
else => unreachable,
},
.z_and_np => unreachable,
.nz_or_p => switch (m) {
.sib => .pseudo_cmov_nz_or_p_rm_sib,
.rip => .pseudo_cmov_nz_or_p_rm_rip,
else => unreachable,
},
},
.data = .{ .rx = .{
.fixes = switch (cc) {
else => Mir.Inst.Fixes.fromCondition(cc),
.z_and_np => unreachable,
.nz_or_p => ._,
},
.r1 = reg,
.payload = switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
else => unreachable,
},
} },
});
}
fn asmSetccRegister(self: *Self, reg: Register, cc: bits.Condition) !void {
_ = try self.addInst(.{
.tag = .setcc,
.ops = .r_cc,
.data = .{ .r_cc = .{
.r = reg,
.scratch = if (cc == .z_and_np or cc == .nz_or_p)
(try self.register_manager.allocReg(null, gp)).to8()
else
.none,
.cc = cc,
} },
.tag = switch (cc) {
else => .set,
.z_and_np, .nz_or_p => .pseudo,
},
.ops = switch (cc) {
else => .r,
.z_and_np => .pseudo_set_z_and_np_r,
.nz_or_p => .pseudo_set_nz_or_p_r,
},
.data = switch (cc) {
else => .{ .r = .{
.fixes = Mir.Inst.Fixes.fromCondition(cc),
.r1 = reg,
} },
.z_and_np, .nz_or_p => .{ .r_scratch = .{
.r1 = reg,
.scratch_reg = (try self.register_manager.allocReg(null, gp)).to8(),
} },
},
});
}
fn asmSetccMemory(self: *Self, m: Memory, cc: bits.Condition) !void {
const payload = switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
else => unreachable,
};
_ = try self.addInst(.{
.tag = .setcc,
.ops = switch (m) {
.sib => .m_sib_cc,
.rip => .m_rip_cc,
else => unreachable,
.tag = switch (cc) {
else => .set,
.z_and_np, .nz_or_p => .pseudo,
},
.data = .{ .x_cc = .{
.scratch = if (cc == .z_and_np or cc == .nz_or_p)
(try self.register_manager.allocReg(null, gp)).to8()
else
.none,
.cc = cc,
.payload = switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
.ops = switch (cc) {
else => switch (m) {
.sib => .m_sib,
.rip => .m_rip,
else => unreachable,
},
} },
});
}
/// A `cc` of `.z_and_np` clobbers `reg2`!
fn asmCmovccRegisterRegister(self: *Self, reg1: Register, reg2: Register, cc: bits.Condition) !void {
_ = try self.addInst(.{
.tag = .cmovcc,
.ops = .rr_cc,
.data = .{ .rr_cc = .{
.r1 = reg1,
.r2 = reg2,
.cc = cc,
} },
});
}
fn asmCmovccRegisterMemory(self: *Self, reg: Register, m: Memory, cc: bits.Condition) !void {
assert(cc != .z_and_np); // not supported
_ = try self.addInst(.{
.tag = .cmovcc,
.ops = switch (m) {
.sib => .rm_sib_cc,
.rip => .rm_rip_cc,
else => unreachable,
},
.data = .{ .rx_cc = .{
.r = reg,
.cc = cc,
.payload = switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
.z_and_np => switch (m) {
.sib => .pseudo_set_z_and_np_m_sib,
.rip => .pseudo_set_z_and_np_m_rip,
else => unreachable,
},
} },
.nz_or_p => switch (m) {
.sib => .pseudo_set_nz_or_p_m_sib,
.rip => .pseudo_set_nz_or_p_m_rip,
else => unreachable,
},
},
.data = switch (cc) {
else => .{ .x = .{
.fixes = Mir.Inst.Fixes.fromCondition(cc),
.payload = payload,
} },
.z_and_np, .nz_or_p => .{ .x_scratch = .{
.scratch_reg = (try self.register_manager.allocReg(null, gp)).to8(),
.payload = payload,
} },
},
});
}
fn asmJmpReloc(self: *Self, target: Mir.Inst.Index) !Mir.Inst.Index {
return self.addInst(.{
.tag = .jmp_reloc,
.ops = undefined,
.data = .{ .inst = target },
.tag = .jmp,
.ops = .inst,
.data = .{ .inst = .{
.inst = target,
} },
});
}
fn asmJccReloc(self: *Self, target: Mir.Inst.Index, cc: bits.Condition) !Mir.Inst.Index {
return self.addInst(.{
.tag = .jcc,
.ops = .inst_cc,
.data = .{ .inst_cc = .{ .inst = target, .cc = cc } },
.tag = switch (cc) {
else => .j,
.z_and_np, .nz_or_p => .pseudo,
},
.ops = switch (cc) {
else => .inst,
.z_and_np => .pseudo_j_z_and_np_inst,
.nz_or_p => .pseudo_j_nz_or_p_inst,
},
.data = .{ .inst = .{
.fixes = switch (cc) {
else => Mir.Inst.Fixes.fromCondition(cc),
.z_and_np, .nz_or_p => ._,
},
.inst = target,
} },
});
}
fn asmPlaceholder(self: *Self) !Mir.Inst.Index {
return self.addInst(.{
.tag = .dead,
.ops = undefined,
.tag = .pseudo,
.ops = .pseudo_dead_none,
.data = undefined,
});
}
@@ -1107,11 +1175,19 @@ fn asmOpOnly(self: *Self, tag: Mir.Inst.Tag) !void {
});
}
fn asmPseudo(self: *Self, ops: Mir.Inst.Ops) !void {
_ = try self.addInst(.{
.tag = .pseudo,
.ops = ops,
.data = undefined,
});
}
fn asmRegister(self: *Self, tag: Mir.Inst.Tag, reg: Register) !void {
_ = try self.addInst(.{
.tag = tag,
.ops = .r,
.data = .{ .r = reg },
.data = .{ .r = .{ .r1 = reg } },
});
}
@@ -1122,9 +1198,11 @@ fn asmImmediate(self: *Self, tag: Mir.Inst.Tag, imm: Immediate) !void {
.signed => .i_s,
.unsigned => .i_u,
},
.data = .{ .i = switch (imm) {
.signed => |s| @bitCast(u32, s),
.unsigned => |u| @intCast(u32, u),
.data = .{ .i = .{
.i = switch (imm) {
.signed => |s| @bitCast(u32, s),
.unsigned => |u| @intCast(u32, u),
},
} },
});
}
@@ -1147,14 +1225,14 @@ fn asmRegisterImmediate(self: *Self, tag: Mir.Inst.Tag, reg: Register, imm: Imme
.ops = ops,
.data = switch (ops) {
.ri_s, .ri_u => .{ .ri = .{
.r = reg,
.r1 = reg,
.i = switch (imm) {
.signed => |s| @bitCast(u32, s),
.unsigned => |u| @intCast(u32, u),
},
} },
.ri64 => .{ .rx = .{
.r = reg,
.r1 = reg,
.payload = try self.addExtra(Mir.Imm64.encode(imm.unsigned)),
} },
else => unreachable,
@@ -1249,10 +1327,12 @@ fn asmMemory(self: *Self, tag: Mir.Inst.Tag, m: Memory) !void {
.rip => .m_rip,
else => unreachable,
},
.data = .{ .payload = switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
else => unreachable,
.data = .{ .x = .{
.payload = switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
else => unreachable,
},
} },
});
}
@@ -1266,7 +1346,7 @@ fn asmRegisterMemory(self: *Self, tag: Mir.Inst.Tag, reg: Register, m: Memory) !
else => unreachable,
},
.data = .{ .rx = .{
.r = reg,
.r1 = reg,
.payload = switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
@@ -1291,7 +1371,7 @@ fn asmRegisterMemoryImmediate(
else => unreachable,
},
.data = .{ .rix = .{
.r = reg,
.r1 = reg,
.i = @intCast(u8, imm.unsigned),
.payload = switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
@@ -1339,7 +1419,7 @@ fn asmMemoryRegister(self: *Self, tag: Mir.Inst.Tag, m: Memory, reg: Register) !
else => unreachable,
},
.data = .{ .rx = .{
.r = reg,
.r1 = reg,
.payload = switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
@@ -1413,11 +1493,15 @@ fn asmMemoryRegisterImmediate(
.rip => .mri_rip,
else => unreachable,
},
.data = .{ .rix = .{ .r = reg, .i = @intCast(u8, imm.unsigned), .payload = switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
else => unreachable,
} } },
.data = .{ .rix = .{
.r1 = reg,
.i = @intCast(u8, imm.unsigned),
.payload = switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
else => unreachable,
},
} },
});
}
@@ -1450,7 +1534,7 @@ fn gen(self: *Self) InnerError!void {
else => unreachable,
}
try self.asmOpOnly(.dbg_prologue_end);
try self.asmPseudo(.pseudo_dbg_prologue_end_none);
try self.genBody(self.air.getMainBody());
@@ -1462,11 +1546,11 @@ fn gen(self: *Self) InnerError!void {
// }
// Eliding the reloc will cause a miscompilation in this case.
for (self.exitlude_jump_relocs.items) |jmp_reloc| {
self.mir_instructions.items(.data)[jmp_reloc].inst =
self.mir_instructions.items(.data)[jmp_reloc].inst.inst =
@intCast(u32, self.mir_instructions.len);
}
try self.asmOpOnly(.dbg_epilogue_begin);
try self.asmPseudo(.pseudo_dbg_epilogue_begin_none);
const backpatch_stack_dealloc = try self.asmPlaceholder();
const backpatch_pop_callee_preserved_regs = try self.asmPlaceholder();
try self.asmRegister(.pop, .rbp);
@@ -1480,46 +1564,54 @@ fn gen(self: *Self) InnerError!void {
self.mir_instructions.set(backpatch_frame_align, .{
.tag = .@"and",
.ops = .ri_s,
.data = .{ .ri = .{ .r = .rsp, .i = frame_layout.stack_mask } },
.data = .{ .ri = .{
.r1 = .rsp,
.i = frame_layout.stack_mask,
} },
});
}
if (need_stack_adjust) {
self.mir_instructions.set(backpatch_stack_alloc, .{
.tag = .sub,
.ops = .ri_s,
.data = .{ .ri = .{ .r = .rsp, .i = frame_layout.stack_adjust } },
.data = .{ .ri = .{
.r1 = .rsp,
.i = frame_layout.stack_adjust,
} },
});
}
if (need_frame_align or need_stack_adjust) {
self.mir_instructions.set(backpatch_stack_dealloc, .{
.tag = .mov,
.ops = .rr,
.data = .{ .rr = .{ .r1 = .rsp, .r2 = .rbp } },
.data = .{ .rr = .{
.r1 = .rsp,
.r2 = .rbp,
} },
});
}
if (need_save_reg) {
const save_reg_list = frame_layout.save_reg_list.asInt();
self.mir_instructions.set(backpatch_push_callee_preserved_regs, .{
.tag = .push_regs,
.ops = undefined,
.data = .{ .payload = save_reg_list },
.tag = .pseudo,
.ops = .pseudo_push_reg_list,
.data = .{ .reg_list = frame_layout.save_reg_list },
});
self.mir_instructions.set(backpatch_pop_callee_preserved_regs, .{
.tag = .pop_regs,
.ops = undefined,
.data = .{ .payload = save_reg_list },
.tag = .pseudo,
.ops = .pseudo_pop_reg_list,
.data = .{ .reg_list = frame_layout.save_reg_list },
});
}
} else {
try self.asmOpOnly(.dbg_prologue_end);
try self.asmPseudo(.pseudo_dbg_prologue_end_none);
try self.genBody(self.air.getMainBody());
try self.asmOpOnly(.dbg_epilogue_begin);
try self.asmPseudo(.pseudo_dbg_epilogue_begin_none);
}
// Drop them off at the rbrace.
_ = try self.addInst(.{
.tag = .dbg_line,
.ops = undefined,
.tag = .pseudo,
.ops = .pseudo_dbg_line_line_column,
.data = .{ .line_column = .{
.line = self.end_di_line,
.column = self.end_di_column,
@@ -2446,11 +2538,11 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
.register => |dst_reg| {
const min_abi_size = @min(dst_abi_size, src_abi_size);
const tag: Mir.Inst.Tag = switch (signedness) {
.signed => .movsx,
.unsigned => if (min_abi_size > 2) .mov else .movzx,
.signed => if (min_abi_size >= 4) .movsxd else .movsx,
.unsigned => if (min_abi_size >= 4) .mov else .movzx,
};
const dst_alias = switch (tag) {
.movsx => dst_reg.to64(),
.movsx, .movsxd => dst_reg.to64(),
.mov, .movzx => if (min_abi_size > 4) dst_reg.to64() else dst_reg.to32(),
else => unreachable,
};
@@ -5247,7 +5339,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const field_byte_size = @intCast(u32, field_ty.abiSize(self.target.*));
if (signedness == .signed and field_byte_size < 8) {
try self.asmRegisterRegister(
.movsx,
if (field_byte_size >= 4) .movsxd else .movsx,
dst_mcv.register,
registerAlias(dst_mcv.register, field_byte_size),
);
@@ -7194,10 +7286,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const atom_index = try self.owner.getSymbolIndex(self);
const sym_index = try coff_file.getGlobalSymbol(decl_name, lib_name);
_ = try self.addInst(.{
.tag = .mov_linker,
.tag = .mov,
.ops = .import_reloc,
.data = .{ .rx = .{
.r = .rax,
.r1 = .rax,
.payload = try self.addExtra(Mir.Reloc{
.atom_index = atom_index,
.sym_index = sym_index,
@@ -7209,9 +7301,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const atom_index = try self.owner.getSymbolIndex(self);
const sym_index = try macho_file.getGlobalSymbol(decl_name, lib_name);
_ = try self.addInst(.{
.tag = .call_extern,
.ops = undefined,
.data = .{ .relocation = .{
.tag = .call,
.ops = .extern_fn_reloc,
.data = .{ .reloc = .{
.atom_index = atom_index,
.sym_index = sym_index,
} },
@@ -7489,8 +7581,8 @@ fn genTry(
fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
_ = try self.addInst(.{
.tag = .dbg_line,
.ops = undefined,
.tag = .pseudo,
.ops = .pseudo_dbg_line_line_column,
.data = .{ .line_column = .{
.line = dbg_stmt.line,
.column = dbg_stmt.column,
@@ -8021,14 +8113,14 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void {
fn performReloc(self: *Self, reloc: Mir.Inst.Index) !void {
const next_inst = @intCast(u32, self.mir_instructions.len);
switch (self.mir_instructions.items(.tag)[reloc]) {
.jcc => {
self.mir_instructions.items(.data)[reloc].inst_cc.inst = next_inst;
},
.jmp_reloc => {
self.mir_instructions.items(.data)[reloc].inst = next_inst;
.j, .jmp => {},
.pseudo => switch (self.mir_instructions.items(.ops)[reloc]) {
.pseudo_j_z_and_np_inst, .pseudo_j_nz_or_p_inst => {},
else => unreachable,
},
else => unreachable,
}
self.mir_instructions.items(.data)[reloc].inst.inst = next_inst;
}
fn airBr(self: *Self, inst: Air.Inst.Index) !void {
@@ -8577,10 +8669,10 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
.load_direct => |sym_index| if (!ty.isRuntimeFloat()) {
const atom_index = try self.owner.getSymbolIndex(self);
_ = try self.addInst(.{
.tag = .mov_linker,
.tag = .mov,
.ops = .direct_reloc,
.data = .{ .rx = .{
.r = dst_reg.to64(),
.r1 = dst_reg.to64(),
.payload = try self.addExtra(Mir.Reloc{
.atom_index = atom_index,
.sym_index = sym_index,
@@ -8618,8 +8710,8 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
const atom_index = try self.owner.getSymbolIndex(self);
_ = try self.addInst(.{
.tag = switch (src_mcv) {
.lea_direct => .lea_linker,
.lea_got => .mov_linker,
.lea_direct => .lea,
.lea_got => .mov,
else => unreachable,
},
.ops = switch (src_mcv) {
@@ -8628,7 +8720,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
else => unreachable,
},
.data = .{ .rx = .{
.r = dst_reg.to64(),
.r1 = dst_reg.to64(),
.payload = try self.addExtra(Mir.Reloc{
.atom_index = atom_index,
.sym_index = sym_index,
@@ -8640,10 +8732,10 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
const atom_index = try self.owner.getSymbolIndex(self);
if (self.bin_file.cast(link.File.MachO)) |_| {
_ = try self.addInst(.{
.tag = .lea_linker,
.tag = .lea,
.ops = .tlv_reloc,
.data = .{ .rx = .{
.r = .rdi,
.r1 = .rdi,
.payload = try self.addExtra(Mir.Reloc{
.atom_index = atom_index,
.sym_index = sym_index,
@@ -8847,9 +8939,9 @@ fn genInlineMemcpy(self: *Self, dst_ptr: MCValue, src_ptr: MCValue, len: MCValue
try self.genSetReg(.rsi, Type.usize, src_ptr);
try self.genSetReg(.rcx, Type.usize, len);
_ = try self.addInst(.{
.tag = .movs,
.ops = .string,
.data = .{ .string = .{ .repeat = .rep, .width = .b } },
.tag = .mov,
.ops = .none,
.data = .{ .none = .{ .fixes = .@"rep _sb" } },
});
}
@@ -8859,9 +8951,9 @@ fn genInlineMemset(self: *Self, dst_ptr: MCValue, value: MCValue, len: MCValue)
try self.genSetReg(.al, Type.u8, value);
try self.genSetReg(.rcx, Type.usize, len);
_ = try self.addInst(.{
.tag = .stos,
.ops = .string,
.data = .{ .string = .{ .repeat = .rep, .width = .b } },
.tag = .sto,
.ops = .none,
.data = .{ .none = .{ .fixes = .@"rep _sb" } },
});
}
@@ -9135,22 +9227,22 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
try self.spillEflagsIfOccupied();
if (val_abi_size <= 8) {
_ = try self.addInst(.{
.tag = .cmpxchg,
.ops = .lock_mr_sib,
.data = .{ .rx = .{
.r = registerAlias(new_reg.?, val_abi_size),
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} },
});
} else {
_ = try self.addInst(.{
.tag = .cmpxchgb,
.ops = .lock_m_sib,
.data = .{ .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)) },
});
}
_ = try self.addInst(if (val_abi_size <= 8) .{
.tag = .cmpxchg,
.ops = .mr_sib,
.data = .{ .rx = .{
.fixes = .@"lock _",
.r1 = registerAlias(new_reg.?, val_abi_size),
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} },
} else .{
.tag = .cmpxchg,
.ops = .m_sib,
.data = .{ .x = .{
.fixes = .@"lock _16b",
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} },
});
const result: MCValue = result: {
if (self.liveness.isUnused(inst)) break :result .unreach;
@@ -9252,13 +9344,14 @@ fn atomicOp(
}
_ = try self.addInst(.{
.tag = tag,
.ops = switch (tag) {
.mov, .xchg => .mr_sib,
.xadd, .add, .sub, .@"and", .@"or", .xor => .lock_mr_sib,
else => unreachable,
},
.ops = .mr_sib,
.data = .{ .rx = .{
.r = registerAlias(dst_reg, val_abi_size),
.fixes = switch (tag) {
.mov, .xchg => ._,
.xadd, .add, .sub, .@"and", .@"or", .xor => .@"lock _",
else => unreachable,
},
.r1 = registerAlias(dst_reg, val_abi_size),
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} },
});
@@ -9330,9 +9423,10 @@ fn atomicOp(
};
_ = try self.addInst(.{
.tag = .cmpxchg,
.ops = .lock_mr_sib,
.ops = .mr_sib,
.data = .{ .rx = .{
.r = registerAlias(tmp_reg, val_abi_size),
.fixes = .@"lock _",
.r1 = registerAlias(tmp_reg, val_abi_size),
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} },
});
@@ -9397,9 +9491,14 @@ fn atomicOp(
val_ty.fmt(self.bin_file.options.module.?), @tagName(op),
}),
};
_ = try self.addInst(.{ .tag = .cmpxchgb, .ops = .lock_m_sib, .data = .{
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} });
_ = try self.addInst(.{
.tag = .cmpxchg,
.ops = .m_sib,
.data = .{ .x = .{
.fixes = .@"lock _16b",
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} },
});
_ = try self.asmJccReloc(loop, .ne);
if (unused) return .unreach;

View File

@@ -41,7 +41,7 @@ pub fn emitMir(emit: *Emit) Error!void {
.offset = end_offset - 4,
.length = @intCast(u5, end_offset - start_offset),
}),
.@"extern" => |symbol| if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
.linker_extern_fn => |symbol| if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl.
const atom_index = macho_file.getAtomIndexForSymbol(
.{ .sym_index = symbol.atom_index, .file = null },
@@ -129,36 +129,39 @@ pub fn emitMir(emit: *Emit) Error!void {
const mir_inst = emit.lower.mir.instructions.get(mir_index);
switch (mir_inst.tag) {
else => unreachable,
.dead => {},
.dbg_line => try emit.dbgAdvancePCAndLine(
mir_inst.data.line_column.line,
mir_inst.data.line_column.column,
),
.dbg_prologue_end => {
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setPrologueEnd();
log.debug("mirDbgPrologueEnd (line={d}, col={d})", .{
emit.prev_di_line, emit.prev_di_column,
});
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
},
.plan9 => {},
.none => {},
}
},
.dbg_epilogue_begin => {
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setEpilogueBegin();
log.debug("mirDbgEpilogueBegin (line={d}, col={d})", .{
emit.prev_di_line, emit.prev_di_column,
});
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
},
.plan9 => {},
.none => {},
}
.pseudo => switch (mir_inst.ops) {
else => unreachable,
.pseudo_dbg_prologue_end_none => {
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setPrologueEnd();
log.debug("mirDbgPrologueEnd (line={d}, col={d})", .{
emit.prev_di_line, emit.prev_di_column,
});
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
},
.plan9 => {},
.none => {},
}
},
.pseudo_dbg_line_line_column => try emit.dbgAdvancePCAndLine(
mir_inst.data.line_column.line,
mir_inst.data.line_column.column,
),
.pseudo_dbg_epilogue_begin_none => {
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setEpilogueBegin();
log.debug("mirDbgEpilogueBegin (line={d}, col={d})", .{
emit.prev_di_line, emit.prev_di_column,
});
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
},
.plan9 => {},
.none => {},
}
},
.pseudo_dead_none => {},
},
}
}

View File

@@ -705,7 +705,7 @@ fn estimateInstructionLength(prefix: Prefix, encoding: Encoding, ops: []const Op
}
const mnemonic_to_encodings_map = init: {
@setEvalBranchQuota(100_000);
@setEvalBranchQuota(20_000);
const encodings = @import("encodings.zig");
var entries = encodings.table;
std.sort.sort(encodings.Entry, &entries, {}, struct {

View File

@@ -35,7 +35,7 @@ pub const Reloc = struct {
const Target = union(enum) {
inst: Mir.Inst.Index,
@"extern": Mir.Reloc,
linker_extern_fn: Mir.Reloc,
linker_got: Mir.Reloc,
linker_direct: Mir.Reloc,
linker_import: Mir.Reloc,
@@ -59,280 +59,119 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
const inst = lower.mir.instructions.get(index);
switch (inst.tag) {
.adc,
.add,
.@"and",
.bsf,
.bsr,
.bswap,
.bt,
.btc,
.btr,
.bts,
.call,
.cbw,
.cwde,
.cdqe,
.cwd,
.cdq,
.cqo,
.cmp,
.cmpxchg,
.div,
.fisttp,
.fld,
.idiv,
.imul,
.int3,
.jmp,
.lea,
.lfence,
.lzcnt,
.mfence,
.mov,
.movbe,
.movd,
.movq,
.movzx,
.mul,
.neg,
.nop,
.not,
.@"or",
.pop,
.popcnt,
.push,
.rcl,
.rcr,
.ret,
.rol,
.ror,
.sal,
.sar,
.sbb,
.sfence,
.shl,
.shld,
.shr,
.shrd,
.sub,
.syscall,
.@"test",
.tzcnt,
.ud2,
.xadd,
.xchg,
.xor,
else => try lower.generic(inst),
.pseudo => switch (inst.ops) {
.pseudo_cmov_z_and_np_rr => {
try lower.emit(.none, .cmovnz, &.{
.{ .reg = inst.data.rr.r2 },
.{ .reg = inst.data.rr.r1 },
});
try lower.emit(.none, .cmovnp, &.{
.{ .reg = inst.data.rr.r1 },
.{ .reg = inst.data.rr.r2 },
});
},
.pseudo_cmov_nz_or_p_rr => {
try lower.emit(.none, .cmovnz, &.{
.{ .reg = inst.data.rr.r1 },
.{ .reg = inst.data.rr.r2 },
});
try lower.emit(.none, .cmovp, &.{
.{ .reg = inst.data.rr.r1 },
.{ .reg = inst.data.rr.r2 },
});
},
.pseudo_cmov_nz_or_p_rm_sib,
.pseudo_cmov_nz_or_p_rm_rip,
=> {
try lower.emit(.none, .cmovnz, &.{
.{ .reg = inst.data.rx.r1 },
.{ .mem = lower.mem(inst.ops, inst.data.rx.payload) },
});
try lower.emit(.none, .cmovp, &.{
.{ .reg = inst.data.rx.r1 },
.{ .mem = lower.mem(inst.ops, inst.data.rx.payload) },
});
},
.pseudo_set_z_and_np_r => {
try lower.emit(.none, .setz, &.{
.{ .reg = inst.data.r_scratch.r1 },
});
try lower.emit(.none, .setnp, &.{
.{ .reg = inst.data.r_scratch.scratch_reg },
});
try lower.emit(.none, .@"and", &.{
.{ .reg = inst.data.r_scratch.r1 },
.{ .reg = inst.data.r_scratch.scratch_reg },
});
},
.pseudo_set_z_and_np_m_sib,
.pseudo_set_z_and_np_m_rip,
=> {
try lower.emit(.none, .setz, &.{
.{ .mem = lower.mem(inst.ops, inst.data.x_scratch.payload) },
});
try lower.emit(.none, .setnp, &.{
.{ .reg = inst.data.x_scratch.scratch_reg },
});
try lower.emit(.none, .@"and", &.{
.{ .mem = lower.mem(inst.ops, inst.data.x_scratch.payload) },
.{ .reg = inst.data.x_scratch.scratch_reg },
});
},
.pseudo_set_nz_or_p_r => {
try lower.emit(.none, .setnz, &.{
.{ .reg = inst.data.r_scratch.r1 },
});
try lower.emit(.none, .setp, &.{
.{ .reg = inst.data.r_scratch.scratch_reg },
});
try lower.emit(.none, .@"or", &.{
.{ .reg = inst.data.r_scratch.r1 },
.{ .reg = inst.data.r_scratch.scratch_reg },
});
},
.pseudo_set_nz_or_p_m_sib,
.pseudo_set_nz_or_p_m_rip,
=> {
try lower.emit(.none, .setnz, &.{
.{ .mem = lower.mem(inst.ops, inst.data.x_scratch.payload) },
});
try lower.emit(.none, .setp, &.{
.{ .reg = inst.data.x_scratch.scratch_reg },
});
try lower.emit(.none, .@"or", &.{
.{ .mem = lower.mem(inst.ops, inst.data.x_scratch.payload) },
.{ .reg = inst.data.x_scratch.scratch_reg },
});
},
.pseudo_j_z_and_np_inst => {
try lower.emit(.none, .jnz, &.{
.{ .imm = lower.reloc(.{ .inst = index + 1 }) },
});
try lower.emit(.none, .jnp, &.{
.{ .imm = lower.reloc(.{ .inst = inst.data.inst.inst }) },
});
},
.pseudo_j_nz_or_p_inst => {
try lower.emit(.none, .jnz, &.{
.{ .imm = lower.reloc(.{ .inst = inst.data.inst.inst }) },
});
try lower.emit(.none, .jp, &.{
.{ .imm = lower.reloc(.{ .inst = inst.data.inst.inst }) },
});
},
.addps,
.addss,
.andnps,
.andps,
.cmpss,
.cvtsi2ss,
.divps,
.divss,
.maxps,
.maxss,
.minps,
.minss,
.movaps,
.movhlps,
.movss,
.movups,
.mulps,
.mulss,
.orps,
.pextrw,
.pinsrw,
.sqrtps,
.sqrtss,
.subps,
.subss,
.ucomiss,
.unpckhps,
.unpcklps,
.xorps,
.pseudo_push_reg_list => try lower.pushPopRegList(.push, inst),
.pseudo_pop_reg_list => try lower.pushPopRegList(.pop, inst),
.addpd,
.addsd,
.andnpd,
.andpd,
.cmpsd,
.cvtsd2ss,
.cvtsi2sd,
.cvtss2sd,
.divpd,
.divsd,
.maxpd,
.maxsd,
.minpd,
.minsd,
.movsd,
.mulpd,
.mulsd,
.orpd,
.pshufhw,
.pshuflw,
.psrld,
.psrlq,
.psrlw,
.punpckhbw,
.punpckhdq,
.punpckhqdq,
.punpckhwd,
.punpcklbw,
.punpckldq,
.punpcklqdq,
.punpcklwd,
.sqrtpd,
.sqrtsd,
.subpd,
.subsd,
.ucomisd,
.unpckhpd,
.unpcklpd,
.xorpd,
.movddup,
.movshdup,
.movsldup,
.pextrb,
.pextrd,
.pextrq,
.pinsrb,
.pinsrd,
.pinsrq,
.roundpd,
.roundps,
.roundsd,
.roundss,
.vaddpd,
.vaddps,
.vaddsd,
.vaddss,
.vcvtsd2ss,
.vcvtsi2sd,
.vcvtsi2ss,
.vcvtss2sd,
.vdivpd,
.vdivps,
.vdivsd,
.vdivss,
.vmaxpd,
.vmaxps,
.vmaxsd,
.vmaxss,
.vminpd,
.vminps,
.vminsd,
.vminss,
.vmovapd,
.vmovaps,
.vmovddup,
.vmovhlps,
.vmovsd,
.vmovshdup,
.vmovsldup,
.vmovss,
.vmovupd,
.vmovups,
.vmulpd,
.vmulps,
.vmulsd,
.vmulss,
.vpextrb,
.vpextrd,
.vpextrq,
.vpextrw,
.vpinsrb,
.vpinsrd,
.vpinsrq,
.vpinsrw,
.vpshufhw,
.vpshuflw,
.vpsrld,
.vpsrlq,
.vpsrlw,
.vpunpckhbw,
.vpunpckhdq,
.vpunpckhqdq,
.vpunpckhwd,
.vpunpcklbw,
.vpunpckldq,
.vpunpcklqdq,
.vpunpcklwd,
.vroundpd,
.vroundps,
.vroundsd,
.vroundss,
.vsqrtpd,
.vsqrtps,
.vsqrtsd,
.vsqrtss,
.vsubpd,
.vsubps,
.vsubsd,
.vsubss,
.vunpckhpd,
.vunpckhps,
.vunpcklpd,
.vunpcklps,
.vcvtph2ps,
.vcvtps2ph,
.vfmadd132pd,
.vfmadd213pd,
.vfmadd231pd,
.vfmadd132ps,
.vfmadd213ps,
.vfmadd231ps,
.vfmadd132sd,
.vfmadd213sd,
.vfmadd231sd,
.vfmadd132ss,
.vfmadd213ss,
.vfmadd231ss,
=> try lower.mirGeneric(inst),
.cmps,
.lods,
.movs,
.scas,
.stos,
=> try lower.mirString(inst),
.cmpxchgb => try lower.mirCmpxchgBytes(inst),
.jmp_reloc => try lower.emitInstWithReloc(.none, .jmp, &.{
.{ .imm = Immediate.s(0) },
}, .{ .inst = inst.data.inst }),
.call_extern => try lower.emitInstWithReloc(.none, .call, &.{
.{ .imm = Immediate.s(0) },
}, .{ .@"extern" = inst.data.relocation }),
.lea_linker => try lower.mirLinker(.lea, inst),
.mov_linker => try lower.mirLinker(.mov, inst),
.mov_moffs => try lower.mirMovMoffs(inst),
.movsx => try lower.mirMovsx(inst),
.cmovcc => try lower.mirCmovcc(inst),
.setcc => try lower.mirSetcc(inst),
.jcc => try lower.mirJcc(index, inst),
.push_regs => try lower.mirRegisterList(.push, inst),
.pop_regs => try lower.mirRegisterList(.pop, inst),
.dbg_line,
.dbg_prologue_end,
.dbg_epilogue_begin,
.dead,
=> {},
.pseudo_dbg_prologue_end_none,
.pseudo_dbg_line_line_column,
.pseudo_dbg_epilogue_begin_none,
.pseudo_dead_none,
=> {},
else => unreachable,
},
}
return .{
@@ -348,15 +187,6 @@ pub fn fail(lower: *Lower, comptime format: []const u8, args: anytype) Error {
return error.LowerFail;
}
fn mnem_cc(comptime base: @Type(.EnumLiteral), cc: bits.Condition) Mnemonic {
return switch (cc) {
inline else => |c| if (@hasField(Mnemonic, @tagName(base) ++ @tagName(c)))
@field(Mnemonic, @tagName(base) ++ @tagName(c))
else
unreachable,
};
}
fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate {
return switch (ops) {
.rri_s,
@@ -364,8 +194,6 @@ fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate {
.i_s,
.mi_sib_s,
.mi_rip_s,
.lock_mi_sib_s,
.lock_mi_rip_s,
=> Immediate.s(@bitCast(i32, i)),
.rrri,
@@ -374,8 +202,6 @@ fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate {
.i_u,
.mi_sib_u,
.mi_rip_u,
.lock_mi_sib_u,
.lock_mi_rip_u,
.rmi_sib,
.rmi_rip,
.mri_sib,
@@ -395,10 +221,8 @@ fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate {
fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory {
return lower.mir.resolveFrameLoc(switch (ops) {
.rm_sib,
.rm_sib_cc,
.rmi_sib,
.m_sib,
.m_sib_cc,
.mi_sib_u,
.mi_sib_s,
.mr_sib,
@@ -406,17 +230,15 @@ fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory {
.mri_sib,
.rrm_sib,
.rrmi_sib,
.lock_m_sib,
.lock_mi_sib_u,
.lock_mi_sib_s,
.lock_mr_sib,
.pseudo_cmov_nz_or_p_rm_sib,
.pseudo_set_z_and_np_m_sib,
.pseudo_set_nz_or_p_m_sib,
=> lower.mir.extraData(Mir.MemorySib, payload).data.decode(),
.rm_rip,
.rm_rip_cc,
.rmi_rip,
.m_rip,
.m_rip_cc,
.mi_rip_u,
.mi_rip_s,
.mr_rip,
@@ -424,66 +246,83 @@ fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory {
.mri_rip,
.rrm_rip,
.rrmi_rip,
.lock_m_rip,
.lock_mi_rip_u,
.lock_mi_rip_s,
.lock_mr_rip,
.pseudo_cmov_nz_or_p_rm_rip,
.pseudo_set_z_and_np_m_rip,
.pseudo_set_nz_or_p_m_rip,
=> lower.mir.extraData(Mir.MemoryRip, payload).data.decode(),
.rax_moffs,
.moffs_rax,
.lock_moffs_rax,
=> lower.mir.extraData(Mir.MemoryMoffs, payload).data.decode(),
else => unreachable,
});
}
fn emitInst(lower: *Lower, prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand) Error!void {
lower.result_insts[lower.result_insts_len] = try Instruction.new(prefix, mnemonic, ops);
lower.result_insts_len += 1;
}
fn emitInstWithReloc(
lower: *Lower,
prefix: Prefix,
mnemonic: Mnemonic,
ops: []const Operand,
target: Reloc.Target,
) Error!void {
fn reloc(lower: *Lower, target: Reloc.Target) Immediate {
lower.result_relocs[lower.result_relocs_len] = .{
.lowered_inst_index = lower.result_insts_len,
.target = target,
};
lower.result_relocs_len += 1;
try lower.emitInst(prefix, mnemonic, ops);
return Immediate.s(0);
}
fn mirGeneric(lower: *Lower, inst: Mir.Inst) Error!void {
try lower.emitInst(switch (inst.ops) {
else => .none,
.lock_m_sib,
.lock_m_rip,
.lock_mi_sib_u,
.lock_mi_rip_u,
.lock_mi_sib_s,
.lock_mi_rip_s,
.lock_mr_sib,
.lock_mr_rip,
.lock_moffs_rax,
=> .lock,
}, switch (inst.tag) {
inline else => |tag| if (@hasField(Mnemonic, @tagName(tag)))
@field(Mnemonic, @tagName(tag))
fn emit(lower: *Lower, prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand) Error!void {
lower.result_insts[lower.result_insts_len] = try Instruction.new(prefix, mnemonic, ops);
lower.result_insts_len += 1;
}
fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
const fixes = switch (inst.ops) {
.none => inst.data.none.fixes,
.inst => inst.data.inst.fixes,
.i_s, .i_u => inst.data.i.fixes,
.r => inst.data.r.fixes,
.rr => inst.data.rr.fixes,
.rrr => inst.data.rrr.fixes,
.rrri => inst.data.rrri.fixes,
.rri_s, .rri_u => inst.data.rri.fixes,
.ri_s, .ri_u => inst.data.ri.fixes,
.ri64, .rm_sib, .rm_rip, .mr_sib, .mr_rip => inst.data.rx.fixes,
.mi_sib_u, .mi_rip_u, .mi_sib_s, .mi_rip_s => ._,
.mrr_sib, .mrr_rip, .rrm_sib, .rrm_rip => inst.data.rrx.fixes,
.rmi_sib, .rmi_rip, .mri_sib, .mri_rip => inst.data.rix.fixes,
.rrmi_sib, .rrmi_rip => inst.data.rrix.fixes,
.m_sib, .m_rip, .rax_moffs, .moffs_rax => inst.data.x.fixes,
.extern_fn_reloc, .got_reloc, .direct_reloc, .import_reloc, .tlv_reloc => ._,
else => return lower.fail("TODO lower .{s}", .{@tagName(inst.ops)}),
};
try lower.emit(switch (fixes) {
inline else => |tag| comptime if (std.mem.indexOfScalar(u8, @tagName(tag), ' ')) |space|
@field(Prefix, @tagName(tag)[0..space])
else
unreachable,
.none,
}, mnemonic: {
comptime var max_len = 0;
inline for (@typeInfo(Mnemonic).Enum.fields) |field| max_len = @max(field.name.len, max_len);
var buf: [max_len]u8 = undefined;
const fixes_name = @tagName(fixes);
const pattern = fixes_name[if (std.mem.indexOfScalar(u8, fixes_name, ' ')) |i| i + 1 else 0..];
const wildcard_i = std.mem.indexOfScalar(u8, pattern, '_').?;
const parts = .{ pattern[0..wildcard_i], @tagName(inst.tag), pattern[wildcard_i + 1 ..] };
const err_msg = "unsupported mnemonic: ";
const mnemonic = std.fmt.bufPrint(&buf, "{s}{s}{s}", parts) catch
return lower.fail(err_msg ++ "'{s}{s}{s}'", parts);
break :mnemonic std.meta.stringToEnum(Mnemonic, mnemonic) orelse
return lower.fail(err_msg ++ "'{s}'", .{mnemonic});
}, switch (inst.ops) {
.none => &.{},
.inst => &.{
.{ .imm = lower.reloc(.{ .inst = inst.data.inst.inst }) },
},
.i_s, .i_u => &.{
.{ .imm = lower.imm(inst.ops, inst.data.i) },
.{ .imm = lower.imm(inst.ops, inst.data.i.i) },
},
.r => &.{
.{ .reg = inst.data.r },
.{ .reg = inst.data.r.r1 },
},
.rr => &.{
.{ .reg = inst.data.rr.r1 },
@@ -501,11 +340,11 @@ fn mirGeneric(lower: *Lower, inst: Mir.Inst) Error!void {
.{ .imm = lower.imm(inst.ops, inst.data.rrri.i) },
},
.ri_s, .ri_u => &.{
.{ .reg = inst.data.ri.r },
.{ .reg = inst.data.ri.r1 },
.{ .imm = lower.imm(inst.ops, inst.data.ri.i) },
},
.ri64 => &.{
.{ .reg = inst.data.rx.r },
.{ .reg = inst.data.rx.r1 },
.{ .imm = lower.imm(inst.ops, inst.data.rx.payload) },
},
.rri_s, .rri_u => &.{
@@ -513,33 +352,25 @@ fn mirGeneric(lower: *Lower, inst: Mir.Inst) Error!void {
.{ .reg = inst.data.rri.r2 },
.{ .imm = lower.imm(inst.ops, inst.data.rri.i) },
},
.m_sib, .lock_m_sib, .m_rip, .lock_m_rip => &.{
.{ .mem = lower.mem(inst.ops, inst.data.payload) },
.m_sib, .m_rip => &.{
.{ .mem = lower.mem(inst.ops, inst.data.x.payload) },
},
.mi_sib_s,
.lock_mi_sib_s,
.mi_sib_u,
.lock_mi_sib_u,
.mi_rip_u,
.lock_mi_rip_u,
.mi_rip_s,
.lock_mi_rip_s,
=> &.{
.mi_sib_s, .mi_sib_u, .mi_rip_u, .mi_rip_s => &.{
.{ .mem = lower.mem(inst.ops, inst.data.ix.payload) },
.{ .imm = lower.imm(inst.ops, inst.data.ix.i) },
},
.rm_sib, .rm_rip => &.{
.{ .reg = inst.data.rx.r },
.{ .reg = inst.data.rx.r1 },
.{ .mem = lower.mem(inst.ops, inst.data.rx.payload) },
},
.rmi_sib, .rmi_rip => &.{
.{ .reg = inst.data.rix.r },
.{ .reg = inst.data.rix.r1 },
.{ .mem = lower.mem(inst.ops, inst.data.rix.payload) },
.{ .imm = lower.imm(inst.ops, inst.data.rix.i) },
},
.mr_sib, .lock_mr_sib, .mr_rip, .lock_mr_rip => &.{
.mr_sib, .mr_rip => &.{
.{ .mem = lower.mem(inst.ops, inst.data.rx.payload) },
.{ .reg = inst.data.rx.r },
.{ .reg = inst.data.rx.r1 },
},
.mrr_sib, .mrr_rip => &.{
.{ .mem = lower.mem(inst.ops, inst.data.rrx.payload) },
@@ -548,7 +379,7 @@ fn mirGeneric(lower: *Lower, inst: Mir.Inst) Error!void {
},
.mri_sib, .mri_rip => &.{
.{ .mem = lower.mem(inst.ops, inst.data.rix.payload) },
.{ .reg = inst.data.rix.r },
.{ .reg = inst.data.rix.r1 },
.{ .imm = lower.imm(inst.ops, inst.data.rix.i) },
},
.rrm_sib, .rrm_rip => &.{
@@ -562,180 +393,46 @@ fn mirGeneric(lower: *Lower, inst: Mir.Inst) Error!void {
.{ .mem = lower.mem(inst.ops, inst.data.rrix.payload) },
.{ .imm = lower.imm(inst.ops, inst.data.rrix.i) },
},
else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
});
}
/// Lowers a string MIR instruction (cmps/lods/movs/scas/stos) by combining
/// the repeat prefix and the operand width into a concrete mnemonic at
/// comptime (e.g. `.movs` + width `b` -> `movsb`).
fn mirString(lower: *Lower, inst: Mir.Inst) Error!void {
    switch (inst.ops) {
        // The repeat field maps one-to-one onto a Prefix tag by name.
        .string => try lower.emitInst(switch (inst.data.string.repeat) {
            inline else => |repeat| @field(Prefix, @tagName(repeat)),
        }, switch (inst.tag) {
            // Mnemonic is the tag name with the width suffix appended.
            inline .cmps, .lods, .movs, .scas, .stos => |tag| switch (inst.data.string.width) {
                inline else => |width| @field(Mnemonic, @tagName(tag) ++ @tagName(width)),
            },
            else => unreachable,
        }, &.{}),
        else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
    }
}
/// Lowers a compare-and-exchange-bytes MIR instruction, selecting
/// `cmpxchg8b` or `cmpxchg16b` from the memory operand size and applying a
/// lock prefix for the `lock_*` operand forms.
fn mirCmpxchgBytes(lower: *Lower, inst: Mir.Inst) Error!void {
    const ops: [1]Operand = switch (inst.ops) {
        .m_sib, .lock_m_sib, .m_rip, .lock_m_rip => .{
            .{ .mem = lower.mem(inst.ops, inst.data.payload) },
        },
        else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
    };
    try lower.emitInst(switch (inst.ops) {
        .m_sib, .m_rip => .none,
        .lock_m_sib, .lock_m_rip => .lock,
        else => unreachable,
    }, switch (@divExact(ops[0].bitSize(), 8)) {
        // Operand size in bytes picks the mnemonic variant.
        8 => .cmpxchg8b,
        16 => .cmpxchg16b,
        else => return lower.fail("invalid operand for {s}", .{@tagName(inst.tag)}),
    }, &ops);
}
fn mirMovMoffs(lower: *Lower, inst: Mir.Inst) Error!void {
try lower.emitInst(switch (inst.ops) {
.rax_moffs, .moffs_rax => .none,
.lock_moffs_rax => .lock,
else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
}, .mov, switch (inst.ops) {
.rax_moffs => &.{
.{ .reg = .rax },
.{ .mem = lower.mem(inst.ops, inst.data.payload) },
.{ .mem = lower.mem(inst.ops, inst.data.x.payload) },
},
.moffs_rax, .lock_moffs_rax => &.{
.{ .mem = lower.mem(inst.ops, inst.data.payload) },
.moffs_rax => &.{
.{ .mem = lower.mem(inst.ops, inst.data.x.payload) },
.{ .reg = .rax },
},
else => unreachable,
.extern_fn_reloc => &.{
.{ .imm = lower.reloc(.{ .linker_extern_fn = inst.data.reloc }) },
},
.got_reloc, .direct_reloc, .import_reloc, .tlv_reloc => ops: {
const reg = inst.data.rx.r1;
const extra = lower.mir.extraData(Mir.Reloc, inst.data.rx.payload).data;
_ = lower.reloc(switch (inst.ops) {
.got_reloc => .{ .linker_got = extra },
.direct_reloc => .{ .linker_direct = extra },
.import_reloc => .{ .linker_import = extra },
.tlv_reloc => .{ .linker_tlv = extra },
else => unreachable,
});
break :ops &.{
.{ .reg = reg },
.{ .mem = Memory.rip(Memory.PtrSize.fromBitSize(reg.bitSize()), 0) },
};
},
else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
});
}
/// Lowers a sign-extending move, choosing `movsxd` when extending a 32-bit
/// source into a 32/64-bit destination and `movsx` otherwise.
fn mirMovsx(lower: *Lower, inst: Mir.Inst) Error!void {
    const ops: [2]Operand = switch (inst.ops) {
        .rr => .{
            .{ .reg = inst.data.rr.r1 },
            .{ .reg = inst.data.rr.r2 },
        },
        .rm_sib, .rm_rip => .{
            .{ .reg = inst.data.rx.r },
            .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) },
        },
        else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
    };
    try lower.emitInst(.none, switch (ops[0].bitSize()) {
        // movsxd is the doubleword-to-quadword encoding; every other size
        // combination uses plain movsx.
        32, 64 => switch (ops[1].bitSize()) {
            32 => .movsxd,
            else => .movsx,
        },
        else => .movsx,
    }, &ops);
}
/// Lowers a conditional move. The compound conditions `.z_and_np` and
/// `.nz_or_p` have no single cmovcc encoding and expand to two instructions.
/// Per the builder (`asmCmovccRegisterRegister`), `.z_and_np` clobbers the
/// source operand.
fn mirCmovcc(lower: *Lower, inst: Mir.Inst) Error!void {
    const data: struct { cc: bits.Condition, ops: [2]Operand } = switch (inst.ops) {
        .rr_cc => .{ .cc = inst.data.rr_cc.cc, .ops = .{
            .{ .reg = inst.data.rr_cc.r1 },
            .{ .reg = inst.data.rr_cc.r2 },
        } },
        .rm_sib_cc, .rm_rip_cc => .{ .cc = inst.data.rx_cc.cc, .ops = .{
            .{ .reg = inst.data.rx_cc.r },
            .{ .mem = lower.mem(inst.ops, inst.data.rx_cc.payload) },
        } },
        else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
    };
    switch (data.cc) {
        // Simple conditions map directly onto one cmovcc.
        else => |cc| try lower.emitInst(.none, mnem_cc(.cmov, cc), &data.ops),
        .z_and_np => {
            // cmovnz with swapped operands overwrites the source, then
            // cmovnp performs the actual conditional move.
            try lower.emitInst(.none, mnem_cc(.cmov, .nz), &.{ data.ops[1], data.ops[0] });
            try lower.emitInst(.none, mnem_cc(.cmov, .np), &data.ops);
        },
        .nz_or_p => {
            // Either condition firing performs the move.
            try lower.emitInst(.none, mnem_cc(.cmov, .nz), &data.ops);
            try lower.emitInst(.none, mnem_cc(.cmov, .p), &data.ops);
        },
    }
}
/// Lowers a set-byte-on-condition. The compound conditions `.z_and_np` and
/// `.nz_or_p` have no single setcc encoding; they set the destination and a
/// scratch register separately and combine them with `and`/`or`.
fn mirSetcc(lower: *Lower, inst: Mir.Inst) Error!void {
    // ops[0] is the destination, ops[1] the scratch register.
    const data: struct { cc: bits.Condition, ops: [2]Operand } = switch (inst.ops) {
        .r_cc => .{ .cc = inst.data.r_cc.cc, .ops = .{
            .{ .reg = inst.data.r_cc.r },
            .{ .reg = inst.data.r_cc.scratch },
        } },
        .m_sib_cc, .m_rip_cc => .{ .cc = inst.data.x_cc.cc, .ops = .{
            .{ .mem = lower.mem(inst.ops, inst.data.x_cc.payload) },
            .{ .reg = inst.data.x_cc.scratch },
        } },
        else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
    };
    switch (data.cc) {
        // Simple conditions need only the destination operand.
        else => |cc| try lower.emitInst(.none, mnem_cc(.set, cc), data.ops[0..1]),
        .z_and_np => {
            try lower.emitInst(.none, mnem_cc(.set, .z), data.ops[0..1]);
            try lower.emitInst(.none, mnem_cc(.set, .np), data.ops[1..2]);
            try lower.emitInst(.none, .@"and", data.ops[0..2]);
        },
        .nz_or_p => {
            try lower.emitInst(.none, mnem_cc(.set, .nz), data.ops[0..1]);
            try lower.emitInst(.none, mnem_cc(.set, .p), data.ops[1..2]);
            try lower.emitInst(.none, .@"or", data.ops[0..2]);
        },
    }
}
/// Lowers a conditional jump with a relocation to a local MIR instruction.
/// The compound conditions expand to two jumps: `.z_and_np` jumps past the
/// pair when nz (to `index + 1`, the next MIR instruction) then taking np to
/// the target; `.nz_or_p` jumps to the target on either condition.
fn mirJcc(lower: *Lower, index: Mir.Inst.Index, inst: Mir.Inst) Error!void {
    switch (inst.data.inst_cc.cc) {
        // Simple conditions map directly onto one jcc; the zero immediate is
        // a placeholder patched via the relocation.
        else => |cc| try lower.emitInstWithReloc(.none, mnem_cc(.j, cc), &.{
            .{ .imm = Immediate.s(0) },
        }, .{ .inst = inst.data.inst_cc.inst }),
        .z_and_np => {
            try lower.emitInstWithReloc(.none, mnem_cc(.j, .nz), &.{
                .{ .imm = Immediate.s(0) },
            }, .{ .inst = index + 1 });
            try lower.emitInstWithReloc(.none, mnem_cc(.j, .np), &.{
                .{ .imm = Immediate.s(0) },
            }, .{ .inst = inst.data.inst_cc.inst });
        },
        .nz_or_p => {
            try lower.emitInstWithReloc(.none, mnem_cc(.j, .nz), &.{
                .{ .imm = Immediate.s(0) },
            }, .{ .inst = inst.data.inst_cc.inst });
            try lower.emitInstWithReloc(.none, mnem_cc(.j, .p), &.{
                .{ .imm = Immediate.s(0) },
            }, .{ .inst = inst.data.inst_cc.inst });
        },
    }
}
/// Lowers a pseudo push/pop of a callee-preserved register list into a
/// sequence of single-register push or pop instructions.
fn pushPopRegList(lower: *Lower, comptime mnemonic: Mnemonic, inst: Mir.Inst) Error!void {
    const callee_preserved_regs = abi.getCalleePreservedRegs(lower.target.*);
    // Pushes iterate in reverse so the matching pops restore in forward
    // order, keeping save/restore sequences symmetric.
    var it = inst.data.reg_list.iterator(.{ .direction = switch (mnemonic) {
        .push => .reverse,
        .pop => .forward,
        else => unreachable,
    } });
    while (it.next()) |i| try lower.emit(.none, mnemonic, &.{.{
        .reg = callee_preserved_regs[i],
    }});
}
const abi = @import("abi.zig");

View File

@@ -32,6 +32,210 @@ pub const Inst = struct {
pub const Index = u32;
pub const Fixes = enum(u8) {
/// ___
@"_",
/// ___ Above
_a,
/// ___ Above Or Equal
_ae,
/// ___ Below
_b,
/// ___ Below Or Equal
_be,
/// ___ Carry
_c,
/// ___ Equal
_e,
/// ___ Greater
_g,
/// ___ Greater Or Equal
_ge,
/// ___ Less
_l,
/// ___ Less Or Equal
_le,
/// ___ Not Above
_na,
/// ___ Not Above Or Equal
_nae,
/// ___ Not Below
_nb,
/// ___ Not Below Or Equal
_nbe,
/// ___ Not Carry
_nc,
/// ___ Not Equal
_ne,
/// ___ Not Greater
_ng,
/// ___ Not Greater Or Equal
_nge,
/// ___ Not Less
_nl,
/// ___ Not Less Or Equal
_nle,
/// ___ Not Overflow
_no,
/// ___ Not Parity
_np,
/// ___ Not Sign
_ns,
/// ___ Not Zero
_nz,
/// ___ Overflow
_o,
/// ___ Parity
_p,
/// ___ Parity Even
_pe,
/// ___ Parity Odd
_po,
/// ___ Sign
_s,
/// ___ Zero
_z,
/// ___ String
//_s,
/// ___ String Byte
_sb,
/// ___ String Word
_sw,
/// ___ String Doubleword
_sd,
/// ___ String Quadword
_sq,
/// Repeat ___ String
@"rep _s",
/// Repeat ___ String Byte
@"rep _sb",
/// Repeat ___ String Word
@"rep _sw",
/// Repeat ___ String Doubleword
@"rep _sd",
/// Repeat ___ String Quadword
@"rep _sq",
/// Repeat Equal ___ String
@"repe _s",
/// Repeat Equal ___ String Byte
@"repe _sb",
/// Repeat Equal ___ String Word
@"repe _sw",
/// Repeat Equal ___ String Doubleword
@"repe _sd",
/// Repeat Equal ___ String Quadword
@"repe _sq",
/// Repeat Not Equal ___ String
@"repne _s",
/// Repeat Not Equal ___ String Byte
@"repne _sb",
/// Repeat Not Equal ___ String Word
@"repne _sw",
/// Repeat Not Equal ___ String Doubleword
@"repne _sd",
/// Repeat Not Equal ___ String Quadword
@"repne _sq",
/// Repeat Not Zero ___ String
@"repnz _s",
/// Repeat Not Zero ___ String Byte
@"repnz _sb",
/// Repeat Not Zero ___ String Word
@"repnz _sw",
/// Repeat Not Zero ___ String Doubleword
@"repnz _sd",
/// Repeat Not Zero ___ String Quadword
@"repnz _sq",
/// Repeat Zero ___ String
@"repz _s",
/// Repeat Zero ___ String Byte
@"repz _sb",
/// Repeat Zero ___ String Word
@"repz _sw",
/// Repeat Zero ___ String Doubleword
@"repz _sd",
/// Repeat Zero ___ String Quadword
@"repz _sq",
/// Locked ___
@"lock _",
/// ___ 8 Bytes
_8b,
/// Locked ___ 8 Bytes
@"lock _8b",
/// ___ 16 Bytes
_16b,
/// Locked ___ 16 Bytes
@"lock _16b",
/// Packed ___
p_,
/// Packed ___ Byte
p_b,
/// Packed ___ Word
p_w,
/// Packed ___ Doubleword
p_d,
/// Packed ___ Quadword
p_q,
/// Packed ___ Double Quadword
p_dq,
/// ___ Scalar Single-Precision Values
_ss,
/// ___ Packed Single-Precision Values
_ps,
/// ___ Scalar Double-Precision Values
//_sd,
/// ___ Packed Double-Precision Values
_pd,
/// VEX-Encoded ___
v_,
/// VEX-Encoded Packed ___
vp_,
/// VEX-Encoded Packed ___ Byte
vp_b,
/// VEX-Encoded Packed ___ Word
vp_w,
/// VEX-Encoded Packed ___ Doubleword
vp_d,
/// VEX-Encoded Packed ___ Quadword
vp_q,
/// VEX-Encoded Packed ___ Double Quadword
vp_dq,
/// VEX-Encoded ___ Scalar Single-Precision Values
v_ss,
/// VEX-Encoded ___ Packed Single-Precision Values
v_ps,
/// VEX-Encoded ___ Scalar Double-Precision Values
v_sd,
/// VEX-Encoded ___ Packed Double-Precision Values
v_pd,
/// Mask ___ Byte
k_b,
/// Mask ___ Word
k_w,
/// Mask ___ Doubleword
k_d,
/// Mask ___ Quadword
k_q,
/// Maps an EFLAGS condition code onto its `_cc`-suffixed fixes tag.
/// The compound conditions `.z_and_np` and `.nz_or_p` have no single
/// suffix and are instead handled via pseudo instructions.
pub fn fromCondition(cc: bits.Condition) Fixes {
    return switch (cc) {
        .z_and_np, .nz_or_p => unreachable,
        inline else => |c| @field(Fixes, "_" ++ @tagName(c)),
    };
}
};
pub const Tag = enum(u8) {
/// Add with carry
adc,
@@ -57,22 +261,24 @@ pub const Inst = struct {
call,
/// Convert byte to word
cbw,
/// Convert word to doubleword
cwde,
/// Convert doubleword to quadword
cdqe,
/// Convert word to doubleword
cwd,
/// Convert doubleword to quadword
cdq,
/// Convert doubleword to quadword
cqo,
cdqe,
/// Conditional move
cmov,
/// Logical compare
/// Compare string
cmp,
/// Compare and exchange
cmpxchg,
/// Compare and exchange bytes
cmpxchgb,
cmpxchg,
/// Convert doubleword to quadword
cqo,
/// Convert word to doubleword
cwd,
/// Convert word to doubleword
cwde,
/// Unsigned division
div,
/// Store integer with truncation
@@ -85,10 +291,14 @@ pub const Inst = struct {
imul,
///
int3,
/// Conditional jump
j,
/// Jump
jmp,
/// Load effective address
lea,
/// Load string
lod,
/// Load fence
lfence,
/// Count the number of leading zero bits
@@ -96,6 +306,7 @@ pub const Inst = struct {
/// Memory fence
mfence,
/// Move
/// Move data from string to string
mov,
/// Move data after swapping bytes
movbe,
@@ -105,6 +316,8 @@ pub const Inst = struct {
movq,
/// Move with sign extension
movsx,
/// Move with sign extension
movsxd,
/// Move with zero extension
movzx,
/// Multiply
@@ -139,6 +352,10 @@ pub const Inst = struct {
sar,
/// Integer subtraction with borrow
sbb,
/// Scan string
sca,
/// Set byte on condition
set,
/// Store fence
sfence,
/// Logical shift left
@@ -151,6 +368,8 @@ pub const Inst = struct {
shrd,
/// Subtract
sub,
/// Store string
sto,
/// Syscall
syscall,
/// Test condition
@@ -505,57 +724,10 @@ pub const Inst = struct {
/// Fused multiply-add of scalar single-precision floating-point values
vfmadd231ss,
/// Compare string operands
cmps,
/// Load string
lods,
/// Move data from string to string
movs,
/// Scan string
scas,
/// Store string
stos,
/// Conditional move
cmovcc,
/// Conditional jump
jcc,
/// Set byte on condition
setcc,
/// Mov absolute to/from memory wrt segment register to/from rax
mov_moffs,
/// Jump with relocation to another local MIR instruction
/// Uses `inst` payload.
jmp_reloc,
/// Call to an extern symbol via linker relocation.
/// Uses `relocation` payload.
call_extern,
/// Load effective address of a symbol not yet allocated in VM.
lea_linker,
/// Move address of a symbol not yet allocated in VM.
mov_linker,
/// End of prologue
dbg_prologue_end,
/// Start of epilogue
dbg_epilogue_begin,
/// Update debug line
/// Uses `line_column` payload containing the line and column.
dbg_line,
/// Push registers
/// Uses `payload` payload containing `RegisterList.asInt` directly.
push_regs,
/// Pop registers
/// Uses `payload` payload containing `RegisterList.asInt` directly.
pop_regs,
/// Tombstone
/// Emitter should skip this instruction.
dead,
/// A pseudo instruction that requires special lowering.
/// This should be the only tag in this enum that doesn't
/// directly correspond to one or more instruction mnemonics.
pseudo,
};
pub const Ops = enum(u8) {
@@ -579,12 +751,6 @@ pub const Inst = struct {
/// Register, register, immediate (unsigned) operands.
/// Uses `rri` payload.
rri_u,
/// Register with condition code (CC).
/// Uses `r_cc` payload.
r_cc,
/// Register, register with condition code (CC).
/// Uses `rr_cc` payload.
rr_cc,
/// Register, immediate (sign-extended) operands.
/// Uses `ri` payload.
ri_s,
@@ -609,12 +775,6 @@ pub const Inst = struct {
/// Register, memory (RIP) operands.
/// Uses `rx` payload.
rm_rip,
/// Register, memory (SIB) operands with condition code (CC).
/// Uses `rx_cc` payload.
rm_sib_cc,
/// Register, memory (RIP) operands with condition code (CC).
/// Uses `rx_cc` payload.
rm_rip_cc,
/// Register, memory (SIB), immediate (byte) operands.
/// Uses `rix` payload with extra data of type `MemorySib`.
rmi_sib,
@@ -634,17 +794,11 @@ pub const Inst = struct {
/// Uses `rix` payload with extra data of type `MemoryRip`.
rmi_rip,
/// Single memory (SIB) operand.
/// Uses `payload` with extra data of type `MemorySib`.
/// Uses `x` with extra data of type `MemorySib`.
m_sib,
/// Single memory (RIP) operand.
/// Uses `payload` with extra data of type `MemoryRip`.
/// Uses `x` with extra data of type `MemoryRip`.
m_rip,
/// Single memory (SIB) operand with condition code (CC).
/// Uses `x_cc` with extra data of type `MemorySib`.
m_sib_cc,
/// Single memory (RIP) operand with condition code (CC).
/// Uses `x_cc` with extra data of type `MemoryRip`.
m_rip_cc,
/// Memory (SIB), immediate (unsigned) operands.
/// Uses `ix` payload with extra data of type `MemorySib`.
mi_sib_u,
@@ -676,49 +830,17 @@ pub const Inst = struct {
/// Uses `rix` payload with extra data of type `MemoryRip`.
mri_rip,
/// Rax, Memory moffs.
/// Uses `payload` with extra data of type `MemoryMoffs`.
/// Uses `x` with extra data of type `MemoryMoffs`.
rax_moffs,
/// Memory moffs, rax.
/// Uses `payload` with extra data of type `MemoryMoffs`.
/// Uses `x` with extra data of type `MemoryMoffs`.
moffs_rax,
/// Single memory (SIB) operand with lock prefix.
/// Uses `payload` with extra data of type `MemorySib`.
lock_m_sib,
/// Single memory (RIP) operand with lock prefix.
/// Uses `payload` with extra data of type `MemoryRip`.
lock_m_rip,
/// Memory (SIB), immediate (unsigned) operands with lock prefix.
/// Uses `xi` payload with extra data of type `MemorySib`.
lock_mi_sib_u,
/// Memory (RIP), immediate (unsigned) operands with lock prefix.
/// Uses `xi` payload with extra data of type `MemoryRip`.
lock_mi_rip_u,
/// Memory (SIB), immediate (sign-extend) operands with lock prefix.
/// Uses `xi` payload with extra data of type `MemorySib`.
lock_mi_sib_s,
/// Memory (RIP), immediate (sign-extend) operands with lock prefix.
/// Uses `xi` payload with extra data of type `MemoryRip`.
lock_mi_rip_s,
/// Memory (SIB), register operands with lock prefix.
/// Uses `rx` payload with extra data of type `MemorySib`.
lock_mr_sib,
/// Memory (RIP), register operands with lock prefix.
/// Uses `rx` payload with extra data of type `MemoryRip`.
lock_mr_rip,
/// Memory moffs, rax with lock prefix.
/// Uses `payload` with extra data of type `MemoryMoffs`.
lock_moffs_rax,
/// References another Mir instruction directly.
/// Uses `inst` payload.
inst,
/// References another Mir instruction directly with condition code (CC).
/// Uses `inst_cc` payload.
inst_cc,
/// String repeat and width
/// Uses `string` payload.
string,
/// Linker relocation - external function.
/// Uses `reloc` payload.
reloc,
extern_fn_reloc,
/// Linker relocation - GOT indirection.
/// Uses `rx` payload with extra data of type `Reloc`.
got_reloc,
@@ -731,74 +853,125 @@ pub const Inst = struct {
/// Linker relocation - threadlocal variable via GOT indirection.
/// Uses `rx` payload with extra data of type `Reloc`.
tlv_reloc,
// Pseudo instructions:
/// Conditional move if zero flag set and parity flag not set
/// Clobbers the source operand!
/// Uses `rr` payload.
pseudo_cmov_z_and_np_rr,
/// Conditional move if zero flag not set or parity flag set
/// Uses `rr` payload.
pseudo_cmov_nz_or_p_rr,
/// Conditional move if zero flag not set or parity flag set
/// Uses `rx` payload.
pseudo_cmov_nz_or_p_rm_sib,
/// Conditional move if zero flag not set or parity flag set
/// Uses `rx` payload.
pseudo_cmov_nz_or_p_rm_rip,
/// Set byte if zero flag set and parity flag not set
/// Requires a scratch register!
/// Uses `r_scratch` payload.
pseudo_set_z_and_np_r,
/// Set byte if zero flag set and parity flag not set
/// Requires a scratch register!
/// Uses `x_scratch` payload.
pseudo_set_z_and_np_m_sib,
/// Set byte if zero flag set and parity flag not set
/// Requires a scratch register!
/// Uses `x_scratch` payload.
pseudo_set_z_and_np_m_rip,
/// Set byte if zero flag not set or parity flag set
/// Requires a scratch register!
/// Uses `r_scratch` payload.
pseudo_set_nz_or_p_r,
/// Set byte if zero flag not set or parity flag set
/// Requires a scratch register!
/// Uses `x_scratch` payload.
pseudo_set_nz_or_p_m_sib,
/// Set byte if zero flag not set or parity flag set
/// Requires a scratch register!
/// Uses `x_scratch` payload.
pseudo_set_nz_or_p_m_rip,
/// Jump if zero flag set and parity flag not set
/// Uses `inst` payload.
pseudo_j_z_and_np_inst,
/// Jump if zero flag not set or parity flag set
/// Uses `inst` payload.
pseudo_j_nz_or_p_inst,
/// Push registers
/// Uses `reg_list` payload.
pseudo_push_reg_list,
/// Pop registers
/// Uses `reg_list` payload.
pseudo_pop_reg_list,
/// End of prologue
pseudo_dbg_prologue_end_none,
/// Update debug line
/// Uses `line_column` payload.
pseudo_dbg_line_line_column,
/// Start of epilogue
pseudo_dbg_epilogue_begin_none,
/// Tombstone
/// Emitter should skip this instruction.
pseudo_dead_none,
};
pub const Data = union {
none: struct {
fixes: Fixes = ._,
},
/// References another Mir instruction.
inst: Index,
/// Another instruction with condition code (CC).
/// Used by `jcc`.
inst_cc: struct {
/// Another instruction.
inst: struct {
fixes: Fixes = ._,
inst: Index,
/// A condition code for use with EFLAGS register.
cc: bits.Condition,
},
/// A 32-bit immediate value.
i: u32,
r: Register,
i: struct {
fixes: Fixes = ._,
i: u32,
},
r: struct {
fixes: Fixes = ._,
r1: Register,
},
rr: struct {
fixes: Fixes = ._,
r1: Register,
r2: Register,
},
rrr: struct {
fixes: Fixes = ._,
r1: Register,
r2: Register,
r3: Register,
},
rrri: struct {
fixes: Fixes = ._,
r1: Register,
r2: Register,
r3: Register,
i: u8,
},
rri: struct {
fixes: Fixes = ._,
r1: Register,
r2: Register,
i: u32,
},
/// Condition code (CC), followed by custom payload found in extra.
x_cc: struct {
scratch: Register,
cc: bits.Condition,
payload: u32,
},
/// Register with condition code (CC).
r_cc: struct {
r: Register,
scratch: Register,
cc: bits.Condition,
},
/// Register, register with condition code (CC).
rr_cc: struct {
r1: Register,
r2: Register,
cc: bits.Condition,
},
/// Register, immediate.
ri: struct {
r: Register,
fixes: Fixes = ._,
r1: Register,
i: u32,
},
/// Register, followed by custom payload found in extra.
rx: struct {
r: Register,
payload: u32,
},
/// Register with condition code (CC), followed by custom payload found in extra.
rx_cc: struct {
r: Register,
cc: bits.Condition,
fixes: Fixes = ._,
r1: Register,
payload: u32,
},
/// Immediate, followed by Custom payload found in extra.
@@ -808,39 +981,54 @@ pub const Inst = struct {
},
/// Register, register, followed by Custom payload found in extra.
rrx: struct {
fixes: Fixes = ._,
r1: Register,
r2: Register,
payload: u32,
},
/// Register, byte immediate, followed by Custom payload found in extra.
rix: struct {
r: Register,
fixes: Fixes = ._,
r1: Register,
i: u8,
payload: u32,
},
/// Register, register, byte immediate, followed by Custom payload found in extra.
rrix: struct {
fixes: Fixes = ._,
r1: Register,
r2: Register,
i: u8,
payload: u32,
},
/// String instruction prefix and width.
string: struct {
repeat: bits.StringRepeat,
width: bits.StringWidth,
/// Register, scratch register
r_scratch: struct {
fixes: Fixes = ._,
r1: Register,
scratch_reg: Register,
},
/// Scratch register, followed by Custom payload found in extra.
x_scratch: struct {
fixes: Fixes = ._,
scratch_reg: Register,
payload: u32,
},
/// Custom payload found in extra.
x: struct {
fixes: Fixes = ._,
payload: u32,
},
/// Relocation for the linker where:
/// * `atom_index` is the index of the source
/// * `sym_index` is the index of the target
relocation: Reloc,
reloc: Reloc,
/// Debug line and column position
line_column: struct {
line: u32,
column: u32,
},
/// Index into `extra`. Meaning of what can be found there is context-dependent.
payload: u32,
/// Register list
reg_list: RegisterList,
};
// Make sure we don't accidentally make instructions bigger than expected.
@@ -852,6 +1040,7 @@ pub const Inst = struct {
}
};
/// A linker symbol not yet allocated in VM.
pub const Reloc = struct {
/// Index of the containing atom.
atom_index: u32,
@@ -887,16 +1076,6 @@ pub const RegisterList = struct {
return self.bitset.iterator(options);
}
/// Returns the raw bitset mask of the register list as a `u32`.
pub fn asInt(self: Self) u32 {
    return self.bitset.mask;
}
/// Reconstructs a register list from a mask previously produced by `asInt`.
pub fn fromInt(mask: u32) Self {
    return .{
        .bitset = BitSet{ .mask = @intCast(BitSet.MaskInt, mask) },
    };
}
/// Returns the number of registers present in the list.
pub fn count(self: Self) u32 {
    return @intCast(u32, self.bitset.count());
}

View File

@@ -6,9 +6,6 @@ const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const DW = std.dwarf;
pub const StringRepeat = enum(u3) { none, rep, repe, repz, repne, repnz };
pub const StringWidth = enum(u2) { b, w, d, q };
/// EFLAGS condition codes
pub const Condition = enum(u5) {
/// above