Merge remote-tracking branch 'origin/master' into llvm13

Conflicts:

 * cmake/Findclang.cmake
 * cmake/Findlld.cmake
 * cmake/Findllvm.cmake

In master branch, more search paths were added to these files with "12"
in the path. In this commit I updated them to "13".

 * src/stage1/codegen.cpp
 * src/zig_llvm.cpp
 * src/zig_llvm.h

In master branch, ZigLLVMBuildCmpXchg is improved to add
`is_single_threaded`. However, the LLVM 13 C API has this already, and
in the llvm13 branch, ZigLLVMBuildCmpXchg is deleted in favor of the C
API. In this commit I updated stage2 to use the LLVM 13 C API rather
than depending on an improved ZigLLVMBuildCmpXchg.

Additionally, src/target.zig largestAtomicBits needed to be updated to
include the new m68k ISA.
This commit is contained in:
Andrew Kelley
2021-09-15 14:46:31 -07:00
282 changed files with 34565 additions and 32482 deletions

View File

@@ -52,7 +52,7 @@ pub const Register = enum(u6) {
}
pub fn dwarfLocOp(self: Register) u8 {
return @as(u8, self.id()) + DW.OP_reg0;
return @as(u8, self.id()) + DW.OP.reg0;
}
};

View File

@@ -170,7 +170,7 @@ pub const Register = enum(u5) {
}
pub fn dwarfLocOp(self: Register) u8 {
return @as(u8, self.id()) + DW.OP_reg0;
return @as(u8, self.id()) + DW.OP.reg0;
}
};

View File

@@ -858,6 +858,7 @@ fn genBody(o: *Object, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfM
// TODO use a different strategy for div that communicates to the optimizer
// that wrapping is UB.
.div => try airBinOp( o, inst, " / "),
.rem => try airBinOp( o, inst, " % "),
.cmp_eq => try airBinOp(o, inst, " == "),
.cmp_gt => try airBinOp(o, inst, " > "),
@@ -909,6 +910,9 @@ fn genBody(o: *Object, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfM
.switch_br => try airSwitchBr(o, inst),
.wrap_optional => try airWrapOptional(o, inst),
.struct_field_ptr => try airStructFieldPtr(o, inst),
.array_to_slice => try airArrayToSlice(o, inst),
.cmpxchg_weak => try airCmpxchg(o, inst, "weak"),
.cmpxchg_strong => try airCmpxchg(o, inst, "strong"),
.struct_field_ptr_index_0 => try airStructFieldPtrIndex(o, inst, 0),
.struct_field_ptr_index_1 => try airStructFieldPtrIndex(o, inst, 1),
@@ -1859,6 +1863,60 @@ fn airIsErr(
return local;
}
/// Lowers an AIR `array_to_slice` instruction for the C backend by emitting
/// a compound literal `{ .ptr = <operand>, .len = <array length> }` assigned
/// to a freshly allocated const local, which is returned as the result.
fn airArrayToSlice(o: *Object, inst: Air.Inst.Index) !CValue {
// Dead result: emit no C code at all.
if (o.liveness.isUnused(inst))
return CValue.none;
const inst_ty = o.air.typeOfIndex(inst);
// NOTE(review): allocLocal presumably emits the declaration prefix of the
// local, since the writes below continue the statement with " = {...};" —
// confirm against allocLocal's implementation.
const local = try o.allocLocal(inst_ty, .Const);
const ty_op = o.air.instructions.items(.data)[inst].ty_op;
const writer = o.writer();
const operand = try o.resolveInst(ty_op.operand);
// The operand is a pointer type whose element is an array; the array type
// carries the compile-time-known length used for the slice's `len` field.
const array_len = o.air.typeOf(ty_op.operand).elemType().arrayLen();
try writer.writeAll(" = { .ptr = ");
try o.writeCValue(writer, operand);
try writer.print(", .len = {d} }};\n", .{array_len});
return local;
}
/// Lowers AIR `cmpxchg_weak` / `cmpxchg_strong` for the C backend by emitting
/// a call to the `zig_cmpxchg_weak` / `zig_cmpxchg_strong` helper.
/// `flavor` is either "weak" or "strong" and is spliced into the helper name.
/// The emitted call receives: ptr, expected value, new value, and the C11
/// memory_order constants for the success and failure orderings.
fn airCmpxchg(o: *Object, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue {
const ty_pl = o.air.instructions.items(.data)[inst].ty_pl;
const extra = o.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
const inst_ty = o.air.typeOfIndex(inst);
const ptr = try o.resolveInst(extra.ptr);
const expected_value = try o.resolveInst(extra.expected_value);
const new_value = try o.resolveInst(extra.new_value);
// NOTE(review): no liveness.isUnused early-out here, unlike the other air*
// handlers in this file — the call is emitted even when the result is
// unused. Confirm whether that is intentional (cmpxchg has side effects,
// so the store must happen regardless).
const local = try o.allocLocal(inst_ty, .Const);
const writer = o.writer();
try writer.print(" = zig_cmpxchg_{s}(", .{flavor});
try o.writeCValue(writer, ptr);
try writer.writeAll(", ");
try o.writeCValue(writer, expected_value);
try writer.writeAll(", ");
try o.writeCValue(writer, new_value);
try writer.writeAll(", ");
// Success and failure orderings are rendered as C11 memory_order_* names.
try writeMemoryOrder(writer, extra.successOrder());
try writer.writeAll(", ");
try writeMemoryOrder(writer, extra.failureOrder());
try writer.writeAll(");\n");
return local;
}
/// Writes the C11 `<stdatomic.h>` memory-order constant corresponding to the
/// given Zig atomic ordering to writer `w`.
/// `.Unordered` has no exact C11 counterpart and is conservatively lowered to
/// the weakest C11 ordering, `memory_order_relaxed`.
fn writeMemoryOrder(w: anytype, order: std.builtin.AtomicOrder) !void {
    const str = switch (order) {
        .Unordered => "memory_order_relaxed",
        // Bug fix: Monotonic corresponds to C11/LLVM "relaxed". The previous
        // mapping to memory_order_consume named a different (dependency-
        // ordered, acquire-like) ordering, which is both semantically wrong
        // and stronger than what the AIR instruction requested.
        .Monotonic => "memory_order_relaxed",
        .Acquire => "memory_order_acquire",
        .Release => "memory_order_release",
        .AcqRel => "memory_order_acq_rel",
        .SeqCst => "memory_order_seq_cst",
    };
    return w.writeAll(str);
}
fn IndentWriter(comptime UnderlyingWriter: type) type {
return struct {
const Self = @This();

View File

@@ -392,6 +392,7 @@ pub const Object = struct {
.latest_alloca_inst = null,
.llvm_func = llvm_func,
.blocks = .{},
.single_threaded = module.comp.bin_file.options.single_threaded,
};
defer fg.deinit();
@@ -791,11 +792,13 @@ pub const DeclGen = struct {
const gpa = self.gpa;
const elem_ty = tv.ty.elemType();
const elem_vals = payload.data;
const llvm_elems = try gpa.alloc(*const llvm.Value, elem_vals.len);
const sento = tv.ty.sentinel();
const llvm_elems = try gpa.alloc(*const llvm.Value, elem_vals.len + @boolToInt(sento != null));
defer gpa.free(llvm_elems);
for (elem_vals) |elem_val, i| {
llvm_elems[i] = try self.genTypedValue(.{ .ty = elem_ty, .val = elem_val });
}
if (sento) |sent| llvm_elems[elem_vals.len] = try self.genTypedValue(.{ .ty = elem_ty, .val = sent });
const llvm_elem_ty = try self.llvmType(elem_ty);
return llvm_elem_ty.constArray(
llvm_elems.ptr,
@@ -907,6 +910,31 @@ pub const DeclGen = struct {
// TODO: improve this API, `addAttr(-1, attr_name)`
self.addAttr(val, std.math.maxInt(llvm.AttributeIndex), attr_name);
}
/// If the operand type of an atomic operation is not byte sized we need to
/// widen it before using it and then truncate the result.
/// RMW exchange of floating-point values is bitcasted to same-sized integer
/// types to work around a LLVM deficiency when targeting ARM/AArch64.
/// Returns the widened LLVM integer type to operate on, or null when the
/// type is already usable directly (or is not an atomic-capable type).
fn getAtomicAbiType(dg: *DeclGen, ty: Type, is_rmw_xchg: bool) ?*const llvm.Type {
const target = dg.module.getTarget();
// Scratch storage for enumTagType's out-parameter.
var buffer: Type.Payload.Bits = undefined;
const int_ty = switch (ty.zigTypeTag()) {
.Int => ty,
// Enums are handled via their integer tag type.
.Enum => ty.enumTagType(&buffer),
.Float => {
// Floats are only converted for RMW exchange (see doc comment above);
// other float atomics need no widening here.
if (!is_rmw_xchg) return null;
return dg.context.intType(@intCast(c_uint, ty.abiSize(target) * 8));
},
// bool widens to i8.
.Bool => return dg.context.intType(8),
else => return null,
};
const bit_count = int_ty.intInfo(target).bits;
// Widths that are not a power of two or not a multiple of 8 bits are
// widened to the type's full ABI size in bits; otherwise no widening.
if (!std.math.isPowerOfTwo(bit_count) or (bit_count % 8) != 0) {
return dg.context.intType(@intCast(c_uint, int_ty.abiSize(target) * 8));
} else {
return null;
}
}
};
pub const FuncGen = struct {
@@ -941,6 +969,8 @@ pub const FuncGen = struct {
break_vals: *BreakValues,
}),
single_threaded: bool,
const BreakBasicBlocks = std.ArrayListUnmanaged(*const llvm.BasicBlock);
const BreakValues = std.ArrayListUnmanaged(*const llvm.Value);
@@ -980,6 +1010,7 @@ pub const FuncGen = struct {
.mul => try self.airMul(inst, false),
.mulwrap => try self.airMul(inst, true),
.div => try self.airDiv(inst),
.rem => try self.airRem(inst),
.ptr_add => try self.airPtrAdd(inst),
.ptr_sub => try self.airPtrSub(inst),
@@ -1006,28 +1037,31 @@ pub const FuncGen = struct {
.is_err => try self.airIsErr(inst, .NE, false),
.is_err_ptr => try self.airIsErr(inst, .NE, true),
.alloc => try self.airAlloc(inst),
.arg => try self.airArg(inst),
.bitcast => try self.airBitCast(inst),
.bool_to_int=> try self.airBoolToInt(inst),
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
.switch_br => try self.airSwitchBr(inst),
.breakpoint => try self.airBreakpoint(inst),
.call => try self.airCall(inst),
.cond_br => try self.airCondBr(inst),
.intcast => try self.airIntCast(inst),
.trunc => try self.airTrunc(inst),
.floatcast => try self.airFloatCast(inst),
.ptrtoint => try self.airPtrToInt(inst),
.load => try self.airLoad(inst),
.loop => try self.airLoop(inst),
.not => try self.airNot(inst),
.ret => try self.airRet(inst),
.store => try self.airStore(inst),
.assembly => try self.airAssembly(inst),
.slice_ptr => try self.airSliceField(inst, 0),
.slice_len => try self.airSliceField(inst, 1),
.alloc => try self.airAlloc(inst),
.arg => try self.airArg(inst),
.bitcast => try self.airBitCast(inst),
.bool_to_int => try self.airBoolToInt(inst),
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
.switch_br => try self.airSwitchBr(inst),
.breakpoint => try self.airBreakpoint(inst),
.call => try self.airCall(inst),
.cond_br => try self.airCondBr(inst),
.intcast => try self.airIntCast(inst),
.trunc => try self.airTrunc(inst),
.floatcast => try self.airFloatCast(inst),
.ptrtoint => try self.airPtrToInt(inst),
.load => try self.airLoad(inst),
.loop => try self.airLoop(inst),
.not => try self.airNot(inst),
.ret => try self.airRet(inst),
.store => try self.airStore(inst),
.assembly => try self.airAssembly(inst),
.slice_ptr => try self.airSliceField(inst, 0),
.slice_len => try self.airSliceField(inst, 1),
.array_to_slice => try self.airArrayToSlice(inst),
.cmpxchg_weak => try self.airCmpxchg(inst, true),
.cmpxchg_strong => try self.airCmpxchg(inst, false),
.struct_field_ptr => try self.airStructFieldPtr(inst),
.struct_field_val => try self.airStructFieldVal(inst),
@@ -1246,6 +1280,24 @@ pub const FuncGen = struct {
return null;
}
/// Lowers an AIR `array_to_slice` instruction for the LLVM backend: builds a
/// slice aggregate { ptr, len } where ptr is an inbounds GEP to the first
/// array element and len is the array's compile-time-known length.
fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
// Operand is a pointer-to-array; the element (array) type carries the len.
const array_len = self.air.typeOf(ty_op.operand).elemType().arrayLen();
const usize_llvm_ty = try self.dg.llvmType(Type.initTag(.usize));
const len = usize_llvm_ty.constInt(array_len, .False);
const slice_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
// GEP indices [0, 0]: dereference the array pointer, then index element 0,
// yielding a pointer to the first element for the slice's ptr field.
const indices: [2]*const llvm.Value = .{
usize_llvm_ty.constNull(), usize_llvm_ty.constNull(),
};
const ptr = self.builder.buildInBoundsGEP(operand, &indices, indices.len, "");
// Assemble the slice value: field 0 = ptr, field 1 = len.
const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), ptr, 0, "");
return self.builder.buildInsertValue(partial, len, 1, "");
}
fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -1723,6 +1775,19 @@ pub const FuncGen = struct {
return self.builder.buildUDiv(lhs, rhs, "");
}
/// Lowers an AIR `rem` instruction to the matching LLVM remainder operation:
/// `frem` for floats, `srem` for signed integers, `urem` for everything else.
fn airRem(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;

    const data = self.air.instructions.items(.data)[inst].bin_op;
    const left = try self.resolveInst(data.lhs);
    const right = try self.resolveInst(data.rhs);
    const result_ty = self.air.typeOfIndex(inst);

    // Pick the remainder variant based on the result type.
    if (result_ty.isFloat()) {
        return self.builder.buildFRem(left, right, "");
    } else if (result_ty.isSignedInt()) {
        return self.builder.buildSRem(left, right, "");
    } else {
        return self.builder.buildURem(left, right, "");
    }
}
fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -1944,6 +2009,56 @@ pub const FuncGen = struct {
return null;
}
/// Lowers AIR `cmpxchg_weak` / `cmpxchg_strong` to an LLVM `cmpxchg`
/// instruction. The AIR result is an optional: null when the exchange
/// succeeded, otherwise the value found in memory.
fn airCmpxchg(self: *FuncGen, inst: Air.Inst.Index, is_weak: bool) !?*const llvm.Value {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
var ptr = try self.resolveInst(extra.ptr);
var expected_value = try self.resolveInst(extra.expected_value);
var new_value = try self.resolveInst(extra.new_value);
const operand_ty = self.air.typeOf(extra.ptr).elemType();
// cmpxchg is not an RMW exchange, hence is_rmw_xchg = false.
const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);
if (opt_abi_ty) |abi_ty| {
// operand needs widening and truncating
ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), "");
// Extend according to signedness so comparisons on the widened value
// agree with comparisons on the original value.
if (operand_ty.isSignedInt()) {
expected_value = self.builder.buildSExt(expected_value, abi_ty, "");
new_value = self.builder.buildSExt(new_value, abi_ty, "");
} else {
expected_value = self.builder.buildZExt(expected_value, abi_ty, "");
new_value = self.builder.buildZExt(new_value, abi_ty, "");
}
}
// LLVM cmpxchg yields the aggregate { loaded value, success: i1 }.
const result = self.builder.buildAtomicCmpXchg(
ptr,
expected_value,
new_value,
toLlvmAtomicOrdering(extra.successOrder()),
toLlvmAtomicOrdering(extra.failureOrder()),
llvm.Bool.fromBool(self.single_threaded),
);
// Weakness is set after the fact; the C API builder takes no weak flag.
result.setWeak(llvm.Bool.fromBool(is_weak));
const optional_ty = self.air.typeOfIndex(inst);
var buffer: Type.Payload.ElemType = undefined;
const child_ty = optional_ty.optionalChild(&buffer);
var payload = self.builder.buildExtractValue(result, 0, "");
if (opt_abi_ty != null) {
// Undo the widening performed above.
payload = self.builder.buildTrunc(payload, try self.dg.llvmType(operand_ty), "");
}
const success_bit = self.builder.buildExtractValue(result, 1, "");
if (optional_ty.isPtrLikeOptional()) {
// Pointer-like optional: success selects null, failure yields the old value.
const child_llvm_ty = try self.dg.llvmType(child_ty);
return self.builder.buildSelect(success_bit, child_llvm_ty.constNull(), payload, "");
}
// Regular optional aggregate { payload, non_null }: the non_null bit is the
// inverted success bit, since the optional is null exactly on success.
const optional_llvm_ty = try self.dg.llvmType(optional_ty);
const non_null_bit = self.builder.buildNot(success_bit, "");
const partial = self.builder.buildInsertValue(optional_llvm_ty.getUndef(), payload, 0, "");
return self.builder.buildInsertValue(partial, non_null_bit, 1, "");
}
fn getIntrinsic(self: *FuncGen, name: []const u8) *const llvm.Value {
const id = llvm.lookupIntrinsicID(name.ptr, name.len);
assert(id != 0);
@@ -2135,3 +2250,14 @@ fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void {
.spirv64 => unreachable, // LLVM does not support this backend
}
}
/// Converts a Zig `std.builtin.AtomicOrder` into the equivalent
/// `llvm.AtomicOrdering` value used by the LLVM C API bindings.
fn toLlvmAtomicOrdering(atomic_order: std.builtin.AtomicOrder) llvm.AtomicOrdering {
    switch (atomic_order) {
        .Unordered => return .Unordered,
        .Monotonic => return .Monotonic,
        .Acquire => return .Acquire,
        .Release => return .Release,
        .AcqRel => return .AcquireRelease,
        .SeqCst => return .SequentiallyConsistent,
    }
}

View File

@@ -133,6 +133,9 @@ pub const Value = opaque {
pub const constIntToPtr = LLVMConstIntToPtr;
extern fn LLVMConstIntToPtr(ConstantVal: *const Value, ToType: *const Type) *const Value;
pub const setWeak = LLVMSetWeak;
extern fn LLVMSetWeak(CmpXchgInst: *const Value, IsWeak: Bool) void;
};
pub const Type = opaque {
@@ -299,6 +302,14 @@ pub const Builder = opaque {
Name: [*:0]const u8,
) *const Value;
pub const buildSExt = LLVMBuildSExt;
extern fn LLVMBuildSExt(
*const Builder,
Val: *const Value,
DestTy: *const Type,
Name: [*:0]const u8,
) *const Value;
pub const buildCall = LLVMBuildCall;
extern fn LLVMBuildCall(
*const Builder,
@@ -387,6 +398,15 @@ pub const Builder = opaque {
pub const buildFDiv = LLVMBuildFDiv;
extern fn LLVMBuildFDiv(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
pub const buildURem = LLVMBuildURem;
extern fn LLVMBuildURem(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
pub const buildSRem = LLVMBuildSRem;
extern fn LLVMBuildSRem(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
pub const buildFRem = LLVMBuildFRem;
extern fn LLVMBuildFRem(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
pub const buildAnd = LLVMBuildAnd;
extern fn LLVMBuildAnd(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
@@ -476,6 +496,35 @@ pub const Builder = opaque {
DestTy: *const Type,
Name: [*:0]const u8,
) *const Value;
pub const buildInsertValue = LLVMBuildInsertValue;
extern fn LLVMBuildInsertValue(
*const Builder,
AggVal: *const Value,
EltVal: *const Value,
Index: c_uint,
Name: [*:0]const u8,
) *const Value;
pub const buildAtomicCmpXchg = LLVMBuildAtomicCmpXchg;
extern fn LLVMBuildAtomicCmpXchg(
builder: *const Builder,
ptr: *const Value,
cmp: *const Value,
new_val: *const Value,
success_ordering: AtomicOrdering,
failure_ordering: AtomicOrdering,
is_single_threaded: Bool,
) *const Value;
pub const buildSelect = LLVMBuildSelect;
extern fn LLVMBuildSelect(
*const Builder,
If: *const Value,
Then: *const Value,
Else: *const Value,
Name: [*:0]const u8,
) *const Value;
};
pub const IntPredicate = enum(c_uint) {
@@ -856,3 +905,13 @@ pub const Linkage = enum(c_uint) {
LinkerPrivate,
LinkerPrivateWeak,
};
pub const AtomicOrdering = enum(c_uint) {
NotAtomic = 0,
Unordered = 1,
Monotonic = 2,
Acquire = 4,
Release = 5,
AcquireRelease = 6,
SequentiallyConsistent = 7,
};

View File

@@ -390,7 +390,7 @@ pub const RawRegister = enum(u5) {
x24, x25, x26, x27, x28, x29, x30, x31,
pub fn dwarfLocOp(reg: RawRegister) u8 {
return @enumToInt(reg) + DW.OP_reg0;
return @enumToInt(reg) + DW.OP.reg0;
}
};
@@ -424,7 +424,7 @@ pub const Register = enum(u5) {
}
pub fn dwarfLocOp(reg: Register) u8 {
return @as(u8, @enumToInt(reg)) + DW.OP_reg0;
return @as(u8, @enumToInt(reg)) + DW.OP.reg0;
}
};

View File

@@ -582,8 +582,8 @@ pub const Opcode = enum(u16) {
OpSpecConstantCompositeContinuedINTEL = 6092,
_,
const OpReportIntersectionKHR = OpReportIntersectionNV;
const OpTypeAccelerationStructureKHR = OpTypeAccelerationStructureNV;
const OpReportIntersectionKHR: Opcode = .OpReportIntersectionNV;
const OpTypeAccelerationStructureKHR: Opcode = .OpTypeAccelerationStructureNV;
};
pub const ImageOperands = packed struct {
Bias: bool align(@alignOf(u32)) = false,

View File

@@ -59,14 +59,14 @@ pub const Register = enum(u8) {
pub fn dwarfLocOp(reg: Register) u8 {
return switch (reg.to32()) {
.eax => DW.OP_reg0,
.ecx => DW.OP_reg1,
.edx => DW.OP_reg2,
.ebx => DW.OP_reg3,
.esp => DW.OP_reg4,
.ebp => DW.OP_reg5,
.esi => DW.OP_reg6,
.edi => DW.OP_reg7,
.eax => DW.OP.reg0,
.ecx => DW.OP.reg1,
.edx => DW.OP.reg2,
.ebx => DW.OP.reg3,
.esp => DW.OP.reg4,
.ebp => DW.OP.reg5,
.esi => DW.OP.reg6,
.edi => DW.OP.reg7,
else => unreachable,
};
}

View File

@@ -115,23 +115,23 @@ pub const Register = enum(u8) {
pub fn dwarfLocOp(self: Register) u8 {
return switch (self.to64()) {
.rax => DW.OP_reg0,
.rdx => DW.OP_reg1,
.rcx => DW.OP_reg2,
.rbx => DW.OP_reg3,
.rsi => DW.OP_reg4,
.rdi => DW.OP_reg5,
.rbp => DW.OP_reg6,
.rsp => DW.OP_reg7,
.rax => DW.OP.reg0,
.rdx => DW.OP.reg1,
.rcx => DW.OP.reg2,
.rbx => DW.OP.reg3,
.rsi => DW.OP.reg4,
.rdi => DW.OP.reg5,
.rbp => DW.OP.reg6,
.rsp => DW.OP.reg7,
.r8 => DW.OP_reg8,
.r9 => DW.OP_reg9,
.r10 => DW.OP_reg10,
.r11 => DW.OP_reg11,
.r12 => DW.OP_reg12,
.r13 => DW.OP_reg13,
.r14 => DW.OP_reg14,
.r15 => DW.OP_reg15,
.r8 => DW.OP.reg8,
.r9 => DW.OP.reg9,
.r10 => DW.OP.reg10,
.r11 => DW.OP.reg11,
.r12 => DW.OP.reg12,
.r13 => DW.OP.reg13,
.r14 => DW.OP.reg14,
.r15 => DW.OP.reg15,
else => unreachable,
};