stage2: implement comptime @atomicRmw

 * introduce `float_to_int` and `int_to_float` AIR instructions and
   implement them for the LLVM and C backends.
 * Sema: implement `zirIntToFloat`.
 * Sema: implement `@atomicRmw` comptime evaluation
   - introduce `storePtrVal` for when one needs to store a Value to a
     pointer which is a Value, and assert it happens at comptime.
 * Value: introduce new functionality:
   - intToFloat
   - numberAddWrap
   - numberSubWrap
   - numberMax
   - numberMin
   - bitwiseAnd
   - bitwiseNand (not implemented yet)
   - bitwiseOr
   - bitwiseXor
 * Sema: hook up `zirBitwise` to the new Value bitwise implementations
 * Type: rename `isFloat` to `isRuntimeFloat` because it returns `false`
   for `comptime_float`.
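
Taken together, these changes make `@atomicRmw` usable from comptime code. A
minimal sketch of the newly supported behavior (illustrative only; it is not
part of this commit's test changes):

    const std = @import("std");
    const expect = std.testing.expect;

    fn testComptimeAtomicRmw() !void {
        var x: u32 = 0b1100;
        // @atomicRmw returns the previous value and stores the operation's result.
        const prev = @atomicRmw(u32, &x, .And, 0b1010, .SeqCst);
        try expect(prev == 0b1100);
        try expect(x == 0b1000);
    }

    test "comptime @atomicRmw" {
        comptime try testComptimeAtomicRmw();
    }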
Andrew Kelley
2021-09-20 14:13:33 -07:00
parent 5dc251747b
commit b9d3527e0e
12 changed files with 573 additions and 128 deletions


@@ -311,6 +311,12 @@ pub const Inst = struct {
/// Given a pointer to an array, return a slice.
/// Uses the `ty_op` field.
array_to_slice,
/// Given a float operand, return the integer with the closest mathematical meaning.
/// Uses the `ty_op` field.
float_to_int,
/// Given an integer operand, return the float with the closest mathematical meaning.
/// Uses the `ty_op` field.
int_to_float,
/// Uses the `ty_pl` field with payload `Cmpxchg`.
cmpxchg_weak,
@@ -598,6 +604,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.struct_field_ptr_index_2,
.struct_field_ptr_index_3,
.array_to_slice,
.float_to_int,
.int_to_float,
=> return air.getRefType(datas[inst].ty_op.ty),
.loop,


@@ -293,6 +293,8 @@ fn analyzeInst(
.struct_field_ptr_index_2,
.struct_field_ptr_index_3,
.array_to_slice,
.float_to_int,
.int_to_float,
=> {
const o = inst_datas[inst].ty_op;
return trackOperands(a, new_set, inst, main_tomb, .{ o.operand, .none, .none });


@@ -4830,8 +4830,8 @@ fn analyzeSwitch(
var arena = std.heap.ArenaAllocator.init(gpa);
defer arena.deinit();
-const min_int = try operand_ty.minInt(&arena, mod.getTarget());
-const max_int = try operand_ty.maxInt(&arena, mod.getTarget());
+const min_int = try operand_ty.minInt(&arena.allocator, mod.getTarget());
+const max_int = try operand_ty.maxInt(&arena.allocator, mod.getTarget());
if (try range_set.spans(min_int, max_int, operand_ty)) {
if (special_prong == .@"else") {
return mod.fail(
@@ -5671,10 +5671,13 @@ fn zirBitwise(
if (try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs)) |lhs_val| {
if (try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs)) |rhs_val| {
if (lhs_val.isUndef() or rhs_val.isUndef()) {
return sema.addConstUndef(resolved_type);
}
-return sema.mod.fail(&block.base, src, "TODO implement comptime bitwise operations", .{});
+const result_val = switch (air_tag) {
+.bit_and => try lhs_val.bitwiseAnd(rhs_val, sema.arena),
+.bit_or => try lhs_val.bitwiseOr(rhs_val, sema.arena),
+.xor => try lhs_val.bitwiseXor(rhs_val, sema.arena),
+else => unreachable,
+};
+return sema.addConstant(scalar_type, result_val);
}
}
@@ -6028,8 +6031,8 @@ fn analyzeArithmetic(
}
if (zir_tag == .mod_rem) {
-const dirty_lhs = lhs_ty.isSignedInt() or lhs_ty.isFloat();
-const dirty_rhs = rhs_ty.isSignedInt() or rhs_ty.isFloat();
+const dirty_lhs = lhs_ty.isSignedInt() or lhs_ty.isRuntimeFloat();
+const dirty_rhs = rhs_ty.isSignedInt() or rhs_ty.isRuntimeFloat();
if (dirty_lhs or dirty_rhs) {
return sema.mod.fail(&block.base, src, "remainder division with '{}' and '{}': signed integers and floats must use @rem or @mod", .{ lhs_ty, rhs_ty });
}
@@ -7298,13 +7301,30 @@ fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE
fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
// TODO don't forget the safety check!
return sema.mod.fail(&block.base, src, "TODO: Sema.zirFloatToInt", .{});
}
fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
-return sema.mod.fail(&block.base, src, "TODO: Sema.zirIntToFloat", .{});
+const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
+const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+const dest_ty = try sema.resolveType(block, ty_src, extra.lhs);
+const operand = sema.resolveInst(extra.rhs);
+const operand_ty = sema.typeOf(operand);
+try sema.checkIntType(block, ty_src, dest_ty);
+try sema.checkFloatType(block, operand_src, operand_ty);
+if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
+const target = sema.mod.getTarget();
+const result_val = try val.intToFloat(sema.arena, dest_ty, target);
+return sema.addConstant(dest_ty, result_val);
+}
+try sema.requireRuntimeBlock(block, operand_src);
+return block.addTyOp(.int_to_float, dest_ty, operand);
}
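
With `zirIntToFloat` implemented, `@intToFloat` of a comptime-known operand is
meant to fold to a constant via `Value.intToFloat` (the `.int_big_*` cases
still panic, per the value.zig hunk below). Note that `checkIntType` is
applied here to `dest_ty`, the float destination, and `checkFloatType` to the
integer operand, which looks swapped. A usage sketch, assuming the checks
behave as intended:

    test "comptime int to float" {
        comptime {
            const f = @intToFloat(f32, 100);
            if (f != 100.0) unreachable; // evaluated entirely during Sema
        }
    }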
fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -7542,6 +7562,34 @@ fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr
return sema.mod.fail(&block.base, src, "TODO: Sema.zirOffsetOf", .{});
}
fn checkIntType(
sema: *Sema,
block: *Scope.Block,
ty_src: LazySrcLoc,
ty: Type,
) CompileError!void {
switch (ty.zigTypeTag()) {
.ComptimeInt, .Int => {},
else => return sema.mod.fail(&block.base, ty_src, "expected integer type, found '{}'", .{
ty,
}),
}
}
fn checkFloatType(
sema: *Sema,
block: *Scope.Block,
ty_src: LazySrcLoc,
ty: Type,
) CompileError!void {
switch (ty.zigTypeTag()) {
.ComptimeFloat, .Float => {},
else => return sema.mod.fail(&block.base, ty_src, "expected float type, found '{}'", .{
ty,
}),
}
}
fn checkAtomicOperandType(
sema: *Sema,
block: *Scope.Block,
@@ -7815,9 +7863,23 @@ fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE
const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |operand_val| {
-_ = ptr_val;
-_ = operand_val;
-return mod.fail(&block.base, src, "TODO implement Sema for @atomicRmw at comptime", .{});
+const target = sema.mod.getTarget();
+const stored_val = (try ptr_val.pointerDeref(sema.arena)) orelse break :rs ptr_src;
+const new_val = switch (op) {
+// zig fmt: off
+.Xchg => operand_val,
+.Add  => try stored_val.numberAddWrap(operand_val, operand_ty, sema.arena, target),
+.Sub  => try stored_val.numberSubWrap(operand_val, operand_ty, sema.arena, target),
+.And  => try stored_val.bitwiseAnd   (operand_val, sema.arena),
+.Nand => try stored_val.bitwiseNand  (operand_val, operand_ty, sema.arena),
+.Or   => try stored_val.bitwiseOr    (operand_val, sema.arena),
+.Xor  => try stored_val.bitwiseXor   (operand_val, sema.arena),
+.Max  => try stored_val.numberMax    (operand_val, sema.arena),
+.Min  => try stored_val.numberMin    (operand_val, sema.arena),
+// zig fmt: on
+};
+try sema.storePtrVal(block, src, ptr_val, new_val, operand_ty);
+return sema.addConstant(operand_ty, stored_val);
} else break :rs operand_src;
} else ptr_src;
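
The comptime path mirrors the runtime semantics: the constant returned is
`stored_val`, the value before the operation, while the new value is written
back through `storePtrVal`. Integer `.Add`/`.Sub` use the wrapping helpers,
though actual wraparound is still a `@panic` TODO in value.zig below. A sketch
of what this hunk enables (assumed usage, not from the commit):

    comptime {
        var x: u32 = 5;
        const old = @atomicRmw(u32, &x, .Sub, 2, .SeqCst);
        if (old != 5) unreachable; // previous value is returned
        if (x != 3) unreachable; // new value was stored through the comptime pointer
    }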
@@ -9298,33 +9360,38 @@ fn coerceNum(
const target = sema.mod.getTarget();
-if (dst_zig_tag == .ComptimeInt or dst_zig_tag == .Int) {
-if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) {
-if (val.floatHasFraction()) {
-return sema.mod.fail(&block.base, inst_src, "fractional component prevents float value {} from being casted to type '{}'", .{ val, inst_ty });
-}
-return sema.mod.fail(&block.base, inst_src, "TODO float to int", .{});
-} else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) {
-if (!val.intFitsInType(dest_type, target)) {
-return sema.mod.fail(&block.base, inst_src, "type {} cannot represent integer value {}", .{ dest_type, val });
-}
-return try sema.addConstant(dest_type, val);
-}
-} else if (dst_zig_tag == .ComptimeFloat or dst_zig_tag == .Float) {
-if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) {
-const res = val.floatCast(sema.arena, dest_type) catch |err| switch (err) {
-error.Overflow => return sema.mod.fail(
-&block.base,
-inst_src,
-"cast of value {} to type '{}' loses information",
-.{ val, dest_type },
-),
-error.OutOfMemory => return error.OutOfMemory,
-};
-return try sema.addConstant(dest_type, res);
-} else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) {
-return sema.mod.fail(&block.base, inst_src, "TODO int to float", .{});
-}
-}
+switch (dst_zig_tag) {
+.ComptimeInt, .Int => {
+if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) {
+if (val.floatHasFraction()) {
+return sema.mod.fail(&block.base, inst_src, "fractional component prevents float value {} from being casted to type '{}'", .{ val, inst_ty });
+}
+return sema.mod.fail(&block.base, inst_src, "TODO float to int", .{});
+} else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) {
+if (!val.intFitsInType(dest_type, target)) {
+return sema.mod.fail(&block.base, inst_src, "type {} cannot represent integer value {}", .{ dest_type, val });
+}
+return try sema.addConstant(dest_type, val);
+}
+},
+.ComptimeFloat, .Float => {
+if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) {
+const res = val.floatCast(sema.arena, dest_type) catch |err| switch (err) {
+error.Overflow => return sema.mod.fail(
+&block.base,
+inst_src,
+"cast of value {} to type '{}' loses information",
+.{ val, dest_type },
+),
+error.OutOfMemory => return error.OutOfMemory,
+};
+return try sema.addConstant(dest_type, res);
+} else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) {
+const result_val = try val.intToFloat(sema.arena, dest_type, target);
+return try sema.addConstant(dest_type, result_val);
+}
+},
+else => {},
+}
return null;
}
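
Besides the restructuring into a switch, this hunk replaces the old
"TODO int to float" error with a real implementation, so comptime-known
integers now coerce to float types. Two illustrative examples:

    const a: f64 = 3; // comptime_int coerced to f64 via Value.intToFloat
    const b = @as(f16, 1024); // exact: f16 can represent 1024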
@@ -9375,42 +9442,10 @@ fn storePtr2(
return;
const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
-if (ptr_val.castTag(.decl_ref_mut)) |decl_ref_mut| {
-const const_val = (try sema.resolveMaybeUndefVal(block, operand_src, operand)) orelse
-return sema.mod.fail(&block.base, src, "cannot store runtime value in compile time variable", .{});
-if (decl_ref_mut.data.runtime_index < block.runtime_index) {
-if (block.runtime_cond) |cond_src| {
-const msg = msg: {
-const msg = try sema.mod.errMsg(&block.base, src, "store to comptime variable depends on runtime condition", .{});
-errdefer msg.destroy(sema.gpa);
-try sema.mod.errNote(&block.base, cond_src, msg, "runtime condition here", .{});
-break :msg msg;
-};
-return sema.mod.failWithOwnedErrorMsg(&block.base, msg);
-}
-if (block.runtime_loop) |loop_src| {
-const msg = msg: {
-const msg = try sema.mod.errMsg(&block.base, src, "cannot store to comptime variable in non-inline loop", .{});
-errdefer msg.destroy(sema.gpa);
-try sema.mod.errNote(&block.base, loop_src, msg, "non-inline loop here", .{});
-break :msg msg;
-};
-return sema.mod.failWithOwnedErrorMsg(&block.base, msg);
-}
-unreachable;
-}
-var new_arena = std.heap.ArenaAllocator.init(sema.gpa);
-errdefer new_arena.deinit();
-const new_ty = try elem_ty.copy(&new_arena.allocator);
-const new_val = try const_val.copy(&new_arena.allocator);
-const decl = decl_ref_mut.data.decl;
-var old_arena = decl.value_arena.?.promote(sema.gpa);
-decl.value_arena = null;
-try decl.finalizeNewArena(&new_arena);
-decl.ty = new_ty;
-decl.val = new_val;
-old_arena.deinit();
+const operand_val = (try sema.resolveMaybeUndefVal(block, operand_src, operand)) orelse
+return sema.mod.fail(&block.base, src, "cannot store runtime value in compile time variable", .{});
+if (ptr_val.tag() == .decl_ref_mut) {
+try sema.storePtrVal(block, src, ptr_val, operand_val, elem_ty);
return;
}
break :rs operand_src;
@@ -9422,6 +9457,53 @@ fn storePtr2(
_ = try block.addBinOp(air_tag, ptr, operand);
}
/// Call when you have Value objects rather than Air instructions, and you want to
/// assert the store must be done at comptime.
fn storePtrVal(
sema: *Sema,
block: *Scope.Block,
src: LazySrcLoc,
ptr_val: Value,
operand_val: Value,
operand_ty: Type,
) !void {
if (ptr_val.castTag(.decl_ref_mut)) |decl_ref_mut| {
if (decl_ref_mut.data.runtime_index < block.runtime_index) {
if (block.runtime_cond) |cond_src| {
const msg = msg: {
const msg = try sema.mod.errMsg(&block.base, src, "store to comptime variable depends on runtime condition", .{});
errdefer msg.destroy(sema.gpa);
try sema.mod.errNote(&block.base, cond_src, msg, "runtime condition here", .{});
break :msg msg;
};
return sema.mod.failWithOwnedErrorMsg(&block.base, msg);
}
if (block.runtime_loop) |loop_src| {
const msg = msg: {
const msg = try sema.mod.errMsg(&block.base, src, "cannot store to comptime variable in non-inline loop", .{});
errdefer msg.destroy(sema.gpa);
try sema.mod.errNote(&block.base, loop_src, msg, "non-inline loop here", .{});
break :msg msg;
};
return sema.mod.failWithOwnedErrorMsg(&block.base, msg);
}
unreachable;
}
var new_arena = std.heap.ArenaAllocator.init(sema.gpa);
errdefer new_arena.deinit();
const new_ty = try operand_ty.copy(&new_arena.allocator);
const new_val = try operand_val.copy(&new_arena.allocator);
const decl = decl_ref_mut.data.decl;
var old_arena = decl.value_arena.?.promote(sema.gpa);
decl.value_arena = null;
try decl.finalizeNewArena(&new_arena);
decl.ty = new_ty;
decl.val = new_val;
old_arena.deinit();
return;
}
}
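
`storePtrVal` factors the comptime-store machinery out of `storePtr2`: the
decl's old value arena is swapped for a fresh one holding copies of the new
type and value. A sketch of the kind of comptime store it services (assumed
usage):

    comptime {
        var x: u32 = 1;
        const ptr = &x;
        ptr.* = 2; // storePtr2 sees a decl_ref_mut pointer and calls storePtrVal
        if (x != 2) unreachable;
    }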
fn bitcast(
sema: *Sema,
block: *Scope.Block,
@@ -9801,11 +9883,11 @@ fn cmpNumeric(
const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val|
lhs_val.compareWithZero(.lt)
else
-(lhs_ty.isFloat() or lhs_ty.isSignedInt());
+(lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt());
const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val|
rhs_val.compareWithZero(.lt)
else
-(rhs_ty.isFloat() or rhs_ty.isSignedInt());
+(rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt());
const dest_int_is_signed = lhs_is_signed or rhs_is_signed;
var dest_float_type: ?Type = null;
@@ -10031,7 +10113,7 @@ fn resolvePeerTypes(
}
continue;
}
-if (chosen_ty.isFloat() and candidate_ty.isFloat()) {
+if (chosen_ty.isRuntimeFloat() and candidate_ty.isRuntimeFloat()) {
if (chosen_ty.floatBits(target) < candidate_ty.floatBits(target)) {
chosen = candidate;
chosen_i = candidate_i + 1;
@@ -10049,13 +10131,13 @@ fn resolvePeerTypes(
continue;
}
-if (chosen_ty.zigTypeTag() == .ComptimeFloat and candidate_ty.isFloat()) {
+if (chosen_ty.zigTypeTag() == .ComptimeFloat and candidate_ty.isRuntimeFloat()) {
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
}
-if (chosen_ty.isFloat() and candidate_ty.zigTypeTag() == .ComptimeFloat) {
+if (chosen_ty.isRuntimeFloat() and candidate_ty.zigTypeTag() == .ComptimeFloat) {
continue;
}


@@ -858,6 +858,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.struct_field_ptr => try self.airStructFieldPtr(inst),
.struct_field_val => try self.airStructFieldVal(inst),
.array_to_slice => try self.airArrayToSlice(inst),
.int_to_float => try self.airIntToFloat(inst),
.float_to_int => try self.airFloatToInt(inst),
.cmpxchg_strong => try self.airCmpxchg(inst),
.cmpxchg_weak => try self.airCmpxchg(inst),
.atomic_rmw => try self.airAtomicRmw(inst),
@@ -4769,6 +4771,26 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
else => return self.fail("TODO implement airIntToFloat for {}", .{
self.target.cpu.arch,
}),
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
else => return self.fail("TODO implement airFloatToInt for {}", .{
self.target.cpu.arch,
}),
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);


@@ -917,6 +917,8 @@ fn genBody(o: *Object, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfM
.atomic_rmw => try airAtomicRmw(o, inst),
.atomic_load => try airAtomicLoad(o, inst),
.int_to_float, .float_to_int => try airSimpleCast(o, inst),
.atomic_store_unordered => try airAtomicStore(o, inst, toMemoryOrder(.Unordered)),
.atomic_store_monotonic => try airAtomicStore(o, inst, toMemoryOrder(.Monotonic)),
.atomic_store_release => try airAtomicStore(o, inst, toMemoryOrder(.Release)),
@@ -1899,6 +1901,24 @@ fn airArrayToSlice(o: *Object, inst: Air.Inst.Index) !CValue {
return local;
}
/// Emits a local variable with the result type and initializes it
/// with the operand.
fn airSimpleCast(o: *Object, inst: Air.Inst.Index) !CValue {
if (o.liveness.isUnused(inst))
return CValue.none;
const inst_ty = o.air.typeOfIndex(inst);
const local = try o.allocLocal(inst_ty, .Const);
const ty_op = o.air.instructions.items(.data)[inst].ty_op;
const writer = o.writer();
const operand = try o.resolveInst(ty_op.operand);
try writer.writeAll(" = ");
try o.writeCValue(writer, operand);
try writer.writeAll(";\n");
return local;
}
fn airCmpxchg(o: *Object, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue {
const ty_pl = o.air.instructions.items(.data)[inst].ty_pl;
const extra = o.air.extraData(Air.Cmpxchg, ty_pl.payload).data;


@@ -1122,6 +1122,8 @@ pub const FuncGen = struct {
.slice_ptr => try self.airSliceField(inst, 0),
.slice_len => try self.airSliceField(inst, 1),
.array_to_slice => try self.airArrayToSlice(inst),
.float_to_int => try self.airFloatToInt(inst),
.int_to_float => try self.airIntToFloat(inst),
.cmpxchg_weak => try self.airCmpxchg(inst, true),
.cmpxchg_strong => try self.airCmpxchg(inst, false),
.fence => try self.airFence(inst),
@@ -1372,6 +1374,40 @@ pub const FuncGen = struct {
return self.builder.buildInsertValue(partial, len, 1, "");
}
fn airIntToFloat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const dest_ty = self.air.typeOfIndex(inst);
const dest_llvm_ty = try self.dg.llvmType(dest_ty);
if (dest_ty.isSignedInt()) {
return self.builder.buildSIToFP(operand, dest_llvm_ty, "");
} else {
return self.builder.buildUIToFP(operand, dest_llvm_ty, "");
}
}
fn airFloatToInt(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const dest_ty = self.air.typeOfIndex(inst);
const dest_llvm_ty = try self.dg.llvmType(dest_ty);
// TODO set fast math flag
if (dest_ty.isSignedInt()) {
return self.builder.buildFPToSI(operand, dest_llvm_ty, "");
} else {
return self.builder.buildFPToUI(operand, dest_llvm_ty, "");
}
}
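
For `float_to_int`, the signedness of the destination integer type selects the
LLVM builder. A hedged Zig-level sketch of the mapping:

    fn toSigned(f: f64) i32 {
        return @floatToInt(i32, f); // signed destination: LLVMBuildFPToSI
    }

    fn toUnsigned(f: f64) u32 {
        return @floatToInt(u32, f); // unsigned destination: LLVMBuildFPToUI
    }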
fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -1818,7 +1854,7 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
-if (inst_ty.isFloat()) return self.builder.buildFAdd(lhs, rhs, "");
+if (inst_ty.isRuntimeFloat()) return self.builder.buildFAdd(lhs, rhs, "");
if (wrap) return self.builder.buildAdd(lhs, rhs, "");
if (inst_ty.isSignedInt()) return self.builder.buildNSWAdd(lhs, rhs, "");
return self.builder.buildNUWAdd(lhs, rhs, "");
@@ -1833,7 +1869,7 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
-if (inst_ty.isFloat()) return self.builder.buildFSub(lhs, rhs, "");
+if (inst_ty.isRuntimeFloat()) return self.builder.buildFSub(lhs, rhs, "");
if (wrap) return self.builder.buildSub(lhs, rhs, "");
if (inst_ty.isSignedInt()) return self.builder.buildNSWSub(lhs, rhs, "");
return self.builder.buildNUWSub(lhs, rhs, "");
@@ -1848,7 +1884,7 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
-if (inst_ty.isFloat()) return self.builder.buildFMul(lhs, rhs, "");
+if (inst_ty.isRuntimeFloat()) return self.builder.buildFMul(lhs, rhs, "");
if (wrap) return self.builder.buildMul(lhs, rhs, "");
if (inst_ty.isSignedInt()) return self.builder.buildNSWMul(lhs, rhs, "");
return self.builder.buildNUWMul(lhs, rhs, "");
@@ -1863,7 +1899,7 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
-if (inst_ty.isFloat()) return self.builder.buildFDiv(lhs, rhs, "");
+if (inst_ty.isRuntimeFloat()) return self.builder.buildFDiv(lhs, rhs, "");
if (inst_ty.isSignedInt()) return self.builder.buildSDiv(lhs, rhs, "");
return self.builder.buildUDiv(lhs, rhs, "");
}
@@ -1876,7 +1912,7 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
-if (inst_ty.isFloat()) return self.builder.buildFRem(lhs, rhs, "");
+if (inst_ty.isRuntimeFloat()) return self.builder.buildFRem(lhs, rhs, "");
if (inst_ty.isSignedInt()) return self.builder.buildSRem(lhs, rhs, "");
return self.builder.buildURem(lhs, rhs, "");
}
@@ -2165,7 +2201,7 @@ pub const FuncGen = struct {
const operand_ty = ptr_ty.elemType();
const operand = try self.resolveInst(extra.operand);
const is_signed_int = operand_ty.isSignedInt();
-const is_float = operand_ty.isFloat();
+const is_float = operand_ty.isRuntimeFloat();
const op = toLlvmAtomicRmwBinOp(extra.op(), is_signed_int, is_float);
const ordering = toLlvmAtomicOrdering(extra.ordering());
const single_threaded = llvm.Bool.fromBool(self.single_threaded);


@@ -563,6 +563,38 @@ pub const Builder = opaque {
ordering: AtomicOrdering,
singleThread: Bool,
) *const Value;
pub const buildFPToUI = LLVMBuildFPToUI;
extern fn LLVMBuildFPToUI(
*const Builder,
Val: *const Value,
DestTy: *const Type,
Name: [*:0]const u8,
) *const Value;
pub const buildFPToSI = LLVMBuildFPToSI;
extern fn LLVMBuildFPToSI(
*const Builder,
Val: *const Value,
DestTy: *const Type,
Name: [*:0]const u8,
) *const Value;
pub const buildUIToFP = LLVMBuildUIToFP;
extern fn LLVMBuildUIToFP(
*const Builder,
Val: *const Value,
DestTy: *const Type,
Name: [*:0]const u8,
) *const Value;
pub const buildSIToFP = LLVMBuildSIToFP;
extern fn LLVMBuildSIToFP(
*const Builder,
Val: *const Value,
DestTy: *const Type,
Name: [*:0]const u8,
) *const Value;
};
pub const IntPredicate = enum(c_uint) {


@@ -175,6 +175,8 @@ const Writer = struct {
.struct_field_ptr_index_2,
.struct_field_ptr_index_3,
.array_to_slice,
.int_to_float,
.float_to_int,
=> try w.writeTyOp(s, inst),
.block,


@@ -2523,7 +2523,8 @@ pub const Type = extern union {
};
}
-pub fn isFloat(self: Type) bool {
+/// Returns `false` for `comptime_float`.
+pub fn isRuntimeFloat(self: Type) bool {
return switch (self.tag()) {
.f16,
.f32,
@@ -2536,13 +2537,29 @@ pub const Type = extern union {
};
}
-/// Asserts the type is a fixed-size float.
+/// Returns `true` for `comptime_float`.
+pub fn isAnyFloat(self: Type) bool {
+return switch (self.tag()) {
+.f16,
+.f32,
+.f64,
+.f128,
+.c_longdouble,
+.comptime_float,
+=> true,
+else => false,
+};
+}
+/// Asserts the type is a fixed-size float or comptime_float.
+/// Returns 128 for comptime_float types.
pub fn floatBits(self: Type, target: Target) u16 {
return switch (self.tag()) {
.f16 => 16,
.f32 => 32,
.f64 => 64,
-.f128 => 128,
+.f128, .comptime_float => 128,
.c_longdouble => CType.longdouble.sizeInBits(target),
else => unreachable,
@@ -2879,7 +2896,7 @@ pub const Type = extern union {
}
/// Asserts that self.zigTypeTag() == .Int.
-pub fn minInt(self: Type, arena: *std.heap.ArenaAllocator, target: Target) !Value {
+pub fn minInt(self: Type, arena: *Allocator, target: Target) !Value {
assert(self.zigTypeTag() == .Int);
const info = self.intInfo(target);
@@ -2889,35 +2906,35 @@ pub const Type = extern union {
if ((info.bits - 1) <= std.math.maxInt(u6)) {
const n: i64 = -(@as(i64, 1) << @truncate(u6, info.bits - 1));
-return Value.Tag.int_i64.create(&arena.allocator, n);
+return Value.Tag.int_i64.create(arena, n);
}
-var res = try std.math.big.int.Managed.initSet(&arena.allocator, 1);
+var res = try std.math.big.int.Managed.initSet(arena, 1);
try res.shiftLeft(res, info.bits - 1);
res.negate();
const res_const = res.toConst();
if (res_const.positive) {
-return Value.Tag.int_big_positive.create(&arena.allocator, res_const.limbs);
+return Value.Tag.int_big_positive.create(arena, res_const.limbs);
} else {
-return Value.Tag.int_big_negative.create(&arena.allocator, res_const.limbs);
+return Value.Tag.int_big_negative.create(arena, res_const.limbs);
}
}
/// Asserts that self.zigTypeTag() == .Int.
-pub fn maxInt(self: Type, arena: *std.heap.ArenaAllocator, target: Target) !Value {
+pub fn maxInt(self: Type, arena: *Allocator, target: Target) !Value {
assert(self.zigTypeTag() == .Int);
const info = self.intInfo(target);
if (info.signedness == .signed and (info.bits - 1) <= std.math.maxInt(u6)) {
const n: i64 = (@as(i64, 1) << @truncate(u6, info.bits - 1)) - 1;
-return Value.Tag.int_i64.create(&arena.allocator, n);
+return Value.Tag.int_i64.create(arena, n);
} else if (info.signedness == .signed and info.bits <= std.math.maxInt(u6)) {
const n: u64 = (@as(u64, 1) << @truncate(u6, info.bits)) - 1;
-return Value.Tag.int_u64.create(&arena.allocator, n);
+return Value.Tag.int_u64.create(arena, n);
}
-var res = try std.math.big.int.Managed.initSet(&arena.allocator, 1);
+var res = try std.math.big.int.Managed.initSet(arena, 1);
try res.shiftLeft(res, info.bits - @boolToInt(info.signedness == .signed));
const one = std.math.big.int.Const{
.limbs = &[_]std.math.big.Limb{1},
@@ -2927,9 +2944,9 @@ pub const Type = extern union {
const res_const = res.toConst();
if (res_const.positive) {
-return Value.Tag.int_big_positive.create(&arena.allocator, res_const.limbs);
+return Value.Tag.int_big_positive.create(arena, res_const.limbs);
} else {
-return Value.Tag.int_big_negative.create(&arena.allocator, res_const.limbs);
+return Value.Tag.int_big_negative.create(arena, res_const.limbs);
}
}
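
Besides the rename, `minInt`/`maxInt` now take a plain `*Allocator` rather
than a `*std.heap.ArenaAllocator`, moving the `&arena.allocator` conversion to
the callers (see the analyzeSwitch hunk above). The float predicates now split
three ways; a comment-only illustration for a hypothetical `ty` with tag
`comptime_float`:

    // ty.isRuntimeFloat()  == false : no machine representation
    // ty.isAnyFloat()      == true  : includes comptime_float
    // ty.floatBits(target) == 128   : comptime_float computes as f128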


@@ -1524,6 +1524,230 @@ pub const Value = extern union {
};
}
pub fn intToFloat(val: Value, allocator: *Allocator, dest_ty: Type, target: Target) !Value {
switch (val.tag()) {
.undef, .zero, .one => return val,
.int_u64 => {
return intToFloatInner(val.castTag(.int_u64).?.data, allocator, dest_ty, target);
},
.int_i64 => {
return intToFloatInner(val.castTag(.int_i64).?.data, allocator, dest_ty, target);
},
.int_big_positive, .int_big_negative => @panic("big int to float"),
else => unreachable,
}
}
fn intToFloatInner(x: anytype, arena: *Allocator, dest_ty: Type, target: Target) !Value {
switch (dest_ty.floatBits(target)) {
16 => return Value.Tag.float_16.create(arena, @intToFloat(f16, x)),
32 => return Value.Tag.float_32.create(arena, @intToFloat(f32, x)),
64 => return Value.Tag.float_64.create(arena, @intToFloat(f64, x)),
128 => return Value.Tag.float_128.create(arena, @intToFloat(f128, x)),
else => unreachable,
}
}
/// Supports both floats and ints; handles undefined.
pub fn numberAddWrap(
lhs: Value,
rhs: Value,
ty: Type,
arena: *Allocator,
target: Target,
) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
if (ty.isAnyFloat()) {
return floatAdd(lhs, rhs, ty, arena);
}
const result = try intAdd(lhs, rhs, arena);
const max = try ty.maxInt(arena, target);
if (compare(result, .gt, max, ty)) {
@panic("TODO comptime wrapping integer addition");
}
const min = try ty.minInt(arena, target);
if (compare(result, .lt, min, ty)) {
@panic("TODO comptime wrapping integer addition");
}
return result;
}
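
The overflow branches above still panic; the intended semantics match Zig's
wrapping operators. A sketch of the target behavior for u8 (the wrap itself is
the unimplemented part):

    comptime {
        var x: u8 = 255;
        x +%= 1; // wrapping add: 255 +% 1 == 0, what numberAddWrap should produce
        if (x != 0) unreachable;
    }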
/// Supports both floats and ints; handles undefined.
pub fn numberSubWrap(
lhs: Value,
rhs: Value,
ty: Type,
arena: *Allocator,
target: Target,
) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
if (ty.isAnyFloat()) {
return floatSub(lhs, rhs, ty, arena);
}
const result = try intSub(lhs, rhs, arena);
const max = try ty.maxInt(arena, target);
if (compare(result, .gt, max, ty)) {
@panic("TODO comptime wrapping integer subtraction");
}
const min = try ty.minInt(arena, target);
if (compare(result, .lt, min, ty)) {
@panic("TODO comptime wrapping integer subtraction");
}
return result;
}
/// Supports both floats and ints; handles undefined.
pub fn numberMax(lhs: Value, rhs: Value, arena: *Allocator) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space);
const rhs_bigint = rhs.toBigInt(&rhs_space);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
switch (lhs_bigint.order(rhs_bigint)) {
.lt => result_bigint.copy(rhs_bigint),
.gt, .eq => result_bigint.copy(lhs_bigint),
}
const result_limbs = result_bigint.limbs[0..result_bigint.len];
if (result_bigint.positive) {
return Value.Tag.int_big_positive.create(arena, result_limbs);
} else {
return Value.Tag.int_big_negative.create(arena, result_limbs);
}
}
/// Supports both floats and ints; handles undefined.
pub fn numberMin(lhs: Value, rhs: Value, arena: *Allocator) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space);
const rhs_bigint = rhs.toBigInt(&rhs_space);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
switch (lhs_bigint.order(rhs_bigint)) {
.lt => result_bigint.copy(lhs_bigint),
.gt, .eq => result_bigint.copy(rhs_bigint),
}
const result_limbs = result_bigint.limbs[0..result_bigint.len];
if (result_bigint.positive) {
return Value.Tag.int_big_positive.create(arena, result_limbs);
} else {
return Value.Tag.int_big_negative.create(arena, result_limbs);
}
}
/// operands must be integers; handles undefined.
pub fn bitwiseAnd(lhs: Value, rhs: Value, arena: *Allocator) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space);
const rhs_bigint = rhs.toBigInt(&rhs_space);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitAnd(lhs_bigint, rhs_bigint);
const result_limbs = result_bigint.limbs[0..result_bigint.len];
if (result_bigint.positive) {
return Value.Tag.int_big_positive.create(arena, result_limbs);
} else {
return Value.Tag.int_big_negative.create(arena, result_limbs);
}
}
/// operands must be integers; handles undefined.
pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: *Allocator) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
_ = ty;
_ = arena;
@panic("TODO comptime bitwise NAND");
}
/// operands must be integers; handles undefined.
pub fn bitwiseOr(lhs: Value, rhs: Value, arena: *Allocator) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space);
const rhs_bigint = rhs.toBigInt(&rhs_space);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitOr(lhs_bigint, rhs_bigint);
const result_limbs = result_bigint.limbs[0..result_bigint.len];
if (result_bigint.positive) {
return Value.Tag.int_big_positive.create(arena, result_limbs);
} else {
return Value.Tag.int_big_negative.create(arena, result_limbs);
}
}
/// operands must be integers; handles undefined.
pub fn bitwiseXor(lhs: Value, rhs: Value, arena: *Allocator) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space);
const rhs_bigint = rhs.toBigInt(&rhs_space);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitXor(lhs_bigint, rhs_bigint);
const result_limbs = result_bigint.limbs[0..result_bigint.len];
if (result_bigint.positive) {
return Value.Tag.int_big_positive.create(arena, result_limbs);
} else {
return Value.Tag.int_big_negative.create(arena, result_limbs);
}
}
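
These helpers back both the `zirBitwise` hookup and the `.And`/`.Or`/`.Xor`
atomic ops, so plain bitwise operators on comptime-known integers now fold.
For example:

    comptime {
        const a: u8 = 0b1100;
        const b: u8 = 0b1010;
        if ((a & b) != 0b1000) unreachable; // Value.bitwiseAnd
        if ((a | b) != 0b1110) unreachable; // Value.bitwiseOr
        if ((a ^ b) != 0b0110) unreachable; // Value.bitwiseXor
    }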
pub fn intAdd(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.


@@ -138,3 +138,32 @@ test "atomic store" {
@atomicStore(u32, &x, 12345678, .SeqCst);
try expect(@atomicLoad(u32, &x, .SeqCst) == 12345678);
}
test "atomic store comptime" {
comptime try testAtomicStore();
try testAtomicStore();
}
fn testAtomicStore() !void {
var x: u32 = 0;
@atomicStore(u32, &x, 1, .SeqCst);
try expect(@atomicLoad(u32, &x, .SeqCst) == 1);
@atomicStore(u32, &x, 12345678, .SeqCst);
try expect(@atomicLoad(u32, &x, .SeqCst) == 12345678);
}
test "atomicrmw with floats" {
try testAtomicRmwFloat();
comptime try testAtomicRmwFloat();
}
fn testAtomicRmwFloat() !void {
var x: f32 = 0;
try expect(x == 0);
_ = @atomicRmw(f32, &x, .Xchg, 1, .SeqCst);
try expect(x == 1);
_ = @atomicRmw(f32, &x, .Add, 5, .SeqCst);
try expect(x == 6);
_ = @atomicRmw(f32, &x, .Sub, 2, .SeqCst);
try expect(x == 4);
}
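
These tests cover `.Xchg`, `.Add`, and `.Sub`. A hypothetical extension for
the other newly folded operations might look like the following (`.Nand` is
omitted because `bitwiseNand` still panics; the test name and helper are
illustrative):

    test "atomicrmw with int max/min/and" {
        try testAtomicRmwIntExtra();
        comptime try testAtomicRmwIntExtra();
    }

    fn testAtomicRmwIntExtra() !void {
        var x: u32 = 10;
        _ = @atomicRmw(u32, &x, .Max, 20, .SeqCst);
        try expect(x == 20);
        _ = @atomicRmw(u32, &x, .Min, 5, .SeqCst);
        try expect(x == 5);
        _ = @atomicRmw(u32, &x, .And, 0b100, .SeqCst);
        try expect(x == 4);
    }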


@@ -3,35 +3,6 @@ const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const builtin = @import("builtin");
test "atomic store comptime" {
comptime try testAtomicStore();
try testAtomicStore();
}
fn testAtomicStore() !void {
var x: u32 = 0;
@atomicStore(u32, &x, 1, .SeqCst);
try expect(@atomicLoad(u32, &x, .SeqCst) == 1);
@atomicStore(u32, &x, 12345678, .SeqCst);
try expect(@atomicLoad(u32, &x, .SeqCst) == 12345678);
}
test "atomicrmw with floats" {
try testAtomicRmwFloat();
comptime try testAtomicRmwFloat();
}
fn testAtomicRmwFloat() !void {
var x: f32 = 0;
try expect(x == 0);
_ = @atomicRmw(f32, &x, .Xchg, 1, .SeqCst);
try expect(x == 1);
_ = @atomicRmw(f32, &x, .Add, 5, .SeqCst);
try expect(x == 6);
_ = @atomicRmw(f32, &x, .Sub, 2, .SeqCst);
try expect(x == 4);
}
test "atomicrmw with ints" {
try testAtomicRmwInt();
comptime try testAtomicRmwInt();