Sema: rewrite comptime arithmetic
This commit reworks how Sema handles arithmetic on comptime-known values, fixing many bugs in the process. The general pattern is that arithmetic on comptime-known values is now handled by the new namespace `Sema.arith`. Functions handling comptime arithmetic no longer live on `Value`; this is because some of them can emit compile errors, so some *can't* go on `Value`. Only semantic analysis should really be doing arithmetic on `Value`s anyway, so it makes sense for it to integrate more tightly with `Sema`. This commit also implements more coherent rules surrounding how `undefined` interacts with comptime and mixed-comptime-runtime arithmetic. The rules are as follows. * If an operation cannot trigger Illegal Behavior, and any operand is `undefined`, the result is `undefined`. This includes operations like `0 *| undef`, where the LHS logically *could* be used to determine a defined result. This is partly to simplify the language, but mostly to permit codegen backends to represent `undefined` values as completely invalid states. * If an operation *can* trigger Illegal Behavior, and any operand is `undefined`, then Illegal Behavior results. This occurs even if the operand in question isn't the one that "decides" illegal behavior; for instance, `undef / 1` is Illegal Behavior. This is for the same reasons as described above. * An operation which would trigger Illegal Behavior, when evaluated at comptime, instead triggers a compile error. Additionally, if one operand is comptime-known undef, such that the other (runtime-known) operand isn't needed to determine that Illegal Behavior would occur, the compile error is triggered. * The only situation in which an operation with one comptime-known operand has a comptime-known result is if that operand is undefined, in which case the result is either undefined or a compile error per the above rules.
This could potentially be loosened in future (for instance, `0 * rt` could be comptime-known 0 with a runtime assertion that `rt` is not undefined), but at least for now, defining it more conservatively simplifies the language and allows us to easily change this in future if desired. This commit fixes many bugs regarding the handling of `undefined`, particularly in vectors. Along with a collection of smaller tests, two very large test cases are added to check arithmetic on `undefined`. The operations which have been rewritten in this PR are: * `+`, `+%`, `+|`, `@addWithOverflow` * `-`, `-%`, `-|`, `@subWithOverflow` * `*`, `*%`, `*|`, `@mulWithOverflow` * `/`, `@divFloor`, `@divTrunc`, `@divExact` * `%`, `@rem`, `@mod` Other arithmetic operations are currently unchanged. Resolves: #22743 Resolves: #22745 Resolves: #22748 Resolves: #22749 Resolves: #22914
This commit is contained in:
1769
src/Sema.zig
1769
src/Sema.zig
File diff suppressed because it is too large
Load Diff
1609
src/Sema/arith.zig
Normal file
1609
src/Sema/arith.zig
Normal file
File diff suppressed because it is too large
Load Diff
890
src/Value.zig
890
src/Value.zig
@@ -895,7 +895,7 @@ pub fn readFromPackedMemory(
|
||||
}
|
||||
|
||||
/// Asserts that the value is a float or an integer.
|
||||
pub fn toFloat(val: Value, comptime T: type, zcu: *Zcu) T {
|
||||
pub fn toFloat(val: Value, comptime T: type, zcu: *const Zcu) T {
|
||||
return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
|
||||
.int => |int| switch (int.storage) {
|
||||
.big_int => |big_int| big_int.toFloat(T),
|
||||
@@ -1415,17 +1415,63 @@ pub fn unionValue(val: Value, zcu: *Zcu) Value {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn isUndef(val: Value, zcu: *Zcu) bool {
|
||||
pub fn isUndef(val: Value, zcu: *const Zcu) bool {
|
||||
return zcu.intern_pool.isUndef(val.toIntern());
|
||||
}
|
||||
|
||||
/// TODO: check for cases such as array that is not marked undef but all the element
|
||||
/// values are marked undef, or struct that is not marked undef but all fields are marked
|
||||
/// undef, etc.
|
||||
pub fn isUndefDeep(val: Value, zcu: *Zcu) bool {
|
||||
pub fn isUndefDeep(val: Value, zcu: *const Zcu) bool {
|
||||
return val.isUndef(zcu);
|
||||
}
|
||||
|
||||
/// `val` must have a numeric or vector type.
|
||||
/// Returns whether `val` is undefined or contains any undefined elements.
|
||||
pub fn anyScalarIsUndef(val: Value, zcu: *const Zcu) bool {
|
||||
switch (zcu.intern_pool.indexToKey(val.toIntern())) {
|
||||
.undef => return true,
|
||||
.int, .float => return false,
|
||||
.aggregate => |agg| {
|
||||
assert(Type.fromInterned(agg.ty).zigTypeTag(zcu) == .vector);
|
||||
for (agg.storage.values()) |elem_val| {
|
||||
if (Value.fromInterned(elem_val).isUndef(zcu)) return true;
|
||||
}
|
||||
return false;
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
|
||||
/// `val` must have a numeric or vector type.
/// Returns whether `val` contains any elements equal to zero.
/// Asserts that `val` is not `undefined`, nor a vector containing any `undefined` elements.
pub fn anyScalarIsZero(val: Value, zcu: *Zcu) bool {
    // Undefined scalars are a precondition violation; callers must rule them out first
    // (e.g. via `anyScalarIsUndef`), since "is undef zero?" has no meaningful answer.
    assert(!val.anyScalarIsUndef(zcu));

    switch (zcu.intern_pool.indexToKey(val.toIntern())) {
        // Scalar case: directly compare the single numeric value against zero.
        .int, .float => return val.eqlScalarNum(.zero_comptime_int, zcu),
        .aggregate => |agg| {
            // Only vectors are valid aggregates here per the doc contract.
            assert(Type.fromInterned(agg.ty).zigTypeTag(zcu) == .vector);
            switch (agg.storage) {
                // Byte-packed vector storage: a zero element is literally a 0 byte.
                .bytes => |str| {
                    const len = Type.fromInterned(agg.ty).vectorLen(zcu);
                    const slice = str.toSlice(len, &zcu.intern_pool);
                    return std.mem.indexOfScalar(u8, slice, 0) != null;
                },
                // General element storage: compare each element against zero, mirroring
                // the scalar arm above. (Previously this checked `isUndef`, which is
                // always false after the assert, so zeros were never detected.)
                .elems => |elems| {
                    for (elems) |elem| {
                        if (Value.fromInterned(elem).eqlScalarNum(.zero_comptime_int, zcu)) return true;
                    }
                    return false;
                },
                // Splat storage: one representative element decides for the whole vector.
                .repeated_elem => |elem| return Value.fromInterned(elem).eqlScalarNum(.zero_comptime_int, zcu),
            }
        },
        else => unreachable,
    }
}
|
||||
|
||||
/// Asserts the value is not undefined and not unreachable.
|
||||
/// C pointers with an integer value of 0 are also considered null.
|
||||
pub fn isNull(val: Value, zcu: *Zcu) bool {
|
||||
@@ -1572,295 +1618,6 @@ pub const OverflowArithmeticResult = struct {
|
||||
wrapped_result: Value,
|
||||
};
|
||||
|
||||
/// Supports (vectors of) integers only; asserts neither operand is undefined.
|
||||
pub fn intAddSat(
|
||||
lhs: Value,
|
||||
rhs: Value,
|
||||
ty: Type,
|
||||
arena: Allocator,
|
||||
pt: Zcu.PerThread,
|
||||
) !Value {
|
||||
if (ty.zigTypeTag(pt.zcu) == .vector) {
|
||||
const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
|
||||
const scalar_ty = ty.scalarType(pt.zcu);
|
||||
for (result_data, 0..) |*scalar, i| {
|
||||
const lhs_elem = try lhs.elemValue(pt, i);
|
||||
const rhs_elem = try rhs.elemValue(pt, i);
|
||||
scalar.* = (try intAddSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern();
|
||||
}
|
||||
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
|
||||
.ty = ty.toIntern(),
|
||||
.storage = .{ .elems = result_data },
|
||||
} }));
|
||||
}
|
||||
return intAddSatScalar(lhs, rhs, ty, arena, pt);
|
||||
}
|
||||
|
||||
/// Supports integers only; asserts neither operand is undefined.
|
||||
pub fn intAddSatScalar(
|
||||
lhs: Value,
|
||||
rhs: Value,
|
||||
ty: Type,
|
||||
arena: Allocator,
|
||||
pt: Zcu.PerThread,
|
||||
) !Value {
|
||||
const zcu = pt.zcu;
|
||||
assert(!lhs.isUndef(zcu));
|
||||
assert(!rhs.isUndef(zcu));
|
||||
|
||||
const info = ty.intInfo(zcu);
|
||||
|
||||
var lhs_space: Value.BigIntSpace = undefined;
|
||||
var rhs_space: Value.BigIntSpace = undefined;
|
||||
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
|
||||
const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
|
||||
const limbs = try arena.alloc(
|
||||
std.math.big.Limb,
|
||||
std.math.big.int.calcTwosCompLimbCount(info.bits),
|
||||
);
|
||||
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
|
||||
result_bigint.addSat(lhs_bigint, rhs_bigint, info.signedness, info.bits);
|
||||
return pt.intValue_big(ty, result_bigint.toConst());
|
||||
}
|
||||
|
||||
/// Supports (vectors of) integers only; asserts neither operand is undefined.
|
||||
pub fn intSubSat(
|
||||
lhs: Value,
|
||||
rhs: Value,
|
||||
ty: Type,
|
||||
arena: Allocator,
|
||||
pt: Zcu.PerThread,
|
||||
) !Value {
|
||||
if (ty.zigTypeTag(pt.zcu) == .vector) {
|
||||
const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
|
||||
const scalar_ty = ty.scalarType(pt.zcu);
|
||||
for (result_data, 0..) |*scalar, i| {
|
||||
const lhs_elem = try lhs.elemValue(pt, i);
|
||||
const rhs_elem = try rhs.elemValue(pt, i);
|
||||
scalar.* = (try intSubSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern();
|
||||
}
|
||||
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
|
||||
.ty = ty.toIntern(),
|
||||
.storage = .{ .elems = result_data },
|
||||
} }));
|
||||
}
|
||||
return intSubSatScalar(lhs, rhs, ty, arena, pt);
|
||||
}
|
||||
|
||||
/// Supports integers only; asserts neither operand is undefined.
|
||||
pub fn intSubSatScalar(
|
||||
lhs: Value,
|
||||
rhs: Value,
|
||||
ty: Type,
|
||||
arena: Allocator,
|
||||
pt: Zcu.PerThread,
|
||||
) !Value {
|
||||
const zcu = pt.zcu;
|
||||
|
||||
assert(!lhs.isUndef(zcu));
|
||||
assert(!rhs.isUndef(zcu));
|
||||
|
||||
const info = ty.intInfo(zcu);
|
||||
|
||||
var lhs_space: Value.BigIntSpace = undefined;
|
||||
var rhs_space: Value.BigIntSpace = undefined;
|
||||
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
|
||||
const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
|
||||
const limbs = try arena.alloc(
|
||||
std.math.big.Limb,
|
||||
std.math.big.int.calcTwosCompLimbCount(info.bits),
|
||||
);
|
||||
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
|
||||
result_bigint.subSat(lhs_bigint, rhs_bigint, info.signedness, info.bits);
|
||||
return pt.intValue_big(ty, result_bigint.toConst());
|
||||
}
|
||||
|
||||
pub fn intMulWithOverflow(
|
||||
lhs: Value,
|
||||
rhs: Value,
|
||||
ty: Type,
|
||||
arena: Allocator,
|
||||
pt: Zcu.PerThread,
|
||||
) !OverflowArithmeticResult {
|
||||
const zcu = pt.zcu;
|
||||
if (ty.zigTypeTag(zcu) == .vector) {
|
||||
const vec_len = ty.vectorLen(zcu);
|
||||
const overflowed_data = try arena.alloc(InternPool.Index, vec_len);
|
||||
const result_data = try arena.alloc(InternPool.Index, vec_len);
|
||||
const scalar_ty = ty.scalarType(zcu);
|
||||
for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
|
||||
const lhs_elem = try lhs.elemValue(pt, i);
|
||||
const rhs_elem = try rhs.elemValue(pt, i);
|
||||
const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt);
|
||||
of.* = of_math_result.overflow_bit.toIntern();
|
||||
scalar.* = of_math_result.wrapped_result.toIntern();
|
||||
}
|
||||
return OverflowArithmeticResult{
|
||||
.overflow_bit = Value.fromInterned(try pt.intern(.{ .aggregate = .{
|
||||
.ty = (try pt.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
|
||||
.storage = .{ .elems = overflowed_data },
|
||||
} })),
|
||||
.wrapped_result = Value.fromInterned(try pt.intern(.{ .aggregate = .{
|
||||
.ty = ty.toIntern(),
|
||||
.storage = .{ .elems = result_data },
|
||||
} })),
|
||||
};
|
||||
}
|
||||
return intMulWithOverflowScalar(lhs, rhs, ty, arena, pt);
|
||||
}
|
||||
|
||||
pub fn intMulWithOverflowScalar(
|
||||
lhs: Value,
|
||||
rhs: Value,
|
||||
ty: Type,
|
||||
arena: Allocator,
|
||||
pt: Zcu.PerThread,
|
||||
) !OverflowArithmeticResult {
|
||||
const zcu = pt.zcu;
|
||||
const info = ty.intInfo(zcu);
|
||||
|
||||
if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) {
|
||||
return .{
|
||||
.overflow_bit = try pt.undefValue(Type.u1),
|
||||
.wrapped_result = try pt.undefValue(ty),
|
||||
};
|
||||
}
|
||||
|
||||
var lhs_space: Value.BigIntSpace = undefined;
|
||||
var rhs_space: Value.BigIntSpace = undefined;
|
||||
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
|
||||
const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
|
||||
const limbs = try arena.alloc(
|
||||
std.math.big.Limb,
|
||||
lhs_bigint.limbs.len + rhs_bigint.limbs.len,
|
||||
);
|
||||
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
|
||||
const limbs_buffer = try arena.alloc(
|
||||
std.math.big.Limb,
|
||||
std.math.big.int.calcMulLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len, 1),
|
||||
);
|
||||
result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, arena);
|
||||
|
||||
const overflowed = !result_bigint.toConst().fitsInTwosComp(info.signedness, info.bits);
|
||||
if (overflowed) {
|
||||
result_bigint.truncate(result_bigint.toConst(), info.signedness, info.bits);
|
||||
}
|
||||
|
||||
return OverflowArithmeticResult{
|
||||
.overflow_bit = try pt.intValue(Type.u1, @intFromBool(overflowed)),
|
||||
.wrapped_result = try pt.intValue_big(ty, result_bigint.toConst()),
|
||||
};
|
||||
}
|
||||
|
||||
/// Supports both (vectors of) floats and ints; handles undefined scalars.
|
||||
pub fn numberMulWrap(
|
||||
lhs: Value,
|
||||
rhs: Value,
|
||||
ty: Type,
|
||||
arena: Allocator,
|
||||
pt: Zcu.PerThread,
|
||||
) !Value {
|
||||
const zcu = pt.zcu;
|
||||
if (ty.zigTypeTag(zcu) == .vector) {
|
||||
const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(zcu));
|
||||
const scalar_ty = ty.scalarType(zcu);
|
||||
for (result_data, 0..) |*scalar, i| {
|
||||
const lhs_elem = try lhs.elemValue(pt, i);
|
||||
const rhs_elem = try rhs.elemValue(pt, i);
|
||||
scalar.* = (try numberMulWrapScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern();
|
||||
}
|
||||
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
|
||||
.ty = ty.toIntern(),
|
||||
.storage = .{ .elems = result_data },
|
||||
} }));
|
||||
}
|
||||
return numberMulWrapScalar(lhs, rhs, ty, arena, pt);
|
||||
}
|
||||
|
||||
/// Supports both floats and ints; handles undefined.
|
||||
pub fn numberMulWrapScalar(
|
||||
lhs: Value,
|
||||
rhs: Value,
|
||||
ty: Type,
|
||||
arena: Allocator,
|
||||
pt: Zcu.PerThread,
|
||||
) !Value {
|
||||
const zcu = pt.zcu;
|
||||
if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return Value.undef;
|
||||
|
||||
if (ty.zigTypeTag(zcu) == .comptime_int) {
|
||||
return intMul(lhs, rhs, ty, undefined, arena, pt);
|
||||
}
|
||||
|
||||
if (ty.isAnyFloat()) {
|
||||
return floatMul(lhs, rhs, ty, arena, pt);
|
||||
}
|
||||
|
||||
const overflow_result = try intMulWithOverflow(lhs, rhs, ty, arena, pt);
|
||||
return overflow_result.wrapped_result;
|
||||
}
|
||||
|
||||
/// Supports (vectors of) integers only; asserts neither operand is undefined.
|
||||
pub fn intMulSat(
|
||||
lhs: Value,
|
||||
rhs: Value,
|
||||
ty: Type,
|
||||
arena: Allocator,
|
||||
pt: Zcu.PerThread,
|
||||
) !Value {
|
||||
if (ty.zigTypeTag(pt.zcu) == .vector) {
|
||||
const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
|
||||
const scalar_ty = ty.scalarType(pt.zcu);
|
||||
for (result_data, 0..) |*scalar, i| {
|
||||
const lhs_elem = try lhs.elemValue(pt, i);
|
||||
const rhs_elem = try rhs.elemValue(pt, i);
|
||||
scalar.* = (try intMulSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern();
|
||||
}
|
||||
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
|
||||
.ty = ty.toIntern(),
|
||||
.storage = .{ .elems = result_data },
|
||||
} }));
|
||||
}
|
||||
return intMulSatScalar(lhs, rhs, ty, arena, pt);
|
||||
}
|
||||
|
||||
/// Supports (vectors of) integers only; asserts neither operand is undefined.
|
||||
pub fn intMulSatScalar(
|
||||
lhs: Value,
|
||||
rhs: Value,
|
||||
ty: Type,
|
||||
arena: Allocator,
|
||||
pt: Zcu.PerThread,
|
||||
) !Value {
|
||||
const zcu = pt.zcu;
|
||||
|
||||
assert(!lhs.isUndef(zcu));
|
||||
assert(!rhs.isUndef(zcu));
|
||||
|
||||
const info = ty.intInfo(zcu);
|
||||
|
||||
var lhs_space: Value.BigIntSpace = undefined;
|
||||
var rhs_space: Value.BigIntSpace = undefined;
|
||||
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
|
||||
const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
|
||||
const limbs = try arena.alloc(
|
||||
std.math.big.Limb,
|
||||
@max(
|
||||
// For the saturate
|
||||
std.math.big.int.calcTwosCompLimbCount(info.bits),
|
||||
lhs_bigint.limbs.len + rhs_bigint.limbs.len,
|
||||
),
|
||||
);
|
||||
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
|
||||
const limbs_buffer = try arena.alloc(
|
||||
std.math.big.Limb,
|
||||
std.math.big.int.calcMulLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len, 1),
|
||||
);
|
||||
result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, arena);
|
||||
result_bigint.saturate(result_bigint.toConst(), info.signedness, info.bits);
|
||||
return pt.intValue_big(ty, result_bigint.toConst());
|
||||
}
|
||||
|
||||
/// Supports both floats and ints; handles undefined.
|
||||
pub fn numberMax(lhs: Value, rhs: Value, zcu: *Zcu) Value {
|
||||
if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return undef;
|
||||
@@ -2126,143 +1883,6 @@ pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, pt:
|
||||
return pt.intValue_big(ty, result_bigint.toConst());
|
||||
}
|
||||
|
||||
/// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting
|
||||
/// overflow_idx to the vector index the overflow was at (or 0 for a scalar).
|
||||
pub fn intDiv(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator: Allocator, pt: Zcu.PerThread) !Value {
|
||||
var overflow: usize = undefined;
|
||||
return intDivInner(lhs, rhs, ty, &overflow, allocator, pt) catch |err| switch (err) {
|
||||
error.Overflow => {
|
||||
const is_vec = ty.isVector(pt.zcu);
|
||||
overflow_idx.* = if (is_vec) overflow else 0;
|
||||
const safe_ty = if (is_vec) try pt.vectorType(.{
|
||||
.len = ty.vectorLen(pt.zcu),
|
||||
.child = .comptime_int_type,
|
||||
}) else Type.comptime_int;
|
||||
return intDivInner(lhs, rhs, safe_ty, undefined, allocator, pt) catch |err1| switch (err1) {
|
||||
error.Overflow => unreachable,
|
||||
else => |e| return e,
|
||||
};
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
}
|
||||
|
||||
fn intDivInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator: Allocator, pt: Zcu.PerThread) !Value {
|
||||
if (ty.zigTypeTag(pt.zcu) == .vector) {
|
||||
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
|
||||
const scalar_ty = ty.scalarType(pt.zcu);
|
||||
for (result_data, 0..) |*scalar, i| {
|
||||
const lhs_elem = try lhs.elemValue(pt, i);
|
||||
const rhs_elem = try rhs.elemValue(pt, i);
|
||||
const val = intDivScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt) catch |err| switch (err) {
|
||||
error.Overflow => {
|
||||
overflow_idx.* = i;
|
||||
return error.Overflow;
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
scalar.* = val.toIntern();
|
||||
}
|
||||
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
|
||||
.ty = ty.toIntern(),
|
||||
.storage = .{ .elems = result_data },
|
||||
} }));
|
||||
}
|
||||
return intDivScalar(lhs, rhs, ty, allocator, pt);
|
||||
}
|
||||
|
||||
pub fn intDivScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
|
||||
// TODO is this a performance issue? maybe we should try the operation without
|
||||
// resorting to BigInt first.
|
||||
const zcu = pt.zcu;
|
||||
var lhs_space: Value.BigIntSpace = undefined;
|
||||
var rhs_space: Value.BigIntSpace = undefined;
|
||||
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
|
||||
const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
|
||||
const limbs_q = try allocator.alloc(
|
||||
std.math.big.Limb,
|
||||
lhs_bigint.limbs.len,
|
||||
);
|
||||
const limbs_r = try allocator.alloc(
|
||||
std.math.big.Limb,
|
||||
rhs_bigint.limbs.len,
|
||||
);
|
||||
const limbs_buffer = try allocator.alloc(
|
||||
std.math.big.Limb,
|
||||
std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
|
||||
);
|
||||
var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
|
||||
var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
|
||||
result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
|
||||
if (ty.toIntern() != .comptime_int_type) {
|
||||
const info = ty.intInfo(pt.zcu);
|
||||
if (!result_q.toConst().fitsInTwosComp(info.signedness, info.bits)) {
|
||||
return error.Overflow;
|
||||
}
|
||||
}
|
||||
return pt.intValue_big(ty, result_q.toConst());
|
||||
}
|
||||
|
||||
pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
|
||||
if (ty.zigTypeTag(pt.zcu) == .vector) {
|
||||
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
|
||||
const scalar_ty = ty.scalarType(pt.zcu);
|
||||
for (result_data, 0..) |*scalar, i| {
|
||||
const lhs_elem = try lhs.elemValue(pt, i);
|
||||
const rhs_elem = try rhs.elemValue(pt, i);
|
||||
scalar.* = (try intDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern();
|
||||
}
|
||||
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
|
||||
.ty = ty.toIntern(),
|
||||
.storage = .{ .elems = result_data },
|
||||
} }));
|
||||
}
|
||||
return intDivFloorScalar(lhs, rhs, ty, allocator, pt);
|
||||
}
|
||||
|
||||
pub fn intDivFloorScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
|
||||
// TODO is this a performance issue? maybe we should try the operation without
|
||||
// resorting to BigInt first.
|
||||
const zcu = pt.zcu;
|
||||
var lhs_space: Value.BigIntSpace = undefined;
|
||||
var rhs_space: Value.BigIntSpace = undefined;
|
||||
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
|
||||
const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
|
||||
const limbs_q = try allocator.alloc(
|
||||
std.math.big.Limb,
|
||||
lhs_bigint.limbs.len,
|
||||
);
|
||||
const limbs_r = try allocator.alloc(
|
||||
std.math.big.Limb,
|
||||
rhs_bigint.limbs.len,
|
||||
);
|
||||
const limbs_buffer = try allocator.alloc(
|
||||
std.math.big.Limb,
|
||||
std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
|
||||
);
|
||||
var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
|
||||
var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
|
||||
result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
|
||||
return pt.intValue_big(ty, result_q.toConst());
|
||||
}
|
||||
|
||||
pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
|
||||
if (ty.zigTypeTag(pt.zcu) == .vector) {
|
||||
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
|
||||
const scalar_ty = ty.scalarType(pt.zcu);
|
||||
for (result_data, 0..) |*scalar, i| {
|
||||
const lhs_elem = try lhs.elemValue(pt, i);
|
||||
const rhs_elem = try rhs.elemValue(pt, i);
|
||||
scalar.* = (try intModScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern();
|
||||
}
|
||||
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
|
||||
.ty = ty.toIntern(),
|
||||
.storage = .{ .elems = result_data },
|
||||
} }));
|
||||
}
|
||||
return intModScalar(lhs, rhs, ty, allocator, pt);
|
||||
}
|
||||
|
||||
pub fn intModScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
|
||||
// TODO is this a performance issue? maybe we should try the operation without
|
||||
// resorting to BigInt first.
|
||||
@@ -2386,80 +2006,6 @@ pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, pt: Zcu.PerThrea
|
||||
} }));
|
||||
}
|
||||
|
||||
/// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting
|
||||
/// overflow_idx to the vector index the overflow was at (or 0 for a scalar).
|
||||
pub fn intMul(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator: Allocator, pt: Zcu.PerThread) !Value {
|
||||
const zcu = pt.zcu;
|
||||
var overflow: usize = undefined;
|
||||
return intMulInner(lhs, rhs, ty, &overflow, allocator, pt) catch |err| switch (err) {
|
||||
error.Overflow => {
|
||||
const is_vec = ty.isVector(zcu);
|
||||
overflow_idx.* = if (is_vec) overflow else 0;
|
||||
const safe_ty = if (is_vec) try pt.vectorType(.{
|
||||
.len = ty.vectorLen(zcu),
|
||||
.child = .comptime_int_type,
|
||||
}) else Type.comptime_int;
|
||||
return intMulInner(lhs, rhs, safe_ty, undefined, allocator, pt) catch |err1| switch (err1) {
|
||||
error.Overflow => unreachable,
|
||||
else => |e| return e,
|
||||
};
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
}
|
||||
|
||||
fn intMulInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator: Allocator, pt: Zcu.PerThread) !Value {
|
||||
const zcu = pt.zcu;
|
||||
if (ty.zigTypeTag(zcu) == .vector) {
|
||||
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
|
||||
const scalar_ty = ty.scalarType(zcu);
|
||||
for (result_data, 0..) |*scalar, i| {
|
||||
const lhs_elem = try lhs.elemValue(pt, i);
|
||||
const rhs_elem = try rhs.elemValue(pt, i);
|
||||
const val = intMulScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt) catch |err| switch (err) {
|
||||
error.Overflow => {
|
||||
overflow_idx.* = i;
|
||||
return error.Overflow;
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
scalar.* = val.toIntern();
|
||||
}
|
||||
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
|
||||
.ty = ty.toIntern(),
|
||||
.storage = .{ .elems = result_data },
|
||||
} }));
|
||||
}
|
||||
return intMulScalar(lhs, rhs, ty, allocator, pt);
|
||||
}
|
||||
|
||||
pub fn intMulScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
|
||||
const zcu = pt.zcu;
|
||||
if (ty.toIntern() != .comptime_int_type) {
|
||||
const res = try intMulWithOverflowScalar(lhs, rhs, ty, allocator, pt);
|
||||
if (res.overflow_bit.compareAllWithZero(.neq, zcu)) return error.Overflow;
|
||||
return res.wrapped_result;
|
||||
}
|
||||
// TODO is this a performance issue? maybe we should try the operation without
|
||||
// resorting to BigInt first.
|
||||
var lhs_space: Value.BigIntSpace = undefined;
|
||||
var rhs_space: Value.BigIntSpace = undefined;
|
||||
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
|
||||
const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
|
||||
const limbs = try allocator.alloc(
|
||||
std.math.big.Limb,
|
||||
lhs_bigint.limbs.len + rhs_bigint.limbs.len,
|
||||
);
|
||||
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
|
||||
const limbs_buffer = try allocator.alloc(
|
||||
std.math.big.Limb,
|
||||
std.math.big.int.calcMulLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len, 1),
|
||||
);
|
||||
defer allocator.free(limbs_buffer);
|
||||
result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, allocator);
|
||||
return pt.intValue_big(ty, result_bigint.toConst());
|
||||
}
|
||||
|
||||
pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, pt: Zcu.PerThread) !Value {
|
||||
const zcu = pt.zcu;
|
||||
if (ty.zigTypeTag(zcu) == .vector) {
|
||||
@@ -2773,318 +2319,6 @@ pub fn shrScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu
|
||||
return pt.intValue_big(ty, result_bigint.toConst());
|
||||
}
|
||||
|
||||
pub fn floatNeg(
|
||||
val: Value,
|
||||
float_type: Type,
|
||||
arena: Allocator,
|
||||
pt: Zcu.PerThread,
|
||||
) !Value {
|
||||
const zcu = pt.zcu;
|
||||
if (float_type.zigTypeTag(zcu) == .vector) {
|
||||
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
|
||||
const scalar_ty = float_type.scalarType(zcu);
|
||||
for (result_data, 0..) |*scalar, i| {
|
||||
const elem_val = try val.elemValue(pt, i);
|
||||
scalar.* = (try floatNegScalar(elem_val, scalar_ty, pt)).toIntern();
|
||||
}
|
||||
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
|
||||
.ty = float_type.toIntern(),
|
||||
.storage = .{ .elems = result_data },
|
||||
} }));
|
||||
}
|
||||
return floatNegScalar(val, float_type, pt);
|
||||
}
|
||||
|
||||
pub fn floatNegScalar(val: Value, float_type: Type, pt: Zcu.PerThread) !Value {
|
||||
const zcu = pt.zcu;
|
||||
const target = zcu.getTarget();
|
||||
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
|
||||
16 => .{ .f16 = -val.toFloat(f16, zcu) },
|
||||
32 => .{ .f32 = -val.toFloat(f32, zcu) },
|
||||
64 => .{ .f64 = -val.toFloat(f64, zcu) },
|
||||
80 => .{ .f80 = -val.toFloat(f80, zcu) },
|
||||
128 => .{ .f128 = -val.toFloat(f128, zcu) },
|
||||
else => unreachable,
|
||||
};
|
||||
return Value.fromInterned(try pt.intern(.{ .float = .{
|
||||
.ty = float_type.toIntern(),
|
||||
.storage = storage,
|
||||
} }));
|
||||
}
|
||||
|
||||
pub fn floatAdd(
|
||||
lhs: Value,
|
||||
rhs: Value,
|
||||
float_type: Type,
|
||||
arena: Allocator,
|
||||
pt: Zcu.PerThread,
|
||||
) !Value {
|
||||
const zcu = pt.zcu;
|
||||
if (float_type.zigTypeTag(zcu) == .vector) {
|
||||
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
|
||||
const scalar_ty = float_type.scalarType(zcu);
|
||||
for (result_data, 0..) |*scalar, i| {
|
||||
const lhs_elem = try lhs.elemValue(pt, i);
|
||||
const rhs_elem = try rhs.elemValue(pt, i);
|
||||
scalar.* = (try floatAddScalar(lhs_elem, rhs_elem, scalar_ty, pt)).toIntern();
|
||||
}
|
||||
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
|
||||
.ty = float_type.toIntern(),
|
||||
.storage = .{ .elems = result_data },
|
||||
} }));
|
||||
}
|
||||
return floatAddScalar(lhs, rhs, float_type, pt);
|
||||
}
|
||||
|
||||
pub fn floatAddScalar(
|
||||
lhs: Value,
|
||||
rhs: Value,
|
||||
float_type: Type,
|
||||
pt: Zcu.PerThread,
|
||||
) !Value {
|
||||
const zcu = pt.zcu;
|
||||
const target = zcu.getTarget();
|
||||
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
|
||||
16 => .{ .f16 = lhs.toFloat(f16, zcu) + rhs.toFloat(f16, zcu) },
|
||||
32 => .{ .f32 = lhs.toFloat(f32, zcu) + rhs.toFloat(f32, zcu) },
|
||||
64 => .{ .f64 = lhs.toFloat(f64, zcu) + rhs.toFloat(f64, zcu) },
|
||||
80 => .{ .f80 = lhs.toFloat(f80, zcu) + rhs.toFloat(f80, zcu) },
|
||||
128 => .{ .f128 = lhs.toFloat(f128, zcu) + rhs.toFloat(f128, zcu) },
|
||||
else => unreachable,
|
||||
};
|
||||
return Value.fromInterned(try pt.intern(.{ .float = .{
|
||||
.ty = float_type.toIntern(),
|
||||
.storage = storage,
|
||||
} }));
|
||||
}
|
||||
|
||||
pub fn floatSub(
|
||||
lhs: Value,
|
||||
rhs: Value,
|
||||
float_type: Type,
|
||||
arena: Allocator,
|
||||
pt: Zcu.PerThread,
|
||||
) !Value {
|
||||
const zcu = pt.zcu;
|
||||
if (float_type.zigTypeTag(zcu) == .vector) {
|
||||
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
|
||||
const scalar_ty = float_type.scalarType(zcu);
|
||||
for (result_data, 0..) |*scalar, i| {
|
||||
const lhs_elem = try lhs.elemValue(pt, i);
|
||||
const rhs_elem = try rhs.elemValue(pt, i);
|
||||
scalar.* = (try floatSubScalar(lhs_elem, rhs_elem, scalar_ty, pt)).toIntern();
|
||||
}
|
||||
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
|
||||
.ty = float_type.toIntern(),
|
||||
.storage = .{ .elems = result_data },
|
||||
} }));
|
||||
}
|
||||
return floatSubScalar(lhs, rhs, float_type, pt);
|
||||
}
|
||||
|
||||
/// Computes `lhs - rhs` where both operands are scalar values of the float
/// type `float_type`. The subtraction is evaluated at that type's native
/// precision so rounding matches runtime behavior for the type.
/// Asserts the bit width of `float_type` is one of 16/32/64/80/128.
pub fn floatSubScalar(
    lhs: Value,
    rhs: Value,
    float_type: Type,
    pt: Zcu.PerThread,
) !Value {
    const zcu = pt.zcu;
    const result_storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(zcu.getTarget())) {
        16 => .{ .f16 = lhs.toFloat(f16, zcu) - rhs.toFloat(f16, zcu) },
        32 => .{ .f32 = lhs.toFloat(f32, zcu) - rhs.toFloat(f32, zcu) },
        64 => .{ .f64 = lhs.toFloat(f64, zcu) - rhs.toFloat(f64, zcu) },
        80 => .{ .f80 = lhs.toFloat(f80, zcu) - rhs.toFloat(f80, zcu) },
        128 => .{ .f128 = lhs.toFloat(f128, zcu) - rhs.toFloat(f128, zcu) },
        else => unreachable,
    };
    return Value.fromInterned(try pt.intern(.{ .float = .{
        .ty = float_type.toIntern(),
        .storage = result_storage,
    } }));
}
|
||||
|
||||
/// Computes `lhs / rhs` for values of `float_type`, which may be a scalar
/// float type or a vector of floats. Vector operands are handled
/// elementwise via `floatDivScalar`; `arena` backs the temporary element
/// list for the vector case.
pub fn floatDiv(
    lhs: Value,
    rhs: Value,
    float_type: Type,
    arena: Allocator,
    pt: Zcu.PerThread,
) !Value {
    const zcu = pt.zcu;
    // Scalar case: defer directly to the scalar implementation.
    if (float_type.zigTypeTag(zcu) != .vector) {
        return floatDivScalar(lhs, rhs, float_type, pt);
    }
    // Vector case: divide each element pair, then intern the results as an
    // aggregate of the vector type.
    const elem_ty = float_type.scalarType(zcu);
    const elems = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
    for (elems, 0..) |*elem, i| {
        const lhs_elem = try lhs.elemValue(pt, i);
        const rhs_elem = try rhs.elemValue(pt, i);
        elem.* = (try floatDivScalar(lhs_elem, rhs_elem, elem_ty, pt)).toIntern();
    }
    return Value.fromInterned(try pt.intern(.{ .aggregate = .{
        .ty = float_type.toIntern(),
        .storage = .{ .elems = elems },
    } }));
}
|
||||
|
||||
/// Computes `lhs / rhs` where both operands are scalar values of the float
/// type `float_type`. The division is evaluated at that type's native
/// precision so rounding matches runtime behavior for the type.
/// Asserts the bit width of `float_type` is one of 16/32/64/80/128.
pub fn floatDivScalar(
    lhs: Value,
    rhs: Value,
    float_type: Type,
    pt: Zcu.PerThread,
) !Value {
    const zcu = pt.zcu;
    const result_storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(zcu.getTarget())) {
        16 => .{ .f16 = lhs.toFloat(f16, zcu) / rhs.toFloat(f16, zcu) },
        32 => .{ .f32 = lhs.toFloat(f32, zcu) / rhs.toFloat(f32, zcu) },
        64 => .{ .f64 = lhs.toFloat(f64, zcu) / rhs.toFloat(f64, zcu) },
        80 => .{ .f80 = lhs.toFloat(f80, zcu) / rhs.toFloat(f80, zcu) },
        128 => .{ .f128 = lhs.toFloat(f128, zcu) / rhs.toFloat(f128, zcu) },
        else => unreachable,
    };
    return Value.fromInterned(try pt.intern(.{ .float = .{
        .ty = float_type.toIntern(),
        .storage = result_storage,
    } }));
}
|
||||
|
||||
/// Computes `@divFloor(lhs, rhs)` for values of `float_type`, which may be
/// a scalar float type or a vector of floats. Vector operands are handled
/// elementwise via `floatDivFloorScalar`; `arena` backs the temporary
/// element list for the vector case.
pub fn floatDivFloor(
    lhs: Value,
    rhs: Value,
    float_type: Type,
    arena: Allocator,
    pt: Zcu.PerThread,
) !Value {
    const zcu = pt.zcu;
    // Scalar case: defer directly to the scalar implementation.
    if (float_type.zigTypeTag(zcu) != .vector) {
        return floatDivFloorScalar(lhs, rhs, float_type, pt);
    }
    // Vector case: apply the scalar operation to each element pair, then
    // intern the results as an aggregate of the vector type.
    const elem_ty = float_type.scalarType(zcu);
    const elems = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
    for (elems, 0..) |*elem, i| {
        const lhs_elem = try lhs.elemValue(pt, i);
        const rhs_elem = try rhs.elemValue(pt, i);
        elem.* = (try floatDivFloorScalar(lhs_elem, rhs_elem, elem_ty, pt)).toIntern();
    }
    return Value.fromInterned(try pt.intern(.{ .aggregate = .{
        .ty = float_type.toIntern(),
        .storage = .{ .elems = elems },
    } }));
}
|
||||
|
||||
/// Computes `@divFloor(lhs, rhs)` where both operands are scalar values of
/// the float type `float_type`. The operation is evaluated at that type's
/// native precision so rounding matches runtime behavior for the type.
/// Asserts the bit width of `float_type` is one of 16/32/64/80/128.
pub fn floatDivFloorScalar(
    lhs: Value,
    rhs: Value,
    float_type: Type,
    pt: Zcu.PerThread,
) !Value {
    const zcu = pt.zcu;
    const result_storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(zcu.getTarget())) {
        16 => .{ .f16 = @divFloor(lhs.toFloat(f16, zcu), rhs.toFloat(f16, zcu)) },
        32 => .{ .f32 = @divFloor(lhs.toFloat(f32, zcu), rhs.toFloat(f32, zcu)) },
        64 => .{ .f64 = @divFloor(lhs.toFloat(f64, zcu), rhs.toFloat(f64, zcu)) },
        80 => .{ .f80 = @divFloor(lhs.toFloat(f80, zcu), rhs.toFloat(f80, zcu)) },
        128 => .{ .f128 = @divFloor(lhs.toFloat(f128, zcu), rhs.toFloat(f128, zcu)) },
        else => unreachable,
    };
    return Value.fromInterned(try pt.intern(.{ .float = .{
        .ty = float_type.toIntern(),
        .storage = result_storage,
    } }));
}
|
||||
|
||||
/// Computes `@divTrunc(lhs, rhs)` for values of `float_type`, which may be
/// a scalar float type or a vector of floats. Vector operands are handled
/// elementwise via `floatDivTruncScalar`; `arena` backs the temporary
/// element list for the vector case.
pub fn floatDivTrunc(
    lhs: Value,
    rhs: Value,
    float_type: Type,
    arena: Allocator,
    pt: Zcu.PerThread,
) !Value {
    const zcu = pt.zcu;
    // Scalar case: defer directly to the scalar implementation.
    if (float_type.zigTypeTag(zcu) != .vector) {
        return floatDivTruncScalar(lhs, rhs, float_type, pt);
    }
    // Vector case: apply the scalar operation to each element pair, then
    // intern the results as an aggregate of the vector type.
    const elem_ty = float_type.scalarType(zcu);
    const elems = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
    for (elems, 0..) |*elem, i| {
        const lhs_elem = try lhs.elemValue(pt, i);
        const rhs_elem = try rhs.elemValue(pt, i);
        elem.* = (try floatDivTruncScalar(lhs_elem, rhs_elem, elem_ty, pt)).toIntern();
    }
    return Value.fromInterned(try pt.intern(.{ .aggregate = .{
        .ty = float_type.toIntern(),
        .storage = .{ .elems = elems },
    } }));
}
|
||||
|
||||
/// Computes `@divTrunc(lhs, rhs)` where both operands are scalar values of
/// the float type `float_type`. The operation is evaluated at that type's
/// native precision so rounding matches runtime behavior for the type.
/// Asserts the bit width of `float_type` is one of 16/32/64/80/128.
pub fn floatDivTruncScalar(
    lhs: Value,
    rhs: Value,
    float_type: Type,
    pt: Zcu.PerThread,
) !Value {
    const zcu = pt.zcu;
    const result_storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(zcu.getTarget())) {
        16 => .{ .f16 = @divTrunc(lhs.toFloat(f16, zcu), rhs.toFloat(f16, zcu)) },
        32 => .{ .f32 = @divTrunc(lhs.toFloat(f32, zcu), rhs.toFloat(f32, zcu)) },
        64 => .{ .f64 = @divTrunc(lhs.toFloat(f64, zcu), rhs.toFloat(f64, zcu)) },
        80 => .{ .f80 = @divTrunc(lhs.toFloat(f80, zcu), rhs.toFloat(f80, zcu)) },
        128 => .{ .f128 = @divTrunc(lhs.toFloat(f128, zcu), rhs.toFloat(f128, zcu)) },
        else => unreachable,
    };
    return Value.fromInterned(try pt.intern(.{ .float = .{
        .ty = float_type.toIntern(),
        .storage = result_storage,
    } }));
}
|
||||
|
||||
/// Computes `lhs * rhs` for values of `float_type`, which may be a scalar
/// float type or a vector of floats. Vector operands are handled
/// elementwise via `floatMulScalar`; `arena` backs the temporary element
/// list for the vector case.
pub fn floatMul(
    lhs: Value,
    rhs: Value,
    float_type: Type,
    arena: Allocator,
    pt: Zcu.PerThread,
) !Value {
    const zcu = pt.zcu;
    // Scalar case: defer directly to the scalar implementation.
    if (float_type.zigTypeTag(zcu) != .vector) {
        return floatMulScalar(lhs, rhs, float_type, pt);
    }
    // Vector case: multiply each element pair, then intern the results as
    // an aggregate of the vector type.
    const elem_ty = float_type.scalarType(zcu);
    const elems = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
    for (elems, 0..) |*elem, i| {
        const lhs_elem = try lhs.elemValue(pt, i);
        const rhs_elem = try rhs.elemValue(pt, i);
        elem.* = (try floatMulScalar(lhs_elem, rhs_elem, elem_ty, pt)).toIntern();
    }
    return Value.fromInterned(try pt.intern(.{ .aggregate = .{
        .ty = float_type.toIntern(),
        .storage = .{ .elems = elems },
    } }));
}
|
||||
|
||||
/// Computes `lhs * rhs` where both operands are scalar values of the float
/// type `float_type`. The multiplication is evaluated at that type's
/// native precision so rounding matches runtime behavior for the type.
/// Asserts the bit width of `float_type` is one of 16/32/64/80/128.
pub fn floatMulScalar(
    lhs: Value,
    rhs: Value,
    float_type: Type,
    pt: Zcu.PerThread,
) !Value {
    const zcu = pt.zcu;
    const result_storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(zcu.getTarget())) {
        16 => .{ .f16 = lhs.toFloat(f16, zcu) * rhs.toFloat(f16, zcu) },
        32 => .{ .f32 = lhs.toFloat(f32, zcu) * rhs.toFloat(f32, zcu) },
        64 => .{ .f64 = lhs.toFloat(f64, zcu) * rhs.toFloat(f64, zcu) },
        80 => .{ .f80 = lhs.toFloat(f80, zcu) * rhs.toFloat(f80, zcu) },
        128 => .{ .f128 = lhs.toFloat(f128, zcu) * rhs.toFloat(f128, zcu) },
        else => unreachable,
    };
    return Value.fromInterned(try pt.intern(.{ .float = .{
        .ty = float_type.toIntern(),
        .storage = result_storage,
    } }));
}
|
||||
|
||||
pub fn sqrt(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
|
||||
if (float_type.zigTypeTag(pt.zcu) == .vector) {
|
||||
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu));
|
||||
@@ -4805,3 +4039,29 @@ pub fn doPointersOverlap(ptr_val_a: Value, ptr_val_b: Value, elem_count: u64, zc
|
||||
return bytes_diff < need_bytes_diff;
|
||||
}
|
||||
}
|
||||
|
||||
/// `lhs` and `rhs` are both scalar numeric values (int or float).
/// Supports comparisons between heterogeneous types.
/// If `lhs` or `rhs` is undef, returns `false`.
pub fn eqlScalarNum(lhs: Value, rhs: Value, zcu: *Zcu) bool {
    // An undefined operand never compares equal to anything.
    if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return false;

    // If either side is a float, compare both operands at maximum float
    // precision (f128), which also handles int-vs-float comparisons.
    if (lhs.isFloat(zcu) or rhs.isFloat(zcu)) {
        return lhs.toFloat(f128, zcu) == rhs.toFloat(f128, zcu);
    }

    // Fast path: both integers fit in a u64.
    if (lhs.getUnsignedInt(zcu)) |lhs_u64| {
        if (rhs.getUnsignedInt(zcu)) |rhs_u64| {
            return lhs_u64 == rhs_u64;
        }
    }

    // General integer path: arbitrary-precision comparison.
    var lhs_space: BigIntSpace = undefined;
    var rhs_space: BigIntSpace = undefined;
    const lhs_big = lhs.toBigInt(&lhs_space, zcu);
    const rhs_big = rhs.toBigInt(&rhs_space, zcu);
    return lhs_big.eql(rhs_big);
}
|
||||
|
||||
48
src/Zcu.zig
48
src/Zcu.zig
@@ -1716,9 +1716,25 @@ pub const SrcLoc = struct {
|
||||
const node = node_off.toAbsolute(src_loc.base_node);
|
||||
|
||||
switch (tree.nodeTag(node)) {
|
||||
.assign => {
|
||||
return tree.nodeToSpan(tree.nodeData(node).node_and_node[0]);
|
||||
},
|
||||
.assign,
|
||||
.assign_mul,
|
||||
.assign_div,
|
||||
.assign_mod,
|
||||
.assign_add,
|
||||
.assign_sub,
|
||||
.assign_shl,
|
||||
.assign_shl_sat,
|
||||
.assign_shr,
|
||||
.assign_bit_and,
|
||||
.assign_bit_xor,
|
||||
.assign_bit_or,
|
||||
.assign_mul_wrap,
|
||||
.assign_add_wrap,
|
||||
.assign_sub_wrap,
|
||||
.assign_mul_sat,
|
||||
.assign_add_sat,
|
||||
.assign_sub_sat,
|
||||
=> return tree.nodeToSpan(tree.nodeData(node).node_and_node[0]),
|
||||
else => return tree.nodeToSpan(node),
|
||||
}
|
||||
},
|
||||
@@ -1727,9 +1743,25 @@ pub const SrcLoc = struct {
|
||||
const node = node_off.toAbsolute(src_loc.base_node);
|
||||
|
||||
switch (tree.nodeTag(node)) {
|
||||
.assign => {
|
||||
return tree.nodeToSpan(tree.nodeData(node).node_and_node[1]);
|
||||
},
|
||||
.assign,
|
||||
.assign_mul,
|
||||
.assign_div,
|
||||
.assign_mod,
|
||||
.assign_add,
|
||||
.assign_sub,
|
||||
.assign_shl,
|
||||
.assign_shl_sat,
|
||||
.assign_shr,
|
||||
.assign_bit_and,
|
||||
.assign_bit_xor,
|
||||
.assign_bit_or,
|
||||
.assign_mul_wrap,
|
||||
.assign_add_wrap,
|
||||
.assign_sub_wrap,
|
||||
.assign_mul_sat,
|
||||
.assign_add_sat,
|
||||
.assign_sub_sat,
|
||||
=> return tree.nodeToSpan(tree.nodeData(node).node_and_node[1]),
|
||||
else => return tree.nodeToSpan(node),
|
||||
}
|
||||
},
|
||||
@@ -2209,9 +2241,9 @@ pub const LazySrcLoc = struct {
|
||||
node_offset_field_default: Ast.Node.Offset,
|
||||
/// The source location points to the type of an array or struct initializer.
|
||||
node_offset_init_ty: Ast.Node.Offset,
|
||||
/// The source location points to the LHS of an assignment.
|
||||
/// The source location points to the LHS of an assignment (or assign-op, e.g. `+=`).
|
||||
node_offset_store_ptr: Ast.Node.Offset,
|
||||
/// The source location points to the RHS of an assignment.
|
||||
/// The source location points to the RHS of an assignment (or assign-op, e.g. `+=`).
|
||||
node_offset_store_operand: Ast.Node.Offset,
|
||||
/// The source location points to the operand of a `return` statement, or
|
||||
/// the `return` itself if there is no explicit operand.
|
||||
|
||||
@@ -1696,10 +1696,6 @@ test "comptime fixed-width float non-zero divided by zero produces signed Inf" {
|
||||
}
|
||||
}
|
||||
|
||||
test "comptime_float zero divided by zero produces zero" {
|
||||
try expect((0.0 / 0.0) == 0.0);
|
||||
}
|
||||
|
||||
test "comptime float compared with runtime int" {
|
||||
const f = 10.0;
|
||||
var i: usize = 0;
|
||||
|
||||
@@ -1265,12 +1265,6 @@ test "allow signed integer division/remainder when values are comptime-known and
|
||||
|
||||
try expect(5 % 3 == 2);
|
||||
try expect(-6 % 3 == 0);
|
||||
|
||||
var undef: i32 = undefined;
|
||||
_ = &undef;
|
||||
if (0 % undef != 0) {
|
||||
@compileError("0 as numerator should return comptime zero independent of denominator");
|
||||
}
|
||||
}
|
||||
|
||||
test "quad hex float literal parsing accurate" {
|
||||
@@ -1861,3 +1855,134 @@ test "runtime int comparison to inf is comptime-known" {
|
||||
comptime S.doTheTest(f64, 123);
|
||||
comptime S.doTheTest(f128, 123);
|
||||
}
|
||||
|
||||
test "float divide by zero" {
|
||||
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
|
||||
|
||||
const S = struct {
|
||||
fn doTheTest(comptime F: type, zero: F, one: F) !void {
|
||||
try expect(math.isPositiveInf(@divTrunc(one, zero)));
|
||||
try expect(math.isPositiveInf(@divFloor(one, zero)));
|
||||
|
||||
try expect(math.isNan(@rem(one, zero)));
|
||||
try expect(math.isNan(@mod(one, zero)));
|
||||
}
|
||||
};
|
||||
|
||||
try S.doTheTest(f16, 0, 1);
|
||||
comptime S.doTheTest(f16, 0, 1) catch unreachable;
|
||||
|
||||
try S.doTheTest(f32, 0, 1);
|
||||
comptime S.doTheTest(f32, 0, 1) catch unreachable;
|
||||
|
||||
try S.doTheTest(f64, 0, 1);
|
||||
comptime S.doTheTest(f64, 0, 1) catch unreachable;
|
||||
|
||||
try S.doTheTest(f80, 0, 1);
|
||||
comptime S.doTheTest(f80, 0, 1) catch unreachable;
|
||||
|
||||
try S.doTheTest(f128, 0, 1);
|
||||
comptime S.doTheTest(f128, 0, 1) catch unreachable;
|
||||
}
|
||||
|
||||
test "partially-runtime integer vector division would be illegal if vector elements were reordered" {
|
||||
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
|
||||
|
||||
var lhs: @Vector(2, i8) = .{ -128, 5 };
|
||||
const rhs: @Vector(2, i8) = .{ 1, -1 };
|
||||
|
||||
const expected: @Vector(2, i8) = .{ -128, -5 };
|
||||
|
||||
lhs = lhs; // suppress error
|
||||
|
||||
const trunc = @divTrunc(lhs, rhs);
|
||||
try expect(trunc[0] == expected[0]);
|
||||
try expect(trunc[1] == expected[1]);
|
||||
|
||||
const floor = @divFloor(lhs, rhs);
|
||||
try expect(floor[0] == expected[0]);
|
||||
try expect(floor[1] == expected[1]);
|
||||
|
||||
const exact = @divExact(lhs, rhs);
|
||||
try expect(exact[0] == expected[0]);
|
||||
try expect(exact[1] == expected[1]);
|
||||
}
|
||||
|
||||
test "float vector division of comptime zero by runtime nan is nan" {
|
||||
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
|
||||
|
||||
const ct_zero: @Vector(1, f32) = .{0};
|
||||
var rt_nan: @Vector(1, f32) = .{math.nan(f32)};
|
||||
|
||||
rt_nan = rt_nan; // suppress error
|
||||
|
||||
try expect(math.isNan((@divTrunc(ct_zero, rt_nan))[0]));
|
||||
try expect(math.isNan((@divFloor(ct_zero, rt_nan))[0]));
|
||||
try expect(math.isNan((ct_zero / rt_nan)[0]));
|
||||
}
|
||||
|
||||
test "float vector multiplication of comptime zero by runtime nan is nan" {
|
||||
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
|
||||
|
||||
const ct_zero: @Vector(1, f32) = .{0};
|
||||
var rt_nan: @Vector(1, f32) = .{math.nan(f32)};
|
||||
|
||||
rt_nan = rt_nan; // suppress error
|
||||
|
||||
try expect(math.isNan((ct_zero * rt_nan)[0]));
|
||||
try expect(math.isNan((rt_nan * ct_zero)[0]));
|
||||
}
|
||||
|
||||
test "comptime float vector division of zero by nan is nan" {
|
||||
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
|
||||
|
||||
const ct_zero: @Vector(1, f32) = .{0};
|
||||
const ct_nan: @Vector(1, f32) = .{math.nan(f32)};
|
||||
|
||||
comptime assert(math.isNan((@divTrunc(ct_zero, ct_nan))[0]));
|
||||
comptime assert(math.isNan((@divFloor(ct_zero, ct_nan))[0]));
|
||||
comptime assert(math.isNan((ct_zero / ct_nan)[0]));
|
||||
}
|
||||
|
||||
test "comptime float vector multiplication of zero by nan is nan" {
|
||||
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
|
||||
|
||||
const ct_zero: @Vector(1, f32) = .{0};
|
||||
const ct_nan: @Vector(1, f32) = .{math.nan(f32)};
|
||||
|
||||
comptime assert(math.isNan((ct_zero * ct_nan)[0]));
|
||||
comptime assert(math.isNan((ct_nan * ct_zero)[0]));
|
||||
}
|
||||
|
||||
@@ -1316,6 +1316,8 @@ test "zero multiplicand" {
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
|
||||
|
||||
const zeros = @Vector(2, u32){ 0.0, 0.0 };
|
||||
var ones = @Vector(2, u32){ 1.0, 1.0 };
|
||||
|
||||
@@ -21685,20 +21685,7 @@ test mulUnsafe {
|
||||
}
|
||||
|
||||
inline fn multiply(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs * rhs) {
|
||||
if (@inComptime() and @typeInfo(Type) == .vector) {
|
||||
// workaround https://github.com/ziglang/zig/issues/22743
|
||||
// TODO: return @select(Scalar(Type), boolAnd(lhs == lhs, rhs == rhs), lhs * rhs, lhs + rhs);
|
||||
// workaround https://github.com/ziglang/zig/issues/22744
|
||||
var res: Type = undefined;
|
||||
for (0..@typeInfo(Type).vector.len) |i| res[i] = lhs[i] * rhs[i];
|
||||
return res;
|
||||
}
|
||||
// workaround https://github.com/ziglang/zig/issues/22745
|
||||
// TODO: return lhs * rhs;
|
||||
var rt_lhs = lhs;
|
||||
var rt_rhs = rhs;
|
||||
_ = .{ &rt_lhs, &rt_rhs };
|
||||
return rt_lhs * rt_rhs;
|
||||
return lhs * rhs;
|
||||
}
|
||||
test multiply {
|
||||
const test_multiply = binary(multiply, .{});
|
||||
@@ -21715,24 +21702,8 @@ test divide {
|
||||
try test_divide.testFloatVectors();
|
||||
}
|
||||
|
||||
inline fn divTrunc(comptime Type: type, lhs: Type, rhs: Type) Type {
|
||||
switch (@typeInfo(Scalar(Type))) {
|
||||
else => @compileError(@typeName(Type)),
|
||||
.int => return @divTrunc(lhs, rhs),
|
||||
.float => {
|
||||
if (@inComptime()) {
|
||||
// workaround https://github.com/ziglang/zig/issues/22748
|
||||
return @trunc(lhs / rhs);
|
||||
}
|
||||
// workaround https://github.com/ziglang/zig/issues/22748
|
||||
// workaround https://github.com/ziglang/zig/issues/22749
|
||||
// TODO: return @divTrunc(lhs, rhs);
|
||||
var rt_lhs = lhs;
|
||||
var rt_rhs = rhs;
|
||||
_ = .{ &rt_lhs, &rt_rhs };
|
||||
return @divTrunc(rt_lhs, rt_rhs);
|
||||
},
|
||||
}
|
||||
inline fn divTrunc(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(@divTrunc(lhs, rhs)) {
|
||||
return @divTrunc(lhs, rhs);
|
||||
}
|
||||
test divTrunc {
|
||||
const test_div_trunc = binary(divTrunc, .{ .compare = .approx_int });
|
||||
@@ -21742,24 +21713,8 @@ test divTrunc {
|
||||
try test_div_trunc.testFloatVectors();
|
||||
}
|
||||
|
||||
inline fn divFloor(comptime Type: type, lhs: Type, rhs: Type) Type {
|
||||
switch (@typeInfo(Scalar(Type))) {
|
||||
else => @compileError(@typeName(Type)),
|
||||
.int => return @divFloor(lhs, rhs),
|
||||
.float => {
|
||||
if (@inComptime()) {
|
||||
// workaround https://github.com/ziglang/zig/issues/22748
|
||||
return @floor(lhs / rhs);
|
||||
}
|
||||
// workaround https://github.com/ziglang/zig/issues/22748
|
||||
// workaround https://github.com/ziglang/zig/issues/22749
|
||||
// TODO: return @divFloor(lhs, rhs);
|
||||
var rt_lhs = lhs;
|
||||
var rt_rhs = rhs;
|
||||
_ = .{ &rt_lhs, &rt_rhs };
|
||||
return @divFloor(rt_lhs, rt_rhs);
|
||||
},
|
||||
}
|
||||
inline fn divFloor(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(@divFloor(lhs, rhs)) {
|
||||
return @divFloor(lhs, rhs);
|
||||
}
|
||||
test divFloor {
|
||||
const test_div_floor = binary(divFloor, .{ .compare = .approx_int });
|
||||
@@ -21767,31 +21722,8 @@ test divFloor {
|
||||
try test_div_floor.testFloatVectors();
|
||||
}
|
||||
|
||||
// workaround https://github.com/ziglang/zig/issues/22748
|
||||
// TODO: @TypeOf(@rem(lhs, rhs))
|
||||
inline fn rem(comptime Type: type, lhs: Type, rhs: Type) Type {
|
||||
switch (@typeInfo(Scalar(Type))) {
|
||||
else => @compileError(@typeName(Type)),
|
||||
.int => return @rem(lhs, rhs),
|
||||
.float => {
|
||||
if (@inComptime()) {
|
||||
// workaround https://github.com/ziglang/zig/issues/22748
|
||||
switch (@typeInfo(Type)) {
|
||||
else => return if (rhs != 0) @rem(lhs, rhs) else nan(Type),
|
||||
.vector => |info| {
|
||||
var res: Type = undefined;
|
||||
inline for (0..info.len) |i| res[i] = if (rhs[i] != 0) @rem(lhs[i], rhs[i]) else nan(Scalar(Type));
|
||||
return res;
|
||||
},
|
||||
}
|
||||
}
|
||||
// workaround https://github.com/ziglang/zig/issues/22748
|
||||
// TODO: return @rem(lhs, rhs);
|
||||
var rt_rhs = rhs;
|
||||
_ = &rt_rhs;
|
||||
return @rem(lhs, rt_rhs);
|
||||
},
|
||||
}
|
||||
inline fn rem(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(@rem(lhs, rhs)) {
|
||||
return @rem(lhs, rhs);
|
||||
}
|
||||
test rem {
|
||||
const test_rem = binary(rem, .{});
|
||||
@@ -21801,25 +21733,16 @@ test rem {
|
||||
try test_rem.testFloatVectors();
|
||||
}
|
||||
|
||||
inline fn mod(comptime Type: type, lhs: Type, rhs: Type) Type {
|
||||
inline fn mod(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(@mod(lhs, rhs)) {
|
||||
// workaround llvm backend bugs
|
||||
if (@inComptime()) {
|
||||
const scalarMod = struct {
|
||||
fn scalarMod(scalar_lhs: Scalar(Type), scalar_rhs: Scalar(Type)) Scalar(Type) {
|
||||
// workaround https://github.com/ziglang/zig/issues/22748
|
||||
if (scalar_rhs == 0) return nan(Scalar(Type));
|
||||
const scalar_rem = @rem(scalar_lhs, scalar_rhs);
|
||||
return if (scalar_rem == 0 or math.signbit(scalar_rem) == math.signbit(scalar_rhs)) scalar_rem else scalar_rem + scalar_rhs;
|
||||
}
|
||||
}.scalarMod;
|
||||
// workaround https://github.com/ziglang/zig/issues/22748
|
||||
switch (@typeInfo(Type)) {
|
||||
// workaround llvm backend bugs
|
||||
// TODO: else => return if (rhs != 0) @mod(lhs, rhs) else nan(Type),
|
||||
// TODO: .vector => |info| {
|
||||
// TODO: var res: Type = undefined;
|
||||
// TODO: inline for (0..info.len) |i| res[i] = if (rhs[i] != 0) @mod(lhs[i], rhs[i]) else nan(Scalar(Type));
|
||||
// TODO: return res;
|
||||
// TODO: },
|
||||
else => return scalarMod(lhs, rhs),
|
||||
.vector => |info| {
|
||||
var res: Type = undefined;
|
||||
@@ -21828,11 +21751,7 @@ inline fn mod(comptime Type: type, lhs: Type, rhs: Type) Type {
|
||||
},
|
||||
}
|
||||
}
|
||||
// workaround https://github.com/ziglang/zig/issues/22748
|
||||
// TODO: return @mod(lhs, rhs);
|
||||
var rt_rhs = rhs;
|
||||
_ = &rt_rhs;
|
||||
return @mod(lhs, rt_rhs);
|
||||
return @mod(lhs, rhs);
|
||||
}
|
||||
test mod {
|
||||
const test_mod = binary(mod, .{});
|
||||
|
||||
@@ -4,7 +4,5 @@ comptime {
|
||||
}
|
||||
|
||||
// error
|
||||
// backend=stage2
|
||||
// target=native
|
||||
//
|
||||
// :3:10: error: use of undefined value here causes undefined behavior
|
||||
// :3:5: error: use of undefined value here causes undefined behavior
|
||||
|
||||
@@ -1,24 +0,0 @@
|
||||
comptime {
|
||||
const undef: i64 = undefined;
|
||||
const not_undef: i64 = 32;
|
||||
|
||||
// If either of the operands are zero, then the other operand is returned.
|
||||
@compileLog(undef + 0);
|
||||
@compileLog(not_undef + 0);
|
||||
@compileLog(0 + undef);
|
||||
@compileLog(0 + not_undef);
|
||||
|
||||
_ = undef + undef;
|
||||
}
|
||||
|
||||
// error
|
||||
// backend=stage2
|
||||
// target=native
|
||||
//
|
||||
// :11:17: error: use of undefined value here causes undefined behavior
|
||||
//
|
||||
// Compile Log Output:
|
||||
// @as(i64, undefined)
|
||||
// @as(i64, 32)
|
||||
// @as(i64, undefined)
|
||||
// @as(i64, 32)
|
||||
@@ -9,7 +9,5 @@ export fn a() void {
|
||||
}
|
||||
|
||||
// error
|
||||
// backend=stage2
|
||||
// target=native
|
||||
//
|
||||
// :8:7: error: cannot assign to constant
|
||||
// :8:5: error: cannot assign to constant
|
||||
|
||||
@@ -72,24 +72,22 @@ export fn entry18() void {
|
||||
}
|
||||
|
||||
// error
|
||||
// backend=stage2
|
||||
// target=native
|
||||
//
|
||||
// :3:5: error: cannot assign to constant
|
||||
// :7:7: error: cannot assign to constant
|
||||
// :11:7: error: cannot assign to constant
|
||||
// :15:7: error: cannot assign to constant
|
||||
// :19:7: error: cannot assign to constant
|
||||
// :23:7: error: cannot assign to constant
|
||||
// :27:7: error: cannot assign to constant
|
||||
// :31:7: error: cannot assign to constant
|
||||
// :35:7: error: cannot assign to constant
|
||||
// :39:7: error: cannot assign to constant
|
||||
// :43:7: error: cannot assign to constant
|
||||
// :47:7: error: cannot assign to constant
|
||||
// :51:7: error: cannot assign to constant
|
||||
// :55:7: error: cannot assign to constant
|
||||
// :59:7: error: cannot assign to constant
|
||||
// :63:7: error: cannot assign to constant
|
||||
// :67:7: error: cannot assign to constant
|
||||
// :71:7: error: cannot assign to constant
|
||||
// :7:5: error: cannot assign to constant
|
||||
// :11:5: error: cannot assign to constant
|
||||
// :15:5: error: cannot assign to constant
|
||||
// :19:5: error: cannot assign to constant
|
||||
// :23:5: error: cannot assign to constant
|
||||
// :27:5: error: cannot assign to constant
|
||||
// :31:5: error: cannot assign to constant
|
||||
// :35:5: error: cannot assign to constant
|
||||
// :39:5: error: cannot assign to constant
|
||||
// :43:5: error: cannot assign to constant
|
||||
// :47:5: error: cannot assign to constant
|
||||
// :51:5: error: cannot assign to constant
|
||||
// :55:5: error: cannot assign to constant
|
||||
// :59:5: error: cannot assign to constant
|
||||
// :63:5: error: cannot assign to constant
|
||||
// :67:5: error: cannot assign to constant
|
||||
// :71:5: error: cannot assign to constant
|
||||
|
||||
@@ -6,8 +6,6 @@ comptime {
|
||||
}
|
||||
|
||||
// error
|
||||
// backend=stage2
|
||||
// target=native
|
||||
//
|
||||
// :4:15: error: overflow of vector type '@Vector(4, u8)' with value '.{ 6, 8, 256, 12 }'
|
||||
// :4:15: error: overflow of integer type 'u8' with value '256'
|
||||
// :4:15: note: when computing vector element at index '2'
|
||||
|
||||
@@ -4,7 +4,5 @@ comptime {
|
||||
}
|
||||
|
||||
// error
|
||||
// backend=stage2
|
||||
// target=native
|
||||
//
|
||||
// :3:10: error: use of undefined value here causes undefined behavior
|
||||
// :3:5: error: use of undefined value here causes undefined behavior
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
comptime {
|
||||
var a: i64 = undefined;
|
||||
_ = a / a;
|
||||
_ = &a;
|
||||
}
|
||||
|
||||
// error
|
||||
// backend=stage2
|
||||
// target=native
|
||||
//
|
||||
// :3:13: error: use of undefined value here causes undefined behavior
|
||||
@@ -4,7 +4,5 @@ comptime {
|
||||
}
|
||||
|
||||
// error
|
||||
// backend=stage2
|
||||
// target=native
|
||||
//
|
||||
// :3:10: error: use of undefined value here causes undefined behavior
|
||||
// :3:5: error: use of undefined value here causes undefined behavior
|
||||
|
||||
@@ -1,38 +0,0 @@
|
||||
comptime {
|
||||
const undef: i64 = undefined;
|
||||
const not_undef: i64 = 32;
|
||||
|
||||
// If either of the operands are zero, the result is zero.
|
||||
@compileLog(undef * 0);
|
||||
@compileLog(not_undef * 0);
|
||||
@compileLog(0 * undef);
|
||||
@compileLog(0 * not_undef);
|
||||
|
||||
// If either of the operands are one, the result is the other
|
||||
// operand, even if it is undefined.
|
||||
@compileLog(undef * 1);
|
||||
@compileLog(not_undef * 1);
|
||||
@compileLog(1 * undef);
|
||||
@compileLog(1 * not_undef);
|
||||
|
||||
// If either of the operands are undefined, it's a compile error
|
||||
// because there is a possible value for which the addition would
|
||||
// overflow (max_int), causing illegal behavior.
|
||||
_ = undef * undef;
|
||||
}
|
||||
|
||||
// error
|
||||
// backend=stage2
|
||||
// target=native
|
||||
//
|
||||
// :21:17: error: use of undefined value here causes undefined behavior
|
||||
//
|
||||
// Compile Log Output:
|
||||
// @as(i64, 0)
|
||||
// @as(i64, 0)
|
||||
// @as(i64, 0)
|
||||
// @as(i64, 0)
|
||||
// @as(i64, undefined)
|
||||
// @as(i64, 32)
|
||||
// @as(i64, undefined)
|
||||
// @as(i64, 32)
|
||||
@@ -1,38 +0,0 @@
|
||||
comptime {
|
||||
const undef: i64 = undefined;
|
||||
const not_undef: i64 = 32;
|
||||
|
||||
// If either of the operands are zero, the result is zero.
|
||||
@compileLog(undef *| 0);
|
||||
@compileLog(not_undef *| 0);
|
||||
@compileLog(0 *| undef);
|
||||
@compileLog(0 *| not_undef);
|
||||
|
||||
// If either of the operands are one, result is the other operand.
|
||||
@compileLog(undef *| 1);
|
||||
@compileLog(not_undef *| 1);
|
||||
@compileLog(1 *| undef);
|
||||
@compileLog(1 *| not_undef);
|
||||
|
||||
// If either of the operands are undefined, result is undefined.
|
||||
@compileLog(undef *| 2);
|
||||
@compileLog(2 *| undef);
|
||||
}
|
||||
|
||||
// error
|
||||
// backend=stage2
|
||||
// target=native
|
||||
//
|
||||
// :6:5: error: found compile log statement
|
||||
//
|
||||
// Compile Log Output:
|
||||
// @as(i64, 0)
|
||||
// @as(i64, 0)
|
||||
// @as(i64, 0)
|
||||
// @as(i64, 0)
|
||||
// @as(i64, undefined)
|
||||
// @as(i64, 32)
|
||||
// @as(i64, undefined)
|
||||
// @as(i64, 32)
|
||||
// @as(i64, undefined)
|
||||
// @as(i64, undefined)
|
||||
@@ -1,38 +0,0 @@
|
||||
comptime {
|
||||
const undef: i64 = undefined;
|
||||
const not_undef: i64 = 32;
|
||||
|
||||
// If either of the operands are zero, the result is zero.
|
||||
@compileLog(undef *% 0);
|
||||
@compileLog(not_undef *% 0);
|
||||
@compileLog(0 *% undef);
|
||||
@compileLog(0 *% not_undef);
|
||||
|
||||
// If either of the operands are one, result is the other operand.
|
||||
@compileLog(undef *% 1);
|
||||
@compileLog(not_undef *% 1);
|
||||
@compileLog(1 *% undef);
|
||||
@compileLog(1 *% not_undef);
|
||||
|
||||
// If either of the operands are undefined, result is undefined.
|
||||
@compileLog(undef *% 2);
|
||||
@compileLog(2 *% undef);
|
||||
}
|
||||
|
||||
// error
|
||||
// backend=stage2
|
||||
// target=native
|
||||
//
|
||||
// :6:5: error: found compile log statement
|
||||
//
|
||||
// Compile Log Output:
|
||||
// @as(i64, 0)
|
||||
// @as(i64, 0)
|
||||
// @as(i64, 0)
|
||||
// @as(i64, 0)
|
||||
// @as(i64, undefined)
|
||||
// @as(i64, 32)
|
||||
// @as(i64, undefined)
|
||||
// @as(i64, 32)
|
||||
// @as(i64, undefined)
|
||||
// @as(i64, undefined)
|
||||
@@ -18,8 +18,6 @@ export fn function_with_return_type_type() void {
|
||||
}
|
||||
|
||||
// error
|
||||
// backend=stage2
|
||||
// target=native
|
||||
//
|
||||
// :3:7: error: unable to evaluate comptime expression
|
||||
// :3:5: note: operation is runtime due to this operand
|
||||
|
||||
@@ -3,7 +3,5 @@ export fn foo(a: i32, b: i32) i32 {
|
||||
}
|
||||
|
||||
// error
|
||||
// backend=stage2
|
||||
// target=native
|
||||
//
|
||||
// :2:12: error: remainder division with 'i32' and 'i32': signed integers and floats must use @rem or @mod
|
||||
|
||||
@@ -6,12 +6,21 @@ const Small = enum(u2) {
|
||||
Five,
|
||||
};
|
||||
|
||||
export fn entry() void {
|
||||
_ = Small.One;
|
||||
const SmallUnion = union(enum(u2)) {
|
||||
one = 1,
|
||||
two,
|
||||
three,
|
||||
four,
|
||||
};
|
||||
|
||||
comptime {
|
||||
_ = Small;
|
||||
}
|
||||
comptime {
|
||||
_ = SmallUnion;
|
||||
}
|
||||
|
||||
// error
|
||||
// backend=stage2
|
||||
// target=native
|
||||
//
|
||||
// :6:5: error: enumeration value '4' too large for type 'u2'
|
||||
// :13:5: error: enumeration value '4' too large for type 'u2'
|
||||
|
||||
@@ -4,7 +4,5 @@ comptime {
|
||||
}
|
||||
|
||||
// error
|
||||
// backend=stage2
|
||||
// target=native
|
||||
//
|
||||
// :3:10: error: use of undefined value here causes undefined behavior
|
||||
// :3:5: error: use of undefined value here causes undefined behavior
|
||||
|
||||
@@ -1,20 +0,0 @@
|
||||
comptime {
|
||||
const undef: i64 = undefined;
|
||||
const not_undef: i64 = 32;
|
||||
|
||||
// If the rhs is zero, then the other operand is returned, even if it is undefined.
|
||||
@compileLog(undef - 0);
|
||||
@compileLog(not_undef - 0);
|
||||
|
||||
_ = undef - undef;
|
||||
}
|
||||
|
||||
// error
|
||||
// backend=stage2
|
||||
// target=native
|
||||
//
|
||||
// :9:17: error: use of undefined value here causes undefined behavior
|
||||
//
|
||||
// Compile Log Output:
|
||||
// @as(i64, undefined)
|
||||
// @as(i64, 32)
|
||||
@@ -1,5 +1,5 @@
|
||||
const c = @cImport({
|
||||
_ = 1 + foo;
|
||||
if (foo == 0) {}
|
||||
});
|
||||
extern var foo: i32;
|
||||
export fn entry() void {
|
||||
@@ -7,9 +7,7 @@ export fn entry() void {
|
||||
}
|
||||
|
||||
// error
|
||||
// backend=stage2
|
||||
// target=native
|
||||
//
|
||||
// :2:11: error: unable to evaluate comptime expression
|
||||
// :2:13: note: operation is runtime due to this operand
|
||||
// :2:13: error: unable to evaluate comptime expression
|
||||
// :2:9: note: operation is runtime due to this operand
|
||||
// :1:11: note: operand to '@cImport' is evaluated at comptime
|
||||
|
||||
6885
test/cases/compile_errors/undef_arith_is_illegal.zig
Normal file
6885
test/cases/compile_errors/undef_arith_is_illegal.zig
Normal file
File diff suppressed because it is too large
Load Diff
2047
test/cases/compile_errors/undef_arith_returns_undef.zig
Normal file
2047
test/cases/compile_errors/undef_arith_returns_undef.zig
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user