Merge pull request #10370 from Snektron/stage2-inferred-error-sets-2

stage2: Make page_allocator work
Andrew Kelley
2021-12-21 11:28:40 -08:00
committed by GitHub
21 changed files with 1167 additions and 390 deletions


@@ -443,12 +443,12 @@ pub const Mutable = struct {
}
}
/// r = a + b with 2s-complement wrapping semantics.
/// r = a + b with 2s-complement wrapping semantics. Returns whether overflow occurred.
/// r, a and b may be aliases
///
/// Asserts the result fits in `r`. An upper bound on the number of limbs needed by
/// r is `calcTwosCompLimbCount(bit_count)`.
pub fn addWrap(r: *Mutable, a: Const, b: Const, signedness: Signedness, bit_count: usize) void {
pub fn addWrap(r: *Mutable, a: Const, b: Const, signedness: Signedness, bit_count: usize) bool {
const req_limbs = calcTwosCompLimbCount(bit_count);
// Slice of the upper bits if they exist; these will be ignored, which allows us to use addCarry to determine
@@ -463,6 +463,7 @@ pub const Mutable = struct {
.limbs = b.limbs[0..math.min(req_limbs, b.limbs.len)],
};
var carry_truncated = false;
if (r.addCarry(x, y)) {
// There are two possibilities here:
// - We overflowed req_limbs. In this case, the carry is ignored, as it would be removed by
@@ -473,10 +474,17 @@ pub const Mutable = struct {
if (msl < req_limbs) {
r.limbs[msl] = 1;
r.len = req_limbs;
} else {
carry_truncated = true;
}
}
r.truncate(r.toConst(), signedness, bit_count);
if (!r.toConst().fitsInTwosComp(signedness, bit_count)) {
r.truncate(r.toConst(), signedness, bit_count);
return true;
}
return carry_truncated;
}
/// r = a + b with 2s-complement saturating semantics.
@@ -581,13 +589,13 @@ pub const Mutable = struct {
r.add(a, b.negate());
}
/// r = a - b with 2s-complement wrapping semantics.
/// r = a - b with 2s-complement wrapping semantics. Returns whether any overflow occurred.
///
/// r, a and b may be aliases
/// Asserts the result fits in `r`. An upper bound on the number of limbs needed by
/// r is `calcTwosCompLimbCount(bit_count)`.
pub fn subWrap(r: *Mutable, a: Const, b: Const, signedness: Signedness, bit_count: usize) void {
r.addWrap(a, b.negate(), signedness, bit_count);
pub fn subWrap(r: *Mutable, a: Const, b: Const, signedness: Signedness, bit_count: usize) bool {
return r.addWrap(a, b.negate(), signedness, bit_count);
}
/// r = a - b with 2s-complement saturating semantics.
@@ -1039,7 +1047,7 @@ pub const Mutable = struct {
pub fn bitNotWrap(r: *Mutable, a: Const, signedness: Signedness, bit_count: usize) void {
r.copy(a.negate());
const negative_one = Const{ .limbs = &.{1}, .positive = false };
r.addWrap(r.toConst(), negative_one, signedness, bit_count);
_ = r.addWrap(r.toConst(), negative_one, signedness, bit_count);
}
/// r = a | b under 2s complement semantics.
@@ -2443,17 +2451,18 @@ pub const Managed = struct {
r.setMetadata(m.positive, m.len);
}
/// r = a + b with 2s-complement wrapping semantics.
/// r = a + b with 2s-complement wrapping semantics. Returns whether any overflow occurred.
///
/// r, a and b may be aliases. If r aliases a or b, then caller must call
/// `r.ensureTwosCompCapacity` prior to calling `add`.
///
/// Returns an error if memory could not be allocated.
pub fn addWrap(r: *Managed, a: Const, b: Const, signedness: Signedness, bit_count: usize) Allocator.Error!void {
pub fn addWrap(r: *Managed, a: Const, b: Const, signedness: Signedness, bit_count: usize) Allocator.Error!bool {
try r.ensureTwosCompCapacity(bit_count);
var m = r.toMutable();
m.addWrap(a, b, signedness, bit_count);
const wrapped = m.addWrap(a, b, signedness, bit_count);
r.setMetadata(m.positive, m.len);
return wrapped;
}
/// r = a + b with 2s-complement saturating semantics.
@@ -2481,17 +2490,18 @@ pub const Managed = struct {
r.setMetadata(m.positive, m.len);
}
/// r = a - b with 2s-complement wrapping semantics.
/// r = a - b with 2s-complement wrapping semantics. Returns whether any overflow occurred.
///
/// r, a and b may be aliases. If r aliases a or b, then caller must call
/// `r.ensureTwosCompCapacity` prior to calling `add`.
///
/// Returns an error if memory could not be allocated.
pub fn subWrap(r: *Managed, a: Const, b: Const, signedness: Signedness, bit_count: usize) Allocator.Error!void {
pub fn subWrap(r: *Managed, a: Const, b: Const, signedness: Signedness, bit_count: usize) Allocator.Error!bool {
try r.ensureTwosCompCapacity(bit_count);
var m = r.toMutable();
m.subWrap(a, b, signedness, bit_count);
const wrapped = m.subWrap(a, b, signedness, bit_count);
r.setMetadata(m.positive, m.len);
return wrapped;
}
/// r = a - b with 2s-complement saturating semantics.
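
Since `addWrap` and `subWrap` now return a `bool`, existing callers must either consume the overflow flag or explicitly discard it with `_ =` (as `bitNotWrap` does above). A minimal sketch of the new `Managed` call shape, mirroring the updated tests further down:

const std = @import("std");
const Managed = std.math.big.int.Managed;

test "addWrap now reports overflow" {
    var a = try Managed.initSet(std.testing.allocator, std.math.maxInt(u17));
    defer a.deinit();
    var b = try Managed.initSet(std.testing.allocator, 10);
    defer b.deinit();
    // Previously this call returned void; the bool overflow flag is new.
    const wrapped = try a.addWrap(a.toConst(), b.toConst(), .unsigned, 17);
    try std.testing.expect(wrapped);
    try std.testing.expect((try a.to(u17)) == 9);
}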


@@ -590,8 +590,9 @@ test "big.int addWrap single-single, unsigned" {
var b = try Managed.initSet(testing.allocator, 10);
defer b.deinit();
try a.addWrap(a.toConst(), b.toConst(), .unsigned, 17);
const wrapped = try a.addWrap(a.toConst(), b.toConst(), .unsigned, 17);
try testing.expect(wrapped);
try testing.expect((try a.to(u17)) == 9);
}
@@ -602,8 +603,9 @@ test "big.int subWrap single-single, unsigned" {
var b = try Managed.initSet(testing.allocator, maxInt(u17));
defer b.deinit();
try a.subWrap(a.toConst(), b.toConst(), .unsigned, 17);
const wrapped = try a.subWrap(a.toConst(), b.toConst(), .unsigned, 17);
try testing.expect(wrapped);
try testing.expect((try a.to(u17)) == 1);
}
@@ -614,8 +616,9 @@ test "big.int addWrap multi-multi, unsigned, limb aligned" {
var b = try Managed.initSet(testing.allocator, maxInt(DoubleLimb));
defer b.deinit();
try a.addWrap(a.toConst(), b.toConst(), .unsigned, @bitSizeOf(DoubleLimb));
const wrapped = try a.addWrap(a.toConst(), b.toConst(), .unsigned, @bitSizeOf(DoubleLimb));
try testing.expect(wrapped);
try testing.expect((try a.to(DoubleLimb)) == maxInt(DoubleLimb) - 1);
}
@@ -626,8 +629,9 @@ test "big.int subWrap single-multi, unsigned, limb aligned" {
var b = try Managed.initSet(testing.allocator, maxInt(DoubleLimb) + 100);
defer b.deinit();
try a.subWrap(a.toConst(), b.toConst(), .unsigned, @bitSizeOf(DoubleLimb));
const wrapped = try a.subWrap(a.toConst(), b.toConst(), .unsigned, @bitSizeOf(DoubleLimb));
try testing.expect(wrapped);
try testing.expect((try a.to(DoubleLimb)) == maxInt(DoubleLimb) - 88);
}
@@ -638,8 +642,9 @@ test "big.int addWrap single-single, signed" {
var b = try Managed.initSet(testing.allocator, 1 + 1 + maxInt(u21));
defer b.deinit();
try a.addWrap(a.toConst(), b.toConst(), .signed, @bitSizeOf(i21));
const wrapped = try a.addWrap(a.toConst(), b.toConst(), .signed, @bitSizeOf(i21));
try testing.expect(wrapped);
try testing.expect((try a.to(i21)) == minInt(i21));
}
@@ -650,8 +655,9 @@ test "big.int subWrap single-single, signed" {
var b = try Managed.initSet(testing.allocator, 1);
defer b.deinit();
try a.subWrap(a.toConst(), b.toConst(), .signed, @bitSizeOf(i21));
const wrapped = try a.subWrap(a.toConst(), b.toConst(), .signed, @bitSizeOf(i21));
try testing.expect(wrapped);
try testing.expect((try a.to(i21)) == maxInt(i21));
}
@@ -662,8 +668,9 @@ test "big.int addWrap multi-multi, signed, limb aligned" {
var b = try Managed.initSet(testing.allocator, maxInt(SignedDoubleLimb));
defer b.deinit();
try a.addWrap(a.toConst(), b.toConst(), .signed, @bitSizeOf(SignedDoubleLimb));
const wrapped = try a.addWrap(a.toConst(), b.toConst(), .signed, @bitSizeOf(SignedDoubleLimb));
try testing.expect(wrapped);
try testing.expect((try a.to(SignedDoubleLimb)) == -2);
}
@@ -674,8 +681,9 @@ test "big.int subWrap single-multi, signed, limb aligned" {
var b = try Managed.initSet(testing.allocator, 1);
defer b.deinit();
try a.subWrap(a.toConst(), b.toConst(), .signed, @bitSizeOf(SignedDoubleLimb));
const wrapped = try a.subWrap(a.toConst(), b.toConst(), .signed, @bitSizeOf(SignedDoubleLimb));
try testing.expect(wrapped);
try testing.expect((try a.to(SignedDoubleLimb)) == maxInt(SignedDoubleLimb));
}


@@ -4968,7 +4968,11 @@ pub fn toPosixPath(file_path: []const u8) ![MAX_PATH_BYTES - 1:0]u8 {
/// if this happens the fix is to add the error code to the corresponding
/// switch expression, possibly introduce a new error in the error set, and
/// send a patch to Zig.
pub const unexpected_error_tracing = builtin.mode == .Debug;
/// The self-hosted compiler is not yet capable of handling the related code.
/// Until then, unexpected error tracing is disabled for the self-hosted compiler.
/// TODO remove this once self-hosted is capable enough to handle printing and
/// stack trace dumping.
pub const unexpected_error_tracing = !builtin.zig_is_stage2 and builtin.mode == .Debug;
pub const UnexpectedError = error{
/// The Operating System returned an undocumented error code.
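
For context, this flag gates debug-only diagnostics when a syscall returns an error code outside the documented set. A hedged sketch of the consuming pattern; the function body and printing calls here are illustrative, not the verbatim std.os code:

const std = @import("std");
const builtin = @import("builtin");

const unexpected_error_tracing = !builtin.zig_is_stage2 and builtin.mode == .Debug;

fn unexpectedErrno(err: usize) error{Unexpected} {
    if (unexpected_error_tracing) {
        // Print the undocumented code and a stack trace so the switch
        // expression that missed it can be found and patched.
        std.debug.print("unexpected errno: {d}\n", .{err});
        std.debug.dumpCurrentStackTrace(null);
    }
    return error.Unexpected;
}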


@@ -135,6 +135,30 @@ pub const Inst = struct {
/// is the same as both operands.
/// Uses the `bin_op` field.
min,
/// Integer addition with overflow. Both operands are guaranteed to be the same type,
/// and the result is bool. The wrapped value is written to the pointer given by the
/// operand of the `pl_op` field. Payload is `Bin` with `lhs` and `rhs` the relevant operands
/// of the operation.
/// Uses the `pl_op` field with payload `Bin`.
add_with_overflow,
/// Integer subtraction with overflow. Both operands are guaranteed to be the same type,
/// and the result is bool. The wrapped value is written to the pointer given by the
/// operand of the `pl_op` field. Payload is `Bin` with `lhs` and `rhs` the relevant operands
/// of the operation.
/// Uses the `pl_op` field with payload `Bin`.
sub_with_overflow,
/// Integer multiplication with overflow. Both operands are guaranteed to be the same type,
/// and the result is bool. The wrapped value is written to the pointer given by the
/// operand of the `pl_op` field. Payload is `Bin` with `lhs` and `rhs` the relevant operands
/// of the operation.
/// Uses the `pl_op` field with payload `Bin`.
mul_with_overflow,
/// Integer left-shift with overflow. Both operands are guaranteed to be the same type,
/// and the result is bool. The wrapped value is written to the pointer given by the
/// operand of the `pl_op` field. Payload is `Bin` with `lhs` and `rhs` the relevant operands
/// of the operation.
/// Uses the `pl_op` field with payload `Bin`.
shl_with_overflow,
/// Allocates stack local memory.
/// Uses the `ty` field.
alloc,
@@ -189,6 +213,9 @@ pub const Inst = struct {
/// Lowers to a hardware trap instruction, or the next best thing.
/// Result type is always void.
breakpoint,
/// Yields the return address of the current function.
/// Uses the `no_op` field.
ret_addr,
/// Function call.
/// Result type is the return type of the function being called.
/// Uses the `pl_op` field with the `Call` payload. operand is the callee.
@@ -779,6 +806,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.ptrtoint,
.slice_len,
.ret_addr,
=> return Type.initTag(.usize),
.bool_to_int => return Type.initTag(.u1),
@@ -804,6 +832,12 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
const ptr_ty = air.typeOf(datas[inst].pl_op.operand);
return ptr_ty.elemType();
},
.add_with_overflow,
.sub_with_overflow,
.mul_with_overflow,
.shl_with_overflow,
=> return Type.initTag(.bool),
}
}
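
These instructions back the `@addWithOverflow` family of builtins, which at this point in the language write the wrapped result through a pointer and return a `bool`, matching the `pl_op` + `Bin` encoding described above. For example:

const std = @import("std");

test "overflow builtins write the wrapped result through a pointer" {
    var wrapped: u8 = undefined;
    const overflowed = @addWithOverflow(u8, 250, 10, &wrapped);
    try std.testing.expect(overflowed);
    try std.testing.expect(wrapped == 4); // 260 truncated to u8
}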


@@ -984,17 +984,17 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr
.fn_proto_simple => {
var params: [1]Ast.Node.Index = undefined;
return fnProtoExpr(gz, scope, rl, tree.fnProtoSimple(&params, node));
return fnProtoExpr(gz, scope, rl, node, tree.fnProtoSimple(&params, node));
},
.fn_proto_multi => {
return fnProtoExpr(gz, scope, rl, tree.fnProtoMulti(node));
return fnProtoExpr(gz, scope, rl, node, tree.fnProtoMulti(node));
},
.fn_proto_one => {
var params: [1]Ast.Node.Index = undefined;
return fnProtoExpr(gz, scope, rl, tree.fnProtoOne(&params, node));
return fnProtoExpr(gz, scope, rl, node, tree.fnProtoOne(&params, node));
},
.fn_proto => {
return fnProtoExpr(gz, scope, rl, tree.fnProto(node));
return fnProtoExpr(gz, scope, rl, node, tree.fnProto(node));
},
}
}
@@ -1101,6 +1101,7 @@ fn fnProtoExpr(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: Ast.Node.Index,
fn_proto: Ast.full.FnProto,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
@@ -1113,6 +1114,11 @@ fn fnProtoExpr(
};
assert(!is_extern);
var block_scope = gz.makeSubBlock(scope);
defer block_scope.unstack();
const block_inst = try gz.makeBlockInst(.block_inline, node);
const is_var_args = is_var_args: {
var param_type_i: usize = 0;
var it = fn_proto.iterate(tree.*);
@@ -1144,11 +1150,11 @@ fn fnProtoExpr(
.param_anytype_comptime
else
.param_anytype;
_ = try gz.addStrTok(tag, param_name, name_token);
_ = try block_scope.addStrTok(tag, param_name, name_token);
} else {
const param_type_node = param.type_expr;
assert(param_type_node != 0);
var param_gz = gz.makeSubBlock(scope);
var param_gz = block_scope.makeSubBlock(scope);
defer param_gz.unstack();
const param_type = try expr(&param_gz, scope, coerced_type_rl, param_type_node);
const param_inst_expected = @intCast(u32, astgen.instructions.len + 1);
@@ -1156,7 +1162,7 @@ fn fnProtoExpr(
const main_tokens = tree.nodes.items(.main_token);
const name_token = param.name_token orelse main_tokens[param_type_node];
const tag: Zir.Inst.Tag = if (is_comptime) .param_comptime else .param;
const param_inst = try gz.addParam(&param_gz, tag, name_token, param_name);
const param_inst = try block_scope.addParam(&param_gz, tag, name_token, param_name);
assert(param_inst_expected == param_inst);
}
}
@@ -1164,7 +1170,7 @@ fn fnProtoExpr(
};
const align_inst: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: {
break :inst try expr(gz, scope, align_rl, fn_proto.ast.align_expr);
break :inst try expr(&block_scope, scope, align_rl, fn_proto.ast.align_expr);
};
if (fn_proto.ast.addrspace_expr != 0) {
@@ -1177,7 +1183,7 @@ fn fnProtoExpr(
const cc: Zir.Inst.Ref = if (fn_proto.ast.callconv_expr != 0)
try expr(
gz,
&block_scope,
scope,
.{ .ty = .calling_convention_type },
fn_proto.ast.callconv_expr,
@@ -1190,14 +1196,14 @@ fn fnProtoExpr(
if (is_inferred_error) {
return astgen.failTok(maybe_bang, "function prototype may not have inferred error set", .{});
}
var ret_gz = gz.makeSubBlock(scope);
var ret_gz = block_scope.makeSubBlock(scope);
defer ret_gz.unstack();
const ret_ty = try expr(&ret_gz, scope, coerced_type_rl, fn_proto.ast.return_type);
const ret_br = try ret_gz.addBreak(.break_inline, 0, ret_ty);
const result = try gz.addFunc(.{
const result = try block_scope.addFunc(.{
.src_node = fn_proto.ast.proto_node,
.param_block = 0,
.param_block = block_inst,
.ret_gz = &ret_gz,
.ret_br = ret_br,
.body_gz = null,
@@ -1209,7 +1215,12 @@ fn fnProtoExpr(
.is_test = false,
.is_extern = false,
});
return rvalue(gz, rl, result, fn_proto.ast.proto_node);
_ = try block_scope.addBreak(.break_inline, block_inst, result);
try block_scope.setBlockBody(block_inst);
try gz.instructions.append(astgen.gpa, block_inst);
return rvalue(gz, rl, indexToRef(block_inst), fn_proto.ast.proto_node);
}
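
The net effect of these changes: a function prototype used as an expression now evaluates its parameter, align, callconv, and return-type sub-expressions inside a dedicated `block_inline`, with the function type as the block's break value. A source-level expression that takes this path (an illustrative declaration, not from the patch):

// The parameter and return types below are analyzed inside the new
// block_inline rather than directly in the enclosing scope.
const Callback = fn (id: u32, payload: []const u8) bool;
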
fn arrayInitExpr(


@@ -281,6 +281,7 @@ fn analyzeInst(
.dbg_stmt,
.unreach,
.fence,
.ret_addr,
=> return trackOperands(a, new_set, inst, main_tomb, .{ .none, .none, .none }),
.not,
@@ -381,7 +382,13 @@ fn analyzeInst(
const extra = a.air.extraData(Air.AtomicRmw, pl_op.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.operand, .none });
},
.memset, .memcpy => {
.memset,
.memcpy,
.add_with_overflow,
.sub_with_overflow,
.mul_with_overflow,
.shl_with_overflow,
=> {
const pl_op = inst_datas[inst].pl_op;
const extra = a.air.extraData(Air.Bin, pl_op.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.lhs, extra.rhs });


@@ -796,15 +796,11 @@ pub const ErrorSet = struct {
owner_decl: *Decl,
/// Offset from Decl node index, points to the error set AST node.
node_offset: i32,
names_len: u32,
/// The string bytes are stored in the owner Decl arena.
/// They are in the same order they appear in the AST.
/// The length is given by `names_len`.
names_ptr: [*]const []const u8,
names: NameMap,
pub fn names(self: ErrorSet) []const []const u8 {
return self.names_ptr[0..self.names_len];
}
pub const NameMap = std.StringArrayHashMapUnmanaged(void);
pub fn srcLoc(self: ErrorSet) SrcLoc {
return .{
@@ -1211,6 +1207,10 @@ pub const Fn = struct {
is_cold: bool = false,
is_noinline: bool = false,
/// Any inferred error sets that this function owns, both its own inferred error set and
/// inferred error sets of any inline/comptime functions called.
inferred_error_sets: InferredErrorSetList = .{},
pub const Analysis = enum {
queued,
/// This function intentionally only has ZIR generated because it is marked
@@ -1225,24 +1225,73 @@ pub const Fn = struct {
success,
};
pub fn deinit(func: *Fn, gpa: Allocator) void {
if (func.getInferredErrorSet()) |error_set_data| {
error_set_data.map.deinit(gpa);
error_set_data.functions.deinit(gpa);
}
}
/// This struct is used to keep track of any dependencies related to function instances
/// that return inferred error sets. Note that a function may be associated with multiple different error sets,
/// for example an inferred error set which this function returns, but also any inferred error sets
/// of called inline or comptime functions.
pub const InferredErrorSet = struct {
/// The function from which this error set originates.
/// Note: may be the function itself.
func: *Fn,
pub fn getInferredErrorSet(func: *Fn) ?*Type.Payload.ErrorSetInferred.Data {
const ret_ty = func.owner_decl.ty.fnReturnType();
if (ret_ty.tag() == .generic_poison) {
return null;
}
if (ret_ty.zigTypeTag() == .ErrorUnion) {
if (ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| {
return &payload.data;
/// All currently known errors that this error set contains. This includes direct additions
/// via `return error.Foo;`, and possibly also errors that are returned from any dependent functions.
/// When the inferred error set is fully resolved, this map contains all the errors that the function might return.
errors: std.StringHashMapUnmanaged(void) = .{},
/// Other inferred error sets which this inferred error set should include.
inferred_error_sets: std.AutoHashMapUnmanaged(*InferredErrorSet, void) = .{},
/// Whether the function returned anyerror. This is true if any of the dependent functions
/// returns anyerror.
is_anyerror: bool = false,
/// Whether this error set is already fully resolved. If true, resolving can skip resolving any dependents
/// of this inferred error set.
is_resolved: bool = false,
pub fn addErrorSet(self: *InferredErrorSet, gpa: Allocator, err_set_ty: Type) !void {
switch (err_set_ty.tag()) {
.error_set => {
const names = err_set_ty.castTag(.error_set).?.data.names.keys();
for (names) |name| {
try self.errors.put(gpa, name, {});
}
},
.error_set_single => {
const name = err_set_ty.castTag(.error_set_single).?.data;
try self.errors.put(gpa, name, {});
},
.error_set_inferred => {
const set = err_set_ty.castTag(.error_set_inferred).?.data;
try self.inferred_error_sets.put(gpa, set, {});
},
.error_set_merged => {
const names = err_set_ty.castTag(.error_set_merged).?.data.keys();
for (names) |name| {
try self.errors.put(gpa, name, {});
}
},
.anyerror => {
self.is_anyerror = true;
},
else => unreachable,
}
}
return null;
};
pub const InferredErrorSetList = std.SinglyLinkedList(InferredErrorSet);
pub const InferredErrorSetListNode = InferredErrorSetList.Node;
pub fn deinit(func: *Fn, gpa: Allocator) void {
var it = func.inferred_error_sets.first;
while (it) |node| {
const next = node.next;
node.data.errors.deinit(gpa);
node.data.inferred_error_sets.deinit(gpa);
gpa.destroy(node);
it = next;
}
}
};
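
At the language level, this machinery tracks inferred error sets: a `!T` return type collects the function's own `return error.X` sites plus the inferred sets of functions it calls, which is exactly what the `errors` and `inferred_error_sets` maps record. For example:

fn parseDigit(c: u8) !u8 {
    if (c < '0' or c > '9') return error.InvalidCharacter;
    return c - '0';
}

// parseTwo's inferred error set pulls in parseDigit's; that dependency is
// what an entry in `inferred_error_sets` models.
fn parseTwo(s: []const u8) !u8 {
    const hi = try parseDigit(s[0]);
    const lo = try parseDigit(s[1]);
    return 10 * hi + lo;
}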


@@ -940,6 +940,15 @@ pub fn analyzeBody(
const inst_data = datas[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len];
// If this block contains a function prototype, we need to reset the
// current list of parameters and restore it later.
// Note: this probably needs to be resolved in a more general manner.
const prev_params = block.params;
block.params = .{};
defer {
block.params.deinit(sema.gpa);
block.params = prev_params;
}
const break_inst = try sema.analyzeBody(block, inline_body);
const break_data = datas[break_inst].@"break";
if (inst == break_data.block_inst) {
@@ -953,6 +962,15 @@ pub fn analyzeBody(
const inst_data = datas[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len];
// If this block contains a function prototype, we need to reset the
// current list of parameters and restore it later.
// Note: this probably needs to be resolved in a more general manner.
const prev_params = block.params;
block.params = .{};
defer {
block.params.deinit(sema.gpa);
block.params = prev_params;
}
const break_inst = try sema.analyzeBody(block, inline_body);
const break_data = datas[break_inst].@"break";
if (inst == break_data.block_inst) {
@@ -1033,10 +1051,10 @@ fn zirExtended(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.@"asm" => return sema.zirAsm( block, extended, inst),
.typeof_peer => return sema.zirTypeofPeer( block, extended),
.compile_log => return sema.zirCompileLog( block, extended),
.add_with_overflow => return sema.zirOverflowArithmetic(block, extended),
.sub_with_overflow => return sema.zirOverflowArithmetic(block, extended),
.mul_with_overflow => return sema.zirOverflowArithmetic(block, extended),
.shl_with_overflow => return sema.zirOverflowArithmetic(block, extended),
.add_with_overflow => return sema.zirOverflowArithmetic(block, extended, extended.opcode),
.sub_with_overflow => return sema.zirOverflowArithmetic(block, extended, extended.opcode),
.mul_with_overflow => return sema.zirOverflowArithmetic(block, extended, extended.opcode),
.shl_with_overflow => return sema.zirOverflowArithmetic(block, extended, extended.opcode),
.c_undef => return sema.zirCUndef( block, extended),
.c_include => return sema.zirCInclude( block, extended),
.c_define => return sema.zirCDefine( block, extended),
@@ -2025,15 +2043,22 @@ fn zirErrorSetDecl(
}, type_name);
new_decl.owns_tv = true;
errdefer sema.mod.abortAnonDecl(new_decl);
const names = try new_decl_arena_allocator.alloc([]const u8, fields.len);
for (fields) |str_index, i| {
names[i] = try new_decl_arena_allocator.dupe(u8, sema.code.nullTerminatedString(str_index));
var names = Module.ErrorSet.NameMap{};
try names.ensureUnusedCapacity(new_decl_arena_allocator, fields.len);
for (fields) |str_index| {
const name = try new_decl_arena_allocator.dupe(u8, sema.code.nullTerminatedString(str_index));
// TODO: This check should be performed in AstGen instead.
const result = names.getOrPutAssumeCapacity(name);
if (result.found_existing) {
return sema.fail(block, src, "duplicate error set field {s}", .{name});
}
}
error_set.* = .{
.owner_decl = new_decl,
.node_offset = inst_data.src_node,
.names_ptr = names.ptr,
.names_len = @intCast(u32, names.len),
.names = names,
};
try new_decl.finalizeNewArena(&new_decl_arena);
return sema.analyzeDeclVal(block, src, new_decl);
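
With the `NameMap` and the `getOrPutAssumeCapacity` check, a repeated name in an error set declaration is now rejected during Sema (the TODO notes the check belongs in AstGen):

// error: duplicate error set field NotFound
const E = error{ NotFound, NotFound };
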
@@ -3887,17 +3912,20 @@ fn analyzeCall(
const ret_ty_inst = try sema.resolveBody(&child_block, fn_info.ret_ty_body);
const ret_ty_src = func_src; // TODO better source location
const bare_return_type = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst);
// If the function has an inferred error set, `bare_return_type` is the payload type only.
// Create a fresh inferred error set type for inline/comptime calls.
const fn_ret_ty = blk: {
// TODO instead of reusing the function's inferred error set, this code should
// create a temporary error set which is used for the comptime/inline function
// call alone, independent of the runtime instantiation.
if (func_ty_info.return_type.castTag(.error_union)) |payload| {
const error_set_ty = payload.data.error_set;
break :blk try Type.Tag.error_union.create(sema.arena, .{
.error_set = error_set_ty,
.payload = bare_return_type,
});
if (payload.data.error_set.tag() == .error_set_inferred) {
const node = try sema.gpa.create(Module.Fn.InferredErrorSetListNode);
node.data = .{ .func = module_fn };
parent_func.?.inferred_error_sets.prepend(node);
const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, &node.data);
break :blk try Type.Tag.error_union.create(sema.arena, .{
.error_set = error_set_ty,
.payload = bare_return_type,
});
}
}
break :blk bare_return_type;
};
@@ -4556,63 +4584,43 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
return Air.Inst.Ref.anyerror_type;
}
// Resolve both error sets now.
var set: std.StringHashMapUnmanaged(void) = .{};
defer set.deinit(sema.gpa);
const lhs_names = switch (lhs_ty.tag()) {
.error_set_single => blk: {
// Work around coercion problems
const tmp: *const [1][]const u8 = &lhs_ty.castTag(.error_set_single).?.data;
break :blk tmp;
},
.error_set_merged => lhs_ty.castTag(.error_set_merged).?.data.keys(),
.error_set => lhs_ty.castTag(.error_set).?.data.names.keys(),
else => unreachable,
};
switch (lhs_ty.tag()) {
.error_set_single => {
const name = lhs_ty.castTag(.error_set_single).?.data;
try set.put(sema.gpa, name, {});
},
.error_set_merged => {
const names = lhs_ty.castTag(.error_set_merged).?.data;
for (names) |name| {
try set.put(sema.gpa, name, {});
}
},
.error_set => {
const lhs_set = lhs_ty.castTag(.error_set).?.data;
try set.ensureUnusedCapacity(sema.gpa, lhs_set.names_len);
for (lhs_set.names_ptr[0..lhs_set.names_len]) |name| {
set.putAssumeCapacityNoClobber(name, {});
}
const rhs_names = switch (rhs_ty.tag()) {
.error_set_single => blk: {
const tmp: *const [1][]const u8 = &rhs_ty.castTag(.error_set_single).?.data;
break :blk tmp;
},
.error_set_merged => rhs_ty.castTag(.error_set_merged).?.data.keys(),
.error_set => rhs_ty.castTag(.error_set).?.data.names.keys(),
else => unreachable,
}
switch (rhs_ty.tag()) {
.error_set_single => {
const name = rhs_ty.castTag(.error_set_single).?.data;
try set.put(sema.gpa, name, {});
},
.error_set_merged => {
const names = rhs_ty.castTag(.error_set_merged).?.data;
for (names) |name| {
try set.put(sema.gpa, name, {});
}
},
.error_set => {
const rhs_set = rhs_ty.castTag(.error_set).?.data;
try set.ensureUnusedCapacity(sema.gpa, rhs_set.names_len);
for (rhs_set.names_ptr[0..rhs_set.names_len]) |name| {
set.putAssumeCapacity(name, {});
}
},
else => unreachable,
}
};
// TODO do we really want to create a Decl for this?
// The reason we do it right now is for memory management.
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
const new_names = try anon_decl.arena().alloc([]const u8, set.count());
var it = set.keyIterator();
var i: usize = 0;
while (it.next()) |key| : (i += 1) {
new_names[i] = key.*;
var names = Module.ErrorSet.NameMap{};
// TODO: This guess is an upper bound; it could be reduced by computing the exact union size first.
try names.ensureUnusedCapacity(anon_decl.arena(), @intCast(u32, lhs_names.len + rhs_names.len));
for (lhs_names) |name| {
names.putAssumeCapacityNoClobber(name, {});
}
for (rhs_names) |name| {
names.putAssumeCapacity(name, {});
}
const err_set_ty = try Type.Tag.error_set_merged.create(anon_decl.arena(), new_names);
const err_set_ty = try Type.Tag.error_set_merged.create(anon_decl.arena(), names);
const err_set_decl = try anon_decl.finish(
Type.type,
try Value.Tag.ty.create(anon_decl.arena(), err_set_ty),
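
This is the analysis behind the `||` error-set merge operator; using `putAssumeCapacity` (rather than the `NoClobber` variant) for the rhs names is what tolerates errors present in both operands. At the language level:

const FileError = error{ NotFound, AccessDenied };
const AllocError = error{ NotFound, OutOfMemory };

// The merged set contains each name once:
// error{ NotFound, AccessDenied, OutOfMemory }.
const OpenError = FileError || AllocError;
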
@@ -5079,6 +5087,10 @@ fn funcCommon(
};
errdefer if (body_inst != 0) sema.gpa.destroy(new_func);
var maybe_inferred_error_set_node: ?*Module.Fn.InferredErrorSetListNode = null;
errdefer if (maybe_inferred_error_set_node) |node| sema.gpa.destroy(node);
// Note: no need to errdefer since this will still be in its default state at the end of the function.
const fn_ty: Type = fn_ty: {
// Hot path for some common function types.
// TODO can we eliminate some of these Type tag values? seems unnecessarily complicated.
@@ -5120,12 +5132,11 @@ fn funcCommon(
const return_type = if (!inferred_error_set or bare_return_type.tag() == .generic_poison)
bare_return_type
else blk: {
const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, .{
.func = new_func,
.map = .{},
.functions = .{},
.is_anyerror = false,
});
const node = try sema.gpa.create(Module.Fn.InferredErrorSetListNode);
node.data = .{ .func = new_func };
maybe_inferred_error_set_node = node;
const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, &node.data);
break :blk try Type.Tag.error_union.create(sema.arena, .{
.error_set = error_set_ty,
.payload = bare_return_type,
@@ -5217,6 +5228,10 @@ fn funcCommon(
.lbrace_column = @truncate(u16, src_locs.columns),
.rbrace_column = @truncate(u16, src_locs.columns >> 16),
};
if (maybe_inferred_error_set_node) |node| {
new_func.inferred_error_sets.prepend(node);
}
maybe_inferred_error_set_node = null;
fn_payload.* = .{
.base = .{ .tag = .function },
.data = new_func,
@@ -5368,7 +5383,7 @@ fn zirPtrToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const ptr = sema.resolveInst(inst_data.operand);
const ptr_ty = sema.typeOf(ptr);
if (ptr_ty.zigTypeTag() != .Pointer) {
if (!ptr_ty.isPtrAtRuntime()) {
const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty});
}
@@ -7295,6 +7310,7 @@ fn zirOverflowArithmetic(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
zir_tag: Zir.Inst.Extended,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -7302,7 +7318,170 @@ fn zirOverflowArithmetic(
const extra = sema.code.extraData(Zir.Inst.OverflowArithmetic, extended.operand).data;
const src: LazySrcLoc = .{ .node_offset = extra.node };
return sema.fail(block, src, "TODO implement Sema.zirOverflowArithmetic", .{});
const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = extra.node };
const lhs = sema.resolveInst(extra.lhs);
const rhs = sema.resolveInst(extra.rhs);
const ptr = sema.resolveInst(extra.ptr);
const lhs_ty = sema.typeOf(lhs);
// Note: the types of lhs, rhs (including the shift amount), and ptr are already correct, as ensured by AstGen.
const dest_ty = lhs_ty;
if (dest_ty.zigTypeTag() != .Int) {
return sema.fail(block, src, "expected integer type, found '{}'", .{dest_ty});
}
const target = sema.mod.getTarget();
const maybe_lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, rhs);
const result: struct {
overflowed: enum { yes, no, undef },
wrapped: Air.Inst.Ref,
} = result: {
switch (zir_tag) {
.add_with_overflow => {
// If either of the arguments is zero, `false` is returned and the other is stored
// to the result, even if it is undefined.
// Otherwise, if either of the arguments is undefined, undefined is returned.
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) {
break :result .{ .overflowed = .no, .wrapped = rhs };
}
}
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef() and rhs_val.compareWithZero(.eq)) {
break :result .{ .overflowed = .no, .wrapped = lhs };
}
}
if (maybe_lhs_val) |lhs_val| {
if (maybe_rhs_val) |rhs_val| {
if (lhs_val.isUndef() or rhs_val.isUndef()) {
break :result .{ .overflowed = .undef, .wrapped = try sema.addConstUndef(dest_ty) };
}
const result = try lhs_val.intAddWithOverflow(rhs_val, dest_ty, sema.arena, target);
const inst = try sema.addConstant(dest_ty, result.wrapped_result);
break :result .{ .overflowed = if (result.overflowed) .yes else .no, .wrapped = inst };
}
}
},
.sub_with_overflow => {
// If the rhs is zero, then the result is lhs and no overflow occurred.
// Otherwise, if either of the operands is undefined, both results are undefined.
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef()) {
break :result .{ .overflowed = .undef, .wrapped = try sema.addConstUndef(dest_ty) };
} else if (rhs_val.compareWithZero(.eq)) {
break :result .{ .overflowed = .no, .wrapped = lhs };
} else if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef()) {
break :result .{ .overflowed = .undef, .wrapped = try sema.addConstUndef(dest_ty) };
}
const result = try lhs_val.intSubWithOverflow(rhs_val, dest_ty, sema.arena, target);
const inst = try sema.addConstant(dest_ty, result.wrapped_result);
break :result .{ .overflowed = if (result.overflowed) .yes else .no, .wrapped = inst };
}
}
},
.mul_with_overflow => {
// If either of the arguments is zero, the result is zero and no overflow occurred.
// If either of the arguments is one, the result is the other and no overflow occurred.
// Otherwise, if either of the arguments is undefined, both results are undefined.
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef()) {
if (lhs_val.compareWithZero(.eq)) {
break :result .{ .overflowed = .no, .wrapped = lhs };
} else if (lhs_val.compare(.eq, Value.one, dest_ty)) {
break :result .{ .overflowed = .no, .wrapped = rhs };
}
}
}
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef()) {
if (rhs_val.compareWithZero(.eq)) {
break :result .{ .overflowed = .no, .wrapped = rhs };
} else if (rhs_val.compare(.eq, Value.one, dest_ty)) {
break :result .{ .overflowed = .no, .wrapped = lhs };
}
}
}
if (maybe_lhs_val) |lhs_val| {
if (maybe_rhs_val) |rhs_val| {
if (lhs_val.isUndef() or rhs_val.isUndef()) {
break :result .{ .overflowed = .undef, .wrapped = try sema.addConstUndef(dest_ty) };
}
const result = try lhs_val.intMulWithOverflow(rhs_val, dest_ty, sema.arena, target);
const inst = try sema.addConstant(dest_ty, result.wrapped_result);
break :result .{ .overflowed = if (result.overflowed) .yes else .no, .wrapped = inst };
}
}
},
.shl_with_overflow => {
// If lhs is zero, the result is zero and no overflow occurred.
// If rhs is zero, the result is lhs (even if undefined) and no overflow occurred.
// Otherwise, if either of the arguments is undefined, both results are undefined.
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) {
break :result .{ .overflowed = .no, .wrapped = lhs };
}
}
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef() and rhs_val.compareWithZero(.eq)) {
break :result .{ .overflowed = .no, .wrapped = lhs };
}
}
if (maybe_lhs_val) |lhs_val| {
if (maybe_rhs_val) |rhs_val| {
if (lhs_val.isUndef() or rhs_val.isUndef()) {
break :result .{ .overflowed = .undef, .wrapped = try sema.addConstUndef(dest_ty) };
}
const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, target);
const inst = try sema.addConstant(dest_ty, result.wrapped_result);
break :result .{ .overflowed = if (result.overflowed) .yes else .no, .wrapped = inst };
}
}
},
else => unreachable,
}
const air_tag: Air.Inst.Tag = switch (zir_tag) {
.add_with_overflow => .add_with_overflow,
.mul_with_overflow => .mul_with_overflow,
.sub_with_overflow => .sub_with_overflow,
.shl_with_overflow => .shl_with_overflow,
else => unreachable,
};
try sema.requireRuntimeBlock(block, src);
return block.addInst(.{
.tag = air_tag,
.data = .{ .pl_op = .{
.operand = ptr,
.payload = try sema.addExtra(Air.Bin{
.lhs = lhs,
.rhs = rhs,
}),
} },
});
};
try sema.storePtr2(block, src, ptr, ptr_src, result.wrapped, src, .store);
return switch (result.overflowed) {
.yes => Air.Inst.Ref.bool_true,
.no => Air.Inst.Ref.bool_false,
.undef => try sema.addConstUndef(Type.initTag(.bool)),
};
}
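
When the operands are comptime-known, the switch above folds the builtin to constants, with the identity cases (a zero operand for add/sub/shl, a zero or one operand for mul) short-circuiting even when the other operand is undefined. A sketch of the observable behavior, using the pointer-result builtin signatures of this era:

const std = @import("std");

test "overflow arithmetic identities" {
    var r: u8 = undefined;
    // rhs == 0: result is lhs and the overflow bit is comptime-known false.
    try std.testing.expect(!@addWithOverflow(u8, 200, 0, &r));
    try std.testing.expect(r == 200);
    // Both operands comptime-known: both results fold to constants.
    try std.testing.expect(@mulWithOverflow(u8, 16, 16, &r)); // 256 wraps to 0
    try std.testing.expect(r == 0);
}
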
fn analyzeArithmetic(
@@ -8635,8 +8814,12 @@ fn zirRetAddr(
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
return sema.fail(block, src, "TODO: implement Sema.zirRetAddr", .{});
try sema.requireRuntimeBlock(block, src);
return try block.addNoOp(.ret_addr);
}
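
`zirRetAddr` implements `@returnAddress()`; in runtime code it now lowers to the new `ret_addr` AIR instruction instead of failing with a TODO:

fn callerAddress() usize {
    // Lowers to the `ret_addr` AIR instruction.
    return @returnAddress();
}
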
fn zirBuiltinSrc(
@@ -8777,7 +8960,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
},
.Pointer => {
const info = ty.ptrInfo().data;
const field_values = try sema.arena.alloc(Value, 7);
const field_values = try sema.arena.alloc(Value, 8);
// size: Size,
field_values[0] = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.size));
// is_const: bool,
@@ -8786,12 +8969,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
field_values[2] = if (info.@"volatile") Value.initTag(.bool_true) else Value.initTag(.bool_false);
// alignment: comptime_int,
field_values[3] = try Value.Tag.int_u64.create(sema.arena, info.@"align");
// address_space: AddressSpace
field_values[4] = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.@"addrspace"));
// child: type,
field_values[4] = try Value.Tag.ty.create(sema.arena, info.pointee_type);
field_values[5] = try Value.Tag.ty.create(sema.arena, info.pointee_type);
// is_allowzero: bool,
field_values[5] = if (info.@"allowzero") Value.initTag(.bool_true) else Value.initTag(.bool_false);
field_values[6] = if (info.@"allowzero") Value.initTag(.bool_true) else Value.initTag(.bool_false);
// sentinel: anytype,
field_values[6] = if (info.sentinel) |some| try Value.Tag.opt_payload.create(sema.arena, some) else Value.@"null";
field_values[7] = if (info.sentinel) |some| try Value.Tag.opt_payload.create(sema.arena, some) else Value.@"null";
return sema.addConstant(
type_info_ty,
@@ -8881,11 +9066,17 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi
switch (operand.zigTypeTag()) {
.ComptimeInt => return Air.Inst.Ref.comptime_int_type,
.Int => {
var count: u16 = 0;
var s = operand.bitSize(sema.mod.getTarget()) - 1;
while (s != 0) : (s >>= 1) {
count += 1;
}
const bits = operand.bitSize(sema.mod.getTarget());
const count = if (bits == 0)
0
else blk: {
var count: u16 = 0;
var s = bits - 1;
while (s != 0) : (s >>= 1) {
count += 1;
}
break :blk count;
};
const res = try Module.makeIntType(sema.arena, .unsigned, count);
return sema.addType(res);
},
@@ -11425,14 +11616,8 @@ fn fieldVal(
switch (child_type.zigTypeTag()) {
.ErrorSet => {
const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: {
const error_set = payload.data;
// TODO this is O(N). I'm putting off solving this until we solve inferred
// error sets at the same time.
const names = error_set.names_ptr[0..error_set.names_len];
for (names) |name| {
if (mem.eql(u8, field_name, name)) {
break :blk name;
}
if (payload.data.names.getEntry(field_name)) |entry| {
break :blk entry.key_ptr.*;
}
return sema.fail(block, src, "no error named '{s}' in '{}'", .{
field_name, child_type,
@@ -11630,14 +11815,8 @@ fn fieldPtr(
.ErrorSet => {
// TODO resolve inferred error sets
const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: {
const error_set = payload.data;
// TODO this is O(N). I'm putting off solving this until we solve inferred
// error sets at the same time.
const names = error_set.names_ptr[0..error_set.names_len];
for (names) |name| {
if (mem.eql(u8, field_name, name)) {
break :blk name;
}
if (payload.data.names.getEntry(field_name)) |entry| {
break :blk entry.key_ptr.*;
}
return sema.fail(block, src, "no error named '{s}' in '{}'", .{
field_name, child_type,
@@ -12207,7 +12386,7 @@ fn coerce(
const arena = sema.arena;
const target = sema.mod.getTarget();
const in_memory_result = coerceInMemoryAllowed(dest_ty, inst_ty, false, target);
const in_memory_result = try sema.coerceInMemoryAllowed(dest_ty, inst_ty, false, target);
if (in_memory_result == .ok) {
if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| {
// Keep the comptime Value representation; take the new type.
@@ -12266,7 +12445,7 @@ fn coerce(
if (inst_ty.isConstPtr() and dest_is_mut) break :single_item;
if (inst_ty.isVolatilePtr() and !dest_info.@"volatile") break :single_item;
if (inst_ty.ptrAddressSpace() != dest_info.@"addrspace") break :single_item;
switch (coerceInMemoryAllowed(array_elem_ty, ptr_elem_ty, dest_is_mut, target)) {
switch (try sema.coerceInMemoryAllowed(array_elem_ty, ptr_elem_ty, dest_is_mut, target)) {
.ok => {},
.no_match => break :single_item,
}
@@ -12285,7 +12464,7 @@ fn coerce(
if (inst_ty.ptrAddressSpace() != dest_info.@"addrspace") break :src_array_ptr;
const dst_elem_type = dest_info.pointee_type;
switch (coerceInMemoryAllowed(dst_elem_type, array_elem_type, dest_is_mut, target)) {
switch (try sema.coerceInMemoryAllowed(dst_elem_type, array_elem_type, dest_is_mut, target)) {
.ok => {},
.no_match => break :src_array_ptr,
}
@@ -12324,7 +12503,7 @@ fn coerce(
const src_elem_ty = inst_ty.childType();
const dest_is_mut = dest_info.mutable;
const dst_elem_type = dest_info.pointee_type;
switch (coerceInMemoryAllowed(dst_elem_type, src_elem_ty, dest_is_mut, target)) {
switch (try sema.coerceInMemoryAllowed(dst_elem_type, src_elem_ty, dest_is_mut, target)) {
.ok => {},
.no_match => break :src_c_ptr,
}
@@ -12467,7 +12646,13 @@ const InMemoryCoercionResult = enum {
/// * sentinel-terminated pointers can coerce into `[*]`
/// TODO improve this function to report recursive compile errors like it does in stage1.
/// look at the function types_match_const_cast_only
fn coerceInMemoryAllowed(dest_ty: Type, src_ty: Type, dest_is_mut: bool, target: std.Target) InMemoryCoercionResult {
fn coerceInMemoryAllowed(
sema: *Sema,
dest_ty: Type,
src_ty: Type,
dest_is_mut: bool,
target: std.Target,
) CompileError!InMemoryCoercionResult {
if (dest_ty.eql(src_ty))
return .ok;
@@ -12476,32 +12661,35 @@ fn coerceInMemoryAllowed(dest_ty: Type, src_ty: Type, dest_is_mut: bool, target:
var src_buf: Type.Payload.ElemType = undefined;
if (dest_ty.ptrOrOptionalPtrTy(&dest_buf)) |dest_ptr_ty| {
if (src_ty.ptrOrOptionalPtrTy(&src_buf)) |src_ptr_ty| {
return coerceInMemoryAllowedPtrs(dest_ty, src_ty, dest_ptr_ty, src_ptr_ty, dest_is_mut, target);
return try sema.coerceInMemoryAllowedPtrs(dest_ty, src_ty, dest_ptr_ty, src_ptr_ty, dest_is_mut, target);
}
}
// Slices
if (dest_ty.isSlice() and src_ty.isSlice()) {
return coerceInMemoryAllowedPtrs(dest_ty, src_ty, dest_ty, src_ty, dest_is_mut, target);
return try sema.coerceInMemoryAllowedPtrs(dest_ty, src_ty, dest_ty, src_ty, dest_is_mut, target);
}
const dest_tag = dest_ty.zigTypeTag();
const src_tag = src_ty.zigTypeTag();
// Functions
if (dest_ty.zigTypeTag() == .Fn and src_ty.zigTypeTag() == .Fn) {
return coerceInMemoryAllowedFns(dest_ty, src_ty, target);
if (dest_tag == .Fn and src_tag == .Fn) {
return try sema.coerceInMemoryAllowedFns(dest_ty, src_ty, target);
}
// Error Unions
if (dest_ty.zigTypeTag() == .ErrorUnion and src_ty.zigTypeTag() == .ErrorUnion) {
const child = coerceInMemoryAllowed(dest_ty.errorUnionPayload(), src_ty.errorUnionPayload(), dest_is_mut, target);
if (dest_tag == .ErrorUnion and src_tag == .ErrorUnion) {
const child = try sema.coerceInMemoryAllowed(dest_ty.errorUnionPayload(), src_ty.errorUnionPayload(), dest_is_mut, target);
if (child == .no_match) {
return child;
}
return coerceInMemoryAllowed(dest_ty.errorUnionSet(), src_ty.errorUnionSet(), dest_is_mut, target);
return try sema.coerceInMemoryAllowed(dest_ty.errorUnionSet(), src_ty.errorUnionSet(), dest_is_mut, target);
}
// Error Sets
if (dest_ty.zigTypeTag() == .ErrorSet and src_ty.zigTypeTag() == .ErrorSet) {
return coerceInMemoryAllowedErrorSets(dest_ty, src_ty);
if (dest_tag == .ErrorSet and src_tag == .ErrorSet) {
return try sema.coerceInMemoryAllowedErrorSets(dest_ty, src_ty);
}
// TODO: arrays
@@ -12512,14 +12700,16 @@ fn coerceInMemoryAllowed(dest_ty: Type, src_ty: Type, dest_is_mut: bool, target:
}
fn coerceInMemoryAllowedErrorSets(
sema: *Sema,
dest_ty: Type,
src_ty: Type,
) InMemoryCoercionResult {
// Coercion to `anyerror`. Note that this check can return false positives
) !InMemoryCoercionResult {
// Coercion to `anyerror`. Note that this check can return false negatives
// in case the error sets did not get resolved.
if (dest_ty.isAnyError()) {
return .ok;
}
// If both are inferred error sets of functions, and
// the dest includes the source function, the coercion is OK.
// This check is important because it works without forcing a full resolution
@@ -12529,21 +12719,85 @@ fn coerceInMemoryAllowedErrorSets(
const src_func = src_payload.data.func;
const dst_func = dst_payload.data.func;
if (src_func == dst_func or dst_payload.data.functions.contains(src_func)) {
if (src_func == dst_func or dst_payload.data.inferred_error_sets.contains(src_payload.data)) {
return .ok;
}
return .no_match;
}
}
// TODO full error set resolution and compare sets by names.
if (dest_ty.castTag(.error_set_inferred)) |payload| {
try sema.resolveInferredErrorSet(payload.data);
// isAnyError might have changed from a false negative to a true positive after resolution.
if (dest_ty.isAnyError()) {
return .ok;
}
}
switch (src_ty.tag()) {
.error_set_inferred => {
const src_data = src_ty.castTag(.error_set_inferred).?.data;
try sema.resolveInferredErrorSet(src_data);
// src anyerror status might have changed after the resolution.
if (src_ty.isAnyError()) {
// dest_ty.isAnyError() == true is already checked for at this point.
return .no_match;
}
var it = src_data.errors.keyIterator();
while (it.next()) |name_ptr| {
if (!dest_ty.errorSetHasField(name_ptr.*)) {
return .no_match;
}
}
return .ok;
},
.error_set_single => {
const name = src_ty.castTag(.error_set_single).?.data;
if (dest_ty.errorSetHasField(name)) {
return .ok;
}
},
.error_set_merged => {
const names = src_ty.castTag(.error_set_merged).?.data.keys();
for (names) |name| {
if (!dest_ty.errorSetHasField(name)) {
return .no_match;
}
}
return .ok;
},
.error_set => {
const names = src_ty.castTag(.error_set).?.data.names.keys();
for (names) |name| {
if (!dest_ty.errorSetHasField(name)) {
return .no_match;
}
}
return .ok;
},
.anyerror => switch (dest_ty.tag()) {
.error_set_inferred => return .no_match, // Caught by dest.isAnyError() above.
.error_set_single, .error_set_merged, .error_set => {},
.anyerror => unreachable, // Filtered out above.
else => unreachable,
},
else => unreachable,
}
return .no_match;
}
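
In effect, a source error set is in-memory coercible to a destination set when every name it can contain also exists in the destination, with inferred sets resolved on demand first. The everyday case this enables:

const std = @import("std");

const Small = error{NotFound};
const Big = error{ NotFound, OutOfMemory };

test "error subset coerces to superset" {
    const e: Small = error.NotFound;
    const widened: Big = e; // ok: every name in Small is in Big
    try std.testing.expect(widened == error.NotFound);
}
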
fn coerceInMemoryAllowedFns(
sema: *Sema,
dest_ty: Type,
src_ty: Type,
target: std.Target,
) InMemoryCoercionResult {
) !InMemoryCoercionResult {
const dest_info = dest_ty.fnInfo();
const src_info = src_ty.fnInfo();
@@ -12556,7 +12810,7 @@ fn coerceInMemoryAllowedFns(
}
if (!src_info.return_type.isNoReturn()) {
const rt = coerceInMemoryAllowed(dest_info.return_type, src_info.return_type, false, target);
const rt = try sema.coerceInMemoryAllowed(dest_info.return_type, src_info.return_type, false, target);
if (rt == .no_match) {
return rt;
}
@@ -12576,7 +12830,7 @@ fn coerceInMemoryAllowedFns(
// TODO: noalias
// Note: Cast direction is reversed here.
const param = coerceInMemoryAllowed(src_param_ty, dest_param_ty, false, target);
const param = try sema.coerceInMemoryAllowed(src_param_ty, dest_param_ty, false, target);
if (param == .no_match) {
return param;
}
@@ -12590,17 +12844,18 @@ fn coerceInMemoryAllowedFns(
}
fn coerceInMemoryAllowedPtrs(
sema: *Sema,
dest_ty: Type,
src_ty: Type,
dest_ptr_ty: Type,
src_ptr_ty: Type,
dest_is_mut: bool,
target: std.Target,
) InMemoryCoercionResult {
) !InMemoryCoercionResult {
const dest_info = dest_ptr_ty.ptrInfo().data;
const src_info = src_ptr_ty.ptrInfo().data;
const child = coerceInMemoryAllowed(dest_info.pointee_type, src_info.pointee_type, dest_info.mutable, target);
const child = try sema.coerceInMemoryAllowed(dest_info.pointee_type, src_info.pointee_type, dest_info.mutable, target);
if (child == .no_match) {
return child;
}
@@ -13321,7 +13576,7 @@ fn coerceVectorInMemory(
const target = sema.mod.getTarget();
const dest_elem_ty = dest_ty.childType();
const inst_elem_ty = inst_ty.childType();
const in_memory_result = coerceInMemoryAllowed(dest_elem_ty, inst_elem_ty, false, target);
const in_memory_result = try sema.coerceInMemoryAllowed(dest_elem_ty, inst_elem_ty, false, target);
if (in_memory_result != .ok) {
// TODO recursive error notes for coerceInMemoryAllowed failure
return sema.fail(block, inst_src, "expected {}, found {}", .{ dest_ty, inst_ty });
@@ -13916,25 +14171,28 @@ fn wrapErrorUnion(
if (mem.eql(u8, expected_name, n)) break :ok;
return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
},
.error_set => ok: {
.error_set => {
const expected_name = val.castTag(.@"error").?.data.name;
const error_set = dest_err_set_ty.castTag(.error_set).?.data;
const names = error_set.names_ptr[0..error_set.names_len];
// TODO this is O(N). I'm putting off solving this until we solve inferred
// error sets at the same time.
for (names) |name| {
if (mem.eql(u8, expected_name, name)) break :ok;
if (!error_set.names.contains(expected_name)) {
return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
}
return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
},
.error_set_inferred => ok: {
const err_set_payload = dest_err_set_ty.castTag(.error_set_inferred).?.data;
if (err_set_payload.is_anyerror) break :ok;
const expected_name = val.castTag(.@"error").?.data.name;
if (err_set_payload.map.contains(expected_name)) break :ok;
// TODO error set resolution here before emitting a compile error
const data = dest_err_set_ty.castTag(.error_set_inferred).?.data;
try sema.resolveInferredErrorSet(data);
if (data.is_anyerror) break :ok;
if (data.errors.contains(expected_name)) break :ok;
return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
},
.error_set_merged => {
const expected_name = val.castTag(.@"error").?.data.name;
const error_set = dest_err_set_ty.castTag(.error_set_merged).?.data;
if (!error_set.contains(expected_name)) {
return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
}
},
else => unreachable,
}
return sema.addConstant(dest_ty, val);
@@ -14077,12 +14335,12 @@ fn resolvePeerTypes(
.Optional => {
var opt_child_buf: Type.Payload.ElemType = undefined;
const opt_child_ty = candidate_ty.optionalChild(&opt_child_buf);
if (coerceInMemoryAllowed(opt_child_ty, chosen_ty, false, target) == .ok) {
if ((try sema.coerceInMemoryAllowed(opt_child_ty, chosen_ty, false, target)) == .ok) {
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
}
if (coerceInMemoryAllowed(chosen_ty, opt_child_ty, false, target) == .ok) {
if ((try sema.coerceInMemoryAllowed(chosen_ty, opt_child_ty, false, target)) == .ok) {
any_are_null = true;
continue;
}
@@ -14105,10 +14363,10 @@ fn resolvePeerTypes(
.Optional => {
var opt_child_buf: Type.Payload.ElemType = undefined;
const opt_child_ty = chosen_ty.optionalChild(&opt_child_buf);
if (coerceInMemoryAllowed(opt_child_ty, candidate_ty, false, target) == .ok) {
if ((try sema.coerceInMemoryAllowed(opt_child_ty, candidate_ty, false, target)) == .ok) {
continue;
}
if (coerceInMemoryAllowed(candidate_ty, opt_child_ty, false, target) == .ok) {
if ((try sema.coerceInMemoryAllowed(candidate_ty, opt_child_ty, false, target)) == .ok) {
any_are_null = true;
chosen = candidate;
chosen_i = candidate_i + 1;
@@ -14274,6 +14532,42 @@ fn resolveBuiltinTypeFields(
return sema.resolveTypeFields(block, src, resolved_ty);
}
fn resolveInferredErrorSet(sema: *Sema, inferred_error_set: *Module.Fn.InferredErrorSet) CompileError!void {
// Ensuring that a particular decl is analyzed does not necessarily mean that
// its error set is inferred, so traverse all of them to get the complete
// picture.
// Note: We want to skip re-resolving the current function, as recursion
// doesn't change the error set. We can just check for state == .in_progress for this.
// TODO: Is that correct?
if (inferred_error_set.is_resolved) {
return;
}
var it = inferred_error_set.inferred_error_sets.keyIterator();
while (it.next()) |other_error_set_ptr| {
const func = other_error_set_ptr.*.func;
const decl = func.*.owner_decl;
if (func.*.state == .in_progress) {
// Recursion doesn't alter the current error set; keep going.
continue;
}
try sema.ensureDeclAnalyzed(decl); // To ensure that all dependencies are properly added to the set.
try sema.resolveInferredErrorSet(other_error_set_ptr.*);
var error_it = other_error_set_ptr.*.errors.keyIterator();
while (error_it.next()) |entry| {
try inferred_error_set.errors.put(sema.gpa, entry.*, {});
}
if (other_error_set_ptr.*.is_anyerror)
inferred_error_set.is_anyerror = true;
}
inferred_error_set.is_resolved = true;
}
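
The `in_progress` check matters for self-recursion: a recursive call contributes the very set currently being resolved, so skipping it is sound. For example:

// countdown's inferred error set is error{Done}; the recursive call refers
// to the set being resolved and adds nothing new.
fn countdown(n: u32) !void {
    if (n == 0) return error.Done;
    return countdown(n - 1);
}
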
fn semaStructFields(
mod: *Module,
struct_obj: *Module.Struct,
@@ -15236,8 +15530,8 @@ fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr
// We have a Value that lines up in virtual memory exactly with what we want to load.
// If the Type is in-memory coercible to `load_ty`, it may be returned without modifications.
const coerce_in_mem_ok =
coerceInMemoryAllowed(load_ty, parent.ty, false, target) == .ok or
coerceInMemoryAllowed(parent.ty, load_ty, false, target) == .ok;
(try sema.coerceInMemoryAllowed(load_ty, parent.ty, false, target)) == .ok or
(try sema.coerceInMemoryAllowed(parent.ty, load_ty, false, target)) == .ok;
if (coerce_in_mem_ok) {
if (parent.is_mutable) {
// The decl whose value we are obtaining here may be overwritten with


@@ -521,6 +521,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.max => try self.airMax(inst),
.slice => try self.airSlice(inst),
.add_with_overflow => try self.airAddWithOverflow(inst),
.sub_with_overflow => try self.airSubWithOverflow(inst),
.mul_with_overflow => try self.airMulWithOverflow(inst),
.shl_with_overflow => try self.airShlWithOverflow(inst),
.div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst),
.cmp_lt => try self.airCmp(inst, .lt),
@@ -545,6 +550,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
.breakpoint => try self.airBreakpoint(),
.ret_addr => try self.airRetAddr(),
.fence => try self.airFence(),
.call => try self.airCall(inst),
.cond_br => try self.airCondBr(inst),
@@ -968,6 +974,26 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
_ = inst;
return self.fail("TODO implement airAddWithOverflow for {}", .{self.target.cpu.arch});
}
fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
_ = inst;
return self.fail("TODO implement airSubWithOverflow for {}", .{self.target.cpu.arch});
}
fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
_ = inst;
return self.fail("TODO implement airMulWithOverflow for {}", .{self.target.cpu.arch});
}
fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
_ = inst;
return self.fail("TODO implement airShlWithOverflow for {}", .{self.target.cpu.arch});
}
fn airDiv(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement div for {}", .{self.target.cpu.arch});
@@ -1409,6 +1435,10 @@ fn airBreakpoint(self: *Self) !void {
return self.finishAirBookkeeping();
}
fn airRetAddr(self: *Self) !void {
return self.fail("TODO implement airRetAddr for {}", .{self.target.cpu.arch});
}
fn airFence(self: *Self) !void {
return self.fail("TODO implement fence() for {}", .{self.target.cpu.arch});
//return self.finishAirBookkeeping();


@@ -519,6 +519,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.max => try self.airMax(inst),
.slice => try self.airSlice(inst),
.add_with_overflow => try self.airAddWithOverflow(inst),
.sub_with_overflow => try self.airSubWithOverflow(inst),
.mul_with_overflow => try self.airMulWithOverflow(inst),
.shl_with_overflow => try self.airShlWithOverflow(inst),
.div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst),
.cmp_lt => try self.airCmp(inst, .lt),
@@ -543,6 +548,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
.breakpoint => try self.airBreakpoint(),
.ret_addr => try self.airRetAddr(),
.fence => try self.airFence(),
.call => try self.airCall(inst),
.cond_br => try self.airCondBr(inst),
@@ -998,6 +1004,26 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
_ = inst;
return self.fail("TODO implement airAddWithOverflow for {}", .{self.target.cpu.arch});
}
fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
_ = inst;
return self.fail("TODO implement airSubWithOverflow for {}", .{self.target.cpu.arch});
}
fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
_ = inst;
return self.fail("TODO implement airMulWithOverflow for {}", .{self.target.cpu.arch});
}
fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
_ = inst;
return self.fail("TODO implement airShlWithOverflow for {}", .{self.target.cpu.arch});
}
fn airDiv(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement div for {}", .{self.target.cpu.arch});
@@ -1843,6 +1869,10 @@ fn airBreakpoint(self: *Self) !void {
return self.finishAirBookkeeping();
}
fn airRetAddr(self: *Self) !void {
return self.fail("TODO implement airRetAddr for {}", .{self.target.cpu.arch});
}
fn airFence(self: *Self) !void {
return self.fail("TODO implement fence() for {}", .{self.target.cpu.arch});
//return self.finishAirBookkeeping();

View File

@@ -500,6 +500,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.max => try self.airMax(inst),
.slice => try self.airSlice(inst),
.add_with_overflow => try self.airAddWithOverflow(inst),
.sub_with_overflow => try self.airSubWithOverflow(inst),
.mul_with_overflow => try self.airMulWithOverflow(inst),
.shl_with_overflow => try self.airShlWithOverflow(inst),
.div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst),
.cmp_lt => try self.airCmp(inst, .lt),
@@ -524,6 +529,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
.breakpoint => try self.airBreakpoint(),
.ret_addr => try self.airRetAddr(),
.fence => try self.airFence(),
.call => try self.airCall(inst),
.cond_br => try self.airCondBr(inst),
@@ -913,6 +919,26 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
_ = inst;
return self.fail("TODO implement airAddWithOverflow for {}", .{self.target.cpu.arch});
}
fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
_ = inst;
return self.fail("TODO implement airSubWithOverflow for {}", .{self.target.cpu.arch});
}
fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
_ = inst;
return self.fail("TODO implement airMulWithOverflow for {}", .{self.target.cpu.arch});
}
fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
_ = inst;
return self.fail("TODO implement airShlWithOverflow for {}", .{self.target.cpu.arch});
}
fn airDiv(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement div for {}", .{self.target.cpu.arch});
@@ -1347,6 +1373,10 @@ fn airBreakpoint(self: *Self) !void {
return self.finishAirBookkeeping();
}
fn airRetAddr(self: *Self) !void {
return self.fail("TODO implement airRetAddr for {}", .{self.target.cpu.arch});
}
fn airFence(self: *Self) !void {
return self.fail("TODO implement fence() for {}", .{self.target.cpu.arch});
//return self.finishAirBookkeeping();

View File

@@ -553,6 +553,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.max => try self.airMax(inst),
.slice => try self.airSlice(inst),
.add_with_overflow => try self.airAddWithOverflow(inst),
.sub_with_overflow => try self.airSubWithOverflow(inst),
.mul_with_overflow => try self.airMulWithOverflow(inst),
.shl_with_overflow => try self.airShlWithOverflow(inst),
.div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst),
.cmp_lt => try self.airCmp(inst, .lt),
@@ -577,6 +582,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
.breakpoint => try self.airBreakpoint(),
.ret_addr => try self.airRetAddr(),
.fence => try self.airFence(),
.call => try self.airCall(inst),
.cond_br => try self.airCondBr(inst),
@@ -1027,6 +1033,26 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
_ = inst;
return self.fail("TODO implement airAddWithOverflow for {}", .{self.target.cpu.arch});
}
fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
_ = inst;
return self.fail("TODO implement airSubWithOverflow for {}", .{self.target.cpu.arch});
}
fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
_ = inst;
return self.fail("TODO implement airMulWithOverflow for {}", .{self.target.cpu.arch});
}
fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
_ = inst;
return self.fail("TODO implement airShlWithOverflow for {}", .{self.target.cpu.arch});
}
fn airDiv(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst))
@@ -1832,6 +1858,10 @@ fn airBreakpoint(self: *Self) !void {
return self.finishAirBookkeeping();
}
fn airRetAddr(self: *Self) !void {
return self.fail("TODO implement airRetAddr for {}", .{self.target.cpu.arch});
}
fn airFence(self: *Self) !void {
return self.fail("TODO implement fence() for {}", .{self.target.cpu.arch});
//return self.finishAirBookkeeping();

View File

@@ -1125,6 +1125,7 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.arg => airArg(f),
.breakpoint => try airBreakpoint(f),
.ret_addr => try airRetAddr(f),
.unreach => try airUnreach(f),
.fence => try airFence(f, inst),
@@ -1155,6 +1156,11 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.mul_sat => try airSatOp(f, inst, "muls_"),
.shl_sat => try airSatOp(f, inst, "shls_"),
.add_with_overflow => try airAddWithOverflow(f, inst),
.sub_with_overflow => try airSubWithOverflow(f, inst),
.mul_with_overflow => try airMulWithOverflow(f, inst),
.shl_with_overflow => try airShlWithOverflow(f, inst),
.min => try airMinMax(f, inst, "<"),
.max => try airMinMax(f, inst, ">"),
@@ -1864,6 +1870,30 @@ fn airSatOp(f: *Function, inst: Air.Inst.Index, fn_op: [*:0]const u8) !CValue {
return ret;
}
fn airAddWithOverflow(f: *Function, inst: Air.Inst.Index) !CValue {
_ = inst;
return f.fail("TODO add with overflow", .{});
}
fn airSubWithOverflow(f: *Function, inst: Air.Inst.Index) !CValue {
_ = inst;
return f.fail("TODO sub with overflow", .{});
}
fn airMulWithOverflow(f: *Function, inst: Air.Inst.Index) !CValue {
_ = inst;
return f.fail("TODO mul with overflow", .{});
}
fn airShlWithOverflow(f: *Function, inst: Air.Inst.Index) !CValue {
_ = inst;
return f.fail("TODO shl with overflow", .{});
}
fn airNot(f: *Function, inst: Air.Inst.Index) !CValue {
if (f.liveness.isUnused(inst))
return CValue.none;
@@ -2183,6 +2213,10 @@ fn airBreakpoint(f: *Function) !CValue {
return CValue.none;
}
fn airRetAddr(f: *Function) !CValue {
return f.fail("TODO implement codegen for airRetAddr", .{});
}
fn airFence(f: *Function, inst: Air.Inst.Index) !CValue {
const atomic_order = f.air.instructions.items(.data)[inst].fence;
const writer = f.object.writer();

View File

@@ -761,21 +761,25 @@ pub const DeclGen = struct {
dg.context.intType(8);
return llvm_elem_ty.pointerType(llvm_addrspace);
},
.Opaque => {
const gop = try dg.object.type_map.getOrPut(gpa, t);
if (gop.found_existing) return gop.value_ptr.*;
.Opaque => switch (t.tag()) {
.@"opaque" => {
const gop = try dg.object.type_map.getOrPut(gpa, t);
if (gop.found_existing) return gop.value_ptr.*;
// The Type memory is ephemeral; since we want to store a longer-lived
// reference, we need to copy it here.
gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator());
// The Type memory is ephemeral; since we want to store a longer-lived
// reference, we need to copy it here.
gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator());
const opaque_obj = t.castTag(.@"opaque").?.data;
const name = try opaque_obj.getFullyQualifiedName(gpa);
defer gpa.free(name);
const opaque_obj = t.castTag(.@"opaque").?.data;
const name = try opaque_obj.getFullyQualifiedName(gpa);
defer gpa.free(name);
const llvm_struct_ty = dg.context.structCreateNamed(name);
gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls
return llvm_struct_ty;
const llvm_struct_ty = dg.context.structCreateNamed(name);
gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls
return llvm_struct_ty;
},
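// anyopaque maps to i8, consistent with the pointer case above that uses intType(8) for opaque element types.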
.anyopaque => return dg.context.intType(8),
else => unreachable,
},
.Array => {
const elem_type = try dg.llvmType(t.childType());
@@ -1714,6 +1718,11 @@ pub const FuncGen = struct {
.max => try self.airMax(inst),
.slice => try self.airSlice(inst),
.add_with_overflow => try self.airOverflow(inst, "llvm.sadd.with.overflow", "llvm.uadd.with.overflow"),
.sub_with_overflow => try self.airOverflow(inst, "llvm.ssub.with.overflow", "llvm.usub.with.overflow"),
.mul_with_overflow => try self.airOverflow(inst, "llvm.smul.with.overflow", "llvm.umul.with.overflow"),
.shl_with_overflow => try self.airShlWithOverflow(inst),
.bit_and, .bool_and => try self.airAnd(inst),
.bit_or, .bool_or => try self.airOr(inst),
.xor => try self.airXor(inst),
@@ -1745,6 +1754,7 @@ pub const FuncGen = struct {
.br => try self.airBr(inst),
.switch_br => try self.airSwitchBr(inst),
.breakpoint => try self.airBreakpoint(inst),
.ret_addr => try self.airRetAddr(inst),
.call => try self.airCall(inst),
.cond_br => try self.airCondBr(inst),
.intcast => try self.airIntCast(inst),
@@ -3133,6 +3143,75 @@ pub const FuncGen = struct {
}
}
fn airOverflow(
self: *FuncGen,
inst: Air.Inst.Index,
signed_intrinsic: []const u8,
unsigned_intrinsic: []const u8,
) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
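// For the *_with_overflow instructions, pl_op.operand is the pointer the wrapped
// result is stored through, and the Air.Bin payload carries lhs and rhs.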
const ptr = try self.resolveInst(pl_op.operand);
const lhs = try self.resolveInst(extra.lhs);
const rhs = try self.resolveInst(extra.rhs);
const ptr_ty = self.air.typeOf(pl_op.operand);
const lhs_ty = self.air.typeOf(extra.lhs);
const intrinsic_name = if (lhs_ty.isSignedInt()) signed_intrinsic else unsigned_intrinsic;
const llvm_lhs_ty = try self.dg.llvmType(lhs_ty);
const llvm_fn = self.getIntrinsic(intrinsic_name, &.{llvm_lhs_ty});
const result_struct = self.builder.buildCall(llvm_fn, &[_]*const llvm.Value{ lhs, rhs }, 2, .Fast, .Auto, "");
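// The llvm.*.with.overflow intrinsics return { iN, i1 }: field 0 is the wrapped result, field 1 the overflow bit.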
const result = self.builder.buildExtractValue(result_struct, 0, "");
const overflow_bit = self.builder.buildExtractValue(result_struct, 1, "");
self.store(ptr, ptr_ty, result, .NotAtomic);
return overflow_bit;
}
fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
const ptr = try self.resolveInst(pl_op.operand);
const lhs = try self.resolveInst(extra.lhs);
const rhs = try self.resolveInst(extra.rhs);
const ptr_ty = self.air.typeOf(pl_op.operand);
const lhs_ty = self.air.typeOf(extra.lhs);
const rhs_ty = self.air.typeOf(extra.rhs);
const tg = self.dg.module.getTarget();
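// The shift amount may be narrower than the operand (Zig gives it a log2-sized type), so widen it first.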
const casted_rhs = if (rhs_ty.bitSize(tg) < lhs_ty.bitSize(tg))
self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_ty), "")
else
rhs;
const result = self.builder.buildShl(lhs, casted_rhs, "");
const reconstructed = if (lhs_ty.isSignedInt())
self.builder.buildAShr(result, casted_rhs, "")
else
self.builder.buildLShr(result, casted_rhs, "");
const overflow_bit = self.builder.buildICmp(.NE, lhs, reconstructed, "");
self.store(ptr, ptr_ty, result, .NotAtomic);
return overflow_bit;
}
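The check above relies on a shift round trip: shift left, shift back, and compare against the original lhs; any set bits that fell off the top make the reconstruction differ. A minimal standalone Zig sketch of the same trick for unsigned integers (the helper name and test are illustrative, not part of this commit; the values mirror the behavior tests below):
const std = @import("std");

fn shlOverflows(comptime T: type, lhs: T, rhs: std.math.Log2Int(T)) bool {
    // If any set bits were shifted out, shifting back cannot reproduce lhs.
    const shifted = lhs << rhs;
    return (shifted >> rhs) != lhs;
}

test "shift-back overflow detection" {
    try std.testing.expect(shlOverflows(u16, 0b0010111111111111, 3));
    try std.testing.expect(!shlOverflows(u16, 0b0010111111111111, 2));
}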
fn airAnd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -3511,11 +3590,20 @@ pub const FuncGen = struct {
fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
_ = inst;
const llvm_fn = self.getIntrinsic("llvm.debugtrap");
const llvm_fn = self.getIntrinsic("llvm.debugtrap", &.{});
_ = self.builder.buildCall(llvm_fn, undefined, 0, .C, .Auto, "");
return null;
}
fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
_ = inst;
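// llvm.returnaddress takes the frame level as an i32 argument; 0 selects the current function's return address.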
const i32_zero = self.context.intType(32).constNull();
const usize_llvm_ty = try self.dg.llvmType(Type.usize);
const llvm_fn = self.getIntrinsic("llvm.returnaddress", &.{});
const ptr_val = self.builder.buildCall(llvm_fn, &[_]*const llvm.Value{i32_zero}, 1, .Fast, .Auto, "");
return self.builder.buildPtrToInt(ptr_val, usize_llvm_ty, "");
}
fn airFence(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
const atomic_order = self.air.instructions.items(.data)[inst].fence;
const llvm_memory_order = toLlvmAtomicOrdering(atomic_order);
@@ -3946,13 +4034,10 @@ pub const FuncGen = struct {
return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
}
fn getIntrinsic(self: *FuncGen, name: []const u8) *const llvm.Value {
fn getIntrinsic(self: *FuncGen, name: []const u8, types: []*const llvm.Type) *const llvm.Value {
const id = llvm.lookupIntrinsicID(name.ptr, name.len);
assert(id != 0);
// TODO: add support for overload intrinsics by passing the prefix of the intrinsic
// to `lookupIntrinsicID` and then passing the correct types to
// `getIntrinsicDeclaration`
return self.llvmModule().getIntrinsicDeclaration(id, null, 0);
return self.llvmModule().getIntrinsicDeclaration(id, types.ptr, types.len);
}
fn load(self: *FuncGen, ptr: *const llvm.Value, ptr_ty: Type) ?*const llvm.Value {

View File

@@ -159,6 +159,7 @@ const Writer = struct {
.breakpoint,
.unreach,
.ret_addr,
=> try w.writeNoOp(s, inst),
.const_ty,
@@ -228,6 +229,12 @@ const Writer = struct {
.atomic_rmw => try w.writeAtomicRmw(s, inst),
.memcpy => try w.writeMemcpy(s, inst),
.memset => try w.writeMemset(s, inst),
.add_with_overflow,
.sub_with_overflow,
.mul_with_overflow,
.shl_with_overflow,
=> try w.writeOverflow(s, inst),
}
}
@@ -348,6 +355,17 @@ const Writer = struct {
try s.print(", {s}, {s}", .{ @tagName(extra.op()), @tagName(extra.ordering()) });
}
fn writeOverflow(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
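// Overflow instructions use pl_op: the operand is the result pointer, and the Air.Bin payload holds lhs and rhs.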
const pl_op = w.air.instructions.items(.data)[inst].pl_op;
const extra = w.air.extraData(Air.Bin, pl_op.payload).data;
try w.writeOperand(s, inst, 0, pl_op.operand);
try s.writeAll(", ");
try w.writeOperand(s, inst, 1, extra.lhs);
try s.writeAll(", ");
try w.writeOperand(s, inst, 2, extra.rhs);
}
fn writeMemset(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const pl_op = w.air.instructions.items(.data)[inst].pl_op;
const extra = w.air.extraData(Air.Bin, pl_op.payload).data;

View File

@@ -627,7 +627,7 @@ pub const Type = extern union {
}
if (a.tag() == .error_set_inferred and b.tag() == .error_set_inferred) {
return a.castTag(.error_set_inferred).?.data.func == b.castTag(.error_set_inferred).?.data.func;
return a.castTag(.error_set_inferred).?.data == b.castTag(.error_set_inferred).?.data;
}
if (a.tag() == .error_set_single and b.tag() == .error_set_single) {
@@ -904,10 +904,11 @@ pub const Type = extern union {
});
},
.error_set_merged => {
const names = self.castTag(.error_set_merged).?.data;
const duped_names = try allocator.alloc([]const u8, names.len);
for (duped_names) |*name, i| {
name.* = try allocator.dupe(u8, names[i]);
const names = self.castTag(.error_set_merged).?.data.keys();
var duped_names = Module.ErrorSet.NameMap{};
try duped_names.ensureTotalCapacity(allocator, names.len);
for (names) |name| {
duped_names.putAssumeCapacityNoClobber(name, .{});
}
return Tag.error_set_merged.create(allocator, duped_names);
},
@@ -1206,7 +1207,7 @@ pub const Type = extern union {
return writer.print("(inferred error set of {s})", .{func.owner_decl.name});
},
.error_set_merged => {
const names = ty.castTag(.error_set_merged).?.data;
const names = ty.castTag(.error_set_merged).?.data.keys();
try writer.writeAll("error{");
for (names) |name, i| {
if (i != 0) try writer.writeByte(',');
@@ -1574,6 +1575,7 @@ pub const Type = extern union {
.extern_options,
.@"anyframe",
.anyframe_T,
.anyopaque,
.@"opaque",
.single_const_pointer,
.single_mut_pointer,
@@ -1653,7 +1655,6 @@ pub const Type = extern union {
return payload.error_set.hasCodeGenBits() or payload.payload.hasCodeGenBits();
},
.anyopaque,
.void,
.type,
.comptime_int,
@@ -2873,6 +2874,35 @@ pub const Type = extern union {
};
}
/// Returns whether `ty`, which must be an error set, contains an error named `name`.
/// May return a false negative if `ty` is an inferred error set that is not yet
/// fully resolved.
pub fn errorSetHasField(ty: Type, name: []const u8) bool {
if (ty.isAnyError()) {
return true;
}
switch (ty.tag()) {
.error_set_single => {
const data = ty.castTag(.error_set_single).?.data;
return std.mem.eql(u8, data, name);
},
.error_set_inferred => {
const data = ty.castTag(.error_set_inferred).?.data;
return data.errors.contains(name);
},
.error_set_merged => {
const data = ty.castTag(.error_set_merged).?.data;
return data.contains(name);
},
.error_set => {
const data = ty.castTag(.error_set).?.data;
return data.names.contains(name);
},
else => unreachable,
}
}
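A hedged usage sketch; the helper and the error name are hypothetical and only illustrate the call pattern:
fn mayFailWithFileNotFound(err_set_ty: Type) bool {
    // Subject to the false-negative caveat above for unresolved inferred error sets.
    return err_set_ty.errorSetHasField("FileNotFound");
}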
/// Asserts the type is an array or vector.
pub fn arrayLen(ty: Type) u64 {
return switch (ty.tag()) {
@@ -4148,57 +4178,14 @@ pub const Type = extern union {
pub const base_tag = Tag.error_set_merged;
base: Payload = Payload{ .tag = base_tag },
data: []const []const u8,
data: Module.ErrorSet.NameMap,
};
pub const ErrorSetInferred = struct {
pub const base_tag = Tag.error_set_inferred;
base: Payload = Payload{ .tag = base_tag },
data: Data,
pub const Data = struct {
func: *Module.Fn,
/// Direct additions to the inferred error set via `return error.Foo;`.
map: std.StringHashMapUnmanaged(void),
/// Other functions with inferred error sets which this error set includes.
functions: std.AutoHashMapUnmanaged(*Module.Fn, void),
is_anyerror: bool,
pub fn addErrorSet(self: *Data, gpa: Allocator, err_set_ty: Type) !void {
switch (err_set_ty.tag()) {
.error_set => {
const names = err_set_ty.castTag(.error_set).?.data.names();
for (names) |name| {
try self.map.put(gpa, name, {});
}
},
.error_set_single => {
const name = err_set_ty.castTag(.error_set_single).?.data;
try self.map.put(gpa, name, {});
},
.error_set_inferred => {
const func = err_set_ty.castTag(.error_set_inferred).?.data.func;
try self.functions.put(gpa, func, {});
var it = func.owner_decl.ty.fnReturnType().errorUnionSet()
.castTag(.error_set_inferred).?.data.map.iterator();
while (it.next()) |entry| {
try self.map.put(gpa, entry.key_ptr.*, {});
}
},
.error_set_merged => {
const names = err_set_ty.castTag(.error_set_merged).?.data;
for (names) |name| {
try self.map.put(gpa, name, {});
}
},
.anyerror => {
self.is_anyerror = true;
},
else => unreachable,
}
}
};
data: *Module.Fn.InferredErrorSet,
};
pub const Pointer = struct {

View File

@@ -1969,6 +1969,37 @@ pub const Value = extern union {
return @divFloor(@floatToInt(std.math.big.Limb, std.math.log2(w_value)), @typeInfo(std.math.big.Limb).Int.bits) + 1;
}
pub const OverflowArithmeticResult = struct {
overflowed: bool,
wrapped_result: Value,
};
pub fn intAddWithOverflow(
lhs: Value,
rhs: Value,
ty: Type,
arena: Allocator,
target: Target,
) !OverflowArithmeticResult {
const info = ty.intInfo(target);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space);
const rhs_bigint = rhs.toBigInt(&rhs_space);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
const result = try fromBigInt(arena, result_bigint.toConst());
return OverflowArithmeticResult{
.overflowed = overflowed,
.wrapped_result = result,
};
}
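The `overflowed` flag here is the new bool result of `BigIntMutable.addWrap` (changed elsewhere in this commit to report truncation). A standalone sketch of that behavior at the std.math.big level (the test itself is illustrative; the values mirror the u8 behavior tests added below):
const std = @import("std");
const big = std.math.big;

test "addWrap reports truncation" {
    var limbs: [big.int.calcTwosCompLimbCount(8)]big.Limb = undefined;
    var r = big.int.Mutable{ .limbs = &limbs, .positive = undefined, .len = undefined };
    const a = big.int.Const{ .limbs = &[_]big.Limb{250}, .positive = true };
    const b = big.int.Const{ .limbs = &[_]big.Limb{100}, .positive = true };
    // 250 + 100 = 350 does not fit in a u8; addWrap wraps to 94 and returns true.
    const overflowed = r.addWrap(a, b, .unsigned, 8);
    try std.testing.expect(overflowed);
    try std.testing.expectEqual(@as(u8, 94), try r.toConst().to(u8));
}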
/// Supports both floats and ints; handles undefined.
pub fn numberAddWrap(
lhs: Value,
@@ -1983,19 +2014,8 @@ pub const Value = extern union {
return floatAdd(lhs, rhs, ty, arena);
}
const info = ty.intInfo(target);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space);
const rhs_bigint = rhs.toBigInt(&rhs_space);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.addWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
return fromBigInt(arena, result_bigint.toConst());
const overflow_result = try intAddWithOverflow(lhs, rhs, ty, arena, target);
return overflow_result.wrapped_result;
}
fn fromBigInt(arena: Allocator, big_int: BigIntConst) !Value {
@@ -2040,6 +2060,32 @@ pub const Value = extern union {
return fromBigInt(arena, result_bigint.toConst());
}
pub fn intSubWithOverflow(
lhs: Value,
rhs: Value,
ty: Type,
arena: Allocator,
target: Target,
) !OverflowArithmeticResult {
const info = ty.intInfo(target);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space);
const rhs_bigint = rhs.toBigInt(&rhs_space);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
const wrapped_result = try fromBigInt(arena, result_bigint.toConst());
return OverflowArithmeticResult{
.overflowed = overflowed,
.wrapped_result = wrapped_result,
};
}
/// Supports both floats and ints; handles undefined.
pub fn numberSubWrap(
lhs: Value,
@@ -2054,19 +2100,8 @@ pub const Value = extern union {
return floatSub(lhs, rhs, ty, arena);
}
const info = ty.intInfo(target);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space);
const rhs_bigint = rhs.toBigInt(&rhs_space);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.subWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
return fromBigInt(arena, result_bigint.toConst());
const overflow_result = try intSubWithOverflow(lhs, rhs, ty, arena, target);
return overflow_result.wrapped_result;
}
/// Supports integers only; asserts neither operand is undefined.
@@ -2095,6 +2130,41 @@ pub const Value = extern union {
return fromBigInt(arena, result_bigint.toConst());
}
pub fn intMulWithOverflow(
lhs: Value,
rhs: Value,
ty: Type,
arena: Allocator,
target: Target,
) !OverflowArithmeticResult {
const info = ty.intInfo(target);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space);
const rhs_bigint = rhs.toBigInt(&rhs_space);
const limbs = try arena.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len + rhs_bigint.limbs.len,
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
var limbs_buffer = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcMulLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len, 1),
);
result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, arena);
const overflowed = !result_bigint.toConst().fitsInTwosComp(info.signedness, info.bits);
if (overflowed) {
result_bigint.truncate(result_bigint.toConst(), info.signedness, info.bits);
}
return OverflowArithmeticResult{
.overflowed = overflowed,
.wrapped_result = try fromBigInt(arena, result_bigint.toConst()),
};
}
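Note the difference from add/sub: there is no wrapping multiply that reports overflow directly, so the full product is computed in `lhs.limbs.len + rhs.limbs.len` limbs, checked against the target width with `fitsInTwosComp`, and truncated only when it does not fit. A worked instance matching the u8 behavior test added in this commit (the comptime block is a sketch, not part of the change):
const std = @import("std");

comptime {
    const full = @as(u16, 123) * 4; // 492, which needs 9 bits
    std.debug.assert(full > std.math.maxInt(u8)); // overflowed == true
    std.debug.assert(@truncate(u8, full) == 236); // wrapped_result == 236
}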
/// Supports both floats and ints; handles undefined.
pub fn numberMulWrap(
lhs: Value,
@@ -2109,24 +2179,8 @@ pub const Value = extern union {
return floatMul(lhs, rhs, ty, arena);
}
const info = ty.intInfo(target);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space);
const rhs_bigint = rhs.toBigInt(&rhs_space);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
var limbs_buffer = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcMulWrapLimbsBufferLen(info.bits, lhs_bigint.limbs.len, rhs_bigint.limbs.len, 1),
);
defer arena.free(limbs_buffer);
result_bigint.mulWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits, limbs_buffer, arena);
return fromBigInt(arena, result_bigint.toConst());
const overflow_result = try intMulWithOverflow(lhs, rhs, ty, arena, target);
return overflow_result.wrapped_result;
}
/// Supports integers only; asserts neither operand is undefined.
@@ -2159,7 +2213,6 @@ pub const Value = extern union {
std.math.big.Limb,
std.math.big.int.calcMulLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len, 1),
);
defer arena.free(limbs_buffer);
result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, arena);
result_bigint.saturate(result_bigint.toConst(), info.signedness, info.bits);
return fromBigInt(arena, result_bigint.toConst());
@@ -2495,6 +2548,37 @@ pub const Value = extern union {
return fromBigInt(allocator, result_bigint.toConst());
}
pub fn shlWithOverflow(
lhs: Value,
rhs: Value,
ty: Type,
allocator: Allocator,
target: Target,
) !OverflowArithmeticResult {
const info = ty.intInfo(target);
var lhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space);
const shift = @intCast(usize, rhs.toUnsignedInt());
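// Allocate room for the fully shifted value: the original limbs, one limb per
// limb-width of shift, and one extra for the bits that spill over.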
const limbs = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1,
);
var result_bigint = BigIntMutable{
.limbs = limbs,
.positive = undefined,
.len = undefined,
};
result_bigint.shiftLeft(lhs_bigint, shift);
const overflowed = !result_bigint.toConst().fitsInTwosComp(info.signedness, info.bits);
if (overflowed) {
result_bigint.truncate(result_bigint.toConst(), info.signedness, info.bits);
}
return OverflowArithmeticResult{
.overflowed = overflowed,
.wrapped_result = try fromBigInt(allocator, result_bigint.toConst()),
};
}
pub fn shlSat(
lhs: Value,
rhs: Value,

View File

@@ -451,3 +451,19 @@ test "comptime bitwise operators" {
try expect(~@as(u128, 0) == 0xffffffffffffffffffffffffffffffff);
}
}
test "comptime shlWithOverflow" {
const ct_shifted: u64 = comptime amt: {
var amt = @as(u64, 0);
_ = @shlWithOverflow(u64, ~@as(u64, 0), 16, &amt);
break :amt amt;
};
const rt_shifted: u64 = amt: {
var amt = @as(u64, 0);
_ = @shlWithOverflow(u64, ~@as(u64, 0), 16, &amt);
break :amt amt;
};
try expect(ct_shifted == rt_shifted);
}

View File

@@ -162,22 +162,6 @@ test "const ptr to comptime mutable data is not memoized" {
}
}
test "comptime shlWithOverflow" {
const ct_shifted: u64 = comptime amt: {
var amt = @as(u64, 0);
_ = @shlWithOverflow(u64, ~@as(u64, 0), 16, &amt);
break :amt amt;
};
const rt_shifted: u64 = amt: {
var amt = @as(u64, 0);
_ = @shlWithOverflow(u64, ~@as(u64, 0), 16, &amt);
break :amt amt;
};
try expect(ct_shifted == rt_shifted);
}
test "runtime 128 bit integer division" {
var a: u128 = 152313999999999991610955792383;
var b: u128 = 10000000000000000000;

View File

@@ -444,3 +444,98 @@ test "128-bit multiplication" {
var c = a * b;
try expect(c == 6);
}
test "@addWithOverflow" {
var result: u8 = undefined;
try expect(@addWithOverflow(u8, 250, 100, &result));
try expect(result == 94);
try expect(!@addWithOverflow(u8, 100, 150, &result));
try expect(result == 250);
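// Now with runtime-known operands, so the backend lowering is exercised too.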
var a: u8 = 200;
var b: u8 = 99;
try expect(@addWithOverflow(u8, a, b, &result));
try expect(result == 43);
b = 55;
try expect(!@addWithOverflow(u8, a, b, &result));
try expect(result == 255);
}
test "small int addition" {
var x: u2 = 0;
try expect(x == 0);
x += 1;
try expect(x == 1);
x += 1;
try expect(x == 2);
x += 1;
try expect(x == 3);
var result: @TypeOf(x) = 3;
try expect(@addWithOverflow(@TypeOf(x), x, 1, &result));
try expect(result == 0);
}
test "@mulWithOverflow" {
var result: u8 = undefined;
try expect(@mulWithOverflow(u8, 86, 3, &result));
try expect(result == 2);
try expect(!@mulWithOverflow(u8, 85, 3, &result));
try expect(result == 255);
var a: u8 = 123;
var b: u8 = 2;
try expect(!@mulWithOverflow(u8, a, b, &result));
try expect(result == 246);
b = 4;
try expect(@mulWithOverflow(u8, a, b, &result));
try expect(result == 236);
}
test "@subWithOverflow" {
var result: u8 = undefined;
try expect(@subWithOverflow(u8, 1, 2, &result));
try expect(result == 255);
try expect(!@subWithOverflow(u8, 1, 1, &result));
try expect(result == 0);
var a: u8 = 1;
var b: u8 = 2;
try expect(@subWithOverflow(u8, a, b, &result));
try expect(result == 255);
b = 1;
try expect(!@subWithOverflow(u8, a, b, &result));
try expect(result == 0);
}
test "@shlWithOverflow" {
var result: u16 = undefined;
try expect(@shlWithOverflow(u16, 0b0010111111111111, 3, &result));
try expect(result == 0b0111111111111000);
try expect(!@shlWithOverflow(u16, 0b0010111111111111, 2, &result));
try expect(result == 0b1011111111111100);
var a: u16 = 0b0000_0000_0000_0011;
var b: u4 = 15;
try expect(@shlWithOverflow(u16, a, b, &result));
try expect(result == 0b1000_0000_0000_0000);
b = 14;
try expect(!@shlWithOverflow(u16, a, b, &result));
try expect(result == 0b1100_0000_0000_0000);
}
test "overflow arithmetic with u0 values" {
var result: u0 = undefined;
try expect(!@addWithOverflow(u0, 0, 0, &result));
try expect(result == 0);
try expect(!@subWithOverflow(u0, 0, 0, &result));
try expect(result == 0);
try expect(!@mulWithOverflow(u0, 0, 0, &result));
try expect(result == 0);
try expect(!@shlWithOverflow(u0, 0, 0, &result));
try expect(result == 0);
}

View File

@@ -6,50 +6,6 @@ const maxInt = std.math.maxInt;
const minInt = std.math.minInt;
const mem = std.mem;
test "@addWithOverflow" {
var result: u8 = undefined;
try expect(@addWithOverflow(u8, 250, 100, &result));
try expect(result == 94);
try expect(!@addWithOverflow(u8, 100, 150, &result));
try expect(result == 250);
}
test "@mulWithOverflow" {
var result: u8 = undefined;
try expect(@mulWithOverflow(u8, 86, 3, &result));
try expect(result == 2);
try expect(!@mulWithOverflow(u8, 85, 3, &result));
try expect(result == 255);
}
test "@subWithOverflow" {
var result: u8 = undefined;
try expect(@subWithOverflow(u8, 1, 2, &result));
try expect(result == 255);
try expect(!@subWithOverflow(u8, 1, 1, &result));
try expect(result == 0);
}
test "@shlWithOverflow" {
var result: u16 = undefined;
try expect(@shlWithOverflow(u16, 0b0010111111111111, 3, &result));
try expect(result == 0b0111111111111000);
try expect(!@shlWithOverflow(u16, 0b0010111111111111, 2, &result));
try expect(result == 0b1011111111111100);
}
test "overflow arithmetic with u0 values" {
var result: u0 = undefined;
try expect(!@addWithOverflow(u0, 0, 0, &result));
try expect(result == 0);
try expect(!@subWithOverflow(u0, 0, 0, &result));
try expect(result == 0);
try expect(!@mulWithOverflow(u0, 0, 0, &result));
try expect(result == 0);
try expect(!@shlWithOverflow(u0, 0, 0, &result));
try expect(result == 0);
}
test "@clz vectors" {
try testClzVectors();
comptime try testClzVectors();
@@ -90,25 +46,6 @@ fn testCtzVectors() !void {
try expectEqual(@ctz(u16, @splat(64, @as(u16, 0b00000000))), @splat(64, @as(u5, 16)));
}
test "small int addition" {
var x: u2 = 0;
try expect(x == 0);
x += 1;
try expect(x == 1);
x += 1;
try expect(x == 2);
x += 1;
try expect(x == 3);
var result: @TypeOf(x) = 3;
try expect(@addWithOverflow(@TypeOf(x), x, 1, &result));
try expect(result == 0);
}
test "allow signed integer division/remainder when values are comptime known and positive or exact" {
try expect(5 / 3 == 1);
try expect(-5 / -3 == 1);