From cd733ceb852369427301fbb526b82ad4407d0607 Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Thu, 16 Dec 2021 01:45:41 +0100 Subject: [PATCH 01/17] stage2: replace ErrorSet and ErrorSetMerged arrays with hash maps --- src/Module.zig | 8 +--- src/Sema.zig | 119 +++++++++++++++++++------------------------------ src/type.zig | 17 +++---- 3 files changed, 56 insertions(+), 88 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index 341b6bb56e..8341c3de60 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -796,15 +796,11 @@ pub const ErrorSet = struct { owner_decl: *Decl, /// Offset from Decl node index, points to the error set AST node. node_offset: i32, - names_len: u32, /// The string bytes are stored in the owner Decl arena. /// They are in the same order they appear in the AST. - /// The length is given by `names_len`. - names_ptr: [*]const []const u8, + names: NameMap, - pub fn names(self: ErrorSet) []const []const u8 { - return self.names_ptr[0..self.names_len]; - } + pub const NameMap = std.StringArrayHashMapUnmanaged(void); pub fn srcLoc(self: ErrorSet) SrcLoc { return .{ diff --git a/src/Sema.zig b/src/Sema.zig index 044084a349..6e208f8b7f 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2025,15 +2025,22 @@ fn zirErrorSetDecl( }, type_name); new_decl.owns_tv = true; errdefer sema.mod.abortAnonDecl(new_decl); - const names = try new_decl_arena_allocator.alloc([]const u8, fields.len); - for (fields) |str_index, i| { - names[i] = try new_decl_arena_allocator.dupe(u8, sema.code.nullTerminatedString(str_index)); + + var names = Module.ErrorSet.NameMap{}; + try names.ensureUnusedCapacity(new_decl_arena_allocator, fields.len); + for (fields) |str_index| { + const name = try new_decl_arena_allocator.dupe(u8, sema.code.nullTerminatedString(str_index)); + + // TODO: This check should be performed in AstGen instead. 
+ const result = names.getOrPutAssumeCapacity(name); + if (result.found_existing) { + return sema.fail(block, src, "duplicate error set field {s}", .{name}); + } } error_set.* = .{ .owner_decl = new_decl, .node_offset = inst_data.src_node, - .names_ptr = names.ptr, - .names_len = @intCast(u32, names.len), + .names = names, }; try new_decl.finalizeNewArena(&new_decl_arena); return sema.analyzeDeclVal(block, src, new_decl); @@ -4556,63 +4563,43 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr return Air.Inst.Ref.anyerror_type; } // Resolve both error sets now. - var set: std.StringHashMapUnmanaged(void) = .{}; - defer set.deinit(sema.gpa); + const lhs_names = switch (lhs_ty.tag()) { + .error_set_single => blk: { + // Work around coercion problems + const tmp: *const [1][]const u8 = &lhs_ty.castTag(.error_set_single).?.data; + break :blk tmp; + }, + .error_set_merged => lhs_ty.castTag(.error_set_merged).?.data.keys(), + .error_set => lhs_ty.castTag(.error_set).?.data.names.keys(), + else => unreachable, + }; - switch (lhs_ty.tag()) { - .error_set_single => { - const name = lhs_ty.castTag(.error_set_single).?.data; - try set.put(sema.gpa, name, {}); - }, - .error_set_merged => { - const names = lhs_ty.castTag(.error_set_merged).?.data; - for (names) |name| { - try set.put(sema.gpa, name, {}); - } - }, - .error_set => { - const lhs_set = lhs_ty.castTag(.error_set).?.data; - try set.ensureUnusedCapacity(sema.gpa, lhs_set.names_len); - for (lhs_set.names_ptr[0..lhs_set.names_len]) |name| { - set.putAssumeCapacityNoClobber(name, {}); - } + const rhs_names = switch (rhs_ty.tag()) { + .error_set_single => blk: { + const tmp: *const [1][]const u8 = &rhs_ty.castTag(.error_set_single).?.data; + break :blk tmp; }, + .error_set_merged => rhs_ty.castTag(.error_set_merged).?.data.keys(), + .error_set => rhs_ty.castTag(.error_set).?.data.names.keys(), else => unreachable, - } - switch (rhs_ty.tag()) { - .error_set_single => { - const name = 
rhs_ty.castTag(.error_set_single).?.data; - try set.put(sema.gpa, name, {}); - }, - .error_set_merged => { - const names = rhs_ty.castTag(.error_set_merged).?.data; - for (names) |name| { - try set.put(sema.gpa, name, {}); - } - }, - .error_set => { - const rhs_set = rhs_ty.castTag(.error_set).?.data; - try set.ensureUnusedCapacity(sema.gpa, rhs_set.names_len); - for (rhs_set.names_ptr[0..rhs_set.names_len]) |name| { - set.putAssumeCapacity(name, {}); - } - }, - else => unreachable, - } + }; // TODO do we really want to create a Decl for this? // The reason we do it right now is for memory management. var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const new_names = try anon_decl.arena().alloc([]const u8, set.count()); - var it = set.keyIterator(); - var i: usize = 0; - while (it.next()) |key| : (i += 1) { - new_names[i] = key.*; + var names = Module.ErrorSet.NameMap{}; + // TODO: Guess is an upper bound, but maybe this needs to be reduced by computing the exact size first. + try names.ensureUnusedCapacity(anon_decl.arena(), @intCast(u32, lhs_names.len + rhs_names.len)); + for (lhs_names) |name| { + names.putAssumeCapacityNoClobber(name, {}); + } + for (rhs_names) |name| { + names.putAssumeCapacity(name, {}); } - const err_set_ty = try Type.Tag.error_set_merged.create(anon_decl.arena(), new_names); + const err_set_ty = try Type.Tag.error_set_merged.create(anon_decl.arena(), names); const err_set_decl = try anon_decl.finish( Type.type, try Value.Tag.ty.create(anon_decl.arena(), err_set_ty), @@ -11425,14 +11412,8 @@ fn fieldVal( switch (child_type.zigTypeTag()) { .ErrorSet => { const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: { - const error_set = payload.data; - // TODO this is O(N). I'm putting off solving this until we solve inferred - // error sets at the same time. 
- const names = error_set.names_ptr[0..error_set.names_len]; - for (names) |name| { - if (mem.eql(u8, field_name, name)) { - break :blk name; - } + if (payload.data.names.getEntry(field_name)) |entry| { + break :blk entry.key_ptr.*; } return sema.fail(block, src, "no error named '{s}' in '{}'", .{ field_name, child_type, @@ -11630,14 +11611,8 @@ fn fieldPtr( .ErrorSet => { // TODO resolve inferred error sets const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: { - const error_set = payload.data; - // TODO this is O(N). I'm putting off solving this until we solve inferred - // error sets at the same time. - const names = error_set.names_ptr[0..error_set.names_len]; - for (names) |name| { - if (mem.eql(u8, field_name, name)) { - break :blk name; - } + if (payload.data.names.getEntry(field_name)) |entry| { + break :blk entry.key_ptr.*; } return sema.fail(block, src, "no error named '{s}' in '{}'", .{ field_name, child_type, @@ -13916,16 +13891,12 @@ fn wrapErrorUnion( if (mem.eql(u8, expected_name, n)) break :ok; return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, - .error_set => ok: { + .error_set => { const expected_name = val.castTag(.@"error").?.data.name; const error_set = dest_err_set_ty.castTag(.error_set).?.data; - const names = error_set.names_ptr[0..error_set.names_len]; - // TODO this is O(N). I'm putting off solving this until we solve inferred - // error sets at the same time. 
- for (names) |name| { - if (mem.eql(u8, expected_name, name)) break :ok; + if (!error_set.names.contains(expected_name)) { + return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); } - return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, .error_set_inferred => ok: { const err_set_payload = dest_err_set_ty.castTag(.error_set_inferred).?.data; diff --git a/src/type.zig b/src/type.zig index c5ba6c03c7..4930c7057f 100644 --- a/src/type.zig +++ b/src/type.zig @@ -904,10 +904,11 @@ pub const Type = extern union { }); }, .error_set_merged => { - const names = self.castTag(.error_set_merged).?.data; - const duped_names = try allocator.alloc([]const u8, names.len); - for (duped_names) |*name, i| { - name.* = try allocator.dupe(u8, names[i]); + const names = self.castTag(.error_set_merged).?.data.keys(); + var duped_names = Module.ErrorSet.NameMap{}; + try duped_names.ensureTotalCapacity(allocator, names.len); + for (names) |name| { + duped_names.putAssumeCapacityNoClobber(name, .{}); } return Tag.error_set_merged.create(allocator, duped_names); }, @@ -1206,7 +1207,7 @@ pub const Type = extern union { return writer.print("(inferred error set of {s})", .{func.owner_decl.name}); }, .error_set_merged => { - const names = ty.castTag(.error_set_merged).?.data; + const names = ty.castTag(.error_set_merged).?.data.keys(); try writer.writeAll("error{"); for (names) |name, i| { if (i != 0) try writer.writeByte(','); @@ -4148,7 +4149,7 @@ pub const Type = extern union { pub const base_tag = Tag.error_set_merged; base: Payload = Payload{ .tag = base_tag }, - data: []const []const u8, + data: Module.ErrorSet.NameMap, }; pub const ErrorSetInferred = struct { @@ -4168,7 +4169,7 @@ pub const Type = extern union { pub fn addErrorSet(self: *Data, gpa: Allocator, err_set_ty: Type) !void { switch (err_set_ty.tag()) { .error_set => { - const names = err_set_ty.castTag(.error_set).?.data.names(); + const names = 
err_set_ty.castTag(.error_set).?.data.names.keys(); for (names) |name| { try self.map.put(gpa, name, {}); } @@ -4187,7 +4188,7 @@ pub const Type = extern union { } }, .error_set_merged => { - const names = err_set_ty.castTag(.error_set_merged).?.data; + const names = err_set_ty.castTag(.error_set_merged).?.data.keys(); for (names) |name| { try self.map.put(gpa, name, {}); } From b2343e63bd06d1312ca80745236bb42358062115 Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Thu, 16 Dec 2021 02:23:15 +0100 Subject: [PATCH 02/17] stage2: move inferred error set state into func --- src/Module.zig | 60 ++++++++++++++++++++++++++++++++++++----------- src/Sema.zig | 25 ++++++++------------ src/codegen/c.zig | 2 +- src/type.zig | 51 ++++------------------------------------ 4 files changed, 61 insertions(+), 77 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index 8341c3de60..b5d856efd0 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1207,6 +1207,24 @@ pub const Fn = struct { is_cold: bool = false, is_noinline: bool = false, + /// These fields are used to keep track of any dependencies related to functions + /// that return inferred error sets. It's values are not used when the function + /// does not return an inferred error set. + inferred_error_set: struct { + /// All currently known errors that this function returns. This includes direct additions + /// via `return error.Foo;`, and possibly also errors that are returned from any dependent functions. + /// When the inferred error set is fully resolved, this map contains all the errors that the function might return. + errors: std.StringHashMapUnmanaged(void) = .{}, + + /// Other functions with inferred error sets which the inferred error set of this + /// function should include. + functions: std.AutoHashMapUnmanaged(*Fn, void) = .{}, + + /// Whether the function returned anyerror. This is true if either of the dependent functions + /// returns anyerror. 
+ is_anyerror: bool = false, + } = .{}, + pub const Analysis = enum { queued, /// This function intentionally only has ZIR generated because it is marked @@ -1222,23 +1240,37 @@ pub const Fn = struct { }; pub fn deinit(func: *Fn, gpa: Allocator) void { - if (func.getInferredErrorSet()) |error_set_data| { - error_set_data.map.deinit(gpa); - error_set_data.functions.deinit(gpa); - } + func.inferred_error_set.errors.deinit(gpa); + func.inferred_error_set.functions.deinit(gpa); } - pub fn getInferredErrorSet(func: *Fn) ?*Type.Payload.ErrorSetInferred.Data { - const ret_ty = func.owner_decl.ty.fnReturnType(); - if (ret_ty.tag() == .generic_poison) { - return null; + pub fn addErrorSet(func: *Fn, gpa: Allocator, err_set_ty: Type) !void { + switch (err_set_ty.tag()) { + .error_set => { + const names = err_set_ty.castTag(.error_set).?.data.names.keys(); + for (names) |name| { + try func.inferred_error_set.errors.put(gpa, name, {}); + } + }, + .error_set_single => { + const name = err_set_ty.castTag(.error_set_single).?.data; + try func.inferred_error_set.errors.put(gpa, name, {}); + }, + .error_set_inferred => { + const dependent_func = err_set_ty.castTag(.error_set_inferred).?.data; + try func.inferred_error_set.functions.put(gpa, dependent_func, {}); + }, + .error_set_merged => { + const names = err_set_ty.castTag(.error_set_merged).?.data.keys(); + for (names) |name| { + try func.inferred_error_set.errors.put(gpa, name, {}); + } + }, + .anyerror => { + func.inferred_error_set.is_anyerror = true; + }, + else => unreachable, } - if (ret_ty.zigTypeTag() == .ErrorUnion) { - if (ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| { - return &payload.data; - } - } - return null; } }; diff --git a/src/Sema.zig b/src/Sema.zig index 6e208f8b7f..30c59f9efa 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5107,12 +5107,7 @@ fn funcCommon( const return_type = if (!inferred_error_set or bare_return_type.tag() == .generic_poison) bare_return_type else blk: { - const 
error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, .{ - .func = new_func, - .map = .{}, - .functions = .{}, - .is_anyerror = false, - }); + const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, new_func); break :blk try Type.Tag.error_union.create(sema.arena, .{ .error_set = error_set_ty, .payload = bare_return_type, @@ -9209,14 +9204,14 @@ fn analyzeRet( // add the error tag to the inferred error set of the in-scope function, so // that the coercion below works correctly. if (sema.fn_ret_ty.zigTypeTag() == .ErrorUnion) { - if (sema.fn_ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| { + if (sema.fn_ret_ty.errorUnionSet().tag() == .error_set_inferred) { const op_ty = sema.typeOf(uncasted_operand); switch (op_ty.zigTypeTag()) { .ErrorSet => { - try payload.data.addErrorSet(sema.gpa, op_ty); + try sema.func.?.addErrorSet(sema.gpa, op_ty); }, .ErrorUnion => { - try payload.data.addErrorSet(sema.gpa, op_ty.errorUnionSet()); + try sema.func.?.addErrorSet(sema.gpa, op_ty.errorUnionSet()); }, else => {}, } @@ -12501,10 +12496,10 @@ fn coerceInMemoryAllowedErrorSets( // of inferred error sets. 
if (src_ty.castTag(.error_set_inferred)) |src_payload| { if (dest_ty.castTag(.error_set_inferred)) |dst_payload| { - const src_func = src_payload.data.func; - const dst_func = dst_payload.data.func; + const src_func = src_payload.data; + const dst_func = dst_payload.data; - if (src_func == dst_func or dst_payload.data.functions.contains(src_func)) { + if (src_func == dst_func or dst_func.inferred_error_set.functions.contains(src_func)) { return .ok; } } @@ -13899,10 +13894,10 @@ fn wrapErrorUnion( } }, .error_set_inferred => ok: { - const err_set_payload = dest_err_set_ty.castTag(.error_set_inferred).?.data; - if (err_set_payload.is_anyerror) break :ok; + const func = dest_err_set_ty.castTag(.error_set_inferred).?.data; + if (func.inferred_error_set.is_anyerror) break :ok; const expected_name = val.castTag(.@"error").?.data.name; - if (err_set_payload.map.contains(expected_name)) break :ok; + if (func.inferred_error_set.errors.contains(expected_name)) break :ok; // TODO error set resolution here before emitting a compile error return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index f54ae7f76d..8babcb9a83 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -722,7 +722,7 @@ pub const DeclGen = struct { try bw.writeAll(" payload; uint16_t error; } "); const name_index = buffer.items.len; if (err_set_type.castTag(.error_set_inferred)) |inf_err_set_payload| { - const func = inf_err_set_payload.data.func; + const func = inf_err_set_payload.data; try bw.writeAll("zig_E_"); try dg.renderDeclName(func.owner_decl, bw); try bw.writeAll(";\n"); diff --git a/src/type.zig b/src/type.zig index 4930c7057f..298df2cffc 100644 --- a/src/type.zig +++ b/src/type.zig @@ -627,7 +627,7 @@ pub const Type = extern union { } if (a.tag() == .error_set_inferred and b.tag() == .error_set_inferred) { - return a.castTag(.error_set_inferred).?.data.func == b.castTag(.error_set_inferred).?.data.func; + 
return a.castTag(.error_set_inferred).?.data == b.castTag(.error_set_inferred).?.data; } if (a.tag() == .error_set_single and b.tag() == .error_set_single) { @@ -1203,7 +1203,7 @@ pub const Type = extern union { return writer.writeAll(std.mem.sliceTo(error_set.owner_decl.name, 0)); }, .error_set_inferred => { - const func = ty.castTag(.error_set_inferred).?.data.func; + const func = ty.castTag(.error_set_inferred).?.data; return writer.print("(inferred error set of {s})", .{func.owner_decl.name}); }, .error_set_merged => { @@ -2869,7 +2869,7 @@ pub const Type = extern union { pub fn isAnyError(ty: Type) bool { return switch (ty.tag()) { .anyerror => true, - .error_set_inferred => ty.castTag(.error_set_inferred).?.data.is_anyerror, + .error_set_inferred => ty.castTag(.error_set_inferred).?.data.inferred_error_set.is_anyerror, else => false, }; } @@ -4156,50 +4156,7 @@ pub const Type = extern union { pub const base_tag = Tag.error_set_inferred; base: Payload = Payload{ .tag = base_tag }, - data: Data, - - pub const Data = struct { - func: *Module.Fn, - /// Direct additions to the inferred error set via `return error.Foo;`. - map: std.StringHashMapUnmanaged(void), - /// Other functions with inferred error sets which this error set includes. 
- functions: std.AutoHashMapUnmanaged(*Module.Fn, void), - is_anyerror: bool, - - pub fn addErrorSet(self: *Data, gpa: Allocator, err_set_ty: Type) !void { - switch (err_set_ty.tag()) { - .error_set => { - const names = err_set_ty.castTag(.error_set).?.data.names.keys(); - for (names) |name| { - try self.map.put(gpa, name, {}); - } - }, - .error_set_single => { - const name = err_set_ty.castTag(.error_set_single).?.data; - try self.map.put(gpa, name, {}); - }, - .error_set_inferred => { - const func = err_set_ty.castTag(.error_set_inferred).?.data.func; - try self.functions.put(gpa, func, {}); - var it = func.owner_decl.ty.fnReturnType().errorUnionSet() - .castTag(.error_set_inferred).?.data.map.iterator(); - while (it.next()) |entry| { - try self.map.put(gpa, entry.key_ptr.*, {}); - } - }, - .error_set_merged => { - const names = err_set_ty.castTag(.error_set_merged).?.data.keys(); - for (names) |name| { - try self.map.put(gpa, name, {}); - } - }, - .anyerror => { - self.is_anyerror = true; - }, - else => unreachable, - } - } - }; + data: *Module.Fn, }; pub const Pointer = struct { From a2958a4ede0af4b4559eeb142c0400ae640db63e Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Fri, 17 Dec 2021 03:40:48 +0100 Subject: [PATCH 03/17] stage2: allow multiple inferred error sets per Fn This allows the inferred error set of comptime and inline invocations to be resolved separately from the inferred error set of the runtime version or other comptime/inline invocations. 
--- src/Module.zig | 112 +++++++++++++++++++++++++++------------------- src/Sema.zig | 46 ++++++++++++------- src/codegen/c.zig | 2 +- src/type.zig | 4 +- 4 files changed, 99 insertions(+), 65 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index b5d856efd0..7031dc20a5 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1207,23 +1207,9 @@ pub const Fn = struct { is_cold: bool = false, is_noinline: bool = false, - /// These fields are used to keep track of any dependencies related to functions - /// that return inferred error sets. It's values are not used when the function - /// does not return an inferred error set. - inferred_error_set: struct { - /// All currently known errors that this function returns. This includes direct additions - /// via `return error.Foo;`, and possibly also errors that are returned from any dependent functions. - /// When the inferred error set is fully resolved, this map contains all the errors that the function might return. - errors: std.StringHashMapUnmanaged(void) = .{}, - - /// Other functions with inferred error sets which the inferred error set of this - /// function should include. - functions: std.AutoHashMapUnmanaged(*Fn, void) = .{}, - - /// Whether the function returned anyerror. This is true if either of the dependent functions - /// returns anyerror. - is_anyerror: bool = false, - } = .{}, + /// Any inferred error sets that this function owns, both it's own inferred error set and + /// inferred error sets of any inline/comptime functions called. + inferred_error_sets: InferredErrorSetList = .{}, pub const Analysis = enum { queued, @@ -1239,37 +1225,69 @@ pub const Fn = struct { success, }; - pub fn deinit(func: *Fn, gpa: Allocator) void { - func.inferred_error_set.errors.deinit(gpa); - func.inferred_error_set.functions.deinit(gpa); - } + /// This struct is used to keep track of any dependencies related to functions instances + /// that return inferred error sets. 
Note that a function may be associated to multiple different error sets, + /// for example an inferred error set which this function returns, but also any inferred error sets + /// of called inline or comptime functions. + pub const InferredErrorSet = struct { + /// The function from which this error set originates. + /// Note: may be the function itself. + func: *Fn, - pub fn addErrorSet(func: *Fn, gpa: Allocator, err_set_ty: Type) !void { - switch (err_set_ty.tag()) { - .error_set => { - const names = err_set_ty.castTag(.error_set).?.data.names.keys(); - for (names) |name| { - try func.inferred_error_set.errors.put(gpa, name, {}); - } - }, - .error_set_single => { - const name = err_set_ty.castTag(.error_set_single).?.data; - try func.inferred_error_set.errors.put(gpa, name, {}); - }, - .error_set_inferred => { - const dependent_func = err_set_ty.castTag(.error_set_inferred).?.data; - try func.inferred_error_set.functions.put(gpa, dependent_func, {}); - }, - .error_set_merged => { - const names = err_set_ty.castTag(.error_set_merged).?.data.keys(); - for (names) |name| { - try func.inferred_error_set.errors.put(gpa, name, {}); - } - }, - .anyerror => { - func.inferred_error_set.is_anyerror = true; - }, - else => unreachable, + /// All currently known errors that this error set contains. This includes direct additions + /// via `return error.Foo;`, and possibly also errors that are returned from any dependent functions. + /// When the inferred error set is fully resolved, this map contains all the errors that the function might return. + errors: std.StringHashMapUnmanaged(void) = .{}, + + /// Other functions with inferred error sets which the inferred error set of this + /// function should include. + functions: std.AutoHashMapUnmanaged(*Fn, void) = .{}, + + /// Whether the function returned anyerror. This is true if either of the dependent functions + /// returns anyerror. 
+ is_anyerror: bool = false, + + pub fn addErrorSet(self: *InferredErrorSet, gpa: Allocator, err_set_ty: Type) !void { + switch (err_set_ty.tag()) { + .error_set => { + const names = err_set_ty.castTag(.error_set).?.data.names.keys(); + for (names) |name| { + try self.errors.put(gpa, name, {}); + } + }, + .error_set_single => { + const name = err_set_ty.castTag(.error_set_single).?.data; + try self.errors.put(gpa, name, {}); + }, + .error_set_inferred => { + const dependent_func = err_set_ty.castTag(.error_set_inferred).?.data.func; + try self.functions.put(gpa, dependent_func, {}); + }, + .error_set_merged => { + const names = err_set_ty.castTag(.error_set_merged).?.data.keys(); + for (names) |name| { + try self.errors.put(gpa, name, {}); + } + }, + .anyerror => { + self.is_anyerror = true; + }, + else => unreachable, + } + } + }; + + pub const InferredErrorSetList = std.SinglyLinkedList(InferredErrorSet); + pub const InferredErrorSetListNode = InferredErrorSetList.Node; + + pub fn deinit(func: *Fn, gpa: Allocator) void { + var it = func.inferred_error_sets.first; + while (it) |node| { + const next = node.next; + node.data.errors.deinit(gpa); + node.data.functions.deinit(gpa); + gpa.destroy(node); + it = next; } } }; diff --git a/src/Sema.zig b/src/Sema.zig index 30c59f9efa..f23ffe24c0 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -3896,11 +3896,12 @@ fn analyzeCall( const bare_return_type = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst); // If the function has an inferred error set, `bare_return_type` is the payload type only. const fn_ret_ty = blk: { - // TODO instead of reusing the function's inferred error set, this code should - // create a temporary error set which is used for the comptime/inline function - // call alone, independent from the runtime instantiation. 
- if (func_ty_info.return_type.castTag(.error_union)) |payload| { - const error_set_ty = payload.data.error_set; + if (func_ty_info.return_type.tag() == .error_union) { + const node = try sema.gpa.create(Module.Fn.InferredErrorSetListNode); + node.data = .{ .func = module_fn }; + parent_func.?.inferred_error_sets.prepend(node); + + const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, &node.data); break :blk try Type.Tag.error_union.create(sema.arena, .{ .error_set = error_set_ty, .payload = bare_return_type, @@ -5066,6 +5067,10 @@ fn funcCommon( }; errdefer if (body_inst != 0) sema.gpa.destroy(new_func); + var maybe_inferred_error_set_node: ?*Module.Fn.InferredErrorSetListNode = null; + errdefer if (maybe_inferred_error_set_node) |node| sema.gpa.destroy(node); + // Note: no need to errdefer since this will still be in its default state at the end of the function. + const fn_ty: Type = fn_ty: { // Hot path for some common function types. // TODO can we eliminate some of these Type tag values? seems unnecessarily complicated. 
@@ -5107,7 +5112,11 @@ fn funcCommon( const return_type = if (!inferred_error_set or bare_return_type.tag() == .generic_poison) bare_return_type else blk: { - const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, new_func); + const node = try sema.gpa.create(Module.Fn.InferredErrorSetListNode); + node.data = .{ .func = new_func }; + maybe_inferred_error_set_node = node; + + const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, &node.data); break :blk try Type.Tag.error_union.create(sema.arena, .{ .error_set = error_set_ty, .payload = bare_return_type, @@ -5198,7 +5207,14 @@ fn funcCommon( .rbrace_line = src_locs.rbrace_line, .lbrace_column = @truncate(u16, src_locs.columns), .rbrace_column = @truncate(u16, src_locs.columns >> 16), + .inferred_error_sets = .{ + .first = maybe_inferred_error_set_node, + }, }; + if (maybe_inferred_error_set_node) |node| { + new_func.inferred_error_sets.prepend(node); + } + maybe_inferred_error_set_node = null; fn_payload.* = .{ .base = .{ .tag = .function }, .data = new_func, @@ -9204,14 +9220,14 @@ fn analyzeRet( // add the error tag to the inferred error set of the in-scope function, so // that the coercion below works correctly. if (sema.fn_ret_ty.zigTypeTag() == .ErrorUnion) { - if (sema.fn_ret_ty.errorUnionSet().tag() == .error_set_inferred) { + if (sema.fn_ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| { const op_ty = sema.typeOf(uncasted_operand); switch (op_ty.zigTypeTag()) { .ErrorSet => { - try sema.func.?.addErrorSet(sema.gpa, op_ty); + try payload.data.addErrorSet(sema.gpa, op_ty); }, .ErrorUnion => { - try sema.func.?.addErrorSet(sema.gpa, op_ty.errorUnionSet()); + try payload.data.addErrorSet(sema.gpa, op_ty.errorUnionSet()); }, else => {}, } @@ -12496,10 +12512,10 @@ fn coerceInMemoryAllowedErrorSets( // of inferred error sets. 
if (src_ty.castTag(.error_set_inferred)) |src_payload| { if (dest_ty.castTag(.error_set_inferred)) |dst_payload| { - const src_func = src_payload.data; - const dst_func = dst_payload.data; + const src_func = src_payload.data.func; + const dst_func = dst_payload.data.func; - if (src_func == dst_func or dst_func.inferred_error_set.functions.contains(src_func)) { + if (src_func == dst_func or dst_payload.data.functions.contains(src_func)) { return .ok; } } @@ -13894,10 +13910,10 @@ fn wrapErrorUnion( } }, .error_set_inferred => ok: { - const func = dest_err_set_ty.castTag(.error_set_inferred).?.data; - if (func.inferred_error_set.is_anyerror) break :ok; + const data = dest_err_set_ty.castTag(.error_set_inferred).?.data; + if (data.is_anyerror) break :ok; const expected_name = val.castTag(.@"error").?.data.name; - if (func.inferred_error_set.errors.contains(expected_name)) break :ok; + if (data.errors.contains(expected_name)) break :ok; // TODO error set resolution here before emitting a compile error return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 8babcb9a83..f54ae7f76d 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -722,7 +722,7 @@ pub const DeclGen = struct { try bw.writeAll(" payload; uint16_t error; } "); const name_index = buffer.items.len; if (err_set_type.castTag(.error_set_inferred)) |inf_err_set_payload| { - const func = inf_err_set_payload.data; + const func = inf_err_set_payload.data.func; try bw.writeAll("zig_E_"); try dg.renderDeclName(func.owner_decl, bw); try bw.writeAll(";\n"); diff --git a/src/type.zig b/src/type.zig index 298df2cffc..3360477a03 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2869,7 +2869,7 @@ pub const Type = extern union { pub fn isAnyError(ty: Type) bool { return switch (ty.tag()) { .anyerror => true, - .error_set_inferred => ty.castTag(.error_set_inferred).?.data.inferred_error_set.is_anyerror, + .error_set_inferred => 
ty.castTag(.error_set_inferred).?.data.is_anyerror, else => false, }; } @@ -4156,7 +4156,7 @@ pub const Type = extern union { pub const base_tag = Tag.error_set_inferred; base: Payload = Payload{ .tag = base_tag }, - data: *Module.Fn, + data: *Module.Fn.InferredErrorSet, }; pub const Pointer = struct { From 9d6c45f6979543607a7064be7155afa409be956a Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Fri, 17 Dec 2021 04:39:09 +0100 Subject: [PATCH 04/17] stage2: inferred error set coercion --- src/Module.zig | 15 ++-- src/Sema.zig | 181 +++++++++++++++++++++++++++++++++++++++---------- src/type.zig | 31 ++++++++- 3 files changed, 184 insertions(+), 43 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index 7031dc20a5..4f9f8307db 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1239,14 +1239,17 @@ pub const Fn = struct { /// When the inferred error set is fully resolved, this map contains all the errors that the function might return. errors: std.StringHashMapUnmanaged(void) = .{}, - /// Other functions with inferred error sets which the inferred error set of this - /// function should include. - functions: std.AutoHashMapUnmanaged(*Fn, void) = .{}, + /// Other inferred error sets which this inferred error set should include. + inferred_error_sets: std.AutoHashMapUnmanaged(*InferredErrorSet, void) = .{}, /// Whether the function returned anyerror. This is true if either of the dependent functions /// returns anyerror. is_anyerror: bool = false, + /// Whether this error set is already fully resolved. If true, resolving can skip resolving any dependents + /// of this inferred error set. 
+ is_resolved: bool = false, + pub fn addErrorSet(self: *InferredErrorSet, gpa: Allocator, err_set_ty: Type) !void { switch (err_set_ty.tag()) { .error_set => { @@ -1260,8 +1263,8 @@ pub const Fn = struct { try self.errors.put(gpa, name, {}); }, .error_set_inferred => { - const dependent_func = err_set_ty.castTag(.error_set_inferred).?.data.func; - try self.functions.put(gpa, dependent_func, {}); + const set = err_set_ty.castTag(.error_set_inferred).?.data; + try self.inferred_error_sets.put(gpa, set, {}); }, .error_set_merged => { const names = err_set_ty.castTag(.error_set_merged).?.data.keys(); @@ -1285,7 +1288,7 @@ pub const Fn = struct { while (it) |node| { const next = node.next; node.data.errors.deinit(gpa); - node.data.functions.deinit(gpa); + node.data.inferred_error_sets.deinit(gpa); gpa.destroy(node); it = next; } diff --git a/src/Sema.zig b/src/Sema.zig index f23ffe24c0..a82a56c253 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5207,9 +5207,6 @@ fn funcCommon( .rbrace_line = src_locs.rbrace_line, .lbrace_column = @truncate(u16, src_locs.columns), .rbrace_column = @truncate(u16, src_locs.columns >> 16), - .inferred_error_sets = .{ - .first = maybe_inferred_error_set_node, - }, }; if (maybe_inferred_error_set_node) |node| { new_func.inferred_error_sets.prepend(node); @@ -12193,7 +12190,7 @@ fn coerce( const arena = sema.arena; const target = sema.mod.getTarget(); - const in_memory_result = coerceInMemoryAllowed(dest_ty, inst_ty, false, target); + const in_memory_result = try sema.coerceInMemoryAllowed(dest_ty, inst_ty, false, target); if (in_memory_result == .ok) { if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| { // Keep the comptime Value representation; take the new type. 
@@ -12252,7 +12249,7 @@ fn coerce( if (inst_ty.isConstPtr() and dest_is_mut) break :single_item; if (inst_ty.isVolatilePtr() and !dest_info.@"volatile") break :single_item; if (inst_ty.ptrAddressSpace() != dest_info.@"addrspace") break :single_item; - switch (coerceInMemoryAllowed(array_elem_ty, ptr_elem_ty, dest_is_mut, target)) { + switch (try sema.coerceInMemoryAllowed(array_elem_ty, ptr_elem_ty, dest_is_mut, target)) { .ok => {}, .no_match => break :single_item, } @@ -12271,7 +12268,7 @@ fn coerce( if (inst_ty.ptrAddressSpace() != dest_info.@"addrspace") break :src_array_ptr; const dst_elem_type = dest_info.pointee_type; - switch (coerceInMemoryAllowed(dst_elem_type, array_elem_type, dest_is_mut, target)) { + switch (try sema.coerceInMemoryAllowed(dst_elem_type, array_elem_type, dest_is_mut, target)) { .ok => {}, .no_match => break :src_array_ptr, } @@ -12310,7 +12307,7 @@ fn coerce( const src_elem_ty = inst_ty.childType(); const dest_is_mut = dest_info.mutable; const dst_elem_type = dest_info.pointee_type; - switch (coerceInMemoryAllowed(dst_elem_type, src_elem_ty, dest_is_mut, target)) { + switch (try sema.coerceInMemoryAllowed(dst_elem_type, src_elem_ty, dest_is_mut, target)) { .ok => {}, .no_match => break :src_c_ptr, } @@ -12453,7 +12450,13 @@ const InMemoryCoercionResult = enum { /// * sentinel-terminated pointers can coerce into `[*]` /// TODO improve this function to report recursive compile errors like it does in stage1. 
/// look at the function types_match_const_cast_only -fn coerceInMemoryAllowed(dest_ty: Type, src_ty: Type, dest_is_mut: bool, target: std.Target) InMemoryCoercionResult { +fn coerceInMemoryAllowed( + sema: *Sema, + dest_ty: Type, + src_ty: Type, + dest_is_mut: bool, + target: std.Target +) CompileError!InMemoryCoercionResult { if (dest_ty.eql(src_ty)) return .ok; @@ -12462,32 +12465,35 @@ fn coerceInMemoryAllowed(dest_ty: Type, src_ty: Type, dest_is_mut: bool, target: var src_buf: Type.Payload.ElemType = undefined; if (dest_ty.ptrOrOptionalPtrTy(&dest_buf)) |dest_ptr_ty| { if (src_ty.ptrOrOptionalPtrTy(&src_buf)) |src_ptr_ty| { - return coerceInMemoryAllowedPtrs(dest_ty, src_ty, dest_ptr_ty, src_ptr_ty, dest_is_mut, target); + return try sema.coerceInMemoryAllowedPtrs(dest_ty, src_ty, dest_ptr_ty, src_ptr_ty, dest_is_mut, target); } } // Slices if (dest_ty.isSlice() and src_ty.isSlice()) { - return coerceInMemoryAllowedPtrs(dest_ty, src_ty, dest_ty, src_ty, dest_is_mut, target); + return try sema.coerceInMemoryAllowedPtrs(dest_ty, src_ty, dest_ty, src_ty, dest_is_mut, target); } + const dest_tag = dest_ty.zigTypeTag(); + const src_tag = src_ty.zigTypeTag(); + // Functions - if (dest_ty.zigTypeTag() == .Fn and src_ty.zigTypeTag() == .Fn) { - return coerceInMemoryAllowedFns(dest_ty, src_ty, target); + if (dest_tag == .Fn and src_tag == .Fn) { + return try sema.coerceInMemoryAllowedFns(dest_ty, src_ty, target); } // Error Unions - if (dest_ty.zigTypeTag() == .ErrorUnion and src_ty.zigTypeTag() == .ErrorUnion) { - const child = coerceInMemoryAllowed(dest_ty.errorUnionPayload(), src_ty.errorUnionPayload(), dest_is_mut, target); + if (dest_tag == .ErrorUnion and src_tag == .ErrorUnion) { + const child = try sema.coerceInMemoryAllowed(dest_ty.errorUnionPayload(), src_ty.errorUnionPayload(), dest_is_mut, target); if (child == .no_match) { return child; } - return coerceInMemoryAllowed(dest_ty.errorUnionSet(), src_ty.errorUnionSet(), dest_is_mut, target); + return try 
sema.coerceInMemoryAllowed(dest_ty.errorUnionSet(), src_ty.errorUnionSet(), dest_is_mut, target); } // Error Sets - if (dest_ty.zigTypeTag() == .ErrorSet and src_ty.zigTypeTag() == .ErrorSet) { - return coerceInMemoryAllowedErrorSets(dest_ty, src_ty); + if (dest_tag == .ErrorSet and src_tag == .ErrorSet) { + return try sema.coerceInMemoryAllowedErrorSets(dest_ty, src_ty); } // TODO: arrays @@ -12498,14 +12504,16 @@ fn coerceInMemoryAllowed(dest_ty: Type, src_ty: Type, dest_is_mut: bool, target: } fn coerceInMemoryAllowedErrorSets( + sema: *Sema, dest_ty: Type, src_ty: Type, -) InMemoryCoercionResult { - // Coercion to `anyerror`. Note that this check can return false positives +) !InMemoryCoercionResult { + // Coercion to `anyerror`. Note that this check can return false negatives // in case the error sets did not get resolved. if (dest_ty.isAnyError()) { return .ok; } + // If both are inferred error sets of functions, and // the dest includes the source function, the coercion is OK. // This check is important because it works without forcing a full resolution @@ -12515,21 +12523,85 @@ fn coerceInMemoryAllowedErrorSets( const src_func = src_payload.data.func; const dst_func = dst_payload.data.func; - if (src_func == dst_func or dst_payload.data.functions.contains(src_func)) { + if (src_func == dst_func or dst_payload.data.inferred_error_sets.contains(src_payload.data)) { return .ok; } + return .no_match; } } - // TODO full error set resolution and compare sets by names. + if (dest_ty.castTag(.error_set_inferred)) |payload| { + try sema.resolveInferredErrorSet(payload.data); + // isAnyError might have changed from a false negative to a true positive after resolution. + if (dest_ty.isAnyError()) { + return .ok; + } + } + + switch (src_ty.tag()) { + .error_set_inferred => { + const src_data = src_ty.castTag(.error_set_inferred).?.data; + + try sema.resolveInferredErrorSet(src_data); + // src anyerror status might have changed after the resolution. 
+ if (src_ty.isAnyError()) { + // dest_ty.isAnyError() == true is already checked for at this point. + return .no_match; + } + + var it = src_data.errors.keyIterator(); + while (it.next()) |name_ptr| { + if (!dest_ty.errorSetHasField(name_ptr.*)) { + return .no_match; + } + } + + return .ok; + }, + .error_set_single => { + const name = src_ty.castTag(.error_set_single).?.data; + if (dest_ty.errorSetHasField(name)) { + return .ok; + } + }, + .error_set_merged => { + const names = src_ty.castTag(.error_set_merged).?.data.keys(); + for (names) |name| { + if (!dest_ty.errorSetHasField(name)) { + return .no_match; + } + } + + return .ok; + }, + .error_set => { + const names = src_ty.castTag(.error_set).?.data.names.keys(); + for (names) |name| { + if (!dest_ty.errorSetHasField(name)) { + return .no_match; + } + } + + return .ok; + }, + .anyerror => switch (dest_ty.tag()) { + .error_set_inferred => return .no_match, // Caught by dest.isAnyError() above. + .error_set_single, .error_set_merged, .error_set => {}, + .anyerror => unreachable, // Filtered out above. + else => unreachable, + }, + else => unreachable, + } + return .no_match; } fn coerceInMemoryAllowedFns( + sema: *Sema, dest_ty: Type, src_ty: Type, target: std.Target, -) InMemoryCoercionResult { +) !InMemoryCoercionResult { const dest_info = dest_ty.fnInfo(); const src_info = src_ty.fnInfo(); @@ -12542,7 +12614,7 @@ fn coerceInMemoryAllowedFns( } if (!src_info.return_type.isNoReturn()) { - const rt = coerceInMemoryAllowed(dest_info.return_type, src_info.return_type, false, target); + const rt = try sema.coerceInMemoryAllowed(dest_info.return_type, src_info.return_type, false, target); if (rt == .no_match) { return rt; } @@ -12562,7 +12634,7 @@ fn coerceInMemoryAllowedFns( // TODO: nolias // Note: Cast direction is reversed here. 
- const param = coerceInMemoryAllowed(src_param_ty, dest_param_ty, false, target); + const param = try sema.coerceInMemoryAllowed(src_param_ty, dest_param_ty, false, target); if (param == .no_match) { return param; } @@ -12576,17 +12648,18 @@ fn coerceInMemoryAllowedFns( } fn coerceInMemoryAllowedPtrs( + sema: *Sema, dest_ty: Type, src_ty: Type, dest_ptr_ty: Type, src_ptr_ty: Type, dest_is_mut: bool, target: std.Target, -) InMemoryCoercionResult { +) !InMemoryCoercionResult { const dest_info = dest_ptr_ty.ptrInfo().data; const src_info = src_ptr_ty.ptrInfo().data; - const child = coerceInMemoryAllowed(dest_info.pointee_type, src_info.pointee_type, dest_info.mutable, target); + const child = try sema.coerceInMemoryAllowed(dest_info.pointee_type, src_info.pointee_type, dest_info.mutable, target); if (child == .no_match) { return child; } @@ -13307,7 +13380,7 @@ fn coerceVectorInMemory( const target = sema.mod.getTarget(); const dest_elem_ty = dest_ty.childType(); const inst_elem_ty = inst_ty.childType(); - const in_memory_result = coerceInMemoryAllowed(dest_elem_ty, inst_elem_ty, false, target); + const in_memory_result = try sema.coerceInMemoryAllowed(dest_elem_ty, inst_elem_ty, false, target); if (in_memory_result != .ok) { // TODO recursive error notes for coerceInMemoryAllowed failure return sema.fail(block, inst_src, "expected {}, found {}", .{ dest_ty, inst_ty }); @@ -13910,11 +13983,11 @@ fn wrapErrorUnion( } }, .error_set_inferred => ok: { - const data = dest_err_set_ty.castTag(.error_set_inferred).?.data; - if (data.is_anyerror) break :ok; const expected_name = val.castTag(.@"error").?.data.name; + const data = dest_err_set_ty.castTag(.error_set_inferred).?.data; + try sema.resolveInferredErrorSet(data); + if (data.is_anyerror) break :ok; if (data.errors.contains(expected_name)) break :ok; - // TODO error set resolution here before emitting a compile error return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, else => 
unreachable, @@ -14059,12 +14132,12 @@ fn resolvePeerTypes( .Optional => { var opt_child_buf: Type.Payload.ElemType = undefined; const opt_child_ty = candidate_ty.optionalChild(&opt_child_buf); - if (coerceInMemoryAllowed(opt_child_ty, chosen_ty, false, target) == .ok) { + if ((try sema.coerceInMemoryAllowed(opt_child_ty, chosen_ty, false, target)) == .ok) { chosen = candidate; chosen_i = candidate_i + 1; continue; } - if (coerceInMemoryAllowed(chosen_ty, opt_child_ty, false, target) == .ok) { + if ((try sema.coerceInMemoryAllowed(chosen_ty, opt_child_ty, false, target)) == .ok) { any_are_null = true; continue; } @@ -14087,10 +14160,10 @@ fn resolvePeerTypes( .Optional => { var opt_child_buf: Type.Payload.ElemType = undefined; const opt_child_ty = chosen_ty.optionalChild(&opt_child_buf); - if (coerceInMemoryAllowed(opt_child_ty, candidate_ty, false, target) == .ok) { + if ((try sema.coerceInMemoryAllowed(opt_child_ty, candidate_ty, false, target)) == .ok) { continue; } - if (coerceInMemoryAllowed(candidate_ty, opt_child_ty, false, target) == .ok) { + if ((try sema.coerceInMemoryAllowed(candidate_ty, opt_child_ty, false, target)) == .ok) { any_are_null = true; chosen = candidate; chosen_i = candidate_i + 1; @@ -14256,6 +14329,42 @@ fn resolveBuiltinTypeFields( return sema.resolveTypeFields(block, src, resolved_ty); } +fn resolveInferredErrorSet(sema: *Sema, inferred_error_set: *Module.Fn.InferredErrorSet) CompileError!void { + // Ensuring that a particular decl is analyzed does not neccesarily mean that + // it's error set is inferred, so traverse all of them to get the complete + // picture. + // Note: We want to skip re-resolving the current function, as recursion + // doesn't change the error set. We can just check for state == .in_progress for this. + // TODO: Is that correct? 
+ + if (inferred_error_set.is_resolved) { + return; + } + + var it = inferred_error_set.inferred_error_sets.keyIterator(); + while (it.next()) |other_error_set_ptr| { + const func = other_error_set_ptr.*.func; + const decl = func.*.owner_decl; + + if (func.*.state == .in_progress) { + // Recursion, doesn't alter current error set, keep going. + continue; + } + + try sema.ensureDeclAnalyzed(decl); // To ensure that all dependencies are properly added to the set. + try sema.resolveInferredErrorSet(other_error_set_ptr.*); + + var error_it = other_error_set_ptr.*.errors.keyIterator(); + while (error_it.next()) |entry| { + try inferred_error_set.errors.put(sema.gpa, entry.*, {}); + } + if (other_error_set_ptr.*.is_anyerror) + inferred_error_set.is_anyerror = true; + } + + inferred_error_set.is_resolved = true; +} + fn semaStructFields( mod: *Module, struct_obj: *Module.Struct, @@ -15218,8 +15327,8 @@ fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr // We have a Value that lines up in virtual memory exactly with what we want to load. // If the Type is in-memory coercable to `load_ty`, it may be returned without modifications. 
const coerce_in_mem_ok = - coerceInMemoryAllowed(load_ty, parent.ty, false, target) == .ok or - coerceInMemoryAllowed(parent.ty, load_ty, false, target) == .ok; + (try sema.coerceInMemoryAllowed(load_ty, parent.ty, false, target)) == .ok or + (try sema.coerceInMemoryAllowed(parent.ty, load_ty, false, target)) == .ok; if (coerce_in_mem_ok) { if (parent.is_mutable) { // The decl whose value we are obtaining here may be overwritten with diff --git a/src/type.zig b/src/type.zig index 3360477a03..a81bd3ed32 100644 --- a/src/type.zig +++ b/src/type.zig @@ -1203,7 +1203,7 @@ pub const Type = extern union { return writer.writeAll(std.mem.sliceTo(error_set.owner_decl.name, 0)); }, .error_set_inferred => { - const func = ty.castTag(.error_set_inferred).?.data; + const func = ty.castTag(.error_set_inferred).?.data.func; return writer.print("(inferred error set of {s})", .{func.owner_decl.name}); }, .error_set_merged => { @@ -2874,6 +2874,35 @@ pub const Type = extern union { }; } + /// Returns whether ty, which must be an error set, includes an error `name`. + /// Might return a false negative if `ty` is an inferred error set and not fully + /// resolved yet. + pub fn errorSetHasField(ty: Type, name: []const u8) bool { + if (ty.isAnyError()) { + return true; + } + + switch (ty.tag()) { + .error_set_single => { + const data = ty.castTag(.error_set_single).?.data; + return std.mem.eql(u8, data, name); + }, + .error_set_inferred => { + const data = ty.castTag(.error_set_inferred).?.data; + return data.errors.contains(name); + }, + .error_set_merged => { + const data = ty.castTag(.error_set_merged).?.data; + return data.contains(name); + }, + .error_set => { + const data = ty.castTag(.error_set).?.data; + return data.names.contains(name); + }, + else => unreachable, + } + } + /// Asserts the type is an array or vector. 
pub fn arrayLen(ty: Type) u64 { return switch (ty.tag()) { From 993197cd868f312d19ab694dd3a5250e39077f67 Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Fri, 17 Dec 2021 04:46:42 +0100 Subject: [PATCH 05/17] stage2: merged error sets in wrapErrorUnion --- src/Sema.zig | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/Sema.zig b/src/Sema.zig index a82a56c253..bdbdea42e7 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -13990,6 +13990,13 @@ fn wrapErrorUnion( if (data.errors.contains(expected_name)) break :ok; return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, + .error_set_merged => { + const expected_name = val.castTag(.@"error").?.data.name; + const error_set = dest_err_set_ty.castTag(.error_set_merged).?.data; + if (!error_set.contains(expected_name)) { + return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); + } + }, else => unreachable, } return sema.addConstant(dest_ty, val); From e18c3f3109cffa76e4369c810f82d36eb02c56af Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Fri, 17 Dec 2021 18:44:39 +0100 Subject: [PATCH 06/17] stage2: wrap function prototypes in an inline block. Previously, function parameter instructions for function prototypes would be generated in the parent block. This caused issues in blocks where multiple prototypes would be generated in, such as the block for struct fields for example. This change introduces an inline block around every prototype such that all parameters for a prototype are confined to a unique block. 
--- src/AstGen.zig | 37 ++++++++++++++++++++++++------------- 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/src/AstGen.zig b/src/AstGen.zig index a3a63be09f..5e9868e885 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -984,17 +984,17 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr .fn_proto_simple => { var params: [1]Ast.Node.Index = undefined; - return fnProtoExpr(gz, scope, rl, tree.fnProtoSimple(¶ms, node)); + return fnProtoExpr(gz, scope, rl, node, tree.fnProtoSimple(¶ms, node)); }, .fn_proto_multi => { - return fnProtoExpr(gz, scope, rl, tree.fnProtoMulti(node)); + return fnProtoExpr(gz, scope, rl, node, tree.fnProtoMulti(node)); }, .fn_proto_one => { var params: [1]Ast.Node.Index = undefined; - return fnProtoExpr(gz, scope, rl, tree.fnProtoOne(¶ms, node)); + return fnProtoExpr(gz, scope, rl, node, tree.fnProtoOne(¶ms, node)); }, .fn_proto => { - return fnProtoExpr(gz, scope, rl, tree.fnProto(node)); + return fnProtoExpr(gz, scope, rl, node, tree.fnProto(node)); }, } } @@ -1101,6 +1101,7 @@ fn fnProtoExpr( gz: *GenZir, scope: *Scope, rl: ResultLoc, + node: Ast.Node.Index, fn_proto: Ast.full.FnProto, ) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; @@ -1113,6 +1114,11 @@ fn fnProtoExpr( }; assert(!is_extern); + var block_scope = gz.makeSubBlock(scope); + defer block_scope.unstack(); + + const block_inst = try gz.makeBlockInst(.block_inline, node); + const is_var_args = is_var_args: { var param_type_i: usize = 0; var it = fn_proto.iterate(tree.*); @@ -1144,11 +1150,11 @@ fn fnProtoExpr( .param_anytype_comptime else .param_anytype; - _ = try gz.addStrTok(tag, param_name, name_token); + _ = try block_scope.addStrTok(tag, param_name, name_token); } else { const param_type_node = param.type_expr; assert(param_type_node != 0); - var param_gz = gz.makeSubBlock(scope); + var param_gz = block_scope.makeSubBlock(scope); defer param_gz.unstack(); const param_type = try expr(¶m_gz, scope, coerced_type_rl, 
param_type_node); const param_inst_expected = @intCast(u32, astgen.instructions.len + 1); @@ -1156,7 +1162,7 @@ fn fnProtoExpr( const main_tokens = tree.nodes.items(.main_token); const name_token = param.name_token orelse main_tokens[param_type_node]; const tag: Zir.Inst.Tag = if (is_comptime) .param_comptime else .param; - const param_inst = try gz.addParam(¶m_gz, tag, name_token, param_name); + const param_inst = try block_scope.addParam(¶m_gz, tag, name_token, param_name); assert(param_inst_expected == param_inst); } } @@ -1164,7 +1170,7 @@ fn fnProtoExpr( }; const align_inst: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: { - break :inst try expr(gz, scope, align_rl, fn_proto.ast.align_expr); + break :inst try expr(&block_scope, scope, align_rl, fn_proto.ast.align_expr); }; if (fn_proto.ast.addrspace_expr != 0) { @@ -1177,7 +1183,7 @@ fn fnProtoExpr( const cc: Zir.Inst.Ref = if (fn_proto.ast.callconv_expr != 0) try expr( - gz, + &block_scope, scope, .{ .ty = .calling_convention_type }, fn_proto.ast.callconv_expr, @@ -1190,14 +1196,14 @@ fn fnProtoExpr( if (is_inferred_error) { return astgen.failTok(maybe_bang, "function prototype may not have inferred error set", .{}); } - var ret_gz = gz.makeSubBlock(scope); + var ret_gz = block_scope.makeSubBlock(scope); defer ret_gz.unstack(); const ret_ty = try expr(&ret_gz, scope, coerced_type_rl, fn_proto.ast.return_type); const ret_br = try ret_gz.addBreak(.break_inline, 0, ret_ty); - const result = try gz.addFunc(.{ + const result = try block_scope.addFunc(.{ .src_node = fn_proto.ast.proto_node, - .param_block = 0, + .param_block = block_inst, .ret_gz = &ret_gz, .ret_br = ret_br, .body_gz = null, @@ -1209,7 +1215,12 @@ fn fnProtoExpr( .is_test = false, .is_extern = false, }); - return rvalue(gz, rl, result, fn_proto.ast.proto_node); + + _ = try block_scope.addBreak(.break_inline, block_inst, result); + try block_scope.setBlockBody(block_inst); + try gz.instructions.append(astgen.gpa, block_inst); + + 
return rvalue(gz, rl, indexToRef(block_inst), fn_proto.ast.proto_node); } fn arrayInitExpr( From d5621504b0e2160fa44991fc4180165584d72819 Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Fri, 17 Dec 2021 18:50:38 +0100 Subject: [PATCH 07/17] stage2: save and restore parameters when resolving inline bodies This caused zirParam instructions of parent blocks to be present in inline analyzed blocks, and so function prototypes declared in the inline blocks would also gain and add to the parameters in the parent block. Only block and block_inline are affected in this commit, as prototypes and declarations are always generated in block_inline. This might need to be resolved in a more general way at some point. --- src/Sema.zig | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/Sema.zig b/src/Sema.zig index bdbdea42e7..174e038db4 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -940,6 +940,15 @@ pub fn analyzeBody( const inst_data = datas[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; + // If this block contains a function prototype, we need to reset the + // current list of parameters and restore it later. + // Note: this probably needs to be resolved in a more general manner. + const prev_params = block.params; + block.params = .{}; + defer { + block.params.deinit(sema.gpa); + block.params = prev_params; + } const break_inst = try sema.analyzeBody(block, inline_body); const break_data = datas[break_inst].@"break"; if (inst == break_data.block_inst) { @@ -953,6 +962,15 @@ pub fn analyzeBody( const inst_data = datas[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; + // If this block contains a function prototype, we need to reset the + // current list of parameters and restore it later. 
+ // Note: this probably needs to be resolved in a more general manner. + const prev_params = block.params; + block.params = .{}; + defer { + block.params.deinit(sema.gpa); + block.params = prev_params; + } const break_inst = try sema.analyzeBody(block, inline_body); const break_data = datas[break_inst].@"break"; if (inst == break_data.block_inst) { From 16bddecff99eafb6c60d2c7a406d92d9c2d3c35a Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Fri, 17 Dec 2021 19:01:25 +0100 Subject: [PATCH 08/17] stage2: allow @ptrToInt on anything that is a pointer at runtime This in particular allows @ptrToInt on pointer-like optionals. --- src/Sema.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Sema.zig b/src/Sema.zig index 174e038db4..a51e6ffbf3 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5381,7 +5381,7 @@ fn zirPtrToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ptr = sema.resolveInst(inst_data.operand); const ptr_ty = sema.typeOf(ptr); - if (ptr_ty.zigTypeTag() != .Pointer) { + if (!ptr_ty.isPtrAtRuntime()) { const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty}); } From 138f85bf258d3520591e4a5abfb6d5db7ae45d97 Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Sat, 18 Dec 2021 00:30:16 +0100 Subject: [PATCH 09/17] stage2: only create inferred error sets for inferred error set functions --- src/Sema.zig | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index a51e6ffbf3..05cdcb794f 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -3912,18 +3912,20 @@ fn analyzeCall( const ret_ty_inst = try sema.resolveBody(&child_block, fn_info.ret_ty_body); const ret_ty_src = func_src; // TODO better source location const bare_return_type = try sema.analyzeAsType(&child_block, ret_ty_src, 
ret_ty_inst); - // If the function has an inferred error set, `bare_return_type` is the payload type only. + // Create a fresh inferred error set type for inline/comptime calls. const fn_ret_ty = blk: { - if (func_ty_info.return_type.tag() == .error_union) { - const node = try sema.gpa.create(Module.Fn.InferredErrorSetListNode); - node.data = .{ .func = module_fn }; - parent_func.?.inferred_error_sets.prepend(node); + if (func_ty_info.return_type.castTag(.error_union)) |payload| { + if (payload.data.error_set.tag() == .error_set_inferred) { + const node = try sema.gpa.create(Module.Fn.InferredErrorSetListNode); + node.data = .{ .func = module_fn }; + parent_func.?.inferred_error_sets.prepend(node); - const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, &node.data); - break :blk try Type.Tag.error_union.create(sema.arena, .{ - .error_set = error_set_ty, - .payload = bare_return_type, - }); + const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, &node.data); + break :blk try Type.Tag.error_union.create(sema.arena, .{ + .error_set = error_set_ty, + .payload = bare_return_type, + }); + } } break :blk bare_return_type; }; @@ -12473,7 +12475,7 @@ fn coerceInMemoryAllowed( dest_ty: Type, src_ty: Type, dest_is_mut: bool, - target: std.Target + target: std.Target, ) CompileError!InMemoryCoercionResult { if (dest_ty.eql(src_ty)) return .ok; From 28bcd7dbdda7fb2c2fe80dbdb5981479a04e973a Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Sat, 18 Dec 2021 00:57:23 +0100 Subject: [PATCH 10/17] os: disable unexpected error tracing on stage 2 builds The self-hosted compiler cannot yet deal with the print function that this field enables. It is not critical, however, and allows us to remove formatting from the list of neccesary features to implement to get the page allocator working. 
--- lib/std/os.zig | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/std/os.zig b/lib/std/os.zig index e16a660741..1728c2ac0d 100644 --- a/lib/std/os.zig +++ b/lib/std/os.zig @@ -4968,7 +4968,11 @@ pub fn toPosixPath(file_path: []const u8) ![MAX_PATH_BYTES - 1:0]u8 { /// if this happens the fix is to add the error code to the corresponding /// switch expression, possibly introduce a new error in the error set, and /// send a patch to Zig. -pub const unexpected_error_tracing = builtin.mode == .Debug; +/// The self-hosted compiler is not fully capable of handle the related code. +/// Until then, unexpected error tracing is disabled for the self-hosted compiler. +/// TODO remove this once self-hosted is capable enough to handle printing and +/// stack trace dumping. +pub const unexpected_error_tracing = !builtin.zig_is_stage2 and builtin.mode == .Debug; pub const UnexpectedError = error{ /// The Operating System returned an undocumented error code. From f3d635b6683ba4a53f82ae8087b1cf78552abac5 Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Sat, 18 Dec 2021 04:42:13 +0100 Subject: [PATCH 11/17] stage2: @addWithOverflow --- lib/std/math/big/int.zig | 36 ++++++++----- lib/std/math/big/int_test.zig | 24 ++++++--- src/Air.zig | 8 +++ src/Liveness.zig | 2 +- src/Sema.zig | 99 +++++++++++++++++++++++++++++++++-- src/arch/aarch64/CodeGen.zig | 7 +++ src/arch/arm/CodeGen.zig | 7 +++ src/arch/riscv64/CodeGen.zig | 7 +++ src/arch/x86_64/CodeGen.zig | 7 +++ src/codegen/c.zig | 8 +++ src/codegen/llvm.zig | 43 ++++++++++++--- src/print_air.zig | 12 +++++ src/value.zig | 87 +++++++++++++++++++++--------- test/behavior/math.zig | 27 ++++++++++ test/behavior/math_stage1.zig | 27 ---------- 15 files changed, 315 insertions(+), 86 deletions(-) diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index d7bcf9badc..ec0143a3d7 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -443,12 +443,12 @@ pub const Mutable = struct { } } 
- /// r = a + b with 2s-complement wrapping semantics. + /// r = a + b with 2s-complement wrapping semantics. Returns whether overflow occurred. /// r, a and b may be aliases /// /// Asserts the result fits in `r`. An upper bound on the number of limbs needed by /// r is `calcTwosCompLimbCount(bit_count)`. - pub fn addWrap(r: *Mutable, a: Const, b: Const, signedness: Signedness, bit_count: usize) void { + pub fn addWrap(r: *Mutable, a: Const, b: Const, signedness: Signedness, bit_count: usize) bool { const req_limbs = calcTwosCompLimbCount(bit_count); // Slice of the upper bits if they exist, these will be ignored and allows us to use addCarry to determine @@ -463,6 +463,7 @@ pub const Mutable = struct { .limbs = b.limbs[0..math.min(req_limbs, b.limbs.len)], }; + var carry_truncated = false; if (r.addCarry(x, y)) { // There are two possibilities here: // - We overflowed req_limbs. In this case, the carry is ignored, as it would be removed by @@ -473,10 +474,17 @@ pub const Mutable = struct { if (msl < req_limbs) { r.limbs[msl] = 1; r.len = req_limbs; + } else { + carry_truncated = true; } } - r.truncate(r.toConst(), signedness, bit_count); + if (!r.toConst().fitsInTwosComp(signedness, bit_count)) { + r.truncate(r.toConst(), signedness, bit_count); + return true; + } + + return carry_truncated; } /// r = a + b with 2s-complement saturating semantics. @@ -581,13 +589,13 @@ pub const Mutable = struct { r.add(a, b.negate()); } - /// r = a - b with 2s-complement wrapping semantics. + /// r = a - b with 2s-complement wrapping semantics. Returns whether any overflow occured. /// /// r, a and b may be aliases /// Asserts the result fits in `r`. An upper bound on the number of limbs needed by /// r is `calcTwosCompLimbCount(bit_count)`. 
- pub fn subWrap(r: *Mutable, a: Const, b: Const, signedness: Signedness, bit_count: usize) void { - r.addWrap(a, b.negate(), signedness, bit_count); + pub fn subWrap(r: *Mutable, a: Const, b: Const, signedness: Signedness, bit_count: usize) bool { + return r.addWrap(a, b.negate(), signedness, bit_count); } /// r = a - b with 2s-complement saturating semantics. @@ -1039,7 +1047,7 @@ pub const Mutable = struct { pub fn bitNotWrap(r: *Mutable, a: Const, signedness: Signedness, bit_count: usize) void { r.copy(a.negate()); const negative_one = Const{ .limbs = &.{1}, .positive = false }; - r.addWrap(r.toConst(), negative_one, signedness, bit_count); + _ = r.addWrap(r.toConst(), negative_one, signedness, bit_count); } /// r = a | b under 2s complement semantics. @@ -2443,17 +2451,18 @@ pub const Managed = struct { r.setMetadata(m.positive, m.len); } - /// r = a + b with 2s-complement wrapping semantics. + /// r = a + b with 2s-complement wrapping semantics. Returns whether any overflow occured. /// /// r, a and b may be aliases. If r aliases a or b, then caller must call /// `r.ensureTwosCompCapacity` prior to calling `add`. /// /// Returns an error if memory could not be allocated. - pub fn addWrap(r: *Managed, a: Const, b: Const, signedness: Signedness, bit_count: usize) Allocator.Error!void { + pub fn addWrap(r: *Managed, a: Const, b: Const, signedness: Signedness, bit_count: usize) Allocator.Error!bool { try r.ensureTwosCompCapacity(bit_count); var m = r.toMutable(); - m.addWrap(a, b, signedness, bit_count); + const wrapped = m.addWrap(a, b, signedness, bit_count); r.setMetadata(m.positive, m.len); + return wrapped; } /// r = a + b with 2s-complement saturating semantics. @@ -2481,17 +2490,18 @@ pub const Managed = struct { r.setMetadata(m.positive, m.len); } - /// r = a - b with 2s-complement wrapping semantics. + /// r = a - b with 2s-complement wrapping semantics. Returns whether any overflow occured. /// /// r, a and b may be aliases. 
If r aliases a or b, then caller must call /// `r.ensureTwosCompCapacity` prior to calling `add`. /// /// Returns an error if memory could not be allocated. - pub fn subWrap(r: *Managed, a: Const, b: Const, signedness: Signedness, bit_count: usize) Allocator.Error!void { + pub fn subWrap(r: *Managed, a: Const, b: Const, signedness: Signedness, bit_count: usize) Allocator.Error!bool { try r.ensureTwosCompCapacity(bit_count); var m = r.toMutable(); - m.subWrap(a, b, signedness, bit_count); + const wrapped = m.subWrap(a, b, signedness, bit_count); r.setMetadata(m.positive, m.len); + return wrapped; } /// r = a - b with 2s-complement saturating semantics. diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig index d226f20083..4c1d12116e 100644 --- a/lib/std/math/big/int_test.zig +++ b/lib/std/math/big/int_test.zig @@ -590,8 +590,9 @@ test "big.int addWrap single-single, unsigned" { var b = try Managed.initSet(testing.allocator, 10); defer b.deinit(); - try a.addWrap(a.toConst(), b.toConst(), .unsigned, 17); + const wrapped = try a.addWrap(a.toConst(), b.toConst(), .unsigned, 17); + try testing.expect(wrapped); try testing.expect((try a.to(u17)) == 9); } @@ -602,8 +603,9 @@ test "big.int subWrap single-single, unsigned" { var b = try Managed.initSet(testing.allocator, maxInt(u17)); defer b.deinit(); - try a.subWrap(a.toConst(), b.toConst(), .unsigned, 17); + const wrapped = try a.subWrap(a.toConst(), b.toConst(), .unsigned, 17); + try testing.expect(wrapped); try testing.expect((try a.to(u17)) == 1); } @@ -614,8 +616,9 @@ test "big.int addWrap multi-multi, unsigned, limb aligned" { var b = try Managed.initSet(testing.allocator, maxInt(DoubleLimb)); defer b.deinit(); - try a.addWrap(a.toConst(), b.toConst(), .unsigned, @bitSizeOf(DoubleLimb)); + const wrapped = try a.addWrap(a.toConst(), b.toConst(), .unsigned, @bitSizeOf(DoubleLimb)); + try testing.expect(wrapped); try testing.expect((try a.to(DoubleLimb)) == maxInt(DoubleLimb) - 1); } @@ -626,8 
+629,9 @@ test "big.int subWrap single-multi, unsigned, limb aligned" { var b = try Managed.initSet(testing.allocator, maxInt(DoubleLimb) + 100); defer b.deinit(); - try a.subWrap(a.toConst(), b.toConst(), .unsigned, @bitSizeOf(DoubleLimb)); + const wrapped = try a.subWrap(a.toConst(), b.toConst(), .unsigned, @bitSizeOf(DoubleLimb)); + try testing.expect(wrapped); try testing.expect((try a.to(DoubleLimb)) == maxInt(DoubleLimb) - 88); } @@ -638,8 +642,9 @@ test "big.int addWrap single-single, signed" { var b = try Managed.initSet(testing.allocator, 1 + 1 + maxInt(u21)); defer b.deinit(); - try a.addWrap(a.toConst(), b.toConst(), .signed, @bitSizeOf(i21)); + const wrapped = try a.addWrap(a.toConst(), b.toConst(), .signed, @bitSizeOf(i21)); + try testing.expect(wrapped); try testing.expect((try a.to(i21)) == minInt(i21)); } @@ -650,8 +655,9 @@ test "big.int subWrap single-single, signed" { var b = try Managed.initSet(testing.allocator, 1); defer b.deinit(); - try a.subWrap(a.toConst(), b.toConst(), .signed, @bitSizeOf(i21)); + const wrapped = try a.subWrap(a.toConst(), b.toConst(), .signed, @bitSizeOf(i21)); + try testing.expect(wrapped); try testing.expect((try a.to(i21)) == maxInt(i21)); } @@ -662,8 +668,9 @@ test "big.int addWrap multi-multi, signed, limb aligned" { var b = try Managed.initSet(testing.allocator, maxInt(SignedDoubleLimb)); defer b.deinit(); - try a.addWrap(a.toConst(), b.toConst(), .signed, @bitSizeOf(SignedDoubleLimb)); + const wrapped = try a.addWrap(a.toConst(), b.toConst(), .signed, @bitSizeOf(SignedDoubleLimb)); + try testing.expect(wrapped); try testing.expect((try a.to(SignedDoubleLimb)) == -2); } @@ -674,8 +681,9 @@ test "big.int subWrap single-multi, signed, limb aligned" { var b = try Managed.initSet(testing.allocator, 1); defer b.deinit(); - try a.subWrap(a.toConst(), b.toConst(), .signed, @bitSizeOf(SignedDoubleLimb)); + const wrapped = try a.subWrap(a.toConst(), b.toConst(), .signed, @bitSizeOf(SignedDoubleLimb)); + try 
testing.expect(wrapped); try testing.expect((try a.to(SignedDoubleLimb)) == maxInt(SignedDoubleLimb)); } diff --git a/src/Air.zig b/src/Air.zig index 0e8a63acb1..72e281d03e 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -135,6 +135,12 @@ pub const Inst = struct { /// is the same as both operands. /// Uses the `bin_op` field. min, + /// Integer addition with overflow. Both operands are guaranteed to be the same type, + /// and the result is bool. The wrapped value is written to the pointer given by the in + /// operand of the `pl_op` field. Payload is `Bin` with `lhs` and `rhs` the relevant types + /// of the operation. + /// Uses the `pl_op` field with payload `Bin`. + add_with_overflow, /// Allocates stack local memory. /// Uses the `ty` field. alloc, @@ -804,6 +810,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { const ptr_ty = air.typeOf(datas[inst].pl_op.operand); return ptr_ty.elemType(); }, + + .add_with_overflow => return Type.initTag(.bool), } } diff --git a/src/Liveness.zig b/src/Liveness.zig index f090329d5a..a7128e2cc2 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -381,7 +381,7 @@ fn analyzeInst( const extra = a.air.extraData(Air.AtomicRmw, pl_op.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.operand, .none }); }, - .memset, .memcpy => { + .memset, .memcpy, .add_with_overflow => { const pl_op = inst_datas[inst].pl_op; const extra = a.air.extraData(Air.Bin, pl_op.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.lhs, extra.rhs }); diff --git a/src/Sema.zig b/src/Sema.zig index 05cdcb794f..82901e8a8c 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1051,10 +1051,10 @@ fn zirExtended(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .@"asm" => return sema.zirAsm( block, extended, inst), .typeof_peer => return sema.zirTypeofPeer( block, extended), .compile_log => return sema.zirCompileLog( block, extended), - .add_with_overflow => return 
sema.zirOverflowArithmetic(block, extended), - .sub_with_overflow => return sema.zirOverflowArithmetic(block, extended), - .mul_with_overflow => return sema.zirOverflowArithmetic(block, extended), - .shl_with_overflow => return sema.zirOverflowArithmetic(block, extended), + .add_with_overflow => return sema.zirOverflowArithmetic(block, extended, extended.opcode), + .sub_with_overflow => return sema.zirOverflowArithmetic(block, extended, extended.opcode), + .mul_with_overflow => return sema.zirOverflowArithmetic(block, extended, extended.opcode), + .shl_with_overflow => return sema.zirOverflowArithmetic(block, extended, extended.opcode), .c_undef => return sema.zirCUndef( block, extended), .c_include => return sema.zirCInclude( block, extended), .c_define => return sema.zirCDefine( block, extended), @@ -7310,6 +7310,7 @@ fn zirOverflowArithmetic( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, + zir_tag: Zir.Inst.Extended, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -7317,7 +7318,95 @@ fn zirOverflowArithmetic( const extra = sema.code.extraData(Zir.Inst.OverflowArithmetic, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; - return sema.fail(block, src, "TODO implement Sema.zirOverflowArithmetic", .{}); + const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; + const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node }; + const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = extra.node }; + + const lhs = sema.resolveInst(extra.lhs); + const rhs = sema.resolveInst(extra.rhs); + const ptr = sema.resolveInst(extra.ptr); + + const lhs_ty = sema.typeOf(lhs); + + // Note, the types of lhs/rhs (also for shifting)/ptr are already correct as ensured by astgen. 
+ const dest_ty = lhs_ty; + if (dest_ty.zigTypeTag() != .Int) { + return sema.fail(block, src, "expected integer type, found '{}'", .{dest_ty}); + } + + const target = sema.mod.getTarget(); + + const maybe_lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, lhs); + const maybe_rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, rhs); + + const result: struct { + overflowed: enum { yes, no, undef }, + wrapped: Air.Inst.Ref, + } = result: { + const air_tag: Air.Inst.Tag = switch (zir_tag) { + .add_with_overflow => blk: { + // If either of the arguments is zero, `false` is returned and the other is stored + // to the result, even if it is undefined.. + // Otherwise, if either of the argument is undefined, undefined is returned. + if (maybe_lhs_val) |lhs_val| { + if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) { + break :result .{ .overflowed = .no, .wrapped = rhs }; + } + } + if (maybe_rhs_val) |rhs_val| { + if (!rhs_val.isUndef() and rhs_val.compareWithZero(.eq)) { + break :result .{ .overflowed = .no, .wrapped = lhs }; + } + } + if (maybe_lhs_val) |lhs_val| { + if (maybe_rhs_val) |rhs_val| { + if (lhs_val.isUndef() or rhs_val.isUndef()) { + break :result .{ .overflowed = .undef, .wrapped = try sema.addConstUndef(dest_ty) }; + } + + const result = try lhs_val.intAddWithOverflow(rhs_val, dest_ty, sema.arena, target); + const inst = try sema.addConstant( + dest_ty, + result.wrapped_result, + ); + + if (result.overflowed) { + break :result .{ .overflowed = .yes, .wrapped = inst }; + } else { + break :result .{ .overflowed = .no, .wrapped = inst }; + } + } + } + + break :blk .add_with_overflow; + }, + .sub_with_overflow, + .mul_with_overflow, + .shl_with_overflow, + => return sema.fail(block, src, "TODO implement Sema.zirOverflowArithmetic for {}", .{zir_tag}), + else => unreachable, + }; + + try sema.requireRuntimeBlock(block, src); + return block.addInst(.{ + .tag = air_tag, + .data = .{ .pl_op = .{ + .operand = ptr, + .payload = try 
sema.addExtra(Air.Bin{ + .lhs = lhs, + .rhs = rhs, + }), + } }, + }); + }; + + try sema.storePtr2(block, src, ptr, ptr_src, result.wrapped, src, .store); + + return switch (result.overflowed) { + .yes => Air.Inst.Ref.bool_true, + .no => Air.Inst.Ref.bool_false, + .undef => try sema.addConstUndef(Type.initTag(.bool)), + }; } fn analyzeArithmetic( diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 4e77d67727..fda673631d 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -521,6 +521,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .max => try self.airMax(inst), .slice => try self.airSlice(inst), + .add_with_overflow => try self.airAddWithOverflow(inst), + .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst), .cmp_lt => try self.airCmp(inst, .lt), @@ -968,6 +970,11 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } +fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airAddResultWithOverflow for {}", .{self.target.cpu.arch}); +} + fn airDiv(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement div for {}", .{self.target.cpu.arch}); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index ae53d6cad8..bcc1b927e7 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -519,6 +519,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .max => try self.airMax(inst), .slice => try self.airSlice(inst), + .add_with_overflow => try self.airAddWithOverflow(inst), + .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst), .cmp_lt => try self.airCmp(inst, .lt), @@ -998,6 +1000,11 @@ fn airMulSat(self: *Self, inst: 
Air.Inst.Index) !void { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } +fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airAddResultWithOverflow for {}", .{self.target.cpu.arch}); +} + fn airDiv(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement div for {}", .{self.target.cpu.arch}); diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 17ef79b725..1d67ad0abf 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -500,6 +500,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .max => try self.airMax(inst), .slice => try self.airSlice(inst), + .add_with_overflow => try self.airAddWithOverflow(inst), + .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst), .cmp_lt => try self.airCmp(inst, .lt), @@ -913,6 +915,11 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } +fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airAddResultWithOverflow for {}", .{self.target.cpu.arch}); +} + fn airDiv(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement div for {}", .{self.target.cpu.arch}); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 378184f70a..8fadcdd5f5 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -553,6 +553,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .max => try self.airMax(inst), .slice => try self.airSlice(inst), + .add_with_overflow => try 
self.airAddWithOverflow(inst), + .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst), .cmp_lt => try self.airCmp(inst, .lt), @@ -1027,6 +1029,11 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } +fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airAddResultWithOverflow for {}", .{self.target.cpu.arch}); +} + fn airDiv(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) diff --git a/src/codegen/c.zig b/src/codegen/c.zig index f54ae7f76d..b086d15b48 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1155,6 +1155,8 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO .mul_sat => try airSatOp(f, inst, "muls_"), .shl_sat => try airSatOp(f, inst, "shls_"), + .add_with_overflow => try airAddWithOverflow(f, inst), + .min => try airMinMax(f, inst, "<"), .max => try airMinMax(f, inst, ">"), @@ -1864,6 +1866,12 @@ fn airSatOp(f: *Function, inst: Air.Inst.Index, fn_op: [*:0]const u8) !CValue { return ret; } +fn airAddWithOverflow(f: *Function, inst: Air.Inst.Index) !CValue { + _ = f; + _ = inst; + return f.fail("TODO add with overflow", .{}); +} + fn airNot(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) return CValue.none; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 58239fdaea..d12dad2403 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1714,6 +1714,8 @@ pub const FuncGen = struct { .max => try self.airMax(inst), .slice => try self.airSlice(inst), + .add_with_overflow => try self.airAddWithOverflow(inst), + .bit_and, .bool_and => try self.airAnd(inst), .bit_or, .bool_or => try self.airOr(inst), .xor => try self.airXor(inst), @@ -3133,6 +3135,38 @@ pub const FuncGen = struct { } } + fn 
airAddWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const extra = self.air.extraData(Air.Bin, pl_op.payload).data; + + const ptr = try self.resolveInst(pl_op.operand); + const lhs = try self.resolveInst(extra.lhs); + const rhs = try self.resolveInst(extra.rhs); + + const ptr_ty = self.air.typeOf(pl_op.operand); + const lhs_ty = self.air.typeOf(extra.lhs); + + const intrinsic_name: []const u8 = if (lhs_ty.isSignedInt()) + "llvm.sadd.with.overflow" + else + "llvm.uadd.with.overflow"; + + const llvm_lhs_ty = try self.dg.llvmType(lhs_ty); + + const llvm_fn = self.getIntrinsic(intrinsic_name, &.{llvm_lhs_ty}); + const result_struct = self.builder.buildCall(llvm_fn, &[_]*const llvm.Value{ lhs, rhs }, 2, .Fast, .Auto, ""); + + const result = self.builder.buildExtractValue(result_struct, 0, ""); + const overflow_bit = self.builder.buildExtractValue(result_struct, 1, ""); + + self.store(ptr, ptr_ty, result, .NotAtomic); + + return overflow_bit; + } + fn airAnd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { if (self.liveness.isUnused(inst)) return null; @@ -3511,7 +3545,7 @@ pub const FuncGen = struct { fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { _ = inst; - const llvm_fn = self.getIntrinsic("llvm.debugtrap"); + const llvm_fn = self.getIntrinsic("llvm.debugtrap", &.{}); _ = self.builder.buildCall(llvm_fn, undefined, 0, .C, .Auto, ""); return null; } @@ -3946,13 +3980,10 @@ pub const FuncGen = struct { return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, ""); } - fn getIntrinsic(self: *FuncGen, name: []const u8) *const llvm.Value { + fn getIntrinsic(self: *FuncGen, name: []const u8, types: []*const llvm.Type) *const llvm.Value { const id = llvm.lookupIntrinsicID(name.ptr, name.len); assert(id != 0); - // TODO: add support for overload intrinsics by passing the prefix of 
the intrinsic - // to `lookupIntrinsicID` and then passing the correct types to - // `getIntrinsicDeclaration` - return self.llvmModule().getIntrinsicDeclaration(id, null, 0); + return self.llvmModule().getIntrinsicDeclaration(id, types.ptr, types.len); } fn load(self: *FuncGen, ptr: *const llvm.Value, ptr_ty: Type) ?*const llvm.Value { diff --git a/src/print_air.zig b/src/print_air.zig index 3e503735b9..e11826c874 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -228,6 +228,7 @@ const Writer = struct { .atomic_rmw => try w.writeAtomicRmw(s, inst), .memcpy => try w.writeMemcpy(s, inst), .memset => try w.writeMemset(s, inst), + .add_with_overflow => try w.writeAddWithOverflow(s, inst), } } @@ -348,6 +349,17 @@ const Writer = struct { try s.print(", {s}, {s}", .{ @tagName(extra.op()), @tagName(extra.ordering()) }); } + fn writeAddWithOverflow(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const pl_op = w.air.instructions.items(.data)[inst].pl_op; + const extra = w.air.extraData(Air.Bin, pl_op.payload).data; + + try w.writeOperand(s, inst, 0, pl_op.operand); + try s.writeAll(", "); + try w.writeOperand(s, inst, 1, extra.lhs); + try s.writeAll(", "); + try w.writeOperand(s, inst, 2, extra.rhs); + } + fn writeMemset(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const pl_op = w.air.instructions.items(.data)[inst].pl_op; const extra = w.air.extraData(Air.Bin, pl_op.payload).data; diff --git a/src/value.zig b/src/value.zig index e3d315b6e4..085883f7af 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1969,6 +1969,37 @@ pub const Value = extern union { return @divFloor(@floatToInt(std.math.big.Limb, std.math.log2(w_value)), @typeInfo(std.math.big.Limb).Int.bits) + 1; } + pub const OverflowArithmeticResult = struct { + overflowed: bool, + wrapped_result: Value, + }; + + pub fn intAddWithOverflow( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + target: Target, + ) !OverflowArithmeticResult { + const info 
= ty.intInfo(target); + + var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space); + const rhs_bigint = rhs.toBigInt(&rhs_space); + const limbs = try arena.alloc( + std.math.big.Limb, + std.math.big.int.calcTwosCompLimbCount(info.bits), + ); + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits); + const result = try fromBigInt(arena, result_bigint.toConst()); + return OverflowArithmeticResult{ + .overflowed = overflowed, + .wrapped_result = result, + }; + } + /// Supports both floats and ints; handles undefined. pub fn numberAddWrap( lhs: Value, @@ -1983,19 +2014,8 @@ pub const Value = extern union { return floatAdd(lhs, rhs, ty, arena); } - const info = ty.intInfo(target); - - var lhs_space: Value.BigIntSpace = undefined; - var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space); - const rhs_bigint = rhs.toBigInt(&rhs_space); - const limbs = try arena.alloc( - std.math.big.Limb, - std.math.big.int.calcTwosCompLimbCount(info.bits), - ); - var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; - result_bigint.addWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits); - return fromBigInt(arena, result_bigint.toConst()); + const overflow_result = try intAddWithOverflow(lhs, rhs, ty, arena, target); + return overflow_result.wrapped_result; } fn fromBigInt(arena: Allocator, big_int: BigIntConst) !Value { @@ -2040,6 +2060,32 @@ pub const Value = extern union { return fromBigInt(arena, result_bigint.toConst()); } + pub fn intSubWithOverflow( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + target: Target, + ) !OverflowArithmeticResult { + const info = ty.intInfo(target); + + var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; 
+ const lhs_bigint = lhs.toBigInt(&lhs_space); + const rhs_bigint = rhs.toBigInt(&rhs_space); + const limbs = try arena.alloc( + std.math.big.Limb, + std.math.big.int.calcTwosCompLimbCount(info.bits), + ); + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits); + const wrapped_result = try fromBigInt(arena, result_bigint.toConst()); + return OverflowArithmeticResult{ + .overflowed = overflowed, + .wrapped_result = wrapped_result, + }; + } + /// Supports both floats and ints; handles undefined. pub fn numberSubWrap( lhs: Value, @@ -2054,19 +2100,8 @@ pub const Value = extern union { return floatSub(lhs, rhs, ty, arena); } - const info = ty.intInfo(target); - - var lhs_space: Value.BigIntSpace = undefined; - var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space); - const rhs_bigint = rhs.toBigInt(&rhs_space); - const limbs = try arena.alloc( - std.math.big.Limb, - std.math.big.int.calcTwosCompLimbCount(info.bits), - ); - var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; - result_bigint.subWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits); - return fromBigInt(arena, result_bigint.toConst()); + const overflow_result = try intSubWithOverflow(lhs, rhs, ty, arena, target); + return overflow_result.wrapped_result; } /// Supports integers only; asserts neither operand is undefined. 
diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 1073183a3c..2cd67854af 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -444,3 +444,30 @@ test "128-bit multiplication" { var c = a * b; try expect(c == 6); } + +test "@addWithOverflow" { + var result: u8 = undefined; + try expect(@addWithOverflow(u8, 250, 100, &result)); + try expect(result == 94); + try expect(!@addWithOverflow(u8, 100, 150, &result)); + try expect(result == 250); +} + +test "small int addition" { + var x: u2 = 0; + try expect(x == 0); + + x += 1; + try expect(x == 1); + + x += 1; + try expect(x == 2); + + x += 1; + try expect(x == 3); + + var result: @TypeOf(x) = 3; + try expect(@addWithOverflow(@TypeOf(x), x, 1, &result)); + + try expect(result == 0); +} diff --git a/test/behavior/math_stage1.zig b/test/behavior/math_stage1.zig index e665b90740..63ff2cdec0 100644 --- a/test/behavior/math_stage1.zig +++ b/test/behavior/math_stage1.zig @@ -6,14 +6,6 @@ const maxInt = std.math.maxInt; const minInt = std.math.minInt; const mem = std.mem; -test "@addWithOverflow" { - var result: u8 = undefined; - try expect(@addWithOverflow(u8, 250, 100, &result)); - try expect(result == 94); - try expect(!@addWithOverflow(u8, 100, 150, &result)); - try expect(result == 250); -} - test "@mulWithOverflow" { var result: u8 = undefined; try expect(@mulWithOverflow(u8, 86, 3, &result)); @@ -90,25 +82,6 @@ fn testCtzVectors() !void { try expectEqual(@ctz(u16, @splat(64, @as(u16, 0b00000000))), @splat(64, @as(u5, 16))); } -test "small int addition" { - var x: u2 = 0; - try expect(x == 0); - - x += 1; - try expect(x == 1); - - x += 1; - try expect(x == 2); - - x += 1; - try expect(x == 3); - - var result: @TypeOf(x) = 3; - try expect(@addWithOverflow(@TypeOf(x), x, 1, &result)); - - try expect(result == 0); -} - test "allow signed integer division/remainder when values are comptime known and positive or exact" { try expect(5 / 3 == 1); try expect(-5 / -3 == 1); From 
2f7e98c129ca3c86fe20c043078bcc7f2fe477d6 Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Sat, 18 Dec 2021 05:39:13 +0100 Subject: [PATCH 12/17] stage2: also write addrspace to pointer typeinfo --- src/Sema.zig | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 82901e8a8c..9b9371c856 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -8881,7 +8881,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, .Pointer => { const info = ty.ptrInfo().data; - const field_values = try sema.arena.alloc(Value, 7); + const field_values = try sema.arena.alloc(Value, 8); // size: Size, field_values[0] = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.size)); // is_const: bool, @@ -8890,12 +8890,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai field_values[2] = if (info.@"volatile") Value.initTag(.bool_true) else Value.initTag(.bool_false); // alignment: comptime_int, field_values[3] = try Value.Tag.int_u64.create(sema.arena, info.@"align"); + // address_space: AddressSpace + field_values[4] = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.@"addrspace")); // child: type, - field_values[4] = try Value.Tag.ty.create(sema.arena, info.pointee_type); + field_values[5] = try Value.Tag.ty.create(sema.arena, info.pointee_type); // is_allowzero: bool, - field_values[5] = if (info.@"allowzero") Value.initTag(.bool_true) else Value.initTag(.bool_false); + field_values[6] = if (info.@"allowzero") Value.initTag(.bool_true) else Value.initTag(.bool_false); // sentinel: anytype, - field_values[6] = if (info.sentinel) |some| try Value.Tag.opt_payload.create(sema.arena, some) else Value.@"null"; + field_values[7] = if (info.sentinel) |some| try Value.Tag.opt_payload.create(sema.arena, some) else Value.@"null"; return sema.addConstant( type_info_ty, From ddd2ef822f99979d3ea61583a91ab236942e6367 Mon Sep 17 00:00:00 2001 From: Robin 
Voetter Date: Sat, 18 Dec 2021 06:11:46 +0100 Subject: [PATCH 13/17] stage2: @returnAddress() --- src/Air.zig | 4 ++++ src/Liveness.zig | 1 + src/Sema.zig | 6 +++++- src/arch/aarch64/CodeGen.zig | 5 +++++ src/arch/arm/CodeGen.zig | 5 +++++ src/arch/riscv64/CodeGen.zig | 5 +++++ src/arch/x86_64/CodeGen.zig | 5 +++++ src/codegen/c.zig | 5 +++++ src/codegen/llvm.zig | 10 ++++++++++ src/print_air.zig | 1 + 10 files changed, 46 insertions(+), 1 deletion(-) diff --git a/src/Air.zig b/src/Air.zig index 72e281d03e..9e4a61b9a2 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -195,6 +195,9 @@ pub const Inst = struct { /// Lowers to a hardware trap instruction, or the next best thing. /// Result type is always void. breakpoint, + /// Yields the return address of the current function. + /// Uses the `no_op` field. + ret_addr, /// Function call. /// Result type is the return type of the function being called. /// Uses the `pl_op` field with the `Call` payload. operand is the callee. @@ -785,6 +788,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .ptrtoint, .slice_len, + .ret_addr, => return Type.initTag(.usize), .bool_to_int => return Type.initTag(.u1), diff --git a/src/Liveness.zig b/src/Liveness.zig index a7128e2cc2..160a2e97d3 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -281,6 +281,7 @@ fn analyzeInst( .dbg_stmt, .unreach, .fence, + .ret_addr, => return trackOperands(a, new_set, inst, main_tomb, .{ .none, .none, .none }), .not, diff --git a/src/Sema.zig b/src/Sema.zig index 9b9371c856..03ee58dc06 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -8739,8 +8739,12 @@ fn zirRetAddr( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + const tracy = trace(@src()); + defer tracy.end(); + const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; - return sema.fail(block, src, "TODO: implement Sema.zirRetAddr", .{}); + try sema.requireRuntimeBlock(block, src); + return try block.addNoOp(.ret_addr); } fn 
zirBuiltinSrc( diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index fda673631d..1c6d54485b 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -547,6 +547,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .block => try self.airBlock(inst), .br => try self.airBr(inst), .breakpoint => try self.airBreakpoint(), + .ret_addr => try self.airRetAddr(), .fence => try self.airFence(), .call => try self.airCall(inst), .cond_br => try self.airCondBr(inst), @@ -1416,6 +1417,10 @@ fn airBreakpoint(self: *Self) !void { return self.finishAirBookkeeping(); } +fn airRetAddr(self: *Self) !void { + return self.fail("TODO implement airRetAddr for {}", .{self.target.cpu.arch}); +} + fn airFence(self: *Self) !void { return self.fail("TODO implement fence() for {}", .{self.target.cpu.arch}); //return self.finishAirBookkeeping(); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index bcc1b927e7..0039d78434 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -545,6 +545,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .block => try self.airBlock(inst), .br => try self.airBr(inst), .breakpoint => try self.airBreakpoint(), + .ret_addr => try self.airRetAddr(), .fence => try self.airFence(), .call => try self.airCall(inst), .cond_br => try self.airCondBr(inst), @@ -1850,6 +1851,10 @@ fn airBreakpoint(self: *Self) !void { return self.finishAirBookkeeping(); } +fn airRetAddr(self: *Self) !void { + return self.fail("TODO implement airRetAddr for {}", .{self.target.cpu.arch}); +} + fn airFence(self: *Self) !void { return self.fail("TODO implement fence() for {}", .{self.target.cpu.arch}); //return self.finishAirBookkeeping(); diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 1d67ad0abf..51703681e9 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -526,6 +526,7 @@ fn genBody(self: *Self, body: 
[]const Air.Inst.Index) InnerError!void { .block => try self.airBlock(inst), .br => try self.airBr(inst), .breakpoint => try self.airBreakpoint(), + .ret_addr => try self.airRetAddr(), .fence => try self.airFence(), .call => try self.airCall(inst), .cond_br => try self.airCondBr(inst), @@ -1354,6 +1355,10 @@ fn airBreakpoint(self: *Self) !void { return self.finishAirBookkeeping(); } +fn airRetAddr(self: *Self) !void { + return self.fail("TODO implement airRetAddr for {}", .{self.target.cpu.arch}); +} + fn airFence(self: *Self) !void { return self.fail("TODO implement fence() for {}", .{self.target.cpu.arch}); //return self.finishAirBookkeeping(); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 8fadcdd5f5..3b40293527 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -579,6 +579,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .block => try self.airBlock(inst), .br => try self.airBr(inst), .breakpoint => try self.airBreakpoint(), + .ret_addr => try self.airRetAddr(), .fence => try self.airFence(), .call => try self.airCall(inst), .cond_br => try self.airCondBr(inst), @@ -1839,6 +1840,10 @@ fn airBreakpoint(self: *Self) !void { return self.finishAirBookkeeping(); } +fn airRetAddr(self: *Self) !void { + return self.fail("TODO implement airRetAddr for {}", .{self.target.cpu.arch}); +} + fn airFence(self: *Self) !void { return self.fail("TODO implement fence() for {}", .{self.target.cpu.arch}); //return self.finishAirBookkeeping(); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index b086d15b48..070aa0a238 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1125,6 +1125,7 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO .arg => airArg(f), .breakpoint => try airBreakpoint(f), + .ret_addr => try airRetAddr(f), .unreach => try airUnreach(f), .fence => try airFence(f, inst), @@ -2191,6 +2192,10 @@ fn airBreakpoint(f: *Function) !CValue { return 
CValue.none; } +fn airRetAddr(f: *Function) !CValue { + return f.fail("TODO implement codegen for airRetAddr", .{}); +} + fn airFence(f: *Function, inst: Air.Inst.Index) !CValue { const atomic_order = f.air.instructions.items(.data)[inst].fence; const writer = f.object.writer(); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index d12dad2403..0a2b12dd1e 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1747,6 +1747,7 @@ pub const FuncGen = struct { .br => try self.airBr(inst), .switch_br => try self.airSwitchBr(inst), .breakpoint => try self.airBreakpoint(inst), + .ret_addr => try self.airRetAddr(inst), .call => try self.airCall(inst), .cond_br => try self.airCondBr(inst), .intcast => try self.airIntCast(inst), @@ -3550,6 +3551,15 @@ pub const FuncGen = struct { return null; } + fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + _ = inst; + const i32_zero = self.context.intType(32).constNull(); + const usize_llvm_ty = try self.dg.llvmType(Type.usize); + const llvm_fn = self.getIntrinsic("llvm.returnaddress", &.{}); + const ptr_val = self.builder.buildCall(llvm_fn, &[_]*const llvm.Value{i32_zero}, 1, .Fast, .Auto, ""); + return self.builder.buildPtrToInt(ptr_val, usize_llvm_ty, ""); + } + fn airFence(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { const atomic_order = self.air.instructions.items(.data)[inst].fence; const llvm_memory_order = toLlvmAtomicOrdering(atomic_order); diff --git a/src/print_air.zig b/src/print_air.zig index e11826c874..17af7ebf62 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -159,6 +159,7 @@ const Writer = struct { .breakpoint, .unreach, + .ret_addr, => try w.writeNoOp(s, inst), .const_ty, From c47ed0c912d2f445710fe4486fa071dd63601989 Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Mon, 20 Dec 2021 02:06:09 +0100 Subject: [PATCH 14/17] stage2: @mulWithOverflow --- src/Air.zig | 10 ++++++- src/Liveness.zig | 2 +- src/Sema.zig | 56 ++++++++++++++++++++++++++++++++--- 
src/arch/aarch64/CodeGen.zig | 6 ++++ src/arch/arm/CodeGen.zig | 6 ++++ src/arch/riscv64/CodeGen.zig | 6 ++++ src/arch/x86_64/CodeGen.zig | 6 ++++ src/codegen/c.zig | 7 +++++ src/codegen/llvm.zig | 15 ++++++---- src/print_air.zig | 7 +++-- src/value.zig | 56 +++++++++++++++++++++++------------ test/behavior/math.zig | 24 +++++++++++++++ test/behavior/math_stage1.zig | 8 ----- 13 files changed, 168 insertions(+), 41 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index 9e4a61b9a2..5d54be7392 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -141,6 +141,12 @@ pub const Inst = struct { /// of the operation. /// Uses the `pl_op` field with payload `Bin`. add_with_overflow, + /// Integer multiplication with overflow. Both operands are guaranteed to be the same type, + /// and the result is bool. The wrapped value is written to the pointer given by the in + /// operand of the `pl_op` field. Payload is `Bin` with `lhs` and `rhs` the relevant types + /// of the operation. + /// Uses the `pl_op` field with payload `Bin`. + mul_with_overflow, /// Allocates stack local memory. /// Uses the `ty` field. 
alloc, @@ -815,7 +821,9 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { return ptr_ty.elemType(); }, - .add_with_overflow => return Type.initTag(.bool), + .add_with_overflow, + .mul_with_overflow, + => return Type.initTag(.bool), } } diff --git a/src/Liveness.zig b/src/Liveness.zig index 160a2e97d3..6859f64660 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -382,7 +382,7 @@ fn analyzeInst( const extra = a.air.extraData(Air.AtomicRmw, pl_op.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.operand, .none }); }, - .memset, .memcpy, .add_with_overflow => { + .memset, .memcpy, .add_with_overflow, .mul_with_overflow => { const pl_op = inst_datas[inst].pl_op; const extra = a.air.extraData(Air.Bin, pl_op.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.lhs, extra.rhs }); diff --git a/src/Sema.zig b/src/Sema.zig index 03ee58dc06..38183d1052 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -7343,8 +7343,8 @@ fn zirOverflowArithmetic( overflowed: enum { yes, no, undef }, wrapped: Air.Inst.Ref, } = result: { - const air_tag: Air.Inst.Tag = switch (zir_tag) { - .add_with_overflow => blk: { + switch (zir_tag) { + .add_with_overflow => { // If either of the arguments is zero, `false` is returned and the other is stored // to the result, even if it is undefined.. // Otherwise, if either of the argument is undefined, undefined is returned. @@ -7377,14 +7377,62 @@ fn zirOverflowArithmetic( } } } + }, + .mul_with_overflow => { + // If either of the arguments is zero, the result is zero and no overflow occured. + // If either of the arguments is one, the result is the other and no overflow occured. + // Otherwise, if either of the arguments is undefined, both results are undefined. 
- break :blk .add_with_overflow; + if (maybe_lhs_val) |lhs_val| { + if (!lhs_val.isUndef()) { + if (lhs_val.compareWithZero(.eq)) { + break :result .{ .overflowed = .no, .wrapped = lhs }; + } else if (lhs_val.compare(.eq, Value.one, dest_ty)) { + break :result .{ .overflowed = .no, .wrapped = rhs }; + } + } + } + + if (maybe_rhs_val) |rhs_val| { + if (!rhs_val.isUndef()) { + if (rhs_val.compareWithZero(.eq)) { + break :result .{ .overflowed = .no, .wrapped = rhs }; + } else if (rhs_val.compare(.eq, Value.one, dest_ty)) { + break :result .{ .overflowed = .no, .wrapped = lhs }; + } + } + } + + if (maybe_lhs_val) |lhs_val| { + if (maybe_rhs_val) |rhs_val| { + if (lhs_val.isUndef() or rhs_val.isUndef()) { + break :result .{ .overflowed = .undef, .wrapped = try sema.addConstUndef(dest_ty) }; + } + + const result = try lhs_val.intMulWithOverflow(rhs_val, dest_ty, sema.arena, target); + const inst = try sema.addConstant( + dest_ty, + result.wrapped_result, + ); + + if (result.overflowed) { + break :result .{ .overflowed = .yes, .wrapped = inst }; + } else { + break :result .{ .overflowed = .no, .wrapped = inst }; + } + } + } }, .sub_with_overflow, - .mul_with_overflow, .shl_with_overflow, => return sema.fail(block, src, "TODO implement Sema.zirOverflowArithmetic for {}", .{zir_tag}), else => unreachable, + } + + const air_tag: Air.Inst.Tag = switch (zir_tag) { + .add_with_overflow => .add_with_overflow, + .mul_with_overflow => .mul_with_overflow, + else => return sema.fail(block, src, "TODO implement runtime Sema.zirOverflowArithmetic for {}", .{zir_tag}), }; try sema.requireRuntimeBlock(block, src); diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 1c6d54485b..e34475f1ee 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -522,6 +522,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .slice => try self.airSlice(inst), .add_with_overflow => try self.airAddWithOverflow(inst), + 
.mul_with_overflow => try self.airMulWithOverflow(inst), .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst), @@ -976,6 +977,11 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { return self.fail("TODO implement airAddResultWithOverflow for {}", .{self.target.cpu.arch}); } +fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airMulResultWithOverflow for {}", .{self.target.cpu.arch}); +} + fn airDiv(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement div for {}", .{self.target.cpu.arch}); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 0039d78434..779ce52036 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -520,6 +520,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .slice => try self.airSlice(inst), .add_with_overflow => try self.airAddWithOverflow(inst), + .mul_with_overflow => try self.airMulWithOverflow(inst), .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst), @@ -1006,6 +1007,11 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { return self.fail("TODO implement airAddResultWithOverflow for {}", .{self.target.cpu.arch}); } +fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airMulResultWithOverflow for {}", .{self.target.cpu.arch}); +} + fn airDiv(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement div for {}", .{self.target.cpu.arch}); diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 51703681e9..c8f7173b1c 100644 --- a/src/arch/riscv64/CodeGen.zig +++ 
b/src/arch/riscv64/CodeGen.zig @@ -501,6 +501,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .slice => try self.airSlice(inst), .add_with_overflow => try self.airAddWithOverflow(inst), + .mul_with_overflow => try self.airMulWithOverflow(inst), .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst), @@ -921,6 +922,11 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { return self.fail("TODO implement airAddResultWithOverflow for {}", .{self.target.cpu.arch}); } +fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airMulResultWithOverflow for {}", .{self.target.cpu.arch}); +} + fn airDiv(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement div for {}", .{self.target.cpu.arch}); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 3b40293527..fe8d8b55f7 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -554,6 +554,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .slice => try self.airSlice(inst), .add_with_overflow => try self.airAddWithOverflow(inst), + .mul_with_overflow => try self.airMulWithOverflow(inst), .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst), @@ -1035,6 +1036,11 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { return self.fail("TODO implement airAddResultWithOverflow for {}", .{self.target.cpu.arch}); } +fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airMulResultWithOverflow for {}", .{self.target.cpu.arch}); +} + fn airDiv(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) diff 
--git a/src/codegen/c.zig b/src/codegen/c.zig index 070aa0a238..f6a3105760 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1157,6 +1157,7 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO .shl_sat => try airSatOp(f, inst, "shls_"), .add_with_overflow => try airAddWithOverflow(f, inst), + .mul_with_overflow => try airMulWithOverflow(f, inst), .min => try airMinMax(f, inst, "<"), .max => try airMinMax(f, inst, ">"), @@ -1873,6 +1874,12 @@ fn airAddWithOverflow(f: *Function, inst: Air.Inst.Index) !CValue { return f.fail("TODO add with overflow", .{}); } +fn airMulWithOverflow(f: *Function, inst: Air.Inst.Index) !CValue { + _ = f; + _ = inst; + return f.fail("TODO mul with overflow", .{}); +} + fn airNot(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) return CValue.none; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 0a2b12dd1e..209e3fb4de 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1714,7 +1714,8 @@ pub const FuncGen = struct { .max => try self.airMax(inst), .slice => try self.airSlice(inst), - .add_with_overflow => try self.airAddWithOverflow(inst), + .add_with_overflow => try self.airOverflow(inst, "llvm.sadd.with.overflow", "llvm.uadd.with.overflow"), + .mul_with_overflow => try self.airOverflow(inst, "llvm.smul.with.overflow", "llvm.umul.with.overflow"), .bit_and, .bool_and => try self.airAnd(inst), .bit_or, .bool_or => try self.airOr(inst), @@ -3136,7 +3137,12 @@ pub const FuncGen = struct { } } - fn airAddWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + fn airOverflow( + self: *FuncGen, + inst: Air.Inst.Index, + signed_intrinsic: []const u8, + unsigned_intrinsic: []const u8, + ) !?*const llvm.Value { if (self.liveness.isUnused(inst)) return null; @@ -3150,10 +3156,7 @@ pub const FuncGen = struct { const ptr_ty = self.air.typeOf(pl_op.operand); const lhs_ty = self.air.typeOf(extra.lhs); - const intrinsic_name: []const u8 = if 
(lhs_ty.isSignedInt()) - "llvm.sadd.with.overflow" - else - "llvm.uadd.with.overflow"; + const intrinsic_name = if (lhs_ty.isSignedInt()) signed_intrinsic else unsigned_intrinsic; const llvm_lhs_ty = try self.dg.llvmType(lhs_ty); diff --git a/src/print_air.zig b/src/print_air.zig index 17af7ebf62..2204d16bd6 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -229,7 +229,10 @@ const Writer = struct { .atomic_rmw => try w.writeAtomicRmw(s, inst), .memcpy => try w.writeMemcpy(s, inst), .memset => try w.writeMemset(s, inst), - .add_with_overflow => try w.writeAddWithOverflow(s, inst), + + .add_with_overflow, + .mul_with_overflow, + => try w.writeOverflow(s, inst), } } @@ -350,7 +353,7 @@ const Writer = struct { try s.print(", {s}, {s}", .{ @tagName(extra.op()), @tagName(extra.ordering()) }); } - fn writeAddWithOverflow(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + fn writeOverflow(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const pl_op = w.air.instructions.items(.data)[inst].pl_op; const extra = w.air.extraData(Air.Bin, pl_op.payload).data; diff --git a/src/value.zig b/src/value.zig index 085883f7af..19546ed8b9 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2130,6 +2130,41 @@ pub const Value = extern union { return fromBigInt(arena, result_bigint.toConst()); } + pub fn intMulWithOverflow( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + target: Target, + ) !OverflowArithmeticResult { + const info = ty.intInfo(target); + + var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space); + const rhs_bigint = rhs.toBigInt(&rhs_space); + const limbs = try arena.alloc( + std.math.big.Limb, + lhs_bigint.limbs.len + rhs_bigint.limbs.len, + ); + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + var limbs_buffer = try arena.alloc( + std.math.big.Limb, + 
std.math.big.int.calcMulLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len, 1), + ); + result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, arena); + + const overflowed = !result_bigint.toConst().fitsInTwosComp(info.signedness, info.bits); + if (overflowed) { + result_bigint.truncate(result_bigint.toConst(), info.signedness, info.bits); + } + + return OverflowArithmeticResult{ + .overflowed = overflowed, + .wrapped_result = try fromBigInt(arena, result_bigint.toConst()), + }; + } + /// Supports both floats and ints; handles undefined. pub fn numberMulWrap( lhs: Value, @@ -2144,24 +2179,8 @@ pub const Value = extern union { return floatMul(lhs, rhs, ty, arena); } - const info = ty.intInfo(target); - - var lhs_space: Value.BigIntSpace = undefined; - var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space); - const rhs_bigint = rhs.toBigInt(&rhs_space); - const limbs = try arena.alloc( - std.math.big.Limb, - std.math.big.int.calcTwosCompLimbCount(info.bits), - ); - var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; - var limbs_buffer = try arena.alloc( - std.math.big.Limb, - std.math.big.int.calcMulWrapLimbsBufferLen(info.bits, lhs_bigint.limbs.len, rhs_bigint.limbs.len, 1), - ); - defer arena.free(limbs_buffer); - result_bigint.mulWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits, limbs_buffer, arena); - return fromBigInt(arena, result_bigint.toConst()); + const overflow_result = try intMulWithOverflow(lhs, rhs, ty, arena, target); + return overflow_result.wrapped_result; } /// Supports integers only; asserts neither operand is undefined. 
@@ -2194,7 +2213,6 @@ pub const Value = extern union { std.math.big.Limb, std.math.big.int.calcMulLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len, 1), ); - defer arena.free(limbs_buffer); result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, arena); result_bigint.saturate(result_bigint.toConst(), info.signedness, info.bits); return fromBigInt(arena, result_bigint.toConst()); diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 2cd67854af..963fe3f0d5 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -451,6 +451,14 @@ test "@addWithOverflow" { try expect(result == 94); try expect(!@addWithOverflow(u8, 100, 150, &result)); try expect(result == 250); + + var a: u8 = 200; + var b: u8 = 99; + try expect(@addWithOverflow(u8, a, b, &result)); + try expect(result == 43); + b = 55; + try expect(!@addWithOverflow(u8, a, b, &result)); + try expect(result == 255); } test "small int addition" { @@ -471,3 +479,19 @@ test "small int addition" { try expect(result == 0); } + +test "@mulWithOverflow" { + var result: u8 = undefined; + try expect(@mulWithOverflow(u8, 86, 3, &result)); + try expect(result == 2); + try expect(!@mulWithOverflow(u8, 85, 3, &result)); + try expect(result == 255); + + var a: u8 = 123; + var b: u8 = 2; + try expect(!@mulWithOverflow(u8, a, b, &result)); + try expect(result == 246); + b = 4; + try expect(@mulWithOverflow(u8, a, b, &result)); + try expect(result == 236); +} diff --git a/test/behavior/math_stage1.zig b/test/behavior/math_stage1.zig index 63ff2cdec0..381f89634b 100644 --- a/test/behavior/math_stage1.zig +++ b/test/behavior/math_stage1.zig @@ -6,14 +6,6 @@ const maxInt = std.math.maxInt; const minInt = std.math.minInt; const mem = std.mem; -test "@mulWithOverflow" { - var result: u8 = undefined; - try expect(@mulWithOverflow(u8, 86, 3, &result)); - try expect(result == 2); - try expect(!@mulWithOverflow(u8, 85, 3, &result)); - try expect(result == 255); -} - test "@subWithOverflow" { var result: u8 = 
undefined; try expect(@subWithOverflow(u8, 1, 2, &result)); From 58d67a6718d5d0673389fa19f5bb20812b4bb22a Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Mon, 20 Dec 2021 03:58:19 +0100 Subject: [PATCH 15/17] stage2: make anyopaque sized While this is technically incorrect, proper handling of anyopaque, as well as regular opaque, is probably best left until pointers to zero-sized types having no bits is abolished. --- src/codegen/llvm.zig | 28 ++++++++++++++++------------ src/type.zig | 2 +- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 209e3fb4de..07fe138786 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -761,21 +761,25 @@ pub const DeclGen = struct { dg.context.intType(8); return llvm_elem_ty.pointerType(llvm_addrspace); }, - .Opaque => { - const gop = try dg.object.type_map.getOrPut(gpa, t); - if (gop.found_existing) return gop.value_ptr.*; + .Opaque => switch (t.tag()) { + .@"opaque" => { + const gop = try dg.object.type_map.getOrPut(gpa, t); + if (gop.found_existing) return gop.value_ptr.*; - // The Type memory is ephemeral; since we want to store a longer-lived - // reference, we need to copy it here. - gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); + // The Type memory is ephemeral; since we want to store a longer-lived + // reference, we need to copy it here. 
+ gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); - const opaque_obj = t.castTag(.@"opaque").?.data; - const name = try opaque_obj.getFullyQualifiedName(gpa); - defer gpa.free(name); + const opaque_obj = t.castTag(.@"opaque").?.data; + const name = try opaque_obj.getFullyQualifiedName(gpa); + defer gpa.free(name); - const llvm_struct_ty = dg.context.structCreateNamed(name); - gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls - return llvm_struct_ty; + const llvm_struct_ty = dg.context.structCreateNamed(name); + gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls + return llvm_struct_ty; + }, + .anyopaque => return dg.context.intType(8), + else => unreachable, }, .Array => { const elem_type = try dg.llvmType(t.childType()); diff --git a/src/type.zig b/src/type.zig index a81bd3ed32..fb16a4d0f1 100644 --- a/src/type.zig +++ b/src/type.zig @@ -1575,6 +1575,7 @@ pub const Type = extern union { .extern_options, .@"anyframe", .anyframe_T, + .anyopaque, .@"opaque", .single_const_pointer, .single_mut_pointer, @@ -1654,7 +1655,6 @@ pub const Type = extern union { return payload.error_set.hasCodeGenBits() or payload.payload.hasCodeGenBits(); }, - .anyopaque, .void, .type, .comptime_int, From 964dbeb82623515b8392c8c7cb9317246812174e Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Mon, 20 Dec 2021 21:53:40 +0100 Subject: [PATCH 16/17] stage2: @subWithOverflow --- src/Air.zig | 7 ++++++ src/Liveness.zig | 7 +++++- src/Sema.zig | 44 ++++++++++++++++++----------------- src/arch/aarch64/CodeGen.zig | 6 +++++ src/arch/arm/CodeGen.zig | 6 +++++ src/arch/riscv64/CodeGen.zig | 6 +++++ src/arch/x86_64/CodeGen.zig | 6 +++++ src/codegen/c.zig | 7 ++++++ src/codegen/llvm.zig | 1 + src/print_air.zig | 1 + test/behavior/math.zig | 16 +++++++++++++ test/behavior/math_stage1.zig | 8 ------- 12 files changed, 85 insertions(+), 30 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index 5d54be7392..912e70daed 100644 --- 
a/src/Air.zig +++ b/src/Air.zig @@ -141,6 +141,12 @@ pub const Inst = struct { /// of the operation. /// Uses the `pl_op` field with payload `Bin`. add_with_overflow, + /// Integer subtraction with overflow. Both operands are guaranteed to be the same type, + /// and the result is bool. The wrapped value is written to the pointer given by the in + /// operand of the `pl_op` field. Payload is `Bin` with `lhs` and `rhs` the relevant types + /// of the operation. + /// Uses the `pl_op` field with payload `Bin`. + sub_with_overflow, /// Integer multiplication with overflow. Both operands are guaranteed to be the same type, /// and the result is bool. The wrapped value is written to the pointer given by the in /// operand of the `pl_op` field. Payload is `Bin` with `lhs` and `rhs` the relevant types @@ -822,6 +828,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { }, .add_with_overflow, + .sub_with_overflow, .mul_with_overflow, => return Type.initTag(.bool), } diff --git a/src/Liveness.zig b/src/Liveness.zig index 6859f64660..ad0ce7ffb9 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -382,7 +382,12 @@ fn analyzeInst( const extra = a.air.extraData(Air.AtomicRmw, pl_op.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.operand, .none }); }, - .memset, .memcpy, .add_with_overflow, .mul_with_overflow => { + .memset, + .memcpy, + .add_with_overflow, + .sub_with_overflow, + .mul_with_overflow, + => { const pl_op = inst_datas[inst].pl_op; const extra = a.air.extraData(Air.Bin, pl_op.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.lhs, extra.rhs }); diff --git a/src/Sema.zig b/src/Sema.zig index 38183d1052..2eb661503e 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -7365,16 +7365,27 @@ fn zirOverflowArithmetic( } const result = try lhs_val.intAddWithOverflow(rhs_val, dest_ty, sema.arena, target); - const inst = try sema.addConstant( - dest_ty, - result.wrapped_result, - ); - - if 
(result.overflowed) { - break :result .{ .overflowed = .yes, .wrapped = inst }; - } else { - break :result .{ .overflowed = .no, .wrapped = inst }; + const inst = try sema.addConstant(dest_ty, result.wrapped_result); + break :result .{ .overflowed = if (result.overflowed) .yes else .no, .wrapped = inst }; + } + } + }, + .sub_with_overflow => { + // If the rhs is zero, then the result is lhs and no overflow occured. + // Otherwise, if either result is undefined, both results are undefined. + if (maybe_rhs_val) |rhs_val| { + if (rhs_val.isUndef()) { + break :result .{ .overflowed = .undef, .wrapped = try sema.addConstUndef(dest_ty) }; + } else if (rhs_val.compareWithZero(.eq)) { + break :result .{ .overflowed = .no, .wrapped = lhs }; + } else if (maybe_lhs_val) |lhs_val| { + if (lhs_val.isUndef()) { + break :result .{ .overflowed = .undef, .wrapped = try sema.addConstUndef(dest_ty) }; } + + const result = try lhs_val.intSubWithOverflow(rhs_val, dest_ty, sema.arena, target); + const inst = try sema.addConstant(dest_ty, result.wrapped_result); + break :result .{ .overflowed = if (result.overflowed) .yes else .no, .wrapped = inst }; } } }, @@ -7382,7 +7393,6 @@ fn zirOverflowArithmetic( // If either of the arguments is zero, the result is zero and no overflow occured. // If either of the arguments is one, the result is the other and no overflow occured. // Otherwise, if either of the arguments is undefined, both results are undefined. 
- if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (lhs_val.compareWithZero(.eq)) { @@ -7410,20 +7420,11 @@ fn zirOverflowArithmetic( } const result = try lhs_val.intMulWithOverflow(rhs_val, dest_ty, sema.arena, target); - const inst = try sema.addConstant( - dest_ty, - result.wrapped_result, - ); - - if (result.overflowed) { - break :result .{ .overflowed = .yes, .wrapped = inst }; - } else { - break :result .{ .overflowed = .no, .wrapped = inst }; - } + const inst = try sema.addConstant(dest_ty, result.wrapped_result); + break :result .{ .overflowed = if (result.overflowed) .yes else .no, .wrapped = inst }; } } }, - .sub_with_overflow, .shl_with_overflow, => return sema.fail(block, src, "TODO implement Sema.zirOverflowArithmetic for {}", .{zir_tag}), else => unreachable, @@ -7432,6 +7433,7 @@ fn zirOverflowArithmetic( const air_tag: Air.Inst.Tag = switch (zir_tag) { .add_with_overflow => .add_with_overflow, .mul_with_overflow => .mul_with_overflow, + .sub_with_overflow => .sub_with_overflow, else => return sema.fail(block, src, "TODO implement runtime Sema.zirOverflowArithmetic for {}", .{zir_tag}), }; diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index e34475f1ee..b381116a51 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -522,6 +522,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .slice => try self.airSlice(inst), .add_with_overflow => try self.airAddWithOverflow(inst), + .sub_with_overflow => try self.airSubWithOverflow(inst), .mul_with_overflow => try self.airMulWithOverflow(inst), .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst), @@ -977,6 +978,11 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { return self.fail("TODO implement airAddResultWithOverflow for {}", .{self.target.cpu.arch}); } +fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement 
airSubResultWithOverflow for {}", .{self.target.cpu.arch}); +} + fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { _ = inst; return self.fail("TODO implement airMulResultWithOverflow for {}", .{self.target.cpu.arch}); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 779ce52036..f887810d9a 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -520,6 +520,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .slice => try self.airSlice(inst), .add_with_overflow => try self.airAddWithOverflow(inst), + .sub_with_overflow => try self.airSubWithOverflow(inst), .mul_with_overflow => try self.airMulWithOverflow(inst), .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst), @@ -1007,6 +1008,11 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { return self.fail("TODO implement airAddResultWithOverflow for {}", .{self.target.cpu.arch}); } +fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airSubResultWithOverflow for {}", .{self.target.cpu.arch}); +} + fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { _ = inst; return self.fail("TODO implement airMulResultWithOverflow for {}", .{self.target.cpu.arch}); diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index c8f7173b1c..56904206ab 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -501,6 +501,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .slice => try self.airSlice(inst), .add_with_overflow => try self.airAddWithOverflow(inst), + .sub_with_overflow => try self.airSubWithOverflow(inst), .mul_with_overflow => try self.airMulWithOverflow(inst), .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst), @@ -922,6 +923,11 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { return self.fail("TODO implement airAddResultWithOverflow for 
{}", .{self.target.cpu.arch}); } +fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airSubResultWithOverflow for {}", .{self.target.cpu.arch}); +} + fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { _ = inst; return self.fail("TODO implement airMulResultWithOverflow for {}", .{self.target.cpu.arch}); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index fe8d8b55f7..be26372031 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -554,6 +554,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .slice => try self.airSlice(inst), .add_with_overflow => try self.airAddWithOverflow(inst), + .sub_with_overflow => try self.airSubWithOverflow(inst), .mul_with_overflow => try self.airMulWithOverflow(inst), .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst), @@ -1036,6 +1037,11 @@ fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { return self.fail("TODO implement airAddResultWithOverflow for {}", .{self.target.cpu.arch}); } +fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airSubResultWithOverflow for {}", .{self.target.cpu.arch}); +} + fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { _ = inst; return self.fail("TODO implement airMulResultWithOverflow for {}", .{self.target.cpu.arch}); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index f6a3105760..68b700db17 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1157,6 +1157,7 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO .shl_sat => try airSatOp(f, inst, "shls_"), .add_with_overflow => try airAddWithOverflow(f, inst), + .sub_with_overflow => try airSubWithOverflow(f, inst), .mul_with_overflow => try airMulWithOverflow(f, inst), .min => try airMinMax(f, inst, "<"), @@ -1874,6 +1875,12 @@ fn airAddWithOverflow(f: 
*Function, inst: Air.Inst.Index) !CValue { return f.fail("TODO add with overflow", .{}); } +fn airSubWithOverflow(f: *Function, inst: Air.Inst.Index) !CValue { + _ = f; + _ = inst; + return f.fail("TODO sub with overflow", .{}); +} + fn airMulWithOverflow(f: *Function, inst: Air.Inst.Index) !CValue { _ = f; _ = inst; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 07fe138786..4089a39cb3 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1719,6 +1719,7 @@ pub const FuncGen = struct { .slice => try self.airSlice(inst), .add_with_overflow => try self.airOverflow(inst, "llvm.sadd.with.overflow", "llvm.uadd.with.overflow"), + .sub_with_overflow => try self.airOverflow(inst, "llvm.ssub.with.overflow", "llvm.usub.with.overflow"), .mul_with_overflow => try self.airOverflow(inst, "llvm.smul.with.overflow", "llvm.umul.with.overflow"), .bit_and, .bool_and => try self.airAnd(inst), diff --git a/src/print_air.zig b/src/print_air.zig index 2204d16bd6..d6bfaac4bc 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -231,6 +231,7 @@ const Writer = struct { .memset => try w.writeMemset(s, inst), .add_with_overflow, + .sub_with_overflow, .mul_with_overflow, => try w.writeOverflow(s, inst), } diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 963fe3f0d5..50a7267c93 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -495,3 +495,19 @@ test "@mulWithOverflow" { try expect(@mulWithOverflow(u8, a, b, &result)); try expect(result == 236); } + +test "@subWithOverflow" { + var result: u8 = undefined; + try expect(@subWithOverflow(u8, 1, 2, &result)); + try expect(result == 255); + try expect(!@subWithOverflow(u8, 1, 1, &result)); + try expect(result == 0); + + var a: u8 = 1; + var b: u8 = 2; + try expect(@subWithOverflow(u8, a, b, &result)); + try expect(result == 255); + b = 1; + try expect(!@subWithOverflow(u8, a, b, &result)); + try expect(result == 0); +} diff --git a/test/behavior/math_stage1.zig 
b/test/behavior/math_stage1.zig index 381f89634b..139593b9f5 100644 --- a/test/behavior/math_stage1.zig +++ b/test/behavior/math_stage1.zig @@ -6,14 +6,6 @@ const maxInt = std.math.maxInt; const minInt = std.math.minInt; const mem = std.mem; -test "@subWithOverflow" { - var result: u8 = undefined; - try expect(@subWithOverflow(u8, 1, 2, &result)); - try expect(result == 255); - try expect(!@subWithOverflow(u8, 1, 1, &result)); - try expect(result == 0); -} - test "@shlWithOverflow" { var result: u16 = undefined; try expect(@shlWithOverflow(u16, 0b0010111111111111, 3, &result)); From e106e18d96595bdc4bc037e0b36900992a576160 Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Tue, 21 Dec 2021 01:38:46 +0100 Subject: [PATCH 17/17] stage2: @shlWithOverflow --- src/Air.zig | 7 ++++++ src/Liveness.zig | 3 ++- src/Sema.zig | 47 +++++++++++++++++++++++++++++------ src/arch/aarch64/CodeGen.zig | 12 ++++++--- src/arch/arm/CodeGen.zig | 12 ++++++--- src/arch/riscv64/CodeGen.zig | 12 ++++++--- src/arch/x86_64/CodeGen.zig | 12 ++++++--- src/codegen/c.zig | 7 ++++++ src/codegen/llvm.zig | 36 +++++++++++++++++++++++++++ src/print_air.zig | 1 + src/value.zig | 31 +++++++++++++++++++++++ test/behavior/eval.zig | 16 ++++++++++++ test/behavior/eval_stage1.zig | 16 ------------ test/behavior/math.zig | 28 +++++++++++++++++++++ test/behavior/math_stage1.zig | 20 --------------- 15 files changed, 203 insertions(+), 57 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index 912e70daed..0d660ff6ae 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -153,6 +153,12 @@ pub const Inst = struct { /// of the operation. /// Uses the `pl_op` field with payload `Bin`. mul_with_overflow, + /// Integer left-shift with overflow. Both operands are guaranteed to be the same type, + /// and the result is bool. The wrapped value is written to the pointer given by the in + /// operand of the `pl_op` field. Payload is `Bin` with `lhs` and `rhs` the relevant types + /// of the operation. 
+ /// Uses the `pl_op` field with payload `Bin`. + shl_with_overflow, /// Allocates stack local memory. /// Uses the `ty` field. alloc, @@ -830,6 +836,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .add_with_overflow, .sub_with_overflow, .mul_with_overflow, + .shl_with_overflow, => return Type.initTag(.bool), } } diff --git a/src/Liveness.zig b/src/Liveness.zig index ad0ce7ffb9..a9f7fffca4 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -387,7 +387,8 @@ fn analyzeInst( .add_with_overflow, .sub_with_overflow, .mul_with_overflow, - => { + .shl_with_overflow, + => { const pl_op = inst_datas[inst].pl_op; const extra = a.air.extraData(Air.Bin, pl_op.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.lhs, extra.rhs }); diff --git a/src/Sema.zig b/src/Sema.zig index 2eb661503e..2ac4d327b1 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -7425,8 +7425,32 @@ fn zirOverflowArithmetic( } } }, - .shl_with_overflow, - => return sema.fail(block, src, "TODO implement Sema.zirOverflowArithmetic for {}", .{zir_tag}), + .shl_with_overflow => { + // If lhs is zero, the result is zero and no overflow occurred. + // If rhs is zero, the result is lhs (even if undefined) and no overflow occurred. + // Otherwise if either of the arguments is undefined, both results are undefined. 
+ if (maybe_lhs_val) |lhs_val| { + if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) { + break :result .{ .overflowed = .no, .wrapped = lhs }; + } + } + if (maybe_rhs_val) |rhs_val| { + if (!rhs_val.isUndef() and rhs_val.compareWithZero(.eq)) { + break :result .{ .overflowed = .no, .wrapped = lhs }; + } + } + if (maybe_lhs_val) |lhs_val| { + if (maybe_rhs_val) |rhs_val| { + if (lhs_val.isUndef() or rhs_val.isUndef()) { + break :result .{ .overflowed = .undef, .wrapped = try sema.addConstUndef(dest_ty) }; + } + + const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, target); + const inst = try sema.addConstant(dest_ty, result.wrapped_result); + break :result .{ .overflowed = if (result.overflowed) .yes else .no, .wrapped = inst }; + } + } + }, else => unreachable, } @@ -7434,7 +7458,8 @@ fn zirOverflowArithmetic( .add_with_overflow => .add_with_overflow, .mul_with_overflow => .mul_with_overflow, .sub_with_overflow => .sub_with_overflow, - else => return sema.fail(block, src, "TODO implement runtime Sema.zirOverflowArithmetic for {}", .{zir_tag}), + .shl_with_overflow => .shl_with_overflow, + else => unreachable, }; try sema.requireRuntimeBlock(block, src); @@ -9041,11 +9066,17 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi switch (operand.zigTypeTag()) { .ComptimeInt => return Air.Inst.Ref.comptime_int_type, .Int => { - var count: u16 = 0; - var s = operand.bitSize(sema.mod.getTarget()) - 1; - while (s != 0) : (s >>= 1) { - count += 1; - } + const bits = operand.bitSize(sema.mod.getTarget()); + const count = if (bits == 0) + 0 + else blk: { + var count: u16 = 0; + var s = bits - 1; + while (s != 0) : (s >>= 1) { + count += 1; + } + break :blk count; + }; const res = try Module.makeIntType(sema.arena, .unsigned, count); return sema.addType(res); }, diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index b381116a51..0142e6abf8 100644 --- a/src/arch/aarch64/CodeGen.zig +++ 
b/src/arch/aarch64/CodeGen.zig @@ -524,6 +524,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .add_with_overflow => try self.airAddWithOverflow(inst), .sub_with_overflow => try self.airSubWithOverflow(inst), .mul_with_overflow => try self.airMulWithOverflow(inst), + .shl_with_overflow => try self.airShlWithOverflow(inst), .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst), @@ -975,17 +976,22 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airAddResultWithOverflow for {}", .{self.target.cpu.arch}); + return self.fail("TODO implement airAddWithOverflow for {}", .{self.target.cpu.arch}); } fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airSubResultWithOverflow for {}", .{self.target.cpu.arch}); + return self.fail("TODO implement airSubWithOverflow for {}", .{self.target.cpu.arch}); } fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airMulResultWithOverflow for {}", .{self.target.cpu.arch}); + return self.fail("TODO implement airMulWithOverflow for {}", .{self.target.cpu.arch}); +} + +fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airShlWithOverflow for {}", .{self.target.cpu.arch}); } fn airDiv(self: *Self, inst: Air.Inst.Index) !void { diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index f887810d9a..9e43808d1f 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -522,6 +522,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .add_with_overflow => try self.airAddWithOverflow(inst), .sub_with_overflow => try self.airSubWithOverflow(inst), .mul_with_overflow => try self.airMulWithOverflow(inst), + .shl_with_overflow => try self.airShlWithOverflow(inst), 
.div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst), @@ -1005,17 +1006,22 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airAddResultWithOverflow for {}", .{self.target.cpu.arch}); + return self.fail("TODO implement airAddWithOverflow for {}", .{self.target.cpu.arch}); } fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airSubResultWithOverflow for {}", .{self.target.cpu.arch}); + return self.fail("TODO implement airSubWithOverflow for {}", .{self.target.cpu.arch}); } fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airMulResultWithOverflow for {}", .{self.target.cpu.arch}); + return self.fail("TODO implement airMulWithOverflow for {}", .{self.target.cpu.arch}); +} + +fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airShlWithOverflow for {}", .{self.target.cpu.arch}); } fn airDiv(self: *Self, inst: Air.Inst.Index) !void { diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 56904206ab..3930e7364e 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -503,6 +503,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .add_with_overflow => try self.airAddWithOverflow(inst), .sub_with_overflow => try self.airSubWithOverflow(inst), .mul_with_overflow => try self.airMulWithOverflow(inst), + .shl_with_overflow => try self.airShlWithOverflow(inst), .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst), @@ -920,17 +921,22 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airAddResultWithOverflow for {}", .{self.target.cpu.arch}); + return 
self.fail("TODO implement airAddWithOverflow for {}", .{self.target.cpu.arch}); } fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airSubResultWithOverflow for {}", .{self.target.cpu.arch}); + return self.fail("TODO implement airSubWithOverflow for {}", .{self.target.cpu.arch}); } fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airMulResultWithOverflow for {}", .{self.target.cpu.arch}); + return self.fail("TODO implement airMulWithOverflow for {}", .{self.target.cpu.arch}); +} + +fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airShlWithOverflow for {}", .{self.target.cpu.arch}); } fn airDiv(self: *Self, inst: Air.Inst.Index) !void { diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index be26372031..9629280583 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -556,6 +556,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .add_with_overflow => try self.airAddWithOverflow(inst), .sub_with_overflow => try self.airSubWithOverflow(inst), .mul_with_overflow => try self.airMulWithOverflow(inst), + .shl_with_overflow => try self.airShlWithOverflow(inst), .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst), @@ -1034,17 +1035,22 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airAddResultWithOverflow for {}", .{self.target.cpu.arch}); + return self.fail("TODO implement airAddWithOverflow for {}", .{self.target.cpu.arch}); } fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airSubResultWithOverflow for {}", .{self.target.cpu.arch}); + return self.fail("TODO implement airSubWithOverflow for {}", .{self.target.cpu.arch}); 
} fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { _ = inst; - return self.fail("TODO implement airMulResultWithOverflow for {}", .{self.target.cpu.arch}); + return self.fail("TODO implement airMulWithOverflow for {}", .{self.target.cpu.arch}); +} + +fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airShlWithOverflow for {}", .{self.target.cpu.arch}); } fn airDiv(self: *Self, inst: Air.Inst.Index) !void { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 68b700db17..12ce813a0f 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1159,6 +1159,7 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO .add_with_overflow => try airAddWithOverflow(f, inst), .sub_with_overflow => try airSubWithOverflow(f, inst), .mul_with_overflow => try airMulWithOverflow(f, inst), + .shl_with_overflow => try airShlWithOverflow(f, inst), .min => try airMinMax(f, inst, "<"), .max => try airMinMax(f, inst, ">"), @@ -1887,6 +1888,12 @@ fn airMulWithOverflow(f: *Function, inst: Air.Inst.Index) !CValue { return f.fail("TODO mul with overflow", .{}); } +fn airShlWithOverflow(f: *Function, inst: Air.Inst.Index) !CValue { + _ = f; + _ = inst; + return f.fail("TODO shl with overflow", .{}); +} + fn airNot(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) return CValue.none; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 4089a39cb3..78848bee28 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1721,6 +1721,7 @@ pub const FuncGen = struct { .add_with_overflow => try self.airOverflow(inst, "llvm.sadd.with.overflow", "llvm.uadd.with.overflow"), .sub_with_overflow => try self.airOverflow(inst, "llvm.ssub.with.overflow", "llvm.usub.with.overflow"), .mul_with_overflow => try self.airOverflow(inst, "llvm.smul.with.overflow", "llvm.umul.with.overflow"), + .shl_with_overflow => try self.airShlWithOverflow(inst), .bit_and, .bool_and 
=> try self.airAnd(inst), .bit_or, .bool_or => try self.airOr(inst), @@ -3176,6 +3177,41 @@ pub const FuncGen = struct { return overflow_bit; } + fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const extra = self.air.extraData(Air.Bin, pl_op.payload).data; + + const ptr = try self.resolveInst(pl_op.operand); + const lhs = try self.resolveInst(extra.lhs); + const rhs = try self.resolveInst(extra.rhs); + + const ptr_ty = self.air.typeOf(pl_op.operand); + const lhs_ty = self.air.typeOf(extra.lhs); + const rhs_ty = self.air.typeOf(extra.rhs); + + const tg = self.dg.module.getTarget(); + + const casted_rhs = if (rhs_ty.bitSize(tg) < lhs_ty.bitSize(tg)) + self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_ty), "") + else + rhs; + + const result = self.builder.buildShl(lhs, casted_rhs, ""); + const reconstructed = if (lhs_ty.isSignedInt()) + self.builder.buildAShr(result, casted_rhs, "") + else + self.builder.buildLShr(result, casted_rhs, ""); + + const overflow_bit = self.builder.buildICmp(.NE, lhs, reconstructed, ""); + + self.store(ptr, ptr_ty, result, .NotAtomic); + + return overflow_bit; + } + fn airAnd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { if (self.liveness.isUnused(inst)) return null; diff --git a/src/print_air.zig b/src/print_air.zig index d6bfaac4bc..b3a1be28f4 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -233,6 +233,7 @@ const Writer = struct { .add_with_overflow, .sub_with_overflow, .mul_with_overflow, + .shl_with_overflow, => try w.writeOverflow(s, inst), } } diff --git a/src/value.zig b/src/value.zig index 19546ed8b9..c105e3e742 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2548,6 +2548,37 @@ pub const Value = extern union { return fromBigInt(allocator, result_bigint.toConst()); } + pub fn shlWithOverflow( + lhs: Value, + rhs: Value, + ty: Type, + allocator: 
Allocator, + target: Target, + ) !OverflowArithmeticResult { + const info = ty.intInfo(target); + var lhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space); + const shift = @intCast(usize, rhs.toUnsignedInt()); + const limbs = try allocator.alloc( + std.math.big.Limb, + lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, + ); + var result_bigint = BigIntMutable{ + .limbs = limbs, + .positive = undefined, + .len = undefined, + }; + result_bigint.shiftLeft(lhs_bigint, shift); + const overflowed = !result_bigint.toConst().fitsInTwosComp(info.signedness, info.bits); + if (overflowed) { + result_bigint.truncate(result_bigint.toConst(), info.signedness, info.bits); + } + return OverflowArithmeticResult{ + .overflowed = overflowed, + .wrapped_result = try fromBigInt(allocator, result_bigint.toConst()), + }; + } + pub fn shlSat( lhs: Value, rhs: Value, diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index 84af3ecfbb..01d62f353f 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -451,3 +451,19 @@ test "comptime bitwise operators" { try expect(~@as(u128, 0) == 0xffffffffffffffffffffffffffffffff); } } + +test "comptime shlWithOverflow" { + const ct_shifted: u64 = comptime amt: { + var amt = @as(u64, 0); + _ = @shlWithOverflow(u64, ~@as(u64, 0), 16, &amt); + break :amt amt; + }; + + const rt_shifted: u64 = amt: { + var amt = @as(u64, 0); + _ = @shlWithOverflow(u64, ~@as(u64, 0), 16, &amt); + break :amt amt; + }; + + try expect(ct_shifted == rt_shifted); +} diff --git a/test/behavior/eval_stage1.zig b/test/behavior/eval_stage1.zig index 348c685a26..8f7e7dbe0d 100644 --- a/test/behavior/eval_stage1.zig +++ b/test/behavior/eval_stage1.zig @@ -162,22 +162,6 @@ test "const ptr to comptime mutable data is not memoized" { } } -test "comptime shlWithOverflow" { - const ct_shifted: u64 = comptime amt: { - var amt = @as(u64, 0); - _ = @shlWithOverflow(u64, ~@as(u64, 0), 16, &amt); - break :amt amt; - }; - 
- const rt_shifted: u64 = amt: { - var amt = @as(u64, 0); - _ = @shlWithOverflow(u64, ~@as(u64, 0), 16, &amt); - break :amt amt; - }; - - try expect(ct_shifted == rt_shifted); -} - test "runtime 128 bit integer division" { var a: u128 = 152313999999999991610955792383; var b: u128 = 10000000000000000000; diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 50a7267c93..b3821f7732 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -511,3 +511,31 @@ test "@subWithOverflow" { try expect(!@subWithOverflow(u8, a, b, &result)); try expect(result == 0); } + +test "@shlWithOverflow" { + var result: u16 = undefined; + try expect(@shlWithOverflow(u16, 0b0010111111111111, 3, &result)); + try expect(result == 0b0111111111111000); + try expect(!@shlWithOverflow(u16, 0b0010111111111111, 2, &result)); + try expect(result == 0b1011111111111100); + + var a: u16 = 0b0000_0000_0000_0011; + var b: u4 = 15; + try expect(@shlWithOverflow(u16, a, b, &result)); + try expect(result == 0b1000_0000_0000_0000); + b = 14; + try expect(!@shlWithOverflow(u16, a, b, &result)); + try expect(result == 0b1100_0000_0000_0000); +} + +test "overflow arithmetic with u0 values" { + var result: u0 = undefined; + try expect(!@addWithOverflow(u0, 0, 0, &result)); + try expect(result == 0); + try expect(!@subWithOverflow(u0, 0, 0, &result)); + try expect(result == 0); + try expect(!@mulWithOverflow(u0, 0, 0, &result)); + try expect(result == 0); + try expect(!@shlWithOverflow(u0, 0, 0, &result)); + try expect(result == 0); +} diff --git a/test/behavior/math_stage1.zig b/test/behavior/math_stage1.zig index 139593b9f5..2633f23aac 100644 --- a/test/behavior/math_stage1.zig +++ b/test/behavior/math_stage1.zig @@ -6,26 +6,6 @@ const maxInt = std.math.maxInt; const minInt = std.math.minInt; const mem = std.mem; -test "@shlWithOverflow" { - var result: u16 = undefined; - try expect(@shlWithOverflow(u16, 0b0010111111111111, 3, &result)); - try expect(result == 0b0111111111111000); - try 
expect(!@shlWithOverflow(u16, 0b0010111111111111, 2, &result)); - try expect(result == 0b1011111111111100); -} - -test "overflow arithmetic with u0 values" { - var result: u0 = undefined; - try expect(!@addWithOverflow(u0, 0, 0, &result)); - try expect(result == 0); - try expect(!@subWithOverflow(u0, 0, 0, &result)); - try expect(result == 0); - try expect(!@mulWithOverflow(u0, 0, 0, &result)); - try expect(result == 0); - try expect(!@shlWithOverflow(u0, 0, 0, &result)); - try expect(result == 0); -} - test "@clz vectors" { try testClzVectors(); comptime try testClzVectors();