- This is guaranteed to be a tail call, and therefore will not cause a new stack frame.
+ This is guaranteed to tail call, and therefore will not cause a new stack frame.
{#header_close#}
{#header_close#}
- {#header_open|Await#}
+
+ {#header_open|Async and Await#}
- The {#syntax#}await{#endsyntax#} keyword is used to coordinate with an async function's
- {#syntax#}return{#endsyntax#} statement.
-
-
- {#syntax#}await{#endsyntax#} is valid only in an {#syntax#}async{#endsyntax#} function, and it takes
- as an operand a promise handle.
- If the async function associated with the promise handle has already returned,
- then {#syntax#}await{#endsyntax#} destroys the target async function, and gives the return value.
- Otherwise, {#syntax#}await{#endsyntax#} suspends the current async function, registering its
- promise handle with the target coroutine. It becomes the target coroutine's responsibility
- to have ensured that it will be resumed or destroyed. When the target coroutine reaches
- its return statement, it gives the return value to the awaiter, destroys itself, and then
- resumes the awaiter.
-
-
- A promise handle must be consumed exactly once after it is created, either by {#syntax#}cancel{#endsyntax#} or {#syntax#}await{#endsyntax#}.
-
-
- {#syntax#}await{#endsyntax#} counts as a suspend point, and therefore at every {#syntax#}await{#endsyntax#},
- a coroutine can be potentially destroyed, which would run {#syntax#}defer{#endsyntax#} and {#syntax#}errdefer{#endsyntax#} expressions.
+ In the same way that every {#syntax#}suspend{#endsyntax#} has a matching
+ {#syntax#}resume{#endsyntax#}, every {#syntax#}async{#endsyntax#} has a matching {#syntax#}await{#endsyntax#}.
{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;
-var a_promise: promise = undefined;
+test "async and await" {
+ // Here we have an exception where we do not match an async
+ // with an await. The test block is not async and so cannot
+ // have a suspend point in it.
+ // This is well-defined behavior, and everything is OK here.
+ // Note however that there would be no way to collect the
+ // return value of amain, if it were something other than void.
+ _ = async amain();
+}
+
+fn amain() void {
+ var frame = async func();
+ comptime assert(@typeOf(frame) == @Frame(func));
+
+ const ptr: anyframe->void = &frame;
+ const any_ptr: anyframe = ptr;
+
+ resume any_ptr;
+ await ptr;
+}
+
+fn func() void {
+ suspend;
+}
+ {#code_end#}
+
+ The {#syntax#}await{#endsyntax#} keyword is used to coordinate with an async function's
+ {#syntax#}return{#endsyntax#} statement.
+
+
+ {#syntax#}await{#endsyntax#} is a suspend point, and takes as an operand anything that
+ implicitly casts to {#syntax#}anyframe->T{#endsyntax#}.
+
+
+ There is a common misconception that {#syntax#}await{#endsyntax#} resumes the target function.
+ It is the other way around: it suspends until the target function completes.
+ In the event that the target function has already completed, {#syntax#}await{#endsyntax#}
+ does not suspend; instead it copies the
+ return value directly from the target function's frame.
+
+ {#code_begin|test#}
+const std = @import("std");
+const assert = std.debug.assert;
+
+var the_frame: anyframe = undefined;
var final_result: i32 = 0;
-test "coroutine await" {
+test "async function await" {
seq('a');
- const p = async amain() catch unreachable;
+ _ = async amain();
seq('f');
- resume a_promise;
+ resume the_frame;
seq('i');
assert(final_result == 1234);
assert(std.mem.eql(u8, seq_points, "abcdefghi"));
}
-async fn amain() void {
+fn amain() void {
seq('b');
- const p = async another() catch unreachable;
+ var f = async another();
seq('e');
- final_result = await p;
+ final_result = await f;
seq('h');
}
-async fn another() i32 {
+fn another() i32 {
seq('c');
suspend {
seq('d');
- a_promise = @handle();
+ the_frame = @frame();
}
seq('g');
return 1234;
@@ -6211,31 +6170,156 @@ fn seq(c: u8) void {
{#code_end#}
In general, {#syntax#}suspend{#endsyntax#} is lower level than {#syntax#}await{#endsyntax#}. Most application
- code will use only {#syntax#}async{#endsyntax#} and {#syntax#}await{#endsyntax#}, but event loop
- implementations will make use of {#syntax#}suspend{#endsyntax#} internally.
+ code will use only {#syntax#}async{#endsyntax#} and {#syntax#}await{#endsyntax#}, but event loop
+ implementations will make use of {#syntax#}suspend{#endsyntax#} internally.
{#header_close#}
- {#header_open|Open Issues#}
+
+ {#header_open|Async Function Example#}
- There are a few issues with coroutines that are considered unresolved. Best be aware of them,
- as the situation is likely to change before 1.0.0:
+ Putting all of this together, here is an example of typical
+ {#syntax#}async{#endsyntax#}/{#syntax#}await{#endsyntax#} usage:
+
+ {#code_begin|exe|async#}
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+
+pub fn main() void {
+ _ = async amainWrap();
+
+ // Typically we would use an event loop to manage resuming async functions,
+ // but in this example we hard code what the event loop would do,
+ // to make things deterministic.
+ resume global_file_frame;
+ resume global_download_frame;
+}
+
+fn amainWrap() void {
+ amain() catch |e| {
+ std.debug.warn("{}\n", e);
+ if (@errorReturnTrace()) |trace| {
+ std.debug.dumpStackTrace(trace.*);
+ }
+ std.process.exit(1);
+ };
+}
+
+fn amain() !void {
+ const allocator = std.heap.direct_allocator;
+ var download_frame = async fetchUrl(allocator, "https://example.com/");
+ var awaited_download_frame = false;
+ errdefer if (!awaited_download_frame) {
+ if (await download_frame) |r| allocator.free(r) else |_| {}
+ };
+
+ var file_frame = async readFile(allocator, "something.txt");
+ var awaited_file_frame = false;
+ errdefer if (!awaited_file_frame) {
+ if (await file_frame) |r| allocator.free(r) else |_| {}
+ };
+
+ awaited_file_frame = true;
+ const file_text = try await file_frame;
+ defer allocator.free(file_text);
+
+ awaited_download_frame = true;
+ const download_text = try await download_frame;
+ defer allocator.free(download_text);
+
+ std.debug.warn("download_text: {}\n", download_text);
+ std.debug.warn("file_text: {}\n", file_text);
+}
+
+var global_download_frame: anyframe = undefined;
+fn fetchUrl(allocator: *Allocator, url: []const u8) ![]u8 {
+ const result = try std.mem.dupe(allocator, u8, "this is the downloaded url contents");
+ errdefer allocator.free(result);
+ suspend {
+ global_download_frame = @frame();
+ }
+ std.debug.warn("fetchUrl returning\n");
+ return result;
+}
+
+var global_file_frame: anyframe = undefined;
+fn readFile(allocator: *Allocator, filename: []const u8) ![]u8 {
+ const result = try std.mem.dupe(allocator, u8, "this is the file contents");
+ errdefer allocator.free(result);
+ suspend {
+ global_file_frame = @frame();
+ }
+ std.debug.warn("readFile returning\n");
+ return result;
+}
+ {#code_end#}
+
+ Now we remove the {#syntax#}suspend{#endsyntax#} and {#syntax#}resume{#endsyntax#} code, and
+ observe the same behavior, with one tiny difference:
+
+ {#code_begin|exe|blocking#}
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+
+pub fn main() void {
+ _ = async amainWrap();
+}
+
+fn amainWrap() void {
+ amain() catch |e| {
+ std.debug.warn("{}\n", e);
+ if (@errorReturnTrace()) |trace| {
+ std.debug.dumpStackTrace(trace.*);
+ }
+ std.process.exit(1);
+ };
+}
+
+fn amain() !void {
+ const allocator = std.heap.direct_allocator;
+ var download_frame = async fetchUrl(allocator, "https://example.com/");
+ var awaited_download_frame = false;
+ errdefer if (!awaited_download_frame) {
+ if (await download_frame) |r| allocator.free(r) else |_| {}
+ };
+
+ var file_frame = async readFile(allocator, "something.txt");
+ var awaited_file_frame = false;
+ errdefer if (!awaited_file_frame) {
+ if (await file_frame) |r| allocator.free(r) else |_| {}
+ };
+
+ awaited_file_frame = true;
+ const file_text = try await file_frame;
+ defer allocator.free(file_text);
+
+ awaited_download_frame = true;
+ const download_text = try await download_frame;
+ defer allocator.free(download_text);
+
+ std.debug.warn("download_text: {}\n", download_text);
+ std.debug.warn("file_text: {}\n", file_text);
+}
+
+fn fetchUrl(allocator: *Allocator, url: []const u8) ![]u8 {
+ const result = try std.mem.dupe(allocator, u8, "this is the downloaded url contents");
+ errdefer allocator.free(result);
+ std.debug.warn("fetchUrl returning\n");
+ return result;
+}
+
+fn readFile(allocator: *Allocator, filename: []const u8) ![]u8 {
+ const result = try std.mem.dupe(allocator, u8, "this is the file contents");
+ errdefer allocator.free(result);
+ std.debug.warn("readFile returning\n");
+ return result;
+}
+ {#code_end#}
+
+ Previously, the {#syntax#}fetchUrl{#endsyntax#} and {#syntax#}readFile{#endsyntax#} functions suspended,
+ and were resumed in an order determined by the {#syntax#}main{#endsyntax#} function, so
+ "readFile returning" was printed before "fetchUrl returning". Now, since there are no
+ suspend points, the order of the printed "... returning" messages is determined by the
+ order of the {#syntax#}async{#endsyntax#} callsites: "fetchUrl returning" is printed first.
-
- - Async functions have optimizations disabled - even in release modes - due to an
- LLVM bug.
-
- -
- There are some situations where we can know statically that there will not be
- memory allocation failure, but Zig still forces us to handle it.
- TODO file an issue for this and link it here.
-
- -
- Zig does not take advantage of LLVM's allocation elision optimization for
- coroutines. It crashed LLVM when I tried to do it the first time. This is
- related to the other 2 bullet points here. See
- #802.
-
-
{#header_close#}
{#header_close#}
@@ -6293,6 +6377,49 @@ comptime {
Note: This function is deprecated. Use {#link|@typeInfo#} instead.
{#header_close#}
+
+ {#header_open|@asyncCall#}
+ {#syntax#}@asyncCall(frame_buffer: []u8, result_ptr, function_ptr, args: ...) anyframe->T{#endsyntax#}
+
+ {#syntax#}@asyncCall{#endsyntax#} performs an {#syntax#}async{#endsyntax#} call on a function pointer,
+ which may or may not be an {#link|async function|Async Functions#}.
+
+
+ The provided {#syntax#}frame_buffer{#endsyntax#} must be large enough to fit the entire function frame.
+ This size can be determined with {#link|@frameSize#}. Providing a buffer that is
+ too small invokes safety-checked {#link|Undefined Behavior#}.
+
+
+ {#syntax#}result_ptr{#endsyntax#} is optional ({#link|null#} may be provided). If provided,
+ the function call will write its result directly to the result pointer, which will be available to
+ read after {#link|await|Async and Await#} completes. Any result location provided to
+ {#syntax#}await{#endsyntax#} will copy the result from {#syntax#}result_ptr{#endsyntax#}.
+
+ {#code_begin|test#}
+const std = @import("std");
+const assert = std.debug.assert;
+
+test "async fn pointer in a struct field" {
+ var data: i32 = 1;
+ const Foo = struct {
+ bar: async fn (*i32) void,
+ };
+ var foo = Foo{ .bar = func };
+ var bytes: [64]u8 = undefined;
+ const f = @asyncCall(&bytes, {}, foo.bar, &data);
+ assert(data == 2);
+ resume f;
+ assert(data == 4);
+}
+
+async fn func(y: *i32) void {
+ defer y.* += 2;
+ y.* += 1;
+ suspend;
+}
+ {#code_end#}
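+
+ As a minimal sketch of the {#syntax#}result_ptr{#endsyntax#} behavior (the
+ helper function, the buffer size, and the global frame variable here are
+ illustrative assumptions, not part of the builtin's API):
+
+ {#code_begin|test#}
+const std = @import("std");
+const assert = std.debug.assert;
+
+var global_frame: anyframe = undefined;
+
+test "@asyncCall with a result pointer" {
+    _ = async amain();
+    resume global_frame;
+}
+
+fn amain() void {
+    // Assume 128 bytes is enough for add's frame; @frameSize can compute
+    // the exact size for a runtime-known function value.
+    var bytes: [128]u8 = undefined;
+    var result: i32 = undefined;
+    var add_fn: async fn (i32, i32) i32 = add;
+    const f = @asyncCall(&bytes, &result, add_fn, 3, 4);
+    // await copies the value from result_ptr into its own result location.
+    const answer = await f;
+    assert(answer == 7);
+    assert(result == 7);
+}
+
+async fn add(a: i32, b: i32) i32 {
+    suspend {
+        global_frame = @frame();
+    }
+    return a + b;
+}
+ {#code_end#}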
+ {#header_close#}
+
{#header_open|@atomicLoad#}
{#syntax#}@atomicLoad(comptime T: type, ptr: *const T, comptime ordering: builtin.AtomicOrder) T{#endsyntax#}
@@ -6883,6 +7010,44 @@ export fn @"A function name that is a complete sentence."() void {}
{#see_also|@intToFloat#}
{#header_close#}
+ {#header_open|@frame#}
+
+ {#syntax#}@frame() *@Frame(func){#endsyntax#}
+
+ This function returns a pointer to the frame of the function in scope. This type
+ can be {#link|implicitly cast|Implicit Casts#} to {#syntax#}anyframe->T{#endsyntax#} and
+ to {#syntax#}anyframe{#endsyntax#}, where {#syntax#}T{#endsyntax#} is the return type
+ of that function.
+
+
+ This function does not mark a suspension point, but it does cause the function in scope
+ to become an {#link|async function|Async Functions#}.
+
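+ As a minimal sketch (the function name {#syntax#}func{#endsyntax#} is
+ illustrative), an async function can publish a pointer to its own frame so
+ that other code can resume it later:
+
+ {#code_begin|test#}
+var the_frame: anyframe = undefined;
+
+test "@frame" {
+    _ = async func();
+    // func is suspended; resume it through the frame pointer it saved.
+    resume the_frame;
+}
+
+fn func() void {
+    suspend {
+        // @frame() returns *@Frame(func), which implicitly casts to anyframe.
+        the_frame = @frame();
+    }
+}
+ {#code_end#}
+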
+ {#header_close#}
+
+ {#header_open|@Frame#}
+ {#syntax#}@Frame(func: var) type{#endsyntax#}
+
+ This function returns the frame type of a function. This works for {#link|Async Functions#}
+ as well as any function without a specific calling convention.
+
+
+ This type is suitable to be used as the return type of {#link|async|Async and Await#} which
+ allows one to, for example, heap-allocate an async function frame:
+
+ {#code_begin|test#}
+const std = @import("std");
+
+test "heap allocated frame" {
+ const frame = try std.heap.direct_allocator.create(@Frame(func));
+ frame.* = async func();
+}
+
+fn func() void {
+ suspend;
+}
+ {#code_end#}
+ {#header_close#}
+
{#header_open|@frameAddress#}
{#syntax#}@frameAddress() usize{#endsyntax#}
@@ -6898,14 +7063,14 @@ export fn @"A function name that is a complete sentence."() void {}
{#header_close#}
- {#header_open|@handle#}
- {#syntax#}@handle(){#endsyntax#}
+ {#header_open|@frameSize#}
+ {#syntax#}@frameSize(func: var) usize{#endsyntax#}
- This function returns a {#syntax#}promise->T{#endsyntax#} type, where {#syntax#}T{#endsyntax#}
- is the return type of the async function in scope.
+ This is the same as {#syntax#}@sizeOf(@Frame(func)){#endsyntax#}, where {#syntax#}func{#endsyntax#}
+ may be runtime-known.
- This function is only valid within an async function scope.
+ This function is typically used in conjunction with {#link|@asyncCall#}.
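+
+ As a minimal sketch (assuming a comptime-known function is accepted here),
+ the equivalence with {#syntax#}@sizeOf{#endsyntax#} can be checked directly:
+
+ {#code_begin|test#}
+const std = @import("std");
+const assert = std.debug.assert;
+
+test "@frameSize" {
+    // For a comptime-known function, @frameSize(func) equals
+    // @sizeOf(@Frame(func)).
+    assert(@frameSize(func) == @sizeOf(@Frame(func)));
+}
+
+fn func() void {
+    suspend;
+}
+ {#code_end#}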
{#header_close#}
@@ -8045,8 +8210,7 @@ pub fn build(b: *Builder) void {
Zig has a compile option --single-threaded which has the following effects:
- All {#link|Thread Local Variables#} are treated as {#link|Global Variables#}.
- - The overhead of {#link|Coroutines#} becomes equivalent to function call overhead.
- TODO: please note this will not be implemented until the upcoming Coroutine Rewrite
+ - The overhead of {#link|Async Functions#} becomes equivalent to function call overhead.
- The {#syntax#}@import("builtin").single_threaded{#endsyntax#} becomes {#syntax#}true{#endsyntax#}
and therefore various userland APIs which read this variable become more efficient.
For example {#syntax#}std.Mutex{#endsyntax#} becomes
@@ -9794,7 +9958,6 @@ PrimaryExpr
<- AsmExpr
/ IfExpr
/ KEYWORD_break BreakLabel? Expr?
- / KEYWORD_cancel Expr
/ KEYWORD_comptime Expr
/ KEYWORD_continue BreakLabel?
/ KEYWORD_resume Expr
@@ -9825,7 +9988,7 @@ TypeExpr <- PrefixTypeOp* ErrorUnionExpr
ErrorUnionExpr <- SuffixExpr (EXCLAMATIONMARK TypeExpr)?
SuffixExpr
- <- AsyncPrefix PrimaryTypeExpr SuffixOp* FnCallArguments
+ <- KEYWORD_async PrimaryTypeExpr SuffixOp* FnCallArguments
/ PrimaryTypeExpr (SuffixOp / FnCallArguments)*
PrimaryTypeExpr
@@ -9901,7 +10064,7 @@ FnCC
<- KEYWORD_nakedcc
/ KEYWORD_stdcallcc
/ KEYWORD_extern
- / KEYWORD_async (LARROW TypeExpr RARROW)?
+ / KEYWORD_async
ParamDecl <- (KEYWORD_noalias / KEYWORD_comptime)? (IDENTIFIER COLON)? ParamType
@@ -10006,8 +10169,6 @@ SuffixOp
/ DOTASTERISK
/ DOTQUESTIONMARK
-AsyncPrefix <- KEYWORD_async (LARROW PrefixExpr RARROW)?
-
FnCallArguments <- LPAREN ExprList RPAREN
# Ptr specific
@@ -10150,7 +10311,6 @@ KEYWORD_asm <- 'asm' end_of_word
KEYWORD_async <- 'async' end_of_word
KEYWORD_await <- 'await' end_of_word
KEYWORD_break <- 'break' end_of_word
-KEYWORD_cancel <- 'cancel' end_of_word
KEYWORD_catch <- 'catch' end_of_word
KEYWORD_comptime <- 'comptime' end_of_word
KEYWORD_const <- 'const' end_of_word
@@ -10195,7 +10355,7 @@ KEYWORD_volatile <- 'volatile' end_of_word
KEYWORD_while <- 'while' end_of_word
keyword <- KEYWORD_align / KEYWORD_and / KEYWORD_allowzero / KEYWORD_asm
- / KEYWORD_async / KEYWORD_await / KEYWORD_break / KEYWORD_cancel
+ / KEYWORD_async / KEYWORD_await / KEYWORD_break
/ KEYWORD_catch / KEYWORD_comptime / KEYWORD_const / KEYWORD_continue
/ KEYWORD_defer / KEYWORD_else / KEYWORD_enum / KEYWORD_errdefer
/ KEYWORD_error / KEYWORD_export / KEYWORD_extern / KEYWORD_false
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index db89af7a42..df4d436b50 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -1181,7 +1181,6 @@ pub const Builder = struct {
ast.Node.Id.ErrorTag => return error.Unimplemented,
ast.Node.Id.AsmInput => return error.Unimplemented,
ast.Node.Id.AsmOutput => return error.Unimplemented,
- ast.Node.Id.AsyncAttribute => return error.Unimplemented,
ast.Node.Id.ParamDecl => return error.Unimplemented,
ast.Node.Id.FieldInitializer => return error.Unimplemented,
ast.Node.Id.EnumLiteral => return error.Unimplemented,
@@ -1904,20 +1903,6 @@ pub const Builder = struct {
}
return error.Unimplemented;
- //ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_field_ptr, return_value);
- //IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node,
- // get_optional_type(irb->codegen, irb->codegen->builtin_types.entry_promise));
- //// TODO replace replacement_value with @intToPtr(?promise, 0x1) when it doesn't crash zig
- //IrInstruction *replacement_value = irb->exec->coro_handle;
- //IrInstruction *maybe_await_handle = ir_build_atomic_rmw(irb, scope, node,
- // promise_type_val, irb->exec->coro_awaiter_field_ptr, nullptr, replacement_value, nullptr,
- // AtomicRmwOp_xchg, AtomicOrderSeqCst);
- //ir_build_store_ptr(irb, scope, node, irb->exec->await_handle_var_ptr, maybe_await_handle);
- //IrInstruction *is_non_null = ir_build_test_nonnull(irb, scope, node, maybe_await_handle);
- //IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false);
- //return ir_build_cond_br(irb, scope, node, is_non_null, irb->exec->coro_normal_final, irb->exec->coro_early_final,
- // is_comptime);
- //// the above blocks are rendered by ir_gen after the rest of codegen
}
const Ident = union(enum) {
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index 95b71a55c9..1f5f07eff0 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -627,7 +627,7 @@ fn constructLinkerArgsWasm(ctx: *Context) void {
fn addFnObjects(ctx: *Context) !void {
// at this point it's guaranteed nobody else has this lock, so we circumvent it
- // and avoid having to be a coroutine
+ // and avoid having to be an async function
const fn_link_set = &ctx.comp.fn_link_set.private_data;
var it = fn_link_set.first;
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 8917809533..5136b32735 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -52,7 +52,7 @@ const Command = struct {
pub fn main() !void {
// This allocator needs to be thread-safe because we use it for the event.Loop
- // which multiplexes coroutines onto kernel threads.
+ // which multiplexes async functions onto kernel threads.
// libc allocator is guaranteed to have this property.
const allocator = std.heap.c_allocator;
@@ -466,8 +466,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co
comp.link_objects = link_objects;
comp.start();
- const process_build_events_handle = try async processBuildEvents(comp, color);
- defer cancel process_build_events_handle;
+ // TODO const process_build_events_handle = try async processBuildEvents(comp, color);
loop.run();
}
@@ -578,8 +577,7 @@ fn cmdLibC(allocator: *Allocator, args: []const []const u8) !void {
var zig_compiler = try ZigCompiler.init(&loop);
defer zig_compiler.deinit();
- const handle = try async findLibCAsync(&zig_compiler);
- defer cancel handle;
+ // TODO const handle = try async findLibCAsync(&zig_compiler);
loop.run();
}
@@ -663,13 +661,12 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
defer loop.deinit();
var result: FmtError!void = undefined;
- const main_handle = try async asyncFmtMainChecked(
- &result,
- &loop,
- &flags,
- color,
- );
- defer cancel main_handle;
+ // TODO const main_handle = try async asyncFmtMainChecked(
+ // TODO &result,
+ // TODO &loop,
+ // TODO &flags,
+ // TODO color,
+ // TODO );
loop.run();
return result;
}
diff --git a/src-self-hosted/stage1.zig b/src-self-hosted/stage1.zig
index dd26e9594c..b8f13b5d03 100644
--- a/src-self-hosted/stage1.zig
+++ b/src-self-hosted/stage1.zig
@@ -142,7 +142,8 @@ export fn stage2_render_ast(tree: *ast.Tree, output_file: *FILE) Error {
return Error.None;
}
-// TODO: just use the actual self-hosted zig fmt. Until the coroutine rewrite, we use a blocking implementation.
+// TODO: just use the actual self-hosted zig fmt. Until https://github.com/ziglang/zig/issues/2377,
+// we use a blocking implementation.
export fn stage2_fmt(argc: c_int, argv: [*]const [*]const u8) c_int {
if (std.debug.runtime_safety) {
fmtMain(argc, argv) catch unreachable;
diff --git a/src-self-hosted/translate_c.zig b/src-self-hosted/translate_c.zig
index b18397ede2..6a91b8e7bf 100644
--- a/src-self-hosted/translate_c.zig
+++ b/src-self-hosted/translate_c.zig
@@ -1037,7 +1037,7 @@ fn transCreateNodeFnCall(c: *Context, fn_expr: *ast.Node) !*ast.Node.SuffixOp {
.op = ast.Node.SuffixOp.Op{
.Call = ast.Node.SuffixOp.Op.Call{
.params = ast.Node.SuffixOp.Op.Call.ParamList.init(c.a()),
- .async_attr = null,
+ .async_token = null,
},
},
.rtoken = undefined, // set after appending args
@@ -1355,7 +1355,6 @@ fn finishTransFnProto(
.var_args_token = null, // TODO this field is broken in the AST data model
.extern_export_inline_token = extern_export_inline_tok,
.cc_token = cc_tok,
- .async_attr = null,
.body_node = null,
.lib_name = null,
.align_expr = null,
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 4c3aeade9e..f1c699ba10 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -35,6 +35,7 @@ struct ConstExprValue;
struct IrInstruction;
struct IrInstructionCast;
struct IrInstructionAllocaGen;
+struct IrInstructionCallGen;
struct IrBasicBlock;
struct ScopeDecls;
struct ZigWindowsSDK;
@@ -70,20 +71,10 @@ struct IrExecutable {
Scope *begin_scope;
ZigList<Tld *> tld_list;
- IrInstruction *coro_handle;
- IrInstruction *atomic_state_field_ptr; // this one is shared and in the promise
- IrInstruction *coro_result_ptr_field_ptr;
- IrInstruction *coro_result_field_ptr;
- IrInstruction *await_handle_var_ptr; // this one is where we put the one we extracted from the promise
- IrBasicBlock *coro_early_final;
- IrBasicBlock *coro_normal_final;
- IrBasicBlock *coro_suspend_block;
- IrBasicBlock *coro_final_cleanup_block;
- ZigVar *coro_allocator_var;
-
bool invalid;
bool is_inline;
bool is_generic_instantiation;
+ bool need_err_code_spill;
};
enum OutType {
@@ -485,11 +476,10 @@ enum NodeType {
NodeTypeIfErrorExpr,
NodeTypeIfOptional,
NodeTypeErrorSetDecl,
- NodeTypeCancel,
NodeTypeResume,
NodeTypeAwaitExpr,
NodeTypeSuspend,
- NodeTypePromiseType,
+ NodeTypeAnyFrameType,
NodeTypeEnumLiteral,
};
@@ -522,7 +512,6 @@ struct AstNodeFnProto {
AstNode *section_expr;
bool auto_err_set;
- AstNode *async_allocator_type;
};
struct AstNodeFnDef {
@@ -657,7 +646,6 @@ struct AstNodeFnCallExpr {
bool is_builtin;
bool is_async;
bool seen; // used by @compileLog
- AstNode *async_allocator;
};
struct AstNodeArrayAccessExpr {
@@ -922,10 +910,6 @@ struct AstNodeBreakExpr {
AstNode *expr; // may be null
};
-struct AstNodeCancelExpr {
- AstNode *expr;
-};
-
struct AstNodeResumeExpr {
AstNode *expr;
};
@@ -949,7 +933,7 @@ struct AstNodeSuspend {
AstNode *block;
};
-struct AstNodePromiseType {
+struct AstNodeAnyFrameType {
AstNode *payload_type; // can be NULL
};
@@ -1014,13 +998,16 @@ struct AstNode {
AstNodeInferredArrayType inferred_array_type;
AstNodeErrorType error_type;
AstNodeErrorSetDecl err_set_decl;
- AstNodeCancelExpr cancel_expr;
AstNodeResumeExpr resume_expr;
AstNodeAwaitExpr await_expr;
AstNodeSuspend suspend;
- AstNodePromiseType promise_type;
+ AstNodeAnyFrameType anyframe_type;
AstNodeEnumLiteral enum_literal;
} data;
+
+ // This is a function for use in the debugger to print
+ // the source location.
+ void src();
};
// this struct is allocated with allocate_nonzero
@@ -1047,7 +1034,6 @@ struct FnTypeId {
bool is_var_args;
CallingConvention cc;
uint32_t alignment;
- ZigType *async_allocator_type;
};
uint32_t fn_type_id_hash(FnTypeId*);
@@ -1095,6 +1081,7 @@ struct TypeStructField {
ConstExprValue *init_val; // null and then memoized
uint32_t bit_offset_in_host; // offset from the memory at gen_index
uint32_t host_int_bytes; // size of host integer
+ uint32_t align;
};
enum ResolveStatus {
@@ -1156,6 +1143,8 @@ struct ZigTypeOptional {
struct ZigTypeErrorUnion {
ZigType *err_set_type;
ZigType *payload_type;
+ size_t pad_bytes;
+ LLVMTypeRef pad_llvm_type;
};
struct ZigTypeErrorSet {
@@ -1241,11 +1230,6 @@ struct ZigTypeBoundFn {
ZigType *fn_type;
};
-struct ZigTypePromise {
- // null if `promise` instead of `promise->T`
- ZigType *result_type;
-};
-
struct ZigTypeVector {
// The type must be a pointer, integer, or float
ZigType *elem_type;
@@ -1276,7 +1260,8 @@ enum ZigTypeId {
ZigTypeIdBoundFn,
ZigTypeIdArgTuple,
ZigTypeIdOpaque,
- ZigTypeIdPromise,
+ ZigTypeIdFnFrame,
+ ZigTypeIdAnyFrame,
ZigTypeIdVector,
ZigTypeIdEnumLiteral,
};
@@ -1291,6 +1276,15 @@ struct ZigTypeOpaque {
Buf *bare_name;
};
+struct ZigTypeFnFrame {
+ ZigFn *fn;
+ ZigType *locals_struct;
+};
+
+struct ZigTypeAnyFrame {
+ ZigType *result_type; // null if `anyframe` instead of `anyframe->T`
+};
+
struct ZigType {
ZigTypeId id;
Buf name;
@@ -1314,16 +1308,16 @@ struct ZigType {
ZigTypeUnion unionation;
ZigTypeFn fn;
ZigTypeBoundFn bound_fn;
- ZigTypePromise promise;
ZigTypeVector vector;
ZigTypeOpaque opaque;
+ ZigTypeFnFrame frame;
+ ZigTypeAnyFrame any_frame;
} data;
// use these fields to make sure we don't duplicate type table entries for the same type
ZigType *pointer_parent[2]; // [0 - mut, 1 - const]
ZigType *optional_parent;
- ZigType *promise_parent;
- ZigType *promise_frame_parent;
+ ZigType *any_frame_parent;
// If we generate a constant name value for this type, we memoize it here.
// The type of this is array
ConstExprValue *cached_const_name_val;
@@ -1359,7 +1353,6 @@ struct GlobalExport {
};
struct ZigFn {
- CodeGen *codegen;
LLVMValueRef llvm_value;
const char *llvm_name;
AstNode *proto_node;
@@ -1368,7 +1361,17 @@ struct ZigFn {
Scope *child_scope; // parent is scope for last parameter
ScopeBlock *def_scope; // parent is child_scope
Buf symbol_name;
- ZigType *type_entry; // function type
+ // This is the function type assuming the function does not suspend.
+ // Note that for an async function, this can be shared with non-async functions. So the value here
+ // should only be read for things in common between non-async and async function types.
+ ZigType *type_entry;
+ // For normal functions one could use the type_entry->raw_type_ref and type_entry->raw_di_type.
+ // However for functions that suspend, those values could possibly be their non-suspending equivalents.
+ // So these values should be preferred.
+ LLVMTypeRef raw_type_ref;
+ ZigLLVMDIType *raw_di_type;
+
+ ZigType *frame_type;
// in the case of normal functions this is the implicit return type
// in the case of async functions this is the implicit return type according to the
// zig source code, not according to zig ir
@@ -1379,6 +1382,7 @@ struct ZigFn {
size_t prealloc_backward_branch_quota;
AstNode **param_source_nodes;
Buf **param_names;
+ IrInstruction *err_code_spill;
AstNode *fn_no_inline_set_node;
AstNode *fn_static_eval_set_node;
@@ -1390,8 +1394,11 @@ struct ZigFn {
AstNode *set_alignstack_node;
AstNode *set_cold_node;
+ const AstNode *inferred_async_node;
+ ZigFn *inferred_async_fn;
ZigList<GlobalExport> export_list;
+ ZigList<IrInstructionCallGen *> call_list;
LLVMValueRef valgrind_client_request_array;
@@ -1442,8 +1449,6 @@ enum BuiltinFnId {
BuiltinFnIdErrName,
BuiltinFnIdBreakpoint,
BuiltinFnIdReturnAddress,
- BuiltinFnIdFrameAddress,
- BuiltinFnIdHandle,
BuiltinFnIdEmbedFile,
BuiltinFnIdCmpxchgWeak,
BuiltinFnIdCmpxchgStrong,
@@ -1499,6 +1504,7 @@ enum BuiltinFnId {
BuiltinFnIdInlineCall,
BuiltinFnIdNoInlineCall,
BuiltinFnIdNewStackCall,
+ BuiltinFnIdAsyncCall,
BuiltinFnIdTypeId,
BuiltinFnIdShlExact,
BuiltinFnIdShrExact,
@@ -1514,6 +1520,10 @@ enum BuiltinFnId {
BuiltinFnIdAtomicLoad,
BuiltinFnIdHasDecl,
BuiltinFnIdUnionInit,
+ BuiltinFnIdFrameAddress,
+ BuiltinFnIdFrameType,
+ BuiltinFnIdFrameHandle,
+ BuiltinFnIdFrameSize,
};
struct BuiltinFnEntry {
@@ -1541,6 +1551,12 @@ enum PanicMsgId {
PanicMsgIdBadEnumValue,
PanicMsgIdFloatToInt,
PanicMsgIdPtrCastNull,
+ PanicMsgIdBadResume,
+ PanicMsgIdBadAwait,
+ PanicMsgIdBadReturn,
+ PanicMsgIdResumedAnAwaitingFn,
+ PanicMsgIdFrameTooSmall,
+ PanicMsgIdResumedFnPendingAwait,
PanicMsgIdCount,
};
@@ -1701,7 +1717,13 @@ struct CodeGen {
LLVMTargetMachineRef target_machine;
ZigLLVMDIFile *dummy_di_file;
LLVMValueRef cur_ret_ptr;
+ LLVMValueRef cur_frame_ptr;
LLVMValueRef cur_fn_val;
+ LLVMValueRef cur_async_switch_instr;
+ LLVMValueRef cur_async_resume_index_ptr;
+ LLVMValueRef cur_async_awaiter_ptr;
+ LLVMBasicBlockRef cur_preamble_llvm_block;
+ size_t cur_resume_block_count;
LLVMValueRef cur_err_ret_trace_val_arg;
LLVMValueRef cur_err_ret_trace_val_stack;
LLVMValueRef memcpy_fn_val;
@@ -1709,28 +1731,16 @@ struct CodeGen {
LLVMValueRef trap_fn_val;
LLVMValueRef return_address_fn_val;
LLVMValueRef frame_address_fn_val;
- LLVMValueRef coro_destroy_fn_val;
- LLVMValueRef coro_id_fn_val;
- LLVMValueRef coro_alloc_fn_val;
- LLVMValueRef coro_size_fn_val;
- LLVMValueRef coro_begin_fn_val;
- LLVMValueRef coro_suspend_fn_val;
- LLVMValueRef coro_end_fn_val;
- LLVMValueRef coro_free_fn_val;
- LLVMValueRef coro_resume_fn_val;
- LLVMValueRef coro_save_fn_val;
- LLVMValueRef coro_promise_fn_val;
- LLVMValueRef coro_alloc_helper_fn_val;
- LLVMValueRef coro_frame_fn_val;
- LLVMValueRef merge_err_ret_traces_fn_val;
LLVMValueRef add_error_return_trace_addr_fn_val;
LLVMValueRef stacksave_fn_val;
LLVMValueRef stackrestore_fn_val;
LLVMValueRef write_register_fn_val;
+ LLVMValueRef merge_err_ret_traces_fn_val;
LLVMValueRef sp_md_node;
LLVMValueRef err_name_table;
LLVMValueRef safety_crash_err_fn;
LLVMValueRef return_err_fn;
+ LLVMTypeRef anyframe_fn_type;
// reminder: hash tables must be initialized before use
HashMap import_table;
@@ -1797,12 +1807,12 @@ struct CodeGen {
ZigType *entry_var;
ZigType *entry_global_error_set;
ZigType *entry_arg_tuple;
- ZigType *entry_promise;
ZigType *entry_enum_literal;
+ ZigType *entry_any_frame;
} builtin_types;
+
ZigType *align_amt_type;
ZigType *stack_trace_type;
- ZigType *ptr_to_stack_trace_type;
ZigType *err_tag_type;
ZigType *test_fn_type;
@@ -1938,6 +1948,7 @@ struct ZigVar {
ZigType *var_type;
LLVMValueRef value_ref;
IrInstruction *is_comptime;
+ IrInstruction *ptr_instruction;
// which node is the declaration of the variable
AstNode *decl_node;
ZigLLVMDILocalVariable *di_loc_var;
@@ -1985,7 +1996,6 @@ enum ScopeId {
ScopeIdSuspend,
ScopeIdFnDef,
ScopeIdCompTime,
- ScopeIdCoroPrelude,
ScopeIdRuntime,
};
@@ -2109,7 +2119,6 @@ struct ScopeRuntime {
struct ScopeSuspend {
Scope base;
- IrBasicBlock *resume_block;
bool reported_err;
};
@@ -2128,12 +2137,6 @@ struct ScopeFnDef {
ZigFn *fn_entry;
};
-// This scope is created to indicate that the code in the scope
-// is auto-generated coroutine prelude stuff.
-struct ScopeCoroPrelude {
- Scope base;
-};
-
// synchronized with code in define_builtin_compile_vars
enum AtomicOrder {
AtomicOrderUnordered,
@@ -2231,7 +2234,7 @@ enum IrInstructionId {
IrInstructionIdSetRuntimeSafety,
IrInstructionIdSetFloatMode,
IrInstructionIdArrayType,
- IrInstructionIdPromiseType,
+ IrInstructionIdAnyFrameType,
IrInstructionIdSliceType,
IrInstructionIdGlobalAsm,
IrInstructionIdAsm,
@@ -2278,7 +2281,10 @@ enum IrInstructionId {
IrInstructionIdBreakpoint,
IrInstructionIdReturnAddress,
IrInstructionIdFrameAddress,
- IrInstructionIdHandle,
+ IrInstructionIdFrameHandle,
+ IrInstructionIdFrameType,
+ IrInstructionIdFrameSizeSrc,
+ IrInstructionIdFrameSizeGen,
IrInstructionIdAlignOf,
IrInstructionIdOverflowOp,
IrInstructionIdTestErrSrc,
@@ -2321,35 +2327,16 @@ enum IrInstructionId {
IrInstructionIdImplicitCast,
IrInstructionIdResolveResult,
IrInstructionIdResetResult,
- IrInstructionIdResultPtr,
IrInstructionIdOpaqueType,
IrInstructionIdSetAlignStack,
IrInstructionIdArgType,
IrInstructionIdExport,
IrInstructionIdErrorReturnTrace,
IrInstructionIdErrorUnion,
- IrInstructionIdCancel,
- IrInstructionIdGetImplicitAllocator,
- IrInstructionIdCoroId,
- IrInstructionIdCoroAlloc,
- IrInstructionIdCoroSize,
- IrInstructionIdCoroBegin,
- IrInstructionIdCoroAllocFail,
- IrInstructionIdCoroSuspend,
- IrInstructionIdCoroEnd,
- IrInstructionIdCoroFree,
- IrInstructionIdCoroResume,
- IrInstructionIdCoroSave,
- IrInstructionIdCoroPromise,
- IrInstructionIdCoroAllocHelper,
IrInstructionIdAtomicRmw,
IrInstructionIdAtomicLoad,
- IrInstructionIdPromiseResultType,
- IrInstructionIdAwaitBookkeeping,
IrInstructionIdSaveErrRetAddr,
IrInstructionIdAddImplicitReturnType,
- IrInstructionIdMergeErrRetTraces,
- IrInstructionIdMarkErrRetTracePtr,
IrInstructionIdErrSetCast,
IrInstructionIdToBytes,
IrInstructionIdFromBytes,
@@ -2365,6 +2352,13 @@ enum IrInstructionId {
IrInstructionIdEndExpr,
IrInstructionIdPtrOfArrayToSlice,
IrInstructionIdUnionInitNamedField,
+ IrInstructionIdSuspendBegin,
+ IrInstructionIdSuspendFinish,
+ IrInstructionIdAwaitSrc,
+ IrInstructionIdAwaitGen,
+ IrInstructionIdResume,
+ IrInstructionIdSpillBegin,
+ IrInstructionIdSpillEnd,
};
struct IrInstruction {
@@ -2607,7 +2601,6 @@ struct IrInstructionCallSrc {
IrInstruction **args;
ResultLoc *result_loc;
- IrInstruction *async_allocator;
IrInstruction *new_stack;
FnInline fn_inline;
bool is_async;
@@ -2622,8 +2615,8 @@ struct IrInstructionCallGen {
size_t arg_count;
IrInstruction **args;
IrInstruction *result_loc;
+ IrInstruction *frame_result_loc;
- IrInstruction *async_allocator;
IrInstruction *new_stack;
FnInline fn_inline;
bool is_async;
@@ -2639,7 +2632,7 @@ struct IrInstructionConst {
struct IrInstructionReturn {
IrInstruction base;
- IrInstruction *value;
+ IrInstruction *operand;
};
enum CastOp {
@@ -2744,7 +2737,7 @@ struct IrInstructionPtrType {
bool is_allow_zero;
};
-struct IrInstructionPromiseType {
+struct IrInstructionAnyFrameType {
IrInstruction base;
IrInstruction *payload_type;
@@ -3084,10 +3077,28 @@ struct IrInstructionFrameAddress {
IrInstruction base;
};
-struct IrInstructionHandle {
+struct IrInstructionFrameHandle {
IrInstruction base;
};
+struct IrInstructionFrameType {
+ IrInstruction base;
+
+ IrInstruction *fn;
+};
+
+struct IrInstructionFrameSizeSrc {
+ IrInstruction base;
+
+ IrInstruction *fn;
+};
+
+struct IrInstructionFrameSizeGen {
+ IrInstruction base;
+
+ IrInstruction *fn;
+};
+
enum IrOverflowOp {
IrOverflowOpAdd,
IrOverflowOpSub,
@@ -3127,6 +3138,7 @@ struct IrInstructionTestErrSrc {
IrInstruction base;
bool resolve_err_set;
+ bool base_ptr_is_payload;
IrInstruction *base_ptr;
};
@@ -3179,7 +3191,6 @@ struct IrInstructionFnProto {
IrInstruction **param_types;
IrInstruction *align_value;
IrInstruction *return_type;
- IrInstruction *async_allocator_type_value;
bool is_var_args;
};
@@ -3409,95 +3420,6 @@ struct IrInstructionErrorUnion {
IrInstruction *payload;
};
-struct IrInstructionCancel {
- IrInstruction base;
-
- IrInstruction *target;
-};
-
-enum ImplicitAllocatorId {
- ImplicitAllocatorIdArg,
- ImplicitAllocatorIdLocalVar,
-};
-
-struct IrInstructionGetImplicitAllocator {
- IrInstruction base;
-
- ImplicitAllocatorId id;
-};
-
-struct IrInstructionCoroId {
- IrInstruction base;
-
- IrInstruction *promise_ptr;
-};
-
-struct IrInstructionCoroAlloc {
- IrInstruction base;
-
- IrInstruction *coro_id;
-};
-
-struct IrInstructionCoroSize {
- IrInstruction base;
-};
-
-struct IrInstructionCoroBegin {
- IrInstruction base;
-
- IrInstruction *coro_id;
- IrInstruction *coro_mem_ptr;
-};
-
-struct IrInstructionCoroAllocFail {
- IrInstruction base;
-
- IrInstruction *err_val;
-};
-
-struct IrInstructionCoroSuspend {
- IrInstruction base;
-
- IrInstruction *save_point;
- IrInstruction *is_final;
-};
-
-struct IrInstructionCoroEnd {
- IrInstruction base;
-};
-
-struct IrInstructionCoroFree {
- IrInstruction base;
-
- IrInstruction *coro_id;
- IrInstruction *coro_handle;
-};
-
-struct IrInstructionCoroResume {
- IrInstruction base;
-
- IrInstruction *awaiter_handle;
-};
-
-struct IrInstructionCoroSave {
- IrInstruction base;
-
- IrInstruction *coro_handle;
-};
-
-struct IrInstructionCoroPromise {
- IrInstruction base;
-
- IrInstruction *coro_handle;
-};
-
-struct IrInstructionCoroAllocHelper {
- IrInstruction base;
-
- IrInstruction *realloc_fn;
- IrInstruction *coro_size;
-};
-
struct IrInstructionAtomicRmw {
IrInstruction base;
@@ -3519,18 +3441,6 @@ struct IrInstructionAtomicLoad {
AtomicOrder resolved_ordering;
};
-struct IrInstructionPromiseResultType {
- IrInstruction base;
-
- IrInstruction *promise_type;
-};
-
-struct IrInstructionAwaitBookkeeping {
- IrInstruction base;
-
- IrInstruction *promise_result_type;
-};
-
struct IrInstructionSaveErrRetAddr {
IrInstruction base;
};
@@ -3541,20 +3451,6 @@ struct IrInstructionAddImplicitReturnType {
IrInstruction *value;
};
-struct IrInstructionMergeErrRetTraces {
- IrInstruction base;
-
- IrInstruction *coro_promise_ptr;
- IrInstruction *src_err_ret_trace_ptr;
- IrInstruction *dest_err_ret_trace_ptr;
-};
-
-struct IrInstructionMarkErrRetTracePtr {
- IrInstruction base;
-
- IrInstruction *err_ret_trace_ptr;
-};
-
// For float ops which take a single argument
struct IrInstructionFloatOp {
IrInstruction base;
@@ -3645,6 +3541,7 @@ struct IrInstructionAllocaGen {
uint32_t align;
const char *name_hint;
+ size_t field_index;
};
struct IrInstructionEndExpr {
@@ -3692,6 +3589,56 @@ struct IrInstructionPtrOfArrayToSlice {
IrInstruction *result_loc;
};
+struct IrInstructionSuspendBegin {
+ IrInstruction base;
+
+ LLVMBasicBlockRef resume_bb;
+};
+
+struct IrInstructionSuspendFinish {
+ IrInstruction base;
+
+ IrInstructionSuspendBegin *begin;
+};
+
+struct IrInstructionAwaitSrc {
+ IrInstruction base;
+
+ IrInstruction *frame;
+ ResultLoc *result_loc;
+};
+
+struct IrInstructionAwaitGen {
+ IrInstruction base;
+
+ IrInstruction *frame;
+ IrInstruction *result_loc;
+};
+
+struct IrInstructionResume {
+ IrInstruction base;
+
+ IrInstruction *frame;
+};
+
+enum SpillId {
+ SpillIdInvalid,
+ SpillIdRetErrCode,
+};
+
+struct IrInstructionSpillBegin {
+ IrInstruction base;
+
+ SpillId spill_id;
+ IrInstruction *operand;
+};
+
+struct IrInstructionSpillEnd {
+ IrInstruction base;
+
+ IrInstructionSpillBegin *begin;
+};
+
enum ResultLocId {
ResultLocIdInvalid,
ResultLocIdNone,
@@ -3772,23 +3719,19 @@ static const size_t slice_len_index = 1;
static const size_t maybe_child_index = 0;
static const size_t maybe_null_index = 1;
-static const size_t err_union_err_index = 0;
-static const size_t err_union_payload_index = 1;
+static const size_t err_union_payload_index = 0;
+static const size_t err_union_err_index = 1;
-// TODO call graph analysis to find out what this number needs to be for every function
-// MUST BE A POWER OF TWO.
-static const size_t stack_trace_ptr_count = 32;
+// label (grep this): [fn_frame_struct_layout]
+static const size_t frame_fn_ptr_index = 0;
+static const size_t frame_resume_index = 1;
+static const size_t frame_awaiter_index = 2;
+static const size_t frame_ret_start = 3;
-// these belong to the async function
-#define RETURN_ADDRESSES_FIELD_NAME "return_addresses"
-#define ERR_RET_TRACE_FIELD_NAME "err_ret_trace"
-#define RESULT_FIELD_NAME "result"
-#define ASYNC_REALLOC_FIELD_NAME "reallocFn"
-#define ASYNC_SHRINK_FIELD_NAME "shrinkFn"
-#define ATOMIC_STATE_FIELD_NAME "atomic_state"
-// these point to data belonging to the awaiter
-#define ERR_RET_TRACE_PTR_FIELD_NAME "err_ret_trace_ptr"
-#define RESULT_PTR_FIELD_NAME "result_ptr"
+// TODO https://github.com/ziglang/zig/issues/3056
+// We require this to be a power of 2 so that we can use shifting rather than
+// remainder division.
+static const size_t stack_trace_ptr_count = 32; // Must be a power of 2.
#define NAMESPACE_SEP_CHAR '.'
#define NAMESPACE_SEP_STR "."
@@ -3811,11 +3754,13 @@ enum FnWalkId {
struct FnWalkAttrs {
ZigFn *fn;
+ LLVMValueRef llvm_fn;
unsigned gen_i;
};
struct FnWalkCall {
ZigList<LLVMValueRef> *gen_param_values;
+ ZigList<ZigType *> *gen_param_types;
IrInstructionCallGen *inst;
bool is_var_args;
};
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 0af1baec35..4aff6da8e9 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -7,6 +7,7 @@
#include "analyze.hpp"
#include "ast_render.hpp"
+#include "codegen.hpp"
#include "config.h"
#include "error.hpp"
#include "ir.hpp"
@@ -31,6 +32,11 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r
static void preview_use_decl(CodeGen *g, TldUsingNamespace *using_namespace, ScopeDecls *dest_decls_scope);
static void resolve_use_decl(CodeGen *g, TldUsingNamespace *tld_using_namespace, ScopeDecls *dest_decls_scope);
+// nullptr means not analyzed yet; this one means currently being analyzed
+static const AstNode *inferred_async_checking = reinterpret_cast<AstNode *>(0x1);
+// this one means analyzed and it's not async
+static const AstNode *inferred_async_none = reinterpret_cast<AstNode *>(0x2);
+
static bool is_top_level_struct(ZigType *import) {
return import->id == ZigTypeIdStruct && import->data.structure.root_struct != nullptr;
}
@@ -56,14 +62,14 @@ ErrorMsg *add_token_error(CodeGen *g, ZigType *owner, Token *token, Buf *msg) {
return err;
}
-ErrorMsg *add_node_error(CodeGen *g, AstNode *node, Buf *msg) {
+ErrorMsg *add_node_error(CodeGen *g, const AstNode *node, Buf *msg) {
Token fake_token;
fake_token.start_line = node->line;
fake_token.start_column = node->column;
return add_token_error(g, node->owner, &fake_token, msg);
}
-ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, AstNode *node, Buf *msg) {
+ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, const AstNode *node, Buf *msg) {
Token fake_token;
fake_token.start_line = node->line;
fake_token.start_column = node->column;
@@ -188,12 +194,6 @@ Scope *create_comptime_scope(CodeGen *g, AstNode *node, Scope *parent) {
return &scope->base;
}
-Scope *create_coro_prelude_scope(CodeGen *g, AstNode *node, Scope *parent) {
- ScopeCoroPrelude *scope = allocate<ScopeCoroPrelude>(1);
- init_scope(g, &scope->base, ScopeIdCoroPrelude, node, parent);
- return &scope->base;
-}
-
ZigType *get_scope_import(Scope *scope) {
while (scope) {
if (scope->id == ScopeIdDecls) {
@@ -234,6 +234,8 @@ AstNode *type_decl_node(ZigType *type_entry) {
return type_entry->data.enumeration.decl_node;
case ZigTypeIdUnion:
return type_entry->data.unionation.decl_node;
+ case ZigTypeIdFnFrame:
+ return type_entry->data.frame.fn->proto_node;
case ZigTypeIdOpaque:
case ZigTypeIdMetaType:
case ZigTypeIdVoid:
@@ -254,8 +256,8 @@ AstNode *type_decl_node(ZigType *type_entry) {
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdAnyFrame:
return nullptr;
}
zig_unreachable();
@@ -269,6 +271,20 @@ bool type_is_resolved(ZigType *type_entry, ResolveStatus status) {
return type_entry->data.structure.resolve_status >= status;
case ZigTypeIdUnion:
return type_entry->data.unionation.resolve_status >= status;
+ case ZigTypeIdFnFrame:
+ switch (status) {
+ case ResolveStatusInvalid:
+ zig_unreachable();
+ case ResolveStatusUnstarted:
+ case ResolveStatusZeroBitsKnown:
+ return true;
+ case ResolveStatusAlignmentKnown:
+ case ResolveStatusSizeKnown:
+ return type_entry->data.frame.locals_struct != nullptr;
+ case ResolveStatusLLVMFwdDecl:
+ case ResolveStatusLLVMFull:
+ return type_entry->llvm_type != nullptr;
+ }
case ZigTypeIdEnum:
switch (status) {
case ResolveStatusUnstarted:
@@ -307,8 +323,8 @@ bool type_is_resolved(ZigType *type_entry, ResolveStatus status) {
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdAnyFrame:
return true;
}
zig_unreachable();
@@ -341,27 +357,27 @@ ZigType *get_smallest_unsigned_int_type(CodeGen *g, uint64_t x) {
return get_int_type(g, false, bits_needed_for_unsigned(x));
}
-ZigType *get_promise_type(CodeGen *g, ZigType *result_type) {
- if (result_type != nullptr && result_type->promise_parent != nullptr) {
- return result_type->promise_parent;
- } else if (result_type == nullptr && g->builtin_types.entry_promise != nullptr) {
- return g->builtin_types.entry_promise;
+ZigType *get_any_frame_type(CodeGen *g, ZigType *result_type) {
+ if (result_type != nullptr && result_type->any_frame_parent != nullptr) {
+ return result_type->any_frame_parent;
+ } else if (result_type == nullptr && g->builtin_types.entry_any_frame != nullptr) {
+ return g->builtin_types.entry_any_frame;
}
- ZigType *entry = new_type_table_entry(ZigTypeIdPromise);
+ ZigType *entry = new_type_table_entry(ZigTypeIdAnyFrame);
entry->abi_size = g->builtin_types.entry_usize->abi_size;
entry->size_in_bits = g->builtin_types.entry_usize->size_in_bits;
entry->abi_align = g->builtin_types.entry_usize->abi_align;
- entry->data.promise.result_type = result_type;
- buf_init_from_str(&entry->name, "promise");
+ entry->data.any_frame.result_type = result_type;
+ buf_init_from_str(&entry->name, "anyframe");
if (result_type != nullptr) {
buf_appendf(&entry->name, "->%s", buf_ptr(&result_type->name));
}
if (result_type != nullptr) {
- result_type->promise_parent = entry;
+ result_type->any_frame_parent = entry;
} else if (result_type == nullptr) {
- g->builtin_types.entry_promise = entry;
+ g->builtin_types.entry_any_frame = entry;
}
return entry;
}
@@ -378,6 +394,25 @@ static const char *ptr_len_to_star_str(PtrLen ptr_len) {
zig_unreachable();
}
+ZigType *get_fn_frame_type(CodeGen *g, ZigFn *fn) {
+ if (fn->frame_type != nullptr) {
+ return fn->frame_type;
+ }
+
+ ZigType *entry = new_type_table_entry(ZigTypeIdFnFrame);
+ buf_resize(&entry->name, 0);
+ buf_appendf(&entry->name, "@Frame(%s)", buf_ptr(&fn->symbol_name));
+
+ entry->data.frame.fn = fn;
+
+ // Async function frames are always non-zero bits because they always have a resume index.
+ entry->abi_size = SIZE_MAX;
+ entry->size_in_bits = SIZE_MAX;
+
+ fn->frame_type = entry;
+ return entry;
+}
+
ZigType *get_pointer_to_type_extra(CodeGen *g, ZigType *child_type, bool is_const,
bool is_volatile, PtrLen ptr_len, uint32_t byte_alignment,
uint32_t bit_offset_in_host, uint32_t host_int_bytes, bool allow_zero)
@@ -490,42 +525,6 @@ ZigType *get_pointer_to_type(CodeGen *g, ZigType *child_type, bool is_const) {
return get_pointer_to_type_extra(g, child_type, is_const, false, PtrLenSingle, 0, 0, 0, false);
}
-ZigType *get_promise_frame_type(CodeGen *g, ZigType *return_type) {
- if (return_type->promise_frame_parent != nullptr) {
- return return_type->promise_frame_parent;
- }
-
- ZigType *atomic_state_type = g->builtin_types.entry_usize;
- ZigType *result_ptr_type = get_pointer_to_type(g, return_type, false);
-
- ZigList<const char *> field_names = {};
- field_names.append(ATOMIC_STATE_FIELD_NAME);
- field_names.append(RESULT_FIELD_NAME);
- field_names.append(RESULT_PTR_FIELD_NAME);
- if (g->have_err_ret_tracing) {
- field_names.append(ERR_RET_TRACE_PTR_FIELD_NAME);
- field_names.append(ERR_RET_TRACE_FIELD_NAME);
- field_names.append(RETURN_ADDRESSES_FIELD_NAME);
- }
-
- ZigList<ZigType *> field_types = {};
- field_types.append(atomic_state_type);
- field_types.append(return_type);
- field_types.append(result_ptr_type);
- if (g->have_err_ret_tracing) {
- field_types.append(get_ptr_to_stack_trace_type(g));
- field_types.append(g->stack_trace_type);
- field_types.append(get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count));
- }
-
- assert(field_names.length == field_types.length);
- Buf *name = buf_sprintf("AsyncFramePromise(%s)", buf_ptr(&return_type->name));
- ZigType *entry = get_struct_type(g, buf_ptr(name), field_names.items, field_types.items, field_names.length);
-
- return_type->promise_frame_parent = entry;
- return entry;
-}
-
ZigType *get_optional_type(CodeGen *g, ZigType *child_type) {
if (child_type->optional_parent != nullptr) {
return child_type->optional_parent;
@@ -631,6 +630,7 @@ ZigType *get_error_union_type(CodeGen *g, ZigType *err_set_type, ZigType *payloa
size_t field2_offset = next_field_offset(0, entry->abi_align, field_sizes[0], field_aligns[1]);
entry->abi_size = next_field_offset(field2_offset, entry->abi_align, field_sizes[1], entry->abi_align);
entry->size_in_bits = entry->abi_size * 8;
+ entry->data.error_union.pad_bytes = entry->abi_size - (field2_offset + field_sizes[1]);
}
g->type_table.put(type_id, entry);
@@ -828,17 +828,15 @@ bool calling_convention_allows_zig_types(CallingConvention cc) {
zig_unreachable();
}
-ZigType *get_ptr_to_stack_trace_type(CodeGen *g) {
+ZigType *get_stack_trace_type(CodeGen *g) {
if (g->stack_trace_type == nullptr) {
ConstExprValue *stack_trace_type_val = get_builtin_value(g, "StackTrace");
assert(stack_trace_type_val->type->id == ZigTypeIdMetaType);
g->stack_trace_type = stack_trace_type_val->data.x_type;
assertNoError(type_resolve(g, g->stack_trace_type, ResolveStatusZeroBitsKnown));
-
- g->ptr_to_stack_trace_type = get_pointer_to_type(g, g->stack_trace_type, false);
}
- return g->ptr_to_stack_trace_type;
+ return g->stack_trace_type;
}
bool want_first_arg_sret(CodeGen *g, FnTypeId *fn_type_id) {
@@ -879,13 +877,8 @@ ZigType *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) {
// populate the name of the type
buf_resize(&fn_type->name, 0);
- if (fn_type->data.fn.fn_type_id.cc == CallingConventionAsync) {
- assert(fn_type_id->async_allocator_type != nullptr);
- buf_appendf(&fn_type->name, "async<%s> ", buf_ptr(&fn_type_id->async_allocator_type->name));
- } else {
- const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc);
- buf_appendf(&fn_type->name, "%s", cc_str);
- }
+ const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc);
+ buf_appendf(&fn_type->name, "%s", cc_str);
buf_appendf(&fn_type->name, "fn(");
for (size_t i = 0; i < fn_type_id->param_count; i += 1) {
FnTypeParamInfo *param_info = &fn_type_id->param_info[i];
@@ -998,14 +991,8 @@ ZigType *analyze_type_expr(CodeGen *g, Scope *scope, AstNode *node) {
ZigType *get_generic_fn_type(CodeGen *g, FnTypeId *fn_type_id) {
ZigType *fn_type = new_type_table_entry(ZigTypeIdFn);
buf_resize(&fn_type->name, 0);
- if (fn_type->data.fn.fn_type_id.cc == CallingConventionAsync) {
- const char *async_allocator_type_str = (fn_type->data.fn.fn_type_id.async_allocator_type == nullptr) ?
- "var" : buf_ptr(&fn_type_id->async_allocator_type->name);
- buf_appendf(&fn_type->name, "async(%s) ", async_allocator_type_str);
- } else {
- const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc);
- buf_appendf(&fn_type->name, "%s", cc_str);
- }
+ const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc);
+ buf_appendf(&fn_type->name, "%s", cc_str);
buf_appendf(&fn_type->name, "fn(");
size_t i = 0;
for (; i < fn_type_id->next_param_index; i += 1) {
@@ -1119,7 +1106,8 @@ static Error emit_error_unless_type_allowed_in_packed_struct(CodeGen *g, ZigType
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
add_node_error(g, source_node,
buf_sprintf("type '%s' not allowed in packed struct; no guaranteed in-memory representation",
buf_ptr(&type_entry->name)));
@@ -1207,8 +1195,9 @@ bool type_allowed_in_extern(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdErrorSet:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
case ZigTypeIdVoid:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return false;
case ZigTypeIdOpaque:
case ZigTypeIdUnreachable:
@@ -1378,8 +1367,9 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc
case ZigTypeIdEnum:
case ZigTypeIdUnion:
case ZigTypeIdFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
switch (type_requires_comptime(g, type_entry)) {
case ReqCompTimeNo:
break;
@@ -1474,8 +1464,9 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc
case ZigTypeIdEnum:
case ZigTypeIdUnion:
case ZigTypeIdFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
switch (type_requires_comptime(g, fn_type_id.return_type)) {
case ReqCompTimeInvalid:
return g->builtin_types.entry_invalid;
@@ -1487,16 +1478,6 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc
break;
}
- if (fn_type_id.cc == CallingConventionAsync) {
- if (fn_proto->async_allocator_type == nullptr) {
- return get_generic_fn_type(g, &fn_type_id);
- }
- fn_type_id.async_allocator_type = analyze_type_expr(g, child_scope, fn_proto->async_allocator_type);
- if (type_is_invalid(fn_type_id.async_allocator_type)) {
- return g->builtin_types.entry_invalid;
- }
- }
-
return get_fn_type(g, &fn_type_id);
}
@@ -1516,9 +1497,14 @@ bool type_is_invalid(ZigType *type_entry) {
zig_unreachable();
}
+struct SrcField {
+ const char *name;
+ ZigType *ty;
+ unsigned align;
+};
-ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_names[],
- ZigType *field_types[], size_t field_count)
+static ZigType *get_struct_type(CodeGen *g, const char *type_name, SrcField fields[], size_t field_count,
+ unsigned min_abi_align)
{
ZigType *struct_type = new_type_table_entry(ZigTypeIdStruct);
@@ -1530,22 +1516,20 @@ ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_na
struct_type->data.structure.fields = allocate<TypeStructField>(field_count);
struct_type->data.structure.fields_by_name.init(field_count);
- size_t abi_align = 0;
+ size_t abi_align = min_abi_align;
for (size_t i = 0; i < field_count; i += 1) {
TypeStructField *field = &struct_type->data.structure.fields[i];
- field->name = buf_create_from_str(field_names[i]);
- field->type_entry = field_types[i];
+ field->name = buf_create_from_str(fields[i].name);
+ field->type_entry = fields[i].ty;
field->src_index = i;
+ field->align = fields[i].align;
if (type_has_bits(field->type_entry)) {
assert(type_is_resolved(field->type_entry, ResolveStatusSizeKnown));
- if (field->type_entry->abi_align > abi_align) {
- abi_align = field->type_entry->abi_align;
+ unsigned field_abi_align = max(field->align, field->type_entry->abi_align);
+ if (field_abi_align > abi_align) {
+ abi_align = field_abi_align;
}
- field->gen_index = struct_type->data.structure.gen_field_count;
- struct_type->data.structure.gen_field_count += 1;
- } else {
- field->gen_index = SIZE_MAX;
}
auto prev_entry = struct_type->data.structure.fields_by_name.put_unique(field->name, field);
@@ -1555,17 +1539,24 @@ ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_na
size_t next_offset = 0;
for (size_t i = 0; i < field_count; i += 1) {
TypeStructField *field = &struct_type->data.structure.fields[i];
- if (field->gen_index == SIZE_MAX)
+ if (!type_has_bits(field->type_entry))
continue;
+
field->offset = next_offset;
+
+ // find the next non-zero-byte field for offset calculations
size_t next_src_field_index = i + 1;
for (; next_src_field_index < field_count; next_src_field_index += 1) {
- if (struct_type->data.structure.fields[next_src_field_index].gen_index != SIZE_MAX) {
+ if (type_has_bits(struct_type->data.structure.fields[next_src_field_index].type_entry))
break;
- }
}
- size_t next_abi_align = (next_src_field_index == field_count) ?
- abi_align : struct_type->data.structure.fields[next_src_field_index].type_entry->abi_align;
+ size_t next_abi_align;
+ if (next_src_field_index == field_count) {
+ next_abi_align = abi_align;
+ } else {
+ next_abi_align = max(fields[next_src_field_index].align,
+ struct_type->data.structure.fields[next_src_field_index].type_entry->abi_align);
+ }
next_offset = next_field_offset(next_offset, abi_align, field->type_entry->abi_size, next_abi_align);
}
@@ -2653,7 +2644,6 @@ ZigFn *create_fn_raw(CodeGen *g, FnInline inline_value) {
fn_entry->prealloc_backward_branch_quota = default_backward_branch_quota;
- fn_entry->codegen = g;
fn_entry->analyzed_executable.backward_branch_count = &fn_entry->prealloc_bbc;
fn_entry->analyzed_executable.backward_branch_quota = &fn_entry->prealloc_backward_branch_quota;
fn_entry->analyzed_executable.fn_entry = fn_entry;
@@ -2781,6 +2771,7 @@ static void resolve_decl_fn(CodeGen *g, TldFn *tld_fn) {
}
}
} else {
+ fn_table_entry->inferred_async_node = inferred_async_none;
g->external_prototypes.put_unique(tld_fn->base.name, &tld_fn->base);
}
@@ -2802,6 +2793,13 @@ static void resolve_decl_fn(CodeGen *g, TldFn *tld_fn) {
g->fn_defs.append(fn_table_entry);
}
+ // if the calling convention implies that it cannot be async, we save that for later
+ // and leave the value to be nullptr to indicate that we have not emitted possible
+ // compile errors for improperly calling async functions.
+ if (fn_table_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync) {
+ fn_table_entry->inferred_async_node = fn_table_entry->proto_node;
+ }
+
if (scope_is_root_decls(tld_fn->base.parent_scope) &&
(import == g->root_import || import->data.structure.root_struct->package == g->panic_package))
{
@@ -3035,12 +3033,11 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) {
case NodeTypeIfErrorExpr:
case NodeTypeIfOptional:
case NodeTypeErrorSetDecl:
- case NodeTypeCancel:
case NodeTypeResume:
case NodeTypeAwaitExpr:
case NodeTypeSuspend:
- case NodeTypePromiseType:
case NodeTypeEnumLiteral:
+ case NodeTypeAnyFrameType:
zig_unreachable();
}
}
@@ -3091,8 +3088,9 @@ ZigType *validate_var_type(CodeGen *g, AstNode *source_node, ZigType *type_entry
case ZigTypeIdUnion:
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return type_entry;
}
zig_unreachable();
@@ -3592,8 +3590,9 @@ bool is_container(ZigType *type_entry) {
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return false;
}
zig_unreachable();
@@ -3649,8 +3648,9 @@ Error resolve_container_type(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdInvalid:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
}
zig_unreachable();
@@ -3659,13 +3659,13 @@ Error resolve_container_type(CodeGen *g, ZigType *type_entry) {
ZigType *get_src_ptr_type(ZigType *type) {
if (type->id == ZigTypeIdPointer) return type;
if (type->id == ZigTypeIdFn) return type;
- if (type->id == ZigTypeIdPromise) return type;
+ if (type->id == ZigTypeIdAnyFrame) return type;
if (type->id == ZigTypeIdOptional) {
if (type->data.maybe.child_type->id == ZigTypeIdPointer) {
return type->data.maybe.child_type->data.pointer.allow_zero ? nullptr : type->data.maybe.child_type;
}
if (type->data.maybe.child_type->id == ZigTypeIdFn) return type->data.maybe.child_type;
- if (type->data.maybe.child_type->id == ZigTypeIdPromise) return type->data.maybe.child_type;
+ if (type->data.maybe.child_type->id == ZigTypeIdAnyFrame) return type->data.maybe.child_type;
}
return nullptr;
}
@@ -3681,6 +3681,13 @@ bool type_is_nonnull_ptr(ZigType *type) {
return get_codegen_ptr_type(type) == type && !ptr_allows_addr_zero(type);
}
+static uint32_t get_async_frame_align_bytes(CodeGen *g) {
+ uint32_t a = g->pointer_size_bytes * 2;
+ // async frames have at least alignment 8 so that we can have 3 extra bits when doing atomicrmw
+ if (a < 8) a = 8;
+ return a;
+}
+
uint32_t get_ptr_align(CodeGen *g, ZigType *type) {
ZigType *ptr_type = get_src_ptr_type(type);
if (ptr_type->id == ZigTypeIdPointer) {
@@ -3692,8 +3699,8 @@ uint32_t get_ptr_align(CodeGen *g, ZigType *type) {
// when getting the alignment of `?extern fn() void`.
// See http://lists.llvm.org/pipermail/llvm-dev/2018-September/126142.html
return (ptr_type->data.fn.fn_type_id.alignment == 0) ? 1 : ptr_type->data.fn.fn_type_id.alignment;
- } else if (ptr_type->id == ZigTypeIdPromise) {
- return get_coro_frame_align_bytes(g);
+ } else if (ptr_type->id == ZigTypeIdAnyFrame) {
+ return get_async_frame_align_bytes(g);
} else {
zig_unreachable();
}
@@ -3705,7 +3712,7 @@ bool get_ptr_const(ZigType *type) {
return ptr_type->data.pointer.is_const;
} else if (ptr_type->id == ZigTypeIdFn) {
return true;
- } else if (ptr_type->id == ZigTypeIdPromise) {
+ } else if (ptr_type->id == ZigTypeIdAnyFrame) {
return true;
} else {
zig_unreachable();
@@ -3780,18 +3787,128 @@ bool resolve_inferred_error_set(CodeGen *g, ZigType *err_set_type, AstNode *sour
return true;
}
-void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_type_node) {
- ZigType *fn_type = fn_table_entry->type_entry;
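+// Resolves the layout of a function's async frame type so that its size is known.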
+static void resolve_async_fn_frame(CodeGen *g, ZigFn *fn) {
+ ZigType *frame_type = get_fn_frame_type(g, fn);
+ Error err;
+ if ((err = type_resolve(g, frame_type, ResolveStatusSizeKnown))) {
+ fn->anal_state = FnAnalStateInvalid;
+ return;
+ }
+}
+
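+// Reports whether a function was determined to be async. Only meaningful after
+// analyze_fn_async has resolved fn->inferred_async_node.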
+bool fn_is_async(ZigFn *fn) {
+ assert(fn->inferred_async_node != nullptr);
+ assert(fn->inferred_async_node != inferred_async_checking);
+ return fn->inferred_async_node != inferred_async_none;
+}
+
+static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) {
+ assert(fn->inferred_async_node != nullptr);
+ assert(fn->inferred_async_node != inferred_async_checking);
+ assert(fn->inferred_async_node != inferred_async_none);
+ if (fn->inferred_async_fn != nullptr) {
+ ErrorMsg *new_msg = add_error_note(g, msg, fn->inferred_async_node,
+ buf_sprintf("async function call here"));
+ return add_async_error_notes(g, new_msg, fn->inferred_async_fn);
+ } else if (fn->inferred_async_node->type == NodeTypeFnProto) {
+ add_error_note(g, msg, fn->inferred_async_node,
+ buf_sprintf("async calling convention here"));
+ } else if (fn->inferred_async_node->type == NodeTypeSuspend) {
+ add_error_note(g, msg, fn->inferred_async_node,
+ buf_sprintf("suspends here"));
+ } else if (fn->inferred_async_node->type == NodeTypeAwaitExpr) {
+ add_error_note(g, msg, fn->inferred_async_node,
+ buf_sprintf("await is a suspend point"));
+ } else if (fn->inferred_async_node->type == NodeTypeFnCallExpr &&
+ fn->inferred_async_node->data.fn_call_expr.is_builtin)
+ {
+ add_error_note(g, msg, fn->inferred_async_node,
+ buf_sprintf("@frame() causes function to be async"));
+ } else {
+ add_error_note(g, msg, fn->inferred_async_node,
+ buf_sprintf("suspends here"));
+ }
+}
+
+// This function resolves functions being inferred async.
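+// fn->inferred_async_node is one of: nullptr (not yet analyzed),
+// inferred_async_checking (analysis in progress), inferred_async_none (proven
+// not async), or the AstNode that causes the function to be async.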
+static void analyze_fn_async(CodeGen *g, ZigFn *fn, bool resolve_frame) {
+ if (fn->inferred_async_node == inferred_async_checking) {
+ // TODO call graph cycle detected, disallow the recursion
+ fn->inferred_async_node = inferred_async_none;
+ return;
+ }
+ if (fn->inferred_async_node == inferred_async_none) {
+ return;
+ }
+ if (fn->inferred_async_node != nullptr) {
+ if (resolve_frame) {
+ resolve_async_fn_frame(g, fn);
+ }
+ return;
+ }
+ fn->inferred_async_node = inferred_async_checking;
+
+ bool must_not_be_async = false;
+ if (fn->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified) {
+ must_not_be_async = true;
+ fn->inferred_async_node = inferred_async_none;
+ }
+
+ for (size_t i = 0; i < fn->call_list.length; i += 1) {
+ IrInstructionCallGen *call = fn->call_list.at(i);
+ ZigFn *callee = call->fn_entry;
+ if (callee == nullptr) {
+ // TODO function pointer call here, could be anything
+ continue;
+ }
+
+ if (callee->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified)
+ continue;
+ if (callee->anal_state == FnAnalStateReady) {
+ analyze_fn_body(g, callee);
+ if (callee->anal_state == FnAnalStateInvalid) {
+ fn->anal_state = FnAnalStateInvalid;
+ return;
+ }
+ }
+ assert(callee->anal_state == FnAnalStateComplete);
+ analyze_fn_async(g, callee, true);
+ if (callee->anal_state == FnAnalStateInvalid) {
+ fn->anal_state = FnAnalStateInvalid;
+ return;
+ }
+ if (fn_is_async(callee)) {
+ fn->inferred_async_node = call->base.source_node;
+ fn->inferred_async_fn = callee;
+ if (must_not_be_async) {
+ ErrorMsg *msg = add_node_error(g, fn->proto_node,
+ buf_sprintf("function with calling convention '%s' cannot be async",
+ calling_convention_name(fn->type_entry->data.fn.fn_type_id.cc)));
+ add_async_error_notes(g, msg, fn);
+ fn->anal_state = FnAnalStateInvalid;
+ return;
+ }
+ if (resolve_frame) {
+ resolve_async_fn_frame(g, fn);
+ }
+ return;
+ }
+ }
+ fn->inferred_async_node = inferred_async_none;
+}
+
+static void analyze_fn_ir(CodeGen *g, ZigFn *fn, AstNode *return_type_node) {
+ ZigType *fn_type = fn->type_entry;
assert(!fn_type->data.fn.is_generic);
FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
- ZigType *block_return_type = ir_analyze(g, &fn_table_entry->ir_executable,
- &fn_table_entry->analyzed_executable, fn_type_id->return_type, return_type_node);
- fn_table_entry->src_implicit_return_type = block_return_type;
+ ZigType *block_return_type = ir_analyze(g, &fn->ir_executable,
+ &fn->analyzed_executable, fn_type_id->return_type, return_type_node);
+ fn->src_implicit_return_type = block_return_type;
- if (type_is_invalid(block_return_type) || fn_table_entry->analyzed_executable.invalid) {
+ if (type_is_invalid(block_return_type) || fn->analyzed_executable.invalid) {
assert(g->errors.length > 0);
- fn_table_entry->anal_state = FnAnalStateInvalid;
+ fn->anal_state = FnAnalStateInvalid;
return;
}
@@ -3799,20 +3916,20 @@ void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_type_node)
ZigType *return_err_set_type = fn_type_id->return_type->data.error_union.err_set_type;
if (return_err_set_type->data.error_set.infer_fn != nullptr) {
ZigType *inferred_err_set_type;
- if (fn_table_entry->src_implicit_return_type->id == ZigTypeIdErrorSet) {
- inferred_err_set_type = fn_table_entry->src_implicit_return_type;
- } else if (fn_table_entry->src_implicit_return_type->id == ZigTypeIdErrorUnion) {
- inferred_err_set_type = fn_table_entry->src_implicit_return_type->data.error_union.err_set_type;
+ if (fn->src_implicit_return_type->id == ZigTypeIdErrorSet) {
+ inferred_err_set_type = fn->src_implicit_return_type;
+ } else if (fn->src_implicit_return_type->id == ZigTypeIdErrorUnion) {
+ inferred_err_set_type = fn->src_implicit_return_type->data.error_union.err_set_type;
} else {
add_node_error(g, return_type_node,
buf_sprintf("function with inferred error set must return at least one possible error"));
- fn_table_entry->anal_state = FnAnalStateInvalid;
+ fn->anal_state = FnAnalStateInvalid;
return;
}
if (inferred_err_set_type->data.error_set.infer_fn != nullptr) {
if (!resolve_inferred_error_set(g, inferred_err_set_type, return_type_node)) {
- fn_table_entry->anal_state = FnAnalStateInvalid;
+ fn->anal_state = FnAnalStateInvalid;
return;
}
}
@@ -3832,13 +3949,25 @@ void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_type_node)
}
}
- if (g->verbose_ir) {
- fprintf(stderr, "fn %s() { // (analyzed)\n", buf_ptr(&fn_table_entry->symbol_name));
- ir_print(g, stderr, &fn_table_entry->analyzed_executable, 4);
- fprintf(stderr, "}\n");
+ CallingConvention cc = fn->type_entry->data.fn.fn_type_id.cc;
+ if (cc != CallingConventionUnspecified && cc != CallingConventionAsync &&
+ fn->inferred_async_node != nullptr &&
+ fn->inferred_async_node != inferred_async_checking &&
+ fn->inferred_async_node != inferred_async_none)
+ {
+ ErrorMsg *msg = add_node_error(g, fn->proto_node,
+ buf_sprintf("function with calling convention '%s' cannot be async",
+ calling_convention_name(cc)));
+ add_async_error_notes(g, msg, fn);
+ fn->anal_state = FnAnalStateInvalid;
}
- fn_table_entry->anal_state = FnAnalStateComplete;
+ if (g->verbose_ir) {
+ fprintf(stderr, "fn %s() { // (analyzed)\n", buf_ptr(&fn->symbol_name));
+ ir_print(g, stderr, &fn->analyzed_executable, 4);
+ fprintf(stderr, "}\n");
+ }
+ fn->anal_state = FnAnalStateComplete;
}
static void analyze_fn_body(CodeGen *g, ZigFn *fn_table_entry) {
@@ -4008,6 +4137,16 @@ void semantic_analyze(CodeGen *g) {
analyze_fn_body(g, fn_entry);
}
}
+
+ if (g->errors.length != 0) {
+ return;
+ }
+
+ // second pass over functions for detecting async
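+ // (whether a function is async can depend on its callees, so this can only
+ // be decided after all function bodies have been analyzed)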
+ for (g->fn_defs_index = 0; g->fn_defs_index < g->fn_defs.length; g->fn_defs_index += 1) {
+ ZigFn *fn_entry = g->fn_defs.at(g->fn_defs_index);
+ analyze_fn_async(g, fn_entry, true);
+ }
}
ZigType *get_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits) {
@@ -4103,11 +4242,12 @@ bool handle_is_ptr(ZigType *type_entry) {
case ZigTypeIdErrorSet:
case ZigTypeIdFn:
case ZigTypeIdEnum:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdAnyFrame:
return false;
case ZigTypeIdArray:
case ZigTypeIdStruct:
+ case ZigTypeIdFnFrame:
return type_has_bits(type_entry);
case ZigTypeIdErrorUnion:
return type_has_bits(type_entry->data.error_union.payload_type);
@@ -4143,7 +4283,6 @@ uint32_t fn_type_id_hash(FnTypeId *id) {
result += ((uint32_t)(id->cc)) * (uint32_t)3349388391;
result += id->is_var_args ? (uint32_t)1931444534 : 0;
result += hash_ptr(id->return_type);
- result += hash_ptr(id->async_allocator_type);
result += id->alignment * 0xd3b3f3e2;
for (size_t i = 0; i < id->param_count; i += 1) {
FnTypeParamInfo *info = &id->param_info[i];
@@ -4158,8 +4297,7 @@ bool fn_type_id_eql(FnTypeId *a, FnTypeId *b) {
a->return_type != b->return_type ||
a->is_var_args != b->is_var_args ||
a->param_count != b->param_count ||
- a->alignment != b->alignment ||
- a->async_allocator_type != b->async_allocator_type)
+ a->alignment != b->alignment)
{
return false;
}
@@ -4321,9 +4459,6 @@ static uint32_t hash_const_val(ConstExprValue *const_val) {
return 3677364617 ^ hash_ptr(const_val->data.x_ptr.data.fn.fn_entry);
case ZigTypeIdPointer:
return hash_const_val_ptr(const_val);
- case ZigTypeIdPromise:
- // TODO better hashing algorithm
- return 223048345;
case ZigTypeIdUndefined:
return 162837799;
case ZigTypeIdNull:
@@ -4357,6 +4492,12 @@ static uint32_t hash_const_val(ConstExprValue *const_val) {
case ZigTypeIdVector:
// TODO better hashing algorithm
return 3647867726;
+ case ZigTypeIdFnFrame:
+ // TODO better hashing algorithm
+ return 675741936;
+ case ZigTypeIdAnyFrame:
+ // TODO better hashing algorithm
+ return 3747294894;
case ZigTypeIdBoundFn:
case ZigTypeIdInvalid:
case ZigTypeIdUnreachable:
@@ -4389,7 +4530,7 @@ bool generic_fn_type_id_eql(GenericFnTypeId *a, GenericFnTypeId *b) {
if (a_val->special != ConstValSpecialRuntime && b_val->special != ConstValSpecialRuntime) {
assert(a_val->special == ConstValSpecialStatic);
assert(b_val->special == ConstValSpecialStatic);
- if (!const_values_equal(a->fn_entry->codegen, a_val, b_val)) {
+ if (!const_values_equal(a->codegen, a_val, b_val)) {
return false;
}
} else {
@@ -4419,9 +4560,10 @@ static bool can_mutate_comptime_var_state(ConstExprValue *value) {
case ZigTypeIdBoundFn:
case ZigTypeIdFn:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
case ZigTypeIdErrorSet:
case ZigTypeIdEnum:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return false;
case ZigTypeIdPointer:
@@ -4489,11 +4631,12 @@ static bool return_type_is_cacheable(ZigType *return_type) {
case ZigTypeIdBoundFn:
case ZigTypeIdFn:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
case ZigTypeIdErrorSet:
case ZigTypeIdEnum:
case ZigTypeIdPointer:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return true;
case ZigTypeIdArray:
@@ -4624,8 +4767,9 @@ OnePossibleValue type_has_one_possible_value(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdFn:
case ZigTypeIdBool:
case ZigTypeIdFloat:
- case ZigTypeIdPromise:
case ZigTypeIdErrorUnion:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return OnePossibleValueNo;
case ZigTypeIdUndefined:
case ZigTypeIdNull:
@@ -4713,7 +4857,8 @@ ReqCompTime type_requires_comptime(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdFloat:
case ZigTypeIdVoid:
case ZigTypeIdUnreachable:
- case ZigTypeIdPromise:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return ReqCompTimeNo;
}
zig_unreachable();
@@ -5032,6 +5177,223 @@ Error ensure_complete_type(CodeGen *g, ZigType *type_entry) {
return type_resolve(g, type_entry, ResolveStatusSizeKnown);
}
+static ZigType *get_async_fn_type(CodeGen *g, ZigType *orig_fn_type) {
+ if (orig_fn_type->data.fn.fn_type_id.cc == CallingConventionAsync)
+ return orig_fn_type;
+
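+ // Shallow-copy the function type with the calling convention changed to async.
+ // The LLVM type refs are cleared so they get regenerated for the new convention.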
+ ZigType *fn_type = allocate_nonzero<ZigType>(1);
+ *fn_type = *orig_fn_type;
+ fn_type->data.fn.fn_type_id.cc = CallingConventionAsync;
+ fn_type->llvm_type = nullptr;
+ fn_type->llvm_di_type = nullptr;
+
+ return fn_type;
+}
+
+static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
+ Error err;
+
+ if (frame_type->data.frame.locals_struct != nullptr)
+ return ErrorNone;
+
+ ZigFn *fn = frame_type->data.frame.fn;
+ assert(!fn->type_entry->data.fn.is_generic);
+
+ switch (fn->anal_state) {
+ case FnAnalStateInvalid:
+ return ErrorSemanticAnalyzeFail;
+ case FnAnalStateComplete:
+ break;
+ case FnAnalStateReady:
+ analyze_fn_body(g, fn);
+ if (fn->anal_state == FnAnalStateInvalid)
+ return ErrorSemanticAnalyzeFail;
+ break;
+ case FnAnalStateProbing: {
+ ErrorMsg *msg = add_node_error(g, fn->proto_node,
+ buf_sprintf("cannot resolve '%s': function not fully analyzed yet",
+ buf_ptr(&frame_type->name)));
+ ir_add_analysis_trace(fn->ir_executable.analysis, msg,
+ buf_sprintf("depends on its own frame here"));
+ return ErrorSemanticAnalyzeFail;
+ }
+ }
+ analyze_fn_async(g, fn, false);
+ if (fn->anal_state == FnAnalStateInvalid)
+ return ErrorSemanticAnalyzeFail;
+
+ if (!fn_is_async(fn)) {
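+ // Non-async function: the frame needs only the header fields (plus error
+ // return trace data when applicable); no parameters or locals are spilled.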
+ ZigType *fn_type = fn->type_entry;
+ FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
+ ZigType *ptr_return_type = get_pointer_to_type(g, fn_type_id->return_type, false);
+
+ // label (grep this): [fn_frame_struct_layout]
+ ZigList<SrcField> fields = {};
+
+ fields.append({"@fn_ptr", g->builtin_types.entry_usize, 0});
+ fields.append({"@resume_index", g->builtin_types.entry_usize, 0});
+ fields.append({"@awaiter", g->builtin_types.entry_usize, 0});
+
+ fields.append({"@result_ptr_callee", ptr_return_type, 0});
+ fields.append({"@result_ptr_awaiter", ptr_return_type, 0});
+ fields.append({"@result", fn_type_id->return_type, 0});
+
+ if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
+ ZigType *ptr_to_stack_trace_type = get_pointer_to_type(g, get_stack_trace_type(g), false);
+ fields.append({"@ptr_stack_trace_callee", ptr_to_stack_trace_type, 0});
+ fields.append({"@ptr_stack_trace_awaiter", ptr_to_stack_trace_type, 0});
+
+ fields.append({"@stack_trace", get_stack_trace_type(g), 0});
+ fields.append({"@instruction_addresses",
+ get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count), 0});
+ }
+
+ frame_type->data.frame.locals_struct = get_struct_type(g, buf_ptr(&frame_type->name),
+ fields.items, fields.length, target_fn_align(g->zig_target));
+ frame_type->abi_size = frame_type->data.frame.locals_struct->abi_size;
+ frame_type->abi_align = frame_type->data.frame.locals_struct->abi_align;
+ frame_type->size_in_bits = frame_type->data.frame.locals_struct->size_in_bits;
+
+ return ErrorNone;
+ }
+
+ ZigType *fn_type = get_async_fn_type(g, fn->type_entry);
+
+ if (fn->analyzed_executable.need_err_code_spill) {
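+ // Give the error code its own slot in the frame so it survives across suspend points.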
+ IrInstructionAllocaGen *alloca_gen = allocate<IrInstructionAllocaGen>(1);
+ alloca_gen->base.id = IrInstructionIdAllocaGen;
+ alloca_gen->base.source_node = fn->proto_node;
+ alloca_gen->base.scope = fn->child_scope;
+ alloca_gen->base.value.type = get_pointer_to_type(g, g->builtin_types.entry_global_error_set, false);
+ alloca_gen->base.ref_count = 1;
+ alloca_gen->name_hint = "";
+ fn->alloca_gen_list.append(alloca_gen);
+ fn->err_code_spill = &alloca_gen->base;
+ }
+
+ for (size_t i = 0; i < fn->call_list.length; i += 1) {
+ IrInstructionCallGen *call = fn->call_list.at(i);
+ ZigFn *callee = call->fn_entry;
+ if (callee == nullptr) {
+ add_node_error(g, call->base.source_node,
+ buf_sprintf("function is not comptime-known; @asyncCall required"));
+ return ErrorSemanticAnalyzeFail;
+ }
+ if (callee->body_node == nullptr) {
+ continue;
+ }
+ if (callee->anal_state == FnAnalStateProbing) {
+ ErrorMsg *msg = add_node_error(g, fn->proto_node,
+ buf_sprintf("unable to determine async function frame of '%s'", buf_ptr(&fn->symbol_name)));
+ ErrorMsg *note = add_error_note(g, msg, call->base.source_node,
+ buf_sprintf("analysis of function '%s' depends on the frame", buf_ptr(&callee->symbol_name)));
+ ir_add_analysis_trace(callee->ir_executable.analysis, note,
+ buf_sprintf("depends on the frame here"));
+ return ErrorSemanticAnalyzeFail;
+ }
+
+ analyze_fn_body(g, callee);
+ if (callee->anal_state == FnAnalStateInvalid) {
+ frame_type->data.frame.locals_struct = g->builtin_types.entry_invalid;
+ return ErrorSemanticAnalyzeFail;
+ }
+ analyze_fn_async(g, callee, true);
+ if (!fn_is_async(callee))
+ continue;
+
+ ZigType *callee_frame_type = get_fn_frame_type(g, callee);
+
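+ // Reserve space for the callee's frame inside this function's own frame.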
+ IrInstructionAllocaGen *alloca_gen = allocate<IrInstructionAllocaGen>(1);
+ alloca_gen->base.id = IrInstructionIdAllocaGen;
+ alloca_gen->base.source_node = call->base.source_node;
+ alloca_gen->base.scope = call->base.scope;
+ alloca_gen->base.value.type = get_pointer_to_type(g, callee_frame_type, false);
+ alloca_gen->base.ref_count = 1;
+ alloca_gen->name_hint = "";
+ fn->alloca_gen_list.append(alloca_gen);
+ call->frame_result_loc = &alloca_gen->base;
+ }
+ FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
+ ZigType *ptr_return_type = get_pointer_to_type(g, fn_type_id->return_type, false);
+
+ // label (grep this): [fn_frame_struct_layout]
+ ZigList<SrcField> fields = {};
+
+ fields.append({"@fn_ptr", fn_type, 0});
+ fields.append({"@resume_index", g->builtin_types.entry_usize, 0});
+ fields.append({"@awaiter", g->builtin_types.entry_usize, 0});
+
+ fields.append({"@result_ptr_callee", ptr_return_type, 0});
+ fields.append({"@result_ptr_awaiter", ptr_return_type, 0});
+ fields.append({"@result", fn_type_id->return_type, 0});
+
+ if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
+ ZigType *ptr_stack_trace_type = get_pointer_to_type(g, get_stack_trace_type(g), false);
+ fields.append({"@ptr_stack_trace_callee", ptr_stack_trace_type, 0});
+ fields.append({"@ptr_stack_trace_awaiter", ptr_stack_trace_type, 0});
+ }
+
+ for (size_t arg_i = 0; arg_i < fn_type_id->param_count; arg_i += 1) {
+ FnTypeParamInfo *param_info = &fn_type_id->param_info[arg_i];
+ AstNode *param_decl_node = get_param_decl_node(fn, arg_i);
+ Buf *param_name;
+ bool is_var_args = param_decl_node && param_decl_node->data.param_decl.is_var_args;
+ if (param_decl_node && !is_var_args) {
+ param_name = param_decl_node->data.param_decl.name;
+ } else {
+ param_name = buf_sprintf("@arg%" ZIG_PRI_usize, arg_i);
+ }
+ ZigType *param_type = param_info->type;
+
+ fields.append({buf_ptr(param_name), param_type, 0});
+ }
+
+ if (codegen_fn_has_err_ret_tracing_stack(g, fn, true)) {
+ fields.append({"@stack_trace", get_stack_trace_type(g), 0});
+ fields.append({"@instruction_addresses",
+ get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count), 0});
+ }
+
+ for (size_t alloca_i = 0; alloca_i < fn->alloca_gen_list.length; alloca_i += 1) {
+ IrInstructionAllocaGen *instruction = fn->alloca_gen_list.at(alloca_i);
+ instruction->field_index = SIZE_MAX;
+ ZigType *ptr_type = instruction->base.value.type;
+ assert(ptr_type->id == ZigTypeIdPointer);
+ ZigType *child_type = ptr_type->data.pointer.child_type;
+ if (!type_has_bits(child_type))
+ continue;
+ if (instruction->base.ref_count == 0)
+ continue;
+ if (instruction->base.value.special != ConstValSpecialRuntime) {
+ if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special !=
+ ConstValSpecialRuntime)
+ {
+ continue;
+ }
+ }
+ if ((err = type_resolve(g, child_type, ResolveStatusSizeKnown))) {
+ return err;
+ }
+ const char *name;
+ if (*instruction->name_hint == 0) {
+ name = buf_ptr(buf_sprintf("@local%" ZIG_PRI_usize, alloca_i));
+ } else {
+ name = buf_ptr(buf_sprintf("%s.%" ZIG_PRI_usize, instruction->name_hint, alloca_i));
+ }
+ instruction->field_index = fields.length;
+
+ fields.append({name, child_type, instruction->align});
+ }
+
+ frame_type->data.frame.locals_struct = get_struct_type(g, buf_ptr(&frame_type->name),
+ fields.items, fields.length, target_fn_align(g->zig_target));
+ frame_type->abi_size = frame_type->data.frame.locals_struct->abi_size;
+ frame_type->abi_align = frame_type->data.frame.locals_struct->abi_align;
+ frame_type->size_in_bits = frame_type->data.frame.locals_struct->size_in_bits;
+ return ErrorNone;
+}
+
Error type_resolve(CodeGen *g, ZigType *ty, ResolveStatus status) {
if (type_is_invalid(ty))
return ErrorSemanticAnalyzeFail;
@@ -5056,6 +5418,8 @@ Error type_resolve(CodeGen *g, ZigType *ty, ResolveStatus status) {
return resolve_enum_zero_bits(g, ty);
} else if (ty->id == ZigTypeIdUnion) {
return resolve_union_alignment(g, ty);
+ } else if (ty->id == ZigTypeIdFnFrame) {
+ return resolve_async_frame(g, ty);
}
return ErrorNone;
case ResolveStatusSizeKnown:
@@ -5065,6 +5429,8 @@ Error type_resolve(CodeGen *g, ZigType *ty, ResolveStatus status) {
return resolve_enum_zero_bits(g, ty);
} else if (ty->id == ZigTypeIdUnion) {
return resolve_union_type(g, ty);
+ } else if (ty->id == ZigTypeIdFnFrame) {
+ return resolve_async_frame(g, ty);
}
return ErrorNone;
case ResolveStatusLLVMFwdDecl:
@@ -5259,6 +5625,10 @@ bool const_values_equal(CodeGen *g, ConstExprValue *a, ConstExprValue *b) {
return false;
}
return true;
+ case ZigTypeIdFnFrame:
+ zig_panic("TODO");
+ case ZigTypeIdAnyFrame:
+ zig_panic("TODO");
case ZigTypeIdUndefined:
zig_panic("TODO");
case ZigTypeIdNull:
@@ -5279,7 +5649,6 @@ bool const_values_equal(CodeGen *g, ConstExprValue *a, ConstExprValue *b) {
case ZigTypeIdBoundFn:
case ZigTypeIdInvalid:
case ZigTypeIdUnreachable:
- case ZigTypeIdPromise:
zig_unreachable();
}
zig_unreachable();
@@ -5612,8 +5981,14 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) {
buf_appendf(buf, "(args value)");
return;
}
- case ZigTypeIdPromise:
- zig_unreachable();
+ case ZigTypeIdFnFrame:
+ buf_appendf(buf, "(TODO: async function frame value)");
+ return;
+
+ case ZigTypeIdAnyFrame:
+ buf_appendf(buf, "(TODO: anyframe value)");
+ return;
zig_unreachable();
}
@@ -5627,6 +6002,15 @@ ZigType *make_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits) {
entry->llvm_type = LLVMIntType(size_in_bits);
entry->abi_size = LLVMABISizeOfType(g->target_data_ref, entry->llvm_type);
entry->abi_align = LLVMABIAlignmentOfType(g->target_data_ref, entry->llvm_type);
+
+ if (size_in_bits >= 128) {
+ // Override the incorrect alignment reported by LLVM. Clang does this as well.
+ // On x86_64 there are some instructions like CMPXCHG16B which require this.
+ // On all targets, integers 128 bits and above have ABI alignment of 16.
+ // See: https://github.com/ziglang/zig/issues/2987
+ assert(entry->abi_align == 8); // if this trips we can remove the workaround
+ entry->abi_align = 16;
+ }
}
const char u_or_i = is_signed ? 'i' : 'u';
@@ -5660,7 +6044,8 @@ uint32_t type_id_hash(TypeId x) {
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
case ZigTypeIdErrorUnion:
return hash_ptr(x.data.error_union.err_set_type) ^ hash_ptr(x.data.error_union.payload_type);
@@ -5702,7 +6087,6 @@ bool type_id_eql(TypeId a, TypeId b) {
case ZigTypeIdUndefined:
case ZigTypeIdNull:
case ZigTypeIdOptional:
- case ZigTypeIdPromise:
case ZigTypeIdErrorSet:
case ZigTypeIdEnum:
case ZigTypeIdUnion:
@@ -5710,6 +6094,8 @@ bool type_id_eql(TypeId a, TypeId b) {
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
case ZigTypeIdErrorUnion:
return a.data.error_union.err_set_type == b.data.error_union.err_set_type &&
@@ -5875,7 +6261,8 @@ static const ZigTypeId all_type_ids[] = {
ZigTypeIdBoundFn,
ZigTypeIdArgTuple,
ZigTypeIdOpaque,
- ZigTypeIdPromise,
+ ZigTypeIdFnFrame,
+ ZigTypeIdAnyFrame,
ZigTypeIdVector,
ZigTypeIdEnumLiteral,
};
@@ -5939,12 +6326,14 @@ size_t type_id_index(ZigType *entry) {
return 20;
case ZigTypeIdOpaque:
return 21;
- case ZigTypeIdPromise:
+ case ZigTypeIdFnFrame:
return 22;
- case ZigTypeIdVector:
+ case ZigTypeIdAnyFrame:
return 23;
- case ZigTypeIdEnumLiteral:
+ case ZigTypeIdVector:
return 24;
+ case ZigTypeIdEnumLiteral:
+ return 25;
}
zig_unreachable();
}
@@ -5999,10 +6388,12 @@ const char *type_id_name(ZigTypeId id) {
return "ArgTuple";
case ZigTypeIdOpaque:
return "Opaque";
- case ZigTypeIdPromise:
- return "Promise";
case ZigTypeIdVector:
return "Vector";
+ case ZigTypeIdFnFrame:
+ return "Frame";
+ case ZigTypeIdAnyFrame:
+ return "AnyFrame";
}
zig_unreachable();
}
@@ -6067,19 +6458,12 @@ bool type_is_global_error_set(ZigType *err_set_type) {
return err_set_type->data.error_set.err_count == UINT32_MAX;
}
-uint32_t get_coro_frame_align_bytes(CodeGen *g) {
- uint32_t a = g->pointer_size_bytes * 2;
- // promises have at least alignment 8 so that we can have 3 extra bits when doing atomicrmw
- if (a < 8) a = 8;
- return a;
-}
-
bool type_can_fail(ZigType *type_entry) {
return type_entry->id == ZigTypeIdErrorUnion || type_entry->id == ZigTypeIdErrorSet;
}
bool fn_type_can_fail(FnTypeId *fn_type_id) {
- return type_can_fail(fn_type_id->return_type) || fn_type_id->cc == CallingConventionAsync;
+ return type_can_fail(fn_type_id->return_type);
}
// ErrorNone - result pointer has the type
@@ -6449,7 +6833,9 @@ static void resolve_llvm_types_slice(CodeGen *g, ZigType *type, ResolveStatus wa
type->data.structure.resolve_status = ResolveStatusLLVMFull;
}
-static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveStatus wanted_resolve_status) {
+static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveStatus wanted_resolve_status,
+ ZigType *async_frame_type)
+{
assert(struct_type->id == ZigTypeIdStruct);
assert(struct_type->data.structure.resolve_status != ResolveStatusInvalid);
assert(struct_type->data.structure.resolve_status >= ResolveStatusSizeKnown);
@@ -6486,10 +6872,9 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
}
size_t field_count = struct_type->data.structure.src_field_count;
- size_t gen_field_count = struct_type->data.structure.gen_field_count;
- LLVMTypeRef *element_types = allocate<LLVMTypeRef>(gen_field_count);
+ // Every field could potentially have a generated padding field after it.
+ LLVMTypeRef *element_types = allocate<LLVMTypeRef>(field_count * 2);
- size_t gen_field_index = 0;
bool packed = (struct_type->data.structure.layout == ContainerLayoutPacked);
size_t packed_bits_offset = 0;
size_t first_packed_bits_offset_misalign = SIZE_MAX;
@@ -6497,20 +6882,36 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
// trigger all the recursive get_llvm_type calls
for (size_t i = 0; i < field_count; i += 1) {
- TypeStructField *type_struct_field = &struct_type->data.structure.fields[i];
- ZigType *field_type = type_struct_field->type_entry;
+ TypeStructField *field = &struct_type->data.structure.fields[i];
+ ZigType *field_type = field->type_entry;
if (!type_has_bits(field_type))
continue;
(void)get_llvm_type(g, field_type);
if (struct_type->data.structure.resolve_status >= wanted_resolve_status) return;
}
- for (size_t i = 0; i < field_count; i += 1) {
- TypeStructField *type_struct_field = &struct_type->data.structure.fields[i];
- ZigType *field_type = type_struct_field->type_entry;
+ size_t gen_field_index = 0;
+ // Calculate what LLVM thinks the ABI align of the struct will be. We do this to avoid
+ // inserting padding bytes where LLVM would do it automatically.
+ size_t llvm_struct_abi_align = 0;
+ for (size_t i = 0; i < field_count; i += 1) {
+ ZigType *field_type = struct_type->data.structure.fields[i].type_entry;
if (!type_has_bits(field_type))
continue;
+ LLVMTypeRef field_llvm_type = get_llvm_type(g, field_type);
+ size_t llvm_field_abi_align = LLVMABIAlignmentOfType(g->target_data_ref, field_llvm_type);
+ llvm_struct_abi_align = max(llvm_struct_abi_align, llvm_field_abi_align);
+ }
+
+ for (size_t i = 0; i < field_count; i += 1) {
+ TypeStructField *field = &struct_type->data.structure.fields[i];
+ ZigType *field_type = field->type_entry;
+
+ if (!type_has_bits(field_type)) {
+ field->gen_index = SIZE_MAX;
+ continue;
+ }
if (packed) {
size_t field_size_in_bits = type_size_bits(g, field_type);
@@ -6537,12 +6938,61 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
}
packed_bits_offset = next_packed_bits_offset;
} else {
- element_types[gen_field_index] = get_llvm_type(g, field_type);
-
+ LLVMTypeRef llvm_type;
+ if (i == 0 && async_frame_type != nullptr) {
+ assert(async_frame_type->id == ZigTypeIdFnFrame);
+ assert(field_type->id == ZigTypeIdFn);
+ resolve_llvm_types_fn(g, async_frame_type->data.frame.fn);
+ llvm_type = LLVMPointerType(async_frame_type->data.frame.fn->raw_type_ref, 0);
+ } else {
+ llvm_type = get_llvm_type(g, field_type);
+ }
+ element_types[gen_field_index] = llvm_type;
+ field->gen_index = gen_field_index;
gen_field_index += 1;
+
+ // find the next non-zero-byte field for offset calculations
+ size_t next_src_field_index = i + 1;
+ for (; next_src_field_index < field_count; next_src_field_index += 1) {
+ if (type_has_bits(struct_type->data.structure.fields[next_src_field_index].type_entry))
+ break;
+ }
+ size_t next_abi_align;
+ if (next_src_field_index == field_count) {
+ next_abi_align = struct_type->abi_align;
+ } else {
+ if (struct_type->data.structure.fields[next_src_field_index].align == 0) {
+ next_abi_align = struct_type->data.structure.fields[next_src_field_index].type_entry->abi_align;
+ } else {
+ next_abi_align = struct_type->data.structure.fields[next_src_field_index].align;
+ }
+ }
+ size_t llvm_next_abi_align = (next_src_field_index == field_count) ?
+ llvm_struct_abi_align :
+ LLVMABIAlignmentOfType(g->target_data_ref,
+ get_llvm_type(g, struct_type->data.structure.fields[next_src_field_index].type_entry));
+
+ size_t next_offset = next_field_offset(field->offset, struct_type->abi_align,
+ field_type->abi_size, next_abi_align);
+ size_t llvm_next_offset = next_field_offset(field->offset, llvm_struct_abi_align,
+ LLVMABISizeOfType(g->target_data_ref, llvm_type), llvm_next_abi_align);
+
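+ // If Zig's layout places the next field further out than LLVM's natural
+ // layout would, insert explicit padding bytes so the two layouts agree.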
+ assert(next_offset >= llvm_next_offset);
+ if (next_offset > llvm_next_offset) {
+ size_t pad_bytes = next_offset - (field->offset + field_type->abi_size);
+ if (pad_bytes != 0) {
+ LLVMTypeRef pad_llvm_type = LLVMArrayType(LLVMInt8Type(), pad_bytes);
+ element_types[gen_field_index] = pad_llvm_type;
+ gen_field_index += 1;
+ }
+ }
}
debug_field_count += 1;
}
+ if (!packed) {
+ struct_type->data.structure.gen_field_count = gen_field_index;
+ }
+
if (first_packed_bits_offset_misalign != SIZE_MAX) {
size_t full_bit_count = packed_bits_offset - first_packed_bits_offset_misalign;
size_t full_abi_size = get_abi_size_bytes(full_bit_count, g->pointer_size_bytes);
@@ -6551,19 +7001,20 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
}
if (type_has_bits(struct_type)) {
- LLVMStructSetBody(struct_type->llvm_type, element_types, (unsigned)gen_field_count, packed);
+ LLVMStructSetBody(struct_type->llvm_type, element_types,
+ (unsigned)struct_type->data.structure.gen_field_count, packed);
}
ZigLLVMDIType **di_element_types = allocate<ZigLLVMDIType *>(debug_field_count);
size_t debug_field_index = 0;
for (size_t i = 0; i < field_count; i += 1) {
- TypeStructField *type_struct_field = &struct_type->data.structure.fields[i];
- size_t gen_field_index = type_struct_field->gen_index;
+ TypeStructField *field = &struct_type->data.structure.fields[i];
+ size_t gen_field_index = field->gen_index;
if (gen_field_index == SIZE_MAX) {
continue;
}
- ZigType *field_type = type_struct_field->type_entry;
+ ZigType *field_type = field->type_entry;
// if the field is a function, actually the debug info should be a pointer.
ZigLLVMDIType *field_di_type;
@@ -6581,13 +7032,13 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
uint64_t debug_align_in_bits;
uint64_t debug_offset_in_bits;
if (packed) {
- debug_size_in_bits = type_struct_field->type_entry->size_in_bits;
- debug_align_in_bits = 8 * type_struct_field->type_entry->abi_align;
- debug_offset_in_bits = 8 * type_struct_field->offset + type_struct_field->bit_offset_in_host;
+ debug_size_in_bits = field->type_entry->size_in_bits;
+ debug_align_in_bits = 8 * field->type_entry->abi_align;
+ debug_offset_in_bits = 8 * field->offset + field->bit_offset_in_host;
} else {
debug_size_in_bits = 8 * get_store_size_bytes(field_type->size_in_bits);
debug_align_in_bits = 8 * field_type->abi_align;
- debug_offset_in_bits = 8 * type_struct_field->offset;
+ debug_offset_in_bits = 8 * field->offset;
}
unsigned line;
if (decl_node != nullptr) {
@@ -6597,7 +7048,7 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
line = 0;
}
di_element_types[debug_field_index] = ZigLLVMCreateDebugMemberType(g->dbuilder,
- ZigLLVMTypeToScope(struct_type->llvm_di_type), buf_ptr(type_struct_field->name),
+ ZigLLVMTypeToScope(struct_type->llvm_di_type), buf_ptr(field->name),
di_file, line,
debug_size_in_bits,
debug_align_in_bits,
@@ -6838,7 +7289,7 @@ static void resolve_llvm_types_union(CodeGen *g, ZigType *union_type, ResolveSta
union_type->data.unionation.resolve_status = ResolveStatusLLVMFull;
}
-static void resolve_llvm_types_pointer(CodeGen *g, ZigType *type) {
+static void resolve_llvm_types_pointer(CodeGen *g, ZigType *type, ResolveStatus wanted_resolve_status) {
if (type->llvm_di_type != nullptr) return;
if (!type_has_bits(type)) {
@@ -6867,7 +7318,7 @@ static void resolve_llvm_types_pointer(CodeGen *g, ZigType *type) {
uint64_t debug_align_in_bits = 8*type->abi_align;
type->llvm_di_type = ZigLLVMCreateDebugPointerType(g->dbuilder, elem_type->llvm_di_type,
debug_size_in_bits, debug_align_in_bits, buf_ptr(&type->name));
- assertNoError(type_resolve(g, elem_type, ResolveStatusLLVMFull));
+ assertNoError(type_resolve(g, elem_type, wanted_resolve_status));
} else {
ZigType *host_int_type = get_int_type(g, false, type->data.pointer.host_int_bytes * 8);
LLVMTypeRef host_int_llvm_type = get_llvm_type(g, host_int_type);
@@ -6993,10 +7444,17 @@ static void resolve_llvm_types_error_union(CodeGen *g, ZigType *type) {
} else {
LLVMTypeRef err_set_llvm_type = get_llvm_type(g, err_set_type);
LLVMTypeRef payload_llvm_type = get_llvm_type(g, payload_type);
- LLVMTypeRef elem_types[2];
+ LLVMTypeRef elem_types[3];
elem_types[err_union_err_index] = err_set_llvm_type;
elem_types[err_union_payload_index] = payload_llvm_type;
+
type->llvm_type = LLVMStructType(elem_types, 2, false);
+ if (LLVMABISizeOfType(g->target_data_ref, type->llvm_type) != type->abi_size) {
+ // we need to do our own padding
+ type->data.error_union.pad_llvm_type = LLVMArrayType(LLVMInt8Type(), type->data.error_union.pad_bytes);
+ elem_types[2] = type->data.error_union.pad_llvm_type;
+ type->llvm_type = LLVMStructType(elem_types, 3, false);
+ }
ZigLLVMDIScope *compile_unit_scope = ZigLLVMCompileUnitToScope(g->compile_unit);
ZigLLVMDIFile *di_file = nullptr;
@@ -7017,20 +7475,21 @@ static void resolve_llvm_types_error_union(CodeGen *g, ZigType *type) {
uint64_t debug_size_in_bits = 8*LLVMStoreSizeOfType(g->target_data_ref, type->llvm_type);
uint64_t debug_align_in_bits = 8*LLVMABISizeOfType(g->target_data_ref, type->llvm_type);
- ZigLLVMDIType *di_element_types[] = {
- ZigLLVMCreateDebugMemberType(g->dbuilder, ZigLLVMTypeToScope(type->llvm_di_type),
+ ZigLLVMDIType *di_element_types[2];
+ di_element_types[err_union_err_index] = ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(type->llvm_di_type),
"tag", di_file, line,
tag_debug_size_in_bits,
tag_debug_align_in_bits,
tag_offset_in_bits,
- ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, err_set_type)),
- ZigLLVMCreateDebugMemberType(g->dbuilder, ZigLLVMTypeToScope(type->llvm_di_type),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, err_set_type));
+ di_element_types[err_union_payload_index] = ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(type->llvm_di_type),
"value", di_file, line,
value_debug_size_in_bits,
value_debug_align_in_bits,
value_offset_in_bits,
- ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, payload_type)),
- };
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, payload_type));
ZigLLVMDIType *replacement_di_type = ZigLLVMCreateDebugStructType(g->dbuilder,
compile_unit_scope,
@@ -7067,7 +7526,7 @@ static void resolve_llvm_types_array(CodeGen *g, ZigType *type) {
debug_align_in_bits, get_llvm_di_type(g, elem_type), (int)type->data.array.len);
}
-static void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type) {
+static void resolve_llvm_types_fn_type(CodeGen *g, ZigType *fn_type) {
if (fn_type->llvm_di_type != nullptr) return;
FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
@@ -7084,67 +7543,73 @@ static void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type) {
// +1 for maybe first argument the error return trace
// +2 for maybe arguments async allocator and error code pointer
ZigList<ZigLLVMDIType *> param_di_types = {};
- param_di_types.append(get_llvm_di_type(g, fn_type_id->return_type));
ZigType *gen_return_type;
if (is_async) {
- gen_return_type = get_pointer_to_type(g, g->builtin_types.entry_u8, false);
+ gen_return_type = g->builtin_types.entry_void;
+ param_di_types.append(get_llvm_di_type(g, gen_return_type));
} else if (!type_has_bits(fn_type_id->return_type)) {
gen_return_type = g->builtin_types.entry_void;
+ param_di_types.append(get_llvm_di_type(g, gen_return_type));
} else if (first_arg_return) {
+ gen_return_type = g->builtin_types.entry_void;
+ param_di_types.append(get_llvm_di_type(g, gen_return_type));
ZigType *gen_type = get_pointer_to_type(g, fn_type_id->return_type, false);
gen_param_types.append(get_llvm_type(g, gen_type));
param_di_types.append(get_llvm_di_type(g, gen_type));
- gen_return_type = g->builtin_types.entry_void;
} else {
gen_return_type = fn_type_id->return_type;
+ param_di_types.append(get_llvm_di_type(g, gen_return_type));
}
fn_type->data.fn.gen_return_type = gen_return_type;
- if (prefix_arg_error_return_trace) {
- ZigType *gen_type = get_ptr_to_stack_trace_type(g);
+ if (prefix_arg_error_return_trace && !is_async) {
+ ZigType *gen_type = get_pointer_to_type(g, get_stack_trace_type(g), false);
gen_param_types.append(get_llvm_type(g, gen_type));
param_di_types.append(get_llvm_di_type(g, gen_type));
}
if (is_async) {
- {
- // async allocator param
- ZigType *gen_type = fn_type_id->async_allocator_type;
+ fn_type->data.fn.gen_param_info = allocate<FnGenParamInfo>(2);
+
+ ZigType *frame_type = get_any_frame_type(g, fn_type_id->return_type);
+ gen_param_types.append(get_llvm_type(g, frame_type));
+ param_di_types.append(get_llvm_di_type(g, frame_type));
+
+ fn_type->data.fn.gen_param_info[0].src_index = 0;
+ fn_type->data.fn.gen_param_info[0].gen_index = 0;
+ fn_type->data.fn.gen_param_info[0].type = frame_type;
+
+ gen_param_types.append(get_llvm_type(g, g->builtin_types.entry_usize));
+ param_di_types.append(get_llvm_di_type(g, g->builtin_types.entry_usize));
+
+ fn_type->data.fn.gen_param_info[1].src_index = 1;
+ fn_type->data.fn.gen_param_info[1].gen_index = 1;
+ fn_type->data.fn.gen_param_info[1].type = g->builtin_types.entry_usize;
+ } else {
+ fn_type->data.fn.gen_param_info = allocate<FnGenParamInfo>(fn_type_id->param_count);
+ for (size_t i = 0; i < fn_type_id->param_count; i += 1) {
+ FnTypeParamInfo *src_param_info = &fn_type->data.fn.fn_type_id.param_info[i];
+ ZigType *type_entry = src_param_info->type;
+ FnGenParamInfo *gen_param_info = &fn_type->data.fn.gen_param_info[i];
+
+ gen_param_info->src_index = i;
+ gen_param_info->gen_index = SIZE_MAX;
+
+ if (is_c_abi || !type_has_bits(type_entry))
+ continue;
+
+ ZigType *gen_type;
+ if (handle_is_ptr(type_entry)) {
+ gen_type = get_pointer_to_type(g, type_entry, true);
+ gen_param_info->is_byval = true;
+ } else {
+ gen_type = type_entry;
+ }
+ gen_param_info->gen_index = gen_param_types.length;
+ gen_param_info->type = gen_type;
gen_param_types.append(get_llvm_type(g, gen_type));
+
param_di_types.append(get_llvm_di_type(g, gen_type));
}
-
- {
- // error code pointer
- ZigType *gen_type = get_pointer_to_type(g, g->builtin_types.entry_global_error_set, false);
- gen_param_types.append(get_llvm_type(g, gen_type));
- param_di_types.append(get_llvm_di_type(g, gen_type));
- }
- }
-
- fn_type->data.fn.gen_param_info = allocate<FnGenParamInfo>(fn_type_id->param_count);
- for (size_t i = 0; i < fn_type_id->param_count; i += 1) {
- FnTypeParamInfo *src_param_info = &fn_type->data.fn.fn_type_id.param_info[i];
- ZigType *type_entry = src_param_info->type;
- FnGenParamInfo *gen_param_info = &fn_type->data.fn.gen_param_info[i];
-
- gen_param_info->src_index = i;
- gen_param_info->gen_index = SIZE_MAX;
-
- if (is_c_abi || !type_has_bits(type_entry))
- continue;
-
- ZigType *gen_type;
- if (handle_is_ptr(type_entry)) {
- gen_type = get_pointer_to_type(g, type_entry, true);
- gen_param_info->is_byval = true;
- } else {
- gen_type = type_entry;
- }
- gen_param_info->gen_index = gen_param_types.length;
- gen_param_info->type = gen_type;
- gen_param_types.append(get_llvm_type(g, gen_type));
-
- param_di_types.append(get_llvm_di_type(g, gen_type));
}
if (is_c_abi) {
@@ -7160,6 +7625,7 @@ static void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type) {
for (size_t i = 0; i < gen_param_types.length; i += 1) {
assert(gen_param_types.items[i] != nullptr);
}
+
fn_type->data.fn.raw_type_ref = LLVMFunctionType(get_llvm_type(g, gen_return_type),
gen_param_types.items, (unsigned int)gen_param_types.length, fn_type_id->is_var_args);
fn_type->llvm_type = LLVMPointerType(fn_type->data.fn.raw_type_ref, 0);
@@ -7169,6 +7635,40 @@ static void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type) {
LLVMABIAlignmentOfType(g->target_data_ref, fn_type->llvm_type), "");
}
+void resolve_llvm_types_fn(CodeGen *g, ZigFn *fn) {
+ Error err;
+ if (fn->raw_di_type != nullptr) return;
+
+ ZigType *fn_type = fn->type_entry;
+ if (!fn_is_async(fn)) {
+ resolve_llvm_types_fn_type(g, fn_type);
+ fn->raw_type_ref = fn_type->data.fn.raw_type_ref;
+ fn->raw_di_type = fn_type->data.fn.raw_di_type;
+ return;
+ }
+
+ ZigType *gen_return_type = g->builtin_types.entry_void;
+ ZigList<ZigLLVMDIType *> param_di_types = {};
+ ZigList<LLVMTypeRef> gen_param_types = {};
+ // first "parameter" is return value
+ param_di_types.append(get_llvm_di_type(g, gen_return_type));
+
+ ZigType *frame_type = get_fn_frame_type(g, fn);
+ ZigType *ptr_type = get_pointer_to_type(g, frame_type, false);
+ if ((err = type_resolve(g, ptr_type, ResolveStatusLLVMFwdDecl)))
+ zig_unreachable();
+ gen_param_types.append(ptr_type->llvm_type);
+ param_di_types.append(ptr_type->llvm_di_type);
+
+ // this parameter is used to pass the result pointer when await completes
+ gen_param_types.append(get_llvm_type(g, g->builtin_types.entry_usize));
+ param_di_types.append(get_llvm_di_type(g, g->builtin_types.entry_usize));
+
+ fn->raw_type_ref = LLVMFunctionType(get_llvm_type(g, gen_return_type),
+ gen_param_types.items, gen_param_types.length, false);
+ fn->raw_di_type = ZigLLVMCreateSubroutineType(g->dbuilder, param_di_types.items, (int)param_di_types.length, 0);
+}
+
static void resolve_llvm_types_anyerror(CodeGen *g) {
ZigType *entry = g->builtin_types.entry_global_error_set;
entry->llvm_type = get_llvm_type(g, g->err_tag_type);
@@ -7193,6 +7693,147 @@ static void resolve_llvm_types_anyerror(CodeGen *g) {
get_llvm_di_type(g, g->err_tag_type), "");
}
+static void resolve_llvm_types_async_frame(CodeGen *g, ZigType *frame_type, ResolveStatus wanted_resolve_status) {
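+ // When the function is async, pass the frame type through so the @fn_ptr field
+ // gets the function's own LLVM function pointer type (see the first-field
+ // special case in resolve_llvm_types_struct).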
+ ZigType *passed_frame_type = fn_is_async(frame_type->data.frame.fn) ? frame_type : nullptr;
+ resolve_llvm_types_struct(g, frame_type->data.frame.locals_struct, wanted_resolve_status, passed_frame_type);
+ frame_type->llvm_type = frame_type->data.frame.locals_struct->llvm_type;
+ frame_type->llvm_di_type = frame_type->data.frame.locals_struct->llvm_di_type;
+}
+
+static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, ResolveStatus wanted_resolve_status) {
+ if (any_frame_type->llvm_di_type != nullptr) return;
+
+ Buf *name = buf_sprintf("(%s header)", buf_ptr(&any_frame_type->name));
+ LLVMTypeRef frame_header_type = LLVMStructCreateNamed(LLVMGetGlobalContext(), buf_ptr(name));
+ any_frame_type->llvm_type = LLVMPointerType(frame_header_type, 0);
+
+ unsigned dwarf_kind = ZigLLVMTag_DW_structure_type();
+ ZigLLVMDIFile *di_file = nullptr;
+ ZigLLVMDIScope *di_scope = ZigLLVMCompileUnitToScope(g->compile_unit);
+ unsigned line = 0;
+ ZigLLVMDIType *frame_header_di_type = ZigLLVMCreateReplaceableCompositeType(g->dbuilder,
+ dwarf_kind, buf_ptr(name), di_scope, di_file, line);
+ any_frame_type->llvm_di_type = ZigLLVMCreateDebugPointerType(g->dbuilder, frame_header_di_type,
+ 8*g->pointer_size_bytes, 8*g->builtin_types.entry_usize->abi_align, buf_ptr(&any_frame_type->name));
+
+ LLVMTypeRef llvm_void = LLVMVoidType();
+ LLVMTypeRef arg_types[] = {any_frame_type->llvm_type, g->builtin_types.entry_usize->llvm_type};
+ LLVMTypeRef fn_type = LLVMFunctionType(llvm_void, arg_types, 2, false);
+ LLVMTypeRef usize_type_ref = get_llvm_type(g, g->builtin_types.entry_usize);
+ ZigLLVMDIType *usize_di_type = get_llvm_di_type(g, g->builtin_types.entry_usize);
+ ZigLLVMDIScope *compile_unit_scope = ZigLLVMCompileUnitToScope(g->compile_unit);
+
+ ZigType *result_type = any_frame_type->data.any_frame.result_type;
+ ZigType *ptr_result_type = (result_type == nullptr) ? nullptr : get_pointer_to_type(g, result_type, false);
+ LLVMTypeRef ptr_fn_llvm_type = LLVMPointerType(fn_type, 0);
+ if (result_type == nullptr) {
+ g->anyframe_fn_type = ptr_fn_llvm_type;
+ }
+
+ ZigList<LLVMTypeRef> field_types = {};
+ ZigList<ZigLLVMDIType *> di_element_types = {};
+
+ // label (grep this): [fn_frame_struct_layout]
+ field_types.append(ptr_fn_llvm_type); // fn_ptr
+ field_types.append(usize_type_ref); // resume_index
+ field_types.append(usize_type_ref); // awaiter
+
+ bool have_result_type = result_type != nullptr && type_has_bits(result_type);
+ if (have_result_type) {
+ field_types.append(get_llvm_type(g, ptr_result_type)); // result_ptr_callee
+ field_types.append(get_llvm_type(g, ptr_result_type)); // result_ptr_awaiter
+ field_types.append(get_llvm_type(g, result_type)); // result
+ if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
+ ZigType *ptr_stack_trace = get_pointer_to_type(g, get_stack_trace_type(g), false);
+ field_types.append(get_llvm_type(g, ptr_stack_trace)); // ptr_stack_trace_callee
+ field_types.append(get_llvm_type(g, ptr_stack_trace)); // ptr_stack_trace_awaiter
+ }
+ }
+ LLVMStructSetBody(frame_header_type, field_types.items, field_types.length, false);
+
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "fn_ptr",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, usize_di_type));
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "resume_index",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, usize_di_type));
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "awaiter",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, usize_di_type));
+
+ if (have_result_type) {
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result_ptr_callee",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_result_type)));
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result_ptr_awaiter",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_result_type)));
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, result_type)));
+
+ if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
+ ZigType *ptr_stack_trace = get_pointer_to_type(g, get_stack_trace_type(g), false);
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace_callee",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_stack_trace)));
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace_awaiter",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_stack_trace)));
+ }
+ }
+
+ ZigLLVMDIType *replacement_di_type = ZigLLVMCreateDebugStructType(g->dbuilder,
+ compile_unit_scope, buf_ptr(name),
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, frame_header_type),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, frame_header_type),
+ ZigLLVM_DIFlags_Zero,
+ nullptr, di_element_types.items, di_element_types.length, 0, nullptr, "");
+
+ ZigLLVMReplaceTemporary(g->dbuilder, frame_header_di_type, replacement_di_type);
+}
+
static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_resolve_status) {
assert(type->id == ZigTypeIdOpaque || type_is_resolved(type, ResolveStatusSizeKnown));
assert(wanted_resolve_status > ResolveStatusSizeKnown);
@@ -7218,20 +7859,13 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r
if (type->data.structure.is_slice)
return resolve_llvm_types_slice(g, type, wanted_resolve_status);
else
- return resolve_llvm_types_struct(g, type, wanted_resolve_status);
+ return resolve_llvm_types_struct(g, type, wanted_resolve_status, nullptr);
case ZigTypeIdEnum:
return resolve_llvm_types_enum(g, type);
case ZigTypeIdUnion:
return resolve_llvm_types_union(g, type, wanted_resolve_status);
case ZigTypeIdPointer:
- return resolve_llvm_types_pointer(g, type);
- case ZigTypeIdPromise: {
- if (type->llvm_di_type != nullptr) return;
- ZigType *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, false);
- type->llvm_type = get_llvm_type(g, u8_ptr_type);
- type->llvm_di_type = get_llvm_di_type(g, u8_ptr_type);
- return;
- }
+ return resolve_llvm_types_pointer(g, type, wanted_resolve_status);
case ZigTypeIdInt:
return resolve_llvm_types_integer(g, type);
case ZigTypeIdOptional:
@@ -7241,7 +7875,7 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r
case ZigTypeIdArray:
return resolve_llvm_types_array(g, type);
case ZigTypeIdFn:
- return resolve_llvm_types_fn(g, type);
+ return resolve_llvm_types_fn_type(g, type);
case ZigTypeIdErrorSet: {
if (type->llvm_di_type != nullptr) return;
@@ -7260,14 +7894,18 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r
type->abi_align, get_llvm_di_type(g, type->data.vector.elem_type), type->data.vector.len);
return;
}
+ case ZigTypeIdFnFrame:
+ return resolve_llvm_types_async_frame(g, type, wanted_resolve_status);
+ case ZigTypeIdAnyFrame:
+ return resolve_llvm_types_any_frame(g, type, wanted_resolve_status);
}
zig_unreachable();
}
LLVMTypeRef get_llvm_type(CodeGen *g, ZigType *type) {
assertNoError(type_resolve(g, type, ResolveStatusLLVMFull));
- assert(type->abi_size == 0 || type->abi_size == LLVMABISizeOfType(g->target_data_ref, type->llvm_type));
- assert(type->abi_align == 0 || type->abi_align == LLVMABIAlignmentOfType(g->target_data_ref, type->llvm_type));
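+ // Zig sometimes does its own layout (explicit padding fields, the 128-bit
+ // integer alignment override), so its ABI size and alignment may legitimately
+ // exceed what LLVM reports for the corresponding LLVM type.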
+ assert(type->abi_size == 0 || type->abi_size >= LLVMABISizeOfType(g->target_data_ref, type->llvm_type));
+ assert(type->abi_align == 0 || type->abi_align >= LLVMABIAlignmentOfType(g->target_data_ref, type->llvm_type));
return type->llvm_type;
}
diff --git a/src/analyze.hpp b/src/analyze.hpp
index b9e9f2df7d..5752c74751 100644
--- a/src/analyze.hpp
+++ b/src/analyze.hpp
@@ -11,11 +11,12 @@
#include "all_types.hpp"
void semantic_analyze(CodeGen *g);
-ErrorMsg *add_node_error(CodeGen *g, AstNode *node, Buf *msg);
+ErrorMsg *add_node_error(CodeGen *g, const AstNode *node, Buf *msg);
ErrorMsg *add_token_error(CodeGen *g, ZigType *owner, Token *token, Buf *msg);
-ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, AstNode *node, Buf *msg);
+ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, const AstNode *node, Buf *msg);
void emit_error_notes_for_ref_stack(CodeGen *g, ErrorMsg *msg);
ZigType *new_type_table_entry(ZigTypeId id);
+ZigType *get_fn_frame_type(CodeGen *g, ZigFn *fn);
ZigType *get_pointer_to_type(CodeGen *g, ZigType *child_type, bool is_const);
ZigType *get_pointer_to_type_extra(CodeGen *g, ZigType *child_type, bool is_const,
bool is_volatile, PtrLen ptr_len,
@@ -37,11 +38,8 @@ ZigType *get_smallest_unsigned_int_type(CodeGen *g, uint64_t x);
ZigType *get_error_union_type(CodeGen *g, ZigType *err_set_type, ZigType *payload_type);
ZigType *get_bound_fn_type(CodeGen *g, ZigFn *fn_entry);
ZigType *get_opaque_type(CodeGen *g, Scope *scope, AstNode *source_node, const char *full_name, Buf *bare_name);
-ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_names[],
- ZigType *field_types[], size_t field_count);
-ZigType *get_promise_type(CodeGen *g, ZigType *result_type);
-ZigType *get_promise_frame_type(CodeGen *g, ZigType *return_type);
ZigType *get_test_fn_type(CodeGen *g);
+ZigType *get_any_frame_type(CodeGen *g, ZigType *result_type);
bool handle_is_ptr(ZigType *type_entry);
bool type_has_bits(ZigType *type_entry);
@@ -106,7 +104,6 @@ void eval_min_max_value(CodeGen *g, ZigType *type_entry, ConstExprValue *const_v
void eval_min_max_value_int(CodeGen *g, ZigType *int_type, BigInt *bigint, bool is_max);
void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val);
-void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_type_node);
ScopeBlock *create_block_scope(CodeGen *g, AstNode *node, Scope *parent);
ScopeDefer *create_defer_scope(CodeGen *g, AstNode *node, Scope *parent);
@@ -117,7 +114,6 @@ ScopeLoop *create_loop_scope(CodeGen *g, AstNode *node, Scope *parent);
ScopeSuspend *create_suspend_scope(CodeGen *g, AstNode *node, Scope *parent);
ScopeFnDef *create_fndef_scope(CodeGen *g, AstNode *node, Scope *parent, ZigFn *fn_entry);
Scope *create_comptime_scope(CodeGen *g, AstNode *node, Scope *parent);
-Scope *create_coro_prelude_scope(CodeGen *g, AstNode *node, Scope *parent);
Scope *create_runtime_scope(CodeGen *g, AstNode *node, Scope *parent, IrInstruction *is_comptime);
void init_const_str_lit(CodeGen *g, ConstExprValue *const_val, Buf *str);
@@ -199,12 +195,11 @@ void add_var_export(CodeGen *g, ZigVar *fn_table_entry, Buf *symbol_name, Global
ConstExprValue *get_builtin_value(CodeGen *codegen, const char *name);
-ZigType *get_ptr_to_stack_trace_type(CodeGen *g);
+ZigType *get_stack_trace_type(CodeGen *g);
bool resolve_inferred_error_set(CodeGen *g, ZigType *err_set_type, AstNode *source_node);
ZigType *get_auto_err_set_type(CodeGen *g, ZigFn *fn_entry);
-uint32_t get_coro_frame_align_bytes(CodeGen *g);
bool fn_type_can_fail(FnTypeId *fn_type_id);
bool type_can_fail(ZigType *type_entry);
bool fn_eval_cacheable(Scope *scope, ZigType *return_type);
@@ -251,4 +246,7 @@ void src_assert(bool ok, AstNode *source_node);
bool is_container(ZigType *type_entry);
ConstExprValue *analyze_const_value(CodeGen *g, Scope *scope, AstNode *node, ZigType *type_entry, Buf *type_name);
+void resolve_llvm_types_fn(CodeGen *g, ZigFn *fn);
+bool fn_is_async(ZigFn *fn);
+
#endif
diff --git a/src/ast_render.cpp b/src/ast_render.cpp
index af134d29b5..334dc37b59 100644
--- a/src/ast_render.cpp
+++ b/src/ast_render.cpp
@@ -249,18 +249,16 @@ static const char *node_type_str(NodeType node_type) {
return "IfOptional";
case NodeTypeErrorSetDecl:
return "ErrorSetDecl";
- case NodeTypeCancel:
- return "Cancel";
case NodeTypeResume:
return "Resume";
case NodeTypeAwaitExpr:
return "AwaitExpr";
case NodeTypeSuspend:
return "Suspend";
- case NodeTypePromiseType:
- return "PromiseType";
case NodeTypePointerType:
return "PointerType";
+ case NodeTypeAnyFrameType:
+ return "AnyFrameType";
case NodeTypeEnumLiteral:
return "EnumLiteral";
}
@@ -699,13 +697,7 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
fprintf(ar->f, "@");
}
if (node->data.fn_call_expr.is_async) {
- fprintf(ar->f, "async");
- if (node->data.fn_call_expr.async_allocator != nullptr) {
- fprintf(ar->f, "<");
- render_node_extra(ar, node->data.fn_call_expr.async_allocator, true);
- fprintf(ar->f, ">");
- }
- fprintf(ar->f, " ");
+ fprintf(ar->f, "async ");
}
AstNode *fn_ref_node = node->data.fn_call_expr.fn_ref_expr;
bool grouped = (fn_ref_node->type != NodeTypePrefixOpExpr && fn_ref_node->type != NodeTypePointerType);
@@ -862,15 +854,14 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
render_node_ungrouped(ar, node->data.inferred_array_type.child_type);
break;
}
- case NodeTypePromiseType:
- {
- fprintf(ar->f, "promise");
- if (node->data.promise_type.payload_type != nullptr) {
- fprintf(ar->f, "->");
- render_node_grouped(ar, node->data.promise_type.payload_type);
- }
- break;
+ case NodeTypeAnyFrameType: {
+ fprintf(ar->f, "anyframe");
+ if (node->data.anyframe_type.payload_type != nullptr) {
+ fprintf(ar->f, "->");
+ render_node_grouped(ar, node->data.anyframe_type.payload_type);
}
+ break;
+ }
case NodeTypeErrorType:
fprintf(ar->f, "anyerror");
break;
@@ -1143,12 +1134,6 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
fprintf(ar->f, "}");
break;
}
- case NodeTypeCancel:
- {
- fprintf(ar->f, "cancel ");
- render_node_grouped(ar, node->data.cancel_expr.expr);
- break;
- }
case NodeTypeResume:
{
fprintf(ar->f, "resume ");
@@ -1163,9 +1148,11 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
}
case NodeTypeSuspend:
{
- fprintf(ar->f, "suspend");
if (node->data.suspend.block != nullptr) {
+ fprintf(ar->f, "suspend ");
render_node_grouped(ar, node->data.suspend.block);
+ } else {
+ fprintf(ar->f, "suspend\n");
}
break;
}
@@ -1191,3 +1178,9 @@ void ast_render(FILE *f, AstNode *node, int indent_size) {
render_node_grouped(&ar, node);
}
+
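+// Debugging aid: prints this node's file:line:column to stderr.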
+void AstNode::src() {
+ fprintf(stderr, "%s:%" ZIG_PRI_usize ":%" ZIG_PRI_usize "\n",
+ buf_ptr(this->owner->data.structure.root_struct->path),
+ this->line + 1, this->column + 1);
+}
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 49aeb8fb87..d758d34192 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -24,6 +24,12 @@
#include
#include
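+
+// Resume IDs are passed in the second argument of an async function's resume
+// entry point, encoded as all-ones minus the id
+// (see gen_resume and gen_assert_resume_id below).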
+enum ResumeId {
+ ResumeIdManual,
+ ResumeIdReturn,
+ ResumeIdCall,
+};
+
static void init_darwin_native(CodeGen *g) {
char *osx_target = getenv("MACOSX_DEPLOYMENT_TARGET");
char *ios_target = getenv("IPHONEOS_DEPLOYMENT_TARGET");
@@ -297,12 +303,42 @@ static LLVMLinkage to_llvm_linkage(GlobalLinkageId id) {
zig_unreachable();
}
+// label (grep this): [fn_frame_struct_layout]
+static uint32_t frame_index_trace_arg(CodeGen *g, ZigType *return_type) {
+ // [0] *ReturnType (callee's)
+ // [1] *ReturnType (awaiter's)
+ // [2] ReturnType
+ uint32_t return_field_count = type_has_bits(return_type) ? 3 : 0;
+ return frame_ret_start + return_field_count;
+}
+
+// label (grep this): [fn_frame_struct_layout]
+static uint32_t frame_index_arg(CodeGen *g, ZigType *return_type) {
+ bool have_stack_trace = codegen_fn_has_err_ret_tracing_arg(g, return_type);
+ // [0] *StackTrace (callee's)
+ // [1] *StackTrace (awaiter's)
+ uint32_t trace_field_count = have_stack_trace ? 2 : 0;
+ return frame_index_trace_arg(g, return_type) + trace_field_count;
+}
+
+// label (grep this): [fn_frame_struct_layout]
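+// Index of the frame field holding the stack-allocated StackTrace: it comes
+// after the return and trace-arg fields, plus one field per parameter that
+// has bits.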
+static uint32_t frame_index_trace_stack(CodeGen *g, FnTypeId *fn_type_id) {
+ uint32_t result = frame_index_arg(g, fn_type_id->return_type);
+ for (size_t i = 0; i < fn_type_id->param_count; i += 1) {
+ if (type_has_bits(fn_type_id->param_info[i].type)) {
+ result += 1;
+ }
+ }
+ return result;
+}
+
+
static uint32_t get_err_ret_trace_arg_index(CodeGen *g, ZigFn *fn_table_entry) {
if (!g->have_err_ret_tracing) {
return UINT32_MAX;
}
- if (fn_table_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync) {
- return 0;
+ if (fn_is_async(fn_table_entry)) {
+ return UINT32_MAX;
}
ZigType *fn_type = fn_table_entry->type_entry;
if (!fn_type_can_fail(&fn_type->data.fn.fn_type_id)) {
@@ -343,27 +379,28 @@ static bool cc_want_sret_attr(CallingConvention cc) {
zig_unreachable();
}
-static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) {
- if (fn_table_entry->llvm_value)
- return fn_table_entry->llvm_value;
+static bool codegen_have_frame_pointer(CodeGen *g) {
+ return g->build_mode == BuildModeDebug;
+}
- Buf *unmangled_name = &fn_table_entry->symbol_name;
+static LLVMValueRef make_fn_llvm_value(CodeGen *g, ZigFn *fn) {
+ Buf *unmangled_name = &fn->symbol_name;
Buf *symbol_name;
GlobalLinkageId linkage;
- if (fn_table_entry->body_node == nullptr) {
+ if (fn->body_node == nullptr) {
symbol_name = unmangled_name;
linkage = GlobalLinkageIdStrong;
- } else if (fn_table_entry->export_list.length == 0) {
+ } else if (fn->export_list.length == 0) {
symbol_name = get_mangled_name(g, unmangled_name, false);
linkage = GlobalLinkageIdInternal;
} else {
- GlobalExport *fn_export = &fn_table_entry->export_list.items[0];
+ GlobalExport *fn_export = &fn->export_list.items[0];
symbol_name = &fn_export->name;
linkage = fn_export->linkage;
}
bool external_linkage = linkage != GlobalLinkageIdInternal;
- CallingConvention cc = fn_table_entry->type_entry->data.fn.fn_type_id.cc;
+ CallingConvention cc = fn->type_entry->data.fn.fn_type_id.cc;
if (cc == CallingConventionStdcall && external_linkage &&
g->zig_target->arch == ZigLLVM_x86)
{
@@ -371,130 +408,125 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) {
symbol_name = buf_sprintf("\x01_%s", buf_ptr(symbol_name));
}
+ bool is_async = fn_is_async(fn);
- ZigType *fn_type = fn_table_entry->type_entry;
+
+ ZigType *fn_type = fn->type_entry;
// Make the raw_type_ref populated
- (void)get_llvm_type(g, fn_type);
- LLVMTypeRef fn_llvm_type = fn_type->data.fn.raw_type_ref;
- if (fn_table_entry->body_node == nullptr) {
+ resolve_llvm_types_fn(g, fn);
+ LLVMTypeRef fn_llvm_type = fn->raw_type_ref;
+ LLVMValueRef llvm_fn = nullptr;
+ if (fn->body_node == nullptr) {
LLVMValueRef existing_llvm_fn = LLVMGetNamedFunction(g->module, buf_ptr(symbol_name));
if (existing_llvm_fn) {
- fn_table_entry->llvm_value = LLVMConstBitCast(existing_llvm_fn, LLVMPointerType(fn_llvm_type, 0));
- return fn_table_entry->llvm_value;
+ return LLVMConstBitCast(existing_llvm_fn, LLVMPointerType(fn_llvm_type, 0));
} else {
auto entry = g->exported_symbol_names.maybe_get(symbol_name);
if (entry == nullptr) {
- fn_table_entry->llvm_value = LLVMAddFunction(g->module, buf_ptr(symbol_name), fn_llvm_type);
+ llvm_fn = LLVMAddFunction(g->module, buf_ptr(symbol_name), fn_llvm_type);
if (target_is_wasm(g->zig_target)) {
- assert(fn_table_entry->proto_node->type == NodeTypeFnProto);
- AstNodeFnProto *fn_proto = &fn_table_entry->proto_node->data.fn_proto;
+ assert(fn->proto_node->type == NodeTypeFnProto);
+ AstNodeFnProto *fn_proto = &fn->proto_node->data.fn_proto;
if (fn_proto->is_extern && fn_proto->lib_name != nullptr) {
- addLLVMFnAttrStr(fn_table_entry->llvm_value, "wasm-import-module", buf_ptr(fn_proto->lib_name));
+ addLLVMFnAttrStr(llvm_fn, "wasm-import-module", buf_ptr(fn_proto->lib_name));
}
}
} else {
assert(entry->value->id == TldIdFn);
TldFn *tld_fn = reinterpret_cast<TldFn *>(entry->value);
// Make the raw_type_ref populated
- (void)get_llvm_type(g, tld_fn->fn_entry->type_entry);
+ resolve_llvm_types_fn(g, tld_fn->fn_entry);
tld_fn->fn_entry->llvm_value = LLVMAddFunction(g->module, buf_ptr(symbol_name),
- tld_fn->fn_entry->type_entry->data.fn.raw_type_ref);
- fn_table_entry->llvm_value = LLVMConstBitCast(tld_fn->fn_entry->llvm_value,
- LLVMPointerType(fn_llvm_type, 0));
- return fn_table_entry->llvm_value;
+ tld_fn->fn_entry->raw_type_ref);
+ llvm_fn = LLVMConstBitCast(tld_fn->fn_entry->llvm_value, LLVMPointerType(fn_llvm_type, 0));
+ return llvm_fn;
}
}
} else {
- if (fn_table_entry->llvm_value == nullptr) {
- fn_table_entry->llvm_value = LLVMAddFunction(g->module, buf_ptr(symbol_name), fn_llvm_type);
+ if (llvm_fn == nullptr) {
+ llvm_fn = LLVMAddFunction(g->module, buf_ptr(symbol_name), fn_llvm_type);
}
- for (size_t i = 1; i < fn_table_entry->export_list.length; i += 1) {
- GlobalExport *fn_export = &fn_table_entry->export_list.items[i];
- LLVMAddAlias(g->module, LLVMTypeOf(fn_table_entry->llvm_value),
- fn_table_entry->llvm_value, buf_ptr(&fn_export->name));
+ for (size_t i = 1; i < fn->export_list.length; i += 1) {
+ GlobalExport *fn_export = &fn->export_list.items[i];
+ LLVMAddAlias(g->module, LLVMTypeOf(llvm_fn), llvm_fn, buf_ptr(&fn_export->name));
}
}
- fn_table_entry->llvm_name = strdup(LLVMGetValueName(fn_table_entry->llvm_value));
- switch (fn_table_entry->fn_inline) {
+ switch (fn->fn_inline) {
case FnInlineAlways:
- addLLVMFnAttr(fn_table_entry->llvm_value, "alwaysinline");
- g->inline_fns.append(fn_table_entry);
+ addLLVMFnAttr(llvm_fn, "alwaysinline");
+ g->inline_fns.append(fn);
break;
case FnInlineNever:
- addLLVMFnAttr(fn_table_entry->llvm_value, "noinline");
+ addLLVMFnAttr(llvm_fn, "noinline");
break;
case FnInlineAuto:
- if (fn_table_entry->alignstack_value != 0) {
- addLLVMFnAttr(fn_table_entry->llvm_value, "noinline");
+ if (fn->alignstack_value != 0) {
+ addLLVMFnAttr(llvm_fn, "noinline");
}
break;
}
if (cc == CallingConventionNaked) {
- addLLVMFnAttr(fn_table_entry->llvm_value, "naked");
+ addLLVMFnAttr(llvm_fn, "naked");
} else {
- LLVMSetFunctionCallConv(fn_table_entry->llvm_value, get_llvm_cc(g, fn_type->data.fn.fn_type_id.cc));
- }
- if (cc == CallingConventionAsync) {
- addLLVMFnAttr(fn_table_entry->llvm_value, "optnone");
- addLLVMFnAttr(fn_table_entry->llvm_value, "noinline");
+ LLVMSetFunctionCallConv(llvm_fn, get_llvm_cc(g, fn_type->data.fn.fn_type_id.cc));
}
- bool want_cold = fn_table_entry->is_cold || cc == CallingConventionCold;
+ bool want_cold = fn->is_cold || cc == CallingConventionCold;
if (want_cold) {
- ZigLLVMAddFunctionAttrCold(fn_table_entry->llvm_value);
+ ZigLLVMAddFunctionAttrCold(llvm_fn);
}
- LLVMSetLinkage(fn_table_entry->llvm_value, to_llvm_linkage(linkage));
+ LLVMSetLinkage(llvm_fn, to_llvm_linkage(linkage));
if (linkage == GlobalLinkageIdInternal) {
- LLVMSetUnnamedAddr(fn_table_entry->llvm_value, true);
+ LLVMSetUnnamedAddr(llvm_fn, true);
}
ZigType *return_type = fn_type->data.fn.fn_type_id.return_type;
if (return_type->id == ZigTypeIdUnreachable) {
- addLLVMFnAttr(fn_table_entry->llvm_value, "noreturn");
+ addLLVMFnAttr(llvm_fn, "noreturn");
}
- if (fn_table_entry->body_node != nullptr) {
- maybe_export_dll(g, fn_table_entry->llvm_value, linkage);
+ if (fn->body_node != nullptr) {
+ maybe_export_dll(g, llvm_fn, linkage);
bool want_fn_safety = g->build_mode != BuildModeFastRelease &&
g->build_mode != BuildModeSmallRelease &&
- !fn_table_entry->def_scope->safety_off;
+ !fn->def_scope->safety_off;
if (want_fn_safety) {
if (g->libc_link_lib != nullptr) {
- addLLVMFnAttr(fn_table_entry->llvm_value, "sspstrong");
- addLLVMFnAttrStr(fn_table_entry->llvm_value, "stack-protector-buffer-size", "4");
+ addLLVMFnAttr(llvm_fn, "sspstrong");
+ addLLVMFnAttrStr(llvm_fn, "stack-protector-buffer-size", "4");
}
}
- if (g->have_stack_probing && !fn_table_entry->def_scope->safety_off) {
- addLLVMFnAttrStr(fn_table_entry->llvm_value, "probe-stack", "__zig_probe_stack");
+ if (g->have_stack_probing && !fn->def_scope->safety_off) {
+ addLLVMFnAttrStr(llvm_fn, "probe-stack", "__zig_probe_stack");
}
} else {
- maybe_import_dll(g, fn_table_entry->llvm_value, linkage);
+ maybe_import_dll(g, llvm_fn, linkage);
}
- if (fn_table_entry->alignstack_value != 0) {
- addLLVMFnAttrInt(fn_table_entry->llvm_value, "alignstack", fn_table_entry->alignstack_value);
+ if (fn->alignstack_value != 0) {
+ addLLVMFnAttrInt(llvm_fn, "alignstack", fn->alignstack_value);
}
- addLLVMFnAttr(fn_table_entry->llvm_value, "nounwind");
- add_uwtable_attr(g, fn_table_entry->llvm_value);
- addLLVMFnAttr(fn_table_entry->llvm_value, "nobuiltin");
- if (g->build_mode == BuildModeDebug && fn_table_entry->fn_inline != FnInlineAlways) {
- ZigLLVMAddFunctionAttr(fn_table_entry->llvm_value, "no-frame-pointer-elim", "true");
- ZigLLVMAddFunctionAttr(fn_table_entry->llvm_value, "no-frame-pointer-elim-non-leaf", nullptr);
+ addLLVMFnAttr(llvm_fn, "nounwind");
+ add_uwtable_attr(g, llvm_fn);
+ addLLVMFnAttr(llvm_fn, "nobuiltin");
+ if (codegen_have_frame_pointer(g) && fn->fn_inline != FnInlineAlways) {
+ ZigLLVMAddFunctionAttr(llvm_fn, "no-frame-pointer-elim", "true");
+ ZigLLVMAddFunctionAttr(llvm_fn, "no-frame-pointer-elim-non-leaf", nullptr);
}
- if (fn_table_entry->section_name) {
- LLVMSetSection(fn_table_entry->llvm_value, buf_ptr(fn_table_entry->section_name));
+ if (fn->section_name) {
+ LLVMSetSection(llvm_fn, buf_ptr(fn->section_name));
}
- if (fn_table_entry->align_bytes > 0) {
- LLVMSetAlignment(fn_table_entry->llvm_value, (unsigned)fn_table_entry->align_bytes);
+ if (fn->align_bytes > 0) {
+ LLVMSetAlignment(llvm_fn, (unsigned)fn->align_bytes);
} else {
// We'd like to set the best alignment for the function here, but on Darwin LLVM gives
// "Cannot getTypeInfo() on a type that is unsized!" assertion failure when calling
@@ -502,36 +534,50 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) {
// use the ABI alignment, which is fine.
}
- unsigned init_gen_i = 0;
- if (!type_has_bits(return_type)) {
- // nothing to do
- } else if (type_is_nonnull_ptr(return_type)) {
- addLLVMAttr(fn_table_entry->llvm_value, 0, "nonnull");
- } else if (want_first_arg_sret(g, &fn_type->data.fn.fn_type_id)) {
- // Sret pointers must not be address 0
- addLLVMArgAttr(fn_table_entry->llvm_value, 0, "nonnull");
- addLLVMArgAttr(fn_table_entry->llvm_value, 0, "sret");
- if (cc_want_sret_attr(cc)) {
- addLLVMArgAttr(fn_table_entry->llvm_value, 0, "noalias");
+ if (is_async) {
+ addLLVMArgAttr(llvm_fn, 0, "nonnull");
+ } else {
+ unsigned init_gen_i = 0;
+ if (!type_has_bits(return_type)) {
+ // nothing to do
+ } else if (type_is_nonnull_ptr(return_type)) {
+ addLLVMAttr(llvm_fn, 0, "nonnull");
+ } else if (want_first_arg_sret(g, &fn_type->data.fn.fn_type_id)) {
+ // Sret pointers must not be address 0
+ addLLVMArgAttr(llvm_fn, 0, "nonnull");
+ addLLVMArgAttr(llvm_fn, 0, "sret");
+ if (cc_want_sret_attr(cc)) {
+ addLLVMArgAttr(llvm_fn, 0, "noalias");
+ }
+ init_gen_i = 1;
+ }
+
+ // set parameter attributes
+ FnWalk fn_walk = {};
+ fn_walk.id = FnWalkIdAttrs;
+ fn_walk.data.attrs.fn = fn;
+ fn_walk.data.attrs.llvm_fn = llvm_fn;
+ fn_walk.data.attrs.gen_i = init_gen_i;
+ walk_function_params(g, fn_type, &fn_walk);
+
+ uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn);
+ if (err_ret_trace_arg_index != UINT32_MAX) {
+ // Error return trace memory lives on the stack, which can never be at
+ // address 0 on any architecture.
+ addLLVMArgAttr(llvm_fn, (unsigned)err_ret_trace_arg_index, "nonnull");
}
- init_gen_i = 1;
}
- // set parameter attributes
- FnWalk fn_walk = {};
- fn_walk.id = FnWalkIdAttrs;
- fn_walk.data.attrs.fn = fn_table_entry;
- fn_walk.data.attrs.gen_i = init_gen_i;
- walk_function_params(g, fn_type, &fn_walk);
+ return llvm_fn;
+}
- uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn_table_entry);
- if (err_ret_trace_arg_index != UINT32_MAX) {
- // Error return trace memory is in the stack, which is impossible to be at address 0
- // on any architecture.
- addLLVMArgAttr(fn_table_entry->llvm_value, (unsigned)err_ret_trace_arg_index, "nonnull");
- }
+static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn) {
+ if (fn->llvm_value)
+ return fn->llvm_value;
- return fn_table_entry->llvm_value;
+ fn->llvm_value = make_fn_llvm_value(g, fn);
+ fn->llvm_name = strdup(LLVMGetValueName(fn->llvm_value));
+ return fn->llvm_value;
}
static ZigLLVMDIScope *get_di_scope(CodeGen *g, Scope *scope) {
@@ -559,10 +605,11 @@ static ZigLLVMDIScope *get_di_scope(CodeGen *g, Scope *scope) {
unsigned flags = ZigLLVM_DIFlags_StaticMember;
ZigLLVMDIScope *fn_di_scope = get_di_scope(g, scope->parent);
assert(fn_di_scope != nullptr);
+ assert(fn_table_entry->raw_di_type != nullptr);
ZigLLVMDISubprogram *subprogram = ZigLLVMCreateFunction(g->dbuilder,
fn_di_scope, buf_ptr(&fn_table_entry->symbol_name), "",
import->data.structure.root_struct->di_file, line_number,
- fn_table_entry->type_entry->data.fn.raw_di_type, is_internal_linkage,
+ fn_table_entry->raw_di_type, is_internal_linkage,
is_definition, scope_line, flags, is_optimized, nullptr);
scope->di_scope = ZigLLVMSubprogramToScope(subprogram);
@@ -597,7 +644,6 @@ static ZigLLVMDIScope *get_di_scope(CodeGen *g, Scope *scope) {
case ScopeIdLoop:
case ScopeIdSuspend:
case ScopeIdCompTime:
- case ScopeIdCoroPrelude:
case ScopeIdRuntime:
return get_di_scope(g, scope->parent);
}
@@ -798,9 +844,8 @@ static bool ir_want_fast_math(CodeGen *g, IrInstruction *instruction) {
return false;
}
-static bool ir_want_runtime_safety(CodeGen *g, IrInstruction *instruction) {
+static bool ir_want_runtime_safety_scope(CodeGen *g, Scope *scope) {
// TODO memoize
- Scope *scope = instruction->scope;
while (scope) {
if (scope->id == ScopeIdBlock) {
ScopeBlock *block_scope = (ScopeBlock *)scope;
@@ -818,6 +863,10 @@ static bool ir_want_runtime_safety(CodeGen *g, IrInstruction *instruction) {
g->build_mode != BuildModeSmallRelease);
}
+static bool ir_want_runtime_safety(CodeGen *g, IrInstruction *instruction) {
+ return ir_want_runtime_safety_scope(g, instruction->scope);
+}
+
static Buf *panic_msg_buf(PanicMsgId msg_id) {
switch (msg_id) {
case PanicMsgIdCount:
@@ -858,6 +907,18 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) {
return buf_create_from_str("integer part of floating point value out of bounds");
case PanicMsgIdPtrCastNull:
return buf_create_from_str("cast causes pointer to be null");
+ case PanicMsgIdBadResume:
+ return buf_create_from_str("resumed an async function which already returned");
+ case PanicMsgIdBadAwait:
+ return buf_create_from_str("async function awaited twice");
+ case PanicMsgIdBadReturn:
+ return buf_create_from_str("async function returned twice");
+ case PanicMsgIdResumedAnAwaitingFn:
+ return buf_create_from_str("awaiting function resumed");
+ case PanicMsgIdFrameTooSmall:
+ return buf_create_from_str("frame too small");
+ case PanicMsgIdResumedFnPendingAwait:
+ return buf_create_from_str("resumed an async function which can only be awaited");
}
zig_unreachable();
}
@@ -882,13 +943,16 @@ static LLVMValueRef get_panic_msg_ptr_val(CodeGen *g, PanicMsgId msg_id) {
return LLVMConstBitCast(val->global_refs->llvm_global, LLVMPointerType(get_llvm_type(g, str_type), 0));
}
+static ZigType *ptr_to_stack_trace_type(CodeGen *g) {
+ return get_pointer_to_type(g, get_stack_trace_type(g), false);
+}
+
static void gen_panic(CodeGen *g, LLVMValueRef msg_arg, LLVMValueRef stack_trace_arg) {
assert(g->panic_fn != nullptr);
LLVMValueRef fn_val = fn_llvm_value(g, g->panic_fn);
LLVMCallConv llvm_cc = get_llvm_cc(g, g->panic_fn->type_entry->data.fn.fn_type_id.cc);
if (stack_trace_arg == nullptr) {
- ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(g);
- stack_trace_arg = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type));
+ stack_trace_arg = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type(g)));
}
LLVMValueRef args[] = {
msg_arg,
@@ -904,14 +968,18 @@ static void gen_safety_crash(CodeGen *g, PanicMsgId msg_id) {
gen_panic(g, get_panic_msg_ptr_val(g, msg_id), nullptr);
}
-static void gen_assertion(CodeGen *g, PanicMsgId msg_id, IrInstruction *source_instruction) {
- if (ir_want_runtime_safety(g, source_instruction)) {
+static void gen_assertion_scope(CodeGen *g, PanicMsgId msg_id, Scope *source_scope) {
+ if (ir_want_runtime_safety_scope(g, source_scope)) {
gen_safety_crash(g, msg_id);
} else {
LLVMBuildUnreachable(g->builder);
}
}
+static void gen_assertion(CodeGen *g, PanicMsgId msg_id, IrInstruction *source_instruction) {
+ return gen_assertion_scope(g, msg_id, source_instruction->scope);
+}
+
static LLVMValueRef get_stacksave_fn_val(CodeGen *g) {
if (g->stacksave_fn_val)
return g->stacksave_fn_val;
@@ -959,177 +1027,6 @@ static LLVMValueRef get_write_register_fn_val(CodeGen *g) {
return g->write_register_fn_val;
}
-static LLVMValueRef get_coro_destroy_fn_val(CodeGen *g) {
- if (g->coro_destroy_fn_val)
- return g->coro_destroy_fn_val;
-
- LLVMTypeRef param_types[] = {
- LLVMPointerType(LLVMInt8Type(), 0),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMVoidType(), param_types, 1, false);
- Buf *name = buf_sprintf("llvm.coro.destroy");
- g->coro_destroy_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_destroy_fn_val));
-
- return g->coro_destroy_fn_val;
-}
-
-static LLVMValueRef get_coro_id_fn_val(CodeGen *g) {
- if (g->coro_id_fn_val)
- return g->coro_id_fn_val;
-
- LLVMTypeRef param_types[] = {
- LLVMInt32Type(),
- LLVMPointerType(LLVMInt8Type(), 0),
- LLVMPointerType(LLVMInt8Type(), 0),
- LLVMPointerType(LLVMInt8Type(), 0),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()), param_types, 4, false);
- Buf *name = buf_sprintf("llvm.coro.id");
- g->coro_id_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_id_fn_val));
-
- return g->coro_id_fn_val;
-}
-
-static LLVMValueRef get_coro_alloc_fn_val(CodeGen *g) {
- if (g->coro_alloc_fn_val)
- return g->coro_alloc_fn_val;
-
- LLVMTypeRef param_types[] = {
- ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMInt1Type(), param_types, 1, false);
- Buf *name = buf_sprintf("llvm.coro.alloc");
- g->coro_alloc_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_alloc_fn_val));
-
- return g->coro_alloc_fn_val;
-}
-
-static LLVMValueRef get_coro_size_fn_val(CodeGen *g) {
- if (g->coro_size_fn_val)
- return g->coro_size_fn_val;
-
- LLVMTypeRef fn_type = LLVMFunctionType(g->builtin_types.entry_usize->llvm_type, nullptr, 0, false);
- Buf *name = buf_sprintf("llvm.coro.size.i%d", g->pointer_size_bytes * 8);
- g->coro_size_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_size_fn_val));
-
- return g->coro_size_fn_val;
-}
-
-static LLVMValueRef get_coro_begin_fn_val(CodeGen *g) {
- if (g->coro_begin_fn_val)
- return g->coro_begin_fn_val;
-
- LLVMTypeRef param_types[] = {
- ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()),
- LLVMPointerType(LLVMInt8Type(), 0),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0), param_types, 2, false);
- Buf *name = buf_sprintf("llvm.coro.begin");
- g->coro_begin_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_begin_fn_val));
-
- return g->coro_begin_fn_val;
-}
-
-static LLVMValueRef get_coro_suspend_fn_val(CodeGen *g) {
- if (g->coro_suspend_fn_val)
- return g->coro_suspend_fn_val;
-
- LLVMTypeRef param_types[] = {
- ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()),
- LLVMInt1Type(),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMInt8Type(), param_types, 2, false);
- Buf *name = buf_sprintf("llvm.coro.suspend");
- g->coro_suspend_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_suspend_fn_val));
-
- return g->coro_suspend_fn_val;
-}
-
-static LLVMValueRef get_coro_end_fn_val(CodeGen *g) {
- if (g->coro_end_fn_val)
- return g->coro_end_fn_val;
-
- LLVMTypeRef param_types[] = {
- LLVMPointerType(LLVMInt8Type(), 0),
- LLVMInt1Type(),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMInt1Type(), param_types, 2, false);
- Buf *name = buf_sprintf("llvm.coro.end");
- g->coro_end_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_end_fn_val));
-
- return g->coro_end_fn_val;
-}
-
-static LLVMValueRef get_coro_free_fn_val(CodeGen *g) {
- if (g->coro_free_fn_val)
- return g->coro_free_fn_val;
-
- LLVMTypeRef param_types[] = {
- ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()),
- LLVMPointerType(LLVMInt8Type(), 0),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0), param_types, 2, false);
- Buf *name = buf_sprintf("llvm.coro.free");
- g->coro_free_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_free_fn_val));
-
- return g->coro_free_fn_val;
-}
-
-static LLVMValueRef get_coro_resume_fn_val(CodeGen *g) {
- if (g->coro_resume_fn_val)
- return g->coro_resume_fn_val;
-
- LLVMTypeRef param_types[] = {
- LLVMPointerType(LLVMInt8Type(), 0),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMVoidType(), param_types, 1, false);
- Buf *name = buf_sprintf("llvm.coro.resume");
- g->coro_resume_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_resume_fn_val));
-
- return g->coro_resume_fn_val;
-}
-
-static LLVMValueRef get_coro_save_fn_val(CodeGen *g) {
- if (g->coro_save_fn_val)
- return g->coro_save_fn_val;
-
- LLVMTypeRef param_types[] = {
- LLVMPointerType(LLVMInt8Type(), 0),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()), param_types, 1, false);
- Buf *name = buf_sprintf("llvm.coro.save");
- g->coro_save_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_save_fn_val));
-
- return g->coro_save_fn_val;
-}
-
-static LLVMValueRef get_coro_promise_fn_val(CodeGen *g) {
- if (g->coro_promise_fn_val)
- return g->coro_promise_fn_val;
-
- LLVMTypeRef param_types[] = {
- LLVMPointerType(LLVMInt8Type(), 0),
- LLVMInt32Type(),
- LLVMInt1Type(),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0), param_types, 3, false);
- Buf *name = buf_sprintf("llvm.coro.promise");
- g->coro_promise_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_promise_fn_val));
-
- return g->coro_promise_fn_val;
-}
-
static LLVMValueRef get_return_address_fn_val(CodeGen *g) {
if (g->return_address_fn_val)
return g->return_address_fn_val;
@@ -1149,7 +1046,7 @@ static LLVMValueRef get_add_error_return_trace_addr_fn(CodeGen *g) {
return g->add_error_return_trace_addr_fn_val;
LLVMTypeRef arg_types[] = {
- get_llvm_type(g, get_ptr_to_stack_trace_type(g)),
+ get_llvm_type(g, ptr_to_stack_trace_type(g)),
g->builtin_types.entry_usize->llvm_type,
};
LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), arg_types, 2, false);
@@ -1164,7 +1061,7 @@ static LLVMValueRef get_add_error_return_trace_addr_fn(CodeGen *g) {
// Error return trace memory is in the stack, which is impossible to be at address 0
// on any architecture.
addLLVMArgAttr(fn_val, (unsigned)0, "nonnull");
- if (g->build_mode == BuildModeDebug) {
+ if (codegen_have_frame_pointer(g)) {
ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true");
ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr);
}
@@ -1222,140 +1119,6 @@ static LLVMValueRef get_add_error_return_trace_addr_fn(CodeGen *g) {
return fn_val;
}
-static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) {
- if (g->merge_err_ret_traces_fn_val)
- return g->merge_err_ret_traces_fn_val;
-
- assert(g->stack_trace_type != nullptr);
-
- LLVMTypeRef param_types[] = {
- get_llvm_type(g, get_ptr_to_stack_trace_type(g)),
- get_llvm_type(g, get_ptr_to_stack_trace_type(g)),
- };
- LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), param_types, 2, false);
-
- Buf *fn_name = get_mangled_name(g, buf_create_from_str("__zig_merge_error_return_traces"), false);
- LLVMValueRef fn_val = LLVMAddFunction(g->module, buf_ptr(fn_name), fn_type_ref);
- LLVMSetLinkage(fn_val, LLVMInternalLinkage);
- LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified));
- addLLVMFnAttr(fn_val, "nounwind");
- add_uwtable_attr(g, fn_val);
- // Error return trace memory is in the stack, which is impossible to be at address 0
- // on any architecture.
- addLLVMArgAttr(fn_val, (unsigned)0, "nonnull");
- addLLVMArgAttr(fn_val, (unsigned)0, "noalias");
- addLLVMArgAttr(fn_val, (unsigned)0, "writeonly");
- // Error return trace memory is in the stack, which is impossible to be at address 0
- // on any architecture.
- addLLVMArgAttr(fn_val, (unsigned)1, "nonnull");
- addLLVMArgAttr(fn_val, (unsigned)1, "noalias");
- addLLVMArgAttr(fn_val, (unsigned)1, "readonly");
- if (g->build_mode == BuildModeDebug) {
- ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true");
- ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr);
- }
-
- // this is above the ZigLLVMClearCurrentDebugLocation
- LLVMValueRef add_error_return_trace_addr_fn_val = get_add_error_return_trace_addr_fn(g);
-
- LLVMBasicBlockRef entry_block = LLVMAppendBasicBlock(fn_val, "Entry");
- LLVMBasicBlockRef prev_block = LLVMGetInsertBlock(g->builder);
- LLVMValueRef prev_debug_location = LLVMGetCurrentDebugLocation(g->builder);
- LLVMPositionBuilderAtEnd(g->builder, entry_block);
- ZigLLVMClearCurrentDebugLocation(g->builder);
-
- // var frame_index: usize = undefined;
- // var frames_left: usize = undefined;
- // if (src_stack_trace.index < src_stack_trace.instruction_addresses.len) {
- // frame_index = 0;
- // frames_left = src_stack_trace.index;
- // if (frames_left == 0) return;
- // } else {
- // frame_index = (src_stack_trace.index + 1) % src_stack_trace.instruction_addresses.len;
- // frames_left = src_stack_trace.instruction_addresses.len;
- // }
- // while (true) {
- // __zig_add_err_ret_trace_addr(dest_stack_trace, src_stack_trace.instruction_addresses[frame_index]);
- // frames_left -= 1;
- // if (frames_left == 0) return;
- // frame_index = (frame_index + 1) % src_stack_trace.instruction_addresses.len;
- // }
- LLVMBasicBlockRef return_block = LLVMAppendBasicBlock(fn_val, "Return");
-
- LLVMValueRef frame_index_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frame_index");
- LLVMValueRef frames_left_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frames_left");
-
- LLVMValueRef dest_stack_trace_ptr = LLVMGetParam(fn_val, 0);
- LLVMValueRef src_stack_trace_ptr = LLVMGetParam(fn_val, 1);
-
- size_t src_index_field_index = g->stack_trace_type->data.structure.fields[0].gen_index;
- size_t src_addresses_field_index = g->stack_trace_type->data.structure.fields[1].gen_index;
- LLVMValueRef src_index_field_ptr = LLVMBuildStructGEP(g->builder, src_stack_trace_ptr,
- (unsigned)src_index_field_index, "");
- LLVMValueRef src_addresses_field_ptr = LLVMBuildStructGEP(g->builder, src_stack_trace_ptr,
- (unsigned)src_addresses_field_index, "");
- ZigType *slice_type = g->stack_trace_type->data.structure.fields[1].type_entry;
- size_t ptr_field_index = slice_type->data.structure.fields[slice_ptr_index].gen_index;
- LLVMValueRef src_ptr_field_ptr = LLVMBuildStructGEP(g->builder, src_addresses_field_ptr, (unsigned)ptr_field_index, "");
- size_t len_field_index = slice_type->data.structure.fields[slice_len_index].gen_index;
- LLVMValueRef src_len_field_ptr = LLVMBuildStructGEP(g->builder, src_addresses_field_ptr, (unsigned)len_field_index, "");
- LLVMValueRef src_index_val = LLVMBuildLoad(g->builder, src_index_field_ptr, "");
- LLVMValueRef src_ptr_val = LLVMBuildLoad(g->builder, src_ptr_field_ptr, "");
- LLVMValueRef src_len_val = LLVMBuildLoad(g->builder, src_len_field_ptr, "");
- LLVMValueRef no_wrap_bit = LLVMBuildICmp(g->builder, LLVMIntULT, src_index_val, src_len_val, "");
- LLVMBasicBlockRef no_wrap_block = LLVMAppendBasicBlock(fn_val, "NoWrap");
- LLVMBasicBlockRef yes_wrap_block = LLVMAppendBasicBlock(fn_val, "YesWrap");
- LLVMBasicBlockRef loop_block = LLVMAppendBasicBlock(fn_val, "Loop");
- LLVMBuildCondBr(g->builder, no_wrap_bit, no_wrap_block, yes_wrap_block);
-
- LLVMPositionBuilderAtEnd(g->builder, no_wrap_block);
- LLVMValueRef usize_zero = LLVMConstNull(g->builtin_types.entry_usize->llvm_type);
- LLVMBuildStore(g->builder, usize_zero, frame_index_ptr);
- LLVMBuildStore(g->builder, src_index_val, frames_left_ptr);
- LLVMValueRef frames_left_eq_zero_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, src_index_val, usize_zero, "");
- LLVMBuildCondBr(g->builder, frames_left_eq_zero_bit, return_block, loop_block);
-
- LLVMPositionBuilderAtEnd(g->builder, yes_wrap_block);
- LLVMValueRef usize_one = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, 1, false);
- LLVMValueRef plus_one = LLVMBuildNUWAdd(g->builder, src_index_val, usize_one, "");
- LLVMValueRef mod_len = LLVMBuildURem(g->builder, plus_one, src_len_val, "");
- LLVMBuildStore(g->builder, mod_len, frame_index_ptr);
- LLVMBuildStore(g->builder, src_len_val, frames_left_ptr);
- LLVMBuildBr(g->builder, loop_block);
-
- LLVMPositionBuilderAtEnd(g->builder, loop_block);
- LLVMValueRef ptr_index = LLVMBuildLoad(g->builder, frame_index_ptr, "");
- LLVMValueRef addr_ptr = LLVMBuildInBoundsGEP(g->builder, src_ptr_val, &ptr_index, 1, "");
- LLVMValueRef this_addr_val = LLVMBuildLoad(g->builder, addr_ptr, "");
- LLVMValueRef args[] = {dest_stack_trace_ptr, this_addr_val};
- ZigLLVMBuildCall(g->builder, add_error_return_trace_addr_fn_val, args, 2, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAlways, "");
- LLVMValueRef prev_frames_left = LLVMBuildLoad(g->builder, frames_left_ptr, "");
- LLVMValueRef new_frames_left = LLVMBuildNUWSub(g->builder, prev_frames_left, usize_one, "");
- LLVMValueRef done_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, new_frames_left, usize_zero, "");
- LLVMBasicBlockRef continue_block = LLVMAppendBasicBlock(fn_val, "Continue");
- LLVMBuildCondBr(g->builder, done_bit, return_block, continue_block);
-
- LLVMPositionBuilderAtEnd(g->builder, return_block);
- LLVMBuildRetVoid(g->builder);
-
- LLVMPositionBuilderAtEnd(g->builder, continue_block);
- LLVMBuildStore(g->builder, new_frames_left, frames_left_ptr);
- LLVMValueRef prev_index = LLVMBuildLoad(g->builder, frame_index_ptr, "");
- LLVMValueRef index_plus_one = LLVMBuildNUWAdd(g->builder, prev_index, usize_one, "");
- LLVMValueRef index_mod_len = LLVMBuildURem(g->builder, index_plus_one, src_len_val, "");
- LLVMBuildStore(g->builder, index_mod_len, frame_index_ptr);
- LLVMBuildBr(g->builder, loop_block);
-
- LLVMPositionBuilderAtEnd(g->builder, prev_block);
- if (!g->strip_debug_symbols) {
- LLVMSetCurrentDebugLocation(g->builder, prev_debug_location);
- }
-
- g->merge_err_ret_traces_fn_val = fn_val;
- return fn_val;
-
-}
-
static LLVMValueRef get_return_err_fn(CodeGen *g) {
if (g->return_err_fn != nullptr)
return g->return_err_fn;
@@ -1364,7 +1127,7 @@ static LLVMValueRef get_return_err_fn(CodeGen *g) {
LLVMTypeRef arg_types[] = {
// error return trace pointer
- get_llvm_type(g, get_ptr_to_stack_trace_type(g)),
+ get_llvm_type(g, ptr_to_stack_trace_type(g)),
};
LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), arg_types, 1, false);
@@ -1376,10 +1139,7 @@ static LLVMValueRef get_return_err_fn(CodeGen *g) {
LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified));
addLLVMFnAttr(fn_val, "nounwind");
add_uwtable_attr(g, fn_val);
- // Error return trace memory is in the stack, which is impossible to be at address 0
- // on any architecture.
- addLLVMArgAttr(fn_val, (unsigned)0, "nonnull");
- if (g->build_mode == BuildModeDebug) {
+ if (codegen_have_frame_pointer(g)) {
ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true");
ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr);
}
@@ -1400,6 +1160,17 @@ static LLVMValueRef get_return_err_fn(CodeGen *g) {
LLVMValueRef return_address_ptr = LLVMBuildCall(g->builder, get_return_address_fn_val(g), &zero, 1, "");
LLVMValueRef return_address = LLVMBuildPtrToInt(g->builder, return_address_ptr, usize_type_ref, "");
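+ // The error return trace pointer is now allowed to be null; when it is,
+ // there is nothing to record, so return immediately.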
+ LLVMBasicBlockRef return_block = LLVMAppendBasicBlock(fn_val, "Return");
+ LLVMBasicBlockRef dest_non_null_block = LLVMAppendBasicBlock(fn_val, "DestNonNull");
+
+ LLVMValueRef null_dest_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, err_ret_trace_ptr,
+ LLVMConstNull(LLVMTypeOf(err_ret_trace_ptr)), "");
+ LLVMBuildCondBr(g->builder, null_dest_bit, return_block, dest_non_null_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, return_block);
+ LLVMBuildRetVoid(g->builder);
+
+ LLVMPositionBuilderAtEnd(g->builder, dest_non_null_block);
LLVMValueRef args[] = { err_ret_trace_ptr, return_address };
ZigLLVMBuildCall(g->builder, add_error_return_trace_addr_fn_val, args, 2, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAlways, "");
LLVMBuildRetVoid(g->builder);
@@ -1434,7 +1205,7 @@ static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) {
LLVMTypeRef fn_type_ref;
if (g->have_err_ret_tracing) {
LLVMTypeRef arg_types[] = {
- get_llvm_type(g, g->ptr_to_stack_trace_type),
+ get_llvm_type(g, get_pointer_to_type(g, get_stack_trace_type(g), false)),
get_llvm_type(g, g->err_tag_type),
};
fn_type_ref = LLVMFunctionType(LLVMVoidType(), arg_types, 2, false);
@@ -1451,7 +1222,7 @@ static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) {
LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified));
addLLVMFnAttr(fn_val, "nounwind");
add_uwtable_attr(g, fn_val);
- if (g->build_mode == BuildModeDebug) {
+ if (codegen_have_frame_pointer(g)) {
ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true");
ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr);
}
@@ -1543,25 +1314,10 @@ static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) {
return fn_val;
}
-static bool is_coro_prelude_scope(Scope *scope) {
- while (scope != nullptr) {
- if (scope->id == ScopeIdCoroPrelude) {
- return true;
- } else if (scope->id == ScopeIdFnDef) {
- break;
- }
- scope = scope->parent;
- }
- return false;
-}
-
static LLVMValueRef get_cur_err_ret_trace_val(CodeGen *g, Scope *scope) {
if (!g->have_err_ret_tracing) {
return nullptr;
}
- if (g->cur_fn->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync) {
- return is_coro_prelude_scope(scope) ? g->cur_err_ret_trace_val_arg : g->cur_err_ret_trace_val_stack;
- }
if (g->cur_err_ret_trace_val_stack != nullptr) {
return g->cur_err_ret_trace_val_stack;
}
@@ -1574,8 +1330,7 @@ static void gen_safety_crash_for_err(CodeGen *g, LLVMValueRef err_val, Scope *sc
if (g->have_err_ret_tracing) {
LLVMValueRef err_ret_trace_val = get_cur_err_ret_trace_val(g, scope);
if (err_ret_trace_val == nullptr) {
- ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(g);
- err_ret_trace_val = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type));
+ err_ret_trace_val = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type(g)));
}
LLVMValueRef args[] = {
err_ret_trace_val,
@@ -1820,14 +1575,14 @@ static LLVMRealPredicate cmp_op_to_real_predicate(IrBinOp cmp_op) {
}
}
-static LLVMValueRef gen_assign_raw(CodeGen *g, LLVMValueRef ptr, ZigType *ptr_type,
+static void gen_assign_raw(CodeGen *g, LLVMValueRef ptr, ZigType *ptr_type,
LLVMValueRef value)
{
assert(ptr_type->id == ZigTypeIdPointer);
ZigType *child_type = ptr_type->data.pointer.child_type;
if (!type_has_bits(child_type))
- return nullptr;
+ return;
if (handle_is_ptr(child_type)) {
assert(LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMPointerTypeKind);
@@ -1847,13 +1602,13 @@ static LLVMValueRef gen_assign_raw(CodeGen *g, LLVMValueRef ptr, ZigType *ptr_ty
ZigLLVMBuildMemCpy(g->builder, dest_ptr, align_bytes, src_ptr, align_bytes,
LLVMConstInt(usize->llvm_type, size_bytes, false),
ptr_type->data.pointer.is_volatile);
- return nullptr;
+ return;
}
uint32_t host_int_bytes = ptr_type->data.pointer.host_int_bytes;
if (host_int_bytes == 0) {
gen_store(g, value, ptr, ptr_type);
- return nullptr;
+ return;
}
bool big_endian = g->is_big_endian;
@@ -1883,7 +1638,7 @@ static LLVMValueRef gen_assign_raw(CodeGen *g, LLVMValueRef ptr, ZigType *ptr_ty
LLVMValueRef ored_value = LLVMBuildOr(g->builder, shifted_value, anded_containing_int, "");
gen_store(g, ored_value, ptr, ptr_type);
- return nullptr;
+ return;
}
static void gen_var_debug_decl(CodeGen *g, ZigVar *var) {
@@ -1967,7 +1722,7 @@ static bool iter_function_params_c_abi(CodeGen *g, ZigType *fn_type, FnWalk *fn_
param_info = &fn_type->data.fn.fn_type_id.param_info[src_i];
ty = param_info->type;
source_node = fn_walk->data.attrs.fn->proto_node;
- llvm_fn = fn_walk->data.attrs.fn->llvm_value;
+ llvm_fn = fn_walk->data.attrs.llvm_fn;
break;
case FnWalkIdCall: {
if (src_i >= fn_walk->data.call.inst->arg_count)
@@ -2149,10 +1904,12 @@ static bool iter_function_params_c_abi(CodeGen *g, ZigType *fn_type, FnWalk *fn_
}
case FnWalkIdInits: {
clear_debug_source_node(g);
- LLVMValueRef arg = LLVMGetParam(llvm_fn, fn_walk->data.inits.gen_i);
- LLVMTypeRef ptr_to_int_type_ref = LLVMPointerType(LLVMIntType((unsigned)ty_size * 8), 0);
- LLVMValueRef bitcasted = LLVMBuildBitCast(g->builder, var->value_ref, ptr_to_int_type_ref, "");
- gen_store_untyped(g, arg, bitcasted, var->align_bytes, false);
+ if (!fn_is_async(fn_walk->data.inits.fn)) {
+ LLVMValueRef arg = LLVMGetParam(llvm_fn, fn_walk->data.inits.gen_i);
+ LLVMTypeRef ptr_to_int_type_ref = LLVMPointerType(LLVMIntType((unsigned)ty_size * 8), 0);
+ LLVMValueRef bitcasted = LLVMBuildBitCast(g->builder, var->value_ref, ptr_to_int_type_ref, "");
+ gen_store_untyped(g, arg, bitcasted, var->align_bytes, false);
+ }
if (var->decl_node) {
gen_var_debug_decl(g, var);
}
@@ -2201,6 +1958,7 @@ void walk_function_params(CodeGen *g, ZigType *fn_type, FnWalk *fn_walk) {
LLVMValueRef param_value = ir_llvm_value(g, param_instruction);
assert(param_value);
fn_walk->data.call.gen_param_values->append(param_value);
+ fn_walk->data.call.gen_param_types->append(param_type);
}
}
return;
@@ -2216,7 +1974,7 @@ void walk_function_params(CodeGen *g, ZigType *fn_type, FnWalk *fn_walk) {
switch (fn_walk->id) {
case FnWalkIdAttrs: {
- LLVMValueRef llvm_fn = fn_walk->data.attrs.fn->llvm_value;
+ LLVMValueRef llvm_fn = fn_walk->data.attrs.llvm_fn;
bool is_byval = gen_info->is_byval;
FnTypeParamInfo *param_info = &fn_type->data.fn.fn_type_id.param_info[param_i];
@@ -2245,7 +2003,7 @@ void walk_function_params(CodeGen *g, ZigType *fn_type, FnWalk *fn_walk) {
assert(variable);
assert(variable->value_ref);
- if (!handle_is_ptr(variable->var_type)) {
+ if (!handle_is_ptr(variable->var_type) && !fn_is_async(fn_walk->data.inits.fn)) {
clear_debug_source_node(g);
ZigType *fn_type = fn_table_entry->type_entry;
unsigned gen_arg_index = fn_type->data.fn.gen_param_info[variable->src_arg_index].gen_index;
@@ -2271,48 +2029,357 @@ void walk_function_params(CodeGen *g, ZigType *fn_type, FnWalk *fn_walk) {
}
}
+static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) {
+ if (g->merge_err_ret_traces_fn_val)
+ return g->merge_err_ret_traces_fn_val;
+
+ assert(g->stack_trace_type != nullptr);
+
+ LLVMTypeRef param_types[] = {
+ get_llvm_type(g, ptr_to_stack_trace_type(g)),
+ get_llvm_type(g, ptr_to_stack_trace_type(g)),
+ };
+ LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), param_types, 2, false);
+
+ Buf *fn_name = get_mangled_name(g, buf_create_from_str("__zig_merge_error_return_traces"), false);
+ LLVMValueRef fn_val = LLVMAddFunction(g->module, buf_ptr(fn_name), fn_type_ref);
+ LLVMSetLinkage(fn_val, LLVMInternalLinkage);
+ LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified));
+ addLLVMFnAttr(fn_val, "nounwind");
+ add_uwtable_attr(g, fn_val);
+ addLLVMArgAttr(fn_val, (unsigned)0, "noalias");
+ addLLVMArgAttr(fn_val, (unsigned)0, "writeonly");
+
+ addLLVMArgAttr(fn_val, (unsigned)1, "noalias");
+ addLLVMArgAttr(fn_val, (unsigned)1, "readonly");
+ if (g->build_mode == BuildModeDebug) {
+ ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true");
+ ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr);
+ }
+
+ // this is above the ZigLLVMClearCurrentDebugLocation
+ LLVMValueRef add_error_return_trace_addr_fn_val = get_add_error_return_trace_addr_fn(g);
+
+ LLVMBasicBlockRef entry_block = LLVMAppendBasicBlock(fn_val, "Entry");
+ LLVMBasicBlockRef prev_block = LLVMGetInsertBlock(g->builder);
+ LLVMValueRef prev_debug_location = LLVMGetCurrentDebugLocation(g->builder);
+ LLVMPositionBuilderAtEnd(g->builder, entry_block);
+ ZigLLVMClearCurrentDebugLocation(g->builder);
+
+ // if (dest_stack_trace == null or src_stack_trace == null) return;
+ // var frame_index: usize = undefined;
+ // var frames_left: usize = undefined;
+ // if (src_stack_trace.index < src_stack_trace.instruction_addresses.len) {
+ // frame_index = 0;
+ // frames_left = src_stack_trace.index;
+ // if (frames_left == 0) return;
+ // } else {
+ // frame_index = (src_stack_trace.index + 1) % src_stack_trace.instruction_addresses.len;
+ // frames_left = src_stack_trace.instruction_addresses.len;
+ // }
+ // while (true) {
+ // __zig_add_err_ret_trace_addr(dest_stack_trace, src_stack_trace.instruction_addresses[frame_index]);
+ // frames_left -= 1;
+ // if (frames_left == 0) return;
+ // frame_index = (frame_index + 1) % src_stack_trace.instruction_addresses.len;
+ // }
+ LLVMBasicBlockRef return_block = LLVMAppendBasicBlock(fn_val, "Return");
+ LLVMBasicBlockRef non_null_block = LLVMAppendBasicBlock(fn_val, "NonNull");
+
+ LLVMValueRef frame_index_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frame_index");
+ LLVMValueRef frames_left_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frames_left");
+
+ LLVMValueRef dest_stack_trace_ptr = LLVMGetParam(fn_val, 0);
+ LLVMValueRef src_stack_trace_ptr = LLVMGetParam(fn_val, 1);
+
+ LLVMValueRef null_dest_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, dest_stack_trace_ptr,
+ LLVMConstNull(LLVMTypeOf(dest_stack_trace_ptr)), "");
+ LLVMValueRef null_src_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, src_stack_trace_ptr,
+ LLVMConstNull(LLVMTypeOf(src_stack_trace_ptr)), "");
+ LLVMValueRef null_bit = LLVMBuildOr(g->builder, null_dest_bit, null_src_bit, "");
+ LLVMBuildCondBr(g->builder, null_bit, return_block, non_null_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, non_null_block);
+ size_t src_index_field_index = g->stack_trace_type->data.structure.fields[0].gen_index;
+ size_t src_addresses_field_index = g->stack_trace_type->data.structure.fields[1].gen_index;
+ LLVMValueRef src_index_field_ptr = LLVMBuildStructGEP(g->builder, src_stack_trace_ptr,
+ (unsigned)src_index_field_index, "");
+ LLVMValueRef src_addresses_field_ptr = LLVMBuildStructGEP(g->builder, src_stack_trace_ptr,
+ (unsigned)src_addresses_field_index, "");
+ ZigType *slice_type = g->stack_trace_type->data.structure.fields[1].type_entry;
+ size_t ptr_field_index = slice_type->data.structure.fields[slice_ptr_index].gen_index;
+ LLVMValueRef src_ptr_field_ptr = LLVMBuildStructGEP(g->builder, src_addresses_field_ptr, (unsigned)ptr_field_index, "");
+ size_t len_field_index = slice_type->data.structure.fields[slice_len_index].gen_index;
+ LLVMValueRef src_len_field_ptr = LLVMBuildStructGEP(g->builder, src_addresses_field_ptr, (unsigned)len_field_index, "");
+ LLVMValueRef src_index_val = LLVMBuildLoad(g->builder, src_index_field_ptr, "");
+ LLVMValueRef src_ptr_val = LLVMBuildLoad(g->builder, src_ptr_field_ptr, "");
+ LLVMValueRef src_len_val = LLVMBuildLoad(g->builder, src_len_field_ptr, "");
+ LLVMValueRef no_wrap_bit = LLVMBuildICmp(g->builder, LLVMIntULT, src_index_val, src_len_val, "");
+ LLVMBasicBlockRef no_wrap_block = LLVMAppendBasicBlock(fn_val, "NoWrap");
+ LLVMBasicBlockRef yes_wrap_block = LLVMAppendBasicBlock(fn_val, "YesWrap");
+ LLVMBasicBlockRef loop_block = LLVMAppendBasicBlock(fn_val, "Loop");
+ LLVMBuildCondBr(g->builder, no_wrap_bit, no_wrap_block, yes_wrap_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, no_wrap_block);
+ LLVMValueRef usize_zero = LLVMConstNull(g->builtin_types.entry_usize->llvm_type);
+ LLVMBuildStore(g->builder, usize_zero, frame_index_ptr);
+ LLVMBuildStore(g->builder, src_index_val, frames_left_ptr);
+ LLVMValueRef frames_left_eq_zero_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, src_index_val, usize_zero, "");
+ LLVMBuildCondBr(g->builder, frames_left_eq_zero_bit, return_block, loop_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, yes_wrap_block);
+ LLVMValueRef usize_one = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, 1, false);
+ LLVMValueRef plus_one = LLVMBuildNUWAdd(g->builder, src_index_val, usize_one, "");
+ LLVMValueRef mod_len = LLVMBuildURem(g->builder, plus_one, src_len_val, "");
+ LLVMBuildStore(g->builder, mod_len, frame_index_ptr);
+ LLVMBuildStore(g->builder, src_len_val, frames_left_ptr);
+ LLVMBuildBr(g->builder, loop_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, loop_block);
+ LLVMValueRef ptr_index = LLVMBuildLoad(g->builder, frame_index_ptr, "");
+ LLVMValueRef addr_ptr = LLVMBuildInBoundsGEP(g->builder, src_ptr_val, &ptr_index, 1, "");
+ LLVMValueRef this_addr_val = LLVMBuildLoad(g->builder, addr_ptr, "");
+ LLVMValueRef args[] = {dest_stack_trace_ptr, this_addr_val};
+ ZigLLVMBuildCall(g->builder, add_error_return_trace_addr_fn_val, args, 2, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAlways, "");
+ LLVMValueRef prev_frames_left = LLVMBuildLoad(g->builder, frames_left_ptr, "");
+ LLVMValueRef new_frames_left = LLVMBuildNUWSub(g->builder, prev_frames_left, usize_one, "");
+ LLVMValueRef done_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, new_frames_left, usize_zero, "");
+ LLVMBasicBlockRef continue_block = LLVMAppendBasicBlock(fn_val, "Continue");
+ LLVMBuildCondBr(g->builder, done_bit, return_block, continue_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, return_block);
+ LLVMBuildRetVoid(g->builder);
+
+ LLVMPositionBuilderAtEnd(g->builder, continue_block);
+ LLVMBuildStore(g->builder, new_frames_left, frames_left_ptr);
+ LLVMValueRef prev_index = LLVMBuildLoad(g->builder, frame_index_ptr, "");
+ LLVMValueRef index_plus_one = LLVMBuildNUWAdd(g->builder, prev_index, usize_one, "");
+ LLVMValueRef index_mod_len = LLVMBuildURem(g->builder, index_plus_one, src_len_val, "");
+ LLVMBuildStore(g->builder, index_mod_len, frame_index_ptr);
+ LLVMBuildBr(g->builder, loop_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, prev_block);
+ if (!g->strip_debug_symbols) {
+ LLVMSetCurrentDebugLocation(g->builder, prev_debug_location);
+ }
+
+ g->merge_err_ret_traces_fn_val = fn_val;
+ return fn_val;
+
+}
static LLVMValueRef ir_render_save_err_ret_addr(CodeGen *g, IrExecutable *executable,
IrInstructionSaveErrRetAddr *save_err_ret_addr_instruction)
{
assert(g->have_err_ret_tracing);
LLVMValueRef return_err_fn = get_return_err_fn(g);
- LLVMValueRef args[] = {
- get_cur_err_ret_trace_val(g, save_err_ret_addr_instruction->base.scope),
- };
- LLVMValueRef call_instruction = ZigLLVMBuildCall(g->builder, return_err_fn, args, 1,
+ LLVMValueRef my_err_trace_val = get_cur_err_ret_trace_val(g, save_err_ret_addr_instruction->base.scope);
+ ZigLLVMBuildCall(g->builder, return_err_fn, &my_err_trace_val, 1,
get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
- return call_instruction;
+
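+ // In an async function, also store the trace pointer into the frame's
+ // trace-arg slot, so it can be found through the frame pointer alone.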
+ ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type;
+ if (fn_is_async(g->cur_fn) && codegen_fn_has_err_ret_tracing_arg(g, ret_type)) {
+ LLVMValueRef trace_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
+ frame_index_trace_arg(g, ret_type), "");
+ LLVMBuildStore(g->builder, my_err_trace_val, trace_ptr_ptr);
+ }
+
+ return nullptr;
}
-static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *return_instruction) {
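+// Emits a check that the resume argument matches the expected ResumeId
+// encoding; on mismatch it panics (or hits unreachable when safety is off).
+// Code emission continues at end_bb.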
+static void gen_assert_resume_id(CodeGen *g, IrInstruction *source_instr, ResumeId resume_id, PanicMsgId msg_id,
+ LLVMBasicBlockRef end_bb)
+{
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+ LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume");
+ if (end_bb == nullptr) end_bb = LLVMAppendBasicBlock(g->cur_fn_val, "OkResume");
+ LLVMValueRef expected_value = LLVMConstSub(LLVMConstAllOnes(usize_type_ref),
+ LLVMConstInt(usize_type_ref, resume_id, false));
+ LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, LLVMGetParam(g->cur_fn_val, 1), expected_value, "");
+ LLVMBuildCondBr(g->builder, ok_bit, end_bb, bad_resume_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, bad_resume_block);
+ gen_assertion(g, msg_id, source_instr);
+
+ LLVMPositionBuilderAtEnd(g->builder, end_bb);
+}
+
+static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef target_frame_ptr, ResumeId resume_id) {
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
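+ // A null fn_val means the callee is not statically known; load the resume
+ // function pointer stored in the frame header instead.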
+ if (fn_val == nullptr) {
+ LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_fn_ptr_index, "");
+ fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, "");
+ }
+ LLVMValueRef arg_val = LLVMConstSub(LLVMConstAllOnes(usize_type_ref),
+ LLVMConstInt(usize_type_ref, resume_id, false));
+ LLVMValueRef args[] = {target_frame_ptr, arg_val};
+ return ZigLLVMBuildCall(g->builder, fn_val, args, 2, LLVMFastCallConv, ZigLLVM_FnInlineAuto, "");
+}
+
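+// Each suspend point claims the next case index in the function's resume
+// switch and stores that index into the frame before suspending, so the next
+// resume call re-enters the function and branches straight to resume_bb.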
+static LLVMBasicBlockRef gen_suspend_begin(CodeGen *g, const char *name_hint) {
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+ LLVMBasicBlockRef resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, name_hint);
+ size_t new_block_index = g->cur_resume_block_count;
+ g->cur_resume_block_count += 1;
+ LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false);
+ LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, resume_bb);
+ LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr);
+ return resume_bb;
+}
+
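+// Resuming a chain of async frames must run in constant stack space, so the
+// resume call is marked as a tail call; the helper is presumably kept as a
+// hook for targets where a tail call would not be appropriate.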
+static void set_tail_call_if_appropriate(CodeGen *g, LLVMValueRef call_inst) {
+ LLVMSetTailCall(call_inst, true);
+}
+
+static LLVMValueRef gen_maybe_atomic_op(CodeGen *g, LLVMAtomicRMWBinOp op, LLVMValueRef ptr, LLVMValueRef val,
+ LLVMAtomicOrdering order)
+{
+ if (g->is_single_threaded) {
+ LLVMValueRef loaded = LLVMBuildLoad(g->builder, ptr, "");
+ LLVMValueRef modified;
+ switch (op) {
+ case LLVMAtomicRMWBinOpXchg:
+ modified = val;
+ break;
+ case LLVMAtomicRMWBinOpXor:
+ modified = LLVMBuildXor(g->builder, loaded, val, "");
+ break;
+ default:
+ zig_unreachable();
+ }
+ LLVMBuildStore(g->builder, modified, ptr);
+ return loaded;
+ } else {
+ return LLVMBuildAtomicRMW(g->builder, op, ptr, val, order, false);
+ }
+}
+
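+// The awaiter word in the frame is a tiny state machine shared between the
+// returning async function and its (at most one) awaiter:
+//     0        -> nobody is awaiting yet
+//     all ones -> the function already finished; its result is ready
+//     other    -> the address of the awaiter's frame, which must be resumed
+// The atomic XOR with all ones below fetches the previous state and, when
+// there is no awaiter yet, leaves the all-ones "finished" marker behind for
+// a later await to observe.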
+static void gen_async_return(CodeGen *g, IrInstructionReturn *instruction) {
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+
+ ZigType *operand_type = (instruction->operand != nullptr) ? instruction->operand->value.type : nullptr;
+ bool operand_has_bits = (operand_type != nullptr) && type_has_bits(operand_type);
+ ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type;
+ bool ret_type_has_bits = type_has_bits(ret_type);
+
+ if (operand_has_bits && instruction->operand != nullptr) {
+ bool need_store = instruction->operand->value.special != ConstValSpecialRuntime || !handle_is_ptr(ret_type);
+ if (need_store) {
+ // It didn't get written to the result ptr. We do that now.
+ ZigType *ret_ptr_type = get_pointer_to_type(g, ret_type, true);
+ gen_assign_raw(g, g->cur_ret_ptr, ret_ptr_type, ir_llvm_value(g, instruction->operand));
+ }
+ }
+
+ // Whether we tail resume the awaiter, or do an early return, we are done and will not be resumed.
+ if (ir_want_runtime_safety(g, &instruction->base)) {
+ LLVMValueRef new_resume_index = LLVMConstAllOnes(usize_type_ref);
+ LLVMBuildStore(g->builder, new_resume_index, g->cur_async_resume_index_ptr);
+ }
+
+ LLVMValueRef zero = LLVMConstNull(usize_type_ref);
+ LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref);
+
+ LLVMValueRef prev_val = gen_maybe_atomic_op(g, LLVMAtomicRMWBinOpXor, g->cur_async_awaiter_ptr,
+ all_ones, LLVMAtomicOrderingAcquire);
+
+ LLVMBasicBlockRef bad_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadReturn");
+ LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn");
+ LLVMBasicBlockRef resume_them_block = LLVMAppendBasicBlock(g->cur_fn_val, "ResumeThem");
+
+ LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, resume_them_block, 2);
+
+ LLVMAddCase(switch_instr, zero, early_return_block);
+ LLVMAddCase(switch_instr, all_ones, bad_return_block);
+
+ // Something has gone horribly wrong, and this is an invalid second return.
+ LLVMPositionBuilderAtEnd(g->builder, bad_return_block);
+ gen_assertion(g, PanicMsgIdBadReturn, &instruction->base);
+
+ // There is no awaiter yet, but we're completely done.
+ LLVMPositionBuilderAtEnd(g->builder, early_return_block);
+ LLVMBuildRetVoid(g->builder);
+
+ // We need to resume the caller by tail calling them,
+ // but first we write through the result pointer and, if applicable,
+ // the error return trace pointer.
+ LLVMPositionBuilderAtEnd(g->builder, resume_them_block);
+
+ if (ret_type_has_bits) {
+ // If the awaiter result pointer is non-null, we need to copy the result to there.
+ LLVMBasicBlockRef copy_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResult");
+ LLVMBasicBlockRef copy_end_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResultEnd");
+ LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_ret_start + 1, "");
+ LLVMValueRef awaiter_ret_ptr = LLVMBuildLoad(g->builder, awaiter_ret_ptr_ptr, "");
+ LLVMValueRef zero_ptr = LLVMConstNull(LLVMTypeOf(awaiter_ret_ptr));
+ LLVMValueRef need_copy_bit = LLVMBuildICmp(g->builder, LLVMIntNE, awaiter_ret_ptr, zero_ptr, "");
+ LLVMBuildCondBr(g->builder, need_copy_bit, copy_block, copy_end_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, copy_block);
+ LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0);
+ LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, awaiter_ret_ptr, ptr_u8, "");
+ LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, g->cur_ret_ptr, ptr_u8, "");
+ bool is_volatile = false;
+ uint32_t abi_align = get_abi_alignment(g, ret_type);
+ LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, ret_type), false);
+ ZigLLVMBuildMemCpy(g->builder,
+ dest_ptr_casted, abi_align,
+ src_ptr_casted, abi_align, byte_count_val, is_volatile);
+ LLVMBuildBr(g->builder, copy_end_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, copy_end_block);
+ if (codegen_fn_has_err_ret_tracing_arg(g, ret_type)) {
+ LLVMValueRef awaiter_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
+ frame_index_trace_arg(g, ret_type) + 1, "");
+ LLVMValueRef dest_trace_ptr = LLVMBuildLoad(g->builder, awaiter_trace_ptr_ptr, "");
+ LLVMValueRef my_err_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope);
+ LLVMValueRef args[] = { dest_trace_ptr, my_err_trace_val };
+ ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2,
+ get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
+ }
+ }
+
+ // Resume the caller by tail calling them.
+ ZigType *any_frame_type = get_any_frame_type(g, ret_type);
+ LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, prev_val, get_llvm_type(g, any_frame_type), "");
+ LLVMValueRef call_inst = gen_resume(g, nullptr, their_frame_ptr, ResumeIdReturn);
+ set_tail_call_if_appropriate(g, call_inst);
+ LLVMBuildRetVoid(g->builder);
+}
+
+static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *instruction) {
+ if (fn_is_async(g->cur_fn)) {
+ gen_async_return(g, instruction);
+ return nullptr;
+ }
+
if (want_first_arg_sret(g, &g->cur_fn->type_entry->data.fn.fn_type_id)) {
- if (return_instruction->value == nullptr) {
+ if (instruction->operand == nullptr) {
LLVMBuildRetVoid(g->builder);
return nullptr;
}
assert(g->cur_ret_ptr);
- src_assert(return_instruction->value->value.special != ConstValSpecialRuntime,
- return_instruction->base.source_node);
- LLVMValueRef value = ir_llvm_value(g, return_instruction->value);
- ZigType *return_type = return_instruction->value->value.type;
+ src_assert(instruction->operand->value.special != ConstValSpecialRuntime,
+ instruction->base.source_node);
+ LLVMValueRef value = ir_llvm_value(g, instruction->operand);
+ ZigType *return_type = instruction->operand->value.type;
gen_assign_raw(g, g->cur_ret_ptr, get_pointer_to_type(g, return_type, false), value);
LLVMBuildRetVoid(g->builder);
} else if (g->cur_fn->type_entry->data.fn.fn_type_id.cc != CallingConventionAsync &&
handle_is_ptr(g->cur_fn->type_entry->data.fn.fn_type_id.return_type))
{
- if (return_instruction->value == nullptr) {
+ if (instruction->operand == nullptr) {
LLVMValueRef by_val_value = gen_load_untyped(g, g->cur_ret_ptr, 0, false, "");
LLVMBuildRet(g->builder, by_val_value);
} else {
- LLVMValueRef value = ir_llvm_value(g, return_instruction->value);
+ LLVMValueRef value = ir_llvm_value(g, instruction->operand);
LLVMValueRef by_val_value = gen_load_untyped(g, value, 0, false, "");
LLVMBuildRet(g->builder, by_val_value);
}
- } else if (return_instruction->value == nullptr) {
+ } else if (instruction->operand == nullptr) {
LLVMBuildRetVoid(g->builder);
} else {
- LLVMValueRef value = ir_llvm_value(g, return_instruction->value);
+ LLVMValueRef value = ir_llvm_value(g, instruction->operand);
LLVMBuildRet(g->builder, value);
}
return nullptr;
@@ -3242,14 +3309,17 @@ static LLVMValueRef ir_render_bool_not(CodeGen *g, IrExecutable *executable, IrI
return LLVMBuildICmp(g->builder, LLVMIntEQ, value, zero, "");
}
-static LLVMValueRef ir_render_decl_var(CodeGen *g, IrExecutable *executable, IrInstructionDeclVarGen *instruction) {
- ZigVar *var = instruction->var;
-
+static void render_decl_var(CodeGen *g, ZigVar *var) {
if (!type_has_bits(var->var_type))
- return nullptr;
+ return;
- var->value_ref = ir_llvm_value(g, instruction->var_ptr);
+ var->value_ref = ir_llvm_value(g, var->ptr_instruction);
gen_var_debug_decl(g, var);
+}
+
+static LLVMValueRef ir_render_decl_var(CodeGen *g, IrExecutable *executable, IrInstructionDeclVarGen *instruction) {
+ instruction->var->ptr_instruction = instruction->var_ptr;
+ render_decl_var(g, instruction->var);
return nullptr;
}
@@ -3467,8 +3537,9 @@ static LLVMValueRef ir_render_var_ptr(CodeGen *g, IrExecutable *executable, IrIn
static LLVMValueRef ir_render_return_ptr(CodeGen *g, IrExecutable *executable,
IrInstructionReturnPtr *instruction)
{
- src_assert(g->cur_ret_ptr != nullptr || !type_has_bits(instruction->base.value.type),
- instruction->base.source_node);
+ if (!type_has_bits(instruction->base.value.type))
+ return nullptr;
+ src_assert(g->cur_ret_ptr != nullptr, instruction->base.source_node);
return g->cur_ret_ptr;
}
@@ -3566,26 +3637,6 @@ static LLVMValueRef ir_render_elem_ptr(CodeGen *g, IrExecutable *executable, IrI
}
}
-static bool get_prefix_arg_err_ret_stack(CodeGen *g, FnTypeId *fn_type_id) {
- return g->have_err_ret_tracing &&
- (fn_type_id->return_type->id == ZigTypeIdErrorUnion ||
- fn_type_id->return_type->id == ZigTypeIdErrorSet ||
- fn_type_id->cc == CallingConventionAsync);
-}
-
-static size_t get_async_allocator_arg_index(CodeGen *g, FnTypeId *fn_type_id) {
- // 0 1 2 3
- // err_ret_stack allocator_ptr err_code other_args...
- return get_prefix_arg_err_ret_stack(g, fn_type_id) ? 1 : 0;
-}
-
-static size_t get_async_err_code_arg_index(CodeGen *g, FnTypeId *fn_type_id) {
- // 0 1 2 3
- // err_ret_stack allocator_ptr err_code other_args...
- return 1 + get_async_allocator_arg_index(g, fn_type_id);
-}
-
-
static LLVMValueRef get_new_stack_addr(CodeGen *g, LLVMValueRef new_stack) {
LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, new_stack, (unsigned)slice_ptr_index, "");
LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, new_stack, (unsigned)slice_len_index, "");
@@ -3623,16 +3674,124 @@ static void set_call_instr_sret(CodeGen *g, LLVMValueRef call_instr) {
LLVMAddCallSiteAttribute(call_instr, 1, sret_attr);
}
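+// Async functions keep their state in a frame struct rather than on the
+// native stack. Parameters occupy frame fields starting at the args index,
+// and every alloca that was assigned a frame slot (field_index != SIZE_MAX)
+// lowers to a GEP into the current frame, so values survive suspend points.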
+static void render_async_spills(CodeGen *g) {
+ ZigType *fn_type = g->cur_fn->type_entry;
+ ZigType *import = get_scope_import(&g->cur_fn->fndef_scope->base);
+ uint32_t async_var_index = frame_index_arg(g, fn_type->data.fn.fn_type_id.return_type);
+ for (size_t var_i = 0; var_i < g->cur_fn->variable_list.length; var_i += 1) {
+ ZigVar *var = g->cur_fn->variable_list.at(var_i);
+
+ if (!type_has_bits(var->var_type)) {
+ continue;
+ }
+ if (ir_get_var_is_comptime(var))
+ continue;
+ switch (type_requires_comptime(g, var->var_type)) {
+ case ReqCompTimeInvalid:
+ zig_unreachable();
+ case ReqCompTimeYes:
+ continue;
+ case ReqCompTimeNo:
+ break;
+ }
+ if (var->src_arg_index == SIZE_MAX) {
+ continue;
+ }
+
+ var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, async_var_index,
+ buf_ptr(&var->name));
+ async_var_index += 1;
+ if (var->decl_node) {
+ var->di_loc_var = ZigLLVMCreateAutoVariable(g->dbuilder, get_di_scope(g, var->parent_scope),
+ buf_ptr(&var->name), import->data.structure.root_struct->di_file,
+ (unsigned)(var->decl_node->line + 1),
+ get_llvm_di_type(g, var->var_type), !g->strip_debug_symbols, 0);
+ gen_var_debug_decl(g, var);
+ }
+ }
+
+ ZigType *frame_type = g->cur_fn->frame_type->data.frame.locals_struct;
+
+ for (size_t alloca_i = 0; alloca_i < g->cur_fn->alloca_gen_list.length; alloca_i += 1) {
+ IrInstructionAllocaGen *instruction = g->cur_fn->alloca_gen_list.at(alloca_i);
+ if (instruction->field_index == SIZE_MAX)
+ continue;
+
+ size_t gen_index = frame_type->data.structure.fields[instruction->field_index].gen_index;
+ instruction->base.llvm_value = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, gen_index,
+ instruction->name_hint);
+ }
+}
+
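+// After control re-enters the function at a resume block, the debug-info
+// declarations for variables in every enclosing scope must be issued again,
+// otherwise debuggers lose sight of locals past the suspend point.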
+static void render_async_var_decls(CodeGen *g, Scope *scope) {
+ for (;;) {
+ switch (scope->id) {
+ case ScopeIdCImport:
+ zig_unreachable();
+ case ScopeIdFnDef:
+ return;
+ case ScopeIdVarDecl: {
+ ZigVar *var = reinterpret_cast<ScopeVarDecl *>(scope)->var;
+ if (var->ptr_instruction != nullptr) {
+ render_decl_var(g, var);
+ }
+ // fallthrough
+ }
+ case ScopeIdDecls:
+ case ScopeIdBlock:
+ case ScopeIdDefer:
+ case ScopeIdDeferExpr:
+ case ScopeIdLoop:
+ case ScopeIdSuspend:
+ case ScopeIdCompTime:
+ case ScopeIdRuntime:
+ scope = scope->parent;
+ continue;
+ }
+ }
+}
+
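+// The frame size of an async function is stored as a usize immediately
+// before the function's entry point (presumably via LLVM prefix data,
+// emitted elsewhere in this change); bitcasting the function pointer to
+// *usize and indexing -1 reads it back at runtime for @frameSize and the
+// @asyncCall frame-size safety check.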
+static LLVMValueRef gen_frame_size(CodeGen *g, LLVMValueRef fn_val) {
+ LLVMTypeRef usize_llvm_type = g->builtin_types.entry_usize->llvm_type;
+ LLVMTypeRef ptr_usize_llvm_type = LLVMPointerType(usize_llvm_type, 0);
+ LLVMValueRef casted_fn_val = LLVMBuildBitCast(g->builder, fn_val, ptr_usize_llvm_type, "");
+ LLVMValueRef negative_one = LLVMConstInt(LLVMInt32Type(), -1, true);
+ LLVMValueRef prefix_ptr = LLVMBuildInBoundsGEP(g->builder, casted_fn_val, &negative_one, 1, "");
+ return LLVMBuildLoad(g->builder, prefix_ptr, "");
+}
+
+static void gen_init_stack_trace(CodeGen *g, LLVMValueRef trace_field_ptr, LLVMValueRef addrs_field_ptr) {
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+ LLVMValueRef zero = LLVMConstNull(usize_type_ref);
+
+ LLVMValueRef index_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 0, "");
+ LLVMBuildStore(g->builder, zero, index_ptr);
+
+ LLVMValueRef addrs_slice_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 1, "");
+ LLVMValueRef addrs_ptr_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_ptr_index, "");
+ LLVMValueRef indices[] = { LLVMConstNull(usize_type_ref), LLVMConstNull(usize_type_ref) };
+ LLVMValueRef trace_field_addrs_as_ptr = LLVMBuildInBoundsGEP(g->builder, addrs_field_ptr, indices, 2, "");
+ LLVMBuildStore(g->builder, trace_field_addrs_as_ptr, addrs_ptr_ptr);
+
+ LLVMValueRef addrs_len_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_len_index, "");
+ LLVMBuildStore(g->builder, LLVMConstInt(usize_type_ref, stack_trace_ptr_count, false), addrs_len_ptr);
+}
+
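+// Calls now come in three shapes:
+//   1. async callee: initialize its frame (fn ptr, resume index, awaiter
+//      word, return pointers), copy arguments into the frame, then either
+//      fire-and-forget (`async f()`) or suspend until the callee returns.
+//   2. `async` call of a blocking function: run it synchronously into a
+//      frame whose awaiter word is pre-set to all ones ("result ready").
+//   3. plain call: ordinary parameter passing with optional sret and error
+//      return trace prefix arguments.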
static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstructionCallGen *instruction) {
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+
LLVMValueRef fn_val;
ZigType *fn_type;
+ bool callee_is_async;
if (instruction->fn_entry) {
fn_val = fn_llvm_value(g, instruction->fn_entry);
fn_type = instruction->fn_entry->type_entry;
+ callee_is_async = fn_is_async(instruction->fn_entry);
} else {
assert(instruction->fn_ref);
fn_val = ir_llvm_value(g, instruction->fn_ref);
fn_type = instruction->fn_ref->value.type;
+ callee_is_async = fn_type->data.fn.fn_type_id.cc == CallingConventionAsync;
}
FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
@@ -3643,27 +3802,156 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
CallingConvention cc = fn_type->data.fn.fn_type_id.cc;
bool first_arg_ret = ret_has_bits && want_first_arg_sret(g, fn_type_id);
- bool prefix_arg_err_ret_stack = get_prefix_arg_err_ret_stack(g, fn_type_id);
+ bool prefix_arg_err_ret_stack = codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type);
bool is_var_args = fn_type_id->is_var_args;
ZigList<LLVMValueRef> gen_param_values = {};
+ ZigList<ZigType *> gen_param_types = {};
LLVMValueRef result_loc = instruction->result_loc ? ir_llvm_value(g, instruction->result_loc) : nullptr;
- if (first_arg_ret) {
- gen_param_values.append(result_loc);
- }
- if (prefix_arg_err_ret_stack) {
- gen_param_values.append(get_cur_err_ret_trace_val(g, instruction->base.scope));
- }
- if (instruction->is_async) {
- gen_param_values.append(ir_llvm_value(g, instruction->async_allocator));
+ LLVMValueRef zero = LLVMConstNull(usize_type_ref);
+ LLVMValueRef frame_result_loc;
+ LLVMValueRef awaiter_init_val;
+ LLVMValueRef ret_ptr;
+ if (callee_is_async) {
+ if (instruction->is_async) {
+ if (instruction->new_stack == nullptr) {
+ awaiter_init_val = zero;
+ frame_result_loc = result_loc;
- LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, result_loc, err_union_err_index, "");
- gen_param_values.append(err_val_ptr);
+ if (ret_has_bits) {
+ // Use the result location which is inside the frame if this is an async call.
+ ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, "");
+ }
+ } else if (cc == CallingConventionAsync) {
+ awaiter_init_val = zero;
+ LLVMValueRef frame_slice_ptr = ir_llvm_value(g, instruction->new_stack);
+ if (ir_want_runtime_safety(g, &instruction->base)) {
+ LLVMValueRef given_len_ptr = LLVMBuildStructGEP(g->builder, frame_slice_ptr, slice_len_index, "");
+ LLVMValueRef given_frame_len = LLVMBuildLoad(g->builder, given_len_ptr, "");
+ LLVMValueRef actual_frame_len = gen_frame_size(g, fn_val);
+
+ LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "FrameSizeCheckFail");
+ LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "FrameSizeCheckOk");
+
+ LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntUGE, given_frame_len, actual_frame_len, "");
+ LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, fail_block);
+ gen_safety_crash(g, PanicMsgIdFrameTooSmall);
+
+ LLVMPositionBuilderAtEnd(g->builder, ok_block);
+ }
+ LLVMValueRef frame_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_slice_ptr, slice_ptr_index, "");
+ LLVMValueRef frame_ptr = LLVMBuildLoad(g->builder, frame_ptr_ptr, "");
+ frame_result_loc = LLVMBuildBitCast(g->builder, frame_ptr,
+ get_llvm_type(g, instruction->base.value.type), "");
+
+ if (ret_has_bits) {
+ // Use the result location provided to the @asyncCall builtin
+ ret_ptr = result_loc;
+ }
+ } else {
+ zig_unreachable();
+ }
+
+ // even if prefix_arg_err_ret_stack is true, let the async function do its own
+ // initialization.
+ } else {
+ // async function called as a normal function
+
+ frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc);
+ awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, ""); // caller's own frame pointer
+ if (ret_has_bits) {
+ if (result_loc == nullptr) {
+ // return type is a scalar, but we still need a pointer to it. Use the async fn frame.
+ ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, "");
+ } else {
+ // Use the call instruction's result location.
+ ret_ptr = result_loc;
+ }
+
+ // Store a zero in the awaiter's result ptr to indicate we do not need a copy made.
+ LLVMValueRef awaiter_ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 1, "");
+ LLVMValueRef zero_ptr = LLVMConstNull(LLVMGetElementType(LLVMTypeOf(awaiter_ret_ptr)));
+ LLVMBuildStore(g->builder, zero_ptr, awaiter_ret_ptr);
+ }
+
+ if (prefix_arg_err_ret_stack) {
+ LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc,
+ frame_index_trace_arg(g, src_return_type) + 1, "");
+ LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope);
+ LLVMBuildStore(g->builder, my_err_ret_trace_val, err_ret_trace_ptr_ptr);
+ }
+ }
+
+ assert(frame_result_loc != nullptr);
+
+ LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_fn_ptr_index, "");
+ LLVMValueRef bitcasted_fn_val = LLVMBuildBitCast(g->builder, fn_val,
+ LLVMGetElementType(LLVMTypeOf(fn_ptr_ptr)), "");
+ LLVMBuildStore(g->builder, bitcasted_fn_val, fn_ptr_ptr);
+
+ LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_resume_index, "");
+ LLVMBuildStore(g->builder, zero, resume_index_ptr);
+
+ LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_awaiter_index, "");
+ LLVMBuildStore(g->builder, awaiter_init_val, awaiter_ptr);
+
+ if (ret_has_bits) {
+ LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start, "");
+ LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr);
+ }
+ } else if (instruction->is_async) {
+ // Async call of blocking function
+ if (instruction->new_stack != nullptr) {
+ zig_panic("TODO @asyncCall of non-async function");
+ }
+ frame_result_loc = result_loc;
+ awaiter_init_val = LLVMConstAllOnes(usize_type_ref);
+
+ LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_awaiter_index, "");
+ LLVMBuildStore(g->builder, awaiter_init_val, awaiter_ptr);
+
+ if (ret_has_bits) {
+ LLVMValueRef ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, "");
+ LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start, "");
+ LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr);
+
+ if (first_arg_ret) {
+ gen_param_values.append(ret_ptr);
+ }
+ if (prefix_arg_err_ret_stack) {
+ // Set up the callee stack trace pointer pointing into the frame.
+ // Then we have to wire up the StackTrace pointers.
+ // Await is responsible for merging error return traces.
+ uint32_t trace_field_index_start = frame_index_trace_arg(g, src_return_type);
+ LLVMValueRef callee_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc,
+ trace_field_index_start, "");
+ LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc,
+ trace_field_index_start + 2, "");
+ LLVMValueRef addrs_field_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc,
+ trace_field_index_start + 3, "");
+
+ LLVMBuildStore(g->builder, trace_field_ptr, callee_trace_ptr_ptr);
+
+ gen_init_stack_trace(g, trace_field_ptr, addrs_field_ptr);
+
+ gen_param_values.append(get_cur_err_ret_trace_val(g, instruction->base.scope));
+ }
+ }
+ } else {
+ if (first_arg_ret) {
+ gen_param_values.append(result_loc);
+ }
+ if (prefix_arg_err_ret_stack) {
+ gen_param_values.append(get_cur_err_ret_trace_val(g, instruction->base.scope));
+ }
}
FnWalk fn_walk = {};
fn_walk.id = FnWalkIdCall;
fn_walk.data.call.inst = instruction;
fn_walk.data.call.is_var_args = is_var_args;
fn_walk.data.call.gen_param_values = &gen_param_values;
+ fn_walk.data.call.gen_param_types = &gen_param_types;
walk_function_params(g, fn_type, &fn_walk);
ZigLLVM_FnInline fn_inline;
@@ -3682,9 +3970,68 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
LLVMCallConv llvm_cc = get_llvm_cc(g, cc);
LLVMValueRef result;
+ if (callee_is_async) {
+ uint32_t arg_start_i = frame_index_arg(g, fn_type->data.fn.fn_type_id.return_type);
+
+ LLVMValueRef casted_frame;
+ if (instruction->new_stack != nullptr) {
+ // We need the frame type to be a pointer to a struct that includes the args
+ size_t field_count = arg_start_i + gen_param_values.length;
+ LLVMTypeRef *field_types = allocate_nonzero<LLVMTypeRef>(field_count);
+ LLVMGetStructElementTypes(LLVMGetElementType(LLVMTypeOf(frame_result_loc)), field_types);
+ assert(LLVMCountStructElementTypes(LLVMGetElementType(LLVMTypeOf(frame_result_loc))) == arg_start_i);
+ for (size_t arg_i = 0; arg_i < gen_param_values.length; arg_i += 1) {
+ field_types[arg_start_i + arg_i] = LLVMTypeOf(gen_param_values.at(arg_i));
+ }
+ LLVMTypeRef frame_with_args_type = LLVMStructType(field_types, field_count, false);
+ LLVMTypeRef ptr_frame_with_args_type = LLVMPointerType(frame_with_args_type, 0);
+
+ casted_frame = LLVMBuildBitCast(g->builder, frame_result_loc, ptr_frame_with_args_type, "");
+ } else {
+ casted_frame = frame_result_loc;
+ }
+
+ for (size_t arg_i = 0; arg_i < gen_param_values.length; arg_i += 1) {
+ LLVMValueRef arg_ptr = LLVMBuildStructGEP(g->builder, casted_frame, arg_start_i + arg_i, "");
+ gen_assign_raw(g, arg_ptr, get_pointer_to_type(g, gen_param_types.at(arg_i), true),
+ gen_param_values.at(arg_i));
+ }
+
+ if (instruction->is_async) {
+ gen_resume(g, fn_val, frame_result_loc, ResumeIdCall);
+ if (instruction->new_stack != nullptr) {
+ return frame_result_loc;
+ }
+ return nullptr;
+ } else {
+ ZigType *ptr_result_type = get_pointer_to_type(g, src_return_type, true);
+
+ LLVMBasicBlockRef call_bb = gen_suspend_begin(g, "CallResume");
+
+ LLVMValueRef call_inst = gen_resume(g, fn_val, frame_result_loc, ResumeIdCall);
+ set_tail_call_if_appropriate(g, call_inst);
+ LLVMBuildRetVoid(g->builder);
+
+ LLVMPositionBuilderAtEnd(g->builder, call_bb);
+ gen_assert_resume_id(g, &instruction->base, ResumeIdReturn, PanicMsgIdResumedAnAwaitingFn, nullptr);
+ render_async_var_decls(g, instruction->base.scope);
+
+ if (!type_has_bits(src_return_type))
+ return nullptr;
+
+ if (result_loc != nullptr)
+ return get_handle_value(g, result_loc, src_return_type, ptr_result_type);
+
+ LLVMValueRef result_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, "");
+ return LLVMBuildLoad(g->builder, result_ptr, "");
+ }
+ }
+
if (instruction->new_stack == nullptr) {
result = ZigLLVMBuildCall(g->builder, fn_val,
gen_param_values.items, (unsigned)gen_param_values.length, llvm_cc, fn_inline, "");
+ } else if (instruction->is_async) {
+ zig_panic("TODO @asyncCall of non-async function");
} else {
LLVMValueRef stacksave_fn_val = get_stacksave_fn_val(g);
LLVMValueRef stackrestore_fn_val = get_stackrestore_fn_val(g);
@@ -3697,13 +4044,6 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
LLVMBuildCall(g->builder, stackrestore_fn_val, &old_stack_ref, 1, "");
}
-
- if (instruction->is_async) {
- LLVMValueRef payload_ptr = LLVMBuildStructGEP(g->builder, result_loc, err_union_payload_index, "");
- LLVMBuildStore(g->builder, result, payload_ptr);
- return result_loc;
- }
-
if (src_return_type->id == ZigTypeIdUnreachable) {
return LLVMBuildUnreachable(g->builder);
} else if (!ret_has_bits) {
@@ -4200,7 +4540,7 @@ static LLVMValueRef get_enum_tag_name_function(CodeGen *g, ZigType *enum_type) {
LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified));
addLLVMFnAttr(fn_val, "nounwind");
add_uwtable_attr(g, fn_val);
- if (g->build_mode == BuildModeDebug) {
+ if (codegen_have_frame_pointer(g)) {
ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true");
ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr);
}
@@ -4347,10 +4687,6 @@ static LLVMValueRef ir_render_align_cast(CodeGen *g, IrExecutable *executable, I
{
align_bytes = target_type->data.maybe.child_type->data.fn.fn_type_id.alignment;
ptr_val = target_val;
- } else if (target_type->id == ZigTypeIdOptional &&
- target_type->data.maybe.child_type->id == ZigTypeIdPromise)
- {
- zig_panic("TODO audit this function");
} else if (target_type->id == ZigTypeIdStruct && target_type->data.structure.is_slice) {
ZigType *slice_ptr_type = target_type->data.structure.fields[slice_ptr_index].type_entry;
align_bytes = get_ptr_align(g, slice_ptr_type);
@@ -4388,26 +4724,11 @@ static LLVMValueRef ir_render_error_return_trace(CodeGen *g, IrExecutable *execu
{
LLVMValueRef cur_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope);
if (cur_err_ret_trace_val == nullptr) {
- ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(g);
- return LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type));
+ return LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type(g)));
}
return cur_err_ret_trace_val;
}
-static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrInstructionCancel *instruction) {
- LLVMValueRef target_handle = ir_llvm_value(g, instruction->target);
- LLVMBuildCall(g->builder, get_coro_destroy_fn_val(g), &target_handle, 1, "");
- return nullptr;
-}
-
-static LLVMValueRef ir_render_get_implicit_allocator(CodeGen *g, IrExecutable *executable,
- IrInstructionGetImplicitAllocator *instruction)
-{
- assert(instruction->id == ImplicitAllocatorIdArg);
- size_t allocator_arg_index = get_async_allocator_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id);
- return LLVMGetParam(g->cur_fn_val, allocator_arg_index);
-}
-
static LLVMAtomicOrdering to_LLVMAtomicOrdering(AtomicOrder atomic_order) {
switch (atomic_order) {
case AtomicOrderUnordered: return LLVMAtomicOrderingUnordered;
@@ -4722,24 +5043,8 @@ static LLVMValueRef ir_render_frame_address(CodeGen *g, IrExecutable *executable
return LLVMBuildPtrToInt(g->builder, ptr_val, g->builtin_types.entry_usize->llvm_type, "");
}
-static LLVMValueRef get_handle_fn_val(CodeGen *g) {
- if (g->coro_frame_fn_val)
- return g->coro_frame_fn_val;
-
- LLVMTypeRef fn_type = LLVMFunctionType( LLVMPointerType(LLVMInt8Type(), 0)
- , nullptr, 0, false);
- Buf *name = buf_sprintf("llvm.coro.frame");
- g->coro_frame_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_frame_fn_val));
-
- return g->coro_frame_fn_val;
-}
-
-static LLVMValueRef ir_render_handle(CodeGen *g, IrExecutable *executable,
- IrInstructionHandle *instruction)
-{
- LLVMValueRef zero = LLVMConstNull(get_llvm_type(g, g->builtin_types.entry_promise));
- return LLVMBuildCall(g->builder, get_handle_fn_val(g), &zero, 0, "");
+static LLVMValueRef ir_render_handle(CodeGen *g, IrExecutable *executable, IrInstructionFrameHandle *instruction) {
+ return g->cur_frame_ptr;
}
static LLVMValueRef render_shl_with_overflow(CodeGen *g, IrInstructionOverflowOp *instruction) {
@@ -5005,248 +5310,6 @@ static LLVMValueRef ir_render_panic(CodeGen *g, IrExecutable *executable, IrInst
return nullptr;
}
-static LLVMValueRef ir_render_coro_id(CodeGen *g, IrExecutable *executable, IrInstructionCoroId *instruction) {
- LLVMValueRef promise_ptr = ir_llvm_value(g, instruction->promise_ptr);
- LLVMValueRef align_val = LLVMConstInt(LLVMInt32Type(), get_coro_frame_align_bytes(g), false);
- LLVMValueRef null = LLVMConstIntToPtr(LLVMConstNull(g->builtin_types.entry_usize->llvm_type),
- LLVMPointerType(LLVMInt8Type(), 0));
- LLVMValueRef params[] = {
- align_val,
- promise_ptr,
- null,
- null,
- };
- return LLVMBuildCall(g->builder, get_coro_id_fn_val(g), params, 4, "");
-}
-
-static LLVMValueRef ir_render_coro_alloc(CodeGen *g, IrExecutable *executable, IrInstructionCoroAlloc *instruction) {
- LLVMValueRef token = ir_llvm_value(g, instruction->coro_id);
- return LLVMBuildCall(g->builder, get_coro_alloc_fn_val(g), &token, 1, "");
-}
-
-static LLVMValueRef ir_render_coro_size(CodeGen *g, IrExecutable *executable, IrInstructionCoroSize *instruction) {
- return LLVMBuildCall(g->builder, get_coro_size_fn_val(g), nullptr, 0, "");
-}
-
-static LLVMValueRef ir_render_coro_begin(CodeGen *g, IrExecutable *executable, IrInstructionCoroBegin *instruction) {
- LLVMValueRef coro_id = ir_llvm_value(g, instruction->coro_id);
- LLVMValueRef coro_mem_ptr = ir_llvm_value(g, instruction->coro_mem_ptr);
- LLVMValueRef params[] = {
- coro_id,
- coro_mem_ptr,
- };
- return LLVMBuildCall(g->builder, get_coro_begin_fn_val(g), params, 2, "");
-}
-
-static LLVMValueRef ir_render_coro_alloc_fail(CodeGen *g, IrExecutable *executable,
- IrInstructionCoroAllocFail *instruction)
-{
- size_t err_code_ptr_arg_index = get_async_err_code_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id);
- LLVMValueRef err_code_ptr_val = LLVMGetParam(g->cur_fn_val, err_code_ptr_arg_index);
- LLVMValueRef err_code = ir_llvm_value(g, instruction->err_val);
- LLVMBuildStore(g->builder, err_code, err_code_ptr_val);
-
- LLVMValueRef return_value;
- if (ir_want_runtime_safety(g, &instruction->base)) {
- return_value = LLVMConstNull(LLVMPointerType(LLVMInt8Type(), 0));
- } else {
- return_value = LLVMGetUndef(LLVMPointerType(LLVMInt8Type(), 0));
- }
- LLVMBuildRet(g->builder, return_value);
- return nullptr;
-}
-
-static LLVMValueRef ir_render_coro_suspend(CodeGen *g, IrExecutable *executable, IrInstructionCoroSuspend *instruction) {
- LLVMValueRef save_point;
- if (instruction->save_point == nullptr) {
- save_point = LLVMConstNull(ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()));
- } else {
- save_point = ir_llvm_value(g, instruction->save_point);
- }
- LLVMValueRef is_final = ir_llvm_value(g, instruction->is_final);
- LLVMValueRef params[] = {
- save_point,
- is_final,
- };
- return LLVMBuildCall(g->builder, get_coro_suspend_fn_val(g), params, 2, "");
-}
-
-static LLVMValueRef ir_render_coro_end(CodeGen *g, IrExecutable *executable, IrInstructionCoroEnd *instruction) {
- LLVMValueRef params[] = {
- LLVMConstNull(LLVMPointerType(LLVMInt8Type(), 0)),
- LLVMConstNull(LLVMInt1Type()),
- };
- return LLVMBuildCall(g->builder, get_coro_end_fn_val(g), params, 2, "");
-}
-
-static LLVMValueRef ir_render_coro_free(CodeGen *g, IrExecutable *executable, IrInstructionCoroFree *instruction) {
- LLVMValueRef coro_id = ir_llvm_value(g, instruction->coro_id);
- LLVMValueRef coro_handle = ir_llvm_value(g, instruction->coro_handle);
- LLVMValueRef params[] = {
- coro_id,
- coro_handle,
- };
- return LLVMBuildCall(g->builder, get_coro_free_fn_val(g), params, 2, "");
-}
-
-static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, IrInstructionCoroResume *instruction) {
- LLVMValueRef awaiter_handle = ir_llvm_value(g, instruction->awaiter_handle);
- return LLVMBuildCall(g->builder, get_coro_resume_fn_val(g), &awaiter_handle, 1, "");
-}
-
-static LLVMValueRef ir_render_coro_save(CodeGen *g, IrExecutable *executable, IrInstructionCoroSave *instruction) {
- LLVMValueRef coro_handle = ir_llvm_value(g, instruction->coro_handle);
- return LLVMBuildCall(g->builder, get_coro_save_fn_val(g), &coro_handle, 1, "");
-}
-
-static LLVMValueRef ir_render_coro_promise(CodeGen *g, IrExecutable *executable, IrInstructionCoroPromise *instruction) {
- LLVMValueRef coro_handle = ir_llvm_value(g, instruction->coro_handle);
- LLVMValueRef params[] = {
- coro_handle,
- LLVMConstInt(LLVMInt32Type(), get_coro_frame_align_bytes(g), false),
- LLVMConstNull(LLVMInt1Type()),
- };
- LLVMValueRef uncasted_result = LLVMBuildCall(g->builder, get_coro_promise_fn_val(g), params, 3, "");
- return LLVMBuildBitCast(g->builder, uncasted_result, get_llvm_type(g, instruction->base.value.type), "");
-}
-
-static LLVMValueRef get_coro_alloc_helper_fn_val(CodeGen *g, LLVMTypeRef alloc_fn_type_ref, ZigType *fn_type) {
- if (g->coro_alloc_helper_fn_val != nullptr)
- return g->coro_alloc_helper_fn_val;
-
- assert(fn_type->id == ZigTypeIdFn);
-
- ZigType *ptr_to_err_code_type = get_pointer_to_type(g, g->builtin_types.entry_global_error_set, false);
-
- LLVMTypeRef alloc_raw_fn_type_ref = LLVMGetElementType(alloc_fn_type_ref);
- LLVMTypeRef *alloc_fn_arg_types = allocate<LLVMTypeRef>(LLVMCountParamTypes(alloc_raw_fn_type_ref));
- LLVMGetParamTypes(alloc_raw_fn_type_ref, alloc_fn_arg_types);
-
- ZigList<LLVMTypeRef> arg_types = {};
- arg_types.append(alloc_fn_type_ref);
- if (g->have_err_ret_tracing) {
- arg_types.append(alloc_fn_arg_types[1]);
- }
- arg_types.append(alloc_fn_arg_types[g->have_err_ret_tracing ? 2 : 1]);
- arg_types.append(get_llvm_type(g, ptr_to_err_code_type));
- arg_types.append(g->builtin_types.entry_usize->llvm_type);
-
- LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0),
- arg_types.items, arg_types.length, false);
-
- Buf *fn_name = get_mangled_name(g, buf_create_from_str("__zig_coro_alloc_helper"), false);
- LLVMValueRef fn_val = LLVMAddFunction(g->module, buf_ptr(fn_name), fn_type_ref);
- LLVMSetLinkage(fn_val, LLVMInternalLinkage);
- LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified));
- addLLVMFnAttr(fn_val, "nounwind");
- addLLVMArgAttr(fn_val, (unsigned)0, "nonnull");
- addLLVMArgAttr(fn_val, (unsigned)1, "nonnull");
-
- LLVMBasicBlockRef prev_block = LLVMGetInsertBlock(g->builder);
- LLVMValueRef prev_debug_location = LLVMGetCurrentDebugLocation(g->builder);
- ZigFn *prev_cur_fn = g->cur_fn;
- LLVMValueRef prev_cur_fn_val = g->cur_fn_val;
-
- LLVMBasicBlockRef entry_block = LLVMAppendBasicBlock(fn_val, "Entry");
- LLVMPositionBuilderAtEnd(g->builder, entry_block);
- ZigLLVMClearCurrentDebugLocation(g->builder);
- g->cur_fn = nullptr;
- g->cur_fn_val = fn_val;
-
- LLVMValueRef sret_ptr = LLVMBuildAlloca(g->builder, LLVMGetElementType(alloc_fn_arg_types[0]), "");
-
- size_t next_arg = 0;
- LLVMValueRef realloc_fn_val = LLVMGetParam(fn_val, next_arg);
- next_arg += 1;
-
- LLVMValueRef stack_trace_val;
- if (g->have_err_ret_tracing) {
- stack_trace_val = LLVMGetParam(fn_val, next_arg);
- next_arg += 1;
- }
-
- LLVMValueRef allocator_val = LLVMGetParam(fn_val, next_arg);
- next_arg += 1;
- LLVMValueRef err_code_ptr = LLVMGetParam(fn_val, next_arg);
- next_arg += 1;
- LLVMValueRef coro_size = LLVMGetParam(fn_val, next_arg);
- next_arg += 1;
- LLVMValueRef alignment_val = LLVMConstInt(g->builtin_types.entry_u29->llvm_type,
- get_coro_frame_align_bytes(g), false);
-
- ConstExprValue *zero_array = create_const_str_lit(g, buf_create_from_str(""));
- ConstExprValue *undef_slice_zero = create_const_slice(g, zero_array, 0, 0, false);
- render_const_val(g, undef_slice_zero, "");
- render_const_val_global(g, undef_slice_zero, "");
-
- ZigList<LLVMValueRef> args = {};
- args.append(sret_ptr);
- if (g->have_err_ret_tracing) {
- args.append(stack_trace_val);
- }
- args.append(allocator_val);
- args.append(undef_slice_zero->global_refs->llvm_global);
- args.append(LLVMGetUndef(g->builtin_types.entry_u29->llvm_type));
- args.append(coro_size);
- args.append(alignment_val);
- LLVMValueRef call_instruction = ZigLLVMBuildCall(g->builder, realloc_fn_val, args.items, args.length,
- get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
- set_call_instr_sret(g, call_instruction);
- LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, sret_ptr, err_union_err_index, "");
- LLVMValueRef err_val = LLVMBuildLoad(g->builder, err_val_ptr, "");
- LLVMBuildStore(g->builder, err_val, err_code_ptr);
- LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, err_val, LLVMConstNull(LLVMTypeOf(err_val)), "");
- LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(fn_val, "AllocOk");
- LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(fn_val, "AllocFail");
- LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block);
-
- LLVMPositionBuilderAtEnd(g->builder, ok_block);
- LLVMValueRef payload_ptr = LLVMBuildStructGEP(g->builder, sret_ptr, err_union_payload_index, "");
- ZigType *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, false, false,
- PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0, false);
- ZigType *slice_type = get_slice_type(g, u8_ptr_type);
- size_t ptr_field_index = slice_type->data.structure.fields[slice_ptr_index].gen_index;
- LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, payload_ptr, ptr_field_index, "");
- LLVMValueRef ptr_val = LLVMBuildLoad(g->builder, ptr_field_ptr, "");
- LLVMBuildRet(g->builder, ptr_val);
-
- LLVMPositionBuilderAtEnd(g->builder, fail_block);
- LLVMBuildRet(g->builder, LLVMConstNull(LLVMPointerType(LLVMInt8Type(), 0)));
-
- g->cur_fn = prev_cur_fn;
- g->cur_fn_val = prev_cur_fn_val;
- LLVMPositionBuilderAtEnd(g->builder, prev_block);
- if (!g->strip_debug_symbols) {
- LLVMSetCurrentDebugLocation(g->builder, prev_debug_location);
- }
-
- g->coro_alloc_helper_fn_val = fn_val;
- return fn_val;
-}
-
-static LLVMValueRef ir_render_coro_alloc_helper(CodeGen *g, IrExecutable *executable,
- IrInstructionCoroAllocHelper *instruction)
-{
- LLVMValueRef realloc_fn = ir_llvm_value(g, instruction->realloc_fn);
- LLVMValueRef coro_size = ir_llvm_value(g, instruction->coro_size);
- LLVMValueRef fn_val = get_coro_alloc_helper_fn_val(g, LLVMTypeOf(realloc_fn), instruction->realloc_fn->value.type);
- size_t err_code_ptr_arg_index = get_async_err_code_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id);
- size_t allocator_arg_index = get_async_allocator_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id);
-
- ZigList<LLVMValueRef> params = {};
- params.append(realloc_fn);
- uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, g->cur_fn);
- if (err_ret_trace_arg_index != UINT32_MAX) {
- params.append(LLVMGetParam(g->cur_fn_val, err_ret_trace_arg_index));
- }
- params.append(LLVMGetParam(g->cur_fn_val, allocator_arg_index));
- params.append(LLVMGetParam(g->cur_fn_val, err_code_ptr_arg_index));
- params.append(coro_size);
-
- return ZigLLVMBuildCall(g->builder, fn_val, params.items, params.length,
- get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
-}
-
static LLVMValueRef ir_render_atomic_rmw(CodeGen *g, IrExecutable *executable,
IrInstructionAtomicRmw *instruction)
{
@@ -5263,14 +5326,15 @@ static LLVMValueRef ir_render_atomic_rmw(CodeGen *g, IrExecutable *executable,
LLVMValueRef operand = ir_llvm_value(g, instruction->operand);
if (get_codegen_ptr_type(operand_type) == nullptr) {
- return LLVMBuildAtomicRMW(g->builder, op, ptr, operand, ordering, false);
+ return LLVMBuildAtomicRMW(g->builder, op, ptr, operand, ordering, g->is_single_threaded);
}
// it's a pointer but we need to treat it as an int
LLVMValueRef casted_ptr = LLVMBuildBitCast(g->builder, ptr,
LLVMPointerType(g->builtin_types.entry_usize->llvm_type, 0), "");
LLVMValueRef casted_operand = LLVMBuildPtrToInt(g->builder, operand, g->builtin_types.entry_usize->llvm_type, "");
- LLVMValueRef uncasted_result = LLVMBuildAtomicRMW(g->builder, op, casted_ptr, casted_operand, ordering, false);
+ LLVMValueRef uncasted_result = LLVMBuildAtomicRMW(g->builder, op, casted_ptr, casted_operand, ordering,
+ g->is_single_threaded);
return LLVMBuildIntToPtr(g->builder, uncasted_result, get_llvm_type(g, operand_type), "");
}
@@ -5284,27 +5348,6 @@ static LLVMValueRef ir_render_atomic_load(CodeGen *g, IrExecutable *executable,
return load_inst;
}
-static LLVMValueRef ir_render_merge_err_ret_traces(CodeGen *g, IrExecutable *executable,
- IrInstructionMergeErrRetTraces *instruction)
-{
- assert(g->have_err_ret_tracing);
-
- LLVMValueRef src_trace_ptr = ir_llvm_value(g, instruction->src_err_ret_trace_ptr);
- LLVMValueRef dest_trace_ptr = ir_llvm_value(g, instruction->dest_err_ret_trace_ptr);
-
- LLVMValueRef args[] = { dest_trace_ptr, src_trace_ptr };
- ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
- return nullptr;
-}
-
-static LLVMValueRef ir_render_mark_err_ret_trace_ptr(CodeGen *g, IrExecutable *executable,
- IrInstructionMarkErrRetTracePtr *instruction)
-{
- assert(g->have_err_ret_tracing);
- g->cur_err_ret_trace_val_stack = ir_llvm_value(g, instruction->err_ret_trace_ptr);
- return nullptr;
-}
-
static LLVMValueRef ir_render_float_op(CodeGen *g, IrExecutable *executable, IrInstructionFloatOp *instruction) {
LLVMValueRef op = ir_llvm_value(g, instruction->op1);
assert(instruction->base.value.type->id == ZigTypeIdFloat);
@@ -5424,6 +5467,176 @@ static LLVMValueRef ir_render_assert_non_null(CodeGen *g, IrExecutable *executab
return nullptr;
}
+static LLVMValueRef ir_render_suspend_begin(CodeGen *g, IrExecutable *executable,
+ IrInstructionSuspendBegin *instruction)
+{
+ if (fn_is_async(g->cur_fn)) {
+ instruction->resume_bb = gen_suspend_begin(g, "SuspendResume");
+ }
+ return nullptr;
+}
+
+static LLVMValueRef ir_render_suspend_finish(CodeGen *g, IrExecutable *executable,
+ IrInstructionSuspendFinish *instruction)
+{
+ LLVMBuildRetVoid(g->builder);
+
+ LLVMPositionBuilderAtEnd(g->builder, instruction->begin->resume_bb);
+ render_async_var_decls(g, instruction->base.scope);
+ return nullptr;
+}
+
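+// Await is the consumer side of the awaiter-word protocol described above
+// gen_async_return: publish our own frame address with an atomic exchange,
+// then either suspend (the target is still running and will resume us) or
+// take the early-return path and copy the result out of the target frame.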
+static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInstructionAwaitGen *instruction) {
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+ LLVMValueRef zero = LLVMConstNull(usize_type_ref);
+ LLVMValueRef target_frame_ptr = ir_llvm_value(g, instruction->frame);
+ ZigType *result_type = instruction->base.value.type;
+ ZigType *ptr_result_type = get_pointer_to_type(g, result_type, true);
+
+ // Prepare to be suspended
+ LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "AwaitResume");
+ LLVMBasicBlockRef end_bb = LLVMAppendBasicBlock(g->cur_fn_val, "AwaitEnd");
+
+ // At this point resuming the function will continue from resume_bb.
+ // This code is as if it is running inside the suspend block.
+
+ // supply the awaiter return pointer
+ LLVMValueRef result_loc = (instruction->result_loc == nullptr) ?
+ nullptr : ir_llvm_value(g, instruction->result_loc);
+ if (type_has_bits(result_type)) {
+ LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start + 1, "");
+ if (result_loc == nullptr) {
+ // no copy needed
+ LLVMBuildStore(g->builder, LLVMConstNull(LLVMGetElementType(LLVMTypeOf(awaiter_ret_ptr_ptr))),
+ awaiter_ret_ptr_ptr);
+ } else {
+ LLVMBuildStore(g->builder, result_loc, awaiter_ret_ptr_ptr);
+ }
+ }
+
+ // supply the error return trace pointer
+ if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
+ LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope);
+ assert(my_err_ret_trace_val != nullptr);
+ LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr,
+ frame_index_trace_arg(g, result_type) + 1, "");
+ LLVMBuildStore(g->builder, my_err_ret_trace_val, err_ret_trace_ptr_ptr);
+ }
+
+ // caller's own frame pointer
+ LLVMValueRef awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, "");
+ LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_awaiter_index, "");
+ LLVMValueRef prev_val = gen_maybe_atomic_op(g, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_init_val,
+ LLVMAtomicOrderingRelease);
+
+ LLVMBasicBlockRef bad_await_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadAwait");
+ LLVMBasicBlockRef complete_suspend_block = LLVMAppendBasicBlock(g->cur_fn_val, "CompleteSuspend");
+ LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn");
+
+ LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref);
+ LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, bad_await_block, 2);
+
+ LLVMAddCase(switch_instr, zero, complete_suspend_block);
+ LLVMAddCase(switch_instr, all_ones, early_return_block);
+
+ // We discovered that another awaiter was already here.
+ LLVMPositionBuilderAtEnd(g->builder, bad_await_block);
+ gen_assertion(g, PanicMsgIdBadAwait, &instruction->base);
+
+ // Rely on the target to resume us from suspension.
+ LLVMPositionBuilderAtEnd(g->builder, complete_suspend_block);
+ LLVMBuildRetVoid(g->builder);
+
+ // Early return: The async function has already completed. We must copy the result and
+ // the error return trace if applicable.
+ LLVMPositionBuilderAtEnd(g->builder, early_return_block);
+ if (type_has_bits(result_type) && result_loc != nullptr) {
+ LLVMValueRef their_result_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start, "");
+ LLVMValueRef their_result_ptr = LLVMBuildLoad(g->builder, their_result_ptr_ptr, "");
+ LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0);
+ LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, result_loc, ptr_u8, "");
+ LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, their_result_ptr, ptr_u8, "");
+ bool is_volatile = false;
+ uint32_t abi_align = get_abi_alignment(g, result_type);
+ LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, result_type), false);
+ ZigLLVMBuildMemCpy(g->builder,
+ dest_ptr_casted, abi_align,
+ src_ptr_casted, abi_align, byte_count_val, is_volatile);
+ }
+ if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
+ LLVMValueRef their_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr,
+ frame_index_trace_arg(g, result_type), "");
+ LLVMValueRef src_trace_ptr = LLVMBuildLoad(g->builder, their_trace_ptr_ptr, "");
+ LLVMValueRef dest_trace_ptr = get_cur_err_ret_trace_val(g, instruction->base.scope);
+ LLVMValueRef args[] = { dest_trace_ptr, src_trace_ptr };
+ ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2,
+ get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
+ }
+ LLVMBuildBr(g->builder, end_bb);
+
+ LLVMPositionBuilderAtEnd(g->builder, resume_bb);
+ gen_assert_resume_id(g, &instruction->base, ResumeIdReturn, PanicMsgIdResumedAnAwaitingFn, nullptr);
+ LLVMBuildBr(g->builder, end_bb);
+
+ LLVMPositionBuilderAtEnd(g->builder, end_bb);
+ if (type_has_bits(result_type) && result_loc != nullptr) {
+ return get_handle_value(g, result_loc, result_type, ptr_result_type);
+ }
+ return nullptr;
+}
+
+static LLVMValueRef ir_render_resume(CodeGen *g, IrExecutable *executable, IrInstructionResume *instruction) {
+ LLVMValueRef frame = ir_llvm_value(g, instruction->frame);
+ ZigType *frame_type = instruction->frame->value.type;
+ assert(frame_type->id == ZigTypeIdAnyFrame);
+
+ gen_resume(g, nullptr, frame, ResumeIdManual);
+ return nullptr;
+}
+
+static LLVMValueRef ir_render_frame_size(CodeGen *g, IrExecutable *executable,
+ IrInstructionFrameSizeGen *instruction)
+{
+ LLVMValueRef fn_val = ir_llvm_value(g, instruction->fn);
+ return gen_frame_size(g, fn_val);
+}
+
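+// A value produced before a suspend point and consumed after it cannot live
+// in an LLVM register across the return/resume boundary, so it is spilled
+// into a dedicated frame slot (here: the pending error code) and reloaded on
+// the other side. In a blocking function the spill pair degenerates into
+// simply forwarding the operand.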
+static LLVMValueRef ir_render_spill_begin(CodeGen *g, IrExecutable *executable,
+ IrInstructionSpillBegin *instruction)
+{
+ if (!fn_is_async(g->cur_fn))
+ return nullptr;
+
+ switch (instruction->spill_id) {
+ case SpillIdInvalid:
+ zig_unreachable();
+ case SpillIdRetErrCode: {
+ LLVMValueRef operand = ir_llvm_value(g, instruction->operand);
+ LLVMValueRef ptr = ir_llvm_value(g, g->cur_fn->err_code_spill);
+ LLVMBuildStore(g->builder, operand, ptr);
+ return nullptr;
+ }
+
+ }
+ zig_unreachable();
+}
+
+static LLVMValueRef ir_render_spill_end(CodeGen *g, IrExecutable *executable, IrInstructionSpillEnd *instruction) {
+ if (!fn_is_async(g->cur_fn))
+ return ir_llvm_value(g, instruction->begin->operand);
+
+ switch (instruction->begin->spill_id) {
+ case SpillIdInvalid:
+ zig_unreachable();
+ case SpillIdRetErrCode: {
+ LLVMValueRef ptr = ir_llvm_value(g, g->cur_fn->err_code_spill);
+ return LLVMBuildLoad(g->builder, ptr, "");
+ }
+
+ }
+ zig_unreachable();
+}
+
static void set_debug_location(CodeGen *g, IrInstruction *instruction) {
AstNode *source_node = instruction->source_node;
Scope *scope = instruction->scope;
@@ -5445,7 +5658,7 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdSetRuntimeSafety:
case IrInstructionIdSetFloatMode:
case IrInstructionIdArrayType:
- case IrInstructionIdPromiseType:
+ case IrInstructionIdAnyFrameType:
case IrInstructionIdSliceType:
case IrInstructionIdSizeOf:
case IrInstructionIdSwitchTarget:
@@ -5485,8 +5698,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdTagType:
case IrInstructionIdExport:
case IrInstructionIdErrorUnion:
- case IrInstructionIdPromiseResultType:
- case IrInstructionIdAwaitBookkeeping:
case IrInstructionIdAddImplicitReturnType:
case IrInstructionIdIntCast:
case IrInstructionIdFloatCast:
@@ -5508,17 +5719,19 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdCallSrc:
case IrInstructionIdAllocaSrc:
case IrInstructionIdEndExpr:
- case IrInstructionIdAllocaGen:
case IrInstructionIdImplicitCast:
case IrInstructionIdResolveResult:
case IrInstructionIdResetResult:
- case IrInstructionIdResultPtr:
case IrInstructionIdContainerInitList:
case IrInstructionIdSliceSrc:
case IrInstructionIdRef:
case IrInstructionIdBitCastSrc:
case IrInstructionIdTestErrSrc:
case IrInstructionIdUnionInitNamedField:
+ case IrInstructionIdFrameType:
+ case IrInstructionIdFrameSizeSrc:
+ case IrInstructionIdAllocaGen:
+ case IrInstructionIdAwaitSrc:
zig_unreachable();
case IrInstructionIdDeclVarGen:
@@ -5597,8 +5810,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_return_address(g, executable, (IrInstructionReturnAddress *)instruction);
case IrInstructionIdFrameAddress:
return ir_render_frame_address(g, executable, (IrInstructionFrameAddress *)instruction);
- case IrInstructionIdHandle:
- return ir_render_handle(g, executable, (IrInstructionHandle *)instruction);
+ case IrInstructionIdFrameHandle:
+ return ir_render_handle(g, executable, (IrInstructionFrameHandle *)instruction);
case IrInstructionIdOverflowOp:
return ir_render_overflow_op(g, executable, (IrInstructionOverflowOp *)instruction);
case IrInstructionIdTestErrGen:
@@ -5641,44 +5854,12 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_align_cast(g, executable, (IrInstructionAlignCast *)instruction);
case IrInstructionIdErrorReturnTrace:
return ir_render_error_return_trace(g, executable, (IrInstructionErrorReturnTrace *)instruction);
- case IrInstructionIdCancel:
- return ir_render_cancel(g, executable, (IrInstructionCancel *)instruction);
- case IrInstructionIdGetImplicitAllocator:
- return ir_render_get_implicit_allocator(g, executable, (IrInstructionGetImplicitAllocator *)instruction);
- case IrInstructionIdCoroId:
- return ir_render_coro_id(g, executable, (IrInstructionCoroId *)instruction);
- case IrInstructionIdCoroAlloc:
- return ir_render_coro_alloc(g, executable, (IrInstructionCoroAlloc *)instruction);
- case IrInstructionIdCoroSize:
- return ir_render_coro_size(g, executable, (IrInstructionCoroSize *)instruction);
- case IrInstructionIdCoroBegin:
- return ir_render_coro_begin(g, executable, (IrInstructionCoroBegin *)instruction);
- case IrInstructionIdCoroAllocFail:
- return ir_render_coro_alloc_fail(g, executable, (IrInstructionCoroAllocFail *)instruction);
- case IrInstructionIdCoroSuspend:
- return ir_render_coro_suspend(g, executable, (IrInstructionCoroSuspend *)instruction);
- case IrInstructionIdCoroEnd:
- return ir_render_coro_end(g, executable, (IrInstructionCoroEnd *)instruction);
- case IrInstructionIdCoroFree:
- return ir_render_coro_free(g, executable, (IrInstructionCoroFree *)instruction);
- case IrInstructionIdCoroResume:
- return ir_render_coro_resume(g, executable, (IrInstructionCoroResume *)instruction);
- case IrInstructionIdCoroSave:
- return ir_render_coro_save(g, executable, (IrInstructionCoroSave *)instruction);
- case IrInstructionIdCoroPromise:
- return ir_render_coro_promise(g, executable, (IrInstructionCoroPromise *)instruction);
- case IrInstructionIdCoroAllocHelper:
- return ir_render_coro_alloc_helper(g, executable, (IrInstructionCoroAllocHelper *)instruction);
case IrInstructionIdAtomicRmw:
return ir_render_atomic_rmw(g, executable, (IrInstructionAtomicRmw *)instruction);
case IrInstructionIdAtomicLoad:
return ir_render_atomic_load(g, executable, (IrInstructionAtomicLoad *)instruction);
case IrInstructionIdSaveErrRetAddr:
return ir_render_save_err_ret_addr(g, executable, (IrInstructionSaveErrRetAddr *)instruction);
- case IrInstructionIdMergeErrRetTraces:
- return ir_render_merge_err_ret_traces(g, executable, (IrInstructionMergeErrRetTraces *)instruction);
- case IrInstructionIdMarkErrRetTracePtr:
- return ir_render_mark_err_ret_trace_ptr(g, executable, (IrInstructionMarkErrRetTracePtr *)instruction);
case IrInstructionIdFloatOp:
return ir_render_float_op(g, executable, (IrInstructionFloatOp *)instruction);
case IrInstructionIdMulAdd:
@@ -5695,6 +5876,20 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_resize_slice(g, executable, (IrInstructionResizeSlice *)instruction);
case IrInstructionIdPtrOfArrayToSlice:
return ir_render_ptr_of_array_to_slice(g, executable, (IrInstructionPtrOfArrayToSlice *)instruction);
+ case IrInstructionIdSuspendBegin:
+ return ir_render_suspend_begin(g, executable, (IrInstructionSuspendBegin *)instruction);
+ case IrInstructionIdSuspendFinish:
+ return ir_render_suspend_finish(g, executable, (IrInstructionSuspendFinish *)instruction);
+ case IrInstructionIdResume:
+ return ir_render_resume(g, executable, (IrInstructionResume *)instruction);
+ case IrInstructionIdFrameSizeGen:
+ return ir_render_frame_size(g, executable, (IrInstructionFrameSizeGen *)instruction);
+ case IrInstructionIdAwaitGen:
+ return ir_render_await(g, executable, (IrInstructionAwaitGen *)instruction);
+ case IrInstructionIdSpillBegin:
+ return ir_render_spill_begin(g, executable, (IrInstructionSpillBegin *)instruction);
+ case IrInstructionIdSpillEnd:
+ return ir_render_spill_end(g, executable, (IrInstructionSpillEnd *)instruction);
}
zig_unreachable();
}
@@ -5704,6 +5899,7 @@ static void ir_render(CodeGen *g, ZigFn *fn_entry) {
IrExecutable *executable = &fn_entry->analyzed_executable;
assert(executable->basic_block_list.length > 0);
+
for (size_t block_i = 0; block_i < executable->basic_block_list.length; block_i += 1) {
IrBasicBlock *current_block = executable->basic_block_list.at(block_i);
assert(current_block->llvm_block);
@@ -5894,7 +6090,6 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con
case ZigTypeIdPointer:
case ZigTypeIdFn:
case ZigTypeIdOptional:
- case ZigTypeIdPromise:
{
LLVMValueRef ptr_val = gen_const_val(g, const_val, "");
LLVMValueRef ptr_size_int_val = LLVMConstPtrToInt(ptr_val, g->builtin_types.entry_usize->llvm_type);
@@ -5957,7 +6152,10 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con
}
return val;
}
-
+ case ZigTypeIdFnFrame:
+ zig_panic("TODO bit pack an async function frame");
+ case ZigTypeIdAnyFrame:
+ zig_panic("TODO bit pack an anyframe");
}
zig_unreachable();
}
@@ -6110,6 +6308,9 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
break;
}
+ if ((err = type_resolve(g, type_entry, ResolveStatusLLVMFull)))
+ zig_unreachable();
+
switch (type_entry->id) {
case ZigTypeIdInt:
return bigint_to_llvm_const(get_llvm_type(g, type_entry), &const_val->data.x_bigint);
@@ -6181,6 +6382,7 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
LLVMValueRef *fields = allocate<LLVMValueRef>(type_entry->data.structure.gen_field_count);
size_t src_field_count = type_entry->data.structure.src_field_count;
bool make_unnamed_struct = false;
+ assert(type_entry->data.structure.resolve_status == ResolveStatusLLVMFull);
if (type_entry->data.structure.layout == ContainerLayoutPacked) {
size_t src_field_index = 0;
while (src_field_index < src_field_count) {
@@ -6250,6 +6452,22 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
LLVMValueRef val = gen_const_val(g, field_val, "");
fields[type_struct_field->gen_index] = val;
make_unnamed_struct = make_unnamed_struct || is_llvm_value_unnamed_type(g, field_val->type, val);
+
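+ // Fill any padding slots between this field and the next with undef bytes.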
+ size_t end_pad_gen_index = (i + 1 < src_field_count) ?
+ type_entry->data.structure.fields[i + 1].gen_index :
+ type_entry->data.structure.gen_field_count;
+ size_t next_offset = (i + 1 < src_field_count) ?
+ type_entry->data.structure.fields[i + 1].offset : type_entry->abi_size;
+ if (end_pad_gen_index != SIZE_MAX) {
+ for (size_t gen_i = type_struct_field->gen_index + 1; gen_i < end_pad_gen_index;
+ gen_i += 1)
+ {
+ size_t pad_bytes = next_offset -
+ (type_struct_field->offset + type_struct_field->type_entry->abi_size);
+ LLVMTypeRef llvm_array_type = LLVMArrayType(LLVMInt8Type(), pad_bytes);
+ fields[gen_i] = LLVMGetUndef(llvm_array_type);
+ }
+ }
}
}
if (make_unnamed_struct) {
@@ -6437,30 +6655,18 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
err_payload_value = gen_const_val(g, payload_val, "");
make_unnamed_struct = is_llvm_value_unnamed_type(g, payload_val->type, err_payload_value);
}
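+ // An error union constant is laid out as { error tag, payload }, plus a
+ // trailing undef padding array when the LLVM struct type carries one.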
+ LLVMValueRef fields[3];
+ fields[err_union_err_index] = err_tag_value;
+ fields[err_union_payload_index] = err_payload_value;
+ size_t field_count = 2;
+ if (type_entry->data.error_union.pad_llvm_type != nullptr) {
+ fields[2] = LLVMGetUndef(type_entry->data.error_union.pad_llvm_type);
+ field_count = 3;
+ }
if (make_unnamed_struct) {
- uint64_t payload_off = LLVMOffsetOfElement(g->target_data_ref, get_llvm_type(g, type_entry), 1);
- uint64_t err_sz = LLVMStoreSizeOfType(g->target_data_ref, LLVMTypeOf(err_tag_value));
- unsigned pad_sz = payload_off - err_sz;
- if (pad_sz == 0) {
- LLVMValueRef fields[] = {
- err_tag_value,
- err_payload_value,
- };
- return LLVMConstStruct(fields, 2, false);
- } else {
- LLVMValueRef fields[] = {
- err_tag_value,
- LLVMGetUndef(LLVMArrayType(LLVMInt8Type(), pad_sz)),
- err_payload_value,
- };
- return LLVMConstStruct(fields, 3, false);
- }
+ return LLVMConstStruct(fields, field_count, false);
} else {
- LLVMValueRef fields[] = {
- err_tag_value,
- err_payload_value,
- };
- return LLVMConstNamedStruct(get_llvm_type(g, type_entry), fields, 2);
+ return LLVMConstNamedStruct(get_llvm_type(g, type_entry), fields, field_count);
}
}
}
@@ -6477,9 +6683,11 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
zig_unreachable();
-
+ case ZigTypeIdFnFrame:
+ zig_panic("TODO");
+ case ZigTypeIdAnyFrame:
+ zig_panic("TODO");
}
zig_unreachable();
}
@@ -6563,12 +6771,20 @@ static void generate_error_name_table(CodeGen *g) {
static void build_all_basic_blocks(CodeGen *g, ZigFn *fn) {
IrExecutable *executable = &fn->analyzed_executable;
assert(executable->basic_block_list.length > 0);
+ LLVMValueRef fn_val = fn_llvm_value(g, fn);
+ LLVMBasicBlockRef first_bb = nullptr;
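+ // Async functions get an extra first block which will hold the switch on the
+ // frame's resume index; do_code_gen emits that switch into it.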
+ if (fn_is_async(fn)) {
+ first_bb = LLVMAppendBasicBlock(fn_val, "AsyncSwitch");
+ g->cur_preamble_llvm_block = first_bb;
+ }
for (size_t block_i = 0; block_i < executable->basic_block_list.length; block_i += 1) {
IrBasicBlock *bb = executable->basic_block_list.at(block_i);
- bb->llvm_block = LLVMAppendBasicBlock(fn_llvm_value(g, fn), bb->name_hint);
+ bb->llvm_block = LLVMAppendBasicBlock(fn_val, bb->name_hint);
}
- IrBasicBlock *entry_bb = executable->basic_block_list.at(0);
- LLVMPositionBuilderAtEnd(g->builder, entry_bb->llvm_block);
+ if (first_bb == nullptr) {
+ first_bb = executable->basic_block_list.at(0)->llvm_block;
+ }
+ LLVMPositionBuilderAtEnd(g->builder, first_bb);
}
static void gen_global_var(CodeGen *g, ZigVar *var, LLVMValueRef init_val,
@@ -6745,13 +6961,19 @@ static void do_code_gen(CodeGen *g) {
build_all_basic_blocks(g, fn_table_entry);
clear_debug_source_node(g);
- if (want_sret) {
- g->cur_ret_ptr = LLVMGetParam(fn, 0);
- } else if (handle_is_ptr(fn_type_id->return_type)) {
- g->cur_ret_ptr = build_alloca(g, fn_type_id->return_type, "result", 0);
- // TODO add debug info variable for this
+ bool is_async = fn_is_async(fn_table_entry);
+
+ if (is_async) {
+ g->cur_frame_ptr = LLVMGetParam(fn, 0);
} else {
- g->cur_ret_ptr = nullptr;
+ if (want_sret) {
+ g->cur_ret_ptr = LLVMGetParam(fn, 0);
+ } else if (handle_is_ptr(fn_type_id->return_type)) {
+ g->cur_ret_ptr = build_alloca(g, fn_type_id->return_type, "result", 0);
+ // TODO add debug info variable for this
+ } else {
+ g->cur_ret_ptr = nullptr;
+ }
}
uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn_table_entry);
@@ -6763,39 +6985,45 @@ static void do_code_gen(CodeGen *g) {
}
// error return tracing setup
- bool is_async = cc == CallingConventionAsync;
- bool have_err_ret_trace_stack = g->have_err_ret_tracing && fn_table_entry->calls_or_awaits_errorable_fn && !is_async && !have_err_ret_trace_arg;
+ bool have_err_ret_trace_stack = g->have_err_ret_tracing && fn_table_entry->calls_or_awaits_errorable_fn &&
+ !is_async && !have_err_ret_trace_arg;
LLVMValueRef err_ret_array_val = nullptr;
if (have_err_ret_trace_stack) {
ZigType *array_type = get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count);
err_ret_array_val = build_alloca(g, array_type, "error_return_trace_addresses", get_abi_alignment(g, array_type));
- // populate g->stack_trace_type
- (void)get_ptr_to_stack_trace_type(g);
- g->cur_err_ret_trace_val_stack = build_alloca(g, g->stack_trace_type, "error_return_trace", get_abi_alignment(g, g->stack_trace_type));
+ (void)get_llvm_type(g, get_stack_trace_type(g));
+ g->cur_err_ret_trace_val_stack = build_alloca(g, get_stack_trace_type(g), "error_return_trace",
+ get_abi_alignment(g, g->stack_trace_type));
} else {
g->cur_err_ret_trace_val_stack = nullptr;
}
- // allocate temporary stack data
- for (size_t alloca_i = 0; alloca_i < fn_table_entry->alloca_gen_list.length; alloca_i += 1) {
- IrInstructionAllocaGen *instruction = fn_table_entry->alloca_gen_list.at(alloca_i);
- ZigType *ptr_type = instruction->base.value.type;
- assert(ptr_type->id == ZigTypeIdPointer);
- ZigType *child_type = ptr_type->data.pointer.child_type;
- if (!type_has_bits(child_type))
- continue;
- if (instruction->base.ref_count == 0)
- continue;
- if (instruction->base.value.special != ConstValSpecialRuntime) {
- if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special !=
- ConstValSpecialRuntime)
- {
+ if (!is_async) {
+ // allocate temporary stack data
+ for (size_t alloca_i = 0; alloca_i < fn_table_entry->alloca_gen_list.length; alloca_i += 1) {
+ IrInstructionAllocaGen *instruction = fn_table_entry->alloca_gen_list.at(alloca_i);
+ ZigType *ptr_type = instruction->base.value.type;
+ assert(ptr_type->id == ZigTypeIdPointer);
+ ZigType *child_type = ptr_type->data.pointer.child_type;
+ if (type_resolve(g, child_type, ResolveStatusSizeKnown))
+ zig_unreachable();
+ if (!type_has_bits(child_type))
continue;
+ if (instruction->base.ref_count == 0)
+ continue;
+ if (instruction->base.value.special != ConstValSpecialRuntime) {
+ if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special !=
+ ConstValSpecialRuntime)
+ {
+ continue;
+ }
}
+ if (type_resolve(g, child_type, ResolveStatusLLVMFull))
+ zig_unreachable();
+ instruction->base.llvm_value = build_alloca(g, child_type, instruction->name_hint,
+ get_ptr_align(g, ptr_type));
}
- instruction->base.llvm_value = build_alloca(g, child_type, instruction->name_hint,
- get_ptr_align(g, ptr_type));
}
ZigType *import = get_scope_import(&fn_table_entry->fndef_scope->base);
@@ -6833,7 +7061,7 @@ static void do_code_gen(CodeGen *g) {
} else if (is_c_abi) {
fn_walk_var.data.vars.var = var;
iter_function_params_c_abi(g, fn_table_entry->type_entry, &fn_walk_var, var->src_arg_index);
- } else {
+ } else if (!is_async) {
ZigType *gen_type;
FnGenParamInfo *gen_info = &fn_table_entry->type_entry->data.fn.gen_param_info[var->src_arg_index];
assert(gen_info->gen_index != SIZE_MAX);
@@ -6884,14 +7112,76 @@ static void do_code_gen(CodeGen *g) {
gen_store(g, LLVMConstInt(usize->llvm_type, stack_trace_ptr_count, false), len_field_ptr, get_pointer_to_type(g, usize, false));
}
- // create debug variable declarations for parameters
- // rely on the first variables in the variable_list being parameters.
- FnWalk fn_walk_init = {};
- fn_walk_init.id = FnWalkIdInits;
- fn_walk_init.data.inits.fn = fn_table_entry;
- fn_walk_init.data.inits.llvm_fn = fn;
- fn_walk_init.data.inits.gen_i = gen_i_init;
- walk_function_params(g, fn_table_entry->type_entry, &fn_walk_init);
+ if (is_async) {
+ (void)get_llvm_type(g, fn_table_entry->frame_type);
+ g->cur_resume_block_count = 0;
+
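+ // Store the frame size as function prefix data so it can be read back through
+ // a function pointer; this is what @frameSize loads.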
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+ LLVMValueRef size_val = LLVMConstInt(usize_type_ref, fn_table_entry->frame_type->abi_size, false);
+ ZigLLVMFunctionSetPrefixData(fn_table_entry->llvm_value, size_val);
+
+ if (!g->strip_debug_symbols) {
+ AstNode *source_node = fn_table_entry->proto_node;
+ ZigLLVMSetCurrentDebugLocation(g->builder, (int)source_node->line + 1,
+ (int)source_node->column + 1, get_di_scope(g, fn_table_entry->child_scope));
+ }
+ IrExecutable *executable = &fn_table_entry->analyzed_executable;
+ LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume");
+ LLVMPositionBuilderAtEnd(g->builder, bad_resume_block);
+ gen_assertion_scope(g, PanicMsgIdBadResume, fn_table_entry->child_scope);
+
+ LLVMPositionBuilderAtEnd(g->builder, g->cur_preamble_llvm_block);
+ render_async_spills(g);
+ g->cur_async_awaiter_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_awaiter_index, "");
+ LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_resume_index, "");
+ g->cur_async_resume_index_ptr = resume_index_ptr;
+
+ if (type_has_bits(fn_type_id->return_type)) {
+ LLVMValueRef cur_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_ret_start, "");
+ g->cur_ret_ptr = LLVMBuildLoad(g->builder, cur_ret_ptr_ptr, "");
+ }
+ uint32_t trace_field_index_stack = UINT32_MAX;
+ if (codegen_fn_has_err_ret_tracing_stack(g, fn_table_entry, true)) {
+ trace_field_index_stack = frame_index_trace_stack(g, fn_type_id);
+ g->cur_err_ret_trace_val_stack = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
+ trace_field_index_stack, "");
+ }
+
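+ // Dispatch on the stored resume index: case 0 is the initial call and each
+ // suspend point registers another case; an unknown index hits BadResume.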
+ LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, "");
+ LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block, 4);
+ g->cur_async_switch_instr = switch_instr;
+
+ LLVMValueRef zero = LLVMConstNull(usize_type_ref);
+ IrBasicBlock *entry_block = executable->basic_block_list.at(0);
+ LLVMAddCase(switch_instr, zero, entry_block->llvm_block);
+ g->cur_resume_block_count += 1;
+ LLVMPositionBuilderAtEnd(g->builder, entry_block->llvm_block);
+ if (trace_field_index_stack != UINT32_MAX) {
+ if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
+ LLVMValueRef trace_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
+ frame_index_trace_arg(g, fn_type_id->return_type), "");
+ LLVMValueRef zero_ptr = LLVMConstNull(LLVMGetElementType(LLVMTypeOf(trace_ptr_ptr)));
+ LLVMBuildStore(g->builder, zero_ptr, trace_ptr_ptr);
+ }
+
+ LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
+ trace_field_index_stack, "");
+ LLVMValueRef addrs_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
+ trace_field_index_stack + 1, "");
+
+ gen_init_stack_trace(g, trace_field_ptr, addrs_field_ptr);
+ }
+ render_async_var_decls(g, entry_block->instruction_list.at(0)->scope);
+ } else {
+ // create debug variable declarations for parameters
+ // rely on the first variables in the variable_list being parameters.
+ FnWalk fn_walk_init = {};
+ fn_walk_init.id = FnWalkIdInits;
+ fn_walk_init.data.inits.fn = fn_table_entry;
+ fn_walk_init.data.inits.llvm_fn = fn;
+ fn_walk_init.data.inits.gen_i = gen_i_init;
+ walk_function_params(g, fn_table_entry->type_entry, &fn_walk_init);
+ }
ir_render(g, fn_table_entry);
@@ -6910,8 +7200,6 @@ static void do_code_gen(CodeGen *g) {
LLVMDumpModule(g->module);
}
- // in release mode, we're sooooo confident that we've generated correct ir,
- // that we skip the verify module step in order to get better performance.
#ifndef NDEBUG
char *error = nullptr;
LLVMVerifyModule(g->module, LLVMAbortProcessAction, &error);
@@ -7180,16 +7468,8 @@ static void define_builtin_types(CodeGen *g) {
g->primitive_type_table.put(&entry->name, entry);
}
- {
- ZigType *entry = get_promise_type(g, nullptr);
- g->primitive_type_table.put(&entry->name, entry);
- entry->size_in_bits = g->builtin_types.entry_usize->size_in_bits;
- entry->abi_align = g->builtin_types.entry_usize->abi_align;
- entry->abi_size = g->builtin_types.entry_usize->abi_size;
- }
}
-
static BuiltinFnEntry *create_builtin_fn(CodeGen *g, BuiltinFnId id, const char *name, size_t count) {
    BuiltinFnEntry *builtin_fn = allocate<BuiltinFnEntry>(1);
buf_init_from_str(&builtin_fn->name, name);
@@ -7202,8 +7482,6 @@ static BuiltinFnEntry *create_builtin_fn(CodeGen *g, BuiltinFnId id, const char
static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdBreakpoint, "breakpoint", 0);
create_builtin_fn(g, BuiltinFnIdReturnAddress, "returnAddress", 0);
- create_builtin_fn(g, BuiltinFnIdFrameAddress, "frameAddress", 0);
- create_builtin_fn(g, BuiltinFnIdHandle, "handle", 0);
create_builtin_fn(g, BuiltinFnIdMemcpy, "memcpy", 3);
create_builtin_fn(g, BuiltinFnIdMemset, "memset", 3);
create_builtin_fn(g, BuiltinFnIdSizeof, "sizeOf", 1);
@@ -7279,13 +7557,13 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdFloor, "floor", 2);
create_builtin_fn(g, BuiltinFnIdCeil, "ceil", 2);
create_builtin_fn(g, BuiltinFnIdTrunc, "trunc", 2);
- //Needs library support on Windows
- //create_builtin_fn(g, BuiltinFnIdNearbyInt, "nearbyInt", 2);
+ create_builtin_fn(g, BuiltinFnIdNearbyInt, "nearbyInt", 2);
create_builtin_fn(g, BuiltinFnIdRound, "round", 2);
create_builtin_fn(g, BuiltinFnIdMulAdd, "mulAdd", 4);
create_builtin_fn(g, BuiltinFnIdInlineCall, "inlineCall", SIZE_MAX);
create_builtin_fn(g, BuiltinFnIdNoInlineCall, "noInlineCall", SIZE_MAX);
create_builtin_fn(g, BuiltinFnIdNewStackCall, "newStackCall", SIZE_MAX);
+ create_builtin_fn(g, BuiltinFnIdAsyncCall, "asyncCall", SIZE_MAX);
create_builtin_fn(g, BuiltinFnIdTypeId, "typeId", 1);
create_builtin_fn(g, BuiltinFnIdShlExact, "shlExact", 2);
create_builtin_fn(g, BuiltinFnIdShrExact, "shrExact", 2);
@@ -7304,6 +7582,10 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdThis, "This", 0);
create_builtin_fn(g, BuiltinFnIdHasDecl, "hasDecl", 2);
create_builtin_fn(g, BuiltinFnIdUnionInit, "unionInit", 3);
+ create_builtin_fn(g, BuiltinFnIdFrameHandle, "frame", 0);
+ create_builtin_fn(g, BuiltinFnIdFrameType, "Frame", 1);
+ create_builtin_fn(g, BuiltinFnIdFrameAddress, "frameAddress", 0);
+ create_builtin_fn(g, BuiltinFnIdFrameSize, "frameSize", 1);
}
static const char *bool_to_str(bool b) {
@@ -7615,7 +7897,8 @@ Buf *codegen_generate_builtin_source(CodeGen *g) {
" BoundFn: Fn,\n"
" ArgTuple: void,\n"
" Opaque: void,\n"
- " Promise: Promise,\n"
+ " Frame: void,\n"
+ " AnyFrame: AnyFrame,\n"
" Vector: Vector,\n"
" EnumLiteral: void,\n"
"\n\n"
@@ -7728,11 +8011,10 @@ Buf *codegen_generate_builtin_source(CodeGen *g) {
" is_generic: bool,\n"
" is_var_args: bool,\n"
" return_type: ?type,\n"
- " async_allocator_type: ?type,\n"
" args: []FnArg,\n"
" };\n"
"\n"
- " pub const Promise = struct {\n"
+ " pub const AnyFrame = struct {\n"
" child: ?type,\n"
" };\n"
"\n"
@@ -8297,15 +8579,17 @@ void add_cc_args(CodeGen *g, ZigList<const char *> &args, const char *out_dep_pa
}
// According to Rich Felker libc headers are supposed to go before C language headers.
+ // However, as noted by @dimenus, putting the libc headers before the compiler's
+ // C headers breaks intrinsics and other compiler-specific items.
+ args.append("-isystem");
+ args.append(buf_ptr(g->zig_c_headers_dir));
+
for (size_t i = 0; i < g->libc_include_dir_len; i += 1) {
Buf *include_dir = g->libc_include_dir_list[i];
args.append("-isystem");
args.append(buf_ptr(include_dir));
}
- args.append("-isystem");
- args.append(buf_ptr(g->zig_c_headers_dir));
-
if (g->zig_target->is_native) {
args.append("-march=native");
} else {
@@ -8336,6 +8620,12 @@ void add_cc_args(CodeGen *g, ZigList<const char *> &args, const char *out_dep_pa
args.append("-g");
}
+ if (codegen_have_frame_pointer(g)) {
+ args.append("-fno-omit-frame-pointer");
+ } else {
+ args.append("-fomit-frame-pointer");
+ }
+
switch (g->build_mode) {
case BuildModeDebug:
// windows c runtime requires -D_DEBUG if using debug libraries
@@ -8348,7 +8638,6 @@ void add_cc_args(CodeGen *g, ZigList<const char *> &args, const char *out_dep_pa
} else {
args.append("-fno-stack-protector");
}
- args.append("-fno-omit-frame-pointer");
break;
case BuildModeSafeRelease:
// See the comment in the BuildModeFastRelease case for why we pass -O2 rather
@@ -8362,7 +8651,6 @@ void add_cc_args(CodeGen *g, ZigList<const char *> &args, const char *out_dep_pa
} else {
args.append("-fno-stack-protector");
}
- args.append("-fomit-frame-pointer");
break;
case BuildModeFastRelease:
args.append("-DNDEBUG");
@@ -8373,13 +8661,11 @@ void add_cc_args(CodeGen *g, ZigList<const char *> &args, const char *out_dep_pa
// running in -O2 and thus the -O3 path has been tested less.
args.append("-O2");
args.append("-fno-stack-protector");
- args.append("-fomit-frame-pointer");
break;
case BuildModeSmallRelease:
args.append("-DNDEBUG");
args.append("-Os");
args.append("-fno-stack-protector");
- args.append("-fomit-frame-pointer");
break;
}
@@ -8907,7 +9193,8 @@ static void prepend_c_type_to_decl_list(CodeGen *g, GenH *gen_h, ZigType *type_e
case ZigTypeIdArgTuple:
case ZigTypeIdErrorUnion:
case ZigTypeIdErrorSet:
- case ZigTypeIdPromise:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
case ZigTypeIdVoid:
case ZigTypeIdUnreachable:
@@ -9091,7 +9378,8 @@ static void get_c_type(CodeGen *g, GenH *gen_h, ZigType *type_entry, Buf *out_bu
case ZigTypeIdUndefined:
case ZigTypeIdNull:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
}
}
@@ -9258,9 +9546,11 @@ static void gen_h_file(CodeGen *g) {
case ZigTypeIdArgTuple:
case ZigTypeIdOptional:
case ZigTypeIdFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
+
case ZigTypeIdEnum:
if (type_entry->data.enumeration.layout == ContainerLayoutExtern) {
fprintf(out_h, "enum %s {\n", buf_ptr(type_h_name(type_entry)));
@@ -9799,3 +10089,18 @@ CodeGen *codegen_create(Buf *main_pkg_path, Buf *root_src_path, const ZigTarget
return g;
}
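+// True when the error return trace is threaded through as a parameter, which is
+// the case for functions returning an error union or error set.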
+bool codegen_fn_has_err_ret_tracing_arg(CodeGen *g, ZigType *return_type) {
+ return g->have_err_ret_tracing &&
+ (return_type->id == ZigTypeIdErrorUnion ||
+ return_type->id == ZigTypeIdErrorSet);
+}
+
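+// True when the function must own the trace storage itself: async functions keep
+// it in their frame; synchronous functions put it on the stack when they call
+// errorable functions but do not take the trace as a parameter.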
+bool codegen_fn_has_err_ret_tracing_stack(CodeGen *g, ZigFn *fn, bool is_async) {
+ if (is_async) {
+ return g->have_err_ret_tracing && (fn->calls_or_awaits_errorable_fn ||
+ codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type));
+ } else {
+ return g->have_err_ret_tracing && fn->calls_or_awaits_errorable_fn &&
+ !codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type);
+ }
+}
diff --git a/src/codegen.hpp b/src/codegen.hpp
index cdff61a26f..794a0fd5a6 100644
--- a/src/codegen.hpp
+++ b/src/codegen.hpp
@@ -61,5 +61,7 @@ Buf *codegen_generate_builtin_source(CodeGen *g);
TargetSubsystem detect_subsystem(CodeGen *g);
void codegen_release_caches(CodeGen *codegen);
+bool codegen_fn_has_err_ret_tracing_arg(CodeGen *g, ZigType *return_type);
+bool codegen_fn_has_err_ret_tracing_stack(CodeGen *g, ZigFn *fn, bool is_async);
#endif
diff --git a/src/ir.cpp b/src/ir.cpp
index 65a21a418d..0129081e22 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -26,6 +26,7 @@ struct IrBuilder {
CodeGen *codegen;
IrExecutable *exec;
IrBasicBlock *current_basic_block;
+ AstNode *main_block_node;
};
struct IrAnalyze {
@@ -99,7 +100,6 @@ struct ConstCastOnly {
ConstCastErrUnionErrSetMismatch *error_union_error_set;
ConstCastTypeMismatch *type_mismatch;
ConstCastOnly *return_type;
- ConstCastOnly *async_allocator_type;
ConstCastOnly *null_wrap_ptr_child;
ConstCastArg fn_arg;
ConstCastArgNoAlias arg_no_alias;
@@ -305,6 +305,7 @@ static bool types_have_same_zig_comptime_repr(ZigType *a, ZigType *b) {
case ZigTypeIdBoundFn:
case ZigTypeIdErrorSet:
case ZigTypeIdOpaque:
+ case ZigTypeIdAnyFrame:
return true;
case ZigTypeIdFloat:
return a->data.floating.bit_count == b->data.floating.bit_count;
@@ -319,8 +320,8 @@ static bool types_have_same_zig_comptime_repr(ZigType *a, ZigType *b) {
case ZigTypeIdUnion:
case ZigTypeIdFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
return false;
}
zig_unreachable();
@@ -565,8 +566,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionArrayType *) {
return IrInstructionIdArrayType;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionPromiseType *) {
- return IrInstructionIdPromiseType;
+static constexpr IrInstructionId ir_instruction_id(IrInstructionAnyFrameType *) {
+ return IrInstructionIdAnyFrameType;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionSliceType *) {
@@ -761,8 +762,20 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameAddress *)
return IrInstructionIdFrameAddress;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionHandle *) {
- return IrInstructionIdHandle;
+static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameHandle *) {
+ return IrInstructionIdFrameHandle;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameType *) {
+ return IrInstructionIdFrameType;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameSizeSrc *) {
+ return IrInstructionIdFrameSizeSrc;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameSizeGen *) {
+ return IrInstructionIdFrameSizeGen;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionAlignOf *) {
@@ -933,10 +946,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionResetResult *) {
return IrInstructionIdResetResult;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionResultPtr *) {
- return IrInstructionIdResultPtr;
-}
-
static constexpr IrInstructionId ir_instruction_id(IrInstructionPtrOfArrayToSlice *) {
return IrInstructionIdPtrOfArrayToSlice;
}
@@ -961,62 +970,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionErrorUnion *) {
return IrInstructionIdErrorUnion;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCancel *) {
- return IrInstructionIdCancel;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionGetImplicitAllocator *) {
- return IrInstructionIdGetImplicitAllocator;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroId *) {
- return IrInstructionIdCoroId;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroAlloc *) {
- return IrInstructionIdCoroAlloc;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroSize *) {
- return IrInstructionIdCoroSize;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroBegin *) {
- return IrInstructionIdCoroBegin;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroAllocFail *) {
- return IrInstructionIdCoroAllocFail;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroSuspend *) {
- return IrInstructionIdCoroSuspend;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroEnd *) {
- return IrInstructionIdCoroEnd;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroFree *) {
- return IrInstructionIdCoroFree;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroResume *) {
- return IrInstructionIdCoroResume;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroSave *) {
- return IrInstructionIdCoroSave;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroPromise *) {
- return IrInstructionIdCoroPromise;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroAllocHelper *) {
- return IrInstructionIdCoroAllocHelper;
-}
-
static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicRmw *) {
return IrInstructionIdAtomicRmw;
}
@@ -1025,14 +978,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicLoad *) {
return IrInstructionIdAtomicLoad;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionPromiseResultType *) {
- return IrInstructionIdPromiseResultType;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionAwaitBookkeeping *) {
- return IrInstructionIdAwaitBookkeeping;
-}
-
static constexpr IrInstructionId ir_instruction_id(IrInstructionSaveErrRetAddr *) {
return IrInstructionIdSaveErrRetAddr;
}
@@ -1041,14 +986,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionAddImplicitRetur
return IrInstructionIdAddImplicitReturnType;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionMergeErrRetTraces *) {
- return IrInstructionIdMergeErrRetTraces;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionMarkErrRetTracePtr *) {
- return IrInstructionIdMarkErrRetTracePtr;
-}
-
static constexpr IrInstructionId ir_instruction_id(IrInstructionFloatOp *) {
return IrInstructionIdFloatOp;
}
@@ -1097,6 +1034,34 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionUnionInitNamedFi
return IrInstructionIdUnionInitNamedField;
}
+static constexpr IrInstructionId ir_instruction_id(IrInstructionSuspendBegin *) {
+ return IrInstructionIdSuspendBegin;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionSuspendFinish *) {
+ return IrInstructionIdSuspendFinish;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionAwaitSrc *) {
+ return IrInstructionIdAwaitSrc;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionAwaitGen *) {
+ return IrInstructionIdAwaitGen;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionResume *) {
+ return IrInstructionIdResume;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionSpillBegin *) {
+ return IrInstructionIdSpillBegin;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionSpillEnd *) {
+ return IrInstructionIdSpillEnd;
+}
+
template<typename T>
static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) {
    T *special_instruction = allocate<T>(1);
@@ -1149,14 +1114,14 @@ static IrInstruction *ir_build_cond_br(IrBuilder *irb, Scope *scope, AstNode *so
}
static IrInstruction *ir_build_return(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *return_value)
+ IrInstruction *operand)
{
    IrInstructionReturn *return_instruction = ir_build_instruction<IrInstructionReturn>(irb, scope, source_node);
return_instruction->base.value.type = irb->codegen->builtin_types.entry_unreachable;
return_instruction->base.value.special = ConstValSpecialStatic;
- return_instruction->value = return_value;
+ return_instruction->operand = operand;
- if (return_value != nullptr) ir_ref_instruction(return_value, irb->current_basic_block);
+ if (operand != nullptr) ir_ref_instruction(operand, irb->current_basic_block);
return &return_instruction->base;
}
@@ -1214,14 +1179,6 @@ static IrInstruction *ir_build_const_usize(IrBuilder *irb, Scope *scope, AstNode
return &const_instruction->base;
}
-static IrInstruction *ir_build_const_u8(IrBuilder *irb, Scope *scope, AstNode *source_node, uint8_t value) {
- IrInstructionConst *const_instruction = ir_build_instruction<IrInstructionConst>(irb, scope, source_node);
- const_instruction->base.value.type = irb->codegen->builtin_types.entry_u8;
- const_instruction->base.value.special = ConstValSpecialStatic;
- bigint_init_unsigned(&const_instruction->base.value.data.x_bigint, value);
- return &const_instruction->base;
-}
-
static IrInstruction *ir_create_const_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
ZigType *type_entry)
{
@@ -1429,7 +1386,7 @@ static IrInstruction *ir_build_union_field_ptr(IrBuilder *irb, Scope *scope, Ast
static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *source_node,
ZigFn *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args,
- bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator,
+ bool is_comptime, FnInline fn_inline, bool is_async,
IrInstruction *new_stack, ResultLoc *result_loc)
{
    IrInstructionCallSrc *call_instruction = ir_build_instruction<IrInstructionCallSrc>(irb, scope, source_node);
@@ -1440,22 +1397,24 @@ static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *s
call_instruction->args = args;
call_instruction->arg_count = arg_count;
call_instruction->is_async = is_async;
- call_instruction->async_allocator = async_allocator;
call_instruction->new_stack = new_stack;
call_instruction->result_loc = result_loc;
if (fn_ref != nullptr) ir_ref_instruction(fn_ref, irb->current_basic_block);
for (size_t i = 0; i < arg_count; i += 1)
ir_ref_instruction(args[i], irb->current_basic_block);
- if (async_allocator != nullptr) ir_ref_instruction(async_allocator, irb->current_basic_block);
+ if (is_async && new_stack != nullptr) {
+ // In this case the extra arg at the end (args[arg_count]) is the return pointer.
+ ir_ref_instruction(args[arg_count], irb->current_basic_block);
+ }
if (new_stack != nullptr) ir_ref_instruction(new_stack, irb->current_basic_block);
return &call_instruction->base;
}
-static IrInstruction *ir_build_call_gen(IrAnalyze *ira, IrInstruction *source_instruction,
+static IrInstructionCallGen *ir_build_call_gen(IrAnalyze *ira, IrInstruction *source_instruction,
ZigFn *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args,
- FnInline fn_inline, bool is_async, IrInstruction *async_allocator, IrInstruction *new_stack,
+ FnInline fn_inline, bool is_async, IrInstruction *new_stack,
IrInstruction *result_loc, ZigType *return_type)
{
    IrInstructionCallGen *call_instruction = ir_build_instruction<IrInstructionCallGen>(&ira->new_irb,
@@ -1467,18 +1426,16 @@ static IrInstruction *ir_build_call_gen(IrAnalyze *ira, IrInstruction *source_in
call_instruction->args = args;
call_instruction->arg_count = arg_count;
call_instruction->is_async = is_async;
- call_instruction->async_allocator = async_allocator;
call_instruction->new_stack = new_stack;
call_instruction->result_loc = result_loc;
if (fn_ref != nullptr) ir_ref_instruction(fn_ref, ira->new_irb.current_basic_block);
for (size_t i = 0; i < arg_count; i += 1)
ir_ref_instruction(args[i], ira->new_irb.current_basic_block);
- if (async_allocator != nullptr) ir_ref_instruction(async_allocator, ira->new_irb.current_basic_block);
if (new_stack != nullptr) ir_ref_instruction(new_stack, ira->new_irb.current_basic_block);
if (result_loc != nullptr) ir_ref_instruction(result_loc, ira->new_irb.current_basic_block);
- return &call_instruction->base;
+ return call_instruction;
}
static IrInstruction *ir_build_phi(IrBuilder *irb, Scope *scope, AstNode *source_node,
@@ -1754,17 +1711,16 @@ static IrInstruction *ir_build_array_type(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
-static IrInstruction *ir_build_promise_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
+static IrInstruction *ir_build_anyframe_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *payload_type)
{
- IrInstructionPromiseType *instruction = ir_build_instruction<IrInstructionPromiseType>(irb, scope, source_node);
+ IrInstructionAnyFrameType *instruction = ir_build_instruction<IrInstructionAnyFrameType>(irb, scope, source_node);
instruction->payload_type = payload_type;
if (payload_type != nullptr) ir_ref_instruction(payload_type, irb->current_basic_block);
return &instruction->base;
}
-
static IrInstruction *ir_build_slice_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *child_type, bool is_const, bool is_volatile, IrInstruction *align_value, bool is_allow_zero)
{
@@ -2443,7 +2399,35 @@ static IrInstruction *ir_build_frame_address(IrBuilder *irb, Scope *scope, AstNo
}
static IrInstruction *ir_build_handle(IrBuilder *irb, Scope *scope, AstNode *source_node) {
- IrInstructionHandle *instruction = ir_build_instruction<IrInstructionHandle>(irb, scope, source_node);
+ IrInstructionFrameHandle *instruction = ir_build_instruction<IrInstructionFrameHandle>(irb, scope, source_node);
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_frame_type(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *fn) {
+ IrInstructionFrameType *instruction = ir_build_instruction<IrInstructionFrameType>(irb, scope, source_node);
+ instruction->fn = fn;
+
+ ir_ref_instruction(fn, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_frame_size_src(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *fn) {
+ IrInstructionFrameSizeSrc *instruction = ir_build_instruction<IrInstructionFrameSizeSrc>(irb, scope, source_node);
+ instruction->fn = fn;
+
+ ir_ref_instruction(fn, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_frame_size_gen(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *fn)
+{
+ IrInstructionFrameSizeGen *instruction = ir_build_instruction<IrInstructionFrameSizeGen>(irb, scope, source_node);
+ instruction->fn = fn;
+
+ ir_ref_instruction(fn, irb->current_basic_block);
+
return &instruction->base;
}
@@ -2546,11 +2530,12 @@ static IrInstruction *ir_build_align_of(IrBuilder *irb, Scope *scope, AstNode *s
}
static IrInstruction *ir_build_test_err_src(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *base_ptr, bool resolve_err_set)
+ IrInstruction *base_ptr, bool resolve_err_set, bool base_ptr_is_payload)
{
    IrInstructionTestErrSrc *instruction = ir_build_instruction<IrInstructionTestErrSrc>(irb, scope, source_node);
instruction->base_ptr = base_ptr;
instruction->resolve_err_set = resolve_err_set;
+ instruction->base_ptr_is_payload = base_ptr_is_payload;
ir_ref_instruction(base_ptr, irb->current_basic_block);
@@ -2596,13 +2581,12 @@ static IrInstruction *ir_build_unwrap_err_payload(IrBuilder *irb, Scope *scope,
static IrInstruction *ir_build_fn_proto(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction **param_types, IrInstruction *align_value, IrInstruction *return_type,
- IrInstruction *async_allocator_type_value, bool is_var_args)
+ bool is_var_args)
{
    IrInstructionFnProto *instruction = ir_build_instruction<IrInstructionFnProto>(irb, scope, source_node);
instruction->param_types = param_types;
instruction->align_value = align_value;
instruction->return_type = return_type;
- instruction->async_allocator_type_value = async_allocator_type_value;
instruction->is_var_args = is_var_args;
assert(source_node->type == NodeTypeFnProto);
@@ -2612,7 +2596,6 @@ static IrInstruction *ir_build_fn_proto(IrBuilder *irb, Scope *scope, AstNode *s
if (param_types[i] != nullptr) ir_ref_instruction(param_types[i], irb->current_basic_block);
}
if (align_value != nullptr) ir_ref_instruction(align_value, irb->current_basic_block);
- if (async_allocator_type_value != nullptr) ir_ref_instruction(async_allocator_type_value, irb->current_basic_block);
ir_ref_instruction(return_type, irb->current_basic_block);
return &instruction->base;
@@ -2994,18 +2977,6 @@ static IrInstruction *ir_build_reset_result(IrBuilder *irb, Scope *scope, AstNod
return &instruction->base;
}
-static IrInstruction *ir_build_result_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node,
- ResultLoc *result_loc, IrInstruction *result)
-{
- IrInstructionResultPtr *instruction = ir_build_instruction<IrInstructionResultPtr>(irb, scope, source_node);
- instruction->result_loc = result_loc;
- instruction->result = result;
-
- ir_ref_instruction(result, irb->current_basic_block);
-
- return &instruction->base;
-}
-
static IrInstruction *ir_build_opaque_type(IrBuilder *irb, Scope *scope, AstNode *source_node) {
    IrInstructionOpaqueType *instruction = ir_build_instruction<IrInstructionOpaqueType>(irb, scope, source_node);
@@ -3056,149 +3027,6 @@ static IrInstruction *ir_build_error_union(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
-static IrInstruction *ir_build_cancel(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *target)
-{
- IrInstructionCancel *instruction = ir_build_instruction<IrInstructionCancel>(irb, scope, source_node);
- instruction->target = target;
-
- ir_ref_instruction(target, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_get_implicit_allocator(IrBuilder *irb, Scope *scope, AstNode *source_node,
- ImplicitAllocatorId id)
-{
- IrInstructionGetImplicitAllocator *instruction = ir_build_instruction<IrInstructionGetImplicitAllocator>(irb, scope, source_node);
- instruction->id = id;
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_id(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *promise_ptr) {
- IrInstructionCoroId *instruction = ir_build_instruction<IrInstructionCoroId>(irb, scope, source_node);
- instruction->promise_ptr = promise_ptr;
-
- ir_ref_instruction(promise_ptr, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_alloc(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *coro_id) {
- IrInstructionCoroAlloc *instruction = ir_build_instruction<IrInstructionCoroAlloc>(irb, scope, source_node);
- instruction->coro_id = coro_id;
-
- ir_ref_instruction(coro_id, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_size(IrBuilder *irb, Scope *scope, AstNode *source_node) {
- IrInstructionCoroSize *instruction = ir_build_instruction<IrInstructionCoroSize>(irb, scope, source_node);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_begin(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *coro_id, IrInstruction *coro_mem_ptr) {
- IrInstructionCoroBegin *instruction = ir_build_instruction<IrInstructionCoroBegin>(irb, scope, source_node);
- instruction->coro_id = coro_id;
- instruction->coro_mem_ptr = coro_mem_ptr;
-
- ir_ref_instruction(coro_id, irb->current_basic_block);
- ir_ref_instruction(coro_mem_ptr, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_alloc_fail(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *err_val) {
- IrInstructionCoroAllocFail *instruction = ir_build_instruction<IrInstructionCoroAllocFail>(irb, scope, source_node);
- instruction->base.value.type = irb->codegen->builtin_types.entry_unreachable;
- instruction->base.value.special = ConstValSpecialStatic;
- instruction->err_val = err_val;
-
- ir_ref_instruction(err_val, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_suspend(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *save_point, IrInstruction *is_final)
-{
- IrInstructionCoroSuspend *instruction = ir_build_instruction<IrInstructionCoroSuspend>(irb, scope, source_node);
- instruction->save_point = save_point;
- instruction->is_final = is_final;
-
- if (save_point != nullptr) ir_ref_instruction(save_point, irb->current_basic_block);
- ir_ref_instruction(is_final, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_end(IrBuilder *irb, Scope *scope, AstNode *source_node) {
- IrInstructionCoroEnd *instruction = ir_build_instruction<IrInstructionCoroEnd>(irb, scope, source_node);
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_free(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *coro_id, IrInstruction *coro_handle)
-{
- IrInstructionCoroFree *instruction = ir_build_instruction<IrInstructionCoroFree>(irb, scope, source_node);
- instruction->coro_id = coro_id;
- instruction->coro_handle = coro_handle;
-
- ir_ref_instruction(coro_id, irb->current_basic_block);
- ir_ref_instruction(coro_handle, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_resume(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *awaiter_handle)
-{
- IrInstructionCoroResume *instruction = ir_build_instruction<IrInstructionCoroResume>(irb, scope, source_node);
- instruction->awaiter_handle = awaiter_handle;
-
- ir_ref_instruction(awaiter_handle, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_save(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *coro_handle)
-{
- IrInstructionCoroSave *instruction = ir_build_instruction<IrInstructionCoroSave>(irb, scope, source_node);
- instruction->coro_handle = coro_handle;
-
- ir_ref_instruction(coro_handle, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_promise(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *coro_handle)
-{
- IrInstructionCoroPromise *instruction = ir_build_instruction<IrInstructionCoroPromise>(irb, scope, source_node);
- instruction->coro_handle = coro_handle;
-
- ir_ref_instruction(coro_handle, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_alloc_helper(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *realloc_fn, IrInstruction *coro_size)
-{
- IrInstructionCoroAllocHelper *instruction = ir_build_instruction<IrInstructionCoroAllocHelper>(irb, scope, source_node);
- instruction->realloc_fn = realloc_fn;
- instruction->coro_size = coro_size;
-
- ir_ref_instruction(realloc_fn, irb->current_basic_block);
- ir_ref_instruction(coro_size, irb->current_basic_block);
-
- return &instruction->base;
-}
-
static IrInstruction *ir_build_atomic_rmw(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *operand_type, IrInstruction *ptr, IrInstruction *op, IrInstruction *operand,
IrInstruction *ordering, AtomicRmwOp resolved_op, AtomicOrder resolved_ordering)
@@ -3238,28 +3066,6 @@ static IrInstruction *ir_build_atomic_load(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
-static IrInstruction *ir_build_promise_result_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *promise_type)
-{
- IrInstructionPromiseResultType *instruction = ir_build_instruction<IrInstructionPromiseResultType>(irb, scope, source_node);
- instruction->promise_type = promise_type;
-
- ir_ref_instruction(promise_type, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_await_bookkeeping(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *promise_result_type)
-{
- IrInstructionAwaitBookkeeping *instruction = ir_build_instruction<IrInstructionAwaitBookkeeping>(irb, scope, source_node);
- instruction->promise_result_type = promise_result_type;
-
- ir_ref_instruction(promise_result_type, irb->current_basic_block);
-
- return &instruction->base;
-}
-
static IrInstruction *ir_build_save_err_ret_addr(IrBuilder *irb, Scope *scope, AstNode *source_node) {
    IrInstructionSaveErrRetAddr *instruction = ir_build_instruction<IrInstructionSaveErrRetAddr>(irb, scope, source_node);
return &instruction->base;
@@ -3276,30 +3082,6 @@ static IrInstruction *ir_build_add_implicit_return_type(IrBuilder *irb, Scope *s
return &instruction->base;
}
-static IrInstruction *ir_build_merge_err_ret_traces(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *coro_promise_ptr, IrInstruction *src_err_ret_trace_ptr, IrInstruction *dest_err_ret_trace_ptr)
-{
- IrInstructionMergeErrRetTraces *instruction = ir_build_instruction<IrInstructionMergeErrRetTraces>(irb, scope, source_node);
- instruction->coro_promise_ptr = coro_promise_ptr;
- instruction->src_err_ret_trace_ptr = src_err_ret_trace_ptr;
- instruction->dest_err_ret_trace_ptr = dest_err_ret_trace_ptr;
-
- ir_ref_instruction(coro_promise_ptr, irb->current_basic_block);
- ir_ref_instruction(src_err_ret_trace_ptr, irb->current_basic_block);
- ir_ref_instruction(dest_err_ret_trace_ptr, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_mark_err_ret_trace_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *err_ret_trace_ptr) {
- IrInstructionMarkErrRetTracePtr *instruction = ir_build_instruction<IrInstructionMarkErrRetTracePtr>(irb, scope, source_node);
- instruction->err_ret_trace_ptr = err_ret_trace_ptr;
-
- ir_ref_instruction(err_ret_trace_ptr, irb->current_basic_block);
-
- return &instruction->base;
-}
-
static IrInstruction *ir_build_has_decl(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *container, IrInstruction *name)
{
@@ -3435,7 +3217,7 @@ static IrInstruction *ir_build_alloca_src(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
-static IrInstructionAllocaGen *ir_create_alloca_gen(IrAnalyze *ira, IrInstruction *source_instruction,
+static IrInstructionAllocaGen *ir_build_alloca_gen(IrAnalyze *ira, IrInstruction *source_instruction,
uint32_t align, const char *name_hint)
{
    IrInstructionAllocaGen *instruction = ir_create_instruction<IrInstructionAllocaGen>(&ira->new_irb,
@@ -3459,6 +3241,87 @@ static IrInstruction *ir_build_end_expr(IrBuilder *irb, Scope *scope, AstNode *s
return &instruction->base;
}
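+// A suspend point is lowered as a pair of instructions: SuspendBegin saves the
+// resume index into the frame, and SuspendFinish marks where execution continues
+// once the frame is resumed.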
+static IrInstructionSuspendBegin *ir_build_suspend_begin(IrBuilder *irb, Scope *scope, AstNode *source_node) {
+ IrInstructionSuspendBegin *instruction = ir_build_instruction<IrInstructionSuspendBegin>(irb, scope, source_node);
+ instruction->base.value.type = irb->codegen->builtin_types.entry_void;
+
+ return instruction;
+}
+
+static IrInstruction *ir_build_suspend_finish(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstructionSuspendBegin *begin)
+{
+ IrInstructionSuspendFinish *instruction = ir_build_instruction<IrInstructionSuspendFinish>(irb, scope, source_node);
+ instruction->base.value.type = irb->codegen->builtin_types.entry_void;
+ instruction->begin = begin;
+
+ ir_ref_instruction(&begin->base, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_await_src(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstruction *frame, ResultLoc *result_loc)
+{
+ IrInstructionAwaitSrc *instruction = ir_build_instruction<IrInstructionAwaitSrc>(irb, scope, source_node);
+ instruction->frame = frame;
+ instruction->result_loc = result_loc;
+
+ ir_ref_instruction(frame, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_await_gen(IrAnalyze *ira, IrInstruction *source_instruction,
+ IrInstruction *frame, ZigType *result_type, IrInstruction *result_loc)
+{
+ IrInstructionAwaitGen *instruction = ir_build_instruction<IrInstructionAwaitGen>(&ira->new_irb,
+ source_instruction->scope, source_instruction->source_node);
+ instruction->base.value.type = result_type;
+ instruction->frame = frame;
+ instruction->result_loc = result_loc;
+
+ ir_ref_instruction(frame, ira->new_irb.current_basic_block);
+ if (result_loc != nullptr) ir_ref_instruction(result_loc, ira->new_irb.current_basic_block);
+
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_resume(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *frame) {
+ IrInstructionResume *instruction = ir_build_instruction<IrInstructionResume>(irb, scope, source_node);
+ instruction->base.value.type = irb->codegen->builtin_types.entry_void;
+ instruction->frame = frame;
+
+ ir_ref_instruction(frame, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
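+// Spill instructions keep a value alive across a suspend point: SpillBegin writes
+// the operand into the async frame and SpillEnd reads it back afterwards.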
+static IrInstructionSpillBegin *ir_build_spill_begin(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstruction *operand, SpillId spill_id)
+{
+ IrInstructionSpillBegin *instruction = ir_build_instruction<IrInstructionSpillBegin>(irb, scope, source_node);
+ instruction->base.value.special = ConstValSpecialStatic;
+ instruction->base.value.type = irb->codegen->builtin_types.entry_void;
+ instruction->operand = operand;
+ instruction->spill_id = spill_id;
+
+ ir_ref_instruction(operand, irb->current_basic_block);
+
+ return instruction;
+}
+
+static IrInstruction *ir_build_spill_end(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstructionSpillBegin *begin)
+{
+ IrInstructionSpillEnd *instruction = ir_build_instruction<IrInstructionSpillEnd>(irb, scope, source_node);
+ instruction->begin = begin;
+
+ ir_ref_instruction(&begin->base, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) {
results[ReturnKindUnconditional] = 0;
results[ReturnKindError] = 0;
@@ -3489,7 +3352,6 @@ static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_sco
continue;
case ScopeIdDeferExpr:
case ScopeIdCImport:
- case ScopeIdCoroPrelude:
zig_unreachable();
}
}
@@ -3545,7 +3407,6 @@ static bool ir_gen_defers_for_block(IrBuilder *irb, Scope *inner_scope, Scope *o
continue;
case ScopeIdDeferExpr:
case ScopeIdCImport:
- case ScopeIdCoroPrelude:
zig_unreachable();
}
}
@@ -3588,66 +3449,6 @@ static ScopeDeferExpr *get_scope_defer_expr(Scope *scope) {
return nullptr;
}
-static bool exec_is_async(IrExecutable *exec) {
- ZigFn *fn_entry = exec_fn_entry(exec);
- return fn_entry != nullptr && fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync;
-}
-
-static IrInstruction *ir_gen_async_return(IrBuilder *irb, Scope *scope, AstNode *node, IrInstruction *return_value,
- bool is_generated_code)
-{
- ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value));
-
- bool is_async = exec_is_async(irb->exec);
- if (!is_async) {
- IrInstruction *return_inst = ir_build_return(irb, scope, node, return_value);
- return_inst->is_gen = is_generated_code;
- return return_inst;
- }
-
- IrBasicBlock *suspended_block = ir_create_basic_block(irb, scope, "Suspended");
- IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, scope, "NotSuspended");
- IrBasicBlock *store_awaiter_block = ir_create_basic_block(irb, scope, "StoreAwaiter");
- IrBasicBlock *check_canceled_block = ir_create_basic_block(irb, scope, "CheckCanceled");
-
- IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111
- IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
- IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
- IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
- IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_promise);
- IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false);
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
-
- ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_field_ptr, return_value);
- IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
- IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
- usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, ptr_mask, nullptr,
- AtomicRmwOp_or, AtomicOrderSeqCst);
-
- IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
- IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_suspended_bool, suspended_block, not_suspended_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, suspended_block);
- ir_build_unreachable(irb, scope, node);
-
- ir_set_cursor_at_end_and_append_block(irb, not_suspended_block);
- IrInstruction *await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
- // if we ever add null checking safety to the ptrtoint instruction, it needs to be disabled here
- IrInstruction *have_await_handle = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
- ir_build_cond_br(irb, scope, node, have_await_handle, store_awaiter_block, check_canceled_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, store_awaiter_block);
- IrInstruction *await_handle = ir_build_int_to_ptr(irb, scope, node, promise_type_val, await_handle_addr);
- ir_build_store_ptr(irb, scope, node, irb->exec->await_handle_var_ptr, await_handle);
- ir_build_br(irb, scope, node, irb->exec->coro_normal_final, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, check_canceled_block);
- IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
- IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
- return ir_build_cond_br(irb, scope, node, is_canceled_bool, irb->exec->coro_final_cleanup_block, irb->exec->coro_early_final, is_comptime);
-}
-
static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, ResultLoc *result_loc) {
assert(node->type == NodeTypeReturnExpr);
@@ -3689,57 +3490,58 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
return_value = ir_build_const_void(irb, scope, node);
}
+ ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value));
+
size_t defer_counts[2];
ir_count_defers(irb, scope, outer_scope, defer_counts);
bool have_err_defers = defer_counts[ReturnKindError] > 0;
- if (have_err_defers || irb->codegen->have_err_ret_tracing) {
- IrBasicBlock *err_block = ir_create_basic_block(irb, scope, "ErrRetErr");
- IrBasicBlock *ok_block = ir_create_basic_block(irb, scope, "ErrRetOk");
- if (!have_err_defers) {
- ir_gen_defers_for_block(irb, scope, outer_scope, false);
- }
-
- IrInstruction *ret_ptr = ir_build_result_ptr(irb, scope, node, &result_loc_ret->base,
- return_value);
- IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, ret_ptr, false);
-
- bool should_inline = ir_should_inline(irb->exec, scope);
- IrInstruction *is_comptime;
- if (should_inline) {
- is_comptime = ir_build_const_bool(irb, scope, node, true);
- } else {
- is_comptime = ir_build_test_comptime(irb, scope, node, is_err);
- }
-
- ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err, err_block, ok_block, is_comptime));
- IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, scope, "RetStmt");
-
- ir_set_cursor_at_end_and_append_block(irb, err_block);
- if (have_err_defers) {
- ir_gen_defers_for_block(irb, scope, outer_scope, true);
- }
- if (irb->codegen->have_err_ret_tracing && !should_inline) {
- ir_build_save_err_ret_addr(irb, scope, node);
- }
- ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, ok_block);
- if (have_err_defers) {
- ir_gen_defers_for_block(irb, scope, outer_scope, false);
- }
- ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block);
- IrInstruction *result = ir_gen_async_return(irb, scope, node, return_value, false);
- result_loc_ret->base.source_instruction = result;
- return result;
- } else {
- // generate unconditional defers
+ if (!have_err_defers && !irb->codegen->have_err_ret_tracing) {
+ // only generate unconditional defers
ir_gen_defers_for_block(irb, scope, outer_scope, false);
- IrInstruction *result = ir_gen_async_return(irb, scope, node, return_value, false);
+ IrInstruction *result = ir_build_return(irb, scope, node, return_value);
result_loc_ret->base.source_instruction = result;
return result;
}
+ bool should_inline = ir_should_inline(irb->exec, scope);
+
+ IrBasicBlock *err_block = ir_create_basic_block(irb, scope, "ErrRetErr");
+ IrBasicBlock *ok_block = ir_create_basic_block(irb, scope, "ErrRetOk");
+
+ if (!have_err_defers) {
+ ir_gen_defers_for_block(irb, scope, outer_scope, false);
+ }
+
+ IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, return_value, false, true);
+
+ IrInstruction *is_comptime;
+ if (should_inline) {
+ is_comptime = ir_build_const_bool(irb, scope, node, should_inline);
+ } else {
+ is_comptime = ir_build_test_comptime(irb, scope, node, is_err);
+ }
+
+ ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err, err_block, ok_block, is_comptime));
+ IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, scope, "RetStmt");
+
+ ir_set_cursor_at_end_and_append_block(irb, err_block);
+ if (have_err_defers) {
+ ir_gen_defers_for_block(irb, scope, outer_scope, true);
+ }
+ if (irb->codegen->have_err_ret_tracing && !should_inline) {
+ ir_build_save_err_ret_addr(irb, scope, node);
+ }
+ ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, ok_block);
+ if (have_err_defers) {
+ ir_gen_defers_for_block(irb, scope, outer_scope, false);
+ }
+ ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block);
+ IrInstruction *result = ir_build_return(irb, scope, node, return_value);
+ result_loc_ret->base.source_instruction = result;
+ return result;
}
case ReturnKindError:
{
@@ -3747,7 +3549,7 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
IrInstruction *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr, nullptr);
if (err_union_ptr == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
- IrInstruction *is_err_val = ir_build_test_err_src(irb, scope, node, err_union_ptr, true);
+ IrInstruction *is_err_val = ir_build_test_err_src(irb, scope, node, err_union_ptr, true, false);
IrBasicBlock *return_block = ir_create_basic_block(irb, scope, "ErrRetReturn");
IrBasicBlock *continue_block = ir_create_basic_block(irb, scope, "ErrRetContinue");
@@ -3761,19 +3563,21 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err_val, return_block, continue_block, is_comptime));
ir_set_cursor_at_end_and_append_block(irb, return_block);
+ IrInstruction *err_val_ptr = ir_build_unwrap_err_code(irb, scope, node, err_union_ptr);
+ IrInstruction *err_val = ir_build_load_ptr(irb, scope, node, err_val_ptr);
+ ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, err_val));
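+        // The spill brackets the defer expressions generated below: in an async
+        // function a defer can contain a suspend point, so err_val has to survive
+        // in the frame until ir_build_spill_end reloads it just before the return.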
+ IrInstructionSpillBegin *spill_begin = ir_build_spill_begin(irb, scope, node, err_val,
+ SpillIdRetErrCode);
+        ResultLocReturn *result_loc_ret = allocate<ResultLocReturn>(1);
+ result_loc_ret->base.id = ResultLocIdReturn;
+ ir_build_reset_result(irb, scope, node, &result_loc_ret->base);
+ ir_build_end_expr(irb, scope, node, err_val, &result_loc_ret->base);
if (!ir_gen_defers_for_block(irb, scope, outer_scope, true)) {
- IrInstruction *err_val_ptr = ir_build_unwrap_err_code(irb, scope, node, err_union_ptr);
- IrInstruction *err_val = ir_build_load_ptr(irb, scope, node, err_val_ptr);
-
-            ResultLocReturn *result_loc_ret = allocate<ResultLocReturn>(1);
- result_loc_ret->base.id = ResultLocIdReturn;
- ir_build_reset_result(irb, scope, node, &result_loc_ret->base);
- ir_build_end_expr(irb, scope, node, err_val, &result_loc_ret->base);
-
if (irb->codegen->have_err_ret_tracing && !should_inline) {
ir_build_save_err_ret_addr(irb, scope, node);
}
- IrInstruction *ret_inst = ir_gen_async_return(irb, scope, node, err_val, false);
+ err_val = ir_build_spill_end(irb, scope, node, spill_begin);
+ IrInstruction *ret_inst = ir_build_return(irb, scope, node, err_val);
result_loc_ret->base.source_instruction = ret_inst;
}
@@ -3971,18 +3775,31 @@ static IrInstruction *ir_gen_block(IrBuilder *irb, Scope *parent_scope, AstNode
incoming_values.append(else_expr_result);
}
- if (block_node->data.block.name != nullptr) {
+ bool is_return_from_fn = block_node == irb->main_block_node;
+ if (!is_return_from_fn) {
ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false);
+ }
+
+ IrInstruction *result;
+ if (block_node->data.block.name != nullptr) {
ir_mark_gen(ir_build_br(irb, parent_scope, block_node, scope_block->end_block, scope_block->is_comptime));
ir_set_cursor_at_end_and_append_block(irb, scope_block->end_block);
IrInstruction *phi = ir_build_phi(irb, parent_scope, block_node, incoming_blocks.length,
incoming_blocks.items, incoming_values.items, scope_block->peer_parent);
- return ir_expr_wrap(irb, parent_scope, phi, result_loc);
+ result = ir_expr_wrap(irb, parent_scope, phi, result_loc);
} else {
- ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false);
IrInstruction *void_inst = ir_mark_gen(ir_build_const_void(irb, child_scope, block_node));
- return ir_lval_wrap(irb, parent_scope, void_inst, lval, result_loc);
+ result = ir_lval_wrap(irb, parent_scope, void_inst, lval, result_loc);
}
+ if (!is_return_from_fn)
+ return result;
+
+    // no need for save_err_ret_addr because this cannot return an error
+ // only generate unconditional defers
+
+ ir_mark_gen(ir_build_add_implicit_return_type(irb, child_scope, block_node, result));
+ ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false);
+ return ir_mark_gen(ir_build_return(irb, child_scope, result->source_node, result));
}
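+// When block_node is the function's top-level block (main_block_node), the block's
+// value becomes the implicit return, emitted after the unconditional defers.
+// Illustrative Zig (hypothetical names):
+//
+//     fn g() void {}           // falls off the end: implicit return emitted here
+//     fn f() i32 { return 1; } // explicit returns go through ir_gen_return instead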
static IrInstruction *ir_gen_bin_op_id(IrBuilder *irb, Scope *scope, AstNode *node, IrBinOp op_id) {
@@ -4561,8 +4378,6 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
return irb->codegen->invalid_instruction;
}
- bool is_async = exec_is_async(irb->exec);
-
switch (builtin_fn->id) {
case BuiltinFnIdInvalid:
zig_unreachable();
@@ -5185,16 +5000,30 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
return ir_lval_wrap(irb, scope, ir_build_return_address(irb, scope, node), lval, result_loc);
case BuiltinFnIdFrameAddress:
return ir_lval_wrap(irb, scope, ir_build_frame_address(irb, scope, node), lval, result_loc);
- case BuiltinFnIdHandle:
+ case BuiltinFnIdFrameHandle:
if (!irb->exec->fn_entry) {
- add_node_error(irb->codegen, node, buf_sprintf("@handle() called outside of function definition"));
- return irb->codegen->invalid_instruction;
- }
- if (!is_async) {
- add_node_error(irb->codegen, node, buf_sprintf("@handle() in non-async function"));
+ add_node_error(irb->codegen, node, buf_sprintf("@frame() called outside of function definition"));
return irb->codegen->invalid_instruction;
}
return ir_lval_wrap(irb, scope, ir_build_handle(irb, scope, node), lval, result_loc);
+ case BuiltinFnIdFrameType: {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ IrInstruction *frame_type = ir_build_frame_type(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, frame_type, lval, result_loc);
+ }
+ case BuiltinFnIdFrameSize: {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ IrInstruction *frame_size = ir_build_frame_size_src(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, frame_size, lval, result_loc);
+ }
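+        // Illustrative Zig usage of the frame builtins wired up above (semantics
+        // assumed from the argument handling here; `func` is hypothetical):
+        //
+        //     fn func() void {
+        //         const me = @frame();    // handle to the running function's frame
+        //         suspend;
+        //     }
+        //     const n = @frameSize(func); // byte size of func's frame
+        //     var f: @Frame(func) = async func();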
case BuiltinFnIdAlignOf:
{
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
@@ -5395,13 +5224,15 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
FnInline fn_inline = (builtin_fn->id == BuiltinFnIdInlineCall) ? FnInlineAlways : FnInlineNever;
IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false,
- fn_inline, false, nullptr, nullptr, result_loc);
+ fn_inline, false, nullptr, result_loc);
return ir_lval_wrap(irb, scope, call, lval, result_loc);
}
case BuiltinFnIdNewStackCall:
{
- if (node->data.fn_call_expr.params.length == 0) {
- add_node_error(irb->codegen, node, buf_sprintf("expected at least 1 argument, found 0"));
+ if (node->data.fn_call_expr.params.length < 2) {
+ add_node_error(irb->codegen, node,
+ buf_sprintf("expected at least 2 arguments, found %" ZIG_PRI_usize,
+ node->data.fn_call_expr.params.length));
return irb->codegen->invalid_instruction;
}
@@ -5426,7 +5257,51 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
}
IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false,
- FnInlineAuto, false, nullptr, new_stack, result_loc);
+ FnInlineAuto, false, new_stack, result_loc);
+ return ir_lval_wrap(irb, scope, call, lval, result_loc);
+ }
+ case BuiltinFnIdAsyncCall:
+ {
+ size_t arg_offset = 3;
+ if (node->data.fn_call_expr.params.length < arg_offset) {
+ add_node_error(irb->codegen, node,
+ buf_sprintf("expected at least %" ZIG_PRI_usize " arguments, found %" ZIG_PRI_usize,
+ arg_offset, node->data.fn_call_expr.params.length));
+ return irb->codegen->invalid_instruction;
+ }
+
+ AstNode *bytes_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *bytes = ir_gen_node(irb, bytes_node, scope);
+ if (bytes == irb->codegen->invalid_instruction)
+ return bytes;
+
+ AstNode *ret_ptr_node = node->data.fn_call_expr.params.at(1);
+ IrInstruction *ret_ptr = ir_gen_node(irb, ret_ptr_node, scope);
+ if (ret_ptr == irb->codegen->invalid_instruction)
+ return ret_ptr;
+
+ AstNode *fn_ref_node = node->data.fn_call_expr.params.at(2);
+ IrInstruction *fn_ref = ir_gen_node(irb, fn_ref_node, scope);
+ if (fn_ref == irb->codegen->invalid_instruction)
+ return fn_ref;
+
+ size_t arg_count = node->data.fn_call_expr.params.length - arg_offset;
+
+ // last "arg" is return pointer
+            IrInstruction **args = allocate<IrInstruction *>(arg_count + 1);
+
+ for (size_t i = 0; i < arg_count; i += 1) {
+ AstNode *arg_node = node->data.fn_call_expr.params.at(i + arg_offset);
+ IrInstruction *arg = ir_gen_node(irb, arg_node, scope);
+ if (arg == irb->codegen->invalid_instruction)
+ return arg;
+ args[i] = arg;
+ }
+
+ args[arg_count] = ret_ptr;
+
+ IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false,
+ FnInlineAuto, true, bytes, result_loc);
return ir_lval_wrap(irb, scope, call, lval, result_loc);
}
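+        // Illustrative @asyncCall matching the argument order parsed above: frame
+        // buffer bytes, result pointer, function, then the function's own
+        // arguments. (Sketch with hypothetical names; the buffer's alignment
+        // requirement is assumed.)
+        //
+        //     var buf: [1024]u8 align(16) = undefined;
+        //     var result: i32 = undefined;
+        //     var frame = @asyncCall(&buf, &result, add, 1, 2);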
case BuiltinFnIdTypeId:
@@ -5731,17 +5606,8 @@ static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node
}
bool is_async = node->data.fn_call_expr.is_async;
- IrInstruction *async_allocator = nullptr;
- if (is_async) {
- if (node->data.fn_call_expr.async_allocator) {
- async_allocator = ir_gen_node(irb, node->data.fn_call_expr.async_allocator, scope);
- if (async_allocator == irb->codegen->invalid_instruction)
- return async_allocator;
- }
- }
-
- IrInstruction *fn_call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto,
- is_async, async_allocator, nullptr, result_loc);
+ IrInstruction *fn_call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false,
+ FnInlineAuto, is_async, nullptr, result_loc);
return ir_lval_wrap(irb, scope, fn_call, lval, result_loc);
}
@@ -6254,7 +6120,8 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n
LValPtr, nullptr);
if (err_val_ptr == irb->codegen->invalid_instruction)
return err_val_ptr;
- IrInstruction *is_err = ir_build_test_err_src(irb, scope, node->data.while_expr.condition, err_val_ptr, true);
+ IrInstruction *is_err = ir_build_test_err_src(irb, scope, node->data.while_expr.condition, err_val_ptr,
+ true, false);
IrBasicBlock *after_cond_block = irb->current_basic_block;
IrInstruction *void_else_result = else_node ? nullptr : ir_mark_gen(ir_build_const_void(irb, scope, node));
IrInstruction *cond_br_inst;
@@ -6762,10 +6629,10 @@ static IrInstruction *ir_gen_array_type(IrBuilder *irb, Scope *scope, AstNode *n
}
}
-static IrInstruction *ir_gen_promise_type(IrBuilder *irb, Scope *scope, AstNode *node) {
- assert(node->type == NodeTypePromiseType);
+static IrInstruction *ir_gen_anyframe_type(IrBuilder *irb, Scope *scope, AstNode *node) {
+ assert(node->type == NodeTypeAnyFrameType);
- AstNode *payload_type_node = node->data.promise_type.payload_type;
+ AstNode *payload_type_node = node->data.anyframe_type.payload_type;
IrInstruction *payload_type_value = nullptr;
if (payload_type_node != nullptr) {
@@ -6775,7 +6642,7 @@ static IrInstruction *ir_gen_promise_type(IrBuilder *irb, Scope *scope, AstNode
}
- return ir_build_promise_type(irb, scope, node, payload_type_value);
+ return ir_build_anyframe_type(irb, scope, node, payload_type_value);
}
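+// The old promise / promise->T syntax is replaced by anyframe / anyframe->T; the
+// payload type is optional, matching the nullable payload_type_node above.
+// Illustrative Zig:
+//
+//     var p: anyframe = undefined;       // payload_type_node == nullptr
+//     var q: anyframe->i32 = undefined;  // payload type present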
static IrInstruction *ir_gen_undefined_literal(IrBuilder *irb, Scope *scope, AstNode *node) {
@@ -7070,7 +6937,7 @@ static IrInstruction *ir_gen_if_err_expr(IrBuilder *irb, Scope *scope, AstNode *
return err_val_ptr;
IrInstruction *err_val = ir_build_load_ptr(irb, scope, node, err_val_ptr);
- IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, err_val_ptr, true);
+ IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, err_val_ptr, true, false);
IrBasicBlock *ok_block = ir_create_basic_block(irb, scope, "TryOk");
IrBasicBlock *else_block = ir_create_basic_block(irb, scope, "TryElse");
@@ -7686,7 +7553,7 @@ static IrInstruction *ir_gen_catch(IrBuilder *irb, Scope *parent_scope, AstNode
if (err_union_ptr == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
- IrInstruction *is_err = ir_build_test_err_src(irb, parent_scope, node, err_union_ptr, true);
+ IrInstruction *is_err = ir_build_test_err_src(irb, parent_scope, node, err_union_ptr, true, false);
IrInstruction *is_comptime;
if (ir_should_inline(irb->exec, parent_scope)) {
@@ -7980,339 +7847,45 @@ static IrInstruction *ir_gen_fn_proto(IrBuilder *irb, Scope *parent_scope, AstNo
//return_type = nullptr;
}
- IrInstruction *async_allocator_type_value = nullptr;
- if (node->data.fn_proto.async_allocator_type != nullptr) {
- async_allocator_type_value = ir_gen_node(irb, node->data.fn_proto.async_allocator_type, parent_scope);
- if (async_allocator_type_value == irb->codegen->invalid_instruction)
- return irb->codegen->invalid_instruction;
- }
-
- return ir_build_fn_proto(irb, parent_scope, node, param_types, align_value, return_type,
- async_allocator_type_value, is_var_args);
-}
-
-static IrInstruction *ir_gen_cancel_target(IrBuilder *irb, Scope *scope, AstNode *node,
- IrInstruction *target_inst, bool cancel_non_suspended, bool cancel_awaited)
-{
- IrBasicBlock *done_block = ir_create_basic_block(irb, scope, "CancelDone");
- IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
- IrBasicBlock *pre_return_block = ir_create_basic_block(irb, scope, "PreReturn");
- IrBasicBlock *post_return_block = ir_create_basic_block(irb, scope, "PostReturn");
- IrBasicBlock *do_cancel_block = ir_create_basic_block(irb, scope, "DoCancel");
-
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
- IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
- IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false);
- IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
- IrInstruction *promise_T_type_val = ir_build_const_type(irb, scope, node,
- get_promise_type(irb->codegen, irb->codegen->builtin_types.entry_void));
- IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111
- IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
- IrInstruction *await_mask = ir_build_const_usize(irb, scope, node, 0x4); // 0b100
- IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
-
- // TODO relies on Zig not re-ordering fields
- IrInstruction *casted_target_inst = ir_build_ptr_cast_src(irb, scope, node, promise_T_type_val, target_inst,
- false);
- IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, casted_target_inst);
- Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
- IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
- atomic_state_field_name, false);
-
- // set the is_canceled bit
- IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
- usize_type_val, atomic_state_ptr, nullptr, is_canceled_mask, nullptr,
- AtomicRmwOp_or, AtomicOrderSeqCst);
-
- IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
- IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_canceled_bool, done_block, not_canceled_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
- IrInstruction *awaiter_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
- IrInstruction *is_returned_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpEq, awaiter_addr, ptr_mask, false);
- ir_build_cond_br(irb, scope, node, is_returned_bool, post_return_block, pre_return_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, post_return_block);
- if (cancel_awaited) {
- ir_build_br(irb, scope, node, do_cancel_block, is_comptime);
- } else {
- IrInstruction *is_awaited_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, await_mask, false);
- IrInstruction *is_awaited_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_awaited_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_awaited_bool, done_block, do_cancel_block, is_comptime);
- }
-
- ir_set_cursor_at_end_and_append_block(irb, pre_return_block);
- if (cancel_awaited) {
- if (cancel_non_suspended) {
- ir_build_br(irb, scope, node, do_cancel_block, is_comptime);
- } else {
- IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
- IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_suspended_bool, do_cancel_block, done_block, is_comptime);
- }
- } else {
- ir_build_br(irb, scope, node, done_block, is_comptime);
- }
-
- ir_set_cursor_at_end_and_append_block(irb, do_cancel_block);
- ir_build_cancel(irb, scope, node, target_inst);
- ir_build_br(irb, scope, node, done_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, done_block);
- return ir_build_const_void(irb, scope, node);
-}
-
-static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node) {
- assert(node->type == NodeTypeCancel);
-
- IrInstruction *target_inst = ir_gen_node(irb, node->data.cancel_expr.expr, scope);
- if (target_inst == irb->codegen->invalid_instruction)
- return irb->codegen->invalid_instruction;
-
- return ir_gen_cancel_target(irb, scope, node, target_inst, false, true);
-}
-
-static IrInstruction *ir_gen_resume_target(IrBuilder *irb, Scope *scope, AstNode *node,
- IrInstruction *target_inst)
-{
- IrBasicBlock *done_block = ir_create_basic_block(irb, scope, "ResumeDone");
- IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
- IrBasicBlock *suspended_block = ir_create_basic_block(irb, scope, "IsSuspended");
- IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, scope, "IsNotSuspended");
-
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
- IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
- IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
- IrInstruction *and_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, is_suspended_mask);
- IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false);
- IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
- IrInstruction *promise_T_type_val = ir_build_const_type(irb, scope, node,
- get_promise_type(irb->codegen, irb->codegen->builtin_types.entry_void));
-
- // TODO relies on Zig not re-ordering fields
- IrInstruction *casted_target_inst = ir_build_ptr_cast_src(irb, scope, node, promise_T_type_val, target_inst,
- false);
- IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, casted_target_inst);
- Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
- IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
- atomic_state_field_name, false);
-
- // clear the is_suspended bit
- IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
- usize_type_val, atomic_state_ptr, nullptr, and_mask, nullptr,
- AtomicRmwOp_and, AtomicOrderSeqCst);
-
- IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
- IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_canceled_bool, done_block, not_canceled_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
- IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
- IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_suspended_bool, suspended_block, not_suspended_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, not_suspended_block);
- ir_build_unreachable(irb, scope, node);
-
- ir_set_cursor_at_end_and_append_block(irb, suspended_block);
- ir_build_coro_resume(irb, scope, node, target_inst);
- ir_build_br(irb, scope, node, done_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, done_block);
- return ir_build_const_void(irb, scope, node);
+ return ir_build_fn_proto(irb, parent_scope, node, param_types, align_value, return_type, is_var_args);
}
static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypeResume);
- IrInstruction *target_inst = ir_gen_node(irb, node->data.resume_expr.expr, scope);
+ IrInstruction *target_inst = ir_gen_node_extra(irb, node->data.resume_expr.expr, scope, LValPtr, nullptr);
if (target_inst == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
- return ir_gen_resume_target(irb, scope, node, target_inst);
+ return ir_build_resume(irb, scope, node, target_inst);
}
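+// resume now lowers to a single IR instruction; the old inline atomic state
+// machine (clear the is_suspended bit, then coro_resume) is gone. The operand is
+// generated with LValPtr so the frame is resumed in place through its address.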
-static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *node) {
+static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval,
+ ResultLoc *result_loc)
+{
assert(node->type == NodeTypeAwaitExpr);
- IrInstruction *target_inst = ir_gen_node(irb, node->data.await_expr.expr, scope);
- if (target_inst == irb->codegen->invalid_instruction)
- return irb->codegen->invalid_instruction;
-
ZigFn *fn_entry = exec_fn_entry(irb->exec);
if (!fn_entry) {
add_node_error(irb->codegen, node, buf_sprintf("await outside function definition"));
return irb->codegen->invalid_instruction;
}
- if (fn_entry->type_entry->data.fn.fn_type_id.cc != CallingConventionAsync) {
- add_node_error(irb->codegen, node, buf_sprintf("await in non-async function"));
- return irb->codegen->invalid_instruction;
- }
-
- ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(scope);
- if (scope_defer_expr) {
- if (!scope_defer_expr->reported_err) {
- add_node_error(irb->codegen, node, buf_sprintf("cannot await inside defer expression"));
- scope_defer_expr->reported_err = true;
+ ScopeSuspend *existing_suspend_scope = get_scope_suspend(scope);
+ if (existing_suspend_scope) {
+ if (!existing_suspend_scope->reported_err) {
+ ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot await inside suspend block"));
+ add_error_note(irb->codegen, msg, existing_suspend_scope->base.source_node, buf_sprintf("suspend block here"));
+ existing_suspend_scope->reported_err = true;
}
return irb->codegen->invalid_instruction;
}
- Scope *outer_scope = irb->exec->begin_scope;
+ IrInstruction *target_inst = ir_gen_node_extra(irb, node->data.await_expr.expr, scope, LValPtr, nullptr);
+ if (target_inst == irb->codegen->invalid_instruction)
+ return irb->codegen->invalid_instruction;
- IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, target_inst);
- Buf *result_ptr_field_name = buf_create_from_str(RESULT_PTR_FIELD_NAME);
- IrInstruction *result_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_ptr_field_name, false);
-
- if (irb->codegen->have_err_ret_tracing) {
- IrInstruction *err_ret_trace_ptr = ir_build_error_return_trace(irb, scope, node, IrInstructionErrorReturnTrace::NonNull);
- Buf *err_ret_trace_ptr_field_name = buf_create_from_str(ERR_RET_TRACE_PTR_FIELD_NAME);
- IrInstruction *err_ret_trace_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr_field_name, false);
- ir_build_store_ptr(irb, scope, node, err_ret_trace_ptr_field_ptr, err_ret_trace_ptr);
- }
-
- IrBasicBlock *already_awaited_block = ir_create_basic_block(irb, scope, "AlreadyAwaited");
- IrBasicBlock *not_awaited_block = ir_create_basic_block(irb, scope, "NotAwaited");
- IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
- IrBasicBlock *yes_suspend_block = ir_create_basic_block(irb, scope, "YesSuspend");
- IrBasicBlock *no_suspend_block = ir_create_basic_block(irb, scope, "NoSuspend");
- IrBasicBlock *merge_block = ir_create_basic_block(irb, scope, "MergeSuspend");
- IrBasicBlock *cleanup_block = ir_create_basic_block(irb, scope, "SuspendCleanup");
- IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "SuspendResume");
- IrBasicBlock *cancel_target_block = ir_create_basic_block(irb, scope, "CancelTarget");
- IrBasicBlock *do_cancel_block = ir_create_basic_block(irb, scope, "DoCancel");
- IrBasicBlock *do_defers_block = ir_create_basic_block(irb, scope, "DoDefers");
- IrBasicBlock *destroy_block = ir_create_basic_block(irb, scope, "DestroyBlock");
- IrBasicBlock *my_suspended_block = ir_create_basic_block(irb, scope, "AlreadySuspended");
- IrBasicBlock *my_not_suspended_block = ir_create_basic_block(irb, scope, "NotAlreadySuspended");
- IrBasicBlock *do_suspend_block = ir_create_basic_block(irb, scope, "DoSuspend");
-
- Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
- IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
- atomic_state_field_name, false);
-
- IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_promise);
- IrInstruction *const_bool_false = ir_build_const_bool(irb, scope, node, false);
- IrInstruction *undef = ir_build_const_undefined(irb, scope, node);
- IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
- IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111
- IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
- IrInstruction *await_mask = ir_build_const_usize(irb, scope, node, 0x4); // 0b100
- IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
- IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
-
- ZigVar *result_var = ir_create_var(irb, node, scope, nullptr,
- false, false, true, const_bool_false);
- IrInstruction *target_promise_type = ir_build_typeof(irb, scope, node, target_inst);
- IrInstruction *promise_result_type = ir_build_promise_result_type(irb, scope, node, target_promise_type);
- ir_build_await_bookkeeping(irb, scope, node, promise_result_type);
- IrInstruction *undef_promise_result = ir_build_implicit_cast(irb, scope, node, promise_result_type, undef, nullptr);
- build_decl_var_and_init(irb, scope, node, result_var, undef_promise_result, "result", const_bool_false);
- IrInstruction *my_result_var_ptr = ir_build_var_ptr(irb, scope, node, result_var);
- ir_build_store_ptr(irb, scope, node, result_ptr_field_ptr, my_result_var_ptr);
- IrInstruction *save_token = ir_build_coro_save(irb, scope, node, irb->exec->coro_handle);
-
- IrInstruction *coro_handle_addr = ir_build_ptr_to_int(irb, scope, node, irb->exec->coro_handle);
- IrInstruction *mask_bits = ir_build_bin_op(irb, scope, node, IrBinOpBinOr, coro_handle_addr, await_mask, false);
- IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
- usize_type_val, atomic_state_ptr, nullptr, mask_bits, nullptr,
- AtomicRmwOp_or, AtomicOrderSeqCst);
-
- IrInstruction *is_awaited_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, await_mask, false);
- IrInstruction *is_awaited_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_awaited_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_awaited_bool, already_awaited_block, not_awaited_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, already_awaited_block);
- ir_build_unreachable(irb, scope, node);
-
- ir_set_cursor_at_end_and_append_block(irb, not_awaited_block);
- IrInstruction *await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
- IrInstruction *is_non_null = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
- IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
- IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_canceled_bool, cancel_target_block, not_canceled_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
- ir_build_cond_br(irb, scope, node, is_non_null, no_suspend_block, yes_suspend_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, cancel_target_block);
- ir_build_cancel(irb, scope, node, target_inst);
- ir_mark_gen(ir_build_br(irb, scope, node, cleanup_block, const_bool_false));
-
- ir_set_cursor_at_end_and_append_block(irb, no_suspend_block);
- if (irb->codegen->have_err_ret_tracing) {
- Buf *err_ret_trace_field_name = buf_create_from_str(ERR_RET_TRACE_FIELD_NAME);
- IrInstruction *src_err_ret_trace_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_field_name, false);
- IrInstruction *dest_err_ret_trace_ptr = ir_build_error_return_trace(irb, scope, node, IrInstructionErrorReturnTrace::NonNull);
- ir_build_merge_err_ret_traces(irb, scope, node, coro_promise_ptr, src_err_ret_trace_ptr, dest_err_ret_trace_ptr);
- }
- Buf *result_field_name = buf_create_from_str(RESULT_FIELD_NAME);
- IrInstruction *promise_result_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name, false);
-    // If the type of the result is handle_is_ptr, then this does not actually perform a load. But we need it to,
- // because we're about to destroy the memory. So we store it into our result variable.
- IrInstruction *no_suspend_result = ir_build_load_ptr(irb, scope, node, promise_result_ptr);
- ir_build_store_ptr(irb, scope, node, my_result_var_ptr, no_suspend_result);
- ir_build_cancel(irb, scope, node, target_inst);
- ir_build_br(irb, scope, node, merge_block, const_bool_false);
-
-
- ir_set_cursor_at_end_and_append_block(irb, yes_suspend_block);
- IrInstruction *my_prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
- usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, is_suspended_mask, nullptr,
- AtomicRmwOp_or, AtomicOrderSeqCst);
- IrInstruction *my_is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, my_prev_atomic_value, is_suspended_mask, false);
- IrInstruction *my_is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, my_is_suspended_value, zero, false);
- ir_build_cond_br(irb, scope, node, my_is_suspended_bool, my_suspended_block, my_not_suspended_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, my_suspended_block);
- ir_build_unreachable(irb, scope, node);
-
- ir_set_cursor_at_end_and_append_block(irb, my_not_suspended_block);
- IrInstruction *my_is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, my_prev_atomic_value, is_canceled_mask, false);
- IrInstruction *my_is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, my_is_canceled_value, zero, false);
- ir_build_cond_br(irb, scope, node, my_is_canceled_bool, cleanup_block, do_suspend_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, do_suspend_block);
- IrInstruction *suspend_code = ir_build_coro_suspend(irb, scope, node, save_token, const_bool_false);
-
-    IrInstructionSwitchBrCase *cases = allocate<IrInstructionSwitchBrCase>(2);
- cases[0].value = ir_build_const_u8(irb, scope, node, 0);
- cases[0].block = resume_block;
- cases[1].value = ir_build_const_u8(irb, scope, node, 1);
- cases[1].block = destroy_block;
- ir_build_switch_br(irb, scope, node, suspend_code, irb->exec->coro_suspend_block,
- 2, cases, const_bool_false, nullptr);
-
- ir_set_cursor_at_end_and_append_block(irb, destroy_block);
- ir_gen_cancel_target(irb, scope, node, target_inst, false, true);
- ir_mark_gen(ir_build_br(irb, scope, node, cleanup_block, const_bool_false));
-
- ir_set_cursor_at_end_and_append_block(irb, cleanup_block);
- IrInstruction *my_mask_bits = ir_build_bin_op(irb, scope, node, IrBinOpBinOr, ptr_mask, is_canceled_mask, false);
- IrInstruction *b_my_prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
- usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, my_mask_bits, nullptr,
- AtomicRmwOp_or, AtomicOrderSeqCst);
- IrInstruction *my_await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, b_my_prev_atomic_value, ptr_mask, false);
- IrInstruction *dont_have_my_await_handle = ir_build_bin_op(irb, scope, node, IrBinOpCmpEq, my_await_handle_addr, zero, false);
- IrInstruction *dont_destroy_ourselves = ir_build_bin_op(irb, scope, node, IrBinOpBoolAnd, dont_have_my_await_handle, is_canceled_bool, false);
- ir_build_cond_br(irb, scope, node, dont_have_my_await_handle, do_defers_block, do_cancel_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, do_cancel_block);
- IrInstruction *my_await_handle = ir_build_int_to_ptr(irb, scope, node, promise_type_val, my_await_handle_addr);
- ir_gen_cancel_target(irb, scope, node, my_await_handle, true, false);
- ir_mark_gen(ir_build_br(irb, scope, node, do_defers_block, const_bool_false));
-
- ir_set_cursor_at_end_and_append_block(irb, do_defers_block);
- ir_gen_defers_for_block(irb, scope, outer_scope, true);
- ir_mark_gen(ir_build_cond_br(irb, scope, node, dont_destroy_ourselves, irb->exec->coro_early_final, irb->exec->coro_final_cleanup_block, const_bool_false));
-
- ir_set_cursor_at_end_and_append_block(irb, resume_block);
- ir_build_br(irb, scope, node, merge_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, merge_block);
- return ir_build_load_ptr(irb, scope, node, my_result_var_ptr);
+ IrInstruction *await_inst = ir_build_await_src(irb, scope, node, target_inst, result_loc);
+ return ir_lval_wrap(irb, scope, await_inst, lval, result_loc);
}
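+// await now lowers to one await instruction carrying a result location, so the
+// awaited function's return value can presumably be written straight into the
+// awaiter's destination. Illustrative Zig (hypothetical names):
+//
+//     var frame = async fetch();
+//     const value = await frame;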
static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNode *node) {
@@ -8323,20 +7896,6 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod
add_node_error(irb->codegen, node, buf_sprintf("suspend outside function definition"));
return irb->codegen->invalid_instruction;
}
- if (fn_entry->type_entry->data.fn.fn_type_id.cc != CallingConventionAsync) {
- add_node_error(irb->codegen, node, buf_sprintf("suspend in non-async function"));
- return irb->codegen->invalid_instruction;
- }
-
- ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(parent_scope);
- if (scope_defer_expr) {
- if (!scope_defer_expr->reported_err) {
- ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside defer expression"));
- add_error_note(irb->codegen, msg, scope_defer_expr->base.source_node, buf_sprintf("defer here"));
- scope_defer_expr->reported_err = true;
- }
- return irb->codegen->invalid_instruction;
- }
ScopeSuspend *existing_suspend_scope = get_scope_suspend(parent_scope);
if (existing_suspend_scope) {
if (!existing_suspend_scope->reported_err) {
@@ -8347,91 +7906,15 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod
return irb->codegen->invalid_instruction;
}
- Scope *outer_scope = irb->exec->begin_scope;
-
- IrBasicBlock *cleanup_block = ir_create_basic_block(irb, parent_scope, "SuspendCleanup");
- IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume");
- IrBasicBlock *suspended_block = ir_create_basic_block(irb, parent_scope, "AlreadySuspended");
- IrBasicBlock *canceled_block = ir_create_basic_block(irb, parent_scope, "IsCanceled");
- IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, parent_scope, "NotCanceled");
- IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, parent_scope, "NotAlreadySuspended");
- IrBasicBlock *cancel_awaiter_block = ir_create_basic_block(irb, parent_scope, "CancelAwaiter");
-
- IrInstruction *promise_type_val = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_promise);
- IrInstruction *const_bool_true = ir_build_const_bool(irb, parent_scope, node, true);
- IrInstruction *const_bool_false = ir_build_const_bool(irb, parent_scope, node, false);
- IrInstruction *usize_type_val = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_usize);
- IrInstruction *is_canceled_mask = ir_build_const_usize(irb, parent_scope, node, 0x1); // 0b001
- IrInstruction *is_suspended_mask = ir_build_const_usize(irb, parent_scope, node, 0x2); // 0b010
- IrInstruction *zero = ir_build_const_usize(irb, parent_scope, node, 0);
- IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, parent_scope, node, 0x7); // 0b111
- IrInstruction *ptr_mask = ir_build_un_op(irb, parent_scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
-
- IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, parent_scope, node,
- usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, is_suspended_mask, nullptr,
- AtomicRmwOp_or, AtomicOrderSeqCst);
-
- IrInstruction *is_canceled_value = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
- IrInstruction *is_canceled_bool = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
- ir_build_cond_br(irb, parent_scope, node, is_canceled_bool, canceled_block, not_canceled_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, canceled_block);
- IrInstruction *await_handle_addr = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
- IrInstruction *have_await_handle = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
- IrBasicBlock *post_canceled_block = irb->current_basic_block;
- ir_build_cond_br(irb, parent_scope, node, have_await_handle, cancel_awaiter_block, cleanup_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, cancel_awaiter_block);
- IrInstruction *await_handle = ir_build_int_to_ptr(irb, parent_scope, node, promise_type_val, await_handle_addr);
- ir_gen_cancel_target(irb, parent_scope, node, await_handle, true, false);
- IrBasicBlock *post_cancel_awaiter_block = irb->current_basic_block;
- ir_build_br(irb, parent_scope, node, cleanup_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
- IrInstruction *is_suspended_value = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
- IrInstruction *is_suspended_bool = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
- ir_build_cond_br(irb, parent_scope, node, is_suspended_bool, suspended_block, not_suspended_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, suspended_block);
- ir_build_unreachable(irb, parent_scope, node);
-
- ir_set_cursor_at_end_and_append_block(irb, not_suspended_block);
- IrInstruction *suspend_code;
- if (node->data.suspend.block == nullptr) {
- suspend_code = ir_build_coro_suspend(irb, parent_scope, node, nullptr, const_bool_false);
- } else {
- Scope *child_scope;
+ IrInstructionSuspendBegin *begin = ir_build_suspend_begin(irb, parent_scope, node);
+ if (node->data.suspend.block != nullptr) {
ScopeSuspend *suspend_scope = create_suspend_scope(irb->codegen, node, parent_scope);
- suspend_scope->resume_block = resume_block;
- child_scope = &suspend_scope->base;
- IrInstruction *save_token = ir_build_coro_save(irb, child_scope, node, irb->exec->coro_handle);
- ir_gen_node(irb, node->data.suspend.block, child_scope);
- suspend_code = ir_mark_gen(ir_build_coro_suspend(irb, parent_scope, node, save_token, const_bool_false));
+ Scope *child_scope = &suspend_scope->base;
+ IrInstruction *susp_res = ir_gen_node(irb, node->data.suspend.block, child_scope);
+ ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, node->data.suspend.block, susp_res));
}
-    IrInstructionSwitchBrCase *cases = allocate<IrInstructionSwitchBrCase>(2);
- cases[0].value = ir_mark_gen(ir_build_const_u8(irb, parent_scope, node, 0));
- cases[0].block = resume_block;
- cases[1].value = ir_mark_gen(ir_build_const_u8(irb, parent_scope, node, 1));
- cases[1].block = canceled_block;
- IrInstructionSwitchBr *switch_br = ir_build_switch_br(irb, parent_scope, node, suspend_code,
- irb->exec->coro_suspend_block, 2, cases, const_bool_false, nullptr);
- ir_mark_gen(&switch_br->base);
-
- ir_set_cursor_at_end_and_append_block(irb, cleanup_block);
-    IrBasicBlock **incoming_blocks = allocate<IrBasicBlock *>(2);
-    IrInstruction **incoming_values = allocate<IrInstruction *>(2);
- incoming_blocks[0] = post_canceled_block;
- incoming_values[0] = const_bool_true;
- incoming_blocks[1] = post_cancel_awaiter_block;
- incoming_values[1] = const_bool_false;
- IrInstruction *destroy_ourselves = ir_build_phi(irb, parent_scope, node, 2, incoming_blocks, incoming_values,
- nullptr);
- ir_gen_defers_for_block(irb, parent_scope, outer_scope, true);
- ir_mark_gen(ir_build_cond_br(irb, parent_scope, node, destroy_ourselves, irb->exec->coro_final_cleanup_block, irb->exec->coro_early_final, const_bool_false));
-
- ir_set_cursor_at_end_and_append_block(irb, resume_block);
- return ir_mark_gen(ir_build_const_void(irb, parent_scope, node));
+ return ir_mark_gen(ir_build_suspend_finish(irb, parent_scope, node, begin));
}
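+// suspend lowers to a begin/finish pair; a `suspend { ... }` block body runs in
+// between, inside a ScopeSuspend that rejects nested suspend and await, and is
+// checked to be a void expression. Illustrative Zig (hypothetical names):
+//
+//     suspend;                              // no block
+//     suspend { global_frame = @frame(); }  // block form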
static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scope,
@@ -8523,8 +8006,8 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
return ir_lval_wrap(irb, scope, ir_gen_array_type(irb, scope, node), lval, result_loc);
case NodeTypePointerType:
return ir_lval_wrap(irb, scope, ir_gen_pointer_type(irb, scope, node), lval, result_loc);
- case NodeTypePromiseType:
- return ir_lval_wrap(irb, scope, ir_gen_promise_type(irb, scope, node), lval, result_loc);
+ case NodeTypeAnyFrameType:
+ return ir_lval_wrap(irb, scope, ir_gen_anyframe_type(irb, scope, node), lval, result_loc);
case NodeTypeStringLiteral:
return ir_lval_wrap(irb, scope, ir_gen_string_literal(irb, scope, node), lval, result_loc);
case NodeTypeUndefinedLiteral:
@@ -8561,12 +8044,10 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
return ir_lval_wrap(irb, scope, ir_gen_fn_proto(irb, scope, node), lval, result_loc);
case NodeTypeErrorSetDecl:
return ir_lval_wrap(irb, scope, ir_gen_err_set_decl(irb, scope, node), lval, result_loc);
- case NodeTypeCancel:
- return ir_lval_wrap(irb, scope, ir_gen_cancel(irb, scope, node), lval, result_loc);
case NodeTypeResume:
return ir_lval_wrap(irb, scope, ir_gen_resume(irb, scope, node), lval, result_loc);
case NodeTypeAwaitExpr:
- return ir_lval_wrap(irb, scope, ir_gen_await_expr(irb, scope, node), lval, result_loc);
+ return ir_gen_await_expr(irb, scope, node, lval, result_loc);
case NodeTypeSuspend:
return ir_lval_wrap(irb, scope, ir_gen_suspend(irb, scope, node), lval, result_loc);
case NodeTypeEnumLiteral:
@@ -8626,235 +8107,22 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
irb->codegen = codegen;
irb->exec = ir_executable;
+ irb->main_block_node = node;
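+    // main_block_node lets ir_gen_block recognize the function's top-level block
+    // and emit the implicit return there (see is_return_from_fn in ir_gen_block).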
IrBasicBlock *entry_block = ir_create_basic_block(irb, scope, "Entry");
ir_set_cursor_at_end_and_append_block(irb, entry_block);
// Entry block gets a reference because we enter it to begin.
ir_ref_bb(irb->current_basic_block);
- ZigFn *fn_entry = exec_fn_entry(irb->exec);
-
- bool is_async = fn_entry != nullptr && fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync;
- IrInstruction *coro_id;
- IrInstruction *u8_ptr_type;
- IrInstruction *const_bool_false;
- IrInstruction *coro_promise_ptr;
- IrInstruction *err_ret_trace_ptr;
- ZigType *return_type;
- Buf *result_ptr_field_name;
- ZigVar *coro_size_var;
- if (is_async) {
- // create the coro promise
- Scope *coro_scope = create_coro_prelude_scope(irb->codegen, node, scope);
- const_bool_false = ir_build_const_bool(irb, coro_scope, node, false);
- ZigVar *promise_var = ir_create_var(irb, node, coro_scope, nullptr, false, false, true, const_bool_false);
-
- return_type = fn_entry->type_entry->data.fn.fn_type_id.return_type;
- IrInstruction *undef = ir_build_const_undefined(irb, coro_scope, node);
- // TODO mark this var decl as "no safety" e.g. disable initializing the undef value to 0xaa
- ZigType *coro_frame_type = get_promise_frame_type(irb->codegen, return_type);
- IrInstruction *coro_frame_type_value = ir_build_const_type(irb, coro_scope, node, coro_frame_type);
- IrInstruction *undef_coro_frame = ir_build_implicit_cast(irb, coro_scope, node, coro_frame_type_value, undef, nullptr);
- build_decl_var_and_init(irb, coro_scope, node, promise_var, undef_coro_frame, "promise", const_bool_false);
- coro_promise_ptr = ir_build_var_ptr(irb, coro_scope, node, promise_var);
-
- ZigVar *await_handle_var = ir_create_var(irb, node, coro_scope, nullptr, false, false, true, const_bool_false);
- IrInstruction *null_value = ir_build_const_null(irb, coro_scope, node);
- IrInstruction *await_handle_type_val = ir_build_const_type(irb, coro_scope, node,
- get_optional_type(irb->codegen, irb->codegen->builtin_types.entry_promise));
- IrInstruction *null_await_handle = ir_build_implicit_cast(irb, coro_scope, node, await_handle_type_val, null_value, nullptr);
- build_decl_var_and_init(irb, coro_scope, node, await_handle_var, null_await_handle, "await_handle", const_bool_false);
- irb->exec->await_handle_var_ptr = ir_build_var_ptr(irb, coro_scope, node, await_handle_var);
-
- u8_ptr_type = ir_build_const_type(irb, coro_scope, node,
- get_pointer_to_type(irb->codegen, irb->codegen->builtin_types.entry_u8, false));
- IrInstruction *promise_as_u8_ptr = ir_build_ptr_cast_src(irb, coro_scope, node, u8_ptr_type,
- coro_promise_ptr, false);
- coro_id = ir_build_coro_id(irb, coro_scope, node, promise_as_u8_ptr);
- coro_size_var = ir_create_var(irb, node, coro_scope, nullptr, false, false, true, const_bool_false);
- IrInstruction *coro_size = ir_build_coro_size(irb, coro_scope, node);
- build_decl_var_and_init(irb, coro_scope, node, coro_size_var, coro_size, "coro_size", const_bool_false);
- IrInstruction *implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, coro_scope, node,
- ImplicitAllocatorIdArg);
- irb->exec->coro_allocator_var = ir_create_var(irb, node, coro_scope, nullptr, true, true, true, const_bool_false);
- build_decl_var_and_init(irb, coro_scope, node, irb->exec->coro_allocator_var, implicit_allocator_ptr,
- "allocator", const_bool_false);
- Buf *realloc_field_name = buf_create_from_str(ASYNC_REALLOC_FIELD_NAME);
- IrInstruction *realloc_fn_ptr = ir_build_field_ptr(irb, coro_scope, node, implicit_allocator_ptr, realloc_field_name, false);
- IrInstruction *realloc_fn = ir_build_load_ptr(irb, coro_scope, node, realloc_fn_ptr);
- IrInstruction *maybe_coro_mem_ptr = ir_build_coro_alloc_helper(irb, coro_scope, node, realloc_fn, coro_size);
- IrInstruction *alloc_result_is_ok = ir_build_test_nonnull(irb, coro_scope, node, maybe_coro_mem_ptr);
- IrBasicBlock *alloc_err_block = ir_create_basic_block(irb, coro_scope, "AllocError");
- IrBasicBlock *alloc_ok_block = ir_create_basic_block(irb, coro_scope, "AllocOk");
- ir_build_cond_br(irb, coro_scope, node, alloc_result_is_ok, alloc_ok_block, alloc_err_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, alloc_err_block);
- // we can return undefined here, because the caller passes a pointer to the error struct field
- // in the error union result, and we populate it in case of allocation failure.
- ir_build_return(irb, coro_scope, node, undef);
-
- ir_set_cursor_at_end_and_append_block(irb, alloc_ok_block);
- IrInstruction *coro_mem_ptr = ir_build_ptr_cast_src(irb, coro_scope, node, u8_ptr_type, maybe_coro_mem_ptr,
- false);
- irb->exec->coro_handle = ir_build_coro_begin(irb, coro_scope, node, coro_id, coro_mem_ptr);
-
- Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
- irb->exec->atomic_state_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
- atomic_state_field_name, false);
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
- ir_build_store_ptr(irb, scope, node, irb->exec->atomic_state_field_ptr, zero);
- Buf *result_field_name = buf_create_from_str(RESULT_FIELD_NAME);
- irb->exec->coro_result_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name, false);
- result_ptr_field_name = buf_create_from_str(RESULT_PTR_FIELD_NAME);
- irb->exec->coro_result_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_ptr_field_name, false);
- ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_ptr_field_ptr, irb->exec->coro_result_field_ptr);
- if (irb->codegen->have_err_ret_tracing) {
- // initialize the error return trace
- Buf *return_addresses_field_name = buf_create_from_str(RETURN_ADDRESSES_FIELD_NAME);
- IrInstruction *return_addresses_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, return_addresses_field_name, false);
-
- Buf *err_ret_trace_field_name = buf_create_from_str(ERR_RET_TRACE_FIELD_NAME);
- err_ret_trace_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_field_name, false);
- ir_build_mark_err_ret_trace_ptr(irb, scope, node, err_ret_trace_ptr);
-
- // coordinate with builtin.zig
- Buf *index_name = buf_create_from_str("index");
- IrInstruction *index_ptr = ir_build_field_ptr(irb, scope, node, err_ret_trace_ptr, index_name, false);
- ir_build_store_ptr(irb, scope, node, index_ptr, zero);
-
- Buf *instruction_addresses_name = buf_create_from_str("instruction_addresses");
- IrInstruction *addrs_slice_ptr = ir_build_field_ptr(irb, scope, node, err_ret_trace_ptr, instruction_addresses_name, false);
-
- IrInstruction *slice_value = ir_build_slice_src(irb, scope, node, return_addresses_ptr, zero, nullptr, false, no_result_loc());
- ir_build_store_ptr(irb, scope, node, addrs_slice_ptr, slice_value);
- }
-
-
- irb->exec->coro_early_final = ir_create_basic_block(irb, scope, "CoroEarlyFinal");
- irb->exec->coro_normal_final = ir_create_basic_block(irb, scope, "CoroNormalFinal");
- irb->exec->coro_suspend_block = ir_create_basic_block(irb, scope, "Suspend");
- irb->exec->coro_final_cleanup_block = ir_create_basic_block(irb, scope, "FinalCleanup");
- }
-
IrInstruction *result = ir_gen_node_extra(irb, node, scope, LValNone, nullptr);
assert(result);
if (irb->exec->invalid)
return false;
if (!instr_is_unreachable(result)) {
+ ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, result->source_node, result));
    // no need for save_err_ret_addr because this cannot return an error
- ir_gen_async_return(irb, scope, result->source_node, result, true);
- }
-
- if (is_async) {
- IrBasicBlock *invalid_resume_block = ir_create_basic_block(irb, scope, "InvalidResume");
- IrBasicBlock *check_free_block = ir_create_basic_block(irb, scope, "CheckFree");
-
- ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_early_final);
- IrInstruction *const_bool_true = ir_build_const_bool(irb, scope, node, true);
- IrInstruction *suspend_code = ir_build_coro_suspend(irb, scope, node, nullptr, const_bool_true);
-        IrInstructionSwitchBrCase *cases = allocate<IrInstructionSwitchBrCase>(2);
- cases[0].value = ir_build_const_u8(irb, scope, node, 0);
- cases[0].block = invalid_resume_block;
- cases[1].value = ir_build_const_u8(irb, scope, node, 1);
- cases[1].block = irb->exec->coro_final_cleanup_block;
- ir_build_switch_br(irb, scope, node, suspend_code, irb->exec->coro_suspend_block, 2, cases, const_bool_false, nullptr);
-
- ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_suspend_block);
- ir_build_coro_end(irb, scope, node);
- ir_build_return(irb, scope, node, irb->exec->coro_handle);
-
- ir_set_cursor_at_end_and_append_block(irb, invalid_resume_block);
- ir_build_unreachable(irb, scope, node);
-
- ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_normal_final);
- if (type_has_bits(return_type)) {
- IrInstruction *u8_ptr_type_unknown_len = ir_build_const_type(irb, scope, node,
- get_pointer_to_type_extra(irb->codegen, irb->codegen->builtin_types.entry_u8,
- false, false, PtrLenUnknown, 0, 0, 0, false));
- IrInstruction *result_ptr = ir_build_load_ptr(irb, scope, node, irb->exec->coro_result_ptr_field_ptr);
- IrInstruction *result_ptr_as_u8_ptr = ir_build_ptr_cast_src(irb, scope, node, u8_ptr_type_unknown_len,
- result_ptr, false);
- IrInstruction *return_value_ptr_as_u8_ptr = ir_build_ptr_cast_src(irb, scope, node,
- u8_ptr_type_unknown_len, irb->exec->coro_result_field_ptr, false);
- IrInstruction *return_type_inst = ir_build_const_type(irb, scope, node,
- fn_entry->type_entry->data.fn.fn_type_id.return_type);
- IrInstruction *size_of_ret_val = ir_build_size_of(irb, scope, node, return_type_inst);
- ir_build_memcpy(irb, scope, node, result_ptr_as_u8_ptr, return_value_ptr_as_u8_ptr, size_of_ret_val);
- }
- if (irb->codegen->have_err_ret_tracing) {
- Buf *err_ret_trace_ptr_field_name = buf_create_from_str(ERR_RET_TRACE_PTR_FIELD_NAME);
- IrInstruction *err_ret_trace_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr_field_name, false);
- IrInstruction *dest_err_ret_trace_ptr = ir_build_load_ptr(irb, scope, node, err_ret_trace_ptr_field_ptr);
- ir_build_merge_err_ret_traces(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr, dest_err_ret_trace_ptr);
- }
- // Before we destroy the coroutine frame, we need to load the target promise into
- // a register or local variable which does not get spilled into the frame,
- // otherwise llvm tries to access memory inside the destroyed frame.
- IrInstruction *unwrapped_await_handle_ptr = ir_build_optional_unwrap_ptr(irb, scope, node,
- irb->exec->await_handle_var_ptr, false, false);
- IrInstruction *await_handle_in_block = ir_build_load_ptr(irb, scope, node, unwrapped_await_handle_ptr);
- ir_build_br(irb, scope, node, check_free_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_final_cleanup_block);
- ir_build_br(irb, scope, node, check_free_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, check_free_block);
-        IrBasicBlock **incoming_blocks = allocate<IrBasicBlock *>(2);
-        IrInstruction **incoming_values = allocate<IrInstruction *>(2);
- incoming_blocks[0] = irb->exec->coro_final_cleanup_block;
- incoming_values[0] = const_bool_false;
- incoming_blocks[1] = irb->exec->coro_normal_final;
- incoming_values[1] = const_bool_true;
- IrInstruction *resume_awaiter = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values, nullptr);
-
-        IrBasicBlock **merge_incoming_blocks = allocate<IrBasicBlock *>(2);
-        IrInstruction **merge_incoming_values = allocate<IrInstruction *>(2);
- merge_incoming_blocks[0] = irb->exec->coro_final_cleanup_block;
- merge_incoming_values[0] = ir_build_const_undefined(irb, scope, node);
- merge_incoming_blocks[1] = irb->exec->coro_normal_final;
- merge_incoming_values[1] = await_handle_in_block;
- IrInstruction *awaiter_handle = ir_build_phi(irb, scope, node, 2, merge_incoming_blocks, merge_incoming_values, nullptr);
-
- Buf *shrink_field_name = buf_create_from_str(ASYNC_SHRINK_FIELD_NAME);
- IrInstruction *implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, scope, node,
- ImplicitAllocatorIdLocalVar);
- IrInstruction *shrink_fn_ptr = ir_build_field_ptr(irb, scope, node, implicit_allocator_ptr, shrink_field_name, false);
- IrInstruction *shrink_fn = ir_build_load_ptr(irb, scope, node, shrink_fn_ptr);
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
- IrInstruction *coro_mem_ptr_maybe = ir_build_coro_free(irb, scope, node, coro_id, irb->exec->coro_handle);
- IrInstruction *u8_ptr_type_unknown_len = ir_build_const_type(irb, scope, node,
- get_pointer_to_type_extra(irb->codegen, irb->codegen->builtin_types.entry_u8,
- false, false, PtrLenUnknown, 0, 0, 0, false));
- IrInstruction *coro_mem_ptr = ir_build_ptr_cast_src(irb, scope, node, u8_ptr_type_unknown_len,
- coro_mem_ptr_maybe, false);
- IrInstruction *coro_mem_ptr_ref = ir_build_ref(irb, scope, node, coro_mem_ptr, true, false);
- IrInstruction *coro_size_ptr = ir_build_var_ptr(irb, scope, node, coro_size_var);
- IrInstruction *coro_size = ir_build_load_ptr(irb, scope, node, coro_size_ptr);
- IrInstruction *mem_slice = ir_build_slice_src(irb, scope, node, coro_mem_ptr_ref, zero, coro_size, false,
- no_result_loc());
- size_t arg_count = 5;
- IrInstruction **args = allocate(arg_count);
- args[0] = implicit_allocator_ptr; // self
- args[1] = mem_slice; // old_mem
- args[2] = ir_build_const_usize(irb, scope, node, 8); // old_align
- // TODO: intentional memory leak here. If this is set to 0 then there is an issue where a coroutine
- // calls the function and it frees its own stack frame, but then the return value is a slice, which
- // is implemented as an sret struct. writing to the return pointer causes invalid memory write.
- // We could work around it by having a global helper function which has a void return type
- // and calling that instead. But instead this hack will suffice until I rework coroutines to be
- // non-allocating. Basically coroutines are not supported right now until they are reworked.
- args[3] = ir_build_const_usize(irb, scope, node, 1); // new_size
- args[4] = ir_build_const_usize(irb, scope, node, 1); // new_align
- ir_build_call_src(irb, scope, node, nullptr, shrink_fn, arg_count, args, false, FnInlineAuto, false, nullptr,
- nullptr, no_result_loc());
-
- IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "Resume");
- ir_build_cond_br(irb, scope, node, resume_awaiter, resume_block, irb->exec->coro_suspend_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, resume_block);
- ir_gen_resume_target(irb, scope, node, awaiter_handle);
- ir_build_br(irb, scope, node, irb->exec->coro_suspend_block, const_bool_false);
+ ir_mark_gen(ir_build_return(irb, scope, result->source_node, result));
}
return true;
@@ -8871,18 +8139,24 @@ bool ir_gen_fn(CodeGen *codegen, ZigFn *fn_entry) {
return ir_gen(codegen, body_node, fn_entry->child_scope, ir_executable);
}
-static void add_call_stack_errors(CodeGen *codegen, IrExecutable *exec, ErrorMsg *err_msg, int limit) {
+static void ir_add_call_stack_errors(CodeGen *codegen, IrExecutable *exec, ErrorMsg *err_msg, int limit) {
if (!exec || !exec->source_node || limit < 0) return;
add_error_note(codegen, err_msg, exec->source_node, buf_sprintf("called from here"));
- add_call_stack_errors(codegen, exec->parent_exec, err_msg, limit - 1);
+ ir_add_call_stack_errors(codegen, exec->parent_exec, err_msg, limit - 1);
+}
+
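+// Adds a note pointing at the instruction currently being analyzed, followed by
+// the comptime call stack that led here.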
+void ir_add_analysis_trace(IrAnalyze *ira, ErrorMsg *err_msg, Buf *text) {
+ IrInstruction *old_instruction = ira->old_irb.current_basic_block->instruction_list.at(ira->instruction_index);
+ add_error_note(ira->codegen, err_msg, old_instruction->source_node, text);
+ ir_add_call_stack_errors(ira->codegen, ira->new_irb.exec, err_msg, 10);
}
static ErrorMsg *exec_add_error_node(CodeGen *codegen, IrExecutable *exec, AstNode *source_node, Buf *msg) {
invalidate_exec(exec);
ErrorMsg *err_msg = add_node_error(codegen, source_node, msg);
if (exec->parent_exec) {
- add_call_stack_errors(codegen, exec, err_msg, 10);
+ ir_add_call_stack_errors(codegen, exec, err_msg, 10);
}
return err_msg;
}
@@ -8946,13 +8220,13 @@ static ConstExprValue *ir_exec_const_result(CodeGen *codegen, IrExecutable *exec
IrInstruction *instruction = bb->instruction_list.at(i);
if (instruction->id == IrInstructionIdReturn) {
IrInstructionReturn *ret_inst = (IrInstructionReturn *)instruction;
- IrInstruction *value = ret_inst->value;
- if (value->value.special == ConstValSpecialRuntime) {
- exec_add_error_node(codegen, exec, value->source_node,
+ IrInstruction *operand = ret_inst->operand;
+ if (operand->value.special == ConstValSpecialRuntime) {
+ exec_add_error_node(codegen, exec, operand->source_node,
buf_sprintf("unable to evaluate constant expression"));
return &codegen->invalid_instruction->value;
}
- return &value->value;
+ return &operand->value;
} else if (ir_has_side_effects(instruction)) {
if (instr_is_comptime(instruction)) {
switch (instruction->id) {
@@ -9713,6 +8987,9 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, IrInstruction *instruc
bool const_val_is_float = (const_val->type->id == ZigTypeIdFloat || const_val->type->id == ZigTypeIdComptimeFloat);
assert(const_val_is_int || const_val_is_float);
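+    // A comptime-known integer can always be represented as a comptime_float.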
+ if (const_val_is_int && other_type->id == ZigTypeIdComptimeFloat) {
+ return true;
+ }
if (other_type->id == ZigTypeIdFloat) {
if (const_val->type->id == ZigTypeIdComptimeInt || const_val->type->id == ZigTypeIdComptimeFloat) {
return true;
@@ -10200,12 +9477,6 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, ZigType *wanted
return result;
}
- if (wanted_type == ira->codegen->builtin_types.entry_promise &&
- actual_type->id == ZigTypeIdPromise)
- {
- return result;
- }
-
// fn
if (wanted_type->id == ZigTypeIdFn &&
actual_type->id == ZigTypeIdFn)
@@ -10240,20 +9511,6 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, ZigType *wanted
return result;
}
}
- if (!wanted_type->data.fn.is_generic && wanted_type->data.fn.fn_type_id.cc == CallingConventionAsync) {
- ConstCastOnly child = types_match_const_cast_only(ira,
- actual_type->data.fn.fn_type_id.async_allocator_type,
- wanted_type->data.fn.fn_type_id.async_allocator_type,
- source_node, false);
- if (child.id == ConstCastResultIdInvalid)
- return child;
- if (child.id != ConstCastResultIdOk) {
- result.id = ConstCastResultIdAsyncAllocatorType;
- result.data.async_allocator_type = allocate_nonzero(1);
- *result.data.async_allocator_type = child;
- return result;
- }
- }
if (wanted_type->data.fn.fn_type_id.param_count != actual_type->data.fn.fn_type_id.param_count) {
result.id = ConstCastResultIdFnArgCount;
return result;
@@ -10358,6 +9615,10 @@ static ZigType *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, ZigT
return cur_type;
}
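+    // Identical types trivially resolve to themselves; skip the pairwise checks.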
+ if (prev_type == cur_type) {
+ continue;
+ }
+
if (prev_type->id == ZigTypeIdUnreachable) {
prev_inst = cur_inst;
continue;
@@ -10558,6 +9819,8 @@ static ZigType *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, ZigT
ZigType *prev_err_set_type = (err_set_type == nullptr) ? prev_type->data.error_union.err_set_type : err_set_type;
ZigType *cur_err_set_type = cur_type->data.error_union.err_set_type;
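+        // Both sides use the same error set; nothing to resolve or merge.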
+ if (prev_err_set_type == cur_err_set_type)
+ continue;
if (!resolve_inferred_error_set(ira->codegen, prev_err_set_type, cur_inst->source_node)) {
return ira->codegen->builtin_types.entry_invalid;
@@ -11203,7 +10466,7 @@ static IrBasicBlock *ir_get_new_bb_runtime(IrAnalyze *ira, IrBasicBlock *old_bb,
}
static void ir_start_bb(IrAnalyze *ira, IrBasicBlock *old_bb, IrBasicBlock *const_predecessor_bb) {
- ir_assert(!old_bb->suspended, old_bb->instruction_list.at(0));
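+    // The block may be empty; only pass an instruction for error reporting when one exists.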
+ ir_assert(!old_bb->suspended, (old_bb->instruction_list.length != 0) ? old_bb->instruction_list.at(0) : nullptr);
ira->instruction_index = 0;
ira->old_irb.current_basic_block = old_bb;
ira->const_predecessor_bb = const_predecessor_bb;
@@ -11726,6 +10989,33 @@ static IrInstruction *ir_analyze_err_set_cast(IrAnalyze *ira, IrInstruction *sou
return result;
}
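+// Lowers a cast from *@Frame(func) to anyframe->T (or anyframe).
+// The runtime representation is the same pointer, so a bitcast suffices.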
+static IrInstruction *ir_analyze_frame_ptr_to_anyframe(IrAnalyze *ira, IrInstruction *source_instr,
+ IrInstruction *value, ZigType *wanted_type)
+{
+ if (instr_is_comptime(value)) {
+ zig_panic("TODO comptime frame pointer");
+ }
+
+ IrInstruction *result = ir_build_cast(&ira->new_irb, source_instr->scope, source_instr->source_node,
+ wanted_type, value, CastOpBitCast);
+ result->value.type = wanted_type;
+ return result;
+}
+
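+// Lowers a cast from anyframe->T to plain anyframe; likewise a pointer-preserving bitcast.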
+static IrInstruction *ir_analyze_anyframe_to_anyframe(IrAnalyze *ira, IrInstruction *source_instr,
+ IrInstruction *value, ZigType *wanted_type)
+{
+ if (instr_is_comptime(value)) {
+ zig_panic("TODO comptime anyframe->T to anyframe");
+ }
+
+ IrInstruction *result = ir_build_cast(&ira->new_irb, source_instr->scope, source_instr->source_node,
+ wanted_type, value, CastOpBitCast);
+ result->value.type = wanted_type;
+ return result;
+}
+
static IrInstruction *ir_analyze_err_wrap_code(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value,
ZigType *wanted_type, ResultLoc *result_loc)
{
@@ -11935,7 +11225,6 @@ static IrInstruction *ir_analyze_enum_to_int(IrAnalyze *ira, IrInstruction *sour
if (enum_type->data.enumeration.layout == ContainerLayoutAuto &&
enum_type->data.enumeration.src_field_count == 1)
{
- assert(tag_type == ira->codegen->builtin_types.entry_num_lit_int);
IrInstruction *result = ir_const(ira, source_instr, tag_type);
init_const_bigint(&result->value, tag_type,
&enum_type->data.enumeration.fields[0].value);
@@ -12574,12 +11863,10 @@ static IrInstruction *ir_analyze_int_to_c_ptr(IrAnalyze *ira, IrInstruction *sou
static bool is_pointery_and_elem_is_not_pointery(ZigType *ty) {
if (ty->id == ZigTypeIdPointer) return ty->data.pointer.child_type->id != ZigTypeIdPointer;
if (ty->id == ZigTypeIdFn) return true;
- if (ty->id == ZigTypeIdPromise) return true;
if (ty->id == ZigTypeIdOptional) {
ZigType *ptr_ty = ty->data.maybe.child_type;
if (ptr_ty->id == ZigTypeIdPointer) return ptr_ty->data.pointer.child_type->id != ZigTypeIdPointer;
if (ptr_ty->id == ZigTypeIdFn) return true;
- if (ptr_ty->id == ZigTypeIdPromise) return true;
}
return false;
}
@@ -12827,6 +12114,30 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
+ // *@Frame(func) to anyframe->T or anyframe
+ if (actual_type->id == ZigTypeIdPointer && actual_type->data.pointer.ptr_len == PtrLenSingle &&
+ !actual_type->data.pointer.is_const &&
+ actual_type->data.pointer.child_type->id == ZigTypeIdFnFrame && wanted_type->id == ZigTypeIdAnyFrame)
+ {
+ bool ok = true;
+ if (wanted_type->data.any_frame.result_type != nullptr) {
+ ZigFn *fn = actual_type->data.pointer.child_type->data.frame.fn;
+ ZigType *fn_return_type = fn->type_entry->data.fn.fn_type_id.return_type;
+ if (wanted_type->data.any_frame.result_type != fn_return_type) {
+ ok = false;
+ }
+ }
+ if (ok) {
+ return ir_analyze_frame_ptr_to_anyframe(ira, source_instr, value, wanted_type);
+ }
+ }
+
+ // anyframe->T to anyframe
+ if (actual_type->id == ZigTypeIdAnyFrame && actual_type->data.any_frame.result_type != nullptr &&
+ wanted_type->id == ZigTypeIdAnyFrame && wanted_type->data.any_frame.result_type == nullptr)
+ {
+ return ir_analyze_anyframe_to_anyframe(ira, source_instr, value, wanted_type);
+ }
// cast from null literal to maybe type
if (wanted_type->id == ZigTypeIdOptional &&
@@ -13331,11 +12642,11 @@ static IrInstruction *ir_analyze_instruction_add_implicit_return_type(IrAnalyze
}
static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructionReturn *instruction) {
- IrInstruction *value = instruction->value->child;
- if (type_is_invalid(value->value.type))
+ IrInstruction *operand = instruction->operand->child;
+ if (type_is_invalid(operand->value.type))
return ir_unreach_error(ira);
- if (!instr_is_comptime(value) && handle_is_ptr(ira->explicit_return_type)) {
+ if (!instr_is_comptime(operand) && handle_is_ptr(ira->explicit_return_type)) {
// result location mechanism took care of it.
IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope,
instruction->base.source_node, nullptr);
@@ -13343,8 +12654,8 @@ static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructio
return ir_finish_anal(ira, result);
}
- IrInstruction *casted_value = ir_implicit_cast(ira, value, ira->explicit_return_type);
- if (type_is_invalid(casted_value->value.type)) {
+ IrInstruction *casted_operand = ir_implicit_cast(ira, operand, ira->explicit_return_type);
+ if (type_is_invalid(casted_operand->value.type)) {
AstNode *source_node = ira->explicit_return_type_source_node;
if (source_node != nullptr) {
ErrorMsg *msg = ira->codegen->errors.last();
@@ -13354,15 +12665,16 @@ static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructio
return ir_unreach_error(ira);
}
- if (casted_value->value.special == ConstValSpecialRuntime &&
- casted_value->value.type->id == ZigTypeIdPointer &&
- casted_value->value.data.rh_ptr == RuntimeHintPtrStack)
+ if (casted_operand->value.special == ConstValSpecialRuntime &&
+ casted_operand->value.type->id == ZigTypeIdPointer &&
+ casted_operand->value.data.rh_ptr == RuntimeHintPtrStack)
{
- ir_add_error(ira, casted_value, buf_sprintf("function returns address of local variable"));
+ ir_add_error(ira, casted_operand, buf_sprintf("function returns address of local variable"));
return ir_unreach_error(ira);
}
+
IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, casted_value);
+ instruction->base.source_node, casted_operand);
result->value.type = ira->codegen->builtin_types.entry_unreachable;
return ir_finish_anal(ira, result);
}
@@ -13656,9 +12968,9 @@ static IrInstruction *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp *
case ZigTypeIdOpaque:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
case ZigTypeIdEnum:
case ZigTypeIdEnumLiteral:
+ case ZigTypeIdAnyFrame:
operator_allowed = is_equality_cmp;
break;
@@ -13673,6 +12985,7 @@ static IrInstruction *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp *
case ZigTypeIdNull:
case ZigTypeIdErrorUnion:
case ZigTypeIdUnion:
+ case ZigTypeIdFnFrame:
operator_allowed = false;
break;
case ZigTypeIdOptional:
@@ -15037,7 +14350,8 @@ static IrInstruction *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructio
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
ir_add_error(ira, target,
buf_sprintf("invalid export target '%s'", buf_ptr(&type_value->name)));
break;
@@ -15061,8 +14375,9 @@ static IrInstruction *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructio
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
case ZigTypeIdEnumLiteral:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
ir_add_error(ira, target,
buf_sprintf("invalid export target type '%s'", buf_ptr(&target->value.type->name)));
break;
@@ -15089,8 +14404,8 @@ static bool exec_has_err_ret_trace(CodeGen *g, IrExecutable *exec) {
static IrInstruction *ir_analyze_instruction_error_return_trace(IrAnalyze *ira,
IrInstructionErrorReturnTrace *instruction)
{
+ ZigType *ptr_to_stack_trace_type = get_pointer_to_type(ira->codegen, get_stack_trace_type(ira->codegen), false);
if (instruction->optional == IrInstructionErrorReturnTrace::Null) {
- ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(ira->codegen);
ZigType *optional_type = get_optional_type(ira->codegen, ptr_to_stack_trace_type);
if (!exec_has_err_ret_trace(ira->codegen, ira->new_irb.exec)) {
IrInstruction *result = ir_const(ira, &instruction->base, optional_type);
@@ -15108,7 +14423,7 @@ static IrInstruction *ir_analyze_instruction_error_return_trace(IrAnalyze *ira,
assert(ira->codegen->have_err_ret_tracing);
IrInstruction *new_instruction = ir_build_error_return_trace(&ira->new_irb, instruction->base.scope,
instruction->base.source_node, instruction->optional);
- new_instruction->value.type = get_ptr_to_stack_trace_type(ira->codegen);
+ new_instruction->value.type = ptr_to_stack_trace_type;
return new_instruction;
}
}
@@ -15140,42 +14455,6 @@ static IrInstruction *ir_analyze_instruction_error_union(IrAnalyze *ira,
return ir_const_type(ira, &instruction->base, result_type);
}
-IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_instr, ImplicitAllocatorId id) {
- ZigFn *parent_fn_entry = exec_fn_entry(ira->new_irb.exec);
- if (parent_fn_entry == nullptr) {
- ir_add_error(ira, source_instr, buf_sprintf("no implicit allocator available"));
- return ira->codegen->invalid_instruction;
- }
-
- FnTypeId *parent_fn_type = &parent_fn_entry->type_entry->data.fn.fn_type_id;
- if (parent_fn_type->cc != CallingConventionAsync) {
- ir_add_error(ira, source_instr, buf_sprintf("async function call from non-async caller requires allocator parameter"));
- return ira->codegen->invalid_instruction;
- }
-
- assert(parent_fn_type->async_allocator_type != nullptr);
-
- switch (id) {
- case ImplicitAllocatorIdArg:
- {
- IrInstruction *result = ir_build_get_implicit_allocator(&ira->new_irb, source_instr->scope,
- source_instr->source_node, ImplicitAllocatorIdArg);
- result->value.type = parent_fn_type->async_allocator_type;
- return result;
- }
- case ImplicitAllocatorIdLocalVar:
- {
- ZigVar *coro_allocator_var = ira->old_irb.exec->coro_allocator_var;
- assert(coro_allocator_var != nullptr);
- IrInstruction *var_ptr_inst = ir_get_var_ptr(ira, source_instr, coro_allocator_var);
- IrInstruction *result = ir_get_deref(ira, source_instr, var_ptr_inst, nullptr);
- assert(result->value.type != nullptr);
- return result;
- }
- }
- zig_unreachable();
-}
-
static IrInstruction *ir_analyze_alloca(IrAnalyze *ira, IrInstruction *source_inst, ZigType *var_type,
uint32_t align, const char *name_hint, bool force_comptime)
{
@@ -15184,7 +14463,7 @@ static IrInstruction *ir_analyze_alloca(IrAnalyze *ira, IrInstruction *source_in
ConstExprValue *pointee = create_const_vals(1);
pointee->special = ConstValSpecialUndef;
- IrInstructionAllocaGen *result = ir_create_alloca_gen(ira, source_inst, align, name_hint);
+ IrInstructionAllocaGen *result = ir_build_alloca_gen(ira, source_inst, align, name_hint);
result->base.value.special = ConstValSpecialStatic;
result->base.value.data.x_ptr.special = ConstPtrSpecialRef;
result->base.value.data.x_ptr.mut = force_comptime ? ConstPtrMutComptimeVar : ConstPtrMutInfer;
@@ -15281,7 +14560,7 @@ static IrInstruction *ir_resolve_result_raw(IrAnalyze *ira, IrInstruction *suspe
return nullptr;
}
// need to return a result location and don't have one. use a stack allocation
- IrInstructionAllocaGen *alloca_gen = ir_create_alloca_gen(ira, suspend_source_instr, 0, "");
+ IrInstructionAllocaGen *alloca_gen = ir_build_alloca_gen(ira, suspend_source_instr, 0, "");
if ((err = type_resolve(ira->codegen, value_type, ResolveStatusZeroBitsKnown)))
return ira->codegen->invalid_instruction;
alloca_gen->base.value.type = get_pointer_to_type_extra(ira->codegen, value_type, false, false,
@@ -15351,8 +14630,12 @@ static IrInstruction *ir_resolve_result_raw(IrAnalyze *ira, IrInstruction *suspe
if ((err = type_resolve(ira->codegen, ira->explicit_return_type, ResolveStatusZeroBitsKnown))) {
return ira->codegen->invalid_instruction;
}
- if (!type_has_bits(ira->explicit_return_type) || !handle_is_ptr(ira->explicit_return_type))
- return nullptr;
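+        // Inferred-async functions still need a memory result location (the result
+        // lives in the coroutine frame), even for types normally returned by value.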
+ if (!type_has_bits(ira->explicit_return_type) || !handle_is_ptr(ira->explicit_return_type)) {
+ ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
+ if (fn_entry == nullptr || fn_entry->inferred_async_node == nullptr) {
+ return nullptr;
+ }
+ }
ZigType *ptr_return_type = get_pointer_to_type(ira->codegen, ira->explicit_return_type, false);
result_loc->written = true;
@@ -15614,48 +14897,43 @@ static IrInstruction *ir_analyze_instruction_reset_result(IrAnalyze *ira, IrInst
static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction, ZigFn *fn_entry,
ZigType *fn_type, IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count,
- IrInstruction *async_allocator_inst)
+ IrInstruction *casted_new_stack)
{
- Buf *realloc_field_name = buf_create_from_str(ASYNC_REALLOC_FIELD_NAME);
- ir_assert(async_allocator_inst->value.type->id == ZigTypeIdPointer, &call_instruction->base);
- ZigType *container_type = async_allocator_inst->value.type->data.pointer.child_type;
- IrInstruction *field_ptr_inst = ir_analyze_container_field_ptr(ira, realloc_field_name, &call_instruction->base,
- async_allocator_inst, container_type, false);
- if (type_is_invalid(field_ptr_inst->value.type)) {
- return ira->codegen->invalid_instruction;
- }
- ZigType *ptr_to_realloc_fn_type = field_ptr_inst->value.type;
- ir_assert(ptr_to_realloc_fn_type->id == ZigTypeIdPointer, &call_instruction->base);
+ if (casted_new_stack != nullptr) {
+        // This is an @asyncCall: the caller has supplied memory for the callee's frame.
- ZigType *realloc_fn_type = ptr_to_realloc_fn_type->data.pointer.child_type;
- if (realloc_fn_type->id != ZigTypeIdFn) {
- ir_add_error(ira, &call_instruction->base,
- buf_sprintf("expected reallocation function, found '%s'", buf_ptr(&realloc_fn_type->name)));
+ if (fn_type->data.fn.fn_type_id.cc != CallingConventionAsync) {
+ ir_add_error(ira, fn_ref,
+ buf_sprintf("expected async function, found '%s'", buf_ptr(&fn_type->name)));
+ return ira->codegen->invalid_instruction;
+ }
+
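+        // @asyncCall passes the result pointer as an extra, trailing argument.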
+ IrInstruction *ret_ptr = call_instruction->args[call_instruction->arg_count]->child;
+ if (type_is_invalid(ret_ptr->value.type))
+ return ira->codegen->invalid_instruction;
+
+ ZigType *anyframe_type = get_any_frame_type(ira->codegen, fn_type->data.fn.fn_type_id.return_type);
+
+ IrInstructionCallGen *call_gen = ir_build_call_gen(ira, &call_instruction->base, nullptr, fn_ref,
+ arg_count, casted_args, FnInlineAuto, true, casted_new_stack, ret_ptr, anyframe_type);
+ return &call_gen->base;
+ } else if (fn_entry == nullptr) {
+ ir_add_error(ira, fn_ref, buf_sprintf("function is not comptime-known; @asyncCall required"));
return ira->codegen->invalid_instruction;
}
- ZigType *realloc_fn_return_type = realloc_fn_type->data.fn.fn_type_id.return_type;
- if (realloc_fn_return_type->id != ZigTypeIdErrorUnion) {
- ir_add_error(ira, fn_ref,
- buf_sprintf("expected allocation function to return error union, but it returns '%s'", buf_ptr(&realloc_fn_return_type->name)));
- return ira->codegen->invalid_instruction;
- }
- ZigType *alloc_fn_error_set_type = realloc_fn_return_type->data.error_union.err_set_type;
- ZigType *return_type = fn_type->data.fn.fn_type_id.return_type;
- ZigType *promise_type = get_promise_type(ira->codegen, return_type);
- ZigType *async_return_type = get_error_union_type(ira->codegen, alloc_fn_error_set_type, promise_type);
-
- IrInstruction *result_loc = ir_resolve_result(ira, &call_instruction->base, no_result_loc(),
- async_return_type, nullptr, true, true, false);
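+    // An `async` call of a comptime-known function allocates the callee's @Frame
+    // through the call's result location.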
+ ZigType *frame_type = get_fn_frame_type(ira->codegen, fn_entry);
+ IrInstruction *result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc,
+ frame_type, nullptr, true, true, false);
if (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)) {
return result_loc;
}
-
- return ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, arg_count,
- casted_args, FnInlineAuto, true, async_allocator_inst, nullptr, result_loc,
- async_return_type);
+ result_loc = ir_implicit_cast(ira, result_loc, get_pointer_to_type(ira->codegen, frame_type, false));
+ if (type_is_invalid(result_loc->value.type))
+ return ira->codegen->invalid_instruction;
+ return &ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, arg_count,
+ casted_args, FnInlineAuto, true, nullptr, result_loc, frame_type)->base;
}
-
static bool ir_analyze_fn_call_inline_arg(IrAnalyze *ira, AstNode *fn_proto_node,
IrInstruction *arg, Scope **exec_scope, size_t *next_proto_i)
{
@@ -16002,20 +15280,6 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
}
return ira->codegen->invalid_instruction;
}
- if (fn_type_id->cc == CallingConventionAsync && !call_instruction->is_async) {
- ErrorMsg *msg = ir_add_error(ira, fn_ref, buf_sprintf("must use async keyword to call async function"));
- if (fn_proto_node) {
- add_error_note(ira->codegen, msg, fn_proto_node, buf_sprintf("declared here"));
- }
- return ira->codegen->invalid_instruction;
- }
- if (fn_type_id->cc != CallingConventionAsync && call_instruction->is_async) {
- ErrorMsg *msg = ir_add_error(ira, fn_ref, buf_sprintf("cannot use async keyword to call non-async function"));
- if (fn_proto_node) {
- add_error_note(ira->codegen, msg, fn_proto_node, buf_sprintf("declared here"));
- }
- return ira->codegen->invalid_instruction;
- }
if (fn_type_id->is_var_args) {
@@ -16352,33 +15616,6 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
break;
}
}
- IrInstruction *async_allocator_inst = nullptr;
- if (call_instruction->is_async) {
- AstNode *async_allocator_type_node = fn_proto_node->data.fn_proto.async_allocator_type;
- if (async_allocator_type_node != nullptr) {
- ZigType *async_allocator_type = ir_analyze_type_expr(ira, impl_fn->child_scope, async_allocator_type_node);
- if (type_is_invalid(async_allocator_type))
- return ira->codegen->invalid_instruction;
- inst_fn_type_id.async_allocator_type = async_allocator_type;
- }
- IrInstruction *uncasted_async_allocator_inst;
- if (call_instruction->async_allocator == nullptr) {
- uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base,
- ImplicitAllocatorIdLocalVar);
- if (type_is_invalid(uncasted_async_allocator_inst->value.type))
- return ira->codegen->invalid_instruction;
- } else {
- uncasted_async_allocator_inst = call_instruction->async_allocator->child;
- if (type_is_invalid(uncasted_async_allocator_inst->value.type))
- return ira->codegen->invalid_instruction;
- }
- if (inst_fn_type_id.async_allocator_type == nullptr) {
- inst_fn_type_id.async_allocator_type = uncasted_async_allocator_inst->value.type;
- }
- async_allocator_inst = ir_implicit_cast(ira, uncasted_async_allocator_inst, inst_fn_type_id.async_allocator_type);
- if (type_is_invalid(async_allocator_inst->value.type))
- return ira->codegen->invalid_instruction;
- }
auto existing_entry = ira->codegen->generic_table.put_unique(generic_id, impl_fn);
if (existing_entry) {
@@ -16405,10 +15642,14 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
if (handle_is_ptr(impl_fn_type_id->return_type)) {
result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc,
impl_fn_type_id->return_type, nullptr, true, true, false);
- if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) ||
- instr_is_unreachable(result_loc)))
- {
- return result_loc;
+ if (result_loc != nullptr) {
+ if (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)) {
+ return result_loc;
+ }
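+            // The result type is returned by value after all; drop the result location.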
+ if (!handle_is_ptr(result_loc->value.type->data.pointer.child_type)) {
+ ir_reset_result(call_instruction->result_loc);
+ result_loc = nullptr;
+ }
}
} else {
result_loc = nullptr;
@@ -16421,17 +15662,23 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
size_t impl_param_count = impl_fn_type_id->param_count;
if (call_instruction->is_async) {
IrInstruction *result = ir_analyze_async_call(ira, call_instruction, impl_fn, impl_fn->type_entry,
- fn_ref, casted_args, impl_param_count, async_allocator_inst);
+ nullptr, casted_args, impl_param_count, casted_new_stack);
return ir_finish_anal(ira, result);
}
- assert(async_allocator_inst == nullptr);
- IrInstruction *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base,
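+    // Calling an async function without `async` makes the caller inferred-async;
+    // remember the call site for diagnostics.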
+ if (impl_fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) {
+ parent_fn_entry->inferred_async_node = fn_ref->source_node;
+ parent_fn_entry->inferred_async_fn = impl_fn;
+ }
+
+ IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base,
impl_fn, nullptr, impl_param_count, casted_args, fn_inline,
- call_instruction->is_async, nullptr, casted_new_stack, result_loc,
+ false, casted_new_stack, result_loc,
impl_fn_type_id->return_type);
- return ir_finish_anal(ira, new_call_instruction);
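+    // Record the call; later passes walk call_list (e.g. to propagate async-ness).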
+ parent_fn_entry->call_list.append(new_call_instruction);
+
+ return ir_finish_anal(ira, &new_call_instruction->base);
}
ZigFn *parent_fn_entry = exec_fn_entry(ira->new_irb.exec);
@@ -16473,20 +15720,56 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
IrInstruction *old_arg = call_instruction->args[call_i]->child;
if (type_is_invalid(old_arg->value.type))
return ira->codegen->invalid_instruction;
- IrInstruction *casted_arg;
- if (next_arg_index < src_param_count) {
- ZigType *param_type = fn_type_id->param_info[next_arg_index].type;
- if (type_is_invalid(param_type))
- return ira->codegen->invalid_instruction;
- casted_arg = ir_implicit_cast(ira, old_arg, param_type);
- if (type_is_invalid(casted_arg->value.type))
- return ira->codegen->invalid_instruction;
- } else {
- casted_arg = old_arg;
- }
- casted_args[next_arg_index] = casted_arg;
- next_arg_index += 1;
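+        // A var args pack arrives as a single ArgTuple; expand it by loading each
+        // captured argument variable and casting to the target parameter type.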
+ if (old_arg->value.type->id == ZigTypeIdArgTuple) {
+ for (size_t arg_tuple_i = old_arg->value.data.x_arg_tuple.start_index;
+ arg_tuple_i < old_arg->value.data.x_arg_tuple.end_index; arg_tuple_i += 1)
+ {
+ ZigVar *arg_var = get_fn_var_by_index(parent_fn_entry, arg_tuple_i);
+ if (arg_var == nullptr) {
+ ir_add_error(ira, old_arg,
+ buf_sprintf("compiler bug: var args can't handle void. https://github.com/ziglang/zig/issues/557"));
+ return ira->codegen->invalid_instruction;
+ }
+ IrInstruction *arg_var_ptr_inst = ir_get_var_ptr(ira, old_arg, arg_var);
+ if (type_is_invalid(arg_var_ptr_inst->value.type))
+ return ira->codegen->invalid_instruction;
+
+ IrInstruction *arg_tuple_arg = ir_get_deref(ira, old_arg, arg_var_ptr_inst, nullptr);
+ if (type_is_invalid(arg_tuple_arg->value.type))
+ return ira->codegen->invalid_instruction;
+
+ IrInstruction *casted_arg;
+ if (next_arg_index < src_param_count) {
+ ZigType *param_type = fn_type_id->param_info[next_arg_index].type;
+ if (type_is_invalid(param_type))
+ return ira->codegen->invalid_instruction;
+ casted_arg = ir_implicit_cast(ira, arg_tuple_arg, param_type);
+ if (type_is_invalid(casted_arg->value.type))
+ return ira->codegen->invalid_instruction;
+ } else {
+ casted_arg = arg_tuple_arg;
+ }
+
+ casted_args[next_arg_index] = casted_arg;
+ next_arg_index += 1;
+ }
+ } else {
+ IrInstruction *casted_arg;
+ if (next_arg_index < src_param_count) {
+ ZigType *param_type = fn_type_id->param_info[next_arg_index].type;
+ if (type_is_invalid(param_type))
+ return ira->codegen->invalid_instruction;
+ casted_arg = ir_implicit_cast(ira, old_arg, param_type);
+ if (type_is_invalid(casted_arg->value.type))
+ return ira->codegen->invalid_instruction;
+ } else {
+ casted_arg = old_arg;
+ }
+
+ casted_args[next_arg_index] = casted_arg;
+ next_arg_index += 1;
+ }
}
assert(next_arg_index == call_param_count);
@@ -16495,49 +15778,45 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
if (type_is_invalid(return_type))
return ira->codegen->invalid_instruction;
- if (call_instruction->is_async) {
- IrInstruction *uncasted_async_allocator_inst;
- if (call_instruction->async_allocator == nullptr) {
- uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base,
- ImplicitAllocatorIdLocalVar);
- if (type_is_invalid(uncasted_async_allocator_inst->value.type))
- return ira->codegen->invalid_instruction;
- } else {
- uncasted_async_allocator_inst = call_instruction->async_allocator->child;
- if (type_is_invalid(uncasted_async_allocator_inst->value.type))
- return ira->codegen->invalid_instruction;
-
- }
- IrInstruction *async_allocator_inst = ir_implicit_cast(ira, uncasted_async_allocator_inst, fn_type_id->async_allocator_type);
- if (type_is_invalid(async_allocator_inst->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_analyze_async_call(ira, call_instruction, fn_entry, fn_type, fn_ref,
- casted_args, call_param_count, async_allocator_inst);
- return ir_finish_anal(ira, result);
- }
-
if (fn_entry != nullptr && fn_entry->fn_inline == FnInlineAlways && fn_inline == FnInlineNever) {
ir_add_error(ira, &call_instruction->base,
buf_sprintf("no-inline call of inline function"));
return ira->codegen->invalid_instruction;
}
+ if (call_instruction->is_async) {
+ IrInstruction *result = ir_analyze_async_call(ira, call_instruction, fn_entry, fn_type, fn_ref,
+ casted_args, call_param_count, casted_new_stack);
+ return ir_finish_anal(ira, result);
+ }
+
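+    // As in the generic path above: a plain call to an async function makes the
+    // caller inferred-async.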
+ if (fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) {
+ parent_fn_entry->inferred_async_node = fn_ref->source_node;
+ parent_fn_entry->inferred_async_fn = fn_entry;
+ }
+
IrInstruction *result_loc;
if (handle_is_ptr(return_type)) {
result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc,
return_type, nullptr, true, true, false);
- if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc))) {
- return result_loc;
+ if (result_loc != nullptr) {
+ if (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)) {
+ return result_loc;
+ }
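+            // Same as the generic path: a by-value result type needs no result location.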
+ if (!handle_is_ptr(result_loc->value.type->data.pointer.child_type)) {
+ ir_reset_result(call_instruction->result_loc);
+ result_loc = nullptr;
+ }
}
} else {
result_loc = nullptr;
}
- IrInstruction *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref,
- call_param_count, casted_args, fn_inline, false, nullptr, casted_new_stack,
+ IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref,
+ call_param_count, casted_args, fn_inline, false, casted_new_stack,
result_loc, return_type);
- return ir_finish_anal(ira, new_call_instruction);
+ parent_fn_entry->call_list.append(new_call_instruction);
+ return ir_finish_anal(ira, &new_call_instruction->base);
}
static IrInstruction *ir_analyze_instruction_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction) {
@@ -16682,7 +15961,7 @@ static Error ir_read_const_ptr(IrAnalyze *ira, CodeGen *codegen, AstNode *source
zig_unreachable();
}
-static IrInstruction *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op_instruction) {
+static IrInstruction *ir_analyze_optional_type(IrAnalyze *ira, IrInstructionUnOp *un_op_instruction) {
Error err;
IrInstruction *value = un_op_instruction->value->child;
ZigType *type_entry = ir_resolve_type(ira, value);
@@ -16716,8 +15995,10 @@ static IrInstruction *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op_
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return ir_const_type(ira, &un_op_instruction->base, get_optional_type(ira->codegen, type_entry));
+
case ZigTypeIdUnreachable:
case ZigTypeIdOpaque:
ir_add_error_node(ira, un_op_instruction->base.source_node,
@@ -16881,7 +16162,7 @@ static IrInstruction *ir_analyze_instruction_un_op(IrAnalyze *ira, IrInstruction
return result;
}
case IrUnOpOptional:
- return ir_analyze_maybe(ira, instruction);
+ return ir_analyze_optional_type(ira, instruction);
}
zig_unreachable();
}
@@ -18441,6 +17722,20 @@ static IrInstruction *ir_analyze_instruction_set_float_mode(IrAnalyze *ira,
return ir_const_void(ira, &instruction->base);
}
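+// Implements the `anyframe` and `anyframe->T` type expressions.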
+static IrInstruction *ir_analyze_instruction_any_frame_type(IrAnalyze *ira,
+ IrInstructionAnyFrameType *instruction)
+{
+ ZigType *payload_type = nullptr;
+ if (instruction->payload_type != nullptr) {
+ payload_type = ir_resolve_type(ira, instruction->payload_type->child);
+ if (type_is_invalid(payload_type))
+ return ira->codegen->invalid_instruction;
+ }
+
+ ZigType *any_frame_type = get_any_frame_type(ira->codegen, payload_type);
+ return ir_const_type(ira, &instruction->base, any_frame_type);
+}
+
static IrInstruction *ir_analyze_instruction_slice_type(IrAnalyze *ira,
IrInstructionSliceType *slice_type_instruction)
{
@@ -18488,8 +17783,9 @@ static IrInstruction *ir_analyze_instruction_slice_type(IrAnalyze *ira,
case ZigTypeIdUnion:
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
{
ResolveStatus needed_status = (align_bytes == 0) ?
ResolveStatusZeroBitsKnown : ResolveStatusAlignmentKnown;
@@ -18603,8 +17899,9 @@ static IrInstruction *ir_analyze_instruction_array_type(IrAnalyze *ira,
case ZigTypeIdUnion:
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
{
if ((err = ensure_complete_type(ira->codegen, child_type)))
return ira->codegen->invalid_instruction;
@@ -18615,22 +17912,6 @@ static IrInstruction *ir_analyze_instruction_array_type(IrAnalyze *ira,
zig_unreachable();
}
-static IrInstruction *ir_analyze_instruction_promise_type(IrAnalyze *ira, IrInstructionPromiseType *instruction) {
- ZigType *promise_type;
-
- if (instruction->payload_type == nullptr) {
- promise_type = ira->codegen->builtin_types.entry_promise;
- } else {
- ZigType *payload_type = ir_resolve_type(ira, instruction->payload_type->child);
- if (type_is_invalid(payload_type))
- return ira->codegen->invalid_instruction;
-
- promise_type = get_promise_type(ira->codegen, payload_type);
- }
-
- return ir_const_type(ira, &instruction->base, promise_type);
-}
-
static IrInstruction *ir_analyze_instruction_size_of(IrAnalyze *ira,
IrInstructionSizeOf *size_of_instruction)
{
@@ -18670,8 +17951,9 @@ static IrInstruction *ir_analyze_instruction_size_of(IrAnalyze *ira,
case ZigTypeIdEnum:
case ZigTypeIdUnion:
case ZigTypeIdFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
{
uint64_t size_in_bytes = type_size(ira->codegen, type_entry);
return ir_const_unsigned(ira, &size_of_instruction->base, size_in_bytes);
@@ -19157,7 +18439,6 @@ static IrInstruction *ir_analyze_instruction_switch_target(IrAnalyze *ira,
case ZigTypeIdComptimeInt:
case ZigTypeIdEnumLiteral:
case ZigTypeIdPointer:
- case ZigTypeIdPromise:
case ZigTypeIdFn:
case ZigTypeIdErrorSet: {
if (pointee_val) {
@@ -19236,6 +18517,8 @@ static IrInstruction *ir_analyze_instruction_switch_target(IrAnalyze *ira,
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
ir_add_error(ira, &switch_target_instruction->base,
buf_sprintf("invalid switch target type '%s'", buf_ptr(&target_type->name)));
return ira->codegen->invalid_instruction;
@@ -20670,32 +19953,22 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr
break;
}
- case ZigTypeIdPromise:
- {
- result = create_const_vals(1);
- result->special = ConstValSpecialStatic;
- result->type = ir_type_info_get_type(ira, "Promise", nullptr);
+ case ZigTypeIdAnyFrame: {
+ result = create_const_vals(1);
+ result->special = ConstValSpecialStatic;
+ result->type = ir_type_info_get_type(ira, "AnyFrame", nullptr);
- ConstExprValue *fields = create_const_vals(1);
- result->data.x_struct.fields = fields;
+ ConstExprValue *fields = create_const_vals(1);
+ result->data.x_struct.fields = fields;
- // child: ?type
- ensure_field_index(result->type, "child", 0);
- fields[0].special = ConstValSpecialStatic;
- fields[0].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
-
- if (type_entry->data.promise.result_type == nullptr)
- fields[0].data.x_optional = nullptr;
- else {
- ConstExprValue *child_type = create_const_vals(1);
- child_type->special = ConstValSpecialStatic;
- child_type->type = ira->codegen->builtin_types.entry_type;
- child_type->data.x_type = type_entry->data.promise.result_type;
- fields[0].data.x_optional = child_type;
- }
-
- break;
- }
+ // child: ?type
+ ensure_field_index(result->type, "child", 0);
+ fields[0].special = ConstValSpecialStatic;
+ fields[0].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
+ fields[0].data.x_optional = (type_entry->data.any_frame.result_type == nullptr) ? nullptr :
+ create_const_type(ira->codegen, type_entry->data.any_frame.result_type);
+ break;
+ }
case ZigTypeIdEnum:
{
result = create_const_vals(1);
@@ -21005,7 +20278,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr
result->special = ConstValSpecialStatic;
result->type = ir_type_info_get_type(ira, "Fn", nullptr);
- ConstExprValue *fields = create_const_vals(6);
+ ConstExprValue *fields = create_const_vals(5);
result->data.x_struct.fields = fields;
// calling_convention: TypeInfo.CallingConvention
@@ -21038,19 +20311,6 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr
return_type->data.x_type = type_entry->data.fn.fn_type_id.return_type;
fields[3].data.x_optional = return_type;
}
- // async_allocator_type: type
- ensure_field_index(result->type, "async_allocator_type", 4);
- fields[4].special = ConstValSpecialStatic;
- fields[4].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
- if (type_entry->data.fn.fn_type_id.async_allocator_type == nullptr)
- fields[4].data.x_optional = nullptr;
- else {
- ConstExprValue *async_alloc_type = create_const_vals(1);
- async_alloc_type->special = ConstValSpecialStatic;
- async_alloc_type->type = ira->codegen->builtin_types.entry_type;
- async_alloc_type->data.x_type = type_entry->data.fn.fn_type_id.async_allocator_type;
- fields[4].data.x_optional = async_alloc_type;
- }
// args: []TypeInfo.FnArg
ZigType *type_info_fn_arg_type = ir_type_info_get_type(ira, "FnArg", nullptr);
if ((err = type_resolve(ira->codegen, type_info_fn_arg_type, ResolveStatusSizeKnown))) {
@@ -21065,10 +20325,9 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr
fn_arg_array->data.x_array.special = ConstArraySpecialNone;
fn_arg_array->data.x_array.data.s_none.elements = create_const_vals(fn_arg_count);
- init_const_slice(ira->codegen, &fields[5], fn_arg_array, 0, fn_arg_count, false);
+ init_const_slice(ira->codegen, &fields[4], fn_arg_array, 0, fn_arg_count, false);
- for (size_t fn_arg_index = 0; fn_arg_index < fn_arg_count; fn_arg_index++)
- {
+ for (size_t fn_arg_index = 0; fn_arg_index < fn_arg_count; fn_arg_index++) {
FnTypeParamInfo *fn_param_info = &type_entry->data.fn.fn_type_id.param_info[fn_arg_index];
ConstExprValue *fn_arg_val = &fn_arg_array->data.x_array.data.s_none.elements[fn_arg_index];
@@ -21115,6 +20374,8 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr
break;
}
+ case ZigTypeIdFnFrame:
+ zig_panic("TODO @typeInfo for async function frames");
}
assert(result != nullptr);
@@ -21312,12 +20573,15 @@ static IrInstruction *ir_analyze_instruction_c_import(IrAnalyze *ira, IrInstruct
}
for (size_t i = 0; i < errors_len; i += 1) {
Stage2ErrorMsg *clang_err = &errors_ptr[i];
- ErrorMsg *err_msg = err_msg_create_with_offset(
- clang_err->filename_ptr ?
- buf_create_from_mem(clang_err->filename_ptr, clang_err->filename_len) : buf_alloc(),
- clang_err->line, clang_err->column, clang_err->offset, clang_err->source,
- buf_create_from_mem(clang_err->msg_ptr, clang_err->msg_len));
- err_msg_add_note(parent_err_msg, err_msg);
+            // Clang can emit "too many errors, stopping now", in which case `source` and `filename_ptr` are null.
+            if (clang_err->source && clang_err->filename_ptr) {
+                ErrorMsg *err_msg = err_msg_create_with_offset(
+                    buf_create_from_mem(clang_err->filename_ptr, clang_err->filename_len),
+                    clang_err->line, clang_err->column, clang_err->offset, clang_err->source,
+                    buf_create_from_mem(clang_err->msg_ptr, clang_err->msg_len));
+                err_msg_add_note(parent_err_msg, err_msg);
+            }
}
return ira->codegen->invalid_instruction;
@@ -22825,11 +22089,51 @@ static IrInstruction *ir_analyze_instruction_frame_address(IrAnalyze *ira, IrIns
return result;
}
-static IrInstruction *ir_analyze_instruction_handle(IrAnalyze *ira, IrInstructionHandle *instruction) {
+static IrInstruction *ir_analyze_instruction_frame_handle(IrAnalyze *ira, IrInstructionFrameHandle *instruction) {
+ ZigFn *fn = exec_fn_entry(ira->new_irb.exec);
+ ir_assert(fn != nullptr, &instruction->base);
+
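+    // @frame() requires a coroutine frame, so using it makes the function async.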
+ if (fn->inferred_async_node == nullptr) {
+ fn->inferred_async_node = instruction->base.source_node;
+ }
+
+ ZigType *frame_type = get_fn_frame_type(ira->codegen, fn);
+ ZigType *ptr_frame_type = get_pointer_to_type(ira->codegen, frame_type, false);
+
IrInstruction *result = ir_build_handle(&ira->new_irb, instruction->base.scope, instruction->base.source_node);
- ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
- assert(fn_entry != nullptr);
- result->value.type = get_promise_type(ira->codegen, fn_entry->type_entry->data.fn.fn_type_id.return_type);
+ result->value.type = ptr_frame_type;
+ return result;
+}
+
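+// Implements @Frame(func): the type of func's coroutine frame.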
+static IrInstruction *ir_analyze_instruction_frame_type(IrAnalyze *ira, IrInstructionFrameType *instruction) {
+ ZigFn *fn = ir_resolve_fn(ira, instruction->fn->child);
+ if (fn == nullptr)
+ return ira->codegen->invalid_instruction;
+
+ if (fn->type_entry->data.fn.is_generic) {
+ ir_add_error(ira, &instruction->base,
+ buf_sprintf("@Frame() of generic function"));
+ return ira->codegen->invalid_instruction;
+ }
+
+ ZigType *ty = get_fn_frame_type(ira->codegen, fn);
+ return ir_const_type(ira, &instruction->base, ty);
+}
+
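+// Implements @frameSize: the size is only known at codegen time, so lower to a
+// gen instruction with a usize result.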
+static IrInstruction *ir_analyze_instruction_frame_size(IrAnalyze *ira, IrInstructionFrameSizeSrc *instruction) {
+ IrInstruction *fn = instruction->fn->child;
+ if (type_is_invalid(fn->value.type))
+ return ira->codegen->invalid_instruction;
+
+ if (fn->value.type->id != ZigTypeIdFn) {
+ ir_add_error(ira, fn,
+ buf_sprintf("expected function, found '%s'", buf_ptr(&fn->value.type->name)));
+ return ira->codegen->invalid_instruction;
+ }
+
+ IrInstruction *result = ir_build_frame_size_gen(&ira->new_irb, instruction->base.scope,
+ instruction->base.source_node, fn);
+ result->value.type = ira->codegen->builtin_types.entry_usize;
return result;
}
@@ -22864,7 +22168,6 @@ static IrInstruction *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruct
case ZigTypeIdInt:
case ZigTypeIdFloat:
case ZigTypeIdPointer:
- case ZigTypeIdPromise:
case ZigTypeIdArray:
case ZigTypeIdStruct:
case ZigTypeIdOptional:
@@ -22874,6 +22177,8 @@ static IrInstruction *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruct
case ZigTypeIdUnion:
case ZigTypeIdFn:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
{
uint64_t align_in_bytes = get_abi_alignment(ira->codegen, type_entry);
return ir_const_unsigned(ira, &instruction->base, align_in_bytes);
@@ -22988,19 +22293,6 @@ static IrInstruction *ir_analyze_instruction_overflow_op(IrAnalyze *ira, IrInstr
return result;
}
-static IrInstruction *ir_analyze_instruction_result_ptr(IrAnalyze *ira, IrInstructionResultPtr *instruction) {
- IrInstruction *result = instruction->result->child;
- if (type_is_invalid(result->value.type))
- return result;
-
- if (instruction->result_loc->written && instruction->result_loc->resolved_loc != nullptr &&
- !instr_is_comptime(result))
- {
- return instruction->result_loc->resolved_loc;
- }
- return ir_get_ref(ira, &instruction->base, result, true, false);
-}
-
static void ir_eval_mul_add(IrAnalyze *ira, IrInstructionMulAdd *source_instr, ZigType *float_type,
ConstExprValue *op1, ConstExprValue *op2, ConstExprValue *op3, ConstExprValue *out_val) {
if (float_type->id == ZigTypeIdComptimeFloat) {
@@ -23125,11 +22417,16 @@ static IrInstruction *ir_analyze_instruction_test_err(IrAnalyze *ira, IrInstruct
if (type_is_invalid(base_ptr->value.type))
return ira->codegen->invalid_instruction;
- IrInstruction *value = ir_get_deref(ira, &instruction->base, base_ptr, nullptr);
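+    // base_ptr may already be the error union value itself rather than a pointer to it.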
+ IrInstruction *value;
+ if (instruction->base_ptr_is_payload) {
+ value = base_ptr;
+ } else {
+ value = ir_get_deref(ira, &instruction->base, base_ptr, nullptr);
+ }
+
ZigType *type_entry = value->value.type;
if (type_is_invalid(type_entry))
return ira->codegen->invalid_instruction;
-
if (type_entry->id == ZigTypeIdErrorUnion) {
if (instr_is_comptime(value)) {
ConstExprValue *err_union_val = ir_resolve_const(ira, value, UndefBad);
@@ -23423,18 +22720,6 @@ static IrInstruction *ir_analyze_instruction_fn_proto(IrAnalyze *ira, IrInstruct
return ira->codegen->invalid_instruction;
}
- if (fn_type_id.cc == CallingConventionAsync) {
- if (instruction->async_allocator_type_value == nullptr) {
- ir_add_error(ira, &instruction->base,
- buf_sprintf("async fn proto missing allocator type"));
- return ira->codegen->invalid_instruction;
- }
- IrInstruction *async_allocator_type_value = instruction->async_allocator_type_value->child;
- fn_type_id.async_allocator_type = ir_resolve_type(ira, async_allocator_type_value);
- if (type_is_invalid(fn_type_id.async_allocator_type))
- return ira->codegen->invalid_instruction;
- }
-
return ir_const_type(ira, &instruction->base, get_fn_type(ira->codegen, &fn_type_id));
}
@@ -23673,7 +22958,7 @@ static IrInstruction *ir_analyze_instruction_check_statement_is_void(IrAnalyze *
if (type_is_invalid(statement_type))
return ira->codegen->invalid_instruction;
- if (statement_type->id != ZigTypeIdVoid) {
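+    // noreturn expressions are also valid statements.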
+ if (statement_type->id != ZigTypeIdVoid && statement_type->id != ZigTypeIdUnreachable) {
ir_add_error(ira, &instruction->base, buf_sprintf("expression value is ignored"));
}
@@ -23928,7 +23213,6 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue
case ZigTypeIdEnumLiteral:
case ZigTypeIdUndefined:
case ZigTypeIdNull:
- case ZigTypeIdPromise:
case ZigTypeIdErrorUnion:
case ZigTypeIdErrorSet:
zig_unreachable();
@@ -24038,6 +23322,10 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue
zig_panic("TODO buf_write_value_bytes fn type");
case ZigTypeIdUnion:
zig_panic("TODO buf_write_value_bytes union type");
+ case ZigTypeIdFnFrame:
+ zig_panic("TODO buf_write_value_bytes async fn frame type");
+ case ZigTypeIdAnyFrame:
+ zig_panic("TODO buf_write_value_bytes anyframe type");
}
zig_unreachable();
}
@@ -24082,7 +23370,6 @@ static Error buf_read_value_bytes(IrAnalyze *ira, CodeGen *codegen, AstNode *sou
case ZigTypeIdEnumLiteral:
case ZigTypeIdUndefined:
case ZigTypeIdNull:
- case ZigTypeIdPromise:
zig_unreachable();
case ZigTypeIdVoid:
return ErrorNone;
@@ -24218,6 +23505,10 @@ static Error buf_read_value_bytes(IrAnalyze *ira, CodeGen *codegen, AstNode *sou
zig_panic("TODO buf_read_value_bytes fn type");
case ZigTypeIdUnion:
zig_panic("TODO buf_read_value_bytes union type");
+ case ZigTypeIdFnFrame:
+ zig_panic("TODO buf_read_value_bytes async fn frame type");
+ case ZigTypeIdAnyFrame:
+ zig_panic("TODO buf_read_value_bytes anyframe type");
}
zig_unreachable();
}
@@ -24568,184 +23859,6 @@ static IrInstruction *ir_analyze_instruction_tag_type(IrAnalyze *ira, IrInstruct
}
}
-static IrInstruction *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructionCancel *instruction) {
- IrInstruction *target_inst = instruction->target->child;
- if (type_is_invalid(target_inst->value.type))
- return ira->codegen->invalid_instruction;
- IrInstruction *casted_target = ir_implicit_cast(ira, target_inst, ira->codegen->builtin_types.entry_promise);
- if (type_is_invalid(casted_target->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_cancel(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_target);
- result->value.type = ira->codegen->builtin_types.entry_void;
- result->value.special = ConstValSpecialStatic;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_id(IrAnalyze *ira, IrInstructionCoroId *instruction) {
- IrInstruction *promise_ptr = instruction->promise_ptr->child;
- if (type_is_invalid(promise_ptr->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_coro_id(&ira->new_irb, instruction->base.scope, instruction->base.source_node,
- promise_ptr);
- result->value.type = ira->codegen->builtin_types.entry_usize;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_alloc(IrAnalyze *ira, IrInstructionCoroAlloc *instruction) {
- IrInstruction *coro_id = instruction->coro_id->child;
- if (type_is_invalid(coro_id->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_coro_alloc(&ira->new_irb, instruction->base.scope, instruction->base.source_node,
- coro_id);
- result->value.type = ira->codegen->builtin_types.entry_bool;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_size(IrAnalyze *ira, IrInstructionCoroSize *instruction) {
- IrInstruction *result = ir_build_coro_size(&ira->new_irb, instruction->base.scope, instruction->base.source_node);
- result->value.type = ira->codegen->builtin_types.entry_usize;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_begin(IrAnalyze *ira, IrInstructionCoroBegin *instruction) {
- IrInstruction *coro_id = instruction->coro_id->child;
- if (type_is_invalid(coro_id->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *coro_mem_ptr = instruction->coro_mem_ptr->child;
- if (type_is_invalid(coro_mem_ptr->value.type))
- return ira->codegen->invalid_instruction;
-
- ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
- ir_assert(fn_entry != nullptr, &instruction->base);
- IrInstruction *result = ir_build_coro_begin(&ira->new_irb, instruction->base.scope, instruction->base.source_node,
- coro_id, coro_mem_ptr);
- result->value.type = get_promise_type(ira->codegen, fn_entry->type_entry->data.fn.fn_type_id.return_type);
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_get_implicit_allocator(IrAnalyze *ira, IrInstructionGetImplicitAllocator *instruction) {
- return ir_get_implicit_allocator(ira, &instruction->base, instruction->id);
-}
-
-static IrInstruction *ir_analyze_instruction_coro_alloc_fail(IrAnalyze *ira, IrInstructionCoroAllocFail *instruction) {
- IrInstruction *err_val = instruction->err_val->child;
- if (type_is_invalid(err_val->value.type))
- return ir_unreach_error(ira);
-
- IrInstruction *result = ir_build_coro_alloc_fail(&ira->new_irb, instruction->base.scope, instruction->base.source_node, err_val);
- result->value.type = ira->codegen->builtin_types.entry_unreachable;
- return ir_finish_anal(ira, result);
-}
-
-static IrInstruction *ir_analyze_instruction_coro_suspend(IrAnalyze *ira, IrInstructionCoroSuspend *instruction) {
- IrInstruction *save_point = nullptr;
- if (instruction->save_point != nullptr) {
- save_point = instruction->save_point->child;
- if (type_is_invalid(save_point->value.type))
- return ira->codegen->invalid_instruction;
- }
-
- IrInstruction *is_final = instruction->is_final->child;
- if (type_is_invalid(is_final->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_coro_suspend(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, save_point, is_final);
- result->value.type = ira->codegen->builtin_types.entry_u8;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_end(IrAnalyze *ira, IrInstructionCoroEnd *instruction) {
- IrInstruction *result = ir_build_coro_end(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node);
- result->value.type = ira->codegen->builtin_types.entry_void;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_free(IrAnalyze *ira, IrInstructionCoroFree *instruction) {
- IrInstruction *coro_id = instruction->coro_id->child;
- if (type_is_invalid(coro_id->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *coro_handle = instruction->coro_handle->child;
- if (type_is_invalid(coro_handle->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_coro_free(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, coro_id, coro_handle);
- ZigType *ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, false);
- result->value.type = get_optional_type(ira->codegen, ptr_type);
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_resume(IrAnalyze *ira, IrInstructionCoroResume *instruction) {
- IrInstruction *awaiter_handle = instruction->awaiter_handle->child;
- if (type_is_invalid(awaiter_handle->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *casted_target = ir_implicit_cast(ira, awaiter_handle, ira->codegen->builtin_types.entry_promise);
- if (type_is_invalid(casted_target->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_coro_resume(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, casted_target);
- result->value.type = ira->codegen->builtin_types.entry_void;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_save(IrAnalyze *ira, IrInstructionCoroSave *instruction) {
- IrInstruction *coro_handle = instruction->coro_handle->child;
- if (type_is_invalid(coro_handle->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_coro_save(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, coro_handle);
- result->value.type = ira->codegen->builtin_types.entry_usize;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_promise(IrAnalyze *ira, IrInstructionCoroPromise *instruction) {
- IrInstruction *coro_handle = instruction->coro_handle->child;
- if (type_is_invalid(coro_handle->value.type))
- return ira->codegen->invalid_instruction;
-
- if (coro_handle->value.type->id != ZigTypeIdPromise ||
- coro_handle->value.type->data.promise.result_type == nullptr)
- {
- ir_add_error(ira, &instruction->base, buf_sprintf("expected promise->T, found '%s'",
- buf_ptr(&coro_handle->value.type->name)));
- return ira->codegen->invalid_instruction;
- }
-
- ZigType *coro_frame_type = get_promise_frame_type(ira->codegen,
- coro_handle->value.type->data.promise.result_type);
-
- IrInstruction *result = ir_build_coro_promise(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, coro_handle);
- result->value.type = get_pointer_to_type(ira->codegen, coro_frame_type, false);
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_alloc_helper(IrAnalyze *ira, IrInstructionCoroAllocHelper *instruction) {
- IrInstruction *realloc_fn = instruction->realloc_fn->child;
- if (type_is_invalid(realloc_fn->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *coro_size = instruction->coro_size->child;
- if (type_is_invalid(coro_size->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_coro_alloc_helper(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, realloc_fn, coro_size);
- ZigType *u8_ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, false);
- result->value.type = get_optional_type(ira->codegen, u8_ptr_type);
- return result;
-}
-
static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstruction *op) {
ZigType *operand_type = ir_resolve_type(ira, op);
if (type_is_invalid(operand_type))
@@ -24877,65 +23990,6 @@ static IrInstruction *ir_analyze_instruction_atomic_load(IrAnalyze *ira, IrInstr
return result;
}
-static IrInstruction *ir_analyze_instruction_promise_result_type(IrAnalyze *ira, IrInstructionPromiseResultType *instruction) {
- ZigType *promise_type = ir_resolve_type(ira, instruction->promise_type->child);
- if (type_is_invalid(promise_type))
- return ira->codegen->invalid_instruction;
-
- if (promise_type->id != ZigTypeIdPromise || promise_type->data.promise.result_type == nullptr) {
- ir_add_error(ira, &instruction->base, buf_sprintf("expected promise->T, found '%s'",
- buf_ptr(&promise_type->name)));
- return ira->codegen->invalid_instruction;
- }
-
- return ir_const_type(ira, &instruction->base, promise_type->data.promise.result_type);
-}
-
-static IrInstruction *ir_analyze_instruction_await_bookkeeping(IrAnalyze *ira, IrInstructionAwaitBookkeeping *instruction) {
- ZigType *promise_result_type = ir_resolve_type(ira, instruction->promise_result_type->child);
- if (type_is_invalid(promise_result_type))
- return ira->codegen->invalid_instruction;
-
- ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
- ir_assert(fn_entry != nullptr, &instruction->base);
-
- if (type_can_fail(promise_result_type)) {
- fn_entry->calls_or_awaits_errorable_fn = true;
- }
-
- return ir_const_void(ira, &instruction->base);
-}
-
-static IrInstruction *ir_analyze_instruction_merge_err_ret_traces(IrAnalyze *ira,
- IrInstructionMergeErrRetTraces *instruction)
-{
- IrInstruction *coro_promise_ptr = instruction->coro_promise_ptr->child;
- if (type_is_invalid(coro_promise_ptr->value.type))
- return ira->codegen->invalid_instruction;
-
- ir_assert(coro_promise_ptr->value.type->id == ZigTypeIdPointer, &instruction->base);
- ZigType *promise_frame_type = coro_promise_ptr->value.type->data.pointer.child_type;
- ir_assert(promise_frame_type->id == ZigTypeIdStruct, &instruction->base);
- ZigType *promise_result_type = promise_frame_type->data.structure.fields[1].type_entry;
-
- if (!type_can_fail(promise_result_type)) {
- return ir_const_void(ira, &instruction->base);
- }
-
- IrInstruction *src_err_ret_trace_ptr = instruction->src_err_ret_trace_ptr->child;
- if (type_is_invalid(src_err_ret_trace_ptr->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *dest_err_ret_trace_ptr = instruction->dest_err_ret_trace_ptr->child;
- if (type_is_invalid(dest_err_ret_trace_ptr->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_merge_err_ret_traces(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, coro_promise_ptr, src_err_ret_trace_ptr, dest_err_ret_trace_ptr);
- result->value.type = ira->codegen->builtin_types.entry_void;
- return result;
-}
-
static IrInstruction *ir_analyze_instruction_save_err_ret_addr(IrAnalyze *ira, IrInstructionSaveErrRetAddr *instruction) {
IrInstruction *result = ir_build_save_err_ret_addr(&ira->new_irb, instruction->base.scope,
instruction->base.source_node);
@@ -24943,17 +23997,6 @@ static IrInstruction *ir_analyze_instruction_save_err_ret_addr(IrAnalyze *ira, I
return result;
}
-static IrInstruction *ir_analyze_instruction_mark_err_ret_trace_ptr(IrAnalyze *ira, IrInstructionMarkErrRetTracePtr *instruction) {
- IrInstruction *err_ret_trace_ptr = instruction->err_ret_trace_ptr->child;
- if (type_is_invalid(err_ret_trace_ptr->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_mark_err_ret_trace_ptr(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, err_ret_trace_ptr);
- result->value.type = ira->codegen->builtin_types.entry_void;
- return result;
-}
-
static void ir_eval_float_op(IrAnalyze *ira, IrInstructionFloatOp *source_instr, ZigType *float_type,
ConstExprValue *op, ConstExprValue *out_val) {
assert(ira && source_instr && float_type && out_val && op);
@@ -25480,6 +24523,162 @@ static IrInstruction *ir_analyze_instruction_union_init_named_field(IrAnalyze *i
union_type, field_name, field_result_loc, result_loc);
}
+static IrInstruction *ir_analyze_instruction_suspend_begin(IrAnalyze *ira, IrInstructionSuspendBegin *instruction) {
+ IrInstructionSuspendBegin *result = ir_build_suspend_begin(&ira->new_irb, instruction->base.scope,
+ instruction->base.source_node);
+ return &result->base;
+}
+
+static IrInstruction *ir_analyze_instruction_suspend_finish(IrAnalyze *ira,
+ IrInstructionSuspendFinish *instruction)
+{
+ IrInstruction *begin_base = instruction->begin->base.child;
+ if (type_is_invalid(begin_base->value.type))
+ return ira->codegen->invalid_instruction;
+ ir_assert(begin_base->id == IrInstructionIdSuspendBegin, &instruction->base);
+ IrInstructionSuspendBegin *begin = reinterpret_cast<IrInstructionSuspendBegin *>(begin_base);
+
+ ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
+ ir_assert(fn_entry != nullptr, &instruction->base);
+
+ if (fn_entry->inferred_async_node == nullptr) {
+ fn_entry->inferred_async_node = instruction->base.source_node;
+ }
+
+ return ir_build_suspend_finish(&ira->new_irb, instruction->base.scope, instruction->base.source_node, begin);
+}
+
+static IrInstruction *analyze_frame_ptr_to_anyframe_T(IrAnalyze *ira, IrInstruction *source_instr,
+ IrInstruction *frame_ptr)
+{
+ if (type_is_invalid(frame_ptr->value.type))
+ return ira->codegen->invalid_instruction;
+
+ ZigType *result_type;
+ IrInstruction *frame;
+ if (frame_ptr->value.type->id == ZigTypeIdPointer &&
+ frame_ptr->value.type->data.pointer.ptr_len == PtrLenSingle &&
+ frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdFnFrame)
+ {
+ result_type = frame_ptr->value.type->data.pointer.child_type->data.frame.fn->type_entry->data.fn.fn_type_id.return_type;
+ frame = frame_ptr;
+ } else {
+ frame = ir_get_deref(ira, source_instr, frame_ptr, nullptr);
+ if (frame->value.type->id == ZigTypeIdPointer &&
+ frame->value.type->data.pointer.ptr_len == PtrLenSingle &&
+ frame->value.type->data.pointer.child_type->id == ZigTypeIdFnFrame)
+ {
+ result_type = frame->value.type->data.pointer.child_type->data.frame.fn->type_entry->data.fn.fn_type_id.return_type;
+ } else if (frame->value.type->id != ZigTypeIdAnyFrame ||
+ frame->value.type->data.any_frame.result_type == nullptr)
+ {
+ ir_add_error(ira, source_instr,
+ buf_sprintf("expected anyframe->T, found '%s'", buf_ptr(&frame->value.type->name)));
+ return ira->codegen->invalid_instruction;
+ } else {
+ result_type = frame->value.type->data.any_frame.result_type;
+ }
+ }
+
+ ZigType *any_frame_type = get_any_frame_type(ira->codegen, result_type);
+ IrInstruction *casted_frame = ir_implicit_cast(ira, frame, any_frame_type);
+ if (type_is_invalid(casted_frame->value.type))
+ return ira->codegen->invalid_instruction;
+
+ return casted_frame;
+}
+
+static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstructionAwaitSrc *instruction) {
+ IrInstruction *frame = analyze_frame_ptr_to_anyframe_T(ira, &instruction->base, instruction->frame->child);
+ if (type_is_invalid(frame->value.type))
+ return ira->codegen->invalid_instruction;
+
+ ZigType *result_type = frame->value.type->data.any_frame.result_type;
+
+ ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
+ ir_assert(fn_entry != nullptr, &instruction->base);
+
+ if (fn_entry->inferred_async_node == nullptr) {
+ fn_entry->inferred_async_node = instruction->base.source_node;
+ }
+
+ if (type_can_fail(result_type)) {
+ fn_entry->calls_or_awaits_errorable_fn = true;
+ }
+
+ IrInstruction *result_loc;
+ if (type_has_bits(result_type)) {
+ result_loc = ir_resolve_result(ira, &instruction->base, instruction->result_loc,
+ result_type, nullptr, true, true, true);
+ if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)))
+ return result_loc;
+ } else {
+ result_loc = nullptr;
+ }
+
+ IrInstruction *result = ir_build_await_gen(ira, &instruction->base, frame, result_type, result_loc);
+ return ir_finish_anal(ira, result);
+}
+
+static IrInstruction *ir_analyze_instruction_resume(IrAnalyze *ira, IrInstructionResume *instruction) {
+ IrInstruction *frame_ptr = instruction->frame->child;
+ if (type_is_invalid(frame_ptr->value.type))
+ return ira->codegen->invalid_instruction;
+
+ IrInstruction *frame;
+ if (frame_ptr->value.type->id == ZigTypeIdPointer &&
+ frame_ptr->value.type->data.pointer.ptr_len == PtrLenSingle &&
+ frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdFnFrame)
+ {
+ frame = frame_ptr;
+ } else {
+ frame = ir_get_deref(ira, &instruction->base, frame_ptr, nullptr);
+ }
+
+ ZigType *any_frame_type = get_any_frame_type(ira->codegen, nullptr);
+ IrInstruction *casted_frame = ir_implicit_cast(ira, frame, any_frame_type);
+ if (type_is_invalid(casted_frame->value.type))
+ return ira->codegen->invalid_instruction;
+
+ return ir_build_resume(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_frame);
+}
+
+static IrInstruction *ir_analyze_instruction_spill_begin(IrAnalyze *ira, IrInstructionSpillBegin *instruction) {
+ if (ir_should_inline(ira->new_irb.exec, instruction->base.scope))
+ return ir_const_void(ira, &instruction->base);
+
+ IrInstruction *operand = instruction->operand->child;
+ if (type_is_invalid(operand->value.type))
+ return ira->codegen->invalid_instruction;
+
+ if (!type_has_bits(operand->value.type))
+ return ir_const_void(ira, &instruction->base);
+
+ ir_assert(instruction->spill_id == SpillIdRetErrCode, &instruction->base);
+ ira->new_irb.exec->need_err_code_spill = true;
+
+ IrInstructionSpillBegin *result = ir_build_spill_begin(&ira->new_irb, instruction->base.scope,
+ instruction->base.source_node, operand, instruction->spill_id);
+ return &result->base;
+}
+
+static IrInstruction *ir_analyze_instruction_spill_end(IrAnalyze *ira, IrInstructionSpillEnd *instruction) {
+ IrInstruction *operand = instruction->begin->operand->child;
+ if (type_is_invalid(operand->value.type))
+ return ira->codegen->invalid_instruction;
+
+ if (ir_should_inline(ira->new_irb.exec, instruction->base.scope) || !type_has_bits(operand->value.type))
+ return operand;
+
+ ir_assert(instruction->begin->base.child->id == IrInstructionIdSpillBegin, &instruction->base);
+ IrInstructionSpillBegin *begin = reinterpret_cast<IrInstructionSpillBegin *>(instruction->begin->base.child);
+
+ IrInstruction *result = ir_build_spill_end(&ira->new_irb, instruction->base.scope,
+ instruction->base.source_node, begin);
+ result->value.type = operand->value.type;
+ return result;
+}
+
static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction *instruction) {
switch (instruction->id) {
case IrInstructionIdInvalid:
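
A note for readers: the sketch below (illustrative names, not part of the patch) shows the coercion that analyze_frame_ptr_to_anyframe_T implements. A *@Frame(func) implicitly casts to anyframe->T, which is the operand form await requires; any other operand produces the "expected anyframe->T" error added above.

{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;

fn add(a: i32, b: i32) i32 {
    suspend;
    return a + b;
}

fn caller() void {
    var frame = async add(1, 2);
    // &frame is a *@Frame(add); the analyzer coerces it to
    // anyframe->i32, which carries the result type await needs.
    const handle: anyframe->i32 = &frame;
    resume handle;
    // add has already returned, so this await does not suspend;
    // it copies the result out of the completed frame.
    const sum = await handle;
    assert(sum == 3);
}

test "anyframe->T carries the await result type" {
    _ = async caller();
}
{#code_end#}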
@@ -25507,6 +24706,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
case IrInstructionIdSliceGen:
case IrInstructionIdRefGen:
case IrInstructionIdTestErrGen:
+ case IrInstructionIdFrameSizeGen:
+ case IrInstructionIdAwaitGen:
zig_unreachable();
case IrInstructionIdReturn:
@@ -25547,6 +24748,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_set_runtime_safety(ira, (IrInstructionSetRuntimeSafety *)instruction);
case IrInstructionIdSetFloatMode:
return ir_analyze_instruction_set_float_mode(ira, (IrInstructionSetFloatMode *)instruction);
+ case IrInstructionIdAnyFrameType:
+ return ir_analyze_instruction_any_frame_type(ira, (IrInstructionAnyFrameType *)instruction);
case IrInstructionIdSliceType:
return ir_analyze_instruction_slice_type(ira, (IrInstructionSliceType *)instruction);
case IrInstructionIdGlobalAsm:
@@ -25555,8 +24758,6 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_asm(ira, (IrInstructionAsm *)instruction);
case IrInstructionIdArrayType:
return ir_analyze_instruction_array_type(ira, (IrInstructionArrayType *)instruction);
- case IrInstructionIdPromiseType:
- return ir_analyze_instruction_promise_type(ira, (IrInstructionPromiseType *)instruction);
case IrInstructionIdSizeOf:
return ir_analyze_instruction_size_of(ira, (IrInstructionSizeOf *)instruction);
case IrInstructionIdTestNonNull:
@@ -25655,8 +24856,12 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_return_address(ira, (IrInstructionReturnAddress *)instruction);
case IrInstructionIdFrameAddress:
return ir_analyze_instruction_frame_address(ira, (IrInstructionFrameAddress *)instruction);
- case IrInstructionIdHandle:
- return ir_analyze_instruction_handle(ira, (IrInstructionHandle *)instruction);
+ case IrInstructionIdFrameHandle:
+ return ir_analyze_instruction_frame_handle(ira, (IrInstructionFrameHandle *)instruction);
+ case IrInstructionIdFrameType:
+ return ir_analyze_instruction_frame_type(ira, (IrInstructionFrameType *)instruction);
+ case IrInstructionIdFrameSizeSrc:
+ return ir_analyze_instruction_frame_size(ira, (IrInstructionFrameSizeSrc *)instruction);
case IrInstructionIdAlignOf:
return ir_analyze_instruction_align_of(ira, (IrInstructionAlignOf *)instruction);
case IrInstructionIdOverflowOp:
@@ -25711,8 +24916,6 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_resolve_result(ira, (IrInstructionResolveResult *)instruction);
case IrInstructionIdResetResult:
return ir_analyze_instruction_reset_result(ira, (IrInstructionResetResult *)instruction);
- case IrInstructionIdResultPtr:
- return ir_analyze_instruction_result_ptr(ira, (IrInstructionResultPtr *)instruction);
case IrInstructionIdOpaqueType:
return ir_analyze_instruction_opaque_type(ira, (IrInstructionOpaqueType *)instruction);
case IrInstructionIdSetAlignStack:
@@ -25727,50 +24930,14 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_error_return_trace(ira, (IrInstructionErrorReturnTrace *)instruction);
case IrInstructionIdErrorUnion:
return ir_analyze_instruction_error_union(ira, (IrInstructionErrorUnion *)instruction);
- case IrInstructionIdCancel:
- return ir_analyze_instruction_cancel(ira, (IrInstructionCancel *)instruction);
- case IrInstructionIdCoroId:
- return ir_analyze_instruction_coro_id(ira, (IrInstructionCoroId *)instruction);
- case IrInstructionIdCoroAlloc:
- return ir_analyze_instruction_coro_alloc(ira, (IrInstructionCoroAlloc *)instruction);
- case IrInstructionIdCoroSize:
- return ir_analyze_instruction_coro_size(ira, (IrInstructionCoroSize *)instruction);
- case IrInstructionIdCoroBegin:
- return ir_analyze_instruction_coro_begin(ira, (IrInstructionCoroBegin *)instruction);
- case IrInstructionIdGetImplicitAllocator:
- return ir_analyze_instruction_get_implicit_allocator(ira, (IrInstructionGetImplicitAllocator *)instruction);
- case IrInstructionIdCoroAllocFail:
- return ir_analyze_instruction_coro_alloc_fail(ira, (IrInstructionCoroAllocFail *)instruction);
- case IrInstructionIdCoroSuspend:
- return ir_analyze_instruction_coro_suspend(ira, (IrInstructionCoroSuspend *)instruction);
- case IrInstructionIdCoroEnd:
- return ir_analyze_instruction_coro_end(ira, (IrInstructionCoroEnd *)instruction);
- case IrInstructionIdCoroFree:
- return ir_analyze_instruction_coro_free(ira, (IrInstructionCoroFree *)instruction);
- case IrInstructionIdCoroResume:
- return ir_analyze_instruction_coro_resume(ira, (IrInstructionCoroResume *)instruction);
- case IrInstructionIdCoroSave:
- return ir_analyze_instruction_coro_save(ira, (IrInstructionCoroSave *)instruction);
- case IrInstructionIdCoroPromise:
- return ir_analyze_instruction_coro_promise(ira, (IrInstructionCoroPromise *)instruction);
- case IrInstructionIdCoroAllocHelper:
- return ir_analyze_instruction_coro_alloc_helper(ira, (IrInstructionCoroAllocHelper *)instruction);
case IrInstructionIdAtomicRmw:
return ir_analyze_instruction_atomic_rmw(ira, (IrInstructionAtomicRmw *)instruction);
case IrInstructionIdAtomicLoad:
return ir_analyze_instruction_atomic_load(ira, (IrInstructionAtomicLoad *)instruction);
- case IrInstructionIdPromiseResultType:
- return ir_analyze_instruction_promise_result_type(ira, (IrInstructionPromiseResultType *)instruction);
- case IrInstructionIdAwaitBookkeeping:
- return ir_analyze_instruction_await_bookkeeping(ira, (IrInstructionAwaitBookkeeping *)instruction);
case IrInstructionIdSaveErrRetAddr:
return ir_analyze_instruction_save_err_ret_addr(ira, (IrInstructionSaveErrRetAddr *)instruction);
case IrInstructionIdAddImplicitReturnType:
return ir_analyze_instruction_add_implicit_return_type(ira, (IrInstructionAddImplicitReturnType *)instruction);
- case IrInstructionIdMergeErrRetTraces:
- return ir_analyze_instruction_merge_err_ret_traces(ira, (IrInstructionMergeErrRetTraces *)instruction);
- case IrInstructionIdMarkErrRetTracePtr:
- return ir_analyze_instruction_mark_err_ret_trace_ptr(ira, (IrInstructionMarkErrRetTracePtr *)instruction);
case IrInstructionIdFloatOp:
return ir_analyze_instruction_float_op(ira, (IrInstructionFloatOp *)instruction);
case IrInstructionIdMulAdd:
@@ -25797,6 +24964,18 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_bit_cast_src(ira, (IrInstructionBitCastSrc *)instruction);
case IrInstructionIdUnionInitNamedField:
return ir_analyze_instruction_union_init_named_field(ira, (IrInstructionUnionInitNamedField *)instruction);
+ case IrInstructionIdSuspendBegin:
+ return ir_analyze_instruction_suspend_begin(ira, (IrInstructionSuspendBegin *)instruction);
+ case IrInstructionIdSuspendFinish:
+ return ir_analyze_instruction_suspend_finish(ira, (IrInstructionSuspendFinish *)instruction);
+ case IrInstructionIdResume:
+ return ir_analyze_instruction_resume(ira, (IrInstructionResume *)instruction);
+ case IrInstructionIdAwaitSrc:
+ return ir_analyze_instruction_await(ira, (IrInstructionAwaitSrc *)instruction);
+ case IrInstructionIdSpillBegin:
+ return ir_analyze_instruction_spill_begin(ira, (IrInstructionSpillBegin *)instruction);
+ case IrInstructionIdSpillEnd:
+ return ir_analyze_instruction_spill_end(ira, (IrInstructionSpillEnd *)instruction);
}
zig_unreachable();
}
@@ -25813,9 +24992,7 @@ ZigType *ir_analyze(CodeGen *codegen, IrExecutable *old_exec, IrExecutable *new_
old_exec->analysis = ira;
ira->codegen = codegen;
- ZigFn *fn_entry = exec_fn_entry(old_exec);
- bool is_async = fn_entry != nullptr && fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync;
- ira->explicit_return_type = is_async ? get_promise_type(codegen, expected_type) : expected_type;
+ ira->explicit_return_type = expected_type;
ira->explicit_return_type_source_node = expected_type_source_node;
ira->old_irb.codegen = codegen;
@@ -25913,19 +25090,8 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdPtrType:
case IrInstructionIdSetAlignStack:
case IrInstructionIdExport:
- case IrInstructionIdCancel:
- case IrInstructionIdCoroId:
- case IrInstructionIdCoroBegin:
- case IrInstructionIdCoroAllocFail:
- case IrInstructionIdCoroEnd:
- case IrInstructionIdCoroResume:
- case IrInstructionIdCoroSave:
- case IrInstructionIdCoroAllocHelper:
- case IrInstructionIdAwaitBookkeeping:
case IrInstructionIdSaveErrRetAddr:
case IrInstructionIdAddImplicitReturnType:
- case IrInstructionIdMergeErrRetTraces:
- case IrInstructionIdMarkErrRetTracePtr:
case IrInstructionIdAtomicRmw:
case IrInstructionIdCmpxchgGen:
case IrInstructionIdCmpxchgSrc:
@@ -25940,6 +25106,12 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdOptionalWrap:
case IrInstructionIdVectorToArray:
case IrInstructionIdResetResult:
+ case IrInstructionIdSuspendBegin:
+ case IrInstructionIdSuspendFinish:
+ case IrInstructionIdResume:
+ case IrInstructionIdAwaitSrc:
+ case IrInstructionIdAwaitGen:
+ case IrInstructionIdSpillBegin:
return true;
case IrInstructionIdPhi:
@@ -25958,8 +25130,8 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdTypeOf:
case IrInstructionIdStructFieldPtr:
case IrInstructionIdArrayType:
- case IrInstructionIdPromiseType:
case IrInstructionIdSliceType:
+ case IrInstructionIdAnyFrameType:
case IrInstructionIdSizeOf:
case IrInstructionIdTestNonNull:
case IrInstructionIdOptionalUnwrapPtr:
@@ -25985,7 +25157,10 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdAlignOf:
case IrInstructionIdReturnAddress:
case IrInstructionIdFrameAddress:
- case IrInstructionIdHandle:
+ case IrInstructionIdFrameHandle:
+ case IrInstructionIdFrameType:
+ case IrInstructionIdFrameSizeSrc:
+ case IrInstructionIdFrameSizeGen:
case IrInstructionIdTestErrSrc:
case IrInstructionIdTestErrGen:
case IrInstructionIdFnProto:
@@ -26018,13 +25193,6 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdTagType:
case IrInstructionIdErrorReturnTrace:
case IrInstructionIdErrorUnion:
- case IrInstructionIdGetImplicitAllocator:
- case IrInstructionIdCoroAlloc:
- case IrInstructionIdCoroSize:
- case IrInstructionIdCoroSuspend:
- case IrInstructionIdCoroFree:
- case IrInstructionIdCoroPromise:
- case IrInstructionIdPromiseResultType:
case IrInstructionIdFloatOp:
case IrInstructionIdMulAdd:
case IrInstructionIdAtomicLoad:
@@ -26041,7 +25209,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdHasDecl:
case IrInstructionIdAllocaSrc:
case IrInstructionIdAllocaGen:
- case IrInstructionIdResultPtr:
+ case IrInstructionIdSpillEnd:
return false;
case IrInstructionIdAsm:
diff --git a/src/ir.hpp b/src/ir.hpp
index 597624e2e6..3761c5a97d 100644
--- a/src/ir.hpp
+++ b/src/ir.hpp
@@ -28,4 +28,6 @@ ConstExprValue *const_ptr_pointee(IrAnalyze *ira, CodeGen *codegen, ConstExprVal
AstNode *source_node);
const char *float_op_to_name(BuiltinFnId op, bool llvm_name);
+void ir_add_analysis_trace(IrAnalyze *ira, ErrorMsg *err_msg, Buf *text);
+
#endif
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 588a9b2882..7580f19059 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -64,11 +64,9 @@ static void ir_print_other_block(IrPrint *irp, IrBasicBlock *bb) {
}
}
-static void ir_print_return(IrPrint *irp, IrInstructionReturn *return_instruction) {
+static void ir_print_return(IrPrint *irp, IrInstructionReturn *instruction) {
fprintf(irp->f, "return ");
- if (return_instruction->value != nullptr) {
- ir_print_other_instruction(irp, return_instruction->value);
- }
+ ir_print_other_instruction(irp, instruction->operand);
}
static void ir_print_const(IrPrint *irp, IrInstructionConst *const_instruction) {
@@ -257,13 +255,7 @@ static void ir_print_result_loc(IrPrint *irp, ResultLoc *result_loc) {
static void ir_print_call_src(IrPrint *irp, IrInstructionCallSrc *call_instruction) {
if (call_instruction->is_async) {
- fprintf(irp->f, "async");
- if (call_instruction->async_allocator != nullptr) {
- fprintf(irp->f, "<");
- ir_print_other_instruction(irp, call_instruction->async_allocator);
- fprintf(irp->f, ">");
- }
- fprintf(irp->f, " ");
+ fprintf(irp->f, "async ");
}
if (call_instruction->fn_entry) {
fprintf(irp->f, "%s", buf_ptr(&call_instruction->fn_entry->symbol_name));
@@ -284,13 +276,7 @@ static void ir_print_call_src(IrPrint *irp, IrInstructionCallSrc *call_instructi
static void ir_print_call_gen(IrPrint *irp, IrInstructionCallGen *call_instruction) {
if (call_instruction->is_async) {
- fprintf(irp->f, "async");
- if (call_instruction->async_allocator != nullptr) {
- fprintf(irp->f, "<");
- ir_print_other_instruction(irp, call_instruction->async_allocator);
- fprintf(irp->f, ">");
- }
- fprintf(irp->f, " ");
+ fprintf(irp->f, "async ");
}
if (call_instruction->fn_entry) {
fprintf(irp->f, "%s", buf_ptr(&call_instruction->fn_entry->symbol_name));
@@ -477,20 +463,21 @@ static void ir_print_array_type(IrPrint *irp, IrInstructionArrayType *instructio
ir_print_other_instruction(irp, instruction->child_type);
}
-static void ir_print_promise_type(IrPrint *irp, IrInstructionPromiseType *instruction) {
- fprintf(irp->f, "promise");
- if (instruction->payload_type != nullptr) {
- fprintf(irp->f, "->");
- ir_print_other_instruction(irp, instruction->payload_type);
- }
-}
-
static void ir_print_slice_type(IrPrint *irp, IrInstructionSliceType *instruction) {
const char *const_kw = instruction->is_const ? "const " : "";
fprintf(irp->f, "[]%s", const_kw);
ir_print_other_instruction(irp, instruction->child_type);
}
+static void ir_print_any_frame_type(IrPrint *irp, IrInstructionAnyFrameType *instruction) {
+ if (instruction->payload_type == nullptr) {
+ fprintf(irp->f, "anyframe");
+ } else {
+ fprintf(irp->f, "anyframe->");
+ ir_print_other_instruction(irp, instruction->payload_type);
+ }
+}
+
static void ir_print_global_asm(IrPrint *irp, IrInstructionGlobalAsm *instruction) {
fprintf(irp->f, "asm(\"%s\")", buf_ptr(instruction->asm_code));
}
@@ -926,8 +913,26 @@ static void ir_print_frame_address(IrPrint *irp, IrInstructionFrameAddress *inst
fprintf(irp->f, "@frameAddress()");
}
-static void ir_print_handle(IrPrint *irp, IrInstructionHandle *instruction) {
- fprintf(irp->f, "@handle()");
+static void ir_print_handle(IrPrint *irp, IrInstructionFrameHandle *instruction) {
+ fprintf(irp->f, "@frame()");
+}
+
+static void ir_print_frame_type(IrPrint *irp, IrInstructionFrameType *instruction) {
+ fprintf(irp->f, "@Frame(");
+ ir_print_other_instruction(irp, instruction->fn);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_frame_size_src(IrPrint *irp, IrInstructionFrameSizeSrc *instruction) {
+ fprintf(irp->f, "@frameSize(");
+ ir_print_other_instruction(irp, instruction->fn);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_frame_size_gen(IrPrint *irp, IrInstructionFrameSizeGen *instruction) {
+ fprintf(irp->f, "@frameSize(");
+ ir_print_other_instruction(irp, instruction->fn);
+ fprintf(irp->f, ")");
}
static void ir_print_return_address(IrPrint *irp, IrInstructionReturnAddress *instruction) {
@@ -1322,14 +1327,6 @@ static void ir_print_reset_result(IrPrint *irp, IrInstructionResetResult *instru
fprintf(irp->f, ")");
}
-static void ir_print_result_ptr(IrPrint *irp, IrInstructionResultPtr *instruction) {
- fprintf(irp->f, "ResultPtr(");
- ir_print_result_loc(irp, instruction->result_loc);
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->result);
- fprintf(irp->f, ")");
-}
-
static void ir_print_opaque_type(IrPrint *irp, IrInstructionOpaqueType *instruction) {
fprintf(irp->f, "@OpaqueType()");
}
@@ -1391,110 +1388,6 @@ static void ir_print_error_union(IrPrint *irp, IrInstructionErrorUnion *instruct
ir_print_other_instruction(irp, instruction->payload);
}
-static void ir_print_cancel(IrPrint *irp, IrInstructionCancel *instruction) {
- fprintf(irp->f, "cancel ");
- ir_print_other_instruction(irp, instruction->target);
-}
-
-static void ir_print_get_implicit_allocator(IrPrint *irp, IrInstructionGetImplicitAllocator *instruction) {
- fprintf(irp->f, "@getImplicitAllocator(");
- switch (instruction->id) {
- case ImplicitAllocatorIdArg:
- fprintf(irp->f, "Arg");
- break;
- case ImplicitAllocatorIdLocalVar:
- fprintf(irp->f, "LocalVar");
- break;
- }
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_id(IrPrint *irp, IrInstructionCoroId *instruction) {
- fprintf(irp->f, "@coroId(");
- ir_print_other_instruction(irp, instruction->promise_ptr);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_alloc(IrPrint *irp, IrInstructionCoroAlloc *instruction) {
- fprintf(irp->f, "@coroAlloc(");
- ir_print_other_instruction(irp, instruction->coro_id);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_size(IrPrint *irp, IrInstructionCoroSize *instruction) {
- fprintf(irp->f, "@coroSize()");
-}
-
-static void ir_print_coro_begin(IrPrint *irp, IrInstructionCoroBegin *instruction) {
- fprintf(irp->f, "@coroBegin(");
- ir_print_other_instruction(irp, instruction->coro_id);
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->coro_mem_ptr);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_alloc_fail(IrPrint *irp, IrInstructionCoroAllocFail *instruction) {
- fprintf(irp->f, "@coroAllocFail(");
- ir_print_other_instruction(irp, instruction->err_val);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_suspend(IrPrint *irp, IrInstructionCoroSuspend *instruction) {
- fprintf(irp->f, "@coroSuspend(");
- if (instruction->save_point != nullptr) {
- ir_print_other_instruction(irp, instruction->save_point);
- } else {
- fprintf(irp->f, "null");
- }
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->is_final);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_end(IrPrint *irp, IrInstructionCoroEnd *instruction) {
- fprintf(irp->f, "@coroEnd()");
-}
-
-static void ir_print_coro_free(IrPrint *irp, IrInstructionCoroFree *instruction) {
- fprintf(irp->f, "@coroFree(");
- ir_print_other_instruction(irp, instruction->coro_id);
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->coro_handle);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_resume(IrPrint *irp, IrInstructionCoroResume *instruction) {
- fprintf(irp->f, "@coroResume(");
- ir_print_other_instruction(irp, instruction->awaiter_handle);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_save(IrPrint *irp, IrInstructionCoroSave *instruction) {
- fprintf(irp->f, "@coroSave(");
- ir_print_other_instruction(irp, instruction->coro_handle);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_promise(IrPrint *irp, IrInstructionCoroPromise *instruction) {
- fprintf(irp->f, "@coroPromise(");
- ir_print_other_instruction(irp, instruction->coro_handle);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_promise_result_type(IrPrint *irp, IrInstructionPromiseResultType *instruction) {
- fprintf(irp->f, "@PromiseResultType(");
- ir_print_other_instruction(irp, instruction->promise_type);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_alloc_helper(IrPrint *irp, IrInstructionCoroAllocHelper *instruction) {
- fprintf(irp->f, "@coroAllocHelper(");
- ir_print_other_instruction(irp, instruction->realloc_fn);
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->coro_size);
- fprintf(irp->f, ")");
-}
-
static void ir_print_atomic_rmw(IrPrint *irp, IrInstructionAtomicRmw *instruction) {
fprintf(irp->f, "@atomicRmw(");
if (instruction->operand_type != nullptr) {
@@ -1539,12 +1432,6 @@ static void ir_print_atomic_load(IrPrint *irp, IrInstructionAtomicLoad *instruct
fprintf(irp->f, ")");
}
-static void ir_print_await_bookkeeping(IrPrint *irp, IrInstructionAwaitBookkeeping *instruction) {
- fprintf(irp->f, "@awaitBookkeeping(");
- ir_print_other_instruction(irp, instruction->promise_result_type);
- fprintf(irp->f, ")");
-}
-
static void ir_print_save_err_ret_addr(IrPrint *irp, IrInstructionSaveErrRetAddr *instruction) {
fprintf(irp->f, "@saveErrRetAddr()");
}
@@ -1555,22 +1442,6 @@ static void ir_print_add_implicit_return_type(IrPrint *irp, IrInstructionAddImpl
fprintf(irp->f, ")");
}
-static void ir_print_merge_err_ret_traces(IrPrint *irp, IrInstructionMergeErrRetTraces *instruction) {
- fprintf(irp->f, "@mergeErrRetTraces(");
- ir_print_other_instruction(irp, instruction->coro_promise_ptr);
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->src_err_ret_trace_ptr);
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->dest_err_ret_trace_ptr);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_mark_err_ret_trace_ptr(IrPrint *irp, IrInstructionMarkErrRetTracePtr *instruction) {
- fprintf(irp->f, "@markErrRetTracePtr(");
- ir_print_other_instruction(irp, instruction->err_ret_trace_ptr);
- fprintf(irp->f, ")");
-}
-
static void ir_print_float_op(IrPrint *irp, IrInstructionFloatOp *instruction) {
fprintf(irp->f, "@%s(", float_op_to_name(instruction->op, false));
@@ -1638,6 +1509,47 @@ static void ir_print_union_init_named_field(IrPrint *irp, IrInstructionUnionInit
fprintf(irp->f, ")");
}
+static void ir_print_suspend_begin(IrPrint *irp, IrInstructionSuspendBegin *instruction) {
+ fprintf(irp->f, "@suspendBegin()");
+}
+
+static void ir_print_suspend_finish(IrPrint *irp, IrInstructionSuspendFinish *instruction) {
+ fprintf(irp->f, "@suspendFinish()");
+}
+
+static void ir_print_resume(IrPrint *irp, IrInstructionResume *instruction) {
+ fprintf(irp->f, "resume ");
+ ir_print_other_instruction(irp, instruction->frame);
+}
+
+static void ir_print_await_src(IrPrint *irp, IrInstructionAwaitSrc *instruction) {
+ fprintf(irp->f, "@await(");
+ ir_print_other_instruction(irp, instruction->frame);
+ fprintf(irp->f, ",");
+ ir_print_result_loc(irp, instruction->result_loc);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_await_gen(IrPrint *irp, IrInstructionAwaitGen *instruction) {
+ fprintf(irp->f, "@await(");
+ ir_print_other_instruction(irp, instruction->frame);
+ fprintf(irp->f, ",");
+ ir_print_other_instruction(irp, instruction->result_loc);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_spill_begin(IrPrint *irp, IrInstructionSpillBegin *instruction) {
+ fprintf(irp->f, "@spillBegin(");
+ ir_print_other_instruction(irp, instruction->operand);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_spill_end(IrPrint *irp, IrInstructionSpillEnd *instruction) {
+ fprintf(irp->f, "@spillEnd(");
+ ir_print_other_instruction(irp, &instruction->begin->base);
+ fprintf(irp->f, ")");
+}
+
static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
ir_print_prefix(irp, instruction);
switch (instruction->id) {
@@ -1727,12 +1639,12 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdArrayType:
ir_print_array_type(irp, (IrInstructionArrayType *)instruction);
break;
- case IrInstructionIdPromiseType:
- ir_print_promise_type(irp, (IrInstructionPromiseType *)instruction);
- break;
case IrInstructionIdSliceType:
ir_print_slice_type(irp, (IrInstructionSliceType *)instruction);
break;
+ case IrInstructionIdAnyFrameType:
+ ir_print_any_frame_type(irp, (IrInstructionAnyFrameType *)instruction);
+ break;
case IrInstructionIdGlobalAsm:
ir_print_global_asm(irp, (IrInstructionGlobalAsm *)instruction);
break;
@@ -1886,8 +1798,17 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdFrameAddress:
ir_print_frame_address(irp, (IrInstructionFrameAddress *)instruction);
break;
- case IrInstructionIdHandle:
- ir_print_handle(irp, (IrInstructionHandle *)instruction);
+ case IrInstructionIdFrameHandle:
+ ir_print_handle(irp, (IrInstructionFrameHandle *)instruction);
+ break;
+ case IrInstructionIdFrameType:
+ ir_print_frame_type(irp, (IrInstructionFrameType *)instruction);
+ break;
+ case IrInstructionIdFrameSizeSrc:
+ ir_print_frame_size_src(irp, (IrInstructionFrameSizeSrc *)instruction);
+ break;
+ case IrInstructionIdFrameSizeGen:
+ ir_print_frame_size_gen(irp, (IrInstructionFrameSizeGen *)instruction);
break;
case IrInstructionIdAlignOf:
ir_print_align_of(irp, (IrInstructionAlignOf *)instruction);
@@ -2006,9 +1927,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdResetResult:
ir_print_reset_result(irp, (IrInstructionResetResult *)instruction);
break;
- case IrInstructionIdResultPtr:
- ir_print_result_ptr(irp, (IrInstructionResultPtr *)instruction);
- break;
case IrInstructionIdOpaqueType:
ir_print_opaque_type(irp, (IrInstructionOpaqueType *)instruction);
break;
@@ -2030,69 +1948,15 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdErrorUnion:
ir_print_error_union(irp, (IrInstructionErrorUnion *)instruction);
break;
- case IrInstructionIdCancel:
- ir_print_cancel(irp, (IrInstructionCancel *)instruction);
- break;
- case IrInstructionIdGetImplicitAllocator:
- ir_print_get_implicit_allocator(irp, (IrInstructionGetImplicitAllocator *)instruction);
- break;
- case IrInstructionIdCoroId:
- ir_print_coro_id(irp, (IrInstructionCoroId *)instruction);
- break;
- case IrInstructionIdCoroAlloc:
- ir_print_coro_alloc(irp, (IrInstructionCoroAlloc *)instruction);
- break;
- case IrInstructionIdCoroSize:
- ir_print_coro_size(irp, (IrInstructionCoroSize *)instruction);
- break;
- case IrInstructionIdCoroBegin:
- ir_print_coro_begin(irp, (IrInstructionCoroBegin *)instruction);
- break;
- case IrInstructionIdCoroAllocFail:
- ir_print_coro_alloc_fail(irp, (IrInstructionCoroAllocFail *)instruction);
- break;
- case IrInstructionIdCoroSuspend:
- ir_print_coro_suspend(irp, (IrInstructionCoroSuspend *)instruction);
- break;
- case IrInstructionIdCoroEnd:
- ir_print_coro_end(irp, (IrInstructionCoroEnd *)instruction);
- break;
- case IrInstructionIdCoroFree:
- ir_print_coro_free(irp, (IrInstructionCoroFree *)instruction);
- break;
- case IrInstructionIdCoroResume:
- ir_print_coro_resume(irp, (IrInstructionCoroResume *)instruction);
- break;
- case IrInstructionIdCoroSave:
- ir_print_coro_save(irp, (IrInstructionCoroSave *)instruction);
- break;
- case IrInstructionIdCoroAllocHelper:
- ir_print_coro_alloc_helper(irp, (IrInstructionCoroAllocHelper *)instruction);
- break;
case IrInstructionIdAtomicRmw:
ir_print_atomic_rmw(irp, (IrInstructionAtomicRmw *)instruction);
break;
- case IrInstructionIdCoroPromise:
- ir_print_coro_promise(irp, (IrInstructionCoroPromise *)instruction);
- break;
- case IrInstructionIdPromiseResultType:
- ir_print_promise_result_type(irp, (IrInstructionPromiseResultType *)instruction);
- break;
- case IrInstructionIdAwaitBookkeeping:
- ir_print_await_bookkeeping(irp, (IrInstructionAwaitBookkeeping *)instruction);
- break;
case IrInstructionIdSaveErrRetAddr:
ir_print_save_err_ret_addr(irp, (IrInstructionSaveErrRetAddr *)instruction);
break;
case IrInstructionIdAddImplicitReturnType:
ir_print_add_implicit_return_type(irp, (IrInstructionAddImplicitReturnType *)instruction);
break;
- case IrInstructionIdMergeErrRetTraces:
- ir_print_merge_err_ret_traces(irp, (IrInstructionMergeErrRetTraces *)instruction);
- break;
- case IrInstructionIdMarkErrRetTracePtr:
- ir_print_mark_err_ret_trace_ptr(irp, (IrInstructionMarkErrRetTracePtr *)instruction);
- break;
case IrInstructionIdFloatOp:
ir_print_float_op(irp, (IrInstructionFloatOp *)instruction);
break;
@@ -2147,6 +2011,27 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdUnionInitNamedField:
ir_print_union_init_named_field(irp, (IrInstructionUnionInitNamedField *)instruction);
break;
+ case IrInstructionIdSuspendBegin:
+ ir_print_suspend_begin(irp, (IrInstructionSuspendBegin *)instruction);
+ break;
+ case IrInstructionIdSuspendFinish:
+ ir_print_suspend_finish(irp, (IrInstructionSuspendFinish *)instruction);
+ break;
+ case IrInstructionIdResume:
+ ir_print_resume(irp, (IrInstructionResume *)instruction);
+ break;
+ case IrInstructionIdAwaitSrc:
+ ir_print_await_src(irp, (IrInstructionAwaitSrc *)instruction);
+ break;
+ case IrInstructionIdAwaitGen:
+ ir_print_await_gen(irp, (IrInstructionAwaitGen *)instruction);
+ break;
+ case IrInstructionIdSpillBegin:
+ ir_print_spill_begin(irp, (IrInstructionSpillBegin *)instruction);
+ break;
+ case IrInstructionIdSpillEnd:
+ ir_print_spill_end(irp, (IrInstructionSpillEnd *)instruction);
+ break;
}
fprintf(irp->f, "\n");
}
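
The printers above track the builtin renames: @handle() is now @frame(), and @Frame/@frameSize are new. A minimal sketch of the three at the language level (illustrative names, not from the patch):

{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;

fn recordSelf() void {
    // @frame() (formerly @handle()) yields a pointer to the
    // currently executing invocation's frame.
    const self = @frame();
    comptime assert(@typeOf(self) == *@Frame(recordSelf));
    suspend;
}

test "@frame, @Frame, and @frameSize" {
    var frame = async recordSelf();
    // For a comptime-known function, @frameSize should agree
    // with @sizeOf of the frame type.
    assert(@frameSize(recordSelf) == @sizeOf(@Frame(recordSelf)));
    resume frame;
}
{#code_end#}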
diff --git a/src/main.cpp b/src/main.cpp
index 42d0850046..c0945ef180 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -85,7 +85,7 @@ static int print_full_usage(const char *arg0, FILE *file, int return_code) {
" --verbose-cc enable compiler debug output for C compilation\n"
" -dirafter [dir] same as -isystem but do it last\n"
" -isystem [dir] add additional search path for other .h files\n"
- " -mllvm [arg] forward an arg to LLVM's option processing\n"
+ " -mllvm [arg] (unsupported) forward an arg to LLVM's option processing\n"
" --override-std-dir [arg] override path to Zig standard library\n"
" --override-lib-dir [arg] override path to Zig lib library\n"
" -ffunction-sections places each function in a seperate section\n"
diff --git a/src/parser.cpp b/src/parser.cpp
index fe1f89ac92..1e7e36d0bd 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -113,7 +113,6 @@ static AstNode *ast_parse_multiply_op(ParseContext *pc);
static AstNode *ast_parse_prefix_op(ParseContext *pc);
static AstNode *ast_parse_prefix_type_op(ParseContext *pc);
static AstNode *ast_parse_suffix_op(ParseContext *pc);
-static AstNode *ast_parse_async_prefix(ParseContext *pc);
static AstNode *ast_parse_fn_call_argumnets(ParseContext *pc);
static AstNode *ast_parse_array_type_start(ParseContext *pc);
static AstNode *ast_parse_ptr_type_start(ParseContext *pc);
@@ -282,8 +281,8 @@ static AstNode *ast_parse_prefix_op_expr(
case NodeTypeAwaitExpr:
right = &prefix->data.await_expr.expr;
break;
- case NodeTypePromiseType:
- right = &prefix->data.promise_type.payload_type;
+ case NodeTypeAnyFrameType:
+ right = &prefix->data.anyframe_type.payload_type;
break;
case NodeTypeArrayType:
right = &prefix->data.array_type.child_type;
@@ -1167,7 +1166,6 @@ static AstNode *ast_parse_prefix_expr(ParseContext *pc) {
// <- AsmExpr
// / IfExpr
// / KEYWORD_break BreakLabel? Expr?
-// / KEYWORD_cancel Expr
// / KEYWORD_comptime Expr
// / KEYWORD_continue BreakLabel?
// / KEYWORD_resume Expr
@@ -1195,14 +1193,6 @@ static AstNode *ast_parse_primary_expr(ParseContext *pc) {
return res;
}
- Token *cancel = eat_token_if(pc, TokenIdKeywordCancel);
- if (cancel != nullptr) {
- AstNode *expr = ast_expect(pc, ast_parse_expr);
- AstNode *res = ast_create_node(pc, NodeTypeCancel, cancel);
- res->data.cancel_expr.expr = expr;
- return res;
- }
-
Token *comptime = eat_token_if(pc, TokenIdKeywordCompTime);
if (comptime != nullptr) {
AstNode *expr = ast_expect(pc, ast_parse_expr);
@@ -1398,22 +1388,18 @@ static AstNode *ast_parse_error_union_expr(ParseContext *pc) {
}
// SuffixExpr
-// <- AsyncPrefix PrimaryTypeExpr SuffixOp* FnCallArguments
+// <- KEYWORD_async PrimaryTypeExpr SuffixOp* FnCallArguments
// / PrimaryTypeExpr (SuffixOp / FnCallArguments)*
static AstNode *ast_parse_suffix_expr(ParseContext *pc) {
- AstNode *async_call = ast_parse_async_prefix(pc);
- if (async_call != nullptr) {
+ Token *async_token = eat_token_if(pc, TokenIdKeywordAsync);
+ if (async_token != nullptr) {
if (eat_token_if(pc, TokenIdKeywordFn) != nullptr) {
// HACK: If we see the keyword `fn`, then we assume that
// we are parsing an async fn proto, and not a call.
// We therefore put back all tokens consumed by the async
// prefix...
- // HACK: This loop is not actually enough to put back all the
- // tokens. Let's hope this is fine for most code right now
- // and wait till we get the async rework for a syntax update.
- do {
- put_back_token(pc);
- } while (peek_token(pc)->id != TokenIdKeywordAsync);
+ put_back_token(pc);
+ put_back_token(pc);
return ast_parse_primary_type_expr(pc);
}
@@ -1455,10 +1441,14 @@ static AstNode *ast_parse_suffix_expr(ParseContext *pc) {
ast_invalid_token_error(pc, peek_token(pc));
assert(args->type == NodeTypeFnCallExpr);
- async_call->data.fn_call_expr.fn_ref_expr = child;
- async_call->data.fn_call_expr.params = args->data.fn_call_expr.params;
- async_call->data.fn_call_expr.is_builtin = false;
- return async_call;
+
+ AstNode *res = ast_create_node(pc, NodeTypeFnCallExpr, async_token);
+ res->data.fn_call_expr.is_async = true;
+ res->data.fn_call_expr.seen = false;
+ res->data.fn_call_expr.fn_ref_expr = child;
+ res->data.fn_call_expr.params = args->data.fn_call_expr.params;
+ res->data.fn_call_expr.is_builtin = false;
+ return res;
}
AstNode *res = ast_parse_primary_type_expr(pc);
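
For reference, the call syntax this production now accepts — a sketch with illustrative names, not part of the patch:

{#code_begin|test#}
fn tick() void {
    suspend;
}

test "async call takes no allocator clause" {
    // Formerly `async<allocator> tick()`; the frame now lives in
    // the result location of the call, here a local variable.
    var frame = async tick();
    resume frame;
}
{#code_end#}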
@@ -1510,7 +1500,7 @@ static AstNode *ast_parse_suffix_expr(ParseContext *pc) {
// <- BUILTINIDENTIFIER FnCallArguments
// / CHAR_LITERAL
// / ContainerDecl
-// / DOT IDENTIFIER
+// / DOT IDENTIFIER
// / ErrorSetDecl
// / FLOAT
// / FnProto
@@ -1643,9 +1633,9 @@ static AstNode *ast_parse_primary_type_expr(ParseContext *pc) {
if (null != nullptr)
return ast_create_node(pc, NodeTypeNullLiteral, null);
- Token *promise = eat_token_if(pc, TokenIdKeywordPromise);
- if (promise != nullptr)
- return ast_create_node(pc, NodeTypePromiseType, promise);
+ Token *anyframe = eat_token_if(pc, TokenIdKeywordAnyFrame);
+ if (anyframe != nullptr)
+ return ast_create_node(pc, NodeTypeAnyFrameType, anyframe);
Token *true_token = eat_token_if(pc, TokenIdKeywordTrue);
if (true_token != nullptr) {
@@ -2025,7 +2015,7 @@ static AstNode *ast_parse_link_section(ParseContext *pc) {
// <- KEYWORD_nakedcc
// / KEYWORD_stdcallcc
// / KEYWORD_extern
-// / KEYWORD_async (LARROW TypeExpr RARROW)?
+// / KEYWORD_async
static Optional<AstNodeFnProto> ast_parse_fn_cc(ParseContext *pc) {
AstNodeFnProto res = {};
if (eat_token_if(pc, TokenIdKeywordNakedCC) != nullptr) {
@@ -2042,11 +2032,6 @@ static Optional ast_parse_fn_cc(ParseContext *pc) {
}
if (eat_token_if(pc, TokenIdKeywordAsync) != nullptr) {
res.cc = CallingConventionAsync;
- if (eat_token_if(pc, TokenIdCmpLessThan) == nullptr)
- return Optional<AstNodeFnProto>::some(res);
-
- res.async_allocator_type = ast_expect(pc, ast_parse_type_expr);
- expect_token(pc, TokenIdCmpGreaterThan);
         return Optional<AstNodeFnProto>::some(res);
}
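
The corresponding prototype form, under the same assumptions — `async` as a calling convention no longer declares an allocator type:

{#code_begin|test#}
async fn pump() void {
    suspend;
}

test "async calling convention without an allocator type" {
    var frame = async pump();
    resume frame;
}
{#code_end#}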
@@ -2522,7 +2507,7 @@ static AstNode *ast_parse_prefix_op(ParseContext *pc) {
// PrefixTypeOp
// <- QUESTIONMARK
-// / KEYWORD_promise MINUSRARROW
+// / KEYWORD_anyframe MINUSRARROW
// / ArrayTypeStart (ByteAlign / KEYWORD_const / KEYWORD_volatile)*
// / PtrTypeStart (KEYWORD_align LPAREN Expr (COLON INTEGER COLON INTEGER)? RPAREN / KEYWORD_const / KEYWORD_volatile)*
static AstNode *ast_parse_prefix_type_op(ParseContext *pc) {
@@ -2533,10 +2518,10 @@ static AstNode *ast_parse_prefix_type_op(ParseContext *pc) {
return res;
}
- Token *promise = eat_token_if(pc, TokenIdKeywordPromise);
- if (promise != nullptr) {
+ Token *anyframe = eat_token_if(pc, TokenIdKeywordAnyFrame);
+ if (anyframe != nullptr) {
if (eat_token_if(pc, TokenIdArrow) != nullptr) {
- AstNode *res = ast_create_node(pc, NodeTypePromiseType, promise);
+ AstNode *res = ast_create_node(pc, NodeTypeAnyFrameType, anyframe);
return res;
}
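
In type position the change reads as follows — a sketch with illustrative names, not part of the patch. Bare anyframe is a type-erased handle; anyframe->T (parsed just above) additionally records the result type:

{#code_begin|test#}
var waiter: anyframe = undefined;

fn park() void {
    suspend {
        // Publish a type-erased handle so another function can
        // resume this frame later.
        waiter = @frame();
    }
}

test "anyframe as a type-erased frame handle" {
    _ = async park();
    resume waiter;
}
{#code_end#}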
@@ -2671,24 +2656,6 @@ static AstNode *ast_parse_suffix_op(ParseContext *pc) {
return nullptr;
}
-// AsyncPrefix <- KEYWORD_async (LARROW PrefixExpr RARROW)?
-static AstNode *ast_parse_async_prefix(ParseContext *pc) {
- Token *async = eat_token_if(pc, TokenIdKeywordAsync);
- if (async == nullptr)
- return nullptr;
-
- AstNode *res = ast_create_node(pc, NodeTypeFnCallExpr, async);
- res->data.fn_call_expr.is_async = true;
- res->data.fn_call_expr.seen = false;
- if (eat_token_if(pc, TokenIdCmpLessThan) != nullptr) {
- AstNode *prefix_expr = ast_expect(pc, ast_parse_prefix_expr);
- expect_token(pc, TokenIdCmpGreaterThan);
- res->data.fn_call_expr.async_allocator = prefix_expr;
- }
-
- return res;
-}
-
// FnCallArguments <- LPAREN ExprList RPAREN
static AstNode *ast_parse_fn_call_argumnets(ParseContext *pc) {
Token *paren = eat_token_if(pc, TokenIdLParen);
@@ -2858,7 +2825,6 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
visit_node_list(&node->data.fn_proto.params, visit, context);
visit_field(&node->data.fn_proto.align_expr, visit, context);
visit_field(&node->data.fn_proto.section_expr, visit, context);
- visit_field(&node->data.fn_proto.async_allocator_type, visit, context);
break;
case NodeTypeFnDef:
visit_field(&node->data.fn_def.fn_proto, visit, context);
@@ -2918,7 +2884,6 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
case NodeTypeFnCallExpr:
visit_field(&node->data.fn_call_expr.fn_ref_expr, visit, context);
visit_node_list(&node->data.fn_call_expr.params, visit, context);
- visit_field(&node->data.fn_call_expr.async_allocator, visit, context);
break;
case NodeTypeArrayAccessExpr:
visit_field(&node->data.array_access_expr.array_ref_expr, visit, context);
@@ -3034,8 +2999,8 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
case NodeTypeInferredArrayType:
visit_field(&node->data.array_type.child_type, visit, context);
break;
- case NodeTypePromiseType:
- visit_field(&node->data.promise_type.payload_type, visit, context);
+ case NodeTypeAnyFrameType:
+ visit_field(&node->data.anyframe_type.payload_type, visit, context);
break;
case NodeTypeErrorType:
// none
@@ -3047,9 +3012,6 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
case NodeTypeErrorSetDecl:
visit_node_list(&node->data.err_set_decl.decls, visit, context);
break;
- case NodeTypeCancel:
- visit_field(&node->data.cancel_expr.expr, visit, context);
- break;
case NodeTypeResume:
visit_field(&node->data.resume_expr.expr, visit, context);
break;
diff --git a/src/target.cpp b/src/target.cpp
index 4de7c9ce23..346772ce13 100644
--- a/src/target.cpp
+++ b/src/target.cpp
@@ -665,7 +665,63 @@ ZigLLVM_SubArchType target_subarch_enum(SubArchList sub_arch_list, size_t i) {
}
const char *target_subarch_name(ZigLLVM_SubArchType subarch) {
- return ZigLLVMGetSubArchTypeName(subarch);
+ switch (subarch) {
+ case ZigLLVM_NoSubArch:
+ return "";
+ case ZigLLVM_ARMSubArch_v8_5a:
+ return "v8_5a";
+ case ZigLLVM_ARMSubArch_v8_4a:
+ return "v8_4a";
+ case ZigLLVM_ARMSubArch_v8_3a:
+ return "v8_3a";
+ case ZigLLVM_ARMSubArch_v8_2a:
+ return "v8_2a";
+ case ZigLLVM_ARMSubArch_v8_1a:
+ return "v8_1a";
+ case ZigLLVM_ARMSubArch_v8:
+ return "v8";
+ case ZigLLVM_ARMSubArch_v8r:
+ return "v8r";
+ case ZigLLVM_ARMSubArch_v8m_baseline:
+ return "v8m_baseline";
+ case ZigLLVM_ARMSubArch_v8m_mainline:
+ return "v8m_mainline";
+ case ZigLLVM_ARMSubArch_v8_1m_mainline:
+ return "v8_1m_mainline";
+ case ZigLLVM_ARMSubArch_v7:
+ return "v7";
+ case ZigLLVM_ARMSubArch_v7em:
+ return "v7em";
+ case ZigLLVM_ARMSubArch_v7m:
+ return "v7m";
+ case ZigLLVM_ARMSubArch_v7s:
+ return "v7s";
+ case ZigLLVM_ARMSubArch_v7k:
+ return "v7k";
+ case ZigLLVM_ARMSubArch_v7ve:
+ return "v7ve";
+ case ZigLLVM_ARMSubArch_v6:
+ return "v6";
+ case ZigLLVM_ARMSubArch_v6m:
+ return "v6m";
+ case ZigLLVM_ARMSubArch_v6k:
+ return "v6k";
+ case ZigLLVM_ARMSubArch_v6t2:
+ return "v6t2";
+ case ZigLLVM_ARMSubArch_v5:
+ return "v5";
+ case ZigLLVM_ARMSubArch_v5te:
+ return "v5te";
+ case ZigLLVM_ARMSubArch_v4t:
+ return "v4t";
+ case ZigLLVM_KalimbaSubArch_v3:
+ return "v3";
+ case ZigLLVM_KalimbaSubArch_v4:
+ return "v4";
+ case ZigLLVM_KalimbaSubArch_v5:
+ return "v5";
+ case ZigLLVM_MipsSubArch_r6:
+ return "r6";
+ }
+ zig_unreachable();
}
size_t target_subarch_list_count(void) {
@@ -1827,3 +1883,7 @@ bool target_libc_needs_crti_crtn(const ZigTarget *target) {
bool target_is_riscv(const ZigTarget *target) {
return target->arch == ZigLLVM_riscv32 || target->arch == ZigLLVM_riscv64;
}
+
+unsigned target_fn_align(const ZigTarget *target) {
+ return 16;
+}
diff --git a/src/target.hpp b/src/target.hpp
index 5a9e119af1..b89b0bdc9d 100644
--- a/src/target.hpp
+++ b/src/target.hpp
@@ -201,4 +201,6 @@ size_t target_libc_count(void);
void target_libc_enum(size_t index, ZigTarget *out_target);
bool target_libc_needs_crti_crtn(const ZigTarget *target);
+unsigned target_fn_align(const ZigTarget *target);
+
#endif
diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index 783b6e0e20..84f3f2c0ec 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -109,11 +109,11 @@ static const struct ZigKeyword zig_keywords[] = {
{"align", TokenIdKeywordAlign},
{"allowzero", TokenIdKeywordAllowZero},
{"and", TokenIdKeywordAnd},
+ {"anyframe", TokenIdKeywordAnyFrame},
{"asm", TokenIdKeywordAsm},
{"async", TokenIdKeywordAsync},
{"await", TokenIdKeywordAwait},
{"break", TokenIdKeywordBreak},
- {"cancel", TokenIdKeywordCancel},
{"catch", TokenIdKeywordCatch},
{"comptime", TokenIdKeywordCompTime},
{"const", TokenIdKeywordConst},
@@ -136,7 +136,6 @@ static const struct ZigKeyword zig_keywords[] = {
{"or", TokenIdKeywordOr},
{"orelse", TokenIdKeywordOrElse},
{"packed", TokenIdKeywordPacked},
- {"promise", TokenIdKeywordPromise},
{"pub", TokenIdKeywordPub},
{"resume", TokenIdKeywordResume},
{"return", TokenIdKeywordReturn},
@@ -1531,9 +1530,9 @@ const char * token_name(TokenId id) {
case TokenIdKeywordAwait: return "await";
case TokenIdKeywordResume: return "resume";
case TokenIdKeywordSuspend: return "suspend";
- case TokenIdKeywordCancel: return "cancel";
case TokenIdKeywordAlign: return "align";
case TokenIdKeywordAnd: return "and";
+ case TokenIdKeywordAnyFrame: return "anyframe";
case TokenIdKeywordAsm: return "asm";
case TokenIdKeywordBreak: return "break";
case TokenIdKeywordCatch: return "catch";
@@ -1558,7 +1557,6 @@ const char * token_name(TokenId id) {
case TokenIdKeywordOr: return "or";
case TokenIdKeywordOrElse: return "orelse";
case TokenIdKeywordPacked: return "packed";
- case TokenIdKeywordPromise: return "promise";
case TokenIdKeywordPub: return "pub";
case TokenIdKeywordReturn: return "return";
case TokenIdKeywordLinkSection: return "linksection";
diff --git a/src/tokenizer.hpp b/src/tokenizer.hpp
index 83dbe99471..ce62f5dc87 100644
--- a/src/tokenizer.hpp
+++ b/src/tokenizer.hpp
@@ -53,11 +53,11 @@ enum TokenId {
TokenIdKeywordAlign,
TokenIdKeywordAllowZero,
TokenIdKeywordAnd,
+ TokenIdKeywordAnyFrame,
TokenIdKeywordAsm,
TokenIdKeywordAsync,
TokenIdKeywordAwait,
TokenIdKeywordBreak,
- TokenIdKeywordCancel,
TokenIdKeywordCatch,
TokenIdKeywordCompTime,
TokenIdKeywordConst,
@@ -81,7 +81,6 @@ enum TokenId {
TokenIdKeywordOr,
TokenIdKeywordOrElse,
TokenIdKeywordPacked,
- TokenIdKeywordPromise,
TokenIdKeywordPub,
TokenIdKeywordResume,
TokenIdKeywordReturn,
diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp
index b82d0c51ba..96be3344d9 100644
--- a/src/zig_llvm.cpp
+++ b/src/zig_llvm.cpp
@@ -42,7 +42,6 @@
#include
#include
#include
-#include <llvm/Transforms/Coroutines.h>
#include
#include
#include
@@ -202,8 +201,6 @@ bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machine_ref, LLVMM
PMBuilder->Inliner = createFunctionInliningPass(PMBuilder->OptLevel, PMBuilder->SizeLevel, false);
}
- addCoroutinePassesToExtensionPoints(*PMBuilder);
-
// Set up the per-function pass manager.
legacy::FunctionPassManager FPM = legacy::FunctionPassManager(module);
auto tliwp = new(std::nothrow) TargetLibraryInfoWrapperPass(tlii);
@@ -797,25 +794,25 @@ const char *ZigLLVMGetSubArchTypeName(ZigLLVM_SubArchType sub_arch) {
case ZigLLVM_NoSubArch:
return "";
case ZigLLVM_ARMSubArch_v8_5a:
- return "v8_5a";
+ return "v8.5a";
case ZigLLVM_ARMSubArch_v8_4a:
- return "v8_4a";
+ return "v8.4a";
case ZigLLVM_ARMSubArch_v8_3a:
- return "v8_3a";
+ return "v8.3a";
case ZigLLVM_ARMSubArch_v8_2a:
- return "v8_2a";
+ return "v8.2a";
case ZigLLVM_ARMSubArch_v8_1a:
- return "v8_1a";
+ return "v8.1a";
case ZigLLVM_ARMSubArch_v8:
return "v8";
case ZigLLVM_ARMSubArch_v8r:
return "v8r";
case ZigLLVM_ARMSubArch_v8m_baseline:
- return "v8m_baseline";
+ return "v8m.base";
case ZigLLVM_ARMSubArch_v8m_mainline:
- return "v8m_mainline";
+ return "v8m.main";
case ZigLLVM_ARMSubArch_v8_1m_mainline:
- return "v8_1m_mainline";
+ return "v8.1m.main";
case ZigLLVM_ARMSubArch_v7:
return "v7";
case ZigLLVM_ARMSubArch_v7em:
@@ -909,6 +906,14 @@ LLVMValueRef ZigLLVMBuildAShrExact(LLVMBuilderRef builder, LLVMValueRef LHS, LLV
return wrap(unwrap(builder)->CreateAShr(unwrap(LHS), unwrap(RHS), name, true));
}
+void ZigLLVMSetTailCall(LLVMValueRef Call) {
+ unwrap(Call)->setTailCallKind(CallInst::TCK_MustTail);
+}
+
+void ZigLLVMFunctionSetPrefixData(LLVMValueRef function, LLVMValueRef data) {
+ unwrap(function)->setPrefixData(unwrap(data));
+}
+
class MyOStream: public raw_ostream {
public:
diff --git a/src/zig_llvm.h b/src/zig_llvm.h
index 10b6d24364..8f4d875cfe 100644
--- a/src/zig_llvm.h
+++ b/src/zig_llvm.h
@@ -211,6 +211,8 @@ ZIG_EXTERN_C LLVMValueRef ZigLLVMInsertDeclare(struct ZigLLVMDIBuilder *dibuilde
ZIG_EXTERN_C struct ZigLLVMDILocation *ZigLLVMGetDebugLoc(unsigned line, unsigned col, struct ZigLLVMDIScope *scope);
ZIG_EXTERN_C void ZigLLVMSetFastMath(LLVMBuilderRef builder_wrapped, bool on_state);
+ZIG_EXTERN_C void ZigLLVMSetTailCall(LLVMValueRef Call);
+ZIG_EXTERN_C void ZigLLVMFunctionSetPrefixData(LLVMValueRef fn, LLVMValueRef data);
ZIG_EXTERN_C void ZigLLVMAddFunctionAttr(LLVMValueRef fn, const char *attr_name, const char *attr_value);
ZIG_EXTERN_C void ZigLLVMAddByValAttr(LLVMValueRef fn_ref, unsigned ArgNo, LLVMTypeRef type_val);
diff --git a/std/c/freebsd.zig b/std/c/freebsd.zig
index bcc60e65ed..3d5736d37b 100644
--- a/std/c/freebsd.zig
+++ b/std/c/freebsd.zig
@@ -6,3 +6,4 @@ pub const _errno = __error;
pub extern "c" fn getdents(fd: c_int, buf_ptr: [*]u8, nbytes: usize) usize;
pub extern "c" fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) c_int;
+pub extern "c" fn getrandom(buf_ptr: [*]u8, buf_len: usize, flags: c_uint) c_int;
diff --git a/std/debug.zig b/std/debug.zig
index 32f96d3e15..d1c17343ef 100644
--- a/std/debug.zig
+++ b/std/debug.zig
@@ -1024,8 +1024,7 @@ pub fn openElfDebugInfo(
elf_seekable_stream: *DwarfSeekableStream,
elf_in_stream: *DwarfInStream,
) !DwarfInfo {
- var efile: elf.Elf = undefined;
- try efile.openStream(allocator, elf_seekable_stream, elf_in_stream);
+ var efile = try elf.Elf.openStream(allocator, elf_seekable_stream, elf_in_stream);
errdefer efile.close();
var di = DwarfInfo{
diff --git a/std/elf.zig b/std/elf.zig
index c605a177a5..37635895fd 100644
--- a/std/elf.zig
+++ b/std/elf.zig
@@ -356,7 +356,6 @@ pub const SectionHeader = struct {
pub const Elf = struct {
seekable_stream: *io.SeekableStream(anyerror, anyerror),
in_stream: *io.InStream(anyerror),
- auto_close_stream: bool,
is_64: bool,
endian: builtin.Endian,
file_type: FileType,
@@ -368,25 +367,23 @@ pub const Elf = struct {
string_section: *SectionHeader,
section_headers: []SectionHeader,
allocator: *mem.Allocator,
- prealloc_file: File,
/// Call close when done.
- pub fn openPath(elf: *Elf, allocator: *mem.Allocator, path: []const u8) !void {
+ pub fn openPath(allocator: *mem.Allocator, path: []const u8) !Elf {
@compileError("TODO implement");
}
/// Call close when done.
- pub fn openFile(elf: *Elf, allocator: *mem.Allocator, file: File) !void {
+ pub fn openFile(allocator: *mem.Allocator, file: File) !Elf {
@compileError("TODO implement");
}
pub fn openStream(
- elf: *Elf,
allocator: *mem.Allocator,
seekable_stream: *io.SeekableStream(anyerror, anyerror),
in: *io.InStream(anyerror),
- ) !void {
- elf.auto_close_stream = false;
+ ) !Elf {
+ var elf: Elf = undefined;
elf.allocator = allocator;
elf.seekable_stream = seekable_stream;
elf.in_stream = in;
@@ -523,12 +520,12 @@ pub const Elf = struct {
// not a string table
return error.InvalidFormat;
}
+
+ return elf;
}
pub fn close(elf: *Elf) void {
elf.allocator.free(elf.section_headers);
-
- if (elf.auto_close_stream) elf.prealloc_file.close();
}
pub fn findSection(elf: *Elf, name: []const u8) !?*SectionHeader {
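
The `Elf.openStream` rewrite above follows a pattern this commit applies repeatedly: instead of the caller declaring `var x: T = undefined;` and passing a pointer to be filled in, the init function builds the value locally and returns it. A minimal sketch with a made-up `Thing` type, not from this diff:

{#code_begin|test#}
const std = @import("std");

const Thing = struct {
    count: usize,

    // Old shape: `fn init(self: *Thing) void { self.count = 0; }`,
    // with the caller declaring `var t: Thing = undefined;` first.
    fn init() Thing {
        var thing: Thing = undefined;
        thing.count = 0;
        return thing;
    }
};

test "init functions can return the struct by value" {
    var t = Thing.init();
    t.count += 1;
    std.debug.assert(t.count == 1);
}
{#code_end#}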
diff --git a/std/event/channel.zig b/std/event/channel.zig
index bb2fbbf126..a397d280de 100644
--- a/std/event/channel.zig
+++ b/std/event/channel.zig
@@ -2,8 +2,6 @@ const std = @import("../std.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const testing = std.testing;
-const AtomicRmwOp = builtin.AtomicRmwOp;
-const AtomicOrder = builtin.AtomicOrder;
const Loop = std.event.Loop;
/// many producer, many consumer, thread-safe, runtime configurable buffer size
@@ -77,24 +75,20 @@ pub fn Channel(comptime T: type) type {
/// must be called when all calls to put and get have suspended and no more calls occur
pub fn destroy(self: *SelfChannel) void {
while (self.getters.get()) |get_node| {
- cancel get_node.data.tick_node.data;
+ resume get_node.data.tick_node.data;
}
while (self.putters.get()) |put_node| {
- cancel put_node.data.tick_node.data;
+ resume put_node.data.tick_node.data;
}
self.loop.allocator.free(self.buffer_nodes);
self.loop.allocator.destroy(self);
}
- /// puts a data item in the channel. The promise completes when the value has been added to the
+ /// puts a data item in the channel. The function returns when the value has been added to the
/// buffer, or in the case of a zero size buffer, when the item has been retrieved by a getter.
- pub async fn put(self: *SelfChannel, data: T) void {
- // TODO fix this workaround
- suspend {
- resume @handle();
- }
-
- var my_tick_node = Loop.NextTickNode.init(@handle());
+ /// Or when the channel is destroyed.
+ pub fn put(self: *SelfChannel, data: T) void {
+ var my_tick_node = Loop.NextTickNode.init(@frame());
var queue_node = std.atomic.Queue(PutNode).Node.init(PutNode{
.tick_node = &my_tick_node,
.data = data,
@@ -102,35 +96,29 @@ pub fn Channel(comptime T: type) type {
// TODO test canceling a put()
errdefer {
- _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, &self.put_count, .Sub, 1, .SeqCst);
const need_dispatch = !self.putters.remove(&queue_node);
self.loop.cancelOnNextTick(&my_tick_node);
if (need_dispatch) {
// oops we made the put_count incorrect for a period of time. fix by dispatching.
- _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, &self.put_count, .Add, 1, .SeqCst);
self.dispatch();
}
}
suspend {
self.putters.put(&queue_node);
- _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, &self.put_count, .Add, 1, .SeqCst);
self.dispatch();
}
}
- /// await this function to get an item from the channel. If the buffer is empty, the promise will
+ /// await this function to get an item from the channel. If the buffer is empty, the frame will
/// complete when the next item is put in the channel.
pub async fn get(self: *SelfChannel) T {
- // TODO fix this workaround
- suspend {
- resume @handle();
- }
-
- // TODO integrate this function with named return values
- // so we can get rid of this extra result copy
+ // TODO https://github.com/ziglang/zig/issues/2765
var result: T = undefined;
- var my_tick_node = Loop.NextTickNode.init(@handle());
+ var my_tick_node = Loop.NextTickNode.init(@frame());
var queue_node = std.atomic.Queue(GetNode).Node.init(GetNode{
.tick_node = &my_tick_node,
.data = GetNode.Data{
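
The `.Sub` and `.SeqCst` spellings above (and the `.linux`-style switch prongs later in this diff) are anonymous enum literals: when the enum type is already known from context, the type name can be omitted, which is why the `AtomicRmwOp` and `AtomicOrder` imports were dropped. A standalone sketch, assuming Zig 0.5:

{#code_begin|test#}
const std = @import("std");

const Os = enum {
    linux,
    windows,
    macosx,
};

fn isPosixLike(os: Os) bool {
    return switch (os) {
        // Inferred as Os.linux and Os.macosx from the operand type.
        .linux, .macosx => true,
        .windows => false,
    };
}

test "anonymous enum literals" {
    std.debug.assert(isPosixLike(.linux));
    std.debug.assert(!isPosixLike(.windows));
}
{#code_end#}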
@@ -140,19 +128,19 @@ pub fn Channel(comptime T: type) type {
// TODO test canceling a get()
errdefer {
- _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, &self.get_count, .Sub, 1, .SeqCst);
const need_dispatch = !self.getters.remove(&queue_node);
self.loop.cancelOnNextTick(&my_tick_node);
if (need_dispatch) {
// oops we made the get_count incorrect for a period of time. fix by dispatching.
- _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, &self.get_count, .Add, 1, .SeqCst);
self.dispatch();
}
}
suspend {
self.getters.put(&queue_node);
- _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, &self.get_count, .Add, 1, .SeqCst);
self.dispatch();
}
@@ -173,15 +161,10 @@ pub fn Channel(comptime T: type) type {
/// Await is necessary for locking purposes. The function will be resumed after checking the channel
/// for data and will not wait for data to be available.
pub async fn getOrNull(self: *SelfChannel) ?T {
- // TODO fix this workaround
- suspend {
- resume @handle();
- }
-
// TODO integrate this function with named return values
// so we can get rid of this extra result copy
var result: ?T = null;
- var my_tick_node = Loop.NextTickNode.init(@handle());
+ var my_tick_node = Loop.NextTickNode.init(@frame());
var or_null_node = std.atomic.Queue(*std.atomic.Queue(GetNode).Node).Node.init(undefined);
var queue_node = std.atomic.Queue(GetNode).Node.init(GetNode{
.tick_node = &my_tick_node,
@@ -197,19 +180,19 @@ pub fn Channel(comptime T: type) type {
// TODO test canceling getOrNull
errdefer {
_ = self.or_null_queue.remove(&or_null_node);
- _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, &self.get_count, .Sub, 1, .SeqCst);
const need_dispatch = !self.getters.remove(&queue_node);
self.loop.cancelOnNextTick(&my_tick_node);
if (need_dispatch) {
// oops we made the get_count incorrect for a period of time. fix by dispatching.
- _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, &self.get_count, .Add, 1, .SeqCst);
self.dispatch();
}
}
suspend {
self.getters.put(&queue_node);
- _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, &self.get_count, .Add, 1, .SeqCst);
self.or_null_queue.put(&or_null_node);
self.dispatch();
@@ -219,21 +202,21 @@ pub fn Channel(comptime T: type) type {
fn dispatch(self: *SelfChannel) void {
// set the "need dispatch" flag
- _ = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.need_dispatch, .Xchg, 1, .SeqCst);
lock: while (true) {
// set the lock flag
- const prev_lock = @atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ const prev_lock = @atomicRmw(u8, &self.dispatch_lock, .Xchg, 1, .SeqCst);
if (prev_lock != 0) return;
// clear the need_dispatch flag since we're about to do it
- _ = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.need_dispatch, .Xchg, 0, .SeqCst);
while (true) {
one_dispatch: {
// later we correct these extra subtractions
- var get_count = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
- var put_count = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ var get_count = @atomicRmw(usize, &self.get_count, .Sub, 1, .SeqCst);
+ var put_count = @atomicRmw(usize, &self.put_count, .Sub, 1, .SeqCst);
// transfer self.buffer to self.getters
while (self.buffer_len != 0) {
@@ -252,7 +235,7 @@ pub fn Channel(comptime T: type) type {
self.loop.onNextTick(get_node.tick_node);
self.buffer_len -= 1;
- get_count = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ get_count = @atomicRmw(usize, &self.get_count, .Sub, 1, .SeqCst);
}
// direct transfer self.putters to self.getters
@@ -272,8 +255,8 @@ pub fn Channel(comptime T: type) type {
self.loop.onNextTick(get_node.tick_node);
self.loop.onNextTick(put_node.tick_node);
- get_count = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
- put_count = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ get_count = @atomicRmw(usize, &self.get_count, .Sub, 1, .SeqCst);
+ put_count = @atomicRmw(usize, &self.put_count, .Sub, 1, .SeqCst);
}
// transfer self.putters to self.buffer
@@ -285,13 +268,13 @@ pub fn Channel(comptime T: type) type {
self.buffer_index +%= 1;
self.buffer_len += 1;
- put_count = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ put_count = @atomicRmw(usize, &self.put_count, .Sub, 1, .SeqCst);
}
}
// undo the extra subtractions
- _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
- _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, &self.get_count, .Add, 1, .SeqCst);
+ _ = @atomicRmw(usize, &self.put_count, .Add, 1, .SeqCst);
// All the "get or null" functions should resume now.
var remove_count: usize = 0;
@@ -300,18 +283,18 @@ pub fn Channel(comptime T: type) type {
self.loop.onNextTick(or_null_node.data.data.tick_node);
}
if (remove_count != 0) {
- _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, remove_count, AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, &self.get_count, .Sub, remove_count, .SeqCst);
}
// clear need-dispatch flag
- const need_dispatch = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ const need_dispatch = @atomicRmw(u8, &self.need_dispatch, .Xchg, 0, .SeqCst);
if (need_dispatch != 0) continue;
- const my_lock = @atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ const my_lock = @atomicRmw(u8, &self.dispatch_lock, .Xchg, 0, .SeqCst);
assert(my_lock != 0);
// we have to check again now that we unlocked
- if (@atomicLoad(u8, &self.need_dispatch, AtomicOrder.SeqCst) != 0) continue :lock;
+ if (@atomicLoad(u8, &self.need_dispatch, .SeqCst) != 0) continue :lock;
return;
}
@@ -324,51 +307,41 @@ test "std.event.Channel" {
// https://github.com/ziglang/zig/issues/1908
if (builtin.single_threaded) return error.SkipZigTest;
- const allocator = std.heap.direct_allocator;
-
var loop: Loop = undefined;
// TODO make a multi threaded test
- try loop.initSingleThreaded(allocator);
+ try loop.initSingleThreaded(std.heap.direct_allocator);
defer loop.deinit();
const channel = try Channel(i32).create(&loop, 0);
defer channel.destroy();
- const handle = try async testChannelGetter(&loop, channel);
- defer cancel handle;
-
- const putter = try async testChannelPutter(channel);
- defer cancel putter;
+ const handle = async testChannelGetter(&loop, channel);
+ const putter = async testChannelPutter(channel);
loop.run();
}
async fn testChannelGetter(loop: *Loop, channel: *Channel(i32)) void {
- errdefer @panic("test failed");
-
- const value1_promise = try async channel.get();
- const value1 = await value1_promise;
+ const value1 = channel.get();
testing.expect(value1 == 1234);
- const value2_promise = try async channel.get();
- const value2 = await value2_promise;
+ const value2 = channel.get();
testing.expect(value2 == 4567);
- const value3_promise = try async channel.getOrNull();
- const value3 = await value3_promise;
+ const value3 = channel.getOrNull();
testing.expect(value3 == null);
- const last_put = try async testPut(channel, 4444);
- const value4 = await try async channel.getOrNull();
+ var last_put = async testPut(channel, 4444);
+ const value4 = channel.getOrNull();
testing.expect(value4.? == 4444);
await last_put;
}
async fn testChannelPutter(channel: *Channel(i32)) void {
- await (async channel.put(1234) catch @panic("out of memory"));
- await (async channel.put(4567) catch @panic("out of memory"));
+ channel.put(1234);
+ channel.put(4567);
}
async fn testPut(channel: *Channel(i32), value: i32) void {
- await (async channel.put(value) catch @panic("out of memory"));
+ channel.put(value);
}
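
The channel tests above now call `put` and `get` like ordinary functions, and `destroy` resumes parked frames instead of cancelling them. Stripped of the channel's queues and atomics, the parking mechanism reduces to the following sketch (not the channel's actual types):

{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;

var parked: ?anyframe = null;
var delivered: i32 = 0;

fn consumer() void {
    suspend {
        // Park this frame where another piece of code can find it;
        // @frame() replaces the old @handle() builtin.
        parked = @frame();
    }
    delivered += 1;
}

test "park a frame and resume it from elsewhere" {
    _ = async consumer();
    assert(parked != null);
    resume parked.?;
    assert(delivered == 1);
}
{#code_end#}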
diff --git a/std/event/fs.zig b/std/event/fs.zig
index c25426b98a..d6d8f2faef 100644
--- a/std/event/fs.zig
+++ b/std/event/fs.zig
@@ -76,17 +76,13 @@ pub const Request = struct {
pub const PWriteVError = error{OutOfMemory} || File.WriteError;
-/// data - just the inner references - must live until pwritev promise completes.
+/// data - just the inner references - must live until pwritev frame completes.
pub async fn pwritev(loop: *Loop, fd: fd_t, data: []const []const u8, offset: usize) PWriteVError!void {
- // workaround for https://github.com/ziglang/zig/issues/1194
- suspend {
- resume @handle();
- }
switch (builtin.os) {
- builtin.Os.macosx,
- builtin.Os.linux,
- builtin.Os.freebsd,
- builtin.Os.netbsd,
+ .macosx,
+ .linux,
+ .freebsd,
+ .netbsd,
=> {
const iovecs = try loop.allocator.alloc(os.iovec_const, data.len);
defer loop.allocator.free(iovecs);
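
The deleted `suspend { resume @handle(); }` blocks were a startup workaround; the removed TODO comments point at ziglang/zig#1194. In the model this commit moves to, an async call executes the callee on the caller's stack until the callee's first suspend point, so no such trampoline is needed. A sketch of that behavior, assuming Zig 0.5:

{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;

var job_frame: anyframe = undefined;
var state: u8 = 0;

fn job() void {
    state = 1;
    suspend {
        job_frame = @frame();
    }
    state = 2;
}

test "an async call runs the callee up to its first suspend point" {
    var frame = async job();
    // job() already ran its first statement before control returned here.
    assert(state == 1);
    resume job_frame;
    assert(state == 2);
}
{#code_end#}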
@@ -100,7 +96,7 @@ pub async fn pwritev(loop: *Loop, fd: fd_t, data: []const []const u8, offset: us
return await (async pwritevPosix(loop, fd, iovecs, offset) catch unreachable);
},
- builtin.Os.windows => {
+ .windows => {
const data_copy = try std.mem.dupe(loop.allocator, []const u8, data);
defer loop.allocator.free(data_copy);
return await (async pwritevWindows(loop, fd, data, offset) catch unreachable);
@@ -109,7 +105,7 @@ pub async fn pwritev(loop: *Loop, fd: fd_t, data: []const []const u8, offset: us
}
}
-/// data must outlive the returned promise
+/// data must outlive the returned frame
pub async fn pwritevWindows(loop: *Loop, fd: fd_t, data: []const []const u8, offset: usize) os.WindowsWriteError!void {
if (data.len == 0) return;
if (data.len == 1) return await (async pwriteWindows(loop, fd, data[0], offset) catch unreachable);
@@ -123,15 +119,10 @@ pub async fn pwritevWindows(loop: *Loop, fd: fd_t, data: []const []const u8, off
}
pub async fn pwriteWindows(loop: *Loop, fd: fd_t, data: []const u8, offset: u64) os.WindowsWriteError!void {
- // workaround for https://github.com/ziglang/zig/issues/1194
- suspend {
- resume @handle();
- }
-
var resume_node = Loop.ResumeNode.Basic{
.base = Loop.ResumeNode{
.id = Loop.ResumeNode.Id.Basic,
- .handle = @handle(),
+ .handle = @frame(),
.overlapped = windows.OVERLAPPED{
.Internal = 0,
.InternalHigh = 0,
@@ -166,18 +157,13 @@ pub async fn pwriteWindows(loop: *Loop, fd: fd_t, data: []const u8, offset: u64)
}
}
-/// iovecs must live until pwritev promise completes.
+/// iovecs must live until pwritev frame completes.
pub async fn pwritevPosix(
loop: *Loop,
fd: fd_t,
iovecs: []const os.iovec_const,
offset: usize,
) os.WriteError!void {
- // workaround for https://github.com/ziglang/zig/issues/1194
- suspend {
- resume @handle();
- }
-
var req_node = RequestNode{
.prev = null,
.next = null,
@@ -194,7 +180,7 @@ pub async fn pwritevPosix(
.TickNode = Loop.NextTickNode{
.prev = null,
.next = null,
- .data = @handle(),
+ .data = @frame(),
},
},
},
@@ -211,19 +197,14 @@ pub async fn pwritevPosix(
pub const PReadVError = error{OutOfMemory} || File.ReadError;
-/// data - just the inner references - must live until preadv promise completes.
+/// data - just the inner references - must live until preadv frame completes.
pub async fn preadv(loop: *Loop, fd: fd_t, data: []const []u8, offset: usize) PReadVError!usize {
- // workaround for https://github.com/ziglang/zig/issues/1194
- suspend {
- resume @handle();
- }
-
assert(data.len != 0);
switch (builtin.os) {
- builtin.Os.macosx,
- builtin.Os.linux,
- builtin.Os.freebsd,
- builtin.Os.netbsd,
+ .macosx,
+ .linux,
+ .freebsd,
+ .netbsd,
=> {
const iovecs = try loop.allocator.alloc(os.iovec, data.len);
defer loop.allocator.free(iovecs);
@@ -237,7 +218,7 @@ pub async fn preadv(loop: *Loop, fd: fd_t, data: []const []u8, offset: usize) PR
return await (async preadvPosix(loop, fd, iovecs, offset) catch unreachable);
},
- builtin.Os.windows => {
+ .windows => {
const data_copy = try std.mem.dupe(loop.allocator, []u8, data);
defer loop.allocator.free(data_copy);
return await (async preadvWindows(loop, fd, data_copy, offset) catch unreachable);
@@ -246,7 +227,7 @@ pub async fn preadv(loop: *Loop, fd: fd_t, data: []const []u8, offset: usize) PR
}
}
-/// data must outlive the returned promise
+/// data must outlive the returned frame
pub async fn preadvWindows(loop: *Loop, fd: fd_t, data: []const []u8, offset: u64) !usize {
assert(data.len != 0);
if (data.len == 1) return await (async preadWindows(loop, fd, data[0], offset) catch unreachable);
@@ -272,15 +253,10 @@ pub async fn preadvWindows(loop: *Loop, fd: fd_t, data: []const []u8, offset: u6
}
pub async fn preadWindows(loop: *Loop, fd: fd_t, data: []u8, offset: u64) !usize {
- // workaround for https://github.com/ziglang/zig/issues/1194
- suspend {
- resume @handle();
- }
-
var resume_node = Loop.ResumeNode.Basic{
.base = Loop.ResumeNode{
.id = Loop.ResumeNode.Id.Basic,
- .handle = @handle(),
+ .handle = @frame(),
.overlapped = windows.OVERLAPPED{
.Internal = 0,
.InternalHigh = 0,
@@ -314,18 +290,13 @@ pub async fn preadWindows(loop: *Loop, fd: fd_t, data: []u8, offset: u64) !usize
return usize(bytes_transferred);
}
-/// iovecs must live until preadv promise completes
+/// iovecs must live until preadv frame completes
pub async fn preadvPosix(
loop: *Loop,
fd: fd_t,
iovecs: []const os.iovec,
offset: usize,
) os.ReadError!usize {
- // workaround for https://github.com/ziglang/zig/issues/1194
- suspend {
- resume @handle();
- }
-
var req_node = RequestNode{
.prev = null,
.next = null,
@@ -342,7 +313,7 @@ pub async fn preadvPosix(
.TickNode = Loop.NextTickNode{
.prev = null,
.next = null,
- .data = @handle(),
+ .data = @frame(),
},
},
},
@@ -363,11 +334,6 @@ pub async fn openPosix(
flags: u32,
mode: File.Mode,
) File.OpenError!fd_t {
- // workaround for https://github.com/ziglang/zig/issues/1194
- suspend {
- resume @handle();
- }
-
const path_c = try std.os.toPosixPath(path);
var req_node = RequestNode{
@@ -386,7 +352,7 @@ pub async fn openPosix(
.TickNode = Loop.NextTickNode{
.prev = null,
.next = null,
- .data = @handle(),
+ .data = @frame(),
},
},
},
@@ -403,12 +369,12 @@ pub async fn openPosix(
pub async fn openRead(loop: *Loop, path: []const u8) File.OpenError!fd_t {
switch (builtin.os) {
- builtin.Os.macosx, builtin.Os.linux, builtin.Os.freebsd, builtin.Os.netbsd => {
+ .macosx, .linux, .freebsd, .netbsd => {
const flags = os.O_LARGEFILE | os.O_RDONLY | os.O_CLOEXEC;
return await (async openPosix(loop, path, flags, File.default_mode) catch unreachable);
},
- builtin.Os.windows => return windows.CreateFile(
+ .windows => return windows.CreateFile(
path,
windows.GENERIC_READ,
windows.FILE_SHARE_READ,
@@ -431,15 +397,15 @@ pub async fn openWrite(loop: *Loop, path: []const u8) File.OpenError!fd_t {
/// Creates if does not exist. Truncates the file if it exists.
pub async fn openWriteMode(loop: *Loop, path: []const u8, mode: File.Mode) File.OpenError!fd_t {
switch (builtin.os) {
- builtin.Os.macosx,
- builtin.Os.linux,
- builtin.Os.freebsd,
- builtin.Os.netbsd,
+ .macosx,
+ .linux,
+ .freebsd,
+ .netbsd,
=> {
const flags = os.O_LARGEFILE | os.O_WRONLY | os.O_CREAT | os.O_CLOEXEC | os.O_TRUNC;
return await (async openPosix(loop, path, flags, File.default_mode) catch unreachable);
},
- builtin.Os.windows => return windows.CreateFile(
+ .windows => return windows.CreateFile(
path,
windows.GENERIC_WRITE,
windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE,
@@ -459,12 +425,12 @@ pub async fn openReadWrite(
mode: File.Mode,
) File.OpenError!fd_t {
switch (builtin.os) {
- builtin.Os.macosx, builtin.Os.linux, builtin.Os.freebsd, builtin.Os.netbsd => {
+ .macosx, .linux, .freebsd, .netbsd => {
const flags = os.O_LARGEFILE | os.O_RDWR | os.O_CREAT | os.O_CLOEXEC;
return await (async openPosix(loop, path, flags, mode) catch unreachable);
},
- builtin.Os.windows => return windows.CreateFile(
+ .windows => return windows.CreateFile(
path,
windows.GENERIC_WRITE | windows.GENERIC_READ,
windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE,
@@ -489,9 +455,9 @@ pub const CloseOperation = struct {
os_data: OsData,
const OsData = switch (builtin.os) {
- builtin.Os.linux, builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => OsDataPosix,
+ .linux, .macosx, .freebsd, .netbsd => OsDataPosix,
- builtin.Os.windows => struct {
+ .windows => struct {
handle: ?fd_t,
},
@@ -508,8 +474,8 @@ pub const CloseOperation = struct {
self.* = CloseOperation{
.loop = loop,
.os_data = switch (builtin.os) {
- builtin.Os.linux, builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => initOsDataPosix(self),
- builtin.Os.windows => OsData{ .handle = null },
+ .linux, .macosx, .freebsd, .netbsd => initOsDataPosix(self),
+ .windows => OsData{ .handle = null },
else => @compileError("Unsupported OS"),
},
};
@@ -535,10 +501,10 @@ pub const CloseOperation = struct {
/// Defer this after creating.
pub fn finish(self: *CloseOperation) void {
switch (builtin.os) {
- builtin.Os.linux,
- builtin.Os.macosx,
- builtin.Os.freebsd,
- builtin.Os.netbsd,
+ .linux,
+ .macosx,
+ .freebsd,
+ .netbsd,
=> {
if (self.os_data.have_fd) {
self.loop.posixFsRequest(&self.os_data.close_req_node);
@@ -546,7 +512,7 @@ pub const CloseOperation = struct {
self.loop.allocator.destroy(self);
}
},
- builtin.Os.windows => {
+ .windows => {
if (self.os_data.handle) |handle| {
os.close(handle);
}
@@ -558,15 +524,15 @@ pub const CloseOperation = struct {
pub fn setHandle(self: *CloseOperation, handle: fd_t) void {
switch (builtin.os) {
- builtin.Os.linux,
- builtin.Os.macosx,
- builtin.Os.freebsd,
- builtin.Os.netbsd,
+ .linux,
+ .macosx,
+ .freebsd,
+ .netbsd,
=> {
self.os_data.close_req_node.data.msg.Close.fd = handle;
self.os_data.have_fd = true;
},
- builtin.Os.windows => {
+ .windows => {
self.os_data.handle = handle;
},
else => @compileError("Unsupported OS"),
@@ -576,14 +542,14 @@ pub const CloseOperation = struct {
/// Undo a `setHandle`.
pub fn clearHandle(self: *CloseOperation) void {
switch (builtin.os) {
- builtin.Os.linux,
- builtin.Os.macosx,
- builtin.Os.freebsd,
- builtin.Os.netbsd,
+ .linux,
+ .macosx,
+ .freebsd,
+ .netbsd,
=> {
self.os_data.have_fd = false;
},
- builtin.Os.windows => {
+ .windows => {
self.os_data.handle = null;
},
else => @compileError("Unsupported OS"),
@@ -592,15 +558,15 @@ pub const CloseOperation = struct {
pub fn getHandle(self: *CloseOperation) fd_t {
switch (builtin.os) {
- builtin.Os.linux,
- builtin.Os.macosx,
- builtin.Os.freebsd,
- builtin.Os.netbsd,
+ .linux,
+ .macosx,
+ .freebsd,
+ .netbsd,
=> {
assert(self.os_data.have_fd);
return self.os_data.close_req_node.data.msg.Close.fd;
},
- builtin.Os.windows => {
+ .windows => {
return self.os_data.handle.?;
},
else => @compileError("Unsupported OS"),
@@ -617,12 +583,12 @@ pub async fn writeFile(loop: *Loop, path: []const u8, contents: []const u8) !voi
/// contents must remain alive until writeFile completes.
pub async fn writeFileMode(loop: *Loop, path: []const u8, contents: []const u8, mode: File.Mode) !void {
switch (builtin.os) {
- builtin.Os.linux,
- builtin.Os.macosx,
- builtin.Os.freebsd,
- builtin.Os.netbsd,
+ .linux,
+ .macosx,
+ .freebsd,
+ .netbsd,
=> return await (async writeFileModeThread(loop, path, contents, mode) catch unreachable),
- builtin.Os.windows => return await (async writeFileWindows(loop, path, contents) catch unreachable),
+ .windows => return await (async writeFileWindows(loop, path, contents) catch unreachable),
else => @compileError("Unsupported OS"),
}
}
@@ -643,11 +609,6 @@ async fn writeFileWindows(loop: *Loop, path: []const u8, contents: []const u8) !
}
async fn writeFileModeThread(loop: *Loop, path: []const u8, contents: []const u8, mode: File.Mode) !void {
- // workaround for https://github.com/ziglang/zig/issues/1194
- suspend {
- resume @handle();
- }
-
const path_with_null = try std.cstr.addNullByte(loop.allocator, path);
defer loop.allocator.free(path_with_null);
@@ -667,7 +628,7 @@ async fn writeFileModeThread(loop: *Loop, path: []const u8, contents: []const u8
.TickNode = Loop.NextTickNode{
.prev = null,
.next = null,
- .data = @handle(),
+ .data = @frame(),
},
},
},
@@ -682,7 +643,7 @@ async fn writeFileModeThread(loop: *Loop, path: []const u8, contents: []const u8
return req_node.data.msg.WriteFile.result;
}
-/// The promise resumes when the last data has been confirmed written, but before the file handle
+/// The frame resumes when the last data has been confirmed written, but before the file handle
/// is closed.
/// Caller owns returned memory.
pub async fn readFile(loop: *Loop, file_path: []const u8, max_size: usize) ![]u8 {
@@ -715,598 +676,583 @@ pub const WatchEventId = enum {
Delete,
};
-pub const WatchEventError = error{
- UserResourceLimitReached,
- SystemResources,
- AccessDenied,
- Unexpected, // TODO remove this possibility
-};
-
-pub fn Watch(comptime V: type) type {
- return struct {
- channel: *event.Channel(Event.Error!Event),
- os_data: OsData,
-
- const OsData = switch (builtin.os) {
- builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => struct {
- file_table: FileTable,
- table_lock: event.Lock,
-
- const FileTable = std.AutoHashMap([]const u8, *Put);
- const Put = struct {
- putter: promise,
- value_ptr: *V,
- };
- },
-
- builtin.Os.linux => LinuxOsData,
- builtin.Os.windows => WindowsOsData,
-
- else => @compileError("Unsupported OS"),
- };
-
- const WindowsOsData = struct {
- table_lock: event.Lock,
- dir_table: DirTable,
- all_putters: std.atomic.Queue(promise),
- ref_count: std.atomic.Int(usize),
-
- const DirTable = std.AutoHashMap([]const u8, *Dir);
- const FileTable = std.AutoHashMap([]const u16, V);
-
- const Dir = struct {
- putter: promise,
- file_table: FileTable,
- table_lock: event.Lock,
- };
- };
-
- const LinuxOsData = struct {
- putter: promise,
- inotify_fd: i32,
- wd_table: WdTable,
- table_lock: event.Lock,
-
- const WdTable = std.AutoHashMap(i32, Dir);
- const FileTable = std.AutoHashMap([]const u8, V);
-
- const Dir = struct {
- dirname: []const u8,
- file_table: FileTable,
- };
- };
-
- const FileToHandle = std.AutoHashMap([]const u8, promise);
-
- const Self = @This();
-
- pub const Event = struct {
- id: Id,
- data: V,
-
- pub const Id = WatchEventId;
- pub const Error = WatchEventError;
- };
-
- pub fn create(loop: *Loop, event_buf_count: usize) !*Self {
- const channel = try event.Channel(Self.Event.Error!Self.Event).create(loop, event_buf_count);
- errdefer channel.destroy();
-
- switch (builtin.os) {
- builtin.Os.linux => {
- const inotify_fd = try os.inotify_init1(os.linux.IN_NONBLOCK | os.linux.IN_CLOEXEC);
- errdefer os.close(inotify_fd);
-
- var result: *Self = undefined;
- _ = try async linuxEventPutter(inotify_fd, channel, &result);
- return result;
- },
-
- builtin.Os.windows => {
- const self = try loop.allocator.create(Self);
- errdefer loop.allocator.destroy(self);
- self.* = Self{
- .channel = channel,
- .os_data = OsData{
- .table_lock = event.Lock.init(loop),
- .dir_table = OsData.DirTable.init(loop.allocator),
- .ref_count = std.atomic.Int(usize).init(1),
- .all_putters = std.atomic.Queue(promise).init(),
- },
- };
- return self;
- },
-
- builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => {
- const self = try loop.allocator.create(Self);
- errdefer loop.allocator.destroy(self);
-
- self.* = Self{
- .channel = channel,
- .os_data = OsData{
- .table_lock = event.Lock.init(loop),
- .file_table = OsData.FileTable.init(loop.allocator),
- },
- };
- return self;
- },
- else => @compileError("Unsupported OS"),
- }
- }
-
- /// All addFile calls and removeFile calls must have completed.
- pub fn destroy(self: *Self) void {
- switch (builtin.os) {
- builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => {
- // TODO we need to cancel the coroutines before destroying the lock
- self.os_data.table_lock.deinit();
- var it = self.os_data.file_table.iterator();
- while (it.next()) |entry| {
- cancel entry.value.putter;
- self.channel.loop.allocator.free(entry.key);
- }
- self.channel.destroy();
- },
- builtin.Os.linux => cancel self.os_data.putter,
- builtin.Os.windows => {
- while (self.os_data.all_putters.get()) |putter_node| {
- cancel putter_node.data;
- }
- self.deref();
- },
- else => @compileError("Unsupported OS"),
- }
- }
-
- fn ref(self: *Self) void {
- _ = self.os_data.ref_count.incr();
- }
-
- fn deref(self: *Self) void {
- if (self.os_data.ref_count.decr() == 1) {
- const allocator = self.channel.loop.allocator;
- self.os_data.table_lock.deinit();
- var it = self.os_data.dir_table.iterator();
- while (it.next()) |entry| {
- allocator.free(entry.key);
- allocator.destroy(entry.value);
- }
- self.os_data.dir_table.deinit();
- self.channel.destroy();
- allocator.destroy(self);
- }
- }
-
- pub async fn addFile(self: *Self, file_path: []const u8, value: V) !?V {
- switch (builtin.os) {
- builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => return await (async addFileKEvent(self, file_path, value) catch unreachable),
- builtin.Os.linux => return await (async addFileLinux(self, file_path, value) catch unreachable),
- builtin.Os.windows => return await (async addFileWindows(self, file_path, value) catch unreachable),
- else => @compileError("Unsupported OS"),
- }
- }
-
- async fn addFileKEvent(self: *Self, file_path: []const u8, value: V) !?V {
- const resolved_path = try std.fs.path.resolve(self.channel.loop.allocator, [_][]const u8{file_path});
- var resolved_path_consumed = false;
- defer if (!resolved_path_consumed) self.channel.loop.allocator.free(resolved_path);
-
- var close_op = try CloseOperation.start(self.channel.loop);
- var close_op_consumed = false;
- defer if (!close_op_consumed) close_op.finish();
-
- const flags = if (os.darwin.is_the_target) os.O_SYMLINK | os.O_EVTONLY else 0;
- const mode = 0;
- const fd = try await (async openPosix(self.channel.loop, resolved_path, flags, mode) catch unreachable);
- close_op.setHandle(fd);
-
- var put_data: *OsData.Put = undefined;
- const putter = try async self.kqPutEvents(close_op, value, &put_data);
- close_op_consumed = true;
- errdefer cancel putter;
-
- const result = blk: {
- const held = await (async self.os_data.table_lock.acquire() catch unreachable);
- defer held.release();
-
- const gop = try self.os_data.file_table.getOrPut(resolved_path);
- if (gop.found_existing) {
- const prev_value = gop.kv.value.value_ptr.*;
- cancel gop.kv.value.putter;
- gop.kv.value = put_data;
- break :blk prev_value;
- } else {
- resolved_path_consumed = true;
- gop.kv.value = put_data;
- break :blk null;
- }
- };
-
- return result;
- }
-
- async fn kqPutEvents(self: *Self, close_op: *CloseOperation, value: V, out_put: **OsData.Put) void {
- // TODO https://github.com/ziglang/zig/issues/1194
- suspend {
- resume @handle();
- }
-
- var value_copy = value;
- var put = OsData.Put{
- .putter = @handle(),
- .value_ptr = &value_copy,
- };
- out_put.* = &put;
- self.channel.loop.beginOneEvent();
-
- defer {
- close_op.finish();
- self.channel.loop.finishOneEvent();
- }
-
- while (true) {
- if (await (async self.channel.loop.bsdWaitKev(
- @intCast(usize, close_op.getHandle()),
- os.EVFILT_VNODE,
- os.NOTE_WRITE | os.NOTE_DELETE,
- ) catch unreachable)) |kev| {
- // TODO handle EV_ERROR
- if (kev.fflags & os.NOTE_DELETE != 0) {
- await (async self.channel.put(Self.Event{
- .id = Event.Id.Delete,
- .data = value_copy,
- }) catch unreachable);
- } else if (kev.fflags & os.NOTE_WRITE != 0) {
- await (async self.channel.put(Self.Event{
- .id = Event.Id.CloseWrite,
- .data = value_copy,
- }) catch unreachable);
- }
- } else |err| switch (err) {
- error.EventNotFound => unreachable,
- error.ProcessNotFound => unreachable,
- error.Overflow => unreachable,
- error.AccessDenied, error.SystemResources => |casted_err| {
- await (async self.channel.put(casted_err) catch unreachable);
- },
- }
- }
- }
-
- async fn addFileLinux(self: *Self, file_path: []const u8, value: V) !?V {
- const value_copy = value;
-
- const dirname = std.fs.path.dirname(file_path) orelse ".";
- const dirname_with_null = try std.cstr.addNullByte(self.channel.loop.allocator, dirname);
- var dirname_with_null_consumed = false;
- defer if (!dirname_with_null_consumed) self.channel.loop.allocator.free(dirname_with_null);
-
- const basename = std.fs.path.basename(file_path);
- const basename_with_null = try std.cstr.addNullByte(self.channel.loop.allocator, basename);
- var basename_with_null_consumed = false;
- defer if (!basename_with_null_consumed) self.channel.loop.allocator.free(basename_with_null);
-
- const wd = try os.inotify_add_watchC(
- self.os_data.inotify_fd,
- dirname_with_null.ptr,
- os.linux.IN_CLOSE_WRITE | os.linux.IN_ONLYDIR | os.linux.IN_EXCL_UNLINK,
- );
- // wd is either a newly created watch or an existing one.
-
- const held = await (async self.os_data.table_lock.acquire() catch unreachable);
- defer held.release();
-
- const gop = try self.os_data.wd_table.getOrPut(wd);
- if (!gop.found_existing) {
- gop.kv.value = OsData.Dir{
- .dirname = dirname_with_null,
- .file_table = OsData.FileTable.init(self.channel.loop.allocator),
- };
- dirname_with_null_consumed = true;
- }
- const dir = &gop.kv.value;
-
- const file_table_gop = try dir.file_table.getOrPut(basename_with_null);
- if (file_table_gop.found_existing) {
- const prev_value = file_table_gop.kv.value;
- file_table_gop.kv.value = value_copy;
- return prev_value;
- } else {
- file_table_gop.kv.value = value_copy;
- basename_with_null_consumed = true;
- return null;
- }
- }
-
- async fn addFileWindows(self: *Self, file_path: []const u8, value: V) !?V {
- const value_copy = value;
- // TODO we might need to convert dirname and basename to canonical file paths ("short"?)
-
- const dirname = try std.mem.dupe(self.channel.loop.allocator, u8, std.fs.path.dirname(file_path) orelse ".");
- var dirname_consumed = false;
- defer if (!dirname_consumed) self.channel.loop.allocator.free(dirname);
-
- const dirname_utf16le = try std.unicode.utf8ToUtf16LeWithNull(self.channel.loop.allocator, dirname);
- defer self.channel.loop.allocator.free(dirname_utf16le);
-
- // TODO https://github.com/ziglang/zig/issues/265
- const basename = std.fs.path.basename(file_path);
- const basename_utf16le_null = try std.unicode.utf8ToUtf16LeWithNull(self.channel.loop.allocator, basename);
- var basename_utf16le_null_consumed = false;
- defer if (!basename_utf16le_null_consumed) self.channel.loop.allocator.free(basename_utf16le_null);
- const basename_utf16le_no_null = basename_utf16le_null[0 .. basename_utf16le_null.len - 1];
-
- const dir_handle = try windows.CreateFileW(
- dirname_utf16le.ptr,
- windows.FILE_LIST_DIRECTORY,
- windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE | windows.FILE_SHARE_WRITE,
- null,
- windows.OPEN_EXISTING,
- windows.FILE_FLAG_BACKUP_SEMANTICS | windows.FILE_FLAG_OVERLAPPED,
- null,
- );
- var dir_handle_consumed = false;
- defer if (!dir_handle_consumed) windows.CloseHandle(dir_handle);
-
- const held = await (async self.os_data.table_lock.acquire() catch unreachable);
- defer held.release();
-
- const gop = try self.os_data.dir_table.getOrPut(dirname);
- if (gop.found_existing) {
- const dir = gop.kv.value;
- const held_dir_lock = await (async dir.table_lock.acquire() catch unreachable);
- defer held_dir_lock.release();
-
- const file_gop = try dir.file_table.getOrPut(basename_utf16le_no_null);
- if (file_gop.found_existing) {
- const prev_value = file_gop.kv.value;
- file_gop.kv.value = value_copy;
- return prev_value;
- } else {
- file_gop.kv.value = value_copy;
- basename_utf16le_null_consumed = true;
- return null;
- }
- } else {
- errdefer _ = self.os_data.dir_table.remove(dirname);
- const dir = try self.channel.loop.allocator.create(OsData.Dir);
- errdefer self.channel.loop.allocator.destroy(dir);
-
- dir.* = OsData.Dir{
- .file_table = OsData.FileTable.init(self.channel.loop.allocator),
- .table_lock = event.Lock.init(self.channel.loop),
- .putter = undefined,
- };
- gop.kv.value = dir;
- assert((try dir.file_table.put(basename_utf16le_no_null, value_copy)) == null);
- basename_utf16le_null_consumed = true;
-
- dir.putter = try async self.windowsDirReader(dir_handle, dir);
- dir_handle_consumed = true;
-
- dirname_consumed = true;
-
- return null;
- }
- }
-
- async fn windowsDirReader(self: *Self, dir_handle: windows.HANDLE, dir: *OsData.Dir) void {
- // TODO https://github.com/ziglang/zig/issues/1194
- suspend {
- resume @handle();
- }
-
- self.ref();
- defer self.deref();
-
- defer os.close(dir_handle);
-
- var putter_node = std.atomic.Queue(promise).Node{
- .data = @handle(),
- .prev = null,
- .next = null,
- };
- self.os_data.all_putters.put(&putter_node);
- defer _ = self.os_data.all_putters.remove(&putter_node);
-
- var resume_node = Loop.ResumeNode.Basic{
- .base = Loop.ResumeNode{
- .id = Loop.ResumeNode.Id.Basic,
- .handle = @handle(),
- .overlapped = windows.OVERLAPPED{
- .Internal = 0,
- .InternalHigh = 0,
- .Offset = 0,
- .OffsetHigh = 0,
- .hEvent = null,
- },
- },
- };
- var event_buf: [4096]u8 align(@alignOf(windows.FILE_NOTIFY_INFORMATION)) = undefined;
-
- // TODO handle this error not in the channel but in the setup
- _ = windows.CreateIoCompletionPort(
- dir_handle,
- self.channel.loop.os_data.io_port,
- undefined,
- undefined,
- ) catch |err| {
- await (async self.channel.put(err) catch unreachable);
- return;
- };
-
- while (true) {
- {
- // TODO only 1 beginOneEvent for the whole coroutine
- self.channel.loop.beginOneEvent();
- errdefer self.channel.loop.finishOneEvent();
- errdefer {
- _ = windows.kernel32.CancelIoEx(dir_handle, &resume_node.base.overlapped);
- }
- suspend {
- _ = windows.kernel32.ReadDirectoryChangesW(
- dir_handle,
- &event_buf,
- @intCast(windows.DWORD, event_buf.len),
- windows.FALSE, // watch subtree
- windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME |
- windows.FILE_NOTIFY_CHANGE_ATTRIBUTES | windows.FILE_NOTIFY_CHANGE_SIZE |
- windows.FILE_NOTIFY_CHANGE_LAST_WRITE | windows.FILE_NOTIFY_CHANGE_LAST_ACCESS |
- windows.FILE_NOTIFY_CHANGE_CREATION | windows.FILE_NOTIFY_CHANGE_SECURITY,
- null, // number of bytes transferred (unused for async)
- &resume_node.base.overlapped,
- null, // completion routine - unused because we use IOCP
- );
- }
- }
- var bytes_transferred: windows.DWORD = undefined;
- if (windows.kernel32.GetOverlappedResult(dir_handle, &resume_node.base.overlapped, &bytes_transferred, windows.FALSE) == 0) {
- const err = switch (windows.kernel32.GetLastError()) {
- else => |err| windows.unexpectedError(err),
- };
- await (async self.channel.put(err) catch unreachable);
- } else {
- // can't use @bytesToSlice because of the special variable length name field
- var ptr = event_buf[0..].ptr;
- const end_ptr = ptr + bytes_transferred;
- var ev: *windows.FILE_NOTIFY_INFORMATION = undefined;
- while (@ptrToInt(ptr) < @ptrToInt(end_ptr)) : (ptr += ev.NextEntryOffset) {
- ev = @ptrCast(*windows.FILE_NOTIFY_INFORMATION, ptr);
- const emit = switch (ev.Action) {
- windows.FILE_ACTION_REMOVED => WatchEventId.Delete,
- windows.FILE_ACTION_MODIFIED => WatchEventId.CloseWrite,
- else => null,
- };
- if (emit) |id| {
- const basename_utf16le = ([*]u16)(&ev.FileName)[0 .. ev.FileNameLength / 2];
- const user_value = blk: {
- const held = await (async dir.table_lock.acquire() catch unreachable);
- defer held.release();
-
- if (dir.file_table.get(basename_utf16le)) |entry| {
- break :blk entry.value;
- } else {
- break :blk null;
- }
- };
- if (user_value) |v| {
- await (async self.channel.put(Event{
- .id = id,
- .data = v,
- }) catch unreachable);
- }
- }
- if (ev.NextEntryOffset == 0) break;
- }
- }
- }
- }
-
- pub async fn removeFile(self: *Self, file_path: []const u8) ?V {
- @panic("TODO");
- }
-
- async fn linuxEventPutter(inotify_fd: i32, channel: *event.Channel(Event.Error!Event), out_watch: **Self) void {
- // TODO https://github.com/ziglang/zig/issues/1194
- suspend {
- resume @handle();
- }
-
- const loop = channel.loop;
-
- var watch = Self{
- .channel = channel,
- .os_data = OsData{
- .putter = @handle(),
- .inotify_fd = inotify_fd,
- .wd_table = OsData.WdTable.init(loop.allocator),
- .table_lock = event.Lock.init(loop),
- },
- };
- out_watch.* = &watch;
-
- loop.beginOneEvent();
-
- defer {
- watch.os_data.table_lock.deinit();
- var wd_it = watch.os_data.wd_table.iterator();
- while (wd_it.next()) |wd_entry| {
- var file_it = wd_entry.value.file_table.iterator();
- while (file_it.next()) |file_entry| {
- loop.allocator.free(file_entry.key);
- }
- loop.allocator.free(wd_entry.value.dirname);
- }
- loop.finishOneEvent();
- os.close(inotify_fd);
- channel.destroy();
- }
-
- var event_buf: [4096]u8 align(@alignOf(os.linux.inotify_event)) = undefined;
-
- while (true) {
- const rc = os.linux.read(inotify_fd, &event_buf, event_buf.len);
- const errno = os.linux.getErrno(rc);
- switch (errno) {
- 0 => {
- // can't use @bytesToSlice because of the special variable length name field
- var ptr = event_buf[0..].ptr;
- const end_ptr = ptr + event_buf.len;
- var ev: *os.linux.inotify_event = undefined;
- while (@ptrToInt(ptr) < @ptrToInt(end_ptr)) : (ptr += @sizeOf(os.linux.inotify_event) + ev.len) {
- ev = @ptrCast(*os.linux.inotify_event, ptr);
- if (ev.mask & os.linux.IN_CLOSE_WRITE == os.linux.IN_CLOSE_WRITE) {
- const basename_ptr = ptr + @sizeOf(os.linux.inotify_event);
- const basename_with_null = basename_ptr[0 .. std.mem.len(u8, basename_ptr) + 1];
- const user_value = blk: {
- const held = await (async watch.os_data.table_lock.acquire() catch unreachable);
- defer held.release();
-
- const dir = &watch.os_data.wd_table.get(ev.wd).?.value;
- if (dir.file_table.get(basename_with_null)) |entry| {
- break :blk entry.value;
- } else {
- break :blk null;
- }
- };
- if (user_value) |v| {
- await (async channel.put(Event{
- .id = WatchEventId.CloseWrite,
- .data = v,
- }) catch unreachable);
- }
- }
- }
- },
- os.linux.EINTR => continue,
- os.linux.EINVAL => unreachable,
- os.linux.EFAULT => unreachable,
- os.linux.EAGAIN => {
- (await (async loop.linuxWaitFd(
- inotify_fd,
- os.linux.EPOLLET | os.linux.EPOLLIN,
- ) catch unreachable)) catch |err| {
- const transformed_err = switch (err) {
- error.FileDescriptorAlreadyPresentInSet => unreachable,
- error.OperationCausesCircularLoop => unreachable,
- error.FileDescriptorNotRegistered => unreachable,
- error.FileDescriptorIncompatibleWithEpoll => unreachable,
- error.Unexpected => unreachable,
- else => |e| e,
- };
- await (async channel.put(transformed_err) catch unreachable);
- };
- },
- else => unreachable,
- }
- }
- }
- };
-}
+//pub const WatchEventError = error{
+// UserResourceLimitReached,
+// SystemResources,
+// AccessDenied,
+// Unexpected, // TODO remove this possibility
+//};
+//
+//pub fn Watch(comptime V: type) type {
+// return struct {
+// channel: *event.Channel(Event.Error!Event),
+// os_data: OsData,
+//
+// const OsData = switch (builtin.os) {
+// .macosx, .freebsd, .netbsd => struct {
+// file_table: FileTable,
+// table_lock: event.Lock,
+//
+// const FileTable = std.AutoHashMap([]const u8, *Put);
+// const Put = struct {
+// putter: anyframe,
+// value_ptr: *V,
+// };
+// },
+//
+// .linux => LinuxOsData,
+// .windows => WindowsOsData,
+//
+// else => @compileError("Unsupported OS"),
+// };
+//
+// const WindowsOsData = struct {
+// table_lock: event.Lock,
+// dir_table: DirTable,
+// all_putters: std.atomic.Queue(anyframe),
+// ref_count: std.atomic.Int(usize),
+//
+// const DirTable = std.AutoHashMap([]const u8, *Dir);
+// const FileTable = std.AutoHashMap([]const u16, V);
+//
+// const Dir = struct {
+// putter: anyframe,
+// file_table: FileTable,
+// table_lock: event.Lock,
+// };
+// };
+//
+// const LinuxOsData = struct {
+// putter: anyframe,
+// inotify_fd: i32,
+// wd_table: WdTable,
+// table_lock: event.Lock,
+//
+// const WdTable = std.AutoHashMap(i32, Dir);
+// const FileTable = std.AutoHashMap([]const u8, V);
+//
+// const Dir = struct {
+// dirname: []const u8,
+// file_table: FileTable,
+// };
+// };
+//
+// const FileToHandle = std.AutoHashMap([]const u8, anyframe);
+//
+// const Self = @This();
+//
+// pub const Event = struct {
+// id: Id,
+// data: V,
+//
+// pub const Id = WatchEventId;
+// pub const Error = WatchEventError;
+// };
+//
+// pub fn create(loop: *Loop, event_buf_count: usize) !*Self {
+// const channel = try event.Channel(Self.Event.Error!Self.Event).create(loop, event_buf_count);
+// errdefer channel.destroy();
+//
+// switch (builtin.os) {
+// .linux => {
+// const inotify_fd = try os.inotify_init1(os.linux.IN_NONBLOCK | os.linux.IN_CLOEXEC);
+// errdefer os.close(inotify_fd);
+//
+// var result: *Self = undefined;
+// _ = try async linuxEventPutter(inotify_fd, channel, &result);
+// return result;
+// },
+//
+// .windows => {
+// const self = try loop.allocator.create(Self);
+// errdefer loop.allocator.destroy(self);
+// self.* = Self{
+// .channel = channel,
+// .os_data = OsData{
+// .table_lock = event.Lock.init(loop),
+// .dir_table = OsData.DirTable.init(loop.allocator),
+// .ref_count = std.atomic.Int(usize).init(1),
+// .all_putters = std.atomic.Queue(anyframe).init(),
+// },
+// };
+// return self;
+// },
+//
+// .macosx, .freebsd, .netbsd => {
+// const self = try loop.allocator.create(Self);
+// errdefer loop.allocator.destroy(self);
+//
+// self.* = Self{
+// .channel = channel,
+// .os_data = OsData{
+// .table_lock = event.Lock.init(loop),
+// .file_table = OsData.FileTable.init(loop.allocator),
+// },
+// };
+// return self;
+// },
+// else => @compileError("Unsupported OS"),
+// }
+// }
+//
+// /// All addFile calls and removeFile calls must have completed.
+// pub fn destroy(self: *Self) void {
+// switch (builtin.os) {
+// .macosx, .freebsd, .netbsd => {
+// // TODO we need to cancel the frames before destroying the lock
+// self.os_data.table_lock.deinit();
+// var it = self.os_data.file_table.iterator();
+// while (it.next()) |entry| {
+// cancel entry.value.putter;
+// self.channel.loop.allocator.free(entry.key);
+// }
+// self.channel.destroy();
+// },
+// .linux => cancel self.os_data.putter,
+// .windows => {
+// while (self.os_data.all_putters.get()) |putter_node| {
+// cancel putter_node.data;
+// }
+// self.deref();
+// },
+// else => @compileError("Unsupported OS"),
+// }
+// }
+//
+// fn ref(self: *Self) void {
+// _ = self.os_data.ref_count.incr();
+// }
+//
+// fn deref(self: *Self) void {
+// if (self.os_data.ref_count.decr() == 1) {
+// const allocator = self.channel.loop.allocator;
+// self.os_data.table_lock.deinit();
+// var it = self.os_data.dir_table.iterator();
+// while (it.next()) |entry| {
+// allocator.free(entry.key);
+// allocator.destroy(entry.value);
+// }
+// self.os_data.dir_table.deinit();
+// self.channel.destroy();
+// allocator.destroy(self);
+// }
+// }
+//
+// pub async fn addFile(self: *Self, file_path: []const u8, value: V) !?V {
+// switch (builtin.os) {
+// .macosx, .freebsd, .netbsd => return await (async addFileKEvent(self, file_path, value) catch unreachable),
+// .linux => return await (async addFileLinux(self, file_path, value) catch unreachable),
+// .windows => return await (async addFileWindows(self, file_path, value) catch unreachable),
+// else => @compileError("Unsupported OS"),
+// }
+// }
+//
+// async fn addFileKEvent(self: *Self, file_path: []const u8, value: V) !?V {
+// const resolved_path = try std.fs.path.resolve(self.channel.loop.allocator, [_][]const u8{file_path});
+// var resolved_path_consumed = false;
+// defer if (!resolved_path_consumed) self.channel.loop.allocator.free(resolved_path);
+//
+// var close_op = try CloseOperation.start(self.channel.loop);
+// var close_op_consumed = false;
+// defer if (!close_op_consumed) close_op.finish();
+//
+// const flags = if (os.darwin.is_the_target) os.O_SYMLINK | os.O_EVTONLY else 0;
+// const mode = 0;
+// const fd = try await (async openPosix(self.channel.loop, resolved_path, flags, mode) catch unreachable);
+// close_op.setHandle(fd);
+//
+// var put_data: *OsData.Put = undefined;
+// const putter = try async self.kqPutEvents(close_op, value, &put_data);
+// close_op_consumed = true;
+// errdefer cancel putter;
+//
+// const result = blk: {
+// const held = await (async self.os_data.table_lock.acquire() catch unreachable);
+// defer held.release();
+//
+// const gop = try self.os_data.file_table.getOrPut(resolved_path);
+// if (gop.found_existing) {
+// const prev_value = gop.kv.value.value_ptr.*;
+// cancel gop.kv.value.putter;
+// gop.kv.value = put_data;
+// break :blk prev_value;
+// } else {
+// resolved_path_consumed = true;
+// gop.kv.value = put_data;
+// break :blk null;
+// }
+// };
+//
+// return result;
+// }
+//
+// async fn kqPutEvents(self: *Self, close_op: *CloseOperation, value: V, out_put: **OsData.Put) void {
+// var value_copy = value;
+// var put = OsData.Put{
+// .putter = @frame(),
+// .value_ptr = &value_copy,
+// };
+// out_put.* = &put;
+// self.channel.loop.beginOneEvent();
+//
+// defer {
+// close_op.finish();
+// self.channel.loop.finishOneEvent();
+// }
+//
+// while (true) {
+// if (await (async self.channel.loop.bsdWaitKev(
+// @intCast(usize, close_op.getHandle()),
+// os.EVFILT_VNODE,
+// os.NOTE_WRITE | os.NOTE_DELETE,
+// ) catch unreachable)) |kev| {
+// // TODO handle EV_ERROR
+// if (kev.fflags & os.NOTE_DELETE != 0) {
+// await (async self.channel.put(Self.Event{
+// .id = Event.Id.Delete,
+// .data = value_copy,
+// }) catch unreachable);
+// } else if (kev.fflags & os.NOTE_WRITE != 0) {
+// await (async self.channel.put(Self.Event{
+// .id = Event.Id.CloseWrite,
+// .data = value_copy,
+// }) catch unreachable);
+// }
+// } else |err| switch (err) {
+// error.EventNotFound => unreachable,
+// error.ProcessNotFound => unreachable,
+// error.Overflow => unreachable,
+// error.AccessDenied, error.SystemResources => |casted_err| {
+// await (async self.channel.put(casted_err) catch unreachable);
+// },
+// }
+// }
+// }
+//
+// async fn addFileLinux(self: *Self, file_path: []const u8, value: V) !?V {
+// const value_copy = value;
+//
+// const dirname = std.fs.path.dirname(file_path) orelse ".";
+// const dirname_with_null = try std.cstr.addNullByte(self.channel.loop.allocator, dirname);
+// var dirname_with_null_consumed = false;
+// defer if (!dirname_with_null_consumed) self.channel.loop.allocator.free(dirname_with_null);
+//
+// const basename = std.fs.path.basename(file_path);
+// const basename_with_null = try std.cstr.addNullByte(self.channel.loop.allocator, basename);
+// var basename_with_null_consumed = false;
+// defer if (!basename_with_null_consumed) self.channel.loop.allocator.free(basename_with_null);
+//
+// const wd = try os.inotify_add_watchC(
+// self.os_data.inotify_fd,
+// dirname_with_null.ptr,
+// os.linux.IN_CLOSE_WRITE | os.linux.IN_ONLYDIR | os.linux.IN_EXCL_UNLINK,
+// );
+// // wd is either a newly created watch or an existing one.
+//
+// const held = await (async self.os_data.table_lock.acquire() catch unreachable);
+// defer held.release();
+//
+// const gop = try self.os_data.wd_table.getOrPut(wd);
+// if (!gop.found_existing) {
+// gop.kv.value = OsData.Dir{
+// .dirname = dirname_with_null,
+// .file_table = OsData.FileTable.init(self.channel.loop.allocator),
+// };
+// dirname_with_null_consumed = true;
+// }
+// const dir = &gop.kv.value;
+//
+// const file_table_gop = try dir.file_table.getOrPut(basename_with_null);
+// if (file_table_gop.found_existing) {
+// const prev_value = file_table_gop.kv.value;
+// file_table_gop.kv.value = value_copy;
+// return prev_value;
+// } else {
+// file_table_gop.kv.value = value_copy;
+// basename_with_null_consumed = true;
+// return null;
+// }
+// }
+//
+// async fn addFileWindows(self: *Self, file_path: []const u8, value: V) !?V {
+// const value_copy = value;
+// // TODO we might need to convert dirname and basename to canonical file paths ("short"?)
+//
+// const dirname = try std.mem.dupe(self.channel.loop.allocator, u8, std.fs.path.dirname(file_path) orelse ".");
+// var dirname_consumed = false;
+// defer if (!dirname_consumed) self.channel.loop.allocator.free(dirname);
+//
+// const dirname_utf16le = try std.unicode.utf8ToUtf16LeWithNull(self.channel.loop.allocator, dirname);
+// defer self.channel.loop.allocator.free(dirname_utf16le);
+//
+// // TODO https://github.com/ziglang/zig/issues/265
+// const basename = std.fs.path.basename(file_path);
+// const basename_utf16le_null = try std.unicode.utf8ToUtf16LeWithNull(self.channel.loop.allocator, basename);
+// var basename_utf16le_null_consumed = false;
+// defer if (!basename_utf16le_null_consumed) self.channel.loop.allocator.free(basename_utf16le_null);
+// const basename_utf16le_no_null = basename_utf16le_null[0 .. basename_utf16le_null.len - 1];
+//
+// const dir_handle = try windows.CreateFileW(
+// dirname_utf16le.ptr,
+// windows.FILE_LIST_DIRECTORY,
+// windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE | windows.FILE_SHARE_WRITE,
+// null,
+// windows.OPEN_EXISTING,
+// windows.FILE_FLAG_BACKUP_SEMANTICS | windows.FILE_FLAG_OVERLAPPED,
+// null,
+// );
+// var dir_handle_consumed = false;
+// defer if (!dir_handle_consumed) windows.CloseHandle(dir_handle);
+//
+// const held = await (async self.os_data.table_lock.acquire() catch unreachable);
+// defer held.release();
+//
+// const gop = try self.os_data.dir_table.getOrPut(dirname);
+// if (gop.found_existing) {
+// const dir = gop.kv.value;
+// const held_dir_lock = await (async dir.table_lock.acquire() catch unreachable);
+// defer held_dir_lock.release();
+//
+// const file_gop = try dir.file_table.getOrPut(basename_utf16le_no_null);
+// if (file_gop.found_existing) {
+// const prev_value = file_gop.kv.value;
+// file_gop.kv.value = value_copy;
+// return prev_value;
+// } else {
+// file_gop.kv.value = value_copy;
+// basename_utf16le_null_consumed = true;
+// return null;
+// }
+// } else {
+// errdefer _ = self.os_data.dir_table.remove(dirname);
+// const dir = try self.channel.loop.allocator.create(OsData.Dir);
+// errdefer self.channel.loop.allocator.destroy(dir);
+//
+// dir.* = OsData.Dir{
+// .file_table = OsData.FileTable.init(self.channel.loop.allocator),
+// .table_lock = event.Lock.init(self.channel.loop),
+// .putter = undefined,
+// };
+// gop.kv.value = dir;
+// assert((try dir.file_table.put(basename_utf16le_no_null, value_copy)) == null);
+// basename_utf16le_null_consumed = true;
+//
+// dir.putter = try async self.windowsDirReader(dir_handle, dir);
+// dir_handle_consumed = true;
+//
+// dirname_consumed = true;
+//
+// return null;
+// }
+// }
+//
+// async fn windowsDirReader(self: *Self, dir_handle: windows.HANDLE, dir: *OsData.Dir) void {
+// self.ref();
+// defer self.deref();
+//
+// defer os.close(dir_handle);
+//
+// var putter_node = std.atomic.Queue(anyframe).Node{
+// .data = @frame(),
+// .prev = null,
+// .next = null,
+// };
+// self.os_data.all_putters.put(&putter_node);
+// defer _ = self.os_data.all_putters.remove(&putter_node);
+//
+// var resume_node = Loop.ResumeNode.Basic{
+// .base = Loop.ResumeNode{
+// .id = Loop.ResumeNode.Id.Basic,
+// .handle = @frame(),
+// .overlapped = windows.OVERLAPPED{
+// .Internal = 0,
+// .InternalHigh = 0,
+// .Offset = 0,
+// .OffsetHigh = 0,
+// .hEvent = null,
+// },
+// },
+// };
+// var event_buf: [4096]u8 align(@alignOf(windows.FILE_NOTIFY_INFORMATION)) = undefined;
+//
+// // TODO handle this error not in the channel but in the setup
+// _ = windows.CreateIoCompletionPort(
+// dir_handle,
+// self.channel.loop.os_data.io_port,
+// undefined,
+// undefined,
+// ) catch |err| {
+// await (async self.channel.put(err) catch unreachable);
+// return;
+// };
+//
+// while (true) {
+// {
+// // TODO only 1 beginOneEvent for the whole function
+// self.channel.loop.beginOneEvent();
+// errdefer self.channel.loop.finishOneEvent();
+// errdefer {
+// _ = windows.kernel32.CancelIoEx(dir_handle, &resume_node.base.overlapped);
+// }
+// suspend {
+// _ = windows.kernel32.ReadDirectoryChangesW(
+// dir_handle,
+// &event_buf,
+// @intCast(windows.DWORD, event_buf.len),
+// windows.FALSE, // watch subtree
+// windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME |
+// windows.FILE_NOTIFY_CHANGE_ATTRIBUTES | windows.FILE_NOTIFY_CHANGE_SIZE |
+// windows.FILE_NOTIFY_CHANGE_LAST_WRITE | windows.FILE_NOTIFY_CHANGE_LAST_ACCESS |
+// windows.FILE_NOTIFY_CHANGE_CREATION | windows.FILE_NOTIFY_CHANGE_SECURITY,
+// null, // number of bytes transferred (unused for async)
+// &resume_node.base.overlapped,
+// null, // completion routine - unused because we use IOCP
+// );
+// }
+// }
+// var bytes_transferred: windows.DWORD = undefined;
+// if (windows.kernel32.GetOverlappedResult(dir_handle, &resume_node.base.overlapped, &bytes_transferred, windows.FALSE) == 0) {
+// const err = switch (windows.kernel32.GetLastError()) {
+// else => |err| windows.unexpectedError(err),
+// };
+// await (async self.channel.put(err) catch unreachable);
+// } else {
+// // can't use @bytesToSlice because of the special variable length name field
+// var ptr = event_buf[0..].ptr;
+// const end_ptr = ptr + bytes_transferred;
+// var ev: *windows.FILE_NOTIFY_INFORMATION = undefined;
+// while (@ptrToInt(ptr) < @ptrToInt(end_ptr)) : (ptr += ev.NextEntryOffset) {
+// ev = @ptrCast(*windows.FILE_NOTIFY_INFORMATION, ptr);
+// const emit = switch (ev.Action) {
+// windows.FILE_ACTION_REMOVED => WatchEventId.Delete,
+// windows.FILE_ACTION_MODIFIED => WatchEventId.CloseWrite,
+// else => null,
+// };
+// if (emit) |id| {
+// const basename_utf16le = ([*]u16)(&ev.FileName)[0 .. ev.FileNameLength / 2];
+// const user_value = blk: {
+// const held = await (async dir.table_lock.acquire() catch unreachable);
+// defer held.release();
+//
+// if (dir.file_table.get(basename_utf16le)) |entry| {
+// break :blk entry.value;
+// } else {
+// break :blk null;
+// }
+// };
+// if (user_value) |v| {
+// await (async self.channel.put(Event{
+// .id = id,
+// .data = v,
+// }) catch unreachable);
+// }
+// }
+// if (ev.NextEntryOffset == 0) break;
+// }
+// }
+// }
+// }
+//
+// pub async fn removeFile(self: *Self, file_path: []const u8) ?V {
+// @panic("TODO");
+// }
+//
+// async fn linuxEventPutter(inotify_fd: i32, channel: *event.Channel(Event.Error!Event), out_watch: **Self) void {
+// const loop = channel.loop;
+//
+// var watch = Self{
+// .channel = channel,
+// .os_data = OsData{
+// .putter = @frame(),
+// .inotify_fd = inotify_fd,
+// .wd_table = OsData.WdTable.init(loop.allocator),
+// .table_lock = event.Lock.init(loop),
+// },
+// };
+// out_watch.* = &watch;
+//
+// loop.beginOneEvent();
+//
+// defer {
+// watch.os_data.table_lock.deinit();
+// var wd_it = watch.os_data.wd_table.iterator();
+// while (wd_it.next()) |wd_entry| {
+// var file_it = wd_entry.value.file_table.iterator();
+// while (file_it.next()) |file_entry| {
+// loop.allocator.free(file_entry.key);
+// }
+// loop.allocator.free(wd_entry.value.dirname);
+// }
+// loop.finishOneEvent();
+// os.close(inotify_fd);
+// channel.destroy();
+// }
+//
+// var event_buf: [4096]u8 align(@alignOf(os.linux.inotify_event)) = undefined;
+//
+// while (true) {
+// const rc = os.linux.read(inotify_fd, &event_buf, event_buf.len);
+// const errno = os.linux.getErrno(rc);
+// switch (errno) {
+// 0 => {
+// // can't use @bytesToSlice because of the special variable length name field
+// var ptr = event_buf[0..].ptr;
+// const end_ptr = ptr + event_buf.len;
+// var ev: *os.linux.inotify_event = undefined;
+// while (@ptrToInt(ptr) < @ptrToInt(end_ptr)) : (ptr += @sizeOf(os.linux.inotify_event) + ev.len) {
+// ev = @ptrCast(*os.linux.inotify_event, ptr);
+// if (ev.mask & os.linux.IN_CLOSE_WRITE == os.linux.IN_CLOSE_WRITE) {
+// const basename_ptr = ptr + @sizeOf(os.linux.inotify_event);
+// const basename_with_null = basename_ptr[0 .. std.mem.len(u8, basename_ptr) + 1];
+// const user_value = blk: {
+// const held = await (async watch.os_data.table_lock.acquire() catch unreachable);
+// defer held.release();
+//
+// const dir = &watch.os_data.wd_table.get(ev.wd).?.value;
+// if (dir.file_table.get(basename_with_null)) |entry| {
+// break :blk entry.value;
+// } else {
+// break :blk null;
+// }
+// };
+// if (user_value) |v| {
+// await (async channel.put(Event{
+// .id = WatchEventId.CloseWrite,
+// .data = v,
+// }) catch unreachable);
+// }
+// }
+// }
+// },
+// os.linux.EINTR => continue,
+// os.linux.EINVAL => unreachable,
+// os.linux.EFAULT => unreachable,
+// os.linux.EAGAIN => {
+// (await (async loop.linuxWaitFd(
+// inotify_fd,
+// os.linux.EPOLLET | os.linux.EPOLLIN,
+// ) catch unreachable)) catch |err| {
+// const transformed_err = switch (err) {
+// error.FileDescriptorAlreadyPresentInSet => unreachable,
+// error.OperationCausesCircularLoop => unreachable,
+// error.FileDescriptorNotRegistered => unreachable,
+// error.FileDescriptorIncompatibleWithEpoll => unreachable,
+// error.Unexpected => unreachable,
+// else => |e| e,
+// };
+// await (async channel.put(transformed_err) catch unreachable);
+// };
+// },
+// else => unreachable,
+// }
+// }
+// }
+// };
+//}
const test_tmp_dir = "std_event_fs_test";
-// TODO this test is disabled until the coroutine rewrite is finished.
+// TODO this test is disabled until the async function rewrite is finished.
//test "write a file, watch it, write it again" {
// return error.SkipZigTest;
// const allocator = std.heap.direct_allocator;
@@ -1355,7 +1301,7 @@ async fn testFsWatch(loop: *Loop) !void {
const ev = try async watch.channel.get();
var ev_consumed = false;
- defer if (!ev_consumed) cancel ev;
+ defer if (!ev_consumed) await ev;
// overwrite line 2
const fd = try await try async openReadWrite(loop, file_path, File.default_mode);
@@ -1397,11 +1343,11 @@ pub const OutStream = struct {
};
}
- async<*mem.Allocator> fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void {
+ fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void {
const self = @fieldParentPtr(OutStream, "stream", out_stream);
const offset = self.offset;
self.offset += bytes.len;
- return await (async pwritev(self.loop, self.fd, [][]const u8{bytes}, offset) catch unreachable);
+ return pwritev(self.loop, self.fd, [][]const u8{bytes}, offset);
}
};
@@ -1423,9 +1369,9 @@ pub const InStream = struct {
};
}
- async<*mem.Allocator> fn readFn(in_stream: *Stream, bytes: []u8) Error!usize {
+ fn readFn(in_stream: *Stream, bytes: []u8) Error!usize {
const self = @fieldParentPtr(InStream, "stream", in_stream);
- const amt = try await (async preadv(self.loop, self.fd, [][]u8{bytes}, self.offset) catch unreachable);
+ const amt = try preadv(self.loop, self.fd, [][]u8{bytes}, self.offset);
self.offset += amt;
return amt;
}
diff --git a/std/event/future.zig b/std/event/future.zig
index 2e62ace978..70e20819be 100644
--- a/std/event/future.zig
+++ b/std/event/future.zig
@@ -2,13 +2,11 @@ const std = @import("../std.zig");
const assert = std.debug.assert;
const testing = std.testing;
const builtin = @import("builtin");
-const AtomicRmwOp = builtin.AtomicRmwOp;
-const AtomicOrder = builtin.AtomicOrder;
const Lock = std.event.Lock;
const Loop = std.event.Loop;
/// This is a value that starts out unavailable, until resolve() is called
-/// While it is unavailable, coroutines suspend when they try to get() it,
+/// While it is unavailable, functions suspend when they try to get() it,
/// and then are resumed when resolve() is called.
/// At this point the value remains forever available, and another resolve() is not allowed.
pub fn Future(comptime T: type) type {
@@ -23,7 +21,7 @@ pub fn Future(comptime T: type) type {
available: u8,
const Self = @This();
- const Queue = std.atomic.Queue(promise);
+ const Queue = std.atomic.Queue(anyframe);
pub fn init(loop: *Loop) Self {
return Self{
@@ -37,10 +35,10 @@ pub fn Future(comptime T: type) type {
/// available.
/// Thread-safe.
pub async fn get(self: *Self) *T {
- if (@atomicLoad(u8, &self.available, AtomicOrder.SeqCst) == 2) {
+ if (@atomicLoad(u8, &self.available, .SeqCst) == 2) {
return &self.data;
}
- const held = await (async self.lock.acquire() catch unreachable);
+ const held = self.lock.acquire();
held.release();
return &self.data;
@@ -49,7 +47,7 @@ pub fn Future(comptime T: type) type {
/// Gets the data without waiting for it. If it's available, a pointer is
/// returned. Otherwise, null is returned.
pub fn getOrNull(self: *Self) ?*T {
- if (@atomicLoad(u8, &self.available, AtomicOrder.SeqCst) == 2) {
+ if (@atomicLoad(u8, &self.available, .SeqCst) == 2) {
return &self.data;
} else {
return null;
@@ -62,10 +60,10 @@ pub fn Future(comptime T: type) type {
/// It's not required to call start() before resolve() but it can be useful since
/// this method is thread-safe.
pub async fn start(self: *Self) ?*T {
- const state = @cmpxchgStrong(u8, &self.available, 0, 1, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return null;
+ const state = @cmpxchgStrong(u8, &self.available, 0, 1, .SeqCst, .SeqCst) orelse return null;
switch (state) {
1 => {
- const held = await (async self.lock.acquire() catch unreachable);
+ const held = self.lock.acquire();
held.release();
return &self.data;
},
@@ -77,7 +75,7 @@ pub fn Future(comptime T: type) type {
/// Make the data become available. May be called only once.
/// Before calling this, modify the `data` property.
pub fn resolve(self: *Self) void {
- const prev = @atomicRmw(u8, &self.available, AtomicRmwOp.Xchg, 2, AtomicOrder.SeqCst);
+ const prev = @atomicRmw(u8, &self.available, .Xchg, 2, .SeqCst);
assert(prev == 0 or prev == 1); // resolve() called twice
Lock.Held.release(Lock.Held{ .lock = &self.lock });
}
@@ -86,7 +84,7 @@ pub fn Future(comptime T: type) type {
test "std.event.Future" {
// https://github.com/ziglang/zig/issues/1908
- if (builtin.single_threaded or builtin.os != builtin.Os.linux) return error.SkipZigTest;
+ if (builtin.single_threaded) return error.SkipZigTest;
const allocator = std.heap.direct_allocator;
@@ -94,38 +92,33 @@ test "std.event.Future" {
try loop.initMultiThreaded(allocator);
defer loop.deinit();
- const handle = try async testFuture(&loop);
- defer cancel handle;
+ const handle = async testFuture(&loop);
loop.run();
}
async fn testFuture(loop: *Loop) void {
- suspend {
- resume @handle();
- }
var future = Future(i32).init(loop);
- const a = async waitOnFuture(&future) catch @panic("memory");
- const b = async waitOnFuture(&future) catch @panic("memory");
- const c = async resolveFuture(&future) catch @panic("memory");
+ var a = async waitOnFuture(&future);
+ var b = async waitOnFuture(&future);
+ var c = async resolveFuture(&future);
- const result = (await a) + (await b);
- cancel c;
+ // TODO make this work:
+ //const result = (await a) + (await b);
+ const a_result = await a;
+ const b_result = await b;
+ const result = a_result + b_result;
+
+ await c;
testing.expect(result == 12);
}
async fn waitOnFuture(future: *Future(i32)) i32 {
- suspend {
- resume @handle();
- }
- return (await (async future.get() catch @panic("memory"))).*;
+ return future.get().*;
}
async fn resolveFuture(future: *Future(i32)) void {
- suspend {
- resume @handle();
- }
future.data = 6;
future.resolve();
}
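
The mechanical change repeated in this file, `await (async f() catch unreachable)` collapsing to a plain call `f()`, works because after the rewrite a call to an async function from an async context allocates the callee's frame inside the caller's frame and awaits it implicitly. A minimal sketch of the new calling convention (illustrative, not part of the commit):

    fn fetchValue(future: *std.event.Future(i32)) i32 {
        // Old: return (await (async future.get() catch @panic("memory"))).*;
        // New: the direct call is itself a suspend point; no heap
        // allocation, no error to catch, nothing to cancel.
        return future.get().*;
    }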
diff --git a/std/event/group.zig b/std/event/group.zig
index 36235eed74..f96b938f80 100644
--- a/std/event/group.zig
+++ b/std/event/group.zig
@@ -2,46 +2,33 @@ const std = @import("../std.zig");
const builtin = @import("builtin");
const Lock = std.event.Lock;
const Loop = std.event.Loop;
-const AtomicRmwOp = builtin.AtomicRmwOp;
-const AtomicOrder = builtin.AtomicOrder;
const testing = std.testing;
/// ReturnType must be `void` or `E!void`
pub fn Group(comptime ReturnType: type) type {
return struct {
- coro_stack: Stack,
+ frame_stack: Stack,
alloc_stack: Stack,
lock: Lock,
const Self = @This();
const Error = switch (@typeInfo(ReturnType)) {
- builtin.TypeId.ErrorUnion => |payload| payload.error_set,
+ .ErrorUnion => |payload| payload.error_set,
else => void,
};
- const Stack = std.atomic.Stack(promise->ReturnType);
+ const Stack = std.atomic.Stack(anyframe->ReturnType);
pub fn init(loop: *Loop) Self {
return Self{
- .coro_stack = Stack.init(),
+ .frame_stack = Stack.init(),
.alloc_stack = Stack.init(),
.lock = Lock.init(loop),
};
}
- /// Cancel all the outstanding promises. Can be called even if wait was already called.
- pub fn deinit(self: *Self) void {
- while (self.coro_stack.pop()) |node| {
- cancel node.data;
- }
- while (self.alloc_stack.pop()) |node| {
- cancel node.data;
- self.lock.loop.allocator.destroy(node);
- }
- }
-
- /// Add a promise to the group. Thread-safe.
- pub fn add(self: *Self, handle: promise->ReturnType) (error{OutOfMemory}!void) {
+ /// Add a frame to the group. Thread-safe.
+ pub fn add(self: *Self, handle: anyframe->ReturnType) (error{OutOfMemory}!void) {
const node = try self.lock.loop.allocator.create(Stack.Node);
node.* = Stack.Node{
.next = undefined,
@@ -51,57 +38,29 @@ pub fn Group(comptime ReturnType: type) type {
}
/// Add a node to the group. Thread-safe. Cannot fail.
- /// `node.data` should be the promise handle to add to the group.
- /// The node's memory should be in the coroutine frame of
+ /// `node.data` should be the frame handle to add to the group.
+     /// The node's memory should live inside the frame of the async
      /// function whose handle is stored in the node, or somewhere
      /// guaranteed to live at least as long.
pub fn addNode(self: *Self, node: *Stack.Node) void {
- self.coro_stack.push(node);
- }
-
- /// This is equivalent to an async call, but the async function is added to the group, instead
- /// of returning a promise. func must be async and have return type ReturnType.
- /// Thread-safe.
- pub fn call(self: *Self, comptime func: var, args: ...) (error{OutOfMemory}!void) {
- const S = struct {
- async fn asyncFunc(node: **Stack.Node, args2: ...) ReturnType {
- // TODO this is a hack to make the memory following be inside the coro frame
- suspend {
- var my_node: Stack.Node = undefined;
- node.* = &my_node;
- resume @handle();
- }
-
- // TODO this allocation elision should be guaranteed because we await it in
- // this coro frame
- return await (async func(args2) catch unreachable);
- }
- };
- var node: *Stack.Node = undefined;
- const handle = try async S.asyncFunc(&node, args);
- node.* = Stack.Node{
- .next = undefined,
- .data = handle,
- };
- self.coro_stack.push(node);
+ self.frame_stack.push(node);
}
      /// Wait for all the frames added to the group to complete.
/// Thread-safe.
/// Safe to call any number of times.
pub async fn wait(self: *Self) ReturnType {
- // TODO catch unreachable because the allocation can be grouped with
- // the coro frame allocation
- const held = await (async self.lock.acquire() catch unreachable);
+ const held = self.lock.acquire();
defer held.release();
- while (self.coro_stack.pop()) |node| {
+ var result: ReturnType = {};
+
+ while (self.frame_stack.pop()) |node| {
if (Error == void) {
await node.data;
} else {
(await node.data) catch |err| {
- self.deinit();
- return err;
+ result = err;
};
}
}
@@ -112,11 +71,11 @@ pub fn Group(comptime ReturnType: type) type {
await handle;
} else {
(await handle) catch |err| {
- self.deinit();
- return err;
+ result = err;
};
}
}
+ return result;
}
};
}
@@ -131,8 +90,7 @@ test "std.event.Group" {
try loop.initMultiThreaded(allocator);
defer loop.deinit();
- const handle = try async testGroup(&loop);
- defer cancel handle;
+ const handle = async testGroup(&loop);
loop.run();
}
@@ -140,26 +98,30 @@ test "std.event.Group" {
async fn testGroup(loop: *Loop) void {
var count: usize = 0;
var group = Group(void).init(loop);
- group.add(async sleepALittle(&count) catch @panic("memory")) catch @panic("memory");
- group.call(increaseByTen, &count) catch @panic("memory");
- await (async group.wait() catch @panic("memory"));
+ var sleep_a_little_frame = async sleepALittle(&count);
+ group.add(&sleep_a_little_frame) catch @panic("memory");
+ var increase_by_ten_frame = async increaseByTen(&count);
+ group.add(&increase_by_ten_frame) catch @panic("memory");
+ group.wait();
testing.expect(count == 11);
var another = Group(anyerror!void).init(loop);
- another.add(async somethingElse() catch @panic("memory")) catch @panic("memory");
- another.call(doSomethingThatFails) catch @panic("memory");
- testing.expectError(error.ItBroke, await (async another.wait() catch @panic("memory")));
+ var something_else_frame = async somethingElse();
+ another.add(&something_else_frame) catch @panic("memory");
+ var something_that_fails_frame = async doSomethingThatFails();
+ another.add(&something_that_fails_frame) catch @panic("memory");
+ testing.expectError(error.ItBroke, another.wait());
}
async fn sleepALittle(count: *usize) void {
std.time.sleep(1 * std.time.millisecond);
- _ = @atomicRmw(usize, count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, count, .Add, 1, .SeqCst);
}
async fn increaseByTen(count: *usize) void {
var i: usize = 0;
while (i < 10) : (i += 1) {
- _ = @atomicRmw(usize, count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, count, .Add, 1, .SeqCst);
}
}
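
With `call()` deleted, the allocation-free way to put work into a Group is `addNode` with caller-owned memory, per the lifetime rule in its doc comment. A hedged sketch of that pattern, reusing the test's `increaseByTen` (the wrapper function is hypothetical):

    fn runTracked(group: *Group(void), count: *usize) void {
        var frame = async increaseByTen(count);
        var node = std.atomic.Stack(anyframe->void).Node{
            .next = undefined,
            .data = &frame,
        };
        // node and frame both live in runTracked's frame, which stays
        // alive across wait(), satisfying the documented lifetime rule.
        group.addNode(&node);
        group.wait();
    }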
diff --git a/std/event/io.zig b/std/event/io.zig
index 29419a792e..4b54822e68 100644
--- a/std/event/io.zig
+++ b/std/event/io.zig
@@ -1,6 +1,5 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
-const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const mem = std.mem;
@@ -12,13 +11,13 @@ pub fn InStream(comptime ReadError: type) type {
/// Return the number of bytes read. It may be less than buffer.len.
/// If the number of bytes read is 0, it means end of stream.
/// End of stream is not an error condition.
- readFn: async<*Allocator> fn (self: *Self, buffer: []u8) Error!usize,
+ readFn: async fn (self: *Self, buffer: []u8) Error!usize,
/// Return the number of bytes read. It may be less than buffer.len.
/// If the number of bytes read is 0, it means end of stream.
/// End of stream is not an error condition.
pub async fn read(self: *Self, buffer: []u8) !usize {
- return await (async self.readFn(self, buffer) catch unreachable);
+ return self.readFn(self, buffer);
}
/// Return the number of bytes read. If it is less than buffer.len
@@ -26,7 +25,7 @@ pub fn InStream(comptime ReadError: type) type {
      pub async fn readFull(self: *Self, buf: []u8) !usize {
var index: usize = 0;
while (index != buf.len) {
- const amt_read = try await (async self.read(buf[index..]) catch unreachable);
+ const amt_read = try self.read(buf[index..]);
if (amt_read == 0) return index;
index += amt_read;
}
@@ -35,25 +34,25 @@ pub fn InStream(comptime ReadError: type) type {
/// Same as `readFull` but end of stream returns `error.EndOfStream`.
pub async fn readNoEof(self: *Self, buf: []u8) !void {
- const amt_read = try await (async self.readFull(buf[index..]) catch unreachable);
+         const amt_read = try self.readFull(buf[0..]);
if (amt_read < buf.len) return error.EndOfStream;
}
pub async fn readIntLittle(self: *Self, comptime T: type) !T {
var bytes: [@sizeOf(T)]u8 = undefined;
- try await (async self.readNoEof(bytes[0..]) catch unreachable);
+ try self.readNoEof(bytes[0..]);
return mem.readIntLittle(T, &bytes);
}
pub async fn readIntBe(self: *Self, comptime T: type) !T {
var bytes: [@sizeOf(T)]u8 = undefined;
- try await (async self.readNoEof(bytes[0..]) catch unreachable);
+ try self.readNoEof(bytes[0..]);
return mem.readIntBig(T, &bytes);
}
pub async fn readInt(self: *Self, comptime T: type, endian: builtin.Endian) !T {
var bytes: [@sizeOf(T)]u8 = undefined;
- try await (async self.readNoEof(bytes[0..]) catch unreachable);
+ try self.readNoEof(bytes[0..]);
return mem.readInt(T, &bytes, endian);
}
@@ -61,7 +60,7 @@ pub fn InStream(comptime ReadError: type) type {
// Only extern and packed structs have defined in-memory layout.
comptime assert(@typeInfo(T).Struct.layout != builtin.TypeInfo.ContainerLayout.Auto);
var res: [1]T = undefined;
- try await (async self.readNoEof(@sliceToBytes(res[0..])) catch unreachable);
+ try self.readNoEof(@sliceToBytes(res[0..]));
return res[0];
}
};
@@ -72,6 +71,6 @@ pub fn OutStream(comptime WriteError: type) type {
const Self = @This();
pub const Error = WriteError;
-     writeFn: async<*Allocator> fn (self: *Self, bytes: []const u8) Error!void,
+     writeFn: async fn (self: *Self, bytes: []const u8) Error!void,
};
}
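
The `async fn` field type above is the rewrite's replacement for `async<*Allocator> fn`: a virtual call that may suspend, with the frame stored by the caller rather than heap-allocated. A sketch of plugging a concrete stream into the interface, assuming this file is reachable as `std.event.io` (the fixed-buffer type is made up for illustration):

    const FixedInStream = struct {
        stream: std.event.io.InStream(error{}),
        data: []const u8,
        pos: usize,

        fn init(data: []const u8) FixedInStream {
            return FixedInStream{
                .stream = std.event.io.InStream(error{}){ .readFn = readFn },
                .data = data,
                .pos = 0,
            };
        }

        async fn readFn(in_stream: *std.event.io.InStream(error{}), buffer: []u8) error{}!usize {
            // Same @fieldParentPtr pattern as fs.zig and net.zig above.
            const self = @fieldParentPtr(FixedInStream, "stream", in_stream);
            const n = std.math.min(buffer.len, self.data.len - self.pos);
            std.mem.copy(u8, buffer[0..n], self.data[self.pos .. self.pos + n]);
            self.pos += n;
            return n;
        }
    };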
diff --git a/std/event/lock.zig b/std/event/lock.zig
index d86902cc06..0fa65f031d 100644
--- a/std/event/lock.zig
+++ b/std/event/lock.zig
@@ -3,12 +3,10 @@ const builtin = @import("builtin");
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
-const AtomicRmwOp = builtin.AtomicRmwOp;
-const AtomicOrder = builtin.AtomicOrder;
const Loop = std.event.Loop;
/// Thread-safe async/await lock.
-/// coroutines which are waiting for the lock are suspended, and
+/// Functions which are waiting for the lock are suspended, and
/// are resumed when the lock is released, in order.
/// Allows only one actor to hold the lock.
pub const Lock = struct {
@@ -17,7 +15,7 @@ pub const Lock = struct {
queue: Queue,
queue_empty_bit: u8, // TODO make this a bool
- const Queue = std.atomic.Queue(promise);
+ const Queue = std.atomic.Queue(anyframe);
pub const Held = struct {
lock: *Lock,
@@ -30,19 +28,19 @@ pub const Lock = struct {
}
// We need to release the lock.
- _ = @atomicRmw(u8, &self.lock.queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
- _ = @atomicRmw(u8, &self.lock.shared_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.lock.queue_empty_bit, .Xchg, 1, .SeqCst);
+ _ = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 0, .SeqCst);
// There might be a queue item. If we know the queue is empty, we can be done,
// because the other actor will try to obtain the lock.
// But if there's a queue item, we are the actor which must loop and attempt
// to grab the lock again.
- if (@atomicLoad(u8, &self.lock.queue_empty_bit, AtomicOrder.SeqCst) == 1) {
+ if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) {
return;
}
while (true) {
- const old_bit = @atomicRmw(u8, &self.lock.shared_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ const old_bit = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 1, .SeqCst);
if (old_bit != 0) {
// We did not obtain the lock. Great, the queue is someone else's problem.
return;
@@ -55,11 +53,11 @@ pub const Lock = struct {
}
// Release the lock again.
- _ = @atomicRmw(u8, &self.lock.queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
- _ = @atomicRmw(u8, &self.lock.shared_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.lock.queue_empty_bit, .Xchg, 1, .SeqCst);
+ _ = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 0, .SeqCst);
// Find out if we can be done.
- if (@atomicLoad(u8, &self.lock.queue_empty_bit, AtomicOrder.SeqCst) == 1) {
+ if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) {
return;
}
}
@@ -88,28 +86,23 @@ pub const Lock = struct {
/// All calls to acquire() and release() must complete before calling deinit().
pub fn deinit(self: *Lock) void {
assert(self.shared_bit == 0);
- while (self.queue.get()) |node| cancel node.data;
+ while (self.queue.get()) |node| resume node.data;
}
pub async fn acquire(self: *Lock) Held {
- // TODO explicitly put this memory in the coroutine frame #1194
- suspend {
- resume @handle();
- }
- var my_tick_node = Loop.NextTickNode.init(@handle());
+ var my_tick_node = Loop.NextTickNode.init(@frame());
errdefer _ = self.queue.remove(&my_tick_node); // TODO test canceling an acquire
suspend {
self.queue.put(&my_tick_node);
- // At this point, we are in the queue, so we might have already been resumed and this coroutine
- // frame might be destroyed. For the rest of the suspend block we cannot access the coroutine frame.
+ // At this point, we are in the queue, so we might have already been resumed.
          // We set this bit so that later we can rely on the fact that if queue_empty_bit is 1, some actor
// will attempt to grab the lock.
- _ = @atomicRmw(u8, &self.queue_empty_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.queue_empty_bit, .Xchg, 0, .SeqCst);
- const old_bit = @atomicRmw(u8, &self.shared_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ const old_bit = @atomicRmw(u8, &self.shared_bit, .Xchg, 1, .SeqCst);
if (old_bit == 0) {
if (self.queue.get()) |node| {
// Whether this node is us or someone else, we tail resume it.
@@ -123,8 +116,7 @@ pub const Lock = struct {
};
test "std.event.Lock" {
- // TODO https://github.com/ziglang/zig/issues/2377
- if (true) return error.SkipZigTest;
+ // TODO https://github.com/ziglang/zig/issues/1908
if (builtin.single_threaded) return error.SkipZigTest;
const allocator = std.heap.direct_allocator;
@@ -136,39 +128,34 @@ test "std.event.Lock" {
var lock = Lock.init(&loop);
defer lock.deinit();
- const handle = try async testLock(&loop, &lock);
- defer cancel handle;
+ _ = async testLock(&loop, &lock);
loop.run();
testing.expectEqualSlices(i32, [1]i32{3 * @intCast(i32, shared_test_data.len)} ** shared_test_data.len, shared_test_data);
}
async fn testLock(loop: *Loop, lock: *Lock) void {
- // TODO explicitly put next tick node memory in the coroutine frame #1194
- suspend {
- resume @handle();
- }
- const handle1 = async lockRunner(lock) catch @panic("out of memory");
+ var handle1 = async lockRunner(lock);
var tick_node1 = Loop.NextTickNode{
.prev = undefined,
.next = undefined,
- .data = handle1,
+ .data = &handle1,
};
loop.onNextTick(&tick_node1);
- const handle2 = async lockRunner(lock) catch @panic("out of memory");
+ var handle2 = async lockRunner(lock);
var tick_node2 = Loop.NextTickNode{
.prev = undefined,
.next = undefined,
- .data = handle2,
+ .data = &handle2,
};
loop.onNextTick(&tick_node2);
- const handle3 = async lockRunner(lock) catch @panic("out of memory");
+ var handle3 = async lockRunner(lock);
var tick_node3 = Loop.NextTickNode{
.prev = undefined,
.next = undefined,
- .data = handle3,
+ .data = &handle3,
};
loop.onNextTick(&tick_node3);
@@ -185,8 +172,8 @@ async fn lockRunner(lock: *Lock) void {
var i: usize = 0;
while (i < shared_test_data.len) : (i += 1) {
- const lock_promise = async lock.acquire() catch @panic("out of memory");
- const handle = await lock_promise;
+ var lock_frame = async lock.acquire();
+ const handle = await lock_frame;
defer handle.release();
shared_test_index = 0;
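
In contrast to the explicit frame juggling in the test above, everyday use is the pattern future.zig already adopted: a direct suspending call to acquire() plus a deferred release. A minimal sketch (the counter is illustrative):

    var protected_count: usize = 0;

    fn bump(lock: *Lock) void {
        const held = lock.acquire(); // suspends until the lock is ours
        defer held.release();
        protected_count += 1;
    }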
diff --git a/std/event/locked.zig b/std/event/locked.zig
index ede5fe3d95..aeedf3558a 100644
--- a/std/event/locked.zig
+++ b/std/event/locked.zig
@@ -3,7 +3,7 @@ const Lock = std.event.Lock;
const Loop = std.event.Loop;
/// Thread-safe async/await lock that protects one piece of data.
-/// coroutines which are waiting for the lock are suspended, and
+/// Functions which are waiting for the lock are suspended, and
/// are resumed when the lock is released, in order.
pub fn Locked(comptime T: type) type {
return struct {
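
Assuming Locked(T) keeps an acquire() that returns a held lock exposing a `value: *T` pointer (its shape elsewhere in this tree; this hunk does not show it), post-rewrite usage would sketch out as:

    fn addSample(shared: *Locked(u64), sample: u64) void {
        const held = shared.acquire(); // suspends, like Lock.acquire()
        defer held.release();
        held.value.* += sample;
    }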
diff --git a/std/event/loop.zig b/std/event/loop.zig
index aacd4bd7aa..a4605c8928 100644
--- a/std/event/loop.zig
+++ b/std/event/loop.zig
@@ -1,5 +1,6 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
+const root = @import("root");
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
@@ -13,7 +14,7 @@ const Thread = std.Thread;
pub const Loop = struct {
allocator: *mem.Allocator,
- next_tick_queue: std.atomic.Queue(promise),
+ next_tick_queue: std.atomic.Queue(anyframe),
os_data: OsData,
final_resume_node: ResumeNode,
pending_event_count: usize,
@@ -24,11 +25,11 @@ pub const Loop = struct {
available_eventfd_resume_nodes: std.atomic.Stack(ResumeNode.EventFd),
eventfd_resume_nodes: []std.atomic.Stack(ResumeNode.EventFd).Node,
- pub const NextTickNode = std.atomic.Queue(promise).Node;
+ pub const NextTickNode = std.atomic.Queue(anyframe).Node;
pub const ResumeNode = struct {
id: Id,
- handle: promise,
+ handle: anyframe,
overlapped: Overlapped,
pub const overlapped_init = switch (builtin.os) {
@@ -85,18 +86,43 @@ pub const Loop = struct {
};
};
+ pub const IoMode = enum {
+ blocking,
+ evented,
+ };
+ pub const io_mode: IoMode = if (@hasDecl(root, "io_mode")) root.io_mode else IoMode.blocking;
+ var global_instance_state: Loop = undefined;
+ const default_instance: ?*Loop = switch (io_mode) {
+ .blocking => null,
+ .evented => &global_instance_state,
+ };
+ pub const instance: ?*Loop = if (@hasDecl(root, "event_loop")) root.event_loop else default_instance;
+
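
Concretely, the `@import("root")` lookups above mean the application opts in from its root source file. A hypothetical root file selecting the evented loop:

    const std = @import("std");

    // Found by Loop via @hasDecl(root, "io_mode").
    pub const io_mode = std.event.Loop.IoMode.evented;

    pub fn main() void {
        // With io_mode == .evented, Loop.instance points at the global
        // instance instead of being null.
        std.debug.assert(std.event.Loop.instance != null);
    }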
+ /// TODO copy elision / named return values so that the threads referencing *Loop
+ /// have the correct pointer value.
+ /// https://github.com/ziglang/zig/issues/2761 and https://github.com/ziglang/zig/issues/2765
+ pub fn init(self: *Loop, allocator: *mem.Allocator) !void {
+ if (builtin.single_threaded) {
+ return self.initSingleThreaded(allocator);
+ } else {
+ return self.initMultiThreaded(allocator);
+ }
+ }
+
/// After initialization, call run().
/// TODO copy elision / named return values so that the threads referencing *Loop
/// have the correct pointer value.
+ /// https://github.com/ziglang/zig/issues/2761 and https://github.com/ziglang/zig/issues/2765
pub fn initSingleThreaded(self: *Loop, allocator: *mem.Allocator) !void {
return self.initInternal(allocator, 1);
}
/// The allocator must be thread-safe because we use it for multiplexing
- /// coroutines onto kernel threads.
+ /// async functions onto kernel threads.
/// After initialization, call run().
/// TODO copy elision / named return values so that the threads referencing *Loop
/// have the correct pointer value.
+ /// https://github.com/ziglang/zig/issues/2761 and https://github.com/ziglang/zig/issues/2765
pub fn initMultiThreaded(self: *Loop, allocator: *mem.Allocator) !void {
if (builtin.single_threaded) @compileError("initMultiThreaded unavailable when building in single-threaded mode");
const core_count = try Thread.cpuCount();
@@ -110,7 +136,7 @@ pub const Loop = struct {
.pending_event_count = 1,
.allocator = allocator,
.os_data = undefined,
- .next_tick_queue = std.atomic.Queue(promise).init(),
+ .next_tick_queue = std.atomic.Queue(anyframe).init(),
.extra_threads = undefined,
.available_eventfd_resume_nodes = std.atomic.Stack(ResumeNode.EventFd).init(),
.eventfd_resume_nodes = undefined,
@@ -397,7 +423,7 @@ pub const Loop = struct {
}
}
- /// resume_node must live longer than the promise that it holds a reference to.
+ /// resume_node must live longer than the anyframe that it holds a reference to.
/// flags must contain EPOLLET
pub fn linuxAddFd(self: *Loop, fd: i32, resume_node: *ResumeNode, flags: u32) !void {
assert(flags & os.EPOLLET == os.EPOLLET);
@@ -428,11 +454,10 @@ pub const Loop = struct {
pub async fn linuxWaitFd(self: *Loop, fd: i32, flags: u32) !void {
defer self.linuxRemoveFd(fd);
suspend {
- // TODO explicitly put this memory in the coroutine frame #1194
var resume_node = ResumeNode.Basic{
.base = ResumeNode{
.id = ResumeNode.Id.Basic,
- .handle = @handle(),
+ .handle = @frame(),
.overlapped = ResumeNode.overlapped_init,
},
};
@@ -441,14 +466,10 @@ pub const Loop = struct {
}
pub async fn bsdWaitKev(self: *Loop, ident: usize, filter: i16, fflags: u32) !os.Kevent {
- // TODO #1194
- suspend {
- resume @handle();
- }
var resume_node = ResumeNode.Basic{
.base = ResumeNode{
.id = ResumeNode.Id.Basic,
- .handle = @handle(),
+ .handle = @frame(),
.overlapped = ResumeNode.overlapped_init,
},
.kev = undefined,
@@ -460,7 +481,7 @@ pub const Loop = struct {
return resume_node.kev;
}
- /// resume_node must live longer than the promise that it holds a reference to.
+ /// resume_node must live longer than the anyframe that it holds a reference to.
pub fn bsdAddKev(self: *Loop, resume_node: *ResumeNode.Basic, ident: usize, filter: i16, fflags: u32) !void {
self.beginOneEvent();
errdefer self.finishOneEvent();
@@ -561,10 +582,10 @@ pub const Loop = struct {
self.workerRun();
switch (builtin.os) {
- builtin.Os.linux,
- builtin.Os.macosx,
- builtin.Os.freebsd,
- builtin.Os.netbsd,
+ .linux,
+ .macosx,
+ .freebsd,
+ .netbsd,
=> self.os_data.fs_thread.wait(),
else => {},
}
@@ -574,45 +595,38 @@ pub const Loop = struct {
}
}
- /// This is equivalent to an async call, except instead of beginning execution of the async function,
- /// it immediately returns to the caller, and the async function is queued in the event loop. It still
- /// returns a promise to be awaited.
- pub fn call(self: *Loop, comptime func: var, args: ...) !(promise->@typeOf(func).ReturnType) {
- const S = struct {
- async fn asyncFunc(loop: *Loop, handle: *promise->@typeOf(func).ReturnType, args2: ...) @typeOf(func).ReturnType {
- suspend {
- handle.* = @handle();
- var my_tick_node = Loop.NextTickNode{
- .prev = undefined,
- .next = undefined,
- .data = @handle(),
- };
- loop.onNextTick(&my_tick_node);
- }
- // TODO guaranteed allocation elision for await in same func as async
- return await (async func(args2) catch unreachable);
- }
- };
- var handle: promise->@typeOf(func).ReturnType = undefined;
- return async S.asyncFunc(self, &handle, args);
+     /// This is equivalent to a function call, except it calls `startCpuBoundOperation` first.
+ pub fn call(comptime func: var, args: ...) @typeOf(func).ReturnType {
+ startCpuBoundOperation();
+ return func(args);
}
- /// Awaiting a yield lets the event loop run, starting any unstarted async operations.
+ /// Yielding lets the event loop run, starting any unstarted async operations.
/// Note that async operations automatically start when a function yields for any other reason,
/// for example, when async I/O is performed. This function is intended to be used only when
/// CPU bound tasks would be waiting in the event loop but never get started because no async I/O
/// is performed.
- pub async fn yield(self: *Loop) void {
+ pub fn yield(self: *Loop) void {
suspend {
- var my_tick_node = Loop.NextTickNode{
+ var my_tick_node = NextTickNode{
.prev = undefined,
.next = undefined,
- .data = @handle(),
+ .data = @frame(),
};
self.onNextTick(&my_tick_node);
}
}
+ /// If the build is multi-threaded and there is an event loop, then it calls `yield`. Otherwise,
+ /// does nothing.
+ pub fn startCpuBoundOperation() void {
+ if (builtin.single_threaded) {
+ return;
+ } else if (instance) |event_loop| {
+ event_loop.yield();
+ }
+ }
+
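
To make the intent of `call` and `startCpuBoundOperation` concrete: a CPU-bound function yields once up front so that queued async I/O can start before the computation occupies the thread. A sketch (the checksum is illustrative):

    fn checksum(data: []const u8) u32 {
        // No-op in single-threaded builds or when there is no event
        // loop; otherwise lets pending async operations start first.
        Loop.startCpuBoundOperation();
        var sum: u32 = 0;
        for (data) |byte| sum +%= byte;
        return sum;
    }

    // Equivalent through the wrapper: const sum = Loop.call(checksum, data);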
/// call finishOneEvent when done
pub fn beginOneEvent(self: *Loop) void {
_ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
@@ -672,9 +686,9 @@ pub const Loop = struct {
const handle = resume_node.handle;
const resume_node_id = resume_node.id;
switch (resume_node_id) {
- ResumeNode.Id.Basic => {},
- ResumeNode.Id.Stop => return,
- ResumeNode.Id.EventFd => {
+ .Basic => {},
+ .Stop => return,
+ .EventFd => {
const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node);
event_fd_node.epoll_op = os.EPOLL_CTL_MOD;
const stack_node = @fieldParentPtr(std.atomic.Stack(ResumeNode.EventFd).Node, "data", event_fd_node);
@@ -696,12 +710,12 @@ pub const Loop = struct {
const handle = resume_node.handle;
const resume_node_id = resume_node.id;
switch (resume_node_id) {
- ResumeNode.Id.Basic => {
+ .Basic => {
const basic_node = @fieldParentPtr(ResumeNode.Basic, "base", resume_node);
basic_node.kev = ev;
},
- ResumeNode.Id.Stop => return,
- ResumeNode.Id.EventFd => {
+ .Stop => return,
+ .EventFd => {
const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node);
const stack_node = @fieldParentPtr(std.atomic.Stack(ResumeNode.EventFd).Node, "data", event_fd_node);
self.available_eventfd_resume_nodes.push(stack_node);
@@ -730,9 +744,9 @@ pub const Loop = struct {
const handle = resume_node.handle;
const resume_node_id = resume_node.id;
switch (resume_node_id) {
- ResumeNode.Id.Basic => {},
- ResumeNode.Id.Stop => return,
- ResumeNode.Id.EventFd => {
+ .Basic => {},
+ .Stop => return,
+ .EventFd => {
const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node);
const stack_node = @fieldParentPtr(std.atomic.Stack(ResumeNode.EventFd).Node, "data", event_fd_node);
self.available_eventfd_resume_nodes.push(stack_node);
@@ -750,12 +764,12 @@ pub const Loop = struct {
self.beginOneEvent(); // finished in posixFsRun after processing the msg
self.os_data.fs_queue.put(request_node);
switch (builtin.os) {
- builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => {
+ .macosx, .freebsd, .netbsd => {
const fs_kevs = (*const [1]os.Kevent)(&self.os_data.fs_kevent_wake);
const empty_kevs = ([*]os.Kevent)(undefined)[0..0];
_ = os.kevent(self.os_data.fs_kqfd, fs_kevs, empty_kevs, null) catch unreachable;
},
- builtin.Os.linux => {
+ .linux => {
_ = @atomicRmw(i32, &self.os_data.fs_queue_item, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
const rc = os.linux.futex_wake(&self.os_data.fs_queue_item, os.linux.FUTEX_WAKE, 1);
switch (os.linux.getErrno(rc)) {
@@ -781,18 +795,18 @@ pub const Loop = struct {
}
while (self.os_data.fs_queue.get()) |node| {
switch (node.data.msg) {
- @TagType(fs.Request.Msg).End => return,
- @TagType(fs.Request.Msg).PWriteV => |*msg| {
+ .End => return,
+ .PWriteV => |*msg| {
msg.result = os.pwritev(msg.fd, msg.iov, msg.offset);
},
- @TagType(fs.Request.Msg).PReadV => |*msg| {
+ .PReadV => |*msg| {
msg.result = os.preadv(msg.fd, msg.iov, msg.offset);
},
- @TagType(fs.Request.Msg).Open => |*msg| {
+ .Open => |*msg| {
msg.result = os.openC(msg.path.ptr, msg.flags, msg.mode);
},
- @TagType(fs.Request.Msg).Close => |*msg| os.close(msg.fd),
- @TagType(fs.Request.Msg).WriteFile => |*msg| blk: {
+ .Close => |*msg| os.close(msg.fd),
+ .WriteFile => |*msg| blk: {
const flags = os.O_LARGEFILE | os.O_WRONLY | os.O_CREAT |
os.O_CLOEXEC | os.O_TRUNC;
const fd = os.openC(msg.path.ptr, flags, msg.mode) catch |err| {
@@ -804,11 +818,11 @@ pub const Loop = struct {
},
}
switch (node.data.finish) {
- @TagType(fs.Request.Finish).TickNode => |*tick_node| self.onNextTick(tick_node),
- @TagType(fs.Request.Finish).DeallocCloseOperation => |close_op| {
+ .TickNode => |*tick_node| self.onNextTick(tick_node),
+ .DeallocCloseOperation => |close_op| {
self.allocator.destroy(close_op);
},
- @TagType(fs.Request.Finish).NoAction => {},
+ .NoAction => {},
}
self.finishOneEvent();
}
@@ -864,7 +878,7 @@ pub const Loop = struct {
test "std.event.Loop - basic" {
// https://github.com/ziglang/zig/issues/1908
- if (builtin.single_threaded or builtin.os != builtin.Os.linux) return error.SkipZigTest;
+ if (builtin.single_threaded) return error.SkipZigTest;
const allocator = std.heap.direct_allocator;
@@ -877,7 +891,7 @@ test "std.event.Loop - basic" {
test "std.event.Loop - call" {
// https://github.com/ziglang/zig/issues/1908
- if (builtin.single_threaded or builtin.os != builtin.Os.linux) return error.SkipZigTest;
+ if (builtin.single_threaded) return error.SkipZigTest;
const allocator = std.heap.direct_allocator;
@@ -886,9 +900,8 @@ test "std.event.Loop - call" {
defer loop.deinit();
var did_it = false;
- const handle = try loop.call(testEventLoop);
- const handle2 = try loop.call(testEventLoop2, handle, &did_it);
- defer cancel handle2;
+ var handle = async Loop.call(testEventLoop);
+ var handle2 = async Loop.call(testEventLoop2, &handle, &did_it);
loop.run();
@@ -899,7 +912,7 @@ async fn testEventLoop() i32 {
return 1234;
}
-async fn testEventLoop2(h: promise->i32, did_it: *bool) void {
+async fn testEventLoop2(h: anyframe->i32, did_it: *bool) void {
const value = await h;
testing.expect(value == 1234);
did_it.* = true;
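
The `anyframe->i32` parameter above is the key new type: any `*@Frame(f)` where `f` returns i32 coerces to it, and it is exactly the operand `await` accepts. A small sketch of handing a typed frame around:

    fn produce() i32 {
        return 1234;
    }

    fn consume(h: anyframe->i32) i32 {
        return await h; // suspends only if produce() has not finished yet
    }

    fn demo() void {
        var frame = async produce();
        assert(consume(&frame) == 1234);
    }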
diff --git a/std/event/net.zig b/std/event/net.zig
index 46b724e32e..bed665dcdc 100644
--- a/std/event/net.zig
+++ b/std/event/net.zig
@@ -9,24 +9,24 @@ const File = std.fs.File;
const fd_t = os.fd_t;
pub const Server = struct {
- handleRequestFn: async<*mem.Allocator> fn (*Server, *const std.net.Address, File) void,
+ handleRequestFn: async fn (*Server, *const std.net.Address, File) void,
loop: *Loop,
sockfd: ?i32,
- accept_coro: ?promise,
+ accept_frame: ?anyframe,
listen_address: std.net.Address,
waiting_for_emfile_node: PromiseNode,
listen_resume_node: event.Loop.ResumeNode,
- const PromiseNode = std.TailQueue(promise).Node;
+ const PromiseNode = std.TailQueue(anyframe).Node;
pub fn init(loop: *Loop) Server {
- // TODO can't initialize handler coroutine here because we need well defined copy elision
+ // TODO can't initialize handler here because we need well defined copy elision
return Server{
.loop = loop,
.sockfd = null,
- .accept_coro = null,
+ .accept_frame = null,
.handleRequestFn = undefined,
.waiting_for_emfile_node = undefined,
.listen_address = undefined,
@@ -41,7 +41,7 @@ pub const Server = struct {
pub fn listen(
self: *Server,
address: *const std.net.Address,
- handleRequestFn: async<*mem.Allocator> fn (*Server, *const std.net.Address, File) void,
+ handleRequestFn: async fn (*Server, *const std.net.Address, File) void,
) !void {
self.handleRequestFn = handleRequestFn;
@@ -53,10 +53,10 @@ pub const Server = struct {
try os.listen(sockfd, os.SOMAXCONN);
self.listen_address = std.net.Address.initPosix(try os.getsockname(sockfd));
- self.accept_coro = try async Server.handler(self);
- errdefer cancel self.accept_coro.?;
+ self.accept_frame = async Server.handler(self);
+ errdefer await self.accept_frame.?;
- self.listen_resume_node.handle = self.accept_coro.?;
+ self.listen_resume_node.handle = self.accept_frame.?;
try self.loop.linuxAddFd(sockfd, &self.listen_resume_node, os.EPOLLIN | os.EPOLLOUT | os.EPOLLET);
errdefer self.loop.removeFd(sockfd);
}
@@ -71,7 +71,7 @@ pub const Server = struct {
}
pub fn deinit(self: *Server) void {
- if (self.accept_coro) |accept_coro| cancel accept_coro;
+ if (self.accept_frame) |accept_frame| await accept_frame;
if (self.sockfd) |sockfd| os.close(sockfd);
}
@@ -86,12 +86,7 @@ pub const Server = struct {
continue;
}
var socket = File.openHandle(accepted_fd);
- _ = async self.handleRequestFn(self, &accepted_addr, socket) catch |err| switch (err) {
- error.OutOfMemory => {
- socket.close();
- continue;
- },
- };
+ self.handleRequestFn(self, &accepted_addr, socket);
} else |err| switch (err) {
error.ProcessFdQuotaExceeded => @panic("TODO handle this error"),
error.ConnectionAborted => continue,
@@ -124,7 +119,7 @@ pub async fn connectUnixSocket(loop: *Loop, path: []const u8) !i32 {
mem.copy(u8, sock_addr.path[0..], path);
const size = @intCast(u32, @sizeOf(os.sa_family_t) + path.len);
try os.connect_async(sockfd, &sock_addr, size);
- try await try async loop.linuxWaitFd(sockfd, os.EPOLLIN | os.EPOLLOUT | os.EPOLLET);
+ try loop.linuxWaitFd(sockfd, os.EPOLLIN | os.EPOLLOUT | os.EPOLLET);
try os.getsockoptError(sockfd);
return sockfd;
@@ -149,7 +144,7 @@ pub async fn read(loop: *std.event.Loop, fd: fd_t, buffer: []u8) ReadError!usize
.iov_len = buffer.len,
};
const iovs: *const [1]os.iovec = &iov;
- return await (async readvPosix(loop, fd, iovs, 1) catch unreachable);
+ return readvPosix(loop, fd, iovs, 1);
}
pub const WriteError = error{};
@@ -160,7 +155,7 @@ pub async fn write(loop: *std.event.Loop, fd: fd_t, buffer: []const u8) WriteErr
.iov_len = buffer.len,
};
const iovs: *const [1]os.iovec_const = &iov;
- return await (async writevPosix(loop, fd, iovs, 1) catch unreachable);
+ return writevPosix(loop, fd, iovs, 1);
}
pub async fn writevPosix(loop: *Loop, fd: i32, iov: [*]const os.iovec_const, count: usize) !void {
@@ -174,7 +169,7 @@ pub async fn writevPosix(loop: *Loop, fd: i32, iov: [*]const os.iovec_const, cou
os.EINVAL => unreachable,
os.EFAULT => unreachable,
os.EAGAIN => {
- try await (async loop.linuxWaitFd(fd, os.EPOLLET | os.EPOLLOUT) catch unreachable);
+ try loop.linuxWaitFd(fd, os.EPOLLET | os.EPOLLOUT);
continue;
},
os.EBADF => unreachable, // always a race condition
@@ -205,7 +200,7 @@ pub async fn readvPosix(loop: *std.event.Loop, fd: i32, iov: [*]os.iovec, count:
os.EINVAL => unreachable,
os.EFAULT => unreachable,
os.EAGAIN => {
- try await (async loop.linuxWaitFd(fd, os.EPOLLET | os.EPOLLIN) catch unreachable);
+ try loop.linuxWaitFd(fd, os.EPOLLET | os.EPOLLIN);
continue;
},
os.EBADF => unreachable, // always a race condition
@@ -232,7 +227,7 @@ pub async fn writev(loop: *Loop, fd: fd_t, data: []const []const u8) !void {
};
}
- return await (async writevPosix(loop, fd, iovecs.ptr, data.len) catch unreachable);
+ return writevPosix(loop, fd, iovecs.ptr, data.len);
}
pub async fn readv(loop: *Loop, fd: fd_t, data: []const []u8) !usize {
@@ -246,7 +241,7 @@ pub async fn readv(loop: *Loop, fd: fd_t, data: []const []u8) !usize {
};
}
- return await (async readvPosix(loop, fd, iovecs.ptr, data.len) catch unreachable);
+ return readvPosix(loop, fd, iovecs.ptr, data.len);
}
pub async fn connect(loop: *Loop, _address: *const std.net.Address) !File {
@@ -256,7 +251,7 @@ pub async fn connect(loop: *Loop, _address: *const std.net.Address) !File {
errdefer os.close(sockfd);
try os.connect_async(sockfd, &address.os_addr, @sizeOf(os.sockaddr_in));
- try await try async loop.linuxWaitFd(sockfd, os.EPOLLIN | os.EPOLLOUT | os.EPOLLET);
+ try loop.linuxWaitFd(sockfd, os.EPOLLIN | os.EPOLLOUT | os.EPOLLET);
try os.getsockoptError(sockfd);
return File.openHandle(sockfd);
@@ -275,18 +270,13 @@ test "listen on a port, send bytes, receive bytes" {
tcp_server: Server,
const Self = @This();
- async<*mem.Allocator> fn handler(tcp_server: *Server, _addr: *const std.net.Address, _socket: File) void {
+ async fn handler(tcp_server: *Server, _addr: *const std.net.Address, _socket: File) void {
const self = @fieldParentPtr(Self, "tcp_server", tcp_server);
var socket = _socket; // TODO https://github.com/ziglang/zig/issues/1592
defer socket.close();
- // TODO guarantee elision of this allocation
- const next_handler = async errorableHandler(self, _addr, socket) catch unreachable;
- (await next_handler) catch |err| {
+         errorableHandler(self, _addr, socket) catch |err| {
std.debug.panic("unable to handle connection: {}\n", err);
};
- suspend {
- cancel @handle();
- }
}
async fn errorableHandler(self: *Self, _addr: *const std.net.Address, _socket: File) !void {
const addr = _addr.*; // TODO https://github.com/ziglang/zig/issues/1592
@@ -306,15 +296,14 @@ test "listen on a port, send bytes, receive bytes" {
defer server.tcp_server.deinit();
try server.tcp_server.listen(&addr, MyServer.handler);
- const p = try async doAsyncTest(&loop, &server.tcp_server.listen_address, &server.tcp_server);
- defer cancel p;
+ _ = async doAsyncTest(&loop, &server.tcp_server.listen_address, &server.tcp_server);
loop.run();
}
async fn doAsyncTest(loop: *Loop, address: *const std.net.Address, server: *Server) void {
errdefer @panic("test failure");
- var socket_file = try await try async connect(loop, address);
+ var socket_file = try connect(loop, address);
defer socket_file.close();
var buf: [512]u8 = undefined;
@@ -340,9 +329,9 @@ pub const OutStream = struct {
};
}
- async<*mem.Allocator> fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void {
+ async fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void {
const self = @fieldParentPtr(OutStream, "stream", out_stream);
- return await (async write(self.loop, self.fd, bytes) catch unreachable);
+ return write(self.loop, self.fd, bytes);
}
};
@@ -362,8 +351,8 @@ pub const InStream = struct {
};
}
- async<*mem.Allocator> fn readFn(in_stream: *Stream, bytes: []u8) Error!usize {
+ async fn readFn(in_stream: *Stream, bytes: []u8) Error!usize {
const self = @fieldParentPtr(InStream, "stream", in_stream);
- return await (async read(self.loop, self.fd, bytes) catch unreachable);
+ return read(self.loop, self.fd, bytes);
}
};
diff --git a/std/event/rwlock.zig b/std/event/rwlock.zig
index 7b97fa24c1..bf7ea0fa9f 100644
--- a/std/event/rwlock.zig
+++ b/std/event/rwlock.zig
@@ -3,12 +3,10 @@ const builtin = @import("builtin");
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
-const AtomicRmwOp = builtin.AtomicRmwOp;
-const AtomicOrder = builtin.AtomicOrder;
const Loop = std.event.Loop;
/// Thread-safe async/await lock.
-/// coroutines which are waiting for the lock are suspended, and
+/// Functions which are waiting for the lock are suspended, and
/// are resumed when the lock is released, in order.
/// Many readers can hold the lock at the same time; however locking for writing is exclusive.
/// When a read lock is held, it will not be released until the reader queue is empty.
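
Given those semantics, typical use mirrors Lock after the rewrite: direct suspending calls to acquireRead() and acquireWrite() with deferred releases. A minimal sketch (the shared counter is illustrative):

    var shared_total: i32 = 0;

    fn readTotal(rwlock: *RwLock) i32 {
        const held = rwlock.acquireRead(); // shared with other readers
        defer held.release();
        return shared_total;
    }

    fn addToTotal(rwlock: *RwLock, x: i32) void {
        const held = rwlock.acquireWrite(); // exclusive
        defer held.release();
        shared_total += x;
    }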
@@ -28,19 +26,19 @@ pub const RwLock = struct {
const ReadLock = 2;
};
- const Queue = std.atomic.Queue(promise);
+ const Queue = std.atomic.Queue(anyframe);
pub const HeldRead = struct {
lock: *RwLock,
pub fn release(self: HeldRead) void {
// If other readers still hold the lock, we're done.
- if (@atomicRmw(usize, &self.lock.reader_lock_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst) != 1) {
+ if (@atomicRmw(usize, &self.lock.reader_lock_count, .Sub, 1, .SeqCst) != 1) {
return;
}
- _ = @atomicRmw(u8, &self.lock.reader_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
- if (@cmpxchgStrong(u8, &self.lock.shared_state, State.ReadLock, State.Unlocked, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) {
+ _ = @atomicRmw(u8, &self.lock.reader_queue_empty_bit, .Xchg, 1, .SeqCst);
+ if (@cmpxchgStrong(u8, &self.lock.shared_state, State.ReadLock, State.Unlocked, .SeqCst, .SeqCst) != null) {
// Didn't unlock. Someone else's problem.
return;
}
@@ -61,17 +59,17 @@ pub const RwLock = struct {
}
// We need to release the write lock. Check if any readers are waiting to grab the lock.
- if (@atomicLoad(u8, &self.lock.reader_queue_empty_bit, AtomicOrder.SeqCst) == 0) {
+ if (@atomicLoad(u8, &self.lock.reader_queue_empty_bit, .SeqCst) == 0) {
// Switch to a read lock.
- _ = @atomicRmw(u8, &self.lock.shared_state, AtomicRmwOp.Xchg, State.ReadLock, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.lock.shared_state, .Xchg, State.ReadLock, .SeqCst);
while (self.lock.reader_queue.get()) |node| {
self.lock.loop.onNextTick(node);
}
return;
}
- _ = @atomicRmw(u8, &self.lock.writer_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
- _ = @atomicRmw(u8, &self.lock.shared_state, AtomicRmwOp.Xchg, State.Unlocked, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.lock.writer_queue_empty_bit, .Xchg, 1, .SeqCst);
+ _ = @atomicRmw(u8, &self.lock.shared_state, .Xchg, State.Unlocked, .SeqCst);
self.lock.commonPostUnlock();
}
@@ -93,32 +91,30 @@ pub const RwLock = struct {
/// All calls to acquire() and release() must complete before calling deinit().
pub fn deinit(self: *RwLock) void {
assert(self.shared_state == State.Unlocked);
- while (self.writer_queue.get()) |node| cancel node.data;
- while (self.reader_queue.get()) |node| cancel node.data;
+ while (self.writer_queue.get()) |node| resume node.data;
+ while (self.reader_queue.get()) |node| resume node.data;
}
pub async fn acquireRead(self: *RwLock) HeldRead {
- _ = @atomicRmw(usize, &self.reader_lock_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, &self.reader_lock_count, .Add, 1, .SeqCst);
suspend {
- // TODO explicitly put this memory in the coroutine frame #1194
var my_tick_node = Loop.NextTickNode{
- .data = @handle(),
+ .data = @frame(),
.prev = undefined,
.next = undefined,
};
self.reader_queue.put(&my_tick_node);
- // At this point, we are in the reader_queue, so we might have already been resumed and this coroutine
- // frame might be destroyed. For the rest of the suspend block we cannot access the coroutine frame.
+ // At this point, we are in the reader_queue, so we might have already been resumed.
          // We set this bit so that later we can rely on the fact that if reader_queue_empty_bit is 1,
// some actor will attempt to grab the lock.
- _ = @atomicRmw(u8, &self.reader_queue_empty_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.reader_queue_empty_bit, .Xchg, 0, .SeqCst);
// Here we don't care if we are the one to do the locking or if it was already locked for reading.
- const have_read_lock = if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |old_state| old_state == State.ReadLock else true;
+ const have_read_lock = if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, .SeqCst, .SeqCst)) |old_state| old_state == State.ReadLock else true;
if (have_read_lock) {
// Give out all the read locks.
if (self.reader_queue.get()) |first_node| {
@@ -134,24 +130,22 @@ pub const RwLock = struct {
pub async fn acquireWrite(self: *RwLock) HeldWrite {
suspend {
- // TODO explicitly put this memory in the coroutine frame #1194
var my_tick_node = Loop.NextTickNode{
- .data = @handle(),
+ .data = @frame(),
.prev = undefined,
.next = undefined,
};
self.writer_queue.put(&my_tick_node);
- // At this point, we are in the writer_queue, so we might have already been resumed and this coroutine
- // frame might be destroyed. For the rest of the suspend block we cannot access the coroutine frame.
+ // At this point, we are in the writer_queue, so we might have already been resumed.
          // We set this bit so that later we can rely on the fact that if writer_queue_empty_bit is 1,
// some actor will attempt to grab the lock.
- _ = @atomicRmw(u8, &self.writer_queue_empty_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.writer_queue_empty_bit, .Xchg, 0, .SeqCst);
// Here we must be the one to acquire the write lock. It cannot already be locked.
- if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst) == null) {
+ if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, .SeqCst, .SeqCst) == null) {
// We now have a write lock.
if (self.writer_queue.get()) |node| {
// Whether this node is us or someone else, we tail resume it.
@@ -169,8 +163,8 @@ pub const RwLock = struct {
// obtain the lock.
// But if there's a writer_queue item or a reader_queue item,
// we are the actor which must loop and attempt to grab the lock again.
- if (@atomicLoad(u8, &self.writer_queue_empty_bit, AtomicOrder.SeqCst) == 0) {
- if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) {
+ if (@atomicLoad(u8, &self.writer_queue_empty_bit, .SeqCst) == 0) {
+ if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, .SeqCst, .SeqCst) != null) {
// We did not obtain the lock. Great, the queues are someone else's problem.
return;
}
@@ -180,13 +174,13 @@ pub const RwLock = struct {
return;
}
// Release the lock again.
- _ = @atomicRmw(u8, &self.writer_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
- _ = @atomicRmw(u8, &self.shared_state, AtomicRmwOp.Xchg, State.Unlocked, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.writer_queue_empty_bit, .Xchg, 1, .SeqCst);
+ _ = @atomicRmw(u8, &self.shared_state, .Xchg, State.Unlocked, .SeqCst);
continue;
}
- if (@atomicLoad(u8, &self.reader_queue_empty_bit, AtomicOrder.SeqCst) == 0) {
- if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) {
+ if (@atomicLoad(u8, &self.reader_queue_empty_bit, .SeqCst) == 0) {
+ if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, .SeqCst, .SeqCst) != null) {
// We did not obtain the lock. Great, the queues are someone else's problem.
return;
}
@@ -199,8 +193,8 @@ pub const RwLock = struct {
return;
}
// Release the lock again.
- _ = @atomicRmw(u8, &self.reader_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
- if (@cmpxchgStrong(u8, &self.shared_state, State.ReadLock, State.Unlocked, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) {
+ _ = @atomicRmw(u8, &self.reader_queue_empty_bit, .Xchg, 1, .SeqCst);
+ if (@cmpxchgStrong(u8, &self.shared_state, State.ReadLock, State.Unlocked, .SeqCst, .SeqCst) != null) {
// Didn't unlock. Someone else's problem.
return;
}
@@ -215,6 +209,9 @@ test "std.event.RwLock" {
// https://github.com/ziglang/zig/issues/2377
if (true) return error.SkipZigTest;
+ // https://github.com/ziglang/zig/issues/1908
+ if (builtin.single_threaded) return error.SkipZigTest;
+
const allocator = std.heap.direct_allocator;
var loop: Loop = undefined;
@@ -224,8 +221,7 @@ test "std.event.RwLock" {
var lock = RwLock.init(&loop);
defer lock.deinit();
- const handle = try async testLock(&loop, &lock);
- defer cancel handle;
+ const handle = testLock(&loop, &lock);
loop.run();
const expected_result = [1]i32{shared_it_count * @intCast(i32, shared_test_data.len)} ** shared_test_data.len;
@@ -233,28 +229,31 @@ test "std.event.RwLock" {
}
async fn testLock(loop: *Loop, lock: *RwLock) void {
- // TODO explicitly put next tick node memory in the coroutine frame #1194
- suspend {
- resume @handle();
- }
-
var read_nodes: [100]Loop.NextTickNode = undefined;
for (read_nodes) |*read_node| {
- read_node.data = async readRunner(lock) catch @panic("out of memory");
+ const frame = loop.allocator.create(@Frame(readRunner)) catch @panic("memory");
+ read_node.data = frame;
+ frame.* = async readRunner(lock);
loop.onNextTick(read_node);
}
var write_nodes: [shared_it_count]Loop.NextTickNode = undefined;
for (write_nodes) |*write_node| {
- write_node.data = async writeRunner(lock) catch @panic("out of memory");
+ const frame = loop.allocator.create(@Frame(writeRunner)) catch @panic("memory");
+ write_node.data = frame;
+ frame.* = async writeRunner(lock);
loop.onNextTick(write_node);
}
for (write_nodes) |*write_node| {
- await @ptrCast(promise->void, write_node.data);
+ const casted = @ptrCast(*const @Frame(writeRunner), write_node.data);
+ await casted;
+ loop.allocator.destroy(casted);
}
for (read_nodes) |*read_node| {
- await @ptrCast(promise->void, read_node.data);
+ const casted = @ptrCast(*const @Frame(readRunner), read_node.data);
+ await casted;
+ loop.allocator.destroy(casted);
}
}
@@ -269,7 +268,7 @@ async fn writeRunner(lock: *RwLock) void {
var i: usize = 0;
while (i < shared_test_data.len) : (i += 1) {
std.time.sleep(100 * std.time.microsecond);
- const lock_promise = async lock.acquireWrite() catch @panic("out of memory");
+ const lock_promise = async lock.acquireWrite();
const handle = await lock_promise;
defer handle.release();
@@ -287,7 +286,7 @@ async fn readRunner(lock: *RwLock) void {
var i: usize = 0;
while (i < shared_test_data.len) : (i += 1) {
- const lock_promise = async lock.acquireRead() catch @panic("out of memory");
+ const lock_promise = async lock.acquireRead();
const handle = await lock_promise;
defer handle.release();
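
The pattern in testLock above is the new idiom this change introduces for async frames that must outlive the caller's stack: allocate @Frame(func) on the heap, start the call in place with async, and consume it exactly once with await before destroying it. A minimal sketch of the idiom, assuming direct_allocator is acceptable (the function names here are illustrative):

{#code_begin|test#}
const std = @import("std");

test "heap-allocated async frame" {
    // The test block itself cannot contain a suspend point,
    // so the awaiting happens inside an async function.
    _ = async amain();
}

fn amain() void {
    const allocator = std.heap.direct_allocator;
    // Allocate space for the frame, then start the async call in place.
    const frame = allocator.create(@Frame(add)) catch @panic("memory");
    frame.* = async add(1, 2);
    // add has no suspend point, so await returns immediately,
    // copying the result out of the frame.
    const result = await frame;
    std.debug.assert(result == 3);
    allocator.destroy(frame);
}

fn add(a: i32, b: i32) i32 {
    return a + b;
}
{#code_end#}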
diff --git a/std/event/rwlocked.zig b/std/event/rwlocked.zig
index 0448e0298e..386aa08407 100644
--- a/std/event/rwlocked.zig
+++ b/std/event/rwlocked.zig
@@ -3,7 +3,7 @@ const RwLock = std.event.RwLock;
const Loop = std.event.Loop;
/// Thread-safe async/await RW lock that protects one piece of data.
-/// coroutines which are waiting for the lock are suspended, and
+/// Functions which are waiting for the lock are suspended, and
/// are resumed when the lock is released, in order.
pub fn RwLocked(comptime T: type) type {
return struct {
diff --git a/std/fmt.zig b/std/fmt.zig
index 7c08cd14ee..8c4995cb91 100644
--- a/std/fmt.zig
+++ b/std/fmt.zig
@@ -328,9 +328,6 @@ pub fn formatType(
try output(context, "error.");
return output(context, @errorName(value));
},
- .Promise => {
- return format(context, Errors, output, "promise@{x}", @ptrToInt(value));
- },
.Enum => {
if (comptime std.meta.trait.hasFn("format")(T)) {
return value.format(fmt, options, context, Errors, output);
diff --git a/std/hash.zig b/std/hash.zig
index 148504aa39..ab3a0ea8f3 100644
--- a/std/hash.zig
+++ b/std/hash.zig
@@ -1,6 +1,9 @@
const adler = @import("hash/adler.zig");
pub const Adler32 = adler.Adler32;
+const auto_hash = @import("hash/auto_hash.zig");
+pub const autoHash = auto_hash.autoHash;
+
// pub for polynomials + generic crc32 construction
pub const crc = @import("hash/crc.zig");
pub const Crc32 = crc.Crc32;
@@ -16,6 +19,7 @@ pub const SipHash128 = siphash.SipHash128;
pub const murmur = @import("hash/murmur.zig");
pub const Murmur2_32 = murmur.Murmur2_32;
+
pub const Murmur2_64 = murmur.Murmur2_64;
pub const Murmur3_32 = murmur.Murmur3_32;
@@ -23,11 +27,16 @@ pub const cityhash = @import("hash/cityhash.zig");
pub const CityHash32 = cityhash.CityHash32;
pub const CityHash64 = cityhash.CityHash64;
+const wyhash = @import("hash/wyhash.zig");
+pub const Wyhash = wyhash.Wyhash;
+
test "hash" {
_ = @import("hash/adler.zig");
+ _ = @import("hash/auto_hash.zig");
_ = @import("hash/crc.zig");
_ = @import("hash/fnv.zig");
_ = @import("hash/siphash.zig");
_ = @import("hash/murmur.zig");
_ = @import("hash/cityhash.zig");
+ _ = @import("hash/wyhash.zig");
}
diff --git a/std/hash/auto_hash.zig b/std/hash/auto_hash.zig
new file mode 100644
index 0000000000..251f7e798a
--- /dev/null
+++ b/std/hash/auto_hash.zig
@@ -0,0 +1,211 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const mem = std.mem;
+const meta = std.meta;
+
+/// Provides generic hashing for any eligible type.
+/// Only hashes `key` itself; pointers are not followed.
+pub fn autoHash(hasher: var, key: var) void {
+ const Key = @typeOf(key);
+ switch (@typeInfo(Key)) {
+ .NoReturn,
+ .Opaque,
+ .Undefined,
+ .ArgTuple,
+ .Void,
+ .Null,
+ .BoundFn,
+ .ComptimeFloat,
+ .ComptimeInt,
+ .Type,
+ .EnumLiteral,
+ .Frame,
+ => @compileError("cannot hash this type"),
+
+ // Help the optimizer see that hashing an int is easy by inlining!
+ // TODO Check if the situation is better after #561 is resolved.
+ .Int => @inlineCall(hasher.update, std.mem.asBytes(&key)),
+
+ .Float => |info| autoHash(hasher, @bitCast(@IntType(false, info.bits), key)),
+
+ .Bool => autoHash(hasher, @boolToInt(key)),
+ .Enum => autoHash(hasher, @enumToInt(key)),
+ .ErrorSet => autoHash(hasher, @errorToInt(key)),
+ .AnyFrame, .Fn => autoHash(hasher, @ptrToInt(key)),
+
+ .Pointer => |info| switch (info.size) {
+ builtin.TypeInfo.Pointer.Size.One,
+ builtin.TypeInfo.Pointer.Size.Many,
+ builtin.TypeInfo.Pointer.Size.C,
+ => autoHash(hasher, @ptrToInt(key)),
+
+ builtin.TypeInfo.Pointer.Size.Slice => {
+ autoHash(hasher, key.ptr);
+ autoHash(hasher, key.len);
+ },
+ },
+
+ .Optional => if (key) |k| autoHash(hasher, k),
+
+ .Array => {
+ // TODO detect via a trait when Key has no padding bits to
+ // hash it as an array of bytes.
+ // Otherwise, hash every element.
+ for (key) |element| {
+ autoHash(hasher, element);
+ }
+ },
+
+ .Vector => |info| {
+ if (info.child.bit_count % 8 == 0) {
+ // If there are no unused bits in the child type, we can just hash
+ // this as an array of bytes.
+ hasher.update(mem.asBytes(&key));
+ } else {
+ // Otherwise, hash every element.
+ // TODO remove the copy to an array once field access is done.
+ const array: [info.len]info.child = key;
+ comptime var i: u32 = 0;
+ inline while (i < info.len) : (i += 1) {
+ autoHash(hasher, array[i]);
+ }
+ }
+ },
+
+ .Struct => |info| {
+ // TODO detect via a trait when Key has no padding bits to
+ // hash it as an array of bytes.
+ // Otherwise, hash every field.
+ inline for (info.fields) |field| {
+ // We reuse the hash of the previous field as the seed for the
+ // next one so that they're dependent.
+ autoHash(hasher, @field(key, field.name));
+ }
+ },
+
+ .Union => |info| blk: {
+ if (info.tag_type) |tag_type| {
+ const tag = meta.activeTag(key);
+ const s = autoHash(hasher, tag);
+ inline for (info.fields) |field| {
+ const enum_field = field.enum_field.?;
+ if (enum_field.value == @enumToInt(tag)) {
+ autoHash(hasher, @field(key, enum_field.name));
+ // TODO use a labelled break when it does not crash the compiler.
+ // break :blk;
+ return;
+ }
+ }
+ unreachable;
+ } else @compileError("cannot hash untagged union type: " ++ @typeName(Key) ++ ", provide your own hash function");
+ },
+
+ .ErrorUnion => blk: {
+ const payload = key catch |err| {
+ autoHash(hasher, err);
+ break :blk;
+ };
+ autoHash(hasher, payload);
+ },
+ }
+}
+
+const testing = std.testing;
+const Wyhash = std.hash.Wyhash;
+
+fn testAutoHash(key: var) u64 {
+ // Any hash function could be used here; this only tests autoHash.
+ var hasher = Wyhash.init(0);
+ autoHash(&hasher, key);
+ return hasher.final();
+}
+
+test "autoHash slice" {
+ // Allocate one array dynamically so that we're assured it is not merged
+ // with the other by the optimization passes.
+ const array1 = try std.heap.direct_allocator.create([6]u32);
+ defer std.heap.direct_allocator.destroy(array1);
+ array1.* = [_]u32{ 1, 2, 3, 4, 5, 6 };
+ const array2 = [_]u32{ 1, 2, 3, 4, 5, 6 };
+ const a = array1[0..];
+ const b = array2[0..];
+ const c = array1[0..3];
+ testing.expect(testAutoHash(a) == testAutoHash(a));
+ testing.expect(testAutoHash(a) != testAutoHash(array1));
+ testing.expect(testAutoHash(a) != testAutoHash(b));
+ testing.expect(testAutoHash(a) != testAutoHash(c));
+}
+
+test "testAutoHash optional" {
+ const a: ?u32 = 123;
+ const b: ?u32 = null;
+ testing.expectEqual(testAutoHash(a), testAutoHash(u32(123)));
+ testing.expect(testAutoHash(a) != testAutoHash(b));
+ testing.expectEqual(testAutoHash(b), 0);
+}
+
+test "testAutoHash array" {
+ const a = [_]u32{ 1, 2, 3 };
+ const h = testAutoHash(a);
+ var hasher = Wyhash.init(0);
+ autoHash(&hasher, u32(1));
+ autoHash(&hasher, u32(2));
+ autoHash(&hasher, u32(3));
+ testing.expectEqual(h, hasher.final());
+}
+
+test "testAutoHash struct" {
+ const Foo = struct {
+ a: u32 = 1,
+ b: u32 = 2,
+ c: u32 = 3,
+ };
+ const f = Foo{};
+ const h = testAutoHash(f);
+ var hasher = Wyhash.init(0);
+ autoHash(&hasher, u32(1));
+ autoHash(&hasher, u32(2));
+ autoHash(&hasher, u32(3));
+ testing.expectEqual(h, hasher.final());
+}
+
+test "testAutoHash union" {
+ const Foo = union(enum) {
+ A: u32,
+ B: f32,
+ C: u32,
+ };
+
+ const a = Foo{ .A = 18 };
+ var b = Foo{ .B = 12.34 };
+ const c = Foo{ .C = 18 };
+ testing.expect(testAutoHash(a) == testAutoHash(a));
+ testing.expect(testAutoHash(a) != testAutoHash(b));
+ testing.expect(testAutoHash(a) != testAutoHash(c));
+
+ b = Foo{ .A = 18 };
+ testing.expect(testAutoHash(a) == testAutoHash(b));
+}
+
+test "testAutoHash vector" {
+ const a: @Vector(4, u32) = [_]u32{ 1, 2, 3, 4 };
+ const b: @Vector(4, u32) = [_]u32{ 1, 2, 3, 5 };
+ const c: @Vector(4, u31) = [_]u31{ 1, 2, 3, 4 };
+ testing.expect(testAutoHash(a) == testAutoHash(a));
+ testing.expect(testAutoHash(a) != testAutoHash(b));
+ testing.expect(testAutoHash(a) != testAutoHash(c));
+}
+
+test "testAutoHash error union" {
+ const Errors = error{Test};
+ const Foo = struct {
+ a: u32 = 1,
+ b: u32 = 2,
+ c: u32 = 3,
+ };
+ const f = Foo{};
+ const g: Errors!Foo = Errors.Test;
+ testing.expect(testAutoHash(f) != testAutoHash(g));
+ testing.expect(testAutoHash(f) == testAutoHash(Foo{}));
+ testing.expect(testAutoHash(g) == testAutoHash(Errors.Test));
+}
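
Since autoHash drives whatever hasher it is given, a struct key can be hashed field-by-field in a couple of lines. A small sketch in the same style as the tests above (the Point struct is illustrative):

{#code_begin|test#}
const std = @import("std");

test "autoHash a struct" {
    const Point = struct {
        x: i32,
        y: i32,
    };
    var hasher_a = std.hash.Wyhash.init(0);
    var hasher_b = std.hash.Wyhash.init(0);
    // autoHash walks the fields and feeds each one to the hasher,
    // so equal values always produce equal hashes.
    std.hash.autoHash(&hasher_a, Point{ .x = 1, .y = 2 });
    std.hash.autoHash(&hasher_b, Point{ .x = 1, .y = 2 });
    std.testing.expect(hasher_a.final() == hasher_b.final());
}
{#code_end#}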
diff --git a/std/hash/throughput_test.zig b/std/hash/throughput_test.zig
new file mode 100644
index 0000000000..4b7e8ef344
--- /dev/null
+++ b/std/hash/throughput_test.zig
@@ -0,0 +1,148 @@
+const builtin = @import("builtin");
+const std = @import("std");
+const time = std.time;
+const Timer = time.Timer;
+const hash = std.hash;
+
+const KiB = 1024;
+const MiB = 1024 * KiB;
+const GiB = 1024 * MiB;
+
+var prng = std.rand.DefaultPrng.init(0);
+
+const Hash = struct {
+ ty: type,
+ name: []const u8,
+ init_u8s: ?[]const u8 = null,
+ init_u64: ?u64 = null,
+};
+
+const siphash_key = "0123456789abcdef";
+
+const hashes = [_]Hash{
+ Hash{ .ty = hash.Wyhash, .name = "wyhash", .init_u64 = 0 },
+ Hash{ .ty = hash.SipHash64(1, 3), .name = "siphash(1,3)", .init_u8s = siphash_key },
+ Hash{ .ty = hash.SipHash64(2, 4), .name = "siphash(2,4)", .init_u8s = siphash_key },
+ Hash{ .ty = hash.Fnv1a_64, .name = "fnv1a" },
+ Hash{ .ty = hash.Crc32, .name = "crc32" },
+};
+
+const Result = struct {
+ hash: u64,
+ throughput: u64,
+};
+
+pub fn benchmarkHash(comptime H: var, bytes: usize) !Result {
+ var h = blk: {
+ if (H.init_u8s) |init| {
+ break :blk H.ty.init(init);
+ }
+ if (H.init_u64) |init| {
+ break :blk H.ty.init(init);
+ }
+ break :blk H.ty.init();
+ };
+
+ var block: [8192]u8 = undefined;
+ prng.random.bytes(block[0..]);
+
+ var offset: usize = 0;
+ var timer = try Timer.start();
+ const start = timer.lap();
+ while (offset < bytes) : (offset += block.len) {
+ h.update(block[0..]);
+ }
+ const end = timer.read();
+
+ const elapsed_s = @intToFloat(f64, end - start) / time.ns_per_s;
+ const throughput = @floatToInt(u64, @intToFloat(f64, bytes) / elapsed_s);
+
+ return Result{
+ .hash = h.final(),
+ .throughput = throughput,
+ };
+}
+
+fn usage() void {
+ std.debug.warn(
+ \\throughput_test [options]
+ \\
+ \\Options:
+ \\ --filter [test-name]
+ \\ --seed [int]
+ \\ --count [int]
+ \\ --help
+ \\
+ );
+}
+
+fn mode(comptime x: comptime_int) comptime_int {
+ return if (builtin.mode == builtin.Mode.Debug) x / 64 else x;
+}
+
+// TODO(#1358): Replace with builtin formatted padding when available.
+fn printPad(stdout: var, s: []const u8) !void {
+ var i: usize = 0;
+ while (i < 12 - s.len) : (i += 1) {
+ try stdout.print(" ");
+ }
+ try stdout.print("{}", s);
+}
+
+pub fn main() !void {
+ var stdout_file = try std.io.getStdOut();
+ var stdout_out_stream = stdout_file.outStream();
+ const stdout = &stdout_out_stream.stream;
+
+ var buffer: [1024]u8 = undefined;
+ var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ const args = try std.process.argsAlloc(&fixed.allocator);
+
+ var filter: ?[]u8 = "";
+ var count: usize = mode(128 * MiB);
+
+ var i: usize = 1;
+ while (i < args.len) : (i += 1) {
+ if (std.mem.eql(u8, args[i], "--seed")) {
+ i += 1;
+ if (i == args.len) {
+ usage();
+ std.os.exit(1);
+ }
+
+ const seed = try std.fmt.parseUnsigned(u32, args[i], 10);
+ prng.seed(seed);
+ } else if (std.mem.eql(u8, args[i], "--filter")) {
+ i += 1;
+ if (i == args.len) {
+ usage();
+ std.os.exit(1);
+ }
+
+ filter = args[i];
+ } else if (std.mem.eql(u8, args[i], "--count")) {
+ i += 1;
+ if (i == args.len) {
+ usage();
+ std.os.exit(1);
+ }
+
+ const c = try std.fmt.parseUnsigned(u32, args[i], 10);
+ count = c * MiB;
+ } else if (std.mem.eql(u8, args[i], "--help")) {
+ usage();
+ return;
+ } else {
+ usage();
+ std.os.exit(1);
+ }
+ }
+
+ inline for (hashes) |H| {
+ if (filter == null or std.mem.indexOf(u8, H.name, filter.?) != null) {
+ const result = try benchmarkHash(H, count);
+ try printPad(stdout, H.name);
+ try stdout.print(": {:4} MiB/s [{:16}]\n", result.throughput / (1 * MiB), result.hash);
+ }
+ }
+}
diff --git a/std/hash/wyhash.zig b/std/hash/wyhash.zig
new file mode 100644
index 0000000000..dfa5156cad
--- /dev/null
+++ b/std/hash/wyhash.zig
@@ -0,0 +1,135 @@
+const std = @import("std");
+const mem = std.mem;
+
+const primes = [_]u64{
+ 0xa0761d6478bd642f,
+ 0xe7037ed1a0b428db,
+ 0x8ebc6af09c88c6e3,
+ 0x589965cc75374cc3,
+ 0x1d8e4e27c47d124f,
+};
+
+fn read_bytes(comptime bytes: u8, data: []const u8) u64 {
+ return mem.readVarInt(u64, data[0..bytes], .Little);
+}
+
+fn read_8bytes_swapped(data: []const u8) u64 {
+ return (read_bytes(4, data) << 32 | read_bytes(4, data[4..]));
+}
+
+fn mum(a: u64, b: u64) u64 {
+ var r = std.math.mulWide(u64, a, b);
+ r = (r >> 64) ^ r;
+ return @truncate(u64, r);
+}
+
+fn mix0(a: u64, b: u64, seed: u64) u64 {
+ return mum(a ^ seed ^ primes[0], b ^ seed ^ primes[1]);
+}
+
+fn mix1(a: u64, b: u64, seed: u64) u64 {
+ return mum(a ^ seed ^ primes[2], b ^ seed ^ primes[3]);
+}
+
+pub const Wyhash = struct {
+ seed: u64,
+ msg_len: usize,
+
+ pub fn init(seed: u64) Wyhash {
+ return Wyhash{
+ .seed = seed,
+ .msg_len = 0,
+ };
+ }
+
+ fn round(self: *Wyhash, b: []const u8) void {
+ std.debug.assert(b.len == 32);
+
+ self.seed = mix0(
+ read_bytes(8, b[0..]),
+ read_bytes(8, b[8..]),
+ self.seed,
+ ) ^ mix1(
+ read_bytes(8, b[16..]),
+ read_bytes(8, b[24..]),
+ self.seed,
+ );
+ }
+
+ fn partial(self: *Wyhash, b: []const u8) void {
+ const rem_key = b;
+ const rem_len = b.len;
+
+ var seed = self.seed;
+ seed = switch (@intCast(u5, rem_len)) {
+ 0 => seed,
+ 1 => mix0(read_bytes(1, rem_key), primes[4], seed),
+ 2 => mix0(read_bytes(2, rem_key), primes[4], seed),
+ 3 => mix0((read_bytes(2, rem_key) << 8) | read_bytes(1, rem_key[2..]), primes[4], seed),
+ 4 => mix0(read_bytes(4, rem_key), primes[4], seed),
+ 5 => mix0((read_bytes(4, rem_key) << 8) | read_bytes(1, rem_key[4..]), primes[4], seed),
+ 6 => mix0((read_bytes(4, rem_key) << 16) | read_bytes(2, rem_key[4..]), primes[4], seed),
+ 7 => mix0((read_bytes(4, rem_key) << 24) | (read_bytes(2, rem_key[4..]) << 8) | read_bytes(1, rem_key[6..]), primes[4], seed),
+ 8 => mix0(read_8bytes_swapped(rem_key), primes[4], seed),
+ 9 => mix0(read_8bytes_swapped(rem_key), read_bytes(1, rem_key[8..]), seed),
+ 10 => mix0(read_8bytes_swapped(rem_key), read_bytes(2, rem_key[8..]), seed),
+ 11 => mix0(read_8bytes_swapped(rem_key), (read_bytes(2, rem_key[8..]) << 8) | read_bytes(1, rem_key[10..]), seed),
+ 12 => mix0(read_8bytes_swapped(rem_key), read_bytes(4, rem_key[8..]), seed),
+ 13 => mix0(read_8bytes_swapped(rem_key), (read_bytes(4, rem_key[8..]) << 8) | read_bytes(1, rem_key[12..]), seed),
+ 14 => mix0(read_8bytes_swapped(rem_key), (read_bytes(4, rem_key[8..]) << 16) | read_bytes(2, rem_key[12..]), seed),
+ 15 => mix0(read_8bytes_swapped(rem_key), (read_bytes(4, rem_key[8..]) << 24) | (read_bytes(2, rem_key[12..]) << 8) | read_bytes(1, rem_key[14..]), seed),
+ 16 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed),
+ 17 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_bytes(1, rem_key[16..]), primes[4], seed),
+ 18 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_bytes(2, rem_key[16..]), primes[4], seed),
+ 19 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(2, rem_key[16..]) << 8) | read_bytes(1, rem_key[18..]), primes[4], seed),
+ 20 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_bytes(4, rem_key[16..]), primes[4], seed),
+ 21 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(4, rem_key[16..]) << 8) | read_bytes(1, rem_key[20..]), primes[4], seed),
+ 22 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(4, rem_key[16..]) << 16) | read_bytes(2, rem_key[20..]), primes[4], seed),
+ 23 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(4, rem_key[16..]) << 24) | (read_bytes(2, rem_key[20..]) << 8) | read_bytes(1, rem_key[22..]), primes[4], seed),
+ 24 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), primes[4], seed),
+ 25 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), read_bytes(1, rem_key[24..]), seed),
+ 26 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), read_bytes(2, rem_key[24..]), seed),
+ 27 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(2, rem_key[24..]) << 8) | read_bytes(1, rem_key[26..]), seed),
+ 28 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), read_bytes(4, rem_key[24..]), seed),
+ 29 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 8) | read_bytes(1, rem_key[28..]), seed),
+ 30 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 16) | read_bytes(2, rem_key[28..]), seed),
+ 31 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 24) | (read_bytes(2, rem_key[28..]) << 8) | read_bytes(1, rem_key[30..]), seed),
+ };
+ self.seed = seed;
+ }
+
+ pub fn update(self: *Wyhash, b: []const u8) void {
+ var off: usize = 0;
+
+ // Full middle blocks.
+ while (off + 32 <= b.len) : (off += 32) {
+ @inlineCall(self.round, b[off .. off + 32]);
+ }
+
+ self.partial(b[off..]);
+ self.msg_len += b.len;
+ }
+
+ pub fn final(self: *Wyhash) u64 {
+ return mum(self.seed ^ self.msg_len, primes[4]);
+ }
+
+ pub fn hash(seed: u64, input: []const u8) u64 {
+ var c = Wyhash.init(seed);
+ c.update(input);
+ return c.final();
+ }
+};
+
+test "test vectors" {
+ const expectEqual = std.testing.expectEqual;
+ const hash = Wyhash.hash;
+
+ expectEqual(hash(0, ""), 0x0);
+ expectEqual(hash(1, "a"), 0xbed235177f41d328);
+ expectEqual(hash(2, "abc"), 0xbe348debe59b27c3);
+ expectEqual(hash(3, "message digest"), 0x37320f657213a290);
+ expectEqual(hash(4, "abcdefghijklmnopqrstuvwxyz"), 0xd0b270e1d8a7019c);
+ expectEqual(hash(5, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"), 0x602a1894d3bbfe7f);
+ expectEqual(hash(6, "12345678901234567890123456789012345678901234567890123456789012345678901234567890"), 0x829e9c148b75970e);
+}
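
The streaming interface (init/update/final) and the one-shot Wyhash.hash helper agree when update is called once; note that the partial-block handling above folds the remainder into the seed on every update call, so arbitrarily chunked updates are not equivalent to a single one. A short sketch:

{#code_begin|test#}
const std = @import("std");
const Wyhash = std.hash.Wyhash;

test "wyhash streaming and one-shot" {
    const input = "message digest";
    var h = Wyhash.init(3);
    h.update(input);
    // hash() is init + update + final, so a single update must match it.
    std.testing.expectEqual(Wyhash.hash(3, input), h.final());
}
{#code_end#}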
diff --git a/std/hash_map.zig b/std/hash_map.zig
index c99d1d2490..ab3c4c248d 100644
--- a/std/hash_map.zig
+++ b/std/hash_map.zig
@@ -4,6 +4,9 @@ const assert = debug.assert;
const testing = std.testing;
const math = std.math;
const mem = std.mem;
+const meta = std.meta;
+const autoHash = std.hash.autoHash;
+const Wyhash = std.hash.Wyhash;
const Allocator = mem.Allocator;
const builtin = @import("builtin");
@@ -448,15 +451,17 @@ test "iterator hash map" {
try reset_map.putNoClobber(2, 22);
try reset_map.putNoClobber(3, 33);
+ // TODO this test depends on the hashing algorithm, because it assumes
+ // the order of the elements in the hash map. It should not.
var keys = [_]i32{
+ 1,
3,
2,
- 1,
};
var values = [_]i32{
+ 11,
33,
22,
- 11,
};
var it = reset_map.iterator();
@@ -518,8 +523,9 @@ pub fn getTrivialEqlFn(comptime K: type) (fn (K, K) bool) {
pub fn getAutoHashFn(comptime K: type) (fn (K) u32) {
return struct {
fn hash(key: K) u32 {
- comptime var rng = comptime std.rand.DefaultPrng.init(0);
- return autoHash(key, &rng.random, u32);
+ var hasher = Wyhash.init(0);
+ autoHash(&hasher, key);
+ return @truncate(u32, hasher.final());
}
}.hash;
}
@@ -527,114 +533,7 @@ pub fn getAutoHashFn(comptime K: type) (fn (K) u32) {
pub fn getAutoEqlFn(comptime K: type) (fn (K, K) bool) {
return struct {
fn eql(a: K, b: K) bool {
- return autoEql(a, b);
+ return meta.eql(a, b);
}
}.eql;
}
-
-// TODO improve these hash functions
-pub fn autoHash(key: var, comptime rng: *std.rand.Random, comptime HashInt: type) HashInt {
- switch (@typeInfo(@typeOf(key))) {
- builtin.TypeId.NoReturn,
- builtin.TypeId.Opaque,
- builtin.TypeId.Undefined,
- builtin.TypeId.ArgTuple,
- => @compileError("cannot hash this type"),
-
- builtin.TypeId.Void,
- builtin.TypeId.Null,
- => return 0,
-
- builtin.TypeId.Int => |info| {
- const unsigned_x = @bitCast(@IntType(false, info.bits), key);
- if (info.bits <= HashInt.bit_count) {
- return HashInt(unsigned_x) ^ comptime rng.scalar(HashInt);
- } else {
- return @truncate(HashInt, unsigned_x ^ comptime rng.scalar(@typeOf(unsigned_x)));
- }
- },
-
- builtin.TypeId.Float => |info| {
- return autoHash(@bitCast(@IntType(false, info.bits), key), rng, HashInt);
- },
- builtin.TypeId.Bool => return autoHash(@boolToInt(key), rng, HashInt),
- builtin.TypeId.Enum => return autoHash(@enumToInt(key), rng, HashInt),
- builtin.TypeId.ErrorSet => return autoHash(@errorToInt(key), rng, HashInt),
- builtin.TypeId.Promise, builtin.TypeId.Fn => return autoHash(@ptrToInt(key), rng, HashInt),
-
- builtin.TypeId.BoundFn,
- builtin.TypeId.ComptimeFloat,
- builtin.TypeId.ComptimeInt,
- builtin.TypeId.Type,
- builtin.TypeId.EnumLiteral,
- => return 0,
-
- builtin.TypeId.Pointer => |info| switch (info.size) {
- builtin.TypeInfo.Pointer.Size.One => @compileError("TODO auto hash for single item pointers"),
- builtin.TypeInfo.Pointer.Size.Many => @compileError("TODO auto hash for many item pointers"),
- builtin.TypeInfo.Pointer.Size.C => @compileError("TODO auto hash C pointers"),
- builtin.TypeInfo.Pointer.Size.Slice => {
- const interval = std.math.max(1, key.len / 256);
- var i: usize = 0;
- var h = comptime rng.scalar(HashInt);
- while (i < key.len) : (i += interval) {
- h ^= autoHash(key[i], rng, HashInt);
- }
- return h;
- },
- },
-
- builtin.TypeId.Optional => @compileError("TODO auto hash for optionals"),
- builtin.TypeId.Array => @compileError("TODO auto hash for arrays"),
- builtin.TypeId.Vector => @compileError("TODO auto hash for vectors"),
- builtin.TypeId.Struct => @compileError("TODO auto hash for structs"),
- builtin.TypeId.Union => @compileError("TODO auto hash for unions"),
- builtin.TypeId.ErrorUnion => @compileError("TODO auto hash for unions"),
- }
-}
-
-pub fn autoEql(a: var, b: @typeOf(a)) bool {
- switch (@typeInfo(@typeOf(a))) {
- builtin.TypeId.NoReturn,
- builtin.TypeId.Opaque,
- builtin.TypeId.Undefined,
- builtin.TypeId.ArgTuple,
- => @compileError("cannot test equality of this type"),
- builtin.TypeId.Void,
- builtin.TypeId.Null,
- => return true,
- builtin.TypeId.Bool,
- builtin.TypeId.Int,
- builtin.TypeId.Float,
- builtin.TypeId.ComptimeFloat,
- builtin.TypeId.ComptimeInt,
- builtin.TypeId.EnumLiteral,
- builtin.TypeId.Promise,
- builtin.TypeId.Enum,
- builtin.TypeId.BoundFn,
- builtin.TypeId.Fn,
- builtin.TypeId.ErrorSet,
- builtin.TypeId.Type,
- => return a == b,
-
- builtin.TypeId.Pointer => |info| switch (info.size) {
- builtin.TypeInfo.Pointer.Size.One => @compileError("TODO auto eql for single item pointers"),
- builtin.TypeInfo.Pointer.Size.Many => @compileError("TODO auto eql for many item pointers"),
- builtin.TypeInfo.Pointer.Size.C => @compileError("TODO auto eql for C pointers"),
- builtin.TypeInfo.Pointer.Size.Slice => {
- if (a.len != b.len) return false;
- for (a) |a_item, i| {
- if (!autoEql(a_item, b[i])) return false;
- }
- return true;
- },
- },
-
- builtin.TypeId.Optional => @compileError("TODO auto eql for optionals"),
- builtin.TypeId.Array => @compileError("TODO auto eql for arrays"),
- builtin.TypeId.Struct => @compileError("TODO auto eql for structs"),
- builtin.TypeId.Union => @compileError("TODO auto eql for unions"),
- builtin.TypeId.ErrorUnion => @compileError("TODO auto eql for unions"),
- builtin.TypeId.Vector => @compileError("TODO auto eql for vectors"),
- }
-}
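
With getAutoHashFn now delegating to autoHash and Wyhash, std.AutoHashMap accepts key types the old rng-seeded autoHash rejected with a compile error, such as structs. A minimal sketch using only APIs that appear in this diff (the allocator choice is illustrative):

{#code_begin|test#}
const std = @import("std");

test "AutoHashMap with a struct key" {
    const Point = struct {
        x: i32,
        y: i32,
    };
    var map = std.AutoHashMap(Point, i32).init(std.heap.direct_allocator);
    defer map.deinit();
    try map.putNoClobber(Point{ .x = 1, .y = 2 }, 12);
    var it = map.iterator();
    std.testing.expect(it.next().?.value == 12);
}
{#code_end#}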
diff --git a/std/http/headers.zig b/std/http/headers.zig
index 69ed494f3a..c588f2d055 100644
--- a/std/http/headers.zig
+++ b/std/http/headers.zig
@@ -102,9 +102,19 @@ test "HeaderEntry" {
testing.expectEqualSlices(u8, "x", e.value);
}
+fn stringEql(a: []const u8, b: []const u8) bool {
+ if (a.len != b.len) return false;
+ if (a.ptr == b.ptr) return true;
+ return mem.compare(u8, a, b) == .Equal;
+}
+
+fn stringHash(s: []const u8) u32 {
+ return @truncate(u32, std.hash.Wyhash.hash(0, s));
+}
+
const HeaderList = std.ArrayList(HeaderEntry);
const HeaderIndexList = std.ArrayList(usize);
-const HeaderIndex = std.AutoHashMap([]const u8, HeaderIndexList);
+const HeaderIndex = std.HashMap([]const u8, HeaderIndexList, stringHash, stringEql);
pub const Headers = struct {
// the owned header field name is stored in the index as part of the key
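
The same mechanism generalizes: std.HashMap takes the hash and equality functions as comptime parameters, so any key type works once those two are supplied, which is what headers.zig now does for its []const u8 keys. A sketch with stand-in helper names:

{#code_begin|test#}
const std = @import("std");

fn strHash(s: []const u8) u32 {
    return @truncate(u32, std.hash.Wyhash.hash(0, s));
}

fn strEql(a: []const u8, b: []const u8) bool {
    return std.mem.eql(u8, a, b);
}

test "string-keyed HashMap" {
    const StringMap = std.HashMap([]const u8, i32, strHash, strEql);
    var map = StringMap.init(std.heap.direct_allocator);
    defer map.deinit();
    try map.putNoClobber("content-length", 42);
    var it = map.iterator();
    std.testing.expect(it.next().?.value == 42);
}
{#code_end#}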
diff --git a/std/math.zig b/std/math.zig
index ac06a07953..e47021512e 100644
--- a/std/math.zig
+++ b/std/math.zig
@@ -242,12 +242,76 @@ pub fn floatExponentBits(comptime T: type) comptime_int {
};
}
-pub fn min(x: var, y: var) @typeOf(x + y) {
- return if (x < y) x else y;
+/// Given two types, returns the smallest one which is capable of holding the
+/// full range of the minimum value.
+pub fn Min(comptime A: type, comptime B: type) type {
+ switch (@typeInfo(A)) {
+ .Int => |a_info| switch (@typeInfo(B)) {
+ .Int => |b_info| if (!a_info.is_signed and !b_info.is_signed) {
+ if (a_info.bits < b_info.bits) {
+ return A;
+ } else {
+ return B;
+ }
+ },
+ else => {},
+ },
+ else => {},
+ }
+ return @typeOf(A(0) + B(0));
+}
+
+/// Returns the smaller number. When the full range of one parameter's type fits
+/// in the other's, the return type is the smaller type.
+pub fn min(x: var, y: var) Min(@typeOf(x), @typeOf(y)) {
+ const Result = Min(@typeOf(x), @typeOf(y));
+ if (x < y) {
+ // TODO Zig should allow this as an implicit cast because x is immutable and in this
+ // scope it is known to fit in the return type.
+ switch (@typeInfo(Result)) {
+ .Int => return @intCast(Result, x),
+ else => return x,
+ }
+ } else {
+ // TODO Zig should allow this as an implicit cast because y is immutable and in this
+ // scope it is known to fit in the return type.
+ switch (@typeInfo(Result)) {
+ .Int => return @intCast(Result, y),
+ else => return y,
+ }
+ }
}
test "math.min" {
testing.expect(min(i32(-1), i32(2)) == -1);
+ {
+ var a: u16 = 999;
+ var b: u32 = 10;
+ var result = min(a, b);
+ testing.expect(@typeOf(result) == u16);
+ testing.expect(result == 10);
+ }
+ {
+ var a: f64 = 10.34;
+ var b: f32 = 999.12;
+ var result = min(a, b);
+ testing.expect(@typeOf(result) == f64);
+ testing.expect(result == 10.34);
+ }
+ {
+ var a: i8 = -127;
+ var b: i16 = -200;
+ var result = min(a, b);
+ testing.expect(@typeOf(result) == i16);
+ testing.expect(result == -200);
+ }
+ {
+ const a = 10.34;
+ var b: f32 = 999.12;
+ var result = min(a, b);
+ testing.expect(@typeOf(result) == f32);
+ testing.expect(result == 10.34);
+ }
}
pub fn max(x: var, y: var) @typeOf(x + y) {
@@ -309,7 +373,7 @@ test "math.shl" {
}
/// Shifts right. Overflowed bits are truncated.
-/// A negative shift amount results in a lefft shift.
+/// A negative shift amount results in a left shift.
pub fn shr(comptime T: type, a: T, shift_amt: var) T {
const abs_shift_amt = absCast(shift_amt);
const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt);
diff --git a/std/meta.zig b/std/meta.zig
index 0db76ce774..6b90727737 100644
--- a/std/meta.zig
+++ b/std/meta.zig
@@ -104,8 +104,7 @@ pub fn Child(comptime T: type) type {
TypeId.Array => |info| info.child,
TypeId.Pointer => |info| info.child,
TypeId.Optional => |info| info.child,
- TypeId.Promise => |info| if (info.child) |child| child else null,
- else => @compileError("Expected promise, pointer, optional, or array type, " ++ "found '" ++ @typeName(T) ++ "'"),
+ else => @compileError("Expected pointer, optional, or array type, " ++ "found '" ++ @typeName(T) ++ "'"),
};
}
@@ -114,7 +113,6 @@ test "std.meta.Child" {
testing.expect(Child(*u8) == u8);
testing.expect(Child([]u8) == u8);
testing.expect(Child(?u8) == u8);
- testing.expect(Child(promise->u8) == u8);
}
pub fn containerLayout(comptime T: type) TypeInfo.ContainerLayout {
diff --git a/std/os.zig b/std/os.zig
index c2010bf6a9..805f7f1645 100644
--- a/std/os.zig
+++ b/std/os.zig
@@ -120,6 +120,19 @@ pub fn getrandom(buf: []u8) GetRandomError!void {
}
}
}
+ if (freebsd.is_the_target) {
+ while (true) {
+ const err = std.c.getErrno(std.c.getrandom(buf.ptr, buf.len, 0));
+
+ switch (err) {
+ 0 => return,
+ EINVAL => unreachable,
+ EFAULT => unreachable,
+ EINTR => continue,
+ else => return unexpectedErrno(err),
+ }
+ }
+ }
if (wasi.is_the_target) {
switch (wasi.random_get(buf.ptr, buf.len)) {
0 => return,
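
The new FreeBSD branch follows the standard errno loop for interruptible syscalls: success returns, EINTR retries, impossible argument errors are unreachable, and anything else is forwarded as unexpected. The control flow can be exercised without a real syscall; the fake below fails with EINTR twice before succeeding (the errno value and helper here are purely illustrative):

{#code_begin|test#}
const std = @import("std");

const EINTR = 4; // illustrative errno value
var attempts: u32 = 0;

// Stand-in for an interruptible syscall: fails with EINTR twice, then succeeds.
fn fakeGetRandom() usize {
    attempts += 1;
    return if (attempts < 3) EINTR else 0;
}

test "retry on EINTR" {
    while (true) {
        switch (fakeGetRandom()) {
            0 => break,
            EINTR => continue,
            else => unreachable,
        }
    }
    std.testing.expect(attempts == 3);
}
{#code_end#}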
diff --git a/std/os/bits/netbsd.zig b/std/os/bits/netbsd.zig
index ef58bd1356..ff19d090af 100644
--- a/std/os/bits/netbsd.zig
+++ b/std/os/bits/netbsd.zig
@@ -760,3 +760,62 @@ pub const stack_t = extern struct {
ss_size: isize,
ss_flags: i32,
};
+
+pub const S_IFMT = 0o170000;
+
+pub const S_IFIFO = 0o010000;
+pub const S_IFCHR = 0o020000;
+pub const S_IFDIR = 0o040000;
+pub const S_IFBLK = 0o060000;
+pub const S_IFREG = 0o100000;
+pub const S_IFLNK = 0o120000;
+pub const S_IFSOCK = 0o140000;
+pub const S_IFWHT = 0o160000;
+
+pub const S_ISUID = 0o4000;
+pub const S_ISGID = 0o2000;
+pub const S_ISVTX = 0o1000;
+pub const S_IRWXU = 0o700;
+pub const S_IRUSR = 0o400;
+pub const S_IWUSR = 0o200;
+pub const S_IXUSR = 0o100;
+pub const S_IRWXG = 0o070;
+pub const S_IRGRP = 0o040;
+pub const S_IWGRP = 0o020;
+pub const S_IXGRP = 0o010;
+pub const S_IRWXO = 0o007;
+pub const S_IROTH = 0o004;
+pub const S_IWOTH = 0o002;
+pub const S_IXOTH = 0o001;
+
+pub fn S_ISFIFO(m: u32) bool {
+ return m & S_IFMT == S_IFIFO;
+}
+
+pub fn S_ISCHR(m: u32) bool {
+ return m & S_IFMT == S_IFCHR;
+}
+
+pub fn S_ISDIR(m: u32) bool {
+ return m & S_IFMT == S_IFDIR;
+}
+
+pub fn S_ISBLK(m: u32) bool {
+ return m & S_IFMT == S_IFBLK;
+}
+
+pub fn S_ISREG(m: u32) bool {
+ return m & S_IFMT == S_IFREG;
+}
+
+pub fn S_ISLNK(m: u32) bool {
+ return m & S_IFMT == S_IFLNK;
+}
+
+pub fn S_ISSOCK(m: u32) bool {
+ return m & S_IFMT == S_IFSOCK;
+}
+
+pub fn S_IWHT(m: u32) bool {
+ return m & S_IFMT == S_IFWHT;
+}
diff --git a/std/os/darwin.zig b/std/os/darwin.zig
index c2b6801e22..0adf71affb 100644
--- a/std/os/darwin.zig
+++ b/std/os/darwin.zig
@@ -5,4 +5,4 @@ pub const is_the_target = switch (builtin.os) {
else => false,
};
pub usingnamespace std.c;
-pub usingnamespace @import("bits.zig");
\ No newline at end of file
+pub usingnamespace @import("bits.zig");
diff --git a/std/os/freebsd.zig b/std/os/freebsd.zig
index e9efe64920..ddbf98f2bc 100644
--- a/std/os/freebsd.zig
+++ b/std/os/freebsd.zig
@@ -2,4 +2,4 @@ const std = @import("../std.zig");
const builtin = @import("builtin");
pub const is_the_target = builtin.os == .freebsd;
pub usingnamespace std.c;
-pub usingnamespace @import("bits.zig");
\ No newline at end of file
+pub usingnamespace @import("bits.zig");
diff --git a/std/os/netbsd.zig b/std/os/netbsd.zig
index cd63e40f5c..d484e7374b 100644
--- a/std/os/netbsd.zig
+++ b/std/os/netbsd.zig
@@ -2,3 +2,4 @@ const builtin = @import("builtin");
const std = @import("../std.zig");
pub const is_the_target = builtin.os == .netbsd;
pub usingnamespace std.c;
+pub usingnamespace @import("bits.zig");
diff --git a/std/os/windows.zig b/std/os/windows.zig
index ac76e8f58f..7c1761a4b8 100644
--- a/std/os/windows.zig
+++ b/std/os/windows.zig
@@ -138,10 +138,19 @@ pub const RtlGenRandomError = error{Unexpected};
/// https://github.com/rust-lang-nursery/rand/issues/111
/// https://bugzilla.mozilla.org/show_bug.cgi?id=504270
pub fn RtlGenRandom(output: []u8) RtlGenRandomError!void {
- if (advapi32.RtlGenRandom(output.ptr, output.len) == 0) {
- switch (kernel32.GetLastError()) {
- else => |err| return unexpectedError(err),
+ var total_read: usize = 0;
+ var buff: []u8 = output[0..];
+ const max_read_size: ULONG = maxInt(ULONG);
+
+ while (total_read < output.len) {
+ const to_read: ULONG = math.min(buff.len, max_read_size);
+
+ if (advapi32.RtlGenRandom(buff.ptr, to_read) == 0) {
+ return unexpectedError(kernel32.GetLastError());
}
+
+ total_read += to_read;
+ buff = buff[to_read..];
}
}
@@ -866,7 +875,6 @@ pub fn unexpectedError(err: DWORD) std.os.UnexpectedError {
return error.Unexpected;
}
-
/// Call this when you made a windows NtDll call
/// and you get an unexpected status.
pub fn unexpectedStatus(status: NTSTATUS) std.os.UnexpectedError {
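
The RtlGenRandom fix above has the classic shape for APIs whose length parameter is narrower than usize: fill the output in chunks no larger than the callee's maximum, advancing a shrinking slice. A self-contained sketch of that loop, with a tiny chunk size so the test actually iterates and mem.set standing in for the real call:

{#code_begin|test#}
const std = @import("std");

test "chunked fill" {
    const max_chunk: usize = 4; // stands in for maxInt(ULONG)
    var output: [10]u8 = undefined;
    var buff: []u8 = output[0..];
    var total: usize = 0;
    while (total < output.len) {
        const to_read = std.math.min(buff.len, max_chunk);
        std.mem.set(u8, buff[0..to_read], 0xaa); // stands in for RtlGenRandom
        total += to_read;
        buff = buff[to_read..];
    }
    std.testing.expect(output[0] == 0xaa and output[9] == 0xaa);
}
{#code_end#}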
diff --git a/std/os/windows/advapi32.zig b/std/os/windows/advapi32.zig
index 165a2c10a3..940f10994c 100644
--- a/std/os/windows/advapi32.zig
+++ b/std/os/windows/advapi32.zig
@@ -19,5 +19,5 @@ pub extern "advapi32" stdcallcc fn RegQueryValueExW(
// RtlGenRandom is known as SystemFunction036 under advapi32
// http://msdn.microsoft.com/en-us/library/windows/desktop/aa387694.aspx */
-pub extern "advapi32" stdcallcc fn SystemFunction036(output: [*]u8, length: usize) BOOL;
+pub extern "advapi32" stdcallcc fn SystemFunction036(output: [*]u8, length: ULONG) BOOL;
pub const RtlGenRandom = SystemFunction036;
diff --git a/std/os/windows/bits.zig b/std/os/windows/bits.zig
index 79697995f4..ddfdd27e1b 100644
--- a/std/os/windows/bits.zig
+++ b/std/os/windows/bits.zig
@@ -209,7 +209,7 @@ pub const FILE_INFORMATION_CLASS = extern enum {
FileLinkInformationExBypassAccessCheck,
FileStorageReserveIdInformation,
FileCaseSensitiveInformationForceAccessCheck,
- FileMaximumInformation
+ FileMaximumInformation,
};
pub const OVERLAPPED = extern struct {
@@ -731,4 +731,4 @@ pub const UNICODE_STRING = extern struct {
Length: USHORT,
MaximumLength: USHORT,
Buffer: [*]WCHAR,
-};
\ No newline at end of file
+};
diff --git a/std/os/windows/ntdll.zig b/std/os/windows/ntdll.zig
index 746403fa6d..bfc98aba8a 100644
--- a/std/os/windows/ntdll.zig
+++ b/std/os/windows/ntdll.zig
@@ -1,7 +1,13 @@
usingnamespace @import("bits.zig");
pub extern "NtDll" stdcallcc fn RtlCaptureStackBackTrace(FramesToSkip: DWORD, FramesToCapture: DWORD, BackTrace: **c_void, BackTraceHash: ?*DWORD) WORD;
-pub extern "NtDll" stdcallcc fn NtQueryInformationFile(FileHandle: HANDLE, IoStatusBlock: *IO_STATUS_BLOCK, FileInformation: *c_void, Length: ULONG, FileInformationClass: FILE_INFORMATION_CLASS,) NTSTATUS;
+pub extern "NtDll" stdcallcc fn NtQueryInformationFile(
+ FileHandle: HANDLE,
+ IoStatusBlock: *IO_STATUS_BLOCK,
+ FileInformation: *c_void,
+ Length: ULONG,
+ FileInformationClass: FILE_INFORMATION_CLASS,
+) NTSTATUS;
pub extern "NtDll" stdcallcc fn NtCreateFile(
FileHandle: *HANDLE,
DesiredAccess: ACCESS_MASK,
@@ -15,4 +21,4 @@ pub extern "NtDll" stdcallcc fn NtCreateFile(
EaBuffer: *c_void,
EaLength: ULONG,
) NTSTATUS;
-pub extern "NtDll" stdcallcc fn NtClose(Handle: HANDLE) NTSTATUS;
\ No newline at end of file
+pub extern "NtDll" stdcallcc fn NtClose(Handle: HANDLE) NTSTATUS;
diff --git a/std/os/windows/status.zig b/std/os/windows/status.zig
index 668a736e90..b9fd2b495f 100644
--- a/std/os/windows/status.zig
+++ b/std/os/windows/status.zig
@@ -1,5 +1,5 @@
-/// The operation completed successfully.
+/// The operation completed successfully.
pub const SUCCESS = 0x00000000;
/// The data was too large to fit into the specified buffer.
-pub const BUFFER_OVERFLOW = 0x80000005;
\ No newline at end of file
+pub const BUFFER_OVERFLOW = 0x80000005;
diff --git a/std/rb.zig b/std/rb.zig
index 0b84950544..3f2a2d5bb0 100644
--- a/std/rb.zig
+++ b/std/rb.zig
@@ -549,7 +549,6 @@ test "rb" {
}
}
-
test "inserting and looking up" {
var tree: Tree = undefined;
tree.init(testCompare);
diff --git a/std/testing.zig b/std/testing.zig
index 4568e024e2..7f347b0c24 100644
--- a/std/testing.zig
+++ b/std/testing.zig
@@ -25,36 +25,37 @@ pub fn expectError(expected_error: anyerror, actual_error_union: var) void {
/// The types must match exactly.
pub fn expectEqual(expected: var, actual: @typeOf(expected)) void {
switch (@typeInfo(@typeOf(actual))) {
- TypeId.NoReturn,
- TypeId.BoundFn,
- TypeId.ArgTuple,
- TypeId.Opaque,
+ .NoReturn,
+ .BoundFn,
+ .ArgTuple,
+ .Opaque,
+ .Frame,
+ .AnyFrame,
=> @compileError("value of type " ++ @typeName(@typeOf(actual)) ++ " encountered"),
- TypeId.Undefined,
- TypeId.Null,
- TypeId.Void,
+ .Undefined,
+ .Null,
+ .Void,
=> return,
- TypeId.Type,
- TypeId.Bool,
- TypeId.Int,
- TypeId.Float,
- TypeId.ComptimeFloat,
- TypeId.ComptimeInt,
- TypeId.EnumLiteral,
- TypeId.Enum,
- TypeId.Fn,
- TypeId.Promise,
- TypeId.Vector,
- TypeId.ErrorSet,
+ .Type,
+ .Bool,
+ .Int,
+ .Float,
+ .ComptimeFloat,
+ .ComptimeInt,
+ .EnumLiteral,
+ .Enum,
+ .Fn,
+ .Vector,
+ .ErrorSet,
=> {
if (actual != expected) {
std.debug.panic("expected {}, found {}", expected, actual);
}
},
- TypeId.Pointer => |pointer| {
+ .Pointer => |pointer| {
switch (pointer.size) {
builtin.TypeInfo.Pointer.Size.One,
builtin.TypeInfo.Pointer.Size.Many,
@@ -76,22 +77,22 @@ pub fn expectEqual(expected: var, actual: @typeOf(expected)) void {
}
},
- TypeId.Array => |array| expectEqualSlices(array.child, &expected, &actual),
+ .Array => |array| expectEqualSlices(array.child, &expected, &actual),
- TypeId.Struct => |structType| {
+ .Struct => |structType| {
inline for (structType.fields) |field| {
expectEqual(@field(expected, field.name), @field(actual, field.name));
}
},
- TypeId.Union => |union_info| {
+ .Union => |union_info| {
if (union_info.tag_type == null) {
@compileError("Unable to compare untagged union values");
}
@compileError("TODO implement testing.expectEqual for tagged unions");
},
- TypeId.Optional => {
+ .Optional => {
if (expected) |expected_payload| {
if (actual) |actual_payload| {
expectEqual(expected_payload, actual_payload);
@@ -105,7 +106,7 @@ pub fn expectEqual(expected: var, actual: @typeOf(expected)) void {
}
},
- TypeId.ErrorUnion => {
+ .ErrorUnion => {
if (expected) |expected_payload| {
if (actual) |actual_payload| {
expectEqual(expected_payload, actual_payload);
diff --git a/std/zig/ast.zig b/std/zig/ast.zig
index 38bd94339f..e5781da035 100644
--- a/std/zig/ast.zig
+++ b/std/zig/ast.zig
@@ -400,7 +400,7 @@ pub const Node = struct {
VarType,
ErrorType,
FnProto,
- PromiseType,
+ AnyFrameType,
// Primary expressions
IntegerLiteral,
@@ -434,7 +434,6 @@ pub const Node = struct {
ErrorTag,
AsmInput,
AsmOutput,
- AsyncAttribute,
ParamDecl,
FieldInitializer,
};
@@ -838,36 +837,6 @@ pub const Node = struct {
}
};
- pub const AsyncAttribute = struct {
- base: Node,
- async_token: TokenIndex,
- allocator_type: ?*Node,
- rangle_bracket: ?TokenIndex,
-
- pub fn iterate(self: *AsyncAttribute, index: usize) ?*Node {
- var i = index;
-
- if (self.allocator_type) |allocator_type| {
- if (i < 1) return allocator_type;
- i -= 1;
- }
-
- return null;
- }
-
- pub fn firstToken(self: *const AsyncAttribute) TokenIndex {
- return self.async_token;
- }
-
- pub fn lastToken(self: *const AsyncAttribute) TokenIndex {
- if (self.rangle_bracket) |rangle_bracket| {
- return rangle_bracket;
- }
-
- return self.async_token;
- }
- };
-
pub const FnProto = struct {
base: Node,
doc_comments: ?*DocComment,
@@ -879,7 +848,6 @@ pub const Node = struct {
var_args_token: ?TokenIndex,
extern_export_inline_token: ?TokenIndex,
cc_token: ?TokenIndex,
- async_attr: ?*AsyncAttribute,
body_node: ?*Node,
lib_name: ?*Node, // populated if this is an extern declaration
align_expr: ?*Node, // populated if align(A) is present
@@ -935,7 +903,6 @@ pub const Node = struct {
pub fn firstToken(self: *const FnProto) TokenIndex {
if (self.visib_token) |visib_token| return visib_token;
- if (self.async_attr) |async_attr| return async_attr.firstToken();
if (self.extern_export_inline_token) |extern_export_inline_token| return extern_export_inline_token;
assert(self.lib_name == null);
if (self.cc_token) |cc_token| return cc_token;
@@ -952,9 +919,9 @@ pub const Node = struct {
}
};
- pub const PromiseType = struct {
+ pub const AnyFrameType = struct {
base: Node,
- promise_token: TokenIndex,
+ anyframe_token: TokenIndex,
result: ?Result,
pub const Result = struct {
@@ -962,7 +929,7 @@ pub const Node = struct {
return_type: *Node,
};
- pub fn iterate(self: *PromiseType, index: usize) ?*Node {
+ pub fn iterate(self: *AnyFrameType, index: usize) ?*Node {
var i = index;
if (self.result) |result| {
@@ -973,13 +940,13 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *const PromiseType) TokenIndex {
- return self.promise_token;
+ pub fn firstToken(self: *const AnyFrameType) TokenIndex {
+ return self.anyframe_token;
}
- pub fn lastToken(self: *const PromiseType) TokenIndex {
+ pub fn lastToken(self: *const AnyFrameType) TokenIndex {
if (self.result) |result| return result.return_type.lastToken();
- return self.promise_token;
+ return self.anyframe_token;
}
};
@@ -1699,7 +1666,7 @@ pub const Node = struct {
pub const Call = struct {
params: ParamList,
- async_attr: ?*AsyncAttribute,
+ async_token: ?TokenIndex,
pub const ParamList = SegmentedList(*Node, 2);
};
@@ -1752,7 +1719,7 @@ pub const Node = struct {
pub fn firstToken(self: *const SuffixOp) TokenIndex {
switch (self.op) {
- .Call => |*call_info| if (call_info.async_attr) |async_attr| return async_attr.firstToken(),
+ .Call => |*call_info| if (call_info.async_token) |async_token| return async_token,
else => {},
}
return self.lhs.firstToken();
diff --git a/std/zig/parse.zig b/std/zig/parse.zig
index 59acf99890..0a2fbb4fa1 100644
--- a/std/zig/parse.zig
+++ b/std/zig/parse.zig
@@ -277,7 +277,7 @@ fn parseTopLevelDecl(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
/// FnProto <- FnCC? KEYWORD_fn IDENTIFIER? LPAREN ParamDeclList RPAREN ByteAlign? LinkSection? EXCLAMATIONMARK? (KEYWORD_var / TypeExpr)
fn parseFnProto(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
- const cc = try parseFnCC(arena, it, tree);
+ const cc = parseFnCC(arena, it, tree);
const fn_token = eatToken(it, .Keyword_fn) orelse {
if (cc == null) return null else return error.ParseError;
};
@@ -320,7 +320,6 @@ fn parseFnProto(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
.var_args_token = var_args_token,
.extern_export_inline_token = null,
.cc_token = null,
- .async_attr = null,
.body_node = null,
.lib_name = null,
.align_expr = align_expr,
@@ -331,7 +330,6 @@ fn parseFnProto(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
switch (kind) {
.CC => |token| fn_proto_node.cc_token = token,
.Extern => |token| fn_proto_node.extern_export_inline_token = token,
- .Async => |node| fn_proto_node.async_attr = node,
}
}
@@ -814,7 +812,6 @@ fn parsePrefixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// <- AsmExpr
/// / IfExpr
/// / KEYWORD_break BreakLabel? Expr?
-/// / KEYWORD_cancel Expr
/// / KEYWORD_comptime Expr
/// / KEYWORD_continue BreakLabel?
/// / KEYWORD_resume Expr
@@ -839,20 +836,6 @@ fn parsePrimaryExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
return &node.base;
}
- if (eatToken(it, .Keyword_cancel)) |token| {
- const expr_node = try expectNode(arena, it, tree, parseExpr, AstError{
- .ExpectedExpr = AstError.ExpectedExpr{ .token = it.index },
- });
- const node = try arena.create(Node.PrefixOp);
- node.* = Node.PrefixOp{
- .base = Node{ .id = .PrefixOp },
- .op_token = token,
- .op = Node.PrefixOp.Op.Cancel,
- .rhs = expr_node,
- };
- return &node.base;
- }
-
if (eatToken(it, .Keyword_comptime)) |token| {
const expr_node = try expectNode(arena, it, tree, parseExpr, AstError{
.ExpectedExpr = AstError.ExpectedExpr{ .token = it.index },
@@ -1107,10 +1090,19 @@ fn parseErrorUnionExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*No
}
/// SuffixExpr
-/// <- AsyncPrefix PrimaryTypeExpr SuffixOp* FnCallArguments
+/// <- KEYWORD_async PrimaryTypeExpr SuffixOp* FnCallArguments
/// / PrimaryTypeExpr (SuffixOp / FnCallArguments)*
fn parseSuffixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
- if (try parseAsyncPrefix(arena, it, tree)) |async_node| {
+ if (eatToken(it, .Keyword_async)) |async_token| {
+ if (eatToken(it, .Keyword_fn)) |token_fn| {
+ // HACK: If we see the keyword `fn`, then we assume that
+ // we are parsing an async fn proto, and not a call.
+ // We therefore put back all tokens consumed by the async
+ // prefix...
+ putBackToken(it, token_fn);
+ putBackToken(it, async_token);
+ return parsePrimaryTypeExpr(arena, it, tree);
+ }
// TODO: Implement hack for parsing `async fn ...` in ast_parse_suffix_expr
var res = try expectNode(arena, it, tree, parsePrimaryTypeExpr, AstError{
.ExpectedPrimaryTypeExpr = AstError.ExpectedPrimaryTypeExpr{ .token = it.index },
@@ -1131,7 +1123,6 @@ fn parseSuffixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
});
return null;
};
-
const node = try arena.create(Node.SuffixOp);
node.* = Node.SuffixOp{
.base = Node{ .id = .SuffixOp },
@@ -1139,14 +1130,13 @@ fn parseSuffixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
.op = Node.SuffixOp.Op{
.Call = Node.SuffixOp.Op.Call{
.params = params.list,
- .async_attr = async_node.cast(Node.AsyncAttribute).?,
+ .async_token = async_token,
},
},
.rtoken = params.rparen,
};
return &node.base;
}
-
if (try parsePrimaryTypeExpr(arena, it, tree)) |expr| {
var res = expr;
@@ -1168,7 +1158,7 @@ fn parseSuffixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
.op = Node.SuffixOp.Op{
.Call = Node.SuffixOp.Op.Call{
.params = params.list,
- .async_attr = null,
+ .async_token = null,
},
},
.rtoken = params.rparen,
@@ -1201,7 +1191,7 @@ fn parseSuffixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// / KEYWORD_error DOT IDENTIFIER
/// / KEYWORD_false
/// / KEYWORD_null
-/// / KEYWORD_promise
+/// / KEYWORD_anyframe
/// / KEYWORD_true
/// / KEYWORD_undefined
/// / KEYWORD_unreachable
@@ -1256,11 +1246,11 @@ fn parsePrimaryTypeExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*N
}
if (eatToken(it, .Keyword_false)) |token| return createLiteral(arena, Node.BoolLiteral, token);
if (eatToken(it, .Keyword_null)) |token| return createLiteral(arena, Node.NullLiteral, token);
- if (eatToken(it, .Keyword_promise)) |token| {
- const node = try arena.create(Node.PromiseType);
- node.* = Node.PromiseType{
- .base = Node{ .id = .PromiseType },
- .promise_token = token,
+ if (eatToken(it, .Keyword_anyframe)) |token| {
+ const node = try arena.create(Node.AnyFrameType);
+ node.* = Node.AnyFrameType{
+ .base = Node{ .id = .AnyFrameType },
+ .anyframe_token = token,
.result = null,
};
return &node.base;
@@ -1668,36 +1658,18 @@ fn parseLinkSection(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
/// <- KEYWORD_nakedcc
/// / KEYWORD_stdcallcc
/// / KEYWORD_extern
-/// / KEYWORD_async (LARROW TypeExpr RARROW)?
-fn parseFnCC(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?FnCC {
+/// / KEYWORD_async
+fn parseFnCC(arena: *Allocator, it: *TokenIterator, tree: *Tree) ?FnCC {
if (eatToken(it, .Keyword_nakedcc)) |token| return FnCC{ .CC = token };
if (eatToken(it, .Keyword_stdcallcc)) |token| return FnCC{ .CC = token };
if (eatToken(it, .Keyword_extern)) |token| return FnCC{ .Extern = token };
- if (eatToken(it, .Keyword_async)) |token| {
- const node = try arena.create(Node.AsyncAttribute);
- node.* = Node.AsyncAttribute{
- .base = Node{ .id = .AsyncAttribute },
- .async_token = token,
- .allocator_type = null,
- .rangle_bracket = null,
- };
- if (eatToken(it, .AngleBracketLeft)) |_| {
- const type_expr = try expectNode(arena, it, tree, parseTypeExpr, AstError{
- .ExpectedTypeExpr = AstError.ExpectedTypeExpr{ .token = it.index },
- });
- const rarrow = try expectToken(it, tree, .AngleBracketRight);
- node.allocator_type = type_expr;
- node.rangle_bracket = rarrow;
- }
- return FnCC{ .Async = node };
- }
+ if (eatToken(it, .Keyword_async)) |token| return FnCC{ .CC = token };
return null;
}
const FnCC = union(enum) {
CC: TokenIndex,
Extern: TokenIndex,
- Async: *Node.AsyncAttribute,
};
/// ParamDecl <- (KEYWORD_noalias / KEYWORD_comptime)? (IDENTIFIER COLON)? ParamType
@@ -2194,7 +2166,7 @@ fn parsePrefixOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// PrefixTypeOp
/// <- QUESTIONMARK
-/// / KEYWORD_promise MINUSRARROW
+/// / KEYWORD_anyframe MINUSRARROW
/// / ArrayTypeStart (ByteAlign / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
/// / PtrTypeStart (KEYWORD_align LPAREN Expr (COLON INTEGER COLON INTEGER)? RPAREN / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
fn parsePrefixTypeOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
@@ -2209,20 +2181,20 @@ fn parsePrefixTypeOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
return &node.base;
}
- // TODO: Returning a PromiseType instead of PrefixOp makes casting and setting .rhs or
+ // TODO: Returning an AnyFrameType instead of PrefixOp makes casting and setting .rhs or
// .return_type more difficult for the caller (see parsePrefixOpExpr helper).
- // Consider making the PromiseType a member of PrefixOp and add a
- // PrefixOp.PromiseType variant?
- if (eatToken(it, .Keyword_promise)) |token| {
+ // Consider making the AnyFrameType a member of PrefixOp and adding a
+ // PrefixOp.AnyFrameType variant?
+ if (eatToken(it, .Keyword_anyframe)) |token| {
const arrow = eatToken(it, .Arrow) orelse {
putBackToken(it, token);
return null;
};
- const node = try arena.create(Node.PromiseType);
- node.* = Node.PromiseType{
- .base = Node{ .id = .PromiseType },
- .promise_token = token,
- .result = Node.PromiseType.Result{
+ const node = try arena.create(Node.AnyFrameType);
+ node.* = Node.AnyFrameType{
+ .base = Node{ .id = .AnyFrameType },
+ .anyframe_token = token,
+ .result = Node.AnyFrameType.Result{
.arrow_token = arrow,
.return_type = undefined, // set by caller
},
@@ -2424,28 +2396,6 @@ fn parseSuffixOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
return &node.base;
}
-/// AsyncPrefix <- KEYWORD_async (LARROW PrefixExpr RARROW)?
-fn parseAsyncPrefix(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
- const async_token = eatToken(it, .Keyword_async) orelse return null;
- var rangle_bracket: ?TokenIndex = null;
- const expr_node = if (eatToken(it, .AngleBracketLeft)) |_| blk: {
- const prefix_expr = try expectNode(arena, it, tree, parsePrefixExpr, AstError{
- .ExpectedPrefixExpr = AstError.ExpectedPrefixExpr{ .token = it.index },
- });
- rangle_bracket = try expectToken(it, tree, .AngleBracketRight);
- break :blk prefix_expr;
- } else null;
-
- const node = try arena.create(Node.AsyncAttribute);
- node.* = Node.AsyncAttribute{
- .base = Node{ .id = .AsyncAttribute },
- .async_token = async_token,
- .allocator_type = expr_node,
- .rangle_bracket = rangle_bracket,
- };
- return &node.base;
-}
-
/// FnCallArguments <- LPAREN ExprList RPAREN
/// ExprList <- (Expr COMMA)* Expr?
fn parseFnCallArguments(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?AnnotatedParamList {
@@ -2903,8 +2853,8 @@ fn parsePrefixOpExpr(
rightmost_op = rhs;
} else break;
},
- .PromiseType => {
- const prom = rightmost_op.cast(Node.PromiseType).?;
+ .AnyFrameType => {
+ const prom = rightmost_op.cast(Node.AnyFrameType).?;
if (try opParseFn(arena, it, tree)) |rhs| {
prom.result.?.return_type = rhs;
rightmost_op = rhs;
@@ -2922,8 +2872,8 @@ fn parsePrefixOpExpr(
.InvalidToken = AstError.InvalidToken{ .token = it.index },
});
},
- .PromiseType => {
- const prom = rightmost_op.cast(Node.PromiseType).?;
+ .AnyFrameType => {
+ const prom = rightmost_op.cast(Node.AnyFrameType).?;
prom.result.?.return_type = try expectNode(arena, it, tree, childParseFn, AstError{
.InvalidToken = AstError.InvalidToken{ .token = it.index },
});
diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig
index 66da3d7328..871195280f 100644
--- a/std/zig/parser_test.zig
+++ b/std/zig/parser_test.zig
@@ -8,6 +8,18 @@ test "zig fmt: change use to usingnamespace" {
);
}
+test "zig fmt: async function" {
+ try testCanonical(
+ \\pub const Server = struct {
+ \\ handleRequestFn: async fn (*Server, *const std.net.Address, File) void,
+ \\};
+ \\test "hi" {
+ \\ var ptr = @ptrCast(async fn (i32) void, other);
+ \\}
+ \\
+ );
+}
+
test "zig fmt: whitespace fixes" {
try testTransform("test \"\" {\r\n\tconst hi = x;\r\n}\n// zig fmt: off\ntest \"\"{\r\n\tconst a = b;}\r\n",
\\test "" {
@@ -210,7 +222,7 @@ test "zig fmt: spaces around slice operator" {
test "zig fmt: async call in if condition" {
try testCanonical(
\\comptime {
- \\ if (async<a> b()) {
+ \\ if (async b()) {
\\ a();
\\ }
\\}
@@ -1118,7 +1130,7 @@ test "zig fmt: first line comment in struct initializer" {
\\pub async fn acquire(self: *Self) HeldLock {
\\ return HeldLock{
\\ // guaranteed allocation elision
- \\ .held = await (async self.lock.acquire() catch unreachable),
+ \\ .held = self.lock.acquire(),
\\ .value = &self.private_data,
\\ };
\\}
@@ -1183,7 +1195,7 @@ test "zig fmt: resume from suspend block" {
try testCanonical(
\\fn foo() void {
\\ suspend {
- \\ resume @handle();
+ \\ resume @frame();
\\ }
\\}
\\
@@ -2103,7 +2115,7 @@ test "zig fmt: inline asm" {
);
}
-test "zig fmt: coroutines" {
+test "zig fmt: async functions" {
try testCanonical(
\\async fn simpleAsyncFn() void {
\\ const a = async a.b();
@@ -2111,14 +2123,14 @@ test "zig fmt: coroutines" {
\\ suspend;
\\ x += 1;
\\ suspend;
- \\ const p: promise->void = async simpleAsyncFn() catch unreachable;
+ \\ const p: anyframe->void = async simpleAsyncFn() catch unreachable;
\\ await p;
\\}
\\
- \\test "coroutine suspend, resume, cancel" {
- \\ const p: promise = try async testAsyncSeq();
+ \\test "suspend, resume, await" {
+ \\ const p: anyframe = async testAsyncSeq();
\\ resume p;
- \\ cancel p;
+ \\ await p;
\\}
\\
);
diff --git a/std/zig/render.zig b/std/zig/render.zig
index 6739db4285..035526ed11 100644
--- a/std/zig/render.zig
+++ b/std/zig/render.zig
@@ -284,20 +284,6 @@ fn renderExpression(
return renderExpression(allocator, stream, tree, indent, start_col, comptime_node.expr, space);
},
- ast.Node.Id.AsyncAttribute => {
- const async_attr = @fieldParentPtr(ast.Node.AsyncAttribute, "base", base);
-
- if (async_attr.allocator_type) |allocator_type| {
- try renderToken(tree, stream, async_attr.async_token, indent, start_col, Space.None); // async
-
- try renderToken(tree, stream, tree.nextToken(async_attr.async_token), indent, start_col, Space.None); // <
- try renderExpression(allocator, stream, tree, indent, start_col, allocator_type, Space.None); // allocator
- return renderToken(tree, stream, tree.nextToken(allocator_type.lastToken()), indent, start_col, space); // >
- } else {
- return renderToken(tree, stream, async_attr.async_token, indent, start_col, space); // async
- }
- },
-
ast.Node.Id.Suspend => {
const suspend_node = @fieldParentPtr(ast.Node.Suspend, "base", base);
@@ -459,8 +445,8 @@ fn renderExpression(
switch (suffix_op.op) {
@TagType(ast.Node.SuffixOp.Op).Call => |*call_info| {
- if (call_info.async_attr) |async_attr| {
- try renderExpression(allocator, stream, tree, indent, start_col, &async_attr.base, Space.Space);
+ if (call_info.async_token) |async_token| {
+ try renderToken(tree, stream, async_token, indent, start_col, Space.Space);
}
try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
@@ -1121,10 +1107,6 @@ fn renderExpression(
try renderToken(tree, stream, cc_token, indent, start_col, Space.Space); // stdcallcc
}
- if (fn_proto.async_attr) |async_attr| {
- try renderExpression(allocator, stream, tree, indent, start_col, &async_attr.base, Space.Space);
- }
-
const lparen = if (fn_proto.name_token) |name_token| blk: {
try renderToken(tree, stream, fn_proto.fn_token, indent, start_col, Space.Space); // fn
try renderToken(tree, stream, name_token, indent, start_col, Space.None); // name
@@ -1205,15 +1187,15 @@ fn renderExpression(
}
},
- ast.Node.Id.PromiseType => {
- const promise_type = @fieldParentPtr(ast.Node.PromiseType, "base", base);
+ ast.Node.Id.AnyFrameType => {
+ const anyframe_type = @fieldParentPtr(ast.Node.AnyFrameType, "base", base);
- if (promise_type.result) |result| {
- try renderToken(tree, stream, promise_type.promise_token, indent, start_col, Space.None); // promise
+ if (anyframe_type.result) |result| {
+ try renderToken(tree, stream, anyframe_type.anyframe_token, indent, start_col, Space.None); // anyframe
try renderToken(tree, stream, result.arrow_token, indent, start_col, Space.None); // ->
return renderExpression(allocator, stream, tree, indent, start_col, result.return_type, space);
} else {
- return renderToken(tree, stream, promise_type.promise_token, indent, start_col, space); // promise
+ return renderToken(tree, stream, anyframe_type.anyframe_token, indent, start_col, space); // anyframe
}
},
diff --git a/std/zig/tokenizer.zig b/std/zig/tokenizer.zig
index 4539e1e5b2..4d4ceb07db 100644
--- a/std/zig/tokenizer.zig
+++ b/std/zig/tokenizer.zig
@@ -15,12 +15,12 @@ pub const Token = struct {
Keyword{ .bytes = "align", .id = Id.Keyword_align },
Keyword{ .bytes = "allowzero", .id = Id.Keyword_allowzero },
Keyword{ .bytes = "and", .id = Id.Keyword_and },
+ Keyword{ .bytes = "anyframe", .id = Id.Keyword_anyframe },
Keyword{ .bytes = "asm", .id = Id.Keyword_asm },
Keyword{ .bytes = "async", .id = Id.Keyword_async },
Keyword{ .bytes = "await", .id = Id.Keyword_await },
Keyword{ .bytes = "break", .id = Id.Keyword_break },
Keyword{ .bytes = "catch", .id = Id.Keyword_catch },
- Keyword{ .bytes = "cancel", .id = Id.Keyword_cancel },
Keyword{ .bytes = "comptime", .id = Id.Keyword_comptime },
Keyword{ .bytes = "const", .id = Id.Keyword_const },
Keyword{ .bytes = "continue", .id = Id.Keyword_continue },
@@ -42,7 +42,6 @@ pub const Token = struct {
Keyword{ .bytes = "or", .id = Id.Keyword_or },
Keyword{ .bytes = "orelse", .id = Id.Keyword_orelse },
Keyword{ .bytes = "packed", .id = Id.Keyword_packed },
- Keyword{ .bytes = "promise", .id = Id.Keyword_promise },
Keyword{ .bytes = "pub", .id = Id.Keyword_pub },
Keyword{ .bytes = "resume", .id = Id.Keyword_resume },
Keyword{ .bytes = "return", .id = Id.Keyword_return },
@@ -151,7 +150,6 @@ pub const Token = struct {
Keyword_async,
Keyword_await,
Keyword_break,
- Keyword_cancel,
Keyword_catch,
Keyword_comptime,
Keyword_const,
@@ -174,7 +172,7 @@ pub const Token = struct {
Keyword_or,
Keyword_orelse,
Keyword_packed,
- Keyword_promise,
+ Keyword_anyframe,
Keyword_pub,
Keyword_resume,
Keyword_return,
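
With `promise` and `cancel` dropped from the keyword table and `anyframe` added, code written against the old tokenizer migrates roughly as follows (a hedged sketch, not taken from this diff; the old form was `const p = try async compute(); ... cancel p;`):

fn compute() i32 {
    return 1234;
}

fn amain() void {
    var frame = async compute(); // no allocator, no error to catch
    const value = await frame; // compute never suspended, so await copies the result
    _ = value;
}

test "migration sketch" {
    _ = async amain();
}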
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index a4bc2a66f0..5eec78fa7f 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -2,6 +2,220 @@ const tests = @import("tests.zig");
const builtin = @import("builtin");
pub fn addCases(cases: *tests.CompileErrorContext) void {
+ cases.add(
+ "result location incompatibility mismatching handle_is_ptr (generic call)",
+ \\export fn entry() void {
+ \\ var damn = Container{
+ \\ .not_optional = getOptional(i32),
+ \\ };
+ \\}
+ \\pub fn getOptional(comptime T: type) ?T {
+ \\ return 0;
+ \\}
+ \\pub const Container = struct {
+ \\ not_optional: i32,
+ \\};
+ ,
+ "tmp.zig:3:36: error: expected type 'i32', found '?i32'",
+ );
+
+ cases.add(
+ "result location incompatibility mismatching handle_is_ptr",
+ \\export fn entry() void {
+ \\ var damn = Container{
+ \\ .not_optional = getOptional(),
+ \\ };
+ \\}
+ \\pub fn getOptional() ?i32 {
+ \\ return 0;
+ \\}
+ \\pub const Container = struct {
+ \\ not_optional: i32,
+ \\};
+ ,
+ "tmp.zig:3:36: error: expected type 'i32', found '?i32'",
+ );
+
+ cases.add(
+ "const frame cast to anyframe",
+ \\export fn a() void {
+ \\ const f = async func();
+ \\ resume f;
+ \\}
+ \\export fn b() void {
+ \\ const f = async func();
+ \\ var x: anyframe = &f;
+ \\}
+ \\fn func() void {
+ \\ suspend;
+ \\}
+ ,
+ "tmp.zig:3:12: error: expected type 'anyframe', found '*const @Frame(func)'",
+ "tmp.zig:7:24: error: expected type 'anyframe', found '*const @Frame(func)'",
+ );
+
+ cases.add(
+ "prevent bad implicit casting of anyframe types",
+ \\export fn a() void {
+ \\ var x: anyframe = undefined;
+ \\ var y: anyframe->i32 = x;
+ \\}
+ \\export fn b() void {
+ \\ var x: i32 = undefined;
+ \\ var y: anyframe->i32 = x;
+ \\}
+ \\export fn c() void {
+ \\ var x: @Frame(func) = undefined;
+ \\ var y: anyframe->i32 = &x;
+ \\}
+ \\fn func() void {}
+ ,
+ "tmp.zig:3:28: error: expected type 'anyframe->i32', found 'anyframe'",
+ "tmp.zig:7:28: error: expected type 'anyframe->i32', found 'i32'",
+ "tmp.zig:11:29: error: expected type 'anyframe->i32', found '*@Frame(func)'",
+ );
+
+ cases.add(
+ "wrong frame type used for async call",
+ \\export fn entry() void {
+ \\ var frame: @Frame(foo) = undefined;
+ \\ frame = async bar();
+ \\}
+ \\fn foo() void {
+ \\ suspend;
+ \\}
+ \\fn bar() void {
+ \\ suspend;
+ \\}
+ ,
+ "tmp.zig:3:5: error: expected type '*@Frame(bar)', found '*@Frame(foo)'",
+ );
+
+ cases.add(
+ "@Frame() of generic function",
+ \\export fn entry() void {
+ \\ var frame: @Frame(func) = undefined;
+ \\}
+ \\fn func(comptime T: type) void {
+ \\ var x: T = undefined;
+ \\}
+ ,
+ "tmp.zig:2:16: error: @Frame() of generic function",
+ );
+
+ cases.add(
+ "@frame() causes function to be async",
+ \\export fn entry() void {
+ \\ func();
+ \\}
+ \\fn func() void {
+ \\ _ = @frame();
+ \\}
+ ,
+ "tmp.zig:1:1: error: function with calling convention 'ccc' cannot be async",
+ "tmp.zig:5:9: note: @frame() causes function to be async",
+ );
+
+ cases.add(
+ "invalid suspend in exported function",
+ \\export fn entry() void {
+ \\ var frame = async func();
+ \\ var result = await frame;
+ \\}
+ \\fn func() void {
+ \\ suspend;
+ \\}
+ ,
+ "tmp.zig:1:1: error: function with calling convention 'ccc' cannot be async",
+ "tmp.zig:3:18: note: await is a suspend point",
+ );
+
+ cases.add(
+ "async function indirectly depends on its own frame",
+ \\export fn entry() void {
+ \\ _ = async amain();
+ \\}
+ \\async fn amain() void {
+ \\ other();
+ \\}
+ \\fn other() void {
+ \\ var x: [@sizeOf(@Frame(amain))]u8 = undefined;
+ \\}
+ ,
+ "tmp.zig:4:1: error: unable to determine async function frame of 'amain'",
+ "tmp.zig:5:10: note: analysis of function 'other' depends on the frame",
+ "tmp.zig:8:13: note: depends on the frame here",
+ );
+
+ cases.add(
+ "async function depends on its own frame",
+ \\export fn entry() void {
+ \\ _ = async amain();
+ \\}
+ \\async fn amain() void {
+ \\ var x: [@sizeOf(@Frame(amain))]u8 = undefined;
+ \\}
+ ,
+ "tmp.zig:4:1: error: cannot resolve '@Frame(amain)': function not fully analyzed yet",
+ "tmp.zig:5:13: note: depends on its own frame here",
+ );
+
+ cases.add(
+ "non async function pointer passed to @asyncCall",
+ \\export fn entry() void {
+ \\ var ptr = afunc;
+ \\ var bytes: [100]u8 = undefined;
+ \\ _ = @asyncCall(&bytes, {}, ptr);
+ \\}
+ \\fn afunc() void { }
+ ,
+ "tmp.zig:4:32: error: expected async function, found 'fn() void'",
+ );
+
+ cases.add(
+ "runtime-known async function called",
+ \\export fn entry() void {
+ \\ _ = async amain();
+ \\}
+ \\fn amain() void {
+ \\ var ptr = afunc;
+ \\ _ = ptr();
+ \\}
+ \\async fn afunc() void {}
+ ,
+ "tmp.zig:6:12: error: function is not comptime-known; @asyncCall required",
+ );
+
+ cases.add(
+ "runtime-known function called with async keyword",
+ \\export fn entry() void {
+ \\ var ptr = afunc;
+ \\ _ = async ptr();
+ \\}
+ \\
+ \\async fn afunc() void { }
+ ,
+ "tmp.zig:3:15: error: function is not comptime-known; @asyncCall required",
+ );
+
+ cases.add(
+ "function with ccc indirectly calling async function",
+ \\export fn entry() void {
+ \\ foo();
+ \\}
+ \\fn foo() void {
+ \\ bar();
+ \\}
+ \\fn bar() void {
+ \\ suspend;
+ \\}
+ ,
+ "tmp.zig:1:1: error: function with calling convention 'ccc' cannot be async",
+ "tmp.zig:2:8: note: async function call here",
+ "tmp.zig:5:8: note: async function call here",
+ "tmp.zig:8:5: note: suspends here",
+ );
+
cases.add(
"capture group on switch prong with incompatible payload types",
\\const Union = union(enum) {
@@ -1319,24 +1533,14 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
);
cases.add(
- "@handle() called outside of function definition",
- \\var handle_undef: promise = undefined;
- \\var handle_dummy: promise = @handle();
+ "@frame() called outside of function definition",
+ \\var handle_undef: anyframe = undefined;
+ \\var handle_dummy: anyframe = @frame();
\\export fn entry() bool {
\\ return handle_undef == handle_dummy;
\\}
,
- "tmp.zig:2:29: error: @handle() called outside of function definition",
- );
-
- cases.add(
- "@handle() in non-async function",
- \\export fn entry() bool {
- \\ var handle_undef: promise = undefined;
- \\ return handle_undef == @handle();
- \\}
- ,
- "tmp.zig:3:28: error: @handle() in non-async function",
+ "tmp.zig:2:30: error: @frame() called outside of function definition",
);
cases.add(
@@ -1712,15 +1916,9 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
cases.add(
"suspend inside suspend block",
- \\const std = @import("std",);
- \\
\\export fn entry() void {
- \\ var buf: [500]u8 = undefined;
- \\ var a = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator;
- \\ const p = (async foo()) catch unreachable;
- \\ cancel p;
+ \\ _ = async foo();
\\}
- \\
\\async fn foo() void {
\\ suspend {
\\ suspend {
@@ -1728,8 +1926,8 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ }
\\}
,
- "tmp.zig:12:9: error: cannot suspend inside suspend block",
- "tmp.zig:11:5: note: other suspend block here",
+ "tmp.zig:6:9: error: cannot suspend inside suspend block",
+ "tmp.zig:5:5: note: other suspend block here",
);
cases.add(
@@ -1770,15 +1968,14 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
cases.add(
"returning error from void async function",
- \\const std = @import("std",);
\\export fn entry() void {
- \\ const p = async amain() catch unreachable;
+ \\ _ = async amain();
\\}
\\async fn amain() void {
\\ return error.ShouldBeCompileError;
\\}
,
- "tmp.zig:6:17: error: expected type 'void', found 'error{ShouldBeCompileError}'",
+ "tmp.zig:5:17: error: expected type 'void', found 'error{ShouldBeCompileError}'",
);
cases.add(
@@ -3225,14 +3422,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
"tmp.zig:3:17: note: value 8 cannot fit into type u3",
);
- cases.add(
- "incompatible number literals",
- \\const x = 2 == 2.0;
- \\export fn entry() usize { return @sizeOf(@typeOf(x)); }
- ,
- "tmp.zig:1:11: error: integer value 2 cannot be implicitly casted to type 'comptime_float'",
- );
-
cases.add(
"missing function call param",
\\const Foo = struct {
@@ -3315,7 +3504,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\
\\export fn entry() usize { return @sizeOf(@typeOf(Foo)); }
,
- "tmp.zig:5:18: error: unable to evaluate constant expression",
+ "tmp.zig:5:25: error: unable to evaluate constant expression",
"tmp.zig:2:12: note: called from here",
"tmp.zig:2:8: note: called from here",
);
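
The new compile errors above reject calling a runtime-known async function pointer directly; the accepted form, mirroring the behavior tests later in this diff, looks like this (the 128-byte buffer is illustrative, real code should size it with @frameSize):

var global_frame: anyframe = undefined;

async fn afunc() void {
    global_frame = @frame();
    suspend;
}

test "runtime-known async function requires @asyncCall" {
    var bytes: [128]u8 = undefined; // frame storage supplied by the caller
    var ptr = afunc; // runtime-known function pointer
    _ = @asyncCall(&bytes, {}, ptr); // {} is the result location for void
    resume global_frame;
}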
diff --git a/test/runtime_safety.zig b/test/runtime_safety.zig
index aee607e10f..0fb593c0e2 100644
--- a/test/runtime_safety.zig
+++ b/test/runtime_safety.zig
@@ -1,6 +1,91 @@
const tests = @import("tests.zig");
pub fn addCases(cases: *tests.CompareOutputContext) void {
+ cases.addRuntimeSafety("awaiting twice",
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
+ \\ @import("std").os.exit(126);
+ \\}
+ \\var frame: anyframe = undefined;
+ \\
+ \\pub fn main() void {
+ \\ _ = async amain();
+ \\ resume frame;
+ \\}
+ \\
+ \\fn amain() void {
+ \\ var f = async func();
+ \\ await f;
+ \\ await f;
+ \\}
+ \\
+ \\fn func() void {
+ \\ suspend {
+ \\ frame = @frame();
+ \\ }
+ \\}
+ );
+
+ cases.addRuntimeSafety("@asyncCall with too small a frame",
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
+ \\ @import("std").os.exit(126);
+ \\}
+ \\pub fn main() void {
+ \\ var bytes: [1]u8 = undefined;
+ \\ var ptr = other;
+ \\ var frame = @asyncCall(&bytes, {}, ptr);
+ \\}
+ \\async fn other() void {
+ \\ suspend;
+ \\}
+ );
+
+ cases.addRuntimeSafety("resuming a function which is awaiting a frame",
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
+ \\ @import("std").os.exit(126);
+ \\}
+ \\pub fn main() void {
+ \\ var frame = async first();
+ \\ resume frame;
+ \\}
+ \\fn first() void {
+ \\ var frame = async other();
+ \\ await frame;
+ \\}
+ \\fn other() void {
+ \\ suspend;
+ \\}
+ );
+
+ cases.addRuntimeSafety("resuming a function which is awaiting a call",
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
+ \\ @import("std").os.exit(126);
+ \\}
+ \\pub fn main() void {
+ \\ var frame = async first();
+ \\ resume frame;
+ \\}
+ \\fn first() void {
+ \\ other();
+ \\}
+ \\fn other() void {
+ \\ suspend;
+ \\}
+ );
+
+ cases.addRuntimeSafety("invalid resume of async function",
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
+ \\ @import("std").os.exit(126);
+ \\}
+ \\pub fn main() void {
+ \\ var p = async suspendOnce();
+ \\ resume p; //ok
+ \\ resume p; //bad
+ \\}
+ \\fn suspendOnce() void {
+ \\ suspend;
+ \\}
+ );
+
cases.addRuntimeSafety(".? operator on null pointer",
\\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
@@ -483,23 +568,29 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\ std.os.exit(126);
\\}
\\
+ \\var failing_frame: @Frame(failing) = undefined;
+ \\
\\pub fn main() void {
\\ const p = nonFailing();
\\ resume p;
- \\ const p2 = async printTrace(p) catch unreachable;
- \\ cancel p2;
+ \\ const p2 = async printTrace(p);
\\}
\\
- \\fn nonFailing() promise->anyerror!void {
- \\ return async failing() catch unreachable;
+ \\fn nonFailing() anyframe->anyerror!void {
+ \\ failing_frame = async failing();
+ \\ return &failing_frame;
\\}
\\
\\async fn failing() anyerror!void {
\\ suspend;
+ \\ return second();
+ \\}
+ \\
+ \\async fn second() anyerror!void {
\\ return error.Fail;
\\}
\\
- \\async fn printTrace(p: promise->anyerror!void) void {
+ \\async fn printTrace(p: anyframe->anyerror!void) void {
\\ (await p) catch unreachable;
\\}
);
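
These safety checks enforce a one-await, matched-resume discipline. For contrast with the panicking programs above, the well-defined counterpart of the "awaiting twice" case (a sketch, not part of this diff) is:

var the_frame: anyframe = undefined;

fn func() void {
    suspend {
        the_frame = @frame();
    }
}

fn amain() void {
    var f = async func();
    await f; // exactly once; a second await here is the panic tested above
}

pub fn main() void {
    _ = async amain();
    resume the_frame; // exactly one resume per suspend
}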
diff --git a/test/stage1/behavior.zig b/test/stage1/behavior.zig
index 71af5586ed..6ec1521048 100644
--- a/test/stage1/behavior.zig
+++ b/test/stage1/behavior.zig
@@ -3,12 +3,13 @@ comptime {
_ = @import("behavior/alignof.zig");
_ = @import("behavior/array.zig");
_ = @import("behavior/asm.zig");
+ _ = @import("behavior/async_fn.zig");
_ = @import("behavior/atomics.zig");
+ _ = @import("behavior/await_struct.zig");
_ = @import("behavior/bit_shifting.zig");
_ = @import("behavior/bitcast.zig");
_ = @import("behavior/bitreverse.zig");
_ = @import("behavior/bool.zig");
- _ = @import("behavior/byteswap.zig");
_ = @import("behavior/bugs/1025.zig");
_ = @import("behavior/bugs/1076.zig");
_ = @import("behavior/bugs/1111.zig");
@@ -38,23 +39,23 @@ comptime {
_ = @import("behavior/bugs/726.zig");
_ = @import("behavior/bugs/828.zig");
_ = @import("behavior/bugs/920.zig");
+ _ = @import("behavior/byteswap.zig");
_ = @import("behavior/byval_arg_var.zig");
- _ = @import("behavior/cancel.zig");
_ = @import("behavior/cast.zig");
_ = @import("behavior/const_slice_child.zig");
- _ = @import("behavior/coroutine_await_struct.zig");
- _ = @import("behavior/coroutines.zig");
_ = @import("behavior/defer.zig");
_ = @import("behavior/enum.zig");
_ = @import("behavior/enum_with_members.zig");
_ = @import("behavior/error.zig");
_ = @import("behavior/eval.zig");
_ = @import("behavior/field_parent_ptr.zig");
+ _ = @import("behavior/floatop.zig");
_ = @import("behavior/fn.zig");
_ = @import("behavior/fn_in_struct_in_comptime.zig");
_ = @import("behavior/for.zig");
_ = @import("behavior/generics.zig");
_ = @import("behavior/hasdecl.zig");
+ _ = @import("behavior/hasfield.zig");
_ = @import("behavior/if.zig");
_ = @import("behavior/import.zig");
_ = @import("behavior/incomplete_struct_param_tld.zig");
@@ -63,14 +64,13 @@ comptime {
_ = @import("behavior/math.zig");
_ = @import("behavior/merge_error_sets.zig");
_ = @import("behavior/misc.zig");
+ _ = @import("behavior/muladd.zig");
_ = @import("behavior/namespace_depends_on_compile_var.zig");
_ = @import("behavior/new_stack_call.zig");
_ = @import("behavior/null.zig");
_ = @import("behavior/optional.zig");
_ = @import("behavior/pointers.zig");
_ = @import("behavior/popcount.zig");
- _ = @import("behavior/muladd.zig");
- _ = @import("behavior/floatop.zig");
_ = @import("behavior/ptrcast.zig");
_ = @import("behavior/pub_enum.zig");
_ = @import("behavior/ref_var_in_if_after_if_2nd_switch_prong.zig");
@@ -99,5 +99,4 @@ comptime {
_ = @import("behavior/void.zig");
_ = @import("behavior/while.zig");
_ = @import("behavior/widening.zig");
- _ = @import("behavior/hasfield.zig");
}
diff --git a/test/stage1/behavior/align.zig b/test/stage1/behavior/align.zig
index b64f45bb6b..f607ac59d2 100644
--- a/test/stage1/behavior/align.zig
+++ b/test/stage1/behavior/align.zig
@@ -228,3 +228,65 @@ test "alignment of extern() void" {
}
extern fn nothing() void {}
+
+test "return error union with 128-bit integer" {
+ expect(3 == try give());
+}
+fn give() anyerror!u128 {
+ return 3;
+}
+
+test "alignment of >= 128-bit integer type" {
+ expect(@alignOf(u128) == 16);
+ expect(@alignOf(u129) == 16);
+}
+
+test "alignment of struct with 128-bit field" {
+ expect(@alignOf(struct {
+ x: u128,
+ }) == 16);
+
+ comptime {
+ expect(@alignOf(struct {
+ x: u128,
+ }) == 16);
+ }
+}
+
+test "size of extern struct with 128-bit field" {
+ expect(@sizeOf(extern struct {
+ x: u128,
+ y: u8,
+ }) == 32);
+
+ comptime {
+ expect(@sizeOf(extern struct {
+ x: u128,
+ y: u8,
+ }) == 32);
+ }
+}
+
+const DefaultAligned = struct {
+ nevermind: u32,
+ badguy: i128,
+};
+
+test "read 128-bit field from default aligned struct in stack memory" {
+ var default_aligned = DefaultAligned{
+ .nevermind = 1,
+ .badguy = 12,
+ };
+ expect((@ptrToInt(&default_aligned.badguy) % 16) == 0);
+ expect(12 == default_aligned.badguy);
+}
+
+var default_aligned_global = DefaultAligned{
+ .nevermind = 1,
+ .badguy = 12,
+};
+
+test "read 128-bit field from default aligned struct in global memory" {
+ expect((@ptrToInt(&default_aligned_global.badguy) % 16) == 0);
+ expect(12 == default_aligned_global.badguy);
+}
diff --git a/test/stage1/behavior/async_fn.zig b/test/stage1/behavior/async_fn.zig
new file mode 100644
index 0000000000..e1b173292b
--- /dev/null
+++ b/test/stage1/behavior/async_fn.zig
@@ -0,0 +1,819 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const expect = std.testing.expect;
+const expectEqual = std.testing.expectEqual;
+
+var global_x: i32 = 1;
+
+test "simple coroutine suspend and resume" {
+ var frame = async simpleAsyncFn();
+ expect(global_x == 2);
+ resume frame;
+ expect(global_x == 3);
+ const af: anyframe->void = &frame;
+ resume frame;
+ expect(global_x == 4);
+}
+fn simpleAsyncFn() void {
+ global_x += 1;
+ suspend;
+ global_x += 1;
+ suspend;
+ global_x += 1;
+}
+
+var global_y: i32 = 1;
+
+test "pass parameter to coroutine" {
+ var p = async simpleAsyncFnWithArg(2);
+ expect(global_y == 3);
+ resume p;
+ expect(global_y == 5);
+}
+fn simpleAsyncFnWithArg(delta: i32) void {
+ global_y += delta;
+ suspend;
+ global_y += delta;
+}
+
+test "suspend at end of function" {
+ const S = struct {
+ var x: i32 = 1;
+
+ fn doTheTest() void {
+ expect(x == 1);
+ const p = async suspendAtEnd();
+ expect(x == 2);
+ }
+
+ fn suspendAtEnd() void {
+ x += 1;
+ suspend;
+ }
+ };
+ S.doTheTest();
+}
+
+test "local variable in async function" {
+ const S = struct {
+ var x: i32 = 0;
+
+ fn doTheTest() void {
+ expect(x == 0);
+ var p = async add(1, 2);
+ expect(x == 0);
+ resume p;
+ expect(x == 0);
+ resume p;
+ expect(x == 0);
+ resume p;
+ expect(x == 3);
+ }
+
+ fn add(a: i32, b: i32) void {
+ var accum: i32 = 0;
+ suspend;
+ accum += a;
+ suspend;
+ accum += b;
+ suspend;
+ x = accum;
+ }
+ };
+ S.doTheTest();
+}
+
+test "calling an inferred async function" {
+ const S = struct {
+ var x: i32 = 1;
+ var other_frame: *@Frame(other) = undefined;
+
+ fn doTheTest() void {
+ _ = async first();
+ expect(x == 1);
+ resume other_frame.*;
+ expect(x == 2);
+ }
+
+ fn first() void {
+ other();
+ }
+ fn other() void {
+ other_frame = @frame();
+ suspend;
+ x += 1;
+ }
+ };
+ S.doTheTest();
+}
+
+test "@frameSize" {
+ const S = struct {
+ fn doTheTest() void {
+ {
+ var ptr = @ptrCast(async fn (i32) void, other);
+ const size = @frameSize(ptr);
+ expect(size == @sizeOf(@Frame(other)));
+ }
+ {
+ var ptr = @ptrCast(async fn () void, first);
+ const size = @frameSize(ptr);
+ expect(size == @sizeOf(@Frame(first)));
+ }
+ }
+
+ fn first() void {
+ other(1);
+ }
+ fn other(param: i32) void {
+ var local: i32 = undefined;
+ suspend;
+ }
+ };
+ S.doTheTest();
+}
+
+test "coroutine suspend, resume" {
+ const S = struct {
+ var frame: anyframe = undefined;
+
+ fn doTheTest() void {
+ _ = async amain();
+ seq('d');
+ resume frame;
+ seq('h');
+
+ expect(std.mem.eql(u8, points, "abcdefgh"));
+ }
+
+ fn amain() void {
+ seq('a');
+ var f = async testAsyncSeq();
+ seq('c');
+ await f;
+ seq('g');
+ }
+
+ fn testAsyncSeq() void {
+ defer seq('f');
+
+ seq('b');
+ suspend {
+ frame = @frame();
+ }
+ seq('e');
+ }
+ var points = [_]u8{'x'} ** "abcdefgh".len;
+ var index: usize = 0;
+
+ fn seq(c: u8) void {
+ points[index] = c;
+ index += 1;
+ }
+ };
+ S.doTheTest();
+}
+
+test "coroutine suspend with block" {
+ const p = async testSuspendBlock();
+ expect(!global_result);
+ resume a_promise;
+ expect(global_result);
+}
+
+var a_promise: anyframe = undefined;
+var global_result = false;
+async fn testSuspendBlock() void {
+ suspend {
+ comptime expect(@typeOf(@frame()) == *@Frame(testSuspendBlock));
+ a_promise = @frame();
+ }
+
+ // Test to make sure that @frame() works as advertised (issue #1296)
+ // var our_handle: anyframe = @frame();
+ expect(a_promise == anyframe(@frame()));
+
+ global_result = true;
+}
+
+var await_a_promise: anyframe = undefined;
+var await_final_result: i32 = 0;
+
+test "coroutine await" {
+ await_seq('a');
+ var p = async await_amain();
+ await_seq('f');
+ resume await_a_promise;
+ await_seq('i');
+ expect(await_final_result == 1234);
+ expect(std.mem.eql(u8, await_points, "abcdefghi"));
+}
+async fn await_amain() void {
+ await_seq('b');
+ var p = async await_another();
+ await_seq('e');
+ await_final_result = await p;
+ await_seq('h');
+}
+async fn await_another() i32 {
+ await_seq('c');
+ suspend {
+ await_seq('d');
+ await_a_promise = @frame();
+ }
+ await_seq('g');
+ return 1234;
+}
+
+var await_points = [_]u8{0} ** "abcdefghi".len;
+var await_seq_index: usize = 0;
+
+fn await_seq(c: u8) void {
+ await_points[await_seq_index] = c;
+ await_seq_index += 1;
+}
+
+var early_final_result: i32 = 0;
+
+test "coroutine await early return" {
+ early_seq('a');
+ var p = async early_amain();
+ early_seq('f');
+ expect(early_final_result == 1234);
+ expect(std.mem.eql(u8, early_points, "abcdef"));
+}
+async fn early_amain() void {
+ early_seq('b');
+ var p = async early_another();
+ early_seq('d');
+ early_final_result = await p;
+ early_seq('e');
+}
+async fn early_another() i32 {
+ early_seq('c');
+ return 1234;
+}
+
+var early_points = [_]u8{0} ** "abcdef".len;
+var early_seq_index: usize = 0;
+
+fn early_seq(c: u8) void {
+ early_points[early_seq_index] = c;
+ early_seq_index += 1;
+}
+
+test "async function with dot syntax" {
+ const S = struct {
+ var y: i32 = 1;
+ async fn foo() void {
+ y += 1;
+ suspend;
+ }
+ };
+ const p = async S.foo();
+ expect(S.y == 2);
+}
+
+test "async fn pointer in a struct field" {
+ var data: i32 = 1;
+ const Foo = struct {
+ bar: async fn (*i32) void,
+ };
+ var foo = Foo{ .bar = simpleAsyncFn2 };
+ var bytes: [64]u8 = undefined;
+ const f = @asyncCall(&bytes, {}, foo.bar, &data);
+ comptime expect(@typeOf(f) == anyframe->void);
+ expect(data == 2);
+ resume f;
+ expect(data == 4);
+ _ = async doTheAwait(f);
+ expect(data == 4);
+}
+
+fn doTheAwait(f: anyframe->void) void {
+ await f;
+}
+
+async fn simpleAsyncFn2(y: *i32) void {
+ defer y.* += 2;
+ y.* += 1;
+ suspend;
+}
+
+test "@asyncCall with return type" {
+ const Foo = struct {
+ bar: async fn () i32,
+
+ var global_frame: anyframe = undefined;
+
+ async fn middle() i32 {
+ return afunc();
+ }
+
+ fn afunc() i32 {
+ global_frame = @frame();
+ suspend;
+ return 1234;
+ }
+ };
+ var foo = Foo{ .bar = Foo.middle };
+ var bytes: [150]u8 = undefined;
+ var aresult: i32 = 0;
+ _ = @asyncCall(&bytes, &aresult, foo.bar);
+ expect(aresult == 0);
+ resume Foo.global_frame;
+ expect(aresult == 1234);
+}
+
+test "async fn with inferred error set" {
+ const S = struct {
+ var global_frame: anyframe = undefined;
+
+ fn doTheTest() void {
+ var frame: [1]@Frame(middle) = undefined;
+ var result: anyerror!void = undefined;
+ _ = @asyncCall(@sliceToBytes(frame[0..]), &result, middle);
+ resume global_frame;
+ std.testing.expectError(error.Fail, result);
+ }
+
+ async fn middle() !void {
+ var f = async middle2();
+ return await f;
+ }
+
+ fn middle2() !void {
+ return failing();
+ }
+
+ fn failing() !void {
+ global_frame = @frame();
+ suspend;
+ return error.Fail;
+ }
+ };
+ S.doTheTest();
+}
+
+test "error return trace across suspend points - early return" {
+ const p = nonFailing();
+ resume p;
+ const p2 = async printTrace(p);
+}
+
+test "error return trace across suspend points - async return" {
+ const p = nonFailing();
+ const p2 = async printTrace(p);
+ resume p;
+}
+
+fn nonFailing() (anyframe->anyerror!void) {
+ const Static = struct {
+ var frame: @Frame(suspendThenFail) = undefined;
+ };
+ Static.frame = async suspendThenFail();
+ return &Static.frame;
+}
+async fn suspendThenFail() anyerror!void {
+ suspend;
+ return error.Fail;
+}
+async fn printTrace(p: anyframe->(anyerror!void)) void {
+ (await p) catch |e| {
+ std.testing.expect(e == error.Fail);
+ if (@errorReturnTrace()) |trace| {
+ expect(trace.index == 1);
+ } else switch (builtin.mode) {
+ .Debug, .ReleaseSafe => @panic("expected return trace"),
+ .ReleaseFast, .ReleaseSmall => {},
+ }
+ };
+}
+
+test "break from suspend" {
+ var my_result: i32 = 1;
+ const p = async testBreakFromSuspend(&my_result);
+ std.testing.expect(my_result == 2);
+}
+async fn testBreakFromSuspend(my_result: *i32) void {
+ suspend {
+ resume @frame();
+ }
+ my_result.* += 1;
+ suspend;
+ my_result.* += 1;
+}
+
+test "heap allocated async function frame" {
+ const S = struct {
+ var x: i32 = 42;
+
+ fn doTheTest() !void {
+ const frame = try std.heap.direct_allocator.create(@Frame(someFunc));
+ defer std.heap.direct_allocator.destroy(frame);
+
+ expect(x == 42);
+ frame.* = async someFunc();
+ expect(x == 43);
+ resume frame;
+ expect(x == 44);
+ }
+
+ fn someFunc() void {
+ x += 1;
+ suspend;
+ x += 1;
+ }
+ };
+ try S.doTheTest();
+}
+
+test "async function call return value" {
+ const S = struct {
+ var frame: anyframe = undefined;
+ var pt = Point{ .x = 10, .y = 11 };
+
+ fn doTheTest() void {
+ expectEqual(pt.x, 10);
+ expectEqual(pt.y, 11);
+ _ = async first();
+ expectEqual(pt.x, 10);
+ expectEqual(pt.y, 11);
+ resume frame;
+ expectEqual(pt.x, 1);
+ expectEqual(pt.y, 2);
+ }
+
+ fn first() void {
+ pt = second(1, 2);
+ }
+
+ fn second(x: i32, y: i32) Point {
+ return other(x, y);
+ }
+
+ fn other(x: i32, y: i32) Point {
+ frame = @frame();
+ suspend;
+ return Point{
+ .x = x,
+ .y = y,
+ };
+ }
+
+ const Point = struct {
+ x: i32,
+ y: i32,
+ };
+ };
+ S.doTheTest();
+}
+
+test "suspension points inside branching control flow" {
+ const S = struct {
+ var result: i32 = 10;
+
+ fn doTheTest() void {
+ expect(10 == result);
+ var frame = async func(true);
+ expect(10 == result);
+ resume frame;
+ expect(11 == result);
+ resume frame;
+ expect(12 == result);
+ resume frame;
+ expect(13 == result);
+ }
+
+ fn func(b: bool) void {
+ while (b) {
+ suspend;
+ result += 1;
+ }
+ }
+ };
+ S.doTheTest();
+}
+
+test "call async function which has struct return type" {
+ const S = struct {
+ var frame: anyframe = undefined;
+
+ fn doTheTest() void {
+ _ = async atest();
+ resume frame;
+ }
+
+ fn atest() void {
+ const result = func();
+ expect(result.x == 5);
+ expect(result.y == 6);
+ }
+
+ const Point = struct {
+ x: usize,
+ y: usize,
+ };
+
+ fn func() Point {
+ suspend {
+ frame = @frame();
+ }
+ return Point{
+ .x = 5,
+ .y = 6,
+ };
+ }
+ };
+ S.doTheTest();
+}
+
+test "pass string literal to async function" {
+ const S = struct {
+ var frame: anyframe = undefined;
+ var ok: bool = false;
+
+ fn doTheTest() void {
+ _ = async hello("hello");
+ resume frame;
+ expect(ok);
+ }
+
+ fn hello(msg: []const u8) void {
+ frame = @frame();
+ suspend;
+ expectEqual(([]const u8)("hello"), msg);
+ ok = true;
+ }
+ };
+ S.doTheTest();
+}
+
+test "await inside an errdefer" {
+ const S = struct {
+ var frame: anyframe = undefined;
+
+ fn doTheTest() void {
+ _ = async amainWrap();
+ resume frame;
+ }
+
+ fn amainWrap() !void {
+ var foo = async func();
+ errdefer await foo;
+ return error.Bad;
+ }
+
+ fn func() void {
+ frame = @frame();
+ suspend;
+ }
+ };
+ S.doTheTest();
+}
+
+test "try in an async function with error union and non-zero-bit payload" {
+ const S = struct {
+ var frame: anyframe = undefined;
+ var ok = false;
+
+ fn doTheTest() void {
+ _ = async amain();
+ resume frame;
+ expect(ok);
+ }
+
+ fn amain() void {
+ std.testing.expectError(error.Bad, theProblem());
+ ok = true;
+ }
+
+ fn theProblem() ![]u8 {
+ frame = @frame();
+ suspend;
+ const result = try other();
+ return result;
+ }
+
+ fn other() ![]u8 {
+ return error.Bad;
+ }
+ };
+ S.doTheTest();
+}
+
+test "returning a const error from async function" {
+ const S = struct {
+ var frame: anyframe = undefined;
+ var ok = false;
+
+ fn doTheTest() void {
+ _ = async amain();
+ resume frame;
+ expect(ok);
+ }
+
+ fn amain() !void {
+ var download_frame = async fetchUrl(10, "a string");
+ const download_text = try await download_frame;
+
+ @panic("should not get here");
+ }
+
+ fn fetchUrl(unused: i32, url: []const u8) ![]u8 {
+ frame = @frame();
+ suspend;
+ ok = true;
+ return error.OutOfMemory;
+ }
+ };
+ S.doTheTest();
+}
+
+test "async/await typical usage" {
+ inline for ([_]bool{ false, true }) |b1| {
+ inline for ([_]bool{ false, true }) |b2| {
+ inline for ([_]bool{ false, true }) |b3| {
+ inline for ([_]bool{ false, true }) |b4| {
+ testAsyncAwaitTypicalUsage(b1, b2, b3, b4).doTheTest();
+ }
+ }
+ }
+ }
+}
+
+fn testAsyncAwaitTypicalUsage(
+ comptime simulate_fail_download: bool,
+ comptime simulate_fail_file: bool,
+ comptime suspend_download: bool,
+ comptime suspend_file: bool,
+) type {
+ return struct {
+ fn doTheTest() void {
+ _ = async amainWrap();
+ if (suspend_file) {
+ resume global_file_frame;
+ }
+ if (suspend_download) {
+ resume global_download_frame;
+ }
+ }
+ fn amainWrap() void {
+ if (amain()) |_| {
+ expect(!simulate_fail_download);
+ expect(!simulate_fail_file);
+ } else |e| switch (e) {
+ error.NoResponse => expect(simulate_fail_download),
+ error.FileNotFound => expect(simulate_fail_file),
+ else => @panic("test failure"),
+ }
+ }
+
+ fn amain() !void {
+ const allocator = std.heap.direct_allocator; // TODO once we have the debug allocator, use that, so that this can detect leaks
+ var download_frame = async fetchUrl(allocator, "https://example.com/");
+ var download_awaited = false;
+ errdefer if (!download_awaited) {
+ if (await download_frame) |x| allocator.free(x) else |_| {}
+ };
+
+ var file_frame = async readFile(allocator, "something.txt");
+ var file_awaited = false;
+ errdefer if (!file_awaited) {
+ if (await file_frame) |x| allocator.free(x) else |_| {}
+ };
+
+ download_awaited = true;
+ const download_text = try await download_frame;
+ defer allocator.free(download_text);
+
+ file_awaited = true;
+ const file_text = try await file_frame;
+ defer allocator.free(file_text);
+
+ expect(std.mem.eql(u8, "expected download text", download_text));
+ expect(std.mem.eql(u8, "expected file text", file_text));
+ }
+
+ var global_download_frame: anyframe = undefined;
+ fn fetchUrl(allocator: *std.mem.Allocator, url: []const u8) anyerror![]u8 {
+ const result = try std.mem.dupe(allocator, u8, "expected download text");
+ errdefer allocator.free(result);
+ if (suspend_download) {
+ suspend {
+ global_download_frame = @frame();
+ }
+ }
+ if (simulate_fail_download) return error.NoResponse;
+ return result;
+ }
+
+ var global_file_frame: anyframe = undefined;
+ fn readFile(allocator: *std.mem.Allocator, filename: []const u8) anyerror![]u8 {
+ const result = try std.mem.dupe(allocator, u8, "expected file text");
+ errdefer allocator.free(result);
+ if (suspend_file) {
+ suspend {
+ global_file_frame = @frame();
+ }
+ }
+ if (simulate_fail_file) return error.FileNotFound;
+ return result;
+ }
+ };
+}
+
+test "alignment of local variables in async functions" {
+ const S = struct {
+ fn doTheTest() void {
+ var y: u8 = 123;
+ var x: u8 align(128) = 1;
+ expect(@ptrToInt(&x) % 128 == 0);
+ }
+ };
+ S.doTheTest();
+}
+
+test "no reason to resolve frame still works" {
+ _ = async simpleNothing();
+}
+fn simpleNothing() void {
+ var x: i32 = 1234;
+}
+
+test "async call a generic function" {
+ const S = struct {
+ fn doTheTest() void {
+ var f = async func(i32, 2);
+ const result = await f;
+ expect(result == 3);
+ }
+
+ fn func(comptime T: type, inc: T) T {
+ var x: T = 1;
+ suspend {
+ resume @frame();
+ }
+ x += inc;
+ return x;
+ }
+ };
+ _ = async S.doTheTest();
+}
+
+test "return from suspend block" {
+ const S = struct {
+ fn doTheTest() void {
+ expect(func() == 1234);
+ }
+ fn func() i32 {
+ suspend {
+ return 1234;
+ }
+ }
+ };
+ _ = async S.doTheTest();
+}
+
+test "struct parameter to async function is copied to the frame" {
+ const S = struct {
+ const Point = struct {
+ x: i32,
+ y: i32,
+ };
+
+ var frame: anyframe = undefined;
+
+ fn doTheTest() void {
+ _ = async atest();
+ resume frame;
+ }
+
+ fn atest() void {
+ var f: @Frame(foo) = undefined;
+ bar(&f);
+ clobberStack(10);
+ }
+
+ fn clobberStack(x: i32) void {
+ if (x == 0) return;
+ clobberStack(x - 1);
+ var y: i32 = x;
+ }
+
+ fn bar(f: *@Frame(foo)) void {
+ var pt = Point{ .x = 1, .y = 2 };
+ f.* = async foo(pt);
+ var result = await f;
+ expect(result == 1);
+ }
+
+ fn foo(point: Point) i32 {
+ suspend {
+ frame = @frame();
+ }
+ return point.x;
+ }
+ };
+ S.doTheTest();
+}
diff --git a/test/stage1/behavior/coroutine_await_struct.zig b/test/stage1/behavior/await_struct.zig
similarity index 79%
rename from test/stage1/behavior/coroutine_await_struct.zig
rename to test/stage1/behavior/await_struct.zig
index 66ff8bb492..6e4d330ea3 100644
--- a/test/stage1/behavior/coroutine_await_struct.zig
+++ b/test/stage1/behavior/await_struct.zig
@@ -6,12 +6,12 @@ const Foo = struct {
x: i32,
};
-var await_a_promise: promise = undefined;
+var await_a_promise: anyframe = undefined;
var await_final_result = Foo{ .x = 0 };
test "coroutine await struct" {
await_seq('a');
- const p = async await_amain() catch unreachable;
+ var p = async await_amain();
await_seq('f');
resume await_a_promise;
await_seq('i');
@@ -20,7 +20,7 @@ test "coroutine await struct" {
}
async fn await_amain() void {
await_seq('b');
- const p = async await_another() catch unreachable;
+ var p = async await_another();
await_seq('e');
await_final_result = await p;
await_seq('h');
@@ -29,7 +29,7 @@ async fn await_another() Foo {
await_seq('c');
suspend {
await_seq('d');
- await_a_promise = @handle();
+ await_a_promise = @frame();
}
await_seq('g');
return Foo{ .x = 1234 };
diff --git a/test/stage1/behavior/cancel.zig b/test/stage1/behavior/cancel.zig
deleted file mode 100644
index efc0df1aed..0000000000
--- a/test/stage1/behavior/cancel.zig
+++ /dev/null
@@ -1,86 +0,0 @@
-const std = @import("std");
-
-var defer_f1: bool = false;
-var defer_f2: bool = false;
-var defer_f3: bool = false;
-
-test "cancel forwards" {
- const p = async f1() catch unreachable;
- cancel p;
- std.testing.expect(defer_f1);
- std.testing.expect(defer_f2);
- std.testing.expect(defer_f3);
-}
-
-async fn f1() void {
- defer {
- defer_f1 = true;
- }
- await (async f2() catch unreachable);
-}
-
-async fn f2() void {
- defer {
- defer_f2 = true;
- }
- await (async f3() catch unreachable);
-}
-
-async fn f3() void {
- defer {
- defer_f3 = true;
- }
- suspend;
-}
-
-var defer_b1: bool = false;
-var defer_b2: bool = false;
-var defer_b3: bool = false;
-var defer_b4: bool = false;
-
-test "cancel backwards" {
- const p = async b1() catch unreachable;
- cancel p;
- std.testing.expect(defer_b1);
- std.testing.expect(defer_b2);
- std.testing.expect(defer_b3);
- std.testing.expect(defer_b4);
-}
-
-async fn b1() void {
- defer {
- defer_b1 = true;
- }
- await (async b2() catch unreachable);
-}
-
-var b4_handle: promise = undefined;
-
-async fn b2() void {
- const b3_handle = async b3() catch unreachable;
- resume b4_handle;
- cancel b4_handle;
- defer {
- defer_b2 = true;
- }
- const value = await b3_handle;
- @panic("unreachable");
-}
-
-async fn b3() i32 {
- defer {
- defer_b3 = true;
- }
- await (async b4() catch unreachable);
- return 1234;
-}
-
-async fn b4() void {
- defer {
- defer_b4 = true;
- }
- suspend {
- b4_handle = @handle();
- }
- suspend;
-}
diff --git a/test/stage1/behavior/cast.zig b/test/stage1/behavior/cast.zig
index c243f18088..04c7fa606f 100644
--- a/test/stage1/behavior/cast.zig
+++ b/test/stage1/behavior/cast.zig
@@ -508,7 +508,7 @@ test "peer type resolution: unreachable, null, slice" {
}
test "peer type resolution: unreachable, error set, unreachable" {
- const Error = error {
+ const Error = error{
FileDescriptorAlreadyPresentInSet,
OperationCausesCircularLoop,
FileDescriptorNotRegistered,
@@ -529,3 +529,8 @@ test "peer type resolution: unreachable, error set, unreachable" {
};
expect(transformed_err == error.SystemResources);
}
+
+test "implicit cast comptime_int to comptime_float" {
+ comptime expect(comptime_float(10) == f32(10));
+ expect(2 == 2.0);
+}
diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig
deleted file mode 100644
index 7395f3e064..0000000000
--- a/test/stage1/behavior/coroutines.zig
+++ /dev/null
@@ -1,236 +0,0 @@
-const std = @import("std");
-const builtin = @import("builtin");
-const expect = std.testing.expect;
-const allocator = std.heap.direct_allocator;
-
-var x: i32 = 1;
-
-test "create a coroutine and cancel it" {
- const p = try async simpleAsyncFn();
- comptime expect(@typeOf(p) == promise->void);
- cancel p;
- expect(x == 2);
-}
-async fn simpleAsyncFn() void {
- x += 1;
- suspend;
- x += 1;
-}
-
-test "coroutine suspend, resume, cancel" {
- seq('a');
- const p = try async testAsyncSeq();
- seq('c');
- resume p;
- seq('f');
- cancel p;
- seq('g');
-
- expect(std.mem.eql(u8, points, "abcdefg"));
-}
-async fn testAsyncSeq() void {
- defer seq('e');
-
- seq('b');
- suspend;
- seq('d');
-}
-var points = [_]u8{0} ** "abcdefg".len;
-var index: usize = 0;
-
-fn seq(c: u8) void {
- points[index] = c;
- index += 1;
-}
-
-test "coroutine suspend with block" {
- const p = try async testSuspendBlock();
- std.testing.expect(!result);
- resume a_promise;
- std.testing.expect(result);
- cancel p;
-}
-
-var a_promise: promise = undefined;
-var result = false;
-async fn testSuspendBlock() void {
- suspend {
- comptime expect(@typeOf(@handle()) == promise->void);
- a_promise = @handle();
- }
-
- //Test to make sure that @handle() works as advertised (issue #1296)
- //var our_handle: promise = @handle();
- expect(a_promise == @handle());
-
- result = true;
-}
-
-var await_a_promise: promise = undefined;
-var await_final_result: i32 = 0;
-
-test "coroutine await" {
- await_seq('a');
- const p = async await_amain() catch unreachable;
- await_seq('f');
- resume await_a_promise;
- await_seq('i');
- expect(await_final_result == 1234);
- expect(std.mem.eql(u8, await_points, "abcdefghi"));
-}
-async fn await_amain() void {
- await_seq('b');
- const p = async await_another() catch unreachable;
- await_seq('e');
- await_final_result = await p;
- await_seq('h');
-}
-async fn await_another() i32 {
- await_seq('c');
- suspend {
- await_seq('d');
- await_a_promise = @handle();
- }
- await_seq('g');
- return 1234;
-}
-
-var await_points = [_]u8{0} ** "abcdefghi".len;
-var await_seq_index: usize = 0;
-
-fn await_seq(c: u8) void {
- await_points[await_seq_index] = c;
- await_seq_index += 1;
-}
-
-var early_final_result: i32 = 0;
-
-test "coroutine await early return" {
- early_seq('a');
- const p = async early_amain() catch @panic("out of memory");
- early_seq('f');
- expect(early_final_result == 1234);
- expect(std.mem.eql(u8, early_points, "abcdef"));
-}
-async fn early_amain() void {
- early_seq('b');
- const p = async early_another() catch @panic("out of memory");
- early_seq('d');
- early_final_result = await p;
- early_seq('e');
-}
-async fn early_another() i32 {
- early_seq('c');
- return 1234;
-}
-
-var early_points = [_]u8{0} ** "abcdef".len;
-var early_seq_index: usize = 0;
-
-fn early_seq(c: u8) void {
- early_points[early_seq_index] = c;
- early_seq_index += 1;
-}
-
-test "coro allocation failure" {
- var failing_allocator = std.debug.FailingAllocator.init(std.debug.global_allocator, 0);
- if (async<&failing_allocator.allocator> asyncFuncThatNeverGetsRun()) {
- @panic("expected allocation failure");
- } else |err| switch (err) {
- error.OutOfMemory => {},
- }
-}
-async fn asyncFuncThatNeverGetsRun() void {
- @panic("coro frame allocation should fail");
-}
-
-test "async function with dot syntax" {
- const S = struct {
- var y: i32 = 1;
- async fn foo() void {
- y += 1;
- suspend;
- }
- };
- const p = try async S.foo();
- cancel p;
- expect(S.y == 2);
-}
-
-test "async fn pointer in a struct field" {
- var data: i32 = 1;
- const Foo = struct {
- bar: async<*std.mem.Allocator> fn (*i32) void,
- };
- var foo = Foo{ .bar = simpleAsyncFn2 };
- const p = (async foo.bar(&data)) catch unreachable;
- expect(data == 2);
- cancel p;
- expect(data == 4);
-}
-async<*std.mem.Allocator> fn simpleAsyncFn2(y: *i32) void {
- defer y.* += 2;
- y.* += 1;
- suspend;
-}
-
-test "async fn with inferred error set" {
- const p = (async failing()) catch unreachable;
- resume p;
- cancel p;
-}
-
-async fn failing() !void {
- suspend;
- return error.Fail;
-}
-
-test "error return trace across suspend points - early return" {
- const p = nonFailing();
- resume p;
- const p2 = try async printTrace(p);
- cancel p2;
-}
-
-test "error return trace across suspend points - async return" {
- const p = nonFailing();
- const p2 = try async printTrace(p);
- resume p;
- cancel p2;
-}
-
-fn nonFailing() (promise->anyerror!void) {
- return async suspendThenFail() catch unreachable;
-}
-async fn suspendThenFail() anyerror!void {
- suspend;
- return error.Fail;
-}
-async fn printTrace(p: promise->(anyerror!void)) void {
- (await p) catch |e| {
- std.testing.expect(e == error.Fail);
- if (@errorReturnTrace()) |trace| {
- expect(trace.index == 1);
- } else switch (builtin.mode) {
- builtin.Mode.Debug, builtin.Mode.ReleaseSafe => @panic("expected return trace"),
- builtin.Mode.ReleaseFast, builtin.Mode.ReleaseSmall => {},
- }
- };
-}
-
-test "break from suspend" {
- var buf: [500]u8 = undefined;
- var a = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator;
- var my_result: i32 = 1;
- const p = try async testBreakFromSuspend(&my_result);
- cancel p;
- std.testing.expect(my_result == 2);
-}
-async fn testBreakFromSuspend(my_result: *i32) void {
- suspend {
- resume @handle();
- }
- my_result.* += 1;
- suspend;
- my_result.* += 1;
-}
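
The deleted cancel tests have no direct replacement; in the new model, the defer-ordering they exercised falls out of awaiting (or implicitly call-awaiting) every frame. A rough equivalent of the old "cancel forwards" check, under the new semantics (hypothetical, not part of this diff):

var frame2: anyframe = undefined;
var defer_f1 = false;
var defer_f2 = false;

fn f1() void {
    defer defer_f1 = true;
    f2(); // direct call of a suspending function: async call plus implicit await
}

fn f2() void {
    defer defer_f2 = true;
    suspend {
        frame2 = @frame();
    }
}

pub fn main() void {
    _ = async f1();
    resume frame2; // f2 returns, then f1 returns; both defers run, f2's first
}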
diff --git a/test/stage1/behavior/enum.zig b/test/stage1/behavior/enum.zig
index 51f4f0e196..d7d34aec88 100644
--- a/test/stage1/behavior/enum.zig
+++ b/test/stage1/behavior/enum.zig
@@ -982,3 +982,14 @@ test "enum literal casting to tagged union" {
else => @panic("fail"),
}
}
+
+test "enum with one member and custom tag type" {
+ const E = enum(u2) {
+ One,
+ };
+ expect(@enumToInt(E.One) == 0);
+ const E2 = enum(u2) {
+ One = 2,
+ };
+ expect(@enumToInt(E2.One) == 2);
+}
diff --git a/test/stage1/behavior/muladd.zig b/test/stage1/behavior/muladd.zig
index 143e6a93e4..d507f503f5 100644
--- a/test/stage1/behavior/muladd.zig
+++ b/test/stage1/behavior/muladd.zig
@@ -31,4 +31,4 @@ fn testMulAdd() void {
// var c: f128 = 6.25;
// expect(@mulAdd(f128, a, b, c) == 20);
//}
-}
\ No newline at end of file
+}
diff --git a/test/stage1/behavior/type_info.zig b/test/stage1/behavior/type_info.zig
index 4ae81aff20..b86ba27c13 100644
--- a/test/stage1/behavior/type_info.zig
+++ b/test/stage1/behavior/type_info.zig
@@ -116,21 +116,6 @@ fn testOptional() void {
expect(null_info.Optional.child == void);
}
-test "type info: promise info" {
- testPromise();
- comptime testPromise();
-}
-
-fn testPromise() void {
- const null_promise_info = @typeInfo(promise);
- expect(TypeId(null_promise_info) == TypeId.Promise);
- expect(null_promise_info.Promise.child == null);
-
- const promise_info = @typeInfo(promise->usize);
- expect(TypeId(promise_info) == TypeId.Promise);
- expect(promise_info.Promise.child.? == usize);
-}
-
test "type info: error set, error union info" {
testErrorSet();
comptime testErrorSet();
@@ -192,7 +177,7 @@ fn testUnion() void {
expect(TypeId(typeinfo_info) == TypeId.Union);
expect(typeinfo_info.Union.layout == TypeInfo.ContainerLayout.Auto);
expect(typeinfo_info.Union.tag_type.? == TypeId);
- expect(typeinfo_info.Union.fields.len == 25);
+ expect(typeinfo_info.Union.fields.len == 26);
expect(typeinfo_info.Union.fields[4].enum_field != null);
expect(typeinfo_info.Union.fields[4].enum_field.?.value == 4);
expect(typeinfo_info.Union.fields[4].field_type == @typeOf(@typeInfo(u8).Int));
@@ -265,7 +250,6 @@ fn testFunction() void {
expect(fn_info.Fn.args.len == 2);
expect(fn_info.Fn.is_var_args);
expect(fn_info.Fn.return_type == null);
- expect(fn_info.Fn.async_allocator_type == null);
const test_instance: TestStruct = undefined;
const bound_fn_info = @typeInfo(@typeOf(test_instance.foo));
@@ -296,6 +280,25 @@ fn testVector() void {
expect(vec_info.Vector.child == i32);
}
+test "type info: anyframe and anyframe->T" {
+ testAnyFrame();
+ comptime testAnyFrame();
+}
+
+fn testAnyFrame() void {
+ {
+ const anyframe_info = @typeInfo(anyframe->i32);
+ expect(TypeId(anyframe_info) == .AnyFrame);
+ expect(anyframe_info.AnyFrame.child.? == i32);
+ }
+
+ {
+ const anyframe_info = @typeInfo(anyframe);
+ expect(TypeId(anyframe_info) == .AnyFrame);
+ expect(anyframe_info.AnyFrame.child == null);
+ }
+}
+
test "type info: optional field unwrapping" {
const Struct = struct {
cdOffset: u32,
diff --git a/tools/process_headers.zig b/tools/process_headers.zig
index f191721316..805fc3d5f7 100644
--- a/tools/process_headers.zig
+++ b/tools/process_headers.zig
@@ -504,6 +504,9 @@ const Contents = struct {
}
};
+comptime {
+ @compileError("the behavior of std.AutoHashMap changed and []const u8 will be treated as a pointer. will need to update the hash maps to actually do some kind of hashing on the slices.");
+}
const HashToContents = std.AutoHashMap([]const u8, Contents);
const TargetToHash = std.HashMap(DestTarget, []const u8, DestTarget.hash, DestTarget.eql);
const PathTable = std.AutoHashMap([]const u8, *TargetToHash);