From 54e716afdcb0609cfc42229ad925e6dc9b07a66f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 27 Jun 2019 23:40:36 -0400 Subject: [PATCH 001/125] remove coroutines implementation and promise type --- src-self-hosted/main.zig | 16 +- src/all_types.hpp | 191 ---- src/analyze.cpp | 172 +--- src/analyze.hpp | 4 - src/ast_render.cpp | 19 +- src/codegen.cpp | 674 +----------- src/ir.cpp | 1539 +--------------------------- src/ir_print.cpp | 190 +--- src/parser.cpp | 32 - src/tokenizer.cpp | 2 - src/tokenizer.hpp | 1 - std/fmt.zig | 3 - std/hash_map.zig | 2 +- std/meta.zig | 4 +- std/testing.zig | 1 - test/stage1/behavior.zig | 6 +- test/stage1/behavior/type_info.zig | 20 +- 17 files changed, 62 insertions(+), 2814 deletions(-) diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig index 8917809533..63ac47147d 100644 --- a/src-self-hosted/main.zig +++ b/src-self-hosted/main.zig @@ -466,7 +466,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co comp.link_objects = link_objects; comp.start(); - const process_build_events_handle = try async processBuildEvents(comp, color); + // TODO const process_build_events_handle = try async processBuildEvents(comp, color); defer cancel process_build_events_handle; loop.run(); } @@ -578,7 +578,7 @@ fn cmdLibC(allocator: *Allocator, args: []const []const u8) !void { var zig_compiler = try ZigCompiler.init(&loop); defer zig_compiler.deinit(); - const handle = try async findLibCAsync(&zig_compiler); + // TODO const handle = try async findLibCAsync(&zig_compiler); defer cancel handle; loop.run(); @@ -663,12 +663,12 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void { defer loop.deinit(); var result: FmtError!void = undefined; - const main_handle = try async asyncFmtMainChecked( - &result, - &loop, - &flags, - color, - ); + // TODO const main_handle = try async asyncFmtMainChecked( + // TODO &result, + // TODO &loop, + // TODO &flags, + // TODO color, + // TODO ); defer 
cancel main_handle; loop.run(); return result; diff --git a/src/all_types.hpp b/src/all_types.hpp index a6b2bc51c3..7fe035ad1c 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -70,17 +70,6 @@ struct IrExecutable { Scope *begin_scope; ZigList tld_list; - IrInstruction *coro_handle; - IrInstruction *atomic_state_field_ptr; // this one is shared and in the promise - IrInstruction *coro_result_ptr_field_ptr; - IrInstruction *coro_result_field_ptr; - IrInstruction *await_handle_var_ptr; // this one is where we put the one we extracted from the promise - IrBasicBlock *coro_early_final; - IrBasicBlock *coro_normal_final; - IrBasicBlock *coro_suspend_block; - IrBasicBlock *coro_final_cleanup_block; - ZigVar *coro_allocator_var; - bool invalid; bool is_inline; bool is_generic_instantiation; @@ -489,7 +478,6 @@ enum NodeType { NodeTypeResume, NodeTypeAwaitExpr, NodeTypeSuspend, - NodeTypePromiseType, NodeTypeEnumLiteral, }; @@ -522,7 +510,6 @@ struct AstNodeFnProto { AstNode *section_expr; bool auto_err_set; - AstNode *async_allocator_type; }; struct AstNodeFnDef { @@ -657,7 +644,6 @@ struct AstNodeFnCallExpr { bool is_builtin; bool is_async; bool seen; // used by @compileLog - AstNode *async_allocator; }; struct AstNodeArrayAccessExpr { @@ -949,10 +935,6 @@ struct AstNodeSuspend { AstNode *block; }; -struct AstNodePromiseType { - AstNode *payload_type; // can be NULL -}; - struct AstNodeEnumLiteral { Token *period; Token *identifier; @@ -1018,7 +1000,6 @@ struct AstNode { AstNodeResumeExpr resume_expr; AstNodeAwaitExpr await_expr; AstNodeSuspend suspend; - AstNodePromiseType promise_type; AstNodeEnumLiteral enum_literal; } data; }; @@ -1047,7 +1028,6 @@ struct FnTypeId { bool is_var_args; CallingConvention cc; uint32_t alignment; - ZigType *async_allocator_type; }; uint32_t fn_type_id_hash(FnTypeId*); @@ -1241,11 +1221,6 @@ struct ZigTypeBoundFn { ZigType *fn_type; }; -struct ZigTypePromise { - // null if `promise` instead of `promise->T` - ZigType *result_type; 
-}; - struct ZigTypeVector { // The type must be a pointer, integer, or float ZigType *elem_type; @@ -1276,7 +1251,6 @@ enum ZigTypeId { ZigTypeIdBoundFn, ZigTypeIdArgTuple, ZigTypeIdOpaque, - ZigTypeIdPromise, ZigTypeIdVector, ZigTypeIdEnumLiteral, }; @@ -1314,7 +1288,6 @@ struct ZigType { ZigTypeUnion unionation; ZigTypeFn fn; ZigTypeBoundFn bound_fn; - ZigTypePromise promise; ZigTypeVector vector; ZigTypeOpaque opaque; } data; @@ -1322,8 +1295,6 @@ struct ZigType { // use these fields to make sure we don't duplicate type table entries for the same type ZigType *pointer_parent[2]; // [0 - mut, 1 - const] ZigType *optional_parent; - ZigType *promise_parent; - ZigType *promise_frame_parent; // If we generate a constant name value for this type, we memoize it here. // The type of this is array ConstExprValue *cached_const_name_val; @@ -1709,20 +1680,6 @@ struct CodeGen { LLVMValueRef trap_fn_val; LLVMValueRef return_address_fn_val; LLVMValueRef frame_address_fn_val; - LLVMValueRef coro_destroy_fn_val; - LLVMValueRef coro_id_fn_val; - LLVMValueRef coro_alloc_fn_val; - LLVMValueRef coro_size_fn_val; - LLVMValueRef coro_begin_fn_val; - LLVMValueRef coro_suspend_fn_val; - LLVMValueRef coro_end_fn_val; - LLVMValueRef coro_free_fn_val; - LLVMValueRef coro_resume_fn_val; - LLVMValueRef coro_save_fn_val; - LLVMValueRef coro_promise_fn_val; - LLVMValueRef coro_alloc_helper_fn_val; - LLVMValueRef coro_frame_fn_val; - LLVMValueRef merge_err_ret_traces_fn_val; LLVMValueRef add_error_return_trace_addr_fn_val; LLVMValueRef stacksave_fn_val; LLVMValueRef stackrestore_fn_val; @@ -1797,7 +1754,6 @@ struct CodeGen { ZigType *entry_var; ZigType *entry_global_error_set; ZigType *entry_arg_tuple; - ZigType *entry_promise; ZigType *entry_enum_literal; } builtin_types; ZigType *align_amt_type; @@ -1985,7 +1941,6 @@ enum ScopeId { ScopeIdSuspend, ScopeIdFnDef, ScopeIdCompTime, - ScopeIdCoroPrelude, ScopeIdRuntime, }; @@ -2128,12 +2083,6 @@ struct ScopeFnDef { ZigFn *fn_entry; }; -// This 
scope is created to indicate that the code in the scope -// is auto-generated coroutine prelude stuff. -struct ScopeCoroPrelude { - Scope base; -}; - // synchronized with code in define_builtin_compile_vars enum AtomicOrder { AtomicOrderUnordered, @@ -2231,7 +2180,6 @@ enum IrInstructionId { IrInstructionIdSetRuntimeSafety, IrInstructionIdSetFloatMode, IrInstructionIdArrayType, - IrInstructionIdPromiseType, IrInstructionIdSliceType, IrInstructionIdGlobalAsm, IrInstructionIdAsm, @@ -2329,26 +2277,10 @@ enum IrInstructionId { IrInstructionIdErrorReturnTrace, IrInstructionIdErrorUnion, IrInstructionIdCancel, - IrInstructionIdGetImplicitAllocator, - IrInstructionIdCoroId, - IrInstructionIdCoroAlloc, - IrInstructionIdCoroSize, - IrInstructionIdCoroBegin, - IrInstructionIdCoroAllocFail, - IrInstructionIdCoroSuspend, - IrInstructionIdCoroEnd, - IrInstructionIdCoroFree, - IrInstructionIdCoroResume, - IrInstructionIdCoroSave, - IrInstructionIdCoroPromise, - IrInstructionIdCoroAllocHelper, IrInstructionIdAtomicRmw, IrInstructionIdAtomicLoad, - IrInstructionIdPromiseResultType, - IrInstructionIdAwaitBookkeeping, IrInstructionIdSaveErrRetAddr, IrInstructionIdAddImplicitReturnType, - IrInstructionIdMergeErrRetTraces, IrInstructionIdMarkErrRetTracePtr, IrInstructionIdErrSetCast, IrInstructionIdToBytes, @@ -2606,7 +2538,6 @@ struct IrInstructionCallSrc { IrInstruction **args; ResultLoc *result_loc; - IrInstruction *async_allocator; IrInstruction *new_stack; FnInline fn_inline; bool is_async; @@ -2622,7 +2553,6 @@ struct IrInstructionCallGen { IrInstruction **args; IrInstruction *result_loc; - IrInstruction *async_allocator; IrInstruction *new_stack; FnInline fn_inline; bool is_async; @@ -2743,12 +2673,6 @@ struct IrInstructionPtrType { bool is_allow_zero; }; -struct IrInstructionPromiseType { - IrInstruction base; - - IrInstruction *payload_type; -}; - struct IrInstructionSliceType { IrInstruction base; @@ -3178,7 +3102,6 @@ struct IrInstructionFnProto { IrInstruction 
**param_types; IrInstruction *align_value; IrInstruction *return_type; - IrInstruction *async_allocator_type_value; bool is_var_args; }; @@ -3414,89 +3337,6 @@ struct IrInstructionCancel { IrInstruction *target; }; -enum ImplicitAllocatorId { - ImplicitAllocatorIdArg, - ImplicitAllocatorIdLocalVar, -}; - -struct IrInstructionGetImplicitAllocator { - IrInstruction base; - - ImplicitAllocatorId id; -}; - -struct IrInstructionCoroId { - IrInstruction base; - - IrInstruction *promise_ptr; -}; - -struct IrInstructionCoroAlloc { - IrInstruction base; - - IrInstruction *coro_id; -}; - -struct IrInstructionCoroSize { - IrInstruction base; -}; - -struct IrInstructionCoroBegin { - IrInstruction base; - - IrInstruction *coro_id; - IrInstruction *coro_mem_ptr; -}; - -struct IrInstructionCoroAllocFail { - IrInstruction base; - - IrInstruction *err_val; -}; - -struct IrInstructionCoroSuspend { - IrInstruction base; - - IrInstruction *save_point; - IrInstruction *is_final; -}; - -struct IrInstructionCoroEnd { - IrInstruction base; -}; - -struct IrInstructionCoroFree { - IrInstruction base; - - IrInstruction *coro_id; - IrInstruction *coro_handle; -}; - -struct IrInstructionCoroResume { - IrInstruction base; - - IrInstruction *awaiter_handle; -}; - -struct IrInstructionCoroSave { - IrInstruction base; - - IrInstruction *coro_handle; -}; - -struct IrInstructionCoroPromise { - IrInstruction base; - - IrInstruction *coro_handle; -}; - -struct IrInstructionCoroAllocHelper { - IrInstruction base; - - IrInstruction *realloc_fn; - IrInstruction *coro_size; -}; - struct IrInstructionAtomicRmw { IrInstruction base; @@ -3518,18 +3358,6 @@ struct IrInstructionAtomicLoad { AtomicOrder resolved_ordering; }; -struct IrInstructionPromiseResultType { - IrInstruction base; - - IrInstruction *promise_type; -}; - -struct IrInstructionAwaitBookkeeping { - IrInstruction base; - - IrInstruction *promise_result_type; -}; - struct IrInstructionSaveErrRetAddr { IrInstruction base; }; @@ -3540,14 +3368,6 
@@ struct IrInstructionAddImplicitReturnType { IrInstruction *value; }; -struct IrInstructionMergeErrRetTraces { - IrInstruction base; - - IrInstruction *coro_promise_ptr; - IrInstruction *src_err_ret_trace_ptr; - IrInstruction *dest_err_ret_trace_ptr; -}; - struct IrInstructionMarkErrRetTracePtr { IrInstruction base; @@ -3777,17 +3597,6 @@ static const size_t err_union_payload_index = 1; // MUST BE A POWER OF TWO. static const size_t stack_trace_ptr_count = 32; -// these belong to the async function -#define RETURN_ADDRESSES_FIELD_NAME "return_addresses" -#define ERR_RET_TRACE_FIELD_NAME "err_ret_trace" -#define RESULT_FIELD_NAME "result" -#define ASYNC_REALLOC_FIELD_NAME "reallocFn" -#define ASYNC_SHRINK_FIELD_NAME "shrinkFn" -#define ATOMIC_STATE_FIELD_NAME "atomic_state" -// these point to data belonging to the awaiter -#define ERR_RET_TRACE_PTR_FIELD_NAME "err_ret_trace_ptr" -#define RESULT_PTR_FIELD_NAME "result_ptr" - #define NAMESPACE_SEP_CHAR '.' #define NAMESPACE_SEP_STR "." diff --git a/src/analyze.cpp b/src/analyze.cpp index de4d64f5d6..15e12caa8d 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -188,12 +188,6 @@ Scope *create_comptime_scope(CodeGen *g, AstNode *node, Scope *parent) { return &scope->base; } -Scope *create_coro_prelude_scope(CodeGen *g, AstNode *node, Scope *parent) { - ScopeCoroPrelude *scope = allocate(1); - init_scope(g, &scope->base, ScopeIdCoroPrelude, node, parent); - return &scope->base; -} - ZigType *get_scope_import(Scope *scope) { while (scope) { if (scope->id == ScopeIdDecls) { @@ -254,7 +248,6 @@ AstNode *type_decl_node(ZigType *type_entry) { case ZigTypeIdFn: case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: - case ZigTypeIdPromise: case ZigTypeIdVector: return nullptr; } @@ -307,7 +300,6 @@ bool type_is_resolved(ZigType *type_entry, ResolveStatus status) { case ZigTypeIdFn: case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: - case ZigTypeIdPromise: case ZigTypeIdVector: return true; } @@ -341,31 +333,6 @@ ZigType 
*get_smallest_unsigned_int_type(CodeGen *g, uint64_t x) { return get_int_type(g, false, bits_needed_for_unsigned(x)); } -ZigType *get_promise_type(CodeGen *g, ZigType *result_type) { - if (result_type != nullptr && result_type->promise_parent != nullptr) { - return result_type->promise_parent; - } else if (result_type == nullptr && g->builtin_types.entry_promise != nullptr) { - return g->builtin_types.entry_promise; - } - - ZigType *entry = new_type_table_entry(ZigTypeIdPromise); - entry->abi_size = g->builtin_types.entry_usize->abi_size; - entry->size_in_bits = g->builtin_types.entry_usize->size_in_bits; - entry->abi_align = g->builtin_types.entry_usize->abi_align; - entry->data.promise.result_type = result_type; - buf_init_from_str(&entry->name, "promise"); - if (result_type != nullptr) { - buf_appendf(&entry->name, "->%s", buf_ptr(&result_type->name)); - } - - if (result_type != nullptr) { - result_type->promise_parent = entry; - } else if (result_type == nullptr) { - g->builtin_types.entry_promise = entry; - } - return entry; -} - static const char *ptr_len_to_star_str(PtrLen ptr_len) { switch (ptr_len) { case PtrLenSingle: @@ -490,42 +457,6 @@ ZigType *get_pointer_to_type(CodeGen *g, ZigType *child_type, bool is_const) { return get_pointer_to_type_extra(g, child_type, is_const, false, PtrLenSingle, 0, 0, 0, false); } -ZigType *get_promise_frame_type(CodeGen *g, ZigType *return_type) { - if (return_type->promise_frame_parent != nullptr) { - return return_type->promise_frame_parent; - } - - ZigType *atomic_state_type = g->builtin_types.entry_usize; - ZigType *result_ptr_type = get_pointer_to_type(g, return_type, false); - - ZigList field_names = {}; - field_names.append(ATOMIC_STATE_FIELD_NAME); - field_names.append(RESULT_FIELD_NAME); - field_names.append(RESULT_PTR_FIELD_NAME); - if (g->have_err_ret_tracing) { - field_names.append(ERR_RET_TRACE_PTR_FIELD_NAME); - field_names.append(ERR_RET_TRACE_FIELD_NAME); - field_names.append(RETURN_ADDRESSES_FIELD_NAME); - 
} - - ZigList field_types = {}; - field_types.append(atomic_state_type); - field_types.append(return_type); - field_types.append(result_ptr_type); - if (g->have_err_ret_tracing) { - field_types.append(get_ptr_to_stack_trace_type(g)); - field_types.append(g->stack_trace_type); - field_types.append(get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count)); - } - - assert(field_names.length == field_types.length); - Buf *name = buf_sprintf("AsyncFramePromise(%s)", buf_ptr(&return_type->name)); - ZigType *entry = get_struct_type(g, buf_ptr(name), field_names.items, field_types.items, field_names.length); - - return_type->promise_frame_parent = entry; - return entry; -} - ZigType *get_optional_type(CodeGen *g, ZigType *child_type) { if (child_type->optional_parent != nullptr) { return child_type->optional_parent; @@ -879,13 +810,8 @@ ZigType *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) { // populate the name of the type buf_resize(&fn_type->name, 0); - if (fn_type->data.fn.fn_type_id.cc == CallingConventionAsync) { - assert(fn_type_id->async_allocator_type != nullptr); - buf_appendf(&fn_type->name, "async<%s> ", buf_ptr(&fn_type_id->async_allocator_type->name)); - } else { - const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc); - buf_appendf(&fn_type->name, "%s", cc_str); - } + const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc); + buf_appendf(&fn_type->name, "%s", cc_str); buf_appendf(&fn_type->name, "fn("); for (size_t i = 0; i < fn_type_id->param_count; i += 1) { FnTypeParamInfo *param_info = &fn_type_id->param_info[i]; @@ -998,14 +924,8 @@ ZigType *analyze_type_expr(CodeGen *g, Scope *scope, AstNode *node) { ZigType *get_generic_fn_type(CodeGen *g, FnTypeId *fn_type_id) { ZigType *fn_type = new_type_table_entry(ZigTypeIdFn); buf_resize(&fn_type->name, 0); - if (fn_type->data.fn.fn_type_id.cc == CallingConventionAsync) { - const char *async_allocator_type_str = 
(fn_type->data.fn.fn_type_id.async_allocator_type == nullptr) ? - "var" : buf_ptr(&fn_type_id->async_allocator_type->name); - buf_appendf(&fn_type->name, "async(%s) ", async_allocator_type_str); - } else { - const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc); - buf_appendf(&fn_type->name, "%s", cc_str); - } + const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc); + buf_appendf(&fn_type->name, "%s", cc_str); buf_appendf(&fn_type->name, "fn("); size_t i = 0; for (; i < fn_type_id->next_param_index; i += 1) { @@ -1119,7 +1039,6 @@ static Error emit_error_unless_type_allowed_in_packed_struct(CodeGen *g, ZigType case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: case ZigTypeIdOpaque: - case ZigTypeIdPromise: add_node_error(g, source_node, buf_sprintf("type '%s' not allowed in packed struct; no guaranteed in-memory representation", buf_ptr(&type_entry->name))); @@ -1207,7 +1126,6 @@ bool type_allowed_in_extern(CodeGen *g, ZigType *type_entry) { case ZigTypeIdErrorSet: case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: - case ZigTypeIdPromise: case ZigTypeIdVoid: return false; case ZigTypeIdOpaque: @@ -1378,7 +1296,6 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc case ZigTypeIdEnum: case ZigTypeIdUnion: case ZigTypeIdFn: - case ZigTypeIdPromise: case ZigTypeIdVector: switch (type_requires_comptime(g, type_entry)) { case ReqCompTimeNo: @@ -1474,7 +1391,6 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc case ZigTypeIdEnum: case ZigTypeIdUnion: case ZigTypeIdFn: - case ZigTypeIdPromise: case ZigTypeIdVector: switch (type_requires_comptime(g, fn_type_id.return_type)) { case ReqCompTimeInvalid: @@ -1487,16 +1403,6 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc break; } - if (fn_type_id.cc == CallingConventionAsync) { - if (fn_proto->async_allocator_type == nullptr) { - return get_generic_fn_type(g, &fn_type_id); - } 
- fn_type_id.async_allocator_type = analyze_type_expr(g, child_scope, fn_proto->async_allocator_type); - if (type_is_invalid(fn_type_id.async_allocator_type)) { - return g->builtin_types.entry_invalid; - } - } - return get_fn_type(g, &fn_type_id); } @@ -3039,7 +2945,6 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) { case NodeTypeResume: case NodeTypeAwaitExpr: case NodeTypeSuspend: - case NodeTypePromiseType: case NodeTypeEnumLiteral: zig_unreachable(); } @@ -3091,7 +2996,6 @@ ZigType *validate_var_type(CodeGen *g, AstNode *source_node, ZigType *type_entry case ZigTypeIdUnion: case ZigTypeIdFn: case ZigTypeIdBoundFn: - case ZigTypeIdPromise: case ZigTypeIdVector: return type_entry; } @@ -3591,7 +3495,6 @@ bool is_container(ZigType *type_entry) { case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: case ZigTypeIdOpaque: - case ZigTypeIdPromise: case ZigTypeIdVector: return false; } @@ -3648,7 +3551,6 @@ Error resolve_container_type(CodeGen *g, ZigType *type_entry) { case ZigTypeIdInvalid: case ZigTypeIdArgTuple: case ZigTypeIdOpaque: - case ZigTypeIdPromise: case ZigTypeIdVector: zig_unreachable(); } @@ -3658,13 +3560,11 @@ Error resolve_container_type(CodeGen *g, ZigType *type_entry) { ZigType *get_src_ptr_type(ZigType *type) { if (type->id == ZigTypeIdPointer) return type; if (type->id == ZigTypeIdFn) return type; - if (type->id == ZigTypeIdPromise) return type; if (type->id == ZigTypeIdOptional) { if (type->data.maybe.child_type->id == ZigTypeIdPointer) { return type->data.maybe.child_type->data.pointer.allow_zero ? nullptr : type->data.maybe.child_type; } if (type->data.maybe.child_type->id == ZigTypeIdFn) return type->data.maybe.child_type; - if (type->data.maybe.child_type->id == ZigTypeIdPromise) return type->data.maybe.child_type; } return nullptr; } @@ -3691,8 +3591,6 @@ uint32_t get_ptr_align(CodeGen *g, ZigType *type) { // when getting the alignment of `?extern fn() void`. 
// See http://lists.llvm.org/pipermail/llvm-dev/2018-September/126142.html return (ptr_type->data.fn.fn_type_id.alignment == 0) ? 1 : ptr_type->data.fn.fn_type_id.alignment; - } else if (ptr_type->id == ZigTypeIdPromise) { - return get_coro_frame_align_bytes(g); } else { zig_unreachable(); } @@ -3704,8 +3602,6 @@ bool get_ptr_const(ZigType *type) { return ptr_type->data.pointer.is_const; } else if (ptr_type->id == ZigTypeIdFn) { return true; - } else if (ptr_type->id == ZigTypeIdPromise) { - return true; } else { zig_unreachable(); } @@ -4102,7 +3998,6 @@ bool handle_is_ptr(ZigType *type_entry) { case ZigTypeIdErrorSet: case ZigTypeIdFn: case ZigTypeIdEnum: - case ZigTypeIdPromise: case ZigTypeIdVector: return false; case ZigTypeIdArray: @@ -4142,7 +4037,6 @@ uint32_t fn_type_id_hash(FnTypeId *id) { result += ((uint32_t)(id->cc)) * (uint32_t)3349388391; result += id->is_var_args ? (uint32_t)1931444534 : 0; result += hash_ptr(id->return_type); - result += hash_ptr(id->async_allocator_type); result += id->alignment * 0xd3b3f3e2; for (size_t i = 0; i < id->param_count; i += 1) { FnTypeParamInfo *info = &id->param_info[i]; @@ -4157,8 +4051,7 @@ bool fn_type_id_eql(FnTypeId *a, FnTypeId *b) { a->return_type != b->return_type || a->is_var_args != b->is_var_args || a->param_count != b->param_count || - a->alignment != b->alignment || - a->async_allocator_type != b->async_allocator_type) + a->alignment != b->alignment) { return false; } @@ -4320,9 +4213,6 @@ static uint32_t hash_const_val(ConstExprValue *const_val) { return 3677364617 ^ hash_ptr(const_val->data.x_ptr.data.fn.fn_entry); case ZigTypeIdPointer: return hash_const_val_ptr(const_val); - case ZigTypeIdPromise: - // TODO better hashing algorithm - return 223048345; case ZigTypeIdUndefined: return 162837799; case ZigTypeIdNull: @@ -4418,7 +4308,6 @@ static bool can_mutate_comptime_var_state(ConstExprValue *value) { case ZigTypeIdBoundFn: case ZigTypeIdFn: case ZigTypeIdOpaque: - case ZigTypeIdPromise: case 
ZigTypeIdErrorSet: case ZigTypeIdEnum: return false; @@ -4488,7 +4377,6 @@ static bool return_type_is_cacheable(ZigType *return_type) { case ZigTypeIdBoundFn: case ZigTypeIdFn: case ZigTypeIdOpaque: - case ZigTypeIdPromise: case ZigTypeIdErrorSet: case ZigTypeIdEnum: case ZigTypeIdPointer: @@ -4623,7 +4511,6 @@ OnePossibleValue type_has_one_possible_value(CodeGen *g, ZigType *type_entry) { case ZigTypeIdFn: case ZigTypeIdBool: case ZigTypeIdFloat: - case ZigTypeIdPromise: case ZigTypeIdErrorUnion: return OnePossibleValueNo; case ZigTypeIdUndefined: @@ -4712,7 +4599,6 @@ ReqCompTime type_requires_comptime(CodeGen *g, ZigType *type_entry) { case ZigTypeIdFloat: case ZigTypeIdVoid: case ZigTypeIdUnreachable: - case ZigTypeIdPromise: return ReqCompTimeNo; } zig_unreachable(); @@ -5278,7 +5164,6 @@ bool const_values_equal(CodeGen *g, ConstExprValue *a, ConstExprValue *b) { case ZigTypeIdBoundFn: case ZigTypeIdInvalid: case ZigTypeIdUnreachable: - case ZigTypeIdPromise: zig_unreachable(); } zig_unreachable(); @@ -5611,8 +5496,6 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) { buf_appendf(buf, "(args value)"); return; } - case ZigTypeIdPromise: - zig_unreachable(); } zig_unreachable(); } @@ -5659,7 +5542,6 @@ uint32_t type_id_hash(TypeId x) { case ZigTypeIdFn: case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: - case ZigTypeIdPromise: zig_unreachable(); case ZigTypeIdErrorUnion: return hash_ptr(x.data.error_union.err_set_type) ^ hash_ptr(x.data.error_union.payload_type); @@ -5701,7 +5583,6 @@ bool type_id_eql(TypeId a, TypeId b) { case ZigTypeIdUndefined: case ZigTypeIdNull: case ZigTypeIdOptional: - case ZigTypeIdPromise: case ZigTypeIdErrorSet: case ZigTypeIdEnum: case ZigTypeIdUnion: @@ -5874,7 +5755,6 @@ static const ZigTypeId all_type_ids[] = { ZigTypeIdBoundFn, ZigTypeIdArgTuple, ZigTypeIdOpaque, - ZigTypeIdPromise, ZigTypeIdVector, ZigTypeIdEnumLiteral, }; @@ -5938,12 +5818,10 @@ size_t type_id_index(ZigType *entry) { return 20; case 
ZigTypeIdOpaque: return 21; - case ZigTypeIdPromise: - return 22; case ZigTypeIdVector: - return 23; + return 22; case ZigTypeIdEnumLiteral: - return 24; + return 23; } zig_unreachable(); } @@ -5998,8 +5876,6 @@ const char *type_id_name(ZigTypeId id) { return "ArgTuple"; case ZigTypeIdOpaque: return "Opaque"; - case ZigTypeIdPromise: - return "Promise"; case ZigTypeIdVector: return "Vector"; } @@ -6066,13 +5942,6 @@ bool type_is_global_error_set(ZigType *err_set_type) { return err_set_type->data.error_set.err_count == UINT32_MAX; } -uint32_t get_coro_frame_align_bytes(CodeGen *g) { - uint32_t a = g->pointer_size_bytes * 2; - // promises have at least alignment 8 so that we can have 3 extra bits when doing atomicrmw - if (a < 8) a = 8; - return a; -} - bool type_can_fail(ZigType *type_entry) { return type_entry->id == ZigTypeIdErrorUnion || type_entry->id == ZigTypeIdErrorSet; } @@ -7105,19 +6974,13 @@ static void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type) { param_di_types.append(get_llvm_di_type(g, gen_type)); } if (is_async) { - { - // async allocator param - ZigType *gen_type = fn_type_id->async_allocator_type; - gen_param_types.append(get_llvm_type(g, gen_type)); - param_di_types.append(get_llvm_di_type(g, gen_type)); - } - - { - // error code pointer - ZigType *gen_type = get_pointer_to_type(g, g->builtin_types.entry_global_error_set, false); - gen_param_types.append(get_llvm_type(g, gen_type)); - param_di_types.append(get_llvm_di_type(g, gen_type)); - } + // coroutine frame pointer + // TODO if we can make this typed a little more it will be better for + // debug symbols. + // TODO do we need to make this aligned more? 
+ ZigType *void_star = get_pointer_to_type(g, g->builtin_types.entry_c_void, false); + gen_param_types.append(get_llvm_type(g, void_star)); + param_di_types.append(get_llvm_di_type(g, void_star)); } fn_type->data.fn.gen_param_info = allocate(fn_type_id->param_count); @@ -7224,13 +7087,6 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r return resolve_llvm_types_union(g, type, wanted_resolve_status); case ZigTypeIdPointer: return resolve_llvm_types_pointer(g, type); - case ZigTypeIdPromise: { - if (type->llvm_di_type != nullptr) return; - ZigType *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, false); - type->llvm_type = get_llvm_type(g, u8_ptr_type); - type->llvm_di_type = get_llvm_di_type(g, u8_ptr_type); - return; - } case ZigTypeIdInt: return resolve_llvm_types_integer(g, type); case ZigTypeIdOptional: diff --git a/src/analyze.hpp b/src/analyze.hpp index b9e9f2df7d..fbbdece8ba 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -39,8 +39,6 @@ ZigType *get_bound_fn_type(CodeGen *g, ZigFn *fn_entry); ZigType *get_opaque_type(CodeGen *g, Scope *scope, AstNode *source_node, const char *full_name, Buf *bare_name); ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_names[], ZigType *field_types[], size_t field_count); -ZigType *get_promise_type(CodeGen *g, ZigType *result_type); -ZigType *get_promise_frame_type(CodeGen *g, ZigType *return_type); ZigType *get_test_fn_type(CodeGen *g); bool handle_is_ptr(ZigType *type_entry); @@ -117,7 +115,6 @@ ScopeLoop *create_loop_scope(CodeGen *g, AstNode *node, Scope *parent); ScopeSuspend *create_suspend_scope(CodeGen *g, AstNode *node, Scope *parent); ScopeFnDef *create_fndef_scope(CodeGen *g, AstNode *node, Scope *parent, ZigFn *fn_entry); Scope *create_comptime_scope(CodeGen *g, AstNode *node, Scope *parent); -Scope *create_coro_prelude_scope(CodeGen *g, AstNode *node, Scope *parent); Scope *create_runtime_scope(CodeGen *g, AstNode *node, Scope 
*parent, IrInstruction *is_comptime); void init_const_str_lit(CodeGen *g, ConstExprValue *const_val, Buf *str); @@ -204,7 +201,6 @@ bool resolve_inferred_error_set(CodeGen *g, ZigType *err_set_type, AstNode *sour ZigType *get_auto_err_set_type(CodeGen *g, ZigFn *fn_entry); -uint32_t get_coro_frame_align_bytes(CodeGen *g); bool fn_type_can_fail(FnTypeId *fn_type_id); bool type_can_fail(ZigType *type_entry); bool fn_eval_cacheable(Scope *scope, ZigType *return_type); diff --git a/src/ast_render.cpp b/src/ast_render.cpp index fe131ab65f..d97f58fdec 100644 --- a/src/ast_render.cpp +++ b/src/ast_render.cpp @@ -257,8 +257,6 @@ static const char *node_type_str(NodeType node_type) { return "AwaitExpr"; case NodeTypeSuspend: return "Suspend"; - case NodeTypePromiseType: - return "PromiseType"; case NodeTypePointerType: return "PointerType"; case NodeTypeEnumLiteral: @@ -692,13 +690,7 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) { fprintf(ar->f, "@"); } if (node->data.fn_call_expr.is_async) { - fprintf(ar->f, "async"); - if (node->data.fn_call_expr.async_allocator != nullptr) { - fprintf(ar->f, "<"); - render_node_extra(ar, node->data.fn_call_expr.async_allocator, true); - fprintf(ar->f, ">"); - } - fprintf(ar->f, " "); + fprintf(ar->f, "async "); } AstNode *fn_ref_node = node->data.fn_call_expr.fn_ref_expr; bool grouped = (fn_ref_node->type != NodeTypePrefixOpExpr && fn_ref_node->type != NodeTypePointerType); @@ -855,15 +847,6 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) { render_node_ungrouped(ar, node->data.inferred_array_type.child_type); break; } - case NodeTypePromiseType: - { - fprintf(ar->f, "promise"); - if (node->data.promise_type.payload_type != nullptr) { - fprintf(ar->f, "->"); - render_node_grouped(ar, node->data.promise_type.payload_type); - } - break; - } case NodeTypeErrorType: fprintf(ar->f, "anyerror"); break; diff --git a/src/codegen.cpp b/src/codegen.cpp index 188c5ccc8d..4cc99b39a8 100644 
--- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -721,7 +721,6 @@ static ZigLLVMDIScope *get_di_scope(CodeGen *g, Scope *scope) { case ScopeIdLoop: case ScopeIdSuspend: case ScopeIdCompTime: - case ScopeIdCoroPrelude: case ScopeIdRuntime: return get_di_scope(g, scope->parent); } @@ -1083,177 +1082,6 @@ static LLVMValueRef get_write_register_fn_val(CodeGen *g) { return g->write_register_fn_val; } -static LLVMValueRef get_coro_destroy_fn_val(CodeGen *g) { - if (g->coro_destroy_fn_val) - return g->coro_destroy_fn_val; - - LLVMTypeRef param_types[] = { - LLVMPointerType(LLVMInt8Type(), 0), - }; - LLVMTypeRef fn_type = LLVMFunctionType(LLVMVoidType(), param_types, 1, false); - Buf *name = buf_sprintf("llvm.coro.destroy"); - g->coro_destroy_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); - assert(LLVMGetIntrinsicID(g->coro_destroy_fn_val)); - - return g->coro_destroy_fn_val; -} - -static LLVMValueRef get_coro_id_fn_val(CodeGen *g) { - if (g->coro_id_fn_val) - return g->coro_id_fn_val; - - LLVMTypeRef param_types[] = { - LLVMInt32Type(), - LLVMPointerType(LLVMInt8Type(), 0), - LLVMPointerType(LLVMInt8Type(), 0), - LLVMPointerType(LLVMInt8Type(), 0), - }; - LLVMTypeRef fn_type = LLVMFunctionType(ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()), param_types, 4, false); - Buf *name = buf_sprintf("llvm.coro.id"); - g->coro_id_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); - assert(LLVMGetIntrinsicID(g->coro_id_fn_val)); - - return g->coro_id_fn_val; -} - -static LLVMValueRef get_coro_alloc_fn_val(CodeGen *g) { - if (g->coro_alloc_fn_val) - return g->coro_alloc_fn_val; - - LLVMTypeRef param_types[] = { - ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()), - }; - LLVMTypeRef fn_type = LLVMFunctionType(LLVMInt1Type(), param_types, 1, false); - Buf *name = buf_sprintf("llvm.coro.alloc"); - g->coro_alloc_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); - assert(LLVMGetIntrinsicID(g->coro_alloc_fn_val)); - - return g->coro_alloc_fn_val; -} - 
-static LLVMValueRef get_coro_size_fn_val(CodeGen *g) { - if (g->coro_size_fn_val) - return g->coro_size_fn_val; - - LLVMTypeRef fn_type = LLVMFunctionType(g->builtin_types.entry_usize->llvm_type, nullptr, 0, false); - Buf *name = buf_sprintf("llvm.coro.size.i%d", g->pointer_size_bytes * 8); - g->coro_size_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); - assert(LLVMGetIntrinsicID(g->coro_size_fn_val)); - - return g->coro_size_fn_val; -} - -static LLVMValueRef get_coro_begin_fn_val(CodeGen *g) { - if (g->coro_begin_fn_val) - return g->coro_begin_fn_val; - - LLVMTypeRef param_types[] = { - ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()), - LLVMPointerType(LLVMInt8Type(), 0), - }; - LLVMTypeRef fn_type = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0), param_types, 2, false); - Buf *name = buf_sprintf("llvm.coro.begin"); - g->coro_begin_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); - assert(LLVMGetIntrinsicID(g->coro_begin_fn_val)); - - return g->coro_begin_fn_val; -} - -static LLVMValueRef get_coro_suspend_fn_val(CodeGen *g) { - if (g->coro_suspend_fn_val) - return g->coro_suspend_fn_val; - - LLVMTypeRef param_types[] = { - ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()), - LLVMInt1Type(), - }; - LLVMTypeRef fn_type = LLVMFunctionType(LLVMInt8Type(), param_types, 2, false); - Buf *name = buf_sprintf("llvm.coro.suspend"); - g->coro_suspend_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); - assert(LLVMGetIntrinsicID(g->coro_suspend_fn_val)); - - return g->coro_suspend_fn_val; -} - -static LLVMValueRef get_coro_end_fn_val(CodeGen *g) { - if (g->coro_end_fn_val) - return g->coro_end_fn_val; - - LLVMTypeRef param_types[] = { - LLVMPointerType(LLVMInt8Type(), 0), - LLVMInt1Type(), - }; - LLVMTypeRef fn_type = LLVMFunctionType(LLVMInt1Type(), param_types, 2, false); - Buf *name = buf_sprintf("llvm.coro.end"); - g->coro_end_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); - 
assert(LLVMGetIntrinsicID(g->coro_end_fn_val)); - - return g->coro_end_fn_val; -} - -static LLVMValueRef get_coro_free_fn_val(CodeGen *g) { - if (g->coro_free_fn_val) - return g->coro_free_fn_val; - - LLVMTypeRef param_types[] = { - ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()), - LLVMPointerType(LLVMInt8Type(), 0), - }; - LLVMTypeRef fn_type = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0), param_types, 2, false); - Buf *name = buf_sprintf("llvm.coro.free"); - g->coro_free_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); - assert(LLVMGetIntrinsicID(g->coro_free_fn_val)); - - return g->coro_free_fn_val; -} - -static LLVMValueRef get_coro_resume_fn_val(CodeGen *g) { - if (g->coro_resume_fn_val) - return g->coro_resume_fn_val; - - LLVMTypeRef param_types[] = { - LLVMPointerType(LLVMInt8Type(), 0), - }; - LLVMTypeRef fn_type = LLVMFunctionType(LLVMVoidType(), param_types, 1, false); - Buf *name = buf_sprintf("llvm.coro.resume"); - g->coro_resume_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); - assert(LLVMGetIntrinsicID(g->coro_resume_fn_val)); - - return g->coro_resume_fn_val; -} - -static LLVMValueRef get_coro_save_fn_val(CodeGen *g) { - if (g->coro_save_fn_val) - return g->coro_save_fn_val; - - LLVMTypeRef param_types[] = { - LLVMPointerType(LLVMInt8Type(), 0), - }; - LLVMTypeRef fn_type = LLVMFunctionType(ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()), param_types, 1, false); - Buf *name = buf_sprintf("llvm.coro.save"); - g->coro_save_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); - assert(LLVMGetIntrinsicID(g->coro_save_fn_val)); - - return g->coro_save_fn_val; -} - -static LLVMValueRef get_coro_promise_fn_val(CodeGen *g) { - if (g->coro_promise_fn_val) - return g->coro_promise_fn_val; - - LLVMTypeRef param_types[] = { - LLVMPointerType(LLVMInt8Type(), 0), - LLVMInt32Type(), - LLVMInt1Type(), - }; - LLVMTypeRef fn_type = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0), param_types, 3, false); - Buf *name = 
buf_sprintf("llvm.coro.promise"); - g->coro_promise_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); - assert(LLVMGetIntrinsicID(g->coro_promise_fn_val)); - - return g->coro_promise_fn_val; -} - static LLVMValueRef get_return_address_fn_val(CodeGen *g) { if (g->return_address_fn_val) return g->return_address_fn_val; @@ -1346,140 +1174,6 @@ static LLVMValueRef get_add_error_return_trace_addr_fn(CodeGen *g) { return fn_val; } -static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) { - if (g->merge_err_ret_traces_fn_val) - return g->merge_err_ret_traces_fn_val; - - assert(g->stack_trace_type != nullptr); - - LLVMTypeRef param_types[] = { - get_llvm_type(g, get_ptr_to_stack_trace_type(g)), - get_llvm_type(g, get_ptr_to_stack_trace_type(g)), - }; - LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), param_types, 2, false); - - Buf *fn_name = get_mangled_name(g, buf_create_from_str("__zig_merge_error_return_traces"), false); - LLVMValueRef fn_val = LLVMAddFunction(g->module, buf_ptr(fn_name), fn_type_ref); - LLVMSetLinkage(fn_val, LLVMInternalLinkage); - LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified)); - addLLVMFnAttr(fn_val, "nounwind"); - add_uwtable_attr(g, fn_val); - // Error return trace memory is in the stack, which is impossible to be at address 0 - // on any architecture. - addLLVMArgAttr(fn_val, (unsigned)0, "nonnull"); - addLLVMArgAttr(fn_val, (unsigned)0, "noalias"); - addLLVMArgAttr(fn_val, (unsigned)0, "writeonly"); - // Error return trace memory is in the stack, which is impossible to be at address 0 - // on any architecture. 
- addLLVMArgAttr(fn_val, (unsigned)1, "nonnull"); - addLLVMArgAttr(fn_val, (unsigned)1, "noalias"); - addLLVMArgAttr(fn_val, (unsigned)1, "readonly"); - if (g->build_mode == BuildModeDebug) { - ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true"); - ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr); - } - - // this is above the ZigLLVMClearCurrentDebugLocation - LLVMValueRef add_error_return_trace_addr_fn_val = get_add_error_return_trace_addr_fn(g); - - LLVMBasicBlockRef entry_block = LLVMAppendBasicBlock(fn_val, "Entry"); - LLVMBasicBlockRef prev_block = LLVMGetInsertBlock(g->builder); - LLVMValueRef prev_debug_location = LLVMGetCurrentDebugLocation(g->builder); - LLVMPositionBuilderAtEnd(g->builder, entry_block); - ZigLLVMClearCurrentDebugLocation(g->builder); - - // var frame_index: usize = undefined; - // var frames_left: usize = undefined; - // if (src_stack_trace.index < src_stack_trace.instruction_addresses.len) { - // frame_index = 0; - // frames_left = src_stack_trace.index; - // if (frames_left == 0) return; - // } else { - // frame_index = (src_stack_trace.index + 1) % src_stack_trace.instruction_addresses.len; - // frames_left = src_stack_trace.instruction_addresses.len; - // } - // while (true) { - // __zig_add_err_ret_trace_addr(dest_stack_trace, src_stack_trace.instruction_addresses[frame_index]); - // frames_left -= 1; - // if (frames_left == 0) return; - // frame_index = (frame_index + 1) % src_stack_trace.instruction_addresses.len; - // } - LLVMBasicBlockRef return_block = LLVMAppendBasicBlock(fn_val, "Return"); - - LLVMValueRef frame_index_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frame_index"); - LLVMValueRef frames_left_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frames_left"); - - LLVMValueRef dest_stack_trace_ptr = LLVMGetParam(fn_val, 0); - LLVMValueRef src_stack_trace_ptr = LLVMGetParam(fn_val, 1); - - size_t src_index_field_index = 
g->stack_trace_type->data.structure.fields[0].gen_index; - size_t src_addresses_field_index = g->stack_trace_type->data.structure.fields[1].gen_index; - LLVMValueRef src_index_field_ptr = LLVMBuildStructGEP(g->builder, src_stack_trace_ptr, - (unsigned)src_index_field_index, ""); - LLVMValueRef src_addresses_field_ptr = LLVMBuildStructGEP(g->builder, src_stack_trace_ptr, - (unsigned)src_addresses_field_index, ""); - ZigType *slice_type = g->stack_trace_type->data.structure.fields[1].type_entry; - size_t ptr_field_index = slice_type->data.structure.fields[slice_ptr_index].gen_index; - LLVMValueRef src_ptr_field_ptr = LLVMBuildStructGEP(g->builder, src_addresses_field_ptr, (unsigned)ptr_field_index, ""); - size_t len_field_index = slice_type->data.structure.fields[slice_len_index].gen_index; - LLVMValueRef src_len_field_ptr = LLVMBuildStructGEP(g->builder, src_addresses_field_ptr, (unsigned)len_field_index, ""); - LLVMValueRef src_index_val = LLVMBuildLoad(g->builder, src_index_field_ptr, ""); - LLVMValueRef src_ptr_val = LLVMBuildLoad(g->builder, src_ptr_field_ptr, ""); - LLVMValueRef src_len_val = LLVMBuildLoad(g->builder, src_len_field_ptr, ""); - LLVMValueRef no_wrap_bit = LLVMBuildICmp(g->builder, LLVMIntULT, src_index_val, src_len_val, ""); - LLVMBasicBlockRef no_wrap_block = LLVMAppendBasicBlock(fn_val, "NoWrap"); - LLVMBasicBlockRef yes_wrap_block = LLVMAppendBasicBlock(fn_val, "YesWrap"); - LLVMBasicBlockRef loop_block = LLVMAppendBasicBlock(fn_val, "Loop"); - LLVMBuildCondBr(g->builder, no_wrap_bit, no_wrap_block, yes_wrap_block); - - LLVMPositionBuilderAtEnd(g->builder, no_wrap_block); - LLVMValueRef usize_zero = LLVMConstNull(g->builtin_types.entry_usize->llvm_type); - LLVMBuildStore(g->builder, usize_zero, frame_index_ptr); - LLVMBuildStore(g->builder, src_index_val, frames_left_ptr); - LLVMValueRef frames_left_eq_zero_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, src_index_val, usize_zero, ""); - LLVMBuildCondBr(g->builder, frames_left_eq_zero_bit, 
return_block, loop_block); - - LLVMPositionBuilderAtEnd(g->builder, yes_wrap_block); - LLVMValueRef usize_one = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, 1, false); - LLVMValueRef plus_one = LLVMBuildNUWAdd(g->builder, src_index_val, usize_one, ""); - LLVMValueRef mod_len = LLVMBuildURem(g->builder, plus_one, src_len_val, ""); - LLVMBuildStore(g->builder, mod_len, frame_index_ptr); - LLVMBuildStore(g->builder, src_len_val, frames_left_ptr); - LLVMBuildBr(g->builder, loop_block); - - LLVMPositionBuilderAtEnd(g->builder, loop_block); - LLVMValueRef ptr_index = LLVMBuildLoad(g->builder, frame_index_ptr, ""); - LLVMValueRef addr_ptr = LLVMBuildInBoundsGEP(g->builder, src_ptr_val, &ptr_index, 1, ""); - LLVMValueRef this_addr_val = LLVMBuildLoad(g->builder, addr_ptr, ""); - LLVMValueRef args[] = {dest_stack_trace_ptr, this_addr_val}; - ZigLLVMBuildCall(g->builder, add_error_return_trace_addr_fn_val, args, 2, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAlways, ""); - LLVMValueRef prev_frames_left = LLVMBuildLoad(g->builder, frames_left_ptr, ""); - LLVMValueRef new_frames_left = LLVMBuildNUWSub(g->builder, prev_frames_left, usize_one, ""); - LLVMValueRef done_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, new_frames_left, usize_zero, ""); - LLVMBasicBlockRef continue_block = LLVMAppendBasicBlock(fn_val, "Continue"); - LLVMBuildCondBr(g->builder, done_bit, return_block, continue_block); - - LLVMPositionBuilderAtEnd(g->builder, return_block); - LLVMBuildRetVoid(g->builder); - - LLVMPositionBuilderAtEnd(g->builder, continue_block); - LLVMBuildStore(g->builder, new_frames_left, frames_left_ptr); - LLVMValueRef prev_index = LLVMBuildLoad(g->builder, frame_index_ptr, ""); - LLVMValueRef index_plus_one = LLVMBuildNUWAdd(g->builder, prev_index, usize_one, ""); - LLVMValueRef index_mod_len = LLVMBuildURem(g->builder, index_plus_one, src_len_val, ""); - LLVMBuildStore(g->builder, index_mod_len, frame_index_ptr); - LLVMBuildBr(g->builder, loop_block); - - 
LLVMPositionBuilderAtEnd(g->builder, prev_block); - if (!g->strip_debug_symbols) { - LLVMSetCurrentDebugLocation(g->builder, prev_debug_location); - } - - g->merge_err_ret_traces_fn_val = fn_val; - return fn_val; - -} - static LLVMValueRef get_return_err_fn(CodeGen *g) { if (g->return_err_fn != nullptr) return g->return_err_fn; @@ -1667,24 +1361,12 @@ static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) { return fn_val; } -static bool is_coro_prelude_scope(Scope *scope) { - while (scope != nullptr) { - if (scope->id == ScopeIdCoroPrelude) { - return true; - } else if (scope->id == ScopeIdFnDef) { - break; - } - scope = scope->parent; - } - return false; -} - static LLVMValueRef get_cur_err_ret_trace_val(CodeGen *g, Scope *scope) { if (!g->have_err_ret_tracing) { return nullptr; } if (g->cur_fn->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync) { - return is_coro_prelude_scope(scope) ? g->cur_err_ret_trace_val_arg : g->cur_err_ret_trace_val_stack; + return g->cur_err_ret_trace_val_stack; } if (g->cur_err_ret_trace_val_stack != nullptr) { return g->cur_err_ret_trace_val_stack; @@ -3697,19 +3379,6 @@ static bool get_prefix_arg_err_ret_stack(CodeGen *g, FnTypeId *fn_type_id) { fn_type_id->cc == CallingConventionAsync); } -static size_t get_async_allocator_arg_index(CodeGen *g, FnTypeId *fn_type_id) { - // 0 1 2 3 - // err_ret_stack allocator_ptr err_code other_args... - return get_prefix_arg_err_ret_stack(g, fn_type_id) ? 1 : 0; -} - -static size_t get_async_err_code_arg_index(CodeGen *g, FnTypeId *fn_type_id) { - // 0 1 2 3 - // err_ret_stack allocator_ptr err_code other_args... 
- return 1 + get_async_allocator_arg_index(g, fn_type_id); -} - - static LLVMValueRef get_new_stack_addr(CodeGen *g, LLVMValueRef new_stack) { LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, new_stack, (unsigned)slice_ptr_index, ""); LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, new_stack, (unsigned)slice_len_index, ""); @@ -3778,10 +3447,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr gen_param_values.append(get_cur_err_ret_trace_val(g, instruction->base.scope)); } if (instruction->is_async) { - gen_param_values.append(ir_llvm_value(g, instruction->async_allocator)); - - LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, result_loc, err_union_err_index, ""); - gen_param_values.append(err_val_ptr); + zig_panic("TODO codegen async call"); } FnWalk fn_walk = {}; fn_walk.id = FnWalkIdCall; @@ -4471,10 +4137,6 @@ static LLVMValueRef ir_render_align_cast(CodeGen *g, IrExecutable *executable, I { align_bytes = target_type->data.maybe.child_type->data.fn.fn_type_id.alignment; ptr_val = target_val; - } else if (target_type->id == ZigTypeIdOptional && - target_type->data.maybe.child_type->id == ZigTypeIdPromise) - { - zig_panic("TODO audit this function"); } else if (target_type->id == ZigTypeIdStruct && target_type->data.structure.is_slice) { ZigType *slice_ptr_type = target_type->data.structure.fields[slice_ptr_index].type_entry; align_bytes = get_ptr_align(g, slice_ptr_type); @@ -4519,17 +4181,7 @@ static LLVMValueRef ir_render_error_return_trace(CodeGen *g, IrExecutable *execu } static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrInstructionCancel *instruction) { - LLVMValueRef target_handle = ir_llvm_value(g, instruction->target); - LLVMBuildCall(g->builder, get_coro_destroy_fn_val(g), &target_handle, 1, ""); - return nullptr; -} - -static LLVMValueRef ir_render_get_implicit_allocator(CodeGen *g, IrExecutable *executable, - IrInstructionGetImplicitAllocator *instruction) -{ 
- assert(instruction->id == ImplicitAllocatorIdArg); - size_t allocator_arg_index = get_async_allocator_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id); - return LLVMGetParam(g->cur_fn_val, allocator_arg_index); + zig_panic("TODO cancel"); } static LLVMAtomicOrdering to_LLVMAtomicOrdering(AtomicOrder atomic_order) { @@ -4840,24 +4492,10 @@ static LLVMValueRef ir_render_frame_address(CodeGen *g, IrExecutable *executable return LLVMBuildPtrToInt(g->builder, ptr_val, g->builtin_types.entry_usize->llvm_type, ""); } -static LLVMValueRef get_handle_fn_val(CodeGen *g) { - if (g->coro_frame_fn_val) - return g->coro_frame_fn_val; - - LLVMTypeRef fn_type = LLVMFunctionType( LLVMPointerType(LLVMInt8Type(), 0) - , nullptr, 0, false); - Buf *name = buf_sprintf("llvm.coro.frame"); - g->coro_frame_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); - assert(LLVMGetIntrinsicID(g->coro_frame_fn_val)); - - return g->coro_frame_fn_val; -} - static LLVMValueRef ir_render_handle(CodeGen *g, IrExecutable *executable, IrInstructionHandle *instruction) { - LLVMValueRef zero = LLVMConstNull(get_llvm_type(g, g->builtin_types.entry_promise)); - return LLVMBuildCall(g->builder, get_handle_fn_val(g), &zero, 0, ""); + zig_panic("TODO @handle() codegen"); } static LLVMValueRef render_shl_with_overflow(CodeGen *g, IrInstructionOverflowOp *instruction) { @@ -5123,248 +4761,6 @@ static LLVMValueRef ir_render_panic(CodeGen *g, IrExecutable *executable, IrInst return nullptr; } -static LLVMValueRef ir_render_coro_id(CodeGen *g, IrExecutable *executable, IrInstructionCoroId *instruction) { - LLVMValueRef promise_ptr = ir_llvm_value(g, instruction->promise_ptr); - LLVMValueRef align_val = LLVMConstInt(LLVMInt32Type(), get_coro_frame_align_bytes(g), false); - LLVMValueRef null = LLVMConstIntToPtr(LLVMConstNull(g->builtin_types.entry_usize->llvm_type), - LLVMPointerType(LLVMInt8Type(), 0)); - LLVMValueRef params[] = { - align_val, - promise_ptr, - null, - null, - }; - return 
LLVMBuildCall(g->builder, get_coro_id_fn_val(g), params, 4, ""); -} - -static LLVMValueRef ir_render_coro_alloc(CodeGen *g, IrExecutable *executable, IrInstructionCoroAlloc *instruction) { - LLVMValueRef token = ir_llvm_value(g, instruction->coro_id); - return LLVMBuildCall(g->builder, get_coro_alloc_fn_val(g), &token, 1, ""); -} - -static LLVMValueRef ir_render_coro_size(CodeGen *g, IrExecutable *executable, IrInstructionCoroSize *instruction) { - return LLVMBuildCall(g->builder, get_coro_size_fn_val(g), nullptr, 0, ""); -} - -static LLVMValueRef ir_render_coro_begin(CodeGen *g, IrExecutable *executable, IrInstructionCoroBegin *instruction) { - LLVMValueRef coro_id = ir_llvm_value(g, instruction->coro_id); - LLVMValueRef coro_mem_ptr = ir_llvm_value(g, instruction->coro_mem_ptr); - LLVMValueRef params[] = { - coro_id, - coro_mem_ptr, - }; - return LLVMBuildCall(g->builder, get_coro_begin_fn_val(g), params, 2, ""); -} - -static LLVMValueRef ir_render_coro_alloc_fail(CodeGen *g, IrExecutable *executable, - IrInstructionCoroAllocFail *instruction) -{ - size_t err_code_ptr_arg_index = get_async_err_code_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id); - LLVMValueRef err_code_ptr_val = LLVMGetParam(g->cur_fn_val, err_code_ptr_arg_index); - LLVMValueRef err_code = ir_llvm_value(g, instruction->err_val); - LLVMBuildStore(g->builder, err_code, err_code_ptr_val); - - LLVMValueRef return_value; - if (ir_want_runtime_safety(g, &instruction->base)) { - return_value = LLVMConstNull(LLVMPointerType(LLVMInt8Type(), 0)); - } else { - return_value = LLVMGetUndef(LLVMPointerType(LLVMInt8Type(), 0)); - } - LLVMBuildRet(g->builder, return_value); - return nullptr; -} - -static LLVMValueRef ir_render_coro_suspend(CodeGen *g, IrExecutable *executable, IrInstructionCoroSuspend *instruction) { - LLVMValueRef save_point; - if (instruction->save_point == nullptr) { - save_point = LLVMConstNull(ZigLLVMTokenTypeInContext(LLVMGetGlobalContext())); - } else { - save_point = 
ir_llvm_value(g, instruction->save_point); - } - LLVMValueRef is_final = ir_llvm_value(g, instruction->is_final); - LLVMValueRef params[] = { - save_point, - is_final, - }; - return LLVMBuildCall(g->builder, get_coro_suspend_fn_val(g), params, 2, ""); -} - -static LLVMValueRef ir_render_coro_end(CodeGen *g, IrExecutable *executable, IrInstructionCoroEnd *instruction) { - LLVMValueRef params[] = { - LLVMConstNull(LLVMPointerType(LLVMInt8Type(), 0)), - LLVMConstNull(LLVMInt1Type()), - }; - return LLVMBuildCall(g->builder, get_coro_end_fn_val(g), params, 2, ""); -} - -static LLVMValueRef ir_render_coro_free(CodeGen *g, IrExecutable *executable, IrInstructionCoroFree *instruction) { - LLVMValueRef coro_id = ir_llvm_value(g, instruction->coro_id); - LLVMValueRef coro_handle = ir_llvm_value(g, instruction->coro_handle); - LLVMValueRef params[] = { - coro_id, - coro_handle, - }; - return LLVMBuildCall(g->builder, get_coro_free_fn_val(g), params, 2, ""); -} - -static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, IrInstructionCoroResume *instruction) { - LLVMValueRef awaiter_handle = ir_llvm_value(g, instruction->awaiter_handle); - return LLVMBuildCall(g->builder, get_coro_resume_fn_val(g), &awaiter_handle, 1, ""); -} - -static LLVMValueRef ir_render_coro_save(CodeGen *g, IrExecutable *executable, IrInstructionCoroSave *instruction) { - LLVMValueRef coro_handle = ir_llvm_value(g, instruction->coro_handle); - return LLVMBuildCall(g->builder, get_coro_save_fn_val(g), &coro_handle, 1, ""); -} - -static LLVMValueRef ir_render_coro_promise(CodeGen *g, IrExecutable *executable, IrInstructionCoroPromise *instruction) { - LLVMValueRef coro_handle = ir_llvm_value(g, instruction->coro_handle); - LLVMValueRef params[] = { - coro_handle, - LLVMConstInt(LLVMInt32Type(), get_coro_frame_align_bytes(g), false), - LLVMConstNull(LLVMInt1Type()), - }; - LLVMValueRef uncasted_result = LLVMBuildCall(g->builder, get_coro_promise_fn_val(g), params, 3, ""); - return 
LLVMBuildBitCast(g->builder, uncasted_result, get_llvm_type(g, instruction->base.value.type), ""); -} - -static LLVMValueRef get_coro_alloc_helper_fn_val(CodeGen *g, LLVMTypeRef alloc_fn_type_ref, ZigType *fn_type) { - if (g->coro_alloc_helper_fn_val != nullptr) - return g->coro_alloc_helper_fn_val; - - assert(fn_type->id == ZigTypeIdFn); - - ZigType *ptr_to_err_code_type = get_pointer_to_type(g, g->builtin_types.entry_global_error_set, false); - - LLVMTypeRef alloc_raw_fn_type_ref = LLVMGetElementType(alloc_fn_type_ref); - LLVMTypeRef *alloc_fn_arg_types = allocate(LLVMCountParamTypes(alloc_raw_fn_type_ref)); - LLVMGetParamTypes(alloc_raw_fn_type_ref, alloc_fn_arg_types); - - ZigList arg_types = {}; - arg_types.append(alloc_fn_type_ref); - if (g->have_err_ret_tracing) { - arg_types.append(alloc_fn_arg_types[1]); - } - arg_types.append(alloc_fn_arg_types[g->have_err_ret_tracing ? 2 : 1]); - arg_types.append(get_llvm_type(g, ptr_to_err_code_type)); - arg_types.append(g->builtin_types.entry_usize->llvm_type); - - LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0), - arg_types.items, arg_types.length, false); - - Buf *fn_name = get_mangled_name(g, buf_create_from_str("__zig_coro_alloc_helper"), false); - LLVMValueRef fn_val = LLVMAddFunction(g->module, buf_ptr(fn_name), fn_type_ref); - LLVMSetLinkage(fn_val, LLVMInternalLinkage); - LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified)); - addLLVMFnAttr(fn_val, "nounwind"); - addLLVMArgAttr(fn_val, (unsigned)0, "nonnull"); - addLLVMArgAttr(fn_val, (unsigned)1, "nonnull"); - - LLVMBasicBlockRef prev_block = LLVMGetInsertBlock(g->builder); - LLVMValueRef prev_debug_location = LLVMGetCurrentDebugLocation(g->builder); - ZigFn *prev_cur_fn = g->cur_fn; - LLVMValueRef prev_cur_fn_val = g->cur_fn_val; - - LLVMBasicBlockRef entry_block = LLVMAppendBasicBlock(fn_val, "Entry"); - LLVMPositionBuilderAtEnd(g->builder, entry_block); - ZigLLVMClearCurrentDebugLocation(g->builder); - 
g->cur_fn = nullptr; - g->cur_fn_val = fn_val; - - LLVMValueRef sret_ptr = LLVMBuildAlloca(g->builder, LLVMGetElementType(alloc_fn_arg_types[0]), ""); - - size_t next_arg = 0; - LLVMValueRef realloc_fn_val = LLVMGetParam(fn_val, next_arg); - next_arg += 1; - - LLVMValueRef stack_trace_val; - if (g->have_err_ret_tracing) { - stack_trace_val = LLVMGetParam(fn_val, next_arg); - next_arg += 1; - } - - LLVMValueRef allocator_val = LLVMGetParam(fn_val, next_arg); - next_arg += 1; - LLVMValueRef err_code_ptr = LLVMGetParam(fn_val, next_arg); - next_arg += 1; - LLVMValueRef coro_size = LLVMGetParam(fn_val, next_arg); - next_arg += 1; - LLVMValueRef alignment_val = LLVMConstInt(g->builtin_types.entry_u29->llvm_type, - get_coro_frame_align_bytes(g), false); - - ConstExprValue *zero_array = create_const_str_lit(g, buf_create_from_str("")); - ConstExprValue *undef_slice_zero = create_const_slice(g, zero_array, 0, 0, false); - render_const_val(g, undef_slice_zero, ""); - render_const_val_global(g, undef_slice_zero, ""); - - ZigList args = {}; - args.append(sret_ptr); - if (g->have_err_ret_tracing) { - args.append(stack_trace_val); - } - args.append(allocator_val); - args.append(undef_slice_zero->global_refs->llvm_global); - args.append(LLVMGetUndef(g->builtin_types.entry_u29->llvm_type)); - args.append(coro_size); - args.append(alignment_val); - LLVMValueRef call_instruction = ZigLLVMBuildCall(g->builder, realloc_fn_val, args.items, args.length, - get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, ""); - set_call_instr_sret(g, call_instruction); - LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, sret_ptr, err_union_err_index, ""); - LLVMValueRef err_val = LLVMBuildLoad(g->builder, err_val_ptr, ""); - LLVMBuildStore(g->builder, err_val, err_code_ptr); - LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, err_val, LLVMConstNull(LLVMTypeOf(err_val)), ""); - LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(fn_val, "AllocOk"); - LLVMBasicBlockRef 
fail_block = LLVMAppendBasicBlock(fn_val, "AllocFail"); - LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block); - - LLVMPositionBuilderAtEnd(g->builder, ok_block); - LLVMValueRef payload_ptr = LLVMBuildStructGEP(g->builder, sret_ptr, err_union_payload_index, ""); - ZigType *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, false, false, - PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0, false); - ZigType *slice_type = get_slice_type(g, u8_ptr_type); - size_t ptr_field_index = slice_type->data.structure.fields[slice_ptr_index].gen_index; - LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, payload_ptr, ptr_field_index, ""); - LLVMValueRef ptr_val = LLVMBuildLoad(g->builder, ptr_field_ptr, ""); - LLVMBuildRet(g->builder, ptr_val); - - LLVMPositionBuilderAtEnd(g->builder, fail_block); - LLVMBuildRet(g->builder, LLVMConstNull(LLVMPointerType(LLVMInt8Type(), 0))); - - g->cur_fn = prev_cur_fn; - g->cur_fn_val = prev_cur_fn_val; - LLVMPositionBuilderAtEnd(g->builder, prev_block); - if (!g->strip_debug_symbols) { - LLVMSetCurrentDebugLocation(g->builder, prev_debug_location); - } - - g->coro_alloc_helper_fn_val = fn_val; - return fn_val; -} - -static LLVMValueRef ir_render_coro_alloc_helper(CodeGen *g, IrExecutable *executable, - IrInstructionCoroAllocHelper *instruction) -{ - LLVMValueRef realloc_fn = ir_llvm_value(g, instruction->realloc_fn); - LLVMValueRef coro_size = ir_llvm_value(g, instruction->coro_size); - LLVMValueRef fn_val = get_coro_alloc_helper_fn_val(g, LLVMTypeOf(realloc_fn), instruction->realloc_fn->value.type); - size_t err_code_ptr_arg_index = get_async_err_code_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id); - size_t allocator_arg_index = get_async_allocator_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id); - - ZigList params = {}; - params.append(realloc_fn); - uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, g->cur_fn); - if (err_ret_trace_arg_index != UINT32_MAX) 
{ - params.append(LLVMGetParam(g->cur_fn_val, err_ret_trace_arg_index)); - } - params.append(LLVMGetParam(g->cur_fn_val, allocator_arg_index)); - params.append(LLVMGetParam(g->cur_fn_val, err_code_ptr_arg_index)); - params.append(coro_size); - - return ZigLLVMBuildCall(g->builder, fn_val, params.items, params.length, - get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, ""); -} - static LLVMValueRef ir_render_atomic_rmw(CodeGen *g, IrExecutable *executable, IrInstructionAtomicRmw *instruction) { @@ -5402,19 +4798,6 @@ static LLVMValueRef ir_render_atomic_load(CodeGen *g, IrExecutable *executable, return load_inst; } -static LLVMValueRef ir_render_merge_err_ret_traces(CodeGen *g, IrExecutable *executable, - IrInstructionMergeErrRetTraces *instruction) -{ - assert(g->have_err_ret_tracing); - - LLVMValueRef src_trace_ptr = ir_llvm_value(g, instruction->src_err_ret_trace_ptr); - LLVMValueRef dest_trace_ptr = ir_llvm_value(g, instruction->dest_err_ret_trace_ptr); - - LLVMValueRef args[] = { dest_trace_ptr, src_trace_ptr }; - ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, ""); - return nullptr; -} - static LLVMValueRef ir_render_mark_err_ret_trace_ptr(CodeGen *g, IrExecutable *executable, IrInstructionMarkErrRetTracePtr *instruction) { @@ -5559,7 +4942,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, case IrInstructionIdSetRuntimeSafety: case IrInstructionIdSetFloatMode: case IrInstructionIdArrayType: - case IrInstructionIdPromiseType: case IrInstructionIdSliceType: case IrInstructionIdSizeOf: case IrInstructionIdSwitchTarget: @@ -5599,8 +4981,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, case IrInstructionIdTagType: case IrInstructionIdExport: case IrInstructionIdErrorUnion: - case IrInstructionIdPromiseResultType: - case IrInstructionIdAwaitBookkeeping: case 
IrInstructionIdAddImplicitReturnType: case IrInstructionIdIntCast: case IrInstructionIdFloatCast: @@ -5757,40 +5137,12 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_error_return_trace(g, executable, (IrInstructionErrorReturnTrace *)instruction); case IrInstructionIdCancel: return ir_render_cancel(g, executable, (IrInstructionCancel *)instruction); - case IrInstructionIdGetImplicitAllocator: - return ir_render_get_implicit_allocator(g, executable, (IrInstructionGetImplicitAllocator *)instruction); - case IrInstructionIdCoroId: - return ir_render_coro_id(g, executable, (IrInstructionCoroId *)instruction); - case IrInstructionIdCoroAlloc: - return ir_render_coro_alloc(g, executable, (IrInstructionCoroAlloc *)instruction); - case IrInstructionIdCoroSize: - return ir_render_coro_size(g, executable, (IrInstructionCoroSize *)instruction); - case IrInstructionIdCoroBegin: - return ir_render_coro_begin(g, executable, (IrInstructionCoroBegin *)instruction); - case IrInstructionIdCoroAllocFail: - return ir_render_coro_alloc_fail(g, executable, (IrInstructionCoroAllocFail *)instruction); - case IrInstructionIdCoroSuspend: - return ir_render_coro_suspend(g, executable, (IrInstructionCoroSuspend *)instruction); - case IrInstructionIdCoroEnd: - return ir_render_coro_end(g, executable, (IrInstructionCoroEnd *)instruction); - case IrInstructionIdCoroFree: - return ir_render_coro_free(g, executable, (IrInstructionCoroFree *)instruction); - case IrInstructionIdCoroResume: - return ir_render_coro_resume(g, executable, (IrInstructionCoroResume *)instruction); - case IrInstructionIdCoroSave: - return ir_render_coro_save(g, executable, (IrInstructionCoroSave *)instruction); - case IrInstructionIdCoroPromise: - return ir_render_coro_promise(g, executable, (IrInstructionCoroPromise *)instruction); - case IrInstructionIdCoroAllocHelper: - return ir_render_coro_alloc_helper(g, executable, (IrInstructionCoroAllocHelper *)instruction); case 
IrInstructionIdAtomicRmw: return ir_render_atomic_rmw(g, executable, (IrInstructionAtomicRmw *)instruction); case IrInstructionIdAtomicLoad: return ir_render_atomic_load(g, executable, (IrInstructionAtomicLoad *)instruction); case IrInstructionIdSaveErrRetAddr: return ir_render_save_err_ret_addr(g, executable, (IrInstructionSaveErrRetAddr *)instruction); - case IrInstructionIdMergeErrRetTraces: - return ir_render_merge_err_ret_traces(g, executable, (IrInstructionMergeErrRetTraces *)instruction); case IrInstructionIdMarkErrRetTracePtr: return ir_render_mark_err_ret_trace_ptr(g, executable, (IrInstructionMarkErrRetTracePtr *)instruction); case IrInstructionIdFloatOp: @@ -6008,7 +5360,6 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con case ZigTypeIdPointer: case ZigTypeIdFn: case ZigTypeIdOptional: - case ZigTypeIdPromise: { LLVMValueRef ptr_val = gen_const_val(g, const_val, ""); LLVMValueRef ptr_size_int_val = LLVMConstPtrToInt(ptr_val, g->builtin_types.entry_usize->llvm_type); @@ -6591,7 +5942,6 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: case ZigTypeIdOpaque: - case ZigTypeIdPromise: zig_unreachable(); } @@ -7294,13 +6644,6 @@ static void define_builtin_types(CodeGen *g) { g->primitive_type_table.put(&entry->name, entry); } - { - ZigType *entry = get_promise_type(g, nullptr); - g->primitive_type_table.put(&entry->name, entry); - entry->size_in_bits = g->builtin_types.entry_usize->size_in_bits; - entry->abi_align = g->builtin_types.entry_usize->abi_align; - entry->abi_size = g->builtin_types.entry_usize->abi_size; - } } @@ -7729,7 +7072,6 @@ Buf *codegen_generate_builtin_source(CodeGen *g) { " BoundFn: Fn,\n" " ArgTuple: void,\n" " Opaque: void,\n" - " Promise: Promise,\n" " Vector: Vector,\n" " EnumLiteral: void,\n" "\n\n" @@ -7842,14 +7184,9 @@ Buf *codegen_generate_builtin_source(CodeGen *g) { " is_generic: bool,\n" " is_var_args: bool,\n" " 
return_type: ?type,\n" - " async_allocator_type: ?type,\n" " args: []FnArg,\n" " };\n" "\n" - " pub const Promise = struct {\n" - " child: ?type,\n" - " };\n" - "\n" " pub const Vector = struct {\n" " len: comptime_int,\n" " child: type,\n" @@ -8998,7 +8335,6 @@ static void prepend_c_type_to_decl_list(CodeGen *g, GenH *gen_h, ZigType *type_e case ZigTypeIdArgTuple: case ZigTypeIdErrorUnion: case ZigTypeIdErrorSet: - case ZigTypeIdPromise: zig_unreachable(); case ZigTypeIdVoid: case ZigTypeIdUnreachable: @@ -9182,7 +8518,6 @@ static void get_c_type(CodeGen *g, GenH *gen_h, ZigType *type_entry, Buf *out_bu case ZigTypeIdUndefined: case ZigTypeIdNull: case ZigTypeIdArgTuple: - case ZigTypeIdPromise: zig_unreachable(); } } @@ -9349,7 +8684,6 @@ static void gen_h_file(CodeGen *g) { case ZigTypeIdArgTuple: case ZigTypeIdOptional: case ZigTypeIdFn: - case ZigTypeIdPromise: case ZigTypeIdVector: zig_unreachable(); case ZigTypeIdEnum: diff --git a/src/ir.cpp b/src/ir.cpp index be7a8e2e51..f23fe1b7d0 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -99,7 +99,6 @@ struct ConstCastOnly { ConstCastErrUnionErrSetMismatch *error_union_error_set; ConstCastTypeMismatch *type_mismatch; ConstCastOnly *return_type; - ConstCastOnly *async_allocator_type; ConstCastOnly *null_wrap_ptr_child; ConstCastArg fn_arg; ConstCastArgNoAlias arg_no_alias; @@ -318,7 +317,6 @@ static bool types_have_same_zig_comptime_repr(ZigType *a, ZigType *b) { case ZigTypeIdUnion: case ZigTypeIdFn: case ZigTypeIdArgTuple: - case ZigTypeIdPromise: case ZigTypeIdVector: return false; } @@ -564,10 +562,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionArrayType *) { return IrInstructionIdArrayType; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionPromiseType *) { - return IrInstructionIdPromiseType; -} - static constexpr IrInstructionId ir_instruction_id(IrInstructionSliceType *) { return IrInstructionIdSliceType; } @@ -964,58 +958,6 @@ static constexpr IrInstructionId 
ir_instruction_id(IrInstructionCancel *) { return IrInstructionIdCancel; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionGetImplicitAllocator *) { - return IrInstructionIdGetImplicitAllocator; -} - -static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroId *) { - return IrInstructionIdCoroId; -} - -static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroAlloc *) { - return IrInstructionIdCoroAlloc; -} - -static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroSize *) { - return IrInstructionIdCoroSize; -} - -static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroBegin *) { - return IrInstructionIdCoroBegin; -} - -static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroAllocFail *) { - return IrInstructionIdCoroAllocFail; -} - -static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroSuspend *) { - return IrInstructionIdCoroSuspend; -} - -static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroEnd *) { - return IrInstructionIdCoroEnd; -} - -static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroFree *) { - return IrInstructionIdCoroFree; -} - -static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroResume *) { - return IrInstructionIdCoroResume; -} - -static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroSave *) { - return IrInstructionIdCoroSave; -} - -static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroPromise *) { - return IrInstructionIdCoroPromise; -} - -static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroAllocHelper *) { - return IrInstructionIdCoroAllocHelper; -} - static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicRmw *) { return IrInstructionIdAtomicRmw; } @@ -1024,14 +966,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicLoad *) { return IrInstructionIdAtomicLoad; } -static constexpr IrInstructionId 
ir_instruction_id(IrInstructionPromiseResultType *) { - return IrInstructionIdPromiseResultType; -} - -static constexpr IrInstructionId ir_instruction_id(IrInstructionAwaitBookkeeping *) { - return IrInstructionIdAwaitBookkeeping; -} - static constexpr IrInstructionId ir_instruction_id(IrInstructionSaveErrRetAddr *) { return IrInstructionIdSaveErrRetAddr; } @@ -1040,10 +974,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionAddImplicitRetur return IrInstructionIdAddImplicitReturnType; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionMergeErrRetTraces *) { - return IrInstructionIdMergeErrRetTraces; -} - static constexpr IrInstructionId ir_instruction_id(IrInstructionMarkErrRetTracePtr *) { return IrInstructionIdMarkErrRetTracePtr; } @@ -1213,14 +1143,6 @@ static IrInstruction *ir_build_const_usize(IrBuilder *irb, Scope *scope, AstNode return &const_instruction->base; } -static IrInstruction *ir_build_const_u8(IrBuilder *irb, Scope *scope, AstNode *source_node, uint8_t value) { - IrInstructionConst *const_instruction = ir_build_instruction(irb, scope, source_node); - const_instruction->base.value.type = irb->codegen->builtin_types.entry_u8; - const_instruction->base.value.special = ConstValSpecialStatic; - bigint_init_unsigned(&const_instruction->base.value.data.x_bigint, value); - return &const_instruction->base; -} - static IrInstruction *ir_create_const_type(IrBuilder *irb, Scope *scope, AstNode *source_node, ZigType *type_entry) { @@ -1428,7 +1350,7 @@ static IrInstruction *ir_build_union_field_ptr(IrBuilder *irb, Scope *scope, Ast static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *source_node, ZigFn *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args, - bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator, + bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *new_stack, ResultLoc *result_loc) { IrInstructionCallSrc *call_instruction 
= ir_build_instruction(irb, scope, source_node); @@ -1439,14 +1361,12 @@ static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *s call_instruction->args = args; call_instruction->arg_count = arg_count; call_instruction->is_async = is_async; - call_instruction->async_allocator = async_allocator; call_instruction->new_stack = new_stack; call_instruction->result_loc = result_loc; if (fn_ref != nullptr) ir_ref_instruction(fn_ref, irb->current_basic_block); for (size_t i = 0; i < arg_count; i += 1) ir_ref_instruction(args[i], irb->current_basic_block); - if (async_allocator != nullptr) ir_ref_instruction(async_allocator, irb->current_basic_block); if (new_stack != nullptr) ir_ref_instruction(new_stack, irb->current_basic_block); return &call_instruction->base; @@ -1454,7 +1374,7 @@ static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *s static IrInstruction *ir_build_call_gen(IrAnalyze *ira, IrInstruction *source_instruction, ZigFn *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args, - FnInline fn_inline, bool is_async, IrInstruction *async_allocator, IrInstruction *new_stack, + FnInline fn_inline, bool is_async, IrInstruction *new_stack, IrInstruction *result_loc, ZigType *return_type) { IrInstructionCallGen *call_instruction = ir_build_instruction(&ira->new_irb, @@ -1466,14 +1386,12 @@ static IrInstruction *ir_build_call_gen(IrAnalyze *ira, IrInstruction *source_in call_instruction->args = args; call_instruction->arg_count = arg_count; call_instruction->is_async = is_async; - call_instruction->async_allocator = async_allocator; call_instruction->new_stack = new_stack; call_instruction->result_loc = result_loc; if (fn_ref != nullptr) ir_ref_instruction(fn_ref, ira->new_irb.current_basic_block); for (size_t i = 0; i < arg_count; i += 1) ir_ref_instruction(args[i], ira->new_irb.current_basic_block); - if (async_allocator != nullptr) ir_ref_instruction(async_allocator, ira->new_irb.current_basic_block); 
if (new_stack != nullptr) ir_ref_instruction(new_stack, ira->new_irb.current_basic_block); if (result_loc != nullptr) ir_ref_instruction(result_loc, ira->new_irb.current_basic_block); @@ -1753,17 +1671,6 @@ static IrInstruction *ir_build_array_type(IrBuilder *irb, Scope *scope, AstNode return &instruction->base; } -static IrInstruction *ir_build_promise_type(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *payload_type) -{ - IrInstructionPromiseType *instruction = ir_build_instruction(irb, scope, source_node); - instruction->payload_type = payload_type; - - if (payload_type != nullptr) ir_ref_instruction(payload_type, irb->current_basic_block); - - return &instruction->base; -} - static IrInstruction *ir_build_slice_type(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *child_type, bool is_const, bool is_volatile, IrInstruction *align_value, bool is_allow_zero) { @@ -2595,13 +2502,12 @@ static IrInstruction *ir_build_unwrap_err_payload(IrBuilder *irb, Scope *scope, static IrInstruction *ir_build_fn_proto(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction **param_types, IrInstruction *align_value, IrInstruction *return_type, - IrInstruction *async_allocator_type_value, bool is_var_args) + bool is_var_args) { IrInstructionFnProto *instruction = ir_build_instruction(irb, scope, source_node); instruction->param_types = param_types; instruction->align_value = align_value; instruction->return_type = return_type; - instruction->async_allocator_type_value = async_allocator_type_value; instruction->is_var_args = is_var_args; assert(source_node->type == NodeTypeFnProto); @@ -2611,7 +2517,6 @@ static IrInstruction *ir_build_fn_proto(IrBuilder *irb, Scope *scope, AstNode *s if (param_types[i] != nullptr) ir_ref_instruction(param_types[i], irb->current_basic_block); } if (align_value != nullptr) ir_ref_instruction(align_value, irb->current_basic_block); - if (async_allocator_type_value != nullptr) 
ir_ref_instruction(async_allocator_type_value, irb->current_basic_block); ir_ref_instruction(return_type, irb->current_basic_block); return &instruction->base; @@ -3055,149 +2960,6 @@ static IrInstruction *ir_build_error_union(IrBuilder *irb, Scope *scope, AstNode return &instruction->base; } -static IrInstruction *ir_build_cancel(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *target) -{ - IrInstructionCancel *instruction = ir_build_instruction(irb, scope, source_node); - instruction->target = target; - - ir_ref_instruction(target, irb->current_basic_block); - - return &instruction->base; -} - -static IrInstruction *ir_build_get_implicit_allocator(IrBuilder *irb, Scope *scope, AstNode *source_node, - ImplicitAllocatorId id) -{ - IrInstructionGetImplicitAllocator *instruction = ir_build_instruction(irb, scope, source_node); - instruction->id = id; - - return &instruction->base; -} - -static IrInstruction *ir_build_coro_id(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *promise_ptr) { - IrInstructionCoroId *instruction = ir_build_instruction(irb, scope, source_node); - instruction->promise_ptr = promise_ptr; - - ir_ref_instruction(promise_ptr, irb->current_basic_block); - - return &instruction->base; -} - -static IrInstruction *ir_build_coro_alloc(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *coro_id) { - IrInstructionCoroAlloc *instruction = ir_build_instruction(irb, scope, source_node); - instruction->coro_id = coro_id; - - ir_ref_instruction(coro_id, irb->current_basic_block); - - return &instruction->base; -} - -static IrInstruction *ir_build_coro_size(IrBuilder *irb, Scope *scope, AstNode *source_node) { - IrInstructionCoroSize *instruction = ir_build_instruction(irb, scope, source_node); - - return &instruction->base; -} - -static IrInstruction *ir_build_coro_begin(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *coro_id, IrInstruction *coro_mem_ptr) { - IrInstructionCoroBegin 
*instruction = ir_build_instruction(irb, scope, source_node); - instruction->coro_id = coro_id; - instruction->coro_mem_ptr = coro_mem_ptr; - - ir_ref_instruction(coro_id, irb->current_basic_block); - ir_ref_instruction(coro_mem_ptr, irb->current_basic_block); - - return &instruction->base; -} - -static IrInstruction *ir_build_coro_alloc_fail(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *err_val) { - IrInstructionCoroAllocFail *instruction = ir_build_instruction(irb, scope, source_node); - instruction->base.value.type = irb->codegen->builtin_types.entry_unreachable; - instruction->base.value.special = ConstValSpecialStatic; - instruction->err_val = err_val; - - ir_ref_instruction(err_val, irb->current_basic_block); - - return &instruction->base; -} - -static IrInstruction *ir_build_coro_suspend(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *save_point, IrInstruction *is_final) -{ - IrInstructionCoroSuspend *instruction = ir_build_instruction(irb, scope, source_node); - instruction->save_point = save_point; - instruction->is_final = is_final; - - if (save_point != nullptr) ir_ref_instruction(save_point, irb->current_basic_block); - ir_ref_instruction(is_final, irb->current_basic_block); - - return &instruction->base; -} - -static IrInstruction *ir_build_coro_end(IrBuilder *irb, Scope *scope, AstNode *source_node) { - IrInstructionCoroEnd *instruction = ir_build_instruction(irb, scope, source_node); - return &instruction->base; -} - -static IrInstruction *ir_build_coro_free(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *coro_id, IrInstruction *coro_handle) -{ - IrInstructionCoroFree *instruction = ir_build_instruction(irb, scope, source_node); - instruction->coro_id = coro_id; - instruction->coro_handle = coro_handle; - - ir_ref_instruction(coro_id, irb->current_basic_block); - ir_ref_instruction(coro_handle, irb->current_basic_block); - - return &instruction->base; -} - -static IrInstruction 
*ir_build_coro_resume(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *awaiter_handle) -{ - IrInstructionCoroResume *instruction = ir_build_instruction(irb, scope, source_node); - instruction->awaiter_handle = awaiter_handle; - - ir_ref_instruction(awaiter_handle, irb->current_basic_block); - - return &instruction->base; -} - -static IrInstruction *ir_build_coro_save(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *coro_handle) -{ - IrInstructionCoroSave *instruction = ir_build_instruction(irb, scope, source_node); - instruction->coro_handle = coro_handle; - - ir_ref_instruction(coro_handle, irb->current_basic_block); - - return &instruction->base; -} - -static IrInstruction *ir_build_coro_promise(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *coro_handle) -{ - IrInstructionCoroPromise *instruction = ir_build_instruction(irb, scope, source_node); - instruction->coro_handle = coro_handle; - - ir_ref_instruction(coro_handle, irb->current_basic_block); - - return &instruction->base; -} - -static IrInstruction *ir_build_coro_alloc_helper(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *realloc_fn, IrInstruction *coro_size) -{ - IrInstructionCoroAllocHelper *instruction = ir_build_instruction(irb, scope, source_node); - instruction->realloc_fn = realloc_fn; - instruction->coro_size = coro_size; - - ir_ref_instruction(realloc_fn, irb->current_basic_block); - ir_ref_instruction(coro_size, irb->current_basic_block); - - return &instruction->base; -} - static IrInstruction *ir_build_atomic_rmw(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *operand_type, IrInstruction *ptr, IrInstruction *op, IrInstruction *operand, IrInstruction *ordering, AtomicRmwOp resolved_op, AtomicOrder resolved_ordering) @@ -3237,28 +2999,6 @@ static IrInstruction *ir_build_atomic_load(IrBuilder *irb, Scope *scope, AstNode return &instruction->base; } -static IrInstruction 
*ir_build_promise_result_type(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *promise_type) -{ - IrInstructionPromiseResultType *instruction = ir_build_instruction(irb, scope, source_node); - instruction->promise_type = promise_type; - - ir_ref_instruction(promise_type, irb->current_basic_block); - - return &instruction->base; -} - -static IrInstruction *ir_build_await_bookkeeping(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *promise_result_type) -{ - IrInstructionAwaitBookkeeping *instruction = ir_build_instruction(irb, scope, source_node); - instruction->promise_result_type = promise_result_type; - - ir_ref_instruction(promise_result_type, irb->current_basic_block); - - return &instruction->base; -} - static IrInstruction *ir_build_save_err_ret_addr(IrBuilder *irb, Scope *scope, AstNode *source_node) { IrInstructionSaveErrRetAddr *instruction = ir_build_instruction(irb, scope, source_node); return &instruction->base; @@ -3275,21 +3015,6 @@ static IrInstruction *ir_build_add_implicit_return_type(IrBuilder *irb, Scope *s return &instruction->base; } -static IrInstruction *ir_build_merge_err_ret_traces(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *coro_promise_ptr, IrInstruction *src_err_ret_trace_ptr, IrInstruction *dest_err_ret_trace_ptr) -{ - IrInstructionMergeErrRetTraces *instruction = ir_build_instruction(irb, scope, source_node); - instruction->coro_promise_ptr = coro_promise_ptr; - instruction->src_err_ret_trace_ptr = src_err_ret_trace_ptr; - instruction->dest_err_ret_trace_ptr = dest_err_ret_trace_ptr; - - ir_ref_instruction(coro_promise_ptr, irb->current_basic_block); - ir_ref_instruction(src_err_ret_trace_ptr, irb->current_basic_block); - ir_ref_instruction(dest_err_ret_trace_ptr, irb->current_basic_block); - - return &instruction->base; -} - static IrInstruction *ir_build_mark_err_ret_trace_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *err_ret_trace_ptr) { 
IrInstructionMarkErrRetTracePtr *instruction = ir_build_instruction(irb, scope, source_node); instruction->err_ret_trace_ptr = err_ret_trace_ptr; @@ -3488,7 +3213,6 @@ static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_sco continue; case ScopeIdDeferExpr: case ScopeIdCImport: - case ScopeIdCoroPrelude: zig_unreachable(); } } @@ -3544,7 +3268,6 @@ static bool ir_gen_defers_for_block(IrBuilder *irb, Scope *inner_scope, Scope *o continue; case ScopeIdDeferExpr: case ScopeIdCImport: - case ScopeIdCoroPrelude: zig_unreachable(); } } @@ -3563,18 +3286,6 @@ static void ir_set_cursor_at_end_and_append_block(IrBuilder *irb, IrBasicBlock * ir_set_cursor_at_end(irb, basic_block); } -static ScopeSuspend *get_scope_suspend(Scope *scope) { - while (scope) { - if (scope->id == ScopeIdSuspend) - return (ScopeSuspend *)scope; - if (scope->id == ScopeIdFnDef) - return nullptr; - - scope = scope->parent; - } - return nullptr; -} - static ScopeDeferExpr *get_scope_defer_expr(Scope *scope) { while (scope) { if (scope->id == ScopeIdDeferExpr) @@ -3604,47 +3315,7 @@ static IrInstruction *ir_gen_async_return(IrBuilder *irb, Scope *scope, AstNode return return_inst; } - IrBasicBlock *suspended_block = ir_create_basic_block(irb, scope, "Suspended"); - IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, scope, "NotSuspended"); - IrBasicBlock *store_awaiter_block = ir_create_basic_block(irb, scope, "StoreAwaiter"); - IrBasicBlock *check_canceled_block = ir_create_basic_block(irb, scope, "CheckCanceled"); - - IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111 - IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000 - IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001 - IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010 - IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node, 
irb->codegen->builtin_types.entry_promise); - IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false); - IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); - - ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_field_ptr, return_value); - IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize); - IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node, - usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, ptr_mask, nullptr, - AtomicRmwOp_or, AtomicOrderSeqCst); - - IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false); - IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false); - ir_build_cond_br(irb, scope, node, is_suspended_bool, suspended_block, not_suspended_block, is_comptime); - - ir_set_cursor_at_end_and_append_block(irb, suspended_block); - ir_build_unreachable(irb, scope, node); - - ir_set_cursor_at_end_and_append_block(irb, not_suspended_block); - IrInstruction *await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false); - // if we ever add null checking safety to the ptrtoint instruction, it needs to be disabled here - IrInstruction *have_await_handle = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false); - ir_build_cond_br(irb, scope, node, have_await_handle, store_awaiter_block, check_canceled_block, is_comptime); - - ir_set_cursor_at_end_and_append_block(irb, store_awaiter_block); - IrInstruction *await_handle = ir_build_int_to_ptr(irb, scope, node, promise_type_val, await_handle_addr); - ir_build_store_ptr(irb, scope, node, irb->exec->await_handle_var_ptr, await_handle); - ir_build_br(irb, scope, node, irb->exec->coro_normal_final, is_comptime); - - ir_set_cursor_at_end_and_append_block(irb, check_canceled_block); - 
IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false); - IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false); - return ir_build_cond_br(irb, scope, node, is_canceled_bool, irb->exec->coro_final_cleanup_block, irb->exec->coro_early_final, is_comptime); + zig_panic("TODO async return"); } static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, ResultLoc *result_loc) { @@ -5386,7 +5057,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo FnInline fn_inline = (builtin_fn->id == BuiltinFnIdInlineCall) ? FnInlineAlways : FnInlineNever; IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false, - fn_inline, false, nullptr, nullptr, result_loc); + fn_inline, false, nullptr, result_loc); return ir_lval_wrap(irb, scope, call, lval, result_loc); } case BuiltinFnIdNewStackCall: @@ -5417,7 +5088,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo } IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false, - FnInlineAuto, false, nullptr, new_stack, result_loc); + FnInlineAuto, false, new_stack, result_loc); return ir_lval_wrap(irb, scope, call, lval, result_loc); } case BuiltinFnIdTypeId: @@ -5722,17 +5393,12 @@ static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node } bool is_async = node->data.fn_call_expr.is_async; - IrInstruction *async_allocator = nullptr; if (is_async) { - if (node->data.fn_call_expr.async_allocator) { - async_allocator = ir_gen_node(irb, node->data.fn_call_expr.async_allocator, scope); - if (async_allocator == irb->codegen->invalid_instruction) - return async_allocator; - } + zig_panic("TODO async fn call"); } IrInstruction *fn_call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false, 
FnInlineAuto, - is_async, async_allocator, nullptr, result_loc); + is_async, nullptr, result_loc); return ir_lval_wrap(irb, scope, fn_call, lval, result_loc); } @@ -6751,22 +6417,6 @@ static IrInstruction *ir_gen_array_type(IrBuilder *irb, Scope *scope, AstNode *n } } -static IrInstruction *ir_gen_promise_type(IrBuilder *irb, Scope *scope, AstNode *node) { - assert(node->type == NodeTypePromiseType); - - AstNode *payload_type_node = node->data.promise_type.payload_type; - IrInstruction *payload_type_value = nullptr; - - if (payload_type_node != nullptr) { - payload_type_value = ir_gen_node(irb, payload_type_node, scope); - if (payload_type_value == irb->codegen->invalid_instruction) - return payload_type_value; - - } - - return ir_build_promise_type(irb, scope, node, payload_type_value); -} - static IrInstruction *ir_gen_undefined_literal(IrBuilder *irb, Scope *scope, AstNode *node) { assert(node->type == NodeTypeUndefinedLiteral); return ir_build_const_undefined(irb, scope, node); @@ -7969,87 +7619,7 @@ static IrInstruction *ir_gen_fn_proto(IrBuilder *irb, Scope *parent_scope, AstNo //return_type = nullptr; } - IrInstruction *async_allocator_type_value = nullptr; - if (node->data.fn_proto.async_allocator_type != nullptr) { - async_allocator_type_value = ir_gen_node(irb, node->data.fn_proto.async_allocator_type, parent_scope); - if (async_allocator_type_value == irb->codegen->invalid_instruction) - return irb->codegen->invalid_instruction; - } - - return ir_build_fn_proto(irb, parent_scope, node, param_types, align_value, return_type, - async_allocator_type_value, is_var_args); -} - -static IrInstruction *ir_gen_cancel_target(IrBuilder *irb, Scope *scope, AstNode *node, - IrInstruction *target_inst, bool cancel_non_suspended, bool cancel_awaited) -{ - IrBasicBlock *done_block = ir_create_basic_block(irb, scope, "CancelDone"); - IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled"); - IrBasicBlock *pre_return_block = 
ir_create_basic_block(irb, scope, "PreReturn"); - IrBasicBlock *post_return_block = ir_create_basic_block(irb, scope, "PostReturn"); - IrBasicBlock *do_cancel_block = ir_create_basic_block(irb, scope, "DoCancel"); - - IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); - IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize); - IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false); - IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001 - IrInstruction *promise_T_type_val = ir_build_const_type(irb, scope, node, - get_promise_type(irb->codegen, irb->codegen->builtin_types.entry_void)); - IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111 - IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000 - IrInstruction *await_mask = ir_build_const_usize(irb, scope, node, 0x4); // 0b100 - IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010 - - // TODO relies on Zig not re-ordering fields - IrInstruction *casted_target_inst = ir_build_ptr_cast_src(irb, scope, node, promise_T_type_val, target_inst, - false); - IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, casted_target_inst); - Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME); - IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, - atomic_state_field_name, false); - - // set the is_canceled bit - IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node, - usize_type_val, atomic_state_ptr, nullptr, is_canceled_mask, nullptr, - AtomicRmwOp_or, AtomicOrderSeqCst); - - IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false); - IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, 
is_canceled_value, zero, false); - ir_build_cond_br(irb, scope, node, is_canceled_bool, done_block, not_canceled_block, is_comptime); - - ir_set_cursor_at_end_and_append_block(irb, not_canceled_block); - IrInstruction *awaiter_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false); - IrInstruction *is_returned_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpEq, awaiter_addr, ptr_mask, false); - ir_build_cond_br(irb, scope, node, is_returned_bool, post_return_block, pre_return_block, is_comptime); - - ir_set_cursor_at_end_and_append_block(irb, post_return_block); - if (cancel_awaited) { - ir_build_br(irb, scope, node, do_cancel_block, is_comptime); - } else { - IrInstruction *is_awaited_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, await_mask, false); - IrInstruction *is_awaited_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_awaited_value, zero, false); - ir_build_cond_br(irb, scope, node, is_awaited_bool, done_block, do_cancel_block, is_comptime); - } - - ir_set_cursor_at_end_and_append_block(irb, pre_return_block); - if (cancel_awaited) { - if (cancel_non_suspended) { - ir_build_br(irb, scope, node, do_cancel_block, is_comptime); - } else { - IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false); - IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false); - ir_build_cond_br(irb, scope, node, is_suspended_bool, do_cancel_block, done_block, is_comptime); - } - } else { - ir_build_br(irb, scope, node, done_block, is_comptime); - } - - ir_set_cursor_at_end_and_append_block(irb, do_cancel_block); - ir_build_cancel(irb, scope, node, target_inst); - ir_build_br(irb, scope, node, done_block, is_comptime); - - ir_set_cursor_at_end_and_append_block(irb, done_block); - return ir_build_const_void(irb, scope, node); + return ir_build_fn_proto(irb, 
parent_scope, node, param_types, align_value, return_type, is_var_args); } static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node) { @@ -8059,57 +7629,7 @@ static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node) if (target_inst == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; - return ir_gen_cancel_target(irb, scope, node, target_inst, false, true); -} - -static IrInstruction *ir_gen_resume_target(IrBuilder *irb, Scope *scope, AstNode *node, - IrInstruction *target_inst) -{ - IrBasicBlock *done_block = ir_create_basic_block(irb, scope, "ResumeDone"); - IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled"); - IrBasicBlock *suspended_block = ir_create_basic_block(irb, scope, "IsSuspended"); - IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, scope, "IsNotSuspended"); - - IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); - IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001 - IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010 - IrInstruction *and_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, is_suspended_mask); - IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false); - IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize); - IrInstruction *promise_T_type_val = ir_build_const_type(irb, scope, node, - get_promise_type(irb->codegen, irb->codegen->builtin_types.entry_void)); - - // TODO relies on Zig not re-ordering fields - IrInstruction *casted_target_inst = ir_build_ptr_cast_src(irb, scope, node, promise_T_type_val, target_inst, - false); - IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, casted_target_inst); - Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME); - IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, 
node, coro_promise_ptr, - atomic_state_field_name, false); - - // clear the is_suspended bit - IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node, - usize_type_val, atomic_state_ptr, nullptr, and_mask, nullptr, - AtomicRmwOp_and, AtomicOrderSeqCst); - - IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false); - IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false); - ir_build_cond_br(irb, scope, node, is_canceled_bool, done_block, not_canceled_block, is_comptime); - - ir_set_cursor_at_end_and_append_block(irb, not_canceled_block); - IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false); - IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false); - ir_build_cond_br(irb, scope, node, is_suspended_bool, suspended_block, not_suspended_block, is_comptime); - - ir_set_cursor_at_end_and_append_block(irb, not_suspended_block); - ir_build_unreachable(irb, scope, node); - - ir_set_cursor_at_end_and_append_block(irb, suspended_block); - ir_build_coro_resume(irb, scope, node, target_inst); - ir_build_br(irb, scope, node, done_block, is_comptime); - - ir_set_cursor_at_end_and_append_block(irb, done_block); - return ir_build_const_void(irb, scope, node); + zig_panic("TODO ir_gen_cancel"); } static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) { @@ -8119,7 +7639,7 @@ static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) if (target_inst == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; - return ir_gen_resume_target(irb, scope, node, target_inst); + zig_panic("TODO ir_gen_resume"); } static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *node) { @@ -8129,298 +7649,13 @@ static 
IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n if (target_inst == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; - ZigFn *fn_entry = exec_fn_entry(irb->exec); - if (!fn_entry) { - add_node_error(irb->codegen, node, buf_sprintf("await outside function definition")); - return irb->codegen->invalid_instruction; - } - if (fn_entry->type_entry->data.fn.fn_type_id.cc != CallingConventionAsync) { - add_node_error(irb->codegen, node, buf_sprintf("await in non-async function")); - return irb->codegen->invalid_instruction; - } - - ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(scope); - if (scope_defer_expr) { - if (!scope_defer_expr->reported_err) { - add_node_error(irb->codegen, node, buf_sprintf("cannot await inside defer expression")); - scope_defer_expr->reported_err = true; - } - return irb->codegen->invalid_instruction; - } - - Scope *outer_scope = irb->exec->begin_scope; - - IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, target_inst); - Buf *result_ptr_field_name = buf_create_from_str(RESULT_PTR_FIELD_NAME); - IrInstruction *result_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_ptr_field_name, false); - - if (irb->codegen->have_err_ret_tracing) { - IrInstruction *err_ret_trace_ptr = ir_build_error_return_trace(irb, scope, node, IrInstructionErrorReturnTrace::NonNull); - Buf *err_ret_trace_ptr_field_name = buf_create_from_str(ERR_RET_TRACE_PTR_FIELD_NAME); - IrInstruction *err_ret_trace_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr_field_name, false); - ir_build_store_ptr(irb, scope, node, err_ret_trace_ptr_field_ptr, err_ret_trace_ptr); - } - - IrBasicBlock *already_awaited_block = ir_create_basic_block(irb, scope, "AlreadyAwaited"); - IrBasicBlock *not_awaited_block = ir_create_basic_block(irb, scope, "NotAwaited"); - IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled"); 
- IrBasicBlock *yes_suspend_block = ir_create_basic_block(irb, scope, "YesSuspend"); - IrBasicBlock *no_suspend_block = ir_create_basic_block(irb, scope, "NoSuspend"); - IrBasicBlock *merge_block = ir_create_basic_block(irb, scope, "MergeSuspend"); - IrBasicBlock *cleanup_block = ir_create_basic_block(irb, scope, "SuspendCleanup"); - IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "SuspendResume"); - IrBasicBlock *cancel_target_block = ir_create_basic_block(irb, scope, "CancelTarget"); - IrBasicBlock *do_cancel_block = ir_create_basic_block(irb, scope, "DoCancel"); - IrBasicBlock *do_defers_block = ir_create_basic_block(irb, scope, "DoDefers"); - IrBasicBlock *destroy_block = ir_create_basic_block(irb, scope, "DestroyBlock"); - IrBasicBlock *my_suspended_block = ir_create_basic_block(irb, scope, "AlreadySuspended"); - IrBasicBlock *my_not_suspended_block = ir_create_basic_block(irb, scope, "NotAlreadySuspended"); - IrBasicBlock *do_suspend_block = ir_create_basic_block(irb, scope, "DoSuspend"); - - Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME); - IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, - atomic_state_field_name, false); - - IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_promise); - IrInstruction *const_bool_false = ir_build_const_bool(irb, scope, node, false); - IrInstruction *undef = ir_build_const_undefined(irb, scope, node); - IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize); - IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); - IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111 - IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000 - IrInstruction *await_mask = ir_build_const_usize(irb, scope, node, 0x4); // 0b100 - IrInstruction 
*is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001 - IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010 - - ZigVar *result_var = ir_create_var(irb, node, scope, nullptr, - false, false, true, const_bool_false); - IrInstruction *target_promise_type = ir_build_typeof(irb, scope, node, target_inst); - IrInstruction *promise_result_type = ir_build_promise_result_type(irb, scope, node, target_promise_type); - ir_build_await_bookkeeping(irb, scope, node, promise_result_type); - IrInstruction *undef_promise_result = ir_build_implicit_cast(irb, scope, node, promise_result_type, undef, nullptr); - build_decl_var_and_init(irb, scope, node, result_var, undef_promise_result, "result", const_bool_false); - IrInstruction *my_result_var_ptr = ir_build_var_ptr(irb, scope, node, result_var); - ir_build_store_ptr(irb, scope, node, result_ptr_field_ptr, my_result_var_ptr); - IrInstruction *save_token = ir_build_coro_save(irb, scope, node, irb->exec->coro_handle); - - IrInstruction *coro_handle_addr = ir_build_ptr_to_int(irb, scope, node, irb->exec->coro_handle); - IrInstruction *mask_bits = ir_build_bin_op(irb, scope, node, IrBinOpBinOr, coro_handle_addr, await_mask, false); - IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node, - usize_type_val, atomic_state_ptr, nullptr, mask_bits, nullptr, - AtomicRmwOp_or, AtomicOrderSeqCst); - - IrInstruction *is_awaited_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, await_mask, false); - IrInstruction *is_awaited_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_awaited_value, zero, false); - ir_build_cond_br(irb, scope, node, is_awaited_bool, already_awaited_block, not_awaited_block, const_bool_false); - - ir_set_cursor_at_end_and_append_block(irb, already_awaited_block); - ir_build_unreachable(irb, scope, node); - - ir_set_cursor_at_end_and_append_block(irb, not_awaited_block); - IrInstruction *await_handle_addr = 
ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false); - IrInstruction *is_non_null = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false); - IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false); - IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false); - ir_build_cond_br(irb, scope, node, is_canceled_bool, cancel_target_block, not_canceled_block, const_bool_false); - - ir_set_cursor_at_end_and_append_block(irb, not_canceled_block); - ir_build_cond_br(irb, scope, node, is_non_null, no_suspend_block, yes_suspend_block, const_bool_false); - - ir_set_cursor_at_end_and_append_block(irb, cancel_target_block); - ir_build_cancel(irb, scope, node, target_inst); - ir_mark_gen(ir_build_br(irb, scope, node, cleanup_block, const_bool_false)); - - ir_set_cursor_at_end_and_append_block(irb, no_suspend_block); - if (irb->codegen->have_err_ret_tracing) { - Buf *err_ret_trace_field_name = buf_create_from_str(ERR_RET_TRACE_FIELD_NAME); - IrInstruction *src_err_ret_trace_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_field_name, false); - IrInstruction *dest_err_ret_trace_ptr = ir_build_error_return_trace(irb, scope, node, IrInstructionErrorReturnTrace::NonNull); - ir_build_merge_err_ret_traces(irb, scope, node, coro_promise_ptr, src_err_ret_trace_ptr, dest_err_ret_trace_ptr); - } - Buf *result_field_name = buf_create_from_str(RESULT_FIELD_NAME); - IrInstruction *promise_result_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name, false); - // If the type of the result handle_is_ptr then this does not actually perform a load. But we need it to, - // because we're about to destroy the memory. So we store it into our result variable. 
- IrInstruction *no_suspend_result = ir_build_load_ptr(irb, scope, node, promise_result_ptr); - ir_build_store_ptr(irb, scope, node, my_result_var_ptr, no_suspend_result); - ir_build_cancel(irb, scope, node, target_inst); - ir_build_br(irb, scope, node, merge_block, const_bool_false); - - - ir_set_cursor_at_end_and_append_block(irb, yes_suspend_block); - IrInstruction *my_prev_atomic_value = ir_build_atomic_rmw(irb, scope, node, - usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, is_suspended_mask, nullptr, - AtomicRmwOp_or, AtomicOrderSeqCst); - IrInstruction *my_is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, my_prev_atomic_value, is_suspended_mask, false); - IrInstruction *my_is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, my_is_suspended_value, zero, false); - ir_build_cond_br(irb, scope, node, my_is_suspended_bool, my_suspended_block, my_not_suspended_block, const_bool_false); - - ir_set_cursor_at_end_and_append_block(irb, my_suspended_block); - ir_build_unreachable(irb, scope, node); - - ir_set_cursor_at_end_and_append_block(irb, my_not_suspended_block); - IrInstruction *my_is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, my_prev_atomic_value, is_canceled_mask, false); - IrInstruction *my_is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, my_is_canceled_value, zero, false); - ir_build_cond_br(irb, scope, node, my_is_canceled_bool, cleanup_block, do_suspend_block, const_bool_false); - - ir_set_cursor_at_end_and_append_block(irb, do_suspend_block); - IrInstruction *suspend_code = ir_build_coro_suspend(irb, scope, node, save_token, const_bool_false); - - IrInstructionSwitchBrCase *cases = allocate(2); - cases[0].value = ir_build_const_u8(irb, scope, node, 0); - cases[0].block = resume_block; - cases[1].value = ir_build_const_u8(irb, scope, node, 1); - cases[1].block = destroy_block; - ir_build_switch_br(irb, scope, node, suspend_code, irb->exec->coro_suspend_block, - 2, 
cases, const_bool_false, nullptr); - - ir_set_cursor_at_end_and_append_block(irb, destroy_block); - ir_gen_cancel_target(irb, scope, node, target_inst, false, true); - ir_mark_gen(ir_build_br(irb, scope, node, cleanup_block, const_bool_false)); - - ir_set_cursor_at_end_and_append_block(irb, cleanup_block); - IrInstruction *my_mask_bits = ir_build_bin_op(irb, scope, node, IrBinOpBinOr, ptr_mask, is_canceled_mask, false); - IrInstruction *b_my_prev_atomic_value = ir_build_atomic_rmw(irb, scope, node, - usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, my_mask_bits, nullptr, - AtomicRmwOp_or, AtomicOrderSeqCst); - IrInstruction *my_await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, b_my_prev_atomic_value, ptr_mask, false); - IrInstruction *dont_have_my_await_handle = ir_build_bin_op(irb, scope, node, IrBinOpCmpEq, my_await_handle_addr, zero, false); - IrInstruction *dont_destroy_ourselves = ir_build_bin_op(irb, scope, node, IrBinOpBoolAnd, dont_have_my_await_handle, is_canceled_bool, false); - ir_build_cond_br(irb, scope, node, dont_have_my_await_handle, do_defers_block, do_cancel_block, const_bool_false); - - ir_set_cursor_at_end_and_append_block(irb, do_cancel_block); - IrInstruction *my_await_handle = ir_build_int_to_ptr(irb, scope, node, promise_type_val, my_await_handle_addr); - ir_gen_cancel_target(irb, scope, node, my_await_handle, true, false); - ir_mark_gen(ir_build_br(irb, scope, node, do_defers_block, const_bool_false)); - - ir_set_cursor_at_end_and_append_block(irb, do_defers_block); - ir_gen_defers_for_block(irb, scope, outer_scope, true); - ir_mark_gen(ir_build_cond_br(irb, scope, node, dont_destroy_ourselves, irb->exec->coro_early_final, irb->exec->coro_final_cleanup_block, const_bool_false)); - - ir_set_cursor_at_end_and_append_block(irb, resume_block); - ir_build_br(irb, scope, node, merge_block, const_bool_false); - - ir_set_cursor_at_end_and_append_block(irb, merge_block); - return ir_build_load_ptr(irb, scope, node, 
my_result_var_ptr); + zig_panic("TODO ir_gen_await_expr"); } static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNode *node) { assert(node->type == NodeTypeSuspend); - ZigFn *fn_entry = exec_fn_entry(irb->exec); - if (!fn_entry) { - add_node_error(irb->codegen, node, buf_sprintf("suspend outside function definition")); - return irb->codegen->invalid_instruction; - } - if (fn_entry->type_entry->data.fn.fn_type_id.cc != CallingConventionAsync) { - add_node_error(irb->codegen, node, buf_sprintf("suspend in non-async function")); - return irb->codegen->invalid_instruction; - } - - ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(parent_scope); - if (scope_defer_expr) { - if (!scope_defer_expr->reported_err) { - ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside defer expression")); - add_error_note(irb->codegen, msg, scope_defer_expr->base.source_node, buf_sprintf("defer here")); - scope_defer_expr->reported_err = true; - } - return irb->codegen->invalid_instruction; - } - ScopeSuspend *existing_suspend_scope = get_scope_suspend(parent_scope); - if (existing_suspend_scope) { - if (!existing_suspend_scope->reported_err) { - ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside suspend block")); - add_error_note(irb->codegen, msg, existing_suspend_scope->base.source_node, buf_sprintf("other suspend block here")); - existing_suspend_scope->reported_err = true; - } - return irb->codegen->invalid_instruction; - } - - Scope *outer_scope = irb->exec->begin_scope; - - IrBasicBlock *cleanup_block = ir_create_basic_block(irb, parent_scope, "SuspendCleanup"); - IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume"); - IrBasicBlock *suspended_block = ir_create_basic_block(irb, parent_scope, "AlreadySuspended"); - IrBasicBlock *canceled_block = ir_create_basic_block(irb, parent_scope, "IsCanceled"); - IrBasicBlock *not_canceled_block = 
ir_create_basic_block(irb, parent_scope, "NotCanceled"); - IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, parent_scope, "NotAlreadySuspended"); - IrBasicBlock *cancel_awaiter_block = ir_create_basic_block(irb, parent_scope, "CancelAwaiter"); - - IrInstruction *promise_type_val = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_promise); - IrInstruction *const_bool_true = ir_build_const_bool(irb, parent_scope, node, true); - IrInstruction *const_bool_false = ir_build_const_bool(irb, parent_scope, node, false); - IrInstruction *usize_type_val = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_usize); - IrInstruction *is_canceled_mask = ir_build_const_usize(irb, parent_scope, node, 0x1); // 0b001 - IrInstruction *is_suspended_mask = ir_build_const_usize(irb, parent_scope, node, 0x2); // 0b010 - IrInstruction *zero = ir_build_const_usize(irb, parent_scope, node, 0); - IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, parent_scope, node, 0x7); // 0b111 - IrInstruction *ptr_mask = ir_build_un_op(irb, parent_scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000 - - IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, parent_scope, node, - usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, is_suspended_mask, nullptr, - AtomicRmwOp_or, AtomicOrderSeqCst); - - IrInstruction *is_canceled_value = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false); - IrInstruction *is_canceled_bool = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false); - ir_build_cond_br(irb, parent_scope, node, is_canceled_bool, canceled_block, not_canceled_block, const_bool_false); - - ir_set_cursor_at_end_and_append_block(irb, canceled_block); - IrInstruction *await_handle_addr = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false); - IrInstruction *have_await_handle = 
ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false); - IrBasicBlock *post_canceled_block = irb->current_basic_block; - ir_build_cond_br(irb, parent_scope, node, have_await_handle, cancel_awaiter_block, cleanup_block, const_bool_false); - - ir_set_cursor_at_end_and_append_block(irb, cancel_awaiter_block); - IrInstruction *await_handle = ir_build_int_to_ptr(irb, parent_scope, node, promise_type_val, await_handle_addr); - ir_gen_cancel_target(irb, parent_scope, node, await_handle, true, false); - IrBasicBlock *post_cancel_awaiter_block = irb->current_basic_block; - ir_build_br(irb, parent_scope, node, cleanup_block, const_bool_false); - - ir_set_cursor_at_end_and_append_block(irb, not_canceled_block); - IrInstruction *is_suspended_value = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false); - IrInstruction *is_suspended_bool = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false); - ir_build_cond_br(irb, parent_scope, node, is_suspended_bool, suspended_block, not_suspended_block, const_bool_false); - - ir_set_cursor_at_end_and_append_block(irb, suspended_block); - ir_build_unreachable(irb, parent_scope, node); - - ir_set_cursor_at_end_and_append_block(irb, not_suspended_block); - IrInstruction *suspend_code; - if (node->data.suspend.block == nullptr) { - suspend_code = ir_build_coro_suspend(irb, parent_scope, node, nullptr, const_bool_false); - } else { - Scope *child_scope; - ScopeSuspend *suspend_scope = create_suspend_scope(irb->codegen, node, parent_scope); - suspend_scope->resume_block = resume_block; - child_scope = &suspend_scope->base; - IrInstruction *save_token = ir_build_coro_save(irb, child_scope, node, irb->exec->coro_handle); - ir_gen_node(irb, node->data.suspend.block, child_scope); - suspend_code = ir_mark_gen(ir_build_coro_suspend(irb, parent_scope, node, save_token, const_bool_false)); - } - - IrInstructionSwitchBrCase *cases = 
allocate(2); - cases[0].value = ir_mark_gen(ir_build_const_u8(irb, parent_scope, node, 0)); - cases[0].block = resume_block; - cases[1].value = ir_mark_gen(ir_build_const_u8(irb, parent_scope, node, 1)); - cases[1].block = canceled_block; - IrInstructionSwitchBr *switch_br = ir_build_switch_br(irb, parent_scope, node, suspend_code, - irb->exec->coro_suspend_block, 2, cases, const_bool_false, nullptr); - ir_mark_gen(&switch_br->base); - - ir_set_cursor_at_end_and_append_block(irb, cleanup_block); - IrBasicBlock **incoming_blocks = allocate(2); - IrInstruction **incoming_values = allocate(2); - incoming_blocks[0] = post_canceled_block; - incoming_values[0] = const_bool_true; - incoming_blocks[1] = post_cancel_awaiter_block; - incoming_values[1] = const_bool_false; - IrInstruction *destroy_ourselves = ir_build_phi(irb, parent_scope, node, 2, incoming_blocks, incoming_values, - nullptr); - ir_gen_defers_for_block(irb, parent_scope, outer_scope, true); - ir_mark_gen(ir_build_cond_br(irb, parent_scope, node, destroy_ourselves, irb->exec->coro_final_cleanup_block, irb->exec->coro_early_final, const_bool_false)); - - ir_set_cursor_at_end_and_append_block(irb, resume_block); - return ir_mark_gen(ir_build_const_void(irb, parent_scope, node)); + zig_panic("TODO ir_gen_suspend"); } static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scope, @@ -8512,8 +7747,6 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop return ir_lval_wrap(irb, scope, ir_gen_array_type(irb, scope, node), lval, result_loc); case NodeTypePointerType: return ir_lval_wrap(irb, scope, ir_gen_pointer_type(irb, scope, node), lval, result_loc); - case NodeTypePromiseType: - return ir_lval_wrap(irb, scope, ir_gen_promise_type(irb, scope, node), lval, result_loc); case NodeTypeStringLiteral: return ir_lval_wrap(irb, scope, ir_gen_string_literal(irb, scope, node), lval, result_loc); case NodeTypeUndefinedLiteral: @@ -8624,105 +7857,8 @@ bool ir_gen(CodeGen 
*codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec ZigFn *fn_entry = exec_fn_entry(irb->exec); bool is_async = fn_entry != nullptr && fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync; - IrInstruction *coro_id; - IrInstruction *u8_ptr_type; - IrInstruction *const_bool_false; - IrInstruction *coro_promise_ptr; - IrInstruction *err_ret_trace_ptr; - ZigType *return_type; - Buf *result_ptr_field_name; - ZigVar *coro_size_var; if (is_async) { - // create the coro promise - Scope *coro_scope = create_coro_prelude_scope(irb->codegen, node, scope); - const_bool_false = ir_build_const_bool(irb, coro_scope, node, false); - ZigVar *promise_var = ir_create_var(irb, node, coro_scope, nullptr, false, false, true, const_bool_false); - - return_type = fn_entry->type_entry->data.fn.fn_type_id.return_type; - IrInstruction *undef = ir_build_const_undefined(irb, coro_scope, node); - // TODO mark this var decl as "no safety" e.g. disable initializing the undef value to 0xaa - ZigType *coro_frame_type = get_promise_frame_type(irb->codegen, return_type); - IrInstruction *coro_frame_type_value = ir_build_const_type(irb, coro_scope, node, coro_frame_type); - IrInstruction *undef_coro_frame = ir_build_implicit_cast(irb, coro_scope, node, coro_frame_type_value, undef, nullptr); - build_decl_var_and_init(irb, coro_scope, node, promise_var, undef_coro_frame, "promise", const_bool_false); - coro_promise_ptr = ir_build_var_ptr(irb, coro_scope, node, promise_var); - - ZigVar *await_handle_var = ir_create_var(irb, node, coro_scope, nullptr, false, false, true, const_bool_false); - IrInstruction *null_value = ir_build_const_null(irb, coro_scope, node); - IrInstruction *await_handle_type_val = ir_build_const_type(irb, coro_scope, node, - get_optional_type(irb->codegen, irb->codegen->builtin_types.entry_promise)); - IrInstruction *null_await_handle = ir_build_implicit_cast(irb, coro_scope, node, await_handle_type_val, null_value, nullptr); - build_decl_var_and_init(irb, 
coro_scope, node, await_handle_var, null_await_handle, "await_handle", const_bool_false); - irb->exec->await_handle_var_ptr = ir_build_var_ptr(irb, coro_scope, node, await_handle_var); - - u8_ptr_type = ir_build_const_type(irb, coro_scope, node, - get_pointer_to_type(irb->codegen, irb->codegen->builtin_types.entry_u8, false)); - IrInstruction *promise_as_u8_ptr = ir_build_ptr_cast_src(irb, coro_scope, node, u8_ptr_type, - coro_promise_ptr, false); - coro_id = ir_build_coro_id(irb, coro_scope, node, promise_as_u8_ptr); - coro_size_var = ir_create_var(irb, node, coro_scope, nullptr, false, false, true, const_bool_false); - IrInstruction *coro_size = ir_build_coro_size(irb, coro_scope, node); - build_decl_var_and_init(irb, coro_scope, node, coro_size_var, coro_size, "coro_size", const_bool_false); - IrInstruction *implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, coro_scope, node, - ImplicitAllocatorIdArg); - irb->exec->coro_allocator_var = ir_create_var(irb, node, coro_scope, nullptr, true, true, true, const_bool_false); - build_decl_var_and_init(irb, coro_scope, node, irb->exec->coro_allocator_var, implicit_allocator_ptr, - "allocator", const_bool_false); - Buf *realloc_field_name = buf_create_from_str(ASYNC_REALLOC_FIELD_NAME); - IrInstruction *realloc_fn_ptr = ir_build_field_ptr(irb, coro_scope, node, implicit_allocator_ptr, realloc_field_name, false); - IrInstruction *realloc_fn = ir_build_load_ptr(irb, coro_scope, node, realloc_fn_ptr); - IrInstruction *maybe_coro_mem_ptr = ir_build_coro_alloc_helper(irb, coro_scope, node, realloc_fn, coro_size); - IrInstruction *alloc_result_is_ok = ir_build_test_nonnull(irb, coro_scope, node, maybe_coro_mem_ptr); - IrBasicBlock *alloc_err_block = ir_create_basic_block(irb, coro_scope, "AllocError"); - IrBasicBlock *alloc_ok_block = ir_create_basic_block(irb, coro_scope, "AllocOk"); - ir_build_cond_br(irb, coro_scope, node, alloc_result_is_ok, alloc_ok_block, alloc_err_block, const_bool_false); - - 
ir_set_cursor_at_end_and_append_block(irb, alloc_err_block); - // we can return undefined here, because the caller passes a pointer to the error struct field - // in the error union result, and we populate it in case of allocation failure. - ir_build_return(irb, coro_scope, node, undef); - - ir_set_cursor_at_end_and_append_block(irb, alloc_ok_block); - IrInstruction *coro_mem_ptr = ir_build_ptr_cast_src(irb, coro_scope, node, u8_ptr_type, maybe_coro_mem_ptr, - false); - irb->exec->coro_handle = ir_build_coro_begin(irb, coro_scope, node, coro_id, coro_mem_ptr); - - Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME); - irb->exec->atomic_state_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, - atomic_state_field_name, false); - IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); - ir_build_store_ptr(irb, scope, node, irb->exec->atomic_state_field_ptr, zero); - Buf *result_field_name = buf_create_from_str(RESULT_FIELD_NAME); - irb->exec->coro_result_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name, false); - result_ptr_field_name = buf_create_from_str(RESULT_PTR_FIELD_NAME); - irb->exec->coro_result_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_ptr_field_name, false); - ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_ptr_field_ptr, irb->exec->coro_result_field_ptr); - if (irb->codegen->have_err_ret_tracing) { - // initialize the error return trace - Buf *return_addresses_field_name = buf_create_from_str(RETURN_ADDRESSES_FIELD_NAME); - IrInstruction *return_addresses_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, return_addresses_field_name, false); - - Buf *err_ret_trace_field_name = buf_create_from_str(ERR_RET_TRACE_FIELD_NAME); - err_ret_trace_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_field_name, false); - ir_build_mark_err_ret_trace_ptr(irb, scope, node, err_ret_trace_ptr); - - // 
coordinate with builtin.zig - Buf *index_name = buf_create_from_str("index"); - IrInstruction *index_ptr = ir_build_field_ptr(irb, scope, node, err_ret_trace_ptr, index_name, false); - ir_build_store_ptr(irb, scope, node, index_ptr, zero); - - Buf *instruction_addresses_name = buf_create_from_str("instruction_addresses"); - IrInstruction *addrs_slice_ptr = ir_build_field_ptr(irb, scope, node, err_ret_trace_ptr, instruction_addresses_name, false); - - IrInstruction *slice_value = ir_build_slice_src(irb, scope, node, return_addresses_ptr, zero, nullptr, false, no_result_loc()); - ir_build_store_ptr(irb, scope, node, addrs_slice_ptr, slice_value); - } - - - irb->exec->coro_early_final = ir_create_basic_block(irb, scope, "CoroEarlyFinal"); - irb->exec->coro_normal_final = ir_create_basic_block(irb, scope, "CoroNormalFinal"); - irb->exec->coro_suspend_block = ir_create_basic_block(irb, scope, "Suspend"); - irb->exec->coro_final_cleanup_block = ir_create_basic_block(irb, scope, "FinalCleanup"); + zig_panic("ir_gen async fn"); } IrInstruction *result = ir_gen_node_extra(irb, node, scope, LValNone, nullptr); @@ -8735,117 +7871,6 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec ir_gen_async_return(irb, scope, result->source_node, result, true); } - if (is_async) { - IrBasicBlock *invalid_resume_block = ir_create_basic_block(irb, scope, "InvalidResume"); - IrBasicBlock *check_free_block = ir_create_basic_block(irb, scope, "CheckFree"); - - ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_early_final); - IrInstruction *const_bool_true = ir_build_const_bool(irb, scope, node, true); - IrInstruction *suspend_code = ir_build_coro_suspend(irb, scope, node, nullptr, const_bool_true); - IrInstructionSwitchBrCase *cases = allocate(2); - cases[0].value = ir_build_const_u8(irb, scope, node, 0); - cases[0].block = invalid_resume_block; - cases[1].value = ir_build_const_u8(irb, scope, node, 1); - cases[1].block = 
irb->exec->coro_final_cleanup_block; - ir_build_switch_br(irb, scope, node, suspend_code, irb->exec->coro_suspend_block, 2, cases, const_bool_false, nullptr); - - ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_suspend_block); - ir_build_coro_end(irb, scope, node); - ir_build_return(irb, scope, node, irb->exec->coro_handle); - - ir_set_cursor_at_end_and_append_block(irb, invalid_resume_block); - ir_build_unreachable(irb, scope, node); - - ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_normal_final); - if (type_has_bits(return_type)) { - IrInstruction *u8_ptr_type_unknown_len = ir_build_const_type(irb, scope, node, - get_pointer_to_type_extra(irb->codegen, irb->codegen->builtin_types.entry_u8, - false, false, PtrLenUnknown, 0, 0, 0, false)); - IrInstruction *result_ptr = ir_build_load_ptr(irb, scope, node, irb->exec->coro_result_ptr_field_ptr); - IrInstruction *result_ptr_as_u8_ptr = ir_build_ptr_cast_src(irb, scope, node, u8_ptr_type_unknown_len, - result_ptr, false); - IrInstruction *return_value_ptr_as_u8_ptr = ir_build_ptr_cast_src(irb, scope, node, - u8_ptr_type_unknown_len, irb->exec->coro_result_field_ptr, false); - IrInstruction *return_type_inst = ir_build_const_type(irb, scope, node, - fn_entry->type_entry->data.fn.fn_type_id.return_type); - IrInstruction *size_of_ret_val = ir_build_size_of(irb, scope, node, return_type_inst); - ir_build_memcpy(irb, scope, node, result_ptr_as_u8_ptr, return_value_ptr_as_u8_ptr, size_of_ret_val); - } - if (irb->codegen->have_err_ret_tracing) { - Buf *err_ret_trace_ptr_field_name = buf_create_from_str(ERR_RET_TRACE_PTR_FIELD_NAME); - IrInstruction *err_ret_trace_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr_field_name, false); - IrInstruction *dest_err_ret_trace_ptr = ir_build_load_ptr(irb, scope, node, err_ret_trace_ptr_field_ptr); - ir_build_merge_err_ret_traces(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr, dest_err_ret_trace_ptr); - } - // Before 
we destroy the coroutine frame, we need to load the target promise into - // a register or local variable which does not get spilled into the frame, - // otherwise llvm tries to access memory inside the destroyed frame. - IrInstruction *unwrapped_await_handle_ptr = ir_build_optional_unwrap_ptr(irb, scope, node, - irb->exec->await_handle_var_ptr, false, false); - IrInstruction *await_handle_in_block = ir_build_load_ptr(irb, scope, node, unwrapped_await_handle_ptr); - ir_build_br(irb, scope, node, check_free_block, const_bool_false); - - ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_final_cleanup_block); - ir_build_br(irb, scope, node, check_free_block, const_bool_false); - - ir_set_cursor_at_end_and_append_block(irb, check_free_block); - IrBasicBlock **incoming_blocks = allocate(2); - IrInstruction **incoming_values = allocate(2); - incoming_blocks[0] = irb->exec->coro_final_cleanup_block; - incoming_values[0] = const_bool_false; - incoming_blocks[1] = irb->exec->coro_normal_final; - incoming_values[1] = const_bool_true; - IrInstruction *resume_awaiter = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values, nullptr); - - IrBasicBlock **merge_incoming_blocks = allocate(2); - IrInstruction **merge_incoming_values = allocate(2); - merge_incoming_blocks[0] = irb->exec->coro_final_cleanup_block; - merge_incoming_values[0] = ir_build_const_undefined(irb, scope, node); - merge_incoming_blocks[1] = irb->exec->coro_normal_final; - merge_incoming_values[1] = await_handle_in_block; - IrInstruction *awaiter_handle = ir_build_phi(irb, scope, node, 2, merge_incoming_blocks, merge_incoming_values, nullptr); - - Buf *shrink_field_name = buf_create_from_str(ASYNC_SHRINK_FIELD_NAME); - IrInstruction *implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, scope, node, - ImplicitAllocatorIdLocalVar); - IrInstruction *shrink_fn_ptr = ir_build_field_ptr(irb, scope, node, implicit_allocator_ptr, shrink_field_name, false); - IrInstruction *shrink_fn = 
ir_build_load_ptr(irb, scope, node, shrink_fn_ptr); - IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); - IrInstruction *coro_mem_ptr_maybe = ir_build_coro_free(irb, scope, node, coro_id, irb->exec->coro_handle); - IrInstruction *u8_ptr_type_unknown_len = ir_build_const_type(irb, scope, node, - get_pointer_to_type_extra(irb->codegen, irb->codegen->builtin_types.entry_u8, - false, false, PtrLenUnknown, 0, 0, 0, false)); - IrInstruction *coro_mem_ptr = ir_build_ptr_cast_src(irb, scope, node, u8_ptr_type_unknown_len, - coro_mem_ptr_maybe, false); - IrInstruction *coro_mem_ptr_ref = ir_build_ref(irb, scope, node, coro_mem_ptr, true, false); - IrInstruction *coro_size_ptr = ir_build_var_ptr(irb, scope, node, coro_size_var); - IrInstruction *coro_size = ir_build_load_ptr(irb, scope, node, coro_size_ptr); - IrInstruction *mem_slice = ir_build_slice_src(irb, scope, node, coro_mem_ptr_ref, zero, coro_size, false, - no_result_loc()); - size_t arg_count = 5; - IrInstruction **args = allocate(arg_count); - args[0] = implicit_allocator_ptr; // self - args[1] = mem_slice; // old_mem - args[2] = ir_build_const_usize(irb, scope, node, 8); // old_align - // TODO: intentional memory leak here. If this is set to 0 then there is an issue where a coroutine - // calls the function and it frees its own stack frame, but then the return value is a slice, which - // is implemented as an sret struct. writing to the return pointer causes invalid memory write. - // We could work around it by having a global helper function which has a void return type - // and calling that instead. But instead this hack will suffice until I rework coroutines to be - // non-allocating. Basically coroutines are not supported right now until they are reworked. 
- args[3] = ir_build_const_usize(irb, scope, node, 1); // new_size - args[4] = ir_build_const_usize(irb, scope, node, 1); // new_align - ir_build_call_src(irb, scope, node, nullptr, shrink_fn, arg_count, args, false, FnInlineAuto, false, nullptr, - nullptr, no_result_loc()); - - IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "Resume"); - ir_build_cond_br(irb, scope, node, resume_awaiter, resume_block, irb->exec->coro_suspend_block, const_bool_false); - - ir_set_cursor_at_end_and_append_block(irb, resume_block); - ir_gen_resume_target(irb, scope, node, awaiter_handle); - ir_build_br(irb, scope, node, irb->exec->coro_suspend_block, const_bool_false); - } - return true; } @@ -10189,12 +9214,6 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, ZigType *wanted return result; } - if (wanted_type == ira->codegen->builtin_types.entry_promise && - actual_type->id == ZigTypeIdPromise) - { - return result; - } - // fn if (wanted_type->id == ZigTypeIdFn && actual_type->id == ZigTypeIdFn) @@ -10229,20 +9248,6 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, ZigType *wanted return result; } } - if (!wanted_type->data.fn.is_generic && wanted_type->data.fn.fn_type_id.cc == CallingConventionAsync) { - ConstCastOnly child = types_match_const_cast_only(ira, - actual_type->data.fn.fn_type_id.async_allocator_type, - wanted_type->data.fn.fn_type_id.async_allocator_type, - source_node, false); - if (child.id == ConstCastResultIdInvalid) - return child; - if (child.id != ConstCastResultIdOk) { - result.id = ConstCastResultIdAsyncAllocatorType; - result.data.async_allocator_type = allocate_nonzero(1); - *result.data.async_allocator_type = child; - return result; - } - } if (wanted_type->data.fn.fn_type_id.param_count != actual_type->data.fn.fn_type_id.param_count) { result.id = ConstCastResultIdFnArgCount; return result; @@ -12559,12 +11564,10 @@ static IrInstruction *ir_analyze_int_to_c_ptr(IrAnalyze *ira, IrInstruction *sou static bool 
is_pointery_and_elem_is_not_pointery(ZigType *ty) { if (ty->id == ZigTypeIdPointer) return ty->data.pointer.child_type->id != ZigTypeIdPointer; if (ty->id == ZigTypeIdFn) return true; - if (ty->id == ZigTypeIdPromise) return true; if (ty->id == ZigTypeIdOptional) { ZigType *ptr_ty = ty->data.maybe.child_type; if (ptr_ty->id == ZigTypeIdPointer) return ptr_ty->data.pointer.child_type->id != ZigTypeIdPointer; if (ptr_ty->id == ZigTypeIdFn) return true; - if (ptr_ty->id == ZigTypeIdPromise) return true; } return false; } @@ -13640,7 +12643,6 @@ static IrInstruction *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp * case ZigTypeIdOpaque: case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: - case ZigTypeIdPromise: case ZigTypeIdEnum: case ZigTypeIdEnumLiteral: operator_allowed = is_equality_cmp; @@ -15021,7 +14023,6 @@ static IrInstruction *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructio case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: case ZigTypeIdOpaque: - case ZigTypeIdPromise: ir_add_error(ira, target, buf_sprintf("invalid export target '%s'", buf_ptr(&type_value->name))); break; @@ -15045,7 +14046,6 @@ static IrInstruction *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructio case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: case ZigTypeIdOpaque: - case ZigTypeIdPromise: case ZigTypeIdEnumLiteral: ir_add_error(ira, target, buf_sprintf("invalid export target type '%s'", buf_ptr(&target->value.type->name))); @@ -15124,42 +14124,6 @@ static IrInstruction *ir_analyze_instruction_error_union(IrAnalyze *ira, return ir_const_type(ira, &instruction->base, result_type); } -IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_instr, ImplicitAllocatorId id) { - ZigFn *parent_fn_entry = exec_fn_entry(ira->new_irb.exec); - if (parent_fn_entry == nullptr) { - ir_add_error(ira, source_instr, buf_sprintf("no implicit allocator available")); - return ira->codegen->invalid_instruction; - } - - FnTypeId *parent_fn_type = 
&parent_fn_entry->type_entry->data.fn.fn_type_id; - if (parent_fn_type->cc != CallingConventionAsync) { - ir_add_error(ira, source_instr, buf_sprintf("async function call from non-async caller requires allocator parameter")); - return ira->codegen->invalid_instruction; - } - - assert(parent_fn_type->async_allocator_type != nullptr); - - switch (id) { - case ImplicitAllocatorIdArg: - { - IrInstruction *result = ir_build_get_implicit_allocator(&ira->new_irb, source_instr->scope, - source_instr->source_node, ImplicitAllocatorIdArg); - result->value.type = parent_fn_type->async_allocator_type; - return result; - } - case ImplicitAllocatorIdLocalVar: - { - ZigVar *coro_allocator_var = ira->old_irb.exec->coro_allocator_var; - assert(coro_allocator_var != nullptr); - IrInstruction *var_ptr_inst = ir_get_var_ptr(ira, source_instr, coro_allocator_var); - IrInstruction *result = ir_get_deref(ira, source_instr, var_ptr_inst, nullptr); - assert(result->value.type != nullptr); - return result; - } - } - zig_unreachable(); -} - static IrInstruction *ir_analyze_alloca(IrAnalyze *ira, IrInstruction *source_inst, ZigType *var_type, uint32_t align, const char *name_hint, bool force_comptime) { @@ -15589,50 +14553,6 @@ static IrInstruction *ir_analyze_instruction_reset_result(IrAnalyze *ira, IrInst return ir_const_void(ira, &instruction->base); } -static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction, ZigFn *fn_entry, - ZigType *fn_type, IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count, - IrInstruction *async_allocator_inst) -{ - Buf *realloc_field_name = buf_create_from_str(ASYNC_REALLOC_FIELD_NAME); - ir_assert(async_allocator_inst->value.type->id == ZigTypeIdPointer, &call_instruction->base); - ZigType *container_type = async_allocator_inst->value.type->data.pointer.child_type; - IrInstruction *field_ptr_inst = ir_analyze_container_field_ptr(ira, realloc_field_name, &call_instruction->base, - async_allocator_inst, 
container_type, false); - if (type_is_invalid(field_ptr_inst->value.type)) { - return ira->codegen->invalid_instruction; - } - ZigType *ptr_to_realloc_fn_type = field_ptr_inst->value.type; - ir_assert(ptr_to_realloc_fn_type->id == ZigTypeIdPointer, &call_instruction->base); - - ZigType *realloc_fn_type = ptr_to_realloc_fn_type->data.pointer.child_type; - if (realloc_fn_type->id != ZigTypeIdFn) { - ir_add_error(ira, &call_instruction->base, - buf_sprintf("expected reallocation function, found '%s'", buf_ptr(&realloc_fn_type->name))); - return ira->codegen->invalid_instruction; - } - - ZigType *realloc_fn_return_type = realloc_fn_type->data.fn.fn_type_id.return_type; - if (realloc_fn_return_type->id != ZigTypeIdErrorUnion) { - ir_add_error(ira, fn_ref, - buf_sprintf("expected allocation function to return error union, but it returns '%s'", buf_ptr(&realloc_fn_return_type->name))); - return ira->codegen->invalid_instruction; - } - ZigType *alloc_fn_error_set_type = realloc_fn_return_type->data.error_union.err_set_type; - ZigType *return_type = fn_type->data.fn.fn_type_id.return_type; - ZigType *promise_type = get_promise_type(ira->codegen, return_type); - ZigType *async_return_type = get_error_union_type(ira->codegen, alloc_fn_error_set_type, promise_type); - - IrInstruction *result_loc = ir_resolve_result(ira, &call_instruction->base, no_result_loc(), - async_return_type, nullptr, true, true); - if (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)) { - return result_loc; - } - - return ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, arg_count, - casted_args, FnInlineAuto, true, async_allocator_inst, nullptr, result_loc, - async_return_type); -} - static bool ir_analyze_fn_call_inline_arg(IrAnalyze *ira, AstNode *fn_proto_node, IrInstruction *arg, Scope **exec_scope, size_t *next_proto_i) { @@ -16330,32 +15250,8 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c break; } } - IrInstruction 
*async_allocator_inst = nullptr; if (call_instruction->is_async) { - AstNode *async_allocator_type_node = fn_proto_node->data.fn_proto.async_allocator_type; - if (async_allocator_type_node != nullptr) { - ZigType *async_allocator_type = ir_analyze_type_expr(ira, impl_fn->child_scope, async_allocator_type_node); - if (type_is_invalid(async_allocator_type)) - return ira->codegen->invalid_instruction; - inst_fn_type_id.async_allocator_type = async_allocator_type; - } - IrInstruction *uncasted_async_allocator_inst; - if (call_instruction->async_allocator == nullptr) { - uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base, - ImplicitAllocatorIdLocalVar); - if (type_is_invalid(uncasted_async_allocator_inst->value.type)) - return ira->codegen->invalid_instruction; - } else { - uncasted_async_allocator_inst = call_instruction->async_allocator->child; - if (type_is_invalid(uncasted_async_allocator_inst->value.type)) - return ira->codegen->invalid_instruction; - } - if (inst_fn_type_id.async_allocator_type == nullptr) { - inst_fn_type_id.async_allocator_type = uncasted_async_allocator_inst->value.type; - } - async_allocator_inst = ir_implicit_cast(ira, uncasted_async_allocator_inst, inst_fn_type_id.async_allocator_type); - if (type_is_invalid(async_allocator_inst->value.type)) - return ira->codegen->invalid_instruction; + zig_panic("TODO async call"); } auto existing_entry = ira->codegen->generic_table.put_unique(generic_id, impl_fn); @@ -16398,15 +15294,12 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c size_t impl_param_count = impl_fn_type_id->param_count; if (call_instruction->is_async) { - IrInstruction *result = ir_analyze_async_call(ira, call_instruction, impl_fn, impl_fn->type_entry, - fn_ref, casted_args, impl_param_count, async_allocator_inst); - return ir_finish_anal(ira, result); + zig_panic("TODO async call"); } - assert(async_allocator_inst == nullptr); IrInstruction *new_call_instruction = 
ir_build_call_gen(ira, &call_instruction->base, impl_fn, nullptr, impl_param_count, casted_args, fn_inline, - call_instruction->is_async, nullptr, casted_new_stack, result_loc, + call_instruction->is_async, casted_new_stack, result_loc, impl_fn_type_id->return_type); return ir_finish_anal(ira, new_call_instruction); @@ -16474,25 +15367,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c return ira->codegen->invalid_instruction; if (call_instruction->is_async) { - IrInstruction *uncasted_async_allocator_inst; - if (call_instruction->async_allocator == nullptr) { - uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base, - ImplicitAllocatorIdLocalVar); - if (type_is_invalid(uncasted_async_allocator_inst->value.type)) - return ira->codegen->invalid_instruction; - } else { - uncasted_async_allocator_inst = call_instruction->async_allocator->child; - if (type_is_invalid(uncasted_async_allocator_inst->value.type)) - return ira->codegen->invalid_instruction; - - } - IrInstruction *async_allocator_inst = ir_implicit_cast(ira, uncasted_async_allocator_inst, fn_type_id->async_allocator_type); - if (type_is_invalid(async_allocator_inst->value.type)) - return ira->codegen->invalid_instruction; - - IrInstruction *result = ir_analyze_async_call(ira, call_instruction, fn_entry, fn_type, fn_ref, - casted_args, call_param_count, async_allocator_inst); - return ir_finish_anal(ira, result); + zig_panic("TODO async call"); } if (fn_entry != nullptr && fn_entry->fn_inline == FnInlineAlways && fn_inline == FnInlineNever) { @@ -16513,7 +15388,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c } IrInstruction *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, - call_param_count, casted_args, fn_inline, false, nullptr, casted_new_stack, + call_param_count, casted_args, fn_inline, false, casted_new_stack, result_loc, return_type); return 
ir_finish_anal(ira, new_call_instruction); } @@ -16694,7 +15569,6 @@ static IrInstruction *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op_ case ZigTypeIdFn: case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: - case ZigTypeIdPromise: return ir_const_type(ira, &un_op_instruction->base, get_optional_type(ira->codegen, type_entry)); case ZigTypeIdUnreachable: case ZigTypeIdOpaque: @@ -18465,7 +17339,6 @@ static IrInstruction *ir_analyze_instruction_slice_type(IrAnalyze *ira, case ZigTypeIdUnion: case ZigTypeIdFn: case ZigTypeIdBoundFn: - case ZigTypeIdPromise: case ZigTypeIdVector: { ResolveStatus needed_status = (align_bytes == 0) ? @@ -18580,7 +17453,6 @@ static IrInstruction *ir_analyze_instruction_array_type(IrAnalyze *ira, case ZigTypeIdUnion: case ZigTypeIdFn: case ZigTypeIdBoundFn: - case ZigTypeIdPromise: case ZigTypeIdVector: { if ((err = ensure_complete_type(ira->codegen, child_type))) @@ -18592,22 +17464,6 @@ static IrInstruction *ir_analyze_instruction_array_type(IrAnalyze *ira, zig_unreachable(); } -static IrInstruction *ir_analyze_instruction_promise_type(IrAnalyze *ira, IrInstructionPromiseType *instruction) { - ZigType *promise_type; - - if (instruction->payload_type == nullptr) { - promise_type = ira->codegen->builtin_types.entry_promise; - } else { - ZigType *payload_type = ir_resolve_type(ira, instruction->payload_type->child); - if (type_is_invalid(payload_type)) - return ira->codegen->invalid_instruction; - - promise_type = get_promise_type(ira->codegen, payload_type); - } - - return ir_const_type(ira, &instruction->base, promise_type); -} - static IrInstruction *ir_analyze_instruction_size_of(IrAnalyze *ira, IrInstructionSizeOf *size_of_instruction) { @@ -18647,7 +17503,6 @@ static IrInstruction *ir_analyze_instruction_size_of(IrAnalyze *ira, case ZigTypeIdEnum: case ZigTypeIdUnion: case ZigTypeIdFn: - case ZigTypeIdPromise: case ZigTypeIdVector: { uint64_t size_in_bytes = type_size(ira->codegen, type_entry); @@ -19134,7 +17989,6 @@ static 
IrInstruction *ir_analyze_instruction_switch_target(IrAnalyze *ira, case ZigTypeIdComptimeInt: case ZigTypeIdEnumLiteral: case ZigTypeIdPointer: - case ZigTypeIdPromise: case ZigTypeIdFn: case ZigTypeIdErrorSet: { if (pointee_val) { @@ -20645,32 +19499,6 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr fields[0].type = ira->codegen->builtin_types.entry_type; fields[0].data.x_type = type_entry->data.maybe.child_type; - break; - } - case ZigTypeIdPromise: - { - result = create_const_vals(1); - result->special = ConstValSpecialStatic; - result->type = ir_type_info_get_type(ira, "Promise", nullptr); - - ConstExprValue *fields = create_const_vals(1); - result->data.x_struct.fields = fields; - - // child: ?type - ensure_field_index(result->type, "child", 0); - fields[0].special = ConstValSpecialStatic; - fields[0].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type); - - if (type_entry->data.promise.result_type == nullptr) - fields[0].data.x_optional = nullptr; - else { - ConstExprValue *child_type = create_const_vals(1); - child_type->special = ConstValSpecialStatic; - child_type->type = ira->codegen->builtin_types.entry_type; - child_type->data.x_type = type_entry->data.promise.result_type; - fields[0].data.x_optional = child_type; - } - break; } case ZigTypeIdEnum: @@ -20982,7 +19810,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr result->special = ConstValSpecialStatic; result->type = ir_type_info_get_type(ira, "Fn", nullptr); - ConstExprValue *fields = create_const_vals(6); + ConstExprValue *fields = create_const_vals(5); result->data.x_struct.fields = fields; // calling_convention: TypeInfo.CallingConvention @@ -21015,19 +19843,6 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr return_type->data.x_type = type_entry->data.fn.fn_type_id.return_type; fields[3].data.x_optional = return_type; } - // async_allocator_type: type - 
ensure_field_index(result->type, "async_allocator_type", 4); - fields[4].special = ConstValSpecialStatic; - fields[4].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type); - if (type_entry->data.fn.fn_type_id.async_allocator_type == nullptr) - fields[4].data.x_optional = nullptr; - else { - ConstExprValue *async_alloc_type = create_const_vals(1); - async_alloc_type->special = ConstValSpecialStatic; - async_alloc_type->type = ira->codegen->builtin_types.entry_type; - async_alloc_type->data.x_type = type_entry->data.fn.fn_type_id.async_allocator_type; - fields[4].data.x_optional = async_alloc_type; - } // args: []TypeInfo.FnArg ZigType *type_info_fn_arg_type = ir_type_info_get_type(ira, "FnArg", nullptr); if ((err = type_resolve(ira->codegen, type_info_fn_arg_type, ResolveStatusSizeKnown))) { @@ -21042,10 +19857,9 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr fn_arg_array->data.x_array.special = ConstArraySpecialNone; fn_arg_array->data.x_array.data.s_none.elements = create_const_vals(fn_arg_count); - init_const_slice(ira->codegen, &fields[5], fn_arg_array, 0, fn_arg_count, false); + init_const_slice(ira->codegen, &fields[4], fn_arg_array, 0, fn_arg_count, false); - for (size_t fn_arg_index = 0; fn_arg_index < fn_arg_count; fn_arg_index++) - { + for (size_t fn_arg_index = 0; fn_arg_index < fn_arg_count; fn_arg_index++) { FnTypeParamInfo *fn_param_info = &type_entry->data.fn.fn_type_id.param_info[fn_arg_index]; ConstExprValue *fn_arg_val = &fn_arg_array->data.x_array.data.s_none.elements[fn_arg_index]; @@ -22803,11 +21617,7 @@ static IrInstruction *ir_analyze_instruction_frame_address(IrAnalyze *ira, IrIns } static IrInstruction *ir_analyze_instruction_handle(IrAnalyze *ira, IrInstructionHandle *instruction) { - IrInstruction *result = ir_build_handle(&ira->new_irb, instruction->base.scope, instruction->base.source_node); - ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec); - assert(fn_entry != nullptr); 
- result->value.type = get_promise_type(ira->codegen, fn_entry->type_entry->data.fn.fn_type_id.return_type); - return result; + zig_panic("TODO anlayze @handle()"); } static IrInstruction *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstructionAlignOf *instruction) { @@ -22841,7 +21651,6 @@ static IrInstruction *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruct case ZigTypeIdInt: case ZigTypeIdFloat: case ZigTypeIdPointer: - case ZigTypeIdPromise: case ZigTypeIdArray: case ZigTypeIdStruct: case ZigTypeIdOptional: @@ -23401,15 +22210,7 @@ static IrInstruction *ir_analyze_instruction_fn_proto(IrAnalyze *ira, IrInstruct } if (fn_type_id.cc == CallingConventionAsync) { - if (instruction->async_allocator_type_value == nullptr) { - ir_add_error(ira, &instruction->base, - buf_sprintf("async fn proto missing allocator type")); - return ira->codegen->invalid_instruction; - } - IrInstruction *async_allocator_type_value = instruction->async_allocator_type_value->child; - fn_type_id.async_allocator_type = ir_resolve_type(ira, async_allocator_type_value); - if (type_is_invalid(fn_type_id.async_allocator_type)) - return ira->codegen->invalid_instruction; + zig_panic("TODO"); } return ir_const_type(ira, &instruction->base, get_fn_type(ira->codegen, &fn_type_id)); @@ -23905,7 +22706,6 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue case ZigTypeIdEnumLiteral: case ZigTypeIdUndefined: case ZigTypeIdNull: - case ZigTypeIdPromise: case ZigTypeIdErrorUnion: case ZigTypeIdErrorSet: zig_unreachable(); @@ -24059,7 +22859,6 @@ static Error buf_read_value_bytes(IrAnalyze *ira, CodeGen *codegen, AstNode *sou case ZigTypeIdEnumLiteral: case ZigTypeIdUndefined: case ZigTypeIdNull: - case ZigTypeIdPromise: zig_unreachable(); case ZigTypeIdVoid: return ErrorNone; @@ -24546,181 +23345,7 @@ static IrInstruction *ir_analyze_instruction_tag_type(IrAnalyze *ira, IrInstruct } static IrInstruction *ir_analyze_instruction_cancel(IrAnalyze *ira, 
IrInstructionCancel *instruction) { - IrInstruction *target_inst = instruction->target->child; - if (type_is_invalid(target_inst->value.type)) - return ira->codegen->invalid_instruction; - IrInstruction *casted_target = ir_implicit_cast(ira, target_inst, ira->codegen->builtin_types.entry_promise); - if (type_is_invalid(casted_target->value.type)) - return ira->codegen->invalid_instruction; - - IrInstruction *result = ir_build_cancel(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_target); - result->value.type = ira->codegen->builtin_types.entry_void; - result->value.special = ConstValSpecialStatic; - return result; -} - -static IrInstruction *ir_analyze_instruction_coro_id(IrAnalyze *ira, IrInstructionCoroId *instruction) { - IrInstruction *promise_ptr = instruction->promise_ptr->child; - if (type_is_invalid(promise_ptr->value.type)) - return ira->codegen->invalid_instruction; - - IrInstruction *result = ir_build_coro_id(&ira->new_irb, instruction->base.scope, instruction->base.source_node, - promise_ptr); - result->value.type = ira->codegen->builtin_types.entry_usize; - return result; -} - -static IrInstruction *ir_analyze_instruction_coro_alloc(IrAnalyze *ira, IrInstructionCoroAlloc *instruction) { - IrInstruction *coro_id = instruction->coro_id->child; - if (type_is_invalid(coro_id->value.type)) - return ira->codegen->invalid_instruction; - - IrInstruction *result = ir_build_coro_alloc(&ira->new_irb, instruction->base.scope, instruction->base.source_node, - coro_id); - result->value.type = ira->codegen->builtin_types.entry_bool; - return result; -} - -static IrInstruction *ir_analyze_instruction_coro_size(IrAnalyze *ira, IrInstructionCoroSize *instruction) { - IrInstruction *result = ir_build_coro_size(&ira->new_irb, instruction->base.scope, instruction->base.source_node); - result->value.type = ira->codegen->builtin_types.entry_usize; - return result; -} - -static IrInstruction *ir_analyze_instruction_coro_begin(IrAnalyze *ira, 
IrInstructionCoroBegin *instruction) { - IrInstruction *coro_id = instruction->coro_id->child; - if (type_is_invalid(coro_id->value.type)) - return ira->codegen->invalid_instruction; - - IrInstruction *coro_mem_ptr = instruction->coro_mem_ptr->child; - if (type_is_invalid(coro_mem_ptr->value.type)) - return ira->codegen->invalid_instruction; - - ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec); - ir_assert(fn_entry != nullptr, &instruction->base); - IrInstruction *result = ir_build_coro_begin(&ira->new_irb, instruction->base.scope, instruction->base.source_node, - coro_id, coro_mem_ptr); - result->value.type = get_promise_type(ira->codegen, fn_entry->type_entry->data.fn.fn_type_id.return_type); - return result; -} - -static IrInstruction *ir_analyze_instruction_get_implicit_allocator(IrAnalyze *ira, IrInstructionGetImplicitAllocator *instruction) { - return ir_get_implicit_allocator(ira, &instruction->base, instruction->id); -} - -static IrInstruction *ir_analyze_instruction_coro_alloc_fail(IrAnalyze *ira, IrInstructionCoroAllocFail *instruction) { - IrInstruction *err_val = instruction->err_val->child; - if (type_is_invalid(err_val->value.type)) - return ir_unreach_error(ira); - - IrInstruction *result = ir_build_coro_alloc_fail(&ira->new_irb, instruction->base.scope, instruction->base.source_node, err_val); - result->value.type = ira->codegen->builtin_types.entry_unreachable; - return ir_finish_anal(ira, result); -} - -static IrInstruction *ir_analyze_instruction_coro_suspend(IrAnalyze *ira, IrInstructionCoroSuspend *instruction) { - IrInstruction *save_point = nullptr; - if (instruction->save_point != nullptr) { - save_point = instruction->save_point->child; - if (type_is_invalid(save_point->value.type)) - return ira->codegen->invalid_instruction; - } - - IrInstruction *is_final = instruction->is_final->child; - if (type_is_invalid(is_final->value.type)) - return ira->codegen->invalid_instruction; - - IrInstruction *result = 
ir_build_coro_suspend(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, save_point, is_final); - result->value.type = ira->codegen->builtin_types.entry_u8; - return result; -} - -static IrInstruction *ir_analyze_instruction_coro_end(IrAnalyze *ira, IrInstructionCoroEnd *instruction) { - IrInstruction *result = ir_build_coro_end(&ira->new_irb, instruction->base.scope, - instruction->base.source_node); - result->value.type = ira->codegen->builtin_types.entry_void; - return result; -} - -static IrInstruction *ir_analyze_instruction_coro_free(IrAnalyze *ira, IrInstructionCoroFree *instruction) { - IrInstruction *coro_id = instruction->coro_id->child; - if (type_is_invalid(coro_id->value.type)) - return ira->codegen->invalid_instruction; - - IrInstruction *coro_handle = instruction->coro_handle->child; - if (type_is_invalid(coro_handle->value.type)) - return ira->codegen->invalid_instruction; - - IrInstruction *result = ir_build_coro_free(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, coro_id, coro_handle); - ZigType *ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, false); - result->value.type = get_optional_type(ira->codegen, ptr_type); - return result; -} - -static IrInstruction *ir_analyze_instruction_coro_resume(IrAnalyze *ira, IrInstructionCoroResume *instruction) { - IrInstruction *awaiter_handle = instruction->awaiter_handle->child; - if (type_is_invalid(awaiter_handle->value.type)) - return ira->codegen->invalid_instruction; - - IrInstruction *casted_target = ir_implicit_cast(ira, awaiter_handle, ira->codegen->builtin_types.entry_promise); - if (type_is_invalid(casted_target->value.type)) - return ira->codegen->invalid_instruction; - - IrInstruction *result = ir_build_coro_resume(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, casted_target); - result->value.type = ira->codegen->builtin_types.entry_void; - return result; -} - -static IrInstruction 
*ir_analyze_instruction_coro_save(IrAnalyze *ira, IrInstructionCoroSave *instruction) { - IrInstruction *coro_handle = instruction->coro_handle->child; - if (type_is_invalid(coro_handle->value.type)) - return ira->codegen->invalid_instruction; - - IrInstruction *result = ir_build_coro_save(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, coro_handle); - result->value.type = ira->codegen->builtin_types.entry_usize; - return result; -} - -static IrInstruction *ir_analyze_instruction_coro_promise(IrAnalyze *ira, IrInstructionCoroPromise *instruction) { - IrInstruction *coro_handle = instruction->coro_handle->child; - if (type_is_invalid(coro_handle->value.type)) - return ira->codegen->invalid_instruction; - - if (coro_handle->value.type->id != ZigTypeIdPromise || - coro_handle->value.type->data.promise.result_type == nullptr) - { - ir_add_error(ira, &instruction->base, buf_sprintf("expected promise->T, found '%s'", - buf_ptr(&coro_handle->value.type->name))); - return ira->codegen->invalid_instruction; - } - - ZigType *coro_frame_type = get_promise_frame_type(ira->codegen, - coro_handle->value.type->data.promise.result_type); - - IrInstruction *result = ir_build_coro_promise(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, coro_handle); - result->value.type = get_pointer_to_type(ira->codegen, coro_frame_type, false); - return result; -} - -static IrInstruction *ir_analyze_instruction_coro_alloc_helper(IrAnalyze *ira, IrInstructionCoroAllocHelper *instruction) { - IrInstruction *realloc_fn = instruction->realloc_fn->child; - if (type_is_invalid(realloc_fn->value.type)) - return ira->codegen->invalid_instruction; - - IrInstruction *coro_size = instruction->coro_size->child; - if (type_is_invalid(coro_size->value.type)) - return ira->codegen->invalid_instruction; - - IrInstruction *result = ir_build_coro_alloc_helper(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, realloc_fn, coro_size); - ZigType 
*u8_ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, false); - result->value.type = get_optional_type(ira->codegen, u8_ptr_type); - return result; + zig_panic("TODO analyze cancel"); } static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstruction *op) { @@ -24853,65 +23478,6 @@ static IrInstruction *ir_analyze_instruction_atomic_load(IrAnalyze *ira, IrInstr return result; } -static IrInstruction *ir_analyze_instruction_promise_result_type(IrAnalyze *ira, IrInstructionPromiseResultType *instruction) { - ZigType *promise_type = ir_resolve_type(ira, instruction->promise_type->child); - if (type_is_invalid(promise_type)) - return ira->codegen->invalid_instruction; - - if (promise_type->id != ZigTypeIdPromise || promise_type->data.promise.result_type == nullptr) { - ir_add_error(ira, &instruction->base, buf_sprintf("expected promise->T, found '%s'", - buf_ptr(&promise_type->name))); - return ira->codegen->invalid_instruction; - } - - return ir_const_type(ira, &instruction->base, promise_type->data.promise.result_type); -} - -static IrInstruction *ir_analyze_instruction_await_bookkeeping(IrAnalyze *ira, IrInstructionAwaitBookkeeping *instruction) { - ZigType *promise_result_type = ir_resolve_type(ira, instruction->promise_result_type->child); - if (type_is_invalid(promise_result_type)) - return ira->codegen->invalid_instruction; - - ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec); - ir_assert(fn_entry != nullptr, &instruction->base); - - if (type_can_fail(promise_result_type)) { - fn_entry->calls_or_awaits_errorable_fn = true; - } - - return ir_const_void(ira, &instruction->base); -} - -static IrInstruction *ir_analyze_instruction_merge_err_ret_traces(IrAnalyze *ira, - IrInstructionMergeErrRetTraces *instruction) -{ - IrInstruction *coro_promise_ptr = instruction->coro_promise_ptr->child; - if (type_is_invalid(coro_promise_ptr->value.type)) - return ira->codegen->invalid_instruction; - - 
ir_assert(coro_promise_ptr->value.type->id == ZigTypeIdPointer, &instruction->base); - ZigType *promise_frame_type = coro_promise_ptr->value.type->data.pointer.child_type; - ir_assert(promise_frame_type->id == ZigTypeIdStruct, &instruction->base); - ZigType *promise_result_type = promise_frame_type->data.structure.fields[1].type_entry; - - if (!type_can_fail(promise_result_type)) { - return ir_const_void(ira, &instruction->base); - } - - IrInstruction *src_err_ret_trace_ptr = instruction->src_err_ret_trace_ptr->child; - if (type_is_invalid(src_err_ret_trace_ptr->value.type)) - return ira->codegen->invalid_instruction; - - IrInstruction *dest_err_ret_trace_ptr = instruction->dest_err_ret_trace_ptr->child; - if (type_is_invalid(dest_err_ret_trace_ptr->value.type)) - return ira->codegen->invalid_instruction; - - IrInstruction *result = ir_build_merge_err_ret_traces(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, coro_promise_ptr, src_err_ret_trace_ptr, dest_err_ret_trace_ptr); - result->value.type = ira->codegen->builtin_types.entry_void; - return result; -} - static IrInstruction *ir_analyze_instruction_save_err_ret_addr(IrAnalyze *ira, IrInstructionSaveErrRetAddr *instruction) { IrInstruction *result = ir_build_save_err_ret_addr(&ira->new_irb, instruction->base.scope, instruction->base.source_node); @@ -25530,8 +24096,6 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction return ir_analyze_instruction_asm(ira, (IrInstructionAsm *)instruction); case IrInstructionIdArrayType: return ir_analyze_instruction_array_type(ira, (IrInstructionArrayType *)instruction); - case IrInstructionIdPromiseType: - return ir_analyze_instruction_promise_type(ira, (IrInstructionPromiseType *)instruction); case IrInstructionIdSizeOf: return ir_analyze_instruction_size_of(ira, (IrInstructionSizeOf *)instruction); case IrInstructionIdTestNonNull: @@ -25704,46 +24268,14 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, 
IrInstruction return ir_analyze_instruction_error_union(ira, (IrInstructionErrorUnion *)instruction); case IrInstructionIdCancel: return ir_analyze_instruction_cancel(ira, (IrInstructionCancel *)instruction); - case IrInstructionIdCoroId: - return ir_analyze_instruction_coro_id(ira, (IrInstructionCoroId *)instruction); - case IrInstructionIdCoroAlloc: - return ir_analyze_instruction_coro_alloc(ira, (IrInstructionCoroAlloc *)instruction); - case IrInstructionIdCoroSize: - return ir_analyze_instruction_coro_size(ira, (IrInstructionCoroSize *)instruction); - case IrInstructionIdCoroBegin: - return ir_analyze_instruction_coro_begin(ira, (IrInstructionCoroBegin *)instruction); - case IrInstructionIdGetImplicitAllocator: - return ir_analyze_instruction_get_implicit_allocator(ira, (IrInstructionGetImplicitAllocator *)instruction); - case IrInstructionIdCoroAllocFail: - return ir_analyze_instruction_coro_alloc_fail(ira, (IrInstructionCoroAllocFail *)instruction); - case IrInstructionIdCoroSuspend: - return ir_analyze_instruction_coro_suspend(ira, (IrInstructionCoroSuspend *)instruction); - case IrInstructionIdCoroEnd: - return ir_analyze_instruction_coro_end(ira, (IrInstructionCoroEnd *)instruction); - case IrInstructionIdCoroFree: - return ir_analyze_instruction_coro_free(ira, (IrInstructionCoroFree *)instruction); - case IrInstructionIdCoroResume: - return ir_analyze_instruction_coro_resume(ira, (IrInstructionCoroResume *)instruction); - case IrInstructionIdCoroSave: - return ir_analyze_instruction_coro_save(ira, (IrInstructionCoroSave *)instruction); - case IrInstructionIdCoroPromise: - return ir_analyze_instruction_coro_promise(ira, (IrInstructionCoroPromise *)instruction); - case IrInstructionIdCoroAllocHelper: - return ir_analyze_instruction_coro_alloc_helper(ira, (IrInstructionCoroAllocHelper *)instruction); case IrInstructionIdAtomicRmw: return ir_analyze_instruction_atomic_rmw(ira, (IrInstructionAtomicRmw *)instruction); case IrInstructionIdAtomicLoad: return 
ir_analyze_instruction_atomic_load(ira, (IrInstructionAtomicLoad *)instruction); - case IrInstructionIdPromiseResultType: - return ir_analyze_instruction_promise_result_type(ira, (IrInstructionPromiseResultType *)instruction); - case IrInstructionIdAwaitBookkeeping: - return ir_analyze_instruction_await_bookkeeping(ira, (IrInstructionAwaitBookkeeping *)instruction); case IrInstructionIdSaveErrRetAddr: return ir_analyze_instruction_save_err_ret_addr(ira, (IrInstructionSaveErrRetAddr *)instruction); case IrInstructionIdAddImplicitReturnType: return ir_analyze_instruction_add_implicit_return_type(ira, (IrInstructionAddImplicitReturnType *)instruction); - case IrInstructionIdMergeErrRetTraces: - return ir_analyze_instruction_merge_err_ret_traces(ira, (IrInstructionMergeErrRetTraces *)instruction); case IrInstructionIdMarkErrRetTracePtr: return ir_analyze_instruction_mark_err_ret_trace_ptr(ira, (IrInstructionMarkErrRetTracePtr *)instruction); case IrInstructionIdFloatOp: @@ -25788,9 +24320,7 @@ ZigType *ir_analyze(CodeGen *codegen, IrExecutable *old_exec, IrExecutable *new_ old_exec->analysis = ira; ira->codegen = codegen; - ZigFn *fn_entry = exec_fn_entry(old_exec); - bool is_async = fn_entry != nullptr && fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync; - ira->explicit_return_type = is_async ? 
get_promise_type(codegen, expected_type) : expected_type; + ira->explicit_return_type = expected_type; ira->explicit_return_type_source_node = expected_type_source_node; ira->old_irb.codegen = codegen; @@ -25889,17 +24419,8 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdSetAlignStack: case IrInstructionIdExport: case IrInstructionIdCancel: - case IrInstructionIdCoroId: - case IrInstructionIdCoroBegin: - case IrInstructionIdCoroAllocFail: - case IrInstructionIdCoroEnd: - case IrInstructionIdCoroResume: - case IrInstructionIdCoroSave: - case IrInstructionIdCoroAllocHelper: - case IrInstructionIdAwaitBookkeeping: case IrInstructionIdSaveErrRetAddr: case IrInstructionIdAddImplicitReturnType: - case IrInstructionIdMergeErrRetTraces: case IrInstructionIdMarkErrRetTracePtr: case IrInstructionIdAtomicRmw: case IrInstructionIdCmpxchgGen: @@ -25933,7 +24454,6 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdTypeOf: case IrInstructionIdStructFieldPtr: case IrInstructionIdArrayType: - case IrInstructionIdPromiseType: case IrInstructionIdSliceType: case IrInstructionIdSizeOf: case IrInstructionIdTestNonNull: @@ -25993,13 +24513,6 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdTagType: case IrInstructionIdErrorReturnTrace: case IrInstructionIdErrorUnion: - case IrInstructionIdGetImplicitAllocator: - case IrInstructionIdCoroAlloc: - case IrInstructionIdCoroSize: - case IrInstructionIdCoroSuspend: - case IrInstructionIdCoroFree: - case IrInstructionIdCoroPromise: - case IrInstructionIdPromiseResultType: case IrInstructionIdFloatOp: case IrInstructionIdMulAdd: case IrInstructionIdAtomicLoad: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 588a9b2882..9ea70ba7ab 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -257,13 +257,7 @@ static void ir_print_result_loc(IrPrint *irp, ResultLoc *result_loc) { static void ir_print_call_src(IrPrint *irp, IrInstructionCallSrc *call_instruction) 
{ if (call_instruction->is_async) { - fprintf(irp->f, "async"); - if (call_instruction->async_allocator != nullptr) { - fprintf(irp->f, "<"); - ir_print_other_instruction(irp, call_instruction->async_allocator); - fprintf(irp->f, ">"); - } - fprintf(irp->f, " "); + fprintf(irp->f, "async "); } if (call_instruction->fn_entry) { fprintf(irp->f, "%s", buf_ptr(&call_instruction->fn_entry->symbol_name)); @@ -284,13 +278,7 @@ static void ir_print_call_src(IrPrint *irp, IrInstructionCallSrc *call_instructi static void ir_print_call_gen(IrPrint *irp, IrInstructionCallGen *call_instruction) { if (call_instruction->is_async) { - fprintf(irp->f, "async"); - if (call_instruction->async_allocator != nullptr) { - fprintf(irp->f, "<"); - ir_print_other_instruction(irp, call_instruction->async_allocator); - fprintf(irp->f, ">"); - } - fprintf(irp->f, " "); + fprintf(irp->f, "async "); } if (call_instruction->fn_entry) { fprintf(irp->f, "%s", buf_ptr(&call_instruction->fn_entry->symbol_name)); @@ -477,14 +465,6 @@ static void ir_print_array_type(IrPrint *irp, IrInstructionArrayType *instructio ir_print_other_instruction(irp, instruction->child_type); } -static void ir_print_promise_type(IrPrint *irp, IrInstructionPromiseType *instruction) { - fprintf(irp->f, "promise"); - if (instruction->payload_type != nullptr) { - fprintf(irp->f, "->"); - ir_print_other_instruction(irp, instruction->payload_type); - } -} - static void ir_print_slice_type(IrPrint *irp, IrInstructionSliceType *instruction) { const char *const_kw = instruction->is_const ? 
"const " : ""; fprintf(irp->f, "[]%s", const_kw); @@ -1396,105 +1376,6 @@ static void ir_print_cancel(IrPrint *irp, IrInstructionCancel *instruction) { ir_print_other_instruction(irp, instruction->target); } -static void ir_print_get_implicit_allocator(IrPrint *irp, IrInstructionGetImplicitAllocator *instruction) { - fprintf(irp->f, "@getImplicitAllocator("); - switch (instruction->id) { - case ImplicitAllocatorIdArg: - fprintf(irp->f, "Arg"); - break; - case ImplicitAllocatorIdLocalVar: - fprintf(irp->f, "LocalVar"); - break; - } - fprintf(irp->f, ")"); -} - -static void ir_print_coro_id(IrPrint *irp, IrInstructionCoroId *instruction) { - fprintf(irp->f, "@coroId("); - ir_print_other_instruction(irp, instruction->promise_ptr); - fprintf(irp->f, ")"); -} - -static void ir_print_coro_alloc(IrPrint *irp, IrInstructionCoroAlloc *instruction) { - fprintf(irp->f, "@coroAlloc("); - ir_print_other_instruction(irp, instruction->coro_id); - fprintf(irp->f, ")"); -} - -static void ir_print_coro_size(IrPrint *irp, IrInstructionCoroSize *instruction) { - fprintf(irp->f, "@coroSize()"); -} - -static void ir_print_coro_begin(IrPrint *irp, IrInstructionCoroBegin *instruction) { - fprintf(irp->f, "@coroBegin("); - ir_print_other_instruction(irp, instruction->coro_id); - fprintf(irp->f, ","); - ir_print_other_instruction(irp, instruction->coro_mem_ptr); - fprintf(irp->f, ")"); -} - -static void ir_print_coro_alloc_fail(IrPrint *irp, IrInstructionCoroAllocFail *instruction) { - fprintf(irp->f, "@coroAllocFail("); - ir_print_other_instruction(irp, instruction->err_val); - fprintf(irp->f, ")"); -} - -static void ir_print_coro_suspend(IrPrint *irp, IrInstructionCoroSuspend *instruction) { - fprintf(irp->f, "@coroSuspend("); - if (instruction->save_point != nullptr) { - ir_print_other_instruction(irp, instruction->save_point); - } else { - fprintf(irp->f, "null"); - } - fprintf(irp->f, ","); - ir_print_other_instruction(irp, instruction->is_final); - fprintf(irp->f, ")"); -} - -static 
void ir_print_coro_end(IrPrint *irp, IrInstructionCoroEnd *instruction) { - fprintf(irp->f, "@coroEnd()"); -} - -static void ir_print_coro_free(IrPrint *irp, IrInstructionCoroFree *instruction) { - fprintf(irp->f, "@coroFree("); - ir_print_other_instruction(irp, instruction->coro_id); - fprintf(irp->f, ","); - ir_print_other_instruction(irp, instruction->coro_handle); - fprintf(irp->f, ")"); -} - -static void ir_print_coro_resume(IrPrint *irp, IrInstructionCoroResume *instruction) { - fprintf(irp->f, "@coroResume("); - ir_print_other_instruction(irp, instruction->awaiter_handle); - fprintf(irp->f, ")"); -} - -static void ir_print_coro_save(IrPrint *irp, IrInstructionCoroSave *instruction) { - fprintf(irp->f, "@coroSave("); - ir_print_other_instruction(irp, instruction->coro_handle); - fprintf(irp->f, ")"); -} - -static void ir_print_coro_promise(IrPrint *irp, IrInstructionCoroPromise *instruction) { - fprintf(irp->f, "@coroPromise("); - ir_print_other_instruction(irp, instruction->coro_handle); - fprintf(irp->f, ")"); -} - -static void ir_print_promise_result_type(IrPrint *irp, IrInstructionPromiseResultType *instruction) { - fprintf(irp->f, "@PromiseResultType("); - ir_print_other_instruction(irp, instruction->promise_type); - fprintf(irp->f, ")"); -} - -static void ir_print_coro_alloc_helper(IrPrint *irp, IrInstructionCoroAllocHelper *instruction) { - fprintf(irp->f, "@coroAllocHelper("); - ir_print_other_instruction(irp, instruction->realloc_fn); - fprintf(irp->f, ","); - ir_print_other_instruction(irp, instruction->coro_size); - fprintf(irp->f, ")"); -} - static void ir_print_atomic_rmw(IrPrint *irp, IrInstructionAtomicRmw *instruction) { fprintf(irp->f, "@atomicRmw("); if (instruction->operand_type != nullptr) { @@ -1539,12 +1420,6 @@ static void ir_print_atomic_load(IrPrint *irp, IrInstructionAtomicLoad *instruct fprintf(irp->f, ")"); } -static void ir_print_await_bookkeeping(IrPrint *irp, IrInstructionAwaitBookkeeping *instruction) { - fprintf(irp->f, 
"@awaitBookkeeping("); - ir_print_other_instruction(irp, instruction->promise_result_type); - fprintf(irp->f, ")"); -} - static void ir_print_save_err_ret_addr(IrPrint *irp, IrInstructionSaveErrRetAddr *instruction) { fprintf(irp->f, "@saveErrRetAddr()"); } @@ -1555,16 +1430,6 @@ static void ir_print_add_implicit_return_type(IrPrint *irp, IrInstructionAddImpl fprintf(irp->f, ")"); } -static void ir_print_merge_err_ret_traces(IrPrint *irp, IrInstructionMergeErrRetTraces *instruction) { - fprintf(irp->f, "@mergeErrRetTraces("); - ir_print_other_instruction(irp, instruction->coro_promise_ptr); - fprintf(irp->f, ","); - ir_print_other_instruction(irp, instruction->src_err_ret_trace_ptr); - fprintf(irp->f, ","); - ir_print_other_instruction(irp, instruction->dest_err_ret_trace_ptr); - fprintf(irp->f, ")"); -} - static void ir_print_mark_err_ret_trace_ptr(IrPrint *irp, IrInstructionMarkErrRetTracePtr *instruction) { fprintf(irp->f, "@markErrRetTracePtr("); ir_print_other_instruction(irp, instruction->err_ret_trace_ptr); @@ -1727,9 +1592,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdArrayType: ir_print_array_type(irp, (IrInstructionArrayType *)instruction); break; - case IrInstructionIdPromiseType: - ir_print_promise_type(irp, (IrInstructionPromiseType *)instruction); - break; case IrInstructionIdSliceType: ir_print_slice_type(irp, (IrInstructionSliceType *)instruction); break; @@ -2033,63 +1895,15 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdCancel: ir_print_cancel(irp, (IrInstructionCancel *)instruction); break; - case IrInstructionIdGetImplicitAllocator: - ir_print_get_implicit_allocator(irp, (IrInstructionGetImplicitAllocator *)instruction); - break; - case IrInstructionIdCoroId: - ir_print_coro_id(irp, (IrInstructionCoroId *)instruction); - break; - case IrInstructionIdCoroAlloc: - ir_print_coro_alloc(irp, (IrInstructionCoroAlloc *)instruction); - break; - 
case IrInstructionIdCoroSize: - ir_print_coro_size(irp, (IrInstructionCoroSize *)instruction); - break; - case IrInstructionIdCoroBegin: - ir_print_coro_begin(irp, (IrInstructionCoroBegin *)instruction); - break; - case IrInstructionIdCoroAllocFail: - ir_print_coro_alloc_fail(irp, (IrInstructionCoroAllocFail *)instruction); - break; - case IrInstructionIdCoroSuspend: - ir_print_coro_suspend(irp, (IrInstructionCoroSuspend *)instruction); - break; - case IrInstructionIdCoroEnd: - ir_print_coro_end(irp, (IrInstructionCoroEnd *)instruction); - break; - case IrInstructionIdCoroFree: - ir_print_coro_free(irp, (IrInstructionCoroFree *)instruction); - break; - case IrInstructionIdCoroResume: - ir_print_coro_resume(irp, (IrInstructionCoroResume *)instruction); - break; - case IrInstructionIdCoroSave: - ir_print_coro_save(irp, (IrInstructionCoroSave *)instruction); - break; - case IrInstructionIdCoroAllocHelper: - ir_print_coro_alloc_helper(irp, (IrInstructionCoroAllocHelper *)instruction); - break; case IrInstructionIdAtomicRmw: ir_print_atomic_rmw(irp, (IrInstructionAtomicRmw *)instruction); break; - case IrInstructionIdCoroPromise: - ir_print_coro_promise(irp, (IrInstructionCoroPromise *)instruction); - break; - case IrInstructionIdPromiseResultType: - ir_print_promise_result_type(irp, (IrInstructionPromiseResultType *)instruction); - break; - case IrInstructionIdAwaitBookkeeping: - ir_print_await_bookkeeping(irp, (IrInstructionAwaitBookkeeping *)instruction); - break; case IrInstructionIdSaveErrRetAddr: ir_print_save_err_ret_addr(irp, (IrInstructionSaveErrRetAddr *)instruction); break; case IrInstructionIdAddImplicitReturnType: ir_print_add_implicit_return_type(irp, (IrInstructionAddImplicitReturnType *)instruction); break; - case IrInstructionIdMergeErrRetTraces: - ir_print_merge_err_ret_traces(irp, (IrInstructionMergeErrRetTraces *)instruction); - break; case IrInstructionIdMarkErrRetTracePtr: ir_print_mark_err_ret_trace_ptr(irp, (IrInstructionMarkErrRetTracePtr 
*)instruction); break; diff --git a/src/parser.cpp b/src/parser.cpp index fe1f89ac92..b1a593d9c9 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -282,9 +282,6 @@ static AstNode *ast_parse_prefix_op_expr( case NodeTypeAwaitExpr: right = &prefix->data.await_expr.expr; break; - case NodeTypePromiseType: - right = &prefix->data.promise_type.payload_type; - break; case NodeTypeArrayType: right = &prefix->data.array_type.child_type; break; @@ -1643,10 +1640,6 @@ static AstNode *ast_parse_primary_type_expr(ParseContext *pc) { if (null != nullptr) return ast_create_node(pc, NodeTypeNullLiteral, null); - Token *promise = eat_token_if(pc, TokenIdKeywordPromise); - if (promise != nullptr) - return ast_create_node(pc, NodeTypePromiseType, promise); - Token *true_token = eat_token_if(pc, TokenIdKeywordTrue); if (true_token != nullptr) { AstNode *res = ast_create_node(pc, NodeTypeBoolLiteral, true_token); @@ -2042,11 +2035,6 @@ static Optional ast_parse_fn_cc(ParseContext *pc) { } if (eat_token_if(pc, TokenIdKeywordAsync) != nullptr) { res.cc = CallingConventionAsync; - if (eat_token_if(pc, TokenIdCmpLessThan) == nullptr) - return Optional::some(res); - - res.async_allocator_type = ast_expect(pc, ast_parse_type_expr); - expect_token(pc, TokenIdCmpGreaterThan); return Optional::some(res); } @@ -2533,16 +2521,6 @@ static AstNode *ast_parse_prefix_type_op(ParseContext *pc) { return res; } - Token *promise = eat_token_if(pc, TokenIdKeywordPromise); - if (promise != nullptr) { - if (eat_token_if(pc, TokenIdArrow) != nullptr) { - AstNode *res = ast_create_node(pc, NodeTypePromiseType, promise); - return res; - } - - put_back_token(pc); - } - AstNode *array = ast_parse_array_type_start(pc); if (array != nullptr) { assert(array->type == NodeTypeArrayType); @@ -2680,11 +2658,6 @@ static AstNode *ast_parse_async_prefix(ParseContext *pc) { AstNode *res = ast_create_node(pc, NodeTypeFnCallExpr, async); res->data.fn_call_expr.is_async = true; res->data.fn_call_expr.seen = false; - if 
(eat_token_if(pc, TokenIdCmpLessThan) != nullptr) { - AstNode *prefix_expr = ast_expect(pc, ast_parse_prefix_expr); - expect_token(pc, TokenIdCmpGreaterThan); - res->data.fn_call_expr.async_allocator = prefix_expr; - } return res; } @@ -2858,7 +2831,6 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont visit_node_list(&node->data.fn_proto.params, visit, context); visit_field(&node->data.fn_proto.align_expr, visit, context); visit_field(&node->data.fn_proto.section_expr, visit, context); - visit_field(&node->data.fn_proto.async_allocator_type, visit, context); break; case NodeTypeFnDef: visit_field(&node->data.fn_def.fn_proto, visit, context); @@ -2918,7 +2890,6 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont case NodeTypeFnCallExpr: visit_field(&node->data.fn_call_expr.fn_ref_expr, visit, context); visit_node_list(&node->data.fn_call_expr.params, visit, context); - visit_field(&node->data.fn_call_expr.async_allocator, visit, context); break; case NodeTypeArrayAccessExpr: visit_field(&node->data.array_access_expr.array_ref_expr, visit, context); @@ -3034,9 +3005,6 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont case NodeTypeInferredArrayType: visit_field(&node->data.array_type.child_type, visit, context); break; - case NodeTypePromiseType: - visit_field(&node->data.promise_type.payload_type, visit, context); - break; case NodeTypeErrorType: // none break; diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp index 783b6e0e20..0869c3ba9c 100644 --- a/src/tokenizer.cpp +++ b/src/tokenizer.cpp @@ -136,7 +136,6 @@ static const struct ZigKeyword zig_keywords[] = { {"or", TokenIdKeywordOr}, {"orelse", TokenIdKeywordOrElse}, {"packed", TokenIdKeywordPacked}, - {"promise", TokenIdKeywordPromise}, {"pub", TokenIdKeywordPub}, {"resume", TokenIdKeywordResume}, {"return", TokenIdKeywordReturn}, @@ -1558,7 +1557,6 @@ const char * token_name(TokenId id) { case TokenIdKeywordOr: 
return "or"; case TokenIdKeywordOrElse: return "orelse"; case TokenIdKeywordPacked: return "packed"; - case TokenIdKeywordPromise: return "promise"; case TokenIdKeywordPub: return "pub"; case TokenIdKeywordReturn: return "return"; case TokenIdKeywordLinkSection: return "linksection"; diff --git a/src/tokenizer.hpp b/src/tokenizer.hpp index 83dbe99471..253e0bd1e5 100644 --- a/src/tokenizer.hpp +++ b/src/tokenizer.hpp @@ -81,7 +81,6 @@ enum TokenId { TokenIdKeywordOr, TokenIdKeywordOrElse, TokenIdKeywordPacked, - TokenIdKeywordPromise, TokenIdKeywordPub, TokenIdKeywordResume, TokenIdKeywordReturn, diff --git a/std/fmt.zig b/std/fmt.zig index 2e9527f4ca..961c7279a2 100644 --- a/std/fmt.zig +++ b/std/fmt.zig @@ -328,9 +328,6 @@ pub fn formatType( try output(context, "error."); return output(context, @errorName(value)); }, - .Promise => { - return format(context, Errors, output, "promise@{x}", @ptrToInt(value)); - }, .Enum => { if (comptime std.meta.trait.hasFn("format")(T)) { return value.format(fmt, options, context, Errors, output); diff --git a/std/hash_map.zig b/std/hash_map.zig index c99d1d2490..4327bfdddb 100644 --- a/std/hash_map.zig +++ b/std/hash_map.zig @@ -560,7 +560,7 @@ pub fn autoHash(key: var, comptime rng: *std.rand.Random, comptime HashInt: type builtin.TypeId.Bool => return autoHash(@boolToInt(key), rng, HashInt), builtin.TypeId.Enum => return autoHash(@enumToInt(key), rng, HashInt), builtin.TypeId.ErrorSet => return autoHash(@errorToInt(key), rng, HashInt), - builtin.TypeId.Promise, builtin.TypeId.Fn => return autoHash(@ptrToInt(key), rng, HashInt), + builtin.TypeId.Fn => return autoHash(@ptrToInt(key), rng, HashInt), builtin.TypeId.BoundFn, builtin.TypeId.ComptimeFloat, diff --git a/std/meta.zig b/std/meta.zig index 0db76ce774..6b90727737 100644 --- a/std/meta.zig +++ b/std/meta.zig @@ -104,8 +104,7 @@ pub fn Child(comptime T: type) type { TypeId.Array => |info| info.child, TypeId.Pointer => |info| info.child, TypeId.Optional => |info| info.child, - 
TypeId.Promise => |info| if (info.child) |child| child else null, - else => @compileError("Expected promise, pointer, optional, or array type, " ++ "found '" ++ @typeName(T) ++ "'"), + else => @compileError("Expected pointer, optional, or array type, " ++ "found '" ++ @typeName(T) ++ "'"), }; } @@ -114,7 +113,6 @@ test "std.meta.Child" { testing.expect(Child(*u8) == u8); testing.expect(Child([]u8) == u8); testing.expect(Child(?u8) == u8); - testing.expect(Child(promise->u8) == u8); } pub fn containerLayout(comptime T: type) TypeInfo.ContainerLayout { diff --git a/std/testing.zig b/std/testing.zig index 4568e024e2..84f6cff5d8 100644 --- a/std/testing.zig +++ b/std/testing.zig @@ -45,7 +45,6 @@ pub fn expectEqual(expected: var, actual: @typeOf(expected)) void { TypeId.EnumLiteral, TypeId.Enum, TypeId.Fn, - TypeId.Promise, TypeId.Vector, TypeId.ErrorSet, => { diff --git a/test/stage1/behavior.zig b/test/stage1/behavior.zig index 71af5586ed..7950088348 100644 --- a/test/stage1/behavior.zig +++ b/test/stage1/behavior.zig @@ -39,11 +39,11 @@ comptime { _ = @import("behavior/bugs/828.zig"); _ = @import("behavior/bugs/920.zig"); _ = @import("behavior/byval_arg_var.zig"); - _ = @import("behavior/cancel.zig"); + //_ = @import("behavior/cancel.zig"); _ = @import("behavior/cast.zig"); _ = @import("behavior/const_slice_child.zig"); - _ = @import("behavior/coroutine_await_struct.zig"); - _ = @import("behavior/coroutines.zig"); + //_ = @import("behavior/coroutine_await_struct.zig"); + //_ = @import("behavior/coroutines.zig"); _ = @import("behavior/defer.zig"); _ = @import("behavior/enum.zig"); _ = @import("behavior/enum_with_members.zig"); diff --git a/test/stage1/behavior/type_info.zig b/test/stage1/behavior/type_info.zig index 4ae81aff20..2decf0c329 100644 --- a/test/stage1/behavior/type_info.zig +++ b/test/stage1/behavior/type_info.zig @@ -116,21 +116,6 @@ fn testOptional() void { expect(null_info.Optional.child == void); } -test "type info: promise info" { - testPromise(); - 
comptime testPromise(); -} - -fn testPromise() void { - const null_promise_info = @typeInfo(promise); - expect(TypeId(null_promise_info) == TypeId.Promise); - expect(null_promise_info.Promise.child == null); - - const promise_info = @typeInfo(promise->usize); - expect(TypeId(promise_info) == TypeId.Promise); - expect(promise_info.Promise.child.? == usize); -} - test "type info: error set, error union info" { testErrorSet(); comptime testErrorSet(); @@ -192,11 +177,11 @@ fn testUnion() void { expect(TypeId(typeinfo_info) == TypeId.Union); expect(typeinfo_info.Union.layout == TypeInfo.ContainerLayout.Auto); expect(typeinfo_info.Union.tag_type.? == TypeId); - expect(typeinfo_info.Union.fields.len == 25); + expect(typeinfo_info.Union.fields.len == 24); expect(typeinfo_info.Union.fields[4].enum_field != null); expect(typeinfo_info.Union.fields[4].enum_field.?.value == 4); expect(typeinfo_info.Union.fields[4].field_type == @typeOf(@typeInfo(u8).Int)); - expect(typeinfo_info.Union.decls.len == 21); + expect(typeinfo_info.Union.decls.len == 20); const TestNoTagUnion = union { Foo: void, @@ -265,7 +250,6 @@ fn testFunction() void { expect(fn_info.Fn.args.len == 2); expect(fn_info.Fn.is_var_args); expect(fn_info.Fn.return_type == null); - expect(fn_info.Fn.async_allocator_type == null); const test_instance: TestStruct = undefined; const bound_fn_info = @typeInfo(@typeOf(test_instance.foo)); From 72e983670e65eac0b89da5564432988862828b30 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 21 Jul 2019 16:21:16 -0400 Subject: [PATCH 002/125] simple async function call working --- CMakeLists.txt | 2 +- src/all_types.hpp | 30 ++++++++ src/analyze.cpp | 137 +++++++++++++++++++++++++++++---- src/analyze.hpp | 3 + src/codegen.cpp | 141 ++++++++++++++++++++++++++++------ src/ir.cpp | 187 +++++++++++++++++++++++++++++++++++++++------- src/ir_print.cpp | 16 ++++ 7 files changed, 449 insertions(+), 67 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 
d8cf0c507d..aa3cfbfeac 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -426,7 +426,6 @@ set(ZIG_MAIN_SRC "${CMAKE_SOURCE_DIR}/src/main.cpp") set(ZIG0_SHIM_SRC "${CMAKE_SOURCE_DIR}/src/userland.cpp") set(ZIG_SOURCES - "${CMAKE_SOURCE_DIR}/src/glibc.cpp" "${CMAKE_SOURCE_DIR}/src/analyze.cpp" "${CMAKE_SOURCE_DIR}/src/ast_render.cpp" "${CMAKE_SOURCE_DIR}/src/bigfloat.cpp" @@ -438,6 +437,7 @@ set(ZIG_SOURCES "${CMAKE_SOURCE_DIR}/src/compiler.cpp" "${CMAKE_SOURCE_DIR}/src/errmsg.cpp" "${CMAKE_SOURCE_DIR}/src/error.cpp" + "${CMAKE_SOURCE_DIR}/src/glibc.cpp" "${CMAKE_SOURCE_DIR}/src/ir.cpp" "${CMAKE_SOURCE_DIR}/src/ir_print.cpp" "${CMAKE_SOURCE_DIR}/src/libc_installation.cpp" diff --git a/src/all_types.hpp b/src/all_types.hpp index 7fe035ad1c..3f61e77f66 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1251,6 +1251,7 @@ enum ZigTypeId { ZigTypeIdBoundFn, ZigTypeIdArgTuple, ZigTypeIdOpaque, + ZigTypeIdCoroFrame, ZigTypeIdVector, ZigTypeIdEnumLiteral, }; @@ -1265,6 +1266,11 @@ struct ZigTypeOpaque { Buf *bare_name; }; +struct ZigTypeCoroFrame { + ZigFn *fn; + ZigType *locals_struct; +}; + struct ZigType { ZigTypeId id; Buf name; @@ -1290,6 +1296,7 @@ struct ZigType { ZigTypeBoundFn bound_fn; ZigTypeVector vector; ZigTypeOpaque opaque; + ZigTypeCoroFrame frame; } data; // use these fields to make sure we don't duplicate type table entries for the same type @@ -1340,6 +1347,7 @@ struct ZigFn { ScopeBlock *def_scope; // parent is child_scope Buf symbol_name; ZigType *type_entry; // function type + ZigType *frame_type; // coro frame type // in the case of normal functions this is the implicit return type // in the case of async functions this is the implicit return type according to the // zig source code, not according to zig ir @@ -1356,6 +1364,7 @@ struct ZigFn { ZigList alloca_gen_list; ZigList variable_list; + ZigList resume_blocks; Buf *section_name; AstNode *set_alignstack_node; @@ -1365,6 +1374,7 @@ struct ZigFn { ZigList export_list; LLVMValueRef 
valgrind_client_request_array; + LLVMBasicBlockRef preamble_llvm_block; FnInline fn_inline; FnAnalState anal_state; @@ -1512,6 +1522,7 @@ enum PanicMsgId { PanicMsgIdBadEnumValue, PanicMsgIdFloatToInt, PanicMsgIdPtrCastNull, + PanicMsgIdBadResume, PanicMsgIdCount, }; @@ -1755,6 +1766,7 @@ struct CodeGen { ZigType *entry_global_error_set; ZigType *entry_arg_tuple; ZigType *entry_enum_literal; + ZigType *entry_frame_header; } builtin_types; ZigType *align_amt_type; ZigType *stack_trace_type; @@ -2119,6 +2131,8 @@ struct IrBasicBlock { size_t ref_count; // index into the basic block list size_t index; + // for coroutines, the resume_index which corresponds to this block + size_t resume_index; LLVMBasicBlockRef llvm_block; LLVMBasicBlockRef llvm_exit_block; // The instruction that referenced this basic block and caused us to @@ -2297,6 +2311,8 @@ enum IrInstructionId { IrInstructionIdEndExpr, IrInstructionIdPtrOfArrayToSlice, IrInstructionIdUnionInitNamedField, + IrInstructionIdSuspendBegin, + IrInstructionIdSuspendBr, }; struct IrInstruction { @@ -3511,6 +3527,18 @@ struct IrInstructionPtrOfArrayToSlice { IrInstruction *result_loc; }; +struct IrInstructionSuspendBegin { + IrInstruction base; + + IrBasicBlock *resume_block; +}; + +struct IrInstructionSuspendBr { + IrInstruction base; + + IrBasicBlock *resume_block; +}; + enum ResultLocId { ResultLocIdInvalid, ResultLocIdNone, @@ -3593,6 +3621,8 @@ static const size_t maybe_null_index = 1; static const size_t err_union_err_index = 0; static const size_t err_union_payload_index = 1; +static const size_t coro_resume_index_index = 0; + // TODO call graph analysis to find out what this number needs to be for every function // MUST BE A POWER OF TWO. 
static const size_t stack_trace_ptr_count = 32; diff --git a/src/analyze.cpp b/src/analyze.cpp index 15e12caa8d..2b93c390e0 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -228,6 +228,8 @@ AstNode *type_decl_node(ZigType *type_entry) { return type_entry->data.enumeration.decl_node; case ZigTypeIdUnion: return type_entry->data.unionation.decl_node; + case ZigTypeIdCoroFrame: + return type_entry->data.frame.fn->proto_node; case ZigTypeIdOpaque: case ZigTypeIdMetaType: case ZigTypeIdVoid: @@ -262,6 +264,20 @@ bool type_is_resolved(ZigType *type_entry, ResolveStatus status) { return type_entry->data.structure.resolve_status >= status; case ZigTypeIdUnion: return type_entry->data.unionation.resolve_status >= status; + case ZigTypeIdCoroFrame: + switch (status) { + case ResolveStatusInvalid: + zig_unreachable(); + case ResolveStatusUnstarted: + case ResolveStatusZeroBitsKnown: + return true; + case ResolveStatusAlignmentKnown: + case ResolveStatusSizeKnown: + return type_entry->data.frame.locals_struct != nullptr; + case ResolveStatusLLVMFwdDecl: + case ResolveStatusLLVMFull: + return type_entry->llvm_type != nullptr; + } case ZigTypeIdEnum: switch (status) { case ResolveStatusUnstarted: @@ -345,6 +361,25 @@ static const char *ptr_len_to_star_str(PtrLen ptr_len) { zig_unreachable(); } +ZigType *get_coro_frame_type(CodeGen *g, ZigFn *fn) { + if (fn->frame_type != nullptr) { + return fn->frame_type; + } + + ZigType *entry = new_type_table_entry(ZigTypeIdCoroFrame); + buf_resize(&entry->name, 0); + buf_appendf(&entry->name, "@Frame(%s)", buf_ptr(&fn->symbol_name)); + + entry->data.frame.fn = fn; + + // Coroutine frames are always non-zero bits because they always have a resume index. 
+ entry->abi_size = SIZE_MAX; + entry->size_in_bits = SIZE_MAX; + + fn->frame_type = entry; + return entry; +} + ZigType *get_pointer_to_type_extra(CodeGen *g, ZigType *child_type, bool is_const, bool is_volatile, PtrLen ptr_len, uint32_t byte_alignment, uint32_t bit_offset_in_host, uint32_t host_int_bytes, bool allow_zero) @@ -1039,6 +1074,7 @@ static Error emit_error_unless_type_allowed_in_packed_struct(CodeGen *g, ZigType case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: case ZigTypeIdOpaque: + case ZigTypeIdCoroFrame: add_node_error(g, source_node, buf_sprintf("type '%s' not allowed in packed struct; no guaranteed in-memory representation", buf_ptr(&type_entry->name))); @@ -1127,6 +1163,7 @@ bool type_allowed_in_extern(CodeGen *g, ZigType *type_entry) { case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: case ZigTypeIdVoid: + case ZigTypeIdCoroFrame: return false; case ZigTypeIdOpaque: case ZigTypeIdUnreachable: @@ -1297,6 +1334,7 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc case ZigTypeIdUnion: case ZigTypeIdFn: case ZigTypeIdVector: + case ZigTypeIdCoroFrame: switch (type_requires_comptime(g, type_entry)) { case ReqCompTimeNo: break; @@ -1392,6 +1430,7 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc case ZigTypeIdUnion: case ZigTypeIdFn: case ZigTypeIdVector: + case ZigTypeIdCoroFrame: switch (type_requires_comptime(g, fn_type_id.return_type)) { case ReqCompTimeInvalid: return g->builtin_types.entry_invalid; @@ -1825,6 +1864,39 @@ static Error resolve_union_type(CodeGen *g, ZigType *union_type) { return ErrorNone; } +static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { + assert(frame_type->data.frame.locals_struct == nullptr); + + ZigFn *fn = frame_type->data.frame.fn; + switch (fn->anal_state) { + case FnAnalStateInvalid: + return ErrorSemanticAnalyzeFail; + case FnAnalStateComplete: + break; + case FnAnalStateReady: + analyze_fn_body(g, fn); + if (fn->anal_state == 
FnAnalStateInvalid) + return ErrorSemanticAnalyzeFail; + break; + case FnAnalStateProbing: + add_node_error(g, fn->proto_node, + buf_sprintf("cannot resolve '%s': function not fully analyzed yet", + buf_ptr(&frame_type->name))); + return ErrorSemanticAnalyzeFail; + } + // TODO iterate over fn->alloca_gen_list + ZigList field_types = {}; + ZigList field_names = {}; + + field_names.append("resume_index"); + field_types.append(g->builtin_types.entry_usize); + + assert(field_names.length == field_types.length); + frame_type->data.frame.locals_struct = get_struct_type(g, buf_ptr(&frame_type->name), + field_names.items, field_types.items, field_names.length); + return ErrorNone; +} + static bool type_is_valid_extern_enum_tag(CodeGen *g, ZigType *ty) { // Only integer types are allowed by the C ABI if(ty->id != ZigTypeIdInt) @@ -2997,6 +3069,7 @@ ZigType *validate_var_type(CodeGen *g, AstNode *source_node, ZigType *type_entry case ZigTypeIdFn: case ZigTypeIdBoundFn: case ZigTypeIdVector: + case ZigTypeIdCoroFrame: return type_entry; } zig_unreachable(); @@ -3496,6 +3569,7 @@ bool is_container(ZigType *type_entry) { case ZigTypeIdArgTuple: case ZigTypeIdOpaque: case ZigTypeIdVector: + case ZigTypeIdCoroFrame: return false; } zig_unreachable(); @@ -3552,6 +3626,7 @@ Error resolve_container_type(CodeGen *g, ZigType *type_entry) { case ZigTypeIdArgTuple: case ZigTypeIdOpaque: case ZigTypeIdVector: + case ZigTypeIdCoroFrame: zig_unreachable(); } zig_unreachable(); @@ -4002,6 +4077,7 @@ bool handle_is_ptr(ZigType *type_entry) { return false; case ZigTypeIdArray: case ZigTypeIdStruct: + case ZigTypeIdCoroFrame: return type_has_bits(type_entry); case ZigTypeIdErrorUnion: return type_has_bits(type_entry->data.error_union.payload_type); @@ -4246,6 +4322,9 @@ static uint32_t hash_const_val(ConstExprValue *const_val) { case ZigTypeIdVector: // TODO better hashing algorithm return 3647867726; + case ZigTypeIdCoroFrame: + // TODO better hashing algorithm + return 675741936; case 
ZigTypeIdBoundFn: case ZigTypeIdInvalid: case ZigTypeIdUnreachable: @@ -4310,6 +4389,7 @@ static bool can_mutate_comptime_var_state(ConstExprValue *value) { case ZigTypeIdOpaque: case ZigTypeIdErrorSet: case ZigTypeIdEnum: + case ZigTypeIdCoroFrame: return false; case ZigTypeIdPointer: @@ -4381,6 +4461,7 @@ static bool return_type_is_cacheable(ZigType *return_type) { case ZigTypeIdEnum: case ZigTypeIdPointer: case ZigTypeIdVector: + case ZigTypeIdCoroFrame: return true; case ZigTypeIdArray: @@ -4512,6 +4593,7 @@ OnePossibleValue type_has_one_possible_value(CodeGen *g, ZigType *type_entry) { case ZigTypeIdBool: case ZigTypeIdFloat: case ZigTypeIdErrorUnion: + case ZigTypeIdCoroFrame: return OnePossibleValueNo; case ZigTypeIdUndefined: case ZigTypeIdNull: @@ -4599,6 +4681,7 @@ ReqCompTime type_requires_comptime(CodeGen *g, ZigType *type_entry) { case ZigTypeIdFloat: case ZigTypeIdVoid: case ZigTypeIdUnreachable: + case ZigTypeIdCoroFrame: return ReqCompTimeNo; } zig_unreachable(); @@ -4941,6 +5024,8 @@ Error type_resolve(CodeGen *g, ZigType *ty, ResolveStatus status) { return resolve_enum_zero_bits(g, ty); } else if (ty->id == ZigTypeIdUnion) { return resolve_union_alignment(g, ty); + } else if (ty->id == ZigTypeIdCoroFrame) { + return resolve_coro_frame(g, ty); } return ErrorNone; case ResolveStatusSizeKnown: @@ -4950,6 +5035,8 @@ Error type_resolve(CodeGen *g, ZigType *ty, ResolveStatus status) { return resolve_enum_zero_bits(g, ty); } else if (ty->id == ZigTypeIdUnion) { return resolve_union_type(g, ty); + } else if (ty->id == ZigTypeIdCoroFrame) { + return resolve_coro_frame(g, ty); } return ErrorNone; case ResolveStatusLLVMFwdDecl: @@ -5144,6 +5231,8 @@ bool const_values_equal(CodeGen *g, ConstExprValue *a, ConstExprValue *b) { return false; } return true; + case ZigTypeIdCoroFrame: + zig_panic("TODO"); case ZigTypeIdUndefined: zig_panic("TODO"); case ZigTypeIdNull: @@ -5496,6 +5585,10 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) { 
buf_appendf(buf, "(args value)"); return; } + case ZigTypeIdCoroFrame: + buf_appendf(buf, "(TODO: coroutine frame value)"); + return; + } zig_unreachable(); } @@ -5542,6 +5635,7 @@ uint32_t type_id_hash(TypeId x) { case ZigTypeIdFn: case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: + case ZigTypeIdCoroFrame: zig_unreachable(); case ZigTypeIdErrorUnion: return hash_ptr(x.data.error_union.err_set_type) ^ hash_ptr(x.data.error_union.payload_type); @@ -5590,6 +5684,7 @@ bool type_id_eql(TypeId a, TypeId b) { case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: case ZigTypeIdOpaque: + case ZigTypeIdCoroFrame: zig_unreachable(); case ZigTypeIdErrorUnion: return a.data.error_union.err_set_type == b.data.error_union.err_set_type && @@ -5818,10 +5913,12 @@ size_t type_id_index(ZigType *entry) { return 20; case ZigTypeIdOpaque: return 21; - case ZigTypeIdVector: + case ZigTypeIdCoroFrame: return 22; - case ZigTypeIdEnumLiteral: + case ZigTypeIdVector: return 23; + case ZigTypeIdEnumLiteral: + return 24; } zig_unreachable(); } @@ -5878,6 +5975,8 @@ const char *type_id_name(ZigTypeId id) { return "Opaque"; case ZigTypeIdVector: return "Vector"; + case ZigTypeIdCoroFrame: + return "Frame"; } zig_unreachable(); } @@ -5947,7 +6046,7 @@ bool type_can_fail(ZigType *type_entry) { } bool fn_type_can_fail(FnTypeId *fn_type_id) { - return type_can_fail(fn_type_id->return_type) || fn_type_id->cc == CallingConventionAsync; + return type_can_fail(fn_type_id->return_type); } // ErrorNone - result pointer has the type @@ -6935,12 +7034,12 @@ static void resolve_llvm_types_array(CodeGen *g, ZigType *type) { debug_align_in_bits, get_llvm_di_type(g, elem_type), (int)type->data.array.len); } -static void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type) { +void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type, ZigFn *fn) { if (fn_type->llvm_di_type != nullptr) return; FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; bool first_arg_return = want_first_arg_sret(g, fn_type_id); - bool is_async = 
fn_type_id->cc == CallingConventionAsync; + bool is_async = fn_type_id->cc == CallingConventionAsync || (fn != nullptr && fn->resume_blocks.length != 0); bool is_c_abi = fn_type_id->cc == CallingConventionC; bool prefix_arg_error_return_trace = g->have_err_ret_tracing && fn_type_can_fail(fn_type_id); // +1 for maybe making the first argument the return value @@ -6955,7 +7054,7 @@ static void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type) { param_di_types.append(get_llvm_di_type(g, fn_type_id->return_type)); ZigType *gen_return_type; if (is_async) { - gen_return_type = get_pointer_to_type(g, g->builtin_types.entry_u8, false); + gen_return_type = g->builtin_types.entry_usize; } else if (!type_has_bits(fn_type_id->return_type)) { gen_return_type = g->builtin_types.entry_void; } else if (first_arg_return) { @@ -6974,13 +7073,10 @@ static void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type) { param_di_types.append(get_llvm_di_type(g, gen_type)); } if (is_async) { - // coroutine frame pointer - // TODO if we can make this typed a little more it will be better for - // debug symbols. - // TODO do we need to make this aligned more? - ZigType *void_star = get_pointer_to_type(g, g->builtin_types.entry_c_void, false); - gen_param_types.append(get_llvm_type(g, void_star)); - param_di_types.append(get_llvm_di_type(g, void_star)); + ZigType *frame_type = (fn == nullptr) ? 
g->builtin_types.entry_frame_header : get_coro_frame_type(g, fn); + ZigType *ptr_type = get_pointer_to_type(g, frame_type, false); + gen_param_types.append(get_llvm_type(g, ptr_type)); + param_di_types.append(get_llvm_di_type(g, ptr_type)); } fn_type->data.fn.gen_param_info = allocate(fn_type_id->param_count); @@ -7055,6 +7151,17 @@ static void resolve_llvm_types_anyerror(CodeGen *g) { get_llvm_di_type(g, g->err_tag_type), ""); } +static void resolve_llvm_types_coro_frame(CodeGen *g, ZigType *frame_type, ResolveStatus wanted_resolve_status) { + if (frame_type->llvm_di_type != nullptr) return; + + resolve_llvm_types_struct(g, frame_type->data.frame.locals_struct, wanted_resolve_status); + frame_type->llvm_type = frame_type->data.frame.locals_struct->llvm_type; + frame_type->llvm_di_type = frame_type->data.frame.locals_struct->llvm_di_type; + frame_type->abi_size = frame_type->data.frame.locals_struct->abi_size; + frame_type->abi_align = frame_type->data.frame.locals_struct->abi_align; + frame_type->size_in_bits = frame_type->data.frame.locals_struct->size_in_bits; +} + static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_resolve_status) { assert(type->id == ZigTypeIdOpaque || type_is_resolved(type, ResolveStatusSizeKnown)); assert(wanted_resolve_status > ResolveStatusSizeKnown); @@ -7096,7 +7203,7 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r case ZigTypeIdArray: return resolve_llvm_types_array(g, type); case ZigTypeIdFn: - return resolve_llvm_types_fn(g, type); + return resolve_llvm_types_fn(g, type, nullptr); case ZigTypeIdErrorSet: { if (type->llvm_di_type != nullptr) return; @@ -7115,6 +7222,8 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r type->abi_align, get_llvm_di_type(g, type->data.vector.elem_type), type->data.vector.len); return; } + case ZigTypeIdCoroFrame: + return resolve_llvm_types_coro_frame(g, type, wanted_resolve_status); } zig_unreachable(); } diff 
--git a/src/analyze.hpp b/src/analyze.hpp index fbbdece8ba..286ff5e043 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -16,6 +16,7 @@ ErrorMsg *add_token_error(CodeGen *g, ZigType *owner, Token *token, Buf *msg); ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, AstNode *node, Buf *msg); void emit_error_notes_for_ref_stack(CodeGen *g, ErrorMsg *msg); ZigType *new_type_table_entry(ZigTypeId id); +ZigType *get_coro_frame_type(CodeGen *g, ZigFn *fn); ZigType *get_pointer_to_type(CodeGen *g, ZigType *child_type, bool is_const); ZigType *get_pointer_to_type_extra(CodeGen *g, ZigType *child_type, bool is_const, bool is_volatile, PtrLen ptr_len, @@ -247,4 +248,6 @@ void src_assert(bool ok, AstNode *source_node); bool is_container(ZigType *type_entry); ConstExprValue *analyze_const_value(CodeGen *g, Scope *scope, AstNode *node, ZigType *type_entry, Buf *type_name); +void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type, ZigFn *fn); + #endif diff --git a/src/codegen.cpp b/src/codegen.cpp index 4cc99b39a8..85784e5ac5 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -498,7 +498,7 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) { ZigType *fn_type = fn_table_entry->type_entry; // Make the raw_type_ref populated - (void)get_llvm_type(g, fn_type); + resolve_llvm_types_fn(g, fn_type, fn_table_entry); LLVMTypeRef fn_llvm_type = fn_type->data.fn.raw_type_ref; if (fn_table_entry->body_node == nullptr) { LLVMValueRef existing_llvm_fn = LLVMGetNamedFunction(g->module, buf_ptr(symbol_name)); @@ -921,9 +921,8 @@ static bool ir_want_fast_math(CodeGen *g, IrInstruction *instruction) { return false; } -static bool ir_want_runtime_safety(CodeGen *g, IrInstruction *instruction) { +static bool ir_want_runtime_safety_scope(CodeGen *g, Scope *scope) { // TODO memoize - Scope *scope = instruction->scope; while (scope) { if (scope->id == ScopeIdBlock) { ScopeBlock *block_scope = (ScopeBlock *)scope; @@ -941,6 +940,10 @@ static bool 
ir_want_runtime_safety(CodeGen *g, IrInstruction *instruction) { g->build_mode != BuildModeSmallRelease); } +static bool ir_want_runtime_safety(CodeGen *g, IrInstruction *instruction) { + return ir_want_runtime_safety_scope(g, instruction->scope); +} + static Buf *panic_msg_buf(PanicMsgId msg_id) { switch (msg_id) { case PanicMsgIdCount: @@ -981,6 +984,8 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) { return buf_create_from_str("integer part of floating point value out of bounds"); case PanicMsgIdPtrCastNull: return buf_create_from_str("cast causes pointer to be null"); + case PanicMsgIdBadResume: + return buf_create_from_str("invalid resume of async function"); } zig_unreachable(); } @@ -1027,14 +1032,18 @@ static void gen_safety_crash(CodeGen *g, PanicMsgId msg_id) { gen_panic(g, get_panic_msg_ptr_val(g, msg_id), nullptr); } -static void gen_assertion(CodeGen *g, PanicMsgId msg_id, IrInstruction *source_instruction) { - if (ir_want_runtime_safety(g, source_instruction)) { +static void gen_assertion_scope(CodeGen *g, PanicMsgId msg_id, Scope *source_scope) { + if (ir_want_runtime_safety_scope(g, source_scope)) { gen_safety_crash(g, msg_id); } else { LLVMBuildUnreachable(g->builder); } } +static void gen_assertion(CodeGen *g, PanicMsgId msg_id, IrInstruction *source_instruction) { + return gen_assertion_scope(g, msg_id, source_instruction->scope); +} + static LLVMValueRef get_stacksave_fn_val(CodeGen *g) { if (g->stacksave_fn_val) return g->stacksave_fn_val; @@ -2092,6 +2101,10 @@ static LLVMValueRef ir_render_save_err_ret_addr(CodeGen *g, IrExecutable *execut } static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *return_instruction) { + if (g->cur_fn->resume_blocks.length != 0) { + LLVMBuildRet(g->builder, LLVMGetUndef(g->builtin_types.entry_usize->llvm_type)); + return nullptr; + } if (want_first_arg_sret(g, &g->cur_fn->type_entry->data.fn.fn_type_id)) { if (return_instruction->value == nullptr) { 
LLVMBuildRetVoid(g->builder); @@ -3375,8 +3388,7 @@ static LLVMValueRef ir_render_elem_ptr(CodeGen *g, IrExecutable *executable, IrI static bool get_prefix_arg_err_ret_stack(CodeGen *g, FnTypeId *fn_type_id) { return g->have_err_ret_tracing && (fn_type_id->return_type->id == ZigTypeIdErrorUnion || - fn_type_id->return_type->id == ZigTypeIdErrorSet || - fn_type_id->cc == CallingConventionAsync); + fn_type_id->return_type->id == ZigTypeIdErrorSet); } static LLVMValueRef get_new_stack_addr(CodeGen *g, LLVMValueRef new_stack) { @@ -3440,14 +3452,22 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr bool is_var_args = fn_type_id->is_var_args; ZigList gen_param_values = {}; LLVMValueRef result_loc = instruction->result_loc ? ir_llvm_value(g, instruction->result_loc) : nullptr; - if (first_arg_ret) { - gen_param_values.append(result_loc); - } - if (prefix_arg_err_ret_stack) { - gen_param_values.append(get_cur_err_ret_trace_val(g, instruction->base.scope)); - } if (instruction->is_async) { - zig_panic("TODO codegen async call"); + assert(result_loc != nullptr); + assert(instruction->fn_entry != nullptr); + LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, result_loc, coro_resume_index_index, ""); + LLVMValueRef zero = LLVMConstNull(g->builtin_types.entry_usize->llvm_type); + LLVMBuildStore(g->builder, zero, resume_index_ptr); + + if (prefix_arg_err_ret_stack) { + zig_panic("TODO"); + } + + gen_param_values.append(result_loc); + } else if (first_arg_ret) { + gen_param_values.append(result_loc); + } else if (prefix_arg_err_ret_stack) { + gen_param_values.append(get_cur_err_ret_trace_val(g, instruction->base.scope)); } FnWalk fn_walk = {}; fn_walk.id = FnWalkIdCall; @@ -3489,9 +3509,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr if (instruction->is_async) { - LLVMValueRef payload_ptr = LLVMBuildStructGEP(g->builder, result_loc, err_union_payload_index, ""); - LLVMBuildStore(g->builder, 
result, payload_ptr); - return result_loc; + return nullptr; } if (src_return_type->id == ZigTypeIdUnreachable) { @@ -4921,6 +4939,24 @@ static LLVMValueRef ir_render_assert_non_null(CodeGen *g, IrExecutable *executab return nullptr; } +static LLVMValueRef ir_render_suspend_begin(CodeGen *g, IrExecutable *executable, + IrInstructionSuspendBegin *instruction) +{ + LLVMValueRef locals_ptr = g->cur_ret_ptr; + LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_resume_index_index, ""); + LLVMValueRef new_resume_index = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, + instruction->resume_block->resume_index, false); + LLVMBuildStore(g->builder, new_resume_index, resume_index_ptr); + return nullptr; +} + +static LLVMValueRef ir_render_suspend_br(CodeGen *g, IrExecutable *executable, + IrInstructionSuspendBr *instruction) +{ + LLVMBuildRet(g->builder, LLVMGetUndef(g->builtin_types.entry_usize->llvm_type)); + return nullptr; +} + static void set_debug_location(CodeGen *g, IrInstruction *instruction) { AstNode *source_node = instruction->source_node; Scope *scope = instruction->scope; @@ -5161,6 +5197,10 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_resize_slice(g, executable, (IrInstructionResizeSlice *)instruction); case IrInstructionIdPtrOfArrayToSlice: return ir_render_ptr_of_array_to_slice(g, executable, (IrInstructionPtrOfArrayToSlice *)instruction); + case IrInstructionIdSuspendBegin: + return ir_render_suspend_begin(g, executable, (IrInstructionSuspendBegin *)instruction); + case IrInstructionIdSuspendBr: + return ir_render_suspend_br(g, executable, (IrInstructionSuspendBr *)instruction); } zig_unreachable(); } @@ -5422,7 +5462,8 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con } return val; } - + case ZigTypeIdCoroFrame: + zig_panic("TODO bit pack a coroutine frame"); } zig_unreachable(); } @@ -5943,7 +5984,8 @@ static LLVMValueRef 
gen_const_val(CodeGen *g, ConstExprValue *const_val, const c case ZigTypeIdArgTuple: case ZigTypeIdOpaque: zig_unreachable(); - + case ZigTypeIdCoroFrame: + zig_panic("TODO"); } zig_unreachable(); } @@ -6027,12 +6069,20 @@ static void generate_error_name_table(CodeGen *g) { static void build_all_basic_blocks(CodeGen *g, ZigFn *fn) { IrExecutable *executable = &fn->analyzed_executable; assert(executable->basic_block_list.length > 0); + LLVMValueRef fn_val = fn_llvm_value(g, fn); + LLVMBasicBlockRef first_bb = nullptr; + if (fn->resume_blocks.length != 0) { + first_bb = LLVMAppendBasicBlock(fn_val, "AsyncSwitch"); + fn->preamble_llvm_block = first_bb; + } for (size_t block_i = 0; block_i < executable->basic_block_list.length; block_i += 1) { IrBasicBlock *bb = executable->basic_block_list.at(block_i); - bb->llvm_block = LLVMAppendBasicBlock(fn_llvm_value(g, fn), bb->name_hint); + bb->llvm_block = LLVMAppendBasicBlock(fn_val, bb->name_hint); } - IrBasicBlock *entry_bb = executable->basic_block_list.at(0); - LLVMPositionBuilderAtEnd(g->builder, entry_bb->llvm_block); + if (first_bb == nullptr) { + first_bb = executable->basic_block_list.at(0)->llvm_block; + } + LLVMPositionBuilderAtEnd(g->builder, first_bb); } static void gen_global_var(CodeGen *g, ZigVar *var, LLVMValueRef init_val, @@ -6209,7 +6259,7 @@ static void do_code_gen(CodeGen *g) { build_all_basic_blocks(g, fn_table_entry); clear_debug_source_node(g); - if (want_sret) { + if (want_sret || fn_table_entry->resume_blocks.length != 0) { g->cur_ret_ptr = LLVMGetParam(fn, 0); } else if (handle_is_ptr(fn_type_id->return_type)) { g->cur_ret_ptr = build_alloca(g, fn_type_id->return_type, "result", 0); @@ -6357,6 +6407,41 @@ static void do_code_gen(CodeGen *g) { fn_walk_init.data.inits.gen_i = gen_i_init; walk_function_params(g, fn_table_entry->type_entry, &fn_walk_init); + if (fn_table_entry->resume_blocks.length != 0) { + if (!g->strip_debug_symbols) { + AstNode *source_node = fn_table_entry->proto_node; + 
ZigLLVMSetCurrentDebugLocation(g->builder, (int)source_node->line + 1, + (int)source_node->column + 1, get_di_scope(g, fn_table_entry->child_scope)); + } + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; + LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume"); + LLVMPositionBuilderAtEnd(g->builder, bad_resume_block); + gen_assertion_scope(g, PanicMsgIdBadResume, fn_table_entry->child_scope); + + LLVMBasicBlockRef get_size_block = LLVMAppendBasicBlock(g->cur_fn_val, "GetSize"); + LLVMPositionBuilderAtEnd(g->builder, get_size_block); + assert(fn_table_entry->frame_type->abi_size != 0); + assert(fn_table_entry->frame_type->abi_size != SIZE_MAX); + LLVMValueRef size_val = LLVMConstInt(usize_type_ref, fn_table_entry->frame_type->abi_size, false); + LLVMBuildRet(g->builder, size_val); + + LLVMPositionBuilderAtEnd(g->builder, fn_table_entry->preamble_llvm_block); + LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, + coro_resume_index_index, ""); + LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, ""); + // The +1 is because index 0 is reserved for getting the size. 
+ LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block, + fn_table_entry->resume_blocks.length + 1); + + LLVMValueRef zero = LLVMConstNull(usize_type_ref); + LLVMAddCase(switch_instr, zero, get_size_block); + + for (size_t resume_i = 0; resume_i < fn_table_entry->resume_blocks.length; resume_i += 1) { + LLVMValueRef case_value = LLVMConstInt(usize_type_ref, resume_i + 1, false); + LLVMAddCase(switch_instr, case_value, fn_table_entry->resume_blocks.at(resume_i)->llvm_block); + } + } + ir_render(g, fn_table_entry); } @@ -6644,9 +6729,13 @@ static void define_builtin_types(CodeGen *g) { g->primitive_type_table.put(&entry->name, entry); } + { + const char *field_names[] = {"resume_index"}; + ZigType *field_types[] = {g->builtin_types.entry_usize}; + g->builtin_types.entry_frame_header = get_struct_type(g, "(frame header)", field_names, field_types, 1); + } } - static BuiltinFnEntry *create_builtin_fn(CodeGen *g, BuiltinFnId id, const char *name, size_t count) { BuiltinFnEntry *builtin_fn = allocate(1); buf_init_from_str(&builtin_fn->name, name); @@ -7072,6 +7161,7 @@ Buf *codegen_generate_builtin_source(CodeGen *g) { " BoundFn: Fn,\n" " ArgTuple: void,\n" " Opaque: void,\n" + " Frame: void,\n" " Vector: Vector,\n" " EnumLiteral: void,\n" "\n\n" @@ -8335,6 +8425,7 @@ static void prepend_c_type_to_decl_list(CodeGen *g, GenH *gen_h, ZigType *type_e case ZigTypeIdArgTuple: case ZigTypeIdErrorUnion: case ZigTypeIdErrorSet: + case ZigTypeIdCoroFrame: zig_unreachable(); case ZigTypeIdVoid: case ZigTypeIdUnreachable: @@ -8518,6 +8609,7 @@ static void get_c_type(CodeGen *g, GenH *gen_h, ZigType *type_entry, Buf *out_bu case ZigTypeIdUndefined: case ZigTypeIdNull: case ZigTypeIdArgTuple: + case ZigTypeIdCoroFrame: zig_unreachable(); } } @@ -8685,6 +8777,7 @@ static void gen_h_file(CodeGen *g) { case ZigTypeIdOptional: case ZigTypeIdFn: case ZigTypeIdVector: + case ZigTypeIdCoroFrame: zig_unreachable(); case ZigTypeIdEnum: if 
(type_entry->data.enumeration.layout == ContainerLayoutExtern) { diff --git a/src/ir.cpp b/src/ir.cpp index f23fe1b7d0..2332e28c84 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -318,6 +318,7 @@ static bool types_have_same_zig_comptime_repr(ZigType *a, ZigType *b) { case ZigTypeIdFn: case ZigTypeIdArgTuple: case ZigTypeIdVector: + case ZigTypeIdCoroFrame: return false; } zig_unreachable(); @@ -1026,6 +1027,14 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionUnionInitNamedFi return IrInstructionIdUnionInitNamedField; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionSuspendBegin *) { + return IrInstructionIdSuspendBegin; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionSuspendBr *) { + return IrInstructionIdSuspendBr; +} + template static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) { T *special_instruction = allocate(1); @@ -3183,6 +3192,30 @@ static IrInstruction *ir_build_end_expr(IrBuilder *irb, Scope *scope, AstNode *s return &instruction->base; } +static IrInstruction *ir_build_suspend_begin(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrBasicBlock *resume_block) +{ + IrInstructionSuspendBegin *instruction = ir_build_instruction(irb, scope, source_node); + instruction->base.value.type = irb->codegen->builtin_types.entry_void; + instruction->resume_block = resume_block; + + ir_ref_bb(resume_block); + + return &instruction->base; +} + +static IrInstruction *ir_build_suspend_br(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrBasicBlock *resume_block) +{ + IrInstructionSuspendBr *instruction = ir_build_instruction(irb, scope, source_node); + instruction->base.value.type = irb->codegen->builtin_types.entry_unreachable; + instruction->resume_block = resume_block; + + ir_ref_bb(resume_block); + + return &instruction->base; +} + static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) { results[ReturnKindUnconditional] = 0; 
results[ReturnKindError] = 0; @@ -3286,6 +3319,18 @@ static void ir_set_cursor_at_end_and_append_block(IrBuilder *irb, IrBasicBlock * ir_set_cursor_at_end(irb, basic_block); } +static ScopeSuspend *get_scope_suspend(Scope *scope) { + while (scope) { + if (scope->id == ScopeIdSuspend) + return (ScopeSuspend *)scope; + if (scope->id == ScopeIdFnDef) + return nullptr; + + scope = scope->parent; + } + return nullptr; +} + static ScopeDeferExpr *get_scope_defer_expr(Scope *scope) { while (scope) { if (scope->id == ScopeIdDeferExpr) @@ -3308,14 +3353,9 @@ static IrInstruction *ir_gen_async_return(IrBuilder *irb, Scope *scope, AstNode { ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value)); - bool is_async = exec_is_async(irb->exec); - if (!is_async) { - IrInstruction *return_inst = ir_build_return(irb, scope, node, return_value); - return_inst->is_gen = is_generated_code; - return return_inst; - } - - zig_panic("TODO async return"); + IrInstruction *return_inst = ir_build_return(irb, scope, node, return_value); + return_inst->is_gen = is_generated_code; + return return_inst; } static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, ResultLoc *result_loc) { @@ -5393,12 +5433,8 @@ static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node } bool is_async = node->data.fn_call_expr.is_async; - if (is_async) { - zig_panic("TODO async fn call"); - } - - IrInstruction *fn_call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto, - is_async, nullptr, result_loc); + IrInstruction *fn_call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false, + FnInlineAuto, is_async, nullptr, result_loc); return ir_lval_wrap(irb, scope, fn_call, lval, result_loc); } @@ -7655,7 +7691,45 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNode 
*node) { assert(node->type == NodeTypeSuspend); - zig_panic("TODO ir_gen_suspend"); + ZigFn *fn_entry = exec_fn_entry(irb->exec); + if (!fn_entry) { + add_node_error(irb->codegen, node, buf_sprintf("suspend outside function definition")); + return irb->codegen->invalid_instruction; + } + ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(parent_scope); + if (scope_defer_expr) { + if (!scope_defer_expr->reported_err) { + ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside defer expression")); + add_error_note(irb->codegen, msg, scope_defer_expr->base.source_node, buf_sprintf("defer here")); + scope_defer_expr->reported_err = true; + } + return irb->codegen->invalid_instruction; + } + ScopeSuspend *existing_suspend_scope = get_scope_suspend(parent_scope); + if (existing_suspend_scope) { + if (!existing_suspend_scope->reported_err) { + ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside suspend block")); + add_error_note(irb->codegen, msg, existing_suspend_scope->base.source_node, buf_sprintf("other suspend block here")); + existing_suspend_scope->reported_err = true; + } + return irb->codegen->invalid_instruction; + } + + IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "Resume"); + + ir_build_suspend_begin(irb, parent_scope, node, resume_block); + if (node->data.suspend.block != nullptr) { + Scope *child_scope; + ScopeSuspend *suspend_scope = create_suspend_scope(irb->codegen, node, parent_scope); + suspend_scope->resume_block = resume_block; + child_scope = &suspend_scope->base; + IrInstruction *susp_res = ir_gen_node(irb, node->data.suspend.block, child_scope); + ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, node->data.suspend.block, susp_res)); + } + + IrInstruction *result = ir_build_suspend_br(irb, parent_scope, node, resume_block); + ir_set_cursor_at_end_and_append_block(irb, resume_block); + return result; } static IrInstruction 
*ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scope, @@ -7854,13 +7928,6 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec // Entry block gets a reference because we enter it to begin. ir_ref_bb(irb->current_basic_block); - ZigFn *fn_entry = exec_fn_entry(irb->exec); - - bool is_async = fn_entry != nullptr && fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync; - if (is_async) { - zig_panic("ir_gen async fn"); - } - IrInstruction *result = ir_gen_node_extra(irb, node, scope, LValNone, nullptr); assert(result); if (irb->exec->invalid) @@ -12659,6 +12726,7 @@ static IrInstruction *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp * case ZigTypeIdNull: case ZigTypeIdErrorUnion: case ZigTypeIdUnion: + case ZigTypeIdCoroFrame: operator_allowed = false; break; case ZigTypeIdOptional: @@ -14023,6 +14091,7 @@ static IrInstruction *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructio case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: case ZigTypeIdOpaque: + case ZigTypeIdCoroFrame: ir_add_error(ira, target, buf_sprintf("invalid export target '%s'", buf_ptr(&type_value->name))); break; @@ -14047,6 +14116,7 @@ static IrInstruction *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructio case ZigTypeIdArgTuple: case ZigTypeIdOpaque: case ZigTypeIdEnumLiteral: + case ZigTypeIdCoroFrame: ir_add_error(ira, target, buf_sprintf("invalid export target type '%s'", buf_ptr(&target->value.type->name))); break; @@ -14553,6 +14623,20 @@ static IrInstruction *ir_analyze_instruction_reset_result(IrAnalyze *ira, IrInst return ir_const_void(ira, &instruction->base); } +static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction, ZigFn *fn_entry, + ZigType *fn_type, IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count) +{ + ir_assert(fn_entry != nullptr, &call_instruction->base); + + ZigType *frame_type = get_coro_frame_type(ira->codegen, fn_entry); + IrInstruction 
*result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc, + frame_type, nullptr, true, true); + if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc))) { + return result_loc; + } + return ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, arg_count, + casted_args, FnInlineAuto, true, nullptr, result_loc, frame_type); +} static bool ir_analyze_fn_call_inline_arg(IrAnalyze *ira, AstNode *fn_proto_node, IrInstruction *arg, Scope **exec_scope, size_t *next_proto_i) { @@ -15366,16 +15450,18 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c if (type_is_invalid(return_type)) return ira->codegen->invalid_instruction; - if (call_instruction->is_async) { - zig_panic("TODO async call"); - } - if (fn_entry != nullptr && fn_entry->fn_inline == FnInlineAlways && fn_inline == FnInlineNever) { ir_add_error(ira, &call_instruction->base, buf_sprintf("no-inline call of inline function")); return ira->codegen->invalid_instruction; } + if (call_instruction->is_async) { + IrInstruction *result = ir_analyze_async_call(ira, call_instruction, fn_entry, fn_type, fn_ref, + casted_args, call_param_count); + return ir_finish_anal(ira, result); + } + IrInstruction *result_loc; if (handle_is_ptr(return_type)) { result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc, @@ -15535,7 +15621,7 @@ static Error ir_read_const_ptr(IrAnalyze *ira, CodeGen *codegen, AstNode *source zig_unreachable(); } -static IrInstruction *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op_instruction) { +static IrInstruction *ir_analyze_optional_type(IrAnalyze *ira, IrInstructionUnOp *un_op_instruction) { Error err; IrInstruction *value = un_op_instruction->value->child; ZigType *type_entry = ir_resolve_type(ira, value); @@ -15569,6 +15655,7 @@ static IrInstruction *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op_ case ZigTypeIdFn: case 
ZigTypeIdBoundFn: case ZigTypeIdArgTuple: + case ZigTypeIdCoroFrame: return ir_const_type(ira, &un_op_instruction->base, get_optional_type(ira->codegen, type_entry)); case ZigTypeIdUnreachable: case ZigTypeIdOpaque: @@ -15733,7 +15820,7 @@ static IrInstruction *ir_analyze_instruction_un_op(IrAnalyze *ira, IrInstruction return result; } case IrUnOpOptional: - return ir_analyze_maybe(ira, instruction); + return ir_analyze_optional_type(ira, instruction); } zig_unreachable(); } @@ -17340,6 +17427,7 @@ static IrInstruction *ir_analyze_instruction_slice_type(IrAnalyze *ira, case ZigTypeIdFn: case ZigTypeIdBoundFn: case ZigTypeIdVector: + case ZigTypeIdCoroFrame: { ResolveStatus needed_status = (align_bytes == 0) ? ResolveStatusZeroBitsKnown : ResolveStatusAlignmentKnown; @@ -17454,6 +17542,7 @@ static IrInstruction *ir_analyze_instruction_array_type(IrAnalyze *ira, case ZigTypeIdFn: case ZigTypeIdBoundFn: case ZigTypeIdVector: + case ZigTypeIdCoroFrame: { if ((err = ensure_complete_type(ira->codegen, child_type))) return ira->codegen->invalid_instruction; @@ -17504,6 +17593,7 @@ static IrInstruction *ir_analyze_instruction_size_of(IrAnalyze *ira, case ZigTypeIdUnion: case ZigTypeIdFn: case ZigTypeIdVector: + case ZigTypeIdCoroFrame: { uint64_t size_in_bytes = type_size(ira->codegen, type_entry); return ir_const_unsigned(ira, &size_of_instruction->base, size_in_bytes); @@ -18067,6 +18157,7 @@ static IrInstruction *ir_analyze_instruction_switch_target(IrAnalyze *ira, case ZigTypeIdArgTuple: case ZigTypeIdOpaque: case ZigTypeIdVector: + case ZigTypeIdCoroFrame: ir_add_error(ira, &switch_target_instruction->base, buf_sprintf("invalid switch target type '%s'", buf_ptr(&target_type->name))); return ira->codegen->invalid_instruction; @@ -19906,6 +19997,8 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr break; } + case ZigTypeIdCoroFrame: + zig_panic("TODO @typeInfo for coro frames"); } assert(result != nullptr); @@ -21660,6 +21753,7 @@ static 
IrInstruction *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruct case ZigTypeIdUnion: case ZigTypeIdFn: case ZigTypeIdVector: + case ZigTypeIdCoroFrame: { uint64_t align_in_bytes = get_abi_alignment(ira->codegen, type_entry); return ir_const_unsigned(ira, &instruction->base, align_in_bytes); @@ -22815,6 +22909,8 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue zig_panic("TODO buf_write_value_bytes fn type"); case ZigTypeIdUnion: zig_panic("TODO buf_write_value_bytes union type"); + case ZigTypeIdCoroFrame: + zig_panic("TODO buf_write_value_bytes coro frame type"); } zig_unreachable(); } @@ -22994,6 +23090,8 @@ static Error buf_read_value_bytes(IrAnalyze *ira, CodeGen *codegen, AstNode *sou zig_panic("TODO buf_read_value_bytes fn type"); case ZigTypeIdUnion: zig_panic("TODO buf_read_value_bytes union type"); + case ZigTypeIdCoroFrame: + zig_panic("TODO buf_read_value_bytes coro frame type"); } zig_unreachable(); } @@ -24021,6 +24119,33 @@ static IrInstruction *ir_analyze_instruction_union_init_named_field(IrAnalyze *i union_type, field_name, field_result_loc, result_loc); } +static IrInstruction *ir_analyze_instruction_suspend_begin(IrAnalyze *ira, IrInstructionSuspendBegin *instruction) { + IrBasicBlock *new_bb = ir_get_new_bb_runtime(ira, instruction->resume_block, &instruction->base); + if (new_bb == nullptr) + return ir_unreach_error(ira); + return ir_build_suspend_begin(&ira->new_irb, instruction->base.scope, instruction->base.source_node, new_bb); +} + +static IrInstruction *ir_analyze_instruction_suspend_br(IrAnalyze *ira, IrInstructionSuspendBr *instruction) { + IrBasicBlock *old_dest_block = instruction->resume_block; + + IrBasicBlock *new_bb = ir_get_new_bb_runtime(ira, old_dest_block, &instruction->base); + if (new_bb == nullptr) + return ir_unreach_error(ira); + + ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec); + ir_assert(fn_entry != nullptr, &instruction->base); + fn_entry->resume_blocks.append(new_bb); + 
// This is done after appending the block because resume_index 0 is reserved for querying the size. + new_bb->resume_index = fn_entry->resume_blocks.length; + + ir_push_resume_block(ira, old_dest_block); + + IrInstruction *result = ir_build_suspend_br(&ira->new_irb, + instruction->base.scope, instruction->base.source_node, new_bb); + return ir_finish_anal(ira, result); +} + static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction *instruction) { switch (instruction->id) { case IrInstructionIdInvalid: @@ -24304,6 +24429,10 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction return ir_analyze_instruction_bit_cast_src(ira, (IrInstructionBitCastSrc *)instruction); case IrInstructionIdUnionInitNamedField: return ir_analyze_instruction_union_init_named_field(ira, (IrInstructionUnionInitNamedField *)instruction); + case IrInstructionIdSuspendBegin: + return ir_analyze_instruction_suspend_begin(ira, (IrInstructionSuspendBegin *)instruction); + case IrInstructionIdSuspendBr: + return ir_analyze_instruction_suspend_br(ira, (IrInstructionSuspendBr *)instruction); } zig_unreachable(); } @@ -24436,6 +24565,8 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdOptionalWrap: case IrInstructionIdVectorToArray: case IrInstructionIdResetResult: + case IrInstructionIdSuspendBegin: + case IrInstructionIdSuspendBr: return true; case IrInstructionIdPhi: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 9ea70ba7ab..3a77e92bc7 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -1503,6 +1503,16 @@ static void ir_print_union_init_named_field(IrPrint *irp, IrInstructionUnionInit fprintf(irp->f, ")"); } +static void ir_print_suspend_begin(IrPrint *irp, IrInstructionSuspendBegin *instruction) { + fprintf(irp->f, "@suspendBegin()"); +} + +static void ir_print_suspend_br(IrPrint *irp, IrInstructionSuspendBr *instruction) { + fprintf(irp->f, "@suspendBr("); + ir_print_other_block(irp, 
instruction->resume_block); + fprintf(irp->f, ")"); +} + static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { ir_print_prefix(irp, instruction); switch (instruction->id) { @@ -1961,6 +1971,12 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdUnionInitNamedField: ir_print_union_init_named_field(irp, (IrInstructionUnionInitNamedField *)instruction); break; + case IrInstructionIdSuspendBegin: + ir_print_suspend_begin(irp, (IrInstructionSuspendBegin *)instruction); + break; + case IrInstructionIdSuspendBr: + ir_print_suspend_br(irp, (IrInstructionSuspendBr *)instruction); + break; } fprintf(irp->f, "\n"); } From 27a5f2c4fa9cfa104faa4cc2b15cd21cc5a5501f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 21 Jul 2019 16:43:43 -0400 Subject: [PATCH 003/125] remove errors for async calling convention --- src/ir.cpp | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/src/ir.cpp b/src/ir.cpp index 2332e28c84..96d355bc95 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -14984,20 +14984,6 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c } return ira->codegen->invalid_instruction; } - if (fn_type_id->cc == CallingConventionAsync && !call_instruction->is_async) { - ErrorMsg *msg = ir_add_error(ira, fn_ref, buf_sprintf("must use async keyword to call async function")); - if (fn_proto_node) { - add_error_note(ira->codegen, msg, fn_proto_node, buf_sprintf("declared here")); - } - return ira->codegen->invalid_instruction; - } - if (fn_type_id->cc != CallingConventionAsync && call_instruction->is_async) { - ErrorMsg *msg = ir_add_error(ira, fn_ref, buf_sprintf("cannot use async keyword to call non-async function")); - if (fn_proto_node) { - add_error_note(ira->codegen, msg, fn_proto_node, buf_sprintf("declared here")); - } - return ira->codegen->invalid_instruction; - } if (fn_type_id->is_var_args) { From 6053ca4f69e490c744384bd02e89df595ff7b085 Mon Sep 17 00:00:00 
2001 From: Andrew Kelley Date: Sun, 21 Jul 2019 17:10:16 -0400 Subject: [PATCH 004/125] fix not jumping to entry --- src/codegen.cpp | 13 +++++++++---- src/ir.cpp | 6 ++++-- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index 85784e5ac5..47804d91f6 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -6413,6 +6413,7 @@ static void do_code_gen(CodeGen *g) { ZigLLVMSetCurrentDebugLocation(g->builder, (int)source_node->line + 1, (int)source_node->column + 1, get_di_scope(g, fn_table_entry->child_scope)); } + IrExecutable *executable = &fn_table_entry->analyzed_executable; LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume"); LLVMPositionBuilderAtEnd(g->builder, bad_resume_block); @@ -6429,15 +6430,19 @@ static void do_code_gen(CodeGen *g) { LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_resume_index_index, ""); LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, ""); - // The +1 is because index 0 is reserved for getting the size. + // +1 - index 0 is reserved for the entry block + // +1 - index 1 is reserved for getting the size. 
LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block, - fn_table_entry->resume_blocks.length + 1); + fn_table_entry->resume_blocks.length + 2); LLVMValueRef zero = LLVMConstNull(usize_type_ref); - LLVMAddCase(switch_instr, zero, get_size_block); + LLVMAddCase(switch_instr, zero, executable->basic_block_list.at(0)->llvm_block); + + LLVMValueRef one = LLVMConstInt(usize_type_ref, 1, false); + LLVMAddCase(switch_instr, one, get_size_block); for (size_t resume_i = 0; resume_i < fn_table_entry->resume_blocks.length; resume_i += 1) { - LLVMValueRef case_value = LLVMConstInt(usize_type_ref, resume_i + 1, false); + LLVMValueRef case_value = LLVMConstInt(usize_type_ref, resume_i + 2, false); LLVMAddCase(switch_instr, case_value, fn_table_entry->resume_blocks.at(resume_i)->llvm_block); } } diff --git a/src/ir.cpp b/src/ir.cpp index 96d355bc95..2b3462772f 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -24121,9 +24121,11 @@ static IrInstruction *ir_analyze_instruction_suspend_br(IrAnalyze *ira, IrInstru ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec); ir_assert(fn_entry != nullptr, &instruction->base); + + // +2 - one for the GetSize block, one for the Entry block, resume blocks are indexed after that. + new_bb->resume_index = fn_entry->resume_blocks.length + 2; + fn_entry->resume_blocks.append(new_bb); - // This is done after appending the block because resume_index 0 is reserved for querying the size. 
- new_bb->resume_index = fn_entry->resume_blocks.length; ir_push_resume_block(ira, old_dest_block); From 56c08eb3025a622e5d3f5b2b6c704f8dc2cddf47 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 21 Jul 2019 17:17:44 -0400 Subject: [PATCH 005/125] returning from async fn adds bad resume safety --- src/codegen.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/codegen.cpp b/src/codegen.cpp index 47804d91f6..4dc1d9ed86 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -2102,6 +2102,14 @@ static LLVMValueRef ir_render_save_err_ret_addr(CodeGen *g, IrExecutable *execut static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *return_instruction) { if (g->cur_fn->resume_blocks.length != 0) { + if (ir_want_runtime_safety(g, &return_instruction->base)) { + LLVMValueRef locals_ptr = g->cur_ret_ptr; + LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_resume_index_index, ""); + LLVMValueRef new_resume_index = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, + g->cur_fn->resume_blocks.length + 2, false); + LLVMBuildStore(g->builder, new_resume_index, resume_index_ptr); + } + LLVMBuildRet(g->builder, LLVMGetUndef(g->builtin_types.entry_usize->llvm_type)); return nullptr; } From 78e03c466c6641571adef4bb3931d6cc6f425eb4 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 21 Jul 2019 19:56:37 -0400 Subject: [PATCH 006/125] simple async function passing test --- src/all_types.hpp | 11 +- src/analyze.cpp | 17 +- src/analyze.hpp | 1 - src/codegen.cpp | 20 +- std/hash_map.zig | 115 +++---- std/testing.zig | 49 +-- test/stage1/behavior.zig | 2 +- test/stage1/behavior/coroutines.zig | 455 ++++++++++++++-------------- test/stage1/behavior/type_info.zig | 2 +- 9 files changed, 354 insertions(+), 318 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 3f61e77f66..a2b569898c 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1346,7 +1346,16 @@ struct ZigFn { Scope 
*child_scope; // parent is scope for last parameter ScopeBlock *def_scope; // parent is child_scope Buf symbol_name; - ZigType *type_entry; // function type + // This is the function type assuming the function does not suspend. + // Note that for an async function, this can be shared with non-async functions. So the value here + // should only be read for things in common between non-async and async function types. + ZigType *type_entry; + // For normal functions one could use the type_entry->raw_type_ref and type_entry->raw_di_type. + // However for functions that suspend, those values could possibly be their non-suspending equivalents. + // So these values should be preferred. + LLVMTypeRef raw_type_ref; + ZigLLVMDIType *raw_di_type; + ZigType *frame_type; // coro frame type // in the case of normal functions this is the implicit return type // in the case of async functions this is the implicit return type according to the diff --git a/src/analyze.cpp b/src/analyze.cpp index 2b93c390e0..91a100795b 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -3750,7 +3750,7 @@ bool resolve_inferred_error_set(CodeGen *g, ZigType *err_set_type, AstNode *sour return true; } -void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_type_node) { +static void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_type_node) { ZigType *fn_type = fn_table_entry->type_entry; assert(!fn_type->data.fn.is_generic); FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; @@ -5850,6 +5850,7 @@ static const ZigTypeId all_type_ids[] = { ZigTypeIdBoundFn, ZigTypeIdArgTuple, ZigTypeIdOpaque, + ZigTypeIdCoroFrame, ZigTypeIdVector, ZigTypeIdEnumLiteral, }; @@ -7035,7 +7036,13 @@ static void resolve_llvm_types_array(CodeGen *g, ZigType *type) { } void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type, ZigFn *fn) { - if (fn_type->llvm_di_type != nullptr) return; + if (fn_type->llvm_di_type != nullptr) { + if (fn != nullptr) { + fn->raw_type_ref = 
fn_type->data.fn.raw_type_ref; + fn->raw_di_type = fn_type->data.fn.raw_di_type; + } + return; + } FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; bool first_arg_return = want_first_arg_sret(g, fn_type_id); @@ -7118,6 +7125,12 @@ void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type, ZigFn *fn) { for (size_t i = 0; i < gen_param_types.length; i += 1) { assert(gen_param_types.items[i] != nullptr); } + if (fn != nullptr) { + fn->raw_type_ref = LLVMFunctionType(get_llvm_type(g, gen_return_type), + gen_param_types.items, (unsigned int)gen_param_types.length, fn_type_id->is_var_args); + fn->raw_di_type = ZigLLVMCreateSubroutineType(g->dbuilder, param_di_types.items, (int)param_di_types.length, 0); + return; + } fn_type->data.fn.raw_type_ref = LLVMFunctionType(get_llvm_type(g, gen_return_type), gen_param_types.items, (unsigned int)gen_param_types.length, fn_type_id->is_var_args); fn_type->llvm_type = LLVMPointerType(fn_type->data.fn.raw_type_ref, 0); diff --git a/src/analyze.hpp b/src/analyze.hpp index 286ff5e043..3f226080b5 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -105,7 +105,6 @@ void eval_min_max_value(CodeGen *g, ZigType *type_entry, ConstExprValue *const_v void eval_min_max_value_int(CodeGen *g, ZigType *int_type, BigInt *bigint, bool is_max); void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val); -void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_type_node); ScopeBlock *create_block_scope(CodeGen *g, AstNode *node, Scope *parent); ScopeDefer *create_defer_scope(CodeGen *g, AstNode *node, Scope *parent); diff --git a/src/codegen.cpp b/src/codegen.cpp index 4dc1d9ed86..4a9e5fd629 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -499,7 +499,7 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) { ZigType *fn_type = fn_table_entry->type_entry; // Make the raw_type_ref populated resolve_llvm_types_fn(g, fn_type, fn_table_entry); - LLVMTypeRef fn_llvm_type = fn_type->data.fn.raw_type_ref; 
+ LLVMTypeRef fn_llvm_type = fn_table_entry->raw_type_ref; if (fn_table_entry->body_node == nullptr) { LLVMValueRef existing_llvm_fn = LLVMGetNamedFunction(g->module, buf_ptr(symbol_name)); if (existing_llvm_fn) { @@ -521,9 +521,9 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) { assert(entry->value->id == TldIdFn); TldFn *tld_fn = reinterpret_cast(entry->value); // Make the raw_type_ref populated - (void)get_llvm_type(g, tld_fn->fn_entry->type_entry); + resolve_llvm_types_fn(g, tld_fn->fn_entry->type_entry, tld_fn->fn_entry); tld_fn->fn_entry->llvm_value = LLVMAddFunction(g->module, buf_ptr(symbol_name), - tld_fn->fn_entry->type_entry->data.fn.raw_type_ref); + tld_fn->fn_entry->raw_type_ref); fn_table_entry->llvm_value = LLVMConstBitCast(tld_fn->fn_entry->llvm_value, LLVMPointerType(fn_llvm_type, 0)); return fn_table_entry->llvm_value; @@ -683,10 +683,11 @@ static ZigLLVMDIScope *get_di_scope(CodeGen *g, Scope *scope) { unsigned flags = ZigLLVM_DIFlags_StaticMember; ZigLLVMDIScope *fn_di_scope = get_di_scope(g, scope->parent); assert(fn_di_scope != nullptr); + assert(fn_table_entry->raw_di_type != nullptr); ZigLLVMDISubprogram *subprogram = ZigLLVMCreateFunction(g->dbuilder, fn_di_scope, buf_ptr(&fn_table_entry->symbol_name), "", import->data.structure.root_struct->di_file, line_number, - fn_table_entry->type_entry->data.fn.raw_di_type, is_internal_linkage, + fn_table_entry->raw_di_type, is_internal_linkage, is_definition, scope_line, flags, is_optimized, nullptr); scope->di_scope = ZigLLVMSubprogramToScope(subprogram); @@ -3472,10 +3473,13 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr } gen_param_values.append(result_loc); - } else if (first_arg_ret) { - gen_param_values.append(result_loc); - } else if (prefix_arg_err_ret_stack) { - gen_param_values.append(get_cur_err_ret_trace_val(g, instruction->base.scope)); + } else { + if (first_arg_ret) { + gen_param_values.append(result_loc); + } + if 
(prefix_arg_err_ret_stack) { + gen_param_values.append(get_cur_err_ret_trace_val(g, instruction->base.scope)); + } } FnWalk fn_walk = {}; fn_walk.id = FnWalkIdCall; diff --git a/std/hash_map.zig b/std/hash_map.zig index 4327bfdddb..bdd6cc7519 100644 --- a/std/hash_map.zig +++ b/std/hash_map.zig @@ -535,17 +535,18 @@ pub fn getAutoEqlFn(comptime K: type) (fn (K, K) bool) { // TODO improve these hash functions pub fn autoHash(key: var, comptime rng: *std.rand.Random, comptime HashInt: type) HashInt { switch (@typeInfo(@typeOf(key))) { - builtin.TypeId.NoReturn, - builtin.TypeId.Opaque, - builtin.TypeId.Undefined, - builtin.TypeId.ArgTuple, + .NoReturn, + .Opaque, + .Undefined, + .ArgTuple, + .Frame, => @compileError("cannot hash this type"), - builtin.TypeId.Void, - builtin.TypeId.Null, + .Void, + .Null, => return 0, - builtin.TypeId.Int => |info| { + .Int => |info| { const unsigned_x = @bitCast(@IntType(false, info.bits), key); if (info.bits <= HashInt.bit_count) { return HashInt(unsigned_x) ^ comptime rng.scalar(HashInt); @@ -554,26 +555,26 @@ pub fn autoHash(key: var, comptime rng: *std.rand.Random, comptime HashInt: type } }, - builtin.TypeId.Float => |info| { + .Float => |info| { return autoHash(@bitCast(@IntType(false, info.bits), key), rng, HashInt); }, - builtin.TypeId.Bool => return autoHash(@boolToInt(key), rng, HashInt), - builtin.TypeId.Enum => return autoHash(@enumToInt(key), rng, HashInt), - builtin.TypeId.ErrorSet => return autoHash(@errorToInt(key), rng, HashInt), - builtin.TypeId.Fn => return autoHash(@ptrToInt(key), rng, HashInt), + .Bool => return autoHash(@boolToInt(key), rng, HashInt), + .Enum => return autoHash(@enumToInt(key), rng, HashInt), + .ErrorSet => return autoHash(@errorToInt(key), rng, HashInt), + .Fn => return autoHash(@ptrToInt(key), rng, HashInt), - builtin.TypeId.BoundFn, - builtin.TypeId.ComptimeFloat, - builtin.TypeId.ComptimeInt, - builtin.TypeId.Type, - builtin.TypeId.EnumLiteral, + .BoundFn, + .ComptimeFloat, + .ComptimeInt, + 
.Type, + .EnumLiteral, => return 0, - builtin.TypeId.Pointer => |info| switch (info.size) { - builtin.TypeInfo.Pointer.Size.One => @compileError("TODO auto hash for single item pointers"), - builtin.TypeInfo.Pointer.Size.Many => @compileError("TODO auto hash for many item pointers"), - builtin.TypeInfo.Pointer.Size.C => @compileError("TODO auto hash C pointers"), - builtin.TypeInfo.Pointer.Size.Slice => { + .Pointer => |info| switch (info.size) { + .One => @compileError("TODO auto hash for single item pointers"), + .Many => @compileError("TODO auto hash for many item pointers"), + .C => @compileError("TODO auto hash C pointers"), + .Slice => { const interval = std.math.max(1, key.len / 256); var i: usize = 0; var h = comptime rng.scalar(HashInt); @@ -584,44 +585,44 @@ pub fn autoHash(key: var, comptime rng: *std.rand.Random, comptime HashInt: type }, }, - builtin.TypeId.Optional => @compileError("TODO auto hash for optionals"), - builtin.TypeId.Array => @compileError("TODO auto hash for arrays"), - builtin.TypeId.Vector => @compileError("TODO auto hash for vectors"), - builtin.TypeId.Struct => @compileError("TODO auto hash for structs"), - builtin.TypeId.Union => @compileError("TODO auto hash for unions"), - builtin.TypeId.ErrorUnion => @compileError("TODO auto hash for unions"), + .Optional => @compileError("TODO auto hash for optionals"), + .Array => @compileError("TODO auto hash for arrays"), + .Vector => @compileError("TODO auto hash for vectors"), + .Struct => @compileError("TODO auto hash for structs"), + .Union => @compileError("TODO auto hash for unions"), + .ErrorUnion => @compileError("TODO auto hash for unions"), } } pub fn autoEql(a: var, b: @typeOf(a)) bool { switch (@typeInfo(@typeOf(a))) { - builtin.TypeId.NoReturn, - builtin.TypeId.Opaque, - builtin.TypeId.Undefined, - builtin.TypeId.ArgTuple, + .NoReturn, + .Opaque, + .Undefined, + .ArgTuple, => @compileError("cannot test equality of this type"), - builtin.TypeId.Void, - builtin.TypeId.Null, + 
.Void, + .Null, => return true, - builtin.TypeId.Bool, - builtin.TypeId.Int, - builtin.TypeId.Float, - builtin.TypeId.ComptimeFloat, - builtin.TypeId.ComptimeInt, - builtin.TypeId.EnumLiteral, - builtin.TypeId.Promise, - builtin.TypeId.Enum, - builtin.TypeId.BoundFn, - builtin.TypeId.Fn, - builtin.TypeId.ErrorSet, - builtin.TypeId.Type, + .Bool, + .Int, + .Float, + .ComptimeFloat, + .ComptimeInt, + .EnumLiteral, + .Promise, + .Enum, + .BoundFn, + .Fn, + .ErrorSet, + .Type, => return a == b, - builtin.TypeId.Pointer => |info| switch (info.size) { - builtin.TypeInfo.Pointer.Size.One => @compileError("TODO auto eql for single item pointers"), - builtin.TypeInfo.Pointer.Size.Many => @compileError("TODO auto eql for many item pointers"), - builtin.TypeInfo.Pointer.Size.C => @compileError("TODO auto eql for C pointers"), - builtin.TypeInfo.Pointer.Size.Slice => { + .Pointer => |info| switch (info.size) { + .One => @compileError("TODO auto eql for single item pointers"), + .Many => @compileError("TODO auto eql for many item pointers"), + .C => @compileError("TODO auto eql for C pointers"), + .Slice => { if (a.len != b.len) return false; for (a) |a_item, i| { if (!autoEql(a_item, b[i])) return false; @@ -630,11 +631,11 @@ pub fn autoEql(a: var, b: @typeOf(a)) bool { }, }, - builtin.TypeId.Optional => @compileError("TODO auto eql for optionals"), - builtin.TypeId.Array => @compileError("TODO auto eql for arrays"), - builtin.TypeId.Struct => @compileError("TODO auto eql for structs"), - builtin.TypeId.Union => @compileError("TODO auto eql for unions"), - builtin.TypeId.ErrorUnion => @compileError("TODO auto eql for unions"), - builtin.TypeId.Vector => @compileError("TODO auto eql for vectors"), + .Optional => @compileError("TODO auto eql for optionals"), + .Array => @compileError("TODO auto eql for arrays"), + .Struct => @compileError("TODO auto eql for structs"), + .Union => @compileError("TODO auto eql for unions"), + .ErrorUnion => @compileError("TODO auto eql for 
unions"), + .Vector => @compileError("TODO auto eql for vectors"), } } diff --git a/std/testing.zig b/std/testing.zig index 84f6cff5d8..3c4772cf37 100644 --- a/std/testing.zig +++ b/std/testing.zig @@ -25,35 +25,36 @@ pub fn expectError(expected_error: anyerror, actual_error_union: var) void { /// The types must match exactly. pub fn expectEqual(expected: var, actual: @typeOf(expected)) void { switch (@typeInfo(@typeOf(actual))) { - TypeId.NoReturn, - TypeId.BoundFn, - TypeId.ArgTuple, - TypeId.Opaque, + .NoReturn, + .BoundFn, + .ArgTuple, + .Opaque, + .Frame, => @compileError("value of type " ++ @typeName(@typeOf(actual)) ++ " encountered"), - TypeId.Undefined, - TypeId.Null, - TypeId.Void, + .Undefined, + .Null, + .Void, => return, - TypeId.Type, - TypeId.Bool, - TypeId.Int, - TypeId.Float, - TypeId.ComptimeFloat, - TypeId.ComptimeInt, - TypeId.EnumLiteral, - TypeId.Enum, - TypeId.Fn, - TypeId.Vector, - TypeId.ErrorSet, + .Type, + .Bool, + .Int, + .Float, + .ComptimeFloat, + .ComptimeInt, + .EnumLiteral, + .Enum, + .Fn, + .Vector, + .ErrorSet, => { if (actual != expected) { std.debug.panic("expected {}, found {}", expected, actual); } }, - TypeId.Pointer => |pointer| { + .Pointer => |pointer| { switch (pointer.size) { builtin.TypeInfo.Pointer.Size.One, builtin.TypeInfo.Pointer.Size.Many, @@ -75,22 +76,22 @@ pub fn expectEqual(expected: var, actual: @typeOf(expected)) void { } }, - TypeId.Array => |array| expectEqualSlices(array.child, &expected, &actual), + .Array => |array| expectEqualSlices(array.child, &expected, &actual), - TypeId.Struct => |structType| { + .Struct => |structType| { inline for (structType.fields) |field| { expectEqual(@field(expected, field.name), @field(actual, field.name)); } }, - TypeId.Union => |union_info| { + .Union => |union_info| { if (union_info.tag_type == null) { @compileError("Unable to compare untagged union values"); } @compileError("TODO implement testing.expectEqual for tagged unions"); }, - TypeId.Optional => { + .Optional => 
{ if (expected) |expected_payload| { if (actual) |actual_payload| { expectEqual(expected_payload, actual_payload); @@ -104,7 +105,7 @@ pub fn expectEqual(expected: var, actual: @typeOf(expected)) void { } }, - TypeId.ErrorUnion => { + .ErrorUnion => { if (expected) |expected_payload| { if (actual) |actual_payload| { expectEqual(expected_payload, actual_payload); diff --git a/test/stage1/behavior.zig b/test/stage1/behavior.zig index 7950088348..59401fbe84 100644 --- a/test/stage1/behavior.zig +++ b/test/stage1/behavior.zig @@ -43,7 +43,7 @@ comptime { _ = @import("behavior/cast.zig"); _ = @import("behavior/const_slice_child.zig"); //_ = @import("behavior/coroutine_await_struct.zig"); - //_ = @import("behavior/coroutines.zig"); + _ = @import("behavior/coroutines.zig"); _ = @import("behavior/defer.zig"); _ = @import("behavior/enum.zig"); _ = @import("behavior/enum_with_members.zig"); diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index 7395f3e064..cdab411fb1 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -1,236 +1,245 @@ const std = @import("std"); const builtin = @import("builtin"); const expect = std.testing.expect; -const allocator = std.heap.direct_allocator; var x: i32 = 1; -test "create a coroutine and cancel it" { - const p = try async simpleAsyncFn(); - comptime expect(@typeOf(p) == promise->void); - cancel p; +test "simple coroutine suspend" { + const p = async simpleAsyncFn(); expect(x == 2); } -async fn simpleAsyncFn() void { +fn simpleAsyncFn() void { x += 1; suspend; x += 1; } -test "coroutine suspend, resume, cancel" { - seq('a'); - const p = try async testAsyncSeq(); - seq('c'); - resume p; - seq('f'); - cancel p; - seq('g'); - - expect(std.mem.eql(u8, points, "abcdefg")); -} -async fn testAsyncSeq() void { - defer seq('e'); - - seq('b'); - suspend; - seq('d'); -} -var points = [_]u8{0} ** "abcdefg".len; -var index: usize = 0; - -fn seq(c: u8) void { - points[index] = 
c; - index += 1; -} - -test "coroutine suspend with block" { - const p = try async testSuspendBlock(); - std.testing.expect(!result); - resume a_promise; - std.testing.expect(result); - cancel p; -} - -var a_promise: promise = undefined; -var result = false; -async fn testSuspendBlock() void { - suspend { - comptime expect(@typeOf(@handle()) == promise->void); - a_promise = @handle(); - } - - //Test to make sure that @handle() works as advertised (issue #1296) - //var our_handle: promise = @handle(); - expect(a_promise == @handle()); - - result = true; -} - -var await_a_promise: promise = undefined; -var await_final_result: i32 = 0; - -test "coroutine await" { - await_seq('a'); - const p = async await_amain() catch unreachable; - await_seq('f'); - resume await_a_promise; - await_seq('i'); - expect(await_final_result == 1234); - expect(std.mem.eql(u8, await_points, "abcdefghi")); -} -async fn await_amain() void { - await_seq('b'); - const p = async await_another() catch unreachable; - await_seq('e'); - await_final_result = await p; - await_seq('h'); -} -async fn await_another() i32 { - await_seq('c'); - suspend { - await_seq('d'); - await_a_promise = @handle(); - } - await_seq('g'); - return 1234; -} - -var await_points = [_]u8{0} ** "abcdefghi".len; -var await_seq_index: usize = 0; - -fn await_seq(c: u8) void { - await_points[await_seq_index] = c; - await_seq_index += 1; -} - -var early_final_result: i32 = 0; - -test "coroutine await early return" { - early_seq('a'); - const p = async early_amain() catch @panic("out of memory"); - early_seq('f'); - expect(early_final_result == 1234); - expect(std.mem.eql(u8, early_points, "abcdef")); -} -async fn early_amain() void { - early_seq('b'); - const p = async early_another() catch @panic("out of memory"); - early_seq('d'); - early_final_result = await p; - early_seq('e'); -} -async fn early_another() i32 { - early_seq('c'); - return 1234; -} - -var early_points = [_]u8{0} ** "abcdef".len; -var early_seq_index: usize = 0; 
- -fn early_seq(c: u8) void { - early_points[early_seq_index] = c; - early_seq_index += 1; -} - -test "coro allocation failure" { - var failing_allocator = std.debug.FailingAllocator.init(std.debug.global_allocator, 0); - if (async<&failing_allocator.allocator> asyncFuncThatNeverGetsRun()) { - @panic("expected allocation failure"); - } else |err| switch (err) { - error.OutOfMemory => {}, - } -} -async fn asyncFuncThatNeverGetsRun() void { - @panic("coro frame allocation should fail"); -} - -test "async function with dot syntax" { - const S = struct { - var y: i32 = 1; - async fn foo() void { - y += 1; - suspend; - } - }; - const p = try async S.foo(); - cancel p; - expect(S.y == 2); -} - -test "async fn pointer in a struct field" { - var data: i32 = 1; - const Foo = struct { - bar: async<*std.mem.Allocator> fn (*i32) void, - }; - var foo = Foo{ .bar = simpleAsyncFn2 }; - const p = (async foo.bar(&data)) catch unreachable; - expect(data == 2); - cancel p; - expect(data == 4); -} -async<*std.mem.Allocator> fn simpleAsyncFn2(y: *i32) void { - defer y.* += 2; - y.* += 1; - suspend; -} - -test "async fn with inferred error set" { - const p = (async failing()) catch unreachable; - resume p; - cancel p; -} - -async fn failing() !void { - suspend; - return error.Fail; -} - -test "error return trace across suspend points - early return" { - const p = nonFailing(); - resume p; - const p2 = try async printTrace(p); - cancel p2; -} - -test "error return trace across suspend points - async return" { - const p = nonFailing(); - const p2 = try async printTrace(p); - resume p; - cancel p2; -} - -fn nonFailing() (promise->anyerror!void) { - return async suspendThenFail() catch unreachable; -} -async fn suspendThenFail() anyerror!void { - suspend; - return error.Fail; -} -async fn printTrace(p: promise->(anyerror!void)) void { - (await p) catch |e| { - std.testing.expect(e == error.Fail); - if (@errorReturnTrace()) |trace| { - expect(trace.index == 1); - } else switch (builtin.mode) 
{ - builtin.Mode.Debug, builtin.Mode.ReleaseSafe => @panic("expected return trace"), - builtin.Mode.ReleaseFast, builtin.Mode.ReleaseSmall => {}, - } - }; -} - -test "break from suspend" { - var buf: [500]u8 = undefined; - var a = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator; - var my_result: i32 = 1; - const p = try async testBreakFromSuspend(&my_result); - cancel p; - std.testing.expect(my_result == 2); -} -async fn testBreakFromSuspend(my_result: *i32) void { - suspend { - resume @handle(); - } - my_result.* += 1; - suspend; - my_result.* += 1; -} +//test "create a coroutine and cancel it" { +// const p = try async simpleAsyncFn(); +// comptime expect(@typeOf(p) == promise->void); +// cancel p; +// expect(x == 2); +//} +//async fn simpleAsyncFn() void { +// x += 1; +// suspend; +// x += 1; +//} +// +//test "coroutine suspend, resume, cancel" { +// seq('a'); +// const p = try async testAsyncSeq(); +// seq('c'); +// resume p; +// seq('f'); +// cancel p; +// seq('g'); +// +// expect(std.mem.eql(u8, points, "abcdefg")); +//} +//async fn testAsyncSeq() void { +// defer seq('e'); +// +// seq('b'); +// suspend; +// seq('d'); +//} +//var points = [_]u8{0} ** "abcdefg".len; +//var index: usize = 0; +// +//fn seq(c: u8) void { +// points[index] = c; +// index += 1; +//} +// +//test "coroutine suspend with block" { +// const p = try async testSuspendBlock(); +// std.testing.expect(!result); +// resume a_promise; +// std.testing.expect(result); +// cancel p; +//} +// +//var a_promise: promise = undefined; +//var result = false; +//async fn testSuspendBlock() void { +// suspend { +// comptime expect(@typeOf(@handle()) == promise->void); +// a_promise = @handle(); +// } +// +// //Test to make sure that @handle() works as advertised (issue #1296) +// //var our_handle: promise = @handle(); +// expect(a_promise == @handle()); +// +// result = true; +//} +// +//var await_a_promise: promise = undefined; +//var await_final_result: i32 = 0; +// +//test "coroutine await" { 
+// await_seq('a'); +// const p = async await_amain() catch unreachable; +// await_seq('f'); +// resume await_a_promise; +// await_seq('i'); +// expect(await_final_result == 1234); +// expect(std.mem.eql(u8, await_points, "abcdefghi")); +//} +//async fn await_amain() void { +// await_seq('b'); +// const p = async await_another() catch unreachable; +// await_seq('e'); +// await_final_result = await p; +// await_seq('h'); +//} +//async fn await_another() i32 { +// await_seq('c'); +// suspend { +// await_seq('d'); +// await_a_promise = @handle(); +// } +// await_seq('g'); +// return 1234; +//} +// +//var await_points = [_]u8{0} ** "abcdefghi".len; +//var await_seq_index: usize = 0; +// +//fn await_seq(c: u8) void { +// await_points[await_seq_index] = c; +// await_seq_index += 1; +//} +// +//var early_final_result: i32 = 0; +// +//test "coroutine await early return" { +// early_seq('a'); +// const p = async early_amain() catch @panic("out of memory"); +// early_seq('f'); +// expect(early_final_result == 1234); +// expect(std.mem.eql(u8, early_points, "abcdef")); +//} +//async fn early_amain() void { +// early_seq('b'); +// const p = async early_another() catch @panic("out of memory"); +// early_seq('d'); +// early_final_result = await p; +// early_seq('e'); +//} +//async fn early_another() i32 { +// early_seq('c'); +// return 1234; +//} +// +//var early_points = [_]u8{0} ** "abcdef".len; +//var early_seq_index: usize = 0; +// +//fn early_seq(c: u8) void { +// early_points[early_seq_index] = c; +// early_seq_index += 1; +//} +// +//test "coro allocation failure" { +// var failing_allocator = std.debug.FailingAllocator.init(std.debug.global_allocator, 0); +// if (async<&failing_allocator.allocator> asyncFuncThatNeverGetsRun()) { +// @panic("expected allocation failure"); +// } else |err| switch (err) { +// error.OutOfMemory => {}, +// } +//} +//async fn asyncFuncThatNeverGetsRun() void { +// @panic("coro frame allocation should fail"); +//} +// +//test "async function 
with dot syntax" { +// const S = struct { +// var y: i32 = 1; +// async fn foo() void { +// y += 1; +// suspend; +// } +// }; +// const p = try async S.foo(); +// cancel p; +// expect(S.y == 2); +//} +// +//test "async fn pointer in a struct field" { +// var data: i32 = 1; +// const Foo = struct { +// bar: async<*std.mem.Allocator> fn (*i32) void, +// }; +// var foo = Foo{ .bar = simpleAsyncFn2 }; +// const p = (async foo.bar(&data)) catch unreachable; +// expect(data == 2); +// cancel p; +// expect(data == 4); +//} +//async<*std.mem.Allocator> fn simpleAsyncFn2(y: *i32) void { +// defer y.* += 2; +// y.* += 1; +// suspend; +//} +// +//test "async fn with inferred error set" { +// const p = (async failing()) catch unreachable; +// resume p; +// cancel p; +//} +// +//async fn failing() !void { +// suspend; +// return error.Fail; +//} +// +//test "error return trace across suspend points - early return" { +// const p = nonFailing(); +// resume p; +// const p2 = try async printTrace(p); +// cancel p2; +//} +// +//test "error return trace across suspend points - async return" { +// const p = nonFailing(); +// const p2 = try async printTrace(p); +// resume p; +// cancel p2; +//} +// +//fn nonFailing() (promise->anyerror!void) { +// return async suspendThenFail() catch unreachable; +//} +//async fn suspendThenFail() anyerror!void { +// suspend; +// return error.Fail; +//} +//async fn printTrace(p: promise->(anyerror!void)) void { +// (await p) catch |e| { +// std.testing.expect(e == error.Fail); +// if (@errorReturnTrace()) |trace| { +// expect(trace.index == 1); +// } else switch (builtin.mode) { +// builtin.Mode.Debug, builtin.Mode.ReleaseSafe => @panic("expected return trace"), +// builtin.Mode.ReleaseFast, builtin.Mode.ReleaseSmall => {}, +// } +// }; +//} +// +//test "break from suspend" { +// var buf: [500]u8 = undefined; +// var a = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator; +// var my_result: i32 = 1; +// const p = try async 
testBreakFromSuspend(&my_result); +// cancel p; +// std.testing.expect(my_result == 2); +//} +//async fn testBreakFromSuspend(my_result: *i32) void { +// suspend { +// resume @handle(); +// } +// my_result.* += 1; +// suspend; +// my_result.* += 1; +//} diff --git a/test/stage1/behavior/type_info.zig b/test/stage1/behavior/type_info.zig index 2decf0c329..6a51015124 100644 --- a/test/stage1/behavior/type_info.zig +++ b/test/stage1/behavior/type_info.zig @@ -177,7 +177,7 @@ fn testUnion() void { expect(TypeId(typeinfo_info) == TypeId.Union); expect(typeinfo_info.Union.layout == TypeInfo.ContainerLayout.Auto); expect(typeinfo_info.Union.tag_type.? == TypeId); - expect(typeinfo_info.Union.fields.len == 24); + expect(typeinfo_info.Union.fields.len == 25); expect(typeinfo_info.Union.fields[4].enum_field != null); expect(typeinfo_info.Union.fields[4].enum_field.?.value == 4); expect(typeinfo_info.Union.fields[4].field_type == @typeOf(@typeInfo(u8).Int)); From 11bd50f2b2a74ce25d841a15ba67d042d41b71c2 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 21 Jul 2019 20:54:08 -0400 Subject: [PATCH 007/125] implement coroutine resume --- src/all_types.hpp | 7 ++++++ src/codegen.cpp | 14 ++++++++++++ src/ir.cpp | 35 ++++++++++++++++++++++++++++- src/ir_print.cpp | 9 ++++++++ test/stage1/behavior/coroutines.zig | 4 +++- 5 files changed, 67 insertions(+), 2 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index a2b569898c..82d2e2cddb 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -2322,6 +2322,7 @@ enum IrInstructionId { IrInstructionIdUnionInitNamedField, IrInstructionIdSuspendBegin, IrInstructionIdSuspendBr, + IrInstructionIdCoroResume, }; struct IrInstruction { @@ -3548,6 +3549,12 @@ struct IrInstructionSuspendBr { IrBasicBlock *resume_block; }; +struct IrInstructionCoroResume { + IrInstruction base; + + IrInstruction *frame; +}; + enum ResultLocId { ResultLocIdInvalid, ResultLocIdNone, diff --git a/src/codegen.cpp b/src/codegen.cpp index 
4a9e5fd629..fa5f3ef8ee 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -4969,6 +4969,18 @@ static LLVMValueRef ir_render_suspend_br(CodeGen *g, IrExecutable *executable, return nullptr; } +static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, + IrInstructionCoroResume *instruction) +{ + LLVMValueRef frame = ir_llvm_value(g, instruction->frame); + ZigType *frame_type = instruction->frame->value.type; + assert(frame_type->id == ZigTypeIdCoroFrame); + ZigFn *fn = frame_type->data.frame.fn; + LLVMValueRef fn_val = fn_llvm_value(g, fn); + LLVMBuildCall(g->builder, fn_val, &frame, 1, ""); + return nullptr; +} + static void set_debug_location(CodeGen *g, IrInstruction *instruction) { AstNode *source_node = instruction->source_node; Scope *scope = instruction->scope; @@ -5213,6 +5225,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_suspend_begin(g, executable, (IrInstructionSuspendBegin *)instruction); case IrInstructionIdSuspendBr: return ir_render_suspend_br(g, executable, (IrInstructionSuspendBr *)instruction); + case IrInstructionIdCoroResume: + return ir_render_coro_resume(g, executable, (IrInstructionCoroResume *)instruction); } zig_unreachable(); } diff --git a/src/ir.cpp b/src/ir.cpp index 2b3462772f..d0a11c2f1e 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -1035,6 +1035,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionSuspendBr *) { return IrInstructionIdSuspendBr; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroResume *) { + return IrInstructionIdCoroResume; +} + template static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) { T *special_instruction = allocate(1); @@ -3216,6 +3220,18 @@ static IrInstruction *ir_build_suspend_br(IrBuilder *irb, Scope *scope, AstNode return &instruction->base; } +static IrInstruction *ir_build_coro_resume(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction 
*frame) +{ + IrInstructionCoroResume *instruction = ir_build_instruction(irb, scope, source_node); + instruction->base.value.type = irb->codegen->builtin_types.entry_void; + instruction->frame = frame; + + ir_ref_instruction(frame, irb->current_basic_block); + + return &instruction->base; +} + static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) { results[ReturnKindUnconditional] = 0; results[ReturnKindError] = 0; @@ -7675,7 +7691,7 @@ static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) if (target_inst == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; - zig_panic("TODO ir_gen_resume"); + return ir_build_coro_resume(irb, scope, node, target_inst); } static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *node) { @@ -24134,6 +24150,20 @@ static IrInstruction *ir_analyze_instruction_suspend_br(IrAnalyze *ira, IrInstru return ir_finish_anal(ira, result); } +static IrInstruction *ir_analyze_instruction_coro_resume(IrAnalyze *ira, IrInstructionCoroResume *instruction) { + IrInstruction *frame = instruction->frame->child; + if (type_is_invalid(frame->value.type)) + return ira->codegen->invalid_instruction; + + if (frame->value.type->id != ZigTypeIdCoroFrame) { + ir_add_error(ira, instruction->frame, + buf_sprintf("expected frame, found '%s'", buf_ptr(&frame->value.type->name))); + return ira->codegen->invalid_instruction; + } + + return ir_build_coro_resume(&ira->new_irb, instruction->base.scope, instruction->base.source_node, frame); +} + static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction *instruction) { switch (instruction->id) { case IrInstructionIdInvalid: @@ -24421,6 +24451,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction return ir_analyze_instruction_suspend_begin(ira, (IrInstructionSuspendBegin *)instruction); case IrInstructionIdSuspendBr: return 
ir_analyze_instruction_suspend_br(ira, (IrInstructionSuspendBr *)instruction); + case IrInstructionIdCoroResume: + return ir_analyze_instruction_coro_resume(ira, (IrInstructionCoroResume *)instruction); } zig_unreachable(); } @@ -24555,6 +24587,7 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdResetResult: case IrInstructionIdSuspendBegin: case IrInstructionIdSuspendBr: + case IrInstructionIdCoroResume: return true; case IrInstructionIdPhi: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 3a77e92bc7..e14647ea82 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -1513,6 +1513,12 @@ static void ir_print_suspend_br(IrPrint *irp, IrInstructionSuspendBr *instructio fprintf(irp->f, ")"); } +static void ir_print_coro_resume(IrPrint *irp, IrInstructionCoroResume *instruction) { + fprintf(irp->f, "@coroResume("); + ir_print_other_instruction(irp, instruction->frame); + fprintf(irp->f, ")"); +} + static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { ir_print_prefix(irp, instruction); switch (instruction->id) { @@ -1977,6 +1983,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdSuspendBr: ir_print_suspend_br(irp, (IrInstructionSuspendBr *)instruction); break; + case IrInstructionIdCoroResume: + ir_print_coro_resume(irp, (IrInstructionCoroResume *)instruction); + break; } fprintf(irp->f, "\n"); } diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index cdab411fb1..fd07790e7f 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -4,9 +4,11 @@ const expect = std.testing.expect; var x: i32 = 1; -test "simple coroutine suspend" { +test "simple coroutine suspend and resume" { const p = async simpleAsyncFn(); expect(x == 2); + resume p; + expect(x == 3); } fn simpleAsyncFn() void { x += 1; From 59bf9ca58c992e02423fae2ba8773eac098189b9 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 21 
Jul 2019 23:27:47 -0400 Subject: [PATCH 008/125] implement async function parameters --- src/all_types.hpp | 1 + src/analyze.cpp | 71 +++++++++++++++++++---------- src/codegen.cpp | 37 +++++++++------ test/stage1/behavior/coroutines.zig | 37 +++++++-------- 4 files changed, 90 insertions(+), 56 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 82d2e2cddb..aa7ff06ce9 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -3638,6 +3638,7 @@ static const size_t err_union_err_index = 0; static const size_t err_union_payload_index = 1; static const size_t coro_resume_index_index = 0; +static const size_t coro_arg_start = 1; // TODO call graph analysis to find out what this number needs to be for every function // MUST BE A POWER OF TWO. diff --git a/src/analyze.cpp b/src/analyze.cpp index 91a100795b..aff11e017f 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1891,6 +1891,21 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { field_names.append("resume_index"); field_types.append(g->builtin_types.entry_usize); + for (size_t arg_i = 0; arg_i < fn->type_entry->data.fn.fn_type_id.param_count; arg_i += 1) { + FnTypeParamInfo *param_info = &fn->type_entry->data.fn.fn_type_id.param_info[arg_i]; + AstNode *param_decl_node = get_param_decl_node(fn, arg_i); + Buf *param_name; + bool is_var_args = param_decl_node && param_decl_node->data.param_decl.is_var_args; + if (param_decl_node && !is_var_args) { + param_name = param_decl_node->data.param_decl.name; + } else { + param_name = buf_sprintf("arg%" ZIG_PRI_usize "", arg_i); + } + ZigType *param_type = param_info[arg_i].type; + field_names.append(buf_ptr(param_name)); + field_types.append(param_type); + } + assert(field_names.length == field_types.length); frame_type->data.frame.locals_struct = get_struct_type(g, buf_ptr(&frame_type->name), field_names.items, field_types.items, field_names.length); @@ -7058,19 +7073,22 @@ void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type, ZigFn 
*fn) { // +1 for maybe first argument the error return trace // +2 for maybe arguments async allocator and error code pointer ZigList param_di_types = {}; - param_di_types.append(get_llvm_di_type(g, fn_type_id->return_type)); ZigType *gen_return_type; if (is_async) { gen_return_type = g->builtin_types.entry_usize; + param_di_types.append(get_llvm_di_type(g, gen_return_type)); } else if (!type_has_bits(fn_type_id->return_type)) { gen_return_type = g->builtin_types.entry_void; + param_di_types.append(get_llvm_di_type(g, gen_return_type)); } else if (first_arg_return) { + gen_return_type = g->builtin_types.entry_void; + param_di_types.append(get_llvm_di_type(g, gen_return_type)); ZigType *gen_type = get_pointer_to_type(g, fn_type_id->return_type, false); gen_param_types.append(get_llvm_type(g, gen_type)); param_di_types.append(get_llvm_di_type(g, gen_type)); - gen_return_type = g->builtin_types.entry_void; } else { gen_return_type = fn_type_id->return_type; + param_di_types.append(get_llvm_di_type(g, gen_return_type)); } fn_type->data.fn.gen_return_type = gen_return_type; @@ -7080,36 +7098,43 @@ void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type, ZigFn *fn) { param_di_types.append(get_llvm_di_type(g, gen_type)); } if (is_async) { + fn_type->data.fn.gen_param_info = allocate(1); + ZigType *frame_type = (fn == nullptr) ? 
g->builtin_types.entry_frame_header : get_coro_frame_type(g, fn); ZigType *ptr_type = get_pointer_to_type(g, frame_type, false); gen_param_types.append(get_llvm_type(g, ptr_type)); param_di_types.append(get_llvm_di_type(g, ptr_type)); - } - fn_type->data.fn.gen_param_info = allocate(fn_type_id->param_count); - for (size_t i = 0; i < fn_type_id->param_count; i += 1) { - FnTypeParamInfo *src_param_info = &fn_type->data.fn.fn_type_id.param_info[i]; - ZigType *type_entry = src_param_info->type; - FnGenParamInfo *gen_param_info = &fn_type->data.fn.gen_param_info[i]; + fn_type->data.fn.gen_param_info[0].src_index = 0; + fn_type->data.fn.gen_param_info[0].gen_index = 0; + fn_type->data.fn.gen_param_info[0].type = ptr_type; - gen_param_info->src_index = i; - gen_param_info->gen_index = SIZE_MAX; + } else { + fn_type->data.fn.gen_param_info = allocate(fn_type_id->param_count); + for (size_t i = 0; i < fn_type_id->param_count; i += 1) { + FnTypeParamInfo *src_param_info = &fn_type->data.fn.fn_type_id.param_info[i]; + ZigType *type_entry = src_param_info->type; + FnGenParamInfo *gen_param_info = &fn_type->data.fn.gen_param_info[i]; - if (is_c_abi || !type_has_bits(type_entry)) - continue; + gen_param_info->src_index = i; + gen_param_info->gen_index = SIZE_MAX; - ZigType *gen_type; - if (handle_is_ptr(type_entry)) { - gen_type = get_pointer_to_type(g, type_entry, true); - gen_param_info->is_byval = true; - } else { - gen_type = type_entry; + if (is_c_abi || !type_has_bits(type_entry)) + continue; + + ZigType *gen_type; + if (handle_is_ptr(type_entry)) { + gen_type = get_pointer_to_type(g, type_entry, true); + gen_param_info->is_byval = true; + } else { + gen_type = type_entry; + } + gen_param_info->gen_index = gen_param_types.length; + gen_param_info->type = gen_type; + gen_param_types.append(get_llvm_type(g, gen_type)); + + param_di_types.append(get_llvm_di_type(g, gen_type)); } - gen_param_info->gen_index = gen_param_types.length; - gen_param_info->type = gen_type; - 
gen_param_types.append(get_llvm_type(g, gen_type)); - - param_di_types.append(get_llvm_di_type(g, gen_type)); } if (is_c_abi) { diff --git a/src/codegen.cpp b/src/codegen.cpp index fa5f3ef8ee..9184bd7c84 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -1965,10 +1965,12 @@ static bool iter_function_params_c_abi(CodeGen *g, ZigType *fn_type, FnWalk *fn_ } case FnWalkIdInits: { clear_debug_source_node(g); - LLVMValueRef arg = LLVMGetParam(llvm_fn, fn_walk->data.inits.gen_i); - LLVMTypeRef ptr_to_int_type_ref = LLVMPointerType(LLVMIntType((unsigned)ty_size * 8), 0); - LLVMValueRef bitcasted = LLVMBuildBitCast(g->builder, var->value_ref, ptr_to_int_type_ref, ""); - gen_store_untyped(g, arg, bitcasted, var->align_bytes, false); + if (fn_walk->data.inits.fn->resume_blocks.length == 0) { + LLVMValueRef arg = LLVMGetParam(llvm_fn, fn_walk->data.inits.gen_i); + LLVMTypeRef ptr_to_int_type_ref = LLVMPointerType(LLVMIntType((unsigned)ty_size * 8), 0); + LLVMValueRef bitcasted = LLVMBuildBitCast(g->builder, var->value_ref, ptr_to_int_type_ref, ""); + gen_store_untyped(g, arg, bitcasted, var->align_bytes, false); + } if (var->decl_node) { gen_var_debug_decl(g, var); } @@ -2061,7 +2063,7 @@ void walk_function_params(CodeGen *g, ZigType *fn_type, FnWalk *fn_walk) { assert(variable); assert(variable->value_ref); - if (!handle_is_ptr(variable->var_type)) { + if (!handle_is_ptr(variable->var_type) && fn_walk->data.inits.fn->resume_blocks.length == 0) { clear_debug_source_node(g); ZigType *fn_type = fn_table_entry->type_entry; unsigned gen_arg_index = fn_type->data.fn.gen_param_info[variable->src_arg_index].gen_index; @@ -3471,8 +3473,6 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr if (prefix_arg_err_ret_stack) { zig_panic("TODO"); } - - gen_param_values.append(result_loc); } else { if (first_arg_ret) { gen_param_values.append(result_loc); @@ -3504,6 +3504,15 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr 
LLVMCallConv llvm_cc = get_llvm_cc(g, cc); LLVMValueRef result; + if (instruction->is_async) { + for (size_t arg_i = 0; arg_i < gen_param_values.length; arg_i += 1) { + LLVMValueRef arg_ptr = LLVMBuildStructGEP(g->builder, result_loc, coro_arg_start + arg_i, ""); + LLVMBuildStore(g->builder, gen_param_values.at(arg_i), arg_ptr); + } + ZigLLVMBuildCall(g->builder, fn_val, &result_loc, 1, llvm_cc, fn_inline, ""); + return nullptr; + } + if (instruction->new_stack == nullptr) { result = ZigLLVMBuildCall(g->builder, fn_val, gen_param_values.items, (unsigned)gen_param_values.length, llvm_cc, fn_inline, ""); @@ -3519,11 +3528,6 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMBuildCall(g->builder, stackrestore_fn_val, &old_stack_ref, 1, ""); } - - if (instruction->is_async) { - return nullptr; - } - if (src_return_type->id == ZigTypeIdUnreachable) { return LLVMBuildUnreachable(g->builder); } else if (!ret_has_bits) { @@ -6285,7 +6289,9 @@ static void do_code_gen(CodeGen *g) { build_all_basic_blocks(g, fn_table_entry); clear_debug_source_node(g); - if (want_sret || fn_table_entry->resume_blocks.length != 0) { + bool is_async = cc == CallingConventionAsync || fn_table_entry->resume_blocks.length != 0; + + if (want_sret || is_async) { g->cur_ret_ptr = LLVMGetParam(fn, 0); } else if (handle_is_ptr(fn_type_id->return_type)) { g->cur_ret_ptr = build_alloca(g, fn_type_id->return_type, "result", 0); @@ -6303,7 +6309,6 @@ static void do_code_gen(CodeGen *g) { } // error return tracing setup - bool is_async = cc == CallingConventionAsync; bool have_err_ret_trace_stack = g->have_err_ret_tracing && fn_table_entry->calls_or_awaits_errorable_fn && !is_async && !have_err_ret_trace_arg; LLVMValueRef err_ret_array_val = nullptr; if (have_err_ret_trace_stack) { @@ -6378,7 +6383,9 @@ static void do_code_gen(CodeGen *g) { FnGenParamInfo *gen_info = &fn_table_entry->type_entry->data.fn.gen_param_info[var->src_arg_index]; assert(gen_info->gen_index != 
SIZE_MAX); - if (handle_is_ptr(var->var_type)) { + if (is_async) { + var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_arg_start + var_i, ""); + } else if (handle_is_ptr(var->var_type)) { if (gen_info->is_byval) { gen_type = var->var_type; } else { diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index fd07790e7f..3a54657020 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -2,33 +2,34 @@ const std = @import("std"); const builtin = @import("builtin"); const expect = std.testing.expect; -var x: i32 = 1; +var global_x: i32 = 1; test "simple coroutine suspend and resume" { const p = async simpleAsyncFn(); - expect(x == 2); + expect(global_x == 2); resume p; - expect(x == 3); + expect(global_x == 3); } fn simpleAsyncFn() void { - x += 1; + global_x += 1; suspend; - x += 1; + global_x += 1; } -//test "create a coroutine and cancel it" { -// const p = try async simpleAsyncFn(); -// comptime expect(@typeOf(p) == promise->void); -// cancel p; -// expect(x == 2); -//} -//async fn simpleAsyncFn() void { -// x += 1; -// suspend; -// x += 1; -//} -// -//test "coroutine suspend, resume, cancel" { +var global_y: i32 = 1; + +test "pass parameter to coroutine" { + const p = async simpleAsyncFnWithArg(2); + expect(global_y == 3); + resume p; + expect(global_y == 5); +} +fn simpleAsyncFnWithArg(delta: i32) void { + global_y += delta; + suspend; + global_y += delta; +} +//test "coroutine suspend, resume" { // seq('a'); // const p = try async testAsyncSeq(); // seq('c'); From 32d0ac135556903dc016a64ff4d6b5cb9e35c84a Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 22 Jul 2019 00:07:48 -0400 Subject: [PATCH 009/125] fix wrong calling convention on async resume --- src/codegen.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index 9184bd7c84..071daf5104 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -4981,7 +4981,7 @@ 
static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, assert(frame_type->id == ZigTypeIdCoroFrame); ZigFn *fn = frame_type->data.frame.fn; LLVMValueRef fn_val = fn_llvm_value(g, fn); - LLVMBuildCall(g->builder, fn_val, &frame, 1, ""); + ZigLLVMBuildCall(g->builder, fn_val, &frame, 1, LLVMFastCallConv, ZigLLVM_FnInlineAuto, ""); return nullptr; } From 5b69a9cd83d3ec7d3e54ac4c2c4635838aafe0de Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 22 Jul 2019 12:15:16 -0400 Subject: [PATCH 010/125] disable segfault handler when panicking this prevents a segfault in stack trace printing to activate the segfault handler. --- src/codegen.cpp | 277 ++++++++++++++++++------------------ src/codegen.hpp | 3 +- src/main.cpp | 7 +- std/debug.zig | 62 +++++--- std/os/windows/kernel32.zig | 1 + std/special/start.zig | 14 +- 6 files changed, 192 insertions(+), 172 deletions(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index 071daf5104..339b04cc90 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -89,126 +89,6 @@ static const char *symbols_that_llvm_depends_on[] = { // TODO probably all of compiler-rt needs to go here }; -CodeGen *codegen_create(Buf *main_pkg_path, Buf *root_src_path, const ZigTarget *target, - OutType out_type, BuildMode build_mode, Buf *override_lib_dir, Buf *override_std_dir, - ZigLibCInstallation *libc, Buf *cache_dir) -{ - CodeGen *g = allocate(1); - - codegen_add_time_event(g, "Initialize"); - - g->subsystem = TargetSubsystemAuto; - g->libc = libc; - g->zig_target = target; - g->cache_dir = cache_dir; - - if (override_lib_dir == nullptr) { - g->zig_lib_dir = get_zig_lib_dir(); - } else { - g->zig_lib_dir = override_lib_dir; - } - - if (override_std_dir == nullptr) { - g->zig_std_dir = buf_alloc(); - os_path_join(g->zig_lib_dir, buf_create_from_str("std"), g->zig_std_dir); - } else { - g->zig_std_dir = override_std_dir; - } - - g->zig_c_headers_dir = buf_alloc(); - os_path_join(g->zig_lib_dir, 
buf_create_from_str("include"), g->zig_c_headers_dir); - - g->build_mode = build_mode; - g->out_type = out_type; - g->import_table.init(32); - g->builtin_fn_table.init(32); - g->primitive_type_table.init(32); - g->type_table.init(32); - g->fn_type_table.init(32); - g->error_table.init(16); - g->generic_table.init(16); - g->llvm_fn_table.init(16); - g->memoized_fn_eval_table.init(16); - g->exported_symbol_names.init(8); - g->external_prototypes.init(8); - g->string_literals_table.init(16); - g->type_info_cache.init(32); - g->is_test_build = false; - g->is_single_threaded = false; - buf_resize(&g->global_asm, 0); - - for (size_t i = 0; i < array_length(symbols_that_llvm_depends_on); i += 1) { - g->external_prototypes.put(buf_create_from_str(symbols_that_llvm_depends_on[i]), nullptr); - } - - if (root_src_path) { - Buf *root_pkg_path; - Buf *rel_root_src_path; - if (main_pkg_path == nullptr) { - Buf *src_basename = buf_alloc(); - Buf *src_dir = buf_alloc(); - os_path_split(root_src_path, src_dir, src_basename); - - if (buf_len(src_basename) == 0) { - fprintf(stderr, "Invalid root source path: %s\n", buf_ptr(root_src_path)); - exit(1); - } - root_pkg_path = src_dir; - rel_root_src_path = src_basename; - } else { - Buf resolved_root_src_path = os_path_resolve(&root_src_path, 1); - Buf resolved_main_pkg_path = os_path_resolve(&main_pkg_path, 1); - - if (!buf_starts_with_buf(&resolved_root_src_path, &resolved_main_pkg_path)) { - fprintf(stderr, "Root source path '%s' outside main package path '%s'", - buf_ptr(root_src_path), buf_ptr(main_pkg_path)); - exit(1); - } - root_pkg_path = main_pkg_path; - rel_root_src_path = buf_create_from_mem( - buf_ptr(&resolved_root_src_path) + buf_len(&resolved_main_pkg_path) + 1, - buf_len(&resolved_root_src_path) - buf_len(&resolved_main_pkg_path) - 1); - } - - g->root_package = new_package(buf_ptr(root_pkg_path), buf_ptr(rel_root_src_path), ""); - g->std_package = new_package(buf_ptr(g->zig_std_dir), "std.zig", "std"); - 
g->root_package->package_table.put(buf_create_from_str("std"), g->std_package); - } else { - g->root_package = new_package(".", "", ""); - } - - g->root_package->package_table.put(buf_create_from_str("root"), g->root_package); - - g->zig_std_special_dir = buf_alloc(); - os_path_join(g->zig_std_dir, buf_sprintf("special"), g->zig_std_special_dir); - - assert(target != nullptr); - if (!target->is_native) { - g->each_lib_rpath = false; - } else { - g->each_lib_rpath = true; - - if (target_os_is_darwin(g->zig_target->os)) { - init_darwin_native(g); - } - - } - - if (target_os_requires_libc(g->zig_target->os)) { - g->libc_link_lib = create_link_lib(buf_create_from_str("c")); - g->link_libs_list.append(g->libc_link_lib); - } - - target_triple_llvm(&g->llvm_triple_str, g->zig_target); - g->pointer_size_bytes = target_arch_pointer_bit_width(g->zig_target->arch) / 8; - - if (!target_has_debug_info(g->zig_target)) { - g->strip_debug_symbols = true; - } - - return g; -} - void codegen_set_clang_argv(CodeGen *g, const char **args, size_t len) { g->clang_argv = args; g->clang_argv_len = len; @@ -233,10 +113,6 @@ void codegen_set_lib_version(CodeGen *g, size_t major, size_t minor, size_t patc g->version_patch = patch; } -void codegen_set_is_test(CodeGen *g, bool is_test_build) { - g->is_test_build = is_test_build; -} - void codegen_set_emit_file_type(CodeGen *g, EmitFileType emit_file_type) { g->emit_file_type = emit_file_type; } @@ -7459,6 +7335,14 @@ Buf *codegen_generate_builtin_source(CodeGen *g) { return contents; } +static ZigPackage *create_test_runner_pkg(CodeGen *g) { + return codegen_create_package(g, buf_ptr(g->zig_std_special_dir), "test_runner.zig", "std.special"); +} + +static ZigPackage *create_panic_pkg(CodeGen *g) { + return codegen_create_package(g, buf_ptr(g->zig_std_special_dir), "panic.zig", "std.special"); +} + static Error define_builtin_compile_vars(CodeGen *g) { if (g->std_package == nullptr) return ErrorNone; @@ -7543,8 +7427,16 @@ static Error 
define_builtin_compile_vars(CodeGen *g) { g->root_package->package_table.put(buf_create_from_str("builtin"), g->compile_var_package); g->std_package->package_table.put(buf_create_from_str("builtin"), g->compile_var_package); g->std_package->package_table.put(buf_create_from_str("std"), g->std_package); - g->std_package->package_table.put(buf_create_from_str("root"), - g->is_test_build ? g->test_runner_package : g->root_package); + ZigPackage *root_pkg; + if (g->is_test_build) { + if (g->test_runner_package == nullptr) { + g->test_runner_package = create_test_runner_pkg(g); + } + root_pkg = g->test_runner_package; + } else { + root_pkg = g->root_package; + } + g->std_package->package_table.put(buf_create_from_str("root"), root_pkg); g->compile_var_import = add_source_file(g, g->compile_var_package, builtin_zig_path, contents, SourceKindPkgMain); @@ -8036,14 +7928,6 @@ static ZigPackage *create_start_pkg(CodeGen *g, ZigPackage *pkg_with_main) { return package; } -static ZigPackage *create_test_runner_pkg(CodeGen *g) { - return codegen_create_package(g, buf_ptr(g->zig_std_special_dir), "test_runner.zig", "std.special"); -} - -static ZigPackage *create_panic_pkg(CodeGen *g) { - return codegen_create_package(g, buf_ptr(g->zig_std_special_dir), "panic.zig", "std.special"); -} - static void create_test_compile_var_and_add_test_runner(CodeGen *g) { Error err; @@ -8093,7 +7977,7 @@ static void create_test_compile_var_and_add_test_runner(CodeGen *g) { ConstExprValue *test_fn_slice = create_const_slice(g, test_fn_array, 0, g->test_fns.length, true); update_compile_var(g, buf_create_from_str("test_functions"), test_fn_slice); - g->test_runner_package = create_test_runner_pkg(g); + assert(g->test_runner_package != nullptr); g->test_runner_import = add_special_code(g, g->test_runner_package, "test_runner.zig"); } @@ -9207,7 +9091,8 @@ CodeGen *create_child_codegen(CodeGen *parent_gen, Buf *root_src_path, OutType o ZigLibCInstallation *libc) { CodeGen *child_gen = 
codegen_create(nullptr, root_src_path, parent_gen->zig_target, out_type, - parent_gen->build_mode, parent_gen->zig_lib_dir, parent_gen->zig_std_dir, libc, get_stage1_cache_path()); + parent_gen->build_mode, parent_gen->zig_lib_dir, parent_gen->zig_std_dir, libc, get_stage1_cache_path(), + false); child_gen->disable_gen_h = true; child_gen->want_stack_check = WantStackCheckDisabled; child_gen->verbose_tokenize = parent_gen->verbose_tokenize; @@ -9234,3 +9119,123 @@ CodeGen *create_child_codegen(CodeGen *parent_gen, Buf *root_src_path, OutType o return child_gen; } +CodeGen *codegen_create(Buf *main_pkg_path, Buf *root_src_path, const ZigTarget *target, + OutType out_type, BuildMode build_mode, Buf *override_lib_dir, Buf *override_std_dir, + ZigLibCInstallation *libc, Buf *cache_dir, bool is_test_build) +{ + CodeGen *g = allocate(1); + + codegen_add_time_event(g, "Initialize"); + + g->subsystem = TargetSubsystemAuto; + g->libc = libc; + g->zig_target = target; + g->cache_dir = cache_dir; + + if (override_lib_dir == nullptr) { + g->zig_lib_dir = get_zig_lib_dir(); + } else { + g->zig_lib_dir = override_lib_dir; + } + + if (override_std_dir == nullptr) { + g->zig_std_dir = buf_alloc(); + os_path_join(g->zig_lib_dir, buf_create_from_str("std"), g->zig_std_dir); + } else { + g->zig_std_dir = override_std_dir; + } + + g->zig_c_headers_dir = buf_alloc(); + os_path_join(g->zig_lib_dir, buf_create_from_str("include"), g->zig_c_headers_dir); + + g->build_mode = build_mode; + g->out_type = out_type; + g->import_table.init(32); + g->builtin_fn_table.init(32); + g->primitive_type_table.init(32); + g->type_table.init(32); + g->fn_type_table.init(32); + g->error_table.init(16); + g->generic_table.init(16); + g->llvm_fn_table.init(16); + g->memoized_fn_eval_table.init(16); + g->exported_symbol_names.init(8); + g->external_prototypes.init(8); + g->string_literals_table.init(16); + g->type_info_cache.init(32); + g->is_test_build = is_test_build; + g->is_single_threaded = false; + 
buf_resize(&g->global_asm, 0); + + for (size_t i = 0; i < array_length(symbols_that_llvm_depends_on); i += 1) { + g->external_prototypes.put(buf_create_from_str(symbols_that_llvm_depends_on[i]), nullptr); + } + + if (root_src_path) { + Buf *root_pkg_path; + Buf *rel_root_src_path; + if (main_pkg_path == nullptr) { + Buf *src_basename = buf_alloc(); + Buf *src_dir = buf_alloc(); + os_path_split(root_src_path, src_dir, src_basename); + + if (buf_len(src_basename) == 0) { + fprintf(stderr, "Invalid root source path: %s\n", buf_ptr(root_src_path)); + exit(1); + } + root_pkg_path = src_dir; + rel_root_src_path = src_basename; + } else { + Buf resolved_root_src_path = os_path_resolve(&root_src_path, 1); + Buf resolved_main_pkg_path = os_path_resolve(&main_pkg_path, 1); + + if (!buf_starts_with_buf(&resolved_root_src_path, &resolved_main_pkg_path)) { + fprintf(stderr, "Root source path '%s' outside main package path '%s'", + buf_ptr(root_src_path), buf_ptr(main_pkg_path)); + exit(1); + } + root_pkg_path = main_pkg_path; + rel_root_src_path = buf_create_from_mem( + buf_ptr(&resolved_root_src_path) + buf_len(&resolved_main_pkg_path) + 1, + buf_len(&resolved_root_src_path) - buf_len(&resolved_main_pkg_path) - 1); + } + + g->root_package = new_package(buf_ptr(root_pkg_path), buf_ptr(rel_root_src_path), ""); + g->std_package = new_package(buf_ptr(g->zig_std_dir), "std.zig", "std"); + g->root_package->package_table.put(buf_create_from_str("std"), g->std_package); + } else { + g->root_package = new_package(".", "", ""); + } + + g->root_package->package_table.put(buf_create_from_str("root"), g->root_package); + + g->zig_std_special_dir = buf_alloc(); + os_path_join(g->zig_std_dir, buf_sprintf("special"), g->zig_std_special_dir); + + assert(target != nullptr); + if (!target->is_native) { + g->each_lib_rpath = false; + } else { + g->each_lib_rpath = true; + + if (target_os_is_darwin(g->zig_target->os)) { + init_darwin_native(g); + } + + } + + if 
(target_os_requires_libc(g->zig_target->os)) { + g->libc_link_lib = create_link_lib(buf_create_from_str("c")); + g->link_libs_list.append(g->libc_link_lib); + } + + target_triple_llvm(&g->llvm_triple_str, g->zig_target); + g->pointer_size_bytes = target_arch_pointer_bit_width(g->zig_target->arch) / 8; + + if (!target_has_debug_info(g->zig_target)) { + g->strip_debug_symbols = true; + } + + return g; +} + diff --git a/src/codegen.hpp b/src/codegen.hpp index 5de36c1aab..cdff61a26f 100644 --- a/src/codegen.hpp +++ b/src/codegen.hpp @@ -18,14 +18,13 @@ CodeGen *codegen_create(Buf *main_pkg_path, Buf *root_src_path, const ZigTarget *target, OutType out_type, BuildMode build_mode, Buf *zig_lib_dir, Buf *override_std_dir, - ZigLibCInstallation *libc, Buf *cache_dir); + ZigLibCInstallation *libc, Buf *cache_dir, bool is_test_build); CodeGen *create_child_codegen(CodeGen *parent_gen, Buf *root_src_path, OutType out_type, ZigLibCInstallation *libc); void codegen_set_clang_argv(CodeGen *codegen, const char **args, size_t len); void codegen_set_llvm_argv(CodeGen *codegen, const char **args, size_t len); -void codegen_set_is_test(CodeGen *codegen, bool is_test); void codegen_set_each_lib_rpath(CodeGen *codegen, bool each_lib_rpath); void codegen_set_emit_file_type(CodeGen *g, EmitFileType emit_file_type); diff --git a/src/main.cpp b/src/main.cpp index ce68e53d85..42d0850046 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -583,7 +583,7 @@ int main(int argc, char **argv) { } CodeGen *g = codegen_create(main_pkg_path, build_runner_path, &target, OutTypeExe, - BuildModeDebug, override_lib_dir, override_std_dir, nullptr, &full_cache_dir); + BuildModeDebug, override_lib_dir, override_std_dir, nullptr, &full_cache_dir, false); g->valgrind_support = valgrind_support; g->enable_time_report = timing_info; codegen_set_out_name(g, buf_create_from_str("build")); @@ -1011,7 +1011,7 @@ int main(int argc, char **argv) { } case CmdBuiltin: { CodeGen *g = codegen_create(main_pkg_path, nullptr, 
&target, - out_type, build_mode, override_lib_dir, override_std_dir, nullptr, nullptr); + out_type, build_mode, override_lib_dir, override_std_dir, nullptr, nullptr, false); codegen_set_strip(g, strip); for (size_t i = 0; i < link_libs.length; i += 1) { LinkLib *link_lib = codegen_add_link_lib(g, buf_create_from_str(link_libs.at(i))); @@ -1115,7 +1115,7 @@ int main(int argc, char **argv) { cache_dir_buf = buf_create_from_str(cache_dir); } CodeGen *g = codegen_create(main_pkg_path, zig_root_source_file, &target, out_type, build_mode, - override_lib_dir, override_std_dir, libc, cache_dir_buf); + override_lib_dir, override_std_dir, libc, cache_dir_buf, cmd == CmdTest); if (llvm_argv.length >= 2) codegen_set_llvm_argv(g, llvm_argv.items + 1, llvm_argv.length - 2); g->valgrind_support = valgrind_support; g->want_pic = want_pic; @@ -1125,7 +1125,6 @@ int main(int argc, char **argv) { g->enable_time_report = timing_info; codegen_set_out_name(g, buf_out_name); codegen_set_lib_version(g, ver_major, ver_minor, ver_patch); - codegen_set_is_test(g, cmd == CmdTest); g->want_single_threaded = want_single_threaded; codegen_set_linker_script(g, linker_script); g->version_script_path = version_script; diff --git a/std/debug.zig b/std/debug.zig index d81e62901a..c416944860 100644 --- a/std/debug.zig +++ b/std/debug.zig @@ -12,6 +12,7 @@ const coff = std.coff; const pdb = std.pdb; const ArrayList = std.ArrayList; const builtin = @import("builtin"); +const root = @import("root"); const maxInt = std.math.maxInt; const File = std.fs.File; const windows = std.os.windows; @@ -217,6 +218,12 @@ var panicking: u8 = 0; // TODO make this a bool pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: ...) noreturn { @setCold(true); + if (enable_segfault_handler) { + // If a segfault happens while panicking, we want it to actually segfault, not trigger + // the handler. 
+ resetSegfaultHandler(); + } + if (@atomicRmw(u8, &panicking, builtin.AtomicRmwOp.Xchg, 1, builtin.AtomicOrder.SeqCst) == 1) { // Panicked during a panic. @@ -2312,39 +2319,58 @@ fn getDebugInfoAllocator() *mem.Allocator { /// Whether or not the current target can print useful debug information when a segfault occurs. pub const have_segfault_handling_support = (builtin.arch == builtin.Arch.x86_64 and builtin.os == .linux) or builtin.os == .windows; +pub const enable_segfault_handler: bool = if (@hasDecl(root, "enable_segfault_handler")) + root.enable_segfault_handler +else + runtime_safety and have_segfault_handling_support; + +pub fn maybeEnableSegfaultHandler() void { + if (enable_segfault_handler) { + std.debug.attachSegfaultHandler(); + } +} + +var windows_segfault_handle: ?windows.HANDLE = null; /// Attaches a global SIGSEGV handler which calls @panic("segmentation fault"); pub fn attachSegfaultHandler() void { if (!have_segfault_handling_support) { @compileError("segfault handler not supported for this target"); } - switch (builtin.os) { - .linux => { - var act = os.Sigaction{ - .sigaction = handleSegfaultLinux, - .mask = os.empty_sigset, - .flags = (os.SA_SIGINFO | os.SA_RESTART | os.SA_RESETHAND), - }; - - os.sigaction(os.SIGSEGV, &act, null); - }, - .windows => { - _ = windows.kernel32.AddVectoredExceptionHandler(0, handleSegfaultWindows); - }, - else => unreachable, + if (windows.is_the_target) { + windows_segfault_handle = windows.kernel32.AddVectoredExceptionHandler(0, handleSegfaultWindows); + return; } + var act = os.Sigaction{ + .sigaction = handleSegfaultLinux, + .mask = os.empty_sigset, + .flags = (os.SA_SIGINFO | os.SA_RESTART | os.SA_RESETHAND), + }; + + os.sigaction(os.SIGSEGV, &act, null); } -extern fn handleSegfaultLinux(sig: i32, info: *const os.siginfo_t, ctx_ptr: *const c_void) noreturn { - // Reset to the default handler so that if a segfault happens in this handler it will crash - // the process. 
Also when this handler returns, the original instruction will be repeated - // and the resulting segfault will crash the process rather than continually dump stack traces. +fn resetSegfaultHandler() void { + if (windows.is_the_target) { + if (windows_segfault_handle) |handle| { + assert(windows.kernel32.RemoveVectoredExceptionHandler() != 0); + windows_segfault_handle = null; + } + return; + } var act = os.Sigaction{ .sigaction = os.SIG_DFL, .mask = os.empty_sigset, .flags = 0, }; os.sigaction(os.SIGSEGV, &act, null); +} + +extern fn handleSegfaultLinux(sig: i32, info: *const os.siginfo_t, ctx_ptr: *const c_void) noreturn { + // Reset to the default handler so that if a segfault happens in this handler it will crash + // the process. Also when this handler returns, the original instruction will be repeated + // and the resulting segfault will crash the process rather than continually dump stack traces. + resetSegfaultHandler(); const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr)); const ip = @intCast(usize, ctx.mcontext.gregs[os.REG_RIP]); diff --git a/std/os/windows/kernel32.zig b/std/os/windows/kernel32.zig index e4edc349ab..2ae73ad45a 100644 --- a/std/os/windows/kernel32.zig +++ b/std/os/windows/kernel32.zig @@ -1,6 +1,7 @@ usingnamespace @import("bits.zig"); pub extern "kernel32" stdcallcc fn AddVectoredExceptionHandler(First: c_ulong, Handler: ?VECTORED_EXCEPTION_HANDLER) ?*c_void; +pub extern "kernel32" stdcallcc fn RemoveVectoredExceptionHandler(Handle: HANDLE) c_ulong; pub extern "kernel32" stdcallcc fn CancelIoEx(hFile: HANDLE, lpOverlapped: LPOVERLAPPED) BOOL; diff --git a/std/special/start.zig b/std/special/start.zig index 30298669e3..f8a018e0ae 100644 --- a/std/special/start.zig +++ b/std/special/start.zig @@ -24,16 +24,6 @@ comptime { } } -fn enableSegfaultHandler() void { - const enable_segfault_handler: bool = if (@hasDecl(root, "enable_segfault_handler")) - root.enable_segfault_handler - else - 
std.debug.runtime_safety and std.debug.have_segfault_handling_support; - if (enable_segfault_handler) { - std.debug.attachSegfaultHandler(); - } -} - extern fn wasm_freestanding_start() void { _ = callMain(); } @@ -72,7 +62,7 @@ extern fn WinMainCRTStartup() noreturn { _ = @import("start_windows_tls.zig"); } - enableSegfaultHandler(); + std.debug.maybeEnableSegfaultHandler(); std.os.windows.kernel32.ExitProcess(callMain()); } @@ -113,7 +103,7 @@ inline fn callMainWithArgs(argc: usize, argv: [*][*]u8, envp: [][*]u8) u8 { std.os.argv = argv[0..argc]; std.os.environ = envp; - enableSegfaultHandler(); + std.debug.maybeEnableSegfaultHandler(); return callMain(); } From 650e07ebd96d6c476cadc1f7c19856e950ceef9c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 22 Jul 2019 13:04:22 -0400 Subject: [PATCH 011/125] fix suspend at end of function --- src/ir.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/ir.cpp b/src/ir.cpp index d0a11c2f1e..6f9f582c6f 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -3212,7 +3212,6 @@ static IrInstruction *ir_build_suspend_br(IrBuilder *irb, Scope *scope, AstNode IrBasicBlock *resume_block) { IrInstructionSuspendBr *instruction = ir_build_instruction(irb, scope, source_node); - instruction->base.value.type = irb->codegen->builtin_types.entry_unreachable; instruction->resume_block = resume_block; ir_ref_bb(resume_block); @@ -7744,6 +7743,7 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod } IrInstruction *result = ir_build_suspend_br(irb, parent_scope, node, resume_block); + result->value.type = irb->codegen->builtin_types.entry_void; ir_set_cursor_at_end_and_append_block(irb, resume_block); return result; } @@ -10279,7 +10279,7 @@ static IrBasicBlock *ir_get_new_bb_runtime(IrAnalyze *ira, IrBasicBlock *old_bb, } static void ir_start_bb(IrAnalyze *ira, IrBasicBlock *old_bb, IrBasicBlock *const_predecessor_bb) { - ir_assert(!old_bb->suspended, old_bb->instruction_list.at(0)); + 
ir_assert(!old_bb->suspended, (old_bb->instruction_list.length != 0) ? old_bb->instruction_list.at(0) : nullptr); ira->instruction_index = 0; ira->old_irb.current_basic_block = old_bb; ira->const_predecessor_bb = const_predecessor_bb; @@ -22547,7 +22547,7 @@ static IrInstruction *ir_analyze_instruction_check_statement_is_void(IrAnalyze * if (type_is_invalid(statement_type)) return ira->codegen->invalid_instruction; - if (statement_type->id != ZigTypeIdVoid) { + if (statement_type->id != ZigTypeIdVoid && statement_type->id != ZigTypeIdUnreachable) { ir_add_error(ira, &instruction->base, buf_sprintf("expression value is ignored")); } @@ -24147,6 +24147,7 @@ static IrInstruction *ir_analyze_instruction_suspend_br(IrAnalyze *ira, IrInstru IrInstruction *result = ir_build_suspend_br(&ira->new_irb, instruction->base.scope, instruction->base.source_node, new_bb); + result->value.type = ira->codegen->builtin_types.entry_unreachable; return ir_finish_anal(ira, result); } From fcadeb50c04199fce2d9675ba2976680c71c67ff Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 22 Jul 2019 14:36:14 -0400 Subject: [PATCH 012/125] fix multiple coroutines existing clobbering each other --- src/analyze.cpp | 68 +++++++++++++++++++++-------- src/analyze.hpp | 2 +- src/codegen.cpp | 66 +++++++++++++++++----------- test/runtime_safety.zig | 14 ++++++ test/stage1/behavior/coroutines.zig | 18 ++++++++ 5 files changed, 123 insertions(+), 45 deletions(-) diff --git a/src/analyze.cpp b/src/analyze.cpp index aff11e017f..4bb3de095e 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1865,7 +1865,8 @@ static Error resolve_union_type(CodeGen *g, ZigType *union_type) { } static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { - assert(frame_type->data.frame.locals_struct == nullptr); + if (frame_type->data.frame.locals_struct != nullptr) + return ErrorNone; ZigFn *fn = frame_type->data.frame.fn; switch (fn->anal_state) { @@ -3824,6 +3825,15 @@ static void analyze_fn_ir(CodeGen *g, 
ZigFn *fn_table_entry, AstNode *return_typ } fn_table_entry->anal_state = FnAnalStateComplete; + + if (fn_table_entry->resume_blocks.length != 0) { + ZigType *frame_type = get_coro_frame_type(g, fn_table_entry); + Error err; + if ((err = type_resolve(g, frame_type, ResolveStatusSizeKnown))) { + fn_table_entry->anal_state = FnAnalStateInvalid; + return; + } + } } static void analyze_fn_body(CodeGen *g, ZigFn *fn_table_entry) { @@ -7050,18 +7060,12 @@ static void resolve_llvm_types_array(CodeGen *g, ZigType *type) { debug_align_in_bits, get_llvm_di_type(g, elem_type), (int)type->data.array.len); } -void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type, ZigFn *fn) { - if (fn_type->llvm_di_type != nullptr) { - if (fn != nullptr) { - fn->raw_type_ref = fn_type->data.fn.raw_type_ref; - fn->raw_di_type = fn_type->data.fn.raw_di_type; - } - return; - } +static void resolve_llvm_types_fn_type(CodeGen *g, ZigType *fn_type) { + if (fn_type->llvm_di_type != nullptr) return; FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; bool first_arg_return = want_first_arg_sret(g, fn_type_id); - bool is_async = fn_type_id->cc == CallingConventionAsync || (fn != nullptr && fn->resume_blocks.length != 0); + bool is_async = fn_type_id->cc == CallingConventionAsync; bool is_c_abi = fn_type_id->cc == CallingConventionC; bool prefix_arg_error_return_trace = g->have_err_ret_tracing && fn_type_can_fail(fn_type_id); // +1 for maybe making the first argument the return value @@ -7100,7 +7104,11 @@ void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type, ZigFn *fn) { if (is_async) { fn_type->data.fn.gen_param_info = allocate(1); - ZigType *frame_type = (fn == nullptr) ? 
g->builtin_types.entry_frame_header : get_coro_frame_type(g, fn); + ZigType *frame_type = g->builtin_types.entry_frame_header; + Error err; + if ((err = type_resolve(g, frame_type, ResolveStatusSizeKnown))) { + zig_unreachable(); + } ZigType *ptr_type = get_pointer_to_type(g, frame_type, false); gen_param_types.append(get_llvm_type(g, ptr_type)); param_di_types.append(get_llvm_di_type(g, ptr_type)); @@ -7150,12 +7158,7 @@ void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type, ZigFn *fn) { for (size_t i = 0; i < gen_param_types.length; i += 1) { assert(gen_param_types.items[i] != nullptr); } - if (fn != nullptr) { - fn->raw_type_ref = LLVMFunctionType(get_llvm_type(g, gen_return_type), - gen_param_types.items, (unsigned int)gen_param_types.length, fn_type_id->is_var_args); - fn->raw_di_type = ZigLLVMCreateSubroutineType(g->dbuilder, param_di_types.items, (int)param_di_types.length, 0); - return; - } + fn_type->data.fn.raw_type_ref = LLVMFunctionType(get_llvm_type(g, gen_return_type), gen_param_types.items, (unsigned int)gen_param_types.length, fn_type_id->is_var_args); fn_type->llvm_type = LLVMPointerType(fn_type->data.fn.raw_type_ref, 0); @@ -7165,6 +7168,35 @@ void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type, ZigFn *fn) { LLVMABIAlignmentOfType(g->target_data_ref, fn_type->llvm_type), ""); } +void resolve_llvm_types_fn(CodeGen *g, ZigFn *fn) { + if (fn->raw_di_type != nullptr) return; + + ZigType *fn_type = fn->type_entry; + FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; + bool cc_async = fn_type_id->cc == CallingConventionAsync; + bool inferred_async = fn->resume_blocks.length != 0; + bool is_async = cc_async || inferred_async; + if (!is_async) { + resolve_llvm_types_fn_type(g, fn_type); + fn->raw_type_ref = fn_type->data.fn.raw_type_ref; + fn->raw_di_type = fn_type->data.fn.raw_di_type; + return; + } + + ZigType *gen_return_type = g->builtin_types.entry_usize; + ZigList param_di_types = {}; + // first "parameter" is return value + 
param_di_types.append(get_llvm_di_type(g, gen_return_type)); + + ZigType *frame_type = get_coro_frame_type(g, fn); + ZigType *ptr_type = get_pointer_to_type(g, frame_type, false); + LLVMTypeRef gen_param_type = get_llvm_type(g, ptr_type); + param_di_types.append(get_llvm_di_type(g, ptr_type)); + + fn->raw_type_ref = LLVMFunctionType(get_llvm_type(g, gen_return_type), &gen_param_type, 1, false); + fn->raw_di_type = ZigLLVMCreateSubroutineType(g->dbuilder, param_di_types.items, (int)param_di_types.length, 0); +} + static void resolve_llvm_types_anyerror(CodeGen *g) { ZigType *entry = g->builtin_types.entry_global_error_set; entry->llvm_type = get_llvm_type(g, g->err_tag_type); @@ -7241,7 +7273,7 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r case ZigTypeIdArray: return resolve_llvm_types_array(g, type); case ZigTypeIdFn: - return resolve_llvm_types_fn(g, type, nullptr); + return resolve_llvm_types_fn_type(g, type); case ZigTypeIdErrorSet: { if (type->llvm_di_type != nullptr) return; diff --git a/src/analyze.hpp b/src/analyze.hpp index 3f226080b5..57f4452104 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -247,6 +247,6 @@ void src_assert(bool ok, AstNode *source_node); bool is_container(ZigType *type_entry); ConstExprValue *analyze_const_value(CodeGen *g, Scope *scope, AstNode *node, ZigType *type_entry, Buf *type_name); -void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type, ZigFn *fn); +void resolve_llvm_types_fn(CodeGen *g, ZigFn *fn); #endif diff --git a/src/codegen.cpp b/src/codegen.cpp index 339b04cc90..f3519ea72d 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -371,10 +371,12 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) { symbol_name = buf_sprintf("\x01_%s", buf_ptr(symbol_name)); } + bool is_async = fn_table_entry->resume_blocks.length != 0 || cc == CallingConventionAsync; + ZigType *fn_type = fn_table_entry->type_entry; // Make the raw_type_ref populated - resolve_llvm_types_fn(g, 
fn_type, fn_table_entry); + resolve_llvm_types_fn(g, fn_table_entry); LLVMTypeRef fn_llvm_type = fn_table_entry->raw_type_ref; if (fn_table_entry->body_node == nullptr) { LLVMValueRef existing_llvm_fn = LLVMGetNamedFunction(g->module, buf_ptr(symbol_name)); @@ -397,7 +399,7 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) { assert(entry->value->id == TldIdFn); TldFn *tld_fn = reinterpret_cast(entry->value); // Make the raw_type_ref populated - resolve_llvm_types_fn(g, tld_fn->fn_entry->type_entry, tld_fn->fn_entry); + resolve_llvm_types_fn(g, tld_fn->fn_entry); tld_fn->fn_entry->llvm_value = LLVMAddFunction(g->module, buf_ptr(symbol_name), tld_fn->fn_entry->raw_type_ref); fn_table_entry->llvm_value = LLVMConstBitCast(tld_fn->fn_entry->llvm_value, @@ -517,18 +519,22 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) { init_gen_i = 1; } - // set parameter attributes - FnWalk fn_walk = {}; - fn_walk.id = FnWalkIdAttrs; - fn_walk.data.attrs.fn = fn_table_entry; - fn_walk.data.attrs.gen_i = init_gen_i; - walk_function_params(g, fn_type, &fn_walk); + if (is_async) { + addLLVMArgAttr(fn_table_entry->llvm_value, 0, "nonnull"); + } else { + // set parameter attributes + FnWalk fn_walk = {}; + fn_walk.id = FnWalkIdAttrs; + fn_walk.data.attrs.fn = fn_table_entry; + fn_walk.data.attrs.gen_i = init_gen_i; + walk_function_params(g, fn_type, &fn_walk); - uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn_table_entry); - if (err_ret_trace_arg_index != UINT32_MAX) { - // Error return trace memory is in the stack, which is impossible to be at address 0 - // on any architecture. - addLLVMArgAttr(fn_table_entry->llvm_value, (unsigned)err_ret_trace_arg_index, "nonnull"); + uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn_table_entry); + if (err_ret_trace_arg_index != UINT32_MAX) { + // Error return trace memory is in the stack, which is impossible to be at address 0 + // on any architecture. 
+ addLLVMArgAttr(fn_table_entry->llvm_value, (unsigned)err_ret_trace_arg_index, "nonnull"); + } } return fn_table_entry->llvm_value; @@ -6254,14 +6260,21 @@ static void do_code_gen(CodeGen *g) { } else if (is_c_abi) { fn_walk_var.data.vars.var = var; iter_function_params_c_abi(g, fn_table_entry->type_entry, &fn_walk_var, var->src_arg_index); + } else if (is_async) { + var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_arg_start + var_i, ""); + if (var->decl_node) { + var->di_loc_var = ZigLLVMCreateAutoVariable(g->dbuilder, get_di_scope(g, var->parent_scope), + buf_ptr(&var->name), import->data.structure.root_struct->di_file, + (unsigned)(var->decl_node->line + 1), + get_llvm_di_type(g, var->var_type), !g->strip_debug_symbols, 0); + gen_var_debug_decl(g, var); + } } else { ZigType *gen_type; FnGenParamInfo *gen_info = &fn_table_entry->type_entry->data.fn.gen_param_info[var->src_arg_index]; assert(gen_info->gen_index != SIZE_MAX); - if (is_async) { - var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_arg_start + var_i, ""); - } else if (handle_is_ptr(var->var_type)) { + if (handle_is_ptr(var->var_type)) { if (gen_info->is_byval) { gen_type = var->var_type; } else { @@ -6307,16 +6320,7 @@ static void do_code_gen(CodeGen *g) { gen_store(g, LLVMConstInt(usize->llvm_type, stack_trace_ptr_count, false), len_field_ptr, get_pointer_to_type(g, usize, false)); } - // create debug variable declarations for parameters - // rely on the first variables in the variable_list being parameters. 
- FnWalk fn_walk_init = {}; - fn_walk_init.id = FnWalkIdInits; - fn_walk_init.data.inits.fn = fn_table_entry; - fn_walk_init.data.inits.llvm_fn = fn; - fn_walk_init.data.inits.gen_i = gen_i_init; - walk_function_params(g, fn_table_entry->type_entry, &fn_walk_init); - - if (fn_table_entry->resume_blocks.length != 0) { + if (is_async) { if (!g->strip_debug_symbols) { AstNode *source_node = fn_table_entry->proto_node; ZigLLVMSetCurrentDebugLocation(g->builder, (int)source_node->line + 1, @@ -6354,8 +6358,18 @@ static void do_code_gen(CodeGen *g) { LLVMValueRef case_value = LLVMConstInt(usize_type_ref, resume_i + 2, false); LLVMAddCase(switch_instr, case_value, fn_table_entry->resume_blocks.at(resume_i)->llvm_block); } + } else { + // create debug variable declarations for parameters + // rely on the first variables in the variable_list being parameters. + FnWalk fn_walk_init = {}; + fn_walk_init.id = FnWalkIdInits; + fn_walk_init.data.inits.fn = fn_table_entry; + fn_walk_init.data.inits.llvm_fn = fn; + fn_walk_init.data.inits.gen_i = gen_i_init; + walk_function_params(g, fn_table_entry->type_entry, &fn_walk_init); } + ir_render(g, fn_table_entry); } diff --git a/test/runtime_safety.zig b/test/runtime_safety.zig index aee607e10f..336dbb8bf0 100644 --- a/test/runtime_safety.zig +++ b/test/runtime_safety.zig @@ -1,6 +1,20 @@ const tests = @import("tests.zig"); pub fn addCases(cases: *tests.CompareOutputContext) void { + cases.addRuntimeSafety("invalid resume of async function", + \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn { + \\ @import("std").os.exit(126); + \\} + \\pub fn main() void { + \\ var p = async suspendOnce(); + \\ resume p; //ok + \\ resume p; //bad + \\} + \\fn suspendOnce() void { + \\ suspend; + \\} + ); + cases.addRuntimeSafety(".? 
operator on null pointer", \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn { \\ @import("std").os.exit(126); diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index 3a54657020..4ecd4efd13 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -29,6 +29,24 @@ fn simpleAsyncFnWithArg(delta: i32) void { suspend; global_y += delta; } + +test "suspend at end of function" { + const S = struct { + var x: i32 = 1; + + fn doTheTest() void { + expect(x == 1); + const p = async suspendAtEnd(); + expect(x == 2); + } + + fn suspendAtEnd() void { + x += 1; + suspend; + } + }; + S.doTheTest(); +} //test "coroutine suspend, resume" { // seq('a'); // const p = try async testAsyncSeq(); From 7e9760de10e05a4c2a7bae4c4bb945351b9ae0cb Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 23 Jul 2019 18:54:45 -0400 Subject: [PATCH 013/125] inferring async from async calls --- build.zig | 4 +- src/all_types.hpp | 7 ++++ src/analyze.cpp | 100 +++++++++++++++++++++++++++++++++++++--------- src/analyze.hpp | 1 + src/codegen.cpp | 20 ++++++---- src/ir.cpp | 19 +++++++++ 6 files changed, 123 insertions(+), 28 deletions(-) diff --git a/build.zig b/build.zig index 011f742d82..45758d4075 100644 --- a/build.zig +++ b/build.zig @@ -375,7 +375,9 @@ fn addLibUserlandStep(b: *Builder) void { artifact.bundle_compiler_rt = true; artifact.setTarget(builtin.arch, builtin.os, builtin.abi); artifact.linkSystemLibrary("c"); - artifact.linkSystemLibrary("ntdll"); + if (builtin.os == .windows) { + artifact.linkSystemLibrary("ntdll"); + } const libuserland_step = b.step("libuserland", "Build the userland compiler library for use in stage1"); libuserland_step.dependOn(&artifact.step); diff --git a/src/all_types.hpp b/src/all_types.hpp index aa7ff06ce9..8991b53e64 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1336,6 +1336,11 @@ struct GlobalExport { GlobalLinkageId linkage; }; 
+struct FnCall { + AstNode *source_node; + ZigFn *callee; +}; + struct ZigFn { CodeGen *codegen; LLVMValueRef llvm_value; @@ -1379,8 +1384,10 @@ struct ZigFn { AstNode *set_alignstack_node; AstNode *set_cold_node; + const AstNode *inferred_async_node; ZigList export_list; + ZigList call_list; LLVMValueRef valgrind_client_request_array; LLVMBasicBlockRef preamble_llvm_block; diff --git a/src/analyze.cpp b/src/analyze.cpp index 80e22c6c62..3da13dcc02 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -31,6 +31,11 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r static void preview_use_decl(CodeGen *g, TldUsingNamespace *using_namespace, ScopeDecls *dest_decls_scope); static void resolve_use_decl(CodeGen *g, TldUsingNamespace *tld_using_namespace, ScopeDecls *dest_decls_scope); +// nullptr means not analyzed yet; this one means currently being analyzed +static const AstNode *inferred_async_checking = reinterpret_cast(0x1); +// this one means analyzed and it's not async +static const AstNode *inferred_async_none = reinterpret_cast(0x2); + static bool is_top_level_struct(ZigType *import) { return import->id == ZigTypeIdStruct && import->data.structure.root_struct != nullptr; } @@ -1892,8 +1897,12 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { field_names.append("resume_index"); field_types.append(g->builtin_types.entry_usize); - for (size_t arg_i = 0; arg_i < fn->type_entry->data.fn.fn_type_id.param_count; arg_i += 1) { - FnTypeParamInfo *param_info = &fn->type_entry->data.fn.fn_type_id.param_info[arg_i]; + FnTypeId *fn_type_id = &fn->type_entry->data.fn.fn_type_id; + field_names.append("result"); + field_types.append(fn_type_id->return_type); + + for (size_t arg_i = 0; arg_i < fn_type_id->param_count; arg_i += 1) { + FnTypeParamInfo *param_info = &fn_type_id->param_info[arg_i]; AstNode *param_decl_node = get_param_decl_node(fn, arg_i); Buf *param_name; bool is_var_args = param_decl_node && 
param_decl_node->data.param_decl.is_var_args; @@ -2796,6 +2805,16 @@ static void resolve_decl_fn(CodeGen *g, TldFn *tld_fn) { g->fn_defs.append(fn_table_entry); } + switch (fn_table_entry->type_entry->data.fn.fn_type_id.cc) { + case CallingConventionAsync: + fn_table_entry->inferred_async_node = fn_table_entry->proto_node; + break; + case CallingConventionUnspecified: + break; + default: + fn_table_entry->inferred_async_node = inferred_async_none; + } + if (scope_is_root_decls(tld_fn->base.parent_scope) && (import == g->root_import || import->data.structure.root_struct->package == g->panic_package)) { @@ -3767,6 +3786,55 @@ bool resolve_inferred_error_set(CodeGen *g, ZigType *err_set_type, AstNode *sour return true; } +static void resolve_async_fn_frame(CodeGen *g, ZigFn *fn) { + ZigType *frame_type = get_coro_frame_type(g, fn); + Error err; + if ((err = type_resolve(g, frame_type, ResolveStatusSizeKnown))) { + fn->anal_state = FnAnalStateInvalid; + return; + } +} + +bool fn_is_async(ZigFn *fn) { + assert(fn->inferred_async_node != nullptr); + assert(fn->inferred_async_node != inferred_async_checking); + return fn->inferred_async_node != inferred_async_none; +} + +// This function resolves functions being inferred async. 
+static void analyze_fn_async(CodeGen *g, ZigFn *fn) { + if (fn->inferred_async_node == inferred_async_checking) { + // TODO call graph cycle detected, disallow the recursion + fn->inferred_async_node = inferred_async_none; + return; + } + if (fn->inferred_async_node == inferred_async_none) { + return; + } + if (fn->inferred_async_node != nullptr) { + resolve_async_fn_frame(g, fn); + return; + } + fn->inferred_async_node = inferred_async_checking; + for (size_t i = 0; i < fn->call_list.length; i += 1) { + FnCall *call = &fn->call_list.at(i); + if (call->callee->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified) + continue; + assert(call->callee->anal_state == FnAnalStateComplete); + analyze_fn_async(g, call->callee); + if (call->callee->anal_state == FnAnalStateInvalid) { + fn->anal_state = FnAnalStateInvalid; + return; + } + if (fn_is_async(call->callee)) { + fn->inferred_async_node = call->source_node; + resolve_async_fn_frame(g, fn); + return; + } + } + fn->inferred_async_node = inferred_async_none; +} + static void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_type_node) { ZigType *fn_type = fn_table_entry->type_entry; assert(!fn_type->data.fn.is_generic); @@ -3824,17 +3892,7 @@ static void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_typ ir_print(g, stderr, &fn_table_entry->analyzed_executable, 4); fprintf(stderr, "}\n"); } - fn_table_entry->anal_state = FnAnalStateComplete; - - if (fn_table_entry->resume_blocks.length != 0) { - ZigType *frame_type = get_coro_frame_type(g, fn_table_entry); - Error err; - if ((err = type_resolve(g, frame_type, ResolveStatusSizeKnown))) { - fn_table_entry->anal_state = FnAnalStateInvalid; - return; - } - } } static void analyze_fn_body(CodeGen *g, ZigFn *fn_table_entry) { @@ -4004,6 +4062,16 @@ void semantic_analyze(CodeGen *g) { analyze_fn_body(g, fn_entry); } } + + if (g->errors.length != 0) { + return; + } + + // second pass over functions for detecting async + for 
(g->fn_defs_index = 0; g->fn_defs_index < g->fn_defs.length; g->fn_defs_index += 1) { + ZigFn *fn_entry = g->fn_defs.at(g->fn_defs_index); + analyze_fn_async(g, fn_entry); + } } ZigType *get_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits) { @@ -7173,11 +7241,7 @@ void resolve_llvm_types_fn(CodeGen *g, ZigFn *fn) { if (fn->raw_di_type != nullptr) return; ZigType *fn_type = fn->type_entry; - FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; - bool cc_async = fn_type_id->cc == CallingConventionAsync; - bool inferred_async = fn->resume_blocks.length != 0; - bool is_async = cc_async || inferred_async; - if (!is_async) { + if (!fn_is_async(fn)) { resolve_llvm_types_fn_type(g, fn_type); fn->raw_type_ref = fn_type->data.fn.raw_type_ref; fn->raw_di_type = fn_type->data.fn.raw_di_type; @@ -7223,8 +7287,6 @@ static void resolve_llvm_types_anyerror(CodeGen *g) { } static void resolve_llvm_types_coro_frame(CodeGen *g, ZigType *frame_type, ResolveStatus wanted_resolve_status) { - if (frame_type->llvm_di_type != nullptr) return; - resolve_llvm_types_struct(g, frame_type->data.frame.locals_struct, wanted_resolve_status); frame_type->llvm_type = frame_type->data.frame.locals_struct->llvm_type; frame_type->llvm_di_type = frame_type->data.frame.locals_struct->llvm_di_type; diff --git a/src/analyze.hpp b/src/analyze.hpp index 57f4452104..50e7b72309 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -248,5 +248,6 @@ bool is_container(ZigType *type_entry); ConstExprValue *analyze_const_value(CodeGen *g, Scope *scope, AstNode *node, ZigType *type_entry, Buf *type_name); void resolve_llvm_types_fn(CodeGen *g, ZigFn *fn); +bool fn_is_async(ZigFn *fn); #endif diff --git a/src/codegen.cpp b/src/codegen.cpp index f3519ea72d..6c0bedee2d 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -371,7 +371,7 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) { symbol_name = buf_sprintf("\x01_%s", buf_ptr(symbol_name)); } - bool is_async = 
fn_table_entry->resume_blocks.length != 0 || cc == CallingConventionAsync; + bool is_async = fn_is_async(fn_table_entry); ZigType *fn_type = fn_table_entry->type_entry; @@ -1847,7 +1847,7 @@ static bool iter_function_params_c_abi(CodeGen *g, ZigType *fn_type, FnWalk *fn_ } case FnWalkIdInits: { clear_debug_source_node(g); - if (fn_walk->data.inits.fn->resume_blocks.length == 0) { + if (!fn_is_async(fn_walk->data.inits.fn)) { LLVMValueRef arg = LLVMGetParam(llvm_fn, fn_walk->data.inits.gen_i); LLVMTypeRef ptr_to_int_type_ref = LLVMPointerType(LLVMIntType((unsigned)ty_size * 8), 0); LLVMValueRef bitcasted = LLVMBuildBitCast(g->builder, var->value_ref, ptr_to_int_type_ref, ""); @@ -1945,7 +1945,7 @@ void walk_function_params(CodeGen *g, ZigType *fn_type, FnWalk *fn_walk) { assert(variable); assert(variable->value_ref); - if (!handle_is_ptr(variable->var_type) && fn_walk->data.inits.fn->resume_blocks.length == 0) { + if (!handle_is_ptr(variable->var_type) && !fn_is_async(fn_walk->data.inits.fn)) { clear_debug_source_node(g); ZigType *fn_type = fn_table_entry->type_entry; unsigned gen_arg_index = fn_type->data.fn.gen_param_info[variable->src_arg_index].gen_index; @@ -1986,7 +1986,7 @@ static LLVMValueRef ir_render_save_err_ret_addr(CodeGen *g, IrExecutable *execut } static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *return_instruction) { - if (g->cur_fn->resume_blocks.length != 0) { + if (fn_is_async(g->cur_fn)) { if (ir_want_runtime_safety(g, &return_instruction->base)) { LLVMValueRef locals_ptr = g->cur_ret_ptr; LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_resume_index_index, ""); @@ -3387,8 +3387,10 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMValueRef result; if (instruction->is_async) { + size_t ret_1_or_0 = type_has_bits(fn_type->data.fn.fn_type_id.return_type) ? 
1 : 0; for (size_t arg_i = 0; arg_i < gen_param_values.length; arg_i += 1) { - LLVMValueRef arg_ptr = LLVMBuildStructGEP(g->builder, result_loc, coro_arg_start + arg_i, ""); + LLVMValueRef arg_ptr = LLVMBuildStructGEP(g->builder, result_loc, + coro_arg_start + ret_1_or_0 + arg_i, ""); LLVMBuildStore(g->builder, gen_param_values.at(arg_i), arg_ptr); } ZigLLVMBuildCall(g->builder, fn_val, &result_loc, 1, llvm_cc, fn_inline, ""); @@ -5983,7 +5985,7 @@ static void build_all_basic_blocks(CodeGen *g, ZigFn *fn) { assert(executable->basic_block_list.length > 0); LLVMValueRef fn_val = fn_llvm_value(g, fn); LLVMBasicBlockRef first_bb = nullptr; - if (fn->resume_blocks.length != 0) { + if (fn_is_async(fn)) { first_bb = LLVMAppendBasicBlock(fn_val, "AsyncSwitch"); fn->preamble_llvm_block = first_bb; } @@ -6171,7 +6173,7 @@ static void do_code_gen(CodeGen *g) { build_all_basic_blocks(g, fn_table_entry); clear_debug_source_node(g); - bool is_async = cc == CallingConventionAsync || fn_table_entry->resume_blocks.length != 0; + bool is_async = fn_is_async(fn_table_entry); if (want_sret || is_async) { g->cur_ret_ptr = LLVMGetParam(fn, 0); @@ -6261,7 +6263,9 @@ static void do_code_gen(CodeGen *g) { fn_walk_var.data.vars.var = var; iter_function_params_c_abi(g, fn_table_entry->type_entry, &fn_walk_var, var->src_arg_index); } else if (is_async) { - var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_arg_start + var_i, ""); + size_t ret_1_or_0 = type_has_bits(fn_type_id->return_type) ? 
1 : 0; + var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, + coro_arg_start + ret_1_or_0 + var_i, ""); if (var->decl_node) { var->di_loc_var = ZigLLVMCreateAutoVariable(g->dbuilder, get_di_scope(g, var->parent_scope), buf_ptr(&var->name), import->data.structure.root_struct->di_file, diff --git a/src/ir.cpp b/src/ir.cpp index 6f9f582c6f..0cc68eaa55 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -15383,6 +15383,13 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c zig_panic("TODO async call"); } + if (!call_instruction->is_async) { + if (impl_fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) { + parent_fn_entry->inferred_async_node = fn_ref->source_node; + } + parent_fn_entry->call_list.append({call_instruction->base.source_node, impl_fn}); + } + IrInstruction *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base, impl_fn, nullptr, impl_param_count, casted_args, fn_inline, call_instruction->is_async, casted_new_stack, result_loc, @@ -15458,6 +15465,15 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c return ira->codegen->invalid_instruction; } + if (!call_instruction->is_async) { + if (fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) { + parent_fn_entry->inferred_async_node = fn_ref->source_node; + } + if (fn_entry != nullptr) { + parent_fn_entry->call_list.append({call_instruction->base.source_node, fn_entry}); + } + } + if (call_instruction->is_async) { IrInstruction *result = ir_analyze_async_call(ira, call_instruction, fn_entry, fn_type, fn_ref, casted_args, call_param_count); @@ -24142,6 +24158,9 @@ static IrInstruction *ir_analyze_instruction_suspend_br(IrAnalyze *ira, IrInstru new_bb->resume_index = fn_entry->resume_blocks.length + 2; fn_entry->resume_blocks.append(new_bb); + if (fn_entry->inferred_async_node == nullptr) { + fn_entry->inferred_async_node = 
instruction->base.source_node; + } ir_push_resume_block(ira, old_dest_block); From 19ee4957502c704312646f75544e968b618aa807 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 23 Jul 2019 19:35:41 -0400 Subject: [PATCH 014/125] add error for function with ccc indirectly calling async function --- src/all_types.hpp | 2 +- src/analyze.cpp | 56 ++++++++++++++++++++++++++++++++--------- src/analyze.hpp | 4 +-- test/compile_errors.zig | 18 +++++++++++++ 4 files changed, 65 insertions(+), 15 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 8991b53e64..a68f19a877 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1342,7 +1342,6 @@ struct FnCall { }; struct ZigFn { - CodeGen *codegen; LLVMValueRef llvm_value; const char *llvm_name; AstNode *proto_node; @@ -1385,6 +1384,7 @@ struct ZigFn { AstNode *set_cold_node; const AstNode *inferred_async_node; + ZigFn *inferred_async_fn; ZigList export_list; ZigList call_list; diff --git a/src/analyze.cpp b/src/analyze.cpp index 3da13dcc02..fe86c613f3 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -61,14 +61,14 @@ ErrorMsg *add_token_error(CodeGen *g, ZigType *owner, Token *token, Buf *msg) { return err; } -ErrorMsg *add_node_error(CodeGen *g, AstNode *node, Buf *msg) { +ErrorMsg *add_node_error(CodeGen *g, const AstNode *node, Buf *msg) { Token fake_token; fake_token.start_line = node->line; fake_token.start_column = node->column; return add_token_error(g, node->owner, &fake_token, msg); } -ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, AstNode *node, Buf *msg) { +ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, const AstNode *node, Buf *msg) { Token fake_token; fake_token.start_line = node->line; fake_token.start_column = node->column; @@ -2656,7 +2656,6 @@ ZigFn *create_fn_raw(CodeGen *g, FnInline inline_value) { fn_entry->prealloc_backward_branch_quota = default_backward_branch_quota; - fn_entry->codegen = g; fn_entry->analyzed_executable.backward_branch_count = 
&fn_entry->prealloc_bbc; fn_entry->analyzed_executable.backward_branch_quota = &fn_entry->prealloc_backward_branch_quota; fn_entry->analyzed_executable.fn_entry = fn_entry; @@ -2784,6 +2783,7 @@ static void resolve_decl_fn(CodeGen *g, TldFn *tld_fn) { } } } else { + fn_table_entry->inferred_async_node = inferred_async_none; g->external_prototypes.put_unique(tld_fn->base.name, &tld_fn->base); } @@ -2805,14 +2805,11 @@ static void resolve_decl_fn(CodeGen *g, TldFn *tld_fn) { g->fn_defs.append(fn_table_entry); } - switch (fn_table_entry->type_entry->data.fn.fn_type_id.cc) { - case CallingConventionAsync: - fn_table_entry->inferred_async_node = fn_table_entry->proto_node; - break; - case CallingConventionUnspecified: - break; - default: - fn_table_entry->inferred_async_node = inferred_async_none; + // if the calling convention implies that it cannot be async, we save that for later + // and leave the value to be nullptr to indicate that we have not emitted possible + // compile errors for improperly calling async functions. 
+ if (fn_table_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync) { + fn_table_entry->inferred_async_node = fn_table_entry->proto_node; } if (scope_is_root_decls(tld_fn->base.parent_scope) && @@ -3801,6 +3798,25 @@ bool fn_is_async(ZigFn *fn) { return fn->inferred_async_node != inferred_async_none; } +static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) { + assert(fn->inferred_async_node != nullptr); + assert(fn->inferred_async_node != inferred_async_checking); + assert(fn->inferred_async_node != inferred_async_none); + if (fn->inferred_async_fn != nullptr) { + ErrorMsg *new_msg = add_error_note(g, msg, fn->inferred_async_node, + buf_sprintf("async function call here")); + return add_async_error_notes(g, new_msg, fn->inferred_async_fn); + } else if (fn->inferred_async_node->type == NodeTypeFnProto) { + add_error_note(g, msg, fn->inferred_async_node, + buf_sprintf("async calling convention here")); + } else if (fn->inferred_async_node->type == NodeTypeSuspend) { + add_error_note(g, msg, fn->inferred_async_node, + buf_sprintf("suspends here")); + } else { + zig_unreachable(); + } +} + // This function resolves functions being inferred async. 
static void analyze_fn_async(CodeGen *g, ZigFn *fn) { if (fn->inferred_async_node == inferred_async_checking) { @@ -3816,6 +3832,13 @@ static void analyze_fn_async(CodeGen *g, ZigFn *fn) { return; } fn->inferred_async_node = inferred_async_checking; + + bool must_not_be_async = false; + if (fn->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified) { + must_not_be_async = true; + fn->inferred_async_node = inferred_async_none; + } + for (size_t i = 0; i < fn->call_list.length; i += 1) { FnCall *call = &fn->call_list.at(i); if (call->callee->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified) @@ -3828,6 +3851,15 @@ static void analyze_fn_async(CodeGen *g, ZigFn *fn) { } if (fn_is_async(call->callee)) { fn->inferred_async_node = call->source_node; + fn->inferred_async_fn = call->callee; + if (must_not_be_async) { + ErrorMsg *msg = add_node_error(g, fn->proto_node, + buf_sprintf("function with calling convention '%s' cannot be async", + calling_convention_name(fn->type_entry->data.fn.fn_type_id.cc))); + add_async_error_notes(g, msg, fn); + fn->anal_state = FnAnalStateInvalid; + return; + } resolve_async_fn_frame(g, fn); return; } @@ -4451,7 +4483,7 @@ bool generic_fn_type_id_eql(GenericFnTypeId *a, GenericFnTypeId *b) { if (a_val->special != ConstValSpecialRuntime && b_val->special != ConstValSpecialRuntime) { assert(a_val->special == ConstValSpecialStatic); assert(b_val->special == ConstValSpecialStatic); - if (!const_values_equal(a->fn_entry->codegen, a_val, b_val)) { + if (!const_values_equal(a->codegen, a_val, b_val)) { return false; } } else { diff --git a/src/analyze.hpp b/src/analyze.hpp index 50e7b72309..47ff4344ba 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -11,9 +11,9 @@ #include "all_types.hpp" void semantic_analyze(CodeGen *g); -ErrorMsg *add_node_error(CodeGen *g, AstNode *node, Buf *msg); +ErrorMsg *add_node_error(CodeGen *g, const AstNode *node, Buf *msg); ErrorMsg *add_token_error(CodeGen *g, ZigType *owner, Token 
*token, Buf *msg); -ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, AstNode *node, Buf *msg); +ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, const AstNode *node, Buf *msg); void emit_error_notes_for_ref_stack(CodeGen *g, ErrorMsg *msg); ZigType *new_type_table_entry(ZigTypeId id); ZigType *get_coro_frame_type(CodeGen *g, ZigFn *fn); diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 40ce8d304b..c4948135a0 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -2,6 +2,24 @@ const tests = @import("tests.zig"); const builtin = @import("builtin"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "function with ccc indirectly calling async function", + \\export fn entry() void { + \\ foo(); + \\} + \\fn foo() void { + \\ bar(); + \\} + \\fn bar() void { + \\ suspend; + \\} + , + "tmp.zig:1:1: error: function with calling convention 'ccc' cannot be async", + "tmp.zig:2:8: note: async function call here", + "tmp.zig:5:8: note: async function call here", + "tmp.zig:8:5: note: suspends here", + ); + cases.add( "capture group on switch prong with incompatible payload types", \\const Union = union(enum) { From e220812f2f0fe2becd570308971f98e0290835db Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 24 Jul 2019 02:59:51 -0400 Subject: [PATCH 015/125] implement local variables in async functions --- src/analyze.cpp | 23 ++++++++++- src/codegen.cpp | 64 ++++++++++++++++++++--------- test/stage1/behavior/coroutines.zig | 30 ++++++++++++++ 3 files changed, 97 insertions(+), 20 deletions(-) diff --git a/src/analyze.cpp b/src/analyze.cpp index fe86c613f3..957e61b198 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1911,11 +1911,32 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { } else { param_name = buf_sprintf("arg%" ZIG_PRI_usize "", arg_i); } - ZigType *param_type = param_info[arg_i].type; + ZigType *param_type = param_info->type; 
field_names.append(buf_ptr(param_name)); field_types.append(param_type); } + for (size_t alloca_i = 0; alloca_i < fn->alloca_gen_list.length; alloca_i += 1) { + IrInstructionAllocaGen *instruction = fn->alloca_gen_list.at(alloca_i); + ZigType *ptr_type = instruction->base.value.type; + assert(ptr_type->id == ZigTypeIdPointer); + ZigType *child_type = ptr_type->data.pointer.child_type; + if (!type_has_bits(child_type)) + continue; + if (instruction->base.ref_count == 0) + continue; + if (instruction->base.value.special != ConstValSpecialRuntime) { + if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special != + ConstValSpecialRuntime) + { + continue; + } + } + field_names.append(instruction->name_hint); + field_types.append(child_type); + } + + assert(field_names.length == field_types.length); frame_type->data.frame.locals_struct = get_struct_type(g, buf_ptr(&frame_type->name), field_names.items, field_types.items, field_names.length); diff --git a/src/codegen.cpp b/src/codegen.cpp index 6c0bedee2d..534b97232e 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -6174,6 +6174,7 @@ static void do_code_gen(CodeGen *g) { clear_debug_source_node(g); bool is_async = fn_is_async(fn_table_entry); + size_t async_var_index = coro_arg_start + (type_has_bits(fn_type_id->return_type) ? 
1 : 0); if (want_sret || is_async) { g->cur_ret_ptr = LLVMGetParam(fn, 0); @@ -6206,25 +6207,27 @@ static void do_code_gen(CodeGen *g) { g->cur_err_ret_trace_val_stack = nullptr; } - // allocate temporary stack data - for (size_t alloca_i = 0; alloca_i < fn_table_entry->alloca_gen_list.length; alloca_i += 1) { - IrInstructionAllocaGen *instruction = fn_table_entry->alloca_gen_list.at(alloca_i); - ZigType *ptr_type = instruction->base.value.type; - assert(ptr_type->id == ZigTypeIdPointer); - ZigType *child_type = ptr_type->data.pointer.child_type; - if (!type_has_bits(child_type)) - continue; - if (instruction->base.ref_count == 0) - continue; - if (instruction->base.value.special != ConstValSpecialRuntime) { - if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special != - ConstValSpecialRuntime) - { + if (!is_async) { + // allocate temporary stack data + for (size_t alloca_i = 0; alloca_i < fn_table_entry->alloca_gen_list.length; alloca_i += 1) { + IrInstructionAllocaGen *instruction = fn_table_entry->alloca_gen_list.at(alloca_i); + ZigType *ptr_type = instruction->base.value.type; + assert(ptr_type->id == ZigTypeIdPointer); + ZigType *child_type = ptr_type->data.pointer.child_type; + if (!type_has_bits(child_type)) continue; + if (instruction->base.ref_count == 0) + continue; + if (instruction->base.value.special != ConstValSpecialRuntime) { + if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special != + ConstValSpecialRuntime) + { + continue; + } } + instruction->base.llvm_value = build_alloca(g, child_type, instruction->name_hint, + get_ptr_align(g, ptr_type)); } - instruction->base.llvm_value = build_alloca(g, child_type, instruction->name_hint, - get_ptr_align(g, ptr_type)); } ZigType *import = get_scope_import(&fn_table_entry->fndef_scope->base); @@ -6263,9 +6266,9 @@ static void do_code_gen(CodeGen *g) { fn_walk_var.data.vars.var = var; iter_function_params_c_abi(g, fn_table_entry->type_entry, &fn_walk_var, 
var->src_arg_index); } else if (is_async) { - size_t ret_1_or_0 = type_has_bits(fn_type_id->return_type) ? 1 : 0; - var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, - coro_arg_start + ret_1_or_0 + var_i, ""); + var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, async_var_index, + buf_ptr(&var->name)); + async_var_index += 1; if (var->decl_node) { var->di_loc_var = ZigLLVMCreateAutoVariable(g->dbuilder, get_di_scope(g, var->parent_scope), buf_ptr(&var->name), import->data.structure.root_struct->di_file, @@ -6299,6 +6302,29 @@ static void do_code_gen(CodeGen *g) { } } + if (is_async) { + for (size_t alloca_i = 0; alloca_i < fn_table_entry->alloca_gen_list.length; alloca_i += 1) { + IrInstructionAllocaGen *instruction = fn_table_entry->alloca_gen_list.at(alloca_i); + ZigType *ptr_type = instruction->base.value.type; + assert(ptr_type->id == ZigTypeIdPointer); + ZigType *child_type = ptr_type->data.pointer.child_type; + if (!type_has_bits(child_type)) + continue; + if (instruction->base.ref_count == 0) + continue; + if (instruction->base.value.special != ConstValSpecialRuntime) { + if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special != + ConstValSpecialRuntime) + { + continue; + } + } + instruction->base.llvm_value = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, async_var_index, + instruction->name_hint); + async_var_index += 1; + } + } + // finishing error return trace setup. we have to do this after all the allocas. 
if (have_err_ret_trace_stack) { ZigType *usize = g->builtin_types.entry_usize; diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index 4ecd4efd13..4f1cc84064 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -47,6 +47,36 @@ test "suspend at end of function" { }; S.doTheTest(); } + +test "local variable in async function" { + const S = struct { + var x: i32 = 0; + + fn doTheTest() void { + expect(x == 0); + const p = async add(1, 2); + expect(x == 0); + resume p; + expect(x == 0); + resume p; + expect(x == 0); + resume p; + expect(x == 3); + } + + fn add(a: i32, b: i32) void { + var accum: i32 = 0; + suspend; + accum += a; + suspend; + accum += b; + suspend; + x = accum; + } + }; + S.doTheTest(); +} + //test "coroutine suspend, resume" { // seq('a'); // const p = try async testAsyncSeq(); From ead2d32be871411685f846e604ec7e4253b9f25a Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 25 Jul 2019 00:03:06 -0400 Subject: [PATCH 016/125] calling an inferred async function --- src/all_types.hpp | 17 ++- src/analyze.cpp | 220 +++++++++++++++++----------- src/codegen.cpp | 72 +++++++-- src/ir.cpp | 26 ++-- src/zig_llvm.cpp | 4 + src/zig_llvm.h | 1 + test/stage1/behavior/coroutines.zig | 16 ++ 7 files changed, 238 insertions(+), 118 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index a68f19a877..d67356b178 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -35,6 +35,7 @@ struct ConstExprValue; struct IrInstruction; struct IrInstructionCast; struct IrInstructionAllocaGen; +struct IrInstructionCallGen; struct IrBasicBlock; struct ScopeDecls; struct ZigWindowsSDK; @@ -1336,11 +1337,6 @@ struct GlobalExport { GlobalLinkageId linkage; }; -struct FnCall { - AstNode *source_node; - ZigFn *callee; -}; - struct ZigFn { LLVMValueRef llvm_value; const char *llvm_name; @@ -1387,7 +1383,7 @@ struct ZigFn { ZigFn *inferred_async_fn; ZigList export_list; - ZigList call_list; + 
ZigList call_list; LLVMValueRef valgrind_client_request_array; LLVMBasicBlockRef preamble_llvm_block; @@ -2585,6 +2581,8 @@ struct IrInstructionCallGen { size_t arg_count; IrInstruction **args; IrInstruction *result_loc; + IrInstruction *frame_result_loc; + IrBasicBlock *resume_block; IrInstruction *new_stack; FnInline fn_inline; @@ -3645,7 +3643,12 @@ static const size_t err_union_err_index = 0; static const size_t err_union_payload_index = 1; static const size_t coro_resume_index_index = 0; -static const size_t coro_arg_start = 1; +static const size_t coro_fn_ptr_index = 1; +static const size_t coro_awaiter_index = 2; +static const size_t coro_arg_start = 3; + +// one for the GetSize block, one for the Entry block, resume blocks are indexed after that. +static const size_t coro_extra_resume_block_count = 2; // TODO call graph analysis to find out what this number needs to be for every function // MUST BE A POWER OF TWO. diff --git a/src/analyze.cpp b/src/analyze.cpp index 957e61b198..c8e02a4771 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1869,80 +1869,6 @@ static Error resolve_union_type(CodeGen *g, ZigType *union_type) { return ErrorNone; } -static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { - if (frame_type->data.frame.locals_struct != nullptr) - return ErrorNone; - - ZigFn *fn = frame_type->data.frame.fn; - switch (fn->anal_state) { - case FnAnalStateInvalid: - return ErrorSemanticAnalyzeFail; - case FnAnalStateComplete: - break; - case FnAnalStateReady: - analyze_fn_body(g, fn); - if (fn->anal_state == FnAnalStateInvalid) - return ErrorSemanticAnalyzeFail; - break; - case FnAnalStateProbing: - add_node_error(g, fn->proto_node, - buf_sprintf("cannot resolve '%s': function not fully analyzed yet", - buf_ptr(&frame_type->name))); - return ErrorSemanticAnalyzeFail; - } - // TODO iterate over fn->alloca_gen_list - ZigList field_types = {}; - ZigList field_names = {}; - - field_names.append("resume_index"); - 
field_types.append(g->builtin_types.entry_usize); - - FnTypeId *fn_type_id = &fn->type_entry->data.fn.fn_type_id; - field_names.append("result"); - field_types.append(fn_type_id->return_type); - - for (size_t arg_i = 0; arg_i < fn_type_id->param_count; arg_i += 1) { - FnTypeParamInfo *param_info = &fn_type_id->param_info[arg_i]; - AstNode *param_decl_node = get_param_decl_node(fn, arg_i); - Buf *param_name; - bool is_var_args = param_decl_node && param_decl_node->data.param_decl.is_var_args; - if (param_decl_node && !is_var_args) { - param_name = param_decl_node->data.param_decl.name; - } else { - param_name = buf_sprintf("arg%" ZIG_PRI_usize "", arg_i); - } - ZigType *param_type = param_info->type; - field_names.append(buf_ptr(param_name)); - field_types.append(param_type); - } - - for (size_t alloca_i = 0; alloca_i < fn->alloca_gen_list.length; alloca_i += 1) { - IrInstructionAllocaGen *instruction = fn->alloca_gen_list.at(alloca_i); - ZigType *ptr_type = instruction->base.value.type; - assert(ptr_type->id == ZigTypeIdPointer); - ZigType *child_type = ptr_type->data.pointer.child_type; - if (!type_has_bits(child_type)) - continue; - if (instruction->base.ref_count == 0) - continue; - if (instruction->base.value.special != ConstValSpecialRuntime) { - if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special != - ConstValSpecialRuntime) - { - continue; - } - } - field_names.append(instruction->name_hint); - field_types.append(child_type); - } - - - assert(field_names.length == field_types.length); - frame_type->data.frame.locals_struct = get_struct_type(g, buf_ptr(&frame_type->name), - field_names.items, field_types.items, field_names.length); - return ErrorNone; -} - static bool type_is_valid_extern_enum_tag(CodeGen *g, ZigType *ty) { // Only integer types are allowed by the C ABI if(ty->id != ZigTypeIdInt) @@ -3861,18 +3787,24 @@ static void analyze_fn_async(CodeGen *g, ZigFn *fn) { } for (size_t i = 0; i < fn->call_list.length; i += 1) { - 
FnCall *call = &fn->call_list.at(i); - if (call->callee->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified) + IrInstructionCallGen *call = fn->call_list.at(i); + ZigFn *callee = call->fn_entry; + if (callee == nullptr) { + // TODO function pointer call here, could be anything continue; - assert(call->callee->anal_state == FnAnalStateComplete); - analyze_fn_async(g, call->callee); - if (call->callee->anal_state == FnAnalStateInvalid) { + } + + if (callee->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified) + continue; + assert(callee->anal_state == FnAnalStateComplete); + analyze_fn_async(g, callee); + if (callee->anal_state == FnAnalStateInvalid) { fn->anal_state = FnAnalStateInvalid; return; } - if (fn_is_async(call->callee)) { - fn->inferred_async_node = call->source_node; - fn->inferred_async_fn = call->callee; + if (fn_is_async(callee)) { + fn->inferred_async_node = call->base.source_node; + fn->inferred_async_fn = callee; if (must_not_be_async) { ErrorMsg *msg = add_node_error(g, fn->proto_node, buf_sprintf("function with calling convention '%s' cannot be async", @@ -5147,6 +5079,127 @@ Error ensure_complete_type(CodeGen *g, ZigType *type_entry) { return type_resolve(g, type_entry, ResolveStatusSizeKnown); } +static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { + if (frame_type->data.frame.locals_struct != nullptr) + return ErrorNone; + + ZigFn *fn = frame_type->data.frame.fn; + switch (fn->anal_state) { + case FnAnalStateInvalid: + return ErrorSemanticAnalyzeFail; + case FnAnalStateComplete: + break; + case FnAnalStateReady: + analyze_fn_body(g, fn); + if (fn->anal_state == FnAnalStateInvalid) + return ErrorSemanticAnalyzeFail; + break; + case FnAnalStateProbing: + add_node_error(g, fn->proto_node, + buf_sprintf("cannot resolve '%s': function not fully analyzed yet", + buf_ptr(&frame_type->name))); + return ErrorSemanticAnalyzeFail; + } + + for (size_t i = 0; i < fn->call_list.length; i += 1) { + 
IrInstructionCallGen *call = fn->call_list.at(i); + ZigFn *callee = call->fn_entry; + assert(callee != nullptr); + + analyze_fn_body(g, callee); + if (callee->anal_state == FnAnalStateInvalid) { + frame_type->data.frame.locals_struct = g->builtin_types.entry_invalid; + return ErrorSemanticAnalyzeFail; + } + analyze_fn_async(g, callee); + if (!fn_is_async(callee)) + continue; + + IrBasicBlock *new_resume_block = allocate(1); + new_resume_block->name_hint = "CallResume"; + new_resume_block->resume_index = fn->resume_blocks.length + coro_extra_resume_block_count; + fn->resume_blocks.append(new_resume_block); + call->resume_block = new_resume_block; + fn->analyzed_executable.basic_block_list.append(new_resume_block); + + ZigType *callee_frame_type = get_coro_frame_type(g, callee); + + IrInstructionAllocaGen *alloca_gen = allocate(1); + alloca_gen->base.id = IrInstructionIdAllocaGen; + alloca_gen->base.source_node = call->base.source_node; + alloca_gen->base.scope = call->base.scope; + alloca_gen->base.value.type = get_pointer_to_type(g, callee_frame_type, false); + alloca_gen->base.ref_count = 1; + alloca_gen->name_hint = ""; + fn->alloca_gen_list.append(alloca_gen); + call->frame_result_loc = &alloca_gen->base; + } + + ZigList field_types = {}; + ZigList field_names = {}; + + field_names.append("resume_index"); + field_types.append(g->builtin_types.entry_usize); + + field_names.append("fn_ptr"); + field_types.append(fn->type_entry); + + field_names.append("awaiter"); + field_types.append(g->builtin_types.entry_usize); + + FnTypeId *fn_type_id = &fn->type_entry->data.fn.fn_type_id; + ZigType *ptr_return_type = get_pointer_to_type(g, fn_type_id->return_type, false); + field_names.append("result_ptr"); + field_types.append(ptr_return_type); + + field_names.append("result"); + field_types.append(fn_type_id->return_type); + + for (size_t arg_i = 0; arg_i < fn_type_id->param_count; arg_i += 1) { + FnTypeParamInfo *param_info = &fn_type_id->param_info[arg_i]; + AstNode 
*param_decl_node = get_param_decl_node(fn, arg_i); + Buf *param_name; + bool is_var_args = param_decl_node && param_decl_node->data.param_decl.is_var_args; + if (param_decl_node && !is_var_args) { + param_name = param_decl_node->data.param_decl.name; + } else { + param_name = buf_sprintf("arg%" ZIG_PRI_usize "", arg_i); + } + ZigType *param_type = param_info->type; + field_names.append(buf_ptr(param_name)); + field_types.append(param_type); + } + + for (size_t alloca_i = 0; alloca_i < fn->alloca_gen_list.length; alloca_i += 1) { + IrInstructionAllocaGen *instruction = fn->alloca_gen_list.at(alloca_i); + ZigType *ptr_type = instruction->base.value.type; + assert(ptr_type->id == ZigTypeIdPointer); + ZigType *child_type = ptr_type->data.pointer.child_type; + if (!type_has_bits(child_type)) + continue; + if (instruction->base.ref_count == 0) + continue; + if (instruction->base.value.special != ConstValSpecialRuntime) { + if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special != + ConstValSpecialRuntime) + { + continue; + } + } + field_names.append(instruction->name_hint); + field_types.append(child_type); + } + + + assert(field_names.length == field_types.length); + frame_type->data.frame.locals_struct = get_struct_type(g, buf_ptr(&frame_type->name), + field_names.items, field_types.items, field_names.length); + frame_type->abi_size = frame_type->data.frame.locals_struct->abi_size; + frame_type->abi_align = frame_type->data.frame.locals_struct->abi_align; + frame_type->size_in_bits = frame_type->data.frame.locals_struct->size_in_bits; + return ErrorNone; +} + Error type_resolve(CodeGen *g, ZigType *ty, ResolveStatus status) { if (type_is_invalid(ty)) return ErrorSemanticAnalyzeFail; @@ -7343,9 +7396,6 @@ static void resolve_llvm_types_coro_frame(CodeGen *g, ZigType *frame_type, Resol resolve_llvm_types_struct(g, frame_type->data.frame.locals_struct, wanted_resolve_status); frame_type->llvm_type = frame_type->data.frame.locals_struct->llvm_type; 
frame_type->llvm_di_type = frame_type->data.frame.locals_struct->llvm_di_type; - frame_type->abi_size = frame_type->data.frame.locals_struct->abi_size; - frame_type->abi_align = frame_type->data.frame.locals_struct->abi_align; - frame_type->size_in_bits = frame_type->data.frame.locals_struct->size_in_bits; } static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_resolve_status) { diff --git a/src/codegen.cpp b/src/codegen.cpp index 534b97232e..34f4aa1cc4 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -3324,13 +3324,16 @@ static void set_call_instr_sret(CodeGen *g, LLVMValueRef call_instr) { static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstructionCallGen *instruction) { LLVMValueRef fn_val; ZigType *fn_type; + bool callee_is_async; if (instruction->fn_entry) { fn_val = fn_llvm_value(g, instruction->fn_entry); fn_type = instruction->fn_entry->type_entry; + callee_is_async = fn_is_async(instruction->fn_entry); } else { assert(instruction->fn_ref); fn_val = ir_llvm_value(g, instruction->fn_ref); fn_type = instruction->fn_ref->value.type; + callee_is_async = fn_type->data.fn.fn_type_id.cc == CallingConventionAsync; } FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; @@ -3345,17 +3348,47 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr bool is_var_args = fn_type_id->is_var_args; ZigList gen_param_values = {}; LLVMValueRef result_loc = instruction->result_loc ? 
ir_llvm_value(g, instruction->result_loc) : nullptr; + LLVMValueRef zero = LLVMConstNull(g->builtin_types.entry_usize->llvm_type); + LLVMValueRef frame_result_loc; + LLVMValueRef awaiter_init_val; + LLVMValueRef ret_ptr; if (instruction->is_async) { - assert(result_loc != nullptr); + frame_result_loc = result_loc; + awaiter_init_val = zero; + if (ret_has_bits) { + ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start + 1, ""); + } + } else if (callee_is_async) { + frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc); + awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr, + g->builtin_types.entry_usize->llvm_type, ""); // caller's own frame pointer + if (ret_has_bits) { + ret_ptr = result_loc; + } + } + if (instruction->is_async || callee_is_async) { + assert(frame_result_loc != nullptr); assert(instruction->fn_entry != nullptr); - LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, result_loc, coro_resume_index_index, ""); - LLVMValueRef zero = LLVMConstNull(g->builtin_types.entry_usize->llvm_type); + LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_resume_index_index, ""); LLVMBuildStore(g->builder, zero, resume_index_ptr); + LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_fn_ptr_index, ""); + LLVMValueRef bitcasted_fn_val = LLVMBuildBitCast(g->builder, fn_val, + LLVMGetElementType(LLVMTypeOf(fn_ptr_ptr)), ""); + LLVMBuildStore(g->builder, bitcasted_fn_val, fn_ptr_ptr); if (prefix_arg_err_ret_stack) { zig_panic("TODO"); } - } else { + + LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_awaiter_index, ""); + LLVMBuildStore(g->builder, awaiter_init_val, awaiter_ptr); + + if (ret_has_bits) { + LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start, ""); + LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr); + } + } + if (!instruction->is_async && !callee_is_async) { if 
(first_arg_ret) { gen_param_values.append(result_loc); } @@ -3386,14 +3419,28 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMCallConv llvm_cc = get_llvm_cc(g, cc); LLVMValueRef result; - if (instruction->is_async) { - size_t ret_1_or_0 = type_has_bits(fn_type->data.fn.fn_type_id.return_type) ? 1 : 0; + if (instruction->is_async || callee_is_async) { + size_t ret_2_or_0 = type_has_bits(fn_type->data.fn.fn_type_id.return_type) ? 2 : 0; for (size_t arg_i = 0; arg_i < gen_param_values.length; arg_i += 1) { - LLVMValueRef arg_ptr = LLVMBuildStructGEP(g->builder, result_loc, - coro_arg_start + ret_1_or_0 + arg_i, ""); + LLVMValueRef arg_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, + coro_arg_start + ret_2_or_0 + arg_i, ""); LLVMBuildStore(g->builder, gen_param_values.at(arg_i), arg_ptr); } - ZigLLVMBuildCall(g->builder, fn_val, &result_loc, 1, llvm_cc, fn_inline, ""); + } + if (instruction->is_async) { + ZigLLVMBuildCall(g->builder, fn_val, &frame_result_loc, 1, llvm_cc, fn_inline, ""); + return nullptr; + } else if (callee_is_async) { + LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_resume_index_index, ""); + LLVMValueRef new_resume_index = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, + instruction->resume_block->resume_index, false); + LLVMBuildStore(g->builder, new_resume_index, resume_index_ptr); + + LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, fn_val, &frame_result_loc, 1, llvm_cc, fn_inline, ""); + ZigLLVMSetTailCall(call_inst); + LLVMBuildRet(g->builder, call_inst); + + LLVMPositionBuilderAtEnd(g->builder, instruction->resume_block->llvm_block); return nullptr; } @@ -6174,7 +6221,7 @@ static void do_code_gen(CodeGen *g) { clear_debug_source_node(g); bool is_async = fn_is_async(fn_table_entry); - size_t async_var_index = coro_arg_start + (type_has_bits(fn_type_id->return_type) ? 
1 : 0); + size_t async_var_index = coro_arg_start + (type_has_bits(fn_type_id->return_type) ? 2 : 0); if (want_sret || is_async) { g->cur_ret_ptr = LLVMGetParam(fn, 0); @@ -6385,8 +6432,9 @@ static void do_code_gen(CodeGen *g) { LLVMAddCase(switch_instr, one, get_size_block); for (size_t resume_i = 0; resume_i < fn_table_entry->resume_blocks.length; resume_i += 1) { - LLVMValueRef case_value = LLVMConstInt(usize_type_ref, resume_i + 2, false); - LLVMAddCase(switch_instr, case_value, fn_table_entry->resume_blocks.at(resume_i)->llvm_block); + IrBasicBlock *resume_block = fn_table_entry->resume_blocks.at(resume_i); + LLVMValueRef case_value = LLVMConstInt(usize_type_ref, resume_block->resume_index, false); + LLVMAddCase(switch_instr, case_value, resume_block->llvm_block); } } else { // create debug variable declarations for parameters diff --git a/src/ir.cpp b/src/ir.cpp index 0cc68eaa55..cb4a90c310 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -1385,7 +1385,7 @@ static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *s return &call_instruction->base; } -static IrInstruction *ir_build_call_gen(IrAnalyze *ira, IrInstruction *source_instruction, +static IrInstructionCallGen *ir_build_call_gen(IrAnalyze *ira, IrInstruction *source_instruction, ZigFn *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args, FnInline fn_inline, bool is_async, IrInstruction *new_stack, IrInstruction *result_loc, ZigType *return_type) @@ -1408,7 +1408,7 @@ static IrInstruction *ir_build_call_gen(IrAnalyze *ira, IrInstruction *source_in if (new_stack != nullptr) ir_ref_instruction(new_stack, ira->new_irb.current_basic_block); if (result_loc != nullptr) ir_ref_instruction(result_loc, ira->new_irb.current_basic_block); - return &call_instruction->base; + return call_instruction; } static IrInstruction *ir_build_phi(IrBuilder *irb, Scope *scope, AstNode *source_node, @@ -14650,8 +14650,8 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, 
IrInstructionCallSrc if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc))) { return result_loc; } - return ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, arg_count, - casted_args, FnInlineAuto, true, nullptr, result_loc, frame_type); + return &ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, arg_count, + casted_args, FnInlineAuto, true, nullptr, result_loc, frame_type)->base; } static bool ir_analyze_fn_call_inline_arg(IrAnalyze *ira, AstNode *fn_proto_node, IrInstruction *arg, Scope **exec_scope, size_t *next_proto_i) @@ -15387,15 +15387,16 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c if (impl_fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) { parent_fn_entry->inferred_async_node = fn_ref->source_node; } - parent_fn_entry->call_list.append({call_instruction->base.source_node, impl_fn}); } - IrInstruction *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base, + IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base, impl_fn, nullptr, impl_param_count, casted_args, fn_inline, call_instruction->is_async, casted_new_stack, result_loc, impl_fn_type_id->return_type); - return ir_finish_anal(ira, new_call_instruction); + parent_fn_entry->call_list.append(new_call_instruction); + + return ir_finish_anal(ira, &new_call_instruction->base); } ZigFn *parent_fn_entry = exec_fn_entry(ira->new_irb.exec); @@ -15469,9 +15470,6 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c if (fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) { parent_fn_entry->inferred_async_node = fn_ref->source_node; } - if (fn_entry != nullptr) { - parent_fn_entry->call_list.append({call_instruction->base.source_node, fn_entry}); - } } if (call_instruction->is_async) { @@ -15491,10 +15489,11 @@ static 
IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c result_loc = nullptr; } - IrInstruction *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, + IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, call_param_count, casted_args, fn_inline, false, casted_new_stack, result_loc, return_type); - return ir_finish_anal(ira, new_call_instruction); + parent_fn_entry->call_list.append(new_call_instruction); + return ir_finish_anal(ira, &new_call_instruction->base); } static IrInstruction *ir_analyze_instruction_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction) { @@ -24154,8 +24153,7 @@ static IrInstruction *ir_analyze_instruction_suspend_br(IrAnalyze *ira, IrInstru ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec); ir_assert(fn_entry != nullptr, &instruction->base); - // +2 - one for the GetSize block, one for the Entry block, resume blocks are indexed after that. - new_bb->resume_index = fn_entry->resume_blocks.length + 2; + new_bb->resume_index = fn_entry->resume_blocks.length + coro_extra_resume_block_count; fn_entry->resume_blocks.append(new_bb); if (fn_entry->inferred_async_node == nullptr) { diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp index c51c9e1a50..b52edabe65 100644 --- a/src/zig_llvm.cpp +++ b/src/zig_llvm.cpp @@ -898,6 +898,10 @@ LLVMValueRef ZigLLVMBuildAShrExact(LLVMBuilderRef builder, LLVMValueRef LHS, LLV return wrap(unwrap(builder)->CreateAShr(unwrap(LHS), unwrap(RHS), name, true)); } +void ZigLLVMSetTailCall(LLVMValueRef Call) { + unwrap(Call)->setTailCallKind(CallInst::TCK_MustTail); +} + class MyOStream: public raw_ostream { public: diff --git a/src/zig_llvm.h b/src/zig_llvm.h index 8b7b0775f7..2a2ab567a6 100644 --- a/src/zig_llvm.h +++ b/src/zig_llvm.h @@ -211,6 +211,7 @@ ZIG_EXTERN_C LLVMValueRef ZigLLVMInsertDeclare(struct ZigLLVMDIBuilder *dibuilde ZIG_EXTERN_C struct ZigLLVMDILocation *ZigLLVMGetDebugLoc(unsigned 
line, unsigned col, struct ZigLLVMDIScope *scope); ZIG_EXTERN_C void ZigLLVMSetFastMath(LLVMBuilderRef builder_wrapped, bool on_state); +ZIG_EXTERN_C void ZigLLVMSetTailCall(LLVMValueRef Call); ZIG_EXTERN_C void ZigLLVMAddFunctionAttr(LLVMValueRef fn, const char *attr_name, const char *attr_value); ZIG_EXTERN_C void ZigLLVMAddFunctionAttrCold(LLVMValueRef fn); diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index 4f1cc84064..7188e7af8c 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -77,6 +77,22 @@ test "local variable in async function" { S.doTheTest(); } +test "calling an inferred async function" { + const S = struct { + fn doTheTest() void { + const p = async first(); + } + + fn first() void { + other(); + } + fn other() void { + suspend; + } + }; + S.doTheTest(); +} + //test "coroutine suspend, resume" { // seq('a'); // const p = try async testAsyncSeq(); From 70bced5dcffccc2f8029d8c3d7f2d18b48d993f5 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 25 Jul 2019 01:47:56 -0400 Subject: [PATCH 017/125] implement `@frame` and `@Frame` --- BRANCH_TODO | 4 ++ src/all_types.hpp | 16 +++++-- src/codegen.cpp | 16 +++---- src/ir.cpp | 71 +++++++++++++++++++++-------- src/ir_print.cpp | 17 +++++-- test/stage1/behavior/coroutines.zig | 8 ++++ 6 files changed, 96 insertions(+), 36 deletions(-) create mode 100644 BRANCH_TODO diff --git a/BRANCH_TODO b/BRANCH_TODO new file mode 100644 index 0000000000..a22620a626 --- /dev/null +++ b/BRANCH_TODO @@ -0,0 +1,4 @@ + * await + * await of a non async function + * async call on a non async function + * safety for resuming when it is awaiting diff --git a/src/all_types.hpp b/src/all_types.hpp index d67356b178..ebdde4642e 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1435,8 +1435,6 @@ enum BuiltinFnId { BuiltinFnIdErrName, BuiltinFnIdBreakpoint, BuiltinFnIdReturnAddress, - BuiltinFnIdFrameAddress, - BuiltinFnIdHandle, 
BuiltinFnIdEmbedFile, BuiltinFnIdCmpxchgWeak, BuiltinFnIdCmpxchgStrong, @@ -1507,6 +1505,9 @@ enum BuiltinFnId { BuiltinFnIdAtomicLoad, BuiltinFnIdHasDecl, BuiltinFnIdUnionInit, + BuiltinFnIdFrameAddress, + BuiltinFnIdFrameType, + BuiltinFnIdFrameHandle, }; struct BuiltinFnEntry { @@ -2252,7 +2253,8 @@ enum IrInstructionId { IrInstructionIdBreakpoint, IrInstructionIdReturnAddress, IrInstructionIdFrameAddress, - IrInstructionIdHandle, + IrInstructionIdFrameHandle, + IrInstructionIdFrameType, IrInstructionIdAlignOf, IrInstructionIdOverflowOp, IrInstructionIdTestErrSrc, @@ -3038,10 +3040,16 @@ struct IrInstructionFrameAddress { IrInstruction base; }; -struct IrInstructionHandle { +struct IrInstructionFrameHandle { IrInstruction base; }; +struct IrInstructionFrameType { + IrInstruction base; + + IrInstruction *fn; +}; + enum IrOverflowOp { IrOverflowOpAdd, IrOverflowOpSub, diff --git a/src/codegen.cpp b/src/codegen.cpp index 34f4aa1cc4..d6f19d6a43 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -4457,10 +4457,8 @@ static LLVMValueRef ir_render_frame_address(CodeGen *g, IrExecutable *executable return LLVMBuildPtrToInt(g->builder, ptr_val, g->builtin_types.entry_usize->llvm_type, ""); } -static LLVMValueRef ir_render_handle(CodeGen *g, IrExecutable *executable, - IrInstructionHandle *instruction) -{ - zig_panic("TODO @handle() codegen"); +static LLVMValueRef ir_render_handle(CodeGen *g, IrExecutable *executable, IrInstructionFrameHandle *instruction) { + return g->cur_ret_ptr; } static LLVMValueRef render_shl_with_overflow(CodeGen *g, IrInstructionOverflowOp *instruction) { @@ -5008,6 +5006,7 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, case IrInstructionIdBitCastSrc: case IrInstructionIdTestErrSrc: case IrInstructionIdUnionInitNamedField: + case IrInstructionIdFrameType: zig_unreachable(); case IrInstructionIdDeclVarGen: @@ -5086,8 +5085,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, 
return ir_render_return_address(g, executable, (IrInstructionReturnAddress *)instruction); case IrInstructionIdFrameAddress: return ir_render_frame_address(g, executable, (IrInstructionFrameAddress *)instruction); - case IrInstructionIdHandle: - return ir_render_handle(g, executable, (IrInstructionHandle *)instruction); + case IrInstructionIdFrameHandle: + return ir_render_handle(g, executable, (IrInstructionFrameHandle *)instruction); case IrInstructionIdOverflowOp: return ir_render_overflow_op(g, executable, (IrInstructionOverflowOp *)instruction); case IrInstructionIdTestErrGen: @@ -6754,8 +6753,6 @@ static BuiltinFnEntry *create_builtin_fn(CodeGen *g, BuiltinFnId id, const char static void define_builtin_fns(CodeGen *g) { create_builtin_fn(g, BuiltinFnIdBreakpoint, "breakpoint", 0); create_builtin_fn(g, BuiltinFnIdReturnAddress, "returnAddress", 0); - create_builtin_fn(g, BuiltinFnIdFrameAddress, "frameAddress", 0); - create_builtin_fn(g, BuiltinFnIdHandle, "handle", 0); create_builtin_fn(g, BuiltinFnIdMemcpy, "memcpy", 3); create_builtin_fn(g, BuiltinFnIdMemset, "memset", 3); create_builtin_fn(g, BuiltinFnIdSizeof, "sizeOf", 1); @@ -6856,6 +6853,9 @@ static void define_builtin_fns(CodeGen *g) { create_builtin_fn(g, BuiltinFnIdThis, "This", 0); create_builtin_fn(g, BuiltinFnIdHasDecl, "hasDecl", 2); create_builtin_fn(g, BuiltinFnIdUnionInit, "unionInit", 3); + create_builtin_fn(g, BuiltinFnIdFrameHandle, "frame", 0); + create_builtin_fn(g, BuiltinFnIdFrameType, "Frame", 1); + create_builtin_fn(g, BuiltinFnIdFrameAddress, "frameAddress", 0); } static const char *bool_to_str(bool b) { diff --git a/src/ir.cpp b/src/ir.cpp index cb4a90c310..93d559b446 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -755,8 +755,12 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameAddress *) return IrInstructionIdFrameAddress; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionHandle *) { - return IrInstructionIdHandle; +static constexpr 
IrInstructionId ir_instruction_id(IrInstructionFrameHandle *) { + return IrInstructionIdFrameHandle; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameType *) { + return IrInstructionIdFrameType; } static constexpr IrInstructionId ir_instruction_id(IrInstructionAlignOf *) { @@ -2362,7 +2366,16 @@ static IrInstruction *ir_build_frame_address(IrBuilder *irb, Scope *scope, AstNo } static IrInstruction *ir_build_handle(IrBuilder *irb, Scope *scope, AstNode *source_node) { - IrInstructionHandle *instruction = ir_build_instruction(irb, scope, source_node); + IrInstructionFrameHandle *instruction = ir_build_instruction(irb, scope, source_node); + return &instruction->base; +} + +static IrInstruction *ir_build_frame_type(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *fn) { + IrInstructionFrameType *instruction = ir_build_instruction(irb, scope, source_node); + instruction->fn = fn; + + ir_ref_instruction(fn, irb->current_basic_block); + return &instruction->base; } @@ -3358,11 +3371,6 @@ static ScopeDeferExpr *get_scope_defer_expr(Scope *scope) { return nullptr; } -static bool exec_is_async(IrExecutable *exec) { - ZigFn *fn_entry = exec_fn_entry(exec); - return fn_entry != nullptr && fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync; -} - static IrInstruction *ir_gen_async_return(IrBuilder *irb, Scope *scope, AstNode *node, IrInstruction *return_value, bool is_generated_code) { @@ -4278,8 +4286,6 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return irb->codegen->invalid_instruction; } - bool is_async = exec_is_async(irb->exec); - switch (builtin_fn->id) { case BuiltinFnIdInvalid: zig_unreachable(); @@ -4902,16 +4908,21 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return ir_lval_wrap(irb, scope, ir_build_return_address(irb, scope, node), lval, result_loc); case BuiltinFnIdFrameAddress: return ir_lval_wrap(irb, scope, 
ir_build_frame_address(irb, scope, node), lval, result_loc); - case BuiltinFnIdHandle: + case BuiltinFnIdFrameHandle: if (!irb->exec->fn_entry) { add_node_error(irb->codegen, node, buf_sprintf("@handle() called outside of function definition")); return irb->codegen->invalid_instruction; } - if (!is_async) { - add_node_error(irb->codegen, node, buf_sprintf("@handle() in non-async function")); - return irb->codegen->invalid_instruction; - } return ir_lval_wrap(irb, scope, ir_build_handle(irb, scope, node), lval, result_loc); + case BuiltinFnIdFrameType: { + AstNode *arg0_node = node->data.fn_call_expr.params.at(0); + IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); + if (arg0_value == irb->codegen->invalid_instruction) + return arg0_value; + + IrInstruction *frame_type = ir_build_frame_type(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, frame_type, lval, result_loc); + } case BuiltinFnIdAlignOf: { AstNode *arg0_node = node->data.fn_call_expr.params.at(0); @@ -21726,8 +21737,25 @@ static IrInstruction *ir_analyze_instruction_frame_address(IrAnalyze *ira, IrIns return result; } -static IrInstruction *ir_analyze_instruction_handle(IrAnalyze *ira, IrInstructionHandle *instruction) { - zig_panic("TODO anlayze @handle()"); +static IrInstruction *ir_analyze_instruction_frame_handle(IrAnalyze *ira, IrInstructionFrameHandle *instruction) { + ZigFn *fn = exec_fn_entry(ira->new_irb.exec); + ir_assert(fn != nullptr, &instruction->base); + + ZigType *frame_type = get_coro_frame_type(ira->codegen, fn); + ZigType *ptr_frame_type = get_pointer_to_type(ira->codegen, frame_type, false); + + IrInstruction *result = ir_build_handle(&ira->new_irb, instruction->base.scope, instruction->base.source_node); + result->value.type = ptr_frame_type; + return result; +} + +static IrInstruction *ir_analyze_instruction_frame_type(IrAnalyze *ira, IrInstructionFrameType *instruction) { + ZigFn *fn = ir_resolve_fn(ira, instruction->fn->child); + if (fn == nullptr) + 
return ira->codegen->invalid_instruction; + + ZigType *ty = get_coro_frame_type(ira->codegen, fn); + return ir_const_type(ira, &instruction->base, ty); } static IrInstruction *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstructionAlignOf *instruction) { @@ -24355,8 +24383,10 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction return ir_analyze_instruction_return_address(ira, (IrInstructionReturnAddress *)instruction); case IrInstructionIdFrameAddress: return ir_analyze_instruction_frame_address(ira, (IrInstructionFrameAddress *)instruction); - case IrInstructionIdHandle: - return ir_analyze_instruction_handle(ira, (IrInstructionHandle *)instruction); + case IrInstructionIdFrameHandle: + return ir_analyze_instruction_frame_handle(ira, (IrInstructionFrameHandle *)instruction); + case IrInstructionIdFrameType: + return ir_analyze_instruction_frame_type(ira, (IrInstructionFrameType *)instruction); case IrInstructionIdAlignOf: return ir_analyze_instruction_align_of(ira, (IrInstructionAlignOf *)instruction); case IrInstructionIdOverflowOp: @@ -24650,7 +24680,8 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdAlignOf: case IrInstructionIdReturnAddress: case IrInstructionIdFrameAddress: - case IrInstructionIdHandle: + case IrInstructionIdFrameHandle: + case IrInstructionIdFrameType: case IrInstructionIdTestErrSrc: case IrInstructionIdTestErrGen: case IrInstructionIdFnProto: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index e14647ea82..5b3bba2271 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -906,8 +906,14 @@ static void ir_print_frame_address(IrPrint *irp, IrInstructionFrameAddress *inst fprintf(irp->f, "@frameAddress()"); } -static void ir_print_handle(IrPrint *irp, IrInstructionHandle *instruction) { - fprintf(irp->f, "@handle()"); +static void ir_print_handle(IrPrint *irp, IrInstructionFrameHandle *instruction) { + fprintf(irp->f, "@frame()"); +} + +static void ir_print_frame_type(IrPrint 
*irp, IrInstructionFrameType *instruction) { + fprintf(irp->f, "@Frame("); + ir_print_other_instruction(irp, instruction->fn); + fprintf(irp->f, ")"); } static void ir_print_return_address(IrPrint *irp, IrInstructionReturnAddress *instruction) { @@ -1764,8 +1770,11 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdFrameAddress: ir_print_frame_address(irp, (IrInstructionFrameAddress *)instruction); break; - case IrInstructionIdHandle: - ir_print_handle(irp, (IrInstructionHandle *)instruction); + case IrInstructionIdFrameHandle: + ir_print_handle(irp, (IrInstructionFrameHandle *)instruction); + break; + case IrInstructionIdFrameType: + ir_print_frame_type(irp, (IrInstructionFrameType *)instruction); break; case IrInstructionIdAlignOf: ir_print_align_of(irp, (IrInstructionAlignOf *)instruction); diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index 7188e7af8c..33246f761f 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -79,15 +79,23 @@ test "local variable in async function" { test "calling an inferred async function" { const S = struct { + var x: i32 = 1; + var other_frame: *@Frame(other) = undefined; + fn doTheTest() void { const p = async first(); + expect(x == 1); + resume other_frame.*; + expect(x == 2); } fn first() void { other(); } fn other() void { + other_frame = @frame(); suspend; + x += 1; } }; S.doTheTest(); From 538c0cd2250e08aad07784355b402cfae6145507 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 25 Jul 2019 15:05:55 -0400 Subject: [PATCH 018/125] implement `@frameSize` --- BRANCH_TODO | 1 + src/all_types.hpp | 16 +++++++ src/codegen.cpp | 13 ++++++ src/ir.cpp | 70 +++++++++++++++++++++++++++-- src/ir_print.cpp | 20 +++++++++ test/stage1/behavior/coroutines.zig | 26 +++++++++++ 6 files changed, 142 insertions(+), 4 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index a22620a626..6ea57d2173 100644 --- 
a/BRANCH_TODO +++ b/BRANCH_TODO @@ -2,3 +2,4 @@ * await of a non async function * async call on a non async function * safety for resuming when it is awaiting + * implicit cast of normal function to async function should be allowed when it is inferred to be async diff --git a/src/all_types.hpp b/src/all_types.hpp index ebdde4642e..d30b3b8a80 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1508,6 +1508,7 @@ enum BuiltinFnId { BuiltinFnIdFrameAddress, BuiltinFnIdFrameType, BuiltinFnIdFrameHandle, + BuiltinFnIdFrameSize, }; struct BuiltinFnEntry { @@ -2255,6 +2256,8 @@ enum IrInstructionId { IrInstructionIdFrameAddress, IrInstructionIdFrameHandle, IrInstructionIdFrameType, + IrInstructionIdFrameSizeSrc, + IrInstructionIdFrameSizeGen, IrInstructionIdAlignOf, IrInstructionIdOverflowOp, IrInstructionIdTestErrSrc, @@ -3050,6 +3053,19 @@ struct IrInstructionFrameType { IrInstruction *fn; }; +struct IrInstructionFrameSizeSrc { + IrInstruction base; + + IrInstruction *fn; +}; + +struct IrInstructionFrameSizeGen { + IrInstruction base; + + IrInstruction *fn; + IrInstruction *frame_ptr; +}; + enum IrOverflowOp { IrOverflowOpAdd, IrOverflowOpSub, diff --git a/src/codegen.cpp b/src/codegen.cpp index d6f19d6a43..6fc152ad3e 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -4914,6 +4914,15 @@ static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, return nullptr; } +static LLVMValueRef ir_render_frame_size(CodeGen *g, IrExecutable *executable, IrInstructionFrameSizeGen *instruction) { + LLVMValueRef fn_val = ir_llvm_value(g, instruction->fn); + LLVMValueRef frame_ptr = ir_llvm_value(g, instruction->frame_ptr); + LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, frame_ptr, coro_resume_index_index, ""); + LLVMValueRef one = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, 1, false); + LLVMBuildStore(g->builder, one, resume_index_ptr); + return ZigLLVMBuildCall(g->builder, fn_val, &frame_ptr, 1, LLVMFastCallConv, 
ZigLLVM_FnInlineAuto, ""); +} + static void set_debug_location(CodeGen *g, IrInstruction *instruction) { AstNode *source_node = instruction->source_node; Scope *scope = instruction->scope; @@ -5007,6 +5016,7 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, case IrInstructionIdTestErrSrc: case IrInstructionIdUnionInitNamedField: case IrInstructionIdFrameType: + case IrInstructionIdFrameSizeSrc: zig_unreachable(); case IrInstructionIdDeclVarGen: @@ -5161,6 +5171,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_suspend_br(g, executable, (IrInstructionSuspendBr *)instruction); case IrInstructionIdCoroResume: return ir_render_coro_resume(g, executable, (IrInstructionCoroResume *)instruction); + case IrInstructionIdFrameSizeGen: + return ir_render_frame_size(g, executable, (IrInstructionFrameSizeGen *)instruction); } zig_unreachable(); } @@ -6856,6 +6868,7 @@ static void define_builtin_fns(CodeGen *g) { create_builtin_fn(g, BuiltinFnIdFrameHandle, "frame", 0); create_builtin_fn(g, BuiltinFnIdFrameType, "Frame", 1); create_builtin_fn(g, BuiltinFnIdFrameAddress, "frameAddress", 0); + create_builtin_fn(g, BuiltinFnIdFrameSize, "frameSize", 1); } static const char *bool_to_str(bool b) { diff --git a/src/ir.cpp b/src/ir.cpp index 93d559b446..1a62af8ce4 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -763,6 +763,14 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameType *) { return IrInstructionIdFrameType; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameSizeSrc *) { + return IrInstructionIdFrameSizeSrc; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameSizeGen *) { + return IrInstructionIdFrameSizeGen; +} + static constexpr IrInstructionId ir_instruction_id(IrInstructionAlignOf *) { return IrInstructionIdAlignOf; } @@ -2379,6 +2387,28 @@ static IrInstruction *ir_build_frame_type(IrBuilder *irb, Scope *scope, AstNode return 
&instruction->base; } +static IrInstruction *ir_build_frame_size_src(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *fn) { + IrInstructionFrameSizeSrc *instruction = ir_build_instruction(irb, scope, source_node); + instruction->fn = fn; + + ir_ref_instruction(fn, irb->current_basic_block); + + return &instruction->base; +} + +static IrInstruction *ir_build_frame_size_gen(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *fn, + IrInstruction *frame_ptr) +{ + IrInstructionFrameSizeGen *instruction = ir_build_instruction(irb, scope, source_node); + instruction->fn = fn; + instruction->frame_ptr = frame_ptr; + + ir_ref_instruction(fn, irb->current_basic_block); + ir_ref_instruction(frame_ptr, irb->current_basic_block); + + return &instruction->base; +} + static IrInstruction *ir_build_overflow_op(IrBuilder *irb, Scope *scope, AstNode *source_node, IrOverflowOp op, IrInstruction *type_value, IrInstruction *op1, IrInstruction *op2, IrInstruction *result_ptr, ZigType *result_ptr_type) @@ -4923,6 +4953,15 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo IrInstruction *frame_type = ir_build_frame_type(irb, scope, node, arg0_value); return ir_lval_wrap(irb, scope, frame_type, lval, result_loc); } + case BuiltinFnIdFrameSize: { + AstNode *arg0_node = node->data.fn_call_expr.params.at(0); + IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); + if (arg0_value == irb->codegen->invalid_instruction) + return arg0_value; + + IrInstruction *frame_size = ir_build_frame_size_src(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, frame_size, lval, result_loc); + } case BuiltinFnIdAlignOf: { AstNode *arg0_node = node->data.fn_call_expr.params.at(0); @@ -21758,6 +21797,28 @@ static IrInstruction *ir_analyze_instruction_frame_type(IrAnalyze *ira, IrInstru return ir_const_type(ira, &instruction->base, ty); } +static IrInstruction *ir_analyze_instruction_frame_size(IrAnalyze *ira, 
IrInstructionFrameSizeSrc *instruction) { + IrInstruction *fn = instruction->fn->child; + if (type_is_invalid(fn->value.type)) + return ira->codegen->invalid_instruction; + + if (fn->value.type->id != ZigTypeIdFn) { + ir_add_error(ira, fn, + buf_sprintf("expected function, found '%s'", buf_ptr(&fn->value.type->name))); + return ira->codegen->invalid_instruction; + } + + IrInstruction *frame_ptr = ir_resolve_result(ira, &instruction->base, no_result_loc(), + ira->codegen->builtin_types.entry_frame_header, nullptr, true, false); + if (frame_ptr != nullptr && (type_is_invalid(frame_ptr->value.type) || instr_is_unreachable(frame_ptr))) + return frame_ptr; + + IrInstruction *result = ir_build_frame_size_gen(&ira->new_irb, instruction->base.scope, + instruction->base.source_node, fn, frame_ptr); + result->value.type = ira->codegen->builtin_types.entry_usize; + return result; +} + static IrInstruction *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstructionAlignOf *instruction) { Error err; IrInstruction *type_value = instruction->type_value->child; @@ -22348,10 +22409,6 @@ static IrInstruction *ir_analyze_instruction_fn_proto(IrAnalyze *ira, IrInstruct return ira->codegen->invalid_instruction; } - if (fn_type_id.cc == CallingConventionAsync) { - zig_panic("TODO"); - } - return ir_const_type(ira, &instruction->base, get_fn_type(ira->codegen, &fn_type_id)); } @@ -24237,6 +24294,7 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction case IrInstructionIdSliceGen: case IrInstructionIdRefGen: case IrInstructionIdTestErrGen: + case IrInstructionIdFrameSizeGen: zig_unreachable(); case IrInstructionIdReturn: @@ -24387,6 +24445,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction return ir_analyze_instruction_frame_handle(ira, (IrInstructionFrameHandle *)instruction); case IrInstructionIdFrameType: return ir_analyze_instruction_frame_type(ira, (IrInstructionFrameType *)instruction); + case 
IrInstructionIdFrameSizeSrc: + return ir_analyze_instruction_frame_size(ira, (IrInstructionFrameSizeSrc *)instruction); case IrInstructionIdAlignOf: return ir_analyze_instruction_align_of(ira, (IrInstructionAlignOf *)instruction); case IrInstructionIdOverflowOp: @@ -24682,6 +24742,8 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdFrameAddress: case IrInstructionIdFrameHandle: case IrInstructionIdFrameType: + case IrInstructionIdFrameSizeSrc: + case IrInstructionIdFrameSizeGen: case IrInstructionIdTestErrSrc: case IrInstructionIdTestErrGen: case IrInstructionIdFnProto: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 5b3bba2271..7e903ed662 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -916,6 +916,20 @@ static void ir_print_frame_type(IrPrint *irp, IrInstructionFrameType *instructio fprintf(irp->f, ")"); } +static void ir_print_frame_size_src(IrPrint *irp, IrInstructionFrameSizeSrc *instruction) { + fprintf(irp->f, "@frameSize("); + ir_print_other_instruction(irp, instruction->fn); + fprintf(irp->f, ")"); +} + +static void ir_print_frame_size_gen(IrPrint *irp, IrInstructionFrameSizeGen *instruction) { + fprintf(irp->f, "@frameSize("); + ir_print_other_instruction(irp, instruction->fn); + fprintf(irp->f, ","); + ir_print_other_instruction(irp, instruction->frame_ptr); + fprintf(irp->f, ")"); +} + static void ir_print_return_address(IrPrint *irp, IrInstructionReturnAddress *instruction) { fprintf(irp->f, "@returnAddress()"); } @@ -1776,6 +1790,12 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdFrameType: ir_print_frame_type(irp, (IrInstructionFrameType *)instruction); break; + case IrInstructionIdFrameSizeSrc: + ir_print_frame_size_src(irp, (IrInstructionFrameSizeSrc *)instruction); + break; + case IrInstructionIdFrameSizeGen: + ir_print_frame_size_gen(irp, (IrInstructionFrameSizeGen *)instruction); + break; case IrInstructionIdAlignOf: ir_print_align_of(irp, 
(IrInstructionAlignOf *)instruction); break; diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index 33246f761f..7af04d37c9 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -101,6 +101,32 @@ test "calling an inferred async function" { S.doTheTest(); } +test "@frameSize" { + const S = struct { + fn doTheTest() void { + { + var ptr = @ptrCast(async fn(i32) void, other); + const size = @frameSize(ptr); + expect(size == @sizeOf(@Frame(other))); + } + { + var ptr = @ptrCast(async fn() void, first); + const size = @frameSize(ptr); + expect(size == @sizeOf(@Frame(first))); + } + } + + fn first() void { + other(1); + } + fn other(param: i32) void { + var local: i32 = undefined; + suspend; + } + }; + S.doTheTest(); +} + //test "coroutine suspend, resume" { // seq('a'); // const p = try async testAsyncSeq(); From 7b3686861f87d006da817db98f7d3b13fada9815 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 25 Jul 2019 22:24:01 -0400 Subject: [PATCH 019/125] `@frameSize` works via PrefixData --- BRANCH_TODO | 9 +++++++++ src/all_types.hpp | 1 - src/codegen.cpp | 21 +++++++++++++-------- src/ir.cpp | 12 ++---------- src/ir_print.cpp | 2 -- src/zig_llvm.cpp | 6 +++++- src/zig_llvm.h | 1 + 7 files changed, 30 insertions(+), 22 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index 6ea57d2173..d10bc704d8 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,5 +1,14 @@ + * reimplement @frameSize with Prefix Data + * reimplement with function splitting rather than switch + * add the `anyframe` type and `anyframe->T` * await * await of a non async function + * await in single-threaded mode * async call on a non async function + * @asyncCall with an async function pointer + * cancel + * defer and errdefer * safety for resuming when it is awaiting * implicit cast of normal function to async function should be allowed when it is inferred to be async + * go over the commented out tests + * revive 
std.event.Loop diff --git a/src/all_types.hpp b/src/all_types.hpp index d30b3b8a80..6ee3a6b937 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -3063,7 +3063,6 @@ struct IrInstructionFrameSizeGen { IrInstruction base; IrInstruction *fn; - IrInstruction *frame_ptr; }; enum IrOverflowOp { diff --git a/src/codegen.cpp b/src/codegen.cpp index 6fc152ad3e..4343006b17 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -4914,13 +4914,16 @@ static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, return nullptr; } -static LLVMValueRef ir_render_frame_size(CodeGen *g, IrExecutable *executable, IrInstructionFrameSizeGen *instruction) { +static LLVMValueRef ir_render_frame_size(CodeGen *g, IrExecutable *executable, + IrInstructionFrameSizeGen *instruction) +{ + LLVMTypeRef usize_llvm_type = g->builtin_types.entry_usize->llvm_type; + LLVMTypeRef ptr_usize_llvm_type = LLVMPointerType(usize_llvm_type, 0); LLVMValueRef fn_val = ir_llvm_value(g, instruction->fn); - LLVMValueRef frame_ptr = ir_llvm_value(g, instruction->frame_ptr); - LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, frame_ptr, coro_resume_index_index, ""); - LLVMValueRef one = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, 1, false); - LLVMBuildStore(g->builder, one, resume_index_ptr); - return ZigLLVMBuildCall(g->builder, fn_val, &frame_ptr, 1, LLVMFastCallConv, ZigLLVM_FnInlineAuto, ""); + LLVMValueRef casted_fn_val = LLVMBuildBitCast(g->builder, fn_val, ptr_usize_llvm_type, ""); + LLVMValueRef negative_one = LLVMConstInt(LLVMInt32Type(), -1, true); + LLVMValueRef prefix_ptr = LLVMBuildInBoundsGEP(g->builder, casted_fn_val, &negative_one, 1, ""); + return LLVMBuildLoad(g->builder, prefix_ptr, ""); } static void set_debug_location(CodeGen *g, IrInstruction *instruction) { @@ -6409,13 +6412,16 @@ static void do_code_gen(CodeGen *g) { } if (is_async) { + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; + LLVMValueRef size_val = 
LLVMConstInt(usize_type_ref, fn_table_entry->frame_type->abi_size, false); + ZigLLVMFunctionSetPrefixData(fn_table_entry->llvm_value, size_val); + if (!g->strip_debug_symbols) { AstNode *source_node = fn_table_entry->proto_node; ZigLLVMSetCurrentDebugLocation(g->builder, (int)source_node->line + 1, (int)source_node->column + 1, get_di_scope(g, fn_table_entry->child_scope)); } IrExecutable *executable = &fn_table_entry->analyzed_executable; - LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume"); LLVMPositionBuilderAtEnd(g->builder, bad_resume_block); gen_assertion_scope(g, PanicMsgIdBadResume, fn_table_entry->child_scope); @@ -6424,7 +6430,6 @@ static void do_code_gen(CodeGen *g) { LLVMPositionBuilderAtEnd(g->builder, get_size_block); assert(fn_table_entry->frame_type->abi_size != 0); assert(fn_table_entry->frame_type->abi_size != SIZE_MAX); - LLVMValueRef size_val = LLVMConstInt(usize_type_ref, fn_table_entry->frame_type->abi_size, false); LLVMBuildRet(g->builder, size_val); LLVMPositionBuilderAtEnd(g->builder, fn_table_entry->preamble_llvm_block); diff --git a/src/ir.cpp b/src/ir.cpp index 1a62af8ce4..7a5af347b7 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -2396,15 +2396,12 @@ static IrInstruction *ir_build_frame_size_src(IrBuilder *irb, Scope *scope, AstN return &instruction->base; } -static IrInstruction *ir_build_frame_size_gen(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *fn, - IrInstruction *frame_ptr) +static IrInstruction *ir_build_frame_size_gen(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *fn) { IrInstructionFrameSizeGen *instruction = ir_build_instruction(irb, scope, source_node); instruction->fn = fn; - instruction->frame_ptr = frame_ptr; ir_ref_instruction(fn, irb->current_basic_block); - ir_ref_instruction(frame_ptr, irb->current_basic_block); return &instruction->base; } @@ -21808,13 +21805,8 @@ static 
IrInstruction *ir_analyze_instruction_frame_size(IrAnalyze *ira, IrInstru return ira->codegen->invalid_instruction; } - IrInstruction *frame_ptr = ir_resolve_result(ira, &instruction->base, no_result_loc(), - ira->codegen->builtin_types.entry_frame_header, nullptr, true, false); - if (frame_ptr != nullptr && (type_is_invalid(frame_ptr->value.type) || instr_is_unreachable(frame_ptr))) - return frame_ptr; - IrInstruction *result = ir_build_frame_size_gen(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, fn, frame_ptr); + instruction->base.source_node, fn); result->value.type = ira->codegen->builtin_types.entry_usize; return result; } diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 7e903ed662..ae467bdc8c 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -925,8 +925,6 @@ static void ir_print_frame_size_src(IrPrint *irp, IrInstructionFrameSizeSrc *ins static void ir_print_frame_size_gen(IrPrint *irp, IrInstructionFrameSizeGen *instruction) { fprintf(irp->f, "@frameSize("); ir_print_other_instruction(irp, instruction->fn); - fprintf(irp->f, ","); - ir_print_other_instruction(irp, instruction->frame_ptr); fprintf(irp->f, ")"); } diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp index b52edabe65..906b278b21 100644 --- a/src/zig_llvm.cpp +++ b/src/zig_llvm.cpp @@ -899,9 +899,13 @@ LLVMValueRef ZigLLVMBuildAShrExact(LLVMBuilderRef builder, LLVMValueRef LHS, LLV } void ZigLLVMSetTailCall(LLVMValueRef Call) { - unwrap(Call)->setTailCallKind(CallInst::TCK_MustTail); + unwrap(Call)->setTailCallKind(CallInst::TCK_MustTail); } +void ZigLLVMFunctionSetPrefixData(LLVMValueRef function, LLVMValueRef data) { + unwrap(function)->setPrefixData(unwrap(data)); +} + class MyOStream: public raw_ostream { public: diff --git a/src/zig_llvm.h b/src/zig_llvm.h index 2a2ab567a6..2be119ba0c 100644 --- a/src/zig_llvm.h +++ b/src/zig_llvm.h @@ -212,6 +212,7 @@ ZIG_EXTERN_C struct ZigLLVMDILocation *ZigLLVMGetDebugLoc(unsigned line, unsigne ZIG_EXTERN_C void 
ZigLLVMSetFastMath(LLVMBuilderRef builder_wrapped, bool on_state); ZIG_EXTERN_C void ZigLLVMSetTailCall(LLVMValueRef Call); +ZIG_EXTERN_C void ZigLLVMFunctionSetPrefixData(LLVMValueRef fn, LLVMValueRef data); ZIG_EXTERN_C void ZigLLVMAddFunctionAttr(LLVMValueRef fn, const char *attr_name, const char *attr_value); ZIG_EXTERN_C void ZigLLVMAddFunctionAttrCold(LLVMValueRef fn); From 018a89c7a1b2763a50375f6d6d168dfa1f877f6a Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 26 Jul 2019 17:07:19 -0400 Subject: [PATCH 020/125] async functions return void, no more GetSize resume block --- src/all_types.hpp | 4 ++-- src/analyze.cpp | 4 ++-- src/codegen.cpp | 18 ++++-------------- 3 files changed, 8 insertions(+), 18 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 6ee3a6b937..c9bdfabb0d 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -3670,8 +3670,8 @@ static const size_t coro_fn_ptr_index = 1; static const size_t coro_awaiter_index = 2; static const size_t coro_arg_start = 3; -// one for the GetSize block, one for the Entry block, resume blocks are indexed after that. -static const size_t coro_extra_resume_block_count = 2; +// one for the Entry block, resume blocks are indexed after that. +static const size_t coro_extra_resume_block_count = 1; // TODO call graph analysis to find out what this number needs to be for every function // MUST BE A POWER OF TWO. 
diff --git a/src/analyze.cpp b/src/analyze.cpp index c8e02a4771..e1fedab7cf 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -7254,7 +7254,7 @@ static void resolve_llvm_types_fn_type(CodeGen *g, ZigType *fn_type) { ZigList param_di_types = {}; ZigType *gen_return_type; if (is_async) { - gen_return_type = g->builtin_types.entry_usize; + gen_return_type = g->builtin_types.entry_void; param_di_types.append(get_llvm_di_type(g, gen_return_type)); } else if (!type_has_bits(fn_type_id->return_type)) { gen_return_type = g->builtin_types.entry_void; @@ -7354,7 +7354,7 @@ void resolve_llvm_types_fn(CodeGen *g, ZigFn *fn) { return; } - ZigType *gen_return_type = g->builtin_types.entry_usize; + ZigType *gen_return_type = g->builtin_types.entry_void; ZigList param_di_types = {}; // first "parameter" is return value param_di_types.append(get_llvm_di_type(g, gen_return_type)); diff --git a/src/codegen.cpp b/src/codegen.cpp index 4343006b17..63018cb6a3 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -1995,7 +1995,7 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns LLVMBuildStore(g->builder, new_resume_index, resume_index_ptr); } - LLVMBuildRet(g->builder, LLVMGetUndef(g->builtin_types.entry_usize->llvm_type)); + LLVMBuildRetVoid(g->builder); return nullptr; } if (want_first_arg_sret(g, &g->cur_fn->type_entry->data.fn.fn_type_id)) { @@ -3438,7 +3438,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, fn_val, &frame_result_loc, 1, llvm_cc, fn_inline, ""); ZigLLVMSetTailCall(call_inst); - LLVMBuildRet(g->builder, call_inst); + LLVMBuildRetVoid(g->builder); LLVMPositionBuilderAtEnd(g->builder, instruction->resume_block->llvm_block); return nullptr; @@ -4898,7 +4898,7 @@ static LLVMValueRef ir_render_suspend_begin(CodeGen *g, IrExecutable *executable static LLVMValueRef ir_render_suspend_br(CodeGen *g, IrExecutable *executable, IrInstructionSuspendBr 
*instruction) { - LLVMBuildRet(g->builder, LLVMGetUndef(g->builtin_types.entry_usize->llvm_type)); + LLVMBuildRetVoid(g->builder); return nullptr; } @@ -6426,27 +6426,17 @@ static void do_code_gen(CodeGen *g) { LLVMPositionBuilderAtEnd(g->builder, bad_resume_block); gen_assertion_scope(g, PanicMsgIdBadResume, fn_table_entry->child_scope); - LLVMBasicBlockRef get_size_block = LLVMAppendBasicBlock(g->cur_fn_val, "GetSize"); - LLVMPositionBuilderAtEnd(g->builder, get_size_block); - assert(fn_table_entry->frame_type->abi_size != 0); - assert(fn_table_entry->frame_type->abi_size != SIZE_MAX); - LLVMBuildRet(g->builder, size_val); - LLVMPositionBuilderAtEnd(g->builder, fn_table_entry->preamble_llvm_block); LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_resume_index_index, ""); LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, ""); // +1 - index 0 is reserved for the entry block - // +1 - index 1 is reserved for getting the size. LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block, - fn_table_entry->resume_blocks.length + 2); + fn_table_entry->resume_blocks.length + 1); LLVMValueRef zero = LLVMConstNull(usize_type_ref); LLVMAddCase(switch_instr, zero, executable->basic_block_list.at(0)->llvm_block); - LLVMValueRef one = LLVMConstInt(usize_type_ref, 1, false); - LLVMAddCase(switch_instr, one, get_size_block); - for (size_t resume_i = 0; resume_i < fn_table_entry->resume_blocks.length; resume_i += 1) { IrBasicBlock *resume_block = fn_table_entry->resume_blocks.at(resume_i); LLVMValueRef case_value = LLVMConstInt(usize_type_ref, resume_block->resume_index, false); From ee64a22045ccbc39773779d4e386e25f563c8a90 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 26 Jul 2019 19:52:35 -0400 Subject: [PATCH 021/125] add the `anyframe` and `anyframe->T` types --- BRANCH_TODO | 6 +- src/all_types.hpp | 21 ++++++ src/analyze.cpp | 111 ++++++++++++++++++++++++++++- src/analyze.hpp | 1 + 
src/ast_render.cpp | 10 +++ src/codegen.cpp | 16 ++++- src/ir.cpp | 86 +++++++++++++++++++++- src/ir_print.cpp | 12 ++++ src/parser.cpp | 22 +++++- src/tokenizer.cpp | 2 + src/tokenizer.hpp | 1 + std/hash_map.zig | 1 + std/testing.zig | 1 + std/zig/ast.zig | 16 ++--- std/zig/parse.zig | 40 +++++------ std/zig/parser_test.zig | 4 +- std/zig/render.zig | 10 +-- std/zig/tokenizer.zig | 4 +- test/stage1/behavior/type_info.zig | 23 +++++- 19 files changed, 337 insertions(+), 50 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index d10bc704d8..e2c4fec436 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,6 +1,4 @@ - * reimplement @frameSize with Prefix Data - * reimplement with function splitting rather than switch - * add the `anyframe` type and `anyframe->T` + * make the anyframe type and anyframe->T type work with resume * await * await of a non async function * await in single-threaded mode @@ -12,3 +10,5 @@ * implicit cast of normal function to async function should be allowed when it is inferred to be async * go over the commented out tests * revive std.event.Loop + * reimplement with function splitting rather than switch + * @typeInfo for @Frame(func) diff --git a/src/all_types.hpp b/src/all_types.hpp index c9bdfabb0d..1096feade0 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -479,6 +479,7 @@ enum NodeType { NodeTypeResume, NodeTypeAwaitExpr, NodeTypeSuspend, + NodeTypeAnyFrameType, NodeTypeEnumLiteral, }; @@ -936,6 +937,10 @@ struct AstNodeSuspend { AstNode *block; }; +struct AstNodeAnyFrameType { + AstNode *payload_type; // can be NULL +}; + struct AstNodeEnumLiteral { Token *period; Token *identifier; @@ -1001,6 +1006,7 @@ struct AstNode { AstNodeResumeExpr resume_expr; AstNodeAwaitExpr await_expr; AstNodeSuspend suspend; + AstNodeAnyFrameType anyframe_type; AstNodeEnumLiteral enum_literal; } data; }; @@ -1253,6 +1259,7 @@ enum ZigTypeId { ZigTypeIdArgTuple, ZigTypeIdOpaque, ZigTypeIdCoroFrame, + ZigTypeIdAnyFrame, ZigTypeIdVector, 
ZigTypeIdEnumLiteral, }; @@ -1272,6 +1279,10 @@ struct ZigTypeCoroFrame { ZigType *locals_struct; }; +struct ZigTypeAnyFrame { + ZigType *result_type; // null if `anyframe` instead of `anyframe->T` +}; + struct ZigType { ZigTypeId id; Buf name; @@ -1298,11 +1309,13 @@ struct ZigType { ZigTypeVector vector; ZigTypeOpaque opaque; ZigTypeCoroFrame frame; + ZigTypeAnyFrame any_frame; } data; // use these fields to make sure we don't duplicate type table entries for the same type ZigType *pointer_parent[2]; // [0 - mut, 1 - const] ZigType *optional_parent; + ZigType *any_frame_parent; // If we generate a constant name value for this type, we memoize it here. // The type of this is array ConstExprValue *cached_const_name_val; @@ -1781,6 +1794,7 @@ struct CodeGen { ZigType *entry_arg_tuple; ZigType *entry_enum_literal; ZigType *entry_frame_header; + ZigType *entry_any_frame; } builtin_types; ZigType *align_amt_type; ZigType *stack_trace_type; @@ -2208,6 +2222,7 @@ enum IrInstructionId { IrInstructionIdSetRuntimeSafety, IrInstructionIdSetFloatMode, IrInstructionIdArrayType, + IrInstructionIdAnyFrameType, IrInstructionIdSliceType, IrInstructionIdGlobalAsm, IrInstructionIdAsm, @@ -2709,6 +2724,12 @@ struct IrInstructionPtrType { bool is_allow_zero; }; +struct IrInstructionAnyFrameType { + IrInstruction base; + + IrInstruction *payload_type; +}; + struct IrInstructionSliceType { IrInstruction base; diff --git a/src/analyze.cpp b/src/analyze.cpp index e1fedab7cf..e47be8f14c 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -256,6 +256,7 @@ AstNode *type_decl_node(ZigType *type_entry) { case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: case ZigTypeIdVector: + case ZigTypeIdAnyFrame: return nullptr; } zig_unreachable(); @@ -322,6 +323,7 @@ bool type_is_resolved(ZigType *type_entry, ResolveStatus status) { case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: case ZigTypeIdVector: + case ZigTypeIdAnyFrame: return true; } zig_unreachable(); @@ -354,6 +356,31 @@ ZigType 
*get_smallest_unsigned_int_type(CodeGen *g, uint64_t x) { return get_int_type(g, false, bits_needed_for_unsigned(x)); } +ZigType *get_any_frame_type(CodeGen *g, ZigType *result_type) { + if (result_type != nullptr && result_type->any_frame_parent != nullptr) { + return result_type->any_frame_parent; + } else if (result_type == nullptr && g->builtin_types.entry_any_frame != nullptr) { + return g->builtin_types.entry_any_frame; + } + + ZigType *entry = new_type_table_entry(ZigTypeIdAnyFrame); + entry->abi_size = g->builtin_types.entry_usize->abi_size; + entry->size_in_bits = g->builtin_types.entry_usize->size_in_bits; + entry->abi_align = g->builtin_types.entry_usize->abi_align; + entry->data.any_frame.result_type = result_type; + buf_init_from_str(&entry->name, "anyframe"); + if (result_type != nullptr) { + buf_appendf(&entry->name, "->%s", buf_ptr(&result_type->name)); + } + + if (result_type != nullptr) { + result_type->any_frame_parent = entry; + } else if (result_type == nullptr) { + g->builtin_types.entry_any_frame = entry; + } + return entry; +} + static const char *ptr_len_to_star_str(PtrLen ptr_len) { switch (ptr_len) { case PtrLenSingle: @@ -1080,6 +1107,7 @@ static Error emit_error_unless_type_allowed_in_packed_struct(CodeGen *g, ZigType case ZigTypeIdArgTuple: case ZigTypeIdOpaque: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: add_node_error(g, source_node, buf_sprintf("type '%s' not allowed in packed struct; no guaranteed in-memory representation", buf_ptr(&type_entry->name))); @@ -1169,6 +1197,7 @@ bool type_allowed_in_extern(CodeGen *g, ZigType *type_entry) { case ZigTypeIdArgTuple: case ZigTypeIdVoid: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: return false; case ZigTypeIdOpaque: case ZigTypeIdUnreachable: @@ -1340,6 +1369,7 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc case ZigTypeIdFn: case ZigTypeIdVector: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: switch (type_requires_comptime(g, 
type_entry)) { case ReqCompTimeNo: break; @@ -1436,6 +1466,7 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc case ZigTypeIdFn: case ZigTypeIdVector: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: switch (type_requires_comptime(g, fn_type_id.return_type)) { case ReqCompTimeInvalid: return g->builtin_types.entry_invalid; @@ -2997,6 +3028,7 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) { case NodeTypeAwaitExpr: case NodeTypeSuspend: case NodeTypeEnumLiteral: + case NodeTypeAnyFrameType: zig_unreachable(); } } @@ -3049,6 +3081,7 @@ ZigType *validate_var_type(CodeGen *g, AstNode *source_node, ZigType *type_entry case ZigTypeIdBoundFn: case ZigTypeIdVector: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: return type_entry; } zig_unreachable(); @@ -3550,6 +3583,7 @@ bool is_container(ZigType *type_entry) { case ZigTypeIdOpaque: case ZigTypeIdVector: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: return false; } zig_unreachable(); @@ -3607,6 +3641,7 @@ Error resolve_container_type(CodeGen *g, ZigType *type_entry) { case ZigTypeIdOpaque: case ZigTypeIdVector: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: zig_unreachable(); } zig_unreachable(); @@ -3615,11 +3650,13 @@ Error resolve_container_type(CodeGen *g, ZigType *type_entry) { ZigType *get_src_ptr_type(ZigType *type) { if (type->id == ZigTypeIdPointer) return type; if (type->id == ZigTypeIdFn) return type; + if (type->id == ZigTypeIdAnyFrame) return type; if (type->id == ZigTypeIdOptional) { if (type->data.maybe.child_type->id == ZigTypeIdPointer) { return type->data.maybe.child_type->data.pointer.allow_zero ? 
nullptr : type->data.maybe.child_type; } if (type->data.maybe.child_type->id == ZigTypeIdFn) return type->data.maybe.child_type; + if (type->data.maybe.child_type->id == ZigTypeIdAnyFrame) return type->data.maybe.child_type; } return nullptr; } @@ -3635,6 +3672,13 @@ bool type_is_nonnull_ptr(ZigType *type) { return get_codegen_ptr_type(type) == type && !ptr_allows_addr_zero(type); } +static uint32_t get_coro_frame_align_bytes(CodeGen *g) { + uint32_t a = g->pointer_size_bytes * 2; + // promises have at least alignment 8 so that we can have 3 extra bits when doing atomicrmw + if (a < 8) a = 8; + return a; +} + uint32_t get_ptr_align(CodeGen *g, ZigType *type) { ZigType *ptr_type = get_src_ptr_type(type); if (ptr_type->id == ZigTypeIdPointer) { @@ -3646,6 +3690,8 @@ uint32_t get_ptr_align(CodeGen *g, ZigType *type) { // when getting the alignment of `?extern fn() void`. // See http://lists.llvm.org/pipermail/llvm-dev/2018-September/126142.html return (ptr_type->data.fn.fn_type_id.alignment == 0) ? 
1 : ptr_type->data.fn.fn_type_id.alignment; + } else if (ptr_type->id == ZigTypeIdAnyFrame) { + return get_coro_frame_align_bytes(g); } else { zig_unreachable(); } @@ -3657,6 +3703,8 @@ bool get_ptr_const(ZigType *type) { return ptr_type->data.pointer.is_const; } else if (ptr_type->id == ZigTypeIdFn) { return true; + } else if (ptr_type->id == ZigTypeIdAnyFrame) { + return true; } else { zig_unreachable(); } @@ -4153,6 +4201,7 @@ bool handle_is_ptr(ZigType *type_entry) { case ZigTypeIdFn: case ZigTypeIdEnum: case ZigTypeIdVector: + case ZigTypeIdAnyFrame: return false; case ZigTypeIdArray: case ZigTypeIdStruct: @@ -4404,6 +4453,9 @@ static uint32_t hash_const_val(ConstExprValue *const_val) { case ZigTypeIdCoroFrame: // TODO better hashing algorithm return 675741936; + case ZigTypeIdAnyFrame: + // TODO better hashing algorithm + return 3747294894; case ZigTypeIdBoundFn: case ZigTypeIdInvalid: case ZigTypeIdUnreachable: @@ -4469,6 +4521,7 @@ static bool can_mutate_comptime_var_state(ConstExprValue *value) { case ZigTypeIdErrorSet: case ZigTypeIdEnum: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: return false; case ZigTypeIdPointer: @@ -4541,6 +4594,7 @@ static bool return_type_is_cacheable(ZigType *return_type) { case ZigTypeIdPointer: case ZigTypeIdVector: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: return true; case ZigTypeIdArray: @@ -4673,6 +4727,7 @@ OnePossibleValue type_has_one_possible_value(CodeGen *g, ZigType *type_entry) { case ZigTypeIdFloat: case ZigTypeIdErrorUnion: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: return OnePossibleValueNo; case ZigTypeIdUndefined: case ZigTypeIdNull: @@ -4761,6 +4816,7 @@ ReqCompTime type_requires_comptime(CodeGen *g, ZigType *type_entry) { case ZigTypeIdVoid: case ZigTypeIdUnreachable: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: return ReqCompTimeNo; } zig_unreachable(); @@ -5433,6 +5489,8 @@ bool const_values_equal(CodeGen *g, ConstExprValue *a, ConstExprValue *b) { return true; case 
ZigTypeIdCoroFrame: zig_panic("TODO"); + case ZigTypeIdAnyFrame: + zig_panic("TODO"); case ZigTypeIdUndefined: zig_panic("TODO"); case ZigTypeIdNull: @@ -5786,7 +5844,11 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) { return; } case ZigTypeIdCoroFrame: - buf_appendf(buf, "(TODO: coroutine frame value)"); + buf_appendf(buf, "(TODO: async function frame value)"); + return; + + case ZigTypeIdAnyFrame: + buf_appendf(buf, "(TODO: anyframe value)"); return; } @@ -5836,6 +5898,7 @@ uint32_t type_id_hash(TypeId x) { case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: zig_unreachable(); case ZigTypeIdErrorUnion: return hash_ptr(x.data.error_union.err_set_type) ^ hash_ptr(x.data.error_union.payload_type); @@ -5885,6 +5948,7 @@ bool type_id_eql(TypeId a, TypeId b) { case ZigTypeIdArgTuple: case ZigTypeIdOpaque: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: zig_unreachable(); case ZigTypeIdErrorUnion: return a.data.error_union.err_set_type == b.data.error_union.err_set_type && @@ -6051,6 +6115,7 @@ static const ZigTypeId all_type_ids[] = { ZigTypeIdArgTuple, ZigTypeIdOpaque, ZigTypeIdCoroFrame, + ZigTypeIdAnyFrame, ZigTypeIdVector, ZigTypeIdEnumLiteral, }; @@ -6116,10 +6181,12 @@ size_t type_id_index(ZigType *entry) { return 21; case ZigTypeIdCoroFrame: return 22; - case ZigTypeIdVector: + case ZigTypeIdAnyFrame: return 23; - case ZigTypeIdEnumLiteral: + case ZigTypeIdVector: return 24; + case ZigTypeIdEnumLiteral: + return 25; } zig_unreachable(); } @@ -6178,6 +6245,8 @@ const char *type_id_name(ZigTypeId id) { return "Vector"; case ZigTypeIdCoroFrame: return "Frame"; + case ZigTypeIdAnyFrame: + return "AnyFrame"; } zig_unreachable(); } @@ -7398,6 +7467,40 @@ static void resolve_llvm_types_coro_frame(CodeGen *g, ZigType *frame_type, Resol frame_type->llvm_di_type = frame_type->data.frame.locals_struct->llvm_di_type; } +static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, 
ResolveStatus wanted_resolve_status) { + if (any_frame_type->llvm_di_type != nullptr) return; + + ZigType *result_type = any_frame_type->data.any_frame.result_type; + Buf *name = buf_sprintf("(%s header)", buf_ptr(&any_frame_type->name)); + + ZigType *frame_header_type; + if (result_type == nullptr || !type_has_bits(result_type)) { + const char *field_names[] = {"resume_index", "fn_ptr", "awaiter"}; + ZigType *field_types[] = { + g->builtin_types.entry_usize, + g->builtin_types.entry_usize, + g->builtin_types.entry_usize, + }; + frame_header_type = get_struct_type(g, buf_ptr(name), field_names, field_types, 3); + } else { + ZigType *ptr_result_type = get_pointer_to_type(g, result_type, false); + + const char *field_names[] = {"resume_index", "fn_ptr", "awaiter", "result_ptr", "result"}; + ZigType *field_types[] = { + g->builtin_types.entry_usize, + g->builtin_types.entry_usize, + g->builtin_types.entry_usize, + ptr_result_type, + result_type, + }; + frame_header_type = get_struct_type(g, buf_ptr(name), field_names, field_types, 5); + } + + ZigType *ptr_type = get_pointer_to_type(g, frame_header_type, false); + any_frame_type->llvm_type = get_llvm_type(g, ptr_type); + any_frame_type->llvm_di_type = get_llvm_di_type(g, ptr_type); +} + static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_resolve_status) { assert(type->id == ZigTypeIdOpaque || type_is_resolved(type, ResolveStatusSizeKnown)); assert(wanted_resolve_status > ResolveStatusSizeKnown); @@ -7460,6 +7563,8 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r } case ZigTypeIdCoroFrame: return resolve_llvm_types_coro_frame(g, type, wanted_resolve_status); + case ZigTypeIdAnyFrame: + return resolve_llvm_types_any_frame(g, type, wanted_resolve_status); } zig_unreachable(); } diff --git a/src/analyze.hpp b/src/analyze.hpp index 47ff4344ba..3115c79b40 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -41,6 +41,7 @@ ZigType *get_opaque_type(CodeGen *g, 
Scope *scope, AstNode *source_node, const c ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_names[], ZigType *field_types[], size_t field_count); ZigType *get_test_fn_type(CodeGen *g); +ZigType *get_any_frame_type(CodeGen *g, ZigType *result_type); bool handle_is_ptr(ZigType *type_entry); bool type_has_bits(ZigType *type_entry); diff --git a/src/ast_render.cpp b/src/ast_render.cpp index d97f58fdec..4d6bae311b 100644 --- a/src/ast_render.cpp +++ b/src/ast_render.cpp @@ -259,6 +259,8 @@ static const char *node_type_str(NodeType node_type) { return "Suspend"; case NodeTypePointerType: return "PointerType"; + case NodeTypeAnyFrameType: + return "AnyFrameType"; case NodeTypeEnumLiteral: return "EnumLiteral"; } @@ -847,6 +849,14 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) { render_node_ungrouped(ar, node->data.inferred_array_type.child_type); break; } + case NodeTypeAnyFrameType: { + fprintf(ar->f, "anyframe"); + if (node->data.anyframe_type.payload_type != nullptr) { + fprintf(ar->f, "->"); + render_node_grouped(ar, node->data.anyframe_type.payload_type); + } + break; + } case NodeTypeErrorType: fprintf(ar->f, "anyerror"); break; diff --git a/src/codegen.cpp b/src/codegen.cpp index 63018cb6a3..c666317c17 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -4947,6 +4947,7 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, case IrInstructionIdSetRuntimeSafety: case IrInstructionIdSetFloatMode: case IrInstructionIdArrayType: + case IrInstructionIdAnyFrameType: case IrInstructionIdSliceType: case IrInstructionIdSizeOf: case IrInstructionIdSwitchTarget: @@ -5438,7 +5439,9 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con return val; } case ZigTypeIdCoroFrame: - zig_panic("TODO bit pack a coroutine frame"); + zig_panic("TODO bit pack an async function frame"); + case ZigTypeIdAnyFrame: + zig_panic("TODO bit pack an anyframe"); } zig_unreachable(); 
} @@ -5961,6 +5964,8 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c zig_unreachable(); case ZigTypeIdCoroFrame: zig_panic("TODO"); + case ZigTypeIdAnyFrame: + zig_panic("TODO"); } zig_unreachable(); } @@ -7176,6 +7181,7 @@ Buf *codegen_generate_builtin_source(CodeGen *g) { " ArgTuple: void,\n" " Opaque: void,\n" " Frame: void,\n" + " AnyFrame: AnyFrame,\n" " Vector: Vector,\n" " EnumLiteral: void,\n" "\n\n" @@ -7291,6 +7297,10 @@ Buf *codegen_generate_builtin_source(CodeGen *g) { " args: []FnArg,\n" " };\n" "\n" + " pub const AnyFrame = struct {\n" + " child: ?type,\n" + " };\n" + "\n" " pub const Vector = struct {\n" " len: comptime_int,\n" " child: type,\n" @@ -8448,6 +8458,7 @@ static void prepend_c_type_to_decl_list(CodeGen *g, GenH *gen_h, ZigType *type_e case ZigTypeIdErrorUnion: case ZigTypeIdErrorSet: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: zig_unreachable(); case ZigTypeIdVoid: case ZigTypeIdUnreachable: @@ -8632,6 +8643,7 @@ static void get_c_type(CodeGen *g, GenH *gen_h, ZigType *type_entry, Buf *out_bu case ZigTypeIdNull: case ZigTypeIdArgTuple: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: zig_unreachable(); } } @@ -8800,7 +8812,9 @@ static void gen_h_file(CodeGen *g) { case ZigTypeIdFn: case ZigTypeIdVector: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: zig_unreachable(); + case ZigTypeIdEnum: if (type_entry->data.enumeration.layout == ContainerLayoutExtern) { fprintf(out_h, "enum %s {\n", buf_ptr(type_h_name(type_entry))); diff --git a/src/ir.cpp b/src/ir.cpp index 7a5af347b7..e6d987a2ee 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -303,6 +303,7 @@ static bool types_have_same_zig_comptime_repr(ZigType *a, ZigType *b) { case ZigTypeIdBoundFn: case ZigTypeIdErrorSet: case ZigTypeIdOpaque: + case ZigTypeIdAnyFrame: return true; case ZigTypeIdFloat: return a->data.floating.bit_count == b->data.floating.bit_count; @@ -563,6 +564,10 @@ static constexpr IrInstructionId 
ir_instruction_id(IrInstructionArrayType *) { return IrInstructionIdArrayType; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionAnyFrameType *) { + return IrInstructionIdAnyFrameType; +} + static constexpr IrInstructionId ir_instruction_id(IrInstructionSliceType *) { return IrInstructionIdSliceType; } @@ -1696,6 +1701,16 @@ static IrInstruction *ir_build_array_type(IrBuilder *irb, Scope *scope, AstNode return &instruction->base; } +static IrInstruction *ir_build_anyframe_type(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *payload_type) +{ + IrInstructionAnyFrameType *instruction = ir_build_instruction(irb, scope, source_node); + instruction->payload_type = payload_type; + + if (payload_type != nullptr) ir_ref_instruction(payload_type, irb->current_basic_block); + + return &instruction->base; +} static IrInstruction *ir_build_slice_type(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *child_type, bool is_const, bool is_volatile, IrInstruction *align_value, bool is_allow_zero) { @@ -6515,6 +6530,22 @@ static IrInstruction *ir_gen_array_type(IrBuilder *irb, Scope *scope, AstNode *n } } +static IrInstruction *ir_gen_anyframe_type(IrBuilder *irb, Scope *scope, AstNode *node) { + assert(node->type == NodeTypeAnyFrameType); + + AstNode *payload_type_node = node->data.anyframe_type.payload_type; + IrInstruction *payload_type_value = nullptr; + + if (payload_type_node != nullptr) { + payload_type_value = ir_gen_node(irb, payload_type_node, scope); + if (payload_type_value == irb->codegen->invalid_instruction) + return payload_type_value; + + } + + return ir_build_anyframe_type(irb, scope, node, payload_type_value); +} + static IrInstruction *ir_gen_undefined_literal(IrBuilder *irb, Scope *scope, AstNode *node) { assert(node->type == NodeTypeUndefinedLiteral); return ir_build_const_undefined(irb, scope, node); @@ -7884,6 +7915,8 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop return 
ir_lval_wrap(irb, scope, ir_gen_array_type(irb, scope, node), lval, result_loc); case NodeTypePointerType: return ir_lval_wrap(irb, scope, ir_gen_pointer_type(irb, scope, node), lval, result_loc); + case NodeTypeAnyFrameType: + return ir_lval_wrap(irb, scope, ir_gen_anyframe_type(irb, scope, node), lval, result_loc); case NodeTypeStringLiteral: return ir_lval_wrap(irb, scope, ir_gen_string_literal(irb, scope, node), lval, result_loc); case NodeTypeUndefinedLiteral: @@ -12775,6 +12808,7 @@ static IrInstruction *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp * case ZigTypeIdArgTuple: case ZigTypeIdEnum: case ZigTypeIdEnumLiteral: + case ZigTypeIdAnyFrame: operator_allowed = is_equality_cmp; break; @@ -14155,6 +14189,7 @@ static IrInstruction *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructio case ZigTypeIdArgTuple: case ZigTypeIdOpaque: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: ir_add_error(ira, target, buf_sprintf("invalid export target '%s'", buf_ptr(&type_value->name))); break; @@ -14180,6 +14215,7 @@ static IrInstruction *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructio case ZigTypeIdOpaque: case ZigTypeIdEnumLiteral: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: ir_add_error(ira, target, buf_sprintf("invalid export target type '%s'", buf_ptr(&target->value.type->name))); break; @@ -15720,7 +15756,9 @@ static IrInstruction *ir_analyze_optional_type(IrAnalyze *ira, IrInstructionUnOp case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: return ir_const_type(ira, &un_op_instruction->base, get_optional_type(ira->codegen, type_entry)); + case ZigTypeIdUnreachable: case ZigTypeIdOpaque: ir_add_error_node(ira, un_op_instruction->base.source_node, @@ -17443,6 +17481,20 @@ static IrInstruction *ir_analyze_instruction_set_float_mode(IrAnalyze *ira, return ir_const_void(ira, &instruction->base); } +static IrInstruction *ir_analyze_instruction_any_frame_type(IrAnalyze *ira, + 
IrInstructionAnyFrameType *instruction) +{ + ZigType *payload_type = nullptr; + if (instruction->payload_type != nullptr) { + payload_type = ir_resolve_type(ira, instruction->payload_type->child); + if (type_is_invalid(payload_type)) + return ira->codegen->invalid_instruction; + } + + ZigType *any_frame_type = get_any_frame_type(ira->codegen, payload_type); + return ir_const_type(ira, &instruction->base, any_frame_type); +} + static IrInstruction *ir_analyze_instruction_slice_type(IrAnalyze *ira, IrInstructionSliceType *slice_type_instruction) { @@ -17492,6 +17544,7 @@ static IrInstruction *ir_analyze_instruction_slice_type(IrAnalyze *ira, case ZigTypeIdBoundFn: case ZigTypeIdVector: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: { ResolveStatus needed_status = (align_bytes == 0) ? ResolveStatusZeroBitsKnown : ResolveStatusAlignmentKnown; @@ -17607,6 +17660,7 @@ static IrInstruction *ir_analyze_instruction_array_type(IrAnalyze *ira, case ZigTypeIdBoundFn: case ZigTypeIdVector: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: { if ((err = ensure_complete_type(ira->codegen, child_type))) return ira->codegen->invalid_instruction; @@ -17658,6 +17712,7 @@ static IrInstruction *ir_analyze_instruction_size_of(IrAnalyze *ira, case ZigTypeIdFn: case ZigTypeIdVector: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: { uint64_t size_in_bytes = type_size(ira->codegen, type_entry); return ir_const_unsigned(ira, &size_of_instruction->base, size_in_bytes); @@ -18222,6 +18277,7 @@ static IrInstruction *ir_analyze_instruction_switch_target(IrAnalyze *ira, case ZigTypeIdOpaque: case ZigTypeIdVector: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: ir_add_error(ira, &switch_target_instruction->base, buf_sprintf("invalid switch target type '%s'", buf_ptr(&target_type->name))); return ira->codegen->invalid_instruction; @@ -19656,6 +19712,22 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr break; } + case ZigTypeIdAnyFrame: { + result = 
create_const_vals(1); + result->special = ConstValSpecialStatic; + result->type = ir_type_info_get_type(ira, "AnyFrame", nullptr); + + ConstExprValue *fields = create_const_vals(1); + result->data.x_struct.fields = fields; + + // child: ?type + ensure_field_index(result->type, "child", 0); + fields[0].special = ConstValSpecialStatic; + fields[0].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type); + fields[0].data.x_optional = (type_entry->data.any_frame.result_type == nullptr) ? nullptr : + create_const_type(ira->codegen, type_entry->data.any_frame.result_type); + break; + } case ZigTypeIdEnum: { result = create_const_vals(1); @@ -20062,7 +20134,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr break; } case ZigTypeIdCoroFrame: - zig_panic("TODO @typeInfo for coro frames"); + zig_panic("TODO @typeInfo for async function frames"); } assert(result != nullptr); @@ -21852,6 +21924,7 @@ static IrInstruction *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruct case ZigTypeIdFn: case ZigTypeIdVector: case ZigTypeIdCoroFrame: + case ZigTypeIdAnyFrame: { uint64_t align_in_bytes = get_abi_alignment(ira->codegen, type_entry); return ir_const_unsigned(ira, &instruction->base, align_in_bytes); @@ -23004,7 +23077,9 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue case ZigTypeIdUnion: zig_panic("TODO buf_write_value_bytes union type"); case ZigTypeIdCoroFrame: - zig_panic("TODO buf_write_value_bytes coro frame type"); + zig_panic("TODO buf_write_value_bytes async fn frame type"); + case ZigTypeIdAnyFrame: + zig_panic("TODO buf_write_value_bytes anyframe type"); } zig_unreachable(); } @@ -23185,7 +23260,9 @@ static Error buf_read_value_bytes(IrAnalyze *ira, CodeGen *codegen, AstNode *sou case ZigTypeIdUnion: zig_panic("TODO buf_read_value_bytes union type"); case ZigTypeIdCoroFrame: - zig_panic("TODO buf_read_value_bytes coro frame type"); + zig_panic("TODO 
buf_read_value_bytes async fn frame type"); + case ZigTypeIdAnyFrame: + zig_panic("TODO buf_read_value_bytes anyframe type"); } zig_unreachable(); } @@ -24327,6 +24404,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction return ir_analyze_instruction_set_runtime_safety(ira, (IrInstructionSetRuntimeSafety *)instruction); case IrInstructionIdSetFloatMode: return ir_analyze_instruction_set_float_mode(ira, (IrInstructionSetFloatMode *)instruction); + case IrInstructionIdAnyFrameType: + return ir_analyze_instruction_any_frame_type(ira, (IrInstructionAnyFrameType *)instruction); case IrInstructionIdSliceType: return ir_analyze_instruction_slice_type(ira, (IrInstructionSliceType *)instruction); case IrInstructionIdGlobalAsm: @@ -24707,6 +24786,7 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdStructFieldPtr: case IrInstructionIdArrayType: case IrInstructionIdSliceType: + case IrInstructionIdAnyFrameType: case IrInstructionIdSizeOf: case IrInstructionIdTestNonNull: case IrInstructionIdOptionalUnwrapPtr: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index ae467bdc8c..284ebed2f3 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -471,6 +471,15 @@ static void ir_print_slice_type(IrPrint *irp, IrInstructionSliceType *instructio ir_print_other_instruction(irp, instruction->child_type); } +static void ir_print_any_frame_type(IrPrint *irp, IrInstructionAnyFrameType *instruction) { + if (instruction->payload_type == nullptr) { + fprintf(irp->f, "anyframe"); + } else { + fprintf(irp->f, "anyframe->"); + ir_print_other_instruction(irp, instruction->payload_type); + } +} + static void ir_print_global_asm(IrPrint *irp, IrInstructionGlobalAsm *instruction) { fprintf(irp->f, "asm(\"%s\")", buf_ptr(instruction->asm_code)); } @@ -1629,6 +1638,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdSliceType: ir_print_slice_type(irp, (IrInstructionSliceType *)instruction); 
break; + case IrInstructionIdAnyFrameType: + ir_print_any_frame_type(irp, (IrInstructionAnyFrameType *)instruction); + break; case IrInstructionIdGlobalAsm: ir_print_global_asm(irp, (IrInstructionGlobalAsm *)instruction); break; diff --git a/src/parser.cpp b/src/parser.cpp index b1a593d9c9..82312aacf3 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -282,6 +282,9 @@ static AstNode *ast_parse_prefix_op_expr( case NodeTypeAwaitExpr: right = &prefix->data.await_expr.expr; break; + case NodeTypeAnyFrameType: + right = &prefix->data.anyframe_type.payload_type; + break; case NodeTypeArrayType: right = &prefix->data.array_type.child_type; break; @@ -1640,6 +1643,10 @@ static AstNode *ast_parse_primary_type_expr(ParseContext *pc) { if (null != nullptr) return ast_create_node(pc, NodeTypeNullLiteral, null); + Token *anyframe = eat_token_if(pc, TokenIdKeywordAnyFrame); + if (anyframe != nullptr) + return ast_create_node(pc, NodeTypeAnyFrameType, anyframe); + Token *true_token = eat_token_if(pc, TokenIdKeywordTrue); if (true_token != nullptr) { AstNode *res = ast_create_node(pc, NodeTypeBoolLiteral, true_token); @@ -2510,7 +2517,7 @@ static AstNode *ast_parse_prefix_op(ParseContext *pc) { // PrefixTypeOp // <- QUESTIONMARK -// / KEYWORD_promise MINUSRARROW +// / KEYWORD_anyframe MINUSRARROW // / ArrayTypeStart (ByteAlign / KEYWORD_const / KEYWORD_volatile)* // / PtrTypeStart (KEYWORD_align LPAREN Expr (COLON INTEGER COLON INTEGER)? 
RPAREN / KEYWORD_const / KEYWORD_volatile)* static AstNode *ast_parse_prefix_type_op(ParseContext *pc) { @@ -2521,6 +2528,16 @@ static AstNode *ast_parse_prefix_type_op(ParseContext *pc) { return res; } + Token *anyframe = eat_token_if(pc, TokenIdKeywordAnyFrame); + if (anyframe != nullptr) { + if (eat_token_if(pc, TokenIdArrow) != nullptr) { + AstNode *res = ast_create_node(pc, NodeTypeAnyFrameType, anyframe); + return res; + } + + put_back_token(pc); + } + AstNode *array = ast_parse_array_type_start(pc); if (array != nullptr) { assert(array->type == NodeTypeArrayType); @@ -3005,6 +3022,9 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont case NodeTypeInferredArrayType: visit_field(&node->data.array_type.child_type, visit, context); break; + case NodeTypeAnyFrameType: + visit_field(&node->data.anyframe_type.payload_type, visit, context); + break; case NodeTypeErrorType: // none break; diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp index 0869c3ba9c..38c6c7153e 100644 --- a/src/tokenizer.cpp +++ b/src/tokenizer.cpp @@ -109,6 +109,7 @@ static const struct ZigKeyword zig_keywords[] = { {"align", TokenIdKeywordAlign}, {"allowzero", TokenIdKeywordAllowZero}, {"and", TokenIdKeywordAnd}, + {"anyframe", TokenIdKeywordAnyFrame}, {"asm", TokenIdKeywordAsm}, {"async", TokenIdKeywordAsync}, {"await", TokenIdKeywordAwait}, @@ -1533,6 +1534,7 @@ const char * token_name(TokenId id) { case TokenIdKeywordCancel: return "cancel"; case TokenIdKeywordAlign: return "align"; case TokenIdKeywordAnd: return "and"; + case TokenIdKeywordAnyFrame: return "anyframe"; case TokenIdKeywordAsm: return "asm"; case TokenIdKeywordBreak: return "break"; case TokenIdKeywordCatch: return "catch"; diff --git a/src/tokenizer.hpp b/src/tokenizer.hpp index 253e0bd1e5..98bdfea907 100644 --- a/src/tokenizer.hpp +++ b/src/tokenizer.hpp @@ -53,6 +53,7 @@ enum TokenId { TokenIdKeywordAlign, TokenIdKeywordAllowZero, TokenIdKeywordAnd, + TokenIdKeywordAnyFrame, 
TokenIdKeywordAsm, TokenIdKeywordAsync, TokenIdKeywordAwait, diff --git a/std/hash_map.zig b/std/hash_map.zig index bdd6cc7519..431fbb35ab 100644 --- a/std/hash_map.zig +++ b/std/hash_map.zig @@ -540,6 +540,7 @@ pub fn autoHash(key: var, comptime rng: *std.rand.Random, comptime HashInt: type .Undefined, .ArgTuple, .Frame, + .AnyFrame, => @compileError("cannot hash this type"), .Void, diff --git a/std/testing.zig b/std/testing.zig index 3c4772cf37..7f347b0c24 100644 --- a/std/testing.zig +++ b/std/testing.zig @@ -30,6 +30,7 @@ pub fn expectEqual(expected: var, actual: @typeOf(expected)) void { .ArgTuple, .Opaque, .Frame, + .AnyFrame, => @compileError("value of type " ++ @typeName(@typeOf(actual)) ++ " encountered"), .Undefined, diff --git a/std/zig/ast.zig b/std/zig/ast.zig index 38bd94339f..475a0e4e13 100644 --- a/std/zig/ast.zig +++ b/std/zig/ast.zig @@ -400,7 +400,7 @@ pub const Node = struct { VarType, ErrorType, FnProto, - PromiseType, + AnyFrameType, // Primary expressions IntegerLiteral, @@ -952,9 +952,9 @@ pub const Node = struct { } }; - pub const PromiseType = struct { + pub const AnyFrameType = struct { base: Node, - promise_token: TokenIndex, + anyframe_token: TokenIndex, result: ?Result, pub const Result = struct { @@ -962,7 +962,7 @@ pub const Node = struct { return_type: *Node, }; - pub fn iterate(self: *PromiseType, index: usize) ?*Node { + pub fn iterate(self: *AnyFrameType, index: usize) ?*Node { var i = index; if (self.result) |result| { @@ -973,13 +973,13 @@ pub const Node = struct { return null; } - pub fn firstToken(self: *const PromiseType) TokenIndex { - return self.promise_token; + pub fn firstToken(self: *const AnyFrameType) TokenIndex { + return self.anyframe_token; } - pub fn lastToken(self: *const PromiseType) TokenIndex { + pub fn lastToken(self: *const AnyFrameType) TokenIndex { if (self.result) |result| return result.return_type.lastToken(); - return self.promise_token; + return self.anyframe_token; } }; diff --git a/std/zig/parse.zig 
b/std/zig/parse.zig index 59acf99890..600178cdce 100644 --- a/std/zig/parse.zig +++ b/std/zig/parse.zig @@ -1201,7 +1201,7 @@ fn parseSuffixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node { /// / KEYWORD_error DOT IDENTIFIER /// / KEYWORD_false /// / KEYWORD_null -/// / KEYWORD_promise +/// / KEYWORD_anyframe /// / KEYWORD_true /// / KEYWORD_undefined /// / KEYWORD_unreachable @@ -1256,11 +1256,11 @@ fn parsePrimaryTypeExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*N } if (eatToken(it, .Keyword_false)) |token| return createLiteral(arena, Node.BoolLiteral, token); if (eatToken(it, .Keyword_null)) |token| return createLiteral(arena, Node.NullLiteral, token); - if (eatToken(it, .Keyword_promise)) |token| { - const node = try arena.create(Node.PromiseType); - node.* = Node.PromiseType{ - .base = Node{ .id = .PromiseType }, - .promise_token = token, + if (eatToken(it, .Keyword_anyframe)) |token| { + const node = try arena.create(Node.AnyFrameType); + node.* = Node.AnyFrameType{ + .base = Node{ .id = .AnyFrameType }, + .anyframe_token = token, .result = null, }; return &node.base; @@ -2194,7 +2194,7 @@ fn parsePrefixOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node { /// PrefixTypeOp /// <- QUESTIONMARK -/// / KEYWORD_promise MINUSRARROW +/// / KEYWORD_anyframe MINUSRARROW /// / ArrayTypeStart (ByteAlign / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)* /// / PtrTypeStart (KEYWORD_align LPAREN Expr (COLON INTEGER COLON INTEGER)? 
RPAREN / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)* fn parsePrefixTypeOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node { @@ -2209,20 +2209,20 @@ fn parsePrefixTypeOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node return &node.base; } - // TODO: Returning a PromiseType instead of PrefixOp makes casting and setting .rhs or + // TODO: Returning a AnyFrameType instead of PrefixOp makes casting and setting .rhs or // .return_type more difficult for the caller (see parsePrefixOpExpr helper). - // Consider making the PromiseType a member of PrefixOp and add a - // PrefixOp.PromiseType variant? - if (eatToken(it, .Keyword_promise)) |token| { + // Consider making the AnyFrameType a member of PrefixOp and add a + // PrefixOp.AnyFrameType variant? + if (eatToken(it, .Keyword_anyframe)) |token| { const arrow = eatToken(it, .Arrow) orelse { putBackToken(it, token); return null; }; - const node = try arena.create(Node.PromiseType); - node.* = Node.PromiseType{ - .base = Node{ .id = .PromiseType }, - .promise_token = token, - .result = Node.PromiseType.Result{ + const node = try arena.create(Node.AnyFrameType); + node.* = Node.AnyFrameType{ + .base = Node{ .id = .AnyFrameType }, + .anyframe_token = token, + .result = Node.AnyFrameType.Result{ .arrow_token = arrow, .return_type = undefined, // set by caller }, @@ -2903,8 +2903,8 @@ fn parsePrefixOpExpr( rightmost_op = rhs; } else break; }, - .PromiseType => { - const prom = rightmost_op.cast(Node.PromiseType).?; + .AnyFrameType => { + const prom = rightmost_op.cast(Node.AnyFrameType).?; if (try opParseFn(arena, it, tree)) |rhs| { prom.result.?.return_type = rhs; rightmost_op = rhs; @@ -2922,8 +2922,8 @@ fn parsePrefixOpExpr( .InvalidToken = AstError.InvalidToken{ .token = it.index }, }); }, - .PromiseType => { - const prom = rightmost_op.cast(Node.PromiseType).?; + .AnyFrameType => { + const prom = rightmost_op.cast(Node.AnyFrameType).?; prom.result.?.return_type = try expectNode(arena, it, 
tree, childParseFn, AstError{ .InvalidToken = AstError.InvalidToken{ .token = it.index }, }); diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig index f6f3363bf6..28cde6de01 100644 --- a/std/zig/parser_test.zig +++ b/std/zig/parser_test.zig @@ -2111,12 +2111,12 @@ test "zig fmt: coroutines" { \\ suspend; \\ x += 1; \\ suspend; - \\ const p: promise->void = async simpleAsyncFn() catch unreachable; + \\ const p: anyframe->void = async simpleAsyncFn() catch unreachable; \\ await p; \\} \\ \\test "coroutine suspend, resume, cancel" { - \\ const p: promise = try async testAsyncSeq(); + \\ const p: anyframe = try async testAsyncSeq(); \\ resume p; \\ cancel p; \\} diff --git a/std/zig/render.zig b/std/zig/render.zig index b85c11c6ac..c6bb51267d 100644 --- a/std/zig/render.zig +++ b/std/zig/render.zig @@ -1205,15 +1205,15 @@ fn renderExpression( } }, - ast.Node.Id.PromiseType => { - const promise_type = @fieldParentPtr(ast.Node.PromiseType, "base", base); + ast.Node.Id.AnyFrameType => { + const anyframe_type = @fieldParentPtr(ast.Node.AnyFrameType, "base", base); - if (promise_type.result) |result| { - try renderToken(tree, stream, promise_type.promise_token, indent, start_col, Space.None); // promise + if (anyframe_type.result) |result| { + try renderToken(tree, stream, anyframe_type.anyframe_token, indent, start_col, Space.None); // anyframe try renderToken(tree, stream, result.arrow_token, indent, start_col, Space.None); // -> return renderExpression(allocator, stream, tree, indent, start_col, result.return_type, space); } else { - return renderToken(tree, stream, promise_type.promise_token, indent, start_col, space); // promise + return renderToken(tree, stream, anyframe_type.anyframe_token, indent, start_col, space); // anyframe } }, diff --git a/std/zig/tokenizer.zig b/std/zig/tokenizer.zig index 4539e1e5b2..9de20c39f2 100644 --- a/std/zig/tokenizer.zig +++ b/std/zig/tokenizer.zig @@ -15,6 +15,7 @@ pub const Token = struct { Keyword{ .bytes = "align", 
.id = Id.Keyword_align }, Keyword{ .bytes = "allowzero", .id = Id.Keyword_allowzero }, Keyword{ .bytes = "and", .id = Id.Keyword_and }, + Keyword{ .bytes = "anyframe", .id = Id.Keyword_anyframe }, Keyword{ .bytes = "asm", .id = Id.Keyword_asm }, Keyword{ .bytes = "async", .id = Id.Keyword_async }, Keyword{ .bytes = "await", .id = Id.Keyword_await }, @@ -42,7 +43,6 @@ pub const Token = struct { Keyword{ .bytes = "or", .id = Id.Keyword_or }, Keyword{ .bytes = "orelse", .id = Id.Keyword_orelse }, Keyword{ .bytes = "packed", .id = Id.Keyword_packed }, - Keyword{ .bytes = "promise", .id = Id.Keyword_promise }, Keyword{ .bytes = "pub", .id = Id.Keyword_pub }, Keyword{ .bytes = "resume", .id = Id.Keyword_resume }, Keyword{ .bytes = "return", .id = Id.Keyword_return }, @@ -174,7 +174,7 @@ pub const Token = struct { Keyword_or, Keyword_orelse, Keyword_packed, - Keyword_promise, + Keyword_anyframe, Keyword_pub, Keyword_resume, Keyword_return, diff --git a/test/stage1/behavior/type_info.zig b/test/stage1/behavior/type_info.zig index 6a51015124..b86ba27c13 100644 --- a/test/stage1/behavior/type_info.zig +++ b/test/stage1/behavior/type_info.zig @@ -177,11 +177,11 @@ fn testUnion() void { expect(TypeId(typeinfo_info) == TypeId.Union); expect(typeinfo_info.Union.layout == TypeInfo.ContainerLayout.Auto); expect(typeinfo_info.Union.tag_type.? 
== TypeId); - expect(typeinfo_info.Union.fields.len == 25); + expect(typeinfo_info.Union.fields.len == 26); expect(typeinfo_info.Union.fields[4].enum_field != null); expect(typeinfo_info.Union.fields[4].enum_field.?.value == 4); expect(typeinfo_info.Union.fields[4].field_type == @typeOf(@typeInfo(u8).Int)); - expect(typeinfo_info.Union.decls.len == 20); + expect(typeinfo_info.Union.decls.len == 21); const TestNoTagUnion = union { Foo: void, @@ -280,6 +280,25 @@ fn testVector() void { expect(vec_info.Vector.child == i32); } +test "type info: anyframe and anyframe->T" { + testAnyFrame(); + comptime testAnyFrame(); +} + +fn testAnyFrame() void { + { + const anyframe_info = @typeInfo(anyframe->i32); + expect(TypeId(anyframe_info) == .AnyFrame); + expect(anyframe_info.AnyFrame.child.? == i32); + } + + { + const anyframe_info = @typeInfo(anyframe); + expect(TypeId(anyframe_info) == .AnyFrame); + expect(anyframe_info.AnyFrame.child == null); + } +} + test "type info: optional field unwrapping" { const Struct = struct { cdOffset: u32, From dbdc4d62d08c94a967b36afdfa57b126775a4eee Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 29 Jul 2019 19:32:49 -0400 Subject: [PATCH 022/125] improve support for anyframe and anyframe->T * add implicit cast from `*@Frame(func)` to `anyframe->T` or `anyframe`. * add implicit cast from `anyframe->T` to `anyframe`. * `resume` works on `anyframe->T` and `anyframe` types. 
--- src/all_types.hpp | 2 +- src/analyze.cpp | 14 ++---- src/codegen.cpp | 25 +++++++--- src/ir.cpp | 76 ++++++++++++++++++++++++++--- test/stage1/behavior/coroutines.zig | 9 +++- 5 files changed, 97 insertions(+), 29 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 1096feade0..cd64c149d9 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1726,6 +1726,7 @@ struct CodeGen { LLVMValueRef err_name_table; LLVMValueRef safety_crash_err_fn; LLVMValueRef return_err_fn; + LLVMTypeRef async_fn_llvm_type; // reminder: hash tables must be initialized before use HashMap import_table; @@ -1793,7 +1794,6 @@ struct CodeGen { ZigType *entry_global_error_set; ZigType *entry_arg_tuple; ZigType *entry_enum_literal; - ZigType *entry_frame_header; ZigType *entry_any_frame; } builtin_types; ZigType *align_amt_type; diff --git a/src/analyze.cpp b/src/analyze.cpp index e47be8f14c..c117409445 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -7348,19 +7348,13 @@ static void resolve_llvm_types_fn_type(CodeGen *g, ZigType *fn_type) { if (is_async) { fn_type->data.fn.gen_param_info = allocate(1); - ZigType *frame_type = g->builtin_types.entry_frame_header; - Error err; - if ((err = type_resolve(g, frame_type, ResolveStatusSizeKnown))) { - zig_unreachable(); - } - ZigType *ptr_type = get_pointer_to_type(g, frame_type, false); - gen_param_types.append(get_llvm_type(g, ptr_type)); - param_di_types.append(get_llvm_di_type(g, ptr_type)); + ZigType *frame_type = get_any_frame_type(g, fn_type_id->return_type); + gen_param_types.append(get_llvm_type(g, frame_type)); + param_di_types.append(get_llvm_di_type(g, frame_type)); fn_type->data.fn.gen_param_info[0].src_index = 0; fn_type->data.fn.gen_param_info[0].gen_index = 0; - fn_type->data.fn.gen_param_info[0].type = ptr_type; - + fn_type->data.fn.gen_param_info[0].type = frame_type; } else { fn_type->data.fn.gen_param_info = allocate(fn_type_id->param_count); for (size_t i = 0; i < fn_type_id->param_count; i += 1) { 
diff --git a/src/codegen.cpp b/src/codegen.cpp index c666317c17..0ee902b537 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -4902,14 +4902,28 @@ static LLVMValueRef ir_render_suspend_br(CodeGen *g, IrExecutable *executable, return nullptr; } +static LLVMTypeRef async_fn_llvm_type(CodeGen *g) { + if (g->async_fn_llvm_type != nullptr) + return g->async_fn_llvm_type; + + ZigType *anyframe_type = get_any_frame_type(g, nullptr); + LLVMTypeRef param_type = get_llvm_type(g, anyframe_type); + LLVMTypeRef return_type = LLVMVoidType(); + LLVMTypeRef fn_type = LLVMFunctionType(return_type, &param_type, 1, false); + g->async_fn_llvm_type = LLVMPointerType(fn_type, 0); + + return g->async_fn_llvm_type; +} + static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, IrInstructionCoroResume *instruction) { LLVMValueRef frame = ir_llvm_value(g, instruction->frame); ZigType *frame_type = instruction->frame->value.type; - assert(frame_type->id == ZigTypeIdCoroFrame); - ZigFn *fn = frame_type->data.frame.fn; - LLVMValueRef fn_val = fn_llvm_value(g, fn); + assert(frame_type->id == ZigTypeIdAnyFrame); + LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, frame, coro_fn_ptr_index, ""); + LLVMValueRef uncasted_fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, ""); + LLVMValueRef fn_val = LLVMBuildIntToPtr(g->builder, uncasted_fn_val, async_fn_llvm_type(g), ""); ZigLLVMBuildCall(g->builder, fn_val, &frame, 1, LLVMFastCallConv, ZigLLVM_FnInlineAuto, ""); return nullptr; } @@ -6746,11 +6760,6 @@ static void define_builtin_types(CodeGen *g) { g->primitive_type_table.put(&entry->name, entry); } - { - const char *field_names[] = {"resume_index"}; - ZigType *field_types[] = {g->builtin_types.entry_usize}; - g->builtin_types.entry_frame_header = get_struct_type(g, "(frame header)", field_names, field_types, 1); - } } static BuiltinFnEntry *create_builtin_fn(CodeGen *g, BuiltinFnId id, const char *name, size_t count) { diff --git a/src/ir.cpp b/src/ir.cpp index 
e6d987a2ee..98a8f1061e 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -7764,7 +7764,7 @@ static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node) static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) { assert(node->type == NodeTypeResume); - IrInstruction *target_inst = ir_gen_node(irb, node->data.resume_expr.expr, scope); + IrInstruction *target_inst = ir_gen_node_extra(irb, node->data.resume_expr.expr, scope, LValPtr, nullptr); if (target_inst == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; @@ -10882,6 +10882,33 @@ static IrInstruction *ir_analyze_err_set_cast(IrAnalyze *ira, IrInstruction *sou return result; } +static IrInstruction *ir_analyze_frame_ptr_to_anyframe(IrAnalyze *ira, IrInstruction *source_instr, + IrInstruction *value, ZigType *wanted_type) +{ + if (instr_is_comptime(value)) { + zig_panic("TODO comptime frame pointer"); + } + + IrInstruction *result = ir_build_cast(&ira->new_irb, source_instr->scope, source_instr->source_node, + wanted_type, value, CastOpBitCast); + result->value.type = wanted_type; + return result; +} + +static IrInstruction *ir_analyze_anyframe_to_anyframe(IrAnalyze *ira, IrInstruction *source_instr, + IrInstruction *value, ZigType *wanted_type) +{ + if (instr_is_comptime(value)) { + zig_panic("TODO comptime anyframe->T to anyframe"); + } + + IrInstruction *result = ir_build_cast(&ira->new_irb, source_instr->scope, source_instr->source_node, + wanted_type, value, CastOpBitCast); + result->value.type = wanted_type; + return result; +} + + static IrInstruction *ir_analyze_err_wrap_code(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value, ZigType *wanted_type, ResultLoc *result_loc) { @@ -11978,6 +12005,29 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst } } + // *@Frame(func) to anyframe->T or anyframe + if (actual_type->id == ZigTypeIdPointer && actual_type->data.pointer.ptr_len == PtrLenSingle && + 
actual_type->data.pointer.child_type->id == ZigTypeIdCoroFrame && wanted_type->id == ZigTypeIdAnyFrame) + { + bool ok = true; + if (wanted_type->data.any_frame.result_type != nullptr) { + ZigFn *fn = actual_type->data.pointer.child_type->data.frame.fn; + ZigType *fn_return_type = fn->type_entry->data.fn.fn_type_id.return_type; + if (wanted_type->data.any_frame.result_type != fn_return_type) { + ok = false; + } + } + if (ok) { + return ir_analyze_frame_ptr_to_anyframe(ira, source_instr, value, wanted_type); + } + } + + // anyframe->T to anyframe + if (actual_type->id == ZigTypeIdAnyFrame && actual_type->data.any_frame.result_type != nullptr && + wanted_type->id == ZigTypeIdAnyFrame && wanted_type->data.any_frame.result_type == nullptr) + { + return ir_analyze_anyframe_to_anyframe(ira, source_instr, value, wanted_type); + } // cast from null literal to maybe type if (wanted_type->id == ZigTypeIdOptional && @@ -24323,17 +24373,27 @@ static IrInstruction *ir_analyze_instruction_suspend_br(IrAnalyze *ira, IrInstru } static IrInstruction *ir_analyze_instruction_coro_resume(IrAnalyze *ira, IrInstructionCoroResume *instruction) { - IrInstruction *frame = instruction->frame->child; - if (type_is_invalid(frame->value.type)) + IrInstruction *frame_ptr = instruction->frame->child; + if (type_is_invalid(frame_ptr->value.type)) return ira->codegen->invalid_instruction; - if (frame->value.type->id != ZigTypeIdCoroFrame) { - ir_add_error(ira, instruction->frame, - buf_sprintf("expected frame, found '%s'", buf_ptr(&frame->value.type->name))); - return ira->codegen->invalid_instruction; + IrInstruction *frame; + if (frame_ptr->value.type->id == ZigTypeIdPointer && + frame_ptr->value.type->data.pointer.ptr_len == PtrLenSingle && + frame_ptr->value.type->data.pointer.is_const && + frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdAnyFrame) + { + frame = ir_get_deref(ira, &instruction->base, frame_ptr, nullptr); + } else { + frame = frame_ptr; } - return 
ir_build_coro_resume(&ira->new_irb, instruction->base.scope, instruction->base.source_node, frame); + ZigType *any_frame_type = get_any_frame_type(ira->codegen, nullptr); + IrInstruction *casted_frame = ir_implicit_cast(ira, frame, any_frame_type); + if (type_is_invalid(casted_frame->value.type)) + return ira->codegen->invalid_instruction; + + return ir_build_coro_resume(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_frame); } static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction *instruction) { diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index 7af04d37c9..fddc912e77 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -5,15 +5,20 @@ const expect = std.testing.expect; var global_x: i32 = 1; test "simple coroutine suspend and resume" { - const p = async simpleAsyncFn(); + const frame = async simpleAsyncFn(); expect(global_x == 2); - resume p; + resume frame; expect(global_x == 3); + const af: anyframe->void = &frame; + resume frame; + expect(global_x == 4); } fn simpleAsyncFn() void { global_x += 1; suspend; global_x += 1; + suspend; + global_x += 1; } var global_y: i32 = 1; From e7ae4e4645a46a216c5913e2f9120cb02c10008c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 1 Aug 2019 16:08:52 -0400 Subject: [PATCH 023/125] reimplement async with function splitting instead of switch --- BRANCH_TODO | 4 +- src/all_types.hpp | 20 +- src/analyze.cpp | 161 +++++++++--- src/codegen.cpp | 390 +++++++++++++++------------- src/ir.cpp | 8 +- test/stage1/behavior/coroutines.zig | 98 +++---- 6 files changed, 409 insertions(+), 272 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index e2c4fec436..7c19147aa8 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,4 +1,5 @@ - * make the anyframe type and anyframe->T type work with resume + * fix @frameSize + * fix calling an inferred async function * await * await of a non async function * 
await in single-threaded mode @@ -10,5 +11,4 @@ * implicit cast of normal function to async function should be allowed when it is inferred to be async * go over the commented out tests * revive std.event.Loop - * reimplement with function splitting rather than switch * @typeInfo for @Frame(func) diff --git a/src/all_types.hpp b/src/all_types.hpp index cd64c149d9..b5b8b06259 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1726,7 +1726,7 @@ struct CodeGen { LLVMValueRef err_name_table; LLVMValueRef safety_crash_err_fn; LLVMValueRef return_err_fn; - LLVMTypeRef async_fn_llvm_type; + LLVMTypeRef anyframe_fn_type; // reminder: hash tables must be initialized before use HashMap import_table; @@ -1795,7 +1795,9 @@ struct CodeGen { ZigType *entry_arg_tuple; ZigType *entry_enum_literal; ZigType *entry_any_frame; + ZigType *entry_async_fn; } builtin_types; + ZigType *align_amt_type; ZigType *stack_trace_type; ZigType *ptr_to_stack_trace_type; @@ -1934,6 +1936,7 @@ struct ZigVar { ZigType *var_type; LLVMValueRef value_ref; IrInstruction *is_comptime; + IrInstruction *ptr_instruction; // which node is the declaration of the variable AstNode *decl_node; ZigLLVMDILocalVariable *di_loc_var; @@ -2159,8 +2162,8 @@ struct IrBasicBlock { size_t ref_count; // index into the basic block list size_t index; - // for coroutines, the resume_index which corresponds to this block - size_t resume_index; + // for async functions, the split function which corresponds to this block + LLVMValueRef split_llvm_fn; LLVMBasicBlockRef llvm_block; LLVMBasicBlockRef llvm_exit_block; // The instruction that referenced this basic block and caused us to @@ -3686,13 +3689,9 @@ static const size_t maybe_null_index = 1; static const size_t err_union_err_index = 0; static const size_t err_union_payload_index = 1; -static const size_t coro_resume_index_index = 0; -static const size_t coro_fn_ptr_index = 1; -static const size_t coro_awaiter_index = 2; -static const size_t coro_arg_start = 3; - -// one 
for the Entry block, resume blocks are indexed after that. -static const size_t coro_extra_resume_block_count = 1; +static const size_t coro_fn_ptr_index = 0; +static const size_t coro_awaiter_index = 1; +static const size_t coro_arg_start = 2; // TODO call graph analysis to find out what this number needs to be for every function // MUST BE A POWER OF TWO. @@ -3719,6 +3718,7 @@ enum FnWalkId { struct FnWalkAttrs { ZigFn *fn; + LLVMValueRef llvm_fn; unsigned gen_i; }; diff --git a/src/analyze.cpp b/src/analyze.cpp index c117409445..5e22358423 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5135,6 +5135,19 @@ Error ensure_complete_type(CodeGen *g, ZigType *type_entry) { return type_resolve(g, type_entry, ResolveStatusSizeKnown); } +static ZigType *get_async_fn_type(CodeGen *g, ZigType *orig_fn_type) { + if (orig_fn_type->data.fn.fn_type_id.cc == CallingConventionAsync) + return orig_fn_type; + + ZigType *fn_type = allocate_nonzero(1); + *fn_type = *orig_fn_type; + fn_type->data.fn.fn_type_id.cc = CallingConventionAsync; + fn_type->llvm_type = nullptr; + fn_type->llvm_di_type = nullptr; + + return fn_type; +} + static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { if (frame_type->data.frame.locals_struct != nullptr) return ErrorNone; @@ -5156,6 +5169,7 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { buf_ptr(&frame_type->name))); return ErrorSemanticAnalyzeFail; } + ZigType *fn_type = get_async_fn_type(g, fn->type_entry); for (size_t i = 0; i < fn->call_list.length; i += 1) { IrInstructionCallGen *call = fn->call_list.at(i); @@ -5173,7 +5187,7 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { IrBasicBlock *new_resume_block = allocate(1); new_resume_block->name_hint = "CallResume"; - new_resume_block->resume_index = fn->resume_blocks.length + coro_extra_resume_block_count; + new_resume_block->split_llvm_fn = reinterpret_cast(0x1); fn->resume_blocks.append(new_resume_block); call->resume_block = 
new_resume_block; fn->analyzed_executable.basic_block_list.append(new_resume_block); @@ -5194,16 +5208,13 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { ZigList field_types = {}; ZigList field_names = {}; - field_names.append("resume_index"); - field_types.append(g->builtin_types.entry_usize); - field_names.append("fn_ptr"); - field_types.append(fn->type_entry); + field_types.append(fn_type); field_names.append("awaiter"); field_types.append(g->builtin_types.entry_usize); - FnTypeId *fn_type_id = &fn->type_entry->data.fn.fn_type_id; + FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; ZigType *ptr_return_type = get_pointer_to_type(g, fn_type_id->return_type, false); field_names.append("result_ptr"); field_types.append(ptr_return_type); @@ -6686,7 +6697,9 @@ static void resolve_llvm_types_slice(CodeGen *g, ZigType *type, ResolveStatus wa type->data.structure.resolve_status = ResolveStatusLLVMFull; } -static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveStatus wanted_resolve_status) { +static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveStatus wanted_resolve_status, + ZigType *coro_frame_type) +{ assert(struct_type->id == ZigTypeIdStruct); assert(struct_type->data.structure.resolve_status != ResolveStatusInvalid); assert(struct_type->data.structure.resolve_status >= ResolveStatusSizeKnown); @@ -6774,7 +6787,16 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS } packed_bits_offset = next_packed_bits_offset; } else { - element_types[gen_field_index] = get_llvm_type(g, field_type); + LLVMTypeRef llvm_type; + if (i == 0 && coro_frame_type != nullptr) { + assert(coro_frame_type->id == ZigTypeIdCoroFrame); + assert(field_type->id == ZigTypeIdFn); + resolve_llvm_types_fn(g, coro_frame_type->data.frame.fn); + llvm_type = LLVMPointerType(coro_frame_type->data.frame.fn->raw_type_ref, 0); + } else { + llvm_type = get_llvm_type(g, field_type); + } + 
element_types[gen_field_index] = llvm_type; gen_field_index += 1; } @@ -7456,7 +7478,7 @@ static void resolve_llvm_types_anyerror(CodeGen *g) { } static void resolve_llvm_types_coro_frame(CodeGen *g, ZigType *frame_type, ResolveStatus wanted_resolve_status) { - resolve_llvm_types_struct(g, frame_type->data.frame.locals_struct, wanted_resolve_status); + resolve_llvm_types_struct(g, frame_type->data.frame.locals_struct, wanted_resolve_status, frame_type); frame_type->llvm_type = frame_type->data.frame.locals_struct->llvm_type; frame_type->llvm_di_type = frame_type->data.frame.locals_struct->llvm_di_type; } @@ -7464,35 +7486,112 @@ static void resolve_llvm_types_coro_frame(CodeGen *g, ZigType *frame_type, Resol static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, ResolveStatus wanted_resolve_status) { if (any_frame_type->llvm_di_type != nullptr) return; - ZigType *result_type = any_frame_type->data.any_frame.result_type; Buf *name = buf_sprintf("(%s header)", buf_ptr(&any_frame_type->name)); + LLVMTypeRef frame_header_type = LLVMStructCreateNamed(LLVMGetGlobalContext(), buf_ptr(name)); + any_frame_type->llvm_type = LLVMPointerType(frame_header_type, 0); - ZigType *frame_header_type; + unsigned dwarf_kind = ZigLLVMTag_DW_structure_type(); + ZigLLVMDIFile *di_file = nullptr; + ZigLLVMDIScope *di_scope = ZigLLVMCompileUnitToScope(g->compile_unit); + unsigned line = 0; + ZigLLVMDIType *frame_header_di_type = ZigLLVMCreateReplaceableCompositeType(g->dbuilder, + dwarf_kind, buf_ptr(name), di_scope, di_file, line); + any_frame_type->llvm_di_type = ZigLLVMCreateDebugPointerType(g->dbuilder, frame_header_di_type, + 8*g->pointer_size_bytes, 8*g->builtin_types.entry_usize->abi_align, buf_ptr(&any_frame_type->name)); + + LLVMTypeRef llvm_void = LLVMVoidType(); + LLVMTypeRef fn_type = LLVMFunctionType(llvm_void, &any_frame_type->llvm_type, 1, false); + LLVMTypeRef usize_type_ref = get_llvm_type(g, g->builtin_types.entry_usize); + ZigLLVMDIType 
*usize_di_type = get_llvm_di_type(g, g->builtin_types.entry_usize); + ZigLLVMDIScope *compile_unit_scope = ZigLLVMCompileUnitToScope(g->compile_unit); + + ZigType *result_type = any_frame_type->data.any_frame.result_type; if (result_type == nullptr || !type_has_bits(result_type)) { - const char *field_names[] = {"resume_index", "fn_ptr", "awaiter"}; - ZigType *field_types[] = { - g->builtin_types.entry_usize, - g->builtin_types.entry_usize, - g->builtin_types.entry_usize, + LLVMTypeRef ptr_result_type = LLVMPointerType(fn_type, 0); + if (result_type == nullptr) { + g->anyframe_fn_type = ptr_result_type; + } + LLVMTypeRef field_types[] = { + ptr_result_type, // fn_ptr + usize_type_ref, // awaiter }; - frame_header_type = get_struct_type(g, buf_ptr(name), field_names, field_types, 3); + LLVMStructSetBody(frame_header_type, field_types, 2, false); + + ZigLLVMDIType *di_element_types[] = { + ZigLLVMCreateDebugMemberType(g->dbuilder, + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "fn_ptr", + di_file, line, + 8*LLVMABISizeOfType(g->target_data_ref, field_types[0]), + 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[0]), + 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 0), + ZigLLVM_DIFlags_Zero, usize_di_type), + ZigLLVMCreateDebugMemberType(g->dbuilder, + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "awaiter", + di_file, line, + 8*LLVMABISizeOfType(g->target_data_ref, field_types[1]), + 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[1]), + 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 1), + ZigLLVM_DIFlags_Zero, usize_di_type), + }; + ZigLLVMDIType *replacement_di_type = ZigLLVMCreateDebugStructType(g->dbuilder, + compile_unit_scope, buf_ptr(name), + di_file, line, + 8*LLVMABISizeOfType(g->target_data_ref, frame_header_type), + 8*LLVMABIAlignmentOfType(g->target_data_ref, frame_header_type), + ZigLLVM_DIFlags_Zero, + nullptr, di_element_types, 2, 0, nullptr, ""); + + ZigLLVMReplaceTemporary(g->dbuilder, 
frame_header_di_type, replacement_di_type); } else { ZigType *ptr_result_type = get_pointer_to_type(g, result_type, false); - - const char *field_names[] = {"resume_index", "fn_ptr", "awaiter", "result_ptr", "result"}; - ZigType *field_types[] = { - g->builtin_types.entry_usize, - g->builtin_types.entry_usize, - g->builtin_types.entry_usize, - ptr_result_type, - result_type, + LLVMTypeRef field_types[] = { + LLVMPointerType(fn_type, 0), // fn_ptr + usize_type_ref, // awaiter + get_llvm_type(g, ptr_result_type), // result_ptr + get_llvm_type(g, result_type), // result }; - frame_header_type = get_struct_type(g, buf_ptr(name), field_names, field_types, 5); - } + LLVMStructSetBody(frame_header_type, field_types, 4, false); - ZigType *ptr_type = get_pointer_to_type(g, frame_header_type, false); - any_frame_type->llvm_type = get_llvm_type(g, ptr_type); - any_frame_type->llvm_di_type = get_llvm_di_type(g, ptr_type); + ZigLLVMDIType *di_element_types[] = { + ZigLLVMCreateDebugMemberType(g->dbuilder, + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "fn_ptr", + di_file, line, + 8*LLVMABISizeOfType(g->target_data_ref, field_types[0]), + 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[0]), + 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 0), + ZigLLVM_DIFlags_Zero, usize_di_type), + ZigLLVMCreateDebugMemberType(g->dbuilder, + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "awaiter", + di_file, line, + 8*LLVMABISizeOfType(g->target_data_ref, field_types[1]), + 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[1]), + 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 1), + ZigLLVM_DIFlags_Zero, usize_di_type), + ZigLLVMCreateDebugMemberType(g->dbuilder, + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result_ptr", + di_file, line, + 8*LLVMABISizeOfType(g->target_data_ref, field_types[2]), + 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[2]), + 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 2), + 
ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_result_type)), + ZigLLVMCreateDebugMemberType(g->dbuilder, + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result", + di_file, line, + 8*LLVMABISizeOfType(g->target_data_ref, field_types[3]), + 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[3]), + 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 3), + ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, result_type)), + }; + ZigLLVMDIType *replacement_di_type = ZigLLVMCreateDebugStructType(g->dbuilder, + compile_unit_scope, buf_ptr(name), + di_file, line, + 8*LLVMABISizeOfType(g->target_data_ref, frame_header_type), + 8*LLVMABIAlignmentOfType(g->target_data_ref, frame_header_type), + ZigLLVM_DIFlags_Zero, + nullptr, di_element_types, 2, 0, nullptr, ""); + + ZigLLVMReplaceTemporary(g->dbuilder, frame_header_di_type, replacement_di_type); + } } static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_resolve_status) { @@ -7520,7 +7619,7 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r if (type->data.structure.is_slice) return resolve_llvm_types_slice(g, type, wanted_resolve_status); else - return resolve_llvm_types_struct(g, type, wanted_resolve_status); + return resolve_llvm_types_struct(g, type, wanted_resolve_status, nullptr); case ZigTypeIdEnum: return resolve_llvm_types_enum(g, type); case ZigTypeIdUnion: diff --git a/src/codegen.cpp b/src/codegen.cpp index 0ee902b537..d955736083 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -343,27 +343,24 @@ static bool cc_want_sret_attr(CallingConvention cc) { zig_unreachable(); } -static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) { - if (fn_table_entry->llvm_value) - return fn_table_entry->llvm_value; - - Buf *unmangled_name = &fn_table_entry->symbol_name; +static LLVMValueRef make_fn_llvm_value(CodeGen *g, ZigFn *fn) { + Buf *unmangled_name = &fn->symbol_name; Buf *symbol_name; GlobalLinkageId linkage; - if 
(fn_table_entry->body_node == nullptr) { + if (fn->body_node == nullptr) { symbol_name = unmangled_name; linkage = GlobalLinkageIdStrong; - } else if (fn_table_entry->export_list.length == 0) { + } else if (fn->export_list.length == 0) { symbol_name = get_mangled_name(g, unmangled_name, false); linkage = GlobalLinkageIdInternal; } else { - GlobalExport *fn_export = &fn_table_entry->export_list.items[0]; + GlobalExport *fn_export = &fn->export_list.items[0]; symbol_name = &fn_export->name; linkage = fn_export->linkage; } bool external_linkage = linkage != GlobalLinkageIdInternal; - CallingConvention cc = fn_table_entry->type_entry->data.fn.fn_type_id.cc; + CallingConvention cc = fn->type_entry->data.fn.fn_type_id.cc; if (cc == CallingConventionStdcall && external_linkage && g->zig_target->arch == ZigLLVM_x86) { @@ -371,28 +368,28 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) { symbol_name = buf_sprintf("\x01_%s", buf_ptr(symbol_name)); } - bool is_async = fn_is_async(fn_table_entry); + bool is_async = fn_is_async(fn); - ZigType *fn_type = fn_table_entry->type_entry; + ZigType *fn_type = fn->type_entry; // Make the raw_type_ref populated - resolve_llvm_types_fn(g, fn_table_entry); - LLVMTypeRef fn_llvm_type = fn_table_entry->raw_type_ref; - if (fn_table_entry->body_node == nullptr) { + resolve_llvm_types_fn(g, fn); + LLVMTypeRef fn_llvm_type = fn->raw_type_ref; + LLVMValueRef llvm_fn = nullptr; + if (fn->body_node == nullptr) { LLVMValueRef existing_llvm_fn = LLVMGetNamedFunction(g->module, buf_ptr(symbol_name)); if (existing_llvm_fn) { - fn_table_entry->llvm_value = LLVMConstBitCast(existing_llvm_fn, LLVMPointerType(fn_llvm_type, 0)); - return fn_table_entry->llvm_value; + return LLVMConstBitCast(existing_llvm_fn, LLVMPointerType(fn_llvm_type, 0)); } else { auto entry = g->exported_symbol_names.maybe_get(symbol_name); if (entry == nullptr) { - fn_table_entry->llvm_value = LLVMAddFunction(g->module, buf_ptr(symbol_name), fn_llvm_type); + 
llvm_fn = LLVMAddFunction(g->module, buf_ptr(symbol_name), fn_llvm_type); if (target_is_wasm(g->zig_target)) { - assert(fn_table_entry->proto_node->type == NodeTypeFnProto); - AstNodeFnProto *fn_proto = &fn_table_entry->proto_node->data.fn_proto; + assert(fn->proto_node->type == NodeTypeFnProto); + AstNodeFnProto *fn_proto = &fn->proto_node->data.fn_proto; if (fn_proto-> is_extern && fn_proto->lib_name != nullptr ) { - addLLVMFnAttrStr(fn_table_entry->llvm_value, "wasm-import-module", buf_ptr(fn_proto->lib_name)); + addLLVMFnAttrStr(llvm_fn, "wasm-import-module", buf_ptr(fn_proto->lib_name)); } } } else { @@ -402,101 +399,98 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) { resolve_llvm_types_fn(g, tld_fn->fn_entry); tld_fn->fn_entry->llvm_value = LLVMAddFunction(g->module, buf_ptr(symbol_name), tld_fn->fn_entry->raw_type_ref); - fn_table_entry->llvm_value = LLVMConstBitCast(tld_fn->fn_entry->llvm_value, - LLVMPointerType(fn_llvm_type, 0)); - return fn_table_entry->llvm_value; + llvm_fn = LLVMConstBitCast(tld_fn->fn_entry->llvm_value, LLVMPointerType(fn_llvm_type, 0)); + return llvm_fn; } } } else { - if (fn_table_entry->llvm_value == nullptr) { - fn_table_entry->llvm_value = LLVMAddFunction(g->module, buf_ptr(symbol_name), fn_llvm_type); + if (llvm_fn == nullptr) { + llvm_fn = LLVMAddFunction(g->module, buf_ptr(symbol_name), fn_llvm_type); } - for (size_t i = 1; i < fn_table_entry->export_list.length; i += 1) { - GlobalExport *fn_export = &fn_table_entry->export_list.items[i]; - LLVMAddAlias(g->module, LLVMTypeOf(fn_table_entry->llvm_value), - fn_table_entry->llvm_value, buf_ptr(&fn_export->name)); + for (size_t i = 1; i < fn->export_list.length; i += 1) { + GlobalExport *fn_export = &fn->export_list.items[i]; + LLVMAddAlias(g->module, LLVMTypeOf(llvm_fn), llvm_fn, buf_ptr(&fn_export->name)); } } - fn_table_entry->llvm_name = strdup(LLVMGetValueName(fn_table_entry->llvm_value)); - switch (fn_table_entry->fn_inline) { + switch 
(fn->fn_inline) { case FnInlineAlways: - addLLVMFnAttr(fn_table_entry->llvm_value, "alwaysinline"); - g->inline_fns.append(fn_table_entry); + addLLVMFnAttr(llvm_fn, "alwaysinline"); + g->inline_fns.append(fn); break; case FnInlineNever: - addLLVMFnAttr(fn_table_entry->llvm_value, "noinline"); + addLLVMFnAttr(llvm_fn, "noinline"); break; case FnInlineAuto: - if (fn_table_entry->alignstack_value != 0) { - addLLVMFnAttr(fn_table_entry->llvm_value, "noinline"); + if (fn->alignstack_value != 0) { + addLLVMFnAttr(llvm_fn, "noinline"); } break; } if (cc == CallingConventionNaked) { - addLLVMFnAttr(fn_table_entry->llvm_value, "naked"); + addLLVMFnAttr(llvm_fn, "naked"); } else { - LLVMSetFunctionCallConv(fn_table_entry->llvm_value, get_llvm_cc(g, fn_type->data.fn.fn_type_id.cc)); + LLVMSetFunctionCallConv(llvm_fn, get_llvm_cc(g, fn_type->data.fn.fn_type_id.cc)); } if (cc == CallingConventionAsync) { - addLLVMFnAttr(fn_table_entry->llvm_value, "optnone"); - addLLVMFnAttr(fn_table_entry->llvm_value, "noinline"); + addLLVMFnAttr(llvm_fn, "optnone"); + addLLVMFnAttr(llvm_fn, "noinline"); } - bool want_cold = fn_table_entry->is_cold || cc == CallingConventionCold; + bool want_cold = fn->is_cold || cc == CallingConventionCold; if (want_cold) { - ZigLLVMAddFunctionAttrCold(fn_table_entry->llvm_value); + ZigLLVMAddFunctionAttrCold(llvm_fn); } - LLVMSetLinkage(fn_table_entry->llvm_value, to_llvm_linkage(linkage)); + LLVMSetLinkage(llvm_fn, to_llvm_linkage(linkage)); if (linkage == GlobalLinkageIdInternal) { - LLVMSetUnnamedAddr(fn_table_entry->llvm_value, true); + LLVMSetUnnamedAddr(llvm_fn, true); } ZigType *return_type = fn_type->data.fn.fn_type_id.return_type; if (return_type->id == ZigTypeIdUnreachable) { - addLLVMFnAttr(fn_table_entry->llvm_value, "noreturn"); + addLLVMFnAttr(llvm_fn, "noreturn"); } - if (fn_table_entry->body_node != nullptr) { - maybe_export_dll(g, fn_table_entry->llvm_value, linkage); + if (fn->body_node != nullptr) { + maybe_export_dll(g, llvm_fn, linkage); 
bool want_fn_safety = g->build_mode != BuildModeFastRelease && g->build_mode != BuildModeSmallRelease && - !fn_table_entry->def_scope->safety_off; + !fn->def_scope->safety_off; if (want_fn_safety) { if (g->libc_link_lib != nullptr) { - addLLVMFnAttr(fn_table_entry->llvm_value, "sspstrong"); - addLLVMFnAttrStr(fn_table_entry->llvm_value, "stack-protector-buffer-size", "4"); + addLLVMFnAttr(llvm_fn, "sspstrong"); + addLLVMFnAttrStr(llvm_fn, "stack-protector-buffer-size", "4"); } } - if (g->have_stack_probing && !fn_table_entry->def_scope->safety_off) { - addLLVMFnAttrStr(fn_table_entry->llvm_value, "probe-stack", "__zig_probe_stack"); + if (g->have_stack_probing && !fn->def_scope->safety_off) { + addLLVMFnAttrStr(llvm_fn, "probe-stack", "__zig_probe_stack"); } } else { - maybe_import_dll(g, fn_table_entry->llvm_value, linkage); + maybe_import_dll(g, llvm_fn, linkage); } - if (fn_table_entry->alignstack_value != 0) { - addLLVMFnAttrInt(fn_table_entry->llvm_value, "alignstack", fn_table_entry->alignstack_value); + if (fn->alignstack_value != 0) { + addLLVMFnAttrInt(llvm_fn, "alignstack", fn->alignstack_value); } - addLLVMFnAttr(fn_table_entry->llvm_value, "nounwind"); - add_uwtable_attr(g, fn_table_entry->llvm_value); - addLLVMFnAttr(fn_table_entry->llvm_value, "nobuiltin"); - if (g->build_mode == BuildModeDebug && fn_table_entry->fn_inline != FnInlineAlways) { - ZigLLVMAddFunctionAttr(fn_table_entry->llvm_value, "no-frame-pointer-elim", "true"); - ZigLLVMAddFunctionAttr(fn_table_entry->llvm_value, "no-frame-pointer-elim-non-leaf", nullptr); + addLLVMFnAttr(llvm_fn, "nounwind"); + add_uwtable_attr(g, llvm_fn); + addLLVMFnAttr(llvm_fn, "nobuiltin"); + if (g->build_mode == BuildModeDebug && fn->fn_inline != FnInlineAlways) { + ZigLLVMAddFunctionAttr(llvm_fn, "no-frame-pointer-elim", "true"); + ZigLLVMAddFunctionAttr(llvm_fn, "no-frame-pointer-elim-non-leaf", nullptr); } - if (fn_table_entry->section_name) { - LLVMSetSection(fn_table_entry->llvm_value, 
buf_ptr(fn_table_entry->section_name)); + if (fn->section_name) { + LLVMSetSection(llvm_fn, buf_ptr(fn->section_name)); } - if (fn_table_entry->align_bytes > 0) { - LLVMSetAlignment(fn_table_entry->llvm_value, (unsigned)fn_table_entry->align_bytes); + if (fn->align_bytes > 0) { + LLVMSetAlignment(llvm_fn, (unsigned)fn->align_bytes); } else { // We'd like to set the best alignment for the function here, but on Darwin LLVM gives // "Cannot getTypeInfo() on a type that is unsized!" assertion failure when calling @@ -508,36 +502,46 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) { if (!type_has_bits(return_type)) { // nothing to do } else if (type_is_nonnull_ptr(return_type)) { - addLLVMAttr(fn_table_entry->llvm_value, 0, "nonnull"); + addLLVMAttr(llvm_fn, 0, "nonnull"); } else if (want_first_arg_sret(g, &fn_type->data.fn.fn_type_id)) { // Sret pointers must not be address 0 - addLLVMArgAttr(fn_table_entry->llvm_value, 0, "nonnull"); - addLLVMArgAttr(fn_table_entry->llvm_value, 0, "sret"); + addLLVMArgAttr(llvm_fn, 0, "nonnull"); + addLLVMArgAttr(llvm_fn, 0, "sret"); if (cc_want_sret_attr(cc)) { - addLLVMArgAttr(fn_table_entry->llvm_value, 0, "noalias"); + addLLVMArgAttr(llvm_fn, 0, "noalias"); } init_gen_i = 1; } if (is_async) { - addLLVMArgAttr(fn_table_entry->llvm_value, 0, "nonnull"); + addLLVMArgAttr(llvm_fn, 0, "nonnull"); } else { // set parameter attributes FnWalk fn_walk = {}; fn_walk.id = FnWalkIdAttrs; - fn_walk.data.attrs.fn = fn_table_entry; + fn_walk.data.attrs.fn = fn; + fn_walk.data.attrs.llvm_fn = llvm_fn; fn_walk.data.attrs.gen_i = init_gen_i; walk_function_params(g, fn_type, &fn_walk); - uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn_table_entry); + uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn); if (err_ret_trace_arg_index != UINT32_MAX) { // Error return trace memory is in the stack, which is impossible to be at address 0 // on any architecture. 
- addLLVMArgAttr(fn_table_entry->llvm_value, (unsigned)err_ret_trace_arg_index, "nonnull"); + addLLVMArgAttr(llvm_fn, (unsigned)err_ret_trace_arg_index, "nonnull"); } } - return fn_table_entry->llvm_value; + return llvm_fn; +} + +static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn) { + if (fn->llvm_value) + return fn->llvm_value; + + fn->llvm_value = make_fn_llvm_value(g, fn); + fn->llvm_name = strdup(LLVMGetValueName(fn->llvm_value)); + return fn->llvm_value; } static ZigLLVMDIScope *get_di_scope(CodeGen *g, Scope *scope) { @@ -1665,7 +1669,7 @@ static bool iter_function_params_c_abi(CodeGen *g, ZigType *fn_type, FnWalk *fn_ param_info = &fn_type->data.fn.fn_type_id.param_info[src_i]; ty = param_info->type; source_node = fn_walk->data.attrs.fn->proto_node; - llvm_fn = fn_walk->data.attrs.fn->llvm_value; + llvm_fn = fn_walk->data.attrs.llvm_fn; break; case FnWalkIdCall: { if (src_i >= fn_walk->data.call.inst->arg_count) @@ -1916,7 +1920,7 @@ void walk_function_params(CodeGen *g, ZigType *fn_type, FnWalk *fn_walk) { switch (fn_walk->id) { case FnWalkIdAttrs: { - LLVMValueRef llvm_fn = fn_walk->data.attrs.fn->llvm_value; + LLVMValueRef llvm_fn = fn_walk->data.attrs.llvm_fn; bool is_byval = gen_info->is_byval; FnTypeParamInfo *param_info = &fn_type->data.fn.fn_type_id.param_info[param_i]; @@ -1989,10 +1993,9 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns if (fn_is_async(g->cur_fn)) { if (ir_want_runtime_safety(g, &return_instruction->base)) { LLVMValueRef locals_ptr = g->cur_ret_ptr; - LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_resume_index_index, ""); - LLVMValueRef new_resume_index = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, - g->cur_fn->resume_blocks.length + 2, false); - LLVMBuildStore(g->builder, new_resume_index, resume_index_ptr); + LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_fn_ptr_index, ""); + LLVMValueRef new_resume_fn = 
g->cur_fn->resume_blocks.last()->split_llvm_fn; + LLVMBuildStore(g->builder, new_resume_fn, resume_index_ptr); } LLVMBuildRetVoid(g->builder); @@ -2954,14 +2957,17 @@ static LLVMValueRef ir_render_bool_not(CodeGen *g, IrExecutable *executable, IrI return LLVMBuildICmp(g->builder, LLVMIntEQ, value, zero, ""); } -static LLVMValueRef ir_render_decl_var(CodeGen *g, IrExecutable *executable, IrInstructionDeclVarGen *instruction) { - ZigVar *var = instruction->var; - +static void render_decl_var(CodeGen *g, ZigVar *var) { if (!type_has_bits(var->var_type)) - return nullptr; + return; - var->value_ref = ir_llvm_value(g, instruction->var_ptr); + var->value_ref = ir_llvm_value(g, var->ptr_instruction); gen_var_debug_decl(g, var); +} + +static LLVMValueRef ir_render_decl_var(CodeGen *g, IrExecutable *executable, IrInstructionDeclVarGen *instruction) { + instruction->var->ptr_instruction = instruction->var_ptr; + render_decl_var(g, instruction->var); return nullptr; } @@ -3369,12 +3375,6 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr if (instruction->is_async || callee_is_async) { assert(frame_result_loc != nullptr); assert(instruction->fn_entry != nullptr); - LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_resume_index_index, ""); - LLVMBuildStore(g->builder, zero, resume_index_ptr); - LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_fn_ptr_index, ""); - LLVMValueRef bitcasted_fn_val = LLVMBuildBitCast(g->builder, fn_val, - LLVMGetElementType(LLVMTypeOf(fn_ptr_ptr)), ""); - LLVMBuildStore(g->builder, bitcasted_fn_val, fn_ptr_ptr); if (prefix_arg_err_ret_stack) { zig_panic("TODO"); @@ -3431,10 +3431,9 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr ZigLLVMBuildCall(g->builder, fn_val, &frame_result_loc, 1, llvm_cc, fn_inline, ""); return nullptr; } else if (callee_is_async) { - LLVMValueRef resume_index_ptr = 
LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_resume_index_index, ""); - LLVMValueRef new_resume_index = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, - instruction->resume_block->resume_index, false); - LLVMBuildStore(g->builder, new_resume_index, resume_index_ptr); + LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_fn_ptr_index, ""); + LLVMValueRef new_fn_ptr = instruction->resume_block->split_llvm_fn; + LLVMBuildStore(g->builder, new_fn_ptr, fn_ptr_ptr); LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, fn_val, &frame_result_loc, 1, llvm_cc, fn_inline, ""); ZigLLVMSetTailCall(call_inst); @@ -4888,10 +4887,9 @@ static LLVMValueRef ir_render_suspend_begin(CodeGen *g, IrExecutable *executable IrInstructionSuspendBegin *instruction) { LLVMValueRef locals_ptr = g->cur_ret_ptr; - LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_resume_index_index, ""); - LLVMValueRef new_resume_index = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, - instruction->resume_block->resume_index, false); - LLVMBuildStore(g->builder, new_resume_index, resume_index_ptr); + LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_fn_ptr_index, ""); + LLVMValueRef new_fn_ptr = instruction->resume_block->split_llvm_fn; + LLVMBuildStore(g->builder, new_fn_ptr, fn_ptr_ptr); return nullptr; } @@ -4902,17 +4900,17 @@ static LLVMValueRef ir_render_suspend_br(CodeGen *g, IrExecutable *executable, return nullptr; } -static LLVMTypeRef async_fn_llvm_type(CodeGen *g) { - if (g->async_fn_llvm_type != nullptr) - return g->async_fn_llvm_type; +static LLVMTypeRef anyframe_fn_type(CodeGen *g) { + if (g->anyframe_fn_type != nullptr) + return g->anyframe_fn_type; ZigType *anyframe_type = get_any_frame_type(g, nullptr); LLVMTypeRef param_type = get_llvm_type(g, anyframe_type); LLVMTypeRef return_type = LLVMVoidType(); LLVMTypeRef fn_type = LLVMFunctionType(return_type, ¶m_type, 1, false); - g->async_fn_llvm_type 
= LLVMPointerType(fn_type, 0); + g->anyframe_fn_type = LLVMPointerType(fn_type, 0); - return g->async_fn_llvm_type; + return g->anyframe_fn_type; } static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, @@ -4923,7 +4921,7 @@ static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, assert(frame_type->id == ZigTypeIdAnyFrame); LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, frame, coro_fn_ptr_index, ""); LLVMValueRef uncasted_fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, ""); - LLVMValueRef fn_val = LLVMBuildIntToPtr(g->builder, uncasted_fn_val, async_fn_llvm_type(g), ""); + LLVMValueRef fn_val = LLVMBuildIntToPtr(g->builder, uncasted_fn_val, anyframe_fn_type(g), ""); ZigLLVMBuildCall(g->builder, fn_val, &frame, 1, LLVMFastCallConv, ZigLLVM_FnInlineAuto, ""); return nullptr; } @@ -5022,7 +5020,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, case IrInstructionIdCallSrc: case IrInstructionIdAllocaSrc: case IrInstructionIdEndExpr: - case IrInstructionIdAllocaGen: case IrInstructionIdImplicitCast: case IrInstructionIdResolveResult: case IrInstructionIdResetResult: @@ -5035,6 +5032,7 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, case IrInstructionIdUnionInitNamedField: case IrInstructionIdFrameType: case IrInstructionIdFrameSizeSrc: + case IrInstructionIdAllocaGen: zig_unreachable(); case IrInstructionIdDeclVarGen: @@ -5195,6 +5193,92 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, zig_unreachable(); } +static void render_async_spills(CodeGen *g) { + ZigType *fn_type = g->cur_fn->type_entry; + ZigType *import = get_scope_import(&g->cur_fn->fndef_scope->base); + size_t async_var_index = coro_arg_start + (type_has_bits(fn_type->data.fn.fn_type_id.return_type) ? 
2 : 0); + for (size_t var_i = 0; var_i < g->cur_fn->variable_list.length; var_i += 1) { + ZigVar *var = g->cur_fn->variable_list.at(var_i); + + if (!type_has_bits(var->var_type)) { + continue; + } + if (ir_get_var_is_comptime(var)) + continue; + switch (type_requires_comptime(g, var->var_type)) { + case ReqCompTimeInvalid: + zig_unreachable(); + case ReqCompTimeYes: + continue; + case ReqCompTimeNo: + break; + } + if (var->src_arg_index == SIZE_MAX) { + continue; + } + + var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, async_var_index, + buf_ptr(&var->name)); + async_var_index += 1; + if (var->decl_node) { + var->di_loc_var = ZigLLVMCreateAutoVariable(g->dbuilder, get_di_scope(g, var->parent_scope), + buf_ptr(&var->name), import->data.structure.root_struct->di_file, + (unsigned)(var->decl_node->line + 1), + get_llvm_di_type(g, var->var_type), !g->strip_debug_symbols, 0); + gen_var_debug_decl(g, var); + } + } + for (size_t alloca_i = 0; alloca_i < g->cur_fn->alloca_gen_list.length; alloca_i += 1) { + IrInstructionAllocaGen *instruction = g->cur_fn->alloca_gen_list.at(alloca_i); + ZigType *ptr_type = instruction->base.value.type; + assert(ptr_type->id == ZigTypeIdPointer); + ZigType *child_type = ptr_type->data.pointer.child_type; + if (!type_has_bits(child_type)) + continue; + if (instruction->base.ref_count == 0) + continue; + if (instruction->base.value.special != ConstValSpecialRuntime) { + if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special != + ConstValSpecialRuntime) + { + continue; + } + } + instruction->base.llvm_value = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, async_var_index, + instruction->name_hint); + async_var_index += 1; + } +} + +static void render_async_var_decls(CodeGen *g, Scope *scope) { + render_async_spills(g); + for (;;) { + switch (scope->id) { + case ScopeIdCImport: + zig_unreachable(); + case ScopeIdFnDef: + return; + case ScopeIdVarDecl: { + ZigVar *var = reinterpret_cast(scope)->var; + if 
(var->ptr_instruction != nullptr) { + render_decl_var(g, var); + } + // fallthrough + } + case ScopeIdDecls: + case ScopeIdBlock: + case ScopeIdDefer: + case ScopeIdDeferExpr: + case ScopeIdLoop: + case ScopeIdSuspend: + case ScopeIdCompTime: + case ScopeIdRuntime: + scope = scope->parent; + continue; + } + } +} + static void ir_render(CodeGen *g, ZigFn *fn_entry) { assert(fn_entry); @@ -5204,6 +5288,11 @@ static void ir_render(CodeGen *g, ZigFn *fn_entry) { IrBasicBlock *current_block = executable->basic_block_list.at(block_i); assert(current_block->llvm_block); LLVMPositionBuilderAtEnd(g->builder, current_block->llvm_block); + if (current_block->split_llvm_fn != nullptr) { + g->cur_fn_val = current_block->split_llvm_fn; + g->cur_ret_ptr = LLVMGetParam(g->cur_fn_val, 0); + render_async_var_decls(g, current_block->instruction_list.at(0)->scope); + } for (size_t instr_i = 0; instr_i < current_block->instruction_list.length; instr_i += 1) { IrInstruction *instruction = current_block->instruction_list.at(instr_i); if (instruction->ref_count == 0 && !ir_has_side_effects(instruction)) @@ -6064,19 +6153,17 @@ static void build_all_basic_blocks(CodeGen *g, ZigFn *fn) { IrExecutable *executable = &fn->analyzed_executable; assert(executable->basic_block_list.length > 0); LLVMValueRef fn_val = fn_llvm_value(g, fn); - LLVMBasicBlockRef first_bb = nullptr; - if (fn_is_async(fn)) { - first_bb = LLVMAppendBasicBlock(fn_val, "AsyncSwitch"); - fn->preamble_llvm_block = first_bb; - } for (size_t block_i = 0; block_i < executable->basic_block_list.length; block_i += 1) { IrBasicBlock *bb = executable->basic_block_list.at(block_i); + if (bb->split_llvm_fn != nullptr) { + assert(bb->split_llvm_fn == reinterpret_cast(0x1)); + fn_val = make_fn_llvm_value(g, fn); + bb->split_llvm_fn = fn_val; + } bb->llvm_block = LLVMAppendBasicBlock(fn_val, bb->name_hint); } - if (first_bb == nullptr) { - first_bb = executable->basic_block_list.at(0)->llvm_block; - } - 
LLVMPositionBuilderAtEnd(g->builder, first_bb); + IrBasicBlock *entry_bb = executable->basic_block_list.at(0); + LLVMPositionBuilderAtEnd(g->builder, entry_bb->llvm_block); } static void gen_global_var(CodeGen *g, ZigVar *var, LLVMValueRef init_val, @@ -6254,7 +6341,6 @@ static void do_code_gen(CodeGen *g) { clear_debug_source_node(g); bool is_async = fn_is_async(fn_table_entry); - size_t async_var_index = coro_arg_start + (type_has_bits(fn_type_id->return_type) ? 2 : 0); if (want_sret || is_async) { g->cur_ret_ptr = LLVMGetParam(fn, 0); @@ -6287,7 +6373,9 @@ static void do_code_gen(CodeGen *g) { g->cur_err_ret_trace_val_stack = nullptr; } - if (!is_async) { + if (is_async) { + render_async_spills(g); + } else { // allocate temporary stack data for (size_t alloca_i = 0; alloca_i < fn_table_entry->alloca_gen_list.length; alloca_i += 1) { IrInstructionAllocaGen *instruction = fn_table_entry->alloca_gen_list.at(alloca_i); @@ -6345,18 +6433,7 @@ static void do_code_gen(CodeGen *g) { } else if (is_c_abi) { fn_walk_var.data.vars.var = var; iter_function_params_c_abi(g, fn_table_entry->type_entry, &fn_walk_var, var->src_arg_index); - } else if (is_async) { - var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, async_var_index, - buf_ptr(&var->name)); - async_var_index += 1; - if (var->decl_node) { - var->di_loc_var = ZigLLVMCreateAutoVariable(g->dbuilder, get_di_scope(g, var->parent_scope), - buf_ptr(&var->name), import->data.structure.root_struct->di_file, - (unsigned)(var->decl_node->line + 1), - get_llvm_di_type(g, var->var_type), !g->strip_debug_symbols, 0); - gen_var_debug_decl(g, var); - } - } else { + } else if (!is_async) { ZigType *gen_type; FnGenParamInfo *gen_info = &fn_table_entry->type_entry->data.fn.gen_param_info[var->src_arg_index]; assert(gen_info->gen_index != SIZE_MAX); @@ -6382,29 +6459,6 @@ static void do_code_gen(CodeGen *g) { } } - if (is_async) { - for (size_t alloca_i = 0; alloca_i < fn_table_entry->alloca_gen_list.length; alloca_i += 
1) { - IrInstructionAllocaGen *instruction = fn_table_entry->alloca_gen_list.at(alloca_i); - ZigType *ptr_type = instruction->base.value.type; - assert(ptr_type->id == ZigTypeIdPointer); - ZigType *child_type = ptr_type->data.pointer.child_type; - if (!type_has_bits(child_type)) - continue; - if (instruction->base.ref_count == 0) - continue; - if (instruction->base.value.special != ConstValSpecialRuntime) { - if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special != - ConstValSpecialRuntime) - { - continue; - } - } - instruction->base.llvm_value = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, async_var_index, - instruction->name_hint); - async_var_index += 1; - } - } - // finishing error return trace setup. we have to do this after all the allocas. if (have_err_ret_trace_stack) { ZigType *usize = g->builtin_types.entry_usize; @@ -6435,31 +6489,16 @@ static void do_code_gen(CodeGen *g) { LLVMValueRef size_val = LLVMConstInt(usize_type_ref, fn_table_entry->frame_type->abi_size, false); ZigLLVMFunctionSetPrefixData(fn_table_entry->llvm_value, size_val); - if (!g->strip_debug_symbols) { - AstNode *source_node = fn_table_entry->proto_node; - ZigLLVMSetCurrentDebugLocation(g->builder, (int)source_node->line + 1, - (int)source_node->column + 1, get_di_scope(g, fn_table_entry->child_scope)); - } - IrExecutable *executable = &fn_table_entry->analyzed_executable; - LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume"); - LLVMPositionBuilderAtEnd(g->builder, bad_resume_block); - gen_assertion_scope(g, PanicMsgIdBadResume, fn_table_entry->child_scope); + if (ir_want_runtime_safety_scope(g, fn_table_entry->child_scope)) { + IrBasicBlock *bad_resume_block = allocate(1); + bad_resume_block->name_hint = "BadResume"; + bad_resume_block->split_llvm_fn = make_fn_llvm_value(g, fn_table_entry); - LLVMPositionBuilderAtEnd(g->builder, fn_table_entry->preamble_llvm_block); - LLVMValueRef resume_index_ptr = 
LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, - coro_resume_index_index, ""); - LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, ""); - // +1 - index 0 is reserved for the entry block - LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block, - fn_table_entry->resume_blocks.length + 1); + LLVMBasicBlockRef llvm_block = LLVMAppendBasicBlock(bad_resume_block->split_llvm_fn, "BadResume"); + LLVMPositionBuilderAtEnd(g->builder, llvm_block); + gen_safety_crash(g, PanicMsgIdBadResume); - LLVMValueRef zero = LLVMConstNull(usize_type_ref); - LLVMAddCase(switch_instr, zero, executable->basic_block_list.at(0)->llvm_block); - - for (size_t resume_i = 0; resume_i < fn_table_entry->resume_blocks.length; resume_i += 1) { - IrBasicBlock *resume_block = fn_table_entry->resume_blocks.at(resume_i); - LLVMValueRef case_value = LLVMConstInt(usize_type_ref, resume_block->resume_index, false); - LLVMAddCase(switch_instr, case_value, resume_block->llvm_block); + fn_table_entry->resume_blocks.append(bad_resume_block); } } else { // create debug variable declarations for parameters @@ -6472,7 +6511,6 @@ static void do_code_gen(CodeGen *g) { walk_function_params(g, fn_table_entry->type_entry, &fn_walk_init); } - ir_render(g, fn_table_entry); } diff --git a/src/ir.cpp b/src/ir.cpp index 98a8f1061e..3d376270f6 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -3227,7 +3227,7 @@ static IrInstruction *ir_build_alloca_src(IrBuilder *irb, Scope *scope, AstNode return &instruction->base; } -static IrInstructionAllocaGen *ir_create_alloca_gen(IrAnalyze *ira, IrInstruction *source_instruction, +static IrInstructionAllocaGen *ir_build_alloca_gen(IrAnalyze *ira, IrInstruction *source_instruction, uint32_t align, const char *name_hint) { IrInstructionAllocaGen *instruction = ir_create_instruction(&ira->new_irb, @@ -14351,7 +14351,7 @@ static IrInstruction *ir_analyze_alloca(IrAnalyze *ira, IrInstruction *source_in ConstExprValue *pointee = 
create_const_vals(1); pointee->special = ConstValSpecialUndef; - IrInstructionAllocaGen *result = ir_create_alloca_gen(ira, source_inst, align, name_hint); + IrInstructionAllocaGen *result = ir_build_alloca_gen(ira, source_inst, align, name_hint); result->base.value.special = ConstValSpecialStatic; result->base.value.data.x_ptr.special = ConstPtrSpecialRef; result->base.value.data.x_ptr.mut = force_comptime ? ConstPtrMutComptimeVar : ConstPtrMutInfer; @@ -14448,7 +14448,7 @@ static IrInstruction *ir_resolve_result_raw(IrAnalyze *ira, IrInstruction *suspe return nullptr; } // need to return a result location and don't have one. use a stack allocation - IrInstructionAllocaGen *alloca_gen = ir_create_alloca_gen(ira, suspend_source_instr, 0, ""); + IrInstructionAllocaGen *alloca_gen = ir_build_alloca_gen(ira, suspend_source_instr, 0, ""); if ((err = type_resolve(ira->codegen, value_type, ResolveStatusZeroBitsKnown))) return ira->codegen->invalid_instruction; alloca_gen->base.value.type = get_pointer_to_type_extra(ira->codegen, value_type, false, false, @@ -24357,7 +24357,7 @@ static IrInstruction *ir_analyze_instruction_suspend_br(IrAnalyze *ira, IrInstru ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec); ir_assert(fn_entry != nullptr, &instruction->base); - new_bb->resume_index = fn_entry->resume_blocks.length + coro_extra_resume_block_count; + new_bb->split_llvm_fn = reinterpret_cast(0x1); fn_entry->resume_blocks.append(new_bb); if (fn_entry->inferred_async_node == nullptr) { diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index fddc912e77..7a8a4a07df 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -82,55 +82,55 @@ test "local variable in async function" { S.doTheTest(); } -test "calling an inferred async function" { - const S = struct { - var x: i32 = 1; - var other_frame: *@Frame(other) = undefined; - - fn doTheTest() void { - const p = async first(); - expect(x == 1); - resume 
other_frame.*; - expect(x == 2); - } - - fn first() void { - other(); - } - fn other() void { - other_frame = @frame(); - suspend; - x += 1; - } - }; - S.doTheTest(); -} - -test "@frameSize" { - const S = struct { - fn doTheTest() void { - { - var ptr = @ptrCast(async fn(i32) void, other); - const size = @frameSize(ptr); - expect(size == @sizeOf(@Frame(other))); - } - { - var ptr = @ptrCast(async fn() void, first); - const size = @frameSize(ptr); - expect(size == @sizeOf(@Frame(first))); - } - } - - fn first() void { - other(1); - } - fn other(param: i32) void { - var local: i32 = undefined; - suspend; - } - }; - S.doTheTest(); -} +//test "calling an inferred async function" { +// const S = struct { +// var x: i32 = 1; +// var other_frame: *@Frame(other) = undefined; +// +// fn doTheTest() void { +// const p = async first(); +// expect(x == 1); +// resume other_frame.*; +// expect(x == 2); +// } +// +// fn first() void { +// other(); +// } +// fn other() void { +// other_frame = @frame(); +// suspend; +// x += 1; +// } +// }; +// S.doTheTest(); +//} +// +//test "@frameSize" { +// const S = struct { +// fn doTheTest() void { +// { +// var ptr = @ptrCast(async fn(i32) void, other); +// const size = @frameSize(ptr); +// expect(size == @sizeOf(@Frame(other))); +// } +// { +// var ptr = @ptrCast(async fn() void, first); +// const size = @frameSize(ptr); +// expect(size == @sizeOf(@Frame(first))); +// } +// } +// +// fn first() void { +// other(1); +// } +// fn other(param: i32) void { +// var local: i32 = undefined; +// suspend; +// } +// }; +// S.doTheTest(); +//} //test "coroutine suspend, resume" { // seq('a'); From 1dd0c3d49f929fc280d5bb4bbeed6538b50b2535 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 1 Aug 2019 16:41:30 -0400 Subject: [PATCH 024/125] fix calling an inferred async function --- BRANCH_TODO | 2 - src/all_types.hpp | 1 - src/analyze.cpp | 7 -- src/codegen.cpp | 182 ++++++++++++++-------------- test/stage1/behavior/coroutines.zig | 98 
+++++++-------- 5 files changed, 142 insertions(+), 148 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index 7c19147aa8..4e128b78a1 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,5 +1,3 @@ - * fix @frameSize - * fix calling an inferred async function * await * await of a non async function * await in single-threaded mode diff --git a/src/all_types.hpp b/src/all_types.hpp index b5b8b06259..e66c9aebff 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -2605,7 +2605,6 @@ struct IrInstructionCallGen { IrInstruction **args; IrInstruction *result_loc; IrInstruction *frame_result_loc; - IrBasicBlock *resume_block; IrInstruction *new_stack; FnInline fn_inline; diff --git a/src/analyze.cpp b/src/analyze.cpp index 5e22358423..99caf9688b 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5185,13 +5185,6 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { if (!fn_is_async(callee)) continue; - IrBasicBlock *new_resume_block = allocate(1); - new_resume_block->name_hint = "CallResume"; - new_resume_block->split_llvm_fn = reinterpret_cast(0x1); - fn->resume_blocks.append(new_resume_block); - call->resume_block = new_resume_block; - fn->analyzed_executable.basic_block_list.append(new_resume_block); - ZigType *callee_frame_type = get_coro_frame_type(g, callee); IrInstructionAllocaGen *alloca_gen = allocate(1); diff --git a/src/codegen.cpp b/src/codegen.cpp index d955736083..d0aadaabe1 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -3327,6 +3327,92 @@ static void set_call_instr_sret(CodeGen *g, LLVMValueRef call_instr) { LLVMAddCallSiteAttribute(call_instr, 1, sret_attr); } +static void render_async_spills(CodeGen *g) { + ZigType *fn_type = g->cur_fn->type_entry; + ZigType *import = get_scope_import(&g->cur_fn->fndef_scope->base); + size_t async_var_index = coro_arg_start + (type_has_bits(fn_type->data.fn.fn_type_id.return_type) ? 
2 : 0); + for (size_t var_i = 0; var_i < g->cur_fn->variable_list.length; var_i += 1) { + ZigVar *var = g->cur_fn->variable_list.at(var_i); + + if (!type_has_bits(var->var_type)) { + continue; + } + if (ir_get_var_is_comptime(var)) + continue; + switch (type_requires_comptime(g, var->var_type)) { + case ReqCompTimeInvalid: + zig_unreachable(); + case ReqCompTimeYes: + continue; + case ReqCompTimeNo: + break; + } + if (var->src_arg_index == SIZE_MAX) { + continue; + } + + var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, async_var_index, + buf_ptr(&var->name)); + async_var_index += 1; + if (var->decl_node) { + var->di_loc_var = ZigLLVMCreateAutoVariable(g->dbuilder, get_di_scope(g, var->parent_scope), + buf_ptr(&var->name), import->data.structure.root_struct->di_file, + (unsigned)(var->decl_node->line + 1), + get_llvm_di_type(g, var->var_type), !g->strip_debug_symbols, 0); + gen_var_debug_decl(g, var); + } + } + for (size_t alloca_i = 0; alloca_i < g->cur_fn->alloca_gen_list.length; alloca_i += 1) { + IrInstructionAllocaGen *instruction = g->cur_fn->alloca_gen_list.at(alloca_i); + ZigType *ptr_type = instruction->base.value.type; + assert(ptr_type->id == ZigTypeIdPointer); + ZigType *child_type = ptr_type->data.pointer.child_type; + if (!type_has_bits(child_type)) + continue; + if (instruction->base.ref_count == 0) + continue; + if (instruction->base.value.special != ConstValSpecialRuntime) { + if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special != + ConstValSpecialRuntime) + { + continue; + } + } + instruction->base.llvm_value = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, async_var_index, + instruction->name_hint); + async_var_index += 1; + } +} + +static void render_async_var_decls(CodeGen *g, Scope *scope) { + render_async_spills(g); + for (;;) { + switch (scope->id) { + case ScopeIdCImport: + zig_unreachable(); + case ScopeIdFnDef: + return; + case ScopeIdVarDecl: { + ZigVar *var = reinterpret_cast(scope)->var; + if 
(var->ptr_instruction != nullptr) { + render_decl_var(g, var); + } + // fallthrough + } + case ScopeIdDecls: + case ScopeIdBlock: + case ScopeIdDefer: + case ScopeIdDeferExpr: + case ScopeIdLoop: + case ScopeIdSuspend: + case ScopeIdCompTime: + case ScopeIdRuntime: + scope = scope->parent; + continue; + } + } +} + static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstructionCallGen *instruction) { LLVMValueRef fn_val; ZigType *fn_type; @@ -3431,15 +3517,19 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr ZigLLVMBuildCall(g->builder, fn_val, &frame_result_loc, 1, llvm_cc, fn_inline, ""); return nullptr; } else if (callee_is_async) { + LLVMValueRef split_llvm_fn = make_fn_llvm_value(g, g->cur_fn); LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_fn_ptr_index, ""); - LLVMValueRef new_fn_ptr = instruction->resume_block->split_llvm_fn; - LLVMBuildStore(g->builder, new_fn_ptr, fn_ptr_ptr); + LLVMBuildStore(g->builder, split_llvm_fn, fn_ptr_ptr); LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, fn_val, &frame_result_loc, 1, llvm_cc, fn_inline, ""); ZigLLVMSetTailCall(call_inst); LLVMBuildRetVoid(g->builder); - LLVMPositionBuilderAtEnd(g->builder, instruction->resume_block->llvm_block); + g->cur_fn_val = split_llvm_fn; + g->cur_ret_ptr = LLVMGetParam(split_llvm_fn, 0); + LLVMBasicBlockRef call_bb = LLVMAppendBasicBlock(split_llvm_fn, "CallResume"); + LLVMPositionBuilderAtEnd(g->builder, call_bb); + render_async_var_decls(g, instruction->base.scope); return nullptr; } @@ -5193,92 +5283,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, zig_unreachable(); } -static void render_async_spills(CodeGen *g) { - ZigType *fn_type = g->cur_fn->type_entry; - ZigType *import = get_scope_import(&g->cur_fn->fndef_scope->base); - size_t async_var_index = coro_arg_start + (type_has_bits(fn_type->data.fn.fn_type_id.return_type) ? 
2 : 0); - for (size_t var_i = 0; var_i < g->cur_fn->variable_list.length; var_i += 1) { - ZigVar *var = g->cur_fn->variable_list.at(var_i); - - if (!type_has_bits(var->var_type)) { - continue; - } - if (ir_get_var_is_comptime(var)) - continue; - switch (type_requires_comptime(g, var->var_type)) { - case ReqCompTimeInvalid: - zig_unreachable(); - case ReqCompTimeYes: - continue; - case ReqCompTimeNo: - break; - } - if (var->src_arg_index == SIZE_MAX) { - continue; - } - - var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, async_var_index, - buf_ptr(&var->name)); - async_var_index += 1; - if (var->decl_node) { - var->di_loc_var = ZigLLVMCreateAutoVariable(g->dbuilder, get_di_scope(g, var->parent_scope), - buf_ptr(&var->name), import->data.structure.root_struct->di_file, - (unsigned)(var->decl_node->line + 1), - get_llvm_di_type(g, var->var_type), !g->strip_debug_symbols, 0); - gen_var_debug_decl(g, var); - } - } - for (size_t alloca_i = 0; alloca_i < g->cur_fn->alloca_gen_list.length; alloca_i += 1) { - IrInstructionAllocaGen *instruction = g->cur_fn->alloca_gen_list.at(alloca_i); - ZigType *ptr_type = instruction->base.value.type; - assert(ptr_type->id == ZigTypeIdPointer); - ZigType *child_type = ptr_type->data.pointer.child_type; - if (!type_has_bits(child_type)) - continue; - if (instruction->base.ref_count == 0) - continue; - if (instruction->base.value.special != ConstValSpecialRuntime) { - if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special != - ConstValSpecialRuntime) - { - continue; - } - } - instruction->base.llvm_value = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, async_var_index, - instruction->name_hint); - async_var_index += 1; - } -} - -static void render_async_var_decls(CodeGen *g, Scope *scope) { - render_async_spills(g); - for (;;) { - switch (scope->id) { - case ScopeIdCImport: - zig_unreachable(); - case ScopeIdFnDef: - return; - case ScopeIdVarDecl: { - ZigVar *var = reinterpret_cast(scope)->var; - if 
(var->ptr_instruction != nullptr) { - render_decl_var(g, var); - } - // fallthrough - } - case ScopeIdDecls: - case ScopeIdBlock: - case ScopeIdDefer: - case ScopeIdDeferExpr: - case ScopeIdLoop: - case ScopeIdSuspend: - case ScopeIdCompTime: - case ScopeIdRuntime: - scope = scope->parent; - continue; - } - } -} - static void ir_render(CodeGen *g, ZigFn *fn_entry) { assert(fn_entry); diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index 7a8a4a07df..fddc912e77 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -82,55 +82,55 @@ test "local variable in async function" { S.doTheTest(); } -//test "calling an inferred async function" { -// const S = struct { -// var x: i32 = 1; -// var other_frame: *@Frame(other) = undefined; -// -// fn doTheTest() void { -// const p = async first(); -// expect(x == 1); -// resume other_frame.*; -// expect(x == 2); -// } -// -// fn first() void { -// other(); -// } -// fn other() void { -// other_frame = @frame(); -// suspend; -// x += 1; -// } -// }; -// S.doTheTest(); -//} -// -//test "@frameSize" { -// const S = struct { -// fn doTheTest() void { -// { -// var ptr = @ptrCast(async fn(i32) void, other); -// const size = @frameSize(ptr); -// expect(size == @sizeOf(@Frame(other))); -// } -// { -// var ptr = @ptrCast(async fn() void, first); -// const size = @frameSize(ptr); -// expect(size == @sizeOf(@Frame(first))); -// } -// } -// -// fn first() void { -// other(1); -// } -// fn other(param: i32) void { -// var local: i32 = undefined; -// suspend; -// } -// }; -// S.doTheTest(); -//} +test "calling an inferred async function" { + const S = struct { + var x: i32 = 1; + var other_frame: *@Frame(other) = undefined; + + fn doTheTest() void { + const p = async first(); + expect(x == 1); + resume other_frame.*; + expect(x == 2); + } + + fn first() void { + other(); + } + fn other() void { + other_frame = @frame(); + suspend; + x += 1; + } + }; + S.doTheTest(); 
+} + +test "@frameSize" { + const S = struct { + fn doTheTest() void { + { + var ptr = @ptrCast(async fn(i32) void, other); + const size = @frameSize(ptr); + expect(size == @sizeOf(@Frame(other))); + } + { + var ptr = @ptrCast(async fn() void, first); + const size = @frameSize(ptr); + expect(size == @sizeOf(@Frame(first))); + } + } + + fn first() void { + other(1); + } + fn other(param: i32) void { + var local: i32 = undefined; + suspend; + } + }; + S.doTheTest(); +} //test "coroutine suspend, resume" { // seq('a'); From 0f879d02a4c4b1de0e28c2863c1e5f3760eb5b19 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 1 Aug 2019 19:14:48 -0400 Subject: [PATCH 025/125] more passing coroutine tests --- BRANCH_TODO | 3 + src/ir.cpp | 1 - test/stage1/behavior/coroutines.zig | 107 ++++++++++++++-------------- 3 files changed, 56 insertions(+), 55 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index 4e128b78a1..f5db81a080 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -6,7 +6,10 @@ * cancel * defer and errdefer * safety for resuming when it is awaiting + * safety for double await * implicit cast of normal function to async function should be allowed when it is inferred to be async * go over the commented out tests * revive std.event.Loop * @typeInfo for @Frame(func) + * peer type resolution of *@Frame(func) and anyframe + * peer type resolution of *@Frame(func) and anyframe->T when the return type matches diff --git a/src/ir.cpp b/src/ir.cpp index 3d376270f6..3a6853b034 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -24380,7 +24380,6 @@ static IrInstruction *ir_analyze_instruction_coro_resume(IrAnalyze *ira, IrInstr IrInstruction *frame; if (frame_ptr->value.type->id == ZigTypeIdPointer && frame_ptr->value.type->data.pointer.ptr_len == PtrLenSingle && - frame_ptr->value.type->data.pointer.is_const && frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdAnyFrame) { frame = ir_get_deref(ira, &instruction->base, frame_ptr, nullptr); diff --git 
a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index fddc912e77..01237ed1c9 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -132,56 +132,55 @@ test "@frameSize" { S.doTheTest(); } -//test "coroutine suspend, resume" { -// seq('a'); -// const p = try async testAsyncSeq(); -// seq('c'); -// resume p; -// seq('f'); -// cancel p; -// seq('g'); -// -// expect(std.mem.eql(u8, points, "abcdefg")); -//} -//async fn testAsyncSeq() void { -// defer seq('e'); -// -// seq('b'); -// suspend; -// seq('d'); -//} -//var points = [_]u8{0} ** "abcdefg".len; -//var index: usize = 0; -// -//fn seq(c: u8) void { -// points[index] = c; -// index += 1; -//} -// -//test "coroutine suspend with block" { -// const p = try async testSuspendBlock(); -// std.testing.expect(!result); -// resume a_promise; -// std.testing.expect(result); -// cancel p; -//} -// -//var a_promise: promise = undefined; -//var result = false; -//async fn testSuspendBlock() void { -// suspend { -// comptime expect(@typeOf(@handle()) == promise->void); -// a_promise = @handle(); -// } -// -// //Test to make sure that @handle() works as advertised (issue #1296) -// //var our_handle: promise = @handle(); -// expect(a_promise == @handle()); -// -// result = true; -//} -// -//var await_a_promise: promise = undefined; +test "coroutine suspend, resume" { + seq('a'); + const p = async testAsyncSeq(); + seq('c'); + resume p; + seq('f'); + // `cancel` is now a suspend point so it cannot be done here + seq('g'); + + expect(std.mem.eql(u8, points, "abcdefg")); +} +async fn testAsyncSeq() void { + defer seq('e'); + + seq('b'); + suspend; + seq('d'); +} +var points = [_]u8{0} ** "abcdefg".len; +var index: usize = 0; + +fn seq(c: u8) void { + points[index] = c; + index += 1; +} + +test "coroutine suspend with block" { + const p = async testSuspendBlock(); + expect(!result); + resume a_promise; + expect(result); +} + +var a_promise: anyframe = undefined; +var 
result = false; +async fn testSuspendBlock() void { + suspend { + comptime expect(@typeOf(@frame()) == *@Frame(testSuspendBlock)); + a_promise = @frame(); + } + + // Test to make sure that @frame() works as advertised (issue #1296) + // var our_handle: anyframe = @frame(); + expect(a_promise == anyframe(@frame())); + + result = true; +} + +//var await_a_promise: anyframe = undefined; //var await_final_result: i32 = 0; // //test "coroutine await" { @@ -204,7 +203,7 @@ test "@frameSize" { // await_seq('c'); // suspend { // await_seq('d'); -// await_a_promise = @handle(); +// await_a_promise = @frame(); // } // await_seq('g'); // return 1234; @@ -314,14 +313,14 @@ test "@frameSize" { // cancel p2; //} // -//fn nonFailing() (promise->anyerror!void) { +//fn nonFailing() (anyframe->anyerror!void) { // return async suspendThenFail() catch unreachable; //} //async fn suspendThenFail() anyerror!void { // suspend; // return error.Fail; //} -//async fn printTrace(p: promise->(anyerror!void)) void { +//async fn printTrace(p: anyframe->(anyerror!void)) void { // (await p) catch |e| { // std.testing.expect(e == error.Fail); // if (@errorReturnTrace()) |trace| { @@ -343,7 +342,7 @@ test "@frameSize" { //} //async fn testBreakFromSuspend(my_result: *i32) void { // suspend { -// resume @handle(); +// resume @frame(); // } // my_result.* += 1; // suspend; From 056c4e2c988c0a2ff6f1be8fe18a0a056d848271 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 2 Aug 2019 01:05:34 -0400 Subject: [PATCH 026/125] implement async await and return --- BRANCH_TODO | 6 +- src/all_types.hpp | 10 +- src/analyze.cpp | 25 ++- src/ast_render.cpp | 4 +- src/codegen.cpp | 162 +++++++++++++- src/ir.cpp | 97 ++++++-- src/ir_print.cpp | 9 + .../behavior/coroutine_await_struct.zig | 8 +- test/stage1/behavior/coroutines.zig | 209 ++++++++---------- 9 files changed, 384 insertions(+), 146 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index f5db81a080..6d6ae42529 100644 --- a/BRANCH_TODO +++ 
b/BRANCH_TODO @@ -1,4 +1,5 @@ - * await + * compile error for error: expected anyframe->T, found 'anyframe' + * compile error for error: expected anyframe->T, found 'i32' * await of a non async function * await in single-threaded mode * async call on a non async function @@ -13,3 +14,6 @@ * @typeInfo for @Frame(func) * peer type resolution of *@Frame(func) and anyframe * peer type resolution of *@Frame(func) and anyframe->T when the return type matches + * returning a value from within a suspend block + * struct types as the return type of an async function. make sure it works with return result locations. + * make resuming inside a suspend block, with nothing after it, a must-tail call. diff --git a/src/all_types.hpp b/src/all_types.hpp index e66c9aebff..9ab90b2785 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1550,6 +1550,8 @@ enum PanicMsgId { PanicMsgIdFloatToInt, PanicMsgIdPtrCastNull, PanicMsgIdBadResume, + PanicMsgIdBadAwait, + PanicMsgIdBadReturn, PanicMsgIdCount, }; @@ -1795,7 +1797,6 @@ struct CodeGen { ZigType *entry_arg_tuple; ZigType *entry_enum_literal; ZigType *entry_any_frame; - ZigType *entry_async_fn; } builtin_types; ZigType *align_amt_type; @@ -2348,6 +2349,7 @@ enum IrInstructionId { IrInstructionIdUnionInitNamedField, IrInstructionIdSuspendBegin, IrInstructionIdSuspendBr, + IrInstructionIdAwait, IrInstructionIdCoroResume, }; @@ -3600,6 +3602,12 @@ struct IrInstructionSuspendBr { IrBasicBlock *resume_block; }; +struct IrInstructionAwait { + IrInstruction base; + + IrInstruction *frame; +}; + struct IrInstructionCoroResume { IrInstruction base; diff --git a/src/analyze.cpp b/src/analyze.cpp index 99caf9688b..5af9698dd1 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -3807,6 +3807,9 @@ static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) { } else if (fn->inferred_async_node->type == NodeTypeSuspend) { add_error_note(g, msg, fn->inferred_async_node, buf_sprintf("suspends here")); + } else if 
(fn->inferred_async_node->type == NodeTypeAwaitExpr) { + add_error_note(g, msg, fn->inferred_async_node, + buf_sprintf("await is a suspend point")); } else { zig_unreachable(); } @@ -7361,7 +7364,7 @@ static void resolve_llvm_types_fn_type(CodeGen *g, ZigType *fn_type) { param_di_types.append(get_llvm_di_type(g, gen_type)); } if (is_async) { - fn_type->data.fn.gen_param_info = allocate(1); + fn_type->data.fn.gen_param_info = allocate(2); ZigType *frame_type = get_any_frame_type(g, fn_type_id->return_type); gen_param_types.append(get_llvm_type(g, frame_type)); @@ -7370,6 +7373,13 @@ static void resolve_llvm_types_fn_type(CodeGen *g, ZigType *fn_type) { fn_type->data.fn.gen_param_info[0].src_index = 0; fn_type->data.fn.gen_param_info[0].gen_index = 0; fn_type->data.fn.gen_param_info[0].type = frame_type; + + gen_param_types.append(get_llvm_type(g, g->builtin_types.entry_usize)); + param_di_types.append(get_llvm_di_type(g, g->builtin_types.entry_usize)); + + fn_type->data.fn.gen_param_info[1].src_index = 1; + fn_type->data.fn.gen_param_info[1].gen_index = 1; + fn_type->data.fn.gen_param_info[1].type = g->builtin_types.entry_usize; } else { fn_type->data.fn.gen_param_info = allocate(fn_type_id->param_count); for (size_t i = 0; i < fn_type_id->param_count; i += 1) { @@ -7434,15 +7444,21 @@ void resolve_llvm_types_fn(CodeGen *g, ZigFn *fn) { ZigType *gen_return_type = g->builtin_types.entry_void; ZigList param_di_types = {}; + ZigList gen_param_types = {}; // first "parameter" is return value param_di_types.append(get_llvm_di_type(g, gen_return_type)); ZigType *frame_type = get_coro_frame_type(g, fn); ZigType *ptr_type = get_pointer_to_type(g, frame_type, false); - LLVMTypeRef gen_param_type = get_llvm_type(g, ptr_type); + gen_param_types.append(get_llvm_type(g, ptr_type)); param_di_types.append(get_llvm_di_type(g, ptr_type)); - fn->raw_type_ref = LLVMFunctionType(get_llvm_type(g, gen_return_type), &gen_param_type, 1, false); + // this parameter is used to pass the 
result pointer when await completes + gen_param_types.append(get_llvm_type(g, g->builtin_types.entry_usize)); + param_di_types.append(get_llvm_di_type(g, g->builtin_types.entry_usize)); + + fn->raw_type_ref = LLVMFunctionType(get_llvm_type(g, gen_return_type), + gen_param_types.items, gen_param_types.length, false); fn->raw_di_type = ZigLLVMCreateSubroutineType(g->dbuilder, param_di_types.items, (int)param_di_types.length, 0); } @@ -7493,7 +7509,8 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re 8*g->pointer_size_bytes, 8*g->builtin_types.entry_usize->abi_align, buf_ptr(&any_frame_type->name)); LLVMTypeRef llvm_void = LLVMVoidType(); - LLVMTypeRef fn_type = LLVMFunctionType(llvm_void, &any_frame_type->llvm_type, 1, false); + LLVMTypeRef arg_types[] = {any_frame_type->llvm_type, g->builtin_types.entry_usize->llvm_type}; + LLVMTypeRef fn_type = LLVMFunctionType(llvm_void, arg_types, 2, false); LLVMTypeRef usize_type_ref = get_llvm_type(g, g->builtin_types.entry_usize); ZigLLVMDIType *usize_di_type = get_llvm_di_type(g, g->builtin_types.entry_usize); ZigLLVMDIScope *compile_unit_scope = ZigLLVMCompileUnitToScope(g->compile_unit); diff --git a/src/ast_render.cpp b/src/ast_render.cpp index 4d6bae311b..98e11e24c9 100644 --- a/src/ast_render.cpp +++ b/src/ast_render.cpp @@ -1149,9 +1149,11 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) { } case NodeTypeSuspend: { - fprintf(ar->f, "suspend"); if (node->data.suspend.block != nullptr) { + fprintf(ar->f, "suspend "); render_node_grouped(ar, node->data.suspend.block); + } else { + fprintf(ar->f, "suspend\n"); } break; } diff --git a/src/codegen.cpp b/src/codegen.cpp index d0aadaabe1..6fe46acbbf 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -873,6 +873,10 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) { return buf_create_from_str("cast causes pointer to be null"); case PanicMsgIdBadResume: return buf_create_from_str("invalid resume of async function"); + 
case PanicMsgIdBadAwait: + return buf_create_from_str("async function awaited twice"); + case PanicMsgIdBadReturn: + return buf_create_from_str("async function returned twice"); } zig_unreachable(); } @@ -1991,14 +1995,66 @@ static LLVMValueRef ir_render_save_err_ret_addr(CodeGen *g, IrExecutable *execut static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *return_instruction) { if (fn_is_async(g->cur_fn)) { + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; + LLVMValueRef locals_ptr = g->cur_ret_ptr; + bool ret_type_has_bits = return_instruction->value != nullptr && + type_has_bits(return_instruction->value->value.type); + ZigType *ret_type = ret_type_has_bits ? return_instruction->value->value.type : nullptr; + if (ir_want_runtime_safety(g, &return_instruction->base)) { - LLVMValueRef locals_ptr = g->cur_ret_ptr; LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_fn_ptr_index, ""); LLVMValueRef new_resume_fn = g->cur_fn->resume_blocks.last()->split_llvm_fn; LLVMBuildStore(g->builder, new_resume_fn, resume_index_ptr); } + LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_awaiter_index, ""); + LLVMValueRef result_ptr_as_usize; + if (ret_type_has_bits) { + LLVMValueRef result_ptr_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_arg_start, ""); + LLVMValueRef result_ptr = LLVMBuildLoad(g->builder, result_ptr_ptr, ""); + if (!handle_is_ptr(ret_type)) { + // It's a scalar, so it didn't get written to the result ptr. Do that now. 
+ LLVMBuildStore(g->builder, ir_llvm_value(g, return_instruction->value), result_ptr); + } + result_ptr_as_usize = LLVMBuildPtrToInt(g->builder, result_ptr, usize_type_ref, ""); + } else { + result_ptr_as_usize = LLVMGetUndef(usize_type_ref); + } + LLVMValueRef zero = LLVMConstNull(usize_type_ref); + LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref); + LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, awaiter_ptr, + all_ones, LLVMAtomicOrderingSequentiallyConsistent, g->is_single_threaded); + + LLVMBasicBlockRef bad_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadReturn"); + LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn"); + LLVMBasicBlockRef resume_them_block = LLVMAppendBasicBlock(g->cur_fn_val, "ResumeThem"); + + LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, resume_them_block, 2); + + LLVMAddCase(switch_instr, zero, early_return_block); + LLVMAddCase(switch_instr, all_ones, bad_return_block); + + // Something has gone horribly wrong, and this is an invalid second return. + LLVMPositionBuilderAtEnd(g->builder, bad_return_block); + gen_assertion(g, PanicMsgIdBadReturn, &return_instruction->base); + + // The caller will deal with fetching the result - we're done. + LLVMPositionBuilderAtEnd(g->builder, early_return_block); LLVMBuildRetVoid(g->builder); + + // We need to resume the caller by tail calling them. 
+ LLVMPositionBuilderAtEnd(g->builder, resume_them_block); + ZigType *any_frame_type = get_any_frame_type(g, ret_type); + LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, prev_val, + get_llvm_type(g, any_frame_type), ""); + LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, their_frame_ptr, coro_fn_ptr_index, ""); + LLVMValueRef awaiter_fn = LLVMBuildLoad(g->builder, fn_ptr_ptr, ""); + LLVMValueRef args[] = {their_frame_ptr, result_ptr_as_usize}; + LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, awaiter_fn, args, 2, LLVMFastCallConv, + ZigLLVM_FnInlineAuto, ""); + ZigLLVMSetTailCall(call_inst); + LLVMBuildRetVoid(g->builder); + return nullptr; } if (want_first_arg_sret(g, &g->cur_fn->type_entry->data.fn.fn_type_id)) { @@ -3514,14 +3570,17 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr } } if (instruction->is_async) { - ZigLLVMBuildCall(g->builder, fn_val, &frame_result_loc, 1, llvm_cc, fn_inline, ""); + LLVMValueRef args[] = {frame_result_loc, LLVMGetUndef(g->builtin_types.entry_usize->llvm_type)}; + ZigLLVMBuildCall(g->builder, fn_val, args, 2, llvm_cc, fn_inline, ""); return nullptr; } else if (callee_is_async) { + ZigType *ptr_result_type = get_pointer_to_type(g, src_return_type, true); LLVMValueRef split_llvm_fn = make_fn_llvm_value(g, g->cur_fn); LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_fn_ptr_index, ""); LLVMBuildStore(g->builder, split_llvm_fn, fn_ptr_ptr); - LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, fn_val, &frame_result_loc, 1, llvm_cc, fn_inline, ""); + LLVMValueRef args[] = {frame_result_loc, LLVMGetUndef(g->builtin_types.entry_usize->llvm_type)}; + LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, fn_val, args, 2, llvm_cc, fn_inline, ""); ZigLLVMSetTailCall(call_inst); LLVMBuildRetVoid(g->builder); @@ -3530,7 +3589,15 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMBasicBlockRef call_bb = 
LLVMAppendBasicBlock(split_llvm_fn, "CallResume"); LLVMPositionBuilderAtEnd(g->builder, call_bb); render_async_var_decls(g, instruction->base.scope); - return nullptr; + + if (type_has_bits(src_return_type)) { + LLVMValueRef spilled_result_ptr = LLVMGetParam(g->cur_fn_val, 1); + LLVMValueRef casted_spilled_result_ptr = LLVMBuildIntToPtr(g->builder, spilled_result_ptr, + get_llvm_type(g, ptr_result_type), ""); + return get_handle_value(g, casted_spilled_result_ptr, src_return_type, ptr_result_type); + } else { + return nullptr; + } } if (instruction->new_stack == nullptr) { @@ -4829,7 +4896,7 @@ static LLVMValueRef ir_render_atomic_rmw(CodeGen *g, IrExecutable *executable, LLVMValueRef operand = ir_llvm_value(g, instruction->operand); if (get_codegen_ptr_type(operand_type) == nullptr) { - return LLVMBuildAtomicRMW(g->builder, op, ptr, operand, ordering, false); + return LLVMBuildAtomicRMW(g->builder, op, ptr, operand, ordering, g->is_single_threaded); } // it's a pointer but we need to treat it as an int @@ -4990,14 +5057,89 @@ static LLVMValueRef ir_render_suspend_br(CodeGen *g, IrExecutable *executable, return nullptr; } +static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInstructionAwait *instruction) { + LLVMValueRef target_frame_ptr = ir_llvm_value(g, instruction->frame); + ZigType *result_type = instruction->base.value.type; + ZigType *ptr_result_type = get_pointer_to_type(g, result_type, true); + + // Prepare to be suspended + LLVMValueRef split_llvm_fn = make_fn_llvm_value(g, g->cur_fn); + LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_fn_ptr_index, ""); + LLVMBuildStore(g->builder, split_llvm_fn, fn_ptr_ptr); + + // At this point resuming the function will do the correct thing. + // This code is as if it is running inside the suspend block. 
+ + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; + // caller's own frame pointer + LLVMValueRef awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr, usize_type_ref, ""); + LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_awaiter_index, ""); + LLVMValueRef result_ptr_as_usize; + if (type_has_bits(result_type)) { + LLVMValueRef result_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_arg_start, ""); + LLVMValueRef result_ptr = LLVMBuildLoad(g->builder, result_ptr_ptr, ""); + result_ptr_as_usize = LLVMBuildPtrToInt(g->builder, result_ptr, usize_type_ref, ""); + } else { + result_ptr_as_usize = LLVMGetUndef(usize_type_ref); + } + LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_init_val, + LLVMAtomicOrderingSequentiallyConsistent, g->is_single_threaded); + + LLVMBasicBlockRef bad_await_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadAwait"); + LLVMBasicBlockRef complete_suspend_block = LLVMAppendBasicBlock(g->cur_fn_val, "CompleteSuspend"); + LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn"); + + LLVMValueRef zero = LLVMConstNull(usize_type_ref); + LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref); + LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, bad_await_block, 2); + + LLVMAddCase(switch_instr, zero, complete_suspend_block); + LLVMAddCase(switch_instr, all_ones, early_return_block); + + // We discovered that another awaiter was already here. + LLVMPositionBuilderAtEnd(g->builder, bad_await_block); + gen_assertion(g, PanicMsgIdBadAwait, &instruction->base); + + // Rely on the target to resume us from suspension. + LLVMPositionBuilderAtEnd(g->builder, complete_suspend_block); + LLVMBuildRetVoid(g->builder); + + // The async function has already completed. So we use a tail call to resume ourselves. 
+ LLVMPositionBuilderAtEnd(g->builder, early_return_block); + LLVMValueRef args[] = {g->cur_ret_ptr, result_ptr_as_usize}; + LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, split_llvm_fn, args, 2, LLVMFastCallConv, + ZigLLVM_FnInlineAuto, ""); + ZigLLVMSetTailCall(call_inst); + LLVMBuildRetVoid(g->builder); + + g->cur_fn_val = split_llvm_fn; + g->cur_ret_ptr = LLVMGetParam(split_llvm_fn, 0); + LLVMBasicBlockRef call_bb = LLVMAppendBasicBlock(split_llvm_fn, "AwaitResume"); + LLVMPositionBuilderAtEnd(g->builder, call_bb); + render_async_var_decls(g, instruction->base.scope); + + if (type_has_bits(result_type)) { + LLVMValueRef spilled_result_ptr = LLVMGetParam(g->cur_fn_val, 1); + LLVMValueRef casted_spilled_result_ptr = LLVMBuildIntToPtr(g->builder, spilled_result_ptr, + get_llvm_type(g, ptr_result_type), ""); + return get_handle_value(g, casted_spilled_result_ptr, result_type, ptr_result_type); + } else { + return nullptr; + } +} + static LLVMTypeRef anyframe_fn_type(CodeGen *g) { if (g->anyframe_fn_type != nullptr) return g->anyframe_fn_type; + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; ZigType *anyframe_type = get_any_frame_type(g, nullptr); - LLVMTypeRef param_type = get_llvm_type(g, anyframe_type); LLVMTypeRef return_type = LLVMVoidType(); - LLVMTypeRef fn_type = LLVMFunctionType(return_type, &param_type, 1, false); + LLVMTypeRef param_types[] = { + get_llvm_type(g, anyframe_type), + usize_type_ref, + }; + LLVMTypeRef fn_type = LLVMFunctionType(return_type, param_types, 2, false); g->anyframe_fn_type = LLVMPointerType(fn_type, 0); return g->anyframe_fn_type; @@ -5006,13 +5148,15 @@ static LLVMTypeRef anyframe_fn_type(CodeGen *g) { static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, IrInstructionCoroResume *instruction) { + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; LLVMValueRef frame = ir_llvm_value(g, instruction->frame); ZigType *frame_type = instruction->frame->value.type;
assert(frame_type->id == ZigTypeIdAnyFrame); LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, frame, coro_fn_ptr_index, ""); LLVMValueRef uncasted_fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, ""); LLVMValueRef fn_val = LLVMBuildIntToPtr(g->builder, uncasted_fn_val, anyframe_fn_type(g), ""); - ZigLLVMBuildCall(g->builder, fn_val, &frame, 1, LLVMFastCallConv, ZigLLVM_FnInlineAuto, ""); + LLVMValueRef args[] = {frame, LLVMGetUndef(usize_type_ref)}; + ZigLLVMBuildCall(g->builder, fn_val, args, 2, LLVMFastCallConv, ZigLLVM_FnInlineAuto, ""); return nullptr; } @@ -5279,6 +5423,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_coro_resume(g, executable, (IrInstructionCoroResume *)instruction); case IrInstructionIdFrameSizeGen: return ir_render_frame_size(g, executable, (IrInstructionFrameSizeGen *)instruction); + case IrInstructionIdAwait: + return ir_render_await(g, executable, (IrInstructionAwait *)instruction); } zig_unreachable(); } diff --git a/src/ir.cpp b/src/ir.cpp index 3a6853b034..ecd2cd6f87 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -1052,6 +1052,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionSuspendBr *) { return IrInstructionIdSuspendBr; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionAwait *) { + return IrInstructionIdAwait; +} + static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroResume *) { return IrInstructionIdCoroResume; } @@ -3274,6 +3278,17 @@ static IrInstruction *ir_build_suspend_br(IrBuilder *irb, Scope *scope, AstNode return &instruction->base; } +static IrInstruction *ir_build_await(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *frame) +{ + IrInstructionAwait *instruction = ir_build_instruction(irb, scope, source_node); + instruction->frame = frame; + + ir_ref_instruction(frame, irb->current_basic_block); + + return &instruction->base; +} + static IrInstruction *ir_build_coro_resume(IrBuilder *irb, Scope 
*scope, AstNode *source_node, IrInstruction *frame) { @@ -7774,11 +7789,26 @@ static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *node) { assert(node->type == NodeTypeAwaitExpr); - IrInstruction *target_inst = ir_gen_node(irb, node->data.await_expr.expr, scope); + ZigFn *fn_entry = exec_fn_entry(irb->exec); + if (!fn_entry) { + add_node_error(irb->codegen, node, buf_sprintf("await outside function definition")); + return irb->codegen->invalid_instruction; + } + ScopeSuspend *existing_suspend_scope = get_scope_suspend(scope); + if (existing_suspend_scope) { + if (!existing_suspend_scope->reported_err) { + ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot await inside suspend block")); + add_error_note(irb->codegen, msg, existing_suspend_scope->base.source_node, buf_sprintf("suspend block here")); + existing_suspend_scope->reported_err = true; + } + return irb->codegen->invalid_instruction; + } + + IrInstruction *target_inst = ir_gen_node_extra(irb, node->data.await_expr.expr, scope, LValPtr, nullptr); if (target_inst == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; - zig_panic("TODO ir_gen_await_expr"); + return ir_build_await(irb, scope, node, target_inst); } static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNode *node) { @@ -7789,15 +7819,6 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod add_node_error(irb->codegen, node, buf_sprintf("suspend outside function definition")); return irb->codegen->invalid_instruction; } - ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(parent_scope); - if (scope_defer_expr) { - if (!scope_defer_expr->reported_err) { - ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside defer expression")); - add_error_note(irb->codegen, msg, scope_defer_expr->base.source_node, 
buf_sprintf("defer here")); - scope_defer_expr->reported_err = true; - } - return irb->codegen->invalid_instruction; - } ScopeSuspend *existing_suspend_scope = get_scope_suspend(parent_scope); if (existing_suspend_scope) { if (!existing_suspend_scope->reported_err) { @@ -7808,7 +7829,7 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod return irb->codegen->invalid_instruction; } - IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "Resume"); + IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume"); ir_build_suspend_begin(irb, parent_scope, node, resume_block); if (node->data.suspend.block != nullptr) { @@ -24372,6 +24393,49 @@ static IrInstruction *ir_analyze_instruction_suspend_br(IrAnalyze *ira, IrInstru return ir_finish_anal(ira, result); } +static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstructionAwait *instruction) { + IrInstruction *frame_ptr = instruction->frame->child; + if (type_is_invalid(frame_ptr->value.type)) + return ira->codegen->invalid_instruction; + + ZigType *result_type; + IrInstruction *frame; + if (frame_ptr->value.type->id == ZigTypeIdPointer && + frame_ptr->value.type->data.pointer.ptr_len == PtrLenSingle && + frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdCoroFrame) + { + result_type = frame_ptr->value.type->data.pointer.child_type->data.frame.fn->type_entry->data.fn.fn_type_id.return_type; + frame = frame_ptr; + } else { + frame = ir_get_deref(ira, &instruction->base, frame_ptr, nullptr); + if (frame->value.type->id != ZigTypeIdAnyFrame || + frame->value.type->data.any_frame.result_type == nullptr) + { + ir_add_error(ira, &instruction->base, + buf_sprintf("expected anyframe->T, found '%s'", buf_ptr(&frame->value.type->name))); + return ira->codegen->invalid_instruction; + } + result_type = frame->value.type->data.any_frame.result_type; + } + + ZigType *any_frame_type = get_any_frame_type(ira->codegen, result_type); + 
IrInstruction *casted_frame = ir_implicit_cast(ira, frame, any_frame_type); + if (type_is_invalid(casted_frame->value.type)) + return ira->codegen->invalid_instruction; + + ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec); + ir_assert(fn_entry != nullptr, &instruction->base); + + if (fn_entry->inferred_async_node == nullptr) { + fn_entry->inferred_async_node = instruction->base.source_node; + } + + IrInstruction *result = ir_build_await(&ira->new_irb, + instruction->base.scope, instruction->base.source_node, frame); + result->value.type = result_type; + return ir_finish_anal(ira, result); +} + static IrInstruction *ir_analyze_instruction_coro_resume(IrAnalyze *ira, IrInstructionCoroResume *instruction) { IrInstruction *frame_ptr = instruction->frame->child; if (type_is_invalid(frame_ptr->value.type)) @@ -24380,11 +24444,11 @@ static IrInstruction *ir_analyze_instruction_coro_resume(IrAnalyze *ira, IrInstr IrInstruction *frame; if (frame_ptr->value.type->id == ZigTypeIdPointer && frame_ptr->value.type->data.pointer.ptr_len == PtrLenSingle && - frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdAnyFrame) + frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdCoroFrame) { - frame = ir_get_deref(ira, &instruction->base, frame_ptr, nullptr); - } else { frame = frame_ptr; + } else { + frame = ir_get_deref(ira, &instruction->base, frame_ptr, nullptr); } ZigType *any_frame_type = get_any_frame_type(ira->codegen, nullptr); @@ -24691,6 +24755,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction return ir_analyze_instruction_suspend_br(ira, (IrInstructionSuspendBr *)instruction); case IrInstructionIdCoroResume: return ir_analyze_instruction_coro_resume(ira, (IrInstructionCoroResume *)instruction); + case IrInstructionIdAwait: + return ir_analyze_instruction_await(ira, (IrInstructionAwait *)instruction); } zig_unreachable(); } @@ -24826,6 +24892,7 @@ bool ir_has_side_effects(IrInstruction *instruction) { case 
IrInstructionIdSuspendBegin: case IrInstructionIdSuspendBr: case IrInstructionIdCoroResume: + case IrInstructionIdAwait: return true; case IrInstructionIdPhi: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 284ebed2f3..46d2906d30 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -1546,6 +1546,12 @@ static void ir_print_coro_resume(IrPrint *irp, IrInstructionCoroResume *instruct fprintf(irp->f, ")"); } +static void ir_print_await(IrPrint *irp, IrInstructionAwait *instruction) { + fprintf(irp->f, "@await("); + ir_print_other_instruction(irp, instruction->frame); + fprintf(irp->f, ")"); +} + static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { ir_print_prefix(irp, instruction); switch (instruction->id) { @@ -2025,6 +2031,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdCoroResume: ir_print_coro_resume(irp, (IrInstructionCoroResume *)instruction); break; + case IrInstructionIdAwait: + ir_print_await(irp, (IrInstructionAwait *)instruction); + break; } fprintf(irp->f, "\n"); } diff --git a/test/stage1/behavior/coroutine_await_struct.zig b/test/stage1/behavior/coroutine_await_struct.zig index 66ff8bb492..a649b0a39b 100644 --- a/test/stage1/behavior/coroutine_await_struct.zig +++ b/test/stage1/behavior/coroutine_await_struct.zig @@ -6,12 +6,12 @@ const Foo = struct { x: i32, }; -var await_a_promise: promise = undefined; +var await_a_promise: anyframe = undefined; var await_final_result = Foo{ .x = 0 }; test "coroutine await struct" { await_seq('a'); - const p = async await_amain() catch unreachable; + const p = async await_amain(); await_seq('f'); resume await_a_promise; await_seq('i'); @@ -20,7 +20,7 @@ test "coroutine await struct" { } async fn await_amain() void { await_seq('b'); - const p = async await_another() catch unreachable; + const p = async await_another(); await_seq('e'); await_final_result = await p; await_seq('h'); @@ -29,7 +29,7 @@ async fn await_another() Foo { 
await_seq('c'); suspend { await_seq('d'); - await_a_promise = @handle(); + await_a_promise = @frame(); } await_seq('g'); return Foo{ .x = 1234 }; diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index 01237ed1c9..28dd834bfe 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -180,97 +180,85 @@ async fn testSuspendBlock() void { result = true; } -//var await_a_promise: anyframe = undefined; -//var await_final_result: i32 = 0; -// -//test "coroutine await" { -// await_seq('a'); -// const p = async await_amain() catch unreachable; -// await_seq('f'); -// resume await_a_promise; -// await_seq('i'); -// expect(await_final_result == 1234); -// expect(std.mem.eql(u8, await_points, "abcdefghi")); -//} -//async fn await_amain() void { -// await_seq('b'); -// const p = async await_another() catch unreachable; -// await_seq('e'); -// await_final_result = await p; -// await_seq('h'); -//} -//async fn await_another() i32 { -// await_seq('c'); -// suspend { -// await_seq('d'); -// await_a_promise = @frame(); -// } -// await_seq('g'); -// return 1234; -//} -// -//var await_points = [_]u8{0} ** "abcdefghi".len; -//var await_seq_index: usize = 0; -// -//fn await_seq(c: u8) void { -// await_points[await_seq_index] = c; -// await_seq_index += 1; -//} -// -//var early_final_result: i32 = 0; -// -//test "coroutine await early return" { -// early_seq('a'); -// const p = async early_amain() catch @panic("out of memory"); -// early_seq('f'); -// expect(early_final_result == 1234); -// expect(std.mem.eql(u8, early_points, "abcdef")); -//} -//async fn early_amain() void { -// early_seq('b'); -// const p = async early_another() catch @panic("out of memory"); -// early_seq('d'); -// early_final_result = await p; -// early_seq('e'); -//} -//async fn early_another() i32 { -// early_seq('c'); -// return 1234; -//} -// -//var early_points = [_]u8{0} ** "abcdef".len; -//var early_seq_index: usize = 0; -// -//fn 
early_seq(c: u8) void { -// early_points[early_seq_index] = c; -// early_seq_index += 1; -//} -// -//test "coro allocation failure" { -// var failing_allocator = std.debug.FailingAllocator.init(std.debug.global_allocator, 0); -// if (async<&failing_allocator.allocator> asyncFuncThatNeverGetsRun()) { -// @panic("expected allocation failure"); -// } else |err| switch (err) { -// error.OutOfMemory => {}, -// } -//} -//async fn asyncFuncThatNeverGetsRun() void { -// @panic("coro frame allocation should fail"); -//} -// -//test "async function with dot syntax" { -// const S = struct { -// var y: i32 = 1; -// async fn foo() void { -// y += 1; -// suspend; -// } -// }; -// const p = try async S.foo(); -// cancel p; -// expect(S.y == 2); -//} -// +var await_a_promise: anyframe = undefined; +var await_final_result: i32 = 0; + +test "coroutine await" { + await_seq('a'); + const p = async await_amain(); + await_seq('f'); + resume await_a_promise; + await_seq('i'); + expect(await_final_result == 1234); + expect(std.mem.eql(u8, await_points, "abcdefghi")); +} +async fn await_amain() void { + await_seq('b'); + const p = async await_another(); + await_seq('e'); + await_final_result = await p; + await_seq('h'); +} +async fn await_another() i32 { + await_seq('c'); + suspend { + await_seq('d'); + await_a_promise = @frame(); + } + await_seq('g'); + return 1234; +} + +var await_points = [_]u8{0} ** "abcdefghi".len; +var await_seq_index: usize = 0; + +fn await_seq(c: u8) void { + await_points[await_seq_index] = c; + await_seq_index += 1; +} + +var early_final_result: i32 = 0; + +test "coroutine await early return" { + early_seq('a'); + const p = async early_amain(); + early_seq('f'); + expect(early_final_result == 1234); + expect(std.mem.eql(u8, early_points, "abcdef")); +} +async fn early_amain() void { + early_seq('b'); + const p = async early_another(); + early_seq('d'); + early_final_result = await p; + early_seq('e'); +} +async fn early_another() i32 { + early_seq('c'); + return 
1234; +} + +var early_points = [_]u8{0} ** "abcdef".len; +var early_seq_index: usize = 0; + +fn early_seq(c: u8) void { + early_points[early_seq_index] = c; + early_seq_index += 1; +} + +test "async function with dot syntax" { + const S = struct { + var y: i32 = 1; + async fn foo() void { + y += 1; + suspend; + } + }; + const p = async S.foo(); + // can't cancel in tests because they are non-async functions + expect(S.y == 2); +} + //test "async fn pointer in a struct field" { // var data: i32 = 1; // const Foo = struct { @@ -287,18 +275,17 @@ async fn testSuspendBlock() void { // y.* += 1; // suspend; //} -// + //test "async fn with inferred error set" { -// const p = (async failing()) catch unreachable; +// const p = async failing(); // resume p; -// cancel p; //} // //async fn failing() !void { // suspend; // return error.Fail; //} -// + //test "error return trace across suspend points - early return" { // const p = nonFailing(); // resume p; @@ -331,20 +318,18 @@ async fn testSuspendBlock() void { // } // }; //} -// -//test "break from suspend" { -// var buf: [500]u8 = undefined; -// var a = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator; -// var my_result: i32 = 1; -// const p = try async testBreakFromSuspend(&my_result); -// cancel p; -// std.testing.expect(my_result == 2); -//} -//async fn testBreakFromSuspend(my_result: *i32) void { -// suspend { -// resume @frame(); -// } -// my_result.* += 1; -// suspend; -// my_result.* += 1; -//} + +test "break from suspend" { + var my_result: i32 = 1; + const p = async testBreakFromSuspend(&my_result); + // can't cancel here + std.testing.expect(my_result == 2); +} +async fn testBreakFromSuspend(my_result: *i32) void { + suspend { + resume @frame(); + } + my_result.* += 1; + suspend; + my_result.* += 1; +} From 5bd330e76c4b9bfc341dcba1732a8ac9277e133d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 2 Aug 2019 16:32:24 -0400 Subject: [PATCH 027/125] add heap allocated async function frame test --- 
test/stage1/behavior/coroutines.zig | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index 28dd834bfe..b22f50c228 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -333,3 +333,27 @@ async fn testBreakFromSuspend(my_result: *i32) void { suspend; my_result.* += 1; } + +test "heap allocated async function frame" { + const S = struct { + var x: i32 = 42; + + fn doTheTest() !void { + const frame = try std.heap.direct_allocator.create(@Frame(someFunc)); + defer std.heap.direct_allocator.destroy(frame); + + expect(x == 42); + frame.* = async someFunc(); + expect(x == 43); + resume frame; + expect(x == 44); + } + + fn someFunc() void { + x += 1; + suspend; + x += 1; + } + }; + try S.doTheTest(); +} From 0920bb087279bdfa317fc88cef877d40ece18239 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 2 Aug 2019 19:27:27 -0400 Subject: [PATCH 028/125] implement async functions returning structs --- BRANCH_TODO | 4 ++- src/codegen.cpp | 27 ++++++++++++++----- test/stage1/behavior/coroutines.zig | 42 +++++++++++++++++++++++++++++ 3 files changed, 65 insertions(+), 8 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index 6d6ae42529..92390f099f 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,3 +1,4 @@ + * struct types as the return type of an async function. make sure it works with return result locations. * compile error for error: expected anyframe->T, found 'anyframe' * compile error for error: expected anyframe->T, found 'i32' * await of a non async function @@ -15,5 +16,6 @@ * peer type resolution of *@Frame(func) and anyframe * peer type resolution of *@Frame(func) and anyframe->T when the return type matches * returning a value from within a suspend block - * struct types as the return type of an async function. make sure it works with return result locations. 
* make resuming inside a suspend block, with nothing after it, a must-tail call. + * make sure there are safety tests for all the new safety features (search the new PanicFnId enum values) + * error return tracing diff --git a/src/codegen.cpp b/src/codegen.cpp index f12880773b..b050e02e0a 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -503,7 +503,7 @@ static LLVMValueRef make_fn_llvm_value(CodeGen *g, ZigFn *fn) { // nothing to do } else if (type_is_nonnull_ptr(return_type)) { addLLVMAttr(llvm_fn, 0, "nonnull"); - } else if (want_first_arg_sret(g, &fn_type->data.fn.fn_type_id)) { + } else if (!is_async && want_first_arg_sret(g, &fn_type->data.fn.fn_type_id)) { // Sret pointers must not be address 0 addLLVMArgAttr(llvm_fn, 0, "nonnull"); addLLVMArgAttr(llvm_fn, 0, "sret"); @@ -3241,8 +3241,13 @@ static LLVMValueRef ir_render_var_ptr(CodeGen *g, IrExecutable *executable, IrIn static LLVMValueRef ir_render_return_ptr(CodeGen *g, IrExecutable *executable, IrInstructionReturnPtr *instruction) { - src_assert(g->cur_ret_ptr != nullptr || !type_has_bits(instruction->base.value.type), - instruction->base.source_node); + if (!type_has_bits(instruction->base.value.type)) + return nullptr; + src_assert(g->cur_ret_ptr != nullptr, instruction->base.source_node); + if (fn_is_async(g->cur_fn)) { + LLVMValueRef ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_arg_start, ""); + return LLVMBuildLoad(g->builder, ptr_ptr, ""); + } return g->cur_ret_ptr; } @@ -3506,6 +3511,12 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr if (ret_has_bits) { ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start + 1, ""); } + + // Use the result location which is inside the frame if this is an async call. 
+ if (ret_has_bits) { + LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start, ""); + LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr); + } } else if (callee_is_async) { frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc); awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr, @@ -3513,6 +3524,12 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr if (ret_has_bits) { ret_ptr = result_loc; } + + // Use the call instruction's result location. + if (ret_has_bits) { + LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start, ""); + LLVMBuildStore(g->builder, result_loc, ret_ptr_ptr); + } } if (instruction->is_async || callee_is_async) { assert(frame_result_loc != nullptr); @@ -3525,10 +3542,6 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_awaiter_index, ""); LLVMBuildStore(g->builder, awaiter_init_val, awaiter_ptr); - if (ret_has_bits) { - LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start, ""); - LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr); - } } if (!instruction->is_async && !callee_is_async) { if (first_arg_ret) { diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index b22f50c228..a1c1b7ad61 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -1,6 +1,7 @@ const std = @import("std"); const builtin = @import("builtin"); const expect = std.testing.expect; +const expectEqual = std.testing.expectEqual; var global_x: i32 = 1; @@ -357,3 +358,44 @@ test "heap allocated async function frame" { }; try S.doTheTest(); } + +test "async function call return value" { + const S = struct { + var frame: anyframe = undefined; + var pt = Point{.x = 10, .y = 11 }; + + fn doTheTest() void { + expectEqual(pt.x, 10); + 
expectEqual(pt.y, 11); + _ = async first(); + expectEqual(pt.x, 10); + expectEqual(pt.y, 11); + resume frame; + expectEqual(pt.x, 1); + expectEqual(pt.y, 2); + } + + fn first() void { + pt = second(1, 2); + } + + fn second(x: i32, y: i32) Point { + return other(x, y); + } + + fn other(x: i32, y: i32) Point { + frame = @frame(); + suspend; + return Point{ + .x = x, + .y = y, + }; + } + + const Point = struct { + x: i32, + y: i32, + }; + }; + S.doTheTest(); +} From 24d78177eec4d8fc3aa8ca99dd50788e38f9f8b6 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 3 Aug 2019 01:06:14 -0400 Subject: [PATCH 029/125] add compile error for async call of function pointer --- BRANCH_TODO | 2 +- src/ir.cpp | 5 ++++- test/compile_errors.zig | 12 ++++++++++++ test/stage1/behavior/coroutines.zig | 8 ++++---- 4 files changed, 21 insertions(+), 6 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index 92390f099f..f3d881f5e5 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,4 +1,3 @@ - * struct types as the return type of an async function. make sure it works with return result locations. * compile error for error: expected anyframe->T, found 'anyframe' * compile error for error: expected anyframe->T, found 'i32' * await of a non async function @@ -19,3 +18,4 @@ * make resuming inside a suspend block, with nothing after it, a must-tail call. 
* make sure there are safety tests for all the new safety features (search the new PanicFnId enum values) * error return tracing + * compile error for casting a function to a non-async function pointer, but then later it gets inferred to be an async function diff --git a/src/ir.cpp b/src/ir.cpp index f140cfeabe..b01f43b3e1 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -14819,7 +14819,10 @@ static IrInstruction *ir_analyze_instruction_reset_result(IrAnalyze *ira, IrInst static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction, ZigFn *fn_entry, ZigType *fn_type, IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count) { - ir_assert(fn_entry != nullptr, &call_instruction->base); + if (fn_entry == nullptr) { + ir_add_error(ira, fn_ref, buf_sprintf("function is not comptime-known; @asyncCall required")); + return ira->codegen->invalid_instruction; + } ZigType *frame_type = get_coro_frame_type(ira->codegen, fn_entry); IrInstruction *result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc, diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 272d99c930..4b1a24c675 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -2,6 +2,18 @@ const tests = @import("tests.zig"); const builtin = @import("builtin"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "runtime-known function called with async keyword", + \\export fn entry() void { + \\ var ptr = afunc; + \\ _ = async ptr(); + \\} + \\ + \\async fn afunc() void { } + , + "tmp.zig:3:15: error: function is not comptime-known; @asyncCall required", + ); + cases.add( "function with ccc indirectly calling async function", \\export fn entry() void { diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index a1c1b7ad61..aa77541d19 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -263,15 +263,15 @@ test "async 
function with dot syntax" { //test "async fn pointer in a struct field" { // var data: i32 = 1; // const Foo = struct { -// bar: async<*std.mem.Allocator> fn (*i32) void, +// bar: async fn (*i32) void, // }; // var foo = Foo{ .bar = simpleAsyncFn2 }; -// const p = (async foo.bar(&data)) catch unreachable; +// const p = async foo.bar(&data); // expect(data == 2); -// cancel p; +// resume p; // expect(data == 4); //} -//async<*std.mem.Allocator> fn simpleAsyncFn2(y: *i32) void { +//async fn simpleAsyncFn2(y: *i32) void { // defer y.* += 2; // y.* += 1; // suspend; From 521aaf350185b5f01816b7a9ec604335edb3ac16 Mon Sep 17 00:00:00 2001 From: daurnimator Date: Sat, 3 Aug 2019 15:56:25 +1000 Subject: [PATCH 030/125] std: return Elf object from constructors instead of filling in pointer --- std/debug.zig | 3 +-- std/elf.zig | 10 ++++++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/std/debug.zig b/std/debug.zig index 32f96d3e15..d1c17343ef 100644 --- a/std/debug.zig +++ b/std/debug.zig @@ -1024,8 +1024,7 @@ pub fn openElfDebugInfo( elf_seekable_stream: *DwarfSeekableStream, elf_in_stream: *DwarfInStream, ) !DwarfInfo { - var efile: elf.Elf = undefined; - try efile.openStream(allocator, elf_seekable_stream, elf_in_stream); + var efile = try elf.Elf.openStream(allocator, elf_seekable_stream, elf_in_stream); errdefer efile.close(); var di = DwarfInfo{ diff --git a/std/elf.zig b/std/elf.zig index c605a177a5..0b3ea20398 100644 --- a/std/elf.zig +++ b/std/elf.zig @@ -371,21 +371,21 @@ pub const Elf = struct { prealloc_file: File, /// Call close when done. - pub fn openPath(elf: *Elf, allocator: *mem.Allocator, path: []const u8) !void { + pub fn openPath(allocator: *mem.Allocator, path: []const u8) !Elf { @compileError("TODO implement"); } /// Call close when done. 
- pub fn openFile(elf: *Elf, allocator: *mem.Allocator, file: File) !void { + pub fn openFile(allocator: *mem.Allocator, file: File) !Elf { @compileError("TODO implement"); } pub fn openStream( - elf: *Elf, allocator: *mem.Allocator, seekable_stream: *io.SeekableStream(anyerror, anyerror), in: *io.InStream(anyerror), - ) !void { + ) !Elf { + var elf: Elf = undefined; elf.auto_close_stream = false; elf.allocator = allocator; elf.seekable_stream = seekable_stream; @@ -523,6 +523,8 @@ pub const Elf = struct { // not a string table return error.InvalidFormat; } + + return elf; } pub fn close(elf: *Elf) void { From e444e737b786afd0c5aba2ea04c901f89c57813e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 3 Aug 2019 02:11:52 -0400 Subject: [PATCH 031/125] add runtime safety for resuming an awaiting function --- BRANCH_TODO | 4 ++- src/all_types.hpp | 1 + src/codegen.cpp | 47 ++++++++++++++++++++++++++--- test/runtime_safety.zig | 32 ++++++++++++++++++++ test/stage1/behavior/coroutines.zig | 2 +- 5 files changed, 79 insertions(+), 7 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index f3d881f5e5..a9bc5f3666 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -6,7 +6,6 @@ * @asyncCall with an async function pointer * cancel * defer and errdefer - * safety for resuming when it is awaiting * safety for double await * implicit cast of normal function to async function should be allowed when it is inferred to be async * go over the commented out tests @@ -19,3 +18,6 @@ * make sure there are safety tests for all the new safety features (search the new PanicFnId enum values) * error return tracing * compile error for casting a function to a non-async function pointer, but then later it gets inferred to be an async function + * compile error for copying a frame + * compile error for resuming a const frame pointer + * runtime safety enabling/disabling scope has to be coordinated across resume/await/calls/return diff --git a/src/all_types.hpp b/src/all_types.hpp index 
079b8ded95..0f8cce1376 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1552,6 +1552,7 @@ enum PanicMsgId { PanicMsgIdBadResume, PanicMsgIdBadAwait, PanicMsgIdBadReturn, + PanicMsgIdResumedAnAwaitingFn, PanicMsgIdCount, }; diff --git a/src/codegen.cpp b/src/codegen.cpp index b050e02e0a..db617e636a 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -877,6 +877,8 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) { return buf_create_from_str("async function awaited twice"); case PanicMsgIdBadReturn: return buf_create_from_str("async function returned twice"); + case PanicMsgIdResumedAnAwaitingFn: + return buf_create_from_str("awaiting function resumed"); } zig_unreachable(); } @@ -2018,7 +2020,10 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns } result_ptr_as_usize = LLVMBuildPtrToInt(g->builder, result_ptr, usize_type_ref, ""); } else { - result_ptr_as_usize = LLVMGetUndef(usize_type_ref); + // For debug safety, this value has to be anything other than all 1's, which signals + // that it is being resumed. 0 is a bad choice since null pointers are special. + result_ptr_as_usize = ir_want_runtime_safety(g, &return_instruction->base) ? 
+ LLVMConstInt(usize_type_ref, 1, false) : LLVMGetUndef(usize_type_ref); } LLVMValueRef zero = LLVMConstNull(usize_type_ref); LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref); @@ -3582,8 +3587,9 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMBuildStore(g->builder, gen_param_values.at(arg_i), arg_ptr); } } + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; if (instruction->is_async) { - LLVMValueRef args[] = {frame_result_loc, LLVMGetUndef(g->builtin_types.entry_usize->llvm_type)}; + LLVMValueRef args[] = {frame_result_loc, LLVMGetUndef(usize_type_ref)}; ZigLLVMBuildCall(g->builder, fn_val, args, 2, llvm_cc, fn_inline, ""); return nullptr; } else if (callee_is_async) { @@ -3591,8 +3597,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMValueRef split_llvm_fn = make_fn_llvm_value(g, g->cur_fn); LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_fn_ptr_index, ""); LLVMBuildStore(g->builder, split_llvm_fn, fn_ptr_ptr); - - LLVMValueRef args[] = {frame_result_loc, LLVMGetUndef(g->builtin_types.entry_usize->llvm_type)}; + LLVMValueRef args[] = {frame_result_loc, LLVMGetUndef(usize_type_ref)}; LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, fn_val, args, 2, llvm_cc, fn_inline, ""); ZigLLVMSetTailCall(call_inst); LLVMBuildRetVoid(g->builder); @@ -3601,6 +3606,21 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr g->cur_ret_ptr = LLVMGetParam(split_llvm_fn, 0); LLVMBasicBlockRef call_bb = LLVMAppendBasicBlock(split_llvm_fn, "CallResume"); LLVMPositionBuilderAtEnd(g->builder, call_bb); + + if (ir_want_runtime_safety(g, &instruction->base)) { + LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(split_llvm_fn, "BadResume"); + LLVMBasicBlockRef ok_resume_block = LLVMAppendBasicBlock(split_llvm_fn, "OkResume"); + LLVMValueRef arg_val = LLVMGetParam(split_llvm_fn, 1); + LLVMValueRef all_ones = 
LLVMConstAllOnes(usize_type_ref); + LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntNE, arg_val, all_ones, ""); + LLVMBuildCondBr(g->builder, ok_bit, ok_resume_block, bad_resume_block); + + LLVMPositionBuilderAtEnd(g->builder, bad_resume_block); + gen_safety_crash(g, PanicMsgIdResumedAnAwaitingFn); + + LLVMPositionBuilderAtEnd(g->builder, ok_resume_block); + } + render_async_var_decls(g, instruction->base.scope); if (type_has_bits(src_return_type)) { @@ -5139,6 +5159,21 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst g->cur_ret_ptr = LLVMGetParam(split_llvm_fn, 0); LLVMBasicBlockRef call_bb = LLVMAppendBasicBlock(split_llvm_fn, "AwaitResume"); LLVMPositionBuilderAtEnd(g->builder, call_bb); + + if (ir_want_runtime_safety(g, &instruction->base)) { + LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(split_llvm_fn, "BadResume"); + LLVMBasicBlockRef ok_resume_block = LLVMAppendBasicBlock(split_llvm_fn, "OkResume"); + LLVMValueRef arg_val = LLVMGetParam(split_llvm_fn, 1); + LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref); + LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntNE, arg_val, all_ones, ""); + LLVMBuildCondBr(g->builder, ok_bit, ok_resume_block, bad_resume_block); + + LLVMPositionBuilderAtEnd(g->builder, bad_resume_block); + gen_safety_crash(g, PanicMsgIdResumedAnAwaitingFn); + + LLVMPositionBuilderAtEnd(g->builder, ok_resume_block); + } + render_async_var_decls(g, instruction->base.scope); if (type_has_bits(result_type)) { @@ -5178,7 +5213,9 @@ static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, frame, coro_fn_ptr_index, ""); LLVMValueRef uncasted_fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, ""); LLVMValueRef fn_val = LLVMBuildIntToPtr(g->builder, uncasted_fn_val, anyframe_fn_type(g), ""); - LLVMValueRef args[] = {frame, LLVMGetUndef(usize_type_ref)}; + LLVMValueRef arg_val = ir_want_runtime_safety(g, 
&instruction->base) ? + LLVMConstAllOnes(usize_type_ref) : LLVMGetUndef(usize_type_ref); + LLVMValueRef args[] = {frame, arg_val}; ZigLLVMBuildCall(g->builder, fn_val, args, 2, LLVMFastCallConv, ZigLLVM_FnInlineAuto, ""); return nullptr; } diff --git a/test/runtime_safety.zig b/test/runtime_safety.zig index 336dbb8bf0..43cf0856c3 100644 --- a/test/runtime_safety.zig +++ b/test/runtime_safety.zig @@ -1,6 +1,38 @@ const tests = @import("tests.zig"); pub fn addCases(cases: *tests.CompareOutputContext) void { + cases.addRuntimeSafety("resuming a function which is awaiting a frame", + \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn { + \\ @import("std").os.exit(126); + \\} + \\pub fn main() void { + \\ var frame = async first(); + \\ resume frame; + \\} + \\fn first() void { + \\ var frame = async other(); + \\ await frame; + \\} + \\fn other() void { + \\ suspend; + \\} + ); + cases.addRuntimeSafety("resuming a function which is awaiting a call", + \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn { + \\ @import("std").os.exit(126); + \\} + \\pub fn main() void { + \\ var frame = async first(); + \\ resume frame; + \\} + \\fn first() void { + \\ other(); + \\} + \\fn other() void { + \\ suspend; + \\} + ); + cases.addRuntimeSafety("invalid resume of async function", \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn { \\ @import("std").os.exit(126); diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index aa77541d19..2b82dce707 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -89,7 +89,7 @@ test "calling an inferred async function" { var other_frame: *@Frame(other) = undefined; fn doTheTest() void { - const p = async first(); + _ = async first(); expect(x == 1); resume other_frame.*; expect(x == 2); From c87920966133d3285b60ccd022282e3f53789e0c Mon Sep 17 00:00:00 
2001 From: Andrew Kelley Date: Sat, 3 Aug 2019 02:40:38 -0400 Subject: [PATCH 032/125] add compile error for calling async function pointer --- src/analyze.cpp | 6 +++++- test/compile_errors.zig | 12 ++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/src/analyze.cpp b/src/analyze.cpp index 5af9698dd1..5eb70d6717 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5177,7 +5177,11 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { for (size_t i = 0; i < fn->call_list.length; i += 1) { IrInstructionCallGen *call = fn->call_list.at(i); ZigFn *callee = call->fn_entry; - assert(callee != nullptr); + if (callee == nullptr) { + add_node_error(g, call->base.source_node, + buf_sprintf("function is not comptime-known; @asyncCall required")); + return ErrorSemanticAnalyzeFail; + } analyze_fn_body(g, callee); if (callee->anal_state == FnAnalStateInvalid) { diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 4b1a24c675..3245632e37 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -2,6 +2,18 @@ const tests = @import("tests.zig"); const builtin = @import("builtin"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "runtime-known async function called", + \\export fn entry() void { + \\ var ptr = afunc; + \\ _ = ptr(); + \\} + \\ + \\async fn afunc() void {} + , + "tmp.zig:3:12: error: function is not comptime-known; @asyncCall required", + ); + cases.add( "runtime-known function called with async keyword", \\export fn entry() void { From 87710a1cc2c4d0e7ecc309e430f7d33baadc5f02 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 3 Aug 2019 16:14:24 -0400 Subject: [PATCH 033/125] implement `@asyncCall` which supports async function pointers --- BRANCH_TODO | 16 ++++- src/all_types.hpp | 3 + src/analyze.cpp | 3 + src/codegen.cpp | 105 +++++++++++++++++++++------- src/ir.cpp | 102 ++++++++++++++++++++++----- test/compile_errors.zig | 12 ++++ test/runtime_safety.zig | 15 ++++ 
test/stage1/behavior/coroutines.zig | 52 +++++++++----- 8 files changed, 247 insertions(+), 61 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index a9bc5f3666..0ac1062b43 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,9 +1,8 @@ + * @asyncCall with an async function pointer * compile error for error: expected anyframe->T, found 'anyframe' * compile error for error: expected anyframe->T, found 'i32' * await of a non async function - * await in single-threaded mode * async call on a non async function - * @asyncCall with an async function pointer * cancel * defer and errdefer * safety for double await @@ -21,3 +20,16 @@ * compile error for copying a frame * compile error for resuming a const frame pointer * runtime safety enabling/disabling scope has to be coordinated across resume/await/calls/return + * await in single-threaded mode + * calling a generic function which is async + * make sure `await @asyncCall` and `await async` are handled correctly. + * allow @asyncCall with a real @Frame(func) (the point of this is result pointer) + * documentation + - @asyncCall + - @frame + - @Frame + - @frameSize + - coroutines section + - suspend + - resume + - anyframe, anyframe->T diff --git a/src/all_types.hpp b/src/all_types.hpp index 0f8cce1376..87db8edf8d 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1503,6 +1503,7 @@ enum BuiltinFnId { BuiltinFnIdInlineCall, BuiltinFnIdNoInlineCall, BuiltinFnIdNewStackCall, + BuiltinFnIdAsyncCall, BuiltinFnIdTypeId, BuiltinFnIdShlExact, BuiltinFnIdShrExact, @@ -1553,6 +1554,7 @@ enum PanicMsgId { PanicMsgIdBadAwait, PanicMsgIdBadReturn, PanicMsgIdResumedAnAwaitingFn, + PanicMsgIdFrameTooSmall, PanicMsgIdCount, }; @@ -3699,6 +3701,7 @@ static const size_t maybe_null_index = 1; static const size_t err_union_err_index = 0; static const size_t err_union_payload_index = 1; +// label (grep this): [coro_frame_struct_layout] static const size_t coro_fn_ptr_index = 0; static const size_t coro_awaiter_index = 1; static const 
size_t coro_arg_start = 2; diff --git a/src/analyze.cpp b/src/analyze.cpp index 5eb70d6717..cd8f981ff3 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5205,6 +5205,7 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { call->frame_result_loc = &alloca_gen->base; } + // label (grep this): [coro_frame_struct_layout] ZigList field_types = {}; ZigList field_names = {}; @@ -7525,6 +7526,7 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re if (result_type == nullptr) { g->anyframe_fn_type = ptr_result_type; } + // label (grep this): [coro_frame_struct_layout] LLVMTypeRef field_types[] = { ptr_result_type, // fn_ptr usize_type_ref, // awaiter @@ -7558,6 +7560,7 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re ZigLLVMReplaceTemporary(g->dbuilder, frame_header_di_type, replacement_di_type); } else { ZigType *ptr_result_type = get_pointer_to_type(g, result_type, false); + // label (grep this): [coro_frame_struct_layout] LLVMTypeRef field_types[] = { LLVMPointerType(fn_type, 0), // fn_ptr usize_type_ref, // awaiter diff --git a/src/codegen.cpp b/src/codegen.cpp index db617e636a..ebdd9e6120 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -879,6 +879,8 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) { return buf_create_from_str("async function returned twice"); case PanicMsgIdResumedAnAwaitingFn: return buf_create_from_str("awaiting function resumed"); + case PanicMsgIdFrameTooSmall: + return buf_create_from_str("frame too small"); } zig_unreachable(); } @@ -3479,7 +3481,18 @@ static void render_async_var_decls(CodeGen *g, Scope *scope) { } } +static LLVMValueRef gen_frame_size(CodeGen *g, LLVMValueRef fn_val) { + LLVMTypeRef usize_llvm_type = g->builtin_types.entry_usize->llvm_type; + LLVMTypeRef ptr_usize_llvm_type = LLVMPointerType(usize_llvm_type, 0); + LLVMValueRef casted_fn_val = LLVMBuildBitCast(g->builder, fn_val, ptr_usize_llvm_type, ""); + LLVMValueRef negative_one = 
LLVMConstInt(LLVMInt32Type(), -1, true); + LLVMValueRef prefix_ptr = LLVMBuildInBoundsGEP(g->builder, casted_fn_val, &negative_one, 1, ""); + return LLVMBuildLoad(g->builder, prefix_ptr, ""); +} + static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstructionCallGen *instruction) { + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; + LLVMValueRef fn_val; ZigType *fn_type; bool callee_is_async; @@ -3511,34 +3524,54 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMValueRef awaiter_init_val; LLVMValueRef ret_ptr; if (instruction->is_async) { - frame_result_loc = result_loc; awaiter_init_val = zero; - if (ret_has_bits) { - ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start + 1, ""); - } - // Use the result location which is inside the frame if this is an async call. - if (ret_has_bits) { - LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start, ""); - LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr); + if (instruction->new_stack == nullptr) { + frame_result_loc = result_loc; + + if (ret_has_bits) { + // Use the result location which is inside the frame if this is an async call. 
+ ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start + 1, ""); + } + } else { + LLVMValueRef frame_slice_ptr = ir_llvm_value(g, instruction->new_stack); + if (ir_want_runtime_safety(g, &instruction->base)) { + LLVMValueRef given_len_ptr = LLVMBuildStructGEP(g->builder, frame_slice_ptr, slice_len_index, ""); + LLVMValueRef given_frame_len = LLVMBuildLoad(g->builder, given_len_ptr, ""); + LLVMValueRef actual_frame_len = gen_frame_size(g, fn_val); + + LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "FrameSizeCheckFail"); + LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "FrameSizeCheckOk"); + + LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntUGE, given_frame_len, actual_frame_len, ""); + LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block); + + LLVMPositionBuilderAtEnd(g->builder, fail_block); + gen_safety_crash(g, PanicMsgIdFrameTooSmall); + + LLVMPositionBuilderAtEnd(g->builder, ok_block); + } + LLVMValueRef frame_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_slice_ptr, slice_ptr_index, ""); + LLVMValueRef frame_ptr = LLVMBuildLoad(g->builder, frame_ptr_ptr, ""); + frame_result_loc = LLVMBuildBitCast(g->builder, frame_ptr, + get_llvm_type(g, instruction->base.value.type), ""); + + if (ret_has_bits) { + // Use the result location provided to the @asyncCall builtin + ret_ptr = result_loc; + } } } else if (callee_is_async) { frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc); awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr, g->builtin_types.entry_usize->llvm_type, ""); // caller's own frame pointer if (ret_has_bits) { + // Use the call instruction's result location. ret_ptr = result_loc; } - - // Use the call instruction's result location. 
- if (ret_has_bits) { - LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start, ""); - LLVMBuildStore(g->builder, result_loc, ret_ptr_ptr); - } } if (instruction->is_async || callee_is_async) { assert(frame_result_loc != nullptr); - assert(instruction->fn_entry != nullptr); if (prefix_arg_err_ret_stack) { zig_panic("TODO"); @@ -3547,6 +3580,10 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_awaiter_index, ""); LLVMBuildStore(g->builder, awaiter_init_val, awaiter_ptr); + if (ret_has_bits) { + LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start, ""); + LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr); + } } if (!instruction->is_async && !callee_is_async) { if (first_arg_ret) { @@ -3581,16 +3618,37 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr if (instruction->is_async || callee_is_async) { size_t ret_2_or_0 = type_has_bits(fn_type->data.fn.fn_type_id.return_type) ? 
2 : 0; + size_t arg_start_i = coro_arg_start + ret_2_or_0; + + LLVMValueRef casted_frame; + if (instruction->new_stack != nullptr) { + // We need the frame type to be a pointer to a struct that includes the args + // label (grep this): [coro_frame_struct_layout] + size_t field_count = arg_start_i + gen_param_values.length; + LLVMTypeRef *field_types = allocate_nonzero(field_count); + LLVMGetStructElementTypes(LLVMGetElementType(LLVMTypeOf(frame_result_loc)), field_types); + for (size_t arg_i = 0; arg_i < gen_param_values.length; arg_i += 1) { + field_types[arg_start_i + arg_i] = LLVMTypeOf(gen_param_values.at(arg_i)); + } + LLVMTypeRef frame_with_args_type = LLVMStructType(field_types, field_count, false); + LLVMTypeRef ptr_frame_with_args_type = LLVMPointerType(frame_with_args_type, 0); + + casted_frame = LLVMBuildBitCast(g->builder, frame_result_loc, ptr_frame_with_args_type, ""); + } else { + casted_frame = frame_result_loc; + } + for (size_t arg_i = 0; arg_i < gen_param_values.length; arg_i += 1) { - LLVMValueRef arg_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, - coro_arg_start + ret_2_or_0 + arg_i, ""); + LLVMValueRef arg_ptr = LLVMBuildStructGEP(g->builder, casted_frame, arg_start_i + arg_i, ""); LLVMBuildStore(g->builder, gen_param_values.at(arg_i), arg_ptr); } } - LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; if (instruction->is_async) { LLVMValueRef args[] = {frame_result_loc, LLVMGetUndef(usize_type_ref)}; ZigLLVMBuildCall(g->builder, fn_val, args, 2, llvm_cc, fn_inline, ""); + if (instruction->new_stack != nullptr) { + return frame_result_loc; + } return nullptr; } else if (callee_is_async) { ZigType *ptr_result_type = get_pointer_to_type(g, src_return_type, true); @@ -5223,13 +5281,8 @@ static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, static LLVMValueRef ir_render_frame_size(CodeGen *g, IrExecutable *executable, IrInstructionFrameSizeGen *instruction) { - LLVMTypeRef usize_llvm_type = 
g->builtin_types.entry_usize->llvm_type; - LLVMTypeRef ptr_usize_llvm_type = LLVMPointerType(usize_llvm_type, 0); LLVMValueRef fn_val = ir_llvm_value(g, instruction->fn); - LLVMValueRef casted_fn_val = LLVMBuildBitCast(g->builder, fn_val, ptr_usize_llvm_type, ""); - LLVMValueRef negative_one = LLVMConstInt(LLVMInt32Type(), -1, true); - LLVMValueRef prefix_ptr = LLVMBuildInBoundsGEP(g->builder, casted_fn_val, &negative_one, 1, ""); - return LLVMBuildLoad(g->builder, prefix_ptr, ""); + return gen_frame_size(g, fn_val); } static void set_debug_location(CodeGen *g, IrInstruction *instruction) { @@ -7097,13 +7150,13 @@ static void define_builtin_fns(CodeGen *g) { create_builtin_fn(g, BuiltinFnIdFloor, "floor", 2); create_builtin_fn(g, BuiltinFnIdCeil, "ceil", 2); create_builtin_fn(g, BuiltinFnIdTrunc, "trunc", 2); - //Needs library support on Windows - //create_builtin_fn(g, BuiltinFnIdNearbyInt, "nearbyInt", 2); + create_builtin_fn(g, BuiltinFnIdNearbyInt, "nearbyInt", 2); create_builtin_fn(g, BuiltinFnIdRound, "round", 2); create_builtin_fn(g, BuiltinFnIdMulAdd, "mulAdd", 4); create_builtin_fn(g, BuiltinFnIdInlineCall, "inlineCall", SIZE_MAX); create_builtin_fn(g, BuiltinFnIdNoInlineCall, "noInlineCall", SIZE_MAX); create_builtin_fn(g, BuiltinFnIdNewStackCall, "newStackCall", SIZE_MAX); + create_builtin_fn(g, BuiltinFnIdAsyncCall, "asyncCall", SIZE_MAX); create_builtin_fn(g, BuiltinFnIdTypeId, "typeId", 1); create_builtin_fn(g, BuiltinFnIdShlExact, "shlExact", 2); create_builtin_fn(g, BuiltinFnIdShrExact, "shrExact", 2); diff --git a/src/ir.cpp b/src/ir.cpp index b01f43b3e1..fbf9da9656 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -1402,6 +1402,10 @@ static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *s if (fn_ref != nullptr) ir_ref_instruction(fn_ref, irb->current_basic_block); for (size_t i = 0; i < arg_count; i += 1) ir_ref_instruction(args[i], irb->current_basic_block); + if (is_async && new_stack != nullptr) { + // in this case the arg at 
the end is the return pointer + ir_ref_instruction(args[arg_count], irb->current_basic_block); + } if (new_stack != nullptr) ir_ref_instruction(new_stack, irb->current_basic_block); return &call_instruction->base; @@ -5203,8 +5207,10 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo } case BuiltinFnIdNewStackCall: { - if (node->data.fn_call_expr.params.length == 0) { - add_node_error(irb->codegen, node, buf_sprintf("expected at least 1 argument, found 0")); + if (node->data.fn_call_expr.params.length < 2) { + add_node_error(irb->codegen, node, + buf_sprintf("expected at least 2 arguments, found %" ZIG_PRI_usize, + node->data.fn_call_expr.params.length)); return irb->codegen->invalid_instruction; } @@ -5232,6 +5238,50 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo FnInlineAuto, false, new_stack, result_loc); return ir_lval_wrap(irb, scope, call, lval, result_loc); } + case BuiltinFnIdAsyncCall: + { + size_t arg_offset = 3; + if (node->data.fn_call_expr.params.length < arg_offset) { + add_node_error(irb->codegen, node, + buf_sprintf("expected at least %" ZIG_PRI_usize " arguments, found %" ZIG_PRI_usize, + arg_offset, node->data.fn_call_expr.params.length)); + return irb->codegen->invalid_instruction; + } + + AstNode *bytes_node = node->data.fn_call_expr.params.at(0); + IrInstruction *bytes = ir_gen_node(irb, bytes_node, scope); + if (bytes == irb->codegen->invalid_instruction) + return bytes; + + AstNode *ret_ptr_node = node->data.fn_call_expr.params.at(1); + IrInstruction *ret_ptr = ir_gen_node(irb, ret_ptr_node, scope); + if (ret_ptr == irb->codegen->invalid_instruction) + return ret_ptr; + + AstNode *fn_ref_node = node->data.fn_call_expr.params.at(2); + IrInstruction *fn_ref = ir_gen_node(irb, fn_ref_node, scope); + if (fn_ref == irb->codegen->invalid_instruction) + return fn_ref; + + size_t arg_count = node->data.fn_call_expr.params.length - arg_offset; + + // last "arg" is return pointer + 
IrInstruction **args = allocate(arg_count + 1); + + for (size_t i = 0; i < arg_count; i += 1) { + AstNode *arg_node = node->data.fn_call_expr.params.at(i + arg_offset); + IrInstruction *arg = ir_gen_node(irb, arg_node, scope); + if (arg == irb->codegen->invalid_instruction) + return arg; + args[i] = arg; + } + + args[arg_count] = ret_ptr; + + IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false, + FnInlineAuto, true, bytes, result_loc); + return ir_lval_wrap(irb, scope, call, lval, result_loc); + } case BuiltinFnIdTypeId: { AstNode *arg0_node = node->data.fn_call_expr.params.at(0); @@ -14817,11 +14867,31 @@ static IrInstruction *ir_analyze_instruction_reset_result(IrAnalyze *ira, IrInst } static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction, ZigFn *fn_entry, - ZigType *fn_type, IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count) + ZigType *fn_type, IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count, + IrInstruction *casted_new_stack) { if (fn_entry == nullptr) { - ir_add_error(ira, fn_ref, buf_sprintf("function is not comptime-known; @asyncCall required")); - return ira->codegen->invalid_instruction; + if (call_instruction->new_stack == nullptr) { + ir_add_error(ira, fn_ref, buf_sprintf("function is not comptime-known; @asyncCall required")); + return ira->codegen->invalid_instruction; + } + // this is an @asyncCall + + if (fn_type->data.fn.fn_type_id.cc != CallingConventionAsync) { + ir_add_error(ira, fn_ref, + buf_sprintf("expected async function, found '%s'", buf_ptr(&fn_type->name))); + return ira->codegen->invalid_instruction; + } + + IrInstruction *ret_ptr = call_instruction->args[call_instruction->arg_count]->child; + if (type_is_invalid(ret_ptr->value.type)) + return ira->codegen->invalid_instruction; + + ZigType *anyframe_type = get_any_frame_type(ira->codegen, fn_type->data.fn.fn_type_id.return_type); + + IrInstructionCallGen 
*call_gen = ir_build_call_gen(ira, &call_instruction->base, nullptr, fn_ref, + arg_count, casted_args, FnInlineAuto, true, casted_new_stack, ret_ptr, anyframe_type); + return &call_gen->base; } ZigType *frame_type = get_coro_frame_type(ira->codegen, fn_entry); @@ -15559,13 +15629,13 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c size_t impl_param_count = impl_fn_type_id->param_count; if (call_instruction->is_async) { - zig_panic("TODO async call"); + IrInstruction *result = ir_analyze_async_call(ira, call_instruction, impl_fn, impl_fn->type_entry, + nullptr, casted_args, call_param_count, casted_new_stack); + return ir_finish_anal(ira, result); } - if (!call_instruction->is_async) { - if (impl_fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) { - parent_fn_entry->inferred_async_node = fn_ref->source_node; - } + if (impl_fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) { + parent_fn_entry->inferred_async_node = fn_ref->source_node; } IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base, @@ -15645,18 +15715,16 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c return ira->codegen->invalid_instruction; } - if (!call_instruction->is_async) { - if (fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) { - parent_fn_entry->inferred_async_node = fn_ref->source_node; - } - } - if (call_instruction->is_async) { IrInstruction *result = ir_analyze_async_call(ira, call_instruction, fn_entry, fn_type, fn_ref, - casted_args, call_param_count); + casted_args, call_param_count, casted_new_stack); return ir_finish_anal(ira, result); } + if (fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) { + parent_fn_entry->inferred_async_node = fn_ref->source_node; + } + IrInstruction *result_loc; if 
(handle_is_ptr(return_type)) { result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc, diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 3245632e37..2941cadcf5 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -2,6 +2,18 @@ const tests = @import("tests.zig"); const builtin = @import("builtin"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "non async function pointer passed to @asyncCall", + \\export fn entry() void { + \\ var ptr = afunc; + \\ var bytes: [100]u8 = undefined; + \\ _ = @asyncCall(&bytes, {}, ptr); + \\} + \\fn afunc() void { } + , + "tmp.zig:4:32: error: expected async function, found 'fn() void'", + ); + cases.add( "runtime-known async function called", \\export fn entry() void { diff --git a/test/runtime_safety.zig b/test/runtime_safety.zig index 43cf0856c3..ac9037caae 100644 --- a/test/runtime_safety.zig +++ b/test/runtime_safety.zig @@ -1,6 +1,20 @@ const tests = @import("tests.zig"); pub fn addCases(cases: *tests.CompareOutputContext) void { + cases.addRuntimeSafety("@asyncCall with too small a frame", + \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn { + \\ @import("std").os.exit(126); + \\} + \\pub fn main() void { + \\ var bytes: [1]u8 = undefined; + \\ var ptr = other; + \\ var frame = @asyncCall(&bytes, {}, ptr); + \\} + \\async fn other() void { + \\ suspend; + \\} + ); + cases.addRuntimeSafety("resuming a function which is awaiting a frame", \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn { \\ @import("std").os.exit(126); @@ -17,6 +31,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { \\ suspend; \\} ); + cases.addRuntimeSafety("resuming a function which is awaiting a call", \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn { \\ @import("std").os.exit(126); diff --git 
a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index 2b82dce707..511568a898 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -260,22 +260,42 @@ test "async function with dot syntax" { expect(S.y == 2); } -//test "async fn pointer in a struct field" { -// var data: i32 = 1; -// const Foo = struct { -// bar: async fn (*i32) void, -// }; -// var foo = Foo{ .bar = simpleAsyncFn2 }; -// const p = async foo.bar(&data); -// expect(data == 2); -// resume p; -// expect(data == 4); -//} -//async fn simpleAsyncFn2(y: *i32) void { -// defer y.* += 2; -// y.* += 1; -// suspend; -//} +test "async fn pointer in a struct field" { + var data: i32 = 1; + const Foo = struct { + bar: async fn (*i32) void, + }; + var foo = Foo{ .bar = simpleAsyncFn2 }; + var bytes: [64]u8 = undefined; + const p = @asyncCall(&bytes, {}, foo.bar, &data); + comptime expect(@typeOf(p) == anyframe->void); + expect(data == 2); + resume p; + expect(data == 4); +} +async fn simpleAsyncFn2(y: *i32) void { + defer y.* += 2; + y.* += 1; + suspend; +} + +test "@asyncCall with return type" { + const Foo = struct { + bar: async fn () i32, + + async fn afunc() i32 { + suspend; + return 1234; + } + }; + var foo = Foo{ .bar = Foo.afunc }; + var bytes: [64]u8 = undefined; + var aresult: i32 = 0; + const frame = @asyncCall(&bytes, &aresult, foo.bar); + expect(aresult == 0); + resume frame; + expect(aresult == 1234); +} //test "async fn with inferred error set" { // const p = async failing(); From 12924477a50d37bf055fd5fc1cdf0fd77684a472 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 3 Aug 2019 20:33:16 -0400 Subject: [PATCH 034/125] fix regression in calling extern functions --- BRANCH_TODO | 8 ++++---- src/analyze.cpp | 3 +++ 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index 0ac1062b43..62fee38371 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,13 +1,13 @@ - * @asyncCall with an async function 
pointer + * suspension points inside branching control flow + * go over the commented out tests + * error return tracing * compile error for error: expected anyframe->T, found 'anyframe' * compile error for error: expected anyframe->T, found 'i32' * await of a non async function * async call on a non async function * cancel * defer and errdefer - * safety for double await * implicit cast of normal function to async function should be allowed when it is inferred to be async - * go over the commented out tests * revive std.event.Loop * @typeInfo for @Frame(func) * peer type resolution of *@Frame(func) and anyframe @@ -15,7 +15,6 @@ * returning a value from within a suspend block * make resuming inside a suspend block, with nothing after it, a must-tail call. * make sure there are safety tests for all the new safety features (search the new PanicFnId enum values) - * error return tracing * compile error for casting a function to a non-async function pointer, but then later it gets inferred to be an async function * compile error for copying a frame * compile error for resuming a const frame pointer @@ -33,3 +32,4 @@ - suspend - resume - anyframe, anyframe->T + * safety for double await diff --git a/src/analyze.cpp b/src/analyze.cpp index cd8f981ff3..009cb2de12 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5182,6 +5182,9 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { buf_sprintf("function is not comptime-known; @asyncCall required")); return ErrorSemanticAnalyzeFail; } + if (callee->body_node == nullptr) { + continue; + } analyze_fn_body(g, callee); if (callee->anal_state == FnAnalStateInvalid) { From 887eac0219345763f1ae9c8d9efad6950f6bbfe6 Mon Sep 17 00:00:00 2001 From: daurnimator Date: Sun, 4 Aug 2019 16:27:36 +1000 Subject: [PATCH 035/125] std: remove elf.auto_close_stream and elf.prealloc_file --- std/elf.zig | 5 ----- 1 file changed, 5 deletions(-) diff --git a/std/elf.zig b/std/elf.zig index 0b3ea20398..37635895fd 100644 --- 
a/std/elf.zig +++ b/std/elf.zig @@ -356,7 +356,6 @@ pub const SectionHeader = struct { pub const Elf = struct { seekable_stream: *io.SeekableStream(anyerror, anyerror), in_stream: *io.InStream(anyerror), - auto_close_stream: bool, is_64: bool, endian: builtin.Endian, file_type: FileType, @@ -368,7 +367,6 @@ pub const Elf = struct { string_section: *SectionHeader, section_headers: []SectionHeader, allocator: *mem.Allocator, - prealloc_file: File, /// Call close when done. pub fn openPath(allocator: *mem.Allocator, path: []const u8) !Elf { @@ -386,7 +384,6 @@ pub const Elf = struct { in: *io.InStream(anyerror), ) !Elf { var elf: Elf = undefined; - elf.auto_close_stream = false; elf.allocator = allocator; elf.seekable_stream = seekable_stream; elf.in_stream = in; @@ -529,8 +526,6 @@ pub const Elf = struct { pub fn close(elf: *Elf) void { elf.allocator.free(elf.section_headers); - - if (elf.auto_close_stream) elf.prealloc_file.close(); } pub fn findSection(elf: *Elf, name: []const u8) !?*SectionHeader { From 6150da3df99b41f89ea01a72e6c1b76fe4c36f89 Mon Sep 17 00:00:00 2001 From: Sahnvour Date: Thu, 27 Jun 2019 23:21:35 +0200 Subject: [PATCH 036/125] direct port of wyhash v2 also inspired by https://github.com/ManDeJan/zig-wyhash --- std/hash.zig | 4 ++ std/hash/wyhash.zig | 99 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 103 insertions(+) create mode 100644 std/hash/wyhash.zig diff --git a/std/hash.zig b/std/hash.zig index 148504aa39..723860da3b 100644 --- a/std/hash.zig +++ b/std/hash.zig @@ -16,6 +16,7 @@ pub const SipHash128 = siphash.SipHash128; pub const murmur = @import("hash/murmur.zig"); pub const Murmur2_32 = murmur.Murmur2_32; + pub const Murmur2_64 = murmur.Murmur2_64; pub const Murmur3_32 = murmur.Murmur3_32; @@ -23,6 +24,8 @@ pub const cityhash = @import("hash/cityhash.zig"); pub const CityHash32 = cityhash.CityHash32; pub const CityHash64 = cityhash.CityHash64; +pub const wyhash = @import("hash/wyhash.zig").hash; + test "hash" { _ = 
@import("hash/adler.zig"); _ = @import("hash/crc.zig"); @@ -30,4 +33,5 @@ test "hash" { _ = @import("hash/siphash.zig"); _ = @import("hash/murmur.zig"); _ = @import("hash/cityhash.zig"); + _ = @import("hash/wyhash.zig"); } diff --git a/std/hash/wyhash.zig b/std/hash/wyhash.zig new file mode 100644 index 0000000000..57efe8fd63 --- /dev/null +++ b/std/hash/wyhash.zig @@ -0,0 +1,99 @@ +const std = @import("std"); +const mem = std.mem; + +const primes = [_]u64{ + 0xa0761d6478bd642f, + 0xe7037ed1a0b428db, + 0x8ebc6af09c88c6e3, + 0x589965cc75374cc3, + 0x1d8e4e27c47d124f, +}; + +fn read_bytes(comptime bytes: u8, data: []const u8) u64 { + return mem.readVarInt(u64, data[0..bytes], @import("builtin").endian); +} + +fn read_8bytes_swapped(data: []const u8) u64 { + return (read_bytes(4, data) << 32 | read_bytes(4, data[4..])); +} + +fn mum(a: u64, b: u64) u64 { + var r: u128 = @intCast(u128, a) * @intCast(u128, b); + r = (r >> 64) ^ r; + return @truncate(u64, r); +} + +fn mix0(a: u64, b: u64, seed: u64) u64 { + return mum(a ^ seed ^ primes[0], b ^ seed ^ primes[1]); +} + +fn mix1(a: u64, b: u64, seed: u64) u64 { + return mum(a ^ seed ^ primes[2], b ^ seed ^ primes[3]); +} + +pub fn hash(key: []const u8, initial_seed: u64) u64 { + var seed = initial_seed; + + var i: usize = 0; + while (i + 32 <= key.len) : (i += 32) { + seed = mix0( + read_bytes(8, key[i..]), + read_bytes(8, key[i + 8 ..]), + seed, + ) ^ mix1( + read_bytes(8, key[i + 16 ..]), + read_bytes(8, key[i + 24 ..]), + seed, + ); + } + + const rem_len = @truncate(u5, key.len); + const rem_key = key[i..]; + seed = switch (rem_len) { + 0 => seed, + 1 => mix0(read_bytes(1, rem_key), primes[4], seed), + 2 => mix0(read_bytes(2, rem_key), primes[4], seed), + 3 => mix0((read_bytes(2, rem_key) << 8) | read_bytes(1, rem_key[2..]), primes[4], seed), + 4 => mix0(read_bytes(4, rem_key), primes[4], seed), + 5 => mix0((read_bytes(4, rem_key) << 8) | read_bytes(1, rem_key[4..]), primes[4], seed), + 6 => mix0((read_bytes(4, rem_key) 
<< 16) | read_bytes(2, rem_key[4..]), primes[4], seed), + 7 => mix0((read_bytes(4, rem_key) << 24) | (read_bytes(2, rem_key[4..]) << 8) | read_bytes(1, rem_key[6..]), primes[4], seed), + 8 => mix0(read_8bytes_swapped(rem_key), primes[4], seed), + 9 => mix0(read_8bytes_swapped(rem_key), read_bytes(1, rem_key[8..]), seed), + 10 => mix0(read_8bytes_swapped(rem_key), read_bytes(2, rem_key[8..]), seed), + 11 => mix0(read_8bytes_swapped(rem_key), (read_bytes(2, rem_key[8..]) << 8) | read_bytes(1, rem_key[10..]), seed), + 12 => mix0(read_8bytes_swapped(rem_key), read_bytes(4, rem_key[8..]), seed), + 13 => mix0(read_8bytes_swapped(rem_key), (read_bytes(4, rem_key[8..]) << 8) | read_bytes(1, rem_key[12..]), seed), + 14 => mix0(read_8bytes_swapped(rem_key), (read_bytes(4, rem_key[8..]) << 16) | read_bytes(2, rem_key[12..]), seed), + 15 => mix0(read_8bytes_swapped(rem_key), (read_bytes(4, rem_key[8..]) << 24) | (read_bytes(2, rem_key[12..]) << 8) | read_bytes(1, rem_key[14..]), seed), + 16 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed), + 17 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_bytes(1, rem_key[16..]), primes[4], seed), + 18 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_bytes(2, rem_key[16..]), primes[4], seed), + 19 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(2, rem_key[16..]) << 8) | read_bytes(1, rem_key[18..]), primes[4], seed), + 20 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_bytes(4, rem_key[16..]), primes[4], seed), + 21 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(4, rem_key[16..]) << 8) | read_bytes(1, rem_key[20..]), primes[4], seed), + 22 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(4, rem_key[16..]) << 16) | read_bytes(2, 
rem_key[20..]), primes[4], seed), + 23 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(4, rem_key[16..]) << 24) | (read_bytes(2, rem_key[20..]) << 8) | read_bytes(1, rem_key[22..]), primes[4], seed), + 24 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), primes[4], seed), + 25 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), read_bytes(1, rem_key[24..]), seed), + 26 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), read_bytes(2, rem_key[24..]), seed), + 27 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(2, rem_key[24..]) << 8) | read_bytes(1, rem_key[26..]), seed), + 28 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), read_bytes(4, rem_key[24..]), seed), + 29 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 8) | read_bytes(1, rem_key[28..]), seed), + 30 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 16) | read_bytes(2, rem_key[28..]), seed), + 31 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 24) | (read_bytes(2, rem_key[28..]) << 8) | read_bytes(1, rem_key[30..]), seed), + }; + + return mum(seed ^ key.len, primes[4]); +} + +test "test vectors" { + const expectEqual = std.testing.expectEqual; + expectEqual(hash("", 0), 0x0); + expectEqual(hash("a", 1), 0xbed235177f41d328); + expectEqual(hash("abc", 2), 0xbe348debe59b27c3); + 
expectEqual(hash("message digest", 3), 0x37320f657213a290); + expectEqual(hash("abcdefghijklmnopqrstuvwxyz", 4), 0xd0b270e1d8a7019c); + expectEqual(hash("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", 5), 0x602a1894d3bbfe7f); + expectEqual(hash("12345678901234567890123456789012345678901234567890123456789012345678901234567890", 6), 0x829e9c148b75970e); +} From 5bd407b27890fbed82891289e6a2bf2da93c2a41 Mon Sep 17 00:00:00 2001 From: Sahnvour Date: Sun, 30 Jun 2019 11:35:57 +0200 Subject: [PATCH 037/125] use wyhash in std's hashmap, and improve autoHash to handle more types and behave more correctly --- std/hash_map.zig | 264 +++++++++++++++++++++++++++++++---------------- 1 file changed, 173 insertions(+), 91 deletions(-) diff --git a/std/hash_map.zig b/std/hash_map.zig index c99d1d2490..6a8679ccd0 100644 --- a/std/hash_map.zig +++ b/std/hash_map.zig @@ -4,6 +4,8 @@ const assert = debug.assert; const testing = std.testing; const math = std.math; const mem = std.mem; +const meta = std.meta; +const wyhash = std.hash.wyhash; const Allocator = mem.Allocator; const builtin = @import("builtin"); @@ -448,15 +450,17 @@ test "iterator hash map" { try reset_map.putNoClobber(2, 22); try reset_map.putNoClobber(3, 33); + // TODO this test depends on the hashing algorithm, because it assumes the + // order of the elements in the hashmap. This should not be the case. 
var keys = [_]i32{ + 1, 3, 2, - 1, }; var values = [_]i32{ + 11, 33, 22, - 11, }; var it = reset_map.iterator(); @@ -518,8 +522,8 @@ pub fn getTrivialEqlFn(comptime K: type) (fn (K, K) bool) { pub fn getAutoHashFn(comptime K: type) (fn (K) u32) { return struct { fn hash(key: K) u32 { - comptime var rng = comptime std.rand.DefaultPrng.init(0); - return autoHash(key, &rng.random, u32); + const h = autoHash(key, 0); + return @truncate(u32, h); } }.hash; } @@ -527,114 +531,192 @@ pub fn getAutoHashFn(comptime K: type) (fn (K) u32) { pub fn getAutoEqlFn(comptime K: type) (fn (K, K) bool) { return struct { fn eql(a: K, b: K) bool { - return autoEql(a, b); + return meta.eql(a, b); } }.eql; } -// TODO improve these hash functions -pub fn autoHash(key: var, comptime rng: *std.rand.Random, comptime HashInt: type) HashInt { - switch (@typeInfo(@typeOf(key))) { +/// Provides generic hashing for any eligible type. +/// Only hashes `key` itself, pointers are not followed. +/// The underlying hashing algorithm is wyhash. +pub fn autoHash(key: var, seed: u64) u64 { + // We use the fact that wyhash takes an input seed to "chain" hasing when the + // key has multiple parts that are not necessarily contiguous in memory. 
+ const Key = @typeOf(key); + switch (@typeInfo(Key)) { builtin.TypeId.NoReturn, builtin.TypeId.Opaque, builtin.TypeId.Undefined, builtin.TypeId.ArgTuple, + builtin.TypeId.Void, + builtin.TypeId.Null, + builtin.TypeId.BoundFn, + builtin.TypeId.ComptimeFloat, + builtin.TypeId.ComptimeInt, + builtin.TypeId.Type, + builtin.TypeId.EnumLiteral, => @compileError("cannot hash this type"), - builtin.TypeId.Void, - builtin.TypeId.Null, - => return 0, + builtin.TypeId.Int => return wyhash(std.mem.asBytes(&key), seed), - builtin.TypeId.Int => |info| { - const unsigned_x = @bitCast(@IntType(false, info.bits), key); - if (info.bits <= HashInt.bit_count) { - return HashInt(unsigned_x) ^ comptime rng.scalar(HashInt); - } else { - return @truncate(HashInt, unsigned_x ^ comptime rng.scalar(@typeOf(unsigned_x))); + builtin.TypeId.Float => |info| return autoHash(@bitCast(@IntType(false, info.bits), key), seed), + + builtin.TypeId.Bool => return autoHash(@boolToInt(key), seed), + builtin.TypeId.Enum => return autoHash(@enumToInt(key), seed), + builtin.TypeId.ErrorSet => return autoHash(@errorToInt(key), seed), + builtin.TypeId.Promise, builtin.TypeId.Fn => return autoHash(@ptrToInt(key), seed), + + builtin.TypeId.Pointer => |info| return switch (info.size) { + builtin.TypeInfo.Pointer.Size.One, + builtin.TypeInfo.Pointer.Size.Many, + builtin.TypeInfo.Pointer.Size.C, + => return autoHash(@ptrToInt(key), seed), + + builtin.TypeInfo.Pointer.Size.Slice => return autoHash(key.len, autoHash(key.ptr, seed)), + }, + + builtin.TypeId.Optional => return if (key) |k| autoHash(k, seed) else 0, + + builtin.TypeId.Array => { + // TODO detect via a trait when Key has no padding bits to + // hash it as an array of bytes. + // Otherwise, hash every element. + var s = seed; + for (key) |element| { + // We reuse the hash of the previous element as the seed for the + // next one so that they're dependant. 
+ s = autoHash(element, s); } + return s; }, - builtin.TypeId.Float => |info| { - return autoHash(@bitCast(@IntType(false, info.bits), key), rng, HashInt); + builtin.TypeId.Vector => |info| { + // If there's no unused bits in the child type, we can just hash + // this as an array of bytes. + if (info.child.bit_count % 8 == 0) { + return wyhash(mem.asBytes(&key), seed); + } + + // Otherwise, hash every element. + var s = seed; + // TODO remove the copy to an array once field access is done. + const array: [info.len]info.child = key; + comptime var i: u32 = 0; + inline while (i < info.len) : (i += 1) { + s = autoHash(array[i], s); + } + return s; }, - builtin.TypeId.Bool => return autoHash(@boolToInt(key), rng, HashInt), - builtin.TypeId.Enum => return autoHash(@enumToInt(key), rng, HashInt), - builtin.TypeId.ErrorSet => return autoHash(@errorToInt(key), rng, HashInt), - builtin.TypeId.Promise, builtin.TypeId.Fn => return autoHash(@ptrToInt(key), rng, HashInt), - builtin.TypeId.BoundFn, - builtin.TypeId.ComptimeFloat, - builtin.TypeId.ComptimeInt, - builtin.TypeId.Type, - builtin.TypeId.EnumLiteral, - => return 0, + builtin.TypeId.Struct => |info| { + // TODO detect via a trait when Key has no padding bits to + // hash it as an array of bytes. + // Otherwise, hash every field. + var s = seed; + inline for (info.fields) |field| { + // We reuse the hash of the previous field as the seed for the + // next one so that they're dependant. 
+ s = autoHash(@field(key, field.name), s); + } + return s; + }, - builtin.TypeId.Pointer => |info| switch (info.size) { - builtin.TypeInfo.Pointer.Size.One => @compileError("TODO auto hash for single item pointers"), - builtin.TypeInfo.Pointer.Size.Many => @compileError("TODO auto hash for many item pointers"), - builtin.TypeInfo.Pointer.Size.C => @compileError("TODO auto hash C pointers"), - builtin.TypeInfo.Pointer.Size.Slice => { - const interval = std.math.max(1, key.len / 256); - var i: usize = 0; - var h = comptime rng.scalar(HashInt); - while (i < key.len) : (i += interval) { - h ^= autoHash(key[i], rng, HashInt); + builtin.TypeId.Union => |info| { + if (info.tag_type) |tag_type| { + const tag = meta.activeTag(key); + const s = autoHash(tag, seed); + inline for (info.fields) |field| { + const enum_field = field.enum_field.?; + if (enum_field.value == @enumToInt(tag)) { + return autoHash(@field(key, enum_field.name), s); + } } - return h; - }, + unreachable; + } else @compileError("cannot hash untagged union type: " ++ @typeName(Key) ++ ", provide your own hash function"); }, - builtin.TypeId.Optional => @compileError("TODO auto hash for optionals"), - builtin.TypeId.Array => @compileError("TODO auto hash for arrays"), - builtin.TypeId.Vector => @compileError("TODO auto hash for vectors"), - builtin.TypeId.Struct => @compileError("TODO auto hash for structs"), - builtin.TypeId.Union => @compileError("TODO auto hash for unions"), - builtin.TypeId.ErrorUnion => @compileError("TODO auto hash for unions"), + builtin.TypeId.ErrorUnion => { + return autoHash(key catch |err| return autoHash(err, seed), seed); + }, } } -pub fn autoEql(a: var, b: @typeOf(a)) bool { - switch (@typeInfo(@typeOf(a))) { - builtin.TypeId.NoReturn, - builtin.TypeId.Opaque, - builtin.TypeId.Undefined, - builtin.TypeId.ArgTuple, - => @compileError("cannot test equality of this type"), - builtin.TypeId.Void, - builtin.TypeId.Null, - => return true, - builtin.TypeId.Bool, - builtin.TypeId.Int, 
- builtin.TypeId.Float, - builtin.TypeId.ComptimeFloat, - builtin.TypeId.ComptimeInt, - builtin.TypeId.EnumLiteral, - builtin.TypeId.Promise, - builtin.TypeId.Enum, - builtin.TypeId.BoundFn, - builtin.TypeId.Fn, - builtin.TypeId.ErrorSet, - builtin.TypeId.Type, - => return a == b, - - builtin.TypeId.Pointer => |info| switch (info.size) { - builtin.TypeInfo.Pointer.Size.One => @compileError("TODO auto eql for single item pointers"), - builtin.TypeInfo.Pointer.Size.Many => @compileError("TODO auto eql for many item pointers"), - builtin.TypeInfo.Pointer.Size.C => @compileError("TODO auto eql for C pointers"), - builtin.TypeInfo.Pointer.Size.Slice => { - if (a.len != b.len) return false; - for (a) |a_item, i| { - if (!autoEql(a_item, b[i])) return false; - } - return true; - }, - }, - - builtin.TypeId.Optional => @compileError("TODO auto eql for optionals"), - builtin.TypeId.Array => @compileError("TODO auto eql for arrays"), - builtin.TypeId.Struct => @compileError("TODO auto eql for structs"), - builtin.TypeId.Union => @compileError("TODO auto eql for unions"), - builtin.TypeId.ErrorUnion => @compileError("TODO auto eql for unions"), - builtin.TypeId.Vector => @compileError("TODO auto eql for vectors"), - } +test "autoHash slice" { + const array1 = try std.heap.direct_allocator.create([6]u32); + defer std.heap.direct_allocator.destroy(array1); + array1.* = [_]u32{ 1, 2, 3, 4, 5, 6 }; + const array2 = [_]u32{ 1, 2, 3, 4, 5, 6 }; + const a = array1[0..]; + const b = array2[0..]; + const c = array1[0..3]; + testing.expect(autoHash(a, 0) == autoHash(a, 0)); + testing.expect(autoHash(a, 0) != autoHash(array1, 0)); + testing.expect(autoHash(a, 0) != autoHash(b, 0)); + testing.expect(autoHash(a, 0) != autoHash(c, 0)); +} + +test "autoHash optional" { + const a: ?u32 = 123; + const b: ?u32 = null; + testing.expectEqual(autoHash(a, 0), autoHash(u32(123), 0)); + testing.expect(autoHash(a, 0) != autoHash(b, 0)); + testing.expectEqual(autoHash(b, 0), 0); +} + +test "autoHash 
array" { + const a = [_]u32{ 1, 2, 3 }; + const h = autoHash(a, 0); + testing.expectEqual(h, autoHash(u32(3), autoHash(u32(2), autoHash(u32(1), 0)))); +} + +test "autoHash struct" { + const Foo = struct { + a: u32 = 1, + b: u32 = 2, + c: u32 = 3, + }; + const f = Foo{}; + const h = autoHash(f, 0); + testing.expectEqual(h, autoHash(u32(3), autoHash(u32(2), autoHash(u32(1), 0)))); +} + +test "autoHash union" { + const Foo = union(enum) { + A: u32, + B: f32, + C: u32, + }; + + const a = Foo{ .A = 18 }; + var b = Foo{ .B = 12.34 }; + const c = Foo{ .C = 18 }; + testing.expect(autoHash(a, 0) == autoHash(a, 0)); + testing.expect(autoHash(a, 0) != autoHash(b, 0)); + testing.expect(autoHash(a, 0) != autoHash(c, 0)); + + b = Foo{ .A = 18 }; + testing.expect(autoHash(a, 0) == autoHash(b, 0)); +} + +test "autoHash vector" { + const a: @Vector(4, u32) = [_]u32{ 1, 2, 3, 4 }; + const b: @Vector(4, u32) = [_]u32{ 1, 2, 3, 5 }; + const c: @Vector(4, u31) = [_]u31{ 1, 2, 3, 4 }; + testing.expect(autoHash(a, 0) == autoHash(a, 0)); + testing.expect(autoHash(a, 0) != autoHash(b, 0)); + testing.expect(autoHash(a, 0) != autoHash(c, 0)); +} + +test "autoHash error union" { + const Errors = error{Test}; + const Foo = struct { + a: u32 = 1, + b: u32 = 2, + c: u32 = 3, + }; + const f = Foo{}; + const g: Errors!Foo = Errors.Test; + testing.expect(autoHash(f, 0) != autoHash(g, 0)); + testing.expect(autoHash(f, 0) == autoHash(Foo{}, 0)); + testing.expect(autoHash(g, 0) == autoHash(Errors.Test, 0)); } From c9ce43f59fc777055612aeea58db0849390bc204 Mon Sep 17 00:00:00 2001 From: Sahnvour Date: Sun, 30 Jun 2019 20:46:43 +0200 Subject: [PATCH 038/125] fix hashmap using strings as keys --- std/http/headers.zig | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/std/http/headers.zig b/std/http/headers.zig index 69ed494f3a..7eb7fcc2c2 100644 --- a/std/http/headers.zig +++ b/std/http/headers.zig @@ -102,9 +102,19 @@ test "HeaderEntry" { testing.expectEqualSlices(u8, "x", 
e.value); } +fn stringEql(a: []const u8, b: []const u8) bool { + if (a.len != b.len) return false; + if (a.ptr == b.ptr) return true; + return mem.compare(u8, a, b) == .Equal; +} + +fn stringHash(s: []const u8) u32 { + return @truncate(u32, std.hash.wyhash(s, 0)); +} + const HeaderList = std.ArrayList(HeaderEntry); const HeaderIndexList = std.ArrayList(usize); -const HeaderIndex = std.AutoHashMap([]const u8, HeaderIndexList); +const HeaderIndex = std.HashMap([]const u8, HeaderIndexList, stringHash, stringEql); pub const Headers = struct { // the owned header field name is stored in the index as part of the key From 83dffc70afe4956c56f570ce5c854b17cbd6f218 Mon Sep 17 00:00:00 2001 From: Marc Tiehuis Date: Mon, 1 Jul 2019 23:23:26 +1200 Subject: [PATCH 039/125] Add iterative wyhash api --- std/hash.zig | 4 +- std/hash/wyhash.zig | 164 ++++++++++++++++++++++++++++---------------- std/hash_map.zig | 6 +- 3 files changed, 112 insertions(+), 62 deletions(-) diff --git a/std/hash.zig b/std/hash.zig index 723860da3b..e246fd0ad3 100644 --- a/std/hash.zig +++ b/std/hash.zig @@ -17,6 +17,7 @@ pub const SipHash128 = siphash.SipHash128; pub const murmur = @import("hash/murmur.zig"); pub const Murmur2_32 = murmur.Murmur2_32; + pub const Murmur2_64 = murmur.Murmur2_64; pub const Murmur3_32 = murmur.Murmur3_32; @@ -24,7 +25,8 @@ pub const cityhash = @import("hash/cityhash.zig"); pub const CityHash32 = cityhash.CityHash32; pub const CityHash64 = cityhash.CityHash64; -pub const wyhash = @import("hash/wyhash.zig").hash; +const wyhash = @import("hash/wyhash.zig"); +pub const Wyhash = wyhash.Wyhash; test "hash" { _ = @import("hash/adler.zig"); diff --git a/std/hash/wyhash.zig b/std/hash/wyhash.zig index 57efe8fd63..49119c5a95 100644 --- a/std/hash/wyhash.zig +++ b/std/hash/wyhash.zig @@ -10,7 +10,7 @@ const primes = [_]u64{ }; fn read_bytes(comptime bytes: u8, data: []const u8) u64 { - return mem.readVarInt(u64, data[0..bytes], @import("builtin").endian); + return mem.readVarInt(u64, 
data[0..bytes], .Little); } fn read_8bytes_swapped(data: []const u8) u64 { @@ -18,7 +18,7 @@ fn read_8bytes_swapped(data: []const u8) u64 { } fn mum(a: u64, b: u64) u64 { - var r: u128 = @intCast(u128, a) * @intCast(u128, b); + var r = std.math.mulWide(u64, a, b); r = (r >> 64) ^ r; return @truncate(u64, r); } @@ -31,69 +31,117 @@ fn mix1(a: u64, b: u64, seed: u64) u64 { return mum(a ^ seed ^ primes[2], b ^ seed ^ primes[3]); } -pub fn hash(key: []const u8, initial_seed: u64) u64 { - var seed = initial_seed; +pub const Wyhash = struct { + seed: u64, - var i: usize = 0; - while (i + 32 <= key.len) : (i += 32) { - seed = mix0( - read_bytes(8, key[i..]), - read_bytes(8, key[i + 8 ..]), - seed, + buf: [32]u8, + buf_len: usize, + msg_len: usize, + + pub fn init(seed: u64) Wyhash { + return Wyhash{ + .seed = seed, + .buf = undefined, + .buf_len = 0, + .msg_len = 0, + }; + } + + fn round(self: *Wyhash, b: []const u8) void { + std.debug.assert(b.len == 32); + + self.seed = mix0( + read_bytes(8, b[0..]), + read_bytes(8, b[8..]), + self.seed, ) ^ mix1( - read_bytes(8, key[i + 16 ..]), - read_bytes(8, key[i + 24 ..]), - seed, + read_bytes(8, b[16..]), + read_bytes(8, b[24..]), + self.seed, ); } - const rem_len = @truncate(u5, key.len); - const rem_key = key[i..]; - seed = switch (rem_len) { - 0 => seed, - 1 => mix0(read_bytes(1, rem_key), primes[4], seed), - 2 => mix0(read_bytes(2, rem_key), primes[4], seed), - 3 => mix0((read_bytes(2, rem_key) << 8) | read_bytes(1, rem_key[2..]), primes[4], seed), - 4 => mix0(read_bytes(4, rem_key), primes[4], seed), - 5 => mix0((read_bytes(4, rem_key) << 8) | read_bytes(1, rem_key[4..]), primes[4], seed), - 6 => mix0((read_bytes(4, rem_key) << 16) | read_bytes(2, rem_key[4..]), primes[4], seed), - 7 => mix0((read_bytes(4, rem_key) << 24) | (read_bytes(2, rem_key[4..]) << 8) | read_bytes(1, rem_key[6..]), primes[4], seed), - 8 => mix0(read_8bytes_swapped(rem_key), primes[4], seed), - 9 => mix0(read_8bytes_swapped(rem_key), read_bytes(1, 
rem_key[8..]), seed), - 10 => mix0(read_8bytes_swapped(rem_key), read_bytes(2, rem_key[8..]), seed), - 11 => mix0(read_8bytes_swapped(rem_key), (read_bytes(2, rem_key[8..]) << 8) | read_bytes(1, rem_key[10..]), seed), - 12 => mix0(read_8bytes_swapped(rem_key), read_bytes(4, rem_key[8..]), seed), - 13 => mix0(read_8bytes_swapped(rem_key), (read_bytes(4, rem_key[8..]) << 8) | read_bytes(1, rem_key[12..]), seed), - 14 => mix0(read_8bytes_swapped(rem_key), (read_bytes(4, rem_key[8..]) << 16) | read_bytes(2, rem_key[12..]), seed), - 15 => mix0(read_8bytes_swapped(rem_key), (read_bytes(4, rem_key[8..]) << 24) | (read_bytes(2, rem_key[12..]) << 8) | read_bytes(1, rem_key[14..]), seed), - 16 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed), - 17 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_bytes(1, rem_key[16..]), primes[4], seed), - 18 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_bytes(2, rem_key[16..]), primes[4], seed), - 19 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(2, rem_key[16..]) << 8) | read_bytes(1, rem_key[18..]), primes[4], seed), - 20 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_bytes(4, rem_key[16..]), primes[4], seed), - 21 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(4, rem_key[16..]) << 8) | read_bytes(1, rem_key[20..]), primes[4], seed), - 22 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(4, rem_key[16..]) << 16) | read_bytes(2, rem_key[20..]), primes[4], seed), - 23 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(4, rem_key[16..]) << 24) | (read_bytes(2, rem_key[20..]) << 8) | read_bytes(1, rem_key[22..]), primes[4], seed), - 24 => mix0(read_8bytes_swapped(rem_key), 
read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), primes[4], seed), - 25 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), read_bytes(1, rem_key[24..]), seed), - 26 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), read_bytes(2, rem_key[24..]), seed), - 27 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(2, rem_key[24..]) << 8) | read_bytes(1, rem_key[26..]), seed), - 28 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), read_bytes(4, rem_key[24..]), seed), - 29 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 8) | read_bytes(1, rem_key[28..]), seed), - 30 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 16) | read_bytes(2, rem_key[28..]), seed), - 31 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 24) | (read_bytes(2, rem_key[28..]) << 8) | read_bytes(1, rem_key[30..]), seed), - }; + pub fn update(self: *Wyhash, b: []const u8) void { + var off: usize = 0; - return mum(seed ^ key.len, primes[4]); -} + // Partial from previous. + if (self.buf_len != 0 and self.buf_len + b.len > 32) { + off += 32 - self.buf_len; + mem.copy(u8, self.buf[self.buf_len..], b[0..off]); + self.round(self.buf[0..]); + self.buf_len = 0; + } + + // Full middle blocks. + while (off + 32 <= b.len) : (off += 32) { + @inlineCall(self.round, b[off .. off + 32]); + } + + // Remainder for next pass. 
+ mem.copy(u8, self.buf[self.buf_len..], b[off..]); + self.buf_len += @intCast(u8, b[off..].len); + self.msg_len += b.len; + } + + pub fn final(self: *Wyhash) u64 { + const seed = self.seed; + const rem_len = @intCast(u5, self.buf_len); + const rem_key = self.buf[0..self.buf_len]; + + self.seed = switch (rem_len) { + 0 => seed, + 1 => mix0(read_bytes(1, rem_key), primes[4], seed), + 2 => mix0(read_bytes(2, rem_key), primes[4], seed), + 3 => mix0((read_bytes(2, rem_key) << 8) | read_bytes(1, rem_key[2..]), primes[4], seed), + 4 => mix0(read_bytes(4, rem_key), primes[4], seed), + 5 => mix0((read_bytes(4, rem_key) << 8) | read_bytes(1, rem_key[4..]), primes[4], seed), + 6 => mix0((read_bytes(4, rem_key) << 16) | read_bytes(2, rem_key[4..]), primes[4], seed), + 7 => mix0((read_bytes(4, rem_key) << 24) | (read_bytes(2, rem_key[4..]) << 8) | read_bytes(1, rem_key[6..]), primes[4], seed), + 8 => mix0(read_8bytes_swapped(rem_key), primes[4], seed), + 9 => mix0(read_8bytes_swapped(rem_key), read_bytes(1, rem_key[8..]), seed), + 10 => mix0(read_8bytes_swapped(rem_key), read_bytes(2, rem_key[8..]), seed), + 11 => mix0(read_8bytes_swapped(rem_key), (read_bytes(2, rem_key[8..]) << 8) | read_bytes(1, rem_key[10..]), seed), + 12 => mix0(read_8bytes_swapped(rem_key), read_bytes(4, rem_key[8..]), seed), + 13 => mix0(read_8bytes_swapped(rem_key), (read_bytes(4, rem_key[8..]) << 8) | read_bytes(1, rem_key[12..]), seed), + 14 => mix0(read_8bytes_swapped(rem_key), (read_bytes(4, rem_key[8..]) << 16) | read_bytes(2, rem_key[12..]), seed), + 15 => mix0(read_8bytes_swapped(rem_key), (read_bytes(4, rem_key[8..]) << 24) | (read_bytes(2, rem_key[12..]) << 8) | read_bytes(1, rem_key[14..]), seed), + 16 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed), + 17 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_bytes(1, rem_key[16..]), primes[4], seed), + 18 => mix0(read_8bytes_swapped(rem_key), 
read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_bytes(2, rem_key[16..]), primes[4], seed), + 19 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(2, rem_key[16..]) << 8) | read_bytes(1, rem_key[18..]), primes[4], seed), + 20 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_bytes(4, rem_key[16..]), primes[4], seed), + 21 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(4, rem_key[16..]) << 8) | read_bytes(1, rem_key[20..]), primes[4], seed), + 22 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(4, rem_key[16..]) << 16) | read_bytes(2, rem_key[20..]), primes[4], seed), + 23 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(4, rem_key[16..]) << 24) | (read_bytes(2, rem_key[20..]) << 8) | read_bytes(1, rem_key[22..]), primes[4], seed), + 24 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), primes[4], seed), + 25 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), read_bytes(1, rem_key[24..]), seed), + 26 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), read_bytes(2, rem_key[24..]), seed), + 27 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(2, rem_key[24..]) << 8) | read_bytes(1, rem_key[26..]), seed), + 28 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), read_bytes(4, rem_key[24..]), seed), + 29 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 8) | 
read_bytes(1, rem_key[28..]), seed), + 30 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 16) | read_bytes(2, rem_key[28..]), seed), + 31 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 24) | (read_bytes(2, rem_key[28..]) << 8) | read_bytes(1, rem_key[30..]), seed), + }; + + return mum(self.seed ^ self.msg_len, primes[4]); + } + + pub fn hash(seed: u64, input: []const u8) u64 { + var c = Wyhash.init(seed); + c.update(input); + return c.final(); + } +}; test "test vectors" { const expectEqual = std.testing.expectEqual; - expectEqual(hash("", 0), 0x0); - expectEqual(hash("a", 1), 0xbed235177f41d328); - expectEqual(hash("abc", 2), 0xbe348debe59b27c3); - expectEqual(hash("message digest", 3), 0x37320f657213a290); - expectEqual(hash("abcdefghijklmnopqrstuvwxyz", 4), 0xd0b270e1d8a7019c); - expectEqual(hash("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", 5), 0x602a1894d3bbfe7f); - expectEqual(hash("12345678901234567890123456789012345678901234567890123456789012345678901234567890", 6), 0x829e9c148b75970e); + const hash = Wyhash.hash; + + expectEqual(hash(0, ""), 0x0); + expectEqual(hash(1, "a"), 0xbed235177f41d328); + expectEqual(hash(2, "abc"), 0xbe348debe59b27c3); + expectEqual(hash(3, "message digest"), 0x37320f657213a290); + expectEqual(hash(4, "abcdefghijklmnopqrstuvwxyz"), 0xd0b270e1d8a7019c); + expectEqual(hash(5, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"), 0x602a1894d3bbfe7f); + expectEqual(hash(6, "12345678901234567890123456789012345678901234567890123456789012345678901234567890"), 0x829e9c148b75970e); } diff --git a/std/hash_map.zig b/std/hash_map.zig index 6a8679ccd0..71cfecdd6d 100644 --- a/std/hash_map.zig +++ b/std/hash_map.zig @@ -5,7 +5,7 @@ const testing = std.testing; const math = std.math; const mem = 
std.mem; const meta = std.meta; -const wyhash = std.hash.wyhash; +const Wyhash = std.hash.Wyhash; const Allocator = mem.Allocator; const builtin = @import("builtin"); @@ -557,7 +557,7 @@ pub fn autoHash(key: var, seed: u64) u64 { builtin.TypeId.EnumLiteral, => @compileError("cannot hash this type"), - builtin.TypeId.Int => return wyhash(std.mem.asBytes(&key), seed), + builtin.TypeId.Int => return Wyhash.hash(seed, std.mem.asBytes(&key)), builtin.TypeId.Float => |info| return autoHash(@bitCast(@IntType(false, info.bits), key), seed), @@ -594,7 +594,7 @@ pub fn autoHash(key: var, seed: u64) u64 { // If there's no unused bits in the child type, we can just hash // this as an array of bytes. if (info.child.bit_count % 8 == 0) { - return wyhash(mem.asBytes(&key), seed); + return Wyhash.hash(seed, mem.asBytes(&key)); } // Otherwise, hash every element. From 4c93ccab5ad48ce61e4136c646b3123a06150083 Mon Sep 17 00:00:00 2001 From: Marc Tiehuis Date: Mon, 1 Jul 2019 23:23:40 +1200 Subject: [PATCH 040/125] Add throughput test program for hash functions --- std/hash/throughput_test.zig | 148 +++++++++++++++++++++++++++++++++++ 1 file changed, 148 insertions(+) create mode 100644 std/hash/throughput_test.zig diff --git a/std/hash/throughput_test.zig b/std/hash/throughput_test.zig new file mode 100644 index 0000000000..4b7e8ef344 --- /dev/null +++ b/std/hash/throughput_test.zig @@ -0,0 +1,148 @@ +const builtin = @import("builtin"); +const std = @import("std"); +const time = std.time; +const Timer = time.Timer; +const hash = std.hash; + +const KiB = 1024; +const MiB = 1024 * KiB; +const GiB = 1024 * MiB; + +var prng = std.rand.DefaultPrng.init(0); + +const Hash = struct { + ty: type, + name: []const u8, + init_u8s: ?[]const u8 = null, + init_u64: ?u64 = null, +}; + +const siphash_key = "0123456789abcdef"; + +const hashes = [_]Hash{ + Hash{ .ty = hash.Wyhash, .name = "wyhash", .init_u64 = 0 }, + Hash{ .ty = hash.SipHash64(1, 3), .name = "siphash(1,3)", .init_u8s = siphash_key }, + 
Hash{ .ty = hash.SipHash64(2, 4), .name = "siphash(2,4)", .init_u8s = siphash_key }, + Hash{ .ty = hash.Fnv1a_64, .name = "fnv1a" }, + Hash{ .ty = hash.Crc32, .name = "crc32" }, +}; + +const Result = struct { + hash: u64, + throughput: u64, +}; + +pub fn benchmarkHash(comptime H: var, bytes: usize) !Result { + var h = blk: { + if (H.init_u8s) |init| { + break :blk H.ty.init(init); + } + if (H.init_u64) |init| { + break :blk H.ty.init(init); + } + break :blk H.ty.init(); + }; + + var block: [8192]u8 = undefined; + prng.random.bytes(block[0..]); + + var offset: usize = 0; + var timer = try Timer.start(); + const start = timer.lap(); + while (offset < bytes) : (offset += block.len) { + h.update(block[0..]); + } + const end = timer.read(); + + const elapsed_s = @intToFloat(f64, end - start) / time.ns_per_s; + const throughput = @floatToInt(u64, @intToFloat(f64, bytes) / elapsed_s); + + return Result{ + .hash = h.final(), + .throughput = throughput, + }; +} + +fn usage() void { + std.debug.warn( + \\throughput_test [options] + \\ + \\Options: + \\ --filter [test-name] + \\ --seed [int] + \\ --count [int] + \\ --help + \\ + ); +} + +fn mode(comptime x: comptime_int) comptime_int { + return if (builtin.mode == builtin.Mode.Debug) x / 64 else x; +} + +// TODO(#1358): Replace with builtin formatted padding when available. 
+fn printPad(stdout: var, s: []const u8) !void { + var i: usize = 0; + while (i < 12 - s.len) : (i += 1) { + try stdout.print(" "); + } + try stdout.print("{}", s); +} + +pub fn main() !void { + var stdout_file = try std.io.getStdOut(); + var stdout_out_stream = stdout_file.outStream(); + const stdout = &stdout_out_stream.stream; + + var buffer: [1024]u8 = undefined; + var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]); + const args = try std.process.argsAlloc(&fixed.allocator); + + var filter: ?[]u8 = ""; + var count: usize = mode(128 * MiB); + + var i: usize = 1; + while (i < args.len) : (i += 1) { + if (std.mem.eql(u8, args[i], "--seed")) { + i += 1; + if (i == args.len) { + usage(); + std.os.exit(1); + } + + const seed = try std.fmt.parseUnsigned(u32, args[i], 10); + prng.seed(seed); + } else if (std.mem.eql(u8, args[i], "--filter")) { + i += 1; + if (i == args.len) { + usage(); + std.os.exit(1); + } + + filter = args[i]; + } else if (std.mem.eql(u8, args[i], "--count")) { + i += 1; + if (i == args.len) { + usage(); + std.os.exit(1); + } + + const c = try std.fmt.parseUnsigned(u32, args[i], 10); + count = c * MiB; + } else if (std.mem.eql(u8, args[i], "--help")) { + usage(); + return; + } else { + usage(); + std.os.exit(1); + } + } + + inline for (hashes) |H| { + if (filter == null or std.mem.indexOf(u8, H.name, filter.?) 
!= null) { + const result = try benchmarkHash(H, count); + try printPad(stdout, H.name); + try stdout.print(": {:4} MiB/s [{:16}]\n", result.throughput / (1 * MiB), result.hash); + } + } +} From 8805a7b50985fca23969beab8636fbfbecd857ee Mon Sep 17 00:00:00 2001 From: Sahnvour Date: Tue, 2 Jul 2019 18:38:46 +0200 Subject: [PATCH 041/125] adapt http/headers.zig to wyhash's new interface --- std/http/headers.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/std/http/headers.zig b/std/http/headers.zig index 7eb7fcc2c2..c588f2d055 100644 --- a/std/http/headers.zig +++ b/std/http/headers.zig @@ -109,7 +109,7 @@ fn stringEql(a: []const u8, b: []const u8) bool { } fn stringHash(s: []const u8) u32 { - return @truncate(u32, std.hash.wyhash(s, 0)); + return @truncate(u32, std.hash.Wyhash.hash(0, s)); } const HeaderList = std.ArrayList(HeaderEntry); From 5bf63bfbf113d3921101311f1e3040890b94e798 Mon Sep 17 00:00:00 2001 From: Sahnvour Date: Tue, 2 Jul 2019 18:40:01 +0200 Subject: [PATCH 042/125] make use of hashing streaming interface in autoHash --- std/hash_map.zig | 154 ++++++++++++++++++++++++++--------------------- 1 file changed, 85 insertions(+), 69 deletions(-) diff --git a/std/hash_map.zig b/std/hash_map.zig index 71cfecdd6d..d906b54618 100644 --- a/std/hash_map.zig +++ b/std/hash_map.zig @@ -522,8 +522,9 @@ pub fn getTrivialEqlFn(comptime K: type) (fn (K, K) bool) { pub fn getAutoHashFn(comptime K: type) (fn (K) u32) { return struct { fn hash(key: K) u32 { - const h = autoHash(key, 0); - return @truncate(u32, h); + var hasher = Wyhash.init(0); + autoHash(&hasher, key); + return @truncate(u32, hasher.final()); } }.hash; } @@ -538,10 +539,7 @@ pub fn getAutoEqlFn(comptime K: type) (fn (K, K) bool) { /// Provides generic hashing for any eligible type. /// Only hashes `key` itself, pointers are not followed. -/// The underlying hashing algorithm is wyhash. 
-pub fn autoHash(key: var, seed: u64) u64 { - // We use the fact that wyhash takes an input seed to "chain" hasing when the - // key has multiple parts that are not necessarily contiguous in memory. +pub fn autoHash(hasher: var, key: var) void { const Key = @typeOf(key); switch (@typeInfo(Key)) { builtin.TypeId.NoReturn, @@ -557,91 +555,101 @@ pub fn autoHash(key: var, seed: u64) u64 { builtin.TypeId.EnumLiteral, => @compileError("cannot hash this type"), - builtin.TypeId.Int => return Wyhash.hash(seed, std.mem.asBytes(&key)), + builtin.TypeId.Int => hasher.update(std.mem.asBytes(&key)), - builtin.TypeId.Float => |info| return autoHash(@bitCast(@IntType(false, info.bits), key), seed), + builtin.TypeId.Float => |info| autoHash(hasher, @bitCast(@IntType(false, info.bits), key)), - builtin.TypeId.Bool => return autoHash(@boolToInt(key), seed), - builtin.TypeId.Enum => return autoHash(@enumToInt(key), seed), - builtin.TypeId.ErrorSet => return autoHash(@errorToInt(key), seed), - builtin.TypeId.Promise, builtin.TypeId.Fn => return autoHash(@ptrToInt(key), seed), + builtin.TypeId.Bool => autoHash(hasher, @boolToInt(key)), + builtin.TypeId.Enum => autoHash(hasher, @enumToInt(key)), + builtin.TypeId.ErrorSet => autoHash(hasher, @errorToInt(key)), + builtin.TypeId.Promise, builtin.TypeId.Fn => autoHash(hasher, @ptrToInt(key)), - builtin.TypeId.Pointer => |info| return switch (info.size) { + builtin.TypeId.Pointer => |info| switch (info.size) { builtin.TypeInfo.Pointer.Size.One, builtin.TypeInfo.Pointer.Size.Many, builtin.TypeInfo.Pointer.Size.C, - => return autoHash(@ptrToInt(key), seed), + => autoHash(hasher, @ptrToInt(key)), - builtin.TypeInfo.Pointer.Size.Slice => return autoHash(key.len, autoHash(key.ptr, seed)), + builtin.TypeInfo.Pointer.Size.Slice => { + autoHash(hasher, key.ptr); + autoHash(hasher, key.len); + }, }, - builtin.TypeId.Optional => return if (key) |k| autoHash(k, seed) else 0, + builtin.TypeId.Optional => if (key) |k| autoHash(hasher, k), 
builtin.TypeId.Array => { // TODO detect via a trait when Key has no padding bits to // hash it as an array of bytes. // Otherwise, hash every element. - var s = seed; for (key) |element| { - // We reuse the hash of the previous element as the seed for the - // next one so that they're dependant. - s = autoHash(element, s); + autoHash(hasher, element); } - return s; }, builtin.TypeId.Vector => |info| { - // If there's no unused bits in the child type, we can just hash - // this as an array of bytes. if (info.child.bit_count % 8 == 0) { - return Wyhash.hash(seed, mem.asBytes(&key)); + // If there's no unused bits in the child type, we can just hash + // this as an array of bytes. + hasher.update(mem.asBytes(&key)); + } else { + // Otherwise, hash every element. + // TODO remove the copy to an array once field access is done. + const array: [info.len]info.child = key; + comptime var i: u32 = 0; + inline while (i < info.len) : (i += 1) { + autoHash(hasher, array[i]); + } } - - // Otherwise, hash every element. - var s = seed; - // TODO remove the copy to an array once field access is done. - const array: [info.len]info.child = key; - comptime var i: u32 = 0; - inline while (i < info.len) : (i += 1) { - s = autoHash(array[i], s); - } - return s; }, builtin.TypeId.Struct => |info| { // TODO detect via a trait when Key has no padding bits to // hash it as an array of bytes. // Otherwise, hash every field. - var s = seed; inline for (info.fields) |field| { // We reuse the hash of the previous field as the seed for the // next one so that they're dependant. 
- s = autoHash(@field(key, field.name), s); + autoHash(hasher, @field(key, field.name)); } - return s; }, - builtin.TypeId.Union => |info| { + builtin.TypeId.Union => |info| blk: { if (info.tag_type) |tag_type| { const tag = meta.activeTag(key); - const s = autoHash(tag, seed); + const s = autoHash(hasher, tag); inline for (info.fields) |field| { const enum_field = field.enum_field.?; if (enum_field.value == @enumToInt(tag)) { - return autoHash(@field(key, enum_field.name), s); + autoHash(hasher, @field(key, enum_field.name)); + // TODO use a labelled break when it does not crash the compiler. + // break :blk; + return; } } unreachable; } else @compileError("cannot hash untagged union type: " ++ @typeName(Key) ++ ", provide your own hash function"); }, - builtin.TypeId.ErrorUnion => { - return autoHash(key catch |err| return autoHash(err, seed), seed); + builtin.TypeId.ErrorUnion => blk: { + const payload = key catch |err| { + autoHash(hasher, err); + break :blk; + }; + autoHash(hasher, payload); }, } } +fn testAutoHash(key: var) u64 { + var hasher = Wyhash.init(0); + autoHash(&hasher, key); + return hasher.final(); +} + test "autoHash slice" { + // Allocate one array dynamically so that we're assured it is not merged + // with the other by the optimization passes. 
const array1 = try std.heap.direct_allocator.create([6]u32); defer std.heap.direct_allocator.destroy(array1); array1.* = [_]u32{ 1, 2, 3, 4, 5, 6 }; @@ -649,38 +657,46 @@ test "autoHash slice" { const a = array1[0..]; const b = array2[0..]; const c = array1[0..3]; - testing.expect(autoHash(a, 0) == autoHash(a, 0)); - testing.expect(autoHash(a, 0) != autoHash(array1, 0)); - testing.expect(autoHash(a, 0) != autoHash(b, 0)); - testing.expect(autoHash(a, 0) != autoHash(c, 0)); + testing.expect(testAutoHash(a) == testAutoHash(a)); + testing.expect(testAutoHash(a) != testAutoHash(array1)); + testing.expect(testAutoHash(a) != testAutoHash(b)); + testing.expect(testAutoHash(a) != testAutoHash(c)); } -test "autoHash optional" { +test "testAutoHash optional" { const a: ?u32 = 123; const b: ?u32 = null; - testing.expectEqual(autoHash(a, 0), autoHash(u32(123), 0)); - testing.expect(autoHash(a, 0) != autoHash(b, 0)); - testing.expectEqual(autoHash(b, 0), 0); + testing.expectEqual(testAutoHash(a), testAutoHash(u32(123))); + testing.expect(testAutoHash(a) != testAutoHash(b)); + testing.expectEqual(testAutoHash(b), 0); } -test "autoHash array" { +test "testAutoHash array" { const a = [_]u32{ 1, 2, 3 }; - const h = autoHash(a, 0); - testing.expectEqual(h, autoHash(u32(3), autoHash(u32(2), autoHash(u32(1), 0)))); + const h = testAutoHash(a); + var hasher = Wyhash.init(0); + autoHash(&hasher, u32(1)); + autoHash(&hasher, u32(2)); + autoHash(&hasher, u32(3)); + testing.expectEqual(h, hasher.final()); } -test "autoHash struct" { +test "testAutoHash struct" { const Foo = struct { a: u32 = 1, b: u32 = 2, c: u32 = 3, }; const f = Foo{}; - const h = autoHash(f, 0); - testing.expectEqual(h, autoHash(u32(3), autoHash(u32(2), autoHash(u32(1), 0)))); + const h = testAutoHash(f); + var hasher = Wyhash.init(0); + autoHash(&hasher, u32(1)); + autoHash(&hasher, u32(2)); + autoHash(&hasher, u32(3)); + testing.expectEqual(h, hasher.final()); } -test "autoHash union" { +test "testAutoHash union" { 
const Foo = union(enum) { A: u32, B: f32, @@ -690,24 +706,24 @@ test "autoHash union" { const a = Foo{ .A = 18 }; var b = Foo{ .B = 12.34 }; const c = Foo{ .C = 18 }; - testing.expect(autoHash(a, 0) == autoHash(a, 0)); - testing.expect(autoHash(a, 0) != autoHash(b, 0)); - testing.expect(autoHash(a, 0) != autoHash(c, 0)); + testing.expect(testAutoHash(a) == testAutoHash(a)); + testing.expect(testAutoHash(a) != testAutoHash(b)); + testing.expect(testAutoHash(a) != testAutoHash(c)); b = Foo{ .A = 18 }; - testing.expect(autoHash(a, 0) == autoHash(b, 0)); + testing.expect(testAutoHash(a) == testAutoHash(b)); } -test "autoHash vector" { +test "testAutoHash vector" { const a: @Vector(4, u32) = [_]u32{ 1, 2, 3, 4 }; const b: @Vector(4, u32) = [_]u32{ 1, 2, 3, 5 }; const c: @Vector(4, u31) = [_]u31{ 1, 2, 3, 4 }; - testing.expect(autoHash(a, 0) == autoHash(a, 0)); - testing.expect(autoHash(a, 0) != autoHash(b, 0)); - testing.expect(autoHash(a, 0) != autoHash(c, 0)); + testing.expect(testAutoHash(a) == testAutoHash(a)); + testing.expect(testAutoHash(a) != testAutoHash(b)); + testing.expect(testAutoHash(a) != testAutoHash(c)); } -test "autoHash error union" { +test "testAutoHash error union" { const Errors = error{Test}; const Foo = struct { a: u32 = 1, @@ -716,7 +732,7 @@ test "autoHash error union" { }; const f = Foo{}; const g: Errors!Foo = Errors.Test; - testing.expect(autoHash(f, 0) != autoHash(g, 0)); - testing.expect(autoHash(f, 0) == autoHash(Foo{}, 0)); - testing.expect(autoHash(g, 0) == autoHash(Errors.Test, 0)); + testing.expect(testAutoHash(f) != testAutoHash(g)); + testing.expect(testAutoHash(f) == testAutoHash(Foo{})); + testing.expect(testAutoHash(g) == testAutoHash(Errors.Test)); } From 4b5172d2879742b98e3e34b90b05ac28da9f39fe Mon Sep 17 00:00:00 2001 From: Sahnvour Date: Tue, 2 Jul 2019 19:46:51 +0200 Subject: [PATCH 043/125] move autoHash into its own module since it can be used with any hash function implementing a streaming interface --- std/hash.zig | 4 + 
std/hash/auto_hash.zig | 208 +++++++++++++++++++++++++++++++++++++++++ std/hash_map.zig | 201 +-------------------------------------- 3 files changed, 213 insertions(+), 200 deletions(-) create mode 100644 std/hash/auto_hash.zig diff --git a/std/hash.zig b/std/hash.zig index e246fd0ad3..648f34b11d 100644 --- a/std/hash.zig +++ b/std/hash.zig @@ -1,6 +1,9 @@ const adler = @import("hash/adler.zig"); pub const Adler32 = adler.Adler32; +const auto_hash = @import("hash/auto_hash.zig"); +pub const autoHash = auto_hash.autoHash; + // pub for polynomials + generic crc32 construction pub const crc = @import("hash/crc.zig"); pub const Crc32 = crc.Crc32; @@ -30,6 +33,7 @@ pub const Wyhash = wyhash.Wyhash; test "hash" { _ = @import("hash/adler.zig"); + _ = @import("hash/auto_hash.zig"); _ = @import("hash/crc.zig"); _ = @import("hash/fnv.zig"); _ = @import("hash/siphash.zig"); diff --git a/std/hash/auto_hash.zig b/std/hash/auto_hash.zig new file mode 100644 index 0000000000..b21af0a1d8 --- /dev/null +++ b/std/hash/auto_hash.zig @@ -0,0 +1,208 @@ +const std = @import("std"); +const builtin = @import("builtin"); +const mem = std.mem; +const meta = std.meta; + +/// Provides generic hashing for any eligible type. +/// Only hashes `key` itself, pointers are not followed. 
+pub fn autoHash(hasher: var, key: var) void { + const Key = @typeOf(key); + switch (@typeInfo(Key)) { + builtin.TypeId.NoReturn, + builtin.TypeId.Opaque, + builtin.TypeId.Undefined, + builtin.TypeId.ArgTuple, + builtin.TypeId.Void, + builtin.TypeId.Null, + builtin.TypeId.BoundFn, + builtin.TypeId.ComptimeFloat, + builtin.TypeId.ComptimeInt, + builtin.TypeId.Type, + builtin.TypeId.EnumLiteral, + => @compileError("cannot hash this type"), + + builtin.TypeId.Int => hasher.update(std.mem.asBytes(&key)), + + builtin.TypeId.Float => |info| autoHash(hasher, @bitCast(@IntType(false, info.bits), key)), + + builtin.TypeId.Bool => autoHash(hasher, @boolToInt(key)), + builtin.TypeId.Enum => autoHash(hasher, @enumToInt(key)), + builtin.TypeId.ErrorSet => autoHash(hasher, @errorToInt(key)), + builtin.TypeId.Promise, builtin.TypeId.Fn => autoHash(hasher, @ptrToInt(key)), + + builtin.TypeId.Pointer => |info| switch (info.size) { + builtin.TypeInfo.Pointer.Size.One, + builtin.TypeInfo.Pointer.Size.Many, + builtin.TypeInfo.Pointer.Size.C, + => autoHash(hasher, @ptrToInt(key)), + + builtin.TypeInfo.Pointer.Size.Slice => { + autoHash(hasher, key.ptr); + autoHash(hasher, key.len); + }, + }, + + builtin.TypeId.Optional => if (key) |k| autoHash(hasher, k), + + builtin.TypeId.Array => { + // TODO detect via a trait when Key has no padding bits to + // hash it as an array of bytes. + // Otherwise, hash every element. + for (key) |element| { + autoHash(hasher, element); + } + }, + + builtin.TypeId.Vector => |info| { + if (info.child.bit_count % 8 == 0) { + // If there's no unused bits in the child type, we can just hash + // this as an array of bytes. + hasher.update(mem.asBytes(&key)); + } else { + // Otherwise, hash every element. + // TODO remove the copy to an array once field access is done. 
+ const array: [info.len]info.child = key; + comptime var i: u32 = 0; + inline while (i < info.len) : (i += 1) { + autoHash(hasher, array[i]); + } + } + }, + + builtin.TypeId.Struct => |info| { + // TODO detect via a trait when Key has no padding bits to + // hash it as an array of bytes. + // Otherwise, hash every field. + inline for (info.fields) |field| { + // We reuse the hash of the previous field as the seed for the + // next one so that they're dependant. + autoHash(hasher, @field(key, field.name)); + } + }, + + builtin.TypeId.Union => |info| blk: { + if (info.tag_type) |tag_type| { + const tag = meta.activeTag(key); + const s = autoHash(hasher, tag); + inline for (info.fields) |field| { + const enum_field = field.enum_field.?; + if (enum_field.value == @enumToInt(tag)) { + autoHash(hasher, @field(key, enum_field.name)); + // TODO use a labelled break when it does not crash the compiler. + // break :blk; + return; + } + } + unreachable; + } else @compileError("cannot hash untagged union type: " ++ @typeName(Key) ++ ", provide your own hash function"); + }, + + builtin.TypeId.ErrorUnion => blk: { + const payload = key catch |err| { + autoHash(hasher, err); + break :blk; + }; + autoHash(hasher, payload); + }, + } +} + +const testing = std.testing; +const Wyhash = std.hash.Wyhash; + +fn testAutoHash(key: var) u64 { + // Any hash could be used here, for testing autoHash. + var hasher = Wyhash.init(0); + autoHash(&hasher, key); + return hasher.final(); +} + +test "autoHash slice" { + // Allocate one array dynamically so that we're assured it is not merged + // with the other by the optimization passes. 
+ const array1 = try std.heap.direct_allocator.create([6]u32); + defer std.heap.direct_allocator.destroy(array1); + array1.* = [_]u32{ 1, 2, 3, 4, 5, 6 }; + const array2 = [_]u32{ 1, 2, 3, 4, 5, 6 }; + const a = array1[0..]; + const b = array2[0..]; + const c = array1[0..3]; + testing.expect(testAutoHash(a) == testAutoHash(a)); + testing.expect(testAutoHash(a) != testAutoHash(array1)); + testing.expect(testAutoHash(a) != testAutoHash(b)); + testing.expect(testAutoHash(a) != testAutoHash(c)); +} + +test "testAutoHash optional" { + const a: ?u32 = 123; + const b: ?u32 = null; + testing.expectEqual(testAutoHash(a), testAutoHash(u32(123))); + testing.expect(testAutoHash(a) != testAutoHash(b)); + testing.expectEqual(testAutoHash(b), 0); +} + +test "testAutoHash array" { + const a = [_]u32{ 1, 2, 3 }; + const h = testAutoHash(a); + var hasher = Wyhash.init(0); + autoHash(&hasher, u32(1)); + autoHash(&hasher, u32(2)); + autoHash(&hasher, u32(3)); + testing.expectEqual(h, hasher.final()); +} + +test "testAutoHash struct" { + const Foo = struct { + a: u32 = 1, + b: u32 = 2, + c: u32 = 3, + }; + const f = Foo{}; + const h = testAutoHash(f); + var hasher = Wyhash.init(0); + autoHash(&hasher, u32(1)); + autoHash(&hasher, u32(2)); + autoHash(&hasher, u32(3)); + testing.expectEqual(h, hasher.final()); +} + +test "testAutoHash union" { + const Foo = union(enum) { + A: u32, + B: f32, + C: u32, + }; + + const a = Foo{ .A = 18 }; + var b = Foo{ .B = 12.34 }; + const c = Foo{ .C = 18 }; + testing.expect(testAutoHash(a) == testAutoHash(a)); + testing.expect(testAutoHash(a) != testAutoHash(b)); + testing.expect(testAutoHash(a) != testAutoHash(c)); + + b = Foo{ .A = 18 }; + testing.expect(testAutoHash(a) == testAutoHash(b)); +} + +test "testAutoHash vector" { + const a: @Vector(4, u32) = [_]u32{ 1, 2, 3, 4 }; + const b: @Vector(4, u32) = [_]u32{ 1, 2, 3, 5 }; + const c: @Vector(4, u31) = [_]u31{ 1, 2, 3, 4 }; + testing.expect(testAutoHash(a) == testAutoHash(a)); + 
testing.expect(testAutoHash(a) != testAutoHash(b)); + testing.expect(testAutoHash(a) != testAutoHash(c)); +} + +test "testAutoHash error union" { + const Errors = error{Test}; + const Foo = struct { + a: u32 = 1, + b: u32 = 2, + c: u32 = 3, + }; + const f = Foo{}; + const g: Errors!Foo = Errors.Test; + testing.expect(testAutoHash(f) != testAutoHash(g)); + testing.expect(testAutoHash(f) == testAutoHash(Foo{})); + testing.expect(testAutoHash(g) == testAutoHash(Errors.Test)); +} diff --git a/std/hash_map.zig b/std/hash_map.zig index d906b54618..ab3c4c248d 100644 --- a/std/hash_map.zig +++ b/std/hash_map.zig @@ -5,6 +5,7 @@ const testing = std.testing; const math = std.math; const mem = std.mem; const meta = std.meta; +const autoHash = std.hash.autoHash; const Wyhash = std.hash.Wyhash; const Allocator = mem.Allocator; const builtin = @import("builtin"); @@ -536,203 +537,3 @@ pub fn getAutoEqlFn(comptime K: type) (fn (K, K) bool) { } }.eql; } - -/// Provides generic hashing for any eligible type. -/// Only hashes `key` itself, pointers are not followed. 
-pub fn autoHash(hasher: var, key: var) void { - const Key = @typeOf(key); - switch (@typeInfo(Key)) { - builtin.TypeId.NoReturn, - builtin.TypeId.Opaque, - builtin.TypeId.Undefined, - builtin.TypeId.ArgTuple, - builtin.TypeId.Void, - builtin.TypeId.Null, - builtin.TypeId.BoundFn, - builtin.TypeId.ComptimeFloat, - builtin.TypeId.ComptimeInt, - builtin.TypeId.Type, - builtin.TypeId.EnumLiteral, - => @compileError("cannot hash this type"), - - builtin.TypeId.Int => hasher.update(std.mem.asBytes(&key)), - - builtin.TypeId.Float => |info| autoHash(hasher, @bitCast(@IntType(false, info.bits), key)), - - builtin.TypeId.Bool => autoHash(hasher, @boolToInt(key)), - builtin.TypeId.Enum => autoHash(hasher, @enumToInt(key)), - builtin.TypeId.ErrorSet => autoHash(hasher, @errorToInt(key)), - builtin.TypeId.Promise, builtin.TypeId.Fn => autoHash(hasher, @ptrToInt(key)), - - builtin.TypeId.Pointer => |info| switch (info.size) { - builtin.TypeInfo.Pointer.Size.One, - builtin.TypeInfo.Pointer.Size.Many, - builtin.TypeInfo.Pointer.Size.C, - => autoHash(hasher, @ptrToInt(key)), - - builtin.TypeInfo.Pointer.Size.Slice => { - autoHash(hasher, key.ptr); - autoHash(hasher, key.len); - }, - }, - - builtin.TypeId.Optional => if (key) |k| autoHash(hasher, k), - - builtin.TypeId.Array => { - // TODO detect via a trait when Key has no padding bits to - // hash it as an array of bytes. - // Otherwise, hash every element. - for (key) |element| { - autoHash(hasher, element); - } - }, - - builtin.TypeId.Vector => |info| { - if (info.child.bit_count % 8 == 0) { - // If there's no unused bits in the child type, we can just hash - // this as an array of bytes. - hasher.update(mem.asBytes(&key)); - } else { - // Otherwise, hash every element. - // TODO remove the copy to an array once field access is done. 
- const array: [info.len]info.child = key; - comptime var i: u32 = 0; - inline while (i < info.len) : (i += 1) { - autoHash(hasher, array[i]); - } - } - }, - - builtin.TypeId.Struct => |info| { - // TODO detect via a trait when Key has no padding bits to - // hash it as an array of bytes. - // Otherwise, hash every field. - inline for (info.fields) |field| { - // We reuse the hash of the previous field as the seed for the - // next one so that they're dependant. - autoHash(hasher, @field(key, field.name)); - } - }, - - builtin.TypeId.Union => |info| blk: { - if (info.tag_type) |tag_type| { - const tag = meta.activeTag(key); - const s = autoHash(hasher, tag); - inline for (info.fields) |field| { - const enum_field = field.enum_field.?; - if (enum_field.value == @enumToInt(tag)) { - autoHash(hasher, @field(key, enum_field.name)); - // TODO use a labelled break when it does not crash the compiler. - // break :blk; - return; - } - } - unreachable; - } else @compileError("cannot hash untagged union type: " ++ @typeName(Key) ++ ", provide your own hash function"); - }, - - builtin.TypeId.ErrorUnion => blk: { - const payload = key catch |err| { - autoHash(hasher, err); - break :blk; - }; - autoHash(hasher, payload); - }, - } -} - -fn testAutoHash(key: var) u64 { - var hasher = Wyhash.init(0); - autoHash(&hasher, key); - return hasher.final(); -} - -test "autoHash slice" { - // Allocate one array dynamically so that we're assured it is not merged - // with the other by the optimization passes. 
- const array1 = try std.heap.direct_allocator.create([6]u32); - defer std.heap.direct_allocator.destroy(array1); - array1.* = [_]u32{ 1, 2, 3, 4, 5, 6 }; - const array2 = [_]u32{ 1, 2, 3, 4, 5, 6 }; - const a = array1[0..]; - const b = array2[0..]; - const c = array1[0..3]; - testing.expect(testAutoHash(a) == testAutoHash(a)); - testing.expect(testAutoHash(a) != testAutoHash(array1)); - testing.expect(testAutoHash(a) != testAutoHash(b)); - testing.expect(testAutoHash(a) != testAutoHash(c)); -} - -test "testAutoHash optional" { - const a: ?u32 = 123; - const b: ?u32 = null; - testing.expectEqual(testAutoHash(a), testAutoHash(u32(123))); - testing.expect(testAutoHash(a) != testAutoHash(b)); - testing.expectEqual(testAutoHash(b), 0); -} - -test "testAutoHash array" { - const a = [_]u32{ 1, 2, 3 }; - const h = testAutoHash(a); - var hasher = Wyhash.init(0); - autoHash(&hasher, u32(1)); - autoHash(&hasher, u32(2)); - autoHash(&hasher, u32(3)); - testing.expectEqual(h, hasher.final()); -} - -test "testAutoHash struct" { - const Foo = struct { - a: u32 = 1, - b: u32 = 2, - c: u32 = 3, - }; - const f = Foo{}; - const h = testAutoHash(f); - var hasher = Wyhash.init(0); - autoHash(&hasher, u32(1)); - autoHash(&hasher, u32(2)); - autoHash(&hasher, u32(3)); - testing.expectEqual(h, hasher.final()); -} - -test "testAutoHash union" { - const Foo = union(enum) { - A: u32, - B: f32, - C: u32, - }; - - const a = Foo{ .A = 18 }; - var b = Foo{ .B = 12.34 }; - const c = Foo{ .C = 18 }; - testing.expect(testAutoHash(a) == testAutoHash(a)); - testing.expect(testAutoHash(a) != testAutoHash(b)); - testing.expect(testAutoHash(a) != testAutoHash(c)); - - b = Foo{ .A = 18 }; - testing.expect(testAutoHash(a) == testAutoHash(b)); -} - -test "testAutoHash vector" { - const a: @Vector(4, u32) = [_]u32{ 1, 2, 3, 4 }; - const b: @Vector(4, u32) = [_]u32{ 1, 2, 3, 5 }; - const c: @Vector(4, u31) = [_]u31{ 1, 2, 3, 4 }; - testing.expect(testAutoHash(a) == testAutoHash(a)); - 
testing.expect(testAutoHash(a) != testAutoHash(b)); - testing.expect(testAutoHash(a) != testAutoHash(c)); -} - -test "testAutoHash error union" { - const Errors = error{Test}; - const Foo = struct { - a: u32 = 1, - b: u32 = 2, - c: u32 = 3, - }; - const f = Foo{}; - const g: Errors!Foo = Errors.Test; - testing.expect(testAutoHash(f) != testAutoHash(g)); - testing.expect(testAutoHash(f) == testAutoHash(Foo{})); - testing.expect(testAutoHash(g) == testAutoHash(Errors.Test)); -} From 3faf5d38576616d033c343130607189eb9fe613c Mon Sep 17 00:00:00 2001 From: Sahnvour Date: Tue, 16 Jul 2019 20:31:02 +0200 Subject: [PATCH 044/125] wyhash: stateless is faster for both iterative hashing and small keys. --- std/hash/wyhash.zig | 52 +++++++++++++++++---------------------------- 1 file changed, 20 insertions(+), 32 deletions(-) diff --git a/std/hash/wyhash.zig b/std/hash/wyhash.zig index 49119c5a95..dfa5156cad 100644 --- a/std/hash/wyhash.zig +++ b/std/hash/wyhash.zig @@ -33,16 +33,11 @@ fn mix1(a: u64, b: u64, seed: u64) u64 { pub const Wyhash = struct { seed: u64, - - buf: [32]u8, - buf_len: usize, msg_len: usize, pub fn init(seed: u64) Wyhash { return Wyhash{ .seed = seed, - .buf = undefined, - .buf_len = 0, .msg_len = 0, }; } @@ -61,34 +56,12 @@ pub const Wyhash = struct { ); } - pub fn update(self: *Wyhash, b: []const u8) void { - var off: usize = 0; + fn partial(self: *Wyhash, b: []const u8) void { + const rem_key = b; + const rem_len = b.len; - // Partial from previous. - if (self.buf_len != 0 and self.buf_len + b.len > 32) { - off += 32 - self.buf_len; - mem.copy(u8, self.buf[self.buf_len..], b[0..off]); - self.round(self.buf[0..]); - self.buf_len = 0; - } - - // Full middle blocks. - while (off + 32 <= b.len) : (off += 32) { - @inlineCall(self.round, b[off .. off + 32]); - } - - // Remainder for next pass. 
- mem.copy(u8, self.buf[self.buf_len..], b[off..]); - self.buf_len += @intCast(u8, b[off..].len); - self.msg_len += b.len; - } - - pub fn final(self: *Wyhash) u64 { - const seed = self.seed; - const rem_len = @intCast(u5, self.buf_len); - const rem_key = self.buf[0..self.buf_len]; - - self.seed = switch (rem_len) { + var seed = self.seed; + seed = switch (@intCast(u5, rem_len)) { 0 => seed, 1 => mix0(read_bytes(1, rem_key), primes[4], seed), 2 => mix0(read_bytes(2, rem_key), primes[4], seed), @@ -122,7 +95,22 @@ pub const Wyhash = struct { 30 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 16) | read_bytes(2, rem_key[28..]), seed), 31 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 24) | (read_bytes(2, rem_key[28..]) << 8) | read_bytes(1, rem_key[30..]), seed), }; + self.seed = seed; + } + pub fn update(self: *Wyhash, b: []const u8) void { + var off: usize = 0; + + // Full middle blocks. + while (off + 32 <= b.len) : (off += 32) { + @inlineCall(self.round, b[off .. 
off + 32]); + } + + self.partial(b[off..]); + self.msg_len += b.len; + } + + pub fn final(self: *Wyhash) u64 { return mum(self.seed ^ self.msg_len, primes[4]); } From 54255ee32e1e6c83b04c3e5f2f1dd7e8aa5e0dd7 Mon Sep 17 00:00:00 2001 From: Sahnvour Date: Tue, 16 Jul 2019 22:32:10 +0200 Subject: [PATCH 045/125] autohash: force inlining of integer hashing so that the optimizer can see the fast path based on key's size which is known at comptime otherwise it will always outline the call to hasher.update, resulting in much worse performance --- std/hash/auto_hash.zig | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/std/hash/auto_hash.zig b/std/hash/auto_hash.zig index b21af0a1d8..2da9691ffd 100644 --- a/std/hash/auto_hash.zig +++ b/std/hash/auto_hash.zig @@ -21,7 +21,9 @@ pub fn autoHash(hasher: var, key: var) void { builtin.TypeId.EnumLiteral, => @compileError("cannot hash this type"), - builtin.TypeId.Int => hasher.update(std.mem.asBytes(&key)), + // Help the optimizer see that hashing an int is easy by inlining! + // TODO Check if the situation is better after #561 is resolved. 
+ builtin.TypeId.Int => @inlineCall(hasher.update, std.mem.asBytes(&key)), builtin.TypeId.Float => |info| autoHash(hasher, @bitCast(@IntType(false, info.bits), key)), From 30466bccefedd5e795b72b422f98b8a58e786289 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 4 Aug 2019 15:15:11 -0400 Subject: [PATCH 046/125] update CONTRIBUTING.md --- CONTRIBUTING.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c19ef48829..2ee0e85ccf 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -25,6 +25,7 @@ Here are some examples: * [Iterative Replacement of C with Zig](http://tiehuis.github.io/blog/zig1.html) * [The Right Tool for the Right Job: Redis Modules & Zig](https://www.youtube.com/watch?v=eCHM8-_poZY) + * [Writing a small ray tracer in Rust and Zig](https://nelari.us/post/raytracer_with_rust_and_zig/) Zig is a brand new language, with no advertising budget. Word of mouth is the only way people find out about the project, and the more people hear about it, @@ -45,8 +46,8 @@ The most highly regarded argument in such a discussion is a real world use case. The issue label [Contributor Friendly](https://github.com/ziglang/zig/issues?q=is%3Aissue+is%3Aopen+label%3A%22contributor+friendly%22) -exists to help contributors find issues that are "limited in scope and/or -knowledge of Zig internals." +exists to help you find issues that are **limited in scope and/or +knowledge of Zig internals.** ### Editing Source Code @@ -61,8 +62,7 @@ To test changes, do the following from the build directory: 1. Run `make install` (on POSIX) or `msbuild -p:Configuration=Release INSTALL.vcxproj` (on Windows). -2. `bin/zig build --build-file ../build.zig test` (on POSIX) or - `bin\zig.exe build --build-file ..\build.zig test` (on Windows). +2. `bin/zig build test` (on POSIX) or `bin\zig.exe build test` (on Windows). 
That runs the whole test suite, which does a lot of extra testing that you likely won't always need, and can take upwards of 2 hours. This is what the @@ -79,8 +79,8 @@ Another example is choosing a different set of things to test. For example, not the other ones. Combining this suggestion with the previous one, you could do this: -`bin/zig build --build-file ../build.zig test-std -Dskip-release` (on POSIX) or -`bin\zig.exe build --build-file ..\build.zig test-std -Dskip-release` (on Windows). +`bin/zig build test-std -Dskip-release` (on POSIX) or +`bin\zig.exe build test-std -Dskip-release` (on Windows). This will run only the standard library tests, in debug mode only, for all targets (it will cross-compile the tests for non-native targets but not run From fa30ebfbe5949fc63aee9853d66932facfd1d168 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 4 Aug 2019 18:24:10 -0400 Subject: [PATCH 047/125] suspension points inside branching control flow --- BRANCH_TODO | 1 - src/all_types.hpp | 15 ++- src/analyze.cpp | 35 ++++-- src/codegen.cpp | 165 ++++++++++++++++------------ src/ir.cpp | 2 +- test/stage1/behavior/coroutines.zig | 26 +++++ 6 files changed, 161 insertions(+), 83 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index 62fee38371..f76252d935 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,4 +1,3 @@ - * suspension points inside branching control flow * go over the commented out tests * error return tracing * compile error for error: expected anyframe->T, found 'anyframe' diff --git a/src/all_types.hpp b/src/all_types.hpp index 87db8edf8d..8e12e720ef 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1716,6 +1716,9 @@ struct CodeGen { ZigLLVMDIFile *dummy_di_file; LLVMValueRef cur_ret_ptr; LLVMValueRef cur_fn_val; + LLVMValueRef cur_async_switch_instr; + LLVMValueRef cur_async_resume_index_ptr; + LLVMValueRef cur_async_awaiter_ptr; LLVMValueRef cur_err_ret_trace_val_arg; LLVMValueRef cur_err_ret_trace_val_stack; LLVMValueRef memcpy_fn_val; @@ 
-2166,8 +2169,8 @@ struct IrBasicBlock { size_t ref_count; // index into the basic block list size_t index; - // for async functions, the split function which corresponds to this block - LLVMValueRef split_llvm_fn; + // for async functions, the resume index which corresponds to this block + size_t resume_index; LLVMBasicBlockRef llvm_block; LLVMBasicBlockRef llvm_exit_block; // The instruction that referenced this basic block and caused us to @@ -3703,8 +3706,12 @@ static const size_t err_union_payload_index = 1; // label (grep this): [coro_frame_struct_layout] static const size_t coro_fn_ptr_index = 0; -static const size_t coro_awaiter_index = 1; -static const size_t coro_arg_start = 2; +static const size_t coro_resume_index = 1; +static const size_t coro_awaiter_index = 2; +static const size_t coro_arg_start = 3; + +// one for the Entry block, resume blocks are indexed after that. +static const size_t coro_extra_resume_block_count = 1; // TODO call graph analysis to find out what this number needs to be for every function // MUST BE A POWER OF TWO. 
diff --git a/src/analyze.cpp b/src/analyze.cpp index 009cb2de12..e7480c579b 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5215,6 +5215,9 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { field_names.append("fn_ptr"); field_types.append(fn_type); + field_names.append("resume_index"); + field_types.append(g->builtin_types.entry_usize); + field_names.append("awaiter"); field_types.append(g->builtin_types.entry_usize); @@ -7532,9 +7535,10 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re // label (grep this): [coro_frame_struct_layout] LLVMTypeRef field_types[] = { ptr_result_type, // fn_ptr + usize_type_ref, // resume_index usize_type_ref, // awaiter }; - LLVMStructSetBody(frame_header_type, field_types, 2, false); + LLVMStructSetBody(frame_header_type, field_types, 3, false); ZigLLVMDIType *di_element_types[] = { ZigLLVMCreateDebugMemberType(g->dbuilder, @@ -7545,12 +7549,19 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 0), ZigLLVM_DIFlags_Zero, usize_di_type), ZigLLVMCreateDebugMemberType(g->dbuilder, - ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "awaiter", + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "resume_index", di_file, line, 8*LLVMABISizeOfType(g->target_data_ref, field_types[1]), 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[1]), 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 1), ZigLLVM_DIFlags_Zero, usize_di_type), + ZigLLVMCreateDebugMemberType(g->dbuilder, + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "awaiter", + di_file, line, + 8*LLVMABISizeOfType(g->target_data_ref, field_types[2]), + 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[2]), + 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 2), + ZigLLVM_DIFlags_Zero, usize_di_type), }; ZigLLVMDIType *replacement_di_type = ZigLLVMCreateDebugStructType(g->dbuilder, 
compile_unit_scope, buf_ptr(name), @@ -7558,7 +7569,7 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re 8*LLVMABISizeOfType(g->target_data_ref, frame_header_type), 8*LLVMABIAlignmentOfType(g->target_data_ref, frame_header_type), ZigLLVM_DIFlags_Zero, - nullptr, di_element_types, 2, 0, nullptr, ""); + nullptr, di_element_types, 3, 0, nullptr, ""); ZigLLVMReplaceTemporary(g->dbuilder, frame_header_di_type, replacement_di_type); } else { @@ -7566,11 +7577,12 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re // label (grep this): [coro_frame_struct_layout] LLVMTypeRef field_types[] = { LLVMPointerType(fn_type, 0), // fn_ptr + usize_type_ref, // resume_index usize_type_ref, // awaiter get_llvm_type(g, ptr_result_type), // result_ptr get_llvm_type(g, result_type), // result }; - LLVMStructSetBody(frame_header_type, field_types, 4, false); + LLVMStructSetBody(frame_header_type, field_types, 5, false); ZigLLVMDIType *di_element_types[] = { ZigLLVMCreateDebugMemberType(g->dbuilder, @@ -7588,18 +7600,25 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 1), ZigLLVM_DIFlags_Zero, usize_di_type), ZigLLVMCreateDebugMemberType(g->dbuilder, - ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result_ptr", + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "awaiter", di_file, line, 8*LLVMABISizeOfType(g->target_data_ref, field_types[2]), 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[2]), 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 2), - ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_result_type)), + ZigLLVM_DIFlags_Zero, usize_di_type), ZigLLVMCreateDebugMemberType(g->dbuilder, - ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result", + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result_ptr", di_file, line, 8*LLVMABISizeOfType(g->target_data_ref, field_types[3]), 
8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[3]), 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 3), + ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_result_type)), + ZigLLVMCreateDebugMemberType(g->dbuilder, + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result", + di_file, line, + 8*LLVMABISizeOfType(g->target_data_ref, field_types[4]), + 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[4]), + 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 4), ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, result_type)), }; ZigLLVMDIType *replacement_di_type = ZigLLVMCreateDebugStructType(g->dbuilder, @@ -7608,7 +7627,7 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re 8*LLVMABISizeOfType(g->target_data_ref, frame_header_type), 8*LLVMABIAlignmentOfType(g->target_data_ref, frame_header_type), ZigLLVM_DIFlags_Zero, - nullptr, di_element_types, 2, 0, nullptr, ""); + nullptr, di_element_types, 5, 0, nullptr, ""); ZigLLVMReplaceTemporary(g->dbuilder, frame_header_di_type, replacement_di_type); } diff --git a/src/codegen.cpp b/src/codegen.cpp index ebdd9e6120..1943859d41 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -1997,7 +1997,9 @@ static LLVMValueRef ir_render_save_err_ret_addr(CodeGen *g, IrExecutable *execut return call_instruction; } -static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *return_instruction) { +static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, + IrInstructionReturn *return_instruction) +{ if (fn_is_async(g->cur_fn)) { LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; LLVMValueRef locals_ptr = g->cur_ret_ptr; @@ -2006,12 +2008,10 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns ZigType *ret_type = ret_type_has_bits ? 
return_instruction->value->value.type : nullptr; if (ir_want_runtime_safety(g, &return_instruction->base)) { - LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_fn_ptr_index, ""); - LLVMValueRef new_resume_fn = g->cur_fn->resume_blocks.last()->split_llvm_fn; - LLVMBuildStore(g->builder, new_resume_fn, resume_index_ptr); + LLVMValueRef new_resume_index = LLVMConstAllOnes(usize_type_ref); + LLVMBuildStore(g->builder, new_resume_index, g->cur_async_resume_index_ptr); } - LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_awaiter_index, ""); LLVMValueRef result_ptr_as_usize; if (ret_type_has_bits) { LLVMValueRef result_ptr_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_arg_start, ""); @@ -2029,8 +2029,8 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns } LLVMValueRef zero = LLVMConstNull(usize_type_ref); LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref); - LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, awaiter_ptr, - all_ones, LLVMAtomicOrderingSequentiallyConsistent, g->is_single_threaded); + LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, g->cur_async_awaiter_ptr, + all_ones, LLVMAtomicOrderingMonotonic, g->is_single_threaded); LLVMBasicBlockRef bad_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadReturn"); LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn"); @@ -3453,7 +3453,6 @@ static void render_async_spills(CodeGen *g) { } static void render_async_var_decls(CodeGen *g, Scope *scope) { - render_async_spills(g); for (;;) { switch (scope->id) { case ScopeIdCImport: @@ -3573,6 +3572,14 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr if (instruction->is_async || callee_is_async) { assert(frame_result_loc != nullptr); + LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_fn_ptr_index, ""); + 
LLVMValueRef bitcasted_fn_val = LLVMBuildBitCast(g->builder, fn_val, + LLVMGetElementType(LLVMTypeOf(fn_ptr_ptr)), ""); + LLVMBuildStore(g->builder, bitcasted_fn_val, fn_ptr_ptr); + + LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_resume_index, ""); + LLVMBuildStore(g->builder, zero, resume_index_ptr); + if (prefix_arg_err_ret_stack) { zig_panic("TODO"); } @@ -3652,23 +3659,24 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr return nullptr; } else if (callee_is_async) { ZigType *ptr_result_type = get_pointer_to_type(g, src_return_type, true); - LLVMValueRef split_llvm_fn = make_fn_llvm_value(g, g->cur_fn); - LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_fn_ptr_index, ""); - LLVMBuildStore(g->builder, split_llvm_fn, fn_ptr_ptr); + + LLVMBasicBlockRef call_bb = LLVMAppendBasicBlock(g->cur_fn_val, "CallResume"); + size_t new_block_index = g->cur_fn->resume_blocks.length + coro_extra_resume_block_count; + g->cur_fn->resume_blocks.append(nullptr); + LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false); + LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, call_bb); + + LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr); LLVMValueRef args[] = {frame_result_loc, LLVMGetUndef(usize_type_ref)}; LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, fn_val, args, 2, llvm_cc, fn_inline, ""); ZigLLVMSetTailCall(call_inst); LLVMBuildRetVoid(g->builder); - g->cur_fn_val = split_llvm_fn; - g->cur_ret_ptr = LLVMGetParam(split_llvm_fn, 0); - LLVMBasicBlockRef call_bb = LLVMAppendBasicBlock(split_llvm_fn, "CallResume"); LLVMPositionBuilderAtEnd(g->builder, call_bb); - if (ir_want_runtime_safety(g, &instruction->base)) { - LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(split_llvm_fn, "BadResume"); - LLVMBasicBlockRef ok_resume_block = LLVMAppendBasicBlock(split_llvm_fn, "OkResume"); - LLVMValueRef 
arg_val = LLVMGetParam(split_llvm_fn, 1); + LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume"); + LLVMBasicBlockRef ok_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "OkResume"); + LLVMValueRef arg_val = LLVMGetParam(g->cur_fn_val, 1); LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref); LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntNE, arg_val, all_ones, ""); LLVMBuildCondBr(g->builder, ok_bit, ok_resume_block, bad_resume_block); @@ -5144,10 +5152,9 @@ static LLVMValueRef ir_render_assert_non_null(CodeGen *g, IrExecutable *executab static LLVMValueRef ir_render_suspend_begin(CodeGen *g, IrExecutable *executable, IrInstructionSuspendBegin *instruction) { - LLVMValueRef locals_ptr = g->cur_ret_ptr; - LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_fn_ptr_index, ""); - LLVMValueRef new_fn_ptr = instruction->resume_block->split_llvm_fn; - LLVMBuildStore(g->builder, new_fn_ptr, fn_ptr_ptr); + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; + LLVMValueRef new_resume_index = LLVMConstInt(usize_type_ref, instruction->resume_block->resume_index, false); + LLVMBuildStore(g->builder, new_resume_index, g->cur_async_resume_index_ptr); return nullptr; } @@ -5159,19 +5166,22 @@ static LLVMValueRef ir_render_suspend_br(CodeGen *g, IrExecutable *executable, } static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInstructionAwait *instruction) { + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; LLVMValueRef target_frame_ptr = ir_llvm_value(g, instruction->frame); ZigType *result_type = instruction->base.value.type; ZigType *ptr_result_type = get_pointer_to_type(g, result_type, true); // Prepare to be suspended - LLVMValueRef split_llvm_fn = make_fn_llvm_value(g, g->cur_fn); - LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_fn_ptr_index, ""); - LLVMBuildStore(g->builder, split_llvm_fn, fn_ptr_ptr); + 
LLVMBasicBlockRef resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, "AwaitResume"); + size_t new_block_index = g->cur_fn->resume_blocks.length + coro_extra_resume_block_count; + g->cur_fn->resume_blocks.append(nullptr); + LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false); + LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, resume_bb); + LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr); // At this point resuming the function will do the correct thing. // This code is as if it is running inside the suspend block. - LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; // caller's own frame pointer LLVMValueRef awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr, usize_type_ref, ""); LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_awaiter_index, ""); @@ -5184,18 +5194,20 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst result_ptr_as_usize = LLVMGetUndef(usize_type_ref); } LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_init_val, - LLVMAtomicOrderingSequentiallyConsistent, g->is_single_threaded); + LLVMAtomicOrderingMonotonic, g->is_single_threaded); LLVMBasicBlockRef bad_await_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadAwait"); LLVMBasicBlockRef complete_suspend_block = LLVMAppendBasicBlock(g->cur_fn_val, "CompleteSuspend"); - LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn"); LLVMValueRef zero = LLVMConstNull(usize_type_ref); LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref); LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, bad_await_block, 2); + LLVMBasicBlockRef predecessor_bb = LLVMGetInsertBlock(g->builder); LLVMAddCase(switch_instr, zero, complete_suspend_block); - LLVMAddCase(switch_instr, all_ones, early_return_block); + + // Early return: The async function has already 
completed. No need to suspend. + LLVMAddCase(switch_instr, all_ones, resume_bb); // We discovered that another awaiter was already here. LLVMPositionBuilderAtEnd(g->builder, bad_await_block); @@ -5205,25 +5217,18 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst LLVMPositionBuilderAtEnd(g->builder, complete_suspend_block); LLVMBuildRetVoid(g->builder); - // The async function has already completed. So we use a tail call to resume ourselves. - LLVMPositionBuilderAtEnd(g->builder, early_return_block); - LLVMValueRef args[] = {g->cur_ret_ptr, result_ptr_as_usize}; - LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, split_llvm_fn, args, 2, LLVMFastCallConv, - ZigLLVM_FnInlineAuto, ""); - ZigLLVMSetTailCall(call_inst); - LLVMBuildRetVoid(g->builder); - - g->cur_fn_val = split_llvm_fn; - g->cur_ret_ptr = LLVMGetParam(split_llvm_fn, 0); - LLVMBasicBlockRef call_bb = LLVMAppendBasicBlock(split_llvm_fn, "AwaitResume"); - LLVMPositionBuilderAtEnd(g->builder, call_bb); + LLVMPositionBuilderAtEnd(g->builder, resume_bb); + // We either got here from Entry (function call) or from the switch above + LLVMValueRef spilled_result_ptr = LLVMBuildPhi(g->builder, usize_type_ref, ""); + LLVMValueRef incoming_values[] = { LLVMGetParam(g->cur_fn_val, 1), result_ptr_as_usize }; + LLVMBasicBlockRef incoming_blocks[] = { g->cur_fn->preamble_llvm_block, predecessor_bb }; + LLVMAddIncoming(spilled_result_ptr, incoming_values, incoming_blocks, 2); if (ir_want_runtime_safety(g, &instruction->base)) { - LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(split_llvm_fn, "BadResume"); - LLVMBasicBlockRef ok_resume_block = LLVMAppendBasicBlock(split_llvm_fn, "OkResume"); - LLVMValueRef arg_val = LLVMGetParam(split_llvm_fn, 1); + LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume"); + LLVMBasicBlockRef ok_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "OkResume"); LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref); 
- LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntNE, arg_val, all_ones, ""); + LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntNE, spilled_result_ptr, all_ones, ""); LLVMBuildCondBr(g->builder, ok_bit, ok_resume_block, bad_resume_block); LLVMPositionBuilderAtEnd(g->builder, bad_resume_block); @@ -5235,7 +5240,6 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst render_async_var_decls(g, instruction->base.scope); if (type_has_bits(result_type)) { - LLVMValueRef spilled_result_ptr = LLVMGetParam(g->cur_fn_val, 1); LLVMValueRef casted_spilled_result_ptr = LLVMBuildIntToPtr(g->builder, spilled_result_ptr, get_llvm_type(g, ptr_result_type), ""); return get_handle_value(g, casted_spilled_result_ptr, result_type, ptr_result_type); @@ -5547,13 +5551,18 @@ static void ir_render(CodeGen *g, ZigFn *fn_entry) { IrExecutable *executable = &fn_entry->analyzed_executable; assert(executable->basic_block_list.length > 0); + + if (fn_is_async(fn_entry)) { + IrBasicBlock *entry_block = executable->basic_block_list.at(0); + LLVMPositionBuilderAtEnd(g->builder, entry_block->llvm_block); + render_async_var_decls(g, entry_block->instruction_list.at(0)->scope); + } + for (size_t block_i = 0; block_i < executable->basic_block_list.length; block_i += 1) { IrBasicBlock *current_block = executable->basic_block_list.at(block_i); assert(current_block->llvm_block); LLVMPositionBuilderAtEnd(g->builder, current_block->llvm_block); - if (current_block->split_llvm_fn != nullptr) { - g->cur_fn_val = current_block->split_llvm_fn; - g->cur_ret_ptr = LLVMGetParam(g->cur_fn_val, 0); + if (current_block->resume_index != 0) { render_async_var_decls(g, current_block->instruction_list.at(0)->scope); } for (size_t instr_i = 0; instr_i < current_block->instruction_list.length; instr_i += 1) { @@ -6416,17 +6425,19 @@ static void build_all_basic_blocks(CodeGen *g, ZigFn *fn) { IrExecutable *executable = &fn->analyzed_executable; 
assert(executable->basic_block_list.length > 0); LLVMValueRef fn_val = fn_llvm_value(g, fn); + LLVMBasicBlockRef first_bb = nullptr; + if (fn_is_async(fn)) { + first_bb = LLVMAppendBasicBlock(fn_val, "AsyncSwitch"); + fn->preamble_llvm_block = first_bb; + } for (size_t block_i = 0; block_i < executable->basic_block_list.length; block_i += 1) { IrBasicBlock *bb = executable->basic_block_list.at(block_i); - if (bb->split_llvm_fn != nullptr) { - assert(bb->split_llvm_fn == reinterpret_cast(0x1)); - fn_val = make_fn_llvm_value(g, fn); - bb->split_llvm_fn = fn_val; - } bb->llvm_block = LLVMAppendBasicBlock(fn_val, bb->name_hint); } - IrBasicBlock *entry_bb = executable->basic_block_list.at(0); - LLVMPositionBuilderAtEnd(g->builder, entry_bb->llvm_block); + if (first_bb == nullptr) { + first_bb = executable->basic_block_list.at(0)->llvm_block; + } + LLVMPositionBuilderAtEnd(g->builder, first_bb); } static void gen_global_var(CodeGen *g, ZigVar *var, LLVMValueRef init_val, @@ -6636,9 +6647,7 @@ static void do_code_gen(CodeGen *g) { g->cur_err_ret_trace_val_stack = nullptr; } - if (is_async) { - render_async_spills(g); - } else { + if (!is_async) { // allocate temporary stack data for (size_t alloca_i = 0; alloca_i < fn_table_entry->alloca_gen_list.length; alloca_i += 1) { IrInstructionAllocaGen *instruction = fn_table_entry->alloca_gen_list.at(alloca_i); @@ -6752,17 +6761,35 @@ static void do_code_gen(CodeGen *g) { LLVMValueRef size_val = LLVMConstInt(usize_type_ref, fn_table_entry->frame_type->abi_size, false); ZigLLVMFunctionSetPrefixData(fn_table_entry->llvm_value, size_val); - if (ir_want_runtime_safety_scope(g, fn_table_entry->child_scope)) { - IrBasicBlock *bad_resume_block = allocate(1); - bad_resume_block->name_hint = "BadResume"; - bad_resume_block->split_llvm_fn = make_fn_llvm_value(g, fn_table_entry); - - LLVMBasicBlockRef llvm_block = LLVMAppendBasicBlock(bad_resume_block->split_llvm_fn, "BadResume"); - LLVMPositionBuilderAtEnd(g->builder, llvm_block); - 
gen_safety_crash(g, PanicMsgIdBadResume); - - fn_table_entry->resume_blocks.append(bad_resume_block); + if (!g->strip_debug_symbols) { + AstNode *source_node = fn_table_entry->proto_node; + ZigLLVMSetCurrentDebugLocation(g->builder, (int)source_node->line + 1, + (int)source_node->column + 1, get_di_scope(g, fn_table_entry->child_scope)); } + IrExecutable *executable = &fn_table_entry->analyzed_executable; + LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume"); + LLVMPositionBuilderAtEnd(g->builder, bad_resume_block); + gen_assertion_scope(g, PanicMsgIdBadResume, fn_table_entry->child_scope); + + LLVMPositionBuilderAtEnd(g->builder, fn_table_entry->preamble_llvm_block); + render_async_spills(g); + g->cur_async_awaiter_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_awaiter_index, ""); + LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_resume_index, ""); + g->cur_async_resume_index_ptr = resume_index_ptr; + LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, ""); + LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block, + fn_table_entry->resume_blocks.length + coro_extra_resume_block_count); + g->cur_async_switch_instr = switch_instr; + + LLVMValueRef zero = LLVMConstNull(usize_type_ref); + LLVMAddCase(switch_instr, zero, executable->basic_block_list.at(0)->llvm_block); + + for (size_t resume_i = 0; resume_i < fn_table_entry->resume_blocks.length; resume_i += 1) { + IrBasicBlock *resume_block = fn_table_entry->resume_blocks.at(resume_i); + LLVMValueRef case_value = LLVMConstInt(usize_type_ref, resume_block->resume_index, false); + LLVMAddCase(switch_instr, case_value, resume_block->llvm_block); + } + } else { // create debug variable declarations for parameters // rely on the first variables in the variable_list being parameters. 
diff --git a/src/ir.cpp b/src/ir.cpp index fbf9da9656..c81000573c 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -24474,7 +24474,7 @@ static IrInstruction *ir_analyze_instruction_suspend_br(IrAnalyze *ira, IrInstru ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec); ir_assert(fn_entry != nullptr, &instruction->base); - new_bb->split_llvm_fn = reinterpret_cast(0x1); + new_bb->resume_index = fn_entry->resume_blocks.length + coro_extra_resume_block_count; fn_entry->resume_blocks.append(new_bb); if (fn_entry->inferred_async_node == nullptr) { diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index 511568a898..ccf9485b51 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -419,3 +419,29 @@ test "async function call return value" { }; S.doTheTest(); } + +test "suspension points inside branching control flow" { + const S = struct { + var global_result: i32 = 10; + + fn doTheTest() void { + expect(10 == global_result); + var frame = async func(true); + expect(10 == global_result); + resume frame; + expect(11 == global_result); + resume frame; + expect(12 == global_result); + resume frame; + expect(13 == global_result); + } + + fn func(b: bool) void { + while (b) { + suspend; + global_result += 1; + } + } + }; + S.doTheTest(); +} From 042914de75f7ccf520fb2058372cc3f255ccfecb Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 4 Aug 2019 18:26:37 -0400 Subject: [PATCH 048/125] move state from ZigFn to CodeGen to save memory --- src/all_types.hpp | 2 +- src/codegen.cpp | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 8e12e720ef..653e6b6254 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1399,7 +1399,6 @@ struct ZigFn { ZigList call_list; LLVMValueRef valgrind_client_request_array; - LLVMBasicBlockRef preamble_llvm_block; FnInline fn_inline; FnAnalState anal_state; @@ -1719,6 +1718,7 @@ struct CodeGen { LLVMValueRef 
cur_async_switch_instr; LLVMValueRef cur_async_resume_index_ptr; LLVMValueRef cur_async_awaiter_ptr; + LLVMBasicBlockRef cur_preamble_llvm_block; LLVMValueRef cur_err_ret_trace_val_arg; LLVMValueRef cur_err_ret_trace_val_stack; LLVMValueRef memcpy_fn_val; diff --git a/src/codegen.cpp b/src/codegen.cpp index 1943859d41..13b3ab5073 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -5221,7 +5221,7 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst // We either got here from Entry (function call) or from the switch above LLVMValueRef spilled_result_ptr = LLVMBuildPhi(g->builder, usize_type_ref, ""); LLVMValueRef incoming_values[] = { LLVMGetParam(g->cur_fn_val, 1), result_ptr_as_usize }; - LLVMBasicBlockRef incoming_blocks[] = { g->cur_fn->preamble_llvm_block, predecessor_bb }; + LLVMBasicBlockRef incoming_blocks[] = { g->cur_preamble_llvm_block, predecessor_bb }; LLVMAddIncoming(spilled_result_ptr, incoming_values, incoming_blocks, 2); if (ir_want_runtime_safety(g, &instruction->base)) { @@ -6428,7 +6428,7 @@ static void build_all_basic_blocks(CodeGen *g, ZigFn *fn) { LLVMBasicBlockRef first_bb = nullptr; if (fn_is_async(fn)) { first_bb = LLVMAppendBasicBlock(fn_val, "AsyncSwitch"); - fn->preamble_llvm_block = first_bb; + g->cur_preamble_llvm_block = first_bb; } for (size_t block_i = 0; block_i < executable->basic_block_list.length; block_i += 1) { IrBasicBlock *bb = executable->basic_block_list.at(block_i); @@ -6771,7 +6771,7 @@ static void do_code_gen(CodeGen *g) { LLVMPositionBuilderAtEnd(g->builder, bad_resume_block); gen_assertion_scope(g, PanicMsgIdBadResume, fn_table_entry->child_scope); - LLVMPositionBuilderAtEnd(g->builder, fn_table_entry->preamble_llvm_block); + LLVMPositionBuilderAtEnd(g->builder, g->cur_preamble_llvm_block); render_async_spills(g); g->cur_async_awaiter_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_awaiter_index, ""); LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, 
g->cur_ret_ptr, coro_resume_index, ""); From fbf21efd24bf812e0fd52a5917708a4c45f05b5e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 4 Aug 2019 18:57:59 -0400 Subject: [PATCH 049/125] simpler, less memory intensive suspend/resume implementation --- src/all_types.hpp | 16 ++++------ src/codegen.cpp | 54 ++++++++++++++++------------------ src/ir.cpp | 74 ++++++++++++++++++----------------------------- src/ir_print.cpp | 10 +++---- 4 files changed, 62 insertions(+), 92 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 653e6b6254..7c903677a8 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1386,7 +1386,6 @@ struct ZigFn { ZigList alloca_gen_list; ZigList variable_list; - ZigList resume_blocks; Buf *section_name; AstNode *set_alignstack_node; @@ -1719,6 +1718,7 @@ struct CodeGen { LLVMValueRef cur_async_resume_index_ptr; LLVMValueRef cur_async_awaiter_ptr; LLVMBasicBlockRef cur_preamble_llvm_block; + size_t cur_resume_block_count; LLVMValueRef cur_err_ret_trace_val_arg; LLVMValueRef cur_err_ret_trace_val_stack; LLVMValueRef memcpy_fn_val; @@ -2114,7 +2114,6 @@ struct ScopeRuntime { struct ScopeSuspend { Scope base; - IrBasicBlock *resume_block; bool reported_err; }; @@ -2169,8 +2168,6 @@ struct IrBasicBlock { size_t ref_count; // index into the basic block list size_t index; - // for async functions, the resume index which corresponds to this block - size_t resume_index; LLVMBasicBlockRef llvm_block; LLVMBasicBlockRef llvm_exit_block; // The instruction that referenced this basic block and caused us to @@ -2354,7 +2351,7 @@ enum IrInstructionId { IrInstructionIdPtrOfArrayToSlice, IrInstructionIdUnionInitNamedField, IrInstructionIdSuspendBegin, - IrInstructionIdSuspendBr, + IrInstructionIdSuspendFinish, IrInstructionIdAwait, IrInstructionIdCoroResume, }; @@ -3600,13 +3597,13 @@ struct IrInstructionPtrOfArrayToSlice { struct IrInstructionSuspendBegin { IrInstruction base; - IrBasicBlock *resume_block; + LLVMBasicBlockRef resume_bb; 
}; -struct IrInstructionSuspendBr { +struct IrInstructionSuspendFinish { IrInstruction base; - IrBasicBlock *resume_block; + IrInstructionSuspendBegin *begin; }; struct IrInstructionAwait { @@ -3710,9 +3707,6 @@ static const size_t coro_resume_index = 1; static const size_t coro_awaiter_index = 2; static const size_t coro_arg_start = 3; -// one for the Entry block, resume blocks are indexed after that. -static const size_t coro_extra_resume_block_count = 1; - // TODO call graph analysis to find out what this number needs to be for every function // MUST BE A POWER OF TWO. static const size_t stack_trace_ptr_count = 32; diff --git a/src/codegen.cpp b/src/codegen.cpp index 13b3ab5073..1b9019ad08 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -3661,8 +3661,8 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr ZigType *ptr_result_type = get_pointer_to_type(g, src_return_type, true); LLVMBasicBlockRef call_bb = LLVMAppendBasicBlock(g->cur_fn_val, "CallResume"); - size_t new_block_index = g->cur_fn->resume_blocks.length + coro_extra_resume_block_count; - g->cur_fn->resume_blocks.append(nullptr); + size_t new_block_index = g->cur_resume_block_count; + g->cur_resume_block_count += 1; LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false); LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, call_bb); @@ -5153,15 +5153,22 @@ static LLVMValueRef ir_render_suspend_begin(CodeGen *g, IrExecutable *executable IrInstructionSuspendBegin *instruction) { LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; - LLVMValueRef new_resume_index = LLVMConstInt(usize_type_ref, instruction->resume_block->resume_index, false); - LLVMBuildStore(g->builder, new_resume_index, g->cur_async_resume_index_ptr); + instruction->resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, "SuspendResume"); + size_t new_block_index = g->cur_resume_block_count; + g->cur_resume_block_count += 1; + LLVMValueRef 
new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false); + LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, instruction->resume_bb); + LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr); return nullptr; } -static LLVMValueRef ir_render_suspend_br(CodeGen *g, IrExecutable *executable, - IrInstructionSuspendBr *instruction) +static LLVMValueRef ir_render_suspend_finish(CodeGen *g, IrExecutable *executable, + IrInstructionSuspendFinish *instruction) { LLVMBuildRetVoid(g->builder); + + LLVMPositionBuilderAtEnd(g->builder, instruction->begin->resume_bb); + render_async_var_decls(g, instruction->base.scope); return nullptr; } @@ -5173,8 +5180,8 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst // Prepare to be suspended LLVMBasicBlockRef resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, "AwaitResume"); - size_t new_block_index = g->cur_fn->resume_blocks.length + coro_extra_resume_block_count; - g->cur_fn->resume_blocks.append(nullptr); + size_t new_block_index = g->cur_resume_block_count; + g->cur_resume_block_count += 1; LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false); LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, resume_bb); LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr); @@ -5534,8 +5541,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_ptr_of_array_to_slice(g, executable, (IrInstructionPtrOfArrayToSlice *)instruction); case IrInstructionIdSuspendBegin: return ir_render_suspend_begin(g, executable, (IrInstructionSuspendBegin *)instruction); - case IrInstructionIdSuspendBr: - return ir_render_suspend_br(g, executable, (IrInstructionSuspendBr *)instruction); + case IrInstructionIdSuspendFinish: + return ir_render_suspend_finish(g, executable, (IrInstructionSuspendFinish *)instruction); case IrInstructionIdCoroResume: return 
ir_render_coro_resume(g, executable, (IrInstructionCoroResume *)instruction); case IrInstructionIdFrameSizeGen: @@ -5552,19 +5559,10 @@ static void ir_render(CodeGen *g, ZigFn *fn_entry) { IrExecutable *executable = &fn_entry->analyzed_executable; assert(executable->basic_block_list.length > 0); - if (fn_is_async(fn_entry)) { - IrBasicBlock *entry_block = executable->basic_block_list.at(0); - LLVMPositionBuilderAtEnd(g->builder, entry_block->llvm_block); - render_async_var_decls(g, entry_block->instruction_list.at(0)->scope); - } - for (size_t block_i = 0; block_i < executable->basic_block_list.length; block_i += 1) { IrBasicBlock *current_block = executable->basic_block_list.at(block_i); assert(current_block->llvm_block); LLVMPositionBuilderAtEnd(g->builder, current_block->llvm_block); - if (current_block->resume_index != 0) { - render_async_var_decls(g, current_block->instruction_list.at(0)->scope); - } for (size_t instr_i = 0; instr_i < current_block->instruction_list.length; instr_i += 1) { IrInstruction *instruction = current_block->instruction_list.at(instr_i); if (instruction->ref_count == 0 && !ir_has_side_effects(instruction)) @@ -6757,6 +6755,8 @@ static void do_code_gen(CodeGen *g) { } if (is_async) { + g->cur_resume_block_count = 0; + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; LLVMValueRef size_val = LLVMConstInt(usize_type_ref, fn_table_entry->frame_type->abi_size, false); ZigLLVMFunctionSetPrefixData(fn_table_entry->llvm_value, size_val); @@ -6777,19 +6777,15 @@ static void do_code_gen(CodeGen *g) { LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_resume_index, ""); g->cur_async_resume_index_ptr = resume_index_ptr; LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, ""); - LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block, - fn_table_entry->resume_blocks.length + coro_extra_resume_block_count); + LLVMValueRef switch_instr = 
LLVMBuildSwitch(g->builder, resume_index, bad_resume_block, 4); g->cur_async_switch_instr = switch_instr; LLVMValueRef zero = LLVMConstNull(usize_type_ref); - LLVMAddCase(switch_instr, zero, executable->basic_block_list.at(0)->llvm_block); - - for (size_t resume_i = 0; resume_i < fn_table_entry->resume_blocks.length; resume_i += 1) { - IrBasicBlock *resume_block = fn_table_entry->resume_blocks.at(resume_i); - LLVMValueRef case_value = LLVMConstInt(usize_type_ref, resume_block->resume_index, false); - LLVMAddCase(switch_instr, case_value, resume_block->llvm_block); - } - + IrBasicBlock *entry_block = executable->basic_block_list.at(0); + LLVMAddCase(switch_instr, zero, entry_block->llvm_block); + g->cur_resume_block_count += 1; + LLVMPositionBuilderAtEnd(g->builder, entry_block->llvm_block); + render_async_var_decls(g, entry_block->instruction_list.at(0)->scope); } else { // create debug variable declarations for parameters // rely on the first variables in the variable_list being parameters. 
diff --git a/src/ir.cpp b/src/ir.cpp index c81000573c..45a48d6f50 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -1049,8 +1049,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionSuspendBegin *) return IrInstructionIdSuspendBegin; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionSuspendBr *) { - return IrInstructionIdSuspendBr; +static constexpr IrInstructionId ir_instruction_id(IrInstructionSuspendFinish *) { + return IrInstructionIdSuspendFinish; } static constexpr IrInstructionId ir_instruction_id(IrInstructionAwait *) { @@ -3260,25 +3260,21 @@ static IrInstruction *ir_build_end_expr(IrBuilder *irb, Scope *scope, AstNode *s return &instruction->base; } -static IrInstruction *ir_build_suspend_begin(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrBasicBlock *resume_block) -{ +static IrInstructionSuspendBegin *ir_build_suspend_begin(IrBuilder *irb, Scope *scope, AstNode *source_node) { IrInstructionSuspendBegin *instruction = ir_build_instruction(irb, scope, source_node); instruction->base.value.type = irb->codegen->builtin_types.entry_void; - instruction->resume_block = resume_block; - ir_ref_bb(resume_block); - - return &instruction->base; + return instruction; } -static IrInstruction *ir_build_suspend_br(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrBasicBlock *resume_block) +static IrInstruction *ir_build_suspend_finish(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstructionSuspendBegin *begin) { - IrInstructionSuspendBr *instruction = ir_build_instruction(irb, scope, source_node); - instruction->resume_block = resume_block; + IrInstructionSuspendFinish *instruction = ir_build_instruction(irb, scope, source_node); + instruction->base.value.type = irb->codegen->builtin_types.entry_void; + instruction->begin = begin; - ir_ref_bb(resume_block); + ir_ref_instruction(&begin->base, irb->current_basic_block); return &instruction->base; } @@ -7890,22 +7886,15 @@ static IrInstruction *ir_gen_suspend(IrBuilder 
*irb, Scope *parent_scope, AstNod return irb->codegen->invalid_instruction; } - IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume"); - - ir_build_suspend_begin(irb, parent_scope, node, resume_block); + IrInstructionSuspendBegin *begin = ir_build_suspend_begin(irb, parent_scope, node); if (node->data.suspend.block != nullptr) { - Scope *child_scope; ScopeSuspend *suspend_scope = create_suspend_scope(irb->codegen, node, parent_scope); - suspend_scope->resume_block = resume_block; - child_scope = &suspend_scope->base; + Scope *child_scope = &suspend_scope->base; IrInstruction *susp_res = ir_gen_node(irb, node->data.suspend.block, child_scope); ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, node->data.suspend.block, susp_res)); } - IrInstruction *result = ir_build_suspend_br(irb, parent_scope, node, resume_block); - result->value.type = irb->codegen->builtin_types.entry_void; - ir_set_cursor_at_end_and_append_block(irb, resume_block); - return result; + return ir_build_suspend_finish(irb, parent_scope, node, begin); } static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scope, @@ -24458,35 +24447,28 @@ static IrInstruction *ir_analyze_instruction_union_init_named_field(IrAnalyze *i } static IrInstruction *ir_analyze_instruction_suspend_begin(IrAnalyze *ira, IrInstructionSuspendBegin *instruction) { - IrBasicBlock *new_bb = ir_get_new_bb_runtime(ira, instruction->resume_block, &instruction->base); - if (new_bb == nullptr) - return ir_unreach_error(ira); - return ir_build_suspend_begin(&ira->new_irb, instruction->base.scope, instruction->base.source_node, new_bb); + IrInstructionSuspendBegin *result = ir_build_suspend_begin(&ira->new_irb, instruction->base.scope, + instruction->base.source_node); + return &result->base; } -static IrInstruction *ir_analyze_instruction_suspend_br(IrAnalyze *ira, IrInstructionSuspendBr *instruction) { - IrBasicBlock *old_dest_block = instruction->resume_block; - - 
IrBasicBlock *new_bb = ir_get_new_bb_runtime(ira, old_dest_block, &instruction->base); - if (new_bb == nullptr) - return ir_unreach_error(ira); +static IrInstruction *ir_analyze_instruction_suspend_finish(IrAnalyze *ira, + IrInstructionSuspendFinish *instruction) +{ + IrInstruction *begin_base = instruction->begin->base.child; + if (type_is_invalid(begin_base->value.type)) + return ira->codegen->invalid_instruction; + ir_assert(begin_base->id == IrInstructionIdSuspendBegin, &instruction->base); + IrInstructionSuspendBegin *begin = reinterpret_cast(begin_base); ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec); ir_assert(fn_entry != nullptr, &instruction->base); - new_bb->resume_index = fn_entry->resume_blocks.length + coro_extra_resume_block_count; - - fn_entry->resume_blocks.append(new_bb); if (fn_entry->inferred_async_node == nullptr) { fn_entry->inferred_async_node = instruction->base.source_node; } - ir_push_resume_block(ira, old_dest_block); - - IrInstruction *result = ir_build_suspend_br(&ira->new_irb, - instruction->base.scope, instruction->base.source_node, new_bb); - result->value.type = ira->codegen->builtin_types.entry_unreachable; - return ir_finish_anal(ira, result); + return ir_build_suspend_finish(&ira->new_irb, instruction->base.scope, instruction->base.source_node, begin); } static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstructionAwait *instruction) { @@ -24847,8 +24829,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction return ir_analyze_instruction_union_init_named_field(ira, (IrInstructionUnionInitNamedField *)instruction); case IrInstructionIdSuspendBegin: return ir_analyze_instruction_suspend_begin(ira, (IrInstructionSuspendBegin *)instruction); - case IrInstructionIdSuspendBr: - return ir_analyze_instruction_suspend_br(ira, (IrInstructionSuspendBr *)instruction); + case IrInstructionIdSuspendFinish: + return ir_analyze_instruction_suspend_finish(ira, (IrInstructionSuspendFinish 
*)instruction); case IrInstructionIdCoroResume: return ir_analyze_instruction_coro_resume(ira, (IrInstructionCoroResume *)instruction); case IrInstructionIdAwait: @@ -24986,7 +24968,7 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdVectorToArray: case IrInstructionIdResetResult: case IrInstructionIdSuspendBegin: - case IrInstructionIdSuspendBr: + case IrInstructionIdSuspendFinish: case IrInstructionIdCoroResume: case IrInstructionIdAwait: return true; diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 46d2906d30..549da9de19 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -1534,10 +1534,8 @@ static void ir_print_suspend_begin(IrPrint *irp, IrInstructionSuspendBegin *inst fprintf(irp->f, "@suspendBegin()"); } -static void ir_print_suspend_br(IrPrint *irp, IrInstructionSuspendBr *instruction) { - fprintf(irp->f, "@suspendBr("); - ir_print_other_block(irp, instruction->resume_block); - fprintf(irp->f, ")"); +static void ir_print_suspend_finish(IrPrint *irp, IrInstructionSuspendFinish *instruction) { + fprintf(irp->f, "@suspendFinish()"); } static void ir_print_coro_resume(IrPrint *irp, IrInstructionCoroResume *instruction) { @@ -2025,8 +2023,8 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdSuspendBegin: ir_print_suspend_begin(irp, (IrInstructionSuspendBegin *)instruction); break; - case IrInstructionIdSuspendBr: - ir_print_suspend_br(irp, (IrInstructionSuspendBr *)instruction); + case IrInstructionIdSuspendFinish: + ir_print_suspend_finish(irp, (IrInstructionSuspendFinish *)instruction); break; case IrInstructionIdCoroResume: ir_print_coro_resume(irp, (IrInstructionCoroResume *)instruction); From f27e5d439c121e620b2c0d9d7a6a8f4154826aa8 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 4 Aug 2019 20:44:52 -0400 Subject: [PATCH 050/125] refactor logic for determining if there is a frame pointer --- src/codegen.cpp | 25 +++++++++++++++---------- 1 file changed, 15 
insertions(+), 10 deletions(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index 1b9019ad08..4ecdfd3bdd 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -343,6 +343,10 @@ static bool cc_want_sret_attr(CallingConvention cc) { zig_unreachable(); } +static bool codegen_have_frame_pointer(CodeGen *g) { + return g->build_mode == BuildModeDebug; +} + static LLVMValueRef make_fn_llvm_value(CodeGen *g, ZigFn *fn) { Buf *unmangled_name = &fn->symbol_name; Buf *symbol_name; @@ -482,7 +486,7 @@ static LLVMValueRef make_fn_llvm_value(CodeGen *g, ZigFn *fn) { addLLVMFnAttr(llvm_fn, "nounwind"); add_uwtable_attr(g, llvm_fn); addLLVMFnAttr(llvm_fn, "nobuiltin"); - if (g->build_mode == BuildModeDebug && fn->fn_inline != FnInlineAlways) { + if (codegen_have_frame_pointer(g) && fn->fn_inline != FnInlineAlways) { ZigLLVMAddFunctionAttr(llvm_fn, "no-frame-pointer-elim", "true"); ZigLLVMAddFunctionAttr(llvm_fn, "no-frame-pointer-elim-non-leaf", nullptr); } @@ -1020,7 +1024,7 @@ static LLVMValueRef get_add_error_return_trace_addr_fn(CodeGen *g) { // Error return trace memory is in the stack, which is impossible to be at address 0 // on any architecture. addLLVMArgAttr(fn_val, (unsigned)0, "nonnull"); - if (g->build_mode == BuildModeDebug) { + if (codegen_have_frame_pointer(g)) { ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true"); ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr); } @@ -1101,7 +1105,7 @@ static LLVMValueRef get_return_err_fn(CodeGen *g) { // Error return trace memory is in the stack, which is impossible to be at address 0 // on any architecture. 
addLLVMArgAttr(fn_val, (unsigned)0, "nonnull"); - if (g->build_mode == BuildModeDebug) { + if (codegen_have_frame_pointer(g)) { ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true"); ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr); } @@ -1173,7 +1177,7 @@ static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) { LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified)); addLLVMFnAttr(fn_val, "nounwind"); add_uwtable_attr(g, fn_val); - if (g->build_mode == BuildModeDebug) { + if (codegen_have_frame_pointer(g)) { ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true"); ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr); } @@ -4210,7 +4214,7 @@ static LLVMValueRef get_enum_tag_name_function(CodeGen *g, ZigType *enum_type) { LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified)); addLLVMFnAttr(fn_val, "nounwind"); add_uwtable_attr(g, fn_val); - if (g->build_mode == BuildModeDebug) { + if (codegen_have_frame_pointer(g)) { ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true"); ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr); } @@ -8220,6 +8224,12 @@ void add_cc_args(CodeGen *g, ZigList &args, const char *out_dep_pa args.append("-g"); } + if (codegen_have_frame_pointer(g)) { + args.append("-fno-omit-frame-pointer"); + } else { + args.append("-fomit-frame-pointer"); + } + switch (g->build_mode) { case BuildModeDebug: // windows c runtime requires -D_DEBUG if using debug libraries @@ -8232,7 +8242,6 @@ void add_cc_args(CodeGen *g, ZigList &args, const char *out_dep_pa } else { args.append("-fno-stack-protector"); } - args.append("-fno-omit-frame-pointer"); break; case BuildModeSafeRelease: // See the comment in the BuildModeFastRelease case for why we pass -O2 rather @@ -8246,7 +8255,6 @@ void add_cc_args(CodeGen *g, ZigList &args, const char *out_dep_pa } else { args.append("-fno-stack-protector"); } - 
args.append("-fomit-frame-pointer"); break; case BuildModeFastRelease: args.append("-DNDEBUG"); @@ -8257,13 +8265,11 @@ void add_cc_args(CodeGen *g, ZigList &args, const char *out_dep_pa // running in -O2 and thus the -O3 path has been tested less. args.append("-O2"); args.append("-fno-stack-protector"); - args.append("-fomit-frame-pointer"); break; case BuildModeSmallRelease: args.append("-DNDEBUG"); args.append("-Os"); args.append("-fno-stack-protector"); - args.append("-fomit-frame-pointer"); break; } @@ -9685,4 +9691,3 @@ CodeGen *codegen_create(Buf *main_pkg_path, Buf *root_src_path, const ZigTarget return g; } - From 0d8c9fcb18b399bd2afedbcbcc7736326ef92297 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 5 Aug 2019 00:41:49 -0400 Subject: [PATCH 051/125] support async functions with inferred error sets --- BRANCH_TODO | 2 + src/all_types.hpp | 2 +- src/analyze.cpp | 226 +++++++++++++++------------- src/codegen.cpp | 141 ++++++++++------- src/codegen.hpp | 1 + src/ir.cpp | 9 +- test/stage1/behavior/coroutines.zig | 77 +++++++--- 7 files changed, 269 insertions(+), 189 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index f76252d935..fcd98f0f71 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,3 +1,4 @@ + * delete IrInstructionMarkErrRetTracePtr * go over the commented out tests * error return tracing * compile error for error: expected anyframe->T, found 'anyframe' @@ -32,3 +33,4 @@ - resume - anyframe, anyframe->T * safety for double await + * call graph analysis to have fewer stack trace frames diff --git a/src/all_types.hpp b/src/all_types.hpp index 7c903677a8..1ea4954dec 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -3705,7 +3705,7 @@ static const size_t err_union_payload_index = 1; static const size_t coro_fn_ptr_index = 0; static const size_t coro_resume_index = 1; static const size_t coro_awaiter_index = 2; -static const size_t coro_arg_start = 3; +static const size_t coro_ret_start = 3; // TODO call graph analysis to find out 
what this number needs to be for every function // MUST BE A POWER OF TWO. diff --git a/src/analyze.cpp b/src/analyze.cpp index e7480c579b..e1b386d9af 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -7,6 +7,7 @@ #include "analyze.hpp" #include "ast_render.hpp" +#include "codegen.hpp" #include "config.h" #include "error.hpp" #include "ir.hpp" @@ -5212,23 +5213,34 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { ZigList field_types = {}; ZigList field_names = {}; - field_names.append("fn_ptr"); + field_names.append("@fn_ptr"); field_types.append(fn_type); - field_names.append("resume_index"); + field_names.append("@resume_index"); field_types.append(g->builtin_types.entry_usize); - field_names.append("awaiter"); + field_names.append("@awaiter"); field_types.append(g->builtin_types.entry_usize); FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; ZigType *ptr_return_type = get_pointer_to_type(g, fn_type_id->return_type, false); - field_names.append("result_ptr"); + field_names.append("@ptr_result"); field_types.append(ptr_return_type); - field_names.append("result"); + field_names.append("@result"); field_types.append(fn_type_id->return_type); + if (codegen_fn_has_err_ret_tracing(g, fn_type_id->return_type)) { + field_names.append("@ptr_stack_trace"); + field_types.append(get_ptr_to_stack_trace_type(g)); + + field_names.append("@stack_trace"); + field_types.append(g->stack_trace_type); + + field_names.append("@instruction_addresses"); + field_types.append(get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count)); + } + for (size_t arg_i = 0; arg_i < fn_type_id->param_count; arg_i += 1) { FnTypeParamInfo *param_info = &fn_type_id->param_info[arg_i]; AstNode *param_decl_node = get_param_decl_node(fn, arg_i); @@ -5237,7 +5249,7 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { if (param_decl_node && !is_var_args) { param_name = param_decl_node->data.param_decl.name; } else { - param_name = buf_sprintf("arg%" 
ZIG_PRI_usize "", arg_i); + param_name = buf_sprintf("@arg%" ZIG_PRI_usize, arg_i); } ZigType *param_type = param_info->type; field_names.append(buf_ptr(param_name)); @@ -5260,7 +5272,13 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { continue; } } - field_names.append(instruction->name_hint); + const char *name; + if (*instruction->name_hint == 0) { + name = buf_ptr(buf_sprintf("@local%" ZIG_PRI_usize, alloca_i)); + } else { + name = instruction->name_hint; + } + field_names.append(name); field_types.append(child_type); } @@ -7369,7 +7387,7 @@ static void resolve_llvm_types_fn_type(CodeGen *g, ZigType *fn_type) { } fn_type->data.fn.gen_return_type = gen_return_type; - if (prefix_arg_error_return_trace) { + if (prefix_arg_error_return_trace && !is_async) { ZigType *gen_type = get_ptr_to_stack_trace_type(g); gen_param_types.append(get_llvm_type(g, gen_type)); param_di_types.append(get_llvm_di_type(g, gen_type)); @@ -7527,110 +7545,112 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re ZigLLVMDIScope *compile_unit_scope = ZigLLVMCompileUnitToScope(g->compile_unit); ZigType *result_type = any_frame_type->data.any_frame.result_type; - if (result_type == nullptr || !type_has_bits(result_type)) { - LLVMTypeRef ptr_result_type = LLVMPointerType(fn_type, 0); - if (result_type == nullptr) { - g->anyframe_fn_type = ptr_result_type; + ZigType *ptr_result_type = (result_type == nullptr) ? 
nullptr : get_pointer_to_type(g, result_type, false); + LLVMTypeRef ptr_fn_llvm_type = LLVMPointerType(fn_type, 0); + if (result_type == nullptr) { + g->anyframe_fn_type = ptr_fn_llvm_type; + } + + ZigList field_types = {}; + ZigList di_element_types = {}; + + // label (grep this): [coro_frame_struct_layout] + field_types.append(ptr_fn_llvm_type); // fn_ptr + field_types.append(usize_type_ref); // resume_index + field_types.append(usize_type_ref); // awaiter + + bool have_result_type = result_type != nullptr && type_has_bits(result_type); + if (have_result_type) { + field_types.append(get_llvm_type(g, ptr_result_type)); // ptr_result + field_types.append(get_llvm_type(g, result_type)); // result + if (codegen_fn_has_err_ret_tracing(g, result_type)) { + field_types.append(get_llvm_type(g, get_ptr_to_stack_trace_type(g))); // ptr_stack_trace + field_types.append(get_llvm_type(g, g->stack_trace_type)); // stack_trace + field_types.append(get_llvm_type(g, get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count))); // instruction_addresses } - // label (grep this): [coro_frame_struct_layout] - LLVMTypeRef field_types[] = { - ptr_result_type, // fn_ptr - usize_type_ref, // resume_index - usize_type_ref, // awaiter - }; - LLVMStructSetBody(frame_header_type, field_types, 3, false); + } + LLVMStructSetBody(frame_header_type, field_types.items, field_types.length, false); - ZigLLVMDIType *di_element_types[] = { - ZigLLVMCreateDebugMemberType(g->dbuilder, - ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "fn_ptr", - di_file, line, - 8*LLVMABISizeOfType(g->target_data_ref, field_types[0]), - 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[0]), - 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 0), - ZigLLVM_DIFlags_Zero, usize_di_type), - ZigLLVMCreateDebugMemberType(g->dbuilder, - ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "resume_index", - di_file, line, - 8*LLVMABISizeOfType(g->target_data_ref, field_types[1]), - 
8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[1]), - 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 1), - ZigLLVM_DIFlags_Zero, usize_di_type), - ZigLLVMCreateDebugMemberType(g->dbuilder, - ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "awaiter", - di_file, line, - 8*LLVMABISizeOfType(g->target_data_ref, field_types[2]), - 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[2]), - 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 2), - ZigLLVM_DIFlags_Zero, usize_di_type), - }; - ZigLLVMDIType *replacement_di_type = ZigLLVMCreateDebugStructType(g->dbuilder, - compile_unit_scope, buf_ptr(name), - di_file, line, - 8*LLVMABISizeOfType(g->target_data_ref, frame_header_type), - 8*LLVMABIAlignmentOfType(g->target_data_ref, frame_header_type), - ZigLLVM_DIFlags_Zero, - nullptr, di_element_types, 3, 0, nullptr, ""); + di_element_types.append( + ZigLLVMCreateDebugMemberType(g->dbuilder, + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "fn_ptr", + di_file, line, + 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)), + 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)), + 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length), + ZigLLVM_DIFlags_Zero, usize_di_type)); + di_element_types.append( + ZigLLVMCreateDebugMemberType(g->dbuilder, + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "resume_index", + di_file, line, + 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)), + 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)), + 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length), + ZigLLVM_DIFlags_Zero, usize_di_type)); + di_element_types.append( + ZigLLVMCreateDebugMemberType(g->dbuilder, + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "awaiter", + di_file, line, + 8*LLVMABISizeOfType(g->target_data_ref, 
field_types.at(di_element_types.length)), + 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)), + 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length), + ZigLLVM_DIFlags_Zero, usize_di_type)); - ZigLLVMReplaceTemporary(g->dbuilder, frame_header_di_type, replacement_di_type); - } else { - ZigType *ptr_result_type = get_pointer_to_type(g, result_type, false); - // label (grep this): [coro_frame_struct_layout] - LLVMTypeRef field_types[] = { - LLVMPointerType(fn_type, 0), // fn_ptr - usize_type_ref, // resume_index - usize_type_ref, // awaiter - get_llvm_type(g, ptr_result_type), // result_ptr - get_llvm_type(g, result_type), // result - }; - LLVMStructSetBody(frame_header_type, field_types, 5, false); - - ZigLLVMDIType *di_element_types[] = { + if (have_result_type) { + di_element_types.append( ZigLLVMCreateDebugMemberType(g->dbuilder, - ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "fn_ptr", + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_result", di_file, line, - 8*LLVMABISizeOfType(g->target_data_ref, field_types[0]), - 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[0]), - 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 0), - ZigLLVM_DIFlags_Zero, usize_di_type), - ZigLLVMCreateDebugMemberType(g->dbuilder, - ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "awaiter", - di_file, line, - 8*LLVMABISizeOfType(g->target_data_ref, field_types[1]), - 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[1]), - 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 1), - ZigLLVM_DIFlags_Zero, usize_di_type), - ZigLLVMCreateDebugMemberType(g->dbuilder, - ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "awaiter", - di_file, line, - 8*LLVMABISizeOfType(g->target_data_ref, field_types[2]), - 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[2]), - 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 2), - ZigLLVM_DIFlags_Zero, usize_di_type), - 
ZigLLVMCreateDebugMemberType(g->dbuilder, - ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result_ptr", - di_file, line, - 8*LLVMABISizeOfType(g->target_data_ref, field_types[3]), - 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[3]), - 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 3), - ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_result_type)), + 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)), + 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)), + 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length), + ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_result_type))); + di_element_types.append( ZigLLVMCreateDebugMemberType(g->dbuilder, ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result", di_file, line, - 8*LLVMABISizeOfType(g->target_data_ref, field_types[4]), - 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[4]), - 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 4), - ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, result_type)), - }; - ZigLLVMDIType *replacement_di_type = ZigLLVMCreateDebugStructType(g->dbuilder, - compile_unit_scope, buf_ptr(name), - di_file, line, - 8*LLVMABISizeOfType(g->target_data_ref, frame_header_type), - 8*LLVMABIAlignmentOfType(g->target_data_ref, frame_header_type), - ZigLLVM_DIFlags_Zero, - nullptr, di_element_types, 5, 0, nullptr, ""); + 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)), + 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)), + 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length), + ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, result_type))); - ZigLLVMReplaceTemporary(g->dbuilder, frame_header_di_type, replacement_di_type); - } + if (codegen_fn_has_err_ret_tracing(g, result_type)) { + di_element_types.append( + ZigLLVMCreateDebugMemberType(g->dbuilder, + 
ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace", + di_file, line, + 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)), + 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)), + 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length), + ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, get_ptr_to_stack_trace_type(g)))); + di_element_types.append( + ZigLLVMCreateDebugMemberType(g->dbuilder, + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "stack_trace", + di_file, line, + 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)), + 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)), + 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length), + ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, g->stack_trace_type))); + di_element_types.append( + ZigLLVMCreateDebugMemberType(g->dbuilder, + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "instruction_addresses", + di_file, line, + 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)), + 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)), + 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length), + ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count)))); + } + }; + + ZigLLVMDIType *replacement_di_type = ZigLLVMCreateDebugStructType(g->dbuilder, + compile_unit_scope, buf_ptr(name), + di_file, line, + 8*LLVMABISizeOfType(g->target_data_ref, frame_header_type), + 8*LLVMABIAlignmentOfType(g->target_data_ref, frame_header_type), + ZigLLVM_DIFlags_Zero, + nullptr, di_element_types.items, di_element_types.length, 0, nullptr, ""); + + ZigLLVMReplaceTemporary(g->dbuilder, frame_header_di_type, replacement_di_type); } static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus 
wanted_resolve_status) { diff --git a/src/codegen.cpp b/src/codegen.cpp index 4ecdfd3bdd..59289523a7 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -297,12 +297,30 @@ static LLVMLinkage to_llvm_linkage(GlobalLinkageId id) { zig_unreachable(); } +// label (grep this): [coro_frame_struct_layout] +static uint32_t frame_index_trace(CodeGen *g, FnTypeId *fn_type_id) { + // [0] *ReturnType + // [1] ReturnType + uint32_t return_field_count = type_has_bits(fn_type_id->return_type) ? 2 : 0; + return coro_ret_start + return_field_count; +} + +// label (grep this): [coro_frame_struct_layout] +static uint32_t frame_index_arg(CodeGen *g, FnTypeId *fn_type_id) { + bool have_stack_trace = g->have_err_ret_tracing && codegen_fn_has_err_ret_tracing(g, fn_type_id->return_type); + // [0] *StackTrace + // [1] StackTrace + // [2] [stack_trace_ptr_count]usize + uint32_t trace_field_count = have_stack_trace ? 3 : 0; + return frame_index_trace(g, fn_type_id) + trace_field_count; +} + static uint32_t get_err_ret_trace_arg_index(CodeGen *g, ZigFn *fn_table_entry) { if (!g->have_err_ret_tracing) { return UINT32_MAX; } - if (fn_table_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync) { - return 0; + if (fn_is_async(fn_table_entry)) { + return UINT32_MAX; } ZigType *fn_type = fn_table_entry->type_entry; if (!fn_type_can_fail(&fn_type->data.fn.fn_type_id)) { @@ -438,10 +456,6 @@ static LLVMValueRef make_fn_llvm_value(CodeGen *g, ZigFn *fn) { } else { LLVMSetFunctionCallConv(llvm_fn, get_llvm_cc(g, fn_type->data.fn.fn_type_id.cc)); } - if (cc == CallingConventionAsync) { - addLLVMFnAttr(llvm_fn, "optnone"); - addLLVMFnAttr(llvm_fn, "noinline"); - } bool want_cold = fn->is_cold || cc == CallingConventionCold; if (want_cold) { @@ -1273,8 +1287,8 @@ static LLVMValueRef get_cur_err_ret_trace_val(CodeGen *g, Scope *scope) { if (!g->have_err_ret_tracing) { return nullptr; } - if (g->cur_fn->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync) { - return 
g->cur_err_ret_trace_val_stack; + if (fn_is_async(g->cur_fn)) { + return LLVMBuildLoad(g->builder, g->cur_err_ret_trace_val_arg, ""); } if (g->cur_err_ret_trace_val_stack != nullptr) { return g->cur_err_ret_trace_val_stack; @@ -2006,7 +2020,6 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, { if (fn_is_async(g->cur_fn)) { LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; - LLVMValueRef locals_ptr = g->cur_ret_ptr; bool ret_type_has_bits = return_instruction->value != nullptr && type_has_bits(return_instruction->value->value.type); ZigType *ret_type = ret_type_has_bits ? return_instruction->value->value.type : nullptr; @@ -2018,7 +2031,7 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, LLVMValueRef result_ptr_as_usize; if (ret_type_has_bits) { - LLVMValueRef result_ptr_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_arg_start, ""); + LLVMValueRef result_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_ret_start, ""); LLVMValueRef result_ptr = LLVMBuildLoad(g->builder, result_ptr_ptr, ""); if (!handle_is_ptr(ret_type)) { // It's a scalar, so it didn't get written to the result ptr. Do that now. 
@@ -3256,7 +3269,7 @@ static LLVMValueRef ir_render_return_ptr(CodeGen *g, IrExecutable *executable, return nullptr; src_assert(g->cur_ret_ptr != nullptr, instruction->base.source_node); if (fn_is_async(g->cur_fn)) { - LLVMValueRef ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_arg_start, ""); + LLVMValueRef ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_ret_start, ""); return LLVMBuildLoad(g->builder, ptr_ptr, ""); } return g->cur_ret_ptr; @@ -3356,12 +3369,6 @@ static LLVMValueRef ir_render_elem_ptr(CodeGen *g, IrExecutable *executable, IrI } } -static bool get_prefix_arg_err_ret_stack(CodeGen *g, FnTypeId *fn_type_id) { - return g->have_err_ret_tracing && - (fn_type_id->return_type->id == ZigTypeIdErrorUnion || - fn_type_id->return_type->id == ZigTypeIdErrorSet); -} - static LLVMValueRef get_new_stack_addr(CodeGen *g, LLVMValueRef new_stack) { LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, new_stack, (unsigned)slice_ptr_index, ""); LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, new_stack, (unsigned)slice_len_index, ""); @@ -3402,7 +3409,7 @@ static void set_call_instr_sret(CodeGen *g, LLVMValueRef call_instr) { static void render_async_spills(CodeGen *g) { ZigType *fn_type = g->cur_fn->type_entry; ZigType *import = get_scope_import(&g->cur_fn->fndef_scope->base); - size_t async_var_index = coro_arg_start + (type_has_bits(fn_type->data.fn.fn_type_id.return_type) ? 
2 : 0); + uint32_t async_var_index = frame_index_arg(g, &fn_type->data.fn.fn_type_id); for (size_t var_i = 0; var_i < g->cur_fn->variable_list.length; var_i += 1) { ZigVar *var = g->cur_fn->variable_list.at(var_i); @@ -3518,11 +3525,11 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr CallingConvention cc = fn_type->data.fn.fn_type_id.cc; bool first_arg_ret = ret_has_bits && want_first_arg_sret(g, fn_type_id); - bool prefix_arg_err_ret_stack = get_prefix_arg_err_ret_stack(g, fn_type_id); + bool prefix_arg_err_ret_stack = codegen_fn_has_err_ret_tracing(g, fn_type_id->return_type); bool is_var_args = fn_type_id->is_var_args; ZigList gen_param_values = {}; LLVMValueRef result_loc = instruction->result_loc ? ir_llvm_value(g, instruction->result_loc) : nullptr; - LLVMValueRef zero = LLVMConstNull(g->builtin_types.entry_usize->llvm_type); + LLVMValueRef zero = LLVMConstNull(usize_type_ref); LLVMValueRef frame_result_loc; LLVMValueRef awaiter_init_val; LLVMValueRef ret_ptr; @@ -3534,7 +3541,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr if (ret_has_bits) { // Use the result location which is inside the frame if this is an async call. 
- ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start + 1, ""); + ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start + 1, ""); } } else { LLVMValueRef frame_slice_ptr = ir_llvm_value(g, instruction->new_stack); @@ -3564,14 +3571,49 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr ret_ptr = result_loc; } } + + if (prefix_arg_err_ret_stack) { + uint32_t trace_field_index = frame_index_trace(g, fn_type_id); + LLVMValueRef trace_field_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, + trace_field_index, ""); + LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, + trace_field_index + 1, ""); + LLVMValueRef trace_field_addrs = LLVMBuildStructGEP(g->builder, frame_result_loc, + trace_field_index + 2, ""); + LLVMBuildStore(g->builder, trace_field_ptr, trace_field_ptr_ptr); + + LLVMValueRef index_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 0, ""); + LLVMBuildStore(g->builder, zero, index_ptr); + + LLVMValueRef addrs_slice_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 1, ""); + LLVMValueRef addrs_ptr_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_ptr_index, ""); + LLVMValueRef indices[] = { LLVMConstNull(usize_type_ref), LLVMConstNull(usize_type_ref) }; + LLVMValueRef trace_field_addrs_as_ptr = LLVMBuildInBoundsGEP(g->builder, trace_field_addrs, indices, 2, ""); + LLVMBuildStore(g->builder, trace_field_addrs_as_ptr, addrs_ptr_ptr); + + LLVMValueRef addrs_len_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_len_index, ""); + LLVMBuildStore(g->builder, LLVMConstInt(usize_type_ref, stack_trace_ptr_count, false), addrs_len_ptr); + } } else if (callee_is_async) { frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc); - awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr, - g->builtin_types.entry_usize->llvm_type, ""); // caller's own frame pointer + awaiter_init_val = LLVMBuildPtrToInt(g->builder, 
g->cur_ret_ptr, usize_type_ref, ""); // caller's own frame pointer if (ret_has_bits) { - // Use the call instruction's result location. - ret_ptr = result_loc; + if (result_loc != nullptr) { + // Use the call instruction's result location. + ret_ptr = result_loc; + } else { + // return type is a scalar, but we still need a pointer to it. Use the async fn frame. + ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start + 1, ""); + } } + + if (prefix_arg_err_ret_stack) { + uint32_t trace_field_index = frame_index_trace(g, fn_type_id); + LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, trace_field_index, ""); + LLVMValueRef err_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope); + LLVMBuildStore(g->builder, err_trace_val, trace_field_ptr); + } + } if (instruction->is_async || callee_is_async) { assert(frame_result_loc != nullptr); @@ -3584,19 +3626,14 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_resume_index, ""); LLVMBuildStore(g->builder, zero, resume_index_ptr); - if (prefix_arg_err_ret_stack) { - zig_panic("TODO"); - } - LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_awaiter_index, ""); LLVMBuildStore(g->builder, awaiter_init_val, awaiter_ptr); if (ret_has_bits) { - LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start, ""); + LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start, ""); LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr); } - } - if (!instruction->is_async && !callee_is_async) { + } else { if (first_arg_ret) { gen_param_values.append(result_loc); } @@ -3628,16 +3665,15 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMValueRef result; if (instruction->is_async || callee_is_async) { - size_t ret_2_or_0 = 
type_has_bits(fn_type->data.fn.fn_type_id.return_type) ? 2 : 0; - size_t arg_start_i = coro_arg_start + ret_2_or_0; + uint32_t arg_start_i = frame_index_arg(g, &fn_type->data.fn.fn_type_id); LLVMValueRef casted_frame; if (instruction->new_stack != nullptr) { // We need the frame type to be a pointer to a struct that includes the args - // label (grep this): [coro_frame_struct_layout] size_t field_count = arg_start_i + gen_param_values.length; LLVMTypeRef *field_types = allocate_nonzero(field_count); LLVMGetStructElementTypes(LLVMGetElementType(LLVMTypeOf(frame_result_loc)), field_types); + assert(LLVMCountStructElementTypes(LLVMGetElementType(LLVMTypeOf(frame_result_loc))) == arg_start_i); for (size_t arg_i = 0; arg_i < gen_param_values.length; arg_i += 1) { field_types[arg_start_i + arg_i] = LLVMTypeOf(gen_param_values.at(arg_i)); } @@ -5198,7 +5234,7 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_awaiter_index, ""); LLVMValueRef result_ptr_as_usize; if (type_has_bits(result_type)) { - LLVMValueRef result_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_arg_start, ""); + LLVMValueRef result_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_ret_start, ""); LLVMValueRef result_ptr = LLVMBuildLoad(g->builder, result_ptr_ptr, ""); result_ptr_as_usize = LLVMBuildPtrToInt(g->builder, result_ptr, usize_type_ref, ""); } else { @@ -5259,23 +5295,6 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst } } -static LLVMTypeRef anyframe_fn_type(CodeGen *g) { - if (g->anyframe_fn_type != nullptr) - return g->anyframe_fn_type; - - LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; - ZigType *anyframe_type = get_any_frame_type(g, nullptr); - LLVMTypeRef return_type = LLVMVoidType(); - LLVMTypeRef param_types[] = { - get_llvm_type(g, anyframe_type), - usize_type_ref, - }; - LLVMTypeRef 
fn_type = LLVMFunctionType(return_type, param_types, 2, false); - g->anyframe_fn_type = LLVMPointerType(fn_type, 0); - - return g->anyframe_fn_type; -} - static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, IrInstructionCoroResume *instruction) { @@ -5285,7 +5304,7 @@ static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, assert(frame_type->id == ZigTypeIdAnyFrame); LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, frame, coro_fn_ptr_index, ""); LLVMValueRef uncasted_fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, ""); - LLVMValueRef fn_val = LLVMBuildIntToPtr(g->builder, uncasted_fn_val, anyframe_fn_type(g), ""); + LLVMValueRef fn_val = LLVMBuildIntToPtr(g->builder, uncasted_fn_val, g->anyframe_fn_type, ""); LLVMValueRef arg_val = ir_want_runtime_safety(g, &instruction->base) ? LLVMConstAllOnes(usize_type_ref) : LLVMGetUndef(usize_type_ref); LLVMValueRef args[] = {frame, arg_val}; @@ -6636,7 +6655,8 @@ static void do_code_gen(CodeGen *g) { } // error return tracing setup - bool have_err_ret_trace_stack = g->have_err_ret_tracing && fn_table_entry->calls_or_awaits_errorable_fn && !is_async && !have_err_ret_trace_arg; + bool have_err_ret_trace_stack = g->have_err_ret_tracing && fn_table_entry->calls_or_awaits_errorable_fn && + !is_async && !have_err_ret_trace_arg; LLVMValueRef err_ret_array_val = nullptr; if (have_err_ret_trace_stack) { ZigType *array_type = get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count); @@ -6780,6 +6800,11 @@ static void do_code_gen(CodeGen *g) { g->cur_async_awaiter_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_awaiter_index, ""); LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_resume_index, ""); g->cur_async_resume_index_ptr = resume_index_ptr; + if (codegen_fn_has_err_ret_tracing(g, fn_type_id->return_type)) { + uint32_t field_index = frame_index_trace(g, fn_type_id); + g->cur_err_ret_trace_val_arg = 
LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, field_index, ""); + } + LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, ""); LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block, 4); g->cur_async_switch_instr = switch_instr; @@ -9691,3 +9716,9 @@ CodeGen *codegen_create(Buf *main_pkg_path, Buf *root_src_path, const ZigTarget return g; } + +bool codegen_fn_has_err_ret_tracing(CodeGen *g, ZigType *return_type) { + return g->have_err_ret_tracing && + (return_type->id == ZigTypeIdErrorUnion || + return_type->id == ZigTypeIdErrorSet); +} diff --git a/src/codegen.hpp b/src/codegen.hpp index cdff61a26f..0ee4ce837e 100644 --- a/src/codegen.hpp +++ b/src/codegen.hpp @@ -61,5 +61,6 @@ Buf *codegen_generate_builtin_source(CodeGen *g); TargetSubsystem detect_subsystem(CodeGen *g); void codegen_release_caches(CodeGen *codegen); +bool codegen_fn_has_err_ret_tracing(CodeGen *g, ZigType *return_type); #endif diff --git a/src/ir.cpp b/src/ir.cpp index 45a48d6f50..e6212fa079 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -14859,11 +14859,7 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc ZigType *fn_type, IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count, IrInstruction *casted_new_stack) { - if (fn_entry == nullptr) { - if (call_instruction->new_stack == nullptr) { - ir_add_error(ira, fn_ref, buf_sprintf("function is not comptime-known; @asyncCall required")); - return ira->codegen->invalid_instruction; - } + if (casted_new_stack != nullptr) { // this is an @asyncCall if (fn_type->data.fn.fn_type_id.cc != CallingConventionAsync) { @@ -14881,6 +14877,9 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc IrInstructionCallGen *call_gen = ir_build_call_gen(ira, &call_instruction->base, nullptr, fn_ref, arg_count, casted_args, FnInlineAuto, true, casted_new_stack, ret_ptr, anyframe_type); return &call_gen->base; + } else if (fn_entry == nullptr) { + 
ir_add_error(ira, fn_ref, buf_sprintf("function is not comptime-known; @asyncCall required")); + return ira->codegen->invalid_instruction; } ZigType *frame_type = get_coro_frame_type(ira->codegen, fn_entry); diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index ccf9485b51..c4f4cd3c99 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -161,13 +161,13 @@ fn seq(c: u8) void { test "coroutine suspend with block" { const p = async testSuspendBlock(); - expect(!result); + expect(!global_result); resume a_promise; - expect(result); + expect(global_result); } var a_promise: anyframe = undefined; -var result = false; +var global_result = false; async fn testSuspendBlock() void { suspend { comptime expect(@typeOf(@frame()) == *@Frame(testSuspendBlock)); @@ -178,7 +178,7 @@ async fn testSuspendBlock() void { // var our_handle: anyframe = @frame(); expect(a_promise == anyframe(@frame())); - result = true; + global_result = true; } var await_a_promise: anyframe = undefined; @@ -283,29 +283,56 @@ test "@asyncCall with return type" { const Foo = struct { bar: async fn () i32, - async fn afunc() i32 { + var global_frame: anyframe = undefined; + + async fn middle() i32 { + return afunc(); + } + + fn afunc() i32 { + global_frame = @frame(); suspend; return 1234; } }; - var foo = Foo{ .bar = Foo.afunc }; - var bytes: [64]u8 = undefined; + var foo = Foo{ .bar = Foo.middle }; + var bytes: [100]u8 = undefined; var aresult: i32 = 0; - const frame = @asyncCall(&bytes, &aresult, foo.bar); + _ = @asyncCall(&bytes, &aresult, foo.bar); expect(aresult == 0); - resume frame; + resume Foo.global_frame; expect(aresult == 1234); } -//test "async fn with inferred error set" { -// const p = async failing(); -// resume p; -//} -// -//async fn failing() !void { -// suspend; -// return error.Fail; -//} +test "async fn with inferred error set" { + const S = struct { + var global_frame: anyframe = undefined; + + fn 
doTheTest() void { + var frame: [1]@Frame(middle) = undefined; + var result: anyerror!void = undefined; + _ = @asyncCall(@sliceToBytes(frame[0..]), &result, middle); + resume global_frame; + std.testing.expectError(error.Fail, result); + } + + async fn middle() !void { + var f = async middle2(); + return await f; + } + + fn middle2() !void { + return failing(); + } + + fn failing() !void { + global_frame = @frame(); + suspend; + return error.Fail; + } + }; + S.doTheTest(); +} //test "error return trace across suspend points - early return" { // const p = nonFailing(); @@ -422,24 +449,24 @@ test "async function call return value" { test "suspension points inside branching control flow" { const S = struct { - var global_result: i32 = 10; + var result: i32 = 10; fn doTheTest() void { - expect(10 == global_result); + expect(10 == result); var frame = async func(true); - expect(10 == global_result); + expect(10 == result); resume frame; - expect(11 == global_result); + expect(11 == result); resume frame; - expect(12 == global_result); + expect(12 == result); resume frame; - expect(13 == global_result); + expect(13 == result); } fn func(b: bool) void { while (b) { suspend; - global_result += 1; + result += 1; } } }; From a7763c06f9941bdeccc0679abf863b21f7cc33a3 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 5 Aug 2019 00:44:39 -0400 Subject: [PATCH 052/125] delete IrInstructionMarkErrRetTracePtr this IR instruction is no longer needed --- BRANCH_TODO | 1 - src/all_types.hpp | 7 ------- src/codegen.cpp | 10 ---------- src/ir.cpp | 27 --------------------------- src/ir_print.cpp | 9 --------- 5 files changed, 54 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index fcd98f0f71..85fe81b3b1 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,4 +1,3 @@ - * delete IrInstructionMarkErrRetTracePtr * go over the commented out tests * error return tracing * compile error for error: expected anyframe->T, found 'anyframe' diff --git a/src/all_types.hpp b/src/all_types.hpp 
index 1ea4954dec..85f00a6baf 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -2334,7 +2334,6 @@ enum IrInstructionId { IrInstructionIdAtomicLoad, IrInstructionIdSaveErrRetAddr, IrInstructionIdAddImplicitReturnType, - IrInstructionIdMarkErrRetTracePtr, IrInstructionIdErrSetCast, IrInstructionIdToBytes, IrInstructionIdFromBytes, @@ -3451,12 +3450,6 @@ struct IrInstructionAddImplicitReturnType { IrInstruction *value; }; -struct IrInstructionMarkErrRetTracePtr { - IrInstruction base; - - IrInstruction *err_ret_trace_ptr; -}; - // For float ops which take a single argument struct IrInstructionFloatOp { IrInstruction base; diff --git a/src/codegen.cpp b/src/codegen.cpp index 59289523a7..f9e0598707 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -5062,14 +5062,6 @@ static LLVMValueRef ir_render_atomic_load(CodeGen *g, IrExecutable *executable, return load_inst; } -static LLVMValueRef ir_render_mark_err_ret_trace_ptr(CodeGen *g, IrExecutable *executable, - IrInstructionMarkErrRetTracePtr *instruction) -{ - assert(g->have_err_ret_tracing); - g->cur_err_ret_trace_val_stack = ir_llvm_value(g, instruction->err_ret_trace_ptr); - return nullptr; -} - static LLVMValueRef ir_render_float_op(CodeGen *g, IrExecutable *executable, IrInstructionFloatOp *instruction) { LLVMValueRef op = ir_llvm_value(g, instruction->op1); assert(instruction->base.value.type->id == ZigTypeIdFloat); @@ -5544,8 +5536,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_atomic_load(g, executable, (IrInstructionAtomicLoad *)instruction); case IrInstructionIdSaveErrRetAddr: return ir_render_save_err_ret_addr(g, executable, (IrInstructionSaveErrRetAddr *)instruction); - case IrInstructionIdMarkErrRetTracePtr: - return ir_render_mark_err_ret_trace_ptr(g, executable, (IrInstructionMarkErrRetTracePtr *)instruction); case IrInstructionIdFloatOp: return ir_render_float_op(g, executable, (IrInstructionFloatOp *)instruction); case 
IrInstructionIdMulAdd: diff --git a/src/ir.cpp b/src/ir.cpp index e6212fa079..cae2768bef 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -993,10 +993,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionAddImplicitRetur return IrInstructionIdAddImplicitReturnType; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionMarkErrRetTracePtr *) { - return IrInstructionIdMarkErrRetTracePtr; -} - static constexpr IrInstructionId ir_instruction_id(IrInstructionFloatOp *) { return IrInstructionIdFloatOp; } @@ -3092,15 +3088,6 @@ static IrInstruction *ir_build_add_implicit_return_type(IrBuilder *irb, Scope *s return &instruction->base; } -static IrInstruction *ir_build_mark_err_ret_trace_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *err_ret_trace_ptr) { - IrInstructionMarkErrRetTracePtr *instruction = ir_build_instruction(irb, scope, source_node); - instruction->err_ret_trace_ptr = err_ret_trace_ptr; - - ir_ref_instruction(err_ret_trace_ptr, irb->current_basic_block); - - return &instruction->base; -} - static IrInstruction *ir_build_has_decl(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *container, IrInstruction *name) { @@ -23908,17 +23895,6 @@ static IrInstruction *ir_analyze_instruction_save_err_ret_addr(IrAnalyze *ira, I return result; } -static IrInstruction *ir_analyze_instruction_mark_err_ret_trace_ptr(IrAnalyze *ira, IrInstructionMarkErrRetTracePtr *instruction) { - IrInstruction *err_ret_trace_ptr = instruction->err_ret_trace_ptr->child; - if (type_is_invalid(err_ret_trace_ptr->value.type)) - return ira->codegen->invalid_instruction; - - IrInstruction *result = ir_build_mark_err_ret_trace_ptr(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, err_ret_trace_ptr); - result->value.type = ira->codegen->builtin_types.entry_void; - return result; -} - static void ir_eval_float_op(IrAnalyze *ira, IrInstructionFloatOp *source_instr, ZigType *float_type, ConstExprValue *op, 
ConstExprValue *out_val) { assert(ira && source_instr && float_type && out_val && op); @@ -24798,8 +24774,6 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction return ir_analyze_instruction_save_err_ret_addr(ira, (IrInstructionSaveErrRetAddr *)instruction); case IrInstructionIdAddImplicitReturnType: return ir_analyze_instruction_add_implicit_return_type(ira, (IrInstructionAddImplicitReturnType *)instruction); - case IrInstructionIdMarkErrRetTracePtr: - return ir_analyze_instruction_mark_err_ret_trace_ptr(ira, (IrInstructionMarkErrRetTracePtr *)instruction); case IrInstructionIdFloatOp: return ir_analyze_instruction_float_op(ira, (IrInstructionFloatOp *)instruction); case IrInstructionIdMulAdd: @@ -24951,7 +24925,6 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdCancel: case IrInstructionIdSaveErrRetAddr: case IrInstructionIdAddImplicitReturnType: - case IrInstructionIdMarkErrRetTracePtr: case IrInstructionIdAtomicRmw: case IrInstructionIdCmpxchgGen: case IrInstructionIdCmpxchgSrc: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 549da9de19..bc9d09b30c 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -1457,12 +1457,6 @@ static void ir_print_add_implicit_return_type(IrPrint *irp, IrInstructionAddImpl fprintf(irp->f, ")"); } -static void ir_print_mark_err_ret_trace_ptr(IrPrint *irp, IrInstructionMarkErrRetTracePtr *instruction) { - fprintf(irp->f, "@markErrRetTracePtr("); - ir_print_other_instruction(irp, instruction->err_ret_trace_ptr); - fprintf(irp->f, ")"); -} - static void ir_print_float_op(IrPrint *irp, IrInstructionFloatOp *instruction) { fprintf(irp->f, "@%s(", float_op_to_name(instruction->op, false)); @@ -1963,9 +1957,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdAddImplicitReturnType: ir_print_add_implicit_return_type(irp, (IrInstructionAddImplicitReturnType *)instruction); break; - case IrInstructionIdMarkErrRetTracePtr: - 
ir_print_mark_err_ret_trace_ptr(irp, (IrInstructionMarkErrRetTracePtr *)instruction); - break; case IrInstructionIdFloatOp: ir_print_float_op(irp, (IrInstructionFloatOp *)instruction); break; From dfe8c5a2e9b1778c1911e987c9286d05db307fe7 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 5 Aug 2019 03:09:17 -0400 Subject: [PATCH 053/125] add a src() method to AstNode to aid debugging --- src/all_types.hpp | 4 ++++ src/ast_render.cpp | 6 ++++++ 2 files changed, 10 insertions(+) diff --git a/src/all_types.hpp b/src/all_types.hpp index 85f00a6baf..0098a630d8 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1009,6 +1009,10 @@ struct AstNode { AstNodeAnyFrameType anyframe_type; AstNodeEnumLiteral enum_literal; } data; + + // This is a function for use in the debugger to print + // the source location. + void src(); }; // this struct is allocated with allocate_nonzero diff --git a/src/ast_render.cpp b/src/ast_render.cpp index e54bd58676..dd4d9cf646 100644 --- a/src/ast_render.cpp +++ b/src/ast_render.cpp @@ -1186,3 +1186,9 @@ void ast_render(FILE *f, AstNode *node, int indent_size) { render_node_grouped(&ar, node); } + +void AstNode::src() { + fprintf(stderr, "%s:%" ZIG_PRI_usize ":%" ZIG_PRI_usize "\n", + buf_ptr(this->owner->data.structure.root_struct->path), + this->line + 1, this->column + 1); +} From 20f63e588e62c4a7250bc96c9e5b54c8106ad1af Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 5 Aug 2019 03:10:14 -0400 Subject: [PATCH 054/125] async functions have error return traces where appropriate however the traces are not merged on `await` or async function calls yet. When an async function has an error set or error union as its return type, it has a `StackTrace` before the args in the frame, so that it is accessible from `anyframe->T` awaiters. However when it does not have an errorable return type, but it does call or await an errorable, it has a stack trace just before the locals. 
This way when doing an `@asyncCall` on an async function pointer, it can populate the args (which are after the `StackTrace`) because it knows the offset of the args based only on the return type. This sort of matches normal functions, where a stack trace pointer could be supplied by a parameter, or it could be supplied by the stack of the function, depending on whether the function itself is errorable. --- BRANCH_TODO | 2 +- src/analyze.cpp | 28 ++++---- src/codegen.cpp | 105 ++++++++++++++++------------ src/codegen.hpp | 3 +- src/ir.cpp | 7 +- test/stage1/behavior/coroutines.zig | 16 +++-- 6 files changed, 91 insertions(+), 70 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index 85fe81b3b1..00aab06910 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,5 +1,5 @@ + * error return tracing - handle `await` and function calls * go over the commented out tests - * error return tracing * compile error for error: expected anyframe->T, found 'anyframe' * compile error for error: expected anyframe->T, found 'i32' * await of a non async function diff --git a/src/analyze.cpp b/src/analyze.cpp index e1b386d9af..4f301d2355 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5230,9 +5230,8 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { field_names.append("@result"); field_types.append(fn_type_id->return_type); - if (codegen_fn_has_err_ret_tracing(g, fn_type_id->return_type)) { - field_names.append("@ptr_stack_trace"); - field_types.append(get_ptr_to_stack_trace_type(g)); + if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) { + (void)get_ptr_to_stack_trace_type(g); // populate g->stack_trace_type field_names.append("@stack_trace"); field_types.append(g->stack_trace_type); @@ -5256,6 +5255,16 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { field_types.append(param_type); } + if (codegen_fn_has_err_ret_tracing_stack(g, fn)) { + (void)get_ptr_to_stack_trace_type(g); // populate g->stack_trace_type + + 
field_names.append("@stack_trace"); + field_types.append(g->stack_trace_type); + + field_names.append("@instruction_addresses"); + field_types.append(get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count)); + } + for (size_t alloca_i = 0; alloca_i < fn->alloca_gen_list.length; alloca_i += 1) { IrInstructionAllocaGen *instruction = fn->alloca_gen_list.at(alloca_i); ZigType *ptr_type = instruction->base.value.type; @@ -7563,8 +7572,7 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re if (have_result_type) { field_types.append(get_llvm_type(g, ptr_result_type)); // ptr_result field_types.append(get_llvm_type(g, result_type)); // result - if (codegen_fn_has_err_ret_tracing(g, result_type)) { - field_types.append(get_llvm_type(g, get_ptr_to_stack_trace_type(g))); // ptr_stack_trace + if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) { field_types.append(get_llvm_type(g, g->stack_trace_type)); // stack_trace field_types.append(get_llvm_type(g, get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count))); // instruction_addresses } @@ -7614,15 +7622,7 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length), ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, result_type))); - if (codegen_fn_has_err_ret_tracing(g, result_type)) { - di_element_types.append( - ZigLLVMCreateDebugMemberType(g->dbuilder, - ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace", - di_file, line, - 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)), - 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)), - 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length), - ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, get_ptr_to_stack_trace_type(g)))); + if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) { di_element_types.append( 
ZigLLVMCreateDebugMemberType(g->dbuilder, ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "stack_trace", diff --git a/src/codegen.cpp b/src/codegen.cpp index f9e0598707..1f8711012f 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -298,7 +298,7 @@ static LLVMLinkage to_llvm_linkage(GlobalLinkageId id) { } // label (grep this): [coro_frame_struct_layout] -static uint32_t frame_index_trace(CodeGen *g, FnTypeId *fn_type_id) { +static uint32_t frame_index_trace_arg(CodeGen *g, FnTypeId *fn_type_id) { // [0] *ReturnType // [1] ReturnType uint32_t return_field_count = type_has_bits(fn_type_id->return_type) ? 2 : 0; @@ -307,14 +307,25 @@ static uint32_t frame_index_trace(CodeGen *g, FnTypeId *fn_type_id) { // label (grep this): [coro_frame_struct_layout] static uint32_t frame_index_arg(CodeGen *g, FnTypeId *fn_type_id) { - bool have_stack_trace = g->have_err_ret_tracing && codegen_fn_has_err_ret_tracing(g, fn_type_id->return_type); - // [0] *StackTrace - // [1] StackTrace - // [2] [stack_trace_ptr_count]usize - uint32_t trace_field_count = have_stack_trace ? 3 : 0; - return frame_index_trace(g, fn_type_id) + trace_field_count; + bool have_stack_trace = codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type); + // [0] StackTrace + // [1] [stack_trace_ptr_count]usize + uint32_t trace_field_count = have_stack_trace ? 
2 : 0; + return frame_index_trace_arg(g, fn_type_id) + trace_field_count; } +// label (grep this): [coro_frame_struct_layout] +static uint32_t frame_index_trace_stack(CodeGen *g, FnTypeId *fn_type_id) { + uint32_t result = frame_index_arg(g, fn_type_id); + for (size_t i = 0; i < fn_type_id->param_count; i += 1) { + if (type_has_bits(fn_type_id->param_info->type)) { + result += 1; + } + } + return result; +} + + static uint32_t get_err_ret_trace_arg_index(CodeGen *g, ZigFn *fn_table_entry) { if (!g->have_err_ret_tracing) { return UINT32_MAX; @@ -1287,9 +1298,6 @@ static LLVMValueRef get_cur_err_ret_trace_val(CodeGen *g, Scope *scope) { if (!g->have_err_ret_tracing) { return nullptr; } - if (fn_is_async(g->cur_fn)) { - return LLVMBuildLoad(g->builder, g->cur_err_ret_trace_val_arg, ""); - } if (g->cur_err_ret_trace_val_stack != nullptr) { return g->cur_err_ret_trace_val_stack; } @@ -3441,6 +3449,10 @@ static void render_async_spills(CodeGen *g) { gen_var_debug_decl(g, var); } } + // label (grep this): [coro_frame_struct_layout] + if (codegen_fn_has_err_ret_tracing_stack(g, g->cur_fn)) { + async_var_index += 2; + } for (size_t alloca_i = 0; alloca_i < g->cur_fn->alloca_gen_list.length; alloca_i += 1) { IrInstructionAllocaGen *instruction = g->cur_fn->alloca_gen_list.at(alloca_i); ZigType *ptr_type = instruction->base.value.type; @@ -3525,7 +3537,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr CallingConvention cc = fn_type->data.fn.fn_type_id.cc; bool first_arg_ret = ret_has_bits && want_first_arg_sret(g, fn_type_id); - bool prefix_arg_err_ret_stack = codegen_fn_has_err_ret_tracing(g, fn_type_id->return_type); + bool prefix_arg_err_ret_stack = codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type); bool is_var_args = fn_type_id->is_var_args; ZigList gen_param_values = {}; LLVMValueRef result_loc = instruction->result_loc ? 
ir_llvm_value(g, instruction->result_loc) : nullptr; @@ -3572,28 +3584,8 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr } } - if (prefix_arg_err_ret_stack) { - uint32_t trace_field_index = frame_index_trace(g, fn_type_id); - LLVMValueRef trace_field_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, - trace_field_index, ""); - LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, - trace_field_index + 1, ""); - LLVMValueRef trace_field_addrs = LLVMBuildStructGEP(g->builder, frame_result_loc, - trace_field_index + 2, ""); - LLVMBuildStore(g->builder, trace_field_ptr, trace_field_ptr_ptr); - - LLVMValueRef index_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 0, ""); - LLVMBuildStore(g->builder, zero, index_ptr); - - LLVMValueRef addrs_slice_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 1, ""); - LLVMValueRef addrs_ptr_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_ptr_index, ""); - LLVMValueRef indices[] = { LLVMConstNull(usize_type_ref), LLVMConstNull(usize_type_ref) }; - LLVMValueRef trace_field_addrs_as_ptr = LLVMBuildInBoundsGEP(g->builder, trace_field_addrs, indices, 2, ""); - LLVMBuildStore(g->builder, trace_field_addrs_as_ptr, addrs_ptr_ptr); - - LLVMValueRef addrs_len_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_len_index, ""); - LLVMBuildStore(g->builder, LLVMConstInt(usize_type_ref, stack_trace_ptr_count, false), addrs_len_ptr); - } + // even if prefix_arg_err_ret_stack is true, let the async function do its own + // initialization. 
} else if (callee_is_async) { frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc); awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr, usize_type_ref, ""); // caller's own frame pointer @@ -3607,13 +3599,8 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr } } - if (prefix_arg_err_ret_stack) { - uint32_t trace_field_index = frame_index_trace(g, fn_type_id); - LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, trace_field_index, ""); - LLVMValueRef err_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope); - LLVMBuildStore(g->builder, err_trace_val, trace_field_ptr); - } - + // even if prefix_arg_err_ret_stack is true, let the async function do its + // error return tracing normally, and then we'll invoke merge_error_return_traces like normal. } if (instruction->is_async || callee_is_async) { assert(frame_result_loc != nullptr); @@ -6790,9 +6777,16 @@ static void do_code_gen(CodeGen *g) { g->cur_async_awaiter_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_awaiter_index, ""); LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_resume_index, ""); g->cur_async_resume_index_ptr = resume_index_ptr; - if (codegen_fn_has_err_ret_tracing(g, fn_type_id->return_type)) { - uint32_t field_index = frame_index_trace(g, fn_type_id); - g->cur_err_ret_trace_val_arg = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, field_index, ""); + LLVMValueRef err_ret_trace_val = nullptr; + uint32_t trace_field_index; + if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) { + trace_field_index = frame_index_trace_arg(g, fn_type_id); + err_ret_trace_val = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, trace_field_index, ""); + g->cur_err_ret_trace_val_arg = err_ret_trace_val; + } else if (codegen_fn_has_err_ret_tracing_stack(g, fn_table_entry)) { + trace_field_index = frame_index_trace_stack(g, fn_type_id); + err_ret_trace_val = 
LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, trace_field_index, ""); + g->cur_err_ret_trace_val_stack = err_ret_trace_val; } LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, ""); @@ -6804,6 +6798,24 @@ static void do_code_gen(CodeGen *g) { LLVMAddCase(switch_instr, zero, entry_block->llvm_block); g->cur_resume_block_count += 1; LLVMPositionBuilderAtEnd(g->builder, entry_block->llvm_block); + if (err_ret_trace_val != nullptr) { + LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, + trace_field_index, ""); + LLVMValueRef trace_field_addrs = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, + trace_field_index + 1, ""); + + LLVMValueRef index_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 0, ""); + LLVMBuildStore(g->builder, zero, index_ptr); + + LLVMValueRef addrs_slice_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 1, ""); + LLVMValueRef addrs_ptr_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_ptr_index, ""); + LLVMValueRef indices[] = { LLVMConstNull(usize_type_ref), LLVMConstNull(usize_type_ref) }; + LLVMValueRef trace_field_addrs_as_ptr = LLVMBuildInBoundsGEP(g->builder, trace_field_addrs, indices, 2, ""); + LLVMBuildStore(g->builder, trace_field_addrs_as_ptr, addrs_ptr_ptr); + + LLVMValueRef addrs_len_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_len_index, ""); + LLVMBuildStore(g->builder, LLVMConstInt(usize_type_ref, stack_trace_ptr_count, false), addrs_len_ptr); + } render_async_var_decls(g, entry_block->instruction_list.at(0)->scope); } else { // create debug variable declarations for parameters @@ -9707,8 +9719,13 @@ CodeGen *codegen_create(Buf *main_pkg_path, Buf *root_src_path, const ZigTarget return g; } -bool codegen_fn_has_err_ret_tracing(CodeGen *g, ZigType *return_type) { +bool codegen_fn_has_err_ret_tracing_arg(CodeGen *g, ZigType *return_type) { return g->have_err_ret_tracing && (return_type->id == ZigTypeIdErrorUnion || return_type->id == ZigTypeIdErrorSet); 
} + +bool codegen_fn_has_err_ret_tracing_stack(CodeGen *g, ZigFn *fn) { + return g->have_err_ret_tracing && fn->calls_or_awaits_errorable_fn && + !codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type); +} diff --git a/src/codegen.hpp b/src/codegen.hpp index 0ee4ce837e..c84ef4bc48 100644 --- a/src/codegen.hpp +++ b/src/codegen.hpp @@ -61,6 +61,7 @@ Buf *codegen_generate_builtin_source(CodeGen *g); TargetSubsystem detect_subsystem(CodeGen *g); void codegen_release_caches(CodeGen *codegen); -bool codegen_fn_has_err_ret_tracing(CodeGen *g, ZigType *return_type); +bool codegen_fn_has_err_ret_tracing_arg(CodeGen *g, ZigType *return_type); +bool codegen_fn_has_err_ret_tracing_stack(CodeGen *g, ZigFn *fn); #endif diff --git a/src/ir.cpp b/src/ir.cpp index cae2768bef..ca54d54c2d 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -15560,9 +15560,6 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c break; } } - if (call_instruction->is_async) { - zig_panic("TODO async call"); - } auto existing_entry = ira->codegen->generic_table.put_unique(generic_id, impl_fn); if (existing_entry) { @@ -24483,6 +24480,10 @@ static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstruction fn_entry->inferred_async_node = instruction->base.source_node; } + if (type_can_fail(result_type)) { + fn_entry->calls_or_awaits_errorable_fn = true; + } + IrInstruction *result = ir_build_await(&ira->new_irb, instruction->base.scope, instruction->base.source_node, frame); result->value.type = result_type; diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index c4f4cd3c99..4cea8d1507 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -337,19 +337,21 @@ test "async fn with inferred error set" { //test "error return trace across suspend points - early return" { // const p = nonFailing(); // resume p; -// const p2 = try async printTrace(p); -// cancel p2; +// 
const p2 = async printTrace(p); //} // //test "error return trace across suspend points - async return" { // const p = nonFailing(); -// const p2 = try async printTrace(p); +// const p2 = async printTrace(p); // resume p; -// cancel p2; //} // //fn nonFailing() (anyframe->anyerror!void) { -// return async suspendThenFail() catch unreachable; +// const Static = struct { +// var frame: @Frame(suspendThenFail) = undefined; +// }; +// Static.frame = async suspendThenFail(); +// return &Static.frame; //} //async fn suspendThenFail() anyerror!void { // suspend; @@ -361,8 +363,8 @@ test "async fn with inferred error set" { // if (@errorReturnTrace()) |trace| { // expect(trace.index == 1); // } else switch (builtin.mode) { -// builtin.Mode.Debug, builtin.Mode.ReleaseSafe => @panic("expected return trace"), -// builtin.Mode.ReleaseFast, builtin.Mode.ReleaseSmall => {}, +// .Debug, .ReleaseSafe => @panic("expected return trace"), +// .ReleaseFast, .ReleaseSmall => {}, // } // }; //} From 01573658729e212bad1c9dc506cd0666682488fb Mon Sep 17 00:00:00 2001 From: Euan Torano Date: Mon, 5 Aug 2019 16:07:52 +0100 Subject: [PATCH 055/125] Add missing S_IS* for NetBSD --- std/os/bits/netbsd.zig | 59 ++++++++++++++++++++++++++++++++++++++++++ std/os/netbsd.zig | 1 + 2 files changed, 60 insertions(+) diff --git a/std/os/bits/netbsd.zig b/std/os/bits/netbsd.zig index ef58bd1356..ff19d090af 100644 --- a/std/os/bits/netbsd.zig +++ b/std/os/bits/netbsd.zig @@ -760,3 +760,62 @@ pub const stack_t = extern struct { ss_size: isize, ss_flags: i32, }; + +pub const S_IFMT = 0o170000; + +pub const S_IFIFO = 0o010000; +pub const S_IFCHR = 0o020000; +pub const S_IFDIR = 0o040000; +pub const S_IFBLK = 0o060000; +pub const S_IFREG = 0o100000; +pub const S_IFLNK = 0o120000; +pub const S_IFSOCK = 0o140000; +pub const S_IFWHT = 0o160000; + +pub const S_ISUID = 0o4000; +pub const S_ISGID = 0o2000; +pub const S_ISVTX = 0o1000; +pub const S_IRWXU = 0o700; +pub const S_IRUSR = 0o400; +pub const S_IWUSR = 
0o200; +pub const S_IXUSR = 0o100; +pub const S_IRWXG = 0o070; +pub const S_IRGRP = 0o040; +pub const S_IWGRP = 0o020; +pub const S_IXGRP = 0o010; +pub const S_IRWXO = 0o007; +pub const S_IROTH = 0o004; +pub const S_IWOTH = 0o002; +pub const S_IXOTH = 0o001; + +pub fn S_ISFIFO(m: u32) bool { + return m & S_IFMT == S_IFIFO; +} + +pub fn S_ISCHR(m: u32) bool { + return m & S_IFMT == S_IFCHR; +} + +pub fn S_ISDIR(m: u32) bool { + return m & S_IFMT == S_IFDIR; +} + +pub fn S_ISBLK(m: u32) bool { + return m & S_IFMT == S_IFBLK; +} + +pub fn S_ISREG(m: u32) bool { + return m & S_IFMT == S_IFREG; +} + +pub fn S_ISLNK(m: u32) bool { + return m & S_IFMT == S_IFLNK; +} + +pub fn S_ISSOCK(m: u32) bool { + return m & S_IFMT == S_IFSOCK; +} + +pub fn S_IWHT(m: u32) bool { + return m & S_IFMT == S_IFWHT; +} diff --git a/std/os/netbsd.zig b/std/os/netbsd.zig index cd63e40f5c..802283fc87 100644 --- a/std/os/netbsd.zig +++ b/std/os/netbsd.zig @@ -2,3 +2,4 @@ const builtin = @import("builtin"); const std = @import("../std.zig"); pub const is_the_target = builtin.os == .netbsd; pub usingnamespace std.c; +pub usingnamespace @import("bits.zig"); \ No newline at end of file From 8aa87ec441c13ce590dc5a9f48aab638326c8b67 Mon Sep 17 00:00:00 2001 From: Euan T Date: Mon, 5 Aug 2019 16:35:40 +0100 Subject: [PATCH 056/125] Add misisng newline --- std/os/netbsd.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/std/os/netbsd.zig b/std/os/netbsd.zig index 802283fc87..d484e7374b 100644 --- a/std/os/netbsd.zig +++ b/std/os/netbsd.zig @@ -2,4 +2,4 @@ const builtin = @import("builtin"); const std = @import("../std.zig"); pub const is_the_target = builtin.os == .netbsd; pub usingnamespace std.c; -pub usingnamespace @import("bits.zig"); \ No newline at end of file +pub usingnamespace @import("bits.zig"); From 7b8c96612f1326525c655abe7f7bb0472cbddee1 Mon Sep 17 00:00:00 2001 From: Euan Torano Date: Mon, 5 Aug 2019 17:21:12 +0100 Subject: [PATCH 057/125] Fix #2993 - use getrandom on 
freebsd --- std/c/freebsd.zig | 1 + std/os.zig | 13 +++++++++++++ 2 files changed, 14 insertions(+) diff --git a/std/c/freebsd.zig b/std/c/freebsd.zig index bcc60e65ed..3d5736d37b 100644 --- a/std/c/freebsd.zig +++ b/std/c/freebsd.zig @@ -6,3 +6,4 @@ pub const _errno = __error; pub extern "c" fn getdents(fd: c_int, buf_ptr: [*]u8, nbytes: usize) usize; pub extern "c" fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) c_int; +pub extern "c" fn getrandom(buf_ptr: [*]u8, buf_len: usize, flags: c_uint) c_int; diff --git a/std/os.zig b/std/os.zig index c2010bf6a9..805f7f1645 100644 --- a/std/os.zig +++ b/std/os.zig @@ -120,6 +120,19 @@ pub fn getrandom(buf: []u8) GetRandomError!void { } } } + if (freebsd.is_the_target) { + while (true) { + const err = std.c.getErrno(std.c.getrandom(buf.ptr, buf.len, 0)); + + switch (err) { + 0 => return, + EINVAL => unreachable, + EFAULT => unreachable, + EINTR => continue, + else => return unexpectedErrno(err), + } + } + } if (wasi.is_the_target) { switch (wasi.random_get(buf.ptr, buf.len)) { 0 => return, From 7f23dac6dce2ce897295e8186f164f695cacdbc9 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 5 Aug 2019 13:49:17 -0400 Subject: [PATCH 058/125] update readme --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 91178f87de..1be33c34f7 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ ![ZIG](https://ziglang.org/zig-logo.svg) -Zig is an open-source programming language designed for **robustness**, +A general-purpose programming language designed for **robustness**, **optimality**, and **maintainability**. ## Resources From 2d25348f63691921d54e12ae938f185ac96e4cf6 Mon Sep 17 00:00:00 2001 From: Euan Torano Date: Tue, 6 Aug 2019 19:32:22 +0100 Subject: [PATCH 059/125] Fix function signature and use a loop to ensure buffer is filled. 
--- std/os/windows.zig | 15 ++++++++++++--- std/os/windows/advapi32.zig | 2 +- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/std/os/windows.zig b/std/os/windows.zig index ac76e8f58f..a8f5b54644 100644 --- a/std/os/windows.zig +++ b/std/os/windows.zig @@ -138,10 +138,19 @@ pub const RtlGenRandomError = error{Unexpected}; /// https://github.com/rust-lang-nursery/rand/issues/111 /// https://bugzilla.mozilla.org/show_bug.cgi?id=504270 pub fn RtlGenRandom(output: []u8) RtlGenRandomError!void { - if (advapi32.RtlGenRandom(output.ptr, output.len) == 0) { - switch (kernel32.GetLastError()) { - else => |err| return unexpectedError(err), + var total_read: usize = 0; + var buff: []u8 = output[0..]; + const max_read_size: ULONG = ULONG(maxInt(ULONG)); + + while (total_read < output.len) { + const to_read: ULONG = @intCast(ULONG, math.min(buff.len, max_read_size)); + + if (advapi32.RtlGenRandom(buff.ptr, to_read) == 0) { + return unexpectedError(kernel32.GetLastError()); } + + total_read += @intCast(usize, to_read); + buff = buff[to_read..]; } } diff --git a/std/os/windows/advapi32.zig b/std/os/windows/advapi32.zig index 165a2c10a3..940f10994c 100644 --- a/std/os/windows/advapi32.zig +++ b/std/os/windows/advapi32.zig @@ -19,5 +19,5 @@ pub extern "advapi32" stdcallcc fn RegQueryValueExW( // RtlGenRandom is known as SystemFunction036 under advapi32 // http://msdn.microsoft.com/en-us/library/windows/desktop/aa387694.aspx */ -pub extern "advapi32" stdcallcc fn SystemFunction036(output: [*]u8, length: usize) BOOL; +pub extern "advapi32" stdcallcc fn SystemFunction036(output: [*]u8, length: ULONG) BOOL; pub const RtlGenRandom = SystemFunction036; From 400500a3afafca8178f13a7e4e1cd0ae7808aff2 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 6 Aug 2019 16:37:25 -0400 Subject: [PATCH 060/125] improve async function semantics * add safety panic for resuming a function which is returning, pending an await * remove IrInstructionResultPtr * add 
IrInstructionReturnBegin. This does the early return in async functions; does nothing in normal functions. * `await` gets a result location * `analyze_fn_async` will call `analyze_fn_body` if necessary. * async function frames have a result pointer field for themselves to access and one for the awaiter to supply before the atomic rmw. when returning, async functions copy the result to the awaiter result pointer, if it is non-null. * async function frames have a stack trace pointer which is supplied by the awaiter before the atomicrmw. Later in the frame is a stack trace struct and addresses, which is used for its own calls and awaits. * when awaiting an async function, if an early return occurred, the awaiter tail resumes the frame. * when an async function returns, early return does a suspend (in IrInstructionReturnBegin) before copying the error return trace data, result, and running the defers. After the last defer runs, the frame will no longer be accessed. * proper acquire/release atomic ordering attributes in async functions. 
--- BRANCH_TODO | 3 + src/all_types.hpp | 27 ++- src/analyze.cpp | 51 +++--- src/codegen.cpp | 441 ++++++++++++++++++++++++++++------------------ src/codegen.hpp | 2 +- src/ir.cpp | 251 ++++++++++++++------------ src/ir_print.cpp | 45 +++-- 7 files changed, 494 insertions(+), 326 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index 00aab06910..1efaf1acc4 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -33,3 +33,6 @@ - anyframe, anyframe->T * safety for double await * call graph analysis to have fewer stack trace frames + * grep for "coroutine" and "coro" and replace all that nomenclature with "async functions" + * when there are multiple calls to async functions in a function, reuse the same frame buffer, so that the + needed bytes is equal to the largest callee's frame diff --git a/src/all_types.hpp b/src/all_types.hpp index 0098a630d8..bc65948579 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1557,6 +1557,7 @@ enum PanicMsgId { PanicMsgIdBadReturn, PanicMsgIdResumedAnAwaitingFn, PanicMsgIdFrameTooSmall, + PanicMsgIdResumedFnPendingAwait, PanicMsgIdCount, }; @@ -1717,10 +1718,12 @@ struct CodeGen { LLVMTargetMachineRef target_machine; ZigLLVMDIFile *dummy_di_file; LLVMValueRef cur_ret_ptr; + LLVMValueRef cur_ret_ptr_ptr; LLVMValueRef cur_fn_val; LLVMValueRef cur_async_switch_instr; LLVMValueRef cur_async_resume_index_ptr; LLVMValueRef cur_async_awaiter_ptr; + LLVMValueRef cur_async_prev_val; LLVMBasicBlockRef cur_preamble_llvm_block; size_t cur_resume_block_count; LLVMValueRef cur_err_ret_trace_val_arg; @@ -2223,6 +2226,7 @@ enum IrInstructionId { IrInstructionIdCallGen, IrInstructionIdConst, IrInstructionIdReturn, + IrInstructionIdReturnBegin, IrInstructionIdCast, IrInstructionIdResizeSlice, IrInstructionIdContainerInitList, @@ -2326,7 +2330,6 @@ enum IrInstructionId { IrInstructionIdImplicitCast, IrInstructionIdResolveResult, IrInstructionIdResetResult, - IrInstructionIdResultPtr, IrInstructionIdOpaqueType, IrInstructionIdSetAlignStack, 
IrInstructionIdArgType, @@ -2355,7 +2358,8 @@ enum IrInstructionId { IrInstructionIdUnionInitNamedField, IrInstructionIdSuspendBegin, IrInstructionIdSuspendFinish, - IrInstructionIdAwait, + IrInstructionIdAwaitSrc, + IrInstructionIdAwaitGen, IrInstructionIdCoroResume, }; @@ -2630,7 +2634,13 @@ struct IrInstructionConst { struct IrInstructionReturn { IrInstruction base; - IrInstruction *value; + IrInstruction *operand; +}; + +struct IrInstructionReturnBegin { + IrInstruction base; + + IrInstruction *operand; }; enum CastOp { @@ -3136,6 +3146,7 @@ struct IrInstructionTestErrSrc { IrInstruction base; bool resolve_err_set; + bool base_ptr_is_payload; IrInstruction *base_ptr; }; @@ -3603,10 +3614,18 @@ struct IrInstructionSuspendFinish { IrInstructionSuspendBegin *begin; }; -struct IrInstructionAwait { +struct IrInstructionAwaitSrc { IrInstruction base; IrInstruction *frame; + ResultLoc *result_loc; +}; + +struct IrInstructionAwaitGen { + IrInstruction base; + + IrInstruction *frame; + IrInstruction *result_loc; }; struct IrInstructionCoroResume { diff --git a/src/analyze.cpp b/src/analyze.cpp index 4f301d2355..36eeaeac9c 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -3848,6 +3848,13 @@ static void analyze_fn_async(CodeGen *g, ZigFn *fn) { if (callee->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified) continue; + if (callee->anal_state == FnAnalStateReady) { + analyze_fn_body(g, callee); + if (callee->anal_state == FnAnalStateInvalid) { + fn->anal_state = FnAnalStateInvalid; + return; + } + } assert(callee->anal_state == FnAnalStateComplete); analyze_fn_async(g, callee); if (callee->anal_state == FnAnalStateInvalid) { @@ -5224,20 +5231,18 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; ZigType *ptr_return_type = get_pointer_to_type(g, fn_type_id->return_type, false); - field_names.append("@ptr_result"); + field_names.append("@result_ptr_callee"); + 
field_types.append(ptr_return_type); + + field_names.append("@result_ptr_awaiter"); field_types.append(ptr_return_type); field_names.append("@result"); field_types.append(fn_type_id->return_type); if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) { - (void)get_ptr_to_stack_trace_type(g); // populate g->stack_trace_type - - field_names.append("@stack_trace"); - field_types.append(g->stack_trace_type); - - field_names.append("@instruction_addresses"); - field_types.append(get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count)); + field_names.append("@ptr_stack_trace"); + field_types.append(get_ptr_to_stack_trace_type(g)); } for (size_t arg_i = 0; arg_i < fn_type_id->param_count; arg_i += 1) { @@ -5255,7 +5260,7 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { field_types.append(param_type); } - if (codegen_fn_has_err_ret_tracing_stack(g, fn)) { + if (codegen_fn_has_err_ret_tracing_stack(g, fn, true)) { (void)get_ptr_to_stack_trace_type(g); // populate g->stack_trace_type field_names.append("@stack_trace"); @@ -7570,11 +7575,11 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re bool have_result_type = result_type != nullptr && type_has_bits(result_type); if (have_result_type) { - field_types.append(get_llvm_type(g, ptr_result_type)); // ptr_result + field_types.append(get_llvm_type(g, ptr_result_type)); // result_ptr_callee + field_types.append(get_llvm_type(g, ptr_result_type)); // result_ptr_awaiter field_types.append(get_llvm_type(g, result_type)); // result if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) { - field_types.append(get_llvm_type(g, g->stack_trace_type)); // stack_trace - field_types.append(get_llvm_type(g, get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count))); // instruction_addresses + field_types.append(get_llvm_type(g, get_ptr_to_stack_trace_type(g))); // ptr_stack_trace } } LLVMStructSetBody(frame_header_type, field_types.items, 
field_types.length, false); @@ -7607,7 +7612,15 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re if (have_result_type) { di_element_types.append( ZigLLVMCreateDebugMemberType(g->dbuilder, - ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_result", + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result_ptr_callee", + di_file, line, + 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)), + 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)), + 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length), + ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_result_type))); + di_element_types.append( + ZigLLVMCreateDebugMemberType(g->dbuilder, + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result_ptr_awaiter", di_file, line, 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)), 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)), @@ -7625,20 +7638,12 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) { di_element_types.append( ZigLLVMCreateDebugMemberType(g->dbuilder, - ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "stack_trace", + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace", di_file, line, 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)), 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)), 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length), - ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, g->stack_trace_type))); - di_element_types.append( - ZigLLVMCreateDebugMemberType(g->dbuilder, - ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "instruction_addresses", - di_file, line, - 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)), - 
8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)), - 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length), - ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count)))); + ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, get_ptr_to_stack_trace_type(g)))); } }; diff --git a/src/codegen.cpp b/src/codegen.cpp index 1f8711012f..cf846d99e9 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -24,6 +24,14 @@ #include #include +enum ResumeId { + ResumeIdManual, + ResumeIdReturn, + ResumeIdCall, + + ResumeIdAwaitEarlyReturn // must be last +}; + static void init_darwin_native(CodeGen *g) { char *osx_target = getenv("MACOSX_DEPLOYMENT_TARGET"); char *ios_target = getenv("IPHONEOS_DEPLOYMENT_TARGET"); @@ -298,25 +306,25 @@ static LLVMLinkage to_llvm_linkage(GlobalLinkageId id) { } // label (grep this): [coro_frame_struct_layout] -static uint32_t frame_index_trace_arg(CodeGen *g, FnTypeId *fn_type_id) { - // [0] *ReturnType - // [1] ReturnType - uint32_t return_field_count = type_has_bits(fn_type_id->return_type) ? 2 : 0; +static uint32_t frame_index_trace_arg(CodeGen *g, ZigType *return_type) { + // [0] *ReturnType (callee's) + // [1] *ReturnType (awaiter's) + // [2] ReturnType + uint32_t return_field_count = type_has_bits(return_type) ? 3 : 0; return coro_ret_start + return_field_count; } // label (grep this): [coro_frame_struct_layout] -static uint32_t frame_index_arg(CodeGen *g, FnTypeId *fn_type_id) { - bool have_stack_trace = codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type); - // [0] StackTrace - // [1] [stack_trace_ptr_count]usize - uint32_t trace_field_count = have_stack_trace ? 
2 : 0; - return frame_index_trace_arg(g, fn_type_id) + trace_field_count; +static uint32_t frame_index_arg(CodeGen *g, ZigType *return_type) { + bool have_stack_trace = codegen_fn_has_err_ret_tracing_arg(g, return_type); + // [0] *StackTrace + uint32_t trace_field_count = have_stack_trace ? 1 : 0; + return frame_index_trace_arg(g, return_type) + trace_field_count; } // label (grep this): [coro_frame_struct_layout] static uint32_t frame_index_trace_stack(CodeGen *g, FnTypeId *fn_type_id) { - uint32_t result = frame_index_arg(g, fn_type_id); + uint32_t result = frame_index_arg(g, fn_type_id->return_type); for (size_t i = 0; i < fn_type_id->param_count; i += 1) { if (type_has_bits(fn_type_id->param_info->type)) { result += 1; @@ -901,7 +909,7 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) { case PanicMsgIdPtrCastNull: return buf_create_from_str("cast causes pointer to be null"); case PanicMsgIdBadResume: - return buf_create_from_str("invalid resume of async function"); + return buf_create_from_str("resumed an async function which already returned"); case PanicMsgIdBadAwait: return buf_create_from_str("async function awaited twice"); case PanicMsgIdBadReturn: @@ -910,6 +918,8 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) { return buf_create_from_str("awaiting function resumed"); case PanicMsgIdFrameTooSmall: return buf_create_from_str("frame too small"); + case PanicMsgIdResumedFnPendingAwait: + return buf_create_from_str("resumed an async function which can only be awaited"); } zig_unreachable(); } @@ -1301,7 +1311,14 @@ static LLVMValueRef get_cur_err_ret_trace_val(CodeGen *g, Scope *scope) { if (g->cur_err_ret_trace_val_stack != nullptr) { return g->cur_err_ret_trace_val_stack; } - return g->cur_err_ret_trace_val_arg; + if (g->cur_err_ret_trace_val_arg != nullptr) { + if (fn_is_async(g->cur_fn)) { + return LLVMBuildLoad(g->builder, g->cur_err_ret_trace_val_arg, ""); + } else { + return g->cur_err_ret_trace_val_arg; + } + } + return nullptr; } static void 
gen_safety_crash_for_err(CodeGen *g, LLVMValueRef err_val, Scope *scope) { @@ -2023,99 +2040,191 @@ static LLVMValueRef ir_render_save_err_ret_addr(CodeGen *g, IrExecutable *execut return call_instruction; } -static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, - IrInstructionReturn *return_instruction) +static void gen_assert_resume_id(CodeGen *g, IrInstruction *source_instr, ResumeId resume_id, PanicMsgId msg_id, + LLVMBasicBlockRef end_bb) { + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; + LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume"); + if (end_bb == nullptr) end_bb = LLVMAppendBasicBlock(g->cur_fn_val, "OkResume"); + LLVMValueRef ok_bit; + if (resume_id == ResumeIdAwaitEarlyReturn) { + LLVMValueRef last_value = LLVMBuildSub(g->builder, LLVMConstAllOnes(usize_type_ref), + LLVMConstInt(usize_type_ref, ResumeIdAwaitEarlyReturn, false), ""); + ok_bit = LLVMBuildICmp(g->builder, LLVMIntULT, LLVMGetParam(g->cur_fn_val, 1), last_value, ""); + } else { + LLVMValueRef expected_value = LLVMBuildSub(g->builder, LLVMConstAllOnes(usize_type_ref), + LLVMConstInt(usize_type_ref, resume_id, false), ""); + ok_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, LLVMGetParam(g->cur_fn_val, 1), expected_value, ""); + } + LLVMBuildCondBr(g->builder, ok_bit, end_bb, bad_resume_block); + + LLVMPositionBuilderAtEnd(g->builder, bad_resume_block); + gen_assertion(g, msg_id, source_instr); + + LLVMPositionBuilderAtEnd(g->builder, end_bb); +} + +static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef target_frame_ptr, + ResumeId resume_id, LLVMValueRef arg_val) +{ + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; + if (fn_val == nullptr) { + if (g->anyframe_fn_type == nullptr) { + (void)get_llvm_type(g, get_any_frame_type(g, nullptr)); + } + LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_fn_ptr_index, ""); + fn_val = 
LLVMBuildLoad(g->builder, fn_ptr_ptr, ""); + } + if (arg_val == nullptr) { + arg_val = LLVMBuildSub(g->builder, LLVMConstAllOnes(usize_type_ref), + LLVMConstInt(usize_type_ref, resume_id, false), ""); + } else { + assert(resume_id == ResumeIdAwaitEarlyReturn); + } + LLVMValueRef args[] = {target_frame_ptr, arg_val}; + return ZigLLVMBuildCall(g->builder, fn_val, args, 2, LLVMFastCallConv, ZigLLVM_FnInlineAuto, ""); +} + +static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable, + IrInstructionReturnBegin *instruction) +{ + if (!fn_is_async(g->cur_fn)) return nullptr; + + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; + + bool ret_type_has_bits = instruction->operand != nullptr && + type_has_bits(instruction->operand->value.type); + ZigType *ret_type = ret_type_has_bits ? instruction->operand->value.type : nullptr; + if (ret_type_has_bits && !handle_is_ptr(ret_type)) { + // It's a scalar, so it didn't get written to the result ptr. Do that before the atomic rmw. + LLVMValueRef result_ptr = LLVMBuildLoad(g->builder, g->cur_ret_ptr_ptr, ""); + LLVMBuildStore(g->builder, ir_llvm_value(g, instruction->operand), result_ptr); + } + + // Prepare to be suspended. We might end up not having to suspend though. 
+ LLVMBasicBlockRef resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, "ReturnResume"); + size_t new_block_index = g->cur_resume_block_count; + g->cur_resume_block_count += 1; + LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false); + LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, resume_bb); + LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr); + + LLVMValueRef zero = LLVMConstNull(usize_type_ref); + LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref); + LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, g->cur_async_awaiter_ptr, + all_ones, LLVMAtomicOrderingAcquire, g->is_single_threaded); + + LLVMBasicBlockRef bad_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadReturn"); + LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn"); + LLVMBasicBlockRef resume_them_block = LLVMAppendBasicBlock(g->cur_fn_val, "ResumeThem"); + + LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, resume_them_block, 2); + LLVMBasicBlockRef switch_bb = LLVMGetInsertBlock(g->builder); + + LLVMAddCase(switch_instr, zero, early_return_block); + LLVMAddCase(switch_instr, all_ones, bad_return_block); + + // Something has gone horribly wrong, and this is an invalid second return. + LLVMPositionBuilderAtEnd(g->builder, bad_return_block); + gen_assertion(g, PanicMsgIdBadReturn, &instruction->base); + + // The caller has not done an await yet. So we suspend at the return instruction, until a + // cancel or await is performed. + LLVMPositionBuilderAtEnd(g->builder, early_return_block); + LLVMBuildRetVoid(g->builder); + + // Add a safety check for when getting resumed by the awaiter. 
+ LLVMPositionBuilderAtEnd(g->builder, resume_bb); + LLVMBasicBlockRef after_resume_block = LLVMGetInsertBlock(g->builder); + gen_assert_resume_id(g, &instruction->base, ResumeIdAwaitEarlyReturn, PanicMsgIdResumedFnPendingAwait, + resume_them_block); + + // We need to resume the caller by tail calling them. + // That will happen when rendering IrInstructionReturn after running the defers/errdefers. + // We either got here from Entry (function call) or from the switch above + g->cur_async_prev_val = LLVMBuildPhi(g->builder, usize_type_ref, ""); + LLVMValueRef incoming_values[] = { LLVMGetParam(g->cur_fn_val, 1), prev_val }; + LLVMBasicBlockRef incoming_blocks[] = { after_resume_block, switch_bb }; + LLVMAddIncoming(g->cur_async_prev_val, incoming_values, incoming_blocks, 2); + + return nullptr; +} + +static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *instruction) { if (fn_is_async(g->cur_fn)) { LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; - bool ret_type_has_bits = return_instruction->value != nullptr && - type_has_bits(return_instruction->value->value.type); - ZigType *ret_type = ret_type_has_bits ? return_instruction->value->value.type : nullptr; + bool ret_type_has_bits = instruction->operand != nullptr && + type_has_bits(instruction->operand->value.type); + ZigType *ret_type = ret_type_has_bits ? 
instruction->operand->value.type : nullptr; - if (ir_want_runtime_safety(g, &return_instruction->base)) { + if (ir_want_runtime_safety(g, &instruction->base)) { LLVMValueRef new_resume_index = LLVMConstAllOnes(usize_type_ref); LLVMBuildStore(g->builder, new_resume_index, g->cur_async_resume_index_ptr); } - LLVMValueRef result_ptr_as_usize; if (ret_type_has_bits) { - LLVMValueRef result_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_ret_start, ""); - LLVMValueRef result_ptr = LLVMBuildLoad(g->builder, result_ptr_ptr, ""); - if (!handle_is_ptr(ret_type)) { - // It's a scalar, so it didn't get written to the result ptr. Do that now. - LLVMBuildStore(g->builder, ir_llvm_value(g, return_instruction->value), result_ptr); - } - result_ptr_as_usize = LLVMBuildPtrToInt(g->builder, result_ptr, usize_type_ref, ""); - } else { - // For debug safety, this value has to be anything other than all 1's, which signals - // that it is being resumed. 0 is a bad choice since null pointers are special. - result_ptr_as_usize = ir_want_runtime_safety(g, &return_instruction->base) ? - LLVMConstInt(usize_type_ref, 1, false) : LLVMGetUndef(usize_type_ref); + // If the awaiter result pointer is non-null, we need to copy the result to there. 
+ LLVMBasicBlockRef copy_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResult"); + LLVMBasicBlockRef copy_end_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResultEnd"); + LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_ret_start + 1, ""); + LLVMValueRef awaiter_ret_ptr = LLVMBuildLoad(g->builder, awaiter_ret_ptr_ptr, ""); + LLVMValueRef zero_ptr = LLVMConstNull(LLVMTypeOf(awaiter_ret_ptr)); + LLVMValueRef need_copy_bit = LLVMBuildICmp(g->builder, LLVMIntNE, awaiter_ret_ptr, zero_ptr, ""); + LLVMBuildCondBr(g->builder, need_copy_bit, copy_block, copy_end_block); + + LLVMPositionBuilderAtEnd(g->builder, copy_block); + LLVMValueRef ret_ptr = LLVMBuildLoad(g->builder, g->cur_ret_ptr_ptr, ""); + LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0); + LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, awaiter_ret_ptr, ptr_u8, ""); + LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, ret_ptr, ptr_u8, ""); + bool is_volatile = false; + uint32_t abi_align = get_abi_alignment(g, ret_type); + LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, ret_type), false); + ZigLLVMBuildMemCpy(g->builder, + dest_ptr_casted, abi_align, + src_ptr_casted, abi_align, byte_count_val, is_volatile); + LLVMBuildBr(g->builder, copy_end_block); + + LLVMPositionBuilderAtEnd(g->builder, copy_end_block); } - LLVMValueRef zero = LLVMConstNull(usize_type_ref); - LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref); - LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, g->cur_async_awaiter_ptr, - all_ones, LLVMAtomicOrderingMonotonic, g->is_single_threaded); - - LLVMBasicBlockRef bad_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadReturn"); - LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn"); - LLVMBasicBlockRef resume_them_block = LLVMAppendBasicBlock(g->cur_fn_val, "ResumeThem"); - - LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, 
prev_val, resume_them_block, 2); - - LLVMAddCase(switch_instr, zero, early_return_block); - LLVMAddCase(switch_instr, all_ones, bad_return_block); - - // Something has gone horribly wrong, and this is an invalid second return. - LLVMPositionBuilderAtEnd(g->builder, bad_return_block); - gen_assertion(g, PanicMsgIdBadReturn, &return_instruction->base); - - // The caller will deal with fetching the result - we're done. - LLVMPositionBuilderAtEnd(g->builder, early_return_block); - LLVMBuildRetVoid(g->builder); // We need to resume the caller by tail calling them. - LLVMPositionBuilderAtEnd(g->builder, resume_them_block); ZigType *any_frame_type = get_any_frame_type(g, ret_type); - LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, prev_val, + LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, g->cur_async_prev_val, get_llvm_type(g, any_frame_type), ""); - LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, their_frame_ptr, coro_fn_ptr_index, ""); - LLVMValueRef awaiter_fn = LLVMBuildLoad(g->builder, fn_ptr_ptr, ""); - LLVMValueRef args[] = {their_frame_ptr, result_ptr_as_usize}; - LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, awaiter_fn, args, 2, LLVMFastCallConv, - ZigLLVM_FnInlineAuto, ""); + LLVMValueRef call_inst = gen_resume(g, nullptr, their_frame_ptr, ResumeIdReturn, nullptr); ZigLLVMSetTailCall(call_inst); LLVMBuildRetVoid(g->builder); return nullptr; } if (want_first_arg_sret(g, &g->cur_fn->type_entry->data.fn.fn_type_id)) { - if (return_instruction->value == nullptr) { + if (instruction->operand == nullptr) { LLVMBuildRetVoid(g->builder); return nullptr; } assert(g->cur_ret_ptr); - src_assert(return_instruction->value->value.special != ConstValSpecialRuntime, - return_instruction->base.source_node); - LLVMValueRef value = ir_llvm_value(g, return_instruction->value); - ZigType *return_type = return_instruction->value->value.type; + src_assert(instruction->operand->value.special != ConstValSpecialRuntime, + 
instruction->base.source_node); + LLVMValueRef value = ir_llvm_value(g, instruction->operand); + ZigType *return_type = instruction->operand->value.type; gen_assign_raw(g, g->cur_ret_ptr, get_pointer_to_type(g, return_type, false), value); LLVMBuildRetVoid(g->builder); } else if (g->cur_fn->type_entry->data.fn.fn_type_id.cc != CallingConventionAsync && handle_is_ptr(g->cur_fn->type_entry->data.fn.fn_type_id.return_type)) { - if (return_instruction->value == nullptr) { + if (instruction->operand == nullptr) { LLVMValueRef by_val_value = gen_load_untyped(g, g->cur_ret_ptr, 0, false, ""); LLVMBuildRet(g->builder, by_val_value); } else { - LLVMValueRef value = ir_llvm_value(g, return_instruction->value); + LLVMValueRef value = ir_llvm_value(g, instruction->operand); LLVMValueRef by_val_value = gen_load_untyped(g, value, 0, false, ""); LLVMBuildRet(g->builder, by_val_value); } - } else if (return_instruction->value == nullptr) { + } else if (instruction->operand == nullptr) { LLVMBuildRetVoid(g->builder); } else { - LLVMValueRef value = ir_llvm_value(g, return_instruction->value); + LLVMValueRef value = ir_llvm_value(g, instruction->operand); LLVMBuildRet(g->builder, value); } return nullptr; @@ -3417,7 +3526,7 @@ static void set_call_instr_sret(CodeGen *g, LLVMValueRef call_instr) { static void render_async_spills(CodeGen *g) { ZigType *fn_type = g->cur_fn->type_entry; ZigType *import = get_scope_import(&g->cur_fn->fndef_scope->base); - uint32_t async_var_index = frame_index_arg(g, &fn_type->data.fn.fn_type_id); + uint32_t async_var_index = frame_index_arg(g, fn_type->data.fn.fn_type_id.return_type); for (size_t var_i = 0; var_i < g->cur_fn->variable_list.length; var_i += 1) { ZigVar *var = g->cur_fn->variable_list.at(var_i); @@ -3450,7 +3559,7 @@ static void render_async_spills(CodeGen *g) { } } // label (grep this): [coro_frame_struct_layout] - if (codegen_fn_has_err_ret_tracing_stack(g, g->cur_fn)) { + if (codegen_fn_has_err_ret_tracing_stack(g, g->cur_fn, true)) { 
async_var_index += 2; } for (size_t alloca_i = 0; alloca_i < g->cur_fn->alloca_gen_list.length; alloca_i += 1) { @@ -3553,7 +3662,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr if (ret_has_bits) { // Use the result location which is inside the frame if this is an async call. - ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start + 1, ""); + ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start + 2, ""); } } else { LLVMValueRef frame_slice_ptr = ir_llvm_value(g, instruction->new_stack); @@ -3590,17 +3699,26 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc); awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr, usize_type_ref, ""); // caller's own frame pointer if (ret_has_bits) { - if (result_loc != nullptr) { + if (result_loc == nullptr) { + // return type is a scalar, but we still need a pointer to it. Use the async fn frame. + ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start + 2, ""); + } else { // Use the call instruction's result location. ret_ptr = result_loc; - } else { - // return type is a scalar, but we still need a pointer to it. Use the async fn frame. - ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start + 1, ""); } + + // Store a zero in the awaiter's result ptr to indicate we do not need a copy made. + LLVMValueRef awaiter_ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start + 1, ""); + LLVMValueRef zero_ptr = LLVMConstNull(LLVMGetElementType(LLVMTypeOf(awaiter_ret_ptr))); + LLVMBuildStore(g->builder, zero_ptr, awaiter_ret_ptr); } - // even if prefix_arg_err_ret_stack is true, let the async function do its - // error return tracing normally, and then we'll invoke merge_error_return_traces like normal. 
+ if (prefix_arg_err_ret_stack) { + LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, + frame_index_trace_arg(g, src_return_type), ""); + LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope); + LLVMBuildStore(g->builder, my_err_ret_trace_val, err_ret_trace_ptr_ptr); + } } if (instruction->is_async || callee_is_async) { assert(frame_result_loc != nullptr); @@ -3652,7 +3770,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMValueRef result; if (instruction->is_async || callee_is_async) { - uint32_t arg_start_i = frame_index_arg(g, &fn_type->data.fn.fn_type_id); + uint32_t arg_start_i = frame_index_arg(g, fn_type->data.fn.fn_type_id.return_type); LLVMValueRef casted_frame; if (instruction->new_stack != nullptr) { @@ -3678,8 +3796,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr } } if (instruction->is_async) { - LLVMValueRef args[] = {frame_result_loc, LLVMGetUndef(usize_type_ref)}; - ZigLLVMBuildCall(g->builder, fn_val, args, 2, llvm_cc, fn_inline, ""); + gen_resume(g, fn_val, frame_result_loc, ResumeIdCall, nullptr); if (instruction->new_stack != nullptr) { return frame_result_loc; } @@ -3694,36 +3811,23 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, call_bb); LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr); - LLVMValueRef args[] = {frame_result_loc, LLVMGetUndef(usize_type_ref)}; - LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, fn_val, args, 2, llvm_cc, fn_inline, ""); + + LLVMValueRef call_inst = gen_resume(g, fn_val, frame_result_loc, ResumeIdCall, nullptr); ZigLLVMSetTailCall(call_inst); LLVMBuildRetVoid(g->builder); LLVMPositionBuilderAtEnd(g->builder, call_bb); - if (ir_want_runtime_safety(g, &instruction->base)) { - LLVMBasicBlockRef bad_resume_block = 
LLVMAppendBasicBlock(g->cur_fn_val, "BadResume"); - LLVMBasicBlockRef ok_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "OkResume"); - LLVMValueRef arg_val = LLVMGetParam(g->cur_fn_val, 1); - LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref); - LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntNE, arg_val, all_ones, ""); - LLVMBuildCondBr(g->builder, ok_bit, ok_resume_block, bad_resume_block); - - LLVMPositionBuilderAtEnd(g->builder, bad_resume_block); - gen_safety_crash(g, PanicMsgIdResumedAnAwaitingFn); - - LLVMPositionBuilderAtEnd(g->builder, ok_resume_block); - } - + gen_assert_resume_id(g, &instruction->base, ResumeIdReturn, PanicMsgIdResumedAnAwaitingFn, nullptr); render_async_var_decls(g, instruction->base.scope); - if (type_has_bits(src_return_type)) { - LLVMValueRef spilled_result_ptr = LLVMGetParam(g->cur_fn_val, 1); - LLVMValueRef casted_spilled_result_ptr = LLVMBuildIntToPtr(g->builder, spilled_result_ptr, - get_llvm_type(g, ptr_result_type), ""); - return get_handle_value(g, casted_spilled_result_ptr, src_return_type, ptr_result_type); - } else { + if (!type_has_bits(src_return_type)) return nullptr; - } + + if (result_loc != nullptr) + return get_handle_value(g, result_loc, src_return_type, ptr_result_type); + + LLVMValueRef result_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start + 2, ""); + return LLVMBuildLoad(g->builder, result_ptr, ""); } if (instruction->new_stack == nullptr) { @@ -5191,8 +5295,9 @@ static LLVMValueRef ir_render_suspend_finish(CodeGen *g, IrExecutable *executabl return nullptr; } -static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInstructionAwait *instruction) { +static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInstructionAwaitGen *instruction) { LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; + LLVMValueRef zero = LLVMConstNull(usize_type_ref); LLVMValueRef target_frame_ptr = ir_llvm_value(g, instruction->frame); 
ZigType *result_type = instruction->base.value.type; ZigType *ptr_result_type = get_pointer_to_type(g, result_type, true); @@ -5208,86 +5313,75 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst // At this point resuming the function will do the correct thing. // This code is as if it is running inside the suspend block. + // supply the awaiter return pointer + LLVMValueRef result_loc = (instruction->result_loc == nullptr) ? + nullptr : ir_llvm_value(g, instruction->result_loc); + if (type_has_bits(result_type)) { + LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_ret_start + 1, ""); + if (result_loc == nullptr) { + // no copy needed + LLVMBuildStore(g->builder, zero, awaiter_ret_ptr_ptr); + } else { + LLVMBuildStore(g->builder, result_loc, awaiter_ret_ptr_ptr); + } + } + + // supply the error return trace pointer + LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope); + if (my_err_ret_trace_val != nullptr) { + LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, + frame_index_trace_arg(g, result_type), ""); + LLVMBuildStore(g->builder, my_err_ret_trace_val, err_ret_trace_ptr_ptr); + } + // caller's own frame pointer LLVMValueRef awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr, usize_type_ref, ""); LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_awaiter_index, ""); - LLVMValueRef result_ptr_as_usize; - if (type_has_bits(result_type)) { - LLVMValueRef result_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_ret_start, ""); - LLVMValueRef result_ptr = LLVMBuildLoad(g->builder, result_ptr_ptr, ""); - result_ptr_as_usize = LLVMBuildPtrToInt(g->builder, result_ptr, usize_type_ref, ""); - } else { - result_ptr_as_usize = LLVMGetUndef(usize_type_ref); - } LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_init_val, - 
LLVMAtomicOrderingMonotonic, g->is_single_threaded); + LLVMAtomicOrderingRelease, g->is_single_threaded); LLVMBasicBlockRef bad_await_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadAwait"); LLVMBasicBlockRef complete_suspend_block = LLVMAppendBasicBlock(g->cur_fn_val, "CompleteSuspend"); + LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn"); - LLVMValueRef zero = LLVMConstNull(usize_type_ref); LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref); LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, bad_await_block, 2); - LLVMBasicBlockRef predecessor_bb = LLVMGetInsertBlock(g->builder); LLVMAddCase(switch_instr, zero, complete_suspend_block); - - // Early return: The async function has already completed. No need to suspend. - LLVMAddCase(switch_instr, all_ones, resume_bb); + LLVMAddCase(switch_instr, all_ones, early_return_block); // We discovered that another awaiter was already here. LLVMPositionBuilderAtEnd(g->builder, bad_await_block); gen_assertion(g, PanicMsgIdBadAwait, &instruction->base); + // Early return: The async function has already completed, but it is suspending before setting the result, + // populating the error return trace if applicable, and running the defers. + // Tail resume it now, so that it can complete. + LLVMPositionBuilderAtEnd(g->builder, early_return_block); + LLVMValueRef call_inst = gen_resume(g, nullptr, target_frame_ptr, ResumeIdAwaitEarlyReturn, awaiter_init_val); + ZigLLVMSetTailCall(call_inst); + LLVMBuildRetVoid(g->builder); + // Rely on the target to resume us from suspension. 
LLVMPositionBuilderAtEnd(g->builder, complete_suspend_block); LLVMBuildRetVoid(g->builder); LLVMPositionBuilderAtEnd(g->builder, resume_bb); - // We either got here from Entry (function call) or from the switch above - LLVMValueRef spilled_result_ptr = LLVMBuildPhi(g->builder, usize_type_ref, ""); - LLVMValueRef incoming_values[] = { LLVMGetParam(g->cur_fn_val, 1), result_ptr_as_usize }; - LLVMBasicBlockRef incoming_blocks[] = { g->cur_preamble_llvm_block, predecessor_bb }; - LLVMAddIncoming(spilled_result_ptr, incoming_values, incoming_blocks, 2); - - if (ir_want_runtime_safety(g, &instruction->base)) { - LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume"); - LLVMBasicBlockRef ok_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "OkResume"); - LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref); - LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntNE, spilled_result_ptr, all_ones, ""); - LLVMBuildCondBr(g->builder, ok_bit, ok_resume_block, bad_resume_block); - - LLVMPositionBuilderAtEnd(g->builder, bad_resume_block); - gen_safety_crash(g, PanicMsgIdResumedAnAwaitingFn); - - LLVMPositionBuilderAtEnd(g->builder, ok_resume_block); - } - - render_async_var_decls(g, instruction->base.scope); - - if (type_has_bits(result_type)) { - LLVMValueRef casted_spilled_result_ptr = LLVMBuildIntToPtr(g->builder, spilled_result_ptr, - get_llvm_type(g, ptr_result_type), ""); - return get_handle_value(g, casted_spilled_result_ptr, result_type, ptr_result_type); - } else { - return nullptr; + gen_assert_resume_id(g, &instruction->base, ResumeIdReturn, PanicMsgIdResumedAnAwaitingFn, nullptr); + if (type_has_bits(result_type) && result_loc != nullptr) { + return get_handle_value(g, result_loc, result_type, ptr_result_type); } + return nullptr; } static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, IrInstructionCoroResume *instruction) { - LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; 
LLVMValueRef frame = ir_llvm_value(g, instruction->frame); ZigType *frame_type = instruction->frame->value.type; assert(frame_type->id == ZigTypeIdAnyFrame); - LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, frame, coro_fn_ptr_index, ""); - LLVMValueRef uncasted_fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, ""); - LLVMValueRef fn_val = LLVMBuildIntToPtr(g->builder, uncasted_fn_val, g->anyframe_fn_type, ""); - LLVMValueRef arg_val = ir_want_runtime_safety(g, &instruction->base) ? - LLVMConstAllOnes(usize_type_ref) : LLVMGetUndef(usize_type_ref); - LLVMValueRef args[] = {frame, arg_val}; - ZigLLVMBuildCall(g->builder, fn_val, args, 2, LLVMFastCallConv, ZigLLVM_FnInlineAuto, ""); + + gen_resume(g, nullptr, frame, ResumeIdManual, nullptr); return nullptr; } @@ -5383,7 +5477,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, case IrInstructionIdImplicitCast: case IrInstructionIdResolveResult: case IrInstructionIdResetResult: - case IrInstructionIdResultPtr: case IrInstructionIdContainerInitList: case IrInstructionIdSliceSrc: case IrInstructionIdRef: @@ -5393,10 +5486,13 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, case IrInstructionIdFrameType: case IrInstructionIdFrameSizeSrc: case IrInstructionIdAllocaGen: + case IrInstructionIdAwaitSrc: zig_unreachable(); case IrInstructionIdDeclVarGen: return ir_render_decl_var(g, executable, (IrInstructionDeclVarGen *)instruction); + case IrInstructionIdReturnBegin: + return ir_render_return_begin(g, executable, (IrInstructionReturnBegin *)instruction); case IrInstructionIdReturn: return ir_render_return(g, executable, (IrInstructionReturn *)instruction); case IrInstructionIdBinOp: @@ -5547,8 +5643,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_coro_resume(g, executable, (IrInstructionCoroResume *)instruction); case IrInstructionIdFrameSizeGen: return ir_render_frame_size(g, executable, 
(IrInstructionFrameSizeGen *)instruction); - case IrInstructionIdAwait: - return ir_render_await(g, executable, (IrInstructionAwait *)instruction); + case IrInstructionIdAwaitGen: + return ir_render_await(g, executable, (IrInstructionAwaitGen *)instruction); } zig_unreachable(); } @@ -6777,16 +6873,19 @@ static void do_code_gen(CodeGen *g) { g->cur_async_awaiter_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_awaiter_index, ""); LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_resume_index, ""); g->cur_async_resume_index_ptr = resume_index_ptr; - LLVMValueRef err_ret_trace_val = nullptr; - uint32_t trace_field_index; + + if (type_has_bits(fn_type_id->return_type)) { + g->cur_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_ret_start, ""); + } if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) { - trace_field_index = frame_index_trace_arg(g, fn_type_id); - err_ret_trace_val = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, trace_field_index, ""); - g->cur_err_ret_trace_val_arg = err_ret_trace_val; - } else if (codegen_fn_has_err_ret_tracing_stack(g, fn_table_entry)) { - trace_field_index = frame_index_trace_stack(g, fn_type_id); - err_ret_trace_val = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, trace_field_index, ""); - g->cur_err_ret_trace_val_stack = err_ret_trace_val; + uint32_t trace_field_index = frame_index_trace_arg(g, fn_type_id->return_type); + g->cur_err_ret_trace_val_arg = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, trace_field_index, ""); + } + uint32_t trace_field_index_stack = UINT32_MAX; + if (codegen_fn_has_err_ret_tracing_stack(g, fn_table_entry, true)) { + trace_field_index_stack = frame_index_trace_stack(g, fn_type_id); + g->cur_err_ret_trace_val_stack = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, + trace_field_index_stack, ""); } LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, ""); @@ -6798,11 +6897,11 @@ static void do_code_gen(CodeGen 
*g) { LLVMAddCase(switch_instr, zero, entry_block->llvm_block); g->cur_resume_block_count += 1; LLVMPositionBuilderAtEnd(g->builder, entry_block->llvm_block); - if (err_ret_trace_val != nullptr) { + if (trace_field_index_stack != UINT32_MAX) { LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, - trace_field_index, ""); + trace_field_index_stack, ""); LLVMValueRef trace_field_addrs = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, - trace_field_index + 1, ""); + trace_field_index_stack + 1, ""); LLVMValueRef index_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 0, ""); LLVMBuildStore(g->builder, zero, index_ptr); @@ -9725,7 +9824,7 @@ bool codegen_fn_has_err_ret_tracing_arg(CodeGen *g, ZigType *return_type) { return_type->id == ZigTypeIdErrorSet); } -bool codegen_fn_has_err_ret_tracing_stack(CodeGen *g, ZigFn *fn) { +bool codegen_fn_has_err_ret_tracing_stack(CodeGen *g, ZigFn *fn, bool is_async) { return g->have_err_ret_tracing && fn->calls_or_awaits_errorable_fn && - !codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type); + (is_async || !codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type)); } diff --git a/src/codegen.hpp b/src/codegen.hpp index c84ef4bc48..794a0fd5a6 100644 --- a/src/codegen.hpp +++ b/src/codegen.hpp @@ -62,6 +62,6 @@ TargetSubsystem detect_subsystem(CodeGen *g); void codegen_release_caches(CodeGen *codegen); bool codegen_fn_has_err_ret_tracing_arg(CodeGen *g, ZigType *return_type); -bool codegen_fn_has_err_ret_tracing_stack(CodeGen *g, ZigFn *fn); +bool codegen_fn_has_err_ret_tracing_stack(CodeGen *g, ZigFn *fn, bool is_async); #endif diff --git a/src/ir.cpp b/src/ir.cpp index ca54d54c2d..64e5e31a1b 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -525,6 +525,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionReturn *) { return IrInstructionIdReturn; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionReturnBegin *) { + 
return IrInstructionIdReturnBegin; +} + static constexpr IrInstructionId ir_instruction_id(IrInstructionCast *) { return IrInstructionIdCast; } @@ -945,10 +949,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionResetResult *) { return IrInstructionIdResetResult; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionResultPtr *) { - return IrInstructionIdResultPtr; -} - static constexpr IrInstructionId ir_instruction_id(IrInstructionPtrOfArrayToSlice *) { return IrInstructionIdPtrOfArrayToSlice; } @@ -1049,8 +1049,12 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionSuspendFinish *) return IrInstructionIdSuspendFinish; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionAwait *) { - return IrInstructionIdAwait; +static constexpr IrInstructionId ir_instruction_id(IrInstructionAwaitSrc *) { + return IrInstructionIdAwaitSrc; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionAwaitGen *) { + return IrInstructionIdAwaitGen; } static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroResume *) { @@ -1109,18 +1113,32 @@ static IrInstruction *ir_build_cond_br(IrBuilder *irb, Scope *scope, AstNode *so } static IrInstruction *ir_build_return(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *return_value) + IrInstruction *operand) { IrInstructionReturn *return_instruction = ir_build_instruction(irb, scope, source_node); return_instruction->base.value.type = irb->codegen->builtin_types.entry_unreachable; return_instruction->base.value.special = ConstValSpecialStatic; - return_instruction->value = return_value; + return_instruction->operand = operand; - if (return_value != nullptr) ir_ref_instruction(return_value, irb->current_basic_block); + if (operand != nullptr) ir_ref_instruction(operand, irb->current_basic_block); return &return_instruction->base; } +static IrInstruction *ir_build_return_begin(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction 
*operand) +{ + IrInstructionReturnBegin *return_instruction = ir_build_instruction(irb, scope, source_node); + return_instruction->base.value.type = irb->codegen->builtin_types.entry_void; + return_instruction->base.value.special = ConstValSpecialStatic; + return_instruction->operand = operand; + + ir_ref_instruction(operand, irb->current_basic_block); + + return &return_instruction->base; +} + + static IrInstruction *ir_build_const_void(IrBuilder *irb, Scope *scope, AstNode *source_node) { IrInstructionConst *const_instruction = ir_build_instruction(irb, scope, source_node); const_instruction->base.value.type = irb->codegen->builtin_types.entry_void; @@ -2525,11 +2543,12 @@ static IrInstruction *ir_build_align_of(IrBuilder *irb, Scope *scope, AstNode *s } static IrInstruction *ir_build_test_err_src(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *base_ptr, bool resolve_err_set) + IrInstruction *base_ptr, bool resolve_err_set, bool base_ptr_is_payload) { IrInstructionTestErrSrc *instruction = ir_build_instruction(irb, scope, source_node); instruction->base_ptr = base_ptr; instruction->resolve_err_set = resolve_err_set; + instruction->base_ptr_is_payload = base_ptr_is_payload; ir_ref_instruction(base_ptr, irb->current_basic_block); @@ -2971,18 +2990,6 @@ static IrInstruction *ir_build_reset_result(IrBuilder *irb, Scope *scope, AstNod return &instruction->base; } -static IrInstruction *ir_build_result_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, - ResultLoc *result_loc, IrInstruction *result) -{ - IrInstructionResultPtr *instruction = ir_build_instruction(irb, scope, source_node); - instruction->result_loc = result_loc; - instruction->result = result; - - ir_ref_instruction(result, irb->current_basic_block); - - return &instruction->base; -} - static IrInstruction *ir_build_opaque_type(IrBuilder *irb, Scope *scope, AstNode *source_node) { IrInstructionOpaqueType *instruction = ir_build_instruction(irb, scope, source_node); @@ -3266,17 
+3273,33 @@ static IrInstruction *ir_build_suspend_finish(IrBuilder *irb, Scope *scope, AstN return &instruction->base; } -static IrInstruction *ir_build_await(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *frame) +static IrInstruction *ir_build_await_src(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *frame, ResultLoc *result_loc) { - IrInstructionAwait *instruction = ir_build_instruction(irb, scope, source_node); + IrInstructionAwaitSrc *instruction = ir_build_instruction(irb, scope, source_node); instruction->frame = frame; + instruction->result_loc = result_loc; ir_ref_instruction(frame, irb->current_basic_block); return &instruction->base; } +static IrInstruction *ir_build_await_gen(IrAnalyze *ira, IrInstruction *source_instruction, + IrInstruction *frame, ZigType *result_type, IrInstruction *result_loc) +{ + IrInstructionAwaitGen *instruction = ir_build_instruction(&ira->new_irb, + source_instruction->scope, source_instruction->source_node); + instruction->base.value.type = result_type; + instruction->frame = frame; + instruction->result_loc = result_loc; + + ir_ref_instruction(frame, ira->new_irb.current_basic_block); + if (result_loc != nullptr) ir_ref_instruction(result_loc, ira->new_irb.current_basic_block); + + return &instruction->base; +} + static IrInstruction *ir_build_coro_resume(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *frame) { @@ -3416,16 +3439,6 @@ static ScopeDeferExpr *get_scope_defer_expr(Scope *scope) { return nullptr; } -static IrInstruction *ir_gen_async_return(IrBuilder *irb, Scope *scope, AstNode *node, IrInstruction *return_value, - bool is_generated_code) -{ - ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value)); - - IrInstruction *return_inst = ir_build_return(irb, scope, node, return_value); - return_inst->is_gen = is_generated_code; - return return_inst; -} - static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, 
LVal lval, ResultLoc *result_loc) { assert(node->type == NodeTypeReturnExpr); @@ -3467,19 +3480,16 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, return_value = ir_build_const_void(irb, scope, node); } + ir_build_return_begin(irb, scope, node, return_value); + size_t defer_counts[2]; ir_count_defers(irb, scope, outer_scope, defer_counts); bool have_err_defers = defer_counts[ReturnKindError] > 0; if (have_err_defers || irb->codegen->have_err_ret_tracing) { IrBasicBlock *err_block = ir_create_basic_block(irb, scope, "ErrRetErr"); IrBasicBlock *ok_block = ir_create_basic_block(irb, scope, "ErrRetOk"); - if (!have_err_defers) { - ir_gen_defers_for_block(irb, scope, outer_scope, false); - } - IrInstruction *ret_ptr = ir_build_result_ptr(irb, scope, node, &result_loc_ret->base, - return_value); - IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, ret_ptr, false); + IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, return_value, false, true); bool should_inline = ir_should_inline(irb->exec, scope); IrInstruction *is_comptime; @@ -3493,28 +3503,26 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, scope, "RetStmt"); ir_set_cursor_at_end_and_append_block(irb, err_block); - if (have_err_defers) { - ir_gen_defers_for_block(irb, scope, outer_scope, true); - } if (irb->codegen->have_err_ret_tracing && !should_inline) { ir_build_save_err_ret_addr(irb, scope, node); } + ir_gen_defers_for_block(irb, scope, outer_scope, true); ir_build_br(irb, scope, node, ret_stmt_block, is_comptime); ir_set_cursor_at_end_and_append_block(irb, ok_block); - if (have_err_defers) { - ir_gen_defers_for_block(irb, scope, outer_scope, false); - } + ir_gen_defers_for_block(irb, scope, outer_scope, false); ir_build_br(irb, scope, node, ret_stmt_block, is_comptime); ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block); - IrInstruction *result = 
ir_gen_async_return(irb, scope, node, return_value, false); + ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value)); + IrInstruction *result = ir_build_return(irb, scope, node, return_value); result_loc_ret->base.source_instruction = result; return result; } else { // generate unconditional defers ir_gen_defers_for_block(irb, scope, outer_scope, false); - IrInstruction *result = ir_gen_async_return(irb, scope, node, return_value, false); + ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value)); + IrInstruction *result = ir_build_return(irb, scope, node, return_value); result_loc_ret->base.source_instruction = result; return result; } @@ -3525,7 +3533,7 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, IrInstruction *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr, nullptr); if (err_union_ptr == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; - IrInstruction *is_err_val = ir_build_test_err_src(irb, scope, node, err_union_ptr, true); + IrInstruction *is_err_val = ir_build_test_err_src(irb, scope, node, err_union_ptr, true, false); IrBasicBlock *return_block = ir_create_basic_block(irb, scope, "ErrRetReturn"); IrBasicBlock *continue_block = ir_create_basic_block(irb, scope, "ErrRetContinue"); @@ -3539,10 +3547,10 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err_val, return_block, continue_block, is_comptime)); ir_set_cursor_at_end_and_append_block(irb, return_block); + IrInstruction *err_val_ptr = ir_build_unwrap_err_code(irb, scope, node, err_union_ptr); + IrInstruction *err_val = ir_build_load_ptr(irb, scope, node, err_val_ptr); + ir_build_return_begin(irb, scope, node, err_val); if (!ir_gen_defers_for_block(irb, scope, outer_scope, true)) { - IrInstruction *err_val_ptr = ir_build_unwrap_err_code(irb, scope, node, err_union_ptr); - IrInstruction 
*err_val = ir_build_load_ptr(irb, scope, node, err_val_ptr); - ResultLocReturn *result_loc_ret = allocate(1); result_loc_ret->base.id = ResultLocIdReturn; ir_build_reset_result(irb, scope, node, &result_loc_ret->base); @@ -3551,7 +3559,8 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, if (irb->codegen->have_err_ret_tracing && !should_inline) { ir_build_save_err_ret_addr(irb, scope, node); } - IrInstruction *ret_inst = ir_gen_async_return(irb, scope, node, err_val, false); + ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, err_val)); + IrInstruction *ret_inst = ir_build_return(irb, scope, node, err_val); result_loc_ret->base.source_instruction = ret_inst; } @@ -6081,7 +6090,8 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n LValPtr, nullptr); if (err_val_ptr == irb->codegen->invalid_instruction) return err_val_ptr; - IrInstruction *is_err = ir_build_test_err_src(irb, scope, node->data.while_expr.condition, err_val_ptr, true); + IrInstruction *is_err = ir_build_test_err_src(irb, scope, node->data.while_expr.condition, err_val_ptr, + true, false); IrBasicBlock *after_cond_block = irb->current_basic_block; IrInstruction *void_else_result = else_node ? 
nullptr : ir_mark_gen(ir_build_const_void(irb, scope, node)); IrInstruction *cond_br_inst; @@ -6897,7 +6907,7 @@ static IrInstruction *ir_gen_if_err_expr(IrBuilder *irb, Scope *scope, AstNode * return err_val_ptr; IrInstruction *err_val = ir_build_load_ptr(irb, scope, node, err_val_ptr); - IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, err_val_ptr, true); + IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, err_val_ptr, true, false); IrBasicBlock *ok_block = ir_create_basic_block(irb, scope, "TryOk"); IrBasicBlock *else_block = ir_create_basic_block(irb, scope, "TryElse"); @@ -7513,7 +7523,7 @@ static IrInstruction *ir_gen_catch(IrBuilder *irb, Scope *parent_scope, AstNode if (err_union_ptr == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; - IrInstruction *is_err = ir_build_test_err_src(irb, parent_scope, node, err_union_ptr, true); + IrInstruction *is_err = ir_build_test_err_src(irb, parent_scope, node, err_union_ptr, true, false); IrInstruction *is_comptime; if (ir_should_inline(irb->exec, parent_scope)) { @@ -7830,7 +7840,9 @@ static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) return ir_build_coro_resume(irb, scope, node, target_inst); } -static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *node) { +static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, + ResultLoc *result_loc) +{ assert(node->type == NodeTypeAwaitExpr); ZigFn *fn_entry = exec_fn_entry(irb->exec); @@ -7852,7 +7864,8 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n if (target_inst == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; - return ir_build_await(irb, scope, node, target_inst); + IrInstruction *await_inst = ir_build_await_src(irb, scope, node, target_inst, result_loc); + return ir_lval_wrap(irb, scope, await_inst, lval, result_loc); } static IrInstruction 
*ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNode *node) { @@ -8016,7 +8029,7 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop case NodeTypeResume: return ir_lval_wrap(irb, scope, ir_gen_resume(irb, scope, node), lval, result_loc); case NodeTypeAwaitExpr: - return ir_lval_wrap(irb, scope, ir_gen_await_expr(irb, scope, node), lval, result_loc); + return ir_gen_await_expr(irb, scope, node, lval, result_loc); case NodeTypeSuspend: return ir_lval_wrap(irb, scope, ir_gen_suspend(irb, scope, node), lval, result_loc); case NodeTypeEnumLiteral: @@ -8088,8 +8101,10 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec return false; if (!instr_is_unreachable(result)) { + ir_mark_gen(ir_build_return_begin(irb, scope, node, result)); // no need for save_err_ret_addr because this cannot return error - ir_gen_async_return(irb, scope, result->source_node, result, true); + ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, result->source_node, result)); + ir_mark_gen(ir_build_return(irb, scope, result->source_node, result)); } return true; @@ -8181,18 +8196,19 @@ static ConstExprValue *ir_exec_const_result(CodeGen *codegen, IrExecutable *exec IrInstruction *instruction = bb->instruction_list.at(i); if (instruction->id == IrInstructionIdReturn) { IrInstructionReturn *ret_inst = (IrInstructionReturn *)instruction; - IrInstruction *value = ret_inst->value; - if (value->value.special == ConstValSpecialRuntime) { - exec_add_error_node(codegen, exec, value->source_node, + IrInstruction *operand = ret_inst->operand; + if (operand->value.special == ConstValSpecialRuntime) { + exec_add_error_node(codegen, exec, operand->source_node, buf_sprintf("unable to evaluate constant expression")); return &codegen->invalid_instruction->value; } - return &value->value; + return &operand->value; } else if (ir_has_side_effects(instruction)) { if (instr_is_comptime(instruction)) { switch (instruction->id) { case 
IrInstructionIdUnwrapErrPayload: case IrInstructionIdUnionFieldPtr: + case IrInstructionIdReturnBegin: continue; default: break; @@ -12593,21 +12609,13 @@ static IrInstruction *ir_analyze_instruction_add_implicit_return_type(IrAnalyze return ir_const_void(ira, &instruction->base); } -static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructionReturn *instruction) { - IrInstruction *value = instruction->value->child; - if (type_is_invalid(value->value.type)) - return ir_unreach_error(ira); +static IrInstruction *ir_analyze_instruction_return_begin(IrAnalyze *ira, IrInstructionReturnBegin *instruction) { + IrInstruction *operand = instruction->operand->child; + if (type_is_invalid(operand->value.type)) + return ira->codegen->invalid_instruction; - if (!instr_is_comptime(value) && handle_is_ptr(ira->explicit_return_type)) { - // result location mechanism took care of it. - IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, nullptr); - result->value.type = ira->codegen->builtin_types.entry_unreachable; - return ir_finish_anal(ira, result); - } - - IrInstruction *casted_value = ir_implicit_cast(ira, value, ira->explicit_return_type); - if (type_is_invalid(casted_value->value.type)) { + IrInstruction *casted_operand = ir_implicit_cast(ira, operand, ira->explicit_return_type); + if (type_is_invalid(casted_operand->value.type)) { AstNode *source_node = ira->explicit_return_type_source_node; if (source_node != nullptr) { ErrorMsg *msg = ira->codegen->errors.last(); @@ -12617,15 +12625,38 @@ static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructio return ir_unreach_error(ira); } - if (casted_value->value.special == ConstValSpecialRuntime && - casted_value->value.type->id == ZigTypeIdPointer && - casted_value->value.data.rh_ptr == RuntimeHintPtrStack) + return ir_build_return_begin(&ira->new_irb, instruction->base.scope, instruction->base.source_node, + casted_operand); 
+} + +static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructionReturn *instruction) { + IrInstruction *operand = instruction->operand->child; + if (type_is_invalid(operand->value.type)) + return ir_unreach_error(ira); + + if (!instr_is_comptime(operand) && handle_is_ptr(ira->explicit_return_type)) { + // result location mechanism took care of it. + IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope, + instruction->base.source_node, nullptr); + result->value.type = ira->codegen->builtin_types.entry_unreachable; + return ir_finish_anal(ira, result); + } + + IrInstruction *casted_operand = ir_implicit_cast(ira, operand, ira->explicit_return_type); + if (type_is_invalid(casted_operand->value.type)) { + // error already reported by IrInstructionReturnBegin + return ir_unreach_error(ira); + } + + if (casted_operand->value.special == ConstValSpecialRuntime && + casted_operand->value.type->id == ZigTypeIdPointer && + casted_operand->value.data.rh_ptr == RuntimeHintPtrStack) { - ir_add_error(ira, casted_value, buf_sprintf("function returns address of local variable")); + ir_add_error(ira, casted_operand, buf_sprintf("function returns address of local variable")); return ir_unreach_error(ira); } IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, casted_value); + instruction->base.source_node, casted_operand); result->value.type = ira->codegen->builtin_types.entry_unreachable; return ir_finish_anal(ira, result); } @@ -22176,19 +22207,6 @@ static IrInstruction *ir_analyze_instruction_overflow_op(IrAnalyze *ira, IrInstr return result; } -static IrInstruction *ir_analyze_instruction_result_ptr(IrAnalyze *ira, IrInstructionResultPtr *instruction) { - IrInstruction *result = instruction->result->child; - if (type_is_invalid(result->value.type)) - return result; - - if (instruction->result_loc->written && instruction->result_loc->resolved_loc != nullptr && - 
!instr_is_comptime(result)) - { - return instruction->result_loc->resolved_loc; - } - return ir_get_ref(ira, &instruction->base, result, true, false); -} - static void ir_eval_mul_add(IrAnalyze *ira, IrInstructionMulAdd *source_instr, ZigType *float_type, ConstExprValue *op1, ConstExprValue *op2, ConstExprValue *op3, ConstExprValue *out_val) { if (float_type->id == ZigTypeIdComptimeFloat) { @@ -22313,11 +22331,16 @@ static IrInstruction *ir_analyze_instruction_test_err(IrAnalyze *ira, IrInstruct if (type_is_invalid(base_ptr->value.type)) return ira->codegen->invalid_instruction; - IrInstruction *value = ir_get_deref(ira, &instruction->base, base_ptr, nullptr); + IrInstruction *value; + if (instruction->base_ptr_is_payload) { + value = base_ptr; + } else { + value = ir_get_deref(ira, &instruction->base, base_ptr, nullptr); + } + ZigType *type_entry = value->value.type; if (type_is_invalid(type_entry)) return ira->codegen->invalid_instruction; - if (type_entry->id == ZigTypeIdErrorUnion) { if (instr_is_comptime(value)) { ConstExprValue *err_union_val = ir_resolve_const(ira, value, UndefBad); @@ -24443,7 +24466,7 @@ static IrInstruction *ir_analyze_instruction_suspend_finish(IrAnalyze *ira, return ir_build_suspend_finish(&ira->new_irb, instruction->base.scope, instruction->base.source_node, begin); } -static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstructionAwait *instruction) { +static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstructionAwaitSrc *instruction) { IrInstruction *frame_ptr = instruction->frame->child; if (type_is_invalid(frame_ptr->value.type)) return ira->codegen->invalid_instruction; @@ -24484,9 +24507,17 @@ static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstruction fn_entry->calls_or_awaits_errorable_fn = true; } - IrInstruction *result = ir_build_await(&ira->new_irb, - instruction->base.scope, instruction->base.source_node, frame); - result->value.type = result_type; + IrInstruction 
*result_loc; + if (type_has_bits(result_type)) { + result_loc = ir_resolve_result(ira, &instruction->base, instruction->result_loc, + result_type, nullptr, true, false, true); + if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc))) + return result_loc; + } else { + result_loc = nullptr; + } + + IrInstruction *result = ir_build_await_gen(ira, &instruction->base, frame, result_type, result_loc); return ir_finish_anal(ira, result); } @@ -24541,8 +24572,11 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction case IrInstructionIdRefGen: case IrInstructionIdTestErrGen: case IrInstructionIdFrameSizeGen: + case IrInstructionIdAwaitGen: zig_unreachable(); + case IrInstructionIdReturnBegin: + return ir_analyze_instruction_return_begin(ira, (IrInstructionReturnBegin *)instruction); case IrInstructionIdReturn: return ir_analyze_instruction_return(ira, (IrInstructionReturn *)instruction); case IrInstructionIdConst: @@ -24749,8 +24783,6 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction return ir_analyze_instruction_resolve_result(ira, (IrInstructionResolveResult *)instruction); case IrInstructionIdResetResult: return ir_analyze_instruction_reset_result(ira, (IrInstructionResetResult *)instruction); - case IrInstructionIdResultPtr: - return ir_analyze_instruction_result_ptr(ira, (IrInstructionResultPtr *)instruction); case IrInstructionIdOpaqueType: return ir_analyze_instruction_opaque_type(ira, (IrInstructionOpaqueType *)instruction); case IrInstructionIdSetAlignStack: @@ -24807,8 +24839,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction return ir_analyze_instruction_suspend_finish(ira, (IrInstructionSuspendFinish *)instruction); case IrInstructionIdCoroResume: return ir_analyze_instruction_coro_resume(ira, (IrInstructionCoroResume *)instruction); - case IrInstructionIdAwait: - return ir_analyze_instruction_await(ira, 
(IrInstructionAwait *)instruction); + case IrInstructionIdAwaitSrc: + return ir_analyze_instruction_await(ira, (IrInstructionAwaitSrc *)instruction); } zig_unreachable(); } @@ -24898,6 +24930,7 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdStorePtr: case IrInstructionIdCallSrc: case IrInstructionIdCallGen: + case IrInstructionIdReturnBegin: case IrInstructionIdReturn: case IrInstructionIdUnreachable: case IrInstructionIdSetCold: @@ -24943,7 +24976,8 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdSuspendBegin: case IrInstructionIdSuspendFinish: case IrInstructionIdCoroResume: - case IrInstructionIdAwait: + case IrInstructionIdAwaitSrc: + case IrInstructionIdAwaitGen: return true; case IrInstructionIdPhi: @@ -25041,7 +25075,6 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdHasDecl: case IrInstructionIdAllocaSrc: case IrInstructionIdAllocaGen: - case IrInstructionIdResultPtr: return false; case IrInstructionIdAsm: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index bc9d09b30c..c56a660e29 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -64,11 +64,15 @@ static void ir_print_other_block(IrPrint *irp, IrBasicBlock *bb) { } } -static void ir_print_return(IrPrint *irp, IrInstructionReturn *return_instruction) { +static void ir_print_return_begin(IrPrint *irp, IrInstructionReturnBegin *instruction) { + fprintf(irp->f, "@returnBegin("); + ir_print_other_instruction(irp, instruction->operand); + fprintf(irp->f, ")"); +} + +static void ir_print_return(IrPrint *irp, IrInstructionReturn *instruction) { fprintf(irp->f, "return "); - if (return_instruction->value != nullptr) { - ir_print_other_instruction(irp, return_instruction->value); - } + ir_print_other_instruction(irp, instruction->operand); } static void ir_print_const(IrPrint *irp, IrInstructionConst *const_instruction) { @@ -1329,14 +1333,6 @@ static void ir_print_reset_result(IrPrint *irp, IrInstructionResetResult 
*instru fprintf(irp->f, ")"); } -static void ir_print_result_ptr(IrPrint *irp, IrInstructionResultPtr *instruction) { - fprintf(irp->f, "ResultPtr("); - ir_print_result_loc(irp, instruction->result_loc); - fprintf(irp->f, ","); - ir_print_other_instruction(irp, instruction->result); - fprintf(irp->f, ")"); -} - static void ir_print_opaque_type(IrPrint *irp, IrInstructionOpaqueType *instruction) { fprintf(irp->f, "@OpaqueType()"); } @@ -1538,9 +1534,19 @@ static void ir_print_coro_resume(IrPrint *irp, IrInstructionCoroResume *instruct fprintf(irp->f, ")"); } -static void ir_print_await(IrPrint *irp, IrInstructionAwait *instruction) { +static void ir_print_await_src(IrPrint *irp, IrInstructionAwaitSrc *instruction) { fprintf(irp->f, "@await("); ir_print_other_instruction(irp, instruction->frame); + fprintf(irp->f, ","); + ir_print_result_loc(irp, instruction->result_loc); + fprintf(irp->f, ")"); +} + +static void ir_print_await_gen(IrPrint *irp, IrInstructionAwaitGen *instruction) { + fprintf(irp->f, "@await("); + ir_print_other_instruction(irp, instruction->frame); + fprintf(irp->f, ","); + ir_print_other_instruction(irp, instruction->result_loc); fprintf(irp->f, ")"); } @@ -1549,6 +1555,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { switch (instruction->id) { case IrInstructionIdInvalid: zig_unreachable(); + case IrInstructionIdReturnBegin: + ir_print_return_begin(irp, (IrInstructionReturnBegin *)instruction); + break; case IrInstructionIdReturn: ir_print_return(irp, (IrInstructionReturn *)instruction); break; @@ -1921,9 +1930,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdResetResult: ir_print_reset_result(irp, (IrInstructionResetResult *)instruction); break; - case IrInstructionIdResultPtr: - ir_print_result_ptr(irp, (IrInstructionResultPtr *)instruction); - break; case IrInstructionIdOpaqueType: ir_print_opaque_type(irp, (IrInstructionOpaqueType *)instruction); break; @@ 
-2020,8 +2026,11 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdCoroResume: ir_print_coro_resume(irp, (IrInstructionCoroResume *)instruction); break; - case IrInstructionIdAwait: - ir_print_await(irp, (IrInstructionAwait *)instruction); + case IrInstructionIdAwaitSrc: + ir_print_await_src(irp, (IrInstructionAwaitSrc *)instruction); + break; + case IrInstructionIdAwaitGen: + ir_print_await_gen(irp, (IrInstructionAwaitGen *)instruction); break; } fprintf(irp->f, "\n"); From 17199b087915661c935f0970cc1e4eb29968a68d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 6 Aug 2019 18:29:56 -0400 Subject: [PATCH 061/125] passing the error return trace async function test --- src/all_types.hpp | 2 +- src/analyze.cpp | 5 +++ src/codegen.cpp | 68 ++++++++++++++++------------- src/ir.cpp | 66 ++++++++++++++++------------ test/stage1/behavior/coroutines.zig | 68 ++++++++++++++--------------- 5 files changed, 114 insertions(+), 95 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index bc65948579..1dad546a7e 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1718,7 +1718,7 @@ struct CodeGen { LLVMTargetMachineRef target_machine; ZigLLVMDIFile *dummy_di_file; LLVMValueRef cur_ret_ptr; - LLVMValueRef cur_ret_ptr_ptr; + LLVMValueRef cur_frame_ptr; LLVMValueRef cur_fn_val; LLVMValueRef cur_async_switch_instr; LLVMValueRef cur_async_resume_index_ptr; diff --git a/src/analyze.cpp b/src/analyze.cpp index 36eeaeac9c..764b28ed45 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5160,6 +5160,8 @@ static ZigType *get_async_fn_type(CodeGen *g, ZigType *orig_fn_type) { } static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { + Error err; + if (frame_type->data.frame.locals_struct != nullptr) return ErrorNone; @@ -5286,6 +5288,9 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { continue; } } + if ((err = type_resolve(g, child_type, ResolveStatusSizeKnown))) { + return err; + } 
const char *name; if (*instruction->name_hint == 0) { name = buf_ptr(buf_sprintf("@local%" ZIG_PRI_usize, alloca_i)); diff --git a/src/codegen.cpp b/src/codegen.cpp index cf846d99e9..d1b5ebedad 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -2088,17 +2088,19 @@ static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef tar static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable, IrInstructionReturnBegin *instruction) { - if (!fn_is_async(g->cur_fn)) return nullptr; + bool ret_type_has_bits = instruction->operand != nullptr && + type_has_bits(instruction->operand->value.type); + + if (!fn_is_async(g->cur_fn)) { + return ret_type_has_bits ? ir_llvm_value(g, instruction->operand) : nullptr; + } LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; - bool ret_type_has_bits = instruction->operand != nullptr && - type_has_bits(instruction->operand->value.type); ZigType *ret_type = ret_type_has_bits ? instruction->operand->value.type : nullptr; if (ret_type_has_bits && !handle_is_ptr(ret_type)) { // It's a scalar, so it didn't get written to the result ptr. Do that before the atomic rmw. - LLVMValueRef result_ptr = LLVMBuildLoad(g->builder, g->cur_ret_ptr_ptr, ""); - LLVMBuildStore(g->builder, ir_llvm_value(g, instruction->operand), result_ptr); + LLVMBuildStore(g->builder, ir_llvm_value(g, instruction->operand), g->cur_ret_ptr); } // Prepare to be suspended. We might end up not having to suspend though. 
@@ -2147,7 +2149,11 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable, LLVMBasicBlockRef incoming_blocks[] = { after_resume_block, switch_bb }; LLVMAddIncoming(g->cur_async_prev_val, incoming_values, incoming_blocks, 2); - return nullptr; + if (!ret_type_has_bits) { + return nullptr; + } + + return get_handle_value(g, g->cur_ret_ptr, ret_type, get_pointer_to_type(g, ret_type, true)); } static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *instruction) { @@ -2166,17 +2172,16 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns // If the awaiter result pointer is non-null, we need to copy the result to there. LLVMBasicBlockRef copy_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResult"); LLVMBasicBlockRef copy_end_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResultEnd"); - LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_ret_start + 1, ""); + LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, coro_ret_start + 1, ""); LLVMValueRef awaiter_ret_ptr = LLVMBuildLoad(g->builder, awaiter_ret_ptr_ptr, ""); LLVMValueRef zero_ptr = LLVMConstNull(LLVMTypeOf(awaiter_ret_ptr)); LLVMValueRef need_copy_bit = LLVMBuildICmp(g->builder, LLVMIntNE, awaiter_ret_ptr, zero_ptr, ""); LLVMBuildCondBr(g->builder, need_copy_bit, copy_block, copy_end_block); LLVMPositionBuilderAtEnd(g->builder, copy_block); - LLVMValueRef ret_ptr = LLVMBuildLoad(g->builder, g->cur_ret_ptr_ptr, ""); LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0); LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, awaiter_ret_ptr, ptr_u8, ""); - LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, ret_ptr, ptr_u8, ""); + LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, g->cur_ret_ptr, ptr_u8, ""); bool is_volatile = false; uint32_t abi_align = get_abi_alignment(g, ret_type); LLVMValueRef byte_count_val = 
LLVMConstInt(usize_type_ref, type_size(g, ret_type), false); @@ -3385,10 +3390,6 @@ static LLVMValueRef ir_render_return_ptr(CodeGen *g, IrExecutable *executable, if (!type_has_bits(instruction->base.value.type)) return nullptr; src_assert(g->cur_ret_ptr != nullptr, instruction->base.source_node); - if (fn_is_async(g->cur_fn)) { - LLVMValueRef ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_ret_start, ""); - return LLVMBuildLoad(g->builder, ptr_ptr, ""); - } return g->cur_ret_ptr; } @@ -3547,7 +3548,7 @@ static void render_async_spills(CodeGen *g) { continue; } - var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, async_var_index, + var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, async_var_index, buf_ptr(&var->name)); async_var_index += 1; if (var->decl_node) { @@ -3578,7 +3579,7 @@ static void render_async_spills(CodeGen *g) { continue; } } - instruction->base.llvm_value = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, async_var_index, + instruction->base.llvm_value = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, async_var_index, instruction->name_hint); async_var_index += 1; } @@ -3697,7 +3698,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr // initialization. } else if (callee_is_async) { frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc); - awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr, usize_type_ref, ""); // caller's own frame pointer + awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, ""); // caller's own frame pointer if (ret_has_bits) { if (result_loc == nullptr) { // return type is a scalar, but we still need a pointer to it. Use the async fn frame. 
@@ -4850,7 +4851,7 @@ static LLVMValueRef ir_render_frame_address(CodeGen *g, IrExecutable *executable } static LLVMValueRef ir_render_handle(CodeGen *g, IrExecutable *executable, IrInstructionFrameHandle *instruction) { - return g->cur_ret_ptr; + return g->cur_frame_ptr; } static LLVMValueRef render_shl_with_overflow(CodeGen *g, IrInstructionOverflowOp *instruction) { @@ -5335,7 +5336,7 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst } // caller's own frame pointer - LLVMValueRef awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr, usize_type_ref, ""); + LLVMValueRef awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, ""); LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_awaiter_index, ""); LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_init_val, LLVMAtomicOrderingRelease, g->is_single_threaded); @@ -6710,13 +6711,17 @@ static void do_code_gen(CodeGen *g) { bool is_async = fn_is_async(fn_table_entry); - if (want_sret || is_async) { - g->cur_ret_ptr = LLVMGetParam(fn, 0); - } else if (handle_is_ptr(fn_type_id->return_type)) { - g->cur_ret_ptr = build_alloca(g, fn_type_id->return_type, "result", 0); - // TODO add debug info variable for this + if (is_async) { + g->cur_frame_ptr = LLVMGetParam(fn, 0); } else { - g->cur_ret_ptr = nullptr; + if (want_sret) { + g->cur_ret_ptr = LLVMGetParam(fn, 0); + } else if (handle_is_ptr(fn_type_id->return_type)) { + g->cur_ret_ptr = build_alloca(g, fn_type_id->return_type, "result", 0); + // TODO add debug info variable for this + } else { + g->cur_ret_ptr = nullptr; + } } uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn_table_entry); @@ -6870,21 +6875,22 @@ static void do_code_gen(CodeGen *g) { LLVMPositionBuilderAtEnd(g->builder, g->cur_preamble_llvm_block); render_async_spills(g); - g->cur_async_awaiter_ptr = LLVMBuildStructGEP(g->builder, 
g->cur_ret_ptr, coro_awaiter_index, ""); - LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_resume_index, ""); + g->cur_async_awaiter_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, coro_awaiter_index, ""); + LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, coro_resume_index, ""); g->cur_async_resume_index_ptr = resume_index_ptr; if (type_has_bits(fn_type_id->return_type)) { - g->cur_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_ret_start, ""); + LLVMValueRef cur_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, coro_ret_start, ""); + g->cur_ret_ptr = LLVMBuildLoad(g->builder, cur_ret_ptr_ptr, ""); } if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) { uint32_t trace_field_index = frame_index_trace_arg(g, fn_type_id->return_type); - g->cur_err_ret_trace_val_arg = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, trace_field_index, ""); + g->cur_err_ret_trace_val_arg = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, trace_field_index, ""); } uint32_t trace_field_index_stack = UINT32_MAX; if (codegen_fn_has_err_ret_tracing_stack(g, fn_table_entry, true)) { trace_field_index_stack = frame_index_trace_stack(g, fn_type_id); - g->cur_err_ret_trace_val_stack = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, + g->cur_err_ret_trace_val_stack = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, trace_field_index_stack, ""); } @@ -6898,9 +6904,9 @@ static void do_code_gen(CodeGen *g) { g->cur_resume_block_count += 1; LLVMPositionBuilderAtEnd(g->builder, entry_block->llvm_block); if (trace_field_index_stack != UINT32_MAX) { - LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, + LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, trace_field_index_stack, ""); - LLVMValueRef trace_field_addrs = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, + LLVMValueRef trace_field_addrs = LLVMBuildStructGEP(g->builder, 
g->cur_frame_ptr, trace_field_index_stack + 1, ""); LLVMValueRef index_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 0, ""); diff --git a/src/ir.cpp b/src/ir.cpp index 64e5e31a1b..7cb868cab2 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -1129,8 +1129,6 @@ static IrInstruction *ir_build_return_begin(IrBuilder *irb, Scope *scope, AstNod IrInstruction *operand) { IrInstructionReturnBegin *return_instruction = ir_build_instruction(irb, scope, source_node); - return_instruction->base.value.type = irb->codegen->builtin_types.entry_void; - return_instruction->base.value.special = ConstValSpecialStatic; return_instruction->operand = operand; ir_ref_instruction(operand, irb->current_basic_block); @@ -3480,7 +3478,8 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, return_value = ir_build_const_void(irb, scope, node); } - ir_build_return_begin(irb, scope, node, return_value); + ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value)); + return_value = ir_build_return_begin(irb, scope, node, return_value); size_t defer_counts[2]; ir_count_defers(irb, scope, outer_scope, defer_counts); @@ -3514,14 +3513,12 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, ir_build_br(irb, scope, node, ret_stmt_block, is_comptime); ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block); - ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value)); IrInstruction *result = ir_build_return(irb, scope, node, return_value); result_loc_ret->base.source_instruction = result; return result; } else { // generate unconditional defers ir_gen_defers_for_block(irb, scope, outer_scope, false); - ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value)); IrInstruction *result = ir_build_return(irb, scope, node, return_value); result_loc_ret->base.source_instruction = result; return result; @@ -3549,7 +3546,8 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope 
*scope, AstNode *node, ir_set_cursor_at_end_and_append_block(irb, return_block); IrInstruction *err_val_ptr = ir_build_unwrap_err_code(irb, scope, node, err_union_ptr); IrInstruction *err_val = ir_build_load_ptr(irb, scope, node, err_val_ptr); - ir_build_return_begin(irb, scope, node, err_val); + ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, err_val)); + err_val = ir_build_return_begin(irb, scope, node, err_val); if (!ir_gen_defers_for_block(irb, scope, outer_scope, true)) { ResultLocReturn *result_loc_ret = allocate(1); result_loc_ret->base.id = ResultLocIdReturn; @@ -3559,7 +3557,6 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, if (irb->codegen->have_err_ret_tracing && !should_inline) { ir_build_save_err_ret_addr(irb, scope, node); } - ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, err_val)); IrInstruction *ret_inst = ir_build_return(irb, scope, node, err_val); result_loc_ret->base.source_instruction = ret_inst; } @@ -4972,7 +4969,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return ir_lval_wrap(irb, scope, ir_build_frame_address(irb, scope, node), lval, result_loc); case BuiltinFnIdFrameHandle: if (!irb->exec->fn_entry) { - add_node_error(irb->codegen, node, buf_sprintf("@handle() called outside of function definition")); + add_node_error(irb->codegen, node, buf_sprintf("@frame() called outside of function definition")); return irb->codegen->invalid_instruction; } return ir_lval_wrap(irb, scope, ir_build_handle(irb, scope, node), lval, result_loc); @@ -8101,9 +8098,9 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec return false; if (!instr_is_unreachable(result)) { - ir_mark_gen(ir_build_return_begin(irb, scope, node, result)); - // no need for save_err_ret_addr because this cannot return error ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, result->source_node, result)); + result = 
ir_mark_gen(ir_build_return_begin(irb, scope, node, result)); + // no need for save_err_ret_addr because this cannot return error ir_mark_gen(ir_build_return(irb, scope, result->source_node, result)); } @@ -9789,6 +9786,8 @@ static ZigType *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, ZigT ZigType *prev_err_set_type = (err_set_type == nullptr) ? prev_type->data.error_union.err_set_type : err_set_type; ZigType *cur_err_set_type = cur_type->data.error_union.err_set_type; + if (prev_err_set_type == cur_err_set_type) + continue; if (!resolve_inferred_error_set(ira->codegen, prev_err_set_type, cur_inst->source_node)) { return ira->codegen->builtin_types.entry_invalid; @@ -12614,6 +12613,14 @@ static IrInstruction *ir_analyze_instruction_return_begin(IrAnalyze *ira, IrInst if (type_is_invalid(operand->value.type)) return ira->codegen->invalid_instruction; + if (!instr_is_comptime(operand) && handle_is_ptr(ira->explicit_return_type)) { + // result location mechanism took care of it. 
+ IrInstruction *result = ir_build_return_begin(&ira->new_irb, instruction->base.scope, + instruction->base.source_node, operand); + copy_const_val(&result->value, &operand->value, true); + return result; + } + IrInstruction *casted_operand = ir_implicit_cast(ira, operand, ira->explicit_return_type); if (type_is_invalid(casted_operand->value.type)) { AstNode *source_node = ira->explicit_return_type_source_node; @@ -12625,8 +12632,18 @@ static IrInstruction *ir_analyze_instruction_return_begin(IrAnalyze *ira, IrInst return ir_unreach_error(ira); } - return ir_build_return_begin(&ira->new_irb, instruction->base.scope, instruction->base.source_node, - casted_operand); + if (casted_operand->value.special == ConstValSpecialRuntime && + casted_operand->value.type->id == ZigTypeIdPointer && + casted_operand->value.data.rh_ptr == RuntimeHintPtrStack) + { + ir_add_error(ira, casted_operand, buf_sprintf("function returns address of local variable")); + return ir_unreach_error(ira); + } + + IrInstruction *result = ir_build_return_begin(&ira->new_irb, instruction->base.scope, + instruction->base.source_node, casted_operand); + copy_const_val(&result->value, &casted_operand->value, true); + return result; } static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructionReturn *instruction) { @@ -12642,21 +12659,8 @@ static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructio return ir_finish_anal(ira, result); } - IrInstruction *casted_operand = ir_implicit_cast(ira, operand, ira->explicit_return_type); - if (type_is_invalid(casted_operand->value.type)) { - // error already reported by IrInstructionReturnBegin - return ir_unreach_error(ira); - } - - if (casted_operand->value.special == ConstValSpecialRuntime && - casted_operand->value.type->id == ZigTypeIdPointer && - casted_operand->value.data.rh_ptr == RuntimeHintPtrStack) - { - ir_add_error(ira, casted_operand, buf_sprintf("function returns address of local variable")); - return 
ir_unreach_error(ira); - } IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, casted_operand); + instruction->base.source_node, operand); result->value.type = ira->codegen->builtin_types.entry_unreachable; return ir_finish_anal(ira, result); } @@ -14612,8 +14616,12 @@ static IrInstruction *ir_resolve_result_raw(IrAnalyze *ira, IrInstruction *suspe if ((err = type_resolve(ira->codegen, ira->explicit_return_type, ResolveStatusZeroBitsKnown))) { return ira->codegen->invalid_instruction; } - if (!type_has_bits(ira->explicit_return_type) || !handle_is_ptr(ira->explicit_return_type)) - return nullptr; + if (!type_has_bits(ira->explicit_return_type) || !handle_is_ptr(ira->explicit_return_type)) { + ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec); + if (fn_entry == nullptr || fn_entry->inferred_async_node == nullptr) { + return nullptr; + } + } ZigType *ptr_return_type = get_pointer_to_type(ira->codegen, ira->explicit_return_type, false); result_loc->written = true; @@ -24510,7 +24518,7 @@ static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstruction IrInstruction *result_loc; if (type_has_bits(result_type)) { result_loc = ir_resolve_result(ira, &instruction->base, instruction->result_loc, - result_type, nullptr, true, false, true); + result_type, nullptr, true, true, true); if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc))) return result_loc; } else { diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index 4cea8d1507..7a8edd793c 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -334,40 +334,40 @@ test "async fn with inferred error set" { S.doTheTest(); } -//test "error return trace across suspend points - early return" { -// const p = nonFailing(); -// resume p; -// const p2 = async printTrace(p); -//} -// -//test "error return trace across suspend 
points - async return" { -// const p = nonFailing(); -// const p2 = async printTrace(p); -// resume p; -//} -// -//fn nonFailing() (anyframe->anyerror!void) { -// const Static = struct { -// var frame: @Frame(suspendThenFail) = undefined; -// }; -// Static.frame = async suspendThenFail(); -// return &Static.frame; -//} -//async fn suspendThenFail() anyerror!void { -// suspend; -// return error.Fail; -//} -//async fn printTrace(p: anyframe->(anyerror!void)) void { -// (await p) catch |e| { -// std.testing.expect(e == error.Fail); -// if (@errorReturnTrace()) |trace| { -// expect(trace.index == 1); -// } else switch (builtin.mode) { -// .Debug, .ReleaseSafe => @panic("expected return trace"), -// .ReleaseFast, .ReleaseSmall => {}, -// } -// }; -//} +test "error return trace across suspend points - early return" { + const p = nonFailing(); + resume p; + const p2 = async printTrace(p); +} + +test "error return trace across suspend points - async return" { + const p = nonFailing(); + const p2 = async printTrace(p); + resume p; +} + +fn nonFailing() (anyframe->anyerror!void) { + const Static = struct { + var frame: @Frame(suspendThenFail) = undefined; + }; + Static.frame = async suspendThenFail(); + return &Static.frame; +} +async fn suspendThenFail() anyerror!void { + suspend; + return error.Fail; +} +async fn printTrace(p: anyframe->(anyerror!void)) void { + (await p) catch |e| { + std.testing.expect(e == error.Fail); + if (@errorReturnTrace()) |trace| { + expect(trace.index == 1); + } else switch (builtin.mode) { + .Debug, .ReleaseSafe => @panic("expected return trace"), + .ReleaseFast, .ReleaseSmall => {}, + } + }; +} test "break from suspend" { var my_result: i32 = 1; From 966c9ea63c635a12c923d3ab155a9070affb18c5 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 6 Aug 2019 18:47:09 -0400 Subject: [PATCH 062/125] error return trace across suspend points --- src/all_types.hpp | 1 + src/codegen.cpp | 151 ++++++++++++++++++++++++++++++++++++++-- 
test/runtime_safety.zig | 16 +++-- 3 files changed, 158 insertions(+), 10 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 1dad546a7e..e1fff953b4 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1737,6 +1737,7 @@ struct CodeGen { LLVMValueRef stacksave_fn_val; LLVMValueRef stackrestore_fn_val; LLVMValueRef write_register_fn_val; + LLVMValueRef merge_err_ret_traces_fn_val; LLVMValueRef sp_md_node; LLVMValueRef err_name_table; LLVMValueRef safety_crash_err_fn; diff --git a/src/codegen.cpp b/src/codegen.cpp index d1b5ebedad..24933bd104 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -2026,18 +2026,159 @@ void walk_function_params(CodeGen *g, ZigType *fn_type, FnWalk *fn_walk) { } } +static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) { + if (g->merge_err_ret_traces_fn_val) + return g->merge_err_ret_traces_fn_val; + + assert(g->stack_trace_type != nullptr); + + LLVMTypeRef param_types[] = { + get_llvm_type(g, get_ptr_to_stack_trace_type(g)), + get_llvm_type(g, get_ptr_to_stack_trace_type(g)), + }; + LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), param_types, 2, false); + + Buf *fn_name = get_mangled_name(g, buf_create_from_str("__zig_merge_error_return_traces"), false); + LLVMValueRef fn_val = LLVMAddFunction(g->module, buf_ptr(fn_name), fn_type_ref); + LLVMSetLinkage(fn_val, LLVMInternalLinkage); + LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified)); + addLLVMFnAttr(fn_val, "nounwind"); + add_uwtable_attr(g, fn_val); + // Error return trace memory is in the stack, which is impossible to be at address 0 + // on any architecture. + addLLVMArgAttr(fn_val, (unsigned)0, "nonnull"); + addLLVMArgAttr(fn_val, (unsigned)0, "noalias"); + addLLVMArgAttr(fn_val, (unsigned)0, "writeonly"); + // Error return trace memory is in the stack, which is impossible to be at address 0 + // on any architecture. 
+ addLLVMArgAttr(fn_val, (unsigned)1, "nonnull"); + addLLVMArgAttr(fn_val, (unsigned)1, "noalias"); + addLLVMArgAttr(fn_val, (unsigned)1, "readonly"); + if (g->build_mode == BuildModeDebug) { + ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true"); + ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr); + } + + // this is above the ZigLLVMClearCurrentDebugLocation + LLVMValueRef add_error_return_trace_addr_fn_val = get_add_error_return_trace_addr_fn(g); + + LLVMBasicBlockRef entry_block = LLVMAppendBasicBlock(fn_val, "Entry"); + LLVMBasicBlockRef prev_block = LLVMGetInsertBlock(g->builder); + LLVMValueRef prev_debug_location = LLVMGetCurrentDebugLocation(g->builder); + LLVMPositionBuilderAtEnd(g->builder, entry_block); + ZigLLVMClearCurrentDebugLocation(g->builder); + + // var frame_index: usize = undefined; + // var frames_left: usize = undefined; + // if (src_stack_trace.index < src_stack_trace.instruction_addresses.len) { + // frame_index = 0; + // frames_left = src_stack_trace.index; + // if (frames_left == 0) return; + // } else { + // frame_index = (src_stack_trace.index + 1) % src_stack_trace.instruction_addresses.len; + // frames_left = src_stack_trace.instruction_addresses.len; + // } + // while (true) { + // __zig_add_err_ret_trace_addr(dest_stack_trace, src_stack_trace.instruction_addresses[frame_index]); + // frames_left -= 1; + // if (frames_left == 0) return; + // frame_index = (frame_index + 1) % src_stack_trace.instruction_addresses.len; + // } + LLVMBasicBlockRef return_block = LLVMAppendBasicBlock(fn_val, "Return"); + + LLVMValueRef frame_index_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frame_index"); + LLVMValueRef frames_left_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frames_left"); + + LLVMValueRef dest_stack_trace_ptr = LLVMGetParam(fn_val, 0); + LLVMValueRef src_stack_trace_ptr = LLVMGetParam(fn_val, 1); + + size_t src_index_field_index = 
g->stack_trace_type->data.structure.fields[0].gen_index; + size_t src_addresses_field_index = g->stack_trace_type->data.structure.fields[1].gen_index; + LLVMValueRef src_index_field_ptr = LLVMBuildStructGEP(g->builder, src_stack_trace_ptr, + (unsigned)src_index_field_index, ""); + LLVMValueRef src_addresses_field_ptr = LLVMBuildStructGEP(g->builder, src_stack_trace_ptr, + (unsigned)src_addresses_field_index, ""); + ZigType *slice_type = g->stack_trace_type->data.structure.fields[1].type_entry; + size_t ptr_field_index = slice_type->data.structure.fields[slice_ptr_index].gen_index; + LLVMValueRef src_ptr_field_ptr = LLVMBuildStructGEP(g->builder, src_addresses_field_ptr, (unsigned)ptr_field_index, ""); + size_t len_field_index = slice_type->data.structure.fields[slice_len_index].gen_index; + LLVMValueRef src_len_field_ptr = LLVMBuildStructGEP(g->builder, src_addresses_field_ptr, (unsigned)len_field_index, ""); + LLVMValueRef src_index_val = LLVMBuildLoad(g->builder, src_index_field_ptr, ""); + LLVMValueRef src_ptr_val = LLVMBuildLoad(g->builder, src_ptr_field_ptr, ""); + LLVMValueRef src_len_val = LLVMBuildLoad(g->builder, src_len_field_ptr, ""); + LLVMValueRef no_wrap_bit = LLVMBuildICmp(g->builder, LLVMIntULT, src_index_val, src_len_val, ""); + LLVMBasicBlockRef no_wrap_block = LLVMAppendBasicBlock(fn_val, "NoWrap"); + LLVMBasicBlockRef yes_wrap_block = LLVMAppendBasicBlock(fn_val, "YesWrap"); + LLVMBasicBlockRef loop_block = LLVMAppendBasicBlock(fn_val, "Loop"); + LLVMBuildCondBr(g->builder, no_wrap_bit, no_wrap_block, yes_wrap_block); + + LLVMPositionBuilderAtEnd(g->builder, no_wrap_block); + LLVMValueRef usize_zero = LLVMConstNull(g->builtin_types.entry_usize->llvm_type); + LLVMBuildStore(g->builder, usize_zero, frame_index_ptr); + LLVMBuildStore(g->builder, src_index_val, frames_left_ptr); + LLVMValueRef frames_left_eq_zero_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, src_index_val, usize_zero, ""); + LLVMBuildCondBr(g->builder, frames_left_eq_zero_bit, 
return_block, loop_block); + + LLVMPositionBuilderAtEnd(g->builder, yes_wrap_block); + LLVMValueRef usize_one = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, 1, false); + LLVMValueRef plus_one = LLVMBuildNUWAdd(g->builder, src_index_val, usize_one, ""); + LLVMValueRef mod_len = LLVMBuildURem(g->builder, plus_one, src_len_val, ""); + LLVMBuildStore(g->builder, mod_len, frame_index_ptr); + LLVMBuildStore(g->builder, src_len_val, frames_left_ptr); + LLVMBuildBr(g->builder, loop_block); + + LLVMPositionBuilderAtEnd(g->builder, loop_block); + LLVMValueRef ptr_index = LLVMBuildLoad(g->builder, frame_index_ptr, ""); + LLVMValueRef addr_ptr = LLVMBuildInBoundsGEP(g->builder, src_ptr_val, &ptr_index, 1, ""); + LLVMValueRef this_addr_val = LLVMBuildLoad(g->builder, addr_ptr, ""); + LLVMValueRef args[] = {dest_stack_trace_ptr, this_addr_val}; + ZigLLVMBuildCall(g->builder, add_error_return_trace_addr_fn_val, args, 2, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAlways, ""); + LLVMValueRef prev_frames_left = LLVMBuildLoad(g->builder, frames_left_ptr, ""); + LLVMValueRef new_frames_left = LLVMBuildNUWSub(g->builder, prev_frames_left, usize_one, ""); + LLVMValueRef done_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, new_frames_left, usize_zero, ""); + LLVMBasicBlockRef continue_block = LLVMAppendBasicBlock(fn_val, "Continue"); + LLVMBuildCondBr(g->builder, done_bit, return_block, continue_block); + + LLVMPositionBuilderAtEnd(g->builder, return_block); + LLVMBuildRetVoid(g->builder); + + LLVMPositionBuilderAtEnd(g->builder, continue_block); + LLVMBuildStore(g->builder, new_frames_left, frames_left_ptr); + LLVMValueRef prev_index = LLVMBuildLoad(g->builder, frame_index_ptr, ""); + LLVMValueRef index_plus_one = LLVMBuildNUWAdd(g->builder, prev_index, usize_one, ""); + LLVMValueRef index_mod_len = LLVMBuildURem(g->builder, index_plus_one, src_len_val, ""); + LLVMBuildStore(g->builder, index_mod_len, frame_index_ptr); + LLVMBuildBr(g->builder, loop_block); + + 
LLVMPositionBuilderAtEnd(g->builder, prev_block); + if (!g->strip_debug_symbols) { + LLVMSetCurrentDebugLocation(g->builder, prev_debug_location); + } + + g->merge_err_ret_traces_fn_val = fn_val; + return fn_val; + +} static LLVMValueRef ir_render_save_err_ret_addr(CodeGen *g, IrExecutable *executable, IrInstructionSaveErrRetAddr *save_err_ret_addr_instruction) { assert(g->have_err_ret_tracing); LLVMValueRef return_err_fn = get_return_err_fn(g); - LLVMValueRef args[] = { - get_cur_err_ret_trace_val(g, save_err_ret_addr_instruction->base.scope), - }; - LLVMValueRef call_instruction = ZigLLVMBuildCall(g->builder, return_err_fn, args, 1, + LLVMValueRef my_err_trace_val = get_cur_err_ret_trace_val(g, save_err_ret_addr_instruction->base.scope); + ZigLLVMBuildCall(g->builder, return_err_fn, &my_err_trace_val, 1, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, ""); - return call_instruction; + + if (fn_is_async(g->cur_fn) && g->cur_fn->calls_or_awaits_errorable_fn && + codegen_fn_has_err_ret_tracing_arg(g, g->cur_fn->type_entry->data.fn.fn_type_id.return_type)) + { + LLVMValueRef dest_trace_ptr = LLVMBuildLoad(g->builder, g->cur_err_ret_trace_val_arg, ""); + LLVMValueRef args[] = { dest_trace_ptr, my_err_trace_val }; + ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2, + get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, ""); + } + + return nullptr; } static void gen_assert_resume_id(CodeGen *g, IrInstruction *source_instr, ResumeId resume_id, PanicMsgId msg_id, diff --git a/test/runtime_safety.zig b/test/runtime_safety.zig index ac9037caae..786c516298 100644 --- a/test/runtime_safety.zig +++ b/test/runtime_safety.zig @@ -544,23 +544,29 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { \\ std.os.exit(126); \\} \\ + \\var failing_frame: @Frame(failing) = undefined; + \\ \\pub fn main() void { \\ const p = nonFailing(); \\ resume p; - \\ const p2 = async printTrace(p) catch unreachable; - \\ cancel p2; 
+ \\ const p2 = async printTrace(p); \\} \\ - \\fn nonFailing() promise->anyerror!void { - \\ return async failing() catch unreachable; + \\fn nonFailing() anyframe->anyerror!void { + \\ failing_frame = async failing(); + \\ return &failing_frame; \\} \\ \\async fn failing() anyerror!void { \\ suspend; + \\ return second(); + \\} + \\ + \\async fn second() anyerror!void { \\ return error.Fail; \\} \\ - \\async fn printTrace(p: promise->anyerror!void) void { + \\async fn printTrace(p: anyframe->anyerror!void) void { \\ (await p) catch unreachable; \\} ); From 1afbb53661e655906980668f0224804118a9862e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 6 Aug 2019 19:07:25 -0400 Subject: [PATCH 063/125] fix awaiting when result type is a struct --- BRANCH_TODO | 3 +- src/codegen.cpp | 5 +- test/stage1/behavior.zig | 4 +- test/stage1/behavior/cancel.zig | 168 ++++++++++++++-------------- test/stage1/behavior/coroutines.zig | 33 ++++++ 5 files changed, 122 insertions(+), 91 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index 1efaf1acc4..294bb42d55 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,5 +1,4 @@ - * error return tracing - handle `await` and function calls - * go over the commented out tests + * go over the commented out tests in cancel.zig * compile error for error: expected anyframe->T, found 'anyframe' * compile error for error: expected anyframe->T, found 'i32' * await of a non async function diff --git a/src/codegen.cpp b/src/codegen.cpp index 24933bd104..7a27585e45 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -2300,9 +2300,8 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable, static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *instruction) { if (fn_is_async(g->cur_fn)) { LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; - bool ret_type_has_bits = instruction->operand != nullptr && - type_has_bits(instruction->operand->value.type); - ZigType 
*ret_type = ret_type_has_bits ? instruction->operand->value.type : nullptr; + ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type; + bool ret_type_has_bits = type_has_bits(ret_type); if (ir_want_runtime_safety(g, &instruction->base)) { LLVMValueRef new_resume_index = LLVMConstAllOnes(usize_type_ref); diff --git a/test/stage1/behavior.zig b/test/stage1/behavior.zig index 59401fbe84..71af5586ed 100644 --- a/test/stage1/behavior.zig +++ b/test/stage1/behavior.zig @@ -39,10 +39,10 @@ comptime { _ = @import("behavior/bugs/828.zig"); _ = @import("behavior/bugs/920.zig"); _ = @import("behavior/byval_arg_var.zig"); - //_ = @import("behavior/cancel.zig"); + _ = @import("behavior/cancel.zig"); _ = @import("behavior/cast.zig"); _ = @import("behavior/const_slice_child.zig"); - //_ = @import("behavior/coroutine_await_struct.zig"); + _ = @import("behavior/coroutine_await_struct.zig"); _ = @import("behavior/coroutines.zig"); _ = @import("behavior/defer.zig"); _ = @import("behavior/enum.zig"); diff --git a/test/stage1/behavior/cancel.zig b/test/stage1/behavior/cancel.zig index efc0df1aed..cb8a075279 100644 --- a/test/stage1/behavior/cancel.zig +++ b/test/stage1/behavior/cancel.zig @@ -1,86 +1,86 @@ const std = @import("std"); -var defer_f1: bool = false; -var defer_f2: bool = false; -var defer_f3: bool = false; - -test "cancel forwards" { - const p = async f1() catch unreachable; - cancel p; - std.testing.expect(defer_f1); - std.testing.expect(defer_f2); - std.testing.expect(defer_f3); -} - -async fn f1() void { - defer { - defer_f1 = true; - } - await (async f2() catch unreachable); -} - -async fn f2() void { - defer { - defer_f2 = true; - } - await (async f3() catch unreachable); -} - -async fn f3() void { - defer { - defer_f3 = true; - } - suspend; -} - -var defer_b1: bool = false; -var defer_b2: bool = false; -var defer_b3: bool = false; -var defer_b4: bool = false; - -test "cancel backwards" { - const p = async b1() catch unreachable; - cancel p; - 
std.testing.expect(defer_b1); - std.testing.expect(defer_b2); - std.testing.expect(defer_b3); - std.testing.expect(defer_b4); -} - -async fn b1() void { - defer { - defer_b1 = true; - } - await (async b2() catch unreachable); -} - -var b4_handle: promise = undefined; - -async fn b2() void { - const b3_handle = async b3() catch unreachable; - resume b4_handle; - cancel b4_handle; - defer { - defer_b2 = true; - } - const value = await b3_handle; - @panic("unreachable"); -} - -async fn b3() i32 { - defer { - defer_b3 = true; - } - await (async b4() catch unreachable); - return 1234; -} - -async fn b4() void { - defer { - defer_b4 = true; - } - suspend { - b4_handle = @handle(); - } - suspend; -} +//var defer_f1: bool = false; +//var defer_f2: bool = false; +//var defer_f3: bool = false; +// +//test "cancel forwards" { +// const p = async f1() catch unreachable; +// cancel p; +// std.testing.expect(defer_f1); +// std.testing.expect(defer_f2); +// std.testing.expect(defer_f3); +//} +// +//async fn f1() void { +// defer { +// defer_f1 = true; +// } +// await (async f2() catch unreachable); +//} +// +//async fn f2() void { +// defer { +// defer_f2 = true; +// } +// await (async f3() catch unreachable); +//} +// +//async fn f3() void { +// defer { +// defer_f3 = true; +// } +// suspend; +//} +// +//var defer_b1: bool = false; +//var defer_b2: bool = false; +//var defer_b3: bool = false; +//var defer_b4: bool = false; +// +//test "cancel backwards" { +// const p = async b1() catch unreachable; +// cancel p; +// std.testing.expect(defer_b1); +// std.testing.expect(defer_b2); +// std.testing.expect(defer_b3); +// std.testing.expect(defer_b4); +//} +// +//async fn b1() void { +// defer { +// defer_b1 = true; +// } +// await (async b2() catch unreachable); +//} +// +//var b4_handle: promise = undefined; +// +//async fn b2() void { +// const b3_handle = async b3() catch unreachable; +// resume b4_handle; +// cancel b4_handle; +// defer { +// defer_b2 = true; +// } +// const 
value = await b3_handle; +// @panic("unreachable"); +//} +// +//async fn b3() i32 { +// defer { +// defer_b3 = true; +// } +// await (async b4() catch unreachable); +// return 1234; +//} +// +//async fn b4() void { +// defer { +// defer_b4 = true; +// } +// suspend { +// b4_handle = @handle(); +// } +// suspend; +//} diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index 7a8edd793c..d11f6831b3 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -474,3 +474,36 @@ test "suspension points inside branching control flow" { }; S.doTheTest(); } + +test "call async function which has struct return type" { + const S = struct { + var frame: anyframe = undefined; + + fn doTheTest() void { + _ = async atest(); + resume frame; + } + + fn atest() void { + const result = func(); + expect(result.x == 5); + expect(result.y == 6); + } + + const Point = struct { + x: usize, + y: usize, + }; + + fn func() Point { + suspend { + frame = @frame(); + } + return Point{ + .x = 5, + .y = 6, + }; + } + }; + S.doTheTest(); +} From 7e1fcb55b3e96524ce6f5620e2e98a3e3cc56608 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 7 Aug 2019 00:52:56 -0400 Subject: [PATCH 064/125] implement cancel all behavior tests passing in this branch --- BRANCH_TODO | 2 +- src/all_types.hpp | 3 +- src/analyze.cpp | 3 + src/codegen.cpp | 109 ++++++++++++-------- src/ir.cpp | 55 ++++++++-- src/ir_print.cpp | 2 +- test/stage1/behavior/cancel.zig | 176 +++++++++++++++++--------------- 7 files changed, 216 insertions(+), 134 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index 294bb42d55..ca3888f391 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,4 +1,4 @@ - * go over the commented out tests in cancel.zig + * clean up the bitcasting of awaiter fn ptr * compile error for error: expected anyframe->T, found 'anyframe' * compile error for error: expected anyframe->T, found 'i32' * await of a non async function diff --git 
a/src/all_types.hpp b/src/all_types.hpp index e1fff953b4..a7fb542ad3 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1556,6 +1556,7 @@ enum PanicMsgId { PanicMsgIdBadAwait, PanicMsgIdBadReturn, PanicMsgIdResumedAnAwaitingFn, + PanicMsgIdResumedACancelingFn, PanicMsgIdFrameTooSmall, PanicMsgIdResumedFnPendingAwait, @@ -3432,7 +3433,7 @@ struct IrInstructionErrorUnion { struct IrInstructionCancel { IrInstruction base; - IrInstruction *target; + IrInstruction *frame; }; struct IrInstructionAtomicRmw { diff --git a/src/analyze.cpp b/src/analyze.cpp index 764b28ed45..cf71bd90f3 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -3811,6 +3811,9 @@ static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) { } else if (fn->inferred_async_node->type == NodeTypeAwaitExpr) { add_error_note(g, msg, fn->inferred_async_node, buf_sprintf("await is a suspend point")); + } else if (fn->inferred_async_node->type == NodeTypeCancel) { + add_error_note(g, msg, fn->inferred_async_node, + buf_sprintf("cancel is a suspend point")); } else { zig_unreachable(); } diff --git a/src/codegen.cpp b/src/codegen.cpp index 7a27585e45..2a6c5f8b8f 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -911,11 +911,13 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) { case PanicMsgIdBadResume: return buf_create_from_str("resumed an async function which already returned"); case PanicMsgIdBadAwait: - return buf_create_from_str("async function awaited twice"); + return buf_create_from_str("async function awaited/canceled twice"); case PanicMsgIdBadReturn: return buf_create_from_str("async function returned twice"); case PanicMsgIdResumedAnAwaitingFn: return buf_create_from_str("awaiting function resumed"); + case PanicMsgIdResumedACancelingFn: + return buf_create_from_str("canceling function resumed"); case PanicMsgIdFrameTooSmall: return buf_create_from_str("frame too small"); case PanicMsgIdResumedFnPendingAwait: @@ -2189,12 +2191,12 @@ static void gen_assert_resume_id(CodeGen 
*g, IrInstruction *source_instr, Resume if (end_bb == nullptr) end_bb = LLVMAppendBasicBlock(g->cur_fn_val, "OkResume"); LLVMValueRef ok_bit; if (resume_id == ResumeIdAwaitEarlyReturn) { - LLVMValueRef last_value = LLVMBuildSub(g->builder, LLVMConstAllOnes(usize_type_ref), - LLVMConstInt(usize_type_ref, ResumeIdAwaitEarlyReturn, false), ""); + LLVMValueRef last_value = LLVMConstSub(LLVMConstAllOnes(usize_type_ref), + LLVMConstInt(usize_type_ref, ResumeIdAwaitEarlyReturn, false)); ok_bit = LLVMBuildICmp(g->builder, LLVMIntULT, LLVMGetParam(g->cur_fn_val, 1), last_value, ""); } else { - LLVMValueRef expected_value = LLVMBuildSub(g->builder, LLVMConstAllOnes(usize_type_ref), - LLVMConstInt(usize_type_ref, resume_id, false), ""); + LLVMValueRef expected_value = LLVMConstSub(LLVMConstAllOnes(usize_type_ref), + LLVMConstInt(usize_type_ref, resume_id, false)); ok_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, LLVMGetParam(g->cur_fn_val, 1), expected_value, ""); } LLVMBuildCondBr(g->builder, ok_bit, end_bb, bad_resume_block); @@ -2210,11 +2212,13 @@ static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef tar { LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; if (fn_val == nullptr) { - if (g->anyframe_fn_type == nullptr) { - (void)get_llvm_type(g, get_any_frame_type(g, nullptr)); - } LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_fn_ptr_index, ""); - fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, ""); + LLVMValueRef fn_val_typed = LLVMBuildLoad(g->builder, fn_ptr_ptr, ""); + LLVMValueRef as_int = LLVMBuildPtrToInt(g->builder, fn_val_typed, usize_type_ref, ""); + LLVMValueRef one = LLVMConstInt(usize_type_ref, 1, false); + LLVMValueRef mask_val = LLVMConstNot(one); + LLVMValueRef as_int_masked = LLVMBuildAnd(g->builder, as_int, mask_val, ""); + fn_val = LLVMBuildIntToPtr(g->builder, as_int_masked, LLVMTypeOf(fn_val_typed), ""); } if (arg_val == nullptr) { arg_val = LLVMBuildSub(g->builder, 
LLVMConstAllOnes(usize_type_ref), @@ -2226,6 +2230,17 @@ static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef tar return ZigLLVMBuildCall(g->builder, fn_val, args, 2, LLVMFastCallConv, ZigLLVM_FnInlineAuto, ""); } +static LLVMBasicBlockRef gen_suspend_begin(CodeGen *g, const char *name_hint) { + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; + LLVMBasicBlockRef resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, name_hint); + size_t new_block_index = g->cur_resume_block_count; + g->cur_resume_block_count += 1; + LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false); + LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, resume_bb); + LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr); + return resume_bb; +} + static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable, IrInstructionReturnBegin *instruction) { @@ -2245,12 +2260,7 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable, } // Prepare to be suspended. We might end up not having to suspend though. - LLVMBasicBlockRef resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, "ReturnResume"); - size_t new_block_index = g->cur_resume_block_count; - g->cur_resume_block_count += 1; - LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false); - LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, resume_bb); - LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr); + LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "ReturnResume"); LLVMValueRef zero = LLVMConstNull(usize_type_ref); LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref); @@ -2335,7 +2345,10 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns // We need to resume the caller by tail calling them. 
ZigType *any_frame_type = get_any_frame_type(g, ret_type); - LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, g->cur_async_prev_val, + LLVMValueRef one = LLVMConstInt(usize_type_ref, 1, false); + LLVMValueRef mask_val = LLVMConstNot(one); + LLVMValueRef masked_prev_val = LLVMBuildAnd(g->builder, g->cur_async_prev_val, mask_val, ""); + LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, masked_prev_val, get_llvm_type(g, any_frame_type), ""); LLVMValueRef call_inst = gen_resume(g, nullptr, their_frame_ptr, ResumeIdReturn, nullptr); ZigLLVMSetTailCall(call_inst); @@ -3945,13 +3958,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr } else if (callee_is_async) { ZigType *ptr_result_type = get_pointer_to_type(g, src_return_type, true); - LLVMBasicBlockRef call_bb = LLVMAppendBasicBlock(g->cur_fn_val, "CallResume"); - size_t new_block_index = g->cur_resume_block_count; - g->cur_resume_block_count += 1; - LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false); - LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, call_bb); - - LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr); + LLVMBasicBlockRef call_bb = gen_suspend_begin(g, "CallResume"); LLVMValueRef call_inst = gen_resume(g, fn_val, frame_result_loc, ResumeIdCall, nullptr); ZigLLVMSetTailCall(call_inst); @@ -4672,10 +4679,6 @@ static LLVMValueRef ir_render_error_return_trace(CodeGen *g, IrExecutable *execu return cur_err_ret_trace_val; } -static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrInstructionCancel *instruction) { - zig_panic("TODO cancel"); -} - static LLVMAtomicOrdering to_LLVMAtomicOrdering(AtomicOrder atomic_order) { switch (atomic_order) { case AtomicOrderUnordered: return LLVMAtomicOrderingUnordered; @@ -5416,13 +5419,7 @@ static LLVMValueRef ir_render_assert_non_null(CodeGen *g, IrExecutable *executab static LLVMValueRef ir_render_suspend_begin(CodeGen 
*g, IrExecutable *executable, IrInstructionSuspendBegin *instruction) { - LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; - instruction->resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, "SuspendResume"); - size_t new_block_index = g->cur_resume_block_count; - g->cur_resume_block_count += 1; - LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false); - LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, instruction->resume_bb); - LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr); + instruction->resume_bb = gen_suspend_begin(g, "SuspendResume"); return nullptr; } @@ -5436,6 +5433,43 @@ static LLVMValueRef ir_render_suspend_finish(CodeGen *g, IrExecutable *executabl return nullptr; } +static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrInstructionCancel *instruction) { + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; + LLVMValueRef zero = LLVMConstNull(usize_type_ref); + LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref); + LLVMValueRef one = LLVMConstInt(usize_type_ref, 1, false); + + LLVMValueRef target_frame_ptr = ir_llvm_value(g, instruction->frame); + LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "CancelResume"); + + LLVMValueRef awaiter_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, ""); + LLVMValueRef awaiter_ored_val = LLVMBuildOr(g->builder, awaiter_val, one, ""); + LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_awaiter_index, ""); + + LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_ored_val, + LLVMAtomicOrderingRelease, g->is_single_threaded); + + LLVMBasicBlockRef complete_suspend_block = LLVMAppendBasicBlock(g->cur_fn_val, "CancelSuspend"); + LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn"); + + LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, 
resume_bb, 2); + LLVMAddCase(switch_instr, zero, complete_suspend_block); + LLVMAddCase(switch_instr, all_ones, early_return_block); + + LLVMPositionBuilderAtEnd(g->builder, complete_suspend_block); + LLVMBuildRetVoid(g->builder); + + LLVMPositionBuilderAtEnd(g->builder, early_return_block); + LLVMValueRef call_inst = gen_resume(g, nullptr, target_frame_ptr, ResumeIdAwaitEarlyReturn, awaiter_ored_val); + ZigLLVMSetTailCall(call_inst); + LLVMBuildRetVoid(g->builder); + + LLVMPositionBuilderAtEnd(g->builder, resume_bb); + gen_assert_resume_id(g, &instruction->base, ResumeIdReturn, PanicMsgIdResumedACancelingFn, nullptr); + + return nullptr; +} + static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInstructionAwaitGen *instruction) { LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; LLVMValueRef zero = LLVMConstNull(usize_type_ref); @@ -5444,12 +5478,7 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst ZigType *ptr_result_type = get_pointer_to_type(g, result_type, true); // Prepare to be suspended - LLVMBasicBlockRef resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, "AwaitResume"); - size_t new_block_index = g->cur_resume_block_count; - g->cur_resume_block_count += 1; - LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false); - LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, resume_bb); - LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr); + LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "AwaitResume"); // At this point resuming the function will do the correct thing. // This code is as if it is running inside the suspend block. 
diff --git a/src/ir.cpp b/src/ir.cpp index 7cb868cab2..853cf4daa1 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -3271,6 +3271,16 @@ static IrInstruction *ir_build_suspend_finish(IrBuilder *irb, Scope *scope, AstN return &instruction->base; } +static IrInstruction *ir_build_cancel(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *frame) { + IrInstructionCancel *instruction = ir_build_instruction(irb, scope, source_node); + instruction->base.value.type = irb->codegen->builtin_types.entry_void; + instruction->frame = frame; + + ir_ref_instruction(frame, irb->current_basic_block); + + return &instruction->base; +} + static IrInstruction *ir_build_await_src(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *frame, ResultLoc *result_loc) { @@ -7820,11 +7830,26 @@ static IrInstruction *ir_gen_fn_proto(IrBuilder *irb, Scope *parent_scope, AstNo static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node) { assert(node->type == NodeTypeCancel); - IrInstruction *target_inst = ir_gen_node(irb, node->data.cancel_expr.expr, scope); - if (target_inst == irb->codegen->invalid_instruction) + ZigFn *fn_entry = exec_fn_entry(irb->exec); + if (!fn_entry) { + add_node_error(irb->codegen, node, buf_sprintf("cancel outside function definition")); + return irb->codegen->invalid_instruction; + } + ScopeSuspend *existing_suspend_scope = get_scope_suspend(scope); + if (existing_suspend_scope) { + if (!existing_suspend_scope->reported_err) { + ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot cancel inside suspend block")); + add_error_note(irb->codegen, msg, existing_suspend_scope->base.source_node, buf_sprintf("suspend block here")); + existing_suspend_scope->reported_err = true; + } + return irb->codegen->invalid_instruction; + } + + IrInstruction *operand = ir_gen_node(irb, node->data.cancel_expr.expr, scope); + if (operand == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; - 
zig_panic("TODO ir_gen_cancel"); + return ir_build_cancel(irb, scope, node, operand); } static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) { @@ -23781,10 +23806,6 @@ static IrInstruction *ir_analyze_instruction_tag_type(IrAnalyze *ira, IrInstruct } } -static IrInstruction *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructionCancel *instruction) { - zig_panic("TODO analyze cancel"); -} - static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstruction *op) { ZigType *operand_type = ir_resolve_type(ira, op); if (type_is_invalid(operand_type)) @@ -24474,6 +24495,26 @@ static IrInstruction *ir_analyze_instruction_suspend_finish(IrAnalyze *ira, return ir_build_suspend_finish(&ira->new_irb, instruction->base.scope, instruction->base.source_node, begin); } +static IrInstruction *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructionCancel *instruction) { + IrInstruction *frame = instruction->frame->child; + if (type_is_invalid(frame->value.type)) + return ira->codegen->invalid_instruction; + + ZigType *any_frame_type = get_any_frame_type(ira->codegen, nullptr); + IrInstruction *casted_frame = ir_implicit_cast(ira, frame, any_frame_type); + if (type_is_invalid(casted_frame->value.type)) + return ira->codegen->invalid_instruction; + + ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec); + ir_assert(fn_entry != nullptr, &instruction->base); + + if (fn_entry->inferred_async_node == nullptr) { + fn_entry->inferred_async_node = instruction->base.source_node; + } + + return ir_build_cancel(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_frame); +} + static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstructionAwaitSrc *instruction) { IrInstruction *frame_ptr = instruction->frame->child; if (type_is_invalid(frame_ptr->value.type)) diff --git a/src/ir_print.cpp b/src/ir_print.cpp index c56a660e29..0348cfc986 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -1396,7 +1396,7 
@@ static void ir_print_error_union(IrPrint *irp, IrInstructionErrorUnion *instruct static void ir_print_cancel(IrPrint *irp, IrInstructionCancel *instruction) { fprintf(irp->f, "cancel "); - ir_print_other_instruction(irp, instruction->target); + ir_print_other_instruction(irp, instruction->frame); } static void ir_print_atomic_rmw(IrPrint *irp, IrInstructionAtomicRmw *instruction) { diff --git a/test/stage1/behavior/cancel.zig b/test/stage1/behavior/cancel.zig index cb8a075279..c8636212b0 100644 --- a/test/stage1/behavior/cancel.zig +++ b/test/stage1/behavior/cancel.zig @@ -1,86 +1,94 @@ const std = @import("std"); +const expect = std.testing.expect; -//var defer_f1: bool = false; -//var defer_f2: bool = false; -//var defer_f3: bool = false; -// -//test "cancel forwards" { -// const p = async f1() catch unreachable; -// cancel p; -// std.testing.expect(defer_f1); -// std.testing.expect(defer_f2); -// std.testing.expect(defer_f3); -//} -// -//async fn f1() void { -// defer { -// defer_f1 = true; -// } -// await (async f2() catch unreachable); -//} -// -//async fn f2() void { -// defer { -// defer_f2 = true; -// } -// await (async f3() catch unreachable); -//} -// -//async fn f3() void { -// defer { -// defer_f3 = true; -// } -// suspend; -//} -// -//var defer_b1: bool = false; -//var defer_b2: bool = false; -//var defer_b3: bool = false; -//var defer_b4: bool = false; -// -//test "cancel backwards" { -// const p = async b1() catch unreachable; -// cancel p; -// std.testing.expect(defer_b1); -// std.testing.expect(defer_b2); -// std.testing.expect(defer_b3); -// std.testing.expect(defer_b4); -//} -// -//async fn b1() void { -// defer { -// defer_b1 = true; -// } -// await (async b2() catch unreachable); -//} -// -//var b4_handle: promise = undefined; -// -//async fn b2() void { -// const b3_handle = async b3() catch unreachable; -// resume b4_handle; -// cancel b4_handle; -// defer { -// defer_b2 = true; -// } -// const value = await b3_handle; -// 
@panic("unreachable"); -//} -// -//async fn b3() i32 { -// defer { -// defer_b3 = true; -// } -// await (async b4() catch unreachable); -// return 1234; -//} -// -//async fn b4() void { -// defer { -// defer_b4 = true; -// } -// suspend { -// b4_handle = @handle(); -// } -// suspend; -//} +var defer_f1: bool = false; +var defer_f2: bool = false; +var defer_f3: bool = false; +var f3_frame: anyframe = undefined; + +test "cancel forwards" { + _ = async atest1(); + resume f3_frame; +} + +fn atest1() void { + const p = async f1(); + cancel &p; + expect(defer_f1); + expect(defer_f2); + expect(defer_f3); +} + +async fn f1() void { + defer { + defer_f1 = true; + } + var f2_frame = async f2(); + await f2_frame; +} + +async fn f2() void { + defer { + defer_f2 = true; + } + f3(); +} + +async fn f3() void { + f3_frame = @frame(); + defer { + defer_f3 = true; + } + suspend; +} + +var defer_b1: bool = false; +var defer_b2: bool = false; +var defer_b3: bool = false; +var defer_b4: bool = false; + +test "cancel backwards" { + _ = async b1(); + resume b4_handle; + expect(defer_b1); + expect(defer_b2); + expect(defer_b3); + expect(defer_b4); +} + +async fn b1() void { + defer { + defer_b1 = true; + } + b2(); +} + +var b4_handle: anyframe = undefined; + +async fn b2() void { + const b3_handle = async b3(); + resume b4_handle; + defer { + defer_b2 = true; + } + const value = await b3_handle; + expect(value == 1234); +} + +async fn b3() i32 { + defer { + defer_b3 = true; + } + b4(); + return 1234; +} + +async fn b4() void { + defer { + defer_b4 = true; + } + suspend { + b4_handle = @frame(); + } + suspend; +} From f587fa1cd73c3c0382e1bd2da2e24a7473421a2c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 7 Aug 2019 10:56:19 -0400 Subject: [PATCH 065/125] clean up the bitcasting of awaiter fn ptr --- BRANCH_TODO | 1 - src/codegen.cpp | 7 +------ 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index ca3888f391..bf7fc98310 100644 --- 
a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,4 +1,3 @@ - * clean up the bitcasting of awaiter fn ptr * compile error for error: expected anyframe->T, found 'anyframe' * compile error for error: expected anyframe->T, found 'i32' * await of a non async function diff --git a/src/codegen.cpp b/src/codegen.cpp index 2a6c5f8b8f..86bd48c894 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -2213,12 +2213,7 @@ static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef tar LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; if (fn_val == nullptr) { LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_fn_ptr_index, ""); - LLVMValueRef fn_val_typed = LLVMBuildLoad(g->builder, fn_ptr_ptr, ""); - LLVMValueRef as_int = LLVMBuildPtrToInt(g->builder, fn_val_typed, usize_type_ref, ""); - LLVMValueRef one = LLVMConstInt(usize_type_ref, 1, false); - LLVMValueRef mask_val = LLVMConstNot(one); - LLVMValueRef as_int_masked = LLVMBuildAnd(g->builder, as_int, mask_val, ""); - fn_val = LLVMBuildIntToPtr(g->builder, as_int_masked, LLVMTypeOf(fn_val_typed), ""); + fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, ""); } if (arg_val == nullptr) { arg_val = LLVMBuildSub(g->builder, LLVMConstAllOnes(usize_type_ref), From e11cafbd4f11fa5eae0cbdf03854291834b4cd77 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 7 Aug 2019 10:56:37 -0400 Subject: [PATCH 066/125] cancel works on non-pointers --- src/ir.cpp | 16 +++++++++++++--- test/stage1/behavior/cancel.zig | 16 ++++++++++++++++ 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/src/ir.cpp b/src/ir.cpp index 853cf4daa1..76e8c91f39 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -7845,7 +7845,7 @@ static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node) return irb->codegen->invalid_instruction; } - IrInstruction *operand = ir_gen_node(irb, node->data.cancel_expr.expr, scope); + IrInstruction *operand = ir_gen_node_extra(irb, node->data.cancel_expr.expr, 
scope, LValPtr, nullptr); if (operand == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; @@ -24496,10 +24496,20 @@ static IrInstruction *ir_analyze_instruction_suspend_finish(IrAnalyze *ira, } static IrInstruction *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructionCancel *instruction) { - IrInstruction *frame = instruction->frame->child; - if (type_is_invalid(frame->value.type)) + IrInstruction *frame_ptr = instruction->frame->child; + if (type_is_invalid(frame_ptr->value.type)) return ira->codegen->invalid_instruction; + IrInstruction *frame; + if (frame_ptr->value.type->id == ZigTypeIdPointer && + frame_ptr->value.type->data.pointer.ptr_len == PtrLenSingle && + frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdCoroFrame) + { + frame = frame_ptr; + } else { + frame = ir_get_deref(ira, &instruction->base, frame_ptr, nullptr); + } + ZigType *any_frame_type = get_any_frame_type(ira->codegen, nullptr); IrInstruction *casted_frame = ir_implicit_cast(ira, frame, any_frame_type); if (type_is_invalid(casted_frame->value.type)) diff --git a/test/stage1/behavior/cancel.zig b/test/stage1/behavior/cancel.zig index c8636212b0..b546857ae3 100644 --- a/test/stage1/behavior/cancel.zig +++ b/test/stage1/behavior/cancel.zig @@ -92,3 +92,19 @@ async fn b4() void { } suspend; } + +test "cancel on a non-pointer" { + const S = struct { + fn doTheTest() void { + _ = async atest(); + } + fn atest() void { + var f = async func(); + cancel f; + } + fn func() void { + suspend; + } + }; + S.doTheTest(); +} From 8fcf21fefce56695820b5ec31161589822df8762 Mon Sep 17 00:00:00 2001 From: Ryan Saunderson Date: Wed, 7 Aug 2019 12:51:30 -0500 Subject: [PATCH 067/125] modify header precedence for zig cc, resolves intrinsics issues (#3027) --- src/codegen.cpp | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index ef24716dff..881b83c169 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ 
-8292,15 +8292,18 @@ void add_cc_args(CodeGen *g, ZigList &args, const char *out_dep_pa } } + //note(dimenus): appending libc headers before c_headers breaks intrinsics + //and other compiler specific items + // According to Rich Felker libc headers are supposed to go before C language headers. + args.append("-isystem"); + args.append(buf_ptr(g->zig_c_headers_dir)); + for (size_t i = 0; i < g->libc_include_dir_len; i += 1) { Buf *include_dir = g->libc_include_dir_list[i]; args.append("-isystem"); args.append(buf_ptr(include_dir)); } - // According to Rich Felker libc headers are supposed to go before C language headers. - args.append("-isystem"); - args.append(buf_ptr(g->zig_c_headers_dir)); if (g->zig_target->is_native) { args.append("-march=native"); From 2cd5e555818583e77e5601d43d55339e8c4017b0 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 7 Aug 2019 16:27:58 -0400 Subject: [PATCH 068/125] std.math.min: return a more restrictive type sometimes --- std/math.zig | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 59 insertions(+), 2 deletions(-) diff --git a/std/math.zig b/std/math.zig index ac06a07953..2745fe6508 100644 --- a/std/math.zig +++ b/std/math.zig @@ -242,12 +242,69 @@ pub fn floatExponentBits(comptime T: type) comptime_int { }; } -pub fn min(x: var, y: var) @typeOf(x + y) { - return if (x < y) x else y; +/// Given two types, returns the smallest one which is capable of holding the +/// full range of the minimum value. 
+pub fn Min(comptime A: type, comptime B: type) type { + return switch (@typeInfo(A)) { + .Int => |a_info| switch (@typeInfo(B)) { + .Int => |b_info| blk: { + if (a_info.is_signed == b_info.is_signed) { + break :blk if (a_info.bits < b_info.bits) A else B; + } else if (a_info.is_signed) { + break :blk A; + } else { + break :blk B; + } + }, + .ComptimeInt => A, + else => @compileError("unsupported type: " ++ @typeName(B)), + }, + .Float => |a_info| if (a_info.bits < @typeInfo(B).Float.bits) A else B, + .ComptimeInt => B, + .ComptimeFloat => B, + else => @compileError("unsupported type: " ++ @typeName(A)), + }; +} + +/// Returns the smaller number. When one of the parameter's type's full range fits in the other, +/// the return type is the smaller type. +pub fn min(x: var, y: var) Min(@typeOf(x), @typeOf(y)) { + const Result = Min(@typeOf(x), @typeOf(y)); + if (x < y) { + // TODO Zig should allow this as an implicit cast because x is immutable and in this + // scope it is known to fit in the return type. + switch (@typeInfo(Result)) { + .Int => return @intCast(Result, x), + .Float => return @floatCast(Result, x), + else => return x, + } + } else { + // TODO Zig should allow this as an implicit cast because y is immutable and in this + // scope it is known to fit in the return type. 
+ switch (@typeInfo(Result)) { + .Int => return @intCast(Result, y), + .Float => return @floatCast(Result, y), + else => return y, + } + } } test "math.min" { testing.expect(min(i32(-1), i32(2)) == -1); + { + var a: u16 = 999; + var b: u32 = 10; + var result = min(a, b); + testing.expect(@typeOf(result) == u16); + testing.expect(result == 10); + } + { + var a: f64 = 10.34; + var b: f32 = 999.12; + var result = min(a, b); + testing.expect(@typeOf(result) == f32); + testing.expect(result == 10.34); + } } pub fn max(x: var, y: var) @typeOf(x + y) { From 8621e3b5bd814005129b58469d93c2499e3d085e Mon Sep 17 00:00:00 2001 From: Sam Tebbs Date: Wed, 7 Aug 2019 11:51:12 +0100 Subject: [PATCH 069/125] Don't emit clang error if source or filename pointer is null --- src/ir.cpp | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/ir.cpp b/src/ir.cpp index 65a21a418d..2b096a3383 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -21312,12 +21312,15 @@ static IrInstruction *ir_analyze_instruction_c_import(IrAnalyze *ira, IrInstruct } for (size_t i = 0; i < errors_len; i += 1) { Stage2ErrorMsg *clang_err = &errors_ptr[i]; - ErrorMsg *err_msg = err_msg_create_with_offset( - clang_err->filename_ptr ? - buf_create_from_mem(clang_err->filename_ptr, clang_err->filename_len) : buf_alloc(), - clang_err->line, clang_err->column, clang_err->offset, clang_err->source, - buf_create_from_mem(clang_err->msg_ptr, clang_err->msg_len)); - err_msg_add_note(parent_err_msg, err_msg); + // Clang can emit "too many errors, stopping now", in which case `source` and `filename_ptr` are null + if (clang_err->source && clang_err->filename_ptr) { + ErrorMsg *err_msg = err_msg_create_with_offset( + clang_err->filename_ptr ? 
+ buf_create_from_mem(clang_err->filename_ptr, clang_err->filename_len) : buf_alloc(), + clang_err->line, clang_err->column, clang_err->offset, clang_err->source, + buf_create_from_mem(clang_err->msg_ptr, clang_err->msg_len)); + err_msg_add_note(parent_err_msg, err_msg); + } } return ira->codegen->invalid_instruction; From f2bef0447a523e6f21ab21e4845aa982a1332572 Mon Sep 17 00:00:00 2001 From: Euan Torano Date: Wed, 7 Aug 2019 22:19:25 +0100 Subject: [PATCH 070/125] Remove unneeded casts. --- std/os/windows.zig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/std/os/windows.zig b/std/os/windows.zig index a8f5b54644..4c6bfe70d5 100644 --- a/std/os/windows.zig +++ b/std/os/windows.zig @@ -140,16 +140,16 @@ pub const RtlGenRandomError = error{Unexpected}; pub fn RtlGenRandom(output: []u8) RtlGenRandomError!void { var total_read: usize = 0; var buff: []u8 = output[0..]; - const max_read_size: ULONG = ULONG(maxInt(ULONG)); + const max_read_size: ULONG = maxInt(ULONG); while (total_read < output.len) { - const to_read: ULONG = @intCast(ULONG, math.min(buff.len, max_read_size)); + const to_read: ULONG = math.min(buff.len, max_read_size); if (advapi32.RtlGenRandom(buff.ptr, to_read) == 0) { return unexpectedError(kernel32.GetLastError()); } - total_read += @intCast(usize, to_read); + total_read += to_read; buff = buff[to_read..]; } } From 34bfdf193aee4cb4fc931c6cc4ee82ef0a3a506f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 8 Aug 2019 11:37:49 -0400 Subject: [PATCH 071/125] cancel, defer, errdefer all working as intended now --- BRANCH_TODO | 3 +- src/all_types.hpp | 7 ++ src/codegen.cpp | 14 +++ src/ir.cpp | 163 +++++++++++++++++++++------- src/ir_print.cpp | 8 ++ test/stage1/behavior/cancel.zig | 9 +- test/stage1/behavior/coroutines.zig | 107 ++++++++++++++---- 7 files changed, 246 insertions(+), 65 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index bf7fc98310..d7f6b31dd5 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -2,8 +2,7 @@ * 
compile error for error: expected anyframe->T, found 'i32' * await of a non async function * async call on a non async function - * cancel - * defer and errdefer + * a test where an async function destroys its own frame in a defer * implicit cast of normal function to async function should be allowed when it is inferred to be async * revive std.event.Loop * @typeInfo for @Frame(func) diff --git a/src/all_types.hpp b/src/all_types.hpp index a7fb542ad3..e6daa1c726 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -2363,6 +2363,7 @@ enum IrInstructionId { IrInstructionIdAwaitSrc, IrInstructionIdAwaitGen, IrInstructionIdCoroResume, + IrInstructionIdTestCancelRequested, }; struct IrInstruction { @@ -3636,6 +3637,12 @@ struct IrInstructionCoroResume { IrInstruction *frame; }; +struct IrInstructionTestCancelRequested { + IrInstruction base; + + bool use_return_begin_prev_value; +}; + enum ResultLocId { ResultLocIdInvalid, ResultLocIdNone, diff --git a/src/codegen.cpp b/src/codegen.cpp index 86bd48c894..00458c7665 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -5557,6 +5557,18 @@ static LLVMValueRef ir_render_frame_size(CodeGen *g, IrExecutable *executable, return gen_frame_size(g, fn_val); } +static LLVMValueRef ir_render_test_cancel_requested(CodeGen *g, IrExecutable *executable, + IrInstructionTestCancelRequested *instruction) +{ + if (!fn_is_async(g->cur_fn)) + return LLVMConstInt(LLVMInt1Type(), 0, false); + if (instruction->use_return_begin_prev_value) { + return LLVMBuildTrunc(g->builder, g->cur_async_prev_val, LLVMInt1Type(), ""); + } else { + zig_panic("TODO"); + } +} + static void set_debug_location(CodeGen *g, IrInstruction *instruction) { AstNode *source_node = instruction->source_node; Scope *scope = instruction->scope; @@ -5810,6 +5822,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_frame_size(g, executable, (IrInstructionFrameSizeGen *)instruction); case IrInstructionIdAwaitGen: return 
ir_render_await(g, executable, (IrInstructionAwaitGen *)instruction); + case IrInstructionIdTestCancelRequested: + return ir_render_test_cancel_requested(g, executable, (IrInstructionTestCancelRequested *)instruction); } zig_unreachable(); } diff --git a/src/ir.cpp b/src/ir.cpp index 76e8c91f39..b2389d1501 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -26,6 +26,7 @@ struct IrBuilder { CodeGen *codegen; IrExecutable *exec; IrBasicBlock *current_basic_block; + AstNode *main_block_node; }; struct IrAnalyze { @@ -1061,6 +1062,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroResume *) { return IrInstructionIdCoroResume; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionTestCancelRequested *) { + return IrInstructionIdTestCancelRequested; +} + template static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) { T *special_instruction = allocate(1); @@ -3320,6 +3325,16 @@ static IrInstruction *ir_build_coro_resume(IrBuilder *irb, Scope *scope, AstNode return &instruction->base; } +static IrInstruction *ir_build_test_cancel_requested(IrBuilder *irb, Scope *scope, AstNode *source_node, + bool use_return_begin_prev_value) +{ + IrInstructionTestCancelRequested *instruction = ir_build_instruction(irb, scope, source_node); + instruction->base.value.type = irb->codegen->builtin_types.entry_bool; + instruction->use_return_begin_prev_value = use_return_begin_prev_value; + + return &instruction->base; +} + static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) { results[ReturnKindUnconditional] = 0; results[ReturnKindError] = 0; @@ -3494,45 +3509,62 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, size_t defer_counts[2]; ir_count_defers(irb, scope, outer_scope, defer_counts); bool have_err_defers = defer_counts[ReturnKindError] > 0; - if (have_err_defers || irb->codegen->have_err_ret_tracing) { - IrBasicBlock *err_block = 
ir_create_basic_block(irb, scope, "ErrRetErr"); - IrBasicBlock *ok_block = ir_create_basic_block(irb, scope, "ErrRetOk"); - - IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, return_value, false, true); - - bool should_inline = ir_should_inline(irb->exec, scope); - IrInstruction *is_comptime; - if (should_inline) { - is_comptime = ir_build_const_bool(irb, scope, node, true); - } else { - is_comptime = ir_build_test_comptime(irb, scope, node, is_err); - } - - ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err, err_block, ok_block, is_comptime)); - IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, scope, "RetStmt"); - - ir_set_cursor_at_end_and_append_block(irb, err_block); - if (irb->codegen->have_err_ret_tracing && !should_inline) { - ir_build_save_err_ret_addr(irb, scope, node); - } - ir_gen_defers_for_block(irb, scope, outer_scope, true); - ir_build_br(irb, scope, node, ret_stmt_block, is_comptime); - - ir_set_cursor_at_end_and_append_block(irb, ok_block); - ir_gen_defers_for_block(irb, scope, outer_scope, false); - ir_build_br(irb, scope, node, ret_stmt_block, is_comptime); - - ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block); - IrInstruction *result = ir_build_return(irb, scope, node, return_value); - result_loc_ret->base.source_instruction = result; - return result; - } else { - // generate unconditional defers + if (!have_err_defers && !irb->codegen->have_err_ret_tracing) { + // only generate unconditional defers ir_gen_defers_for_block(irb, scope, outer_scope, false); IrInstruction *result = ir_build_return(irb, scope, node, return_value); result_loc_ret->base.source_instruction = result; return result; } + bool should_inline = ir_should_inline(irb->exec, scope); + bool need_test_cancel = !should_inline && have_err_defers; + + IrBasicBlock *err_block = ir_create_basic_block(irb, scope, "ErrRetErr"); + IrBasicBlock *normal_defers_block = ir_create_basic_block(irb, scope, "Defers"); + IrBasicBlock *ok_block = 
need_test_cancel ? + ir_create_basic_block(irb, scope, "ErrRetOk") : normal_defers_block; + IrBasicBlock *all_defers_block = have_err_defers ? ir_create_basic_block(irb, scope, "ErrDefers") : normal_defers_block; + + IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, return_value, false, true); + + IrInstruction *force_comptime = ir_build_const_bool(irb, scope, node, should_inline); + IrInstruction *err_is_comptime; + if (should_inline) { + err_is_comptime = force_comptime; + } else { + err_is_comptime = ir_build_test_comptime(irb, scope, node, is_err); + } + + ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err, err_block, ok_block, err_is_comptime)); + IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, scope, "RetStmt"); + + ir_set_cursor_at_end_and_append_block(irb, err_block); + if (irb->codegen->have_err_ret_tracing && !should_inline) { + ir_build_save_err_ret_addr(irb, scope, node); + } + ir_build_br(irb, scope, node, all_defers_block, err_is_comptime); + + if (need_test_cancel) { + ir_set_cursor_at_end_and_append_block(irb, ok_block); + IrInstruction *is_canceled = ir_build_test_cancel_requested(irb, scope, node, true); + ir_mark_gen(ir_build_cond_br(irb, scope, node, is_canceled, + all_defers_block, normal_defers_block, force_comptime)); + } + + if (all_defers_block != normal_defers_block) { + ir_set_cursor_at_end_and_append_block(irb, all_defers_block); + ir_gen_defers_for_block(irb, scope, outer_scope, true); + ir_build_br(irb, scope, node, ret_stmt_block, force_comptime); + } + + ir_set_cursor_at_end_and_append_block(irb, normal_defers_block); + ir_gen_defers_for_block(irb, scope, outer_scope, false); + ir_build_br(irb, scope, node, ret_stmt_block, force_comptime); + + ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block); + IrInstruction *result = ir_build_return(irb, scope, node, return_value); + result_loc_ret->base.source_instruction = result; + return result; } case ReturnKindError: { @@ -3765,18 +3797,59 @@ static 
IrInstruction *ir_gen_block(IrBuilder *irb, Scope *parent_scope, AstNode incoming_values.append(else_expr_result); } - if (block_node->data.block.name != nullptr) { + bool is_return_from_fn = block_node == irb->main_block_node; + if (!is_return_from_fn) { ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false); + } + + IrInstruction *result; + if (block_node->data.block.name != nullptr) { ir_mark_gen(ir_build_br(irb, parent_scope, block_node, scope_block->end_block, scope_block->is_comptime)); ir_set_cursor_at_end_and_append_block(irb, scope_block->end_block); IrInstruction *phi = ir_build_phi(irb, parent_scope, block_node, incoming_blocks.length, incoming_blocks.items, incoming_values.items, scope_block->peer_parent); - return ir_expr_wrap(irb, parent_scope, phi, result_loc); + result = ir_expr_wrap(irb, parent_scope, phi, result_loc); } else { - ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false); IrInstruction *void_inst = ir_mark_gen(ir_build_const_void(irb, child_scope, block_node)); - return ir_lval_wrap(irb, parent_scope, void_inst, lval, result_loc); + result = ir_lval_wrap(irb, parent_scope, void_inst, lval, result_loc); } + if (!is_return_from_fn) + return result; + + // no need for save_err_ret_addr because this cannot return error + // but if it is a canceled async function we do need to run the errdefers + + ir_mark_gen(ir_build_add_implicit_return_type(irb, child_scope, block_node, result)); + result = ir_mark_gen(ir_build_return_begin(irb, child_scope, block_node, result)); + + size_t defer_counts[2]; + ir_count_defers(irb, child_scope, outer_block_scope, defer_counts); + bool have_err_defers = defer_counts[ReturnKindError] > 0; + if (!have_err_defers) { + // only generate unconditional defers + ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false); + return ir_mark_gen(ir_build_return(irb, child_scope, result->source_node, result)); + } + IrInstruction *is_canceled = ir_build_test_cancel_requested(irb, 
child_scope, block_node, true); + IrBasicBlock *all_defers_block = ir_create_basic_block(irb, child_scope, "ErrDefers"); + IrBasicBlock *normal_defers_block = ir_create_basic_block(irb, child_scope, "Defers"); + IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, child_scope, "RetStmt"); + bool should_inline = ir_should_inline(irb->exec, child_scope); + IrInstruction *errdefers_is_comptime = ir_build_const_bool(irb, child_scope, block_node, + should_inline || !have_err_defers); + ir_mark_gen(ir_build_cond_br(irb, child_scope, block_node, is_canceled, + all_defers_block, normal_defers_block, errdefers_is_comptime)); + + ir_set_cursor_at_end_and_append_block(irb, all_defers_block); + ir_gen_defers_for_block(irb, child_scope, outer_block_scope, true); + ir_build_br(irb, child_scope, block_node, ret_stmt_block, errdefers_is_comptime); + + ir_set_cursor_at_end_and_append_block(irb, normal_defers_block); + ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false); + ir_build_br(irb, child_scope, block_node, ret_stmt_block, errdefers_is_comptime); + + ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block); + return ir_mark_gen(ir_build_return(irb, child_scope, result->source_node, result)); } static IrInstruction *ir_gen_bin_op_id(IrBuilder *irb, Scope *scope, AstNode *node, IrBinOp op_id) { @@ -8111,6 +8184,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec irb->codegen = codegen; irb->exec = ir_executable; + irb->main_block_node = node; IrBasicBlock *entry_block = ir_create_basic_block(irb, scope, "Entry"); ir_set_cursor_at_end_and_append_block(irb, entry_block); @@ -24603,6 +24677,16 @@ static IrInstruction *ir_analyze_instruction_coro_resume(IrAnalyze *ira, IrInstr return ir_build_coro_resume(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_frame); } +static IrInstruction *ir_analyze_instruction_test_cancel_requested(IrAnalyze *ira, + IrInstructionTestCancelRequested *instruction) +{ + 
if (ir_should_inline(ira->new_irb.exec, instruction->base.scope)) { + return ir_const_bool(ira, &instruction->base, false); + } + return ir_build_test_cancel_requested(&ira->new_irb, instruction->base.scope, instruction->base.source_node, + instruction->use_return_begin_prev_value); +} + static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction *instruction) { switch (instruction->id) { case IrInstructionIdInvalid: @@ -24900,6 +24984,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction return ir_analyze_instruction_coro_resume(ira, (IrInstructionCoroResume *)instruction); case IrInstructionIdAwaitSrc: return ir_analyze_instruction_await(ira, (IrInstructionAwaitSrc *)instruction); + case IrInstructionIdTestCancelRequested: + return ir_analyze_instruction_test_cancel_requested(ira, (IrInstructionTestCancelRequested *)instruction); } zig_unreachable(); } @@ -25134,6 +25220,7 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdHasDecl: case IrInstructionIdAllocaSrc: case IrInstructionIdAllocaGen: + case IrInstructionIdTestCancelRequested: return false; case IrInstructionIdAsm: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 0348cfc986..8b8445f625 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -1550,6 +1550,11 @@ static void ir_print_await_gen(IrPrint *irp, IrInstructionAwaitGen *instruction) fprintf(irp->f, ")"); } +static void ir_print_test_cancel_requested(IrPrint *irp, IrInstructionTestCancelRequested *instruction) { + const char *arg = instruction->use_return_begin_prev_value ? 
"UseReturnBeginPrevValue" : "AdditionalCheck"; + fprintf(irp->f, "@testCancelRequested(%s)", arg); +} + static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { ir_print_prefix(irp, instruction); switch (instruction->id) { @@ -2032,6 +2037,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdAwaitGen: ir_print_await_gen(irp, (IrInstructionAwaitGen *)instruction); break; + case IrInstructionIdTestCancelRequested: + ir_print_test_cancel_requested(irp, (IrInstructionTestCancelRequested *)instruction); + break; } fprintf(irp->f, "\n"); } diff --git a/test/stage1/behavior/cancel.zig b/test/stage1/behavior/cancel.zig index b546857ae3..5dedb20159 100644 --- a/test/stage1/behavior/cancel.zig +++ b/test/stage1/behavior/cancel.zig @@ -48,8 +48,9 @@ var defer_b3: bool = false; var defer_b4: bool = false; test "cancel backwards" { - _ = async b1(); + var b1_frame = async b1(); resume b4_handle; + _ = async awaitAFrame(&b1_frame); expect(defer_b1); expect(defer_b2); expect(defer_b3); @@ -63,7 +64,7 @@ async fn b1() void { b2(); } -var b4_handle: anyframe = undefined; +var b4_handle: anyframe->void = undefined; async fn b2() void { const b3_handle = async b3(); @@ -93,6 +94,10 @@ async fn b4() void { suspend; } +fn awaitAFrame(f: anyframe->void) void { + await f; +} + test "cancel on a non-pointer" { const S = struct { fn doTheTest() void { diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index d11f6831b3..2fd5912aac 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -134,29 +134,44 @@ test "@frameSize" { } test "coroutine suspend, resume" { - seq('a'); - const p = async testAsyncSeq(); - seq('c'); - resume p; - seq('f'); - // `cancel` is now a suspend point so it cannot be done here - seq('g'); + const S = struct { + var frame: anyframe = undefined; - expect(std.mem.eql(u8, points, "abcdefg")); -} -async fn testAsyncSeq() void { - 
defer seq('e'); + fn doTheTest() void { + _ = async amain(); + seq('d'); + resume frame; + seq('h'); - seq('b'); - suspend; - seq('d'); -} -var points = [_]u8{0} ** "abcdefg".len; -var index: usize = 0; + expect(std.mem.eql(u8, points, "abcdefgh")); + } -fn seq(c: u8) void { - points[index] = c; - index += 1; + fn amain() void { + seq('a'); + var f = async testAsyncSeq(); + seq('c'); + cancel f; + seq('g'); + } + + fn testAsyncSeq() void { + defer seq('f'); + + seq('b'); + suspend { + frame = @frame(); + } + seq('e'); + } + var points = [_]u8{'x'} ** "abcdefgh".len; + var index: usize = 0; + + fn seq(c: u8) void { + points[index] = c; + index += 1; + } + }; + S.doTheTest(); } test "coroutine suspend with block" { @@ -267,12 +282,19 @@ test "async fn pointer in a struct field" { }; var foo = Foo{ .bar = simpleAsyncFn2 }; var bytes: [64]u8 = undefined; - const p = @asyncCall(&bytes, {}, foo.bar, &data); - comptime expect(@typeOf(p) == anyframe->void); + const f = @asyncCall(&bytes, {}, foo.bar, &data); + comptime expect(@typeOf(f) == anyframe->void); expect(data == 2); - resume p; + resume f; + expect(data == 2); + _ = async doTheAwait(f); expect(data == 4); } + +fn doTheAwait(f: anyframe->void) void { + await f; +} + async fn simpleAsyncFn2(y: *i32) void { defer y.* += 2; y.* += 1; @@ -507,3 +529,42 @@ test "call async function which has struct return type" { }; S.doTheTest(); } + +test "errdefers in scope get run when canceling async fn call" { + const S = struct { + var frame: anyframe = undefined; + var x: u32 = 0; + + fn doTheTest() void { + x = 9; + _ = async cancelIt(); + resume frame; + expect(x == 6); + + x = 9; + _ = async awaitIt(); + resume frame; + expect(x == 11); + } + + fn cancelIt() void { + var f = async func(); + cancel f; + } + + fn awaitIt() void { + var f = async func(); + await f; + } + + fn func() void { + defer x += 1; + errdefer x /= 2; + defer x += 1; + suspend { + frame = @frame(); + } + } + }; + S.doTheTest(); +} From 
d813805f775b74416337ce8f1bd7b2e789caf185 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 8 Aug 2019 12:02:56 -0400 Subject: [PATCH 072/125] more debuggable safety for awaiting twice --- src/codegen.cpp | 10 +++++----- test/runtime_safety.zig | 24 ++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index 00458c7665..3c1b7543dc 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -2259,7 +2259,7 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable, LLVMValueRef zero = LLVMConstNull(usize_type_ref); LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref); - LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, g->cur_async_awaiter_ptr, + LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXor, g->cur_async_awaiter_ptr, all_ones, LLVMAtomicOrderingAcquire, g->is_single_threaded); LLVMBasicBlockRef bad_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadReturn"); @@ -2346,7 +2346,7 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, masked_prev_val, get_llvm_type(g, any_frame_type), ""); LLVMValueRef call_inst = gen_resume(g, nullptr, their_frame_ptr, ResumeIdReturn, nullptr); - ZigLLVMSetTailCall(call_inst); + LLVMSetTailCall(call_inst, true); LLVMBuildRetVoid(g->builder); return nullptr; @@ -3956,7 +3956,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMBasicBlockRef call_bb = gen_suspend_begin(g, "CallResume"); LLVMValueRef call_inst = gen_resume(g, fn_val, frame_result_loc, ResumeIdCall, nullptr); - ZigLLVMSetTailCall(call_inst); + LLVMSetTailCall(call_inst, true); LLVMBuildRetVoid(g->builder); LLVMPositionBuilderAtEnd(g->builder, call_bb); @@ -5456,7 +5456,7 @@ static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrIns LLVMPositionBuilderAtEnd(g->builder, 
early_return_block); LLVMValueRef call_inst = gen_resume(g, nullptr, target_frame_ptr, ResumeIdAwaitEarlyReturn, awaiter_ored_val); - ZigLLVMSetTailCall(call_inst); + LLVMSetTailCall(call_inst, true); LLVMBuildRetVoid(g->builder); LLVMPositionBuilderAtEnd(g->builder, resume_bb); @@ -5524,7 +5524,7 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst // Tail resume it now, so that it can complete. LLVMPositionBuilderAtEnd(g->builder, early_return_block); LLVMValueRef call_inst = gen_resume(g, nullptr, target_frame_ptr, ResumeIdAwaitEarlyReturn, awaiter_init_val); - ZigLLVMSetTailCall(call_inst); + LLVMSetTailCall(call_inst, true); LLVMBuildRetVoid(g->builder); // Rely on the target to resume us from suspension. diff --git a/test/runtime_safety.zig b/test/runtime_safety.zig index 786c516298..0fb593c0e2 100644 --- a/test/runtime_safety.zig +++ b/test/runtime_safety.zig @@ -1,6 +1,30 @@ const tests = @import("tests.zig"); pub fn addCases(cases: *tests.CompareOutputContext) void { + cases.addRuntimeSafety("awaiting twice", + \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn { + \\ @import("std").os.exit(126); + \\} + \\var frame: anyframe = undefined; + \\ + \\pub fn main() void { + \\ _ = async amain(); + \\ resume frame; + \\} + \\ + \\fn amain() void { + \\ var f = async func(); + \\ await f; + \\ await f; + \\} + \\ + \\fn func() void { + \\ suspend { + \\ frame = @frame(); + \\ } + \\} + ); + cases.addRuntimeSafety("@asyncCall with too small a frame", \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn { \\ @import("std").os.exit(126); From 8d4cb852856631543aa961a6149b9a22021a1573 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 8 Aug 2019 12:30:22 -0400 Subject: [PATCH 073/125] async functions in single threaded mode do not use atomic ops --- BRANCH_TODO | 2 -- src/codegen.cpp | 34 +++++++++++++++++++++++++++++----- 2 files changed, 29 
insertions(+), 7 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index d7f6b31dd5..c80c1e92e4 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -15,7 +15,6 @@ * compile error for copying a frame * compile error for resuming a const frame pointer * runtime safety enabling/disabling scope has to be coordinated across resume/await/calls/return - * await in single-threaded mode * calling a generic function which is async * make sure `await @asyncCall` and `await async` are handled correctly. * allow @asyncCall with a real @Frame(func) (the point of this is result pointer) @@ -28,7 +27,6 @@ - suspend - resume - anyframe, anyframe->T - * safety for double await * call graph analysis to have fewer stack trace frames * grep for "coroutine" and "coro" and replace all that nomenclature with "async functions" * when there are multiple calls to async functions in a function, reuse the same frame buffer, so that the diff --git a/src/codegen.cpp b/src/codegen.cpp index 3c1b7543dc..4ef81de4a0 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -2236,6 +2236,29 @@ static LLVMBasicBlockRef gen_suspend_begin(CodeGen *g, const char *name_hint) { return resume_bb; } +static LLVMValueRef gen_maybe_atomic_op(CodeGen *g, LLVMAtomicRMWBinOp op, LLVMValueRef ptr, LLVMValueRef val, + LLVMAtomicOrdering order) +{ + if (g->is_single_threaded) { + LLVMValueRef loaded = LLVMBuildLoad(g->builder, ptr, ""); + LLVMValueRef modified; + switch (op) { + case LLVMAtomicRMWBinOpXchg: + modified = val; + break; + case LLVMAtomicRMWBinOpXor: + modified = LLVMBuildXor(g->builder, loaded, val, ""); + break; + default: + zig_unreachable(); + } + LLVMBuildStore(g->builder, modified, ptr); + return loaded; + } else { + return LLVMBuildAtomicRMW(g->builder, op, ptr, val, order, false); + } +} + static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable, IrInstructionReturnBegin *instruction) { @@ -2259,8 +2282,8 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable 
*executable, LLVMValueRef zero = LLVMConstNull(usize_type_ref); LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref); - LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXor, g->cur_async_awaiter_ptr, - all_ones, LLVMAtomicOrderingAcquire, g->is_single_threaded); + LLVMValueRef prev_val = gen_maybe_atomic_op(g, LLVMAtomicRMWBinOpXor, g->cur_async_awaiter_ptr, + all_ones, LLVMAtomicOrderingAcquire); LLVMBasicBlockRef bad_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadReturn"); LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn"); @@ -5278,7 +5301,8 @@ static LLVMValueRef ir_render_atomic_rmw(CodeGen *g, IrExecutable *executable, LLVMValueRef casted_ptr = LLVMBuildBitCast(g->builder, ptr, LLVMPointerType(g->builtin_types.entry_usize->llvm_type, 0), ""); LLVMValueRef casted_operand = LLVMBuildPtrToInt(g->builder, operand, g->builtin_types.entry_usize->llvm_type, ""); - LLVMValueRef uncasted_result = LLVMBuildAtomicRMW(g->builder, op, casted_ptr, casted_operand, ordering, false); + LLVMValueRef uncasted_result = LLVMBuildAtomicRMW(g->builder, op, casted_ptr, casted_operand, ordering, + g->is_single_threaded); return LLVMBuildIntToPtr(g->builder, uncasted_result, get_llvm_type(g, operand_type), ""); } @@ -5441,8 +5465,8 @@ static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrIns LLVMValueRef awaiter_ored_val = LLVMBuildOr(g->builder, awaiter_val, one, ""); LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_awaiter_index, ""); - LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_ored_val, - LLVMAtomicOrderingRelease, g->is_single_threaded); + LLVMValueRef prev_val = gen_maybe_atomic_op(g, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_ored_val, + LLVMAtomicOrderingRelease); LLVMBasicBlockRef complete_suspend_block = LLVMAppendBasicBlock(g->cur_fn_val, "CancelSuspend"); LLVMBasicBlockRef 
early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn"); From 8be95af48011f71f1902dff4ffbf5ea95cf1bcf4 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 8 Aug 2019 13:07:05 -0400 Subject: [PATCH 074/125] add compile error for unable to determine async fn frame --- BRANCH_TODO | 5 +++++ src/analyze.cpp | 7 +++++++ 2 files changed, 12 insertions(+) diff --git a/BRANCH_TODO b/BRANCH_TODO index c80c1e92e4..bd797a75a8 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -31,3 +31,8 @@ * grep for "coroutine" and "coro" and replace all that nomenclature with "async functions" * when there are multiple calls to async functions in a function, reuse the same frame buffer, so that the needed bytes is equal to the largest callee's frame + * if an async function is never called with async then a few optimizations can be made: + - the return does not need to be atomic + - it can be assumed that these are always available: the awaiter ptr, return ptr if applicable, + error return trace ptr if applicable. 
+ - it can be assumed that it is never cancelled diff --git a/src/analyze.cpp b/src/analyze.cpp index cf71bd90f3..aa5c3c88f7 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5198,6 +5198,13 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { if (callee->body_node == nullptr) { continue; } + if (callee->anal_state == FnAnalStateProbing) { + ErrorMsg *msg = add_node_error(g, fn->proto_node, + buf_sprintf("unable to determine async function frame of '%s'", buf_ptr(&fn->symbol_name))); + add_error_note(g, msg, call->base.source_node, + buf_sprintf("analysis of function '%s' depends on the frame", buf_ptr(&callee->symbol_name))); + return ErrorSemanticAnalyzeFail; + } analyze_fn_body(g, callee); if (callee->anal_state == FnAnalStateInvalid) { From bfa1d12fbad2031402fbafe51c3a0c481fe69351 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 8 Aug 2019 13:44:57 -0400 Subject: [PATCH 075/125] better compile errors when frame depends on itself --- src/analyze.cpp | 11 ++++++++--- src/ir.cpp | 12 +++++++++--- src/ir.hpp | 2 ++ test/compile_errors.zig | 30 ++++++++++++++++++++++++++++++ 4 files changed, 49 insertions(+), 6 deletions(-) diff --git a/src/analyze.cpp b/src/analyze.cpp index aa5c3c88f7..cc90573f41 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5179,11 +5179,14 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { if (fn->anal_state == FnAnalStateInvalid) return ErrorSemanticAnalyzeFail; break; - case FnAnalStateProbing: - add_node_error(g, fn->proto_node, + case FnAnalStateProbing: { + ErrorMsg *msg = add_node_error(g, fn->proto_node, buf_sprintf("cannot resolve '%s': function not fully analyzed yet", buf_ptr(&frame_type->name))); + ir_add_analysis_trace(fn->ir_executable.analysis, msg, + buf_sprintf("depends on its own frame here")); return ErrorSemanticAnalyzeFail; + } } ZigType *fn_type = get_async_fn_type(g, fn->type_entry); @@ -5201,8 +5204,10 @@ static Error resolve_coro_frame(CodeGen *g, ZigType 
*frame_type) { if (callee->anal_state == FnAnalStateProbing) { ErrorMsg *msg = add_node_error(g, fn->proto_node, buf_sprintf("unable to determine async function frame of '%s'", buf_ptr(&fn->symbol_name))); - add_error_note(g, msg, call->base.source_node, + ErrorMsg *note = add_error_note(g, msg, call->base.source_node, buf_sprintf("analysis of function '%s' depends on the frame", buf_ptr(&callee->symbol_name))); + ir_add_analysis_trace(callee->ir_executable.analysis, note, + buf_sprintf("depends on the frame here")); return ErrorSemanticAnalyzeFail; } diff --git a/src/ir.cpp b/src/ir.cpp index b2389d1501..f92434bb33 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -8217,18 +8217,24 @@ bool ir_gen_fn(CodeGen *codegen, ZigFn *fn_entry) { return ir_gen(codegen, body_node, fn_entry->child_scope, ir_executable); } -static void add_call_stack_errors(CodeGen *codegen, IrExecutable *exec, ErrorMsg *err_msg, int limit) { +static void ir_add_call_stack_errors(CodeGen *codegen, IrExecutable *exec, ErrorMsg *err_msg, int limit) { if (!exec || !exec->source_node || limit < 0) return; add_error_note(codegen, err_msg, exec->source_node, buf_sprintf("called from here")); - add_call_stack_errors(codegen, exec->parent_exec, err_msg, limit - 1); + ir_add_call_stack_errors(codegen, exec->parent_exec, err_msg, limit - 1); +} + +void ir_add_analysis_trace(IrAnalyze *ira, ErrorMsg *err_msg, Buf *text) { + IrInstruction *old_instruction = ira->old_irb.current_basic_block->instruction_list.at(ira->instruction_index); + add_error_note(ira->codegen, err_msg, old_instruction->source_node, text); + ir_add_call_stack_errors(ira->codegen, ira->new_irb.exec, err_msg, 10); } static ErrorMsg *exec_add_error_node(CodeGen *codegen, IrExecutable *exec, AstNode *source_node, Buf *msg) { invalidate_exec(exec); ErrorMsg *err_msg = add_node_error(codegen, source_node, msg); if (exec->parent_exec) { - add_call_stack_errors(codegen, exec, err_msg, 10); + ir_add_call_stack_errors(codegen, exec, err_msg, 10); } 
return err_msg; } diff --git a/src/ir.hpp b/src/ir.hpp index 597624e2e6..3761c5a97d 100644 --- a/src/ir.hpp +++ b/src/ir.hpp @@ -28,4 +28,6 @@ ConstExprValue *const_ptr_pointee(IrAnalyze *ira, CodeGen *codegen, ConstExprVal AstNode *source_node); const char *float_op_to_name(BuiltinFnId op, bool llvm_name); +void ir_add_analysis_trace(IrAnalyze *ira, ErrorMsg *err_msg, Buf *text); + #endif diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 2941cadcf5..810e40b18b 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -2,6 +2,36 @@ const tests = @import("tests.zig"); const builtin = @import("builtin"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "async function indirectly depends on its own frame", + \\export fn entry() void { + \\ _ = async amain(); + \\} + \\async fn amain() void { + \\ other(); + \\} + \\fn other() void { + \\ var x: [@sizeOf(@Frame(amain))]u8 = undefined; + \\} + , + "tmp.zig:4:1: error: unable to determine async function frame of 'amain'", + "tmp.zig:5:10: note: analysis of function 'other' depends on the frame", + "tmp.zig:8:13: note: depends on the frame here", + ); + + cases.add( + "async function depends on its own frame", + \\export fn entry() void { + \\ _ = async amain(); + \\} + \\async fn amain() void { + \\ var x: [@sizeOf(@Frame(amain))]u8 = undefined; + \\} + , + "tmp.zig:4:1: error: cannot resolve '@Frame(amain)': function not fully analyzed yet", + "tmp.zig:5:13: note: depends on its own frame here", + ); + cases.add( "non async function pointer passed to @asyncCall", \\export fn entry() void { From cfe84423c97eb2121138c2de5876c47782cd6dda Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 8 Aug 2019 15:13:05 -0400 Subject: [PATCH 076/125] fix segfault with var args --- src/ir.cpp | 8 ++- std/event/fs.zig | 120 ++++++++++++++++----------------- std/event/loop.zig | 163 ++++++++++++++++++++++----------------------- 3 files changed, 143 insertions(+), 148 deletions(-) 
diff --git a/src/ir.cpp b/src/ir.cpp index f92434bb33..20a21bb5c3 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -15746,7 +15746,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c size_t impl_param_count = impl_fn_type_id->param_count; if (call_instruction->is_async) { IrInstruction *result = ir_analyze_async_call(ira, call_instruction, impl_fn, impl_fn->type_entry, - nullptr, casted_args, call_param_count, casted_new_stack); + nullptr, casted_args, impl_param_count, casted_new_stack); return ir_finish_anal(ira, result); } @@ -15756,7 +15756,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base, impl_fn, nullptr, impl_param_count, casted_args, fn_inline, - call_instruction->is_async, casted_new_stack, result_loc, + false, casted_new_stack, result_loc, impl_fn_type_id->return_type); parent_fn_entry->call_list.append(new_call_instruction); @@ -15799,7 +15799,9 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c casted_args[next_arg_index] = casted_arg; next_arg_index += 1; } - for (size_t call_i = 0; call_i < call_instruction->arg_count; call_i += 1) { + size_t iter_count = (call_param_count < call_instruction->arg_count) ? 
+ call_param_count : call_instruction->arg_count; + for (size_t call_i = 0; call_i < iter_count; call_i += 1) { IrInstruction *old_arg = call_instruction->args[call_i]->child; if (type_is_invalid(old_arg->value.type)) return ira->codegen->invalid_instruction; diff --git a/std/event/fs.zig b/std/event/fs.zig index c25426b98a..3ead77e949 100644 --- a/std/event/fs.zig +++ b/std/event/fs.zig @@ -83,10 +83,10 @@ pub async fn pwritev(loop: *Loop, fd: fd_t, data: []const []const u8, offset: us resume @handle(); } switch (builtin.os) { - builtin.Os.macosx, - builtin.Os.linux, - builtin.Os.freebsd, - builtin.Os.netbsd, + .macosx, + .linux, + .freebsd, + .netbsd, => { const iovecs = try loop.allocator.alloc(os.iovec_const, data.len); defer loop.allocator.free(iovecs); @@ -100,7 +100,7 @@ pub async fn pwritev(loop: *Loop, fd: fd_t, data: []const []const u8, offset: us return await (async pwritevPosix(loop, fd, iovecs, offset) catch unreachable); }, - builtin.Os.windows => { + .windows => { const data_copy = try std.mem.dupe(loop.allocator, []const u8, data); defer loop.allocator.free(data_copy); return await (async pwritevWindows(loop, fd, data, offset) catch unreachable); @@ -220,10 +220,10 @@ pub async fn preadv(loop: *Loop, fd: fd_t, data: []const []u8, offset: usize) PR assert(data.len != 0); switch (builtin.os) { - builtin.Os.macosx, - builtin.Os.linux, - builtin.Os.freebsd, - builtin.Os.netbsd, + .macosx, + .linux, + .freebsd, + .netbsd, => { const iovecs = try loop.allocator.alloc(os.iovec, data.len); defer loop.allocator.free(iovecs); @@ -237,7 +237,7 @@ pub async fn preadv(loop: *Loop, fd: fd_t, data: []const []u8, offset: usize) PR return await (async preadvPosix(loop, fd, iovecs, offset) catch unreachable); }, - builtin.Os.windows => { + .windows => { const data_copy = try std.mem.dupe(loop.allocator, []u8, data); defer loop.allocator.free(data_copy); return await (async preadvWindows(loop, fd, data_copy, offset) catch unreachable); @@ -403,12 +403,12 @@ pub async 
fn openPosix( pub async fn openRead(loop: *Loop, path: []const u8) File.OpenError!fd_t { switch (builtin.os) { - builtin.Os.macosx, builtin.Os.linux, builtin.Os.freebsd, builtin.Os.netbsd => { + .macosx, .linux, .freebsd, .netbsd => { const flags = os.O_LARGEFILE | os.O_RDONLY | os.O_CLOEXEC; return await (async openPosix(loop, path, flags, File.default_mode) catch unreachable); }, - builtin.Os.windows => return windows.CreateFile( + .windows => return windows.CreateFile( path, windows.GENERIC_READ, windows.FILE_SHARE_READ, @@ -431,15 +431,15 @@ pub async fn openWrite(loop: *Loop, path: []const u8) File.OpenError!fd_t { /// Creates if does not exist. Truncates the file if it exists. pub async fn openWriteMode(loop: *Loop, path: []const u8, mode: File.Mode) File.OpenError!fd_t { switch (builtin.os) { - builtin.Os.macosx, - builtin.Os.linux, - builtin.Os.freebsd, - builtin.Os.netbsd, + .macosx, + .linux, + .freebsd, + .netbsd, => { const flags = os.O_LARGEFILE | os.O_WRONLY | os.O_CREAT | os.O_CLOEXEC | os.O_TRUNC; return await (async openPosix(loop, path, flags, File.default_mode) catch unreachable); }, - builtin.Os.windows => return windows.CreateFile( + .windows => return windows.CreateFile( path, windows.GENERIC_WRITE, windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE, @@ -459,12 +459,12 @@ pub async fn openReadWrite( mode: File.Mode, ) File.OpenError!fd_t { switch (builtin.os) { - builtin.Os.macosx, builtin.Os.linux, builtin.Os.freebsd, builtin.Os.netbsd => { + .macosx, .linux, .freebsd, .netbsd => { const flags = os.O_LARGEFILE | os.O_RDWR | os.O_CREAT | os.O_CLOEXEC; return await (async openPosix(loop, path, flags, mode) catch unreachable); }, - builtin.Os.windows => return windows.CreateFile( + .windows => return windows.CreateFile( path, windows.GENERIC_WRITE | windows.GENERIC_READ, windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE, @@ -489,9 +489,9 @@ pub const CloseOperation = struct { os_data: 
OsData, const OsData = switch (builtin.os) { - builtin.Os.linux, builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => OsDataPosix, + .linux, .macosx, .freebsd, .netbsd => OsDataPosix, - builtin.Os.windows => struct { + .windows => struct { handle: ?fd_t, }, @@ -508,8 +508,8 @@ pub const CloseOperation = struct { self.* = CloseOperation{ .loop = loop, .os_data = switch (builtin.os) { - builtin.Os.linux, builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => initOsDataPosix(self), - builtin.Os.windows => OsData{ .handle = null }, + .linux, .macosx, .freebsd, .netbsd => initOsDataPosix(self), + .windows => OsData{ .handle = null }, else => @compileError("Unsupported OS"), }, }; @@ -535,10 +535,10 @@ pub const CloseOperation = struct { /// Defer this after creating. pub fn finish(self: *CloseOperation) void { switch (builtin.os) { - builtin.Os.linux, - builtin.Os.macosx, - builtin.Os.freebsd, - builtin.Os.netbsd, + .linux, + .macosx, + .freebsd, + .netbsd, => { if (self.os_data.have_fd) { self.loop.posixFsRequest(&self.os_data.close_req_node); @@ -546,7 +546,7 @@ pub const CloseOperation = struct { self.loop.allocator.destroy(self); } }, - builtin.Os.windows => { + .windows => { if (self.os_data.handle) |handle| { os.close(handle); } @@ -558,15 +558,15 @@ pub const CloseOperation = struct { pub fn setHandle(self: *CloseOperation, handle: fd_t) void { switch (builtin.os) { - builtin.Os.linux, - builtin.Os.macosx, - builtin.Os.freebsd, - builtin.Os.netbsd, + .linux, + .macosx, + .freebsd, + .netbsd, => { self.os_data.close_req_node.data.msg.Close.fd = handle; self.os_data.have_fd = true; }, - builtin.Os.windows => { + .windows => { self.os_data.handle = handle; }, else => @compileError("Unsupported OS"), @@ -576,14 +576,14 @@ pub const CloseOperation = struct { /// Undo a `setHandle`. 
pub fn clearHandle(self: *CloseOperation) void { switch (builtin.os) { - builtin.Os.linux, - builtin.Os.macosx, - builtin.Os.freebsd, - builtin.Os.netbsd, + .linux, + .macosx, + .freebsd, + .netbsd, => { self.os_data.have_fd = false; }, - builtin.Os.windows => { + .windows => { self.os_data.handle = null; }, else => @compileError("Unsupported OS"), @@ -592,15 +592,15 @@ pub const CloseOperation = struct { pub fn getHandle(self: *CloseOperation) fd_t { switch (builtin.os) { - builtin.Os.linux, - builtin.Os.macosx, - builtin.Os.freebsd, - builtin.Os.netbsd, + .linux, + .macosx, + .freebsd, + .netbsd, => { assert(self.os_data.have_fd); return self.os_data.close_req_node.data.msg.Close.fd; }, - builtin.Os.windows => { + .windows => { return self.os_data.handle.?; }, else => @compileError("Unsupported OS"), @@ -617,12 +617,12 @@ pub async fn writeFile(loop: *Loop, path: []const u8, contents: []const u8) !voi /// contents must remain alive until writeFile completes. pub async fn writeFileMode(loop: *Loop, path: []const u8, contents: []const u8, mode: File.Mode) !void { switch (builtin.os) { - builtin.Os.linux, - builtin.Os.macosx, - builtin.Os.freebsd, - builtin.Os.netbsd, + .linux, + .macosx, + .freebsd, + .netbsd, => return await (async writeFileModeThread(loop, path, contents, mode) catch unreachable), - builtin.Os.windows => return await (async writeFileWindows(loop, path, contents) catch unreachable), + .windows => return await (async writeFileWindows(loop, path, contents) catch unreachable), else => @compileError("Unsupported OS"), } } @@ -728,7 +728,7 @@ pub fn Watch(comptime V: type) type { os_data: OsData, const OsData = switch (builtin.os) { - builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => struct { + .macosx, .freebsd, .netbsd => struct { file_table: FileTable, table_lock: event.Lock, @@ -739,8 +739,8 @@ pub fn Watch(comptime V: type) type { }; }, - builtin.Os.linux => LinuxOsData, - builtin.Os.windows => WindowsOsData, + .linux => LinuxOsData, + 
.windows => WindowsOsData, else => @compileError("Unsupported OS"), }; @@ -793,7 +793,7 @@ pub fn Watch(comptime V: type) type { errdefer channel.destroy(); switch (builtin.os) { - builtin.Os.linux => { + .linux => { const inotify_fd = try os.inotify_init1(os.linux.IN_NONBLOCK | os.linux.IN_CLOEXEC); errdefer os.close(inotify_fd); @@ -802,7 +802,7 @@ pub fn Watch(comptime V: type) type { return result; }, - builtin.Os.windows => { + .windows => { const self = try loop.allocator.create(Self); errdefer loop.allocator.destroy(self); self.* = Self{ @@ -817,7 +817,7 @@ pub fn Watch(comptime V: type) type { return self; }, - builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => { + .macosx, .freebsd, .netbsd => { const self = try loop.allocator.create(Self); errdefer loop.allocator.destroy(self); @@ -837,7 +837,7 @@ pub fn Watch(comptime V: type) type { /// All addFile calls and removeFile calls must have completed. pub fn destroy(self: *Self) void { switch (builtin.os) { - builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => { + .macosx, .freebsd, .netbsd => { // TODO we need to cancel the coroutines before destroying the lock self.os_data.table_lock.deinit(); var it = self.os_data.file_table.iterator(); @@ -847,8 +847,8 @@ pub fn Watch(comptime V: type) type { } self.channel.destroy(); }, - builtin.Os.linux => cancel self.os_data.putter, - builtin.Os.windows => { + .linux => cancel self.os_data.putter, + .windows => { while (self.os_data.all_putters.get()) |putter_node| { cancel putter_node.data; } @@ -879,9 +879,9 @@ pub fn Watch(comptime V: type) type { pub async fn addFile(self: *Self, file_path: []const u8, value: V) !?V { switch (builtin.os) { - builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => return await (async addFileKEvent(self, file_path, value) catch unreachable), - builtin.Os.linux => return await (async addFileLinux(self, file_path, value) catch unreachable), - builtin.Os.windows => return await (async addFileWindows(self, 
file_path, value) catch unreachable), + .macosx, .freebsd, .netbsd => return await (async addFileKEvent(self, file_path, value) catch unreachable), + .linux => return await (async addFileLinux(self, file_path, value) catch unreachable), + .windows => return await (async addFileWindows(self, file_path, value) catch unreachable), else => @compileError("Unsupported OS"), } } diff --git a/std/event/loop.zig b/std/event/loop.zig index aacd4bd7aa..70cd8d2ab6 100644 --- a/std/event/loop.zig +++ b/std/event/loop.zig @@ -13,7 +13,7 @@ const Thread = std.Thread; pub const Loop = struct { allocator: *mem.Allocator, - next_tick_queue: std.atomic.Queue(promise), + next_tick_queue: std.atomic.Queue(anyframe), os_data: OsData, final_resume_node: ResumeNode, pending_event_count: usize, @@ -24,11 +24,11 @@ pub const Loop = struct { available_eventfd_resume_nodes: std.atomic.Stack(ResumeNode.EventFd), eventfd_resume_nodes: []std.atomic.Stack(ResumeNode.EventFd).Node, - pub const NextTickNode = std.atomic.Queue(promise).Node; + pub const NextTickNode = std.atomic.Queue(anyframe).Node; pub const ResumeNode = struct { id: Id, - handle: promise, + handle: anyframe, overlapped: Overlapped, pub const overlapped_init = switch (builtin.os) { @@ -110,7 +110,7 @@ pub const Loop = struct { .pending_event_count = 1, .allocator = allocator, .os_data = undefined, - .next_tick_queue = std.atomic.Queue(promise).init(), + .next_tick_queue = std.atomic.Queue(anyframe).init(), .extra_threads = undefined, .available_eventfd_resume_nodes = std.atomic.Stack(ResumeNode.EventFd).init(), .eventfd_resume_nodes = undefined, @@ -148,18 +148,18 @@ pub const Loop = struct { fn initOsData(self: *Loop, extra_thread_count: usize) InitOsDataError!void { switch (builtin.os) { .linux => { - self.os_data.fs_queue = std.atomic.Queue(fs.Request).init(); - self.os_data.fs_queue_item = 0; - // we need another thread for the file system because Linux does not have an async - // file system I/O API. 
- self.os_data.fs_end_request = fs.RequestNode{ - .prev = undefined, - .next = undefined, - .data = fs.Request{ - .msg = fs.Request.Msg.End, - .finish = fs.Request.Finish.NoAction, - }, - }; + // TODO self.os_data.fs_queue = std.atomic.Queue(fs.Request).init(); + // TODO self.os_data.fs_queue_item = 0; + // TODO // we need another thread for the file system because Linux does not have an async + // TODO // file system I/O API. + // TODO self.os_data.fs_end_request = fs.RequestNode{ + // TODO .prev = undefined, + // TODO .next = undefined, + // TODO .data = fs.Request{ + // TODO .msg = fs.Request.Msg.End, + // TODO .finish = fs.Request.Finish.NoAction, + // TODO }, + // TODO }; errdefer { while (self.available_eventfd_resume_nodes.pop()) |node| os.close(node.data.eventfd); @@ -197,10 +197,10 @@ pub const Loop = struct { &self.os_data.final_eventfd_event, ); - self.os_data.fs_thread = try Thread.spawn(self, posixFsRun); + // TODO self.os_data.fs_thread = try Thread.spawn(self, posixFsRun); errdefer { - self.posixFsRequest(&self.os_data.fs_end_request); - self.os_data.fs_thread.wait(); + // TODO self.posixFsRequest(&self.os_data.fs_end_request); + // TODO self.os_data.fs_thread.wait(); } if (builtin.single_threaded) { @@ -302,10 +302,10 @@ pub const Loop = struct { .udata = undefined, }; - self.os_data.fs_thread = try Thread.spawn(self, posixFsRun); + // TODO self.os_data.fs_thread = try Thread.spawn(self, posixFsRun); errdefer { - self.posixFsRequest(&self.os_data.fs_end_request); - self.os_data.fs_thread.wait(); + // TODO self.posixFsRequest(&self.os_data.fs_end_request); + // TODO self.os_data.fs_thread.wait(); } if (builtin.single_threaded) { @@ -397,7 +397,7 @@ pub const Loop = struct { } } - /// resume_node must live longer than the promise that it holds a reference to. + /// resume_node must live longer than the anyframe that it holds a reference to. 
/// flags must contain EPOLLET pub fn linuxAddFd(self: *Loop, fd: i32, resume_node: *ResumeNode, flags: u32) !void { assert(flags & os.EPOLLET == os.EPOLLET); @@ -460,7 +460,7 @@ pub const Loop = struct { return resume_node.kev; } - /// resume_node must live longer than the promise that it holds a reference to. + /// resume_node must live longer than the anyframe that it holds a reference to. pub fn bsdAddKev(self: *Loop, resume_node: *ResumeNode.Basic, ident: usize, filter: i16, fflags: u32) !void { self.beginOneEvent(); errdefer self.finishOneEvent(); @@ -561,11 +561,11 @@ pub const Loop = struct { self.workerRun(); switch (builtin.os) { - builtin.Os.linux, - builtin.Os.macosx, - builtin.Os.freebsd, - builtin.Os.netbsd, - => self.os_data.fs_thread.wait(), + .linux, + .macosx, + .freebsd, + .netbsd, + => {}, // TODO self.os_data.fs_thread.wait(), else => {}, } @@ -574,45 +574,39 @@ pub const Loop = struct { } } - /// This is equivalent to an async call, except instead of beginning execution of the async function, - /// it immediately returns to the caller, and the async function is queued in the event loop. It still - /// returns a promise to be awaited. - pub fn call(self: *Loop, comptime func: var, args: ...) !(promise->@typeOf(func).ReturnType) { - const S = struct { - async fn asyncFunc(loop: *Loop, handle: *promise->@typeOf(func).ReturnType, args2: ...) @typeOf(func).ReturnType { - suspend { - handle.* = @handle(); - var my_tick_node = Loop.NextTickNode{ - .prev = undefined, - .next = undefined, - .data = @handle(), - }; - loop.onNextTick(&my_tick_node); - } - // TODO guaranteed allocation elision for await in same func as async - return await (async func(args2) catch unreachable); - } - }; - var handle: promise->@typeOf(func).ReturnType = undefined; - return async S.asyncFunc(self, &handle, args); + /// This is equivalent to function call, except it calls `startCpuBoundOperation` first. + pub fn call(comptime func: var, args: ...) 
@typeOf(func).ReturnType { + startCpuBoundOperation(); + return func(args); } - /// Awaiting a yield lets the event loop run, starting any unstarted async operations. + /// Yielding lets the event loop run, starting any unstarted async operations. /// Note that async operations automatically start when a function yields for any other reason, /// for example, when async I/O is performed. This function is intended to be used only when /// CPU bound tasks would be waiting in the event loop but never get started because no async I/O /// is performed. - pub async fn yield(self: *Loop) void { + pub fn yield(self: *Loop) void { suspend { - var my_tick_node = Loop.NextTickNode{ + var my_tick_node = NextTickNode{ .prev = undefined, .next = undefined, - .data = @handle(), + .data = @frame(), }; self.onNextTick(&my_tick_node); } } + /// If the build is multi-threaded and there is an event loop, then it calls `yield`. Otherwise, + /// does nothing. + pub fn startCpuBoundOperation() void { + if (builtin.is_single_threaded) { + return; + } else if (instance) |event_loop| { + event_loop.yield(); + } + } + + /// call finishOneEvent when done pub fn beginOneEvent(self: *Loop) void { _ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); @@ -624,7 +618,7 @@ pub const Loop = struct { // cause all the threads to stop switch (builtin.os) { .linux => { - self.posixFsRequest(&self.os_data.fs_end_request); + // TODO self.posixFsRequest(&self.os_data.fs_end_request); // writing 8 bytes to an eventfd cannot fail os.write(self.os_data.final_eventfd, wakeup_bytes) catch unreachable; return; @@ -672,9 +666,9 @@ pub const Loop = struct { const handle = resume_node.handle; const resume_node_id = resume_node.id; switch (resume_node_id) { - ResumeNode.Id.Basic => {}, - ResumeNode.Id.Stop => return, - ResumeNode.Id.EventFd => { + .Basic => {}, + .Stop => return, + .EventFd => { const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node); 
event_fd_node.epoll_op = os.EPOLL_CTL_MOD; const stack_node = @fieldParentPtr(std.atomic.Stack(ResumeNode.EventFd).Node, "data", event_fd_node); @@ -696,12 +690,12 @@ pub const Loop = struct { const handle = resume_node.handle; const resume_node_id = resume_node.id; switch (resume_node_id) { - ResumeNode.Id.Basic => { + .Basic => { const basic_node = @fieldParentPtr(ResumeNode.Basic, "base", resume_node); basic_node.kev = ev; }, - ResumeNode.Id.Stop => return, - ResumeNode.Id.EventFd => { + .Stop => return, + .EventFd => { const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node); const stack_node = @fieldParentPtr(std.atomic.Stack(ResumeNode.EventFd).Node, "data", event_fd_node); self.available_eventfd_resume_nodes.push(stack_node); @@ -730,9 +724,9 @@ pub const Loop = struct { const handle = resume_node.handle; const resume_node_id = resume_node.id; switch (resume_node_id) { - ResumeNode.Id.Basic => {}, - ResumeNode.Id.Stop => return, - ResumeNode.Id.EventFd => { + .Basic => {}, + .Stop => return, + .EventFd => { const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node); const stack_node = @fieldParentPtr(std.atomic.Stack(ResumeNode.EventFd).Node, "data", event_fd_node); self.available_eventfd_resume_nodes.push(stack_node); @@ -750,12 +744,12 @@ pub const Loop = struct { self.beginOneEvent(); // finished in posixFsRun after processing the msg self.os_data.fs_queue.put(request_node); switch (builtin.os) { - builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => { + .macosx, .freebsd, .netbsd => { const fs_kevs = (*const [1]os.Kevent)(&self.os_data.fs_kevent_wake); const empty_kevs = ([*]os.Kevent)(undefined)[0..0]; _ = os.kevent(self.os_data.fs_kqfd, fs_kevs, empty_kevs, null) catch unreachable; }, - builtin.Os.linux => { + .linux => { _ = @atomicRmw(i32, &self.os_data.fs_queue_item, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); const rc = os.linux.futex_wake(&self.os_data.fs_queue_item, os.linux.FUTEX_WAKE, 1); switch 
(os.linux.getErrno(rc)) { @@ -781,18 +775,18 @@ pub const Loop = struct { } while (self.os_data.fs_queue.get()) |node| { switch (node.data.msg) { - @TagType(fs.Request.Msg).End => return, - @TagType(fs.Request.Msg).PWriteV => |*msg| { + .End => return, + .PWriteV => |*msg| { msg.result = os.pwritev(msg.fd, msg.iov, msg.offset); }, - @TagType(fs.Request.Msg).PReadV => |*msg| { + .PReadV => |*msg| { msg.result = os.preadv(msg.fd, msg.iov, msg.offset); }, - @TagType(fs.Request.Msg).Open => |*msg| { + .Open => |*msg| { msg.result = os.openC(msg.path.ptr, msg.flags, msg.mode); }, - @TagType(fs.Request.Msg).Close => |*msg| os.close(msg.fd), - @TagType(fs.Request.Msg).WriteFile => |*msg| blk: { + .Close => |*msg| os.close(msg.fd), + .WriteFile => |*msg| blk: { const flags = os.O_LARGEFILE | os.O_WRONLY | os.O_CREAT | os.O_CLOEXEC | os.O_TRUNC; const fd = os.openC(msg.path.ptr, flags, msg.mode) catch |err| { @@ -804,11 +798,11 @@ pub const Loop = struct { }, } switch (node.data.finish) { - @TagType(fs.Request.Finish).TickNode => |*tick_node| self.onNextTick(tick_node), - @TagType(fs.Request.Finish).DeallocCloseOperation => |close_op| { + .TickNode => |*tick_node| self.onNextTick(tick_node), + .DeallocCloseOperation => |close_op| { self.allocator.destroy(close_op); }, - @TagType(fs.Request.Finish).NoAction => {}, + .NoAction => {}, } self.finishOneEvent(); } @@ -855,16 +849,16 @@ pub const Loop = struct { epollfd: i32, final_eventfd: i32, final_eventfd_event: os.linux.epoll_event, - fs_thread: *Thread, - fs_queue_item: i32, - fs_queue: std.atomic.Queue(fs.Request), - fs_end_request: fs.RequestNode, + // TODO fs_thread: *Thread, + // TODO fs_queue_item: i32, + // TODO fs_queue: std.atomic.Queue(fs.Request), + // TODO fs_end_request: fs.RequestNode, }; }; test "std.event.Loop - basic" { // https://github.com/ziglang/zig/issues/1908 - if (builtin.single_threaded or builtin.os != builtin.Os.linux) return error.SkipZigTest; + if (builtin.single_threaded) return 
error.SkipZigTest; const allocator = std.heap.direct_allocator; @@ -877,7 +871,7 @@ test "std.event.Loop - basic" { test "std.event.Loop - call" { // https://github.com/ziglang/zig/issues/1908 - if (builtin.single_threaded or builtin.os != builtin.Os.linux) return error.SkipZigTest; + if (builtin.single_threaded) return error.SkipZigTest; const allocator = std.heap.direct_allocator; @@ -886,9 +880,8 @@ test "std.event.Loop - call" { defer loop.deinit(); var did_it = false; - const handle = try loop.call(testEventLoop); - const handle2 = try loop.call(testEventLoop2, handle, &did_it); - defer cancel handle2; + const handle = async Loop.call(testEventLoop); + const handle2 = async Loop.call(testEventLoop2, handle, &did_it); loop.run(); @@ -899,7 +892,7 @@ async fn testEventLoop() i32 { return 1234; } -async fn testEventLoop2(h: promise->i32, did_it: *bool) void { +async fn testEventLoop2(h: anyframe->i32, did_it: *bool) void { const value = await h; testing.expect(value == 1234); did_it.* = true; From 93840f8610974109d129e6940a851c1f7a8c9fce Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 8 Aug 2019 15:34:41 -0400 Subject: [PATCH 077/125] fix var args call on non-generic function --- src/ir.cpp | 66 +++++++++++++++++++++++++++++++++++----------- std/event/loop.zig | 17 ++++++++++-- 2 files changed, 65 insertions(+), 18 deletions(-) diff --git a/src/ir.cpp b/src/ir.cpp index 20a21bb5c3..5fc31db3ef 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -15799,26 +15799,60 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c casted_args[next_arg_index] = casted_arg; next_arg_index += 1; } - size_t iter_count = (call_param_count < call_instruction->arg_count) ? 
- call_param_count : call_instruction->arg_count; - for (size_t call_i = 0; call_i < iter_count; call_i += 1) { + for (size_t call_i = 0; call_i < call_instruction->arg_count; call_i += 1) { IrInstruction *old_arg = call_instruction->args[call_i]->child; if (type_is_invalid(old_arg->value.type)) return ira->codegen->invalid_instruction; - IrInstruction *casted_arg; - if (next_arg_index < src_param_count) { - ZigType *param_type = fn_type_id->param_info[next_arg_index].type; - if (type_is_invalid(param_type)) - return ira->codegen->invalid_instruction; - casted_arg = ir_implicit_cast(ira, old_arg, param_type); - if (type_is_invalid(casted_arg->value.type)) - return ira->codegen->invalid_instruction; - } else { - casted_arg = old_arg; - } - casted_args[next_arg_index] = casted_arg; - next_arg_index += 1; + if (old_arg->value.type->id == ZigTypeIdArgTuple) { + for (size_t arg_tuple_i = old_arg->value.data.x_arg_tuple.start_index; + arg_tuple_i < old_arg->value.data.x_arg_tuple.end_index; arg_tuple_i += 1) + { + ZigVar *arg_var = get_fn_var_by_index(parent_fn_entry, arg_tuple_i); + if (arg_var == nullptr) { + ir_add_error(ira, old_arg, + buf_sprintf("compiler bug: var args can't handle void. 
https://github.com/ziglang/zig/issues/557")); + return ira->codegen->invalid_instruction; + } + IrInstruction *arg_var_ptr_inst = ir_get_var_ptr(ira, old_arg, arg_var); + if (type_is_invalid(arg_var_ptr_inst->value.type)) + return ira->codegen->invalid_instruction; + + IrInstruction *arg_tuple_arg = ir_get_deref(ira, old_arg, arg_var_ptr_inst, nullptr); + if (type_is_invalid(arg_tuple_arg->value.type)) + return ira->codegen->invalid_instruction; + + IrInstruction *casted_arg; + if (next_arg_index < src_param_count) { + ZigType *param_type = fn_type_id->param_info[next_arg_index].type; + if (type_is_invalid(param_type)) + return ira->codegen->invalid_instruction; + casted_arg = ir_implicit_cast(ira, arg_tuple_arg, param_type); + if (type_is_invalid(casted_arg->value.type)) + return ira->codegen->invalid_instruction; + } else { + casted_arg = arg_tuple_arg; + } + + casted_args[next_arg_index] = casted_arg; + next_arg_index += 1; + } + } else { + IrInstruction *casted_arg; + if (next_arg_index < src_param_count) { + ZigType *param_type = fn_type_id->param_info[next_arg_index].type; + if (type_is_invalid(param_type)) + return ira->codegen->invalid_instruction; + casted_arg = ir_implicit_cast(ira, old_arg, param_type); + if (type_is_invalid(casted_arg->value.type)) + return ira->codegen->invalid_instruction; + } else { + casted_arg = old_arg; + } + + casted_args[next_arg_index] = casted_arg; + next_arg_index += 1; + } } assert(next_arg_index == call_param_count); diff --git a/std/event/loop.zig b/std/event/loop.zig index 70cd8d2ab6..f0ae67a3d1 100644 --- a/std/event/loop.zig +++ b/std/event/loop.zig @@ -1,5 +1,6 @@ const std = @import("../std.zig"); const builtin = @import("builtin"); +const root = @import("root"); const assert = std.debug.assert; const testing = std.testing; const mem = std.mem; @@ -85,6 +86,18 @@ pub const Loop = struct { }; }; + pub const IoMode = enum { + blocking, + evented, + }; + pub const io_mode: IoMode = if (@hasDecl(root, "io_mode")) 
root.io_mode else IoMode.blocking; + var global_instance_state: Loop = undefined; + const default_instance: ?*Loop = switch (io_mode) { + .blocking => null, + .evented => &global_instance_state, + }; + pub const instance: ?*Loop = if (@hasDecl(root, "event_loop")) root.event_loop else default_instance; + /// After initialization, call run(). /// TODO copy elision / named return values so that the threads referencing *Loop /// have the correct pointer value. @@ -599,7 +612,7 @@ pub const Loop = struct { /// If the build is multi-threaded and there is an event loop, then it calls `yield`. Otherwise, /// does nothing. pub fn startCpuBoundOperation() void { - if (builtin.is_single_threaded) { + if (builtin.single_threaded) { return; } else if (instance) |event_loop| { event_loop.yield(); @@ -881,7 +894,7 @@ test "std.event.Loop - call" { var did_it = false; const handle = async Loop.call(testEventLoop); - const handle2 = async Loop.call(testEventLoop2, handle, &did_it); + const handle2 = async Loop.call(testEventLoop2, &handle, &did_it); loop.run(); From da56959a9a7dd7b83a8d2bc6b1454ae546a48be6 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 8 Aug 2019 16:41:38 -0400 Subject: [PATCH 078/125] closer to std lib event stuff working --- std/event/channel.zig | 46 +- std/event/fs.zig | 1184 ++++++++++++++++++++--------------------- std/event/loop.zig | 66 ++- 3 files changed, 642 insertions(+), 654 deletions(-) diff --git a/std/event/channel.zig b/std/event/channel.zig index bb2fbbf126..c9686e37e9 100644 --- a/std/event/channel.zig +++ b/std/event/channel.zig @@ -89,12 +89,7 @@ pub fn Channel(comptime T: type) type { /// puts a data item in the channel. The promise completes when the value has been added to the /// buffer, or in the case of a zero size buffer, when the item has been retrieved by a getter. 
pub async fn put(self: *SelfChannel, data: T) void { - // TODO fix this workaround - suspend { - resume @handle(); - } - - var my_tick_node = Loop.NextTickNode.init(@handle()); + var my_tick_node = Loop.NextTickNode.init(@frame()); var queue_node = std.atomic.Queue(PutNode).Node.init(PutNode{ .tick_node = &my_tick_node, .data = data, @@ -122,15 +117,10 @@ pub fn Channel(comptime T: type) type { /// await this function to get an item from the channel. If the buffer is empty, the promise will /// complete when the next item is put in the channel. pub async fn get(self: *SelfChannel) T { - // TODO fix this workaround - suspend { - resume @handle(); - } - // TODO integrate this function with named return values // so we can get rid of this extra result copy var result: T = undefined; - var my_tick_node = Loop.NextTickNode.init(@handle()); + var my_tick_node = Loop.NextTickNode.init(@frame()); var queue_node = std.atomic.Queue(GetNode).Node.init(GetNode{ .tick_node = &my_tick_node, .data = GetNode.Data{ @@ -173,15 +163,10 @@ pub fn Channel(comptime T: type) type { /// Await is necessary for locking purposes. The function will be resumed after checking the channel /// for data and will not wait for data to be available. 
pub async fn getOrNull(self: *SelfChannel) ?T { - // TODO fix this workaround - suspend { - resume @handle(); - } - // TODO integrate this function with named return values // so we can get rid of this extra result copy var result: ?T = null; - var my_tick_node = Loop.NextTickNode.init(@handle()); + var my_tick_node = Loop.NextTickNode.init(@frame()); var or_null_node = std.atomic.Queue(*std.atomic.Queue(GetNode).Node).Node.init(undefined); var queue_node = std.atomic.Queue(GetNode).Node.init(GetNode{ .tick_node = &my_tick_node, @@ -334,41 +319,36 @@ test "std.event.Channel" { const channel = try Channel(i32).create(&loop, 0); defer channel.destroy(); - const handle = try async testChannelGetter(&loop, channel); - defer cancel handle; - - const putter = try async testChannelPutter(channel); - defer cancel putter; + const handle = async testChannelGetter(&loop, channel); + const putter = async testChannelPutter(channel); loop.run(); } async fn testChannelGetter(loop: *Loop, channel: *Channel(i32)) void { - errdefer @panic("test failed"); - - const value1_promise = try async channel.get(); + const value1_promise = async channel.get(); const value1 = await value1_promise; testing.expect(value1 == 1234); - const value2_promise = try async channel.get(); + const value2_promise = async channel.get(); const value2 = await value2_promise; testing.expect(value2 == 4567); - const value3_promise = try async channel.getOrNull(); + const value3_promise = async channel.getOrNull(); const value3 = await value3_promise; testing.expect(value3 == null); - const last_put = try async testPut(channel, 4444); - const value4 = await try async channel.getOrNull(); + const last_put = async testPut(channel, 4444); + const value4 = channel.getOrNull(); testing.expect(value4.? 
== 4444); await last_put; } async fn testChannelPutter(channel: *Channel(i32)) void { - await (async channel.put(1234) catch @panic("out of memory")); - await (async channel.put(4567) catch @panic("out of memory")); + channel.put(1234); + channel.put(4567); } async fn testPut(channel: *Channel(i32), value: i32) void { - await (async channel.put(value) catch @panic("out of memory")); + channel.put(value); } diff --git a/std/event/fs.zig b/std/event/fs.zig index 3ead77e949..22e9fc38c9 100644 --- a/std/event/fs.zig +++ b/std/event/fs.zig @@ -715,594 +715,594 @@ pub const WatchEventId = enum { Delete, }; -pub const WatchEventError = error{ - UserResourceLimitReached, - SystemResources, - AccessDenied, - Unexpected, // TODO remove this possibility -}; - -pub fn Watch(comptime V: type) type { - return struct { - channel: *event.Channel(Event.Error!Event), - os_data: OsData, - - const OsData = switch (builtin.os) { - .macosx, .freebsd, .netbsd => struct { - file_table: FileTable, - table_lock: event.Lock, - - const FileTable = std.AutoHashMap([]const u8, *Put); - const Put = struct { - putter: promise, - value_ptr: *V, - }; - }, - - .linux => LinuxOsData, - .windows => WindowsOsData, - - else => @compileError("Unsupported OS"), - }; - - const WindowsOsData = struct { - table_lock: event.Lock, - dir_table: DirTable, - all_putters: std.atomic.Queue(promise), - ref_count: std.atomic.Int(usize), - - const DirTable = std.AutoHashMap([]const u8, *Dir); - const FileTable = std.AutoHashMap([]const u16, V); - - const Dir = struct { - putter: promise, - file_table: FileTable, - table_lock: event.Lock, - }; - }; - - const LinuxOsData = struct { - putter: promise, - inotify_fd: i32, - wd_table: WdTable, - table_lock: event.Lock, - - const WdTable = std.AutoHashMap(i32, Dir); - const FileTable = std.AutoHashMap([]const u8, V); - - const Dir = struct { - dirname: []const u8, - file_table: FileTable, - }; - }; - - const FileToHandle = std.AutoHashMap([]const u8, promise); - - const Self 
= @This(); - - pub const Event = struct { - id: Id, - data: V, - - pub const Id = WatchEventId; - pub const Error = WatchEventError; - }; - - pub fn create(loop: *Loop, event_buf_count: usize) !*Self { - const channel = try event.Channel(Self.Event.Error!Self.Event).create(loop, event_buf_count); - errdefer channel.destroy(); - - switch (builtin.os) { - .linux => { - const inotify_fd = try os.inotify_init1(os.linux.IN_NONBLOCK | os.linux.IN_CLOEXEC); - errdefer os.close(inotify_fd); - - var result: *Self = undefined; - _ = try async linuxEventPutter(inotify_fd, channel, &result); - return result; - }, - - .windows => { - const self = try loop.allocator.create(Self); - errdefer loop.allocator.destroy(self); - self.* = Self{ - .channel = channel, - .os_data = OsData{ - .table_lock = event.Lock.init(loop), - .dir_table = OsData.DirTable.init(loop.allocator), - .ref_count = std.atomic.Int(usize).init(1), - .all_putters = std.atomic.Queue(promise).init(), - }, - }; - return self; - }, - - .macosx, .freebsd, .netbsd => { - const self = try loop.allocator.create(Self); - errdefer loop.allocator.destroy(self); - - self.* = Self{ - .channel = channel, - .os_data = OsData{ - .table_lock = event.Lock.init(loop), - .file_table = OsData.FileTable.init(loop.allocator), - }, - }; - return self; - }, - else => @compileError("Unsupported OS"), - } - } - - /// All addFile calls and removeFile calls must have completed. 
- pub fn destroy(self: *Self) void { - switch (builtin.os) { - .macosx, .freebsd, .netbsd => { - // TODO we need to cancel the coroutines before destroying the lock - self.os_data.table_lock.deinit(); - var it = self.os_data.file_table.iterator(); - while (it.next()) |entry| { - cancel entry.value.putter; - self.channel.loop.allocator.free(entry.key); - } - self.channel.destroy(); - }, - .linux => cancel self.os_data.putter, - .windows => { - while (self.os_data.all_putters.get()) |putter_node| { - cancel putter_node.data; - } - self.deref(); - }, - else => @compileError("Unsupported OS"), - } - } - - fn ref(self: *Self) void { - _ = self.os_data.ref_count.incr(); - } - - fn deref(self: *Self) void { - if (self.os_data.ref_count.decr() == 1) { - const allocator = self.channel.loop.allocator; - self.os_data.table_lock.deinit(); - var it = self.os_data.dir_table.iterator(); - while (it.next()) |entry| { - allocator.free(entry.key); - allocator.destroy(entry.value); - } - self.os_data.dir_table.deinit(); - self.channel.destroy(); - allocator.destroy(self); - } - } - - pub async fn addFile(self: *Self, file_path: []const u8, value: V) !?V { - switch (builtin.os) { - .macosx, .freebsd, .netbsd => return await (async addFileKEvent(self, file_path, value) catch unreachable), - .linux => return await (async addFileLinux(self, file_path, value) catch unreachable), - .windows => return await (async addFileWindows(self, file_path, value) catch unreachable), - else => @compileError("Unsupported OS"), - } - } - - async fn addFileKEvent(self: *Self, file_path: []const u8, value: V) !?V { - const resolved_path = try std.fs.path.resolve(self.channel.loop.allocator, [_][]const u8{file_path}); - var resolved_path_consumed = false; - defer if (!resolved_path_consumed) self.channel.loop.allocator.free(resolved_path); - - var close_op = try CloseOperation.start(self.channel.loop); - var close_op_consumed = false; - defer if (!close_op_consumed) close_op.finish(); - - const flags = if 
(os.darwin.is_the_target) os.O_SYMLINK | os.O_EVTONLY else 0; - const mode = 0; - const fd = try await (async openPosix(self.channel.loop, resolved_path, flags, mode) catch unreachable); - close_op.setHandle(fd); - - var put_data: *OsData.Put = undefined; - const putter = try async self.kqPutEvents(close_op, value, &put_data); - close_op_consumed = true; - errdefer cancel putter; - - const result = blk: { - const held = await (async self.os_data.table_lock.acquire() catch unreachable); - defer held.release(); - - const gop = try self.os_data.file_table.getOrPut(resolved_path); - if (gop.found_existing) { - const prev_value = gop.kv.value.value_ptr.*; - cancel gop.kv.value.putter; - gop.kv.value = put_data; - break :blk prev_value; - } else { - resolved_path_consumed = true; - gop.kv.value = put_data; - break :blk null; - } - }; - - return result; - } - - async fn kqPutEvents(self: *Self, close_op: *CloseOperation, value: V, out_put: **OsData.Put) void { - // TODO https://github.com/ziglang/zig/issues/1194 - suspend { - resume @handle(); - } - - var value_copy = value; - var put = OsData.Put{ - .putter = @handle(), - .value_ptr = &value_copy, - }; - out_put.* = &put; - self.channel.loop.beginOneEvent(); - - defer { - close_op.finish(); - self.channel.loop.finishOneEvent(); - } - - while (true) { - if (await (async self.channel.loop.bsdWaitKev( - @intCast(usize, close_op.getHandle()), - os.EVFILT_VNODE, - os.NOTE_WRITE | os.NOTE_DELETE, - ) catch unreachable)) |kev| { - // TODO handle EV_ERROR - if (kev.fflags & os.NOTE_DELETE != 0) { - await (async self.channel.put(Self.Event{ - .id = Event.Id.Delete, - .data = value_copy, - }) catch unreachable); - } else if (kev.fflags & os.NOTE_WRITE != 0) { - await (async self.channel.put(Self.Event{ - .id = Event.Id.CloseWrite, - .data = value_copy, - }) catch unreachable); - } - } else |err| switch (err) { - error.EventNotFound => unreachable, - error.ProcessNotFound => unreachable, - error.Overflow => unreachable, - 
error.AccessDenied, error.SystemResources => |casted_err| { - await (async self.channel.put(casted_err) catch unreachable); - }, - } - } - } - - async fn addFileLinux(self: *Self, file_path: []const u8, value: V) !?V { - const value_copy = value; - - const dirname = std.fs.path.dirname(file_path) orelse "."; - const dirname_with_null = try std.cstr.addNullByte(self.channel.loop.allocator, dirname); - var dirname_with_null_consumed = false; - defer if (!dirname_with_null_consumed) self.channel.loop.allocator.free(dirname_with_null); - - const basename = std.fs.path.basename(file_path); - const basename_with_null = try std.cstr.addNullByte(self.channel.loop.allocator, basename); - var basename_with_null_consumed = false; - defer if (!basename_with_null_consumed) self.channel.loop.allocator.free(basename_with_null); - - const wd = try os.inotify_add_watchC( - self.os_data.inotify_fd, - dirname_with_null.ptr, - os.linux.IN_CLOSE_WRITE | os.linux.IN_ONLYDIR | os.linux.IN_EXCL_UNLINK, - ); - // wd is either a newly created watch or an existing one. - - const held = await (async self.os_data.table_lock.acquire() catch unreachable); - defer held.release(); - - const gop = try self.os_data.wd_table.getOrPut(wd); - if (!gop.found_existing) { - gop.kv.value = OsData.Dir{ - .dirname = dirname_with_null, - .file_table = OsData.FileTable.init(self.channel.loop.allocator), - }; - dirname_with_null_consumed = true; - } - const dir = &gop.kv.value; - - const file_table_gop = try dir.file_table.getOrPut(basename_with_null); - if (file_table_gop.found_existing) { - const prev_value = file_table_gop.kv.value; - file_table_gop.kv.value = value_copy; - return prev_value; - } else { - file_table_gop.kv.value = value_copy; - basename_with_null_consumed = true; - return null; - } - } - - async fn addFileWindows(self: *Self, file_path: []const u8, value: V) !?V { - const value_copy = value; - // TODO we might need to convert dirname and basename to canonical file paths ("short"?) 
- - const dirname = try std.mem.dupe(self.channel.loop.allocator, u8, std.fs.path.dirname(file_path) orelse "."); - var dirname_consumed = false; - defer if (!dirname_consumed) self.channel.loop.allocator.free(dirname); - - const dirname_utf16le = try std.unicode.utf8ToUtf16LeWithNull(self.channel.loop.allocator, dirname); - defer self.channel.loop.allocator.free(dirname_utf16le); - - // TODO https://github.com/ziglang/zig/issues/265 - const basename = std.fs.path.basename(file_path); - const basename_utf16le_null = try std.unicode.utf8ToUtf16LeWithNull(self.channel.loop.allocator, basename); - var basename_utf16le_null_consumed = false; - defer if (!basename_utf16le_null_consumed) self.channel.loop.allocator.free(basename_utf16le_null); - const basename_utf16le_no_null = basename_utf16le_null[0 .. basename_utf16le_null.len - 1]; - - const dir_handle = try windows.CreateFileW( - dirname_utf16le.ptr, - windows.FILE_LIST_DIRECTORY, - windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE | windows.FILE_SHARE_WRITE, - null, - windows.OPEN_EXISTING, - windows.FILE_FLAG_BACKUP_SEMANTICS | windows.FILE_FLAG_OVERLAPPED, - null, - ); - var dir_handle_consumed = false; - defer if (!dir_handle_consumed) windows.CloseHandle(dir_handle); - - const held = await (async self.os_data.table_lock.acquire() catch unreachable); - defer held.release(); - - const gop = try self.os_data.dir_table.getOrPut(dirname); - if (gop.found_existing) { - const dir = gop.kv.value; - const held_dir_lock = await (async dir.table_lock.acquire() catch unreachable); - defer held_dir_lock.release(); - - const file_gop = try dir.file_table.getOrPut(basename_utf16le_no_null); - if (file_gop.found_existing) { - const prev_value = file_gop.kv.value; - file_gop.kv.value = value_copy; - return prev_value; - } else { - file_gop.kv.value = value_copy; - basename_utf16le_null_consumed = true; - return null; - } - } else { - errdefer _ = self.os_data.dir_table.remove(dirname); - const dir = try 
self.channel.loop.allocator.create(OsData.Dir); - errdefer self.channel.loop.allocator.destroy(dir); - - dir.* = OsData.Dir{ - .file_table = OsData.FileTable.init(self.channel.loop.allocator), - .table_lock = event.Lock.init(self.channel.loop), - .putter = undefined, - }; - gop.kv.value = dir; - assert((try dir.file_table.put(basename_utf16le_no_null, value_copy)) == null); - basename_utf16le_null_consumed = true; - - dir.putter = try async self.windowsDirReader(dir_handle, dir); - dir_handle_consumed = true; - - dirname_consumed = true; - - return null; - } - } - - async fn windowsDirReader(self: *Self, dir_handle: windows.HANDLE, dir: *OsData.Dir) void { - // TODO https://github.com/ziglang/zig/issues/1194 - suspend { - resume @handle(); - } - - self.ref(); - defer self.deref(); - - defer os.close(dir_handle); - - var putter_node = std.atomic.Queue(promise).Node{ - .data = @handle(), - .prev = null, - .next = null, - }; - self.os_data.all_putters.put(&putter_node); - defer _ = self.os_data.all_putters.remove(&putter_node); - - var resume_node = Loop.ResumeNode.Basic{ - .base = Loop.ResumeNode{ - .id = Loop.ResumeNode.Id.Basic, - .handle = @handle(), - .overlapped = windows.OVERLAPPED{ - .Internal = 0, - .InternalHigh = 0, - .Offset = 0, - .OffsetHigh = 0, - .hEvent = null, - }, - }, - }; - var event_buf: [4096]u8 align(@alignOf(windows.FILE_NOTIFY_INFORMATION)) = undefined; - - // TODO handle this error not in the channel but in the setup - _ = windows.CreateIoCompletionPort( - dir_handle, - self.channel.loop.os_data.io_port, - undefined, - undefined, - ) catch |err| { - await (async self.channel.put(err) catch unreachable); - return; - }; - - while (true) { - { - // TODO only 1 beginOneEvent for the whole coroutine - self.channel.loop.beginOneEvent(); - errdefer self.channel.loop.finishOneEvent(); - errdefer { - _ = windows.kernel32.CancelIoEx(dir_handle, &resume_node.base.overlapped); - } - suspend { - _ = windows.kernel32.ReadDirectoryChangesW( - dir_handle, - 
&event_buf, - @intCast(windows.DWORD, event_buf.len), - windows.FALSE, // watch subtree - windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME | - windows.FILE_NOTIFY_CHANGE_ATTRIBUTES | windows.FILE_NOTIFY_CHANGE_SIZE | - windows.FILE_NOTIFY_CHANGE_LAST_WRITE | windows.FILE_NOTIFY_CHANGE_LAST_ACCESS | - windows.FILE_NOTIFY_CHANGE_CREATION | windows.FILE_NOTIFY_CHANGE_SECURITY, - null, // number of bytes transferred (unused for async) - &resume_node.base.overlapped, - null, // completion routine - unused because we use IOCP - ); - } - } - var bytes_transferred: windows.DWORD = undefined; - if (windows.kernel32.GetOverlappedResult(dir_handle, &resume_node.base.overlapped, &bytes_transferred, windows.FALSE) == 0) { - const err = switch (windows.kernel32.GetLastError()) { - else => |err| windows.unexpectedError(err), - }; - await (async self.channel.put(err) catch unreachable); - } else { - // can't use @bytesToSlice because of the special variable length name field - var ptr = event_buf[0..].ptr; - const end_ptr = ptr + bytes_transferred; - var ev: *windows.FILE_NOTIFY_INFORMATION = undefined; - while (@ptrToInt(ptr) < @ptrToInt(end_ptr)) : (ptr += ev.NextEntryOffset) { - ev = @ptrCast(*windows.FILE_NOTIFY_INFORMATION, ptr); - const emit = switch (ev.Action) { - windows.FILE_ACTION_REMOVED => WatchEventId.Delete, - windows.FILE_ACTION_MODIFIED => WatchEventId.CloseWrite, - else => null, - }; - if (emit) |id| { - const basename_utf16le = ([*]u16)(&ev.FileName)[0 .. 
ev.FileNameLength / 2]; - const user_value = blk: { - const held = await (async dir.table_lock.acquire() catch unreachable); - defer held.release(); - - if (dir.file_table.get(basename_utf16le)) |entry| { - break :blk entry.value; - } else { - break :blk null; - } - }; - if (user_value) |v| { - await (async self.channel.put(Event{ - .id = id, - .data = v, - }) catch unreachable); - } - } - if (ev.NextEntryOffset == 0) break; - } - } - } - } - - pub async fn removeFile(self: *Self, file_path: []const u8) ?V { - @panic("TODO"); - } - - async fn linuxEventPutter(inotify_fd: i32, channel: *event.Channel(Event.Error!Event), out_watch: **Self) void { - // TODO https://github.com/ziglang/zig/issues/1194 - suspend { - resume @handle(); - } - - const loop = channel.loop; - - var watch = Self{ - .channel = channel, - .os_data = OsData{ - .putter = @handle(), - .inotify_fd = inotify_fd, - .wd_table = OsData.WdTable.init(loop.allocator), - .table_lock = event.Lock.init(loop), - }, - }; - out_watch.* = &watch; - - loop.beginOneEvent(); - - defer { - watch.os_data.table_lock.deinit(); - var wd_it = watch.os_data.wd_table.iterator(); - while (wd_it.next()) |wd_entry| { - var file_it = wd_entry.value.file_table.iterator(); - while (file_it.next()) |file_entry| { - loop.allocator.free(file_entry.key); - } - loop.allocator.free(wd_entry.value.dirname); - } - loop.finishOneEvent(); - os.close(inotify_fd); - channel.destroy(); - } - - var event_buf: [4096]u8 align(@alignOf(os.linux.inotify_event)) = undefined; - - while (true) { - const rc = os.linux.read(inotify_fd, &event_buf, event_buf.len); - const errno = os.linux.getErrno(rc); - switch (errno) { - 0 => { - // can't use @bytesToSlice because of the special variable length name field - var ptr = event_buf[0..].ptr; - const end_ptr = ptr + event_buf.len; - var ev: *os.linux.inotify_event = undefined; - while (@ptrToInt(ptr) < @ptrToInt(end_ptr)) : (ptr += @sizeOf(os.linux.inotify_event) + ev.len) { - ev = 
@ptrCast(*os.linux.inotify_event, ptr); - if (ev.mask & os.linux.IN_CLOSE_WRITE == os.linux.IN_CLOSE_WRITE) { - const basename_ptr = ptr + @sizeOf(os.linux.inotify_event); - const basename_with_null = basename_ptr[0 .. std.mem.len(u8, basename_ptr) + 1]; - const user_value = blk: { - const held = await (async watch.os_data.table_lock.acquire() catch unreachable); - defer held.release(); - - const dir = &watch.os_data.wd_table.get(ev.wd).?.value; - if (dir.file_table.get(basename_with_null)) |entry| { - break :blk entry.value; - } else { - break :blk null; - } - }; - if (user_value) |v| { - await (async channel.put(Event{ - .id = WatchEventId.CloseWrite, - .data = v, - }) catch unreachable); - } - } - } - }, - os.linux.EINTR => continue, - os.linux.EINVAL => unreachable, - os.linux.EFAULT => unreachable, - os.linux.EAGAIN => { - (await (async loop.linuxWaitFd( - inotify_fd, - os.linux.EPOLLET | os.linux.EPOLLIN, - ) catch unreachable)) catch |err| { - const transformed_err = switch (err) { - error.FileDescriptorAlreadyPresentInSet => unreachable, - error.OperationCausesCircularLoop => unreachable, - error.FileDescriptorNotRegistered => unreachable, - error.FileDescriptorIncompatibleWithEpoll => unreachable, - error.Unexpected => unreachable, - else => |e| e, - }; - await (async channel.put(transformed_err) catch unreachable); - }; - }, - else => unreachable, - } - } - } - }; -} +//pub const WatchEventError = error{ +// UserResourceLimitReached, +// SystemResources, +// AccessDenied, +// Unexpected, // TODO remove this possibility +//}; +// +//pub fn Watch(comptime V: type) type { +// return struct { +// channel: *event.Channel(Event.Error!Event), +// os_data: OsData, +// +// const OsData = switch (builtin.os) { +// .macosx, .freebsd, .netbsd => struct { +// file_table: FileTable, +// table_lock: event.Lock, +// +// const FileTable = std.AutoHashMap([]const u8, *Put); +// const Put = struct { +// putter: promise, +// value_ptr: *V, +// }; +// }, +// +// .linux => 
LinuxOsData, +// .windows => WindowsOsData, +// +// else => @compileError("Unsupported OS"), +// }; +// +// const WindowsOsData = struct { +// table_lock: event.Lock, +// dir_table: DirTable, +// all_putters: std.atomic.Queue(promise), +// ref_count: std.atomic.Int(usize), +// +// const DirTable = std.AutoHashMap([]const u8, *Dir); +// const FileTable = std.AutoHashMap([]const u16, V); +// +// const Dir = struct { +// putter: promise, +// file_table: FileTable, +// table_lock: event.Lock, +// }; +// }; +// +// const LinuxOsData = struct { +// putter: promise, +// inotify_fd: i32, +// wd_table: WdTable, +// table_lock: event.Lock, +// +// const WdTable = std.AutoHashMap(i32, Dir); +// const FileTable = std.AutoHashMap([]const u8, V); +// +// const Dir = struct { +// dirname: []const u8, +// file_table: FileTable, +// }; +// }; +// +// const FileToHandle = std.AutoHashMap([]const u8, promise); +// +// const Self = @This(); +// +// pub const Event = struct { +// id: Id, +// data: V, +// +// pub const Id = WatchEventId; +// pub const Error = WatchEventError; +// }; +// +// pub fn create(loop: *Loop, event_buf_count: usize) !*Self { +// const channel = try event.Channel(Self.Event.Error!Self.Event).create(loop, event_buf_count); +// errdefer channel.destroy(); +// +// switch (builtin.os) { +// .linux => { +// const inotify_fd = try os.inotify_init1(os.linux.IN_NONBLOCK | os.linux.IN_CLOEXEC); +// errdefer os.close(inotify_fd); +// +// var result: *Self = undefined; +// _ = try async linuxEventPutter(inotify_fd, channel, &result); +// return result; +// }, +// +// .windows => { +// const self = try loop.allocator.create(Self); +// errdefer loop.allocator.destroy(self); +// self.* = Self{ +// .channel = channel, +// .os_data = OsData{ +// .table_lock = event.Lock.init(loop), +// .dir_table = OsData.DirTable.init(loop.allocator), +// .ref_count = std.atomic.Int(usize).init(1), +// .all_putters = std.atomic.Queue(promise).init(), +// }, +// }; +// return self; +// }, +// 
+// .macosx, .freebsd, .netbsd => { +// const self = try loop.allocator.create(Self); +// errdefer loop.allocator.destroy(self); +// +// self.* = Self{ +// .channel = channel, +// .os_data = OsData{ +// .table_lock = event.Lock.init(loop), +// .file_table = OsData.FileTable.init(loop.allocator), +// }, +// }; +// return self; +// }, +// else => @compileError("Unsupported OS"), +// } +// } +// +// /// All addFile calls and removeFile calls must have completed. +// pub fn destroy(self: *Self) void { +// switch (builtin.os) { +// .macosx, .freebsd, .netbsd => { +// // TODO we need to cancel the coroutines before destroying the lock +// self.os_data.table_lock.deinit(); +// var it = self.os_data.file_table.iterator(); +// while (it.next()) |entry| { +// cancel entry.value.putter; +// self.channel.loop.allocator.free(entry.key); +// } +// self.channel.destroy(); +// }, +// .linux => cancel self.os_data.putter, +// .windows => { +// while (self.os_data.all_putters.get()) |putter_node| { +// cancel putter_node.data; +// } +// self.deref(); +// }, +// else => @compileError("Unsupported OS"), +// } +// } +// +// fn ref(self: *Self) void { +// _ = self.os_data.ref_count.incr(); +// } +// +// fn deref(self: *Self) void { +// if (self.os_data.ref_count.decr() == 1) { +// const allocator = self.channel.loop.allocator; +// self.os_data.table_lock.deinit(); +// var it = self.os_data.dir_table.iterator(); +// while (it.next()) |entry| { +// allocator.free(entry.key); +// allocator.destroy(entry.value); +// } +// self.os_data.dir_table.deinit(); +// self.channel.destroy(); +// allocator.destroy(self); +// } +// } +// +// pub async fn addFile(self: *Self, file_path: []const u8, value: V) !?V { +// switch (builtin.os) { +// .macosx, .freebsd, .netbsd => return await (async addFileKEvent(self, file_path, value) catch unreachable), +// .linux => return await (async addFileLinux(self, file_path, value) catch unreachable), +// .windows => return await (async addFileWindows(self, 
file_path, value) catch unreachable), +// else => @compileError("Unsupported OS"), +// } +// } +// +// async fn addFileKEvent(self: *Self, file_path: []const u8, value: V) !?V { +// const resolved_path = try std.fs.path.resolve(self.channel.loop.allocator, [_][]const u8{file_path}); +// var resolved_path_consumed = false; +// defer if (!resolved_path_consumed) self.channel.loop.allocator.free(resolved_path); +// +// var close_op = try CloseOperation.start(self.channel.loop); +// var close_op_consumed = false; +// defer if (!close_op_consumed) close_op.finish(); +// +// const flags = if (os.darwin.is_the_target) os.O_SYMLINK | os.O_EVTONLY else 0; +// const mode = 0; +// const fd = try await (async openPosix(self.channel.loop, resolved_path, flags, mode) catch unreachable); +// close_op.setHandle(fd); +// +// var put_data: *OsData.Put = undefined; +// const putter = try async self.kqPutEvents(close_op, value, &put_data); +// close_op_consumed = true; +// errdefer cancel putter; +// +// const result = blk: { +// const held = await (async self.os_data.table_lock.acquire() catch unreachable); +// defer held.release(); +// +// const gop = try self.os_data.file_table.getOrPut(resolved_path); +// if (gop.found_existing) { +// const prev_value = gop.kv.value.value_ptr.*; +// cancel gop.kv.value.putter; +// gop.kv.value = put_data; +// break :blk prev_value; +// } else { +// resolved_path_consumed = true; +// gop.kv.value = put_data; +// break :blk null; +// } +// }; +// +// return result; +// } +// +// async fn kqPutEvents(self: *Self, close_op: *CloseOperation, value: V, out_put: **OsData.Put) void { +// // TODO https://github.com/ziglang/zig/issues/1194 +// suspend { +// resume @handle(); +// } +// +// var value_copy = value; +// var put = OsData.Put{ +// .putter = @handle(), +// .value_ptr = &value_copy, +// }; +// out_put.* = &put; +// self.channel.loop.beginOneEvent(); +// +// defer { +// close_op.finish(); +// self.channel.loop.finishOneEvent(); +// } +// +// while 
(true) { +// if (await (async self.channel.loop.bsdWaitKev( +// @intCast(usize, close_op.getHandle()), +// os.EVFILT_VNODE, +// os.NOTE_WRITE | os.NOTE_DELETE, +// ) catch unreachable)) |kev| { +// // TODO handle EV_ERROR +// if (kev.fflags & os.NOTE_DELETE != 0) { +// await (async self.channel.put(Self.Event{ +// .id = Event.Id.Delete, +// .data = value_copy, +// }) catch unreachable); +// } else if (kev.fflags & os.NOTE_WRITE != 0) { +// await (async self.channel.put(Self.Event{ +// .id = Event.Id.CloseWrite, +// .data = value_copy, +// }) catch unreachable); +// } +// } else |err| switch (err) { +// error.EventNotFound => unreachable, +// error.ProcessNotFound => unreachable, +// error.Overflow => unreachable, +// error.AccessDenied, error.SystemResources => |casted_err| { +// await (async self.channel.put(casted_err) catch unreachable); +// }, +// } +// } +// } +// +// async fn addFileLinux(self: *Self, file_path: []const u8, value: V) !?V { +// const value_copy = value; +// +// const dirname = std.fs.path.dirname(file_path) orelse "."; +// const dirname_with_null = try std.cstr.addNullByte(self.channel.loop.allocator, dirname); +// var dirname_with_null_consumed = false; +// defer if (!dirname_with_null_consumed) self.channel.loop.allocator.free(dirname_with_null); +// +// const basename = std.fs.path.basename(file_path); +// const basename_with_null = try std.cstr.addNullByte(self.channel.loop.allocator, basename); +// var basename_with_null_consumed = false; +// defer if (!basename_with_null_consumed) self.channel.loop.allocator.free(basename_with_null); +// +// const wd = try os.inotify_add_watchC( +// self.os_data.inotify_fd, +// dirname_with_null.ptr, +// os.linux.IN_CLOSE_WRITE | os.linux.IN_ONLYDIR | os.linux.IN_EXCL_UNLINK, +// ); +// // wd is either a newly created watch or an existing one. 
+// +// const held = await (async self.os_data.table_lock.acquire() catch unreachable); +// defer held.release(); +// +// const gop = try self.os_data.wd_table.getOrPut(wd); +// if (!gop.found_existing) { +// gop.kv.value = OsData.Dir{ +// .dirname = dirname_with_null, +// .file_table = OsData.FileTable.init(self.channel.loop.allocator), +// }; +// dirname_with_null_consumed = true; +// } +// const dir = &gop.kv.value; +// +// const file_table_gop = try dir.file_table.getOrPut(basename_with_null); +// if (file_table_gop.found_existing) { +// const prev_value = file_table_gop.kv.value; +// file_table_gop.kv.value = value_copy; +// return prev_value; +// } else { +// file_table_gop.kv.value = value_copy; +// basename_with_null_consumed = true; +// return null; +// } +// } +// +// async fn addFileWindows(self: *Self, file_path: []const u8, value: V) !?V { +// const value_copy = value; +// // TODO we might need to convert dirname and basename to canonical file paths ("short"?) +// +// const dirname = try std.mem.dupe(self.channel.loop.allocator, u8, std.fs.path.dirname(file_path) orelse "."); +// var dirname_consumed = false; +// defer if (!dirname_consumed) self.channel.loop.allocator.free(dirname); +// +// const dirname_utf16le = try std.unicode.utf8ToUtf16LeWithNull(self.channel.loop.allocator, dirname); +// defer self.channel.loop.allocator.free(dirname_utf16le); +// +// // TODO https://github.com/ziglang/zig/issues/265 +// const basename = std.fs.path.basename(file_path); +// const basename_utf16le_null = try std.unicode.utf8ToUtf16LeWithNull(self.channel.loop.allocator, basename); +// var basename_utf16le_null_consumed = false; +// defer if (!basename_utf16le_null_consumed) self.channel.loop.allocator.free(basename_utf16le_null); +// const basename_utf16le_no_null = basename_utf16le_null[0 .. 
basename_utf16le_null.len - 1]; +// +// const dir_handle = try windows.CreateFileW( +// dirname_utf16le.ptr, +// windows.FILE_LIST_DIRECTORY, +// windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE | windows.FILE_SHARE_WRITE, +// null, +// windows.OPEN_EXISTING, +// windows.FILE_FLAG_BACKUP_SEMANTICS | windows.FILE_FLAG_OVERLAPPED, +// null, +// ); +// var dir_handle_consumed = false; +// defer if (!dir_handle_consumed) windows.CloseHandle(dir_handle); +// +// const held = await (async self.os_data.table_lock.acquire() catch unreachable); +// defer held.release(); +// +// const gop = try self.os_data.dir_table.getOrPut(dirname); +// if (gop.found_existing) { +// const dir = gop.kv.value; +// const held_dir_lock = await (async dir.table_lock.acquire() catch unreachable); +// defer held_dir_lock.release(); +// +// const file_gop = try dir.file_table.getOrPut(basename_utf16le_no_null); +// if (file_gop.found_existing) { +// const prev_value = file_gop.kv.value; +// file_gop.kv.value = value_copy; +// return prev_value; +// } else { +// file_gop.kv.value = value_copy; +// basename_utf16le_null_consumed = true; +// return null; +// } +// } else { +// errdefer _ = self.os_data.dir_table.remove(dirname); +// const dir = try self.channel.loop.allocator.create(OsData.Dir); +// errdefer self.channel.loop.allocator.destroy(dir); +// +// dir.* = OsData.Dir{ +// .file_table = OsData.FileTable.init(self.channel.loop.allocator), +// .table_lock = event.Lock.init(self.channel.loop), +// .putter = undefined, +// }; +// gop.kv.value = dir; +// assert((try dir.file_table.put(basename_utf16le_no_null, value_copy)) == null); +// basename_utf16le_null_consumed = true; +// +// dir.putter = try async self.windowsDirReader(dir_handle, dir); +// dir_handle_consumed = true; +// +// dirname_consumed = true; +// +// return null; +// } +// } +// +// async fn windowsDirReader(self: *Self, dir_handle: windows.HANDLE, dir: *OsData.Dir) void { +// // TODO https://github.com/ziglang/zig/issues/1194 
+// suspend { +// resume @handle(); +// } +// +// self.ref(); +// defer self.deref(); +// +// defer os.close(dir_handle); +// +// var putter_node = std.atomic.Queue(promise).Node{ +// .data = @handle(), +// .prev = null, +// .next = null, +// }; +// self.os_data.all_putters.put(&putter_node); +// defer _ = self.os_data.all_putters.remove(&putter_node); +// +// var resume_node = Loop.ResumeNode.Basic{ +// .base = Loop.ResumeNode{ +// .id = Loop.ResumeNode.Id.Basic, +// .handle = @handle(), +// .overlapped = windows.OVERLAPPED{ +// .Internal = 0, +// .InternalHigh = 0, +// .Offset = 0, +// .OffsetHigh = 0, +// .hEvent = null, +// }, +// }, +// }; +// var event_buf: [4096]u8 align(@alignOf(windows.FILE_NOTIFY_INFORMATION)) = undefined; +// +// // TODO handle this error not in the channel but in the setup +// _ = windows.CreateIoCompletionPort( +// dir_handle, +// self.channel.loop.os_data.io_port, +// undefined, +// undefined, +// ) catch |err| { +// await (async self.channel.put(err) catch unreachable); +// return; +// }; +// +// while (true) { +// { +// // TODO only 1 beginOneEvent for the whole coroutine +// self.channel.loop.beginOneEvent(); +// errdefer self.channel.loop.finishOneEvent(); +// errdefer { +// _ = windows.kernel32.CancelIoEx(dir_handle, &resume_node.base.overlapped); +// } +// suspend { +// _ = windows.kernel32.ReadDirectoryChangesW( +// dir_handle, +// &event_buf, +// @intCast(windows.DWORD, event_buf.len), +// windows.FALSE, // watch subtree +// windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME | +// windows.FILE_NOTIFY_CHANGE_ATTRIBUTES | windows.FILE_NOTIFY_CHANGE_SIZE | +// windows.FILE_NOTIFY_CHANGE_LAST_WRITE | windows.FILE_NOTIFY_CHANGE_LAST_ACCESS | +// windows.FILE_NOTIFY_CHANGE_CREATION | windows.FILE_NOTIFY_CHANGE_SECURITY, +// null, // number of bytes transferred (unused for async) +// &resume_node.base.overlapped, +// null, // completion routine - unused because we use IOCP +// ); +// } +// } +// var 
bytes_transferred: windows.DWORD = undefined; +// if (windows.kernel32.GetOverlappedResult(dir_handle, &resume_node.base.overlapped, &bytes_transferred, windows.FALSE) == 0) { +// const err = switch (windows.kernel32.GetLastError()) { +// else => |err| windows.unexpectedError(err), +// }; +// await (async self.channel.put(err) catch unreachable); +// } else { +// // can't use @bytesToSlice because of the special variable length name field +// var ptr = event_buf[0..].ptr; +// const end_ptr = ptr + bytes_transferred; +// var ev: *windows.FILE_NOTIFY_INFORMATION = undefined; +// while (@ptrToInt(ptr) < @ptrToInt(end_ptr)) : (ptr += ev.NextEntryOffset) { +// ev = @ptrCast(*windows.FILE_NOTIFY_INFORMATION, ptr); +// const emit = switch (ev.Action) { +// windows.FILE_ACTION_REMOVED => WatchEventId.Delete, +// windows.FILE_ACTION_MODIFIED => WatchEventId.CloseWrite, +// else => null, +// }; +// if (emit) |id| { +// const basename_utf16le = ([*]u16)(&ev.FileName)[0 .. ev.FileNameLength / 2]; +// const user_value = blk: { +// const held = await (async dir.table_lock.acquire() catch unreachable); +// defer held.release(); +// +// if (dir.file_table.get(basename_utf16le)) |entry| { +// break :blk entry.value; +// } else { +// break :blk null; +// } +// }; +// if (user_value) |v| { +// await (async self.channel.put(Event{ +// .id = id, +// .data = v, +// }) catch unreachable); +// } +// } +// if (ev.NextEntryOffset == 0) break; +// } +// } +// } +// } +// +// pub async fn removeFile(self: *Self, file_path: []const u8) ?V { +// @panic("TODO"); +// } +// +// async fn linuxEventPutter(inotify_fd: i32, channel: *event.Channel(Event.Error!Event), out_watch: **Self) void { +// // TODO https://github.com/ziglang/zig/issues/1194 +// suspend { +// resume @handle(); +// } +// +// const loop = channel.loop; +// +// var watch = Self{ +// .channel = channel, +// .os_data = OsData{ +// .putter = @handle(), +// .inotify_fd = inotify_fd, +// .wd_table = OsData.WdTable.init(loop.allocator), 
+// .table_lock = event.Lock.init(loop), +// }, +// }; +// out_watch.* = &watch; +// +// loop.beginOneEvent(); +// +// defer { +// watch.os_data.table_lock.deinit(); +// var wd_it = watch.os_data.wd_table.iterator(); +// while (wd_it.next()) |wd_entry| { +// var file_it = wd_entry.value.file_table.iterator(); +// while (file_it.next()) |file_entry| { +// loop.allocator.free(file_entry.key); +// } +// loop.allocator.free(wd_entry.value.dirname); +// } +// loop.finishOneEvent(); +// os.close(inotify_fd); +// channel.destroy(); +// } +// +// var event_buf: [4096]u8 align(@alignOf(os.linux.inotify_event)) = undefined; +// +// while (true) { +// const rc = os.linux.read(inotify_fd, &event_buf, event_buf.len); +// const errno = os.linux.getErrno(rc); +// switch (errno) { +// 0 => { +// // can't use @bytesToSlice because of the special variable length name field +// var ptr = event_buf[0..].ptr; +// const end_ptr = ptr + event_buf.len; +// var ev: *os.linux.inotify_event = undefined; +// while (@ptrToInt(ptr) < @ptrToInt(end_ptr)) : (ptr += @sizeOf(os.linux.inotify_event) + ev.len) { +// ev = @ptrCast(*os.linux.inotify_event, ptr); +// if (ev.mask & os.linux.IN_CLOSE_WRITE == os.linux.IN_CLOSE_WRITE) { +// const basename_ptr = ptr + @sizeOf(os.linux.inotify_event); +// const basename_with_null = basename_ptr[0 .. 
std.mem.len(u8, basename_ptr) + 1]; +// const user_value = blk: { +// const held = await (async watch.os_data.table_lock.acquire() catch unreachable); +// defer held.release(); +// +// const dir = &watch.os_data.wd_table.get(ev.wd).?.value; +// if (dir.file_table.get(basename_with_null)) |entry| { +// break :blk entry.value; +// } else { +// break :blk null; +// } +// }; +// if (user_value) |v| { +// await (async channel.put(Event{ +// .id = WatchEventId.CloseWrite, +// .data = v, +// }) catch unreachable); +// } +// } +// } +// }, +// os.linux.EINTR => continue, +// os.linux.EINVAL => unreachable, +// os.linux.EFAULT => unreachable, +// os.linux.EAGAIN => { +// (await (async loop.linuxWaitFd( +// inotify_fd, +// os.linux.EPOLLET | os.linux.EPOLLIN, +// ) catch unreachable)) catch |err| { +// const transformed_err = switch (err) { +// error.FileDescriptorAlreadyPresentInSet => unreachable, +// error.OperationCausesCircularLoop => unreachable, +// error.FileDescriptorNotRegistered => unreachable, +// error.FileDescriptorIncompatibleWithEpoll => unreachable, +// error.Unexpected => unreachable, +// else => |e| e, +// }; +// await (async channel.put(transformed_err) catch unreachable); +// }; +// }, +// else => unreachable, +// } +// } +// } +// }; +//} const test_tmp_dir = "std_event_fs_test"; @@ -1397,11 +1397,11 @@ pub const OutStream = struct { }; } - async<*mem.Allocator> fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void { + fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void { const self = @fieldParentPtr(OutStream, "stream", out_stream); const offset = self.offset; self.offset += bytes.len; - return await (async pwritev(self.loop, self.fd, [][]const u8{bytes}, offset) catch unreachable); + return pwritev(self.loop, self.fd, [][]const u8{bytes}, offset); } }; @@ -1423,9 +1423,9 @@ pub const InStream = struct { }; } - async<*mem.Allocator> fn readFn(in_stream: *Stream, bytes: []u8) Error!usize { + fn readFn(in_stream: *Stream, bytes: []u8) 
Error!usize { const self = @fieldParentPtr(InStream, "stream", in_stream); - const amt = try await (async preadv(self.loop, self.fd, [][]u8{bytes}, self.offset) catch unreachable); + const amt = try preadv(self.loop, self.fd, [][]u8{bytes}, self.offset); self.offset += amt; return amt; } diff --git a/std/event/loop.zig b/std/event/loop.zig index f0ae67a3d1..a4a60b5098 100644 --- a/std/event/loop.zig +++ b/std/event/loop.zig @@ -98,9 +98,21 @@ pub const Loop = struct { }; pub const instance: ?*Loop = if (@hasDecl(root, "event_loop")) root.event_loop else default_instance; + /// TODO copy elision / named return values so that the threads referencing *Loop + /// have the correct pointer value. + /// https://github.com/ziglang/zig/issues/2761 and https://github.com/ziglang/zig/issues/2765 + pub fn init(self: *Loop, allocator: *mem.Allocator) !void { + if (builtin.single_threaded) { + return self.initSingleThreaded(allocator); + } else { + return self.initMultiThreaded(allocator); + } + } + /// After initialization, call run(). /// TODO copy elision / named return values so that the threads referencing *Loop /// have the correct pointer value. + /// https://github.com/ziglang/zig/issues/2761 and https://github.com/ziglang/zig/issues/2765 pub fn initSingleThreaded(self: *Loop, allocator: *mem.Allocator) !void { return self.initInternal(allocator, 1); } @@ -110,6 +122,7 @@ pub const Loop = struct { /// After initialization, call run(). /// TODO copy elision / named return values so that the threads referencing *Loop /// have the correct pointer value. 
+ /// https://github.com/ziglang/zig/issues/2761 and https://github.com/ziglang/zig/issues/2765 pub fn initMultiThreaded(self: *Loop, allocator: *mem.Allocator) !void { if (builtin.single_threaded) @compileError("initMultiThreaded unavailable when building in single-threaded mode"); const core_count = try Thread.cpuCount(); @@ -161,18 +174,18 @@ pub const Loop = struct { fn initOsData(self: *Loop, extra_thread_count: usize) InitOsDataError!void { switch (builtin.os) { .linux => { - // TODO self.os_data.fs_queue = std.atomic.Queue(fs.Request).init(); - // TODO self.os_data.fs_queue_item = 0; - // TODO // we need another thread for the file system because Linux does not have an async - // TODO // file system I/O API. - // TODO self.os_data.fs_end_request = fs.RequestNode{ - // TODO .prev = undefined, - // TODO .next = undefined, - // TODO .data = fs.Request{ - // TODO .msg = fs.Request.Msg.End, - // TODO .finish = fs.Request.Finish.NoAction, - // TODO }, - // TODO }; + self.os_data.fs_queue = std.atomic.Queue(fs.Request).init(); + self.os_data.fs_queue_item = 0; + // we need another thread for the file system because Linux does not have an async + // file system I/O API. 
+ self.os_data.fs_end_request = fs.RequestNode{ + .prev = undefined, + .next = undefined, + .data = fs.Request{ + .msg = fs.Request.Msg.End, + .finish = fs.Request.Finish.NoAction, + }, + }; errdefer { while (self.available_eventfd_resume_nodes.pop()) |node| os.close(node.data.eventfd); @@ -210,10 +223,10 @@ pub const Loop = struct { &self.os_data.final_eventfd_event, ); - // TODO self.os_data.fs_thread = try Thread.spawn(self, posixFsRun); + self.os_data.fs_thread = try Thread.spawn(self, posixFsRun); errdefer { - // TODO self.posixFsRequest(&self.os_data.fs_end_request); - // TODO self.os_data.fs_thread.wait(); + self.posixFsRequest(&self.os_data.fs_end_request); + self.os_data.fs_thread.wait(); } if (builtin.single_threaded) { @@ -315,10 +328,10 @@ pub const Loop = struct { .udata = undefined, }; - // TODO self.os_data.fs_thread = try Thread.spawn(self, posixFsRun); + self.os_data.fs_thread = try Thread.spawn(self, posixFsRun); errdefer { - // TODO self.posixFsRequest(&self.os_data.fs_end_request); - // TODO self.os_data.fs_thread.wait(); + self.posixFsRequest(&self.os_data.fs_end_request); + self.os_data.fs_thread.wait(); } if (builtin.single_threaded) { @@ -441,7 +454,6 @@ pub const Loop = struct { pub async fn linuxWaitFd(self: *Loop, fd: i32, flags: u32) !void { defer self.linuxRemoveFd(fd); suspend { - // TODO explicitly put this memory in the coroutine frame #1194 var resume_node = ResumeNode.Basic{ .base = ResumeNode{ .id = ResumeNode.Id.Basic, @@ -454,10 +466,6 @@ pub const Loop = struct { } pub async fn bsdWaitKev(self: *Loop, ident: usize, filter: i16, fflags: u32) !os.Kevent { - // TODO #1194 - suspend { - resume @handle(); - } var resume_node = ResumeNode.Basic{ .base = ResumeNode{ .id = ResumeNode.Id.Basic, @@ -578,7 +586,7 @@ pub const Loop = struct { .macosx, .freebsd, .netbsd, - => {}, // TODO self.os_data.fs_thread.wait(), + => self.os_data.fs_thread.wait(), else => {}, } @@ -631,7 +639,7 @@ pub const Loop = struct { // cause all the threads to 
stop switch (builtin.os) { .linux => { - // TODO self.posixFsRequest(&self.os_data.fs_end_request); + self.posixFsRequest(&self.os_data.fs_end_request); // writing 8 bytes to an eventfd cannot fail os.write(self.os_data.final_eventfd, wakeup_bytes) catch unreachable; return; @@ -862,10 +870,10 @@ pub const Loop = struct { epollfd: i32, final_eventfd: i32, final_eventfd_event: os.linux.epoll_event, - // TODO fs_thread: *Thread, - // TODO fs_queue_item: i32, - // TODO fs_queue: std.atomic.Queue(fs.Request), - // TODO fs_end_request: fs.RequestNode, + fs_thread: *Thread, + fs_queue_item: i32, + fs_queue: std.atomic.Queue(fs.Request), + fs_end_request: fs.RequestNode, }; }; From 614cab5d68176ea56e48195d04997738297429a1 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 8 Aug 2019 19:08:41 -0400 Subject: [PATCH 079/125] fix passing string literals to async functions --- src/all_types.hpp | 1 + src/codegen.cpp | 16 ++++++++++------ test/stage1/behavior/coroutines.zig | 21 +++++++++++++++++++++ 3 files changed, 32 insertions(+), 6 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index e6daa1c726..cf41444f0b 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -3763,6 +3763,7 @@ struct FnWalkAttrs { struct FnWalkCall { ZigList *gen_param_values; + ZigList *gen_param_types; IrInstructionCallGen *inst; bool is_var_args; }; diff --git a/src/codegen.cpp b/src/codegen.cpp index 4ef81de4a0..46cd8e9fcf 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -1575,14 +1575,14 @@ static LLVMRealPredicate cmp_op_to_real_predicate(IrBinOp cmp_op) { } } -static LLVMValueRef gen_assign_raw(CodeGen *g, LLVMValueRef ptr, ZigType *ptr_type, +static void gen_assign_raw(CodeGen *g, LLVMValueRef ptr, ZigType *ptr_type, LLVMValueRef value) { assert(ptr_type->id == ZigTypeIdPointer); ZigType *child_type = ptr_type->data.pointer.child_type; if (!type_has_bits(child_type)) - return nullptr; + return; if (handle_is_ptr(child_type)) { 
assert(LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMPointerTypeKind); @@ -1602,13 +1602,13 @@ static LLVMValueRef gen_assign_raw(CodeGen *g, LLVMValueRef ptr, ZigType *ptr_ty ZigLLVMBuildMemCpy(g->builder, dest_ptr, align_bytes, src_ptr, align_bytes, LLVMConstInt(usize->llvm_type, size_bytes, false), ptr_type->data.pointer.is_volatile); - return nullptr; + return; } uint32_t host_int_bytes = ptr_type->data.pointer.host_int_bytes; if (host_int_bytes == 0) { gen_store(g, value, ptr, ptr_type); - return nullptr; + return; } bool big_endian = g->is_big_endian; @@ -1638,7 +1638,7 @@ static LLVMValueRef gen_assign_raw(CodeGen *g, LLVMValueRef ptr, ZigType *ptr_ty LLVMValueRef ored_value = LLVMBuildOr(g->builder, shifted_value, anded_containing_int, ""); gen_store(g, ored_value, ptr, ptr_type); - return nullptr; + return; } static void gen_var_debug_decl(CodeGen *g, ZigVar *var) { @@ -1958,6 +1958,7 @@ void walk_function_params(CodeGen *g, ZigType *fn_type, FnWalk *fn_walk) { LLVMValueRef param_value = ir_llvm_value(g, param_instruction); assert(param_value); fn_walk->data.call.gen_param_values->append(param_value); + fn_walk->data.call.gen_param_types->append(param_type); } } return; @@ -3821,6 +3822,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr bool prefix_arg_err_ret_stack = codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type); bool is_var_args = fn_type_id->is_var_args; ZigList gen_param_values = {}; + ZigList gen_param_types = {}; LLVMValueRef result_loc = instruction->result_loc ? 
ir_llvm_value(g, instruction->result_loc) : nullptr; LLVMValueRef zero = LLVMConstNull(usize_type_ref); LLVMValueRef frame_result_loc; @@ -3923,6 +3925,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr fn_walk.data.call.inst = instruction; fn_walk.data.call.is_var_args = is_var_args; fn_walk.data.call.gen_param_values = &gen_param_values; + fn_walk.data.call.gen_param_types = &gen_param_types; walk_function_params(g, fn_type, &fn_walk); ZigLLVM_FnInline fn_inline; @@ -3964,7 +3967,8 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr for (size_t arg_i = 0; arg_i < gen_param_values.length; arg_i += 1) { LLVMValueRef arg_ptr = LLVMBuildStructGEP(g->builder, casted_frame, arg_start_i + arg_i, ""); - LLVMBuildStore(g->builder, gen_param_values.at(arg_i), arg_ptr); + gen_assign_raw(g, arg_ptr, get_pointer_to_type(g, gen_param_types.at(arg_i), true), + gen_param_values.at(arg_i)); } } if (instruction->is_async) { diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index 2fd5912aac..57706c2455 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -568,3 +568,24 @@ test "errdefers in scope get run when canceling async fn call" { }; S.doTheTest(); } + +test "pass string literal to async function" { + const S = struct { + var frame: anyframe = undefined; + var ok: bool = false; + + fn doTheTest() void { + _ = async hello("hello"); + resume frame; + expect(ok); + } + + fn hello(msg: []const u8) void { + frame = @frame(); + suspend; + expectEqual(([]const u8)("hello"), msg); + ok = true; + } + }; + S.doTheTest(); +} From 0bf6796b76501efef486815946ce12932d6f6a21 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 9 Aug 2019 10:22:02 -0400 Subject: [PATCH 080/125] fix regression in std.math.min closes #3035 --- std/math.zig | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git 
a/std/math.zig b/std/math.zig index 2745fe6508..73f981528c 100644 --- a/std/math.zig +++ b/std/math.zig @@ -245,25 +245,20 @@ pub fn floatExponentBits(comptime T: type) comptime_int { /// Given two types, returns the smallest one which is capable of holding the /// full range of the minimum value. pub fn Min(comptime A: type, comptime B: type) type { - return switch (@typeInfo(A)) { + switch (@typeInfo(A)) { .Int => |a_info| switch (@typeInfo(B)) { - .Int => |b_info| blk: { - if (a_info.is_signed == b_info.is_signed) { - break :blk if (a_info.bits < b_info.bits) A else B; - } else if (a_info.is_signed) { - break :blk A; + .Int => |b_info| if (!a_info.is_signed and !b_info.is_signed) { + if (a_info.bits < b_info.bits) { + return A; } else { - break :blk B; + return B; } }, - .ComptimeInt => A, - else => @compileError("unsupported type: " ++ @typeName(B)), + else => {}, }, - .Float => |a_info| if (a_info.bits < @typeInfo(B).Float.bits) A else B, - .ComptimeInt => B, - .ComptimeFloat => B, - else => @compileError("unsupported type: " ++ @typeName(A)), - }; + else => {}, + } + return @typeOf(A(0) + B(0)); } /// Returns the smaller number. When one of the parameter's type's full range fits in the other, @@ -275,7 +270,6 @@ pub fn min(x: var, y: var) Min(@typeOf(x), @typeOf(y)) { // scope it is known to fit in the return type. switch (@typeInfo(Result)) { .Int => return @intCast(Result, x), - .Float => return @floatCast(Result, x), else => return x, } } else { @@ -283,7 +277,6 @@ pub fn min(x: var, y: var) Min(@typeOf(x), @typeOf(y)) { // scope it is known to fit in the return type. 
switch (@typeInfo(Result)) { .Int => return @intCast(Result, y), - .Float => return @floatCast(Result, y), else => return y, } } @@ -302,9 +295,16 @@ test "math.min" { var a: f64 = 10.34; var b: f32 = 999.12; var result = min(a, b); - testing.expect(@typeOf(result) == f32); + testing.expect(@typeOf(result) == f64); testing.expect(result == 10.34); } + { + var a: i8 = -127; + var b: i16 = -200; + var result = min(a, b); + testing.expect(@typeOf(result) == i16); + testing.expect(result == -200); + } } pub fn max(x: var, y: var) @typeOf(x + y) { From e6ebc41a07b294e602f8594e001df78e143e190f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 9 Aug 2019 17:09:27 -0400 Subject: [PATCH 081/125] add a little compile error to make Future Andy's life easier --- tools/process_headers.zig | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/process_headers.zig b/tools/process_headers.zig index f191721316..805fc3d5f7 100644 --- a/tools/process_headers.zig +++ b/tools/process_headers.zig @@ -504,6 +504,9 @@ const Contents = struct { } }; +comptime { + @compileError("the behavior of std.AutoHashMap changed and []const u8 will be treated as a pointer. 
will need to update the hash maps to actually do some kind of hashing on the slices."); +} const HashToContents = std.AutoHashMap([]const u8, Contents); const TargetToHash = std.HashMap(DestTarget, []const u8, DestTarget.hash, DestTarget.eql); const PathTable = std.AutoHashMap([]const u8, *TargetToHash); From 2e7f53f1f0d8339b8dc90ad7e0bc9963f1ec471c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 9 Aug 2019 17:34:06 -0400 Subject: [PATCH 082/125] fix cancel inside an errdefer --- src/all_types.hpp | 7 ++++--- src/analyze.cpp | 4 ++++ src/codegen.cpp | 23 ++++++++++++++++++++--- src/ir.cpp | 12 ++++-------- src/ir_print.cpp | 3 +-- test/stage1/behavior/coroutines.zig | 26 +++++++++++++++++++++++++- 6 files changed, 58 insertions(+), 17 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index cf41444f0b..0b03388502 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1725,6 +1725,7 @@ struct CodeGen { LLVMValueRef cur_async_resume_index_ptr; LLVMValueRef cur_async_awaiter_ptr; LLVMValueRef cur_async_prev_val; + LLVMValueRef cur_async_prev_val_field_ptr; LLVMBasicBlockRef cur_preamble_llvm_block; size_t cur_resume_block_count; LLVMValueRef cur_err_ret_trace_val_arg; @@ -1886,6 +1887,7 @@ struct CodeGen { bool system_linker_hack; bool reported_bad_link_libc_error; bool is_dynamic; // shared library rather than static library. dynamic musl rather than static musl. 
+ bool cur_is_after_return; //////////////////////////// Participates in Input Parameter Cache Hash /////// Note: there is a separate cache hash for builtin.zig, when adding fields, @@ -3639,8 +3641,6 @@ struct IrInstructionCoroResume { struct IrInstructionTestCancelRequested { IrInstruction base; - - bool use_return_begin_prev_value; }; enum ResultLocId { @@ -3730,7 +3730,8 @@ static const size_t err_union_payload_index = 1; static const size_t coro_fn_ptr_index = 0; static const size_t coro_resume_index = 1; static const size_t coro_awaiter_index = 2; -static const size_t coro_ret_start = 3; +static const size_t coro_prev_val_index = 3; +static const size_t coro_ret_start = 4; // TODO call graph analysis to find out what this number needs to be for every function // MUST BE A POWER OF TWO. diff --git a/src/analyze.cpp b/src/analyze.cpp index cc90573f41..a09ba582c9 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5246,6 +5246,9 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { field_names.append("@awaiter"); field_types.append(g->builtin_types.entry_usize); + field_names.append("@prev_val"); + field_types.append(g->builtin_types.entry_usize); + FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; ZigType *ptr_return_type = get_pointer_to_type(g, fn_type_id->return_type, false); field_names.append("@result_ptr_callee"); @@ -7592,6 +7595,7 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re field_types.append(ptr_fn_llvm_type); // fn_ptr field_types.append(usize_type_ref); // resume_index field_types.append(usize_type_ref); // awaiter + field_types.append(usize_type_ref); // prev_val bool have_result_type = result_type != nullptr && type_has_bits(result_type); if (have_result_type) { diff --git a/src/codegen.cpp b/src/codegen.cpp index 46cd8e9fcf..5a8fd3e9ca 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -2226,7 +2226,18 @@ static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef 
tar return ZigLLVMBuildCall(g->builder, fn_val, args, 2, LLVMFastCallConv, ZigLLVM_FnInlineAuto, ""); } +static LLVMValueRef get_cur_async_prev_val(CodeGen *g) { + if (g->cur_async_prev_val != nullptr) { + return g->cur_async_prev_val; + } + g->cur_async_prev_val = LLVMBuildLoad(g->builder, g->cur_async_prev_val_field_ptr, ""); + return g->cur_async_prev_val; +} + static LLVMBasicBlockRef gen_suspend_begin(CodeGen *g, const char *name_hint) { + // This becomes invalid when a suspend happens. + g->cur_async_prev_val = nullptr; + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; LLVMBasicBlockRef resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, name_hint); size_t new_block_index = g->cur_resume_block_count; @@ -2319,6 +2330,9 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable, LLVMBasicBlockRef incoming_blocks[] = { after_resume_block, switch_bb }; LLVMAddIncoming(g->cur_async_prev_val, incoming_values, incoming_blocks, 2); + g->cur_is_after_return = true; + LLVMBuildStore(g->builder, g->cur_async_prev_val, g->cur_async_prev_val_field_ptr); + if (!ret_type_has_bits) { return nullptr; } @@ -2366,7 +2380,7 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns ZigType *any_frame_type = get_any_frame_type(g, ret_type); LLVMValueRef one = LLVMConstInt(usize_type_ref, 1, false); LLVMValueRef mask_val = LLVMConstNot(one); - LLVMValueRef masked_prev_val = LLVMBuildAnd(g->builder, g->cur_async_prev_val, mask_val, ""); + LLVMValueRef masked_prev_val = LLVMBuildAnd(g->builder, get_cur_async_prev_val(g), mask_val, ""); LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, masked_prev_val, get_llvm_type(g, any_frame_type), ""); LLVMValueRef call_inst = gen_resume(g, nullptr, their_frame_ptr, ResumeIdReturn, nullptr); @@ -5590,8 +5604,8 @@ static LLVMValueRef ir_render_test_cancel_requested(CodeGen *g, IrExecutable *ex { if (!fn_is_async(g->cur_fn)) return LLVMConstInt(LLVMInt1Type(), 0, 
false); - if (instruction->use_return_begin_prev_value) { - return LLVMBuildTrunc(g->builder, g->cur_async_prev_val, LLVMInt1Type(), ""); + if (g->cur_is_after_return) { + return LLVMBuildTrunc(g->builder, get_cur_async_prev_val(g), LLVMInt1Type(), ""); } else { zig_panic("TODO"); } @@ -7063,6 +7077,7 @@ static void do_code_gen(CodeGen *g) { } if (is_async) { + g->cur_is_after_return = false; g->cur_resume_block_count = 0; LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; @@ -7099,6 +7114,8 @@ static void do_code_gen(CodeGen *g) { g->cur_err_ret_trace_val_stack = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, trace_field_index_stack, ""); } + g->cur_async_prev_val_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, + coro_prev_val_index, ""); LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, ""); LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block, 4); diff --git a/src/ir.cpp b/src/ir.cpp index 5fc31db3ef..4dcfaa6cce 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -3325,12 +3325,9 @@ static IrInstruction *ir_build_coro_resume(IrBuilder *irb, Scope *scope, AstNode return &instruction->base; } -static IrInstruction *ir_build_test_cancel_requested(IrBuilder *irb, Scope *scope, AstNode *source_node, - bool use_return_begin_prev_value) -{ +static IrInstruction *ir_build_test_cancel_requested(IrBuilder *irb, Scope *scope, AstNode *source_node) { IrInstructionTestCancelRequested *instruction = ir_build_instruction(irb, scope, source_node); instruction->base.value.type = irb->codegen->builtin_types.entry_bool; - instruction->use_return_begin_prev_value = use_return_begin_prev_value; return &instruction->base; } @@ -3546,7 +3543,7 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, if (need_test_cancel) { ir_set_cursor_at_end_and_append_block(irb, ok_block); - IrInstruction *is_canceled = ir_build_test_cancel_requested(irb, scope, node, true); + IrInstruction 
*is_canceled = ir_build_test_cancel_requested(irb, scope, node); ir_mark_gen(ir_build_cond_br(irb, scope, node, is_canceled, all_defers_block, normal_defers_block, force_comptime)); } @@ -3830,7 +3827,7 @@ static IrInstruction *ir_gen_block(IrBuilder *irb, Scope *parent_scope, AstNode ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false); return ir_mark_gen(ir_build_return(irb, child_scope, result->source_node, result)); } - IrInstruction *is_canceled = ir_build_test_cancel_requested(irb, child_scope, block_node, true); + IrInstruction *is_canceled = ir_build_test_cancel_requested(irb, child_scope, block_node); IrBasicBlock *all_defers_block = ir_create_basic_block(irb, child_scope, "ErrDefers"); IrBasicBlock *normal_defers_block = ir_create_basic_block(irb, child_scope, "Defers"); IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, child_scope, "RetStmt"); @@ -24725,8 +24722,7 @@ static IrInstruction *ir_analyze_instruction_test_cancel_requested(IrAnalyze *ir if (ir_should_inline(ira->new_irb.exec, instruction->base.scope)) { return ir_const_bool(ira, &instruction->base, false); } - return ir_build_test_cancel_requested(&ira->new_irb, instruction->base.scope, instruction->base.source_node, - instruction->use_return_begin_prev_value); + return ir_build_test_cancel_requested(&ira->new_irb, instruction->base.scope, instruction->base.source_node); } static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction *instruction) { diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 8b8445f625..8c90eb02f3 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -1551,8 +1551,7 @@ static void ir_print_await_gen(IrPrint *irp, IrInstructionAwaitGen *instruction) } static void ir_print_test_cancel_requested(IrPrint *irp, IrInstructionTestCancelRequested *instruction) { - const char *arg = instruction->use_return_begin_prev_value ? 
"UseReturnBeginPrevValue" : "AdditionalCheck"; - fprintf(irp->f, "@testCancelRequested(%s)", arg); + fprintf(irp->f, "@testCancelRequested()"); } static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index 57706c2455..c2b95e8559 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -318,7 +318,7 @@ test "@asyncCall with return type" { } }; var foo = Foo{ .bar = Foo.middle }; - var bytes: [100]u8 = undefined; + var bytes: [150]u8 = undefined; var aresult: i32 = 0; _ = @asyncCall(&bytes, &aresult, foo.bar); expect(aresult == 0); @@ -589,3 +589,27 @@ test "pass string literal to async function" { }; S.doTheTest(); } + +test "cancel inside an errdefer" { + const S = struct { + var frame: anyframe = undefined; + + fn doTheTest() void { + _ = async amainWrap(); + resume frame; + } + + fn amainWrap() !void { + var foo = async func(); + errdefer cancel foo; + return error.Bad; + } + + fn func() void { + frame = @frame(); + suspend; + } + + }; + S.doTheTest(); +} From b9d1d45dfd0f704bc762732c23aa2844f1d14e8d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 9 Aug 2019 21:49:40 -0400 Subject: [PATCH 083/125] fix combining try with errdefer cancel --- src/all_types.hpp | 13 +++++++++ src/codegen.cpp | 45 +++++++++++++++++++++++------ src/ir.cpp | 33 +++++++++++++++++++++ src/ir_print.cpp | 9 ++++++ test/stage1/behavior/coroutines.zig | 29 +++++++++++++++++++ 5 files changed, 120 insertions(+), 9 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 0b03388502..45182f3db3 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -2366,6 +2366,7 @@ enum IrInstructionId { IrInstructionIdAwaitGen, IrInstructionIdCoroResume, IrInstructionIdTestCancelRequested, + IrInstructionIdSpill, }; struct IrInstruction { @@ -3643,6 +3644,18 @@ struct IrInstructionTestCancelRequested { IrInstruction base; }; +enum SpillId { 
+ SpillIdInvalid, + SpillIdRetErrCode, +}; + +struct IrInstructionSpill { + IrInstruction base; + + SpillId spill_id; + IrInstruction *operand; +}; + enum ResultLocId { ResultLocIdInvalid, ResultLocIdNone, diff --git a/src/codegen.cpp b/src/codegen.cpp index 5a8fd3e9ca..976ee4181e 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -5113,6 +5113,18 @@ static LLVMValueRef ir_render_test_err(CodeGen *g, IrExecutable *executable, IrI return LLVMBuildICmp(g->builder, LLVMIntNE, err_val, zero, ""); } +static LLVMValueRef gen_unwrap_err_code(CodeGen *g, LLVMValueRef err_union_ptr, ZigType *ptr_type) { + ZigType *err_union_type = ptr_type->data.pointer.child_type; + ZigType *payload_type = err_union_type->data.error_union.payload_type; + if (!type_has_bits(payload_type)) { + return err_union_ptr; + } else { + // TODO assign undef to the payload + LLVMValueRef err_union_handle = get_handle_value(g, err_union_ptr, err_union_type, ptr_type); + return LLVMBuildStructGEP(g->builder, err_union_handle, err_union_err_index, ""); + } +} + static LLVMValueRef ir_render_unwrap_err_code(CodeGen *g, IrExecutable *executable, IrInstructionUnwrapErrCode *instruction) { @@ -5121,16 +5133,8 @@ static LLVMValueRef ir_render_unwrap_err_code(CodeGen *g, IrExecutable *executab ZigType *ptr_type = instruction->err_union_ptr->value.type; assert(ptr_type->id == ZigTypeIdPointer); - ZigType *err_union_type = ptr_type->data.pointer.child_type; - ZigType *payload_type = err_union_type->data.error_union.payload_type; LLVMValueRef err_union_ptr = ir_llvm_value(g, instruction->err_union_ptr); - if (!type_has_bits(payload_type)) { - return err_union_ptr; - } else { - // TODO assign undef to the payload - LLVMValueRef err_union_handle = get_handle_value(g, err_union_ptr, err_union_type, ptr_type); - return LLVMBuildStructGEP(g->builder, err_union_handle, err_union_err_index, ""); - } + return gen_unwrap_err_code(g, err_union_ptr, ptr_type); } static LLVMValueRef ir_render_unwrap_err_payload(CodeGen 
*g, IrExecutable *executable, @@ -5611,6 +5615,27 @@ static LLVMValueRef ir_render_test_cancel_requested(CodeGen *g, IrExecutable *ex } } +static LLVMValueRef ir_render_spill(CodeGen *g, IrExecutable *executable, IrInstructionSpill *instruction) { + if (!fn_is_async(g->cur_fn)) + return ir_llvm_value(g, instruction->operand); + + switch (instruction->spill_id) { + case SpillIdInvalid: + zig_unreachable(); + case SpillIdRetErrCode: { + LLVMValueRef ret_ptr = LLVMBuildLoad(g->builder, g->cur_ret_ptr, ""); + ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type; + if (ret_type->id == ZigTypeIdErrorUnion) { + return gen_unwrap_err_code(g, ret_ptr, get_pointer_to_type(g, ret_type, true)); + } else { + zig_unreachable(); + } + } + + } + zig_unreachable(); +} + static void set_debug_location(CodeGen *g, IrInstruction *instruction) { AstNode *source_node = instruction->source_node; Scope *scope = instruction->scope; @@ -5866,6 +5891,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_await(g, executable, (IrInstructionAwaitGen *)instruction); case IrInstructionIdTestCancelRequested: return ir_render_test_cancel_requested(g, executable, (IrInstructionTestCancelRequested *)instruction); + case IrInstructionIdSpill: + return ir_render_spill(g, executable, (IrInstructionSpill *)instruction); } zig_unreachable(); } diff --git a/src/ir.cpp b/src/ir.cpp index 4dcfaa6cce..845ee03757 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -1066,6 +1066,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionTestCancelReques return IrInstructionIdTestCancelRequested; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionSpill *) { + return IrInstructionIdSpill; +} + template static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) { T *special_instruction = allocate(1); @@ -3332,6 +3336,18 @@ static IrInstruction *ir_build_test_cancel_requested(IrBuilder *irb, Scope *scop 
return &instruction->base; } +static IrInstruction *ir_build_spill(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *operand, SpillId spill_id) +{ + IrInstructionSpill *instruction = ir_build_instruction(irb, scope, source_node); + instruction->operand = operand; + instruction->spill_id = spill_id; + + ir_ref_instruction(operand, irb->current_basic_block); + + return &instruction->base; +} + static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) { results[ReturnKindUnconditional] = 0; results[ReturnKindError] = 0; @@ -3591,6 +3607,7 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, ResultLocReturn *result_loc_ret = allocate(1); result_loc_ret->base.id = ResultLocIdReturn; ir_build_reset_result(irb, scope, node, &result_loc_ret->base); + err_val = ir_build_spill(irb, scope, node, err_val, SpillIdRetErrCode); ir_build_end_expr(irb, scope, node, err_val, &result_loc_ret->base); if (irb->codegen->have_err_ret_tracing && !should_inline) { @@ -24725,6 +24742,19 @@ static IrInstruction *ir_analyze_instruction_test_cancel_requested(IrAnalyze *ir return ir_build_test_cancel_requested(&ira->new_irb, instruction->base.scope, instruction->base.source_node); } +static IrInstruction *ir_analyze_instruction_spill(IrAnalyze *ira, IrInstructionSpill *instruction) { + IrInstruction *operand = instruction->operand->child; + if (type_is_invalid(operand->value.type)) + return ira->codegen->invalid_instruction; + if (ir_should_inline(ira->new_irb.exec, instruction->base.scope)) { + return operand; + } + IrInstruction *result = ir_build_spill(&ira->new_irb, instruction->base.scope, instruction->base.source_node, + operand, instruction->spill_id); + result->value.type = operand->value.type; + return result; +} + static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction *instruction) { switch (instruction->id) { case IrInstructionIdInvalid: @@ -25024,6 +25054,8 @@ static 
IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction return ir_analyze_instruction_await(ira, (IrInstructionAwaitSrc *)instruction); case IrInstructionIdTestCancelRequested: return ir_analyze_instruction_test_cancel_requested(ira, (IrInstructionTestCancelRequested *)instruction); + case IrInstructionIdSpill: + return ir_analyze_instruction_spill(ira, (IrInstructionSpill *)instruction); } zig_unreachable(); } @@ -25259,6 +25291,7 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdAllocaSrc: case IrInstructionIdAllocaGen: case IrInstructionIdTestCancelRequested: + case IrInstructionIdSpill: return false; case IrInstructionIdAsm: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 8c90eb02f3..39e781e4f0 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -1554,6 +1554,12 @@ static void ir_print_test_cancel_requested(IrPrint *irp, IrInstructionTestCancel fprintf(irp->f, "@testCancelRequested()"); } +static void ir_print_spill(IrPrint *irp, IrInstructionSpill *instruction) { + fprintf(irp->f, "@spill("); + ir_print_other_instruction(irp, instruction->operand); + fprintf(irp->f, ")"); +} + static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { ir_print_prefix(irp, instruction); switch (instruction->id) { @@ -2039,6 +2045,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdTestCancelRequested: ir_print_test_cancel_requested(irp, (IrInstructionTestCancelRequested *)instruction); break; + case IrInstructionIdSpill: + ir_print_spill(irp, (IrInstructionSpill *)instruction); + break; } fprintf(irp->f, "\n"); } diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index c2b95e8559..c92cca9573 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -613,3 +613,32 @@ test "cancel inside an errdefer" { }; S.doTheTest(); } + +test "combining try with errdefer cancel" { + const S = struct 
{ + var frame: anyframe = undefined; + var ok = false; + + fn doTheTest() void { + _ = async amain(); + resume frame; + expect(ok); + } + + fn amain() !void { + var f = async func("https://example.com/"); + errdefer cancel f; + + _ = try await f; + } + + fn func(url: []const u8) ![]u8 { + errdefer ok = true; + frame = @frame(); + suspend; + return error.Bad; + } + + }; + S.doTheTest(); +} From 22428a75462e01877181501801dce4c090a87e9c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 10 Aug 2019 15:20:08 -0400 Subject: [PATCH 084/125] fix try in an async function with error union and non-zero-bit payload --- src/all_types.hpp | 13 +++- src/analyze.cpp | 12 ++++ src/codegen.cpp | 75 +++++++++++++--------- src/ir.cpp | 97 +++++++++++++++++++++++------ src/ir_print.cpp | 17 +++-- test/stage1/behavior/coroutines.zig | 30 +++++++++ 6 files changed, 187 insertions(+), 57 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 45182f3db3..8b4d1e6d70 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -74,6 +74,7 @@ struct IrExecutable { bool invalid; bool is_inline; bool is_generic_instantiation; + bool need_err_code_spill; }; enum OutType { @@ -1384,6 +1385,7 @@ struct ZigFn { size_t prealloc_backward_branch_quota; AstNode **param_source_nodes; Buf **param_names; + IrInstruction *err_code_spill; AstNode *fn_no_inline_set_node; AstNode *fn_static_eval_set_node; @@ -2366,7 +2368,8 @@ enum IrInstructionId { IrInstructionIdAwaitGen, IrInstructionIdCoroResume, IrInstructionIdTestCancelRequested, - IrInstructionIdSpill, + IrInstructionIdSpillBegin, + IrInstructionIdSpillEnd, }; struct IrInstruction { @@ -3649,13 +3652,19 @@ enum SpillId { SpillIdRetErrCode, }; -struct IrInstructionSpill { +struct IrInstructionSpillBegin { IrInstruction base; SpillId spill_id; IrInstruction *operand; }; +struct IrInstructionSpillEnd { + IrInstruction base; + + IrInstructionSpillBegin *begin; +}; + enum ResultLocId { ResultLocIdInvalid, ResultLocIdNone, diff --git 
a/src/analyze.cpp b/src/analyze.cpp index a09ba582c9..7482ba92ba 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5190,6 +5190,18 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { } ZigType *fn_type = get_async_fn_type(g, fn->type_entry); + if (fn->analyzed_executable.need_err_code_spill) { + IrInstructionAllocaGen *alloca_gen = allocate(1); + alloca_gen->base.id = IrInstructionIdAllocaGen; + alloca_gen->base.source_node = fn->proto_node; + alloca_gen->base.scope = fn->child_scope; + alloca_gen->base.value.type = get_pointer_to_type(g, g->builtin_types.entry_global_error_set, false); + alloca_gen->base.ref_count = 1; + alloca_gen->name_hint = ""; + fn->alloca_gen_list.append(alloca_gen); + fn->err_code_spill = &alloca_gen->base; + } + for (size_t i = 0; i < fn->call_list.length; i += 1) { IrInstructionCallGen *call = fn->call_list.at(i); ZigFn *callee = call->fn_entry; diff --git a/src/codegen.cpp b/src/codegen.cpp index 976ee4181e..2f07fcd710 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -2274,16 +2274,16 @@ static LLVMValueRef gen_maybe_atomic_op(CodeGen *g, LLVMAtomicRMWBinOp op, LLVMV static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable, IrInstructionReturnBegin *instruction) { - bool ret_type_has_bits = instruction->operand != nullptr && - type_has_bits(instruction->operand->value.type); - + ZigType *operand_type = (instruction->operand != nullptr) ? instruction->operand->value.type : nullptr; + bool operand_has_bits = (operand_type != nullptr) && type_has_bits(operand_type); if (!fn_is_async(g->cur_fn)) { - return ret_type_has_bits ? ir_llvm_value(g, instruction->operand) : nullptr; + return operand_has_bits ? ir_llvm_value(g, instruction->operand) : nullptr; } + ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type; + bool ret_type_has_bits = type_has_bits(ret_type); LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; - ZigType *ret_type = ret_type_has_bits ? 
instruction->operand->value.type : nullptr; if (ret_type_has_bits && !handle_is_ptr(ret_type)) { // It's a scalar, so it didn't get written to the result ptr. Do that before the atomic rmw. LLVMBuildStore(g->builder, ir_llvm_value(g, instruction->operand), g->cur_ret_ptr); @@ -2333,11 +2333,11 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable, g->cur_is_after_return = true; LLVMBuildStore(g->builder, g->cur_async_prev_val, g->cur_async_prev_val_field_ptr); - if (!ret_type_has_bits) { + if (!operand_has_bits) { return nullptr; } - return get_handle_value(g, g->cur_ret_ptr, ret_type, get_pointer_to_type(g, ret_type, true)); + return get_handle_value(g, g->cur_ret_ptr, operand_type, get_pointer_to_type(g, operand_type, true)); } static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *instruction) { @@ -5113,18 +5113,6 @@ static LLVMValueRef ir_render_test_err(CodeGen *g, IrExecutable *executable, IrI return LLVMBuildICmp(g->builder, LLVMIntNE, err_val, zero, ""); } -static LLVMValueRef gen_unwrap_err_code(CodeGen *g, LLVMValueRef err_union_ptr, ZigType *ptr_type) { - ZigType *err_union_type = ptr_type->data.pointer.child_type; - ZigType *payload_type = err_union_type->data.error_union.payload_type; - if (!type_has_bits(payload_type)) { - return err_union_ptr; - } else { - // TODO assign undef to the payload - LLVMValueRef err_union_handle = get_handle_value(g, err_union_ptr, err_union_type, ptr_type); - return LLVMBuildStructGEP(g->builder, err_union_handle, err_union_err_index, ""); - } -} - static LLVMValueRef ir_render_unwrap_err_code(CodeGen *g, IrExecutable *executable, IrInstructionUnwrapErrCode *instruction) { @@ -5133,8 +5121,16 @@ static LLVMValueRef ir_render_unwrap_err_code(CodeGen *g, IrExecutable *executab ZigType *ptr_type = instruction->err_union_ptr->value.type; assert(ptr_type->id == ZigTypeIdPointer); + ZigType *err_union_type = ptr_type->data.pointer.child_type; + ZigType 
*payload_type = err_union_type->data.error_union.payload_type; LLVMValueRef err_union_ptr = ir_llvm_value(g, instruction->err_union_ptr); - return gen_unwrap_err_code(g, err_union_ptr, ptr_type); + if (!type_has_bits(payload_type)) { + return err_union_ptr; + } else { + // TODO assign undef to the payload + LLVMValueRef err_union_handle = get_handle_value(g, err_union_ptr, err_union_type, ptr_type); + return LLVMBuildStructGEP(g->builder, err_union_handle, err_union_err_index, ""); + } } static LLVMValueRef ir_render_unwrap_err_payload(CodeGen *g, IrExecutable *executable, @@ -5615,21 +5611,36 @@ static LLVMValueRef ir_render_test_cancel_requested(CodeGen *g, IrExecutable *ex } } -static LLVMValueRef ir_render_spill(CodeGen *g, IrExecutable *executable, IrInstructionSpill *instruction) { +static LLVMValueRef ir_render_spill_begin(CodeGen *g, IrExecutable *executable, + IrInstructionSpillBegin *instruction) +{ if (!fn_is_async(g->cur_fn)) - return ir_llvm_value(g, instruction->operand); + return nullptr; switch (instruction->spill_id) { case SpillIdInvalid: zig_unreachable(); case SpillIdRetErrCode: { - LLVMValueRef ret_ptr = LLVMBuildLoad(g->builder, g->cur_ret_ptr, ""); - ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type; - if (ret_type->id == ZigTypeIdErrorUnion) { - return gen_unwrap_err_code(g, ret_ptr, get_pointer_to_type(g, ret_type, true)); - } else { - zig_unreachable(); - } + LLVMValueRef operand = ir_llvm_value(g, instruction->operand); + LLVMValueRef ptr = ir_llvm_value(g, g->cur_fn->err_code_spill); + LLVMBuildStore(g->builder, operand, ptr); + return nullptr; + } + + } + zig_unreachable(); +} + +static LLVMValueRef ir_render_spill_end(CodeGen *g, IrExecutable *executable, IrInstructionSpillEnd *instruction) { + if (!fn_is_async(g->cur_fn)) + return ir_llvm_value(g, instruction->begin->operand); + + switch (instruction->begin->spill_id) { + case SpillIdInvalid: + zig_unreachable(); + case SpillIdRetErrCode: { + LLVMValueRef ptr = 
ir_llvm_value(g, g->cur_fn->err_code_spill); + return LLVMBuildLoad(g->builder, ptr, ""); } } @@ -5891,8 +5902,10 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_await(g, executable, (IrInstructionAwaitGen *)instruction); case IrInstructionIdTestCancelRequested: return ir_render_test_cancel_requested(g, executable, (IrInstructionTestCancelRequested *)instruction); - case IrInstructionIdSpill: - return ir_render_spill(g, executable, (IrInstructionSpill *)instruction); + case IrInstructionIdSpillBegin: + return ir_render_spill_begin(g, executable, (IrInstructionSpillBegin *)instruction); + case IrInstructionIdSpillEnd: + return ir_render_spill_end(g, executable, (IrInstructionSpillEnd *)instruction); } zig_unreachable(); } diff --git a/src/ir.cpp b/src/ir.cpp index 845ee03757..97971efd50 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -1066,8 +1066,12 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionTestCancelReques return IrInstructionIdTestCancelRequested; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionSpill *) { - return IrInstructionIdSpill; +static constexpr IrInstructionId ir_instruction_id(IrInstructionSpillBegin *) { + return IrInstructionIdSpillBegin; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionSpillEnd *) { + return IrInstructionIdSpillEnd; } template @@ -3336,15 +3340,28 @@ static IrInstruction *ir_build_test_cancel_requested(IrBuilder *irb, Scope *scop return &instruction->base; } -static IrInstruction *ir_build_spill(IrBuilder *irb, Scope *scope, AstNode *source_node, +static IrInstructionSpillBegin *ir_build_spill_begin(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *operand, SpillId spill_id) { - IrInstructionSpill *instruction = ir_build_instruction(irb, scope, source_node); + IrInstructionSpillBegin *instruction = ir_build_instruction(irb, scope, source_node); + instruction->base.value.special = ConstValSpecialStatic; + 
instruction->base.value.type = irb->codegen->builtin_types.entry_void; instruction->operand = operand; instruction->spill_id = spill_id; ir_ref_instruction(operand, irb->current_basic_block); + return instruction; +} + +static IrInstruction *ir_build_spill_end(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstructionSpillBegin *begin) +{ + IrInstructionSpillEnd *instruction = ir_build_instruction(irb, scope, source_node); + instruction->begin = begin; + + ir_ref_instruction(&begin->base, irb->current_basic_block); + return &instruction->base; } @@ -3602,14 +3619,15 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, IrInstruction *err_val_ptr = ir_build_unwrap_err_code(irb, scope, node, err_union_ptr); IrInstruction *err_val = ir_build_load_ptr(irb, scope, node, err_val_ptr); ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, err_val)); - err_val = ir_build_return_begin(irb, scope, node, err_val); + IrInstructionSpillBegin *spill_begin = ir_build_spill_begin(irb, scope, node, err_val, + SpillIdRetErrCode); + ir_build_return_begin(irb, scope, node, err_val); + err_val = ir_build_spill_end(irb, scope, node, spill_begin); + ResultLocReturn *result_loc_ret = allocate(1); + result_loc_ret->base.id = ResultLocIdReturn; + ir_build_reset_result(irb, scope, node, &result_loc_ret->base); + ir_build_end_expr(irb, scope, node, err_val, &result_loc_ret->base); if (!ir_gen_defers_for_block(irb, scope, outer_scope, true)) { - ResultLocReturn *result_loc_ret = allocate(1); - result_loc_ret->base.id = ResultLocIdReturn; - ir_build_reset_result(irb, scope, node, &result_loc_ret->base); - err_val = ir_build_spill(irb, scope, node, err_val, SpillIdRetErrCode); - ir_build_end_expr(irb, scope, node, err_val, &result_loc_ret->base); - if (irb->codegen->have_err_ret_tracing && !should_inline) { ir_build_save_err_ret_addr(irb, scope, node); } @@ -12778,8 +12796,21 @@ static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, 
IrInstructio return ir_finish_anal(ira, result); } + // This cast might have been already done from IrInstructionReturnBegin but it also + // might not have, in the case of `try`. + IrInstruction *casted_operand = ir_implicit_cast(ira, operand, ira->explicit_return_type); + if (type_is_invalid(casted_operand->value.type)) { + AstNode *source_node = ira->explicit_return_type_source_node; + if (source_node != nullptr) { + ErrorMsg *msg = ira->codegen->errors.last(); + add_error_note(ira->codegen, msg, source_node, + buf_sprintf("return type declared here")); + } + return ir_unreach_error(ira); + } + IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, operand); + instruction->base.source_node, casted_operand); result->value.type = ira->codegen->builtin_types.entry_unreachable; return ir_finish_anal(ira, result); } @@ -24742,15 +24773,38 @@ static IrInstruction *ir_analyze_instruction_test_cancel_requested(IrAnalyze *ir return ir_build_test_cancel_requested(&ira->new_irb, instruction->base.scope, instruction->base.source_node); } -static IrInstruction *ir_analyze_instruction_spill(IrAnalyze *ira, IrInstructionSpill *instruction) { +static IrInstruction *ir_analyze_instruction_spill_begin(IrAnalyze *ira, IrInstructionSpillBegin *instruction) { + if (ir_should_inline(ira->new_irb.exec, instruction->base.scope)) + return ir_const_void(ira, &instruction->base); + IrInstruction *operand = instruction->operand->child; if (type_is_invalid(operand->value.type)) return ira->codegen->invalid_instruction; - if (ir_should_inline(ira->new_irb.exec, instruction->base.scope)) { + + if (!type_has_bits(operand->value.type)) + return ir_const_void(ira, &instruction->base); + + ir_assert(instruction->spill_id == SpillIdRetErrCode, &instruction->base); + ira->new_irb.exec->need_err_code_spill = true; + + IrInstructionSpillBegin *result = ir_build_spill_begin(&ira->new_irb, instruction->base.scope, + 
instruction->base.source_node, operand, instruction->spill_id); + return &result->base; +} + +static IrInstruction *ir_analyze_instruction_spill_end(IrAnalyze *ira, IrInstructionSpillEnd *instruction) { + IrInstruction *operand = instruction->begin->operand->child; + if (type_is_invalid(operand->value.type)) + return ira->codegen->invalid_instruction; + + if (ir_should_inline(ira->new_irb.exec, instruction->base.scope) || !type_has_bits(operand->value.type)) return operand; - } - IrInstruction *result = ir_build_spill(&ira->new_irb, instruction->base.scope, instruction->base.source_node, - operand, instruction->spill_id); + + ir_assert(instruction->begin->base.child->id == IrInstructionIdSpillBegin, &instruction->base); + IrInstructionSpillBegin *begin = reinterpret_cast(instruction->begin->base.child); + + IrInstruction *result = ir_build_spill_end(&ira->new_irb, instruction->base.scope, + instruction->base.source_node, begin); result->value.type = operand->value.type; return result; } @@ -25054,8 +25108,10 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction return ir_analyze_instruction_await(ira, (IrInstructionAwaitSrc *)instruction); case IrInstructionIdTestCancelRequested: return ir_analyze_instruction_test_cancel_requested(ira, (IrInstructionTestCancelRequested *)instruction); - case IrInstructionIdSpill: - return ir_analyze_instruction_spill(ira, (IrInstructionSpill *)instruction); + case IrInstructionIdSpillBegin: + return ir_analyze_instruction_spill_begin(ira, (IrInstructionSpillBegin *)instruction); + case IrInstructionIdSpillEnd: + return ir_analyze_instruction_spill_end(ira, (IrInstructionSpillEnd *)instruction); } zig_unreachable(); } @@ -25193,6 +25249,7 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdCoroResume: case IrInstructionIdAwaitSrc: case IrInstructionIdAwaitGen: + case IrInstructionIdSpillBegin: return true; case IrInstructionIdPhi: @@ -25291,7 +25348,7 @@ bool 
ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdAllocaSrc: case IrInstructionIdAllocaGen: case IrInstructionIdTestCancelRequested: - case IrInstructionIdSpill: + case IrInstructionIdSpillEnd: return false; case IrInstructionIdAsm: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 39e781e4f0..9d4570d79a 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -1554,12 +1554,18 @@ static void ir_print_test_cancel_requested(IrPrint *irp, IrInstructionTestCancel fprintf(irp->f, "@testCancelRequested()"); } -static void ir_print_spill(IrPrint *irp, IrInstructionSpill *instruction) { - fprintf(irp->f, "@spill("); +static void ir_print_spill_begin(IrPrint *irp, IrInstructionSpillBegin *instruction) { + fprintf(irp->f, "@spillBegin("); ir_print_other_instruction(irp, instruction->operand); fprintf(irp->f, ")"); } +static void ir_print_spill_end(IrPrint *irp, IrInstructionSpillEnd *instruction) { + fprintf(irp->f, "@spillEnd("); + ir_print_other_instruction(irp, &instruction->begin->base); + fprintf(irp->f, ")"); +} + static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { ir_print_prefix(irp, instruction); switch (instruction->id) { @@ -2045,8 +2051,11 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdTestCancelRequested: ir_print_test_cancel_requested(irp, (IrInstructionTestCancelRequested *)instruction); break; - case IrInstructionIdSpill: - ir_print_spill(irp, (IrInstructionSpill *)instruction); + case IrInstructionIdSpillBegin: + ir_print_spill_begin(irp, (IrInstructionSpillBegin *)instruction); + break; + case IrInstructionIdSpillEnd: + ir_print_spill_end(irp, (IrInstructionSpillEnd *)instruction); break; } fprintf(irp->f, "\n"); diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index c92cca9573..a1828a662c 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -642,3 +642,33 @@ test "combining try 
with errdefer cancel" { }; S.doTheTest(); } + +test "try in an async function with error union and non-zero-bit payload" { + const S = struct { + var frame: anyframe = undefined; + var ok = false; + + fn doTheTest() void { + _ = async amain(); + resume frame; + expect(ok); + } + + fn amain() void { + std.testing.expectError(error.Bad, theProblem()); + ok = true; + } + + fn theProblem() ![]u8 { + frame = @frame(); + suspend; + const result = try other(); + return result; + } + + fn other() ![]u8 { + return error.Bad; + } + }; + S.doTheTest(); +} From 77d098e92d1f7d69e4067db811333982ad98190a Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 10 Aug 2019 17:23:45 -0400 Subject: [PATCH 085/125] fix returning a const error from async function --- src/codegen.cpp | 18 +++++++++++------- test/stage1/behavior/coroutines.zig | 28 ++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 7 deletions(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index 2f07fcd710..0db9b37c52 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -2280,13 +2280,16 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable, return operand_has_bits ? ir_llvm_value(g, instruction->operand) : nullptr; } - ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type; - bool ret_type_has_bits = type_has_bits(ret_type); LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; - - if (ret_type_has_bits && !handle_is_ptr(ret_type)) { - // It's a scalar, so it didn't get written to the result ptr. Do that before the atomic rmw. - LLVMBuildStore(g->builder, ir_llvm_value(g, instruction->operand), g->cur_ret_ptr); + if (operand_has_bits && instruction->operand != nullptr) { + ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type; + bool need_store = instruction->operand->value.special != ConstValSpecialRuntime || !handle_is_ptr(ret_type); + if (need_store) { + // It didn't get written to the result ptr. 
We do that now so that we do not have to spill + // the return operand. + ZigType *ret_ptr_type = get_pointer_to_type(g, ret_type, true); + gen_assign_raw(g, g->cur_ret_ptr, ret_ptr_type, ir_llvm_value(g, instruction->operand)); + } } // Prepare to be suspended. We might end up not having to suspend though. @@ -2387,6 +2390,8 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns LLVMSetTailCall(call_inst, true); LLVMBuildRetVoid(g->builder); + g->cur_is_after_return = false; + return nullptr; } if (want_first_arg_sret(g, &g->cur_fn->type_entry->data.fn.fn_type_id)) { @@ -7117,7 +7122,6 @@ static void do_code_gen(CodeGen *g) { } if (is_async) { - g->cur_is_after_return = false; g->cur_resume_block_count = 0; LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index a1828a662c..103cbe0d1d 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -672,3 +672,31 @@ test "try in an async function with error union and non-zero-bit payload" { }; S.doTheTest(); } + +test "returning a const error from async function" { + const S = struct { + var frame: anyframe = undefined; + var ok = false; + + fn doTheTest() void { + _ = async amain(); + resume frame; + expect(ok); + } + + fn amain() !void { + var download_frame = async fetchUrl(10, "a string"); + const download_text = try await download_frame; + + @panic("should not get here"); + } + + fn fetchUrl(unused: i32, url: []const u8) ![]u8 { + frame = @frame(); + suspend; + ok = true; + return error.OutOfMemory; + } + }; + S.doTheTest(); +} From 3f5c6d7a891a22ce6c38c93498339319a36dd2fe Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 10 Aug 2019 18:03:36 -0400 Subject: [PATCH 086/125] add test case for typical async/await usage --- test/stage1/behavior/coroutines.zig | 66 +++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git 
a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index 103cbe0d1d..13b04d7787 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -700,3 +700,69 @@ test "returning a const error from async function" { }; S.doTheTest(); } + +test "async/await typical usage" { + inline for ([_]bool{false, true}) |b1| { + inline for ([_]bool{false, true}) |b2| { + testAsyncAwaitTypicalUsage(b1, b2).doTheTest(); + } + } +} + +fn testAsyncAwaitTypicalUsage(comptime simulate_fail_download: bool, comptime simulate_fail_file: bool) type { + return struct { + fn doTheTest() void { + _ = async amainWrap(); + resume global_file_frame; + resume global_download_frame; + } + fn amainWrap() void { + if (amain()) |_| { + expect(!simulate_fail_download); + expect(!simulate_fail_file); + } else |e| switch (e) { + error.NoResponse => expect(simulate_fail_download), + error.FileNotFound => expect(simulate_fail_file), + else => @panic("test failure"), + } + } + + fn amain() !void { + const allocator = std.heap.direct_allocator; // TODO once we have the debug allocator, use that, so that this can detect leaks + var download_frame = async fetchUrl(allocator, "https://example.com/"); + errdefer cancel download_frame; + + var file_frame = async readFile(allocator, "something.txt"); + errdefer cancel file_frame; + + const download_text = try await download_frame; + defer allocator.free(download_text); + + const file_text = try await file_frame; + defer allocator.free(file_text); + + expect(std.mem.eql(u8, "expected download text", download_text)); + expect(std.mem.eql(u8, "expected file text", file_text)); + } + + var global_download_frame: anyframe = undefined; + fn fetchUrl(allocator: *std.mem.Allocator, url: []const u8) anyerror![]u8 { + global_download_frame = @frame(); + const result = try std.mem.dupe(allocator, u8, "expected download text"); + errdefer allocator.free(result); + suspend; + if (simulate_fail_download) return 
error.NoResponse; + return result; + } + + var global_file_frame: anyframe = undefined; + fn readFile(allocator: *std.mem.Allocator, filename: []const u8) anyerror![]u8 { + global_file_frame = @frame(); + const result = try std.mem.dupe(allocator, u8, "expected file text"); + errdefer allocator.free(result); + suspend; + if (simulate_fail_file) return error.FileNotFound; + return result; + } + }; +} From 4bd4c5e06d13c35a70d0207b730fb67b83ff6363 Mon Sep 17 00:00:00 2001 From: data-man Date: Fri, 9 Aug 2019 12:43:29 +0500 Subject: [PATCH 087/125] Fixed tiny typo in the math module (shr's description) --- std/math.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/std/math.zig b/std/math.zig index 73f981528c..e10c9329d9 100644 --- a/std/math.zig +++ b/std/math.zig @@ -366,7 +366,7 @@ test "math.shl" { } /// Shifts right. Overflowed bits are truncated. -/// A negative shift amount results in a lefft shift. +/// A negative shift amount results in a left shift. pub fn shr(comptime T: type, a: T, shift_amt: var) T { const abs_shift_amt = absCast(shift_amt); const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt); From 1b83ee78a48a64bef28f12b7b2e263074f88b6b6 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 11 Aug 2019 12:00:32 -0400 Subject: [PATCH 088/125] allow comptime_int to implicit cast to comptime_float --- src/ir.cpp | 3 +++ std/math.zig | 7 +++++++ test/compile_errors.zig | 8 -------- test/stage1/behavior/cast.zig | 7 ++++++- 4 files changed, 16 insertions(+), 9 deletions(-) diff --git a/src/ir.cpp b/src/ir.cpp index 2b096a3383..13348d28c4 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -9713,6 +9713,9 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, IrInstruction *instruc bool const_val_is_float = (const_val->type->id == ZigTypeIdFloat || const_val->type->id == ZigTypeIdComptimeFloat); assert(const_val_is_int || const_val_is_float); + if (const_val_is_int && other_type->id == 
ZigTypeIdComptimeFloat) { + return true; + } if (other_type->id == ZigTypeIdFloat) { if (const_val->type->id == ZigTypeIdComptimeInt || const_val->type->id == ZigTypeIdComptimeFloat) { return true; diff --git a/std/math.zig b/std/math.zig index e10c9329d9..e47021512e 100644 --- a/std/math.zig +++ b/std/math.zig @@ -305,6 +305,13 @@ test "math.min" { testing.expect(@typeOf(result) == i16); testing.expect(result == -200); } + { + const a = 10.34; + var b: f32 = 999.12; + var result = min(a, b); + testing.expect(@typeOf(result) == f32); + testing.expect(result == 10.34); + } } pub fn max(x: var, y: var) @typeOf(x + y) { diff --git a/test/compile_errors.zig b/test/compile_errors.zig index a4bc2a66f0..437e40900d 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -3225,14 +3225,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { "tmp.zig:3:17: note: value 8 cannot fit into type u3", ); - cases.add( - "incompatible number literals", - \\const x = 2 == 2.0; - \\export fn entry() usize { return @sizeOf(@typeOf(x)); } - , - "tmp.zig:1:11: error: integer value 2 cannot be implicitly casted to type 'comptime_float'", - ); - cases.add( "missing function call param", \\const Foo = struct { diff --git a/test/stage1/behavior/cast.zig b/test/stage1/behavior/cast.zig index c243f18088..04c7fa606f 100644 --- a/test/stage1/behavior/cast.zig +++ b/test/stage1/behavior/cast.zig @@ -508,7 +508,7 @@ test "peer type resolution: unreachable, null, slice" { } test "peer type resolution: unreachable, error set, unreachable" { - const Error = error { + const Error = error{ FileDescriptorAlreadyPresentInSet, OperationCausesCircularLoop, FileDescriptorNotRegistered, @@ -529,3 +529,8 @@ test "peer type resolution: unreachable, error set, unreachable" { }; expect(transformed_err == error.SystemResources); } + +test "implicit cast comptime_int to comptime_float" { + comptime expect(comptime_float(10) == f32(10)); + expect(2 == 2.0); +} From 
b87686dfa094770e96da33fb23a7d011a168157c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 11 Aug 2019 13:43:44 -0400 Subject: [PATCH 089/125] fix enum with one member and custom tag type --- src/ir.cpp | 1 - test/stage1/behavior/enum.zig | 11 +++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/src/ir.cpp b/src/ir.cpp index 13348d28c4..fde2b972f8 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -11938,7 +11938,6 @@ static IrInstruction *ir_analyze_enum_to_int(IrAnalyze *ira, IrInstruction *sour if (enum_type->data.enumeration.layout == ContainerLayoutAuto && enum_type->data.enumeration.src_field_count == 1) { - assert(tag_type == ira->codegen->builtin_types.entry_num_lit_int); IrInstruction *result = ir_const(ira, source_instr, tag_type); init_const_bigint(&result->value, tag_type, &enum_type->data.enumeration.fields[0].value); diff --git a/test/stage1/behavior/enum.zig b/test/stage1/behavior/enum.zig index 51f4f0e196..d7d34aec88 100644 --- a/test/stage1/behavior/enum.zig +++ b/test/stage1/behavior/enum.zig @@ -982,3 +982,14 @@ test "enum literal casting to tagged union" { else => @panic("fail"), } } + +test "enum with one member and custom tag type" { + const E = enum(u2) { + One, + }; + expect(@enumToInt(E.One) == 0); + const E2 = enum(u2) { + One = 2, + }; + expect(@enumToInt(E2.One) == 2); +} From af8c6ccb4bcae7baf30f3b1032a98b82f39d9c26 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 11 Aug 2019 14:26:34 -0400 Subject: [PATCH 090/125] fix canceling async functions which have error return tracing --- src/codegen.cpp | 17 ++++++++++++ src/ir.cpp | 74 +++++++++++++++++++++++++------------------------ 2 files changed, 55 insertions(+), 36 deletions(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index 0db9b37c52..f1a42e321d 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -2071,6 +2071,7 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) { LLVMPositionBuilderAtEnd(g->builder, entry_block); 
ZigLLVMClearCurrentDebugLocation(g->builder); + // if (dest_stack_trace == null) return; // var frame_index: usize = undefined; // var frames_left: usize = undefined; // if (src_stack_trace.index < src_stack_trace.instruction_addresses.len) { @@ -2088,6 +2089,7 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) { // frame_index = (frame_index + 1) % src_stack_trace.instruction_addresses.len; // } LLVMBasicBlockRef return_block = LLVMAppendBasicBlock(fn_val, "Return"); + LLVMBasicBlockRef dest_non_null_block = LLVMAppendBasicBlock(fn_val, "DestNonNull"); LLVMValueRef frame_index_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frame_index"); LLVMValueRef frames_left_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frames_left"); @@ -2095,6 +2097,11 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) { LLVMValueRef dest_stack_trace_ptr = LLVMGetParam(fn_val, 0); LLVMValueRef src_stack_trace_ptr = LLVMGetParam(fn_val, 1); + LLVMValueRef null_dest_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, dest_stack_trace_ptr, + LLVMConstNull(LLVMTypeOf(dest_stack_trace_ptr)), ""); + LLVMBuildCondBr(g->builder, null_dest_bit, return_block, dest_non_null_block); + + LLVMPositionBuilderAtEnd(g->builder, dest_non_null_block); size_t src_index_field_index = g->stack_trace_type->data.structure.fields[0].gen_index; size_t src_addresses_field_index = g->stack_trace_type->data.structure.fields[1].gen_index; LLVMValueRef src_index_field_ptr = LLVMBuildStructGEP(g->builder, src_stack_trace_ptr, @@ -5480,10 +5487,20 @@ static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrIns LLVMValueRef zero = LLVMConstNull(usize_type_ref); LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref); LLVMValueRef one = LLVMConstInt(usize_type_ref, 1, false); + src_assert(instruction->frame->value.type->id == ZigTypeIdAnyFrame, instruction->base.source_node); + ZigType *result_type = 
instruction->frame->value.type->data.any_frame.result_type; LLVMValueRef target_frame_ptr = ir_llvm_value(g, instruction->frame); LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "CancelResume"); + // supply null for the error return trace pointer + if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) { + LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, + frame_index_trace_arg(g, result_type), ""); + LLVMBuildStore(g->builder, LLVMConstNull(LLVMGetElementType(LLVMTypeOf(err_ret_trace_ptr_ptr))), + err_ret_trace_ptr_ptr); + } + LLVMValueRef awaiter_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, ""); LLVMValueRef awaiter_ored_val = LLVMBuildOr(g->builder, awaiter_val, one, ""); LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_awaiter_index, ""); diff --git a/src/ir.cpp b/src/ir.cpp index 97971efd50..f1d4b80a2c 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -24656,38 +24656,9 @@ static IrInstruction *ir_analyze_instruction_suspend_finish(IrAnalyze *ira, return ir_build_suspend_finish(&ira->new_irb, instruction->base.scope, instruction->base.source_node, begin); } -static IrInstruction *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructionCancel *instruction) { - IrInstruction *frame_ptr = instruction->frame->child; - if (type_is_invalid(frame_ptr->value.type)) - return ira->codegen->invalid_instruction; - - IrInstruction *frame; - if (frame_ptr->value.type->id == ZigTypeIdPointer && - frame_ptr->value.type->data.pointer.ptr_len == PtrLenSingle && - frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdCoroFrame) - { - frame = frame_ptr; - } else { - frame = ir_get_deref(ira, &instruction->base, frame_ptr, nullptr); - } - - ZigType *any_frame_type = get_any_frame_type(ira->codegen, nullptr); - IrInstruction *casted_frame = ir_implicit_cast(ira, frame, any_frame_type); - if (type_is_invalid(casted_frame->value.type)) - return ira->codegen->invalid_instruction; - 
- ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec); - ir_assert(fn_entry != nullptr, &instruction->base); - - if (fn_entry->inferred_async_node == nullptr) { - fn_entry->inferred_async_node = instruction->base.source_node; - } - - return ir_build_cancel(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_frame); -} - -static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstructionAwaitSrc *instruction) { - IrInstruction *frame_ptr = instruction->frame->child; +static IrInstruction *analyze_frame_ptr_to_anyframe_T(IrAnalyze *ira, IrInstruction *source_instr, + IrInstruction *frame_ptr) +{ if (type_is_invalid(frame_ptr->value.type)) return ira->codegen->invalid_instruction; @@ -24700,15 +24671,21 @@ static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstruction result_type = frame_ptr->value.type->data.pointer.child_type->data.frame.fn->type_entry->data.fn.fn_type_id.return_type; frame = frame_ptr; } else { - frame = ir_get_deref(ira, &instruction->base, frame_ptr, nullptr); - if (frame->value.type->id != ZigTypeIdAnyFrame || + frame = ir_get_deref(ira, source_instr, frame_ptr, nullptr); + if (frame->value.type->id == ZigTypeIdPointer && + frame->value.type->data.pointer.ptr_len == PtrLenSingle && + frame->value.type->data.pointer.child_type->id == ZigTypeIdCoroFrame) + { + result_type = frame->value.type->data.pointer.child_type->data.frame.fn->type_entry->data.fn.fn_type_id.return_type; + } else if (frame->value.type->id != ZigTypeIdAnyFrame || frame->value.type->data.any_frame.result_type == nullptr) { - ir_add_error(ira, &instruction->base, + ir_add_error(ira, source_instr, buf_sprintf("expected anyframe->T, found '%s'", buf_ptr(&frame->value.type->name))); return ira->codegen->invalid_instruction; + } else { + result_type = frame->value.type->data.any_frame.result_type; } - result_type = frame->value.type->data.any_frame.result_type; } ZigType *any_frame_type = get_any_frame_type(ira->codegen, 
result_type); @@ -24716,6 +24693,31 @@ static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstruction if (type_is_invalid(casted_frame->value.type)) return ira->codegen->invalid_instruction; + return casted_frame; +} + +static IrInstruction *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructionCancel *instruction) { + IrInstruction *frame = analyze_frame_ptr_to_anyframe_T(ira, &instruction->base, instruction->frame->child); + if (type_is_invalid(frame->value.type)) + return ira->codegen->invalid_instruction; + + ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec); + ir_assert(fn_entry != nullptr, &instruction->base); + + if (fn_entry->inferred_async_node == nullptr) { + fn_entry->inferred_async_node = instruction->base.source_node; + } + + return ir_build_cancel(&ira->new_irb, instruction->base.scope, instruction->base.source_node, frame); +} + +static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstructionAwaitSrc *instruction) { + IrInstruction *frame = analyze_frame_ptr_to_anyframe_T(ira, &instruction->base, instruction->frame->child); + if (type_is_invalid(frame->value.type)) + return ira->codegen->invalid_instruction; + + ZigType *result_type = frame->value.type->data.any_frame.result_type; + ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec); ir_assert(fn_entry != nullptr, &instruction->base); From 3aed7de0c47af40e6ca00c0e969ad30b44af5a7c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 11 Aug 2019 16:09:23 -0400 Subject: [PATCH 091/125] README: link to community projects --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 1be33c34f7..9cf0893da8 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,7 @@ A general-purpose programming language designed for **robustness**, * [Community](https://github.com/ziglang/zig/wiki/Community) * [Contributing](https://github.com/ziglang/zig/blob/master/CONTRIBUTING.md) * [Frequently Asked 
Questions](https://github.com/ziglang/zig/wiki/FAQ) + * [Community Projects](https://github.com/ziglang/zig/wiki/Community-Projects) ## Building from Source From 4d8d513e16d308131846d98267bc844bf702e9ce Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 11 Aug 2019 19:53:10 -0400 Subject: [PATCH 092/125] all tests passing --- BRANCH_TODO | 6 ++- doc/docgen.zig | 2 +- doc/langref.html.in | 81 ++++++++++--------------------- src/analyze.cpp | 2 +- src/codegen.cpp | 49 ++++++++++--------- src/ir.cpp | 3 ++ std/event/channel.zig | 11 +++-- std/event/fs.zig | 102 ++++++++++------------------------------ std/event/future.zig | 45 ++++++++---------- std/event/group.zig | 68 ++++++++------------------- std/event/io.zig | 19 ++++---- std/event/lock.zig | 54 +++++++++------------ std/event/loop.zig | 4 +- std/event/net.zig | 53 +++++++++------------ std/event/rwlock.zig | 85 ++++++++++++++++----------------- std/zig/parser_test.zig | 2 +- test/compile_errors.zig | 35 ++++---------- 17 files changed, 240 insertions(+), 381 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index bd797a75a8..b2b293aec1 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,10 +1,13 @@ + * for loops need to spill the index. 
other payload captures probably also need to spill + * compile error (instead of crashing) for trying to get @Frame of generic function + * compile error (instead of crashing) for trying to async call and passing @Frame of wrong function + * `const result = (await a) + (await b);` this causes "Instruction does not dominate all uses" - need spill * compile error for error: expected anyframe->T, found 'anyframe' * compile error for error: expected anyframe->T, found 'i32' * await of a non async function * async call on a non async function * a test where an async function destroys its own frame in a defer * implicit cast of normal function to async function should be allowed when it is inferred to be async - * revive std.event.Loop * @typeInfo for @Frame(func) * peer type resolution of *@Frame(func) and anyframe * peer type resolution of *@Frame(func) and anyframe->T when the return type matches @@ -36,3 +39,4 @@ - it can be assumed that these are always available: the awaiter ptr, return ptr if applicable, error return trace ptr if applicable. 
- it can be assumed that it is never cancelled + * fix the debug info for variables of async functions diff --git a/doc/docgen.zig b/doc/docgen.zig index 3d3dcba76d..92764d7642 100644 --- a/doc/docgen.zig +++ b/doc/docgen.zig @@ -770,7 +770,7 @@ fn tokenizeAndPrintRaw(docgen_tokenizer: *Tokenizer, out: var, source_token: Tok .Keyword_or, .Keyword_orelse, .Keyword_packed, - .Keyword_promise, + .Keyword_anyframe, .Keyword_pub, .Keyword_resume, .Keyword_return, diff --git a/doc/langref.html.in b/doc/langref.html.in index ac381e00b2..0cb76a4bdf 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -6024,13 +6024,14 @@ const assert = std.debug.assert; var x: i32 = 1; -test "create a coroutine and cancel it" { - const p = try async simpleAsyncFn(); - comptime assert(@typeOf(p) == promise->void); - cancel p; +test "call an async function" { + var frame = async simpleAsyncFn(); + comptime assert(@typeOf(frame) == @Frame(simpleAsyncFn)); assert(x == 2); } -async<*std.mem.Allocator> fn simpleAsyncFn() void { +fn simpleAsyncFn() void { + x += 1; + suspend; x += 1; } {#code_end#} @@ -6041,60 +6042,33 @@ async<*std.mem.Allocator> fn simpleAsyncFn() void { return to the caller or resumer. The following code demonstrates where control flow goes:

- {#code_begin|test#} -const std = @import("std"); -const assert = std.debug.assert; - -test "coroutine suspend, resume, cancel" { - seq('a'); - const p = try async testAsyncSeq(); - seq('c'); - resume p; - seq('f'); - cancel p; - seq('g'); - - assert(std.mem.eql(u8, points, "abcdefg")); -} -async fn testAsyncSeq() void { - defer seq('e'); - - seq('b'); - suspend; - seq('d'); -} -var points = [_]u8{0} ** "abcdefg".len; -var index: usize = 0; - -fn seq(c: u8) void { - points[index] = c; - index += 1; -} - {#code_end#} +

+ TODO another test example here +

When an async function suspends itself, it must be sure that it will be resumed or canceled somehow, for example by registering its promise handle in an event loop. Use a suspend capture block to gain access to the - promise: + promise (TODO this is outdated):

{#code_begin|test#} const std = @import("std"); const assert = std.debug.assert; +var the_frame: anyframe = undefined; +var result = false; + test "coroutine suspend with block" { - const p = try async testSuspendBlock(); + _ = async testSuspendBlock(); std.debug.assert(!result); - resume a_promise; + resume the_frame; std.debug.assert(result); - cancel p; } -var a_promise: promise = undefined; -var result = false; -async fn testSuspendBlock() void { +fn testSuspendBlock() void { suspend { - comptime assert(@typeOf(@handle()) == promise->void); - a_promise = @handle(); + comptime assert(@typeOf(@frame()) == *@Frame(testSuspendBlock)); + the_frame = @frame(); } result = true; } @@ -6124,16 +6098,13 @@ const std = @import("std"); const assert = std.debug.assert; test "resume from suspend" { - var buf: [500]u8 = undefined; - var a = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator; var my_result: i32 = 1; - const p = try async
testResumeFromSuspend(&my_result); - cancel p; + _ = async testResumeFromSuspend(&my_result); std.debug.assert(my_result == 2); } async fn testResumeFromSuspend(my_result: *i32) void { suspend { - resume @handle(); + resume @frame(); } my_result.* += 1; suspend; @@ -6172,30 +6143,30 @@ async fn testResumeFromSuspend(my_result: *i32) void { const std = @import("std"); const assert = std.debug.assert; -var a_promise: promise = undefined; +var the_frame: anyframe = undefined; var final_result: i32 = 0; test "coroutine await" { seq('a'); - const p = async amain() catch unreachable; + _ = async amain(); seq('f'); - resume a_promise; + resume the_frame; seq('i'); assert(final_result == 1234); assert(std.mem.eql(u8, seq_points, "abcdefghi")); } async fn amain() void { seq('b'); - const p = async another() catch unreachable; + var f = async another(); seq('e'); - final_result = await p; + final_result = await f; seq('h'); } async fn another() i32 { seq('c'); suspend { seq('d'); - a_promise = @handle(); + the_frame = @frame(); } seq('g'); return 1234; diff --git a/src/analyze.cpp b/src/analyze.cpp index 7482ba92ba..30aa82a216 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5325,7 +5325,7 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { if (*instruction->name_hint == 0) { name = buf_ptr(buf_sprintf("@local%" ZIG_PRI_usize, alloca_i)); } else { - name = instruction->name_hint; + name = buf_ptr(buf_sprintf("%s.%" ZIG_PRI_usize, instruction->name_hint, alloca_i)); } field_names.append(name); field_types.append(child_type); diff --git a/src/codegen.cpp b/src/codegen.cpp index f1a42e321d..4510e7156c 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -535,24 +535,24 @@ static LLVMValueRef make_fn_llvm_value(CodeGen *g, ZigFn *fn) { // use the ABI alignment, which is fine. 
} - unsigned init_gen_i = 0; - if (!type_has_bits(return_type)) { - // nothing to do - } else if (type_is_nonnull_ptr(return_type)) { - addLLVMAttr(llvm_fn, 0, "nonnull"); - } else if (!is_async && want_first_arg_sret(g, &fn_type->data.fn.fn_type_id)) { - // Sret pointers must not be address 0 - addLLVMArgAttr(llvm_fn, 0, "nonnull"); - addLLVMArgAttr(llvm_fn, 0, "sret"); - if (cc_want_sret_attr(cc)) { - addLLVMArgAttr(llvm_fn, 0, "noalias"); - } - init_gen_i = 1; - } - if (is_async) { addLLVMArgAttr(llvm_fn, 0, "nonnull"); } else { + unsigned init_gen_i = 0; + if (!type_has_bits(return_type)) { + // nothing to do + } else if (type_is_nonnull_ptr(return_type)) { + addLLVMAttr(llvm_fn, 0, "nonnull"); + } else if (want_first_arg_sret(g, &fn_type->data.fn.fn_type_id)) { + // Sret pointers must not be address 0 + addLLVMArgAttr(llvm_fn, 0, "nonnull"); + addLLVMArgAttr(llvm_fn, 0, "sret"); + if (cc_want_sret_attr(cc)) { + addLLVMArgAttr(llvm_fn, 0, "noalias"); + } + init_gen_i = 1; + } + // set parameter attributes FnWalk fn_walk = {}; fn_walk.id = FnWalkIdAttrs; @@ -911,7 +911,7 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) { case PanicMsgIdBadResume: return buf_create_from_str("resumed an async function which already returned"); case PanicMsgIdBadAwait: - return buf_create_from_str("async function awaited/canceled twice"); + return buf_create_from_str("async function awaited twice"); case PanicMsgIdBadReturn: return buf_create_from_str("async function returned twice"); case PanicMsgIdResumedAnAwaitingFn: @@ -2350,6 +2350,10 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable, return get_handle_value(g, g->cur_ret_ptr, operand_type, get_pointer_to_type(g, operand_type, true)); } +static void set_tail_call_if_appropriate(CodeGen *g, LLVMValueRef call_inst) { + LLVMSetTailCall(call_inst, true); +} + static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *instruction) { if (fn_is_async(g->cur_fn)) { 
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; @@ -2394,7 +2398,7 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, masked_prev_val, get_llvm_type(g, any_frame_type), ""); LLVMValueRef call_inst = gen_resume(g, nullptr, their_frame_ptr, ResumeIdReturn, nullptr); - LLVMSetTailCall(call_inst, true); + set_tail_call_if_appropriate(g, call_inst); LLVMBuildRetVoid(g->builder); g->cur_is_after_return = false; @@ -4009,7 +4013,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMBasicBlockRef call_bb = gen_suspend_begin(g, "CallResume"); LLVMValueRef call_inst = gen_resume(g, fn_val, frame_result_loc, ResumeIdCall, nullptr); - LLVMSetTailCall(call_inst, true); + set_tail_call_if_appropriate(g, call_inst); LLVMBuildRetVoid(g->builder); LLVMPositionBuilderAtEnd(g->builder, call_bb); @@ -5520,7 +5524,7 @@ static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrIns LLVMPositionBuilderAtEnd(g->builder, early_return_block); LLVMValueRef call_inst = gen_resume(g, nullptr, target_frame_ptr, ResumeIdAwaitEarlyReturn, awaiter_ored_val); - LLVMSetTailCall(call_inst, true); + set_tail_call_if_appropriate(g, call_inst); LLVMBuildRetVoid(g->builder); LLVMPositionBuilderAtEnd(g->builder, resume_bb); @@ -5556,8 +5560,9 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst } // supply the error return trace pointer - LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope); - if (my_err_ret_trace_val != nullptr) { + if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) { + LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope); + assert(my_err_ret_trace_val != nullptr); LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_index_trace_arg(g, result_type), ""); 
LLVMBuildStore(g->builder, my_err_ret_trace_val, err_ret_trace_ptr_ptr); @@ -5588,7 +5593,7 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst // Tail resume it now, so that it can complete. LLVMPositionBuilderAtEnd(g->builder, early_return_block); LLVMValueRef call_inst = gen_resume(g, nullptr, target_frame_ptr, ResumeIdAwaitEarlyReturn, awaiter_init_val); - LLVMSetTailCall(call_inst, true); + set_tail_call_if_appropriate(g, call_inst); LLVMBuildRetVoid(g->builder); // Rely on the target to resume us from suspension. diff --git a/src/ir.cpp b/src/ir.cpp index f1d4b80a2c..57c50db818 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -15064,6 +15064,9 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc))) { return result_loc; } + result_loc = ir_implicit_cast(ira, result_loc, get_pointer_to_type(ira->codegen, frame_type, false)); + if (type_is_invalid(result_loc->value.type)) + return ira->codegen->invalid_instruction; return &ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, arg_count, casted_args, FnInlineAuto, true, nullptr, result_loc, frame_type)->base; } diff --git a/std/event/channel.zig b/std/event/channel.zig index c9686e37e9..c4f7dca085 100644 --- a/std/event/channel.zig +++ b/std/event/channel.zig @@ -77,18 +77,19 @@ pub fn Channel(comptime T: type) type { /// must be called when all calls to put and get have suspended and no more calls occur pub fn destroy(self: *SelfChannel) void { while (self.getters.get()) |get_node| { - cancel get_node.data.tick_node.data; + resume get_node.data.tick_node.data; } while (self.putters.get()) |put_node| { - cancel put_node.data.tick_node.data; + resume put_node.data.tick_node.data; } self.loop.allocator.free(self.buffer_nodes); self.loop.allocator.destroy(self); } - /// puts a data item in the channel. 
The promise completes when the value has been added to the + /// puts a data item in the channel. The function returns when the value has been added to the /// buffer, or in the case of a zero size buffer, when the item has been retrieved by a getter. - pub async fn put(self: *SelfChannel, data: T) void { + /// Or when the channel is destroyed. + pub fn put(self: *SelfChannel, data: T) void { var my_tick_node = Loop.NextTickNode.init(@frame()); var queue_node = std.atomic.Queue(PutNode).Node.init(PutNode{ .tick_node = &my_tick_node, @@ -114,7 +115,7 @@ pub fn Channel(comptime T: type) type { } } - /// await this function to get an item from the channel. If the buffer is empty, the promise will + /// await this function to get an item from the channel. If the buffer is empty, the frame will /// complete when the next item is put in the channel. pub async fn get(self: *SelfChannel) T { // TODO integrate this function with named return values diff --git a/std/event/fs.zig b/std/event/fs.zig index 22e9fc38c9..fe2f604ac3 100644 --- a/std/event/fs.zig +++ b/std/event/fs.zig @@ -76,12 +76,8 @@ pub const Request = struct { pub const PWriteVError = error{OutOfMemory} || File.WriteError; -/// data - just the inner references - must live until pwritev promise completes. +/// data - just the inner references - must live until pwritev frame completes. 
pub async fn pwritev(loop: *Loop, fd: fd_t, data: []const []const u8, offset: usize) PWriteVError!void { - // workaround for https://github.com/ziglang/zig/issues/1194 - suspend { - resume @handle(); - } switch (builtin.os) { .macosx, .linux, @@ -109,7 +105,7 @@ pub async fn pwritev(loop: *Loop, fd: fd_t, data: []const []const u8, offset: us } } -/// data must outlive the returned promise +/// data must outlive the returned frame pub async fn pwritevWindows(loop: *Loop, fd: fd_t, data: []const []const u8, offset: usize) os.WindowsWriteError!void { if (data.len == 0) return; if (data.len == 1) return await (async pwriteWindows(loop, fd, data[0], offset) catch unreachable); @@ -123,15 +119,10 @@ pub async fn pwritevWindows(loop: *Loop, fd: fd_t, data: []const []const u8, off } pub async fn pwriteWindows(loop: *Loop, fd: fd_t, data: []const u8, offset: u64) os.WindowsWriteError!void { - // workaround for https://github.com/ziglang/zig/issues/1194 - suspend { - resume @handle(); - } - var resume_node = Loop.ResumeNode.Basic{ .base = Loop.ResumeNode{ .id = Loop.ResumeNode.Id.Basic, - .handle = @handle(), + .handle = @frame(), .overlapped = windows.OVERLAPPED{ .Internal = 0, .InternalHigh = 0, @@ -166,18 +157,13 @@ pub async fn pwriteWindows(loop: *Loop, fd: fd_t, data: []const u8, offset: u64) } } -/// iovecs must live until pwritev promise completes. +/// iovecs must live until pwritev frame completes. 
pub async fn pwritevPosix( loop: *Loop, fd: fd_t, iovecs: []const os.iovec_const, offset: usize, ) os.WriteError!void { - // workaround for https://github.com/ziglang/zig/issues/1194 - suspend { - resume @handle(); - } - var req_node = RequestNode{ .prev = null, .next = null, @@ -194,7 +180,7 @@ pub async fn pwritevPosix( .TickNode = Loop.NextTickNode{ .prev = null, .next = null, - .data = @handle(), + .data = @frame(), }, }, }, @@ -211,13 +197,8 @@ pub async fn pwritevPosix( pub const PReadVError = error{OutOfMemory} || File.ReadError; -/// data - just the inner references - must live until preadv promise completes. +/// data - just the inner references - must live until preadv frame completes. pub async fn preadv(loop: *Loop, fd: fd_t, data: []const []u8, offset: usize) PReadVError!usize { - // workaround for https://github.com/ziglang/zig/issues/1194 - suspend { - resume @handle(); - } - assert(data.len != 0); switch (builtin.os) { .macosx, @@ -246,7 +227,7 @@ pub async fn preadv(loop: *Loop, fd: fd_t, data: []const []u8, offset: usize) PR } } -/// data must outlive the returned promise +/// data must outlive the returned frame pub async fn preadvWindows(loop: *Loop, fd: fd_t, data: []const []u8, offset: u64) !usize { assert(data.len != 0); if (data.len == 1) return await (async preadWindows(loop, fd, data[0], offset) catch unreachable); @@ -272,15 +253,10 @@ pub async fn preadvWindows(loop: *Loop, fd: fd_t, data: []const []u8, offset: u6 } pub async fn preadWindows(loop: *Loop, fd: fd_t, data: []u8, offset: u64) !usize { - // workaround for https://github.com/ziglang/zig/issues/1194 - suspend { - resume @handle(); - } - var resume_node = Loop.ResumeNode.Basic{ .base = Loop.ResumeNode{ .id = Loop.ResumeNode.Id.Basic, - .handle = @handle(), + .handle = @frame(), .overlapped = windows.OVERLAPPED{ .Internal = 0, .InternalHigh = 0, @@ -314,18 +290,13 @@ pub async fn preadWindows(loop: *Loop, fd: fd_t, data: []u8, offset: u64) !usize return usize(bytes_transferred); 
} -/// iovecs must live until preadv promise completes +/// iovecs must live until preadv frame completes pub async fn preadvPosix( loop: *Loop, fd: fd_t, iovecs: []const os.iovec, offset: usize, ) os.ReadError!usize { - // workaround for https://github.com/ziglang/zig/issues/1194 - suspend { - resume @handle(); - } - var req_node = RequestNode{ .prev = null, .next = null, @@ -342,7 +313,7 @@ pub async fn preadvPosix( .TickNode = Loop.NextTickNode{ .prev = null, .next = null, - .data = @handle(), + .data = @frame(), }, }, }, @@ -363,11 +334,6 @@ pub async fn openPosix( flags: u32, mode: File.Mode, ) File.OpenError!fd_t { - // workaround for https://github.com/ziglang/zig/issues/1194 - suspend { - resume @handle(); - } - const path_c = try std.os.toPosixPath(path); var req_node = RequestNode{ @@ -386,7 +352,7 @@ pub async fn openPosix( .TickNode = Loop.NextTickNode{ .prev = null, .next = null, - .data = @handle(), + .data = @frame(), }, }, }, @@ -643,11 +609,6 @@ async fn writeFileWindows(loop: *Loop, path: []const u8, contents: []const u8) ! } async fn writeFileModeThread(loop: *Loop, path: []const u8, contents: []const u8, mode: File.Mode) !void { - // workaround for https://github.com/ziglang/zig/issues/1194 - suspend { - resume @handle(); - } - const path_with_null = try std.cstr.addNullByte(loop.allocator, path); defer loop.allocator.free(path_with_null); @@ -667,7 +628,7 @@ async fn writeFileModeThread(loop: *Loop, path: []const u8, contents: []const u8 .TickNode = Loop.NextTickNode{ .prev = null, .next = null, - .data = @handle(), + .data = @frame(), }, }, }, @@ -682,7 +643,7 @@ async fn writeFileModeThread(loop: *Loop, path: []const u8, contents: []const u8 return req_node.data.msg.WriteFile.result; } -/// The promise resumes when the last data has been confirmed written, but before the file handle +/// The frame resumes when the last data has been confirmed written, but before the file handle /// is closed. /// Caller owns returned memory. 
pub async fn readFile(loop: *Loop, file_path: []const u8, max_size: usize) ![]u8 { @@ -734,7 +695,7 @@ pub const WatchEventId = enum { // // const FileTable = std.AutoHashMap([]const u8, *Put); // const Put = struct { -// putter: promise, +// putter: anyframe, // value_ptr: *V, // }; // }, @@ -748,21 +709,21 @@ pub const WatchEventId = enum { // const WindowsOsData = struct { // table_lock: event.Lock, // dir_table: DirTable, -// all_putters: std.atomic.Queue(promise), +// all_putters: std.atomic.Queue(anyframe), // ref_count: std.atomic.Int(usize), // // const DirTable = std.AutoHashMap([]const u8, *Dir); // const FileTable = std.AutoHashMap([]const u16, V); // // const Dir = struct { -// putter: promise, +// putter: anyframe, // file_table: FileTable, // table_lock: event.Lock, // }; // }; // // const LinuxOsData = struct { -// putter: promise, +// putter: anyframe, // inotify_fd: i32, // wd_table: WdTable, // table_lock: event.Lock, @@ -776,7 +737,7 @@ pub const WatchEventId = enum { // }; // }; // -// const FileToHandle = std.AutoHashMap([]const u8, promise); +// const FileToHandle = std.AutoHashMap([]const u8, anyframe); // // const Self = @This(); // @@ -811,7 +772,7 @@ pub const WatchEventId = enum { // .table_lock = event.Lock.init(loop), // .dir_table = OsData.DirTable.init(loop.allocator), // .ref_count = std.atomic.Int(usize).init(1), -// .all_putters = std.atomic.Queue(promise).init(), +// .all_putters = std.atomic.Queue(anyframe).init(), // }, // }; // return self; @@ -926,14 +887,9 @@ pub const WatchEventId = enum { // } // // async fn kqPutEvents(self: *Self, close_op: *CloseOperation, value: V, out_put: **OsData.Put) void { -// // TODO https://github.com/ziglang/zig/issues/1194 -// suspend { -// resume @handle(); -// } -// // var value_copy = value; // var put = OsData.Put{ -// .putter = @handle(), +// .putter = @frame(), // .value_ptr = &value_copy, // }; // out_put.* = &put; @@ -1091,18 +1047,13 @@ pub const WatchEventId = enum { // } // // async 
fn windowsDirReader(self: *Self, dir_handle: windows.HANDLE, dir: *OsData.Dir) void { -// // TODO https://github.com/ziglang/zig/issues/1194 -// suspend { -// resume @handle(); -// } -// // self.ref(); // defer self.deref(); // // defer os.close(dir_handle); // -// var putter_node = std.atomic.Queue(promise).Node{ -// .data = @handle(), +// var putter_node = std.atomic.Queue(anyframe).Node{ +// .data = @frame(), // .prev = null, // .next = null, // }; @@ -1112,7 +1063,7 @@ pub const WatchEventId = enum { // var resume_node = Loop.ResumeNode.Basic{ // .base = Loop.ResumeNode{ // .id = Loop.ResumeNode.Id.Basic, -// .handle = @handle(), +// .handle = @frame(), // .overlapped = windows.OVERLAPPED{ // .Internal = 0, // .InternalHigh = 0, @@ -1207,17 +1158,12 @@ pub const WatchEventId = enum { // } // // async fn linuxEventPutter(inotify_fd: i32, channel: *event.Channel(Event.Error!Event), out_watch: **Self) void { -// // TODO https://github.com/ziglang/zig/issues/1194 -// suspend { -// resume @handle(); -// } -// // const loop = channel.loop; // // var watch = Self{ // .channel = channel, // .os_data = OsData{ -// .putter = @handle(), +// .putter = @frame(), // .inotify_fd = inotify_fd, // .wd_table = OsData.WdTable.init(loop.allocator), // .table_lock = event.Lock.init(loop), diff --git a/std/event/future.zig b/std/event/future.zig index 2e62ace978..11a4c82fb0 100644 --- a/std/event/future.zig +++ b/std/event/future.zig @@ -2,8 +2,6 @@ const std = @import("../std.zig"); const assert = std.debug.assert; const testing = std.testing; const builtin = @import("builtin"); -const AtomicRmwOp = builtin.AtomicRmwOp; -const AtomicOrder = builtin.AtomicOrder; const Lock = std.event.Lock; const Loop = std.event.Loop; @@ -23,7 +21,7 @@ pub fn Future(comptime T: type) type { available: u8, const Self = @This(); - const Queue = std.atomic.Queue(promise); + const Queue = std.atomic.Queue(anyframe); pub fn init(loop: *Loop) Self { return Self{ @@ -37,10 +35,10 @@ pub fn Future(comptime 
T: type) type { /// available. /// Thread-safe. pub async fn get(self: *Self) *T { - if (@atomicLoad(u8, &self.available, AtomicOrder.SeqCst) == 2) { + if (@atomicLoad(u8, &self.available, .SeqCst) == 2) { return &self.data; } - const held = await (async self.lock.acquire() catch unreachable); + const held = self.lock.acquire(); held.release(); return &self.data; @@ -49,7 +47,7 @@ pub fn Future(comptime T: type) type { /// Gets the data without waiting for it. If it's available, a pointer is /// returned. Otherwise, null is returned. pub fn getOrNull(self: *Self) ?*T { - if (@atomicLoad(u8, &self.available, AtomicOrder.SeqCst) == 2) { + if (@atomicLoad(u8, &self.available, .SeqCst) == 2) { return &self.data; } else { return null; @@ -62,10 +60,10 @@ pub fn Future(comptime T: type) type { /// It's not required to call start() before resolve() but it can be useful since /// this method is thread-safe. pub async fn start(self: *Self) ?*T { - const state = @cmpxchgStrong(u8, &self.available, 0, 1, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return null; + const state = @cmpxchgStrong(u8, &self.available, 0, 1, .SeqCst, .SeqCst) orelse return null; switch (state) { 1 => { - const held = await (async self.lock.acquire() catch unreachable); + const held = self.lock.acquire(); held.release(); return &self.data; }, @@ -77,7 +75,7 @@ pub fn Future(comptime T: type) type { /// Make the data become available. May be called only once. /// Before calling this, modify the `data` property. 
pub fn resolve(self: *Self) void { - const prev = @atomicRmw(u8, &self.available, AtomicRmwOp.Xchg, 2, AtomicOrder.SeqCst); + const prev = @atomicRmw(u8, &self.available, .Xchg, 2, .SeqCst); assert(prev == 0 or prev == 1); // resolve() called twice Lock.Held.release(Lock.Held{ .lock = &self.lock }); } @@ -86,7 +84,7 @@ pub fn Future(comptime T: type) type { test "std.event.Future" { // https://github.com/ziglang/zig/issues/1908 - if (builtin.single_threaded or builtin.os != builtin.Os.linux) return error.SkipZigTest; + if (builtin.single_threaded) return error.SkipZigTest; const allocator = std.heap.direct_allocator; @@ -94,38 +92,33 @@ test "std.event.Future" { try loop.initMultiThreaded(allocator); defer loop.deinit(); - const handle = try async testFuture(&loop); - defer cancel handle; + const handle = async testFuture(&loop); loop.run(); } async fn testFuture(loop: *Loop) void { - suspend { - resume @handle(); - } var future = Future(i32).init(loop); - const a = async waitOnFuture(&future) catch @panic("memory"); - const b = async waitOnFuture(&future) catch @panic("memory"); - const c = async resolveFuture(&future) catch @panic("memory"); + const a = async waitOnFuture(&future); + const b = async waitOnFuture(&future); + const c = async resolveFuture(&future); + + // TODO make this work: + //const result = (await a) + (await b); + const a_result = await a; + const b_result = await b; + const result = a_result + b_result; - const result = (await a) + (await b); cancel c; testing.expect(result == 12); } async fn waitOnFuture(future: *Future(i32)) i32 { - suspend { - resume @handle(); - } - return (await (async future.get() catch @panic("memory"))).*; + return future.get().*; } async fn resolveFuture(future: *Future(i32)) void { - suspend { - resume @handle(); - } future.data = 6; future.resolve(); } diff --git a/std/event/group.zig b/std/event/group.zig index 36235eed74..ab6d592278 100644 --- a/std/event/group.zig +++ b/std/event/group.zig @@ -2,8 +2,6 @@ const 
std = @import("../std.zig"); const builtin = @import("builtin"); const Lock = std.event.Lock; const Loop = std.event.Loop; -const AtomicRmwOp = builtin.AtomicRmwOp; -const AtomicOrder = builtin.AtomicOrder; const testing = std.testing; /// ReturnType must be `void` or `E!void` @@ -16,10 +14,10 @@ pub fn Group(comptime ReturnType: type) type { const Self = @This(); const Error = switch (@typeInfo(ReturnType)) { - builtin.TypeId.ErrorUnion => |payload| payload.error_set, + .ErrorUnion => |payload| payload.error_set, else => void, }; - const Stack = std.atomic.Stack(promise->ReturnType); + const Stack = std.atomic.Stack(anyframe->ReturnType); pub fn init(loop: *Loop) Self { return Self{ @@ -29,7 +27,7 @@ pub fn Group(comptime ReturnType: type) type { }; } - /// Cancel all the outstanding promises. Can be called even if wait was already called. + /// Cancel all the outstanding frames. Can be called even if wait was already called. pub fn deinit(self: *Self) void { while (self.coro_stack.pop()) |node| { cancel node.data; @@ -40,8 +38,8 @@ pub fn Group(comptime ReturnType: type) type { } } - /// Add a promise to the group. Thread-safe. - pub fn add(self: *Self, handle: promise->ReturnType) (error{OutOfMemory}!void) { + /// Add a frame to the group. Thread-safe. + pub fn add(self: *Self, handle: anyframe->ReturnType) (error{OutOfMemory}!void) { const node = try self.lock.loop.allocator.create(Stack.Node); node.* = Stack.Node{ .next = undefined, @@ -51,7 +49,7 @@ pub fn Group(comptime ReturnType: type) type { } /// Add a node to the group. Thread-safe. Cannot fail. - /// `node.data` should be the promise handle to add to the group. + /// `node.data` should be the frame handle to add to the group. /// The node's memory should be in the coroutine frame of /// the handle that is in the node, or somewhere guaranteed to live /// at least as long. 
@@ -59,40 +57,11 @@ pub fn Group(comptime ReturnType: type) type { self.coro_stack.push(node); } - /// This is equivalent to an async call, but the async function is added to the group, instead - /// of returning a promise. func must be async and have return type ReturnType. - /// Thread-safe. - pub fn call(self: *Self, comptime func: var, args: ...) (error{OutOfMemory}!void) { - const S = struct { - async fn asyncFunc(node: **Stack.Node, args2: ...) ReturnType { - // TODO this is a hack to make the memory following be inside the coro frame - suspend { - var my_node: Stack.Node = undefined; - node.* = &my_node; - resume @handle(); - } - - // TODO this allocation elision should be guaranteed because we await it in - // this coro frame - return await (async func(args2) catch unreachable); - } - }; - var node: *Stack.Node = undefined; - const handle = try async S.asyncFunc(&node, args); - node.* = Stack.Node{ - .next = undefined, - .data = handle, - }; - self.coro_stack.push(node); - } - /// Wait for all the calls and promises of the group to complete. /// Thread-safe. /// Safe to call any number of times. 
pub async fn wait(self: *Self) ReturnType { - // TODO catch unreachable because the allocation can be grouped with - // the coro frame allocation - const held = await (async self.lock.acquire() catch unreachable); + const held = self.lock.acquire(); defer held.release(); while (self.coro_stack.pop()) |node| { @@ -131,8 +100,7 @@ test "std.event.Group" { try loop.initMultiThreaded(allocator); defer loop.deinit(); - const handle = try async testGroup(&loop); - defer cancel handle; + const handle = async testGroup(&loop); loop.run(); } @@ -140,26 +108,30 @@ test "std.event.Group" { async fn testGroup(loop: *Loop) void { var count: usize = 0; var group = Group(void).init(loop); - group.add(async sleepALittle(&count) catch @panic("memory")) catch @panic("memory"); - group.call(increaseByTen, &count) catch @panic("memory"); - await (async group.wait() catch @panic("memory")); + var sleep_a_little_frame = async sleepALittle(&count); + group.add(&sleep_a_little_frame) catch @panic("memory"); + var increase_by_ten_frame = async increaseByTen(&count); + group.add(&increase_by_ten_frame) catch @panic("memory"); + group.wait(); testing.expect(count == 11); var another = Group(anyerror!void).init(loop); - another.add(async somethingElse() catch @panic("memory")) catch @panic("memory"); - another.call(doSomethingThatFails) catch @panic("memory"); - testing.expectError(error.ItBroke, await (async another.wait() catch @panic("memory"))); + var something_else_frame = async somethingElse(); + another.add(&something_else_frame) catch @panic("memory"); + var something_that_fails_frame = async doSomethingThatFails(); + another.add(&something_that_fails_frame) catch @panic("memory"); + testing.expectError(error.ItBroke, another.wait()); } async fn sleepALittle(count: *usize) void { std.time.sleep(1 * std.time.millisecond); - _ = @atomicRmw(usize, count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); + _ = @atomicRmw(usize, count, .Add, 1, .SeqCst); } async fn increaseByTen(count: *usize) void 
{ var i: usize = 0; while (i < 10) : (i += 1) { - _ = @atomicRmw(usize, count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); + _ = @atomicRmw(usize, count, .Add, 1, .SeqCst); } } diff --git a/std/event/io.zig b/std/event/io.zig index 29419a792e..4b54822e68 100644 --- a/std/event/io.zig +++ b/std/event/io.zig @@ -1,6 +1,5 @@ const std = @import("../std.zig"); const builtin = @import("builtin"); -const Allocator = std.mem.Allocator; const assert = std.debug.assert; const mem = std.mem; @@ -12,13 +11,13 @@ pub fn InStream(comptime ReadError: type) type { /// Return the number of bytes read. It may be less than buffer.len. /// If the number of bytes read is 0, it means end of stream. /// End of stream is not an error condition. - readFn: async<*Allocator> fn (self: *Self, buffer: []u8) Error!usize, + readFn: async fn (self: *Self, buffer: []u8) Error!usize, /// Return the number of bytes read. It may be less than buffer.len. /// If the number of bytes read is 0, it means end of stream. /// End of stream is not an error condition. pub async fn read(self: *Self, buffer: []u8) !usize { - return await (async self.readFn(self, buffer) catch unreachable); + return self.readFn(self, buffer); } /// Return the number of bytes read. If it is less than buffer.len @@ -26,7 +25,7 @@ pub fn InStream(comptime ReadError: type) type { pub async fn readFull(self: *Self, buffer: []u8) !usize { var index: usize = 0; while (index != buf.len) { - const amt_read = try await (async self.read(buf[index..]) catch unreachable); + const amt_read = try self.read(buf[index..]); if (amt_read == 0) return index; index += amt_read; } @@ -35,25 +34,25 @@ pub fn InStream(comptime ReadError: type) type { /// Same as `readFull` but end of stream returns `error.EndOfStream`. 
pub async fn readNoEof(self: *Self, buf: []u8) !void { - const amt_read = try await (async self.readFull(buf[index..]) catch unreachable); + const amt_read = try self.readFull(buf[index..]); if (amt_read < buf.len) return error.EndOfStream; } pub async fn readIntLittle(self: *Self, comptime T: type) !T { var bytes: [@sizeOf(T)]u8 = undefined; - try await (async self.readNoEof(bytes[0..]) catch unreachable); + try self.readNoEof(bytes[0..]); return mem.readIntLittle(T, &bytes); } pub async fn readIntBe(self: *Self, comptime T: type) !T { var bytes: [@sizeOf(T)]u8 = undefined; - try await (async self.readNoEof(bytes[0..]) catch unreachable); + try self.readNoEof(bytes[0..]); return mem.readIntBig(T, &bytes); } pub async fn readInt(self: *Self, comptime T: type, endian: builtin.Endian) !T { var bytes: [@sizeOf(T)]u8 = undefined; - try await (async self.readNoEof(bytes[0..]) catch unreachable); + try self.readNoEof(bytes[0..]); return mem.readInt(T, &bytes, endian); } @@ -61,7 +60,7 @@ pub fn InStream(comptime ReadError: type) type { // Only extern and packed structs have defined in-memory layout. 
comptime assert(@typeInfo(T).Struct.layout != builtin.TypeInfo.ContainerLayout.Auto); var res: [1]T = undefined; - try await (async self.readNoEof(@sliceToBytes(res[0..])) catch unreachable); + try self.readNoEof(@sliceToBytes(res[0..])); return res[0]; } }; @@ -72,6 +71,6 @@ pub fn OutStream(comptime WriteError: type) type { const Self = @This(); pub const Error = WriteError; - writeFn: async<*Allocator> fn (self: *Self, buffer: []u8) Error!void, + writeFn: async fn (self: *Self, buffer: []u8) Error!void, }; } diff --git a/std/event/lock.zig b/std/event/lock.zig index d86902cc06..8f2dac008c 100644 --- a/std/event/lock.zig +++ b/std/event/lock.zig @@ -3,8 +3,6 @@ const builtin = @import("builtin"); const assert = std.debug.assert; const testing = std.testing; const mem = std.mem; -const AtomicRmwOp = builtin.AtomicRmwOp; -const AtomicOrder = builtin.AtomicOrder; const Loop = std.event.Loop; /// Thread-safe async/await lock. @@ -17,7 +15,7 @@ pub const Lock = struct { queue: Queue, queue_empty_bit: u8, // TODO make this a bool - const Queue = std.atomic.Queue(promise); + const Queue = std.atomic.Queue(anyframe); pub const Held = struct { lock: *Lock, @@ -30,19 +28,19 @@ pub const Lock = struct { } // We need to release the lock. - _ = @atomicRmw(u8, &self.lock.queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); - _ = @atomicRmw(u8, &self.lock.shared_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst); + _ = @atomicRmw(u8, &self.lock.queue_empty_bit, .Xchg, 1, .SeqCst); + _ = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 0, .SeqCst); // There might be a queue item. If we know the queue is empty, we can be done, // because the other actor will try to obtain the lock. // But if there's a queue item, we are the actor which must loop and attempt // to grab the lock again. 
- if (@atomicLoad(u8, &self.lock.queue_empty_bit, AtomicOrder.SeqCst) == 1) { + if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) { return; } while (true) { - const old_bit = @atomicRmw(u8, &self.lock.shared_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); + const old_bit = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 1, .SeqCst); if (old_bit != 0) { // We did not obtain the lock. Great, the queue is someone else's problem. return; @@ -55,11 +53,11 @@ pub const Lock = struct { } // Release the lock again. - _ = @atomicRmw(u8, &self.lock.queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); - _ = @atomicRmw(u8, &self.lock.shared_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst); + _ = @atomicRmw(u8, &self.lock.queue_empty_bit, .Xchg, 1, .SeqCst); + _ = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 0, .SeqCst); // Find out if we can be done. - if (@atomicLoad(u8, &self.lock.queue_empty_bit, AtomicOrder.SeqCst) == 1) { + if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) { return; } } @@ -88,15 +86,11 @@ pub const Lock = struct { /// All calls to acquire() and release() must complete before calling deinit(). pub fn deinit(self: *Lock) void { assert(self.shared_bit == 0); - while (self.queue.get()) |node| cancel node.data; + while (self.queue.get()) |node| resume node.data; } pub async fn acquire(self: *Lock) Held { - // TODO explicitly put this memory in the coroutine frame #1194 - suspend { - resume @handle(); - } - var my_tick_node = Loop.NextTickNode.init(@handle()); + var my_tick_node = Loop.NextTickNode.init(@frame()); errdefer _ = self.queue.remove(&my_tick_node); // TODO test canceling an acquire suspend { @@ -107,9 +101,9 @@ pub const Lock = struct { // We set this bit so that later we can rely on the fact, that if queue_empty_bit is 1, some actor // will attempt to grab the lock. 
- _ = @atomicRmw(u8, &self.queue_empty_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst); + _ = @atomicRmw(u8, &self.queue_empty_bit, .Xchg, 0, .SeqCst); - const old_bit = @atomicRmw(u8, &self.shared_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); + const old_bit = @atomicRmw(u8, &self.shared_bit, .Xchg, 1, .SeqCst); if (old_bit == 0) { if (self.queue.get()) |node| { // Whether this node is us or someone else, we tail resume it. @@ -123,8 +117,7 @@ pub const Lock = struct { }; test "std.event.Lock" { - // TODO https://github.com/ziglang/zig/issues/2377 - if (true) return error.SkipZigTest; + // TODO https://github.com/ziglang/zig/issues/1908 if (builtin.single_threaded) return error.SkipZigTest; const allocator = std.heap.direct_allocator; @@ -136,39 +129,34 @@ test "std.event.Lock" { var lock = Lock.init(&loop); defer lock.deinit(); - const handle = try async testLock(&loop, &lock); - defer cancel handle; + _ = async testLock(&loop, &lock); loop.run(); testing.expectEqualSlices(i32, [1]i32{3 * @intCast(i32, shared_test_data.len)} ** shared_test_data.len, shared_test_data); } async fn testLock(loop: *Loop, lock: *Lock) void { - // TODO explicitly put next tick node memory in the coroutine frame #1194 - suspend { - resume @handle(); - } - const handle1 = async lockRunner(lock) catch @panic("out of memory"); + const handle1 = async lockRunner(lock); var tick_node1 = Loop.NextTickNode{ .prev = undefined, .next = undefined, - .data = handle1, + .data = &handle1, }; loop.onNextTick(&tick_node1); - const handle2 = async lockRunner(lock) catch @panic("out of memory"); + const handle2 = async lockRunner(lock); var tick_node2 = Loop.NextTickNode{ .prev = undefined, .next = undefined, - .data = handle2, + .data = &handle2, }; loop.onNextTick(&tick_node2); - const handle3 = async lockRunner(lock) catch @panic("out of memory"); + const handle3 = async lockRunner(lock); var tick_node3 = Loop.NextTickNode{ .prev = undefined, .next = undefined, - .data = handle3, + .data = &handle3, 
}; loop.onNextTick(&tick_node3); @@ -185,7 +173,7 @@ async fn lockRunner(lock: *Lock) void { var i: usize = 0; while (i < shared_test_data.len) : (i += 1) { - const lock_promise = async lock.acquire() catch @panic("out of memory"); + const lock_promise = async lock.acquire(); const handle = await lock_promise; defer handle.release(); diff --git a/std/event/loop.zig b/std/event/loop.zig index a4a60b5098..f1febd3fdb 100644 --- a/std/event/loop.zig +++ b/std/event/loop.zig @@ -457,7 +457,7 @@ pub const Loop = struct { var resume_node = ResumeNode.Basic{ .base = ResumeNode{ .id = ResumeNode.Id.Basic, - .handle = @handle(), + .handle = @frame(), .overlapped = ResumeNode.overlapped_init, }, }; @@ -469,7 +469,7 @@ pub const Loop = struct { var resume_node = ResumeNode.Basic{ .base = ResumeNode{ .id = ResumeNode.Id.Basic, - .handle = @handle(), + .handle = @frame(), .overlapped = ResumeNode.overlapped_init, }, .kev = undefined, diff --git a/std/event/net.zig b/std/event/net.zig index 46b724e32e..3752c88e99 100644 --- a/std/event/net.zig +++ b/std/event/net.zig @@ -9,17 +9,17 @@ const File = std.fs.File; const fd_t = os.fd_t; pub const Server = struct { - handleRequestFn: async<*mem.Allocator> fn (*Server, *const std.net.Address, File) void, + handleRequestFn: async fn (*Server, *const std.net.Address, File) void, loop: *Loop, sockfd: ?i32, - accept_coro: ?promise, + accept_coro: ?anyframe, listen_address: std.net.Address, waiting_for_emfile_node: PromiseNode, listen_resume_node: event.Loop.ResumeNode, - const PromiseNode = std.TailQueue(promise).Node; + const PromiseNode = std.TailQueue(anyframe).Node; pub fn init(loop: *Loop) Server { // TODO can't initialize handler coroutine here because we need well defined copy elision @@ -41,7 +41,7 @@ pub const Server = struct { pub fn listen( self: *Server, address: *const std.net.Address, - handleRequestFn: async<*mem.Allocator> fn (*Server, *const std.net.Address, File) void, + handleRequestFn: async fn (*Server, *const 
std.net.Address, File) void, ) !void { self.handleRequestFn = handleRequestFn; @@ -53,7 +53,7 @@ pub const Server = struct { try os.listen(sockfd, os.SOMAXCONN); self.listen_address = std.net.Address.initPosix(try os.getsockname(sockfd)); - self.accept_coro = try async Server.handler(self); + self.accept_coro = async Server.handler(self); errdefer cancel self.accept_coro.?; self.listen_resume_node.handle = self.accept_coro.?; @@ -86,12 +86,7 @@ pub const Server = struct { continue; } var socket = File.openHandle(accepted_fd); - _ = async self.handleRequestFn(self, &accepted_addr, socket) catch |err| switch (err) { - error.OutOfMemory => { - socket.close(); - continue; - }, - }; + self.handleRequestFn(self, &accepted_addr, socket); } else |err| switch (err) { error.ProcessFdQuotaExceeded => @panic("TODO handle this error"), error.ConnectionAborted => continue, @@ -124,7 +119,7 @@ pub async fn connectUnixSocket(loop: *Loop, path: []const u8) !i32 { mem.copy(u8, sock_addr.path[0..], path); const size = @intCast(u32, @sizeOf(os.sa_family_t) + path.len); try os.connect_async(sockfd, &sock_addr, size); - try await try async loop.linuxWaitFd(sockfd, os.EPOLLIN | os.EPOLLOUT | os.EPOLLET); + try loop.linuxWaitFd(sockfd, os.EPOLLIN | os.EPOLLOUT | os.EPOLLET); try os.getsockoptError(sockfd); return sockfd; @@ -149,7 +144,7 @@ pub async fn read(loop: *std.event.Loop, fd: fd_t, buffer: []u8) ReadError!usize .iov_len = buffer.len, }; const iovs: *const [1]os.iovec = &iov; - return await (async readvPosix(loop, fd, iovs, 1) catch unreachable); + return readvPosix(loop, fd, iovs, 1); } pub const WriteError = error{}; @@ -160,7 +155,7 @@ pub async fn write(loop: *std.event.Loop, fd: fd_t, buffer: []const u8) WriteErr .iov_len = buffer.len, }; const iovs: *const [1]os.iovec_const = &iov; - return await (async writevPosix(loop, fd, iovs, 1) catch unreachable); + return writevPosix(loop, fd, iovs, 1); } pub async fn writevPosix(loop: *Loop, fd: i32, iov: [*]const os.iovec_const, 
count: usize) !void { @@ -174,7 +169,7 @@ pub async fn writevPosix(loop: *Loop, fd: i32, iov: [*]const os.iovec_const, cou os.EINVAL => unreachable, os.EFAULT => unreachable, os.EAGAIN => { - try await (async loop.linuxWaitFd(fd, os.EPOLLET | os.EPOLLOUT) catch unreachable); + try loop.linuxWaitFd(fd, os.EPOLLET | os.EPOLLOUT); continue; }, os.EBADF => unreachable, // always a race condition @@ -205,7 +200,7 @@ pub async fn readvPosix(loop: *std.event.Loop, fd: i32, iov: [*]os.iovec, count: os.EINVAL => unreachable, os.EFAULT => unreachable, os.EAGAIN => { - try await (async loop.linuxWaitFd(fd, os.EPOLLET | os.EPOLLIN) catch unreachable); + try loop.linuxWaitFd(fd, os.EPOLLET | os.EPOLLIN); continue; }, os.EBADF => unreachable, // always a race condition @@ -232,7 +227,7 @@ pub async fn writev(loop: *Loop, fd: fd_t, data: []const []const u8) !void { }; } - return await (async writevPosix(loop, fd, iovecs.ptr, data.len) catch unreachable); + return writevPosix(loop, fd, iovecs.ptr, data.len); } pub async fn readv(loop: *Loop, fd: fd_t, data: []const []u8) !usize { @@ -246,7 +241,7 @@ pub async fn readv(loop: *Loop, fd: fd_t, data: []const []u8) !usize { }; } - return await (async readvPosix(loop, fd, iovecs.ptr, data.len) catch unreachable); + return readvPosix(loop, fd, iovecs.ptr, data.len); } pub async fn connect(loop: *Loop, _address: *const std.net.Address) !File { @@ -256,7 +251,7 @@ pub async fn connect(loop: *Loop, _address: *const std.net.Address) !File { errdefer os.close(sockfd); try os.connect_async(sockfd, &address.os_addr, @sizeOf(os.sockaddr_in)); - try await try async loop.linuxWaitFd(sockfd, os.EPOLLIN | os.EPOLLOUT | os.EPOLLET); + try loop.linuxWaitFd(sockfd, os.EPOLLIN | os.EPOLLOUT | os.EPOLLET); try os.getsockoptError(sockfd); return File.openHandle(sockfd); @@ -275,17 +270,16 @@ test "listen on a port, send bytes, receive bytes" { tcp_server: Server, const Self = @This(); - async<*mem.Allocator> fn handler(tcp_server: *Server, _addr: *const 
std.net.Address, _socket: File) void { + async fn handler(tcp_server: *Server, _addr: *const std.net.Address, _socket: File) void { const self = @fieldParentPtr(Self, "tcp_server", tcp_server); var socket = _socket; // TODO https://github.com/ziglang/zig/issues/1592 defer socket.close(); // TODO guarantee elision of this allocation - const next_handler = async errorableHandler(self, _addr, socket) catch unreachable; - (await next_handler) catch |err| { + const next_handler = errorableHandler(self, _addr, socket) catch |err| { std.debug.panic("unable to handle connection: {}\n", err); }; suspend { - cancel @handle(); + cancel @frame(); } } async fn errorableHandler(self: *Self, _addr: *const std.net.Address, _socket: File) !void { @@ -306,15 +300,14 @@ test "listen on a port, send bytes, receive bytes" { defer server.tcp_server.deinit(); try server.tcp_server.listen(&addr, MyServer.handler); - const p = try async doAsyncTest(&loop, &server.tcp_server.listen_address, &server.tcp_server); - defer cancel p; + _ = async doAsyncTest(&loop, &server.tcp_server.listen_address, &server.tcp_server); loop.run(); } async fn doAsyncTest(loop: *Loop, address: *const std.net.Address, server: *Server) void { errdefer @panic("test failure"); - var socket_file = try await try async connect(loop, address); + var socket_file = try connect(loop, address); defer socket_file.close(); var buf: [512]u8 = undefined; @@ -340,9 +333,9 @@ pub const OutStream = struct { }; } - async<*mem.Allocator> fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void { + async fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void { const self = @fieldParentPtr(OutStream, "stream", out_stream); - return await (async write(self.loop, self.fd, bytes) catch unreachable); + return write(self.loop, self.fd, bytes); } }; @@ -362,8 +355,8 @@ pub const InStream = struct { }; } - async<*mem.Allocator> fn readFn(in_stream: *Stream, bytes: []u8) Error!usize { + async fn readFn(in_stream: *Stream, bytes: 
[]u8) Error!usize { const self = @fieldParentPtr(InStream, "stream", in_stream); - return await (async read(self.loop, self.fd, bytes) catch unreachable); + return read(self.loop, self.fd, bytes); } }; diff --git a/std/event/rwlock.zig b/std/event/rwlock.zig index 7b97fa24c1..a5768e5b65 100644 --- a/std/event/rwlock.zig +++ b/std/event/rwlock.zig @@ -3,8 +3,6 @@ const builtin = @import("builtin"); const assert = std.debug.assert; const testing = std.testing; const mem = std.mem; -const AtomicRmwOp = builtin.AtomicRmwOp; -const AtomicOrder = builtin.AtomicOrder; const Loop = std.event.Loop; /// Thread-safe async/await lock. @@ -28,19 +26,19 @@ pub const RwLock = struct { const ReadLock = 2; }; - const Queue = std.atomic.Queue(promise); + const Queue = std.atomic.Queue(anyframe); pub const HeldRead = struct { lock: *RwLock, pub fn release(self: HeldRead) void { // If other readers still hold the lock, we're done. - if (@atomicRmw(usize, &self.lock.reader_lock_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst) != 1) { + if (@atomicRmw(usize, &self.lock.reader_lock_count, .Sub, 1, .SeqCst) != 1) { return; } - _ = @atomicRmw(u8, &self.lock.reader_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); - if (@cmpxchgStrong(u8, &self.lock.shared_state, State.ReadLock, State.Unlocked, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) { + _ = @atomicRmw(u8, &self.lock.reader_queue_empty_bit, .Xchg, 1, .SeqCst); + if (@cmpxchgStrong(u8, &self.lock.shared_state, State.ReadLock, State.Unlocked, .SeqCst, .SeqCst) != null) { // Didn't unlock. Someone else's problem. return; } @@ -61,17 +59,17 @@ pub const RwLock = struct { } // We need to release the write lock. Check if any readers are waiting to grab the lock. - if (@atomicLoad(u8, &self.lock.reader_queue_empty_bit, AtomicOrder.SeqCst) == 0) { + if (@atomicLoad(u8, &self.lock.reader_queue_empty_bit, .SeqCst) == 0) { // Switch to a read lock. 
- _ = @atomicRmw(u8, &self.lock.shared_state, AtomicRmwOp.Xchg, State.ReadLock, AtomicOrder.SeqCst); + _ = @atomicRmw(u8, &self.lock.shared_state, .Xchg, State.ReadLock, .SeqCst); while (self.lock.reader_queue.get()) |node| { self.lock.loop.onNextTick(node); } return; } - _ = @atomicRmw(u8, &self.lock.writer_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); - _ = @atomicRmw(u8, &self.lock.shared_state, AtomicRmwOp.Xchg, State.Unlocked, AtomicOrder.SeqCst); + _ = @atomicRmw(u8, &self.lock.writer_queue_empty_bit, .Xchg, 1, .SeqCst); + _ = @atomicRmw(u8, &self.lock.shared_state, .Xchg, State.Unlocked, .SeqCst); self.lock.commonPostUnlock(); } @@ -93,17 +91,16 @@ pub const RwLock = struct { /// All calls to acquire() and release() must complete before calling deinit(). pub fn deinit(self: *RwLock) void { assert(self.shared_state == State.Unlocked); - while (self.writer_queue.get()) |node| cancel node.data; - while (self.reader_queue.get()) |node| cancel node.data; + while (self.writer_queue.get()) |node| resume node.data; + while (self.reader_queue.get()) |node| resume node.data; } pub async fn acquireRead(self: *RwLock) HeldRead { - _ = @atomicRmw(usize, &self.reader_lock_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); + _ = @atomicRmw(usize, &self.reader_lock_count, .Add, 1, .SeqCst); suspend { - // TODO explicitly put this memory in the coroutine frame #1194 var my_tick_node = Loop.NextTickNode{ - .data = @handle(), + .data = @frame(), .prev = undefined, .next = undefined, }; @@ -115,10 +112,10 @@ pub const RwLock = struct { // We set this bit so that later we can rely on the fact, that if reader_queue_empty_bit is 1, // some actor will attempt to grab the lock. - _ = @atomicRmw(u8, &self.reader_queue_empty_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst); + _ = @atomicRmw(u8, &self.reader_queue_empty_bit, .Xchg, 0, .SeqCst); // Here we don't care if we are the one to do the locking or if it was already locked for reading. 
- const have_read_lock = if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |old_state| old_state == State.ReadLock else true; + const have_read_lock = if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, .SeqCst, .SeqCst)) |old_state| old_state == State.ReadLock else true; if (have_read_lock) { // Give out all the read locks. if (self.reader_queue.get()) |first_node| { @@ -134,9 +131,8 @@ pub const RwLock = struct { pub async fn acquireWrite(self: *RwLock) HeldWrite { suspend { - // TODO explicitly put this memory in the coroutine frame #1194 var my_tick_node = Loop.NextTickNode{ - .data = @handle(), + .data = @frame(), .prev = undefined, .next = undefined, }; @@ -148,10 +144,10 @@ pub const RwLock = struct { // We set this bit so that later we can rely on the fact, that if writer_queue_empty_bit is 1, // some actor will attempt to grab the lock. - _ = @atomicRmw(u8, &self.writer_queue_empty_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst); + _ = @atomicRmw(u8, &self.writer_queue_empty_bit, .Xchg, 0, .SeqCst); // Here we must be the one to acquire the write lock. It cannot already be locked. - if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst) == null) { + if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, .SeqCst, .SeqCst) == null) { // We now have a write lock. if (self.writer_queue.get()) |node| { // Whether this node is us or someone else, we tail resume it. @@ -169,8 +165,8 @@ pub const RwLock = struct { // obtain the lock. // But if there's a writer_queue item or a reader_queue item, // we are the actor which must loop and attempt to grab the lock again. 
- if (@atomicLoad(u8, &self.writer_queue_empty_bit, AtomicOrder.SeqCst) == 0) { - if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) { + if (@atomicLoad(u8, &self.writer_queue_empty_bit, .SeqCst) == 0) { + if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, .SeqCst, .SeqCst) != null) { // We did not obtain the lock. Great, the queues are someone else's problem. return; } @@ -180,13 +176,13 @@ pub const RwLock = struct { return; } // Release the lock again. - _ = @atomicRmw(u8, &self.writer_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); - _ = @atomicRmw(u8, &self.shared_state, AtomicRmwOp.Xchg, State.Unlocked, AtomicOrder.SeqCst); + _ = @atomicRmw(u8, &self.writer_queue_empty_bit, .Xchg, 1, .SeqCst); + _ = @atomicRmw(u8, &self.shared_state, .Xchg, State.Unlocked, .SeqCst); continue; } - if (@atomicLoad(u8, &self.reader_queue_empty_bit, AtomicOrder.SeqCst) == 0) { - if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) { + if (@atomicLoad(u8, &self.reader_queue_empty_bit, .SeqCst) == 0) { + if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, .SeqCst, .SeqCst) != null) { // We did not obtain the lock. Great, the queues are someone else's problem. return; } @@ -199,8 +195,8 @@ pub const RwLock = struct { return; } // Release the lock again. - _ = @atomicRmw(u8, &self.reader_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); - if (@cmpxchgStrong(u8, &self.shared_state, State.ReadLock, State.Unlocked, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) { + _ = @atomicRmw(u8, &self.reader_queue_empty_bit, .Xchg, 1, .SeqCst); + if (@cmpxchgStrong(u8, &self.shared_state, State.ReadLock, State.Unlocked, .SeqCst, .SeqCst) != null) { // Didn't unlock. Someone else's problem. 
return; } @@ -215,6 +211,9 @@ test "std.event.RwLock" { // https://github.com/ziglang/zig/issues/2377 if (true) return error.SkipZigTest; + // https://github.com/ziglang/zig/issues/1908 + if (builtin.single_threaded) return error.SkipZigTest; + const allocator = std.heap.direct_allocator; var loop: Loop = undefined; @@ -224,8 +223,7 @@ test "std.event.RwLock" { var lock = RwLock.init(&loop); defer lock.deinit(); - const handle = try async testLock(&loop, &lock); - defer cancel handle; + const handle = testLock(&loop, &lock); loop.run(); const expected_result = [1]i32{shared_it_count * @intCast(i32, shared_test_data.len)} ** shared_test_data.len; @@ -233,28 +231,31 @@ test "std.event.RwLock" { } async fn testLock(loop: *Loop, lock: *RwLock) void { - // TODO explicitly put next tick node memory in the coroutine frame #1194 - suspend { - resume @handle(); - } - var read_nodes: [100]Loop.NextTickNode = undefined; for (read_nodes) |*read_node| { - read_node.data = async readRunner(lock) catch @panic("out of memory"); + const frame = loop.allocator.create(@Frame(readRunner)) catch @panic("memory"); + read_node.data = frame; + frame.* = async readRunner(lock); loop.onNextTick(read_node); } var write_nodes: [shared_it_count]Loop.NextTickNode = undefined; for (write_nodes) |*write_node| { - write_node.data = async writeRunner(lock) catch @panic("out of memory"); + const frame = loop.allocator.create(@Frame(writeRunner)) catch @panic("memory"); + write_node.data = frame; + frame.* = async writeRunner(lock); loop.onNextTick(write_node); } for (write_nodes) |*write_node| { - await @ptrCast(promise->void, write_node.data); + const casted = @ptrCast(*const @Frame(writeRunner), write_node.data); + await casted; + loop.allocator.destroy(casted); } for (read_nodes) |*read_node| { - await @ptrCast(promise->void, read_node.data); + const casted = @ptrCast(*const @Frame(readRunner), read_node.data); + await casted; + loop.allocator.destroy(casted); } } @@ -269,7 +270,7 @@ async fn 
writeRunner(lock: *RwLock) void { var i: usize = 0; while (i < shared_test_data.len) : (i += 1) { std.time.sleep(100 * std.time.microsecond); - const lock_promise = async lock.acquireWrite() catch @panic("out of memory"); + const lock_promise = async lock.acquireWrite(); const handle = await lock_promise; defer handle.release(); @@ -287,7 +288,7 @@ async fn readRunner(lock: *RwLock) void { var i: usize = 0; while (i < shared_test_data.len) : (i += 1) { - const lock_promise = async lock.acquireRead() catch @panic("out of memory"); + const lock_promise = async lock.acquireRead(); const handle = await lock_promise; defer handle.release(); diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig index 7407528bf5..aec1ef96b5 100644 --- a/std/zig/parser_test.zig +++ b/std/zig/parser_test.zig @@ -1183,7 +1183,7 @@ test "zig fmt: resume from suspend block" { try testCanonical( \\fn foo() void { \\ suspend { - \\ resume @handle(); + \\ resume @frame(); \\ } \\} \\ diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 810e40b18b..835f968e23 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -1403,24 +1403,14 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { ); cases.add( - "@handle() called outside of function definition", - \\var handle_undef: promise = undefined; - \\var handle_dummy: promise = @handle(); + "@frame() called outside of function definition", + \\var handle_undef: anyframe = undefined; + \\var handle_dummy: anyframe = @frame(); \\export fn entry() bool { \\ return handle_undef == handle_dummy; \\} , - "tmp.zig:2:29: error: @handle() called outside of function definition", - ); - - cases.add( - "@handle() in non-async function", - \\export fn entry() bool { - \\ var handle_undef: promise = undefined; - \\ return handle_undef == @handle(); - \\} - , - "tmp.zig:3:28: error: @handle() in non-async function", + "tmp.zig:2:30: error: @frame() called outside of function definition", ); cases.add( @@ -1796,15 +1786,9 
@@ pub fn addCases(cases: *tests.CompileErrorContext) void { cases.add( "suspend inside suspend block", - \\const std = @import("std",); - \\ \\export fn entry() void { - \\ var buf: [500]u8 = undefined; - \\ var a = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator; - \\ const p = (async foo()) catch unreachable; - \\ cancel p; + \\ _ = async foo(); \\} - \\ \\async fn foo() void { \\ suspend { \\ suspend { @@ -1812,8 +1796,8 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ } \\} , - "tmp.zig:12:9: error: cannot suspend inside suspend block", - "tmp.zig:11:5: note: other suspend block here", + "tmp.zig:6:9: error: cannot suspend inside suspend block", + "tmp.zig:5:5: note: other suspend block here", ); cases.add( @@ -1854,15 +1838,14 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { cases.add( "returning error from void async function", - \\const std = @import("std",); \\export fn entry() void { - \\ const p = async amain() catch unreachable; + \\ _ = async amain(); \\} \\async fn amain() void { \\ return error.ShouldBeCompileError; \\} , - "tmp.zig:6:17: error: expected type 'void', found 'error{ShouldBeCompileError}'", + "tmp.zig:5:17: error: expected type 'void', found 'error{ShouldBeCompileError}'", ); cases.add( From 62c5bc6058a64c411c49f84cdbd6994c24c91e00 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 11 Aug 2019 21:14:19 -0400 Subject: [PATCH 093/125] fix cancel invoking branch on undefined memory --- src/codegen.cpp | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/codegen.cpp b/src/codegen.cpp index 4510e7156c..0197f40e5a 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -1163,6 +1163,17 @@ static LLVMValueRef get_return_err_fn(CodeGen *g) { LLVMValueRef return_address_ptr = LLVMBuildCall(g->builder, get_return_address_fn_val(g), &zero, 1, ""); LLVMValueRef return_address = LLVMBuildPtrToInt(g->builder, return_address_ptr, usize_type_ref, ""); + LLVMBasicBlockRef return_block = 
LLVMAppendBasicBlock(fn_val, "Return"); + LLVMBasicBlockRef dest_non_null_block = LLVMAppendBasicBlock(fn_val, "DestNonNull"); + + LLVMValueRef null_dest_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, err_ret_trace_ptr, + LLVMConstNull(LLVMTypeOf(err_ret_trace_ptr)), ""); + LLVMBuildCondBr(g->builder, null_dest_bit, return_block, dest_non_null_block); + + LLVMPositionBuilderAtEnd(g->builder, return_block); + LLVMBuildRetVoid(g->builder); + + LLVMPositionBuilderAtEnd(g->builder, dest_non_null_block); LLVMValueRef args[] = { err_ret_trace_ptr, return_address }; ZigLLVMBuildCall(g->builder, add_error_return_trace_addr_fn_val, args, 2, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAlways, ""); LLVMBuildRetVoid(g->builder); @@ -5497,6 +5508,13 @@ static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrIns LLVMValueRef target_frame_ptr = ir_llvm_value(g, instruction->frame); LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "CancelResume"); + // supply null for the awaiter return pointer (no copy needed) + if (type_has_bits(result_type)) { + LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_ret_start + 1, ""); + LLVMBuildStore(g->builder, LLVMConstNull(LLVMGetElementType(LLVMTypeOf(awaiter_ret_ptr_ptr))), + awaiter_ret_ptr_ptr); + } + // supply null for the error return trace pointer if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) { LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, From 911b1a0428d106923a13aa28933957a88bb1bfb5 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 11 Aug 2019 22:05:09 -0400 Subject: [PATCH 094/125] fix no-longer-correct `nonnull` attribute on merge err ret traces fn --- src/codegen.cpp | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index 0197f40e5a..67689b8cc2 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -1139,9 +1139,6 @@ static LLVMValueRef 
get_return_err_fn(CodeGen *g) { LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified)); addLLVMFnAttr(fn_val, "nounwind"); add_uwtable_attr(g, fn_val); - // Error return trace memory is in the stack, which is impossible to be at address 0 - // on any architecture. - addLLVMArgAttr(fn_val, (unsigned)0, "nonnull"); if (codegen_have_frame_pointer(g)) { ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true"); ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr); @@ -2058,13 +2055,9 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) { LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified)); addLLVMFnAttr(fn_val, "nounwind"); add_uwtable_attr(g, fn_val); - // Error return trace memory is in the stack, which is impossible to be at address 0 - // on any architecture. - addLLVMArgAttr(fn_val, (unsigned)0, "nonnull"); addLLVMArgAttr(fn_val, (unsigned)0, "noalias"); addLLVMArgAttr(fn_val, (unsigned)0, "writeonly"); - // Error return trace memory is in the stack, which is impossible to be at address 0 - // on any architecture. 
+ addLLVMArgAttr(fn_val, (unsigned)1, "nonnull"); addLLVMArgAttr(fn_val, (unsigned)1, "noalias"); addLLVMArgAttr(fn_val, (unsigned)1, "readonly"); @@ -5571,7 +5564,8 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_ret_start + 1, ""); if (result_loc == nullptr) { // no copy needed - LLVMBuildStore(g->builder, zero, awaiter_ret_ptr_ptr); + LLVMBuildStore(g->builder, LLVMConstNull(LLVMGetElementType(LLVMTypeOf(awaiter_ret_ptr_ptr))), + awaiter_ret_ptr_ptr); } else { LLVMBuildStore(g->builder, result_loc, awaiter_ret_ptr_ptr); } From 24d5ec078355d68e3f1002220fd284b1ff02a465 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 11 Aug 2019 22:35:12 -0400 Subject: [PATCH 095/125] fix async function frames not aligned enough --- src/analyze.cpp | 10 +++---- src/analyze.hpp | 2 +- src/target.cpp | 4 +++ src/target.hpp | 2 ++ std/event/channel.zig | 62 +++++++++++++++++++------------------------ 5 files changed, 40 insertions(+), 40 deletions(-) diff --git a/src/analyze.cpp b/src/analyze.cpp index 30aa82a216..9cd3ba026b 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1500,7 +1500,7 @@ bool type_is_invalid(ZigType *type_entry) { ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_names[], - ZigType *field_types[], size_t field_count) + ZigType *field_types[], size_t field_count, unsigned min_abi_align) { ZigType *struct_type = new_type_table_entry(ZigTypeIdStruct); @@ -1512,7 +1512,7 @@ ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_na struct_type->data.structure.fields = allocate(field_count); struct_type->data.structure.fields_by_name.init(field_count); - size_t abi_align = 0; + size_t abi_align = min_abi_align; for (size_t i = 0; i < field_count; i += 1) { TypeStructField *field = &struct_type->data.structure.fields[i]; field->name = buf_create_from_str(field_names[i]); @@ -5334,7 
+5334,7 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { assert(field_names.length == field_types.length); frame_type->data.frame.locals_struct = get_struct_type(g, buf_ptr(&frame_type->name), - field_names.items, field_types.items, field_names.length); + field_names.items, field_types.items, field_names.length, target_fn_align(g->zig_target)); frame_type->abi_size = frame_type->data.frame.locals_struct->abi_size; frame_type->abi_align = frame_type->data.frame.locals_struct->abi_align; frame_type->size_in_bits = frame_type->data.frame.locals_struct->size_in_bits; @@ -7764,8 +7764,8 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r LLVMTypeRef get_llvm_type(CodeGen *g, ZigType *type) { assertNoError(type_resolve(g, type, ResolveStatusLLVMFull)); - assert(type->abi_size == 0 || type->abi_size == LLVMABISizeOfType(g->target_data_ref, type->llvm_type)); - assert(type->abi_align == 0 || type->abi_align == LLVMABIAlignmentOfType(g->target_data_ref, type->llvm_type)); + assert(type->abi_size == 0 || type->abi_size >= LLVMABISizeOfType(g->target_data_ref, type->llvm_type)); + assert(type->abi_align == 0 || type->abi_align >= LLVMABIAlignmentOfType(g->target_data_ref, type->llvm_type)); return type->llvm_type; } diff --git a/src/analyze.hpp b/src/analyze.hpp index 3115c79b40..97d8de7bb1 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -39,7 +39,7 @@ ZigType *get_error_union_type(CodeGen *g, ZigType *err_set_type, ZigType *payloa ZigType *get_bound_fn_type(CodeGen *g, ZigFn *fn_entry); ZigType *get_opaque_type(CodeGen *g, Scope *scope, AstNode *source_node, const char *full_name, Buf *bare_name); ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_names[], - ZigType *field_types[], size_t field_count); + ZigType *field_types[], size_t field_count, unsigned min_abi_align); ZigType *get_test_fn_type(CodeGen *g); ZigType *get_any_frame_type(CodeGen *g, ZigType *result_type); bool 
handle_is_ptr(ZigType *type_entry); diff --git a/src/target.cpp b/src/target.cpp index 7bb248a35f..d1ae64acd4 100644 --- a/src/target.cpp +++ b/src/target.cpp @@ -1759,3 +1759,7 @@ bool target_supports_libunwind(const ZigTarget *target) { return true; } + +unsigned target_fn_align(const ZigTarget *target) { + return 16; +} diff --git a/src/target.hpp b/src/target.hpp index 985a4c11b4..cc8da97777 100644 --- a/src/target.hpp +++ b/src/target.hpp @@ -197,4 +197,6 @@ uint32_t target_arch_largest_atomic_bits(ZigLLVM_ArchType arch); size_t target_libc_count(void); void target_libc_enum(size_t index, ZigTarget *out_target); +unsigned target_fn_align(const ZigTarget *target); + #endif diff --git a/std/event/channel.zig b/std/event/channel.zig index c4f7dca085..fa5ad6575f 100644 --- a/std/event/channel.zig +++ b/std/event/channel.zig @@ -2,8 +2,6 @@ const std = @import("../std.zig"); const builtin = @import("builtin"); const assert = std.debug.assert; const testing = std.testing; -const AtomicRmwOp = builtin.AtomicRmwOp; -const AtomicOrder = builtin.AtomicOrder; const Loop = std.event.Loop; /// many producer, many consumer, thread-safe, runtime configurable buffer size @@ -98,18 +96,18 @@ pub fn Channel(comptime T: type) type { // TODO test canceling a put() errdefer { - _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst); + _ = @atomicRmw(usize, &self.put_count, .Sub, 1, .SeqCst); const need_dispatch = !self.putters.remove(&queue_node); self.loop.cancelOnNextTick(&my_tick_node); if (need_dispatch) { // oops we made the put_count incorrect for a period of time. fix by dispatching. 
- _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); + _ = @atomicRmw(usize, &self.put_count, .Add, 1, .SeqCst); self.dispatch(); } } suspend { self.putters.put(&queue_node); - _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); + _ = @atomicRmw(usize, &self.put_count, .Add, 1, .SeqCst); self.dispatch(); } @@ -118,8 +116,7 @@ pub fn Channel(comptime T: type) type { /// await this function to get an item from the channel. If the buffer is empty, the frame will /// complete when the next item is put in the channel. pub async fn get(self: *SelfChannel) T { - // TODO integrate this function with named return values - // so we can get rid of this extra result copy + // TODO https://github.com/ziglang/zig/issues/2765 var result: T = undefined; var my_tick_node = Loop.NextTickNode.init(@frame()); var queue_node = std.atomic.Queue(GetNode).Node.init(GetNode{ @@ -131,19 +128,19 @@ pub fn Channel(comptime T: type) type { // TODO test canceling a get() errdefer { - _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst); + _ = @atomicRmw(usize, &self.get_count, .Sub, 1, .SeqCst); const need_dispatch = !self.getters.remove(&queue_node); self.loop.cancelOnNextTick(&my_tick_node); if (need_dispatch) { // oops we made the get_count incorrect for a period of time. fix by dispatching. 
- _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); + _ = @atomicRmw(usize, &self.get_count, .Add, 1, .SeqCst); self.dispatch(); } } suspend { self.getters.put(&queue_node); - _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); + _ = @atomicRmw(usize, &self.get_count, .Add, 1, .SeqCst); self.dispatch(); } @@ -183,19 +180,19 @@ pub fn Channel(comptime T: type) type { // TODO test canceling getOrNull errdefer { _ = self.or_null_queue.remove(&or_null_node); - _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst); + _ = @atomicRmw(usize, &self.get_count, .Sub, 1, .SeqCst); const need_dispatch = !self.getters.remove(&queue_node); self.loop.cancelOnNextTick(&my_tick_node); if (need_dispatch) { // oops we made the get_count incorrect for a period of time. fix by dispatching. - _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); + _ = @atomicRmw(usize, &self.get_count, .Add, 1, .SeqCst); self.dispatch(); } } suspend { self.getters.put(&queue_node); - _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); + _ = @atomicRmw(usize, &self.get_count, .Add, 1, .SeqCst); self.or_null_queue.put(&or_null_node); self.dispatch(); @@ -205,21 +202,21 @@ pub fn Channel(comptime T: type) type { fn dispatch(self: *SelfChannel) void { // set the "need dispatch" flag - _ = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); + _ = @atomicRmw(u8, &self.need_dispatch, .Xchg, 1, .SeqCst); lock: while (true) { // set the lock flag - const prev_lock = @atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); + const prev_lock = @atomicRmw(u8, &self.dispatch_lock, .Xchg, 1, .SeqCst); if (prev_lock != 0) return; // clear the need_dispatch flag since we're about to do it - _ = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst); + _ = @atomicRmw(u8, &self.need_dispatch, .Xchg, 0, .SeqCst); while 
(true) { one_dispatch: { // later we correct these extra subtractions - var get_count = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst); - var put_count = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst); + var get_count = @atomicRmw(usize, &self.get_count, .Sub, 1, .SeqCst); + var put_count = @atomicRmw(usize, &self.put_count, .Sub, 1, .SeqCst); // transfer self.buffer to self.getters while (self.buffer_len != 0) { @@ -238,7 +235,7 @@ pub fn Channel(comptime T: type) type { self.loop.onNextTick(get_node.tick_node); self.buffer_len -= 1; - get_count = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst); + get_count = @atomicRmw(usize, &self.get_count, .Sub, 1, .SeqCst); } // direct transfer self.putters to self.getters @@ -258,8 +255,8 @@ pub fn Channel(comptime T: type) type { self.loop.onNextTick(get_node.tick_node); self.loop.onNextTick(put_node.tick_node); - get_count = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst); - put_count = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst); + get_count = @atomicRmw(usize, &self.get_count, .Sub, 1, .SeqCst); + put_count = @atomicRmw(usize, &self.put_count, .Sub, 1, .SeqCst); } // transfer self.putters to self.buffer @@ -271,13 +268,13 @@ pub fn Channel(comptime T: type) type { self.buffer_index +%= 1; self.buffer_len += 1; - put_count = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst); + put_count = @atomicRmw(usize, &self.put_count, .Sub, 1, .SeqCst); } } // undo the extra subtractions - _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); - _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); + _ = @atomicRmw(usize, &self.get_count, .Add, 1, .SeqCst); + _ = @atomicRmw(usize, &self.put_count, .Add, 1, .SeqCst); // All the "get or null" functions should resume now. 
var remove_count: usize = 0; @@ -286,18 +283,18 @@ pub fn Channel(comptime T: type) type { self.loop.onNextTick(or_null_node.data.data.tick_node); } if (remove_count != 0) { - _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, remove_count, AtomicOrder.SeqCst); + _ = @atomicRmw(usize, &self.get_count, .Sub, remove_count, .SeqCst); } // clear need-dispatch flag - const need_dispatch = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst); + const need_dispatch = @atomicRmw(u8, &self.need_dispatch, .Xchg, 0, .SeqCst); if (need_dispatch != 0) continue; - const my_lock = @atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst); + const my_lock = @atomicRmw(u8, &self.dispatch_lock, .Xchg, 0, .SeqCst); assert(my_lock != 0); // we have to check again now that we unlocked - if (@atomicLoad(u8, &self.need_dispatch, AtomicOrder.SeqCst) != 0) continue :lock; + if (@atomicLoad(u8, &self.need_dispatch, .SeqCst) != 0) continue :lock; return; } @@ -327,16 +324,13 @@ test "std.event.Channel" { } async fn testChannelGetter(loop: *Loop, channel: *Channel(i32)) void { - const value1_promise = async channel.get(); - const value1 = await value1_promise; + const value1 = channel.get(); testing.expect(value1 == 1234); - const value2_promise = async channel.get(); - const value2 = await value2_promise; + const value2 = channel.get(); testing.expect(value2 == 4567); - const value3_promise = async channel.getOrNull(); - const value3 = await value3_promise; + const value3 = channel.getOrNull(); testing.expect(value3 == null); const last_put = async testPut(channel, 4444); From 98183e47436699f6e5eab200061c46eec342806e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 13 Aug 2019 11:33:01 -0400 Subject: [PATCH 096/125] flip the order of fields in error unions to prepare for fixing u128 alignment issues --- src/all_types.hpp | 4 ++-- src/analyze.cpp | 13 +++++++------ src/codegen.cpp | 25 ++++--------------------- 3 files changed, 13 
insertions(+), 29 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 4c3aeade9e..e9d5aa3834 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -3772,8 +3772,8 @@ static const size_t slice_len_index = 1; static const size_t maybe_child_index = 0; static const size_t maybe_null_index = 1; -static const size_t err_union_err_index = 0; -static const size_t err_union_payload_index = 1; +static const size_t err_union_payload_index = 0; +static const size_t err_union_err_index = 1; // TODO call graph analysis to find out what this number needs to be for every function // MUST BE A POWER OF TWO. diff --git a/src/analyze.cpp b/src/analyze.cpp index 0af1baec35..672e75a5ee 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -7017,20 +7017,21 @@ static void resolve_llvm_types_error_union(CodeGen *g, ZigType *type) { uint64_t debug_size_in_bits = 8*LLVMStoreSizeOfType(g->target_data_ref, type->llvm_type); uint64_t debug_align_in_bits = 8*LLVMABISizeOfType(g->target_data_ref, type->llvm_type); - ZigLLVMDIType *di_element_types[] = { - ZigLLVMCreateDebugMemberType(g->dbuilder, ZigLLVMTypeToScope(type->llvm_di_type), + ZigLLVMDIType *di_element_types[2]; + di_element_types[err_union_err_index] = ZigLLVMCreateDebugMemberType(g->dbuilder, + ZigLLVMTypeToScope(type->llvm_di_type), "tag", di_file, line, tag_debug_size_in_bits, tag_debug_align_in_bits, tag_offset_in_bits, - ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, err_set_type)), - ZigLLVMCreateDebugMemberType(g->dbuilder, ZigLLVMTypeToScope(type->llvm_di_type), + ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, err_set_type)); + di_element_types[err_union_payload_index] = ZigLLVMCreateDebugMemberType(g->dbuilder, + ZigLLVMTypeToScope(type->llvm_di_type), "value", di_file, line, value_debug_size_in_bits, value_debug_align_in_bits, value_offset_in_bits, - ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, payload_type)), - }; + ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, payload_type)); ZigLLVMDIType *replacement_di_type = 
ZigLLVMCreateDebugStructType(g->dbuilder, compile_unit_scope, diff --git a/src/codegen.cpp b/src/codegen.cpp index 881b83c169..32e6d2fbee 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -6437,29 +6437,12 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c err_payload_value = gen_const_val(g, payload_val, ""); make_unnamed_struct = is_llvm_value_unnamed_type(g, payload_val->type, err_payload_value); } + LLVMValueRef fields[2]; + fields[err_union_err_index] = err_tag_value; + fields[err_union_payload_index] = err_payload_value; if (make_unnamed_struct) { - uint64_t payload_off = LLVMOffsetOfElement(g->target_data_ref, get_llvm_type(g, type_entry), 1); - uint64_t err_sz = LLVMStoreSizeOfType(g->target_data_ref, LLVMTypeOf(err_tag_value)); - unsigned pad_sz = payload_off - err_sz; - if (pad_sz == 0) { - LLVMValueRef fields[] = { - err_tag_value, - err_payload_value, - }; - return LLVMConstStruct(fields, 2, false); - } else { - LLVMValueRef fields[] = { - err_tag_value, - LLVMGetUndef(LLVMArrayType(LLVMInt8Type(), pad_sz)), - err_payload_value, - }; - return LLVMConstStruct(fields, 3, false); - } + return LLVMConstStruct(fields, 2, false); } else { - LLVMValueRef fields[] = { - err_tag_value, - err_payload_value, - }; return LLVMConstNamedStruct(get_llvm_type(g, type_entry), fields, 2); } } From 12ff91c1c99da11dcaab1e4a1adb6a6917ef4881 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 13 Aug 2019 12:44:30 -0400 Subject: [PATCH 097/125] alignment of structs no longer depends on LLVM fixes async function tests in optimized builds --- BRANCH_TODO | 1 + src/all_types.hpp | 3 + src/analyze.cpp | 133 +++++++++++++++++++++++++-------- src/analyze.hpp | 2 - src/codegen.cpp | 58 ++++++++------ std/event/channel.zig | 4 +- test/stage1/behavior/align.zig | 62 +++++++++++++++ 7 files changed, 204 insertions(+), 59 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index b2b293aec1..98d4edc401 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ 
-1,3 +1,4 @@ + * alignment of variables not being respected in async functions * for loops need to spill the index. other payload captures probably also need to spill * compile error (instead of crashing) for trying to get @Frame of generic function * compile error (instead of crashing) for trying to async call and passing @Frame of wrong function diff --git a/src/all_types.hpp b/src/all_types.hpp index 10a2a5fb7c..96b51ab5d2 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1148,6 +1148,8 @@ struct ZigTypeOptional { struct ZigTypeErrorUnion { ZigType *err_set_type; ZigType *payload_type; + size_t pad_bytes; + LLVMTypeRef pad_llvm_type; }; struct ZigTypeErrorSet { @@ -3564,6 +3566,7 @@ struct IrInstructionAllocaGen { uint32_t align; const char *name_hint; + size_t field_index; }; struct IrInstructionEndExpr { diff --git a/src/analyze.cpp b/src/analyze.cpp index 744640084c..858fa7be2d 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -630,6 +630,7 @@ ZigType *get_error_union_type(CodeGen *g, ZigType *err_set_type, ZigType *payloa size_t field2_offset = next_field_offset(0, entry->abi_align, field_sizes[0], field_aligns[1]); entry->abi_size = next_field_offset(field2_offset, entry->abi_align, field_sizes[1], entry->abi_align); entry->size_in_bits = entry->abi_size * 8; + entry->data.error_union.pad_bytes = entry->abi_size - (field2_offset + field_sizes[1]); } g->type_table.put(type_id, entry); @@ -1499,7 +1500,7 @@ bool type_is_invalid(ZigType *type_entry) { } -ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_names[], +static ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_names[], ZigType *field_types[], size_t field_count, unsigned min_abi_align) { ZigType *struct_type = new_type_table_entry(ZigTypeIdStruct); @@ -1524,10 +1525,6 @@ ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_na if (field->type_entry->abi_align > abi_align) { abi_align = 
field->type_entry->abi_align; } - field->gen_index = struct_type->data.structure.gen_field_count; - struct_type->data.structure.gen_field_count += 1; - } else { - field->gen_index = SIZE_MAX; } auto prev_entry = struct_type->data.structure.fields_by_name.put_unique(field->name, field); @@ -1537,14 +1534,16 @@ ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_na size_t next_offset = 0; for (size_t i = 0; i < field_count; i += 1) { TypeStructField *field = &struct_type->data.structure.fields[i]; - if (field->gen_index == SIZE_MAX) + if (!type_has_bits(field->type_entry)) continue; + field->offset = next_offset; + + // find the next non-zero-byte field for offset calculations size_t next_src_field_index = i + 1; for (; next_src_field_index < field_count; next_src_field_index += 1) { - if (struct_type->data.structure.fields[next_src_field_index].gen_index != SIZE_MAX) { + if (type_has_bits(struct_type->data.structure.fields[next_src_field_index].type_entry)) break; - } } size_t next_abi_align = (next_src_field_index == field_count) ? 
abi_align : struct_type->data.structure.fields[next_src_field_index].type_entry->abi_align; @@ -5304,6 +5303,7 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { for (size_t alloca_i = 0; alloca_i < fn->alloca_gen_list.length; alloca_i += 1) { IrInstructionAllocaGen *instruction = fn->alloca_gen_list.at(alloca_i); + instruction->field_index = SIZE_MAX; ZigType *ptr_type = instruction->base.value.type; assert(ptr_type->id == ZigTypeIdPointer); ZigType *child_type = ptr_type->data.pointer.child_type; @@ -5327,6 +5327,7 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { } else { name = buf_ptr(buf_sprintf("%s.%" ZIG_PRI_usize, instruction->name_hint, alloca_i)); } + instruction->field_index = field_types.length; field_names.append(name); field_types.append(child_type); } @@ -5949,6 +5950,15 @@ ZigType *make_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits) { entry->llvm_type = LLVMIntType(size_in_bits); entry->abi_size = LLVMABISizeOfType(g->target_data_ref, entry->llvm_type); entry->abi_align = LLVMABIAlignmentOfType(g->target_data_ref, entry->llvm_type); + + if (size_in_bits >= 128) { + // Override the incorrect alignment reported by LLVM. Clang does this as well. + // On x86_64 there are some instructions like CMPXCHG16B which require this. + // On all targets, integers 128 bits and above have ABI alignment of 16. + // See: https://github.com/ziglang/zig/issues/2987 + assert(entry->abi_align == 8); // if this trips we can remove the workaround + entry->abi_align = 16; + } } const char u_or_i = is_signed ? 'i' : 'u'; @@ -6810,10 +6820,9 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS } size_t field_count = struct_type->data.structure.src_field_count; - size_t gen_field_count = struct_type->data.structure.gen_field_count; - LLVMTypeRef *element_types = allocate(gen_field_count); + // Every field could potentially have a generated padding field after it. 
+ LLVMTypeRef *element_types = allocate(field_count * 2); - size_t gen_field_index = 0; bool packed = (struct_type->data.structure.layout == ContainerLayoutPacked); size_t packed_bits_offset = 0; size_t first_packed_bits_offset_misalign = SIZE_MAX; @@ -6821,20 +6830,36 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS // trigger all the recursive get_llvm_type calls for (size_t i = 0; i < field_count; i += 1) { - TypeStructField *type_struct_field = &struct_type->data.structure.fields[i]; - ZigType *field_type = type_struct_field->type_entry; + TypeStructField *field = &struct_type->data.structure.fields[i]; + ZigType *field_type = field->type_entry; if (!type_has_bits(field_type)) continue; (void)get_llvm_type(g, field_type); if (struct_type->data.structure.resolve_status >= wanted_resolve_status) return; } - for (size_t i = 0; i < field_count; i += 1) { - TypeStructField *type_struct_field = &struct_type->data.structure.fields[i]; - ZigType *field_type = type_struct_field->type_entry; + size_t gen_field_index = 0; + // Calculate what LLVM thinks the ABI align of the struct will be. We do this to avoid + // inserting padding bytes where LLVM would do it automatically. 
+ size_t llvm_struct_abi_align = 0; + for (size_t i = 0; i < field_count; i += 1) { + ZigType *field_type = struct_type->data.structure.fields[i].type_entry; if (!type_has_bits(field_type)) continue; + LLVMTypeRef field_llvm_type = get_llvm_type(g, field_type); + size_t llvm_field_abi_align = LLVMABIAlignmentOfType(g->target_data_ref, field_llvm_type); + llvm_struct_abi_align = max(llvm_struct_abi_align, llvm_field_abi_align); + } + + for (size_t i = 0; i < field_count; i += 1) { + TypeStructField *field = &struct_type->data.structure.fields[i]; + ZigType *field_type = field->type_entry; + + if (!type_has_bits(field_type)) { + field->gen_index = SIZE_MAX; + continue; + } if (packed) { size_t field_size_in_bits = type_size_bits(g, field_type); @@ -6871,11 +6896,44 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS llvm_type = get_llvm_type(g, field_type); } element_types[gen_field_index] = llvm_type; - + field->gen_index = gen_field_index; gen_field_index += 1; + + // find the next non-zero-byte field for offset calculations + size_t next_src_field_index = i + 1; + for (; next_src_field_index < field_count; next_src_field_index += 1) { + if (type_has_bits(struct_type->data.structure.fields[next_src_field_index].type_entry)) + break; + } + size_t next_abi_align = (next_src_field_index == field_count) ? + struct_type->abi_align : + struct_type->data.structure.fields[next_src_field_index].type_entry->abi_align; + size_t llvm_next_abi_align = (next_src_field_index == field_count) ? 
+ llvm_struct_abi_align : + LLVMABIAlignmentOfType(g->target_data_ref, + get_llvm_type(g, struct_type->data.structure.fields[next_src_field_index].type_entry)); + + size_t next_offset = next_field_offset(field->offset, struct_type->abi_align, + field_type->abi_size, next_abi_align); + size_t llvm_next_offset = next_field_offset(field->offset, llvm_struct_abi_align, + LLVMABISizeOfType(g->target_data_ref, llvm_type), llvm_next_abi_align); + + assert(next_offset >= llvm_next_offset); + if (next_offset > llvm_next_offset) { + size_t pad_bytes = next_offset - (field->offset + field_type->abi_size); + if (pad_bytes != 0) { + LLVMTypeRef pad_llvm_type = LLVMArrayType(LLVMInt8Type(), pad_bytes); + element_types[gen_field_index] = pad_llvm_type; + gen_field_index += 1; + } + } } debug_field_count += 1; } + if (!packed) { + struct_type->data.structure.gen_field_count = gen_field_index; + } + if (first_packed_bits_offset_misalign != SIZE_MAX) { size_t full_bit_count = packed_bits_offset - first_packed_bits_offset_misalign; size_t full_abi_size = get_abi_size_bytes(full_bit_count, g->pointer_size_bytes); @@ -6884,19 +6942,20 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS } if (type_has_bits(struct_type)) { - LLVMStructSetBody(struct_type->llvm_type, element_types, (unsigned)gen_field_count, packed); + LLVMStructSetBody(struct_type->llvm_type, element_types, + (unsigned)struct_type->data.structure.gen_field_count, packed); } ZigLLVMDIType **di_element_types = allocate(debug_field_count); size_t debug_field_index = 0; for (size_t i = 0; i < field_count; i += 1) { - TypeStructField *type_struct_field = &struct_type->data.structure.fields[i]; - size_t gen_field_index = type_struct_field->gen_index; + TypeStructField *field = &struct_type->data.structure.fields[i]; + size_t gen_field_index = field->gen_index; if (gen_field_index == SIZE_MAX) { continue; } - ZigType *field_type = type_struct_field->type_entry; + ZigType *field_type = 
field->type_entry; // if the field is a function, actually the debug info should be a pointer. ZigLLVMDIType *field_di_type; @@ -6914,13 +6973,13 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS uint64_t debug_align_in_bits; uint64_t debug_offset_in_bits; if (packed) { - debug_size_in_bits = type_struct_field->type_entry->size_in_bits; - debug_align_in_bits = 8 * type_struct_field->type_entry->abi_align; - debug_offset_in_bits = 8 * type_struct_field->offset + type_struct_field->bit_offset_in_host; + debug_size_in_bits = field->type_entry->size_in_bits; + debug_align_in_bits = 8 * field->type_entry->abi_align; + debug_offset_in_bits = 8 * field->offset + field->bit_offset_in_host; } else { debug_size_in_bits = 8 * get_store_size_bytes(field_type->size_in_bits); debug_align_in_bits = 8 * field_type->abi_align; - debug_offset_in_bits = 8 * type_struct_field->offset; + debug_offset_in_bits = 8 * field->offset; } unsigned line; if (decl_node != nullptr) { @@ -6930,7 +6989,7 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS line = 0; } di_element_types[debug_field_index] = ZigLLVMCreateDebugMemberType(g->dbuilder, - ZigLLVMTypeToScope(struct_type->llvm_di_type), buf_ptr(type_struct_field->name), + ZigLLVMTypeToScope(struct_type->llvm_di_type), buf_ptr(field->name), di_file, line, debug_size_in_bits, debug_align_in_bits, @@ -7171,7 +7230,7 @@ static void resolve_llvm_types_union(CodeGen *g, ZigType *union_type, ResolveSta union_type->data.unionation.resolve_status = ResolveStatusLLVMFull; } -static void resolve_llvm_types_pointer(CodeGen *g, ZigType *type) { +static void resolve_llvm_types_pointer(CodeGen *g, ZigType *type, ResolveStatus wanted_resolve_status) { if (type->llvm_di_type != nullptr) return; if (!type_has_bits(type)) { @@ -7200,7 +7259,7 @@ static void resolve_llvm_types_pointer(CodeGen *g, ZigType *type) { uint64_t debug_align_in_bits = 8*type->abi_align; type->llvm_di_type = 
ZigLLVMCreateDebugPointerType(g->dbuilder, elem_type->llvm_di_type, debug_size_in_bits, debug_align_in_bits, buf_ptr(&type->name)); - assertNoError(type_resolve(g, elem_type, ResolveStatusLLVMFull)); + assertNoError(type_resolve(g, elem_type, wanted_resolve_status)); } else { ZigType *host_int_type = get_int_type(g, false, type->data.pointer.host_int_bytes * 8); LLVMTypeRef host_int_llvm_type = get_llvm_type(g, host_int_type); @@ -7326,10 +7385,17 @@ static void resolve_llvm_types_error_union(CodeGen *g, ZigType *type) { } else { LLVMTypeRef err_set_llvm_type = get_llvm_type(g, err_set_type); LLVMTypeRef payload_llvm_type = get_llvm_type(g, payload_type); - LLVMTypeRef elem_types[2]; + LLVMTypeRef elem_types[3]; elem_types[err_union_err_index] = err_set_llvm_type; elem_types[err_union_payload_index] = payload_llvm_type; + type->llvm_type = LLVMStructType(elem_types, 2, false); + if (LLVMABISizeOfType(g->target_data_ref, type->llvm_type) != type->abi_size) { + // we need to do our own padding + type->data.error_union.pad_llvm_type = LLVMArrayType(LLVMInt8Type(), type->data.error_union.pad_bytes); + elem_types[2] = type->data.error_union.pad_llvm_type; + type->llvm_type = LLVMStructType(elem_types, 3, false); + } ZigLLVMDIScope *compile_unit_scope = ZigLLVMCompileUnitToScope(g->compile_unit); ZigLLVMDIFile *di_file = nullptr; @@ -7511,6 +7577,7 @@ static void resolve_llvm_types_fn_type(CodeGen *g, ZigType *fn_type) { } void resolve_llvm_types_fn(CodeGen *g, ZigFn *fn) { + Error err; if (fn->raw_di_type != nullptr) return; ZigType *fn_type = fn->type_entry; @@ -7529,8 +7596,10 @@ void resolve_llvm_types_fn(CodeGen *g, ZigFn *fn) { ZigType *frame_type = get_coro_frame_type(g, fn); ZigType *ptr_type = get_pointer_to_type(g, frame_type, false); - gen_param_types.append(get_llvm_type(g, ptr_type)); - param_di_types.append(get_llvm_di_type(g, ptr_type)); + if ((err = type_resolve(g, ptr_type, ResolveStatusLLVMFwdDecl))) + zig_unreachable(); + 
gen_param_types.append(ptr_type->llvm_type); + param_di_types.append(ptr_type->llvm_di_type); // this parameter is used to pass the result pointer when await completes gen_param_types.append(get_llvm_type(g, g->builtin_types.entry_usize)); @@ -7726,7 +7795,7 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r case ZigTypeIdUnion: return resolve_llvm_types_union(g, type, wanted_resolve_status); case ZigTypeIdPointer: - return resolve_llvm_types_pointer(g, type); + return resolve_llvm_types_pointer(g, type, wanted_resolve_status); case ZigTypeIdInt: return resolve_llvm_types_integer(g, type); case ZigTypeIdOptional: diff --git a/src/analyze.hpp b/src/analyze.hpp index 97d8de7bb1..26bb006249 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -38,8 +38,6 @@ ZigType *get_smallest_unsigned_int_type(CodeGen *g, uint64_t x); ZigType *get_error_union_type(CodeGen *g, ZigType *err_set_type, ZigType *payload_type); ZigType *get_bound_fn_type(CodeGen *g, ZigFn *fn_entry); ZigType *get_opaque_type(CodeGen *g, Scope *scope, AstNode *source_node, const char *full_name, Buf *bare_name); -ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_names[], - ZigType *field_types[], size_t field_count, unsigned min_abi_align); ZigType *get_test_fn_type(CodeGen *g); ZigType *get_any_frame_type(CodeGen *g, ZigType *result_type); bool handle_is_ptr(ZigType *type_entry); diff --git a/src/codegen.cpp b/src/codegen.cpp index 589094e810..3fb3022ac7 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -3765,29 +3765,17 @@ static void render_async_spills(CodeGen *g) { gen_var_debug_decl(g, var); } } - // label (grep this): [coro_frame_struct_layout] - if (codegen_fn_has_err_ret_tracing_stack(g, g->cur_fn, true)) { - async_var_index += 2; - } + + ZigType *frame_type = g->cur_fn->frame_type->data.frame.locals_struct; + for (size_t alloca_i = 0; alloca_i < g->cur_fn->alloca_gen_list.length; alloca_i += 1) { IrInstructionAllocaGen 
*instruction = g->cur_fn->alloca_gen_list.at(alloca_i); - ZigType *ptr_type = instruction->base.value.type; - assert(ptr_type->id == ZigTypeIdPointer); - ZigType *child_type = ptr_type->data.pointer.child_type; - if (!type_has_bits(child_type)) + if (instruction->field_index == SIZE_MAX) continue; - if (instruction->base.ref_count == 0) - continue; - if (instruction->base.value.special != ConstValSpecialRuntime) { - if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special != - ConstValSpecialRuntime) - { - continue; - } - } - instruction->base.llvm_value = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, async_var_index, + + size_t gen_index = frame_type->data.structure.fields[instruction->field_index].gen_index; + instruction->base.llvm_value = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, gen_index, instruction->name_hint); - async_var_index += 1; } } @@ -6363,6 +6351,9 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c break; } + if ((err = type_resolve(g, type_entry, ResolveStatusLLVMFull))) + zig_unreachable(); + switch (type_entry->id) { case ZigTypeIdInt: return bigint_to_llvm_const(get_llvm_type(g, type_entry), &const_val->data.x_bigint); @@ -6434,6 +6425,7 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c LLVMValueRef *fields = allocate(type_entry->data.structure.gen_field_count); size_t src_field_count = type_entry->data.structure.src_field_count; bool make_unnamed_struct = false; + assert(type_entry->data.structure.resolve_status == ResolveStatusLLVMFull); if (type_entry->data.structure.layout == ContainerLayoutPacked) { size_t src_field_index = 0; while (src_field_index < src_field_count) { @@ -6503,6 +6495,22 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c LLVMValueRef val = gen_const_val(g, field_val, ""); fields[type_struct_field->gen_index] = val; make_unnamed_struct = make_unnamed_struct || is_llvm_value_unnamed_type(g, 
field_val->type, val); + + size_t end_pad_gen_index = (i + 1 < src_field_count) ? + type_entry->data.structure.fields[i + 1].gen_index : + type_entry->data.structure.gen_field_count; + size_t next_offset = (i + 1 < src_field_count) ? + type_entry->data.structure.fields[i + 1].offset : type_entry->abi_size; + if (end_pad_gen_index != SIZE_MAX) { + for (size_t gen_i = type_struct_field->gen_index + 1; gen_i < end_pad_gen_index; + gen_i += 1) + { + size_t pad_bytes = next_offset - + (type_struct_field->offset + type_struct_field->type_entry->abi_size); + LLVMTypeRef llvm_array_type = LLVMArrayType(LLVMInt8Type(), pad_bytes); + fields[gen_i] = LLVMGetUndef(llvm_array_type); + } + } } } if (make_unnamed_struct) { @@ -6690,13 +6698,18 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c err_payload_value = gen_const_val(g, payload_val, ""); make_unnamed_struct = is_llvm_value_unnamed_type(g, payload_val->type, err_payload_value); } - LLVMValueRef fields[2]; + LLVMValueRef fields[3]; fields[err_union_err_index] = err_tag_value; fields[err_union_payload_index] = err_payload_value; + size_t field_count = 2; + if (type_entry->data.error_union.pad_llvm_type != nullptr) { + fields[2] = LLVMGetUndef(type_entry->data.error_union.pad_llvm_type); + field_count = 3; + } if (make_unnamed_struct) { - return LLVMConstStruct(fields, 2, false); + return LLVMConstStruct(fields, field_count, false); } else { - return LLVMConstNamedStruct(get_llvm_type(g, type_entry), fields, 2); + return LLVMConstNamedStruct(get_llvm_type(g, type_entry), fields, field_count); } } } @@ -7139,6 +7152,7 @@ static void do_code_gen(CodeGen *g) { } if (is_async) { + (void)get_llvm_type(g, fn_table_entry->frame_type); g->cur_resume_block_count = 0; LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; diff --git a/std/event/channel.zig b/std/event/channel.zig index fa5ad6575f..91c4650dc1 100644 --- a/std/event/channel.zig +++ b/std/event/channel.zig @@ -307,11 
+307,9 @@ test "std.event.Channel" { // https://github.com/ziglang/zig/issues/1908 if (builtin.single_threaded) return error.SkipZigTest; - const allocator = std.heap.direct_allocator; - var loop: Loop = undefined; // TODO make a multi threaded test - try loop.initSingleThreaded(allocator); + try loop.initSingleThreaded(std.heap.direct_allocator); defer loop.deinit(); const channel = try Channel(i32).create(&loop, 0); diff --git a/test/stage1/behavior/align.zig b/test/stage1/behavior/align.zig index b64f45bb6b..f607ac59d2 100644 --- a/test/stage1/behavior/align.zig +++ b/test/stage1/behavior/align.zig @@ -228,3 +228,65 @@ test "alignment of extern() void" { } extern fn nothing() void {} + +test "return error union with 128-bit integer" { + expect(3 == try give()); +} +fn give() anyerror!u128 { + return 3; +} + +test "alignment of >= 128-bit integer type" { + expect(@alignOf(u128) == 16); + expect(@alignOf(u129) == 16); +} + +test "alignment of struct with 128-bit field" { + expect(@alignOf(struct { + x: u128, + }) == 16); + + comptime { + expect(@alignOf(struct { + x: u128, + }) == 16); + } +} + +test "size of extern struct with 128-bit field" { + expect(@sizeOf(extern struct { + x: u128, + y: u8, + }) == 32); + + comptime { + expect(@sizeOf(extern struct { + x: u128, + y: u8, + }) == 32); + } +} + +const DefaultAligned = struct { + nevermind: u32, + badguy: i128, +}; + +test "read 128-bit field from default aligned struct in stack memory" { + var default_aligned = DefaultAligned{ + .nevermind = 1, + .badguy = 12, + }; + expect((@ptrToInt(&default_aligned.badguy) % 16) == 0); + expect(12 == default_aligned.badguy); +} + +var default_aligned_global = DefaultAligned{ + .nevermind = 1, + .badguy = 12, +}; + +test "read 128-bit field from default aligned struct in global memory" { + expect((@ptrToInt(&default_aligned_global.badguy) % 16) == 0); + expect(12 == default_aligned_global.badguy); +} From 82d4ebe53a9d86559dee4e82cde97ab26e76a375 Mon Sep 17 00:00:00 2001 From: 
Andrew Kelley Date: Tue, 13 Aug 2019 13:28:05 -0400 Subject: [PATCH 098/125] organize TODOs --- BRANCH_TODO | 35 +++++++++++++++++------------------ src/all_types.hpp | 7 ++++--- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index 98d4edc401..77ea14c06f 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,17 +1,27 @@ + * grep for "coroutine" and "coro" and replace all that nomenclature with "async functions" * alignment of variables not being respected in async functions - * for loops need to spill the index. other payload captures probably also need to spill - * compile error (instead of crashing) for trying to get @Frame of generic function - * compile error (instead of crashing) for trying to async call and passing @Frame of wrong function - * `const result = (await a) + (await b);` this causes "Instruction does not dominate all uses" - need spill - * compile error for error: expected anyframe->T, found 'anyframe' - * compile error for error: expected anyframe->T, found 'i32' * await of a non async function * async call on a non async function + * documentation + - @asyncCall + - @frame + - @Frame + - @frameSize + - coroutines section + - suspend + - resume + - anyframe, anyframe->T * a test where an async function destroys its own frame in a defer + * compile error (instead of crashing) for trying to get @Frame of generic function + * compile error (instead of crashing) for trying to async call and passing @Frame of wrong function * implicit cast of normal function to async function should be allowed when it is inferred to be async - * @typeInfo for @Frame(func) + * compile error for error: expected anyframe->T, found 'anyframe' + * compile error for error: expected anyframe->T, found 'i32' * peer type resolution of *@Frame(func) and anyframe * peer type resolution of *@Frame(func) and anyframe->T when the return type matches + * for loops need to spill the index. 
other payload captures probably also need to spill + * `const result = (await a) + (await b);` this causes "Instruction does not dominate all uses" - need spill + * @typeInfo for @Frame(func) * returning a value from within a suspend block * make resuming inside a suspend block, with nothing after it, a must-tail call. * make sure there are safety tests for all the new safety features (search the new PanicFnId enum values) @@ -22,17 +32,6 @@ * calling a generic function which is async * make sure `await @asyncCall` and `await async` are handled correctly. * allow @asyncCall with a real @Frame(func) (the point of this is result pointer) - * documentation - - @asyncCall - - @frame - - @Frame - - @frameSize - - coroutines section - - suspend - - resume - - anyframe, anyframe->T - * call graph analysis to have fewer stack trace frames - * grep for "coroutine" and "coro" and replace all that nomenclature with "async functions" * when there are multiple calls to async functions in a function, reuse the same frame buffer, so that the needed bytes is equal to the largest callee's frame * if an async function is never called with async then a few optimizations can be made: diff --git a/src/all_types.hpp b/src/all_types.hpp index 96b51ab5d2..e6a19a0c1d 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -3758,9 +3758,10 @@ static const size_t coro_awaiter_index = 2; static const size_t coro_prev_val_index = 3; static const size_t coro_ret_start = 4; -// TODO call graph analysis to find out what this number needs to be for every function -// MUST BE A POWER OF TWO. -static const size_t stack_trace_ptr_count = 32; +// TODO https://github.com/ziglang/zig/issues/3056 +// We require this to be a power of 2 so that we can use shifting rather than +// remainder division. +static const size_t stack_trace_ptr_count = 32; // Must be a power of 2. #define NAMESPACE_SEP_CHAR '.' #define NAMESPACE_SEP_STR "." 
From 50926341036c3ba377215d1c70c3e97adb07a292 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 13 Aug 2019 14:14:19 -0400 Subject: [PATCH 099/125] avoid the word "coroutine", they're "async functions" --- BRANCH_TODO | 2 +- doc/langref.html.in | 40 ++++++++--------- src-self-hosted/ir.zig | 14 ------ src-self-hosted/link.zig | 2 +- src-self-hosted/main.zig | 2 +- src-self-hosted/stage1.zig | 3 +- src/all_types.hpp | 24 +++++----- src/analyze.cpp | 90 +++++++++++++++++++------------------- src/analyze.hpp | 2 +- src/codegen.cpp | 62 +++++++++++++------------- src/ir.cpp | 62 +++++++++++++------------- src/ir_print.cpp | 9 ++-- src/zig_llvm.cpp | 3 -- std/event/fs.zig | 6 +-- std/event/future.zig | 2 +- std/event/group.zig | 12 ++--- std/event/lock.zig | 5 +-- std/event/locked.zig | 2 +- std/event/loop.zig | 2 +- std/event/net.zig | 14 +++--- std/event/rwlock.zig | 8 ++-- std/event/rwlocked.zig | 2 +- std/zig/parser_test.zig | 6 +-- 23 files changed, 175 insertions(+), 199 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index 77ea14c06f..cac275eb75 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,4 +1,4 @@ - * grep for "coroutine" and "coro" and replace all that nomenclature with "async functions" + * zig fmt support for the syntax * alignment of variables not being respected in async functions * await of a non async function * async call on a non async function diff --git a/doc/langref.html.in b/doc/langref.html.in index 0cb76a4bdf..23e4dd194e 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -5968,9 +5968,10 @@ test "global assembly" {

TODO: @atomic rmw

TODO: builtin atomic memory ordering enum

{#header_close#} - {#header_open|Coroutines#} + {#header_open|Async Functions#}

- A coroutine is a generalization of a function. + An async function is a function whose callsite is split into an {#syntax#}async{#endsyntax#} initiation, + followed by an {#syntax#}await{#endsyntax#} completion. They can also be canceled.

When you call a function, it creates a stack frame, @@ -5980,14 +5981,14 @@ test "global assembly" { until the function returns.

- A coroutine is like a function, but it can be suspended + An async function is like a function, but it can be suspended and resumed any number of times, and then it must be - explicitly destroyed. When a coroutine suspends, it + explicitly destroyed. When an async function suspends, it returns to the resumer.

- {#header_open|Minimal Coroutine Example#} + {#header_open|Minimal Async Function Example#}

- Declare a coroutine with the {#syntax#}async{#endsyntax#} keyword. + Declare an async function with the {#syntax#}async{#endsyntax#} keyword. The expression in angle brackets must evaluate to a struct which has these fields:

@@ -6006,8 +6007,8 @@ test "global assembly" { the function generic. Zig will infer the allocator type when the async function is called.

- Call a coroutine with the {#syntax#}async{#endsyntax#} keyword. Here, the expression in angle brackets - is a pointer to the allocator struct that the coroutine expects. + Call an async function with the {#syntax#}async{#endsyntax#} keyword. Here, the expression in angle brackets + is a pointer to the allocator struct that the async function expects.

The result of an async function call is a {#syntax#}promise->T{#endsyntax#} type, where {#syntax#}T{#endsyntax#} @@ -6058,7 +6059,7 @@ const assert = std.debug.assert; var the_frame: anyframe = undefined; var result = false; -test "coroutine suspend with block" { +test "async function suspend with block" { _ = async testSuspendBlock(); std.debug.assert(!result); resume the_frame; @@ -6074,7 +6075,7 @@ fn testSuspendBlock() void { } {#code_end#}

- Every suspend point in an async function represents a point at which the coroutine + Every suspend point in an async function represents a point at which the async function could be destroyed. If that happens, {#syntax#}defer{#endsyntax#} expressions that are in scope are run, as well as {#syntax#}errdefer{#endsyntax#} expressions.

@@ -6083,14 +6084,14 @@ fn testSuspendBlock() void {

{#header_open|Resuming from Suspend Blocks#}

- Upon entering a {#syntax#}suspend{#endsyntax#} block, the coroutine is already considered + Upon entering a {#syntax#}suspend{#endsyntax#} block, the async function is already considered suspended, and can be resumed. For example, if you started another kernel thread, and had that thread call {#syntax#}resume{#endsyntax#} on the promise handle provided by the {#syntax#}suspend{#endsyntax#} block, the new thread would begin executing after the suspend block, while the old thread continued executing the suspend block.

- However, the coroutine can be directly resumed from the suspend block, in which case it + However, the async function can be directly resumed from the suspend block, in which case it never returns to its resumer and continues executing.

{#code_begin|test#} @@ -6127,8 +6128,8 @@ async fn testResumeFromSuspend(my_result: *i32) void { If the async function associated with the promise handle has already returned, then {#syntax#}await{#endsyntax#} destroys the target async function, and gives the return value. Otherwise, {#syntax#}await{#endsyntax#} suspends the current async function, registering its - promise handle with the target coroutine. It becomes the target coroutine's responsibility - to have ensured that it will be resumed or destroyed. When the target coroutine reaches + promise handle with the target async function. It becomes the target async function's responsibility + to have ensured that it will be resumed or destroyed. When the target async function reaches its return statement, it gives the return value to the awaiter, destroys itself, and then resumes the awaiter.

@@ -6137,7 +6138,7 @@ async fn testResumeFromSuspend(my_result: *i32) void {

{#syntax#}await{#endsyntax#} counts as a suspend point, and therefore at every {#syntax#}await{#endsyntax#}, - a coroutine can be potentially destroyed, which would run {#syntax#}defer{#endsyntax#} and {#syntax#}errdefer{#endsyntax#} expressions. + an async function can be potentially destroyed, which would run {#syntax#}defer{#endsyntax#} and {#syntax#}errdefer{#endsyntax#} expressions.

{#code_begin|test#} const std = @import("std"); @@ -6146,7 +6147,7 @@ const assert = std.debug.assert; var the_frame: anyframe = undefined; var final_result: i32 = 0; -test "coroutine await" { +test "async function await" { seq('a'); _ = async amain(); seq('f'); @@ -6188,7 +6189,7 @@ fn seq(c: u8) void { {#header_close#} {#header_open|Open Issues#}

- There are a few issues with coroutines that are considered unresolved. Best be aware of them, + There are a few issues with async functions that are considered unresolved. Best be aware of them, as the situation is likely to change before 1.0.0:

    @@ -6202,7 +6203,7 @@ fn seq(c: u8) void {
• Zig does not take advantage of LLVM's allocation elision optimization for - coroutines. It crashed LLVM when I tried to do it the first time. This is + async functions. It crashed LLVM when I tried to do it the first time. This is related to the other 2 bullet points here. See #802.
  • @@ -8016,8 +8017,7 @@ pub fn build(b: *Builder) void {

    Zig has a compile option --single-threaded which has the following effects:

    • All {#link|Thread Local Variables#} are treated as {#link|Global Variables#}.
    • -
    • The overhead of {#link|Coroutines#} becomes equivalent to function call overhead. - TODO: please note this will not be implemented until the upcoming Coroutine Rewrite
    • +
    • The overhead of {#link|Async Functions#} becomes equivalent to function call overhead.
    • The {#syntax#}@import("builtin").single_threaded{#endsyntax#} becomes {#syntax#}true{#endsyntax#} and therefore various userland APIs which read this variable become more efficient. For example {#syntax#}std.Mutex{#endsyntax#} becomes diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig index db89af7a42..bc7aeffdf5 100644 --- a/src-self-hosted/ir.zig +++ b/src-self-hosted/ir.zig @@ -1904,20 +1904,6 @@ pub const Builder = struct { } return error.Unimplemented; - //ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_field_ptr, return_value); - //IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node, - // get_optional_type(irb->codegen, irb->codegen->builtin_types.entry_promise)); - //// TODO replace replacement_value with @intToPtr(?promise, 0x1) when it doesn't crash zig - //IrInstruction *replacement_value = irb->exec->coro_handle; - //IrInstruction *maybe_await_handle = ir_build_atomic_rmw(irb, scope, node, - // promise_type_val, irb->exec->coro_awaiter_field_ptr, nullptr, replacement_value, nullptr, - // AtomicRmwOp_xchg, AtomicOrderSeqCst); - //ir_build_store_ptr(irb, scope, node, irb->exec->await_handle_var_ptr, maybe_await_handle); - //IrInstruction *is_non_null = ir_build_test_nonnull(irb, scope, node, maybe_await_handle); - //IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false); - //return ir_build_cond_br(irb, scope, node, is_non_null, irb->exec->coro_normal_final, irb->exec->coro_early_final, - // is_comptime); - //// the above blocks are rendered by ir_gen after the rest of codegen } const Ident = union(enum) { diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig index 95b71a55c9..1f5f07eff0 100644 --- a/src-self-hosted/link.zig +++ b/src-self-hosted/link.zig @@ -627,7 +627,7 @@ fn constructLinkerArgsWasm(ctx: *Context) void { fn addFnObjects(ctx: *Context) !void { // at this point it's guaranteed nobody else has this lock, so we circumvent it - // and avoid having to be a coroutine + 
// and avoid having to be an async function const fn_link_set = &ctx.comp.fn_link_set.private_data; var it = fn_link_set.first; diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig index 63ac47147d..bc5d078950 100644 --- a/src-self-hosted/main.zig +++ b/src-self-hosted/main.zig @@ -52,7 +52,7 @@ const Command = struct { pub fn main() !void { // This allocator needs to be thread-safe because we use it for the event.Loop - // which multiplexes coroutines onto kernel threads. + // which multiplexes async functions onto kernel threads. // libc allocator is guaranteed to have this property. const allocator = std.heap.c_allocator; diff --git a/src-self-hosted/stage1.zig b/src-self-hosted/stage1.zig index dd26e9594c..b8f13b5d03 100644 --- a/src-self-hosted/stage1.zig +++ b/src-self-hosted/stage1.zig @@ -142,7 +142,8 @@ export fn stage2_render_ast(tree: *ast.Tree, output_file: *FILE) Error { return Error.None; } -// TODO: just use the actual self-hosted zig fmt. Until the coroutine rewrite, we use a blocking implementation. +// TODO: just use the actual self-hosted zig fmt. Until https://github.com/ziglang/zig/issues/2377, +// we use a blocking implementation. 
export fn stage2_fmt(argc: c_int, argv: [*]const [*]const u8) c_int { if (std.debug.runtime_safety) { fmtMain(argc, argv) catch unreachable; diff --git a/src/all_types.hpp b/src/all_types.hpp index e6a19a0c1d..6445c05bff 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1265,7 +1265,7 @@ enum ZigTypeId { ZigTypeIdBoundFn, ZigTypeIdArgTuple, ZigTypeIdOpaque, - ZigTypeIdCoroFrame, + ZigTypeIdFnFrame, ZigTypeIdAnyFrame, ZigTypeIdVector, ZigTypeIdEnumLiteral, @@ -1281,7 +1281,7 @@ struct ZigTypeOpaque { Buf *bare_name; }; -struct ZigTypeCoroFrame { +struct ZigTypeFnFrame { ZigFn *fn; ZigType *locals_struct; }; @@ -1315,7 +1315,7 @@ struct ZigType { ZigTypeBoundFn bound_fn; ZigTypeVector vector; ZigTypeOpaque opaque; - ZigTypeCoroFrame frame; + ZigTypeFnFrame frame; ZigTypeAnyFrame any_frame; } data; @@ -1376,7 +1376,7 @@ struct ZigFn { LLVMTypeRef raw_type_ref; ZigLLVMDIType *raw_di_type; - ZigType *frame_type; // coro frame type + ZigType *frame_type; // in the case of normal functions this is the implicit return type // in the case of async functions this is the implicit return type according to the // zig source code, not according to zig ir @@ -2368,7 +2368,7 @@ enum IrInstructionId { IrInstructionIdSuspendFinish, IrInstructionIdAwaitSrc, IrInstructionIdAwaitGen, - IrInstructionIdCoroResume, + IrInstructionIdResume, IrInstructionIdTestCancelRequested, IrInstructionIdSpillBegin, IrInstructionIdSpillEnd, @@ -3640,7 +3640,7 @@ struct IrInstructionAwaitGen { IrInstruction *result_loc; }; -struct IrInstructionCoroResume { +struct IrInstructionResume { IrInstruction base; IrInstruction *frame; @@ -3751,12 +3751,12 @@ static const size_t maybe_null_index = 1; static const size_t err_union_payload_index = 0; static const size_t err_union_err_index = 1; -// label (grep this): [coro_frame_struct_layout] -static const size_t coro_fn_ptr_index = 0; -static const size_t coro_resume_index = 1; -static const size_t coro_awaiter_index = 2; -static const size_t 
coro_prev_val_index = 3; -static const size_t coro_ret_start = 4; +// label (grep this): [fn_frame_struct_layout] +static const size_t frame_fn_ptr_index = 0; +static const size_t frame_resume_index = 1; +static const size_t frame_awaiter_index = 2; +static const size_t frame_prev_val_index = 3; +static const size_t frame_ret_start = 4; // TODO https://github.com/ziglang/zig/issues/3056 // We require this to be a power of 2 so that we can use shifting rather than diff --git a/src/analyze.cpp b/src/analyze.cpp index 858fa7be2d..dd8c56da1c 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -234,7 +234,7 @@ AstNode *type_decl_node(ZigType *type_entry) { return type_entry->data.enumeration.decl_node; case ZigTypeIdUnion: return type_entry->data.unionation.decl_node; - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: return type_entry->data.frame.fn->proto_node; case ZigTypeIdOpaque: case ZigTypeIdMetaType: @@ -271,7 +271,7 @@ bool type_is_resolved(ZigType *type_entry, ResolveStatus status) { return type_entry->data.structure.resolve_status >= status; case ZigTypeIdUnion: return type_entry->data.unionation.resolve_status >= status; - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: switch (status) { case ResolveStatusInvalid: zig_unreachable(); @@ -394,18 +394,18 @@ static const char *ptr_len_to_star_str(PtrLen ptr_len) { zig_unreachable(); } -ZigType *get_coro_frame_type(CodeGen *g, ZigFn *fn) { +ZigType *get_fn_frame_type(CodeGen *g, ZigFn *fn) { if (fn->frame_type != nullptr) { return fn->frame_type; } - ZigType *entry = new_type_table_entry(ZigTypeIdCoroFrame); + ZigType *entry = new_type_table_entry(ZigTypeIdFnFrame); buf_resize(&entry->name, 0); buf_appendf(&entry->name, "@Frame(%s)", buf_ptr(&fn->symbol_name)); entry->data.frame.fn = fn; - // Coroutine frames are always non-zero bits because they always have a resume index. + // Async function frames are always non-zero bits because they always have a resume index. 
entry->abi_size = SIZE_MAX; entry->size_in_bits = SIZE_MAX; @@ -1108,7 +1108,7 @@ static Error emit_error_unless_type_allowed_in_packed_struct(CodeGen *g, ZigType case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: case ZigTypeIdOpaque: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: add_node_error(g, source_node, buf_sprintf("type '%s' not allowed in packed struct; no guaranteed in-memory representation", @@ -1198,7 +1198,7 @@ bool type_allowed_in_extern(CodeGen *g, ZigType *type_entry) { case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: case ZigTypeIdVoid: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: return false; case ZigTypeIdOpaque: @@ -1370,7 +1370,7 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc case ZigTypeIdUnion: case ZigTypeIdFn: case ZigTypeIdVector: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: switch (type_requires_comptime(g, type_entry)) { case ReqCompTimeNo: @@ -1467,7 +1467,7 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc case ZigTypeIdUnion: case ZigTypeIdFn: case ZigTypeIdVector: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: switch (type_requires_comptime(g, fn_type_id.return_type)) { case ReqCompTimeInvalid: @@ -3080,7 +3080,7 @@ ZigType *validate_var_type(CodeGen *g, AstNode *source_node, ZigType *type_entry case ZigTypeIdFn: case ZigTypeIdBoundFn: case ZigTypeIdVector: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: return type_entry; } @@ -3582,7 +3582,7 @@ bool is_container(ZigType *type_entry) { case ZigTypeIdArgTuple: case ZigTypeIdOpaque: case ZigTypeIdVector: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: return false; } @@ -3640,7 +3640,7 @@ Error resolve_container_type(CodeGen *g, ZigType *type_entry) { case ZigTypeIdArgTuple: case ZigTypeIdOpaque: case ZigTypeIdVector: - case ZigTypeIdCoroFrame: 
+ case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: zig_unreachable(); } @@ -3672,7 +3672,7 @@ bool type_is_nonnull_ptr(ZigType *type) { return get_codegen_ptr_type(type) == type && !ptr_allows_addr_zero(type); } -static uint32_t get_coro_frame_align_bytes(CodeGen *g) { +static uint32_t get_async_frame_align_bytes(CodeGen *g) { uint32_t a = g->pointer_size_bytes * 2; // promises have at least alignment 8 so that we can have 3 extra bits when doing atomicrmw if (a < 8) a = 8; @@ -3691,7 +3691,7 @@ uint32_t get_ptr_align(CodeGen *g, ZigType *type) { // See http://lists.llvm.org/pipermail/llvm-dev/2018-September/126142.html return (ptr_type->data.fn.fn_type_id.alignment == 0) ? 1 : ptr_type->data.fn.fn_type_id.alignment; } else if (ptr_type->id == ZigTypeIdAnyFrame) { - return get_coro_frame_align_bytes(g); + return get_async_frame_align_bytes(g); } else { zig_unreachable(); } @@ -3779,7 +3779,7 @@ bool resolve_inferred_error_set(CodeGen *g, ZigType *err_set_type, AstNode *sour } static void resolve_async_fn_frame(CodeGen *g, ZigFn *fn) { - ZigType *frame_type = get_coro_frame_type(g, fn); + ZigType *frame_type = get_fn_frame_type(g, fn); Error err; if ((err = type_resolve(g, frame_type, ResolveStatusSizeKnown))) { fn->anal_state = FnAnalStateInvalid; @@ -4218,7 +4218,7 @@ bool handle_is_ptr(ZigType *type_entry) { return false; case ZigTypeIdArray: case ZigTypeIdStruct: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: return type_has_bits(type_entry); case ZigTypeIdErrorUnion: return type_has_bits(type_entry->data.error_union.payload_type); @@ -4463,7 +4463,7 @@ static uint32_t hash_const_val(ConstExprValue *const_val) { case ZigTypeIdVector: // TODO better hashing algorithm return 3647867726; - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: // TODO better hashing algorithm return 675741936; case ZigTypeIdAnyFrame: @@ -4533,7 +4533,7 @@ static bool can_mutate_comptime_var_state(ConstExprValue *value) { case ZigTypeIdOpaque: case ZigTypeIdErrorSet: case 
ZigTypeIdEnum: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: return false; @@ -4606,7 +4606,7 @@ static bool return_type_is_cacheable(ZigType *return_type) { case ZigTypeIdEnum: case ZigTypeIdPointer: case ZigTypeIdVector: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: return true; @@ -4739,7 +4739,7 @@ OnePossibleValue type_has_one_possible_value(CodeGen *g, ZigType *type_entry) { case ZigTypeIdBool: case ZigTypeIdFloat: case ZigTypeIdErrorUnion: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: return OnePossibleValueNo; case ZigTypeIdUndefined: @@ -4828,7 +4828,7 @@ ReqCompTime type_requires_comptime(CodeGen *g, ZigType *type_entry) { case ZigTypeIdFloat: case ZigTypeIdVoid: case ZigTypeIdUnreachable: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: return ReqCompTimeNo; } @@ -5161,7 +5161,7 @@ static ZigType *get_async_fn_type(CodeGen *g, ZigType *orig_fn_type) { return fn_type; } -static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { +static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) { Error err; if (frame_type->data.frame.locals_struct != nullptr) @@ -5231,7 +5231,7 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { if (!fn_is_async(callee)) continue; - ZigType *callee_frame_type = get_coro_frame_type(g, callee); + ZigType *callee_frame_type = get_fn_frame_type(g, callee); IrInstructionAllocaGen *alloca_gen = allocate(1); alloca_gen->base.id = IrInstructionIdAllocaGen; @@ -5244,7 +5244,7 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { call->frame_result_loc = &alloca_gen->base; } - // label (grep this): [coro_frame_struct_layout] + // label (grep this): [fn_frame_struct_layout] ZigList field_types = {}; ZigList field_names = {}; @@ -5366,8 +5366,8 @@ Error type_resolve(CodeGen *g, ZigType *ty, ResolveStatus status) { return resolve_enum_zero_bits(g, ty); } else if (ty->id == 
ZigTypeIdUnion) { return resolve_union_alignment(g, ty); - } else if (ty->id == ZigTypeIdCoroFrame) { - return resolve_coro_frame(g, ty); + } else if (ty->id == ZigTypeIdFnFrame) { + return resolve_async_frame(g, ty); } return ErrorNone; case ResolveStatusSizeKnown: @@ -5377,8 +5377,8 @@ Error type_resolve(CodeGen *g, ZigType *ty, ResolveStatus status) { return resolve_enum_zero_bits(g, ty); } else if (ty->id == ZigTypeIdUnion) { return resolve_union_type(g, ty); - } else if (ty->id == ZigTypeIdCoroFrame) { - return resolve_coro_frame(g, ty); + } else if (ty->id == ZigTypeIdFnFrame) { + return resolve_async_frame(g, ty); } return ErrorNone; case ResolveStatusLLVMFwdDecl: @@ -5573,7 +5573,7 @@ bool const_values_equal(CodeGen *g, ConstExprValue *a, ConstExprValue *b) { return false; } return true; - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: zig_panic("TODO"); case ZigTypeIdAnyFrame: zig_panic("TODO"); @@ -5929,7 +5929,7 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) { buf_appendf(buf, "(args value)"); return; } - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: buf_appendf(buf, "(TODO: async function frame value)"); return; @@ -5992,7 +5992,7 @@ uint32_t type_id_hash(TypeId x) { case ZigTypeIdFn: case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: zig_unreachable(); case ZigTypeIdErrorUnion: @@ -6042,7 +6042,7 @@ bool type_id_eql(TypeId a, TypeId b) { case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: case ZigTypeIdOpaque: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: zig_unreachable(); case ZigTypeIdErrorUnion: @@ -6209,7 +6209,7 @@ static const ZigTypeId all_type_ids[] = { ZigTypeIdBoundFn, ZigTypeIdArgTuple, ZigTypeIdOpaque, - ZigTypeIdCoroFrame, + ZigTypeIdFnFrame, ZigTypeIdAnyFrame, ZigTypeIdVector, ZigTypeIdEnumLiteral, @@ -6274,7 +6274,7 @@ size_t type_id_index(ZigType *entry) { return 20; case ZigTypeIdOpaque: return 21; - 
case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: return 22; case ZigTypeIdAnyFrame: return 23; @@ -6338,7 +6338,7 @@ const char *type_id_name(ZigTypeId id) { return "Opaque"; case ZigTypeIdVector: return "Vector"; - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: return "Frame"; case ZigTypeIdAnyFrame: return "AnyFrame"; @@ -6782,7 +6782,7 @@ static void resolve_llvm_types_slice(CodeGen *g, ZigType *type, ResolveStatus wa } static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveStatus wanted_resolve_status, - ZigType *coro_frame_type) + ZigType *async_frame_type) { assert(struct_type->id == ZigTypeIdStruct); assert(struct_type->data.structure.resolve_status != ResolveStatusInvalid); @@ -6887,11 +6887,11 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS packed_bits_offset = next_packed_bits_offset; } else { LLVMTypeRef llvm_type; - if (i == 0 && coro_frame_type != nullptr) { - assert(coro_frame_type->id == ZigTypeIdCoroFrame); + if (i == 0 && async_frame_type != nullptr) { + assert(async_frame_type->id == ZigTypeIdFnFrame); assert(field_type->id == ZigTypeIdFn); - resolve_llvm_types_fn(g, coro_frame_type->data.frame.fn); - llvm_type = LLVMPointerType(coro_frame_type->data.frame.fn->raw_type_ref, 0); + resolve_llvm_types_fn(g, async_frame_type->data.frame.fn); + llvm_type = LLVMPointerType(async_frame_type->data.frame.fn->raw_type_ref, 0); } else { llvm_type = get_llvm_type(g, field_type); } @@ -7594,7 +7594,7 @@ void resolve_llvm_types_fn(CodeGen *g, ZigFn *fn) { // first "parameter" is return value param_di_types.append(get_llvm_di_type(g, gen_return_type)); - ZigType *frame_type = get_coro_frame_type(g, fn); + ZigType *frame_type = get_fn_frame_type(g, fn); ZigType *ptr_type = get_pointer_to_type(g, frame_type, false); if ((err = type_resolve(g, ptr_type, ResolveStatusLLVMFwdDecl))) zig_unreachable(); @@ -7634,7 +7634,7 @@ static void resolve_llvm_types_anyerror(CodeGen *g) { get_llvm_di_type(g, 
g->err_tag_type), ""); } -static void resolve_llvm_types_coro_frame(CodeGen *g, ZigType *frame_type, ResolveStatus wanted_resolve_status) { +static void resolve_llvm_types_async_frame(CodeGen *g, ZigType *frame_type, ResolveStatus wanted_resolve_status) { resolve_llvm_types_struct(g, frame_type->data.frame.locals_struct, wanted_resolve_status, frame_type); frame_type->llvm_type = frame_type->data.frame.locals_struct->llvm_type; frame_type->llvm_di_type = frame_type->data.frame.locals_struct->llvm_di_type; @@ -7673,7 +7673,7 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re ZigList field_types = {}; ZigList di_element_types = {}; - // label (grep this): [coro_frame_struct_layout] + // label (grep this): [fn_frame_struct_layout] field_types.append(ptr_fn_llvm_type); // fn_ptr field_types.append(usize_type_ref); // resume_index field_types.append(usize_type_ref); // awaiter @@ -7824,8 +7824,8 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r type->abi_align, get_llvm_di_type(g, type->data.vector.elem_type), type->data.vector.len); return; } - case ZigTypeIdCoroFrame: - return resolve_llvm_types_coro_frame(g, type, wanted_resolve_status); + case ZigTypeIdFnFrame: + return resolve_llvm_types_async_frame(g, type, wanted_resolve_status); case ZigTypeIdAnyFrame: return resolve_llvm_types_any_frame(g, type, wanted_resolve_status); } diff --git a/src/analyze.hpp b/src/analyze.hpp index 26bb006249..e6336d3cdc 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -16,7 +16,7 @@ ErrorMsg *add_token_error(CodeGen *g, ZigType *owner, Token *token, Buf *msg); ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, const AstNode *node, Buf *msg); void emit_error_notes_for_ref_stack(CodeGen *g, ErrorMsg *msg); ZigType *new_type_table_entry(ZigTypeId id); -ZigType *get_coro_frame_type(CodeGen *g, ZigFn *fn); +ZigType *get_fn_frame_type(CodeGen *g, ZigFn *fn); ZigType *get_pointer_to_type(CodeGen *g, ZigType 
*child_type, bool is_const); ZigType *get_pointer_to_type_extra(CodeGen *g, ZigType *child_type, bool is_const, bool is_volatile, PtrLen ptr_len, diff --git a/src/codegen.cpp b/src/codegen.cpp index 3fb3022ac7..c50d4121b6 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -305,16 +305,16 @@ static LLVMLinkage to_llvm_linkage(GlobalLinkageId id) { zig_unreachable(); } -// label (grep this): [coro_frame_struct_layout] +// label (grep this): [fn_frame_struct_layout] static uint32_t frame_index_trace_arg(CodeGen *g, ZigType *return_type) { // [0] *ReturnType (callee's) // [1] *ReturnType (awaiter's) // [2] ReturnType uint32_t return_field_count = type_has_bits(return_type) ? 3 : 0; - return coro_ret_start + return_field_count; + return frame_ret_start + return_field_count; } -// label (grep this): [coro_frame_struct_layout] +// label (grep this): [fn_frame_struct_layout] static uint32_t frame_index_arg(CodeGen *g, ZigType *return_type) { bool have_stack_trace = codegen_fn_has_err_ret_tracing_arg(g, return_type); // [0] *StackTrace @@ -322,7 +322,7 @@ static uint32_t frame_index_arg(CodeGen *g, ZigType *return_type) { return frame_index_trace_arg(g, return_type) + trace_field_count; } -// label (grep this): [coro_frame_struct_layout] +// label (grep this): [fn_frame_struct_layout] static uint32_t frame_index_trace_stack(CodeGen *g, FnTypeId *fn_type_id) { uint32_t result = frame_index_arg(g, fn_type_id->return_type); for (size_t i = 0; i < fn_type_id->param_count; i += 1) { @@ -2224,7 +2224,7 @@ static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef tar { LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; if (fn_val == nullptr) { - LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_fn_ptr_index, ""); + LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_fn_ptr_index, ""); fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, ""); } if (arg_val == nullptr) { @@ -2373,7 +2373,7 
@@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns // If the awaiter result pointer is non-null, we need to copy the result to there. LLVMBasicBlockRef copy_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResult"); LLVMBasicBlockRef copy_end_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResultEnd"); - LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, coro_ret_start + 1, ""); + LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_ret_start + 1, ""); LLVMValueRef awaiter_ret_ptr = LLVMBuildLoad(g->builder, awaiter_ret_ptr_ptr, ""); LLVMValueRef zero_ptr = LLVMConstNull(LLVMTypeOf(awaiter_ret_ptr)); LLVMValueRef need_copy_bit = LLVMBuildICmp(g->builder, LLVMIntNE, awaiter_ret_ptr, zero_ptr, ""); @@ -3858,7 +3858,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr if (ret_has_bits) { // Use the result location which is inside the frame if this is an async call. - ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start + 2, ""); + ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, ""); } } else { LLVMValueRef frame_slice_ptr = ir_llvm_value(g, instruction->new_stack); @@ -3897,14 +3897,14 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr if (ret_has_bits) { if (result_loc == nullptr) { // return type is a scalar, but we still need a pointer to it. Use the async fn frame. - ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start + 2, ""); + ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, ""); } else { // Use the call instruction's result location. ret_ptr = result_loc; } // Store a zero in the awaiter's result ptr to indicate we do not need a copy made. 
- LLVMValueRef awaiter_ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start + 1, ""); + LLVMValueRef awaiter_ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 1, ""); LLVMValueRef zero_ptr = LLVMConstNull(LLVMGetElementType(LLVMTypeOf(awaiter_ret_ptr))); LLVMBuildStore(g->builder, zero_ptr, awaiter_ret_ptr); } @@ -3919,19 +3919,19 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr if (instruction->is_async || callee_is_async) { assert(frame_result_loc != nullptr); - LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_fn_ptr_index, ""); + LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_fn_ptr_index, ""); LLVMValueRef bitcasted_fn_val = LLVMBuildBitCast(g->builder, fn_val, LLVMGetElementType(LLVMTypeOf(fn_ptr_ptr)), ""); LLVMBuildStore(g->builder, bitcasted_fn_val, fn_ptr_ptr); - LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_resume_index, ""); + LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_resume_index, ""); LLVMBuildStore(g->builder, zero, resume_index_ptr); - LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_awaiter_index, ""); + LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_awaiter_index, ""); LLVMBuildStore(g->builder, awaiter_init_val, awaiter_ptr); if (ret_has_bits) { - LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start, ""); + LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start, ""); LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr); } } else { @@ -4018,7 +4018,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr if (result_loc != nullptr) return get_handle_value(g, result_loc, src_return_type, ptr_result_type); - LLVMValueRef result_ptr = 
LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start + 2, ""); + LLVMValueRef result_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, ""); return LLVMBuildLoad(g->builder, result_ptr, ""); } @@ -5491,7 +5491,7 @@ static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrIns // supply null for the awaiter return pointer (no copy needed) if (type_has_bits(result_type)) { - LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_ret_start + 1, ""); + LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start + 1, ""); LLVMBuildStore(g->builder, LLVMConstNull(LLVMGetElementType(LLVMTypeOf(awaiter_ret_ptr_ptr))), awaiter_ret_ptr_ptr); } @@ -5506,7 +5506,7 @@ static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrIns LLVMValueRef awaiter_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, ""); LLVMValueRef awaiter_ored_val = LLVMBuildOr(g->builder, awaiter_val, one, ""); - LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_awaiter_index, ""); + LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_awaiter_index, ""); LLVMValueRef prev_val = gen_maybe_atomic_op(g, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_ored_val, LLVMAtomicOrderingRelease); @@ -5549,7 +5549,7 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst LLVMValueRef result_loc = (instruction->result_loc == nullptr) ? 
nullptr : ir_llvm_value(g, instruction->result_loc); if (type_has_bits(result_type)) { - LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_ret_start + 1, ""); + LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start + 1, ""); if (result_loc == nullptr) { // no copy needed LLVMBuildStore(g->builder, LLVMConstNull(LLVMGetElementType(LLVMTypeOf(awaiter_ret_ptr_ptr))), @@ -5570,7 +5570,7 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst // caller's own frame pointer LLVMValueRef awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, ""); - LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_awaiter_index, ""); + LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_awaiter_index, ""); LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_init_val, LLVMAtomicOrderingRelease, g->is_single_threaded); @@ -5608,9 +5608,7 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst return nullptr; } -static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, - IrInstructionCoroResume *instruction) -{ +static LLVMValueRef ir_render_resume(CodeGen *g, IrExecutable *executable, IrInstructionResume *instruction) { LLVMValueRef frame = ir_llvm_value(g, instruction->frame); ZigType *frame_type = instruction->frame->value.type; assert(frame_type->id == ZigTypeIdAnyFrame); @@ -5921,8 +5919,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_suspend_begin(g, executable, (IrInstructionSuspendBegin *)instruction); case IrInstructionIdSuspendFinish: return ir_render_suspend_finish(g, executable, (IrInstructionSuspendFinish *)instruction); - case IrInstructionIdCoroResume: - return ir_render_coro_resume(g, executable, (IrInstructionCoroResume 
*)instruction); + case IrInstructionIdResume: + return ir_render_resume(g, executable, (IrInstructionResume *)instruction); case IrInstructionIdFrameSizeGen: return ir_render_frame_size(g, executable, (IrInstructionFrameSizeGen *)instruction); case IrInstructionIdAwaitGen: @@ -6195,7 +6193,7 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con } return val; } - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: zig_panic("TODO bit pack an async function frame"); case ZigTypeIdAnyFrame: zig_panic("TODO bit pack an anyframe"); @@ -6727,7 +6725,7 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c case ZigTypeIdArgTuple: case ZigTypeIdOpaque: zig_unreachable(); - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: zig_panic("TODO"); case ZigTypeIdAnyFrame: zig_panic("TODO"); @@ -7171,12 +7169,12 @@ static void do_code_gen(CodeGen *g) { LLVMPositionBuilderAtEnd(g->builder, g->cur_preamble_llvm_block); render_async_spills(g); - g->cur_async_awaiter_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, coro_awaiter_index, ""); - LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, coro_resume_index, ""); + g->cur_async_awaiter_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_awaiter_index, ""); + LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_resume_index, ""); g->cur_async_resume_index_ptr = resume_index_ptr; if (type_has_bits(fn_type_id->return_type)) { - LLVMValueRef cur_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, coro_ret_start, ""); + LLVMValueRef cur_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_ret_start, ""); g->cur_ret_ptr = LLVMBuildLoad(g->builder, cur_ret_ptr_ptr, ""); } if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) { @@ -7190,7 +7188,7 @@ static void do_code_gen(CodeGen *g) { trace_field_index_stack, ""); } g->cur_async_prev_val_field_ptr = 
LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, - coro_prev_val_index, ""); + frame_prev_val_index, ""); LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, ""); LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block, 4); @@ -9229,7 +9227,7 @@ static void prepend_c_type_to_decl_list(CodeGen *g, GenH *gen_h, ZigType *type_e case ZigTypeIdArgTuple: case ZigTypeIdErrorUnion: case ZigTypeIdErrorSet: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: zig_unreachable(); case ZigTypeIdVoid: @@ -9414,7 +9412,7 @@ static void get_c_type(CodeGen *g, GenH *gen_h, ZigType *type_entry, Buf *out_bu case ZigTypeIdUndefined: case ZigTypeIdNull: case ZigTypeIdArgTuple: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: zig_unreachable(); } @@ -9583,7 +9581,7 @@ static void gen_h_file(CodeGen *g) { case ZigTypeIdOptional: case ZigTypeIdFn: case ZigTypeIdVector: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: zig_unreachable(); diff --git a/src/ir.cpp b/src/ir.cpp index 3829e0c2cc..3564435ddd 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -321,7 +321,7 @@ static bool types_have_same_zig_comptime_repr(ZigType *a, ZigType *b) { case ZigTypeIdFn: case ZigTypeIdArgTuple: case ZigTypeIdVector: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: return false; } zig_unreachable(); @@ -1058,8 +1058,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionAwaitGen *) { return IrInstructionIdAwaitGen; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroResume *) { - return IrInstructionIdCoroResume; +static constexpr IrInstructionId ir_instruction_id(IrInstructionResume *) { + return IrInstructionIdResume; } static constexpr IrInstructionId ir_instruction_id(IrInstructionTestCancelRequested *) { @@ -3321,10 +3321,8 @@ static IrInstruction *ir_build_await_gen(IrAnalyze *ira, IrInstruction *source_i return &instruction->base; } -static 
IrInstruction *ir_build_coro_resume(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *frame) -{ - IrInstructionCoroResume *instruction = ir_build_instruction(irb, scope, source_node); +static IrInstruction *ir_build_resume(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *frame) { + IrInstructionResume *instruction = ir_build_instruction(irb, scope, source_node); instruction->base.value.type = irb->codegen->builtin_types.entry_void; instruction->frame = frame; @@ -7964,7 +7962,7 @@ static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) if (target_inst == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; - return ir_build_coro_resume(irb, scope, node, target_inst); + return ir_build_resume(irb, scope, node, target_inst); } static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, @@ -12223,7 +12221,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst // *@Frame(func) to anyframe->T or anyframe if (actual_type->id == ZigTypeIdPointer && actual_type->data.pointer.ptr_len == PtrLenSingle && - actual_type->data.pointer.child_type->id == ZigTypeIdCoroFrame && wanted_type->id == ZigTypeIdAnyFrame) + actual_type->data.pointer.child_type->id == ZigTypeIdFnFrame && wanted_type->id == ZigTypeIdAnyFrame) { bool ok = true; if (wanted_type->data.any_frame.result_type != nullptr) { @@ -13123,7 +13121,7 @@ static IrInstruction *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp * case ZigTypeIdNull: case ZigTypeIdErrorUnion: case ZigTypeIdUnion: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: operator_allowed = false; break; case ZigTypeIdOptional: @@ -14488,7 +14486,7 @@ static IrInstruction *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructio case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: case ZigTypeIdOpaque: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: ir_add_error(ira, 
target, buf_sprintf("invalid export target '%s'", buf_ptr(&type_value->name))); @@ -14514,7 +14512,7 @@ static IrInstruction *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructio case ZigTypeIdArgTuple: case ZigTypeIdOpaque: case ZigTypeIdEnumLiteral: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: ir_add_error(ira, target, buf_sprintf("invalid export target type '%s'", buf_ptr(&target->value.type->name))); @@ -15060,7 +15058,7 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc return ira->codegen->invalid_instruction; } - ZigType *frame_type = get_coro_frame_type(ira->codegen, fn_entry); + ZigType *frame_type = get_fn_frame_type(ira->codegen, fn_entry); IrInstruction *result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc, frame_type, nullptr, true, true, false); if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc))) { @@ -16121,7 +16119,7 @@ static IrInstruction *ir_analyze_optional_type(IrAnalyze *ira, IrInstructionUnOp case ZigTypeIdFn: case ZigTypeIdBoundFn: case ZigTypeIdArgTuple: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: return ir_const_type(ira, &un_op_instruction->base, get_optional_type(ira->codegen, type_entry)); @@ -17910,7 +17908,7 @@ static IrInstruction *ir_analyze_instruction_slice_type(IrAnalyze *ira, case ZigTypeIdFn: case ZigTypeIdBoundFn: case ZigTypeIdVector: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: { ResolveStatus needed_status = (align_bytes == 0) ? 
@@ -18026,7 +18024,7 @@ static IrInstruction *ir_analyze_instruction_array_type(IrAnalyze *ira, case ZigTypeIdFn: case ZigTypeIdBoundFn: case ZigTypeIdVector: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: { if ((err = ensure_complete_type(ira->codegen, child_type))) @@ -18078,7 +18076,7 @@ static IrInstruction *ir_analyze_instruction_size_of(IrAnalyze *ira, case ZigTypeIdUnion: case ZigTypeIdFn: case ZigTypeIdVector: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: { uint64_t size_in_bytes = type_size(ira->codegen, type_entry); @@ -18643,7 +18641,7 @@ static IrInstruction *ir_analyze_instruction_switch_target(IrAnalyze *ira, case ZigTypeIdArgTuple: case ZigTypeIdOpaque: case ZigTypeIdVector: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: ir_add_error(ira, &switch_target_instruction->base, buf_sprintf("invalid switch target type '%s'", buf_ptr(&target_type->name))); @@ -20500,7 +20498,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr break; } - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: zig_panic("TODO @typeInfo for async function frames"); } @@ -22219,7 +22217,7 @@ static IrInstruction *ir_analyze_instruction_frame_handle(IrAnalyze *ira, IrInst ZigFn *fn = exec_fn_entry(ira->new_irb.exec); ir_assert(fn != nullptr, &instruction->base); - ZigType *frame_type = get_coro_frame_type(ira->codegen, fn); + ZigType *frame_type = get_fn_frame_type(ira->codegen, fn); ZigType *ptr_frame_type = get_pointer_to_type(ira->codegen, frame_type, false); IrInstruction *result = ir_build_handle(&ira->new_irb, instruction->base.scope, instruction->base.source_node); @@ -22232,7 +22230,7 @@ static IrInstruction *ir_analyze_instruction_frame_type(IrAnalyze *ira, IrInstru if (fn == nullptr) return ira->codegen->invalid_instruction; - ZigType *ty = get_coro_frame_type(ira->codegen, fn); + ZigType *ty = get_fn_frame_type(ira->codegen, fn); return ir_const_type(ira, 
&instruction->base, ty); } @@ -22293,7 +22291,7 @@ static IrInstruction *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruct case ZigTypeIdUnion: case ZigTypeIdFn: case ZigTypeIdVector: - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: case ZigTypeIdAnyFrame: { uint64_t align_in_bytes = get_abi_alignment(ira->codegen, type_entry); @@ -23438,7 +23436,7 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue zig_panic("TODO buf_write_value_bytes fn type"); case ZigTypeIdUnion: zig_panic("TODO buf_write_value_bytes union type"); - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: zig_panic("TODO buf_write_value_bytes async fn frame type"); case ZigTypeIdAnyFrame: zig_panic("TODO buf_write_value_bytes anyframe type"); @@ -23621,7 +23619,7 @@ static Error buf_read_value_bytes(IrAnalyze *ira, CodeGen *codegen, AstNode *sou zig_panic("TODO buf_read_value_bytes fn type"); case ZigTypeIdUnion: zig_panic("TODO buf_read_value_bytes union type"); - case ZigTypeIdCoroFrame: + case ZigTypeIdFnFrame: zig_panic("TODO buf_read_value_bytes async fn frame type"); case ZigTypeIdAnyFrame: zig_panic("TODO buf_read_value_bytes anyframe type"); @@ -24674,7 +24672,7 @@ static IrInstruction *analyze_frame_ptr_to_anyframe_T(IrAnalyze *ira, IrInstruct IrInstruction *frame; if (frame_ptr->value.type->id == ZigTypeIdPointer && frame_ptr->value.type->data.pointer.ptr_len == PtrLenSingle && - frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdCoroFrame) + frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdFnFrame) { result_type = frame_ptr->value.type->data.pointer.child_type->data.frame.fn->type_entry->data.fn.fn_type_id.return_type; frame = frame_ptr; @@ -24682,7 +24680,7 @@ static IrInstruction *analyze_frame_ptr_to_anyframe_T(IrAnalyze *ira, IrInstruct frame = ir_get_deref(ira, source_instr, frame_ptr, nullptr); if (frame->value.type->id == ZigTypeIdPointer && frame->value.type->data.pointer.ptr_len == PtrLenSingle && - 
frame->value.type->data.pointer.child_type->id == ZigTypeIdCoroFrame) + frame->value.type->data.pointer.child_type->id == ZigTypeIdFnFrame) { result_type = frame->value.type->data.pointer.child_type->data.frame.fn->type_entry->data.fn.fn_type_id.return_type; } else if (frame->value.type->id != ZigTypeIdAnyFrame || @@ -24751,7 +24749,7 @@ static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstruction return ir_finish_anal(ira, result); } -static IrInstruction *ir_analyze_instruction_coro_resume(IrAnalyze *ira, IrInstructionCoroResume *instruction) { +static IrInstruction *ir_analyze_instruction_resume(IrAnalyze *ira, IrInstructionResume *instruction) { IrInstruction *frame_ptr = instruction->frame->child; if (type_is_invalid(frame_ptr->value.type)) return ira->codegen->invalid_instruction; @@ -24759,7 +24757,7 @@ static IrInstruction *ir_analyze_instruction_coro_resume(IrAnalyze *ira, IrInstr IrInstruction *frame; if (frame_ptr->value.type->id == ZigTypeIdPointer && frame_ptr->value.type->data.pointer.ptr_len == PtrLenSingle && - frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdCoroFrame) + frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdFnFrame) { frame = frame_ptr; } else { @@ -24771,7 +24769,7 @@ static IrInstruction *ir_analyze_instruction_coro_resume(IrAnalyze *ira, IrInstr if (type_is_invalid(casted_frame->value.type)) return ira->codegen->invalid_instruction; - return ir_build_coro_resume(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_frame); + return ir_build_resume(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_frame); } static IrInstruction *ir_analyze_instruction_test_cancel_requested(IrAnalyze *ira, @@ -25112,8 +25110,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction return ir_analyze_instruction_suspend_begin(ira, (IrInstructionSuspendBegin *)instruction); case IrInstructionIdSuspendFinish: return 
ir_analyze_instruction_suspend_finish(ira, (IrInstructionSuspendFinish *)instruction); - case IrInstructionIdCoroResume: - return ir_analyze_instruction_coro_resume(ira, (IrInstructionCoroResume *)instruction); + case IrInstructionIdResume: + return ir_analyze_instruction_resume(ira, (IrInstructionResume *)instruction); case IrInstructionIdAwaitSrc: return ir_analyze_instruction_await(ira, (IrInstructionAwaitSrc *)instruction); case IrInstructionIdTestCancelRequested: @@ -25256,7 +25254,7 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdResetResult: case IrInstructionIdSuspendBegin: case IrInstructionIdSuspendFinish: - case IrInstructionIdCoroResume: + case IrInstructionIdResume: case IrInstructionIdAwaitSrc: case IrInstructionIdAwaitGen: case IrInstructionIdSpillBegin: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 9d4570d79a..63f3711266 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -1528,10 +1528,9 @@ static void ir_print_suspend_finish(IrPrint *irp, IrInstructionSuspendFinish *in fprintf(irp->f, "@suspendFinish()"); } -static void ir_print_coro_resume(IrPrint *irp, IrInstructionCoroResume *instruction) { - fprintf(irp->f, "@coroResume("); +static void ir_print_resume(IrPrint *irp, IrInstructionResume *instruction) { + fprintf(irp->f, "resume "); ir_print_other_instruction(irp, instruction->frame); - fprintf(irp->f, ")"); } static void ir_print_await_src(IrPrint *irp, IrInstructionAwaitSrc *instruction) { @@ -2039,8 +2038,8 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdSuspendFinish: ir_print_suspend_finish(irp, (IrInstructionSuspendFinish *)instruction); break; - case IrInstructionIdCoroResume: - ir_print_coro_resume(irp, (IrInstructionCoroResume *)instruction); + case IrInstructionIdResume: + ir_print_resume(irp, (IrInstructionResume *)instruction); break; case IrInstructionIdAwaitSrc: ir_print_await_src(irp, (IrInstructionAwaitSrc *)instruction); diff --git 
a/src/zig_llvm.cpp b/src/zig_llvm.cpp index 906b278b21..695f8b18ef 100644 --- a/src/zig_llvm.cpp +++ b/src/zig_llvm.cpp @@ -42,7 +42,6 @@ #include #include #include -#include #include #include #include @@ -203,8 +202,6 @@ bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machine_ref, LLVMM PMBuilder->Inliner = createFunctionInliningPass(PMBuilder->OptLevel, PMBuilder->SizeLevel, false); } - addCoroutinePassesToExtensionPoints(*PMBuilder); - // Set up the per-function pass manager. legacy::FunctionPassManager FPM = legacy::FunctionPassManager(module); auto tliwp = new(std::nothrow) TargetLibraryInfoWrapperPass(tlii); diff --git a/std/event/fs.zig b/std/event/fs.zig index fe2f604ac3..73a296ca3f 100644 --- a/std/event/fs.zig +++ b/std/event/fs.zig @@ -799,7 +799,7 @@ pub const WatchEventId = enum { // pub fn destroy(self: *Self) void { // switch (builtin.os) { // .macosx, .freebsd, .netbsd => { -// // TODO we need to cancel the coroutines before destroying the lock +// // TODO we need to cancel the frames before destroying the lock // self.os_data.table_lock.deinit(); // var it = self.os_data.file_table.iterator(); // while (it.next()) |entry| { @@ -1088,7 +1088,7 @@ pub const WatchEventId = enum { // // while (true) { // { -// // TODO only 1 beginOneEvent for the whole coroutine +// // TODO only 1 beginOneEvent for the whole function // self.channel.loop.beginOneEvent(); // errdefer self.channel.loop.finishOneEvent(); // errdefer { @@ -1252,7 +1252,7 @@ pub const WatchEventId = enum { const test_tmp_dir = "std_event_fs_test"; -// TODO this test is disabled until the coroutine rewrite is finished. +// TODO this test is disabled until the async function rewrite is finished. 
//test "write a file, watch it, write it again" { // return error.SkipZigTest; // const allocator = std.heap.direct_allocator; diff --git a/std/event/future.zig b/std/event/future.zig index 11a4c82fb0..e5f6d984ce 100644 --- a/std/event/future.zig +++ b/std/event/future.zig @@ -6,7 +6,7 @@ const Lock = std.event.Lock; const Loop = std.event.Loop; /// This is a value that starts out unavailable, until resolve() is called -/// While it is unavailable, coroutines suspend when they try to get() it, +/// While it is unavailable, functions suspend when they try to get() it, /// and then are resumed when resolve() is called. /// At this point the value remains forever available, and another resolve() is not allowed. pub fn Future(comptime T: type) type { diff --git a/std/event/group.zig b/std/event/group.zig index ab6d592278..1fc4a61e93 100644 --- a/std/event/group.zig +++ b/std/event/group.zig @@ -7,7 +7,7 @@ const testing = std.testing; /// ReturnType must be `void` or `E!void` pub fn Group(comptime ReturnType: type) type { return struct { - coro_stack: Stack, + frame_stack: Stack, alloc_stack: Stack, lock: Lock, @@ -21,7 +21,7 @@ pub fn Group(comptime ReturnType: type) type { pub fn init(loop: *Loop) Self { return Self{ - .coro_stack = Stack.init(), + .frame_stack = Stack.init(), .alloc_stack = Stack.init(), .lock = Lock.init(loop), }; @@ -29,7 +29,7 @@ pub fn Group(comptime ReturnType: type) type { /// Cancel all the outstanding frames. Can be called even if wait was already called. pub fn deinit(self: *Self) void { - while (self.coro_stack.pop()) |node| { + while (self.frame_stack.pop()) |node| { cancel node.data; } while (self.alloc_stack.pop()) |node| { @@ -50,11 +50,11 @@ pub fn Group(comptime ReturnType: type) type { /// Add a node to the group. Thread-safe. Cannot fail. /// `node.data` should be the frame handle to add to the group. 
- /// The node's memory should be in the coroutine frame of + /// The node's memory should be in the function frame of /// the handle that is in the node, or somewhere guaranteed to live /// at least as long. pub fn addNode(self: *Self, node: *Stack.Node) void { - self.coro_stack.push(node); + self.frame_stack.push(node); } /// Wait for all the calls and promises of the group to complete. @@ -64,7 +64,7 @@ pub fn Group(comptime ReturnType: type) type { const held = self.lock.acquire(); defer held.release(); - while (self.coro_stack.pop()) |node| { + while (self.frame_stack.pop()) |node| { if (Error == void) { await node.data; } else { diff --git a/std/event/lock.zig b/std/event/lock.zig index 8f2dac008c..da698d9fb2 100644 --- a/std/event/lock.zig +++ b/std/event/lock.zig @@ -6,7 +6,7 @@ const mem = std.mem; const Loop = std.event.Loop; /// Thread-safe async/await lock. -/// coroutines which are waiting for the lock are suspended, and +/// Functions which are waiting for the lock are suspended, and /// are resumed when the lock is released, in order. /// Allows only one actor to hold the lock. pub const Lock = struct { @@ -96,8 +96,7 @@ pub const Lock = struct { suspend { self.queue.put(&my_tick_node); - // At this point, we are in the queue, so we might have already been resumed and this coroutine - // frame might be destroyed. For the rest of the suspend block we cannot access the coroutine frame. + // At this point, we are in the queue, so we might have already been resumed. // We set this bit so that later we can rely on the fact, that if queue_empty_bit is 1, some actor // will attempt to grab the lock. diff --git a/std/event/locked.zig b/std/event/locked.zig index ede5fe3d95..aeedf3558a 100644 --- a/std/event/locked.zig +++ b/std/event/locked.zig @@ -3,7 +3,7 @@ const Lock = std.event.Lock; const Loop = std.event.Loop; /// Thread-safe async/await lock that protects one piece of data. 
-/// coroutines which are waiting for the lock are suspended, and +/// Functions which are waiting for the lock are suspended, and /// are resumed when the lock is released, in order. pub fn Locked(comptime T: type) type { return struct { diff --git a/std/event/loop.zig b/std/event/loop.zig index f1febd3fdb..827fbf3dd7 100644 --- a/std/event/loop.zig +++ b/std/event/loop.zig @@ -118,7 +118,7 @@ pub const Loop = struct { } /// The allocator must be thread-safe because we use it for multiplexing - /// coroutines onto kernel threads. + /// async functions onto kernel threads. /// After initialization, call run(). /// TODO copy elision / named return values so that the threads referencing *Loop /// have the correct pointer value. diff --git a/std/event/net.zig b/std/event/net.zig index 3752c88e99..2a28a0ef93 100644 --- a/std/event/net.zig +++ b/std/event/net.zig @@ -13,7 +13,7 @@ pub const Server = struct { loop: *Loop, sockfd: ?i32, - accept_coro: ?anyframe, + accept_frame: ?anyframe, listen_address: std.net.Address, waiting_for_emfile_node: PromiseNode, @@ -22,11 +22,11 @@ pub const Server = struct { const PromiseNode = std.TailQueue(anyframe).Node; pub fn init(loop: *Loop) Server { - // TODO can't initialize handler coroutine here because we need well defined copy elision + // TODO can't initialize handler here because we need well defined copy elision return Server{ .loop = loop, .sockfd = null, - .accept_coro = null, + .accept_frame = null, .handleRequestFn = undefined, .waiting_for_emfile_node = undefined, .listen_address = undefined, @@ -53,10 +53,10 @@ pub const Server = struct { try os.listen(sockfd, os.SOMAXCONN); self.listen_address = std.net.Address.initPosix(try os.getsockname(sockfd)); - self.accept_coro = async Server.handler(self); - errdefer cancel self.accept_coro.?; + self.accept_frame = async Server.handler(self); + errdefer cancel self.accept_frame.?; - self.listen_resume_node.handle = self.accept_coro.?; + self.listen_resume_node.handle = 
self.accept_frame.?; try self.loop.linuxAddFd(sockfd, &self.listen_resume_node, os.EPOLLIN | os.EPOLLOUT | os.EPOLLET); errdefer self.loop.removeFd(sockfd); } @@ -71,7 +71,7 @@ pub const Server = struct { } pub fn deinit(self: *Server) void { - if (self.accept_coro) |accept_coro| cancel accept_coro; + if (self.accept_frame) |accept_frame| cancel accept_frame; if (self.sockfd) |sockfd| os.close(sockfd); } diff --git a/std/event/rwlock.zig b/std/event/rwlock.zig index a5768e5b65..bf7ea0fa9f 100644 --- a/std/event/rwlock.zig +++ b/std/event/rwlock.zig @@ -6,7 +6,7 @@ const mem = std.mem; const Loop = std.event.Loop; /// Thread-safe async/await lock. -/// coroutines which are waiting for the lock are suspended, and +/// Functions which are waiting for the lock are suspended, and /// are resumed when the lock is released, in order. /// Many readers can hold the lock at the same time; however locking for writing is exclusive. /// When a read lock is held, it will not be released until the reader queue is empty. @@ -107,8 +107,7 @@ pub const RwLock = struct { self.reader_queue.put(&my_tick_node); - // At this point, we are in the reader_queue, so we might have already been resumed and this coroutine - // frame might be destroyed. For the rest of the suspend block we cannot access the coroutine frame. + // At this point, we are in the reader_queue, so we might have already been resumed. // We set this bit so that later we can rely on the fact, that if reader_queue_empty_bit is 1, // some actor will attempt to grab the lock. @@ -139,8 +138,7 @@ pub const RwLock = struct { self.writer_queue.put(&my_tick_node); - // At this point, we are in the writer_queue, so we might have already been resumed and this coroutine - // frame might be destroyed. For the rest of the suspend block we cannot access the coroutine frame. + // At this point, we are in the writer_queue, so we might have already been resumed. 
// We set this bit so that later we can rely on the fact, that if writer_queue_empty_bit is 1, // some actor will attempt to grab the lock. diff --git a/std/event/rwlocked.zig b/std/event/rwlocked.zig index 0448e0298e..386aa08407 100644 --- a/std/event/rwlocked.zig +++ b/std/event/rwlocked.zig @@ -3,7 +3,7 @@ const RwLock = std.event.RwLock; const Loop = std.event.Loop; /// Thread-safe async/await RW lock that protects one piece of data. -/// coroutines which are waiting for the lock are suspended, and +/// Functions which are waiting for the lock are suspended, and /// are resumed when the lock is released, in order. pub fn RwLocked(comptime T: type) type { return struct { diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig index aec1ef96b5..5f2a3934fd 100644 --- a/std/zig/parser_test.zig +++ b/std/zig/parser_test.zig @@ -2103,7 +2103,7 @@ test "zig fmt: inline asm" { ); } -test "zig fmt: coroutines" { +test "zig fmt: async functions" { try testCanonical( \\async fn simpleAsyncFn() void { \\ const a = async a.b(); @@ -2115,8 +2115,8 @@ test "zig fmt: coroutines" { \\ await p; \\} \\ - \\test "coroutine suspend, resume, cancel" { - \\ const p: anyframe = try async testAsyncSeq(); + \\test "suspend, resume, cancel" { + \\ const p: anyframe = async testAsyncSeq(); \\ resume p; \\ cancel p; \\} From dd8c8c080229c1f0e742fb8702687f4080d7714d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 13 Aug 2019 18:14:38 -0400 Subject: [PATCH 100/125] get_struct_type accepts field alignment overrides --- BRANCH_TODO | 42 -------------------------- src/analyze.cpp | 79 +++++++++++++++++++++++-------------------------- 2 files changed, 37 insertions(+), 84 deletions(-) delete mode 100644 BRANCH_TODO diff --git a/BRANCH_TODO b/BRANCH_TODO deleted file mode 100644 index cac275eb75..0000000000 --- a/BRANCH_TODO +++ /dev/null @@ -1,42 +0,0 @@ - * zig fmt support for the syntax - * alignment of variables not being respected in async functions - * await of a non async 
function - * async call on a non async function - * documentation - - @asyncCall - - @frame - - @Frame - - @frameSize - - coroutines section - - suspend - - resume - - anyframe, anyframe->T - * a test where an async function destroys its own frame in a defer - * compile error (instead of crashing) for trying to get @Frame of generic function - * compile error (instead of crashing) for trying to async call and passing @Frame of wrong function - * implicit cast of normal function to async function should be allowed when it is inferred to be async - * compile error for error: expected anyframe->T, found 'anyframe' - * compile error for error: expected anyframe->T, found 'i32' - * peer type resolution of *@Frame(func) and anyframe - * peer type resolution of *@Frame(func) and anyframe->T when the return type matches - * for loops need to spill the index. other payload captures probably also need to spill - * `const result = (await a) + (await b);` this causes "Instruction does not dominate all uses" - need spill - * @typeInfo for @Frame(func) - * returning a value from within a suspend block - * make resuming inside a suspend block, with nothing after it, a must-tail call. - * make sure there are safety tests for all the new safety features (search the new PanicFnId enum values) - * compile error for casting a function to a non-async function pointer, but then later it gets inferred to be an async function - * compile error for copying a frame - * compile error for resuming a const frame pointer - * runtime safety enabling/disabling scope has to be coordinated across resume/await/calls/return - * calling a generic function which is async - * make sure `await @asyncCall` and `await async` are handled correctly. 
- * allow @asyncCall with a real @Frame(func) (the point of this is result pointer) - * when there are multiple calls to async functions in a function, reuse the same frame buffer, so that the - needed bytes is equal to the largest callee's frame - * if an async function is never called with async then a few optimizations can be made: - - the return does not need to be atomic - - it can be assumed that these are always available: the awaiter ptr, return ptr if applicable, - error return trace ptr if applicable. - - it can be assumed that it is never cancelled - * fix the debug info for variables of async functions diff --git a/src/analyze.cpp b/src/analyze.cpp index dd8c56da1c..6f3b098063 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1499,9 +1499,14 @@ bool type_is_invalid(ZigType *type_entry) { zig_unreachable(); } +struct SrcField { + const char *name; + ZigType *ty; + unsigned align; +}; -static ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_names[], - ZigType *field_types[], size_t field_count, unsigned min_abi_align) +static ZigType *get_struct_type(CodeGen *g, const char *type_name, SrcField fields[], size_t field_count, + unsigned min_abi_align) { ZigType *struct_type = new_type_table_entry(ZigTypeIdStruct); @@ -1516,14 +1521,15 @@ static ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *f size_t abi_align = min_abi_align; for (size_t i = 0; i < field_count; i += 1) { TypeStructField *field = &struct_type->data.structure.fields[i]; - field->name = buf_create_from_str(field_names[i]); - field->type_entry = field_types[i]; + field->name = buf_create_from_str(fields[i].name); + field->type_entry = fields[i].ty; field->src_index = i; if (type_has_bits(field->type_entry)) { assert(type_is_resolved(field->type_entry, ResolveStatusSizeKnown)); - if (field->type_entry->abi_align > abi_align) { - abi_align = field->type_entry->abi_align; + unsigned field_abi_align = max(fields[i].align, 
field->type_entry->abi_align); + if (field_abi_align > abi_align) { + abi_align = field_abi_align; } } @@ -1545,8 +1551,13 @@ static ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *f if (type_has_bits(struct_type->data.structure.fields[next_src_field_index].type_entry)) break; } - size_t next_abi_align = (next_src_field_index == field_count) ? - abi_align : struct_type->data.structure.fields[next_src_field_index].type_entry->abi_align; + size_t next_abi_align; + if (next_src_field_index == field_count) { + next_abi_align = abi_align; + } else { + next_abi_align = max(fields[next_src_field_index].align, + struct_type->data.structure.fields[next_src_field_index].type_entry->abi_align); + } next_offset = next_field_offset(next_offset, abi_align, field->type_entry->abi_size, next_abi_align); } @@ -5245,35 +5256,22 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) { } // label (grep this): [fn_frame_struct_layout] - ZigList field_types = {}; - ZigList field_names = {}; + ZigList fields = {}; - field_names.append("@fn_ptr"); - field_types.append(fn_type); - - field_names.append("@resume_index"); - field_types.append(g->builtin_types.entry_usize); - - field_names.append("@awaiter"); - field_types.append(g->builtin_types.entry_usize); - - field_names.append("@prev_val"); - field_types.append(g->builtin_types.entry_usize); + fields.append({"@fn_ptr", fn_type, 0}); + fields.append({"@resume_index", g->builtin_types.entry_usize, 0}); + fields.append({"@awaiter", g->builtin_types.entry_usize, 0}); + fields.append({"@prev_val", g->builtin_types.entry_usize, 0}); FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; ZigType *ptr_return_type = get_pointer_to_type(g, fn_type_id->return_type, false); - field_names.append("@result_ptr_callee"); - field_types.append(ptr_return_type); - field_names.append("@result_ptr_awaiter"); - field_types.append(ptr_return_type); - - field_names.append("@result"); - 
field_types.append(fn_type_id->return_type); + fields.append({"@result_ptr_callee", ptr_return_type, 0}); + fields.append({"@result_ptr_awaiter", ptr_return_type, 0}); + fields.append({"@result", fn_type_id->return_type, 0}); if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) { - field_names.append("@ptr_stack_trace"); - field_types.append(get_ptr_to_stack_trace_type(g)); + fields.append({"@ptr_stack_trace", get_ptr_to_stack_trace_type(g), 0}); } for (size_t arg_i = 0; arg_i < fn_type_id->param_count; arg_i += 1) { @@ -5287,18 +5285,16 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) { param_name = buf_sprintf("@arg%" ZIG_PRI_usize, arg_i); } ZigType *param_type = param_info->type; - field_names.append(buf_ptr(param_name)); - field_types.append(param_type); + + fields.append({buf_ptr(param_name), param_type, 0}); } if (codegen_fn_has_err_ret_tracing_stack(g, fn, true)) { (void)get_ptr_to_stack_trace_type(g); // populate g->stack_trace_type - field_names.append("@stack_trace"); - field_types.append(g->stack_trace_type); - - field_names.append("@instruction_addresses"); - field_types.append(get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count)); + fields.append({"@stack_trace", g->stack_trace_type, 0}); + fields.append({"@instruction_addresses", + get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count), 0}); } for (size_t alloca_i = 0; alloca_i < fn->alloca_gen_list.length; alloca_i += 1) { @@ -5327,15 +5323,14 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) { } else { name = buf_ptr(buf_sprintf("%s.%" ZIG_PRI_usize, instruction->name_hint, alloca_i)); } - instruction->field_index = field_types.length; - field_names.append(name); - field_types.append(child_type); + instruction->field_index = fields.length; + + fields.append({name, child_type, 0}); } - assert(field_names.length == field_types.length); frame_type->data.frame.locals_struct = get_struct_type(g, 
buf_ptr(&frame_type->name), - field_names.items, field_types.items, field_names.length, target_fn_align(g->zig_target)); + fields.items, fields.length, target_fn_align(g->zig_target)); frame_type->abi_size = frame_type->data.frame.locals_struct->abi_size; frame_type->abi_align = frame_type->data.frame.locals_struct->abi_align; frame_type->size_in_bits = frame_type->data.frame.locals_struct->size_in_bits; From 5749dc49d839ba607392ed0934008773d33f47d7 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 14 Aug 2019 00:35:51 -0400 Subject: [PATCH 101/125] respect local variable alignment in async functions --- src/all_types.hpp | 1 + src/analyze.cpp | 18 +++++++++++++----- test/stage1/behavior/coroutines.zig | 11 +++++++++++ 3 files changed, 25 insertions(+), 5 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 6445c05bff..22e38b9f0c 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1087,6 +1087,7 @@ struct TypeStructField { ConstExprValue *init_val; // null and then memoized uint32_t bit_offset_in_host; // offset from the memory at gen_index uint32_t host_int_bytes; // size of host integer + uint32_t align; }; enum ResolveStatus { diff --git a/src/analyze.cpp b/src/analyze.cpp index 6f3b098063..146f661244 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1524,10 +1524,11 @@ static ZigType *get_struct_type(CodeGen *g, const char *type_name, SrcField fiel field->name = buf_create_from_str(fields[i].name); field->type_entry = fields[i].ty; field->src_index = i; + field->align = fields[i].align; if (type_has_bits(field->type_entry)) { assert(type_is_resolved(field->type_entry, ResolveStatusSizeKnown)); - unsigned field_abi_align = max(fields[i].align, field->type_entry->abi_align); + unsigned field_abi_align = max(field->align, field->type_entry->abi_align); if (field_abi_align > abi_align) { abi_align = field_abi_align; } @@ -5325,7 +5326,7 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) { } 
instruction->field_index = fields.length; - fields.append({name, child_type, 0}); + fields.append({name, child_type, instruction->align}); } @@ -6900,9 +6901,16 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS if (type_has_bits(struct_type->data.structure.fields[next_src_field_index].type_entry)) break; } - size_t next_abi_align = (next_src_field_index == field_count) ? - struct_type->abi_align : - struct_type->data.structure.fields[next_src_field_index].type_entry->abi_align; + size_t next_abi_align; + if (next_src_field_index == field_count) { + next_abi_align = struct_type->abi_align; + } else { + if (struct_type->data.structure.fields[next_src_field_index].align == 0) { + next_abi_align = struct_type->data.structure.fields[next_src_field_index].type_entry->abi_align; + } else { + next_abi_align = struct_type->data.structure.fields[next_src_field_index].align; + } + } size_t llvm_next_abi_align = (next_src_field_index == field_count) ? llvm_struct_abi_align : LLVMABIAlignmentOfType(g->target_data_ref, diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index 13b04d7787..2d76b47244 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -766,3 +766,14 @@ fn testAsyncAwaitTypicalUsage(comptime simulate_fail_download: bool, comptime si } }; } + +test "alignment of local variables in async functions" { + const S = struct { + fn doTheTest() void { + var y: u8 = 123; + var x: u8 align(128) = 1; + expect(@ptrToInt(&x) % 128 == 0); + } + }; + S.doTheTest(); +} From 7799423f24a0f628641e772146f1e6a5c2f6bdcb Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 14 Aug 2019 00:38:27 -0400 Subject: [PATCH 102/125] rename behavior test files --- test/stage1/behavior.zig | 12 ++++++------ .../stage1/behavior/{coroutines.zig => async_fn.zig} | 0 .../{coroutine_await_struct.zig => await_struct.zig} | 0 3 files changed, 6 insertions(+), 6 deletions(-) rename 
test/stage1/behavior/{coroutines.zig => async_fn.zig} (100%) rename test/stage1/behavior/{coroutine_await_struct.zig => await_struct.zig} (100%) diff --git a/test/stage1/behavior.zig b/test/stage1/behavior.zig index 71af5586ed..dba43268e2 100644 --- a/test/stage1/behavior.zig +++ b/test/stage1/behavior.zig @@ -3,12 +3,13 @@ comptime { _ = @import("behavior/alignof.zig"); _ = @import("behavior/array.zig"); _ = @import("behavior/asm.zig"); + _ = @import("behavior/async_fn.zig"); _ = @import("behavior/atomics.zig"); + _ = @import("behavior/await_struct.zig"); _ = @import("behavior/bit_shifting.zig"); _ = @import("behavior/bitcast.zig"); _ = @import("behavior/bitreverse.zig"); _ = @import("behavior/bool.zig"); - _ = @import("behavior/byteswap.zig"); _ = @import("behavior/bugs/1025.zig"); _ = @import("behavior/bugs/1076.zig"); _ = @import("behavior/bugs/1111.zig"); @@ -38,23 +39,24 @@ comptime { _ = @import("behavior/bugs/726.zig"); _ = @import("behavior/bugs/828.zig"); _ = @import("behavior/bugs/920.zig"); + _ = @import("behavior/byteswap.zig"); _ = @import("behavior/byval_arg_var.zig"); _ = @import("behavior/cancel.zig"); _ = @import("behavior/cast.zig"); _ = @import("behavior/const_slice_child.zig"); - _ = @import("behavior/coroutine_await_struct.zig"); - _ = @import("behavior/coroutines.zig"); _ = @import("behavior/defer.zig"); _ = @import("behavior/enum.zig"); _ = @import("behavior/enum_with_members.zig"); _ = @import("behavior/error.zig"); _ = @import("behavior/eval.zig"); _ = @import("behavior/field_parent_ptr.zig"); + _ = @import("behavior/floatop.zig"); _ = @import("behavior/fn.zig"); _ = @import("behavior/fn_in_struct_in_comptime.zig"); _ = @import("behavior/for.zig"); _ = @import("behavior/generics.zig"); _ = @import("behavior/hasdecl.zig"); + _ = @import("behavior/hasfield.zig"); _ = @import("behavior/if.zig"); _ = @import("behavior/import.zig"); _ = @import("behavior/incomplete_struct_param_tld.zig"); @@ -63,14 +65,13 @@ comptime { _ = 
@import("behavior/math.zig"); _ = @import("behavior/merge_error_sets.zig"); _ = @import("behavior/misc.zig"); + _ = @import("behavior/muladd.zig"); _ = @import("behavior/namespace_depends_on_compile_var.zig"); _ = @import("behavior/new_stack_call.zig"); _ = @import("behavior/null.zig"); _ = @import("behavior/optional.zig"); _ = @import("behavior/pointers.zig"); _ = @import("behavior/popcount.zig"); - _ = @import("behavior/muladd.zig"); - _ = @import("behavior/floatop.zig"); _ = @import("behavior/ptrcast.zig"); _ = @import("behavior/pub_enum.zig"); _ = @import("behavior/ref_var_in_if_after_if_2nd_switch_prong.zig"); @@ -99,5 +100,4 @@ comptime { _ = @import("behavior/void.zig"); _ = @import("behavior/while.zig"); _ = @import("behavior/widening.zig"); - _ = @import("behavior/hasfield.zig"); } diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/async_fn.zig similarity index 100% rename from test/stage1/behavior/coroutines.zig rename to test/stage1/behavior/async_fn.zig diff --git a/test/stage1/behavior/coroutine_await_struct.zig b/test/stage1/behavior/await_struct.zig similarity index 100% rename from test/stage1/behavior/coroutine_await_struct.zig rename to test/stage1/behavior/await_struct.zig From f3f838cc016fd8190a9bba46fa495fbc27325492 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 14 Aug 2019 11:22:12 -0400 Subject: [PATCH 103/125] add compile error for await in exported function --- src/analyze.cpp | 45 ++++++++++++++++++++++++++--------------- test/compile_errors.zig | 14 +++++++++++++ 2 files changed, 43 insertions(+), 16 deletions(-) diff --git a/src/analyze.cpp b/src/analyze.cpp index 146f661244..64d6059da4 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -3893,18 +3893,18 @@ static void analyze_fn_async(CodeGen *g, ZigFn *fn) { fn->inferred_async_node = inferred_async_none; } -static void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_type_node) { - ZigType *fn_type = fn_table_entry->type_entry; +static 
void analyze_fn_ir(CodeGen *g, ZigFn *fn, AstNode *return_type_node) { + ZigType *fn_type = fn->type_entry; assert(!fn_type->data.fn.is_generic); FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; - ZigType *block_return_type = ir_analyze(g, &fn_table_entry->ir_executable, - &fn_table_entry->analyzed_executable, fn_type_id->return_type, return_type_node); - fn_table_entry->src_implicit_return_type = block_return_type; + ZigType *block_return_type = ir_analyze(g, &fn->ir_executable, + &fn->analyzed_executable, fn_type_id->return_type, return_type_node); + fn->src_implicit_return_type = block_return_type; - if (type_is_invalid(block_return_type) || fn_table_entry->analyzed_executable.invalid) { + if (type_is_invalid(block_return_type) || fn->analyzed_executable.invalid) { assert(g->errors.length > 0); - fn_table_entry->anal_state = FnAnalStateInvalid; + fn->anal_state = FnAnalStateInvalid; return; } @@ -3912,20 +3912,20 @@ static void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_typ ZigType *return_err_set_type = fn_type_id->return_type->data.error_union.err_set_type; if (return_err_set_type->data.error_set.infer_fn != nullptr) { ZigType *inferred_err_set_type; - if (fn_table_entry->src_implicit_return_type->id == ZigTypeIdErrorSet) { - inferred_err_set_type = fn_table_entry->src_implicit_return_type; - } else if (fn_table_entry->src_implicit_return_type->id == ZigTypeIdErrorUnion) { - inferred_err_set_type = fn_table_entry->src_implicit_return_type->data.error_union.err_set_type; + if (fn->src_implicit_return_type->id == ZigTypeIdErrorSet) { + inferred_err_set_type = fn->src_implicit_return_type; + } else if (fn->src_implicit_return_type->id == ZigTypeIdErrorUnion) { + inferred_err_set_type = fn->src_implicit_return_type->data.error_union.err_set_type; } else { add_node_error(g, return_type_node, buf_sprintf("function with inferred error set must return at least one possible error")); - fn_table_entry->anal_state = FnAnalStateInvalid; + 
fn->anal_state = FnAnalStateInvalid; return; } if (inferred_err_set_type->data.error_set.infer_fn != nullptr) { if (!resolve_inferred_error_set(g, inferred_err_set_type, return_type_node)) { - fn_table_entry->anal_state = FnAnalStateInvalid; + fn->anal_state = FnAnalStateInvalid; return; } } @@ -3945,12 +3945,25 @@ static void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_typ } } + CallingConvention cc = fn->type_entry->data.fn.fn_type_id.cc; + if (cc != CallingConventionUnspecified && cc != CallingConventionAsync && + fn->inferred_async_node != nullptr && + fn->inferred_async_node != inferred_async_checking && + fn->inferred_async_node != inferred_async_none) + { + ErrorMsg *msg = add_node_error(g, fn->proto_node, + buf_sprintf("function with calling convention '%s' cannot be async", + calling_convention_name(cc))); + add_async_error_notes(g, msg, fn); + fn->anal_state = FnAnalStateInvalid; + } + if (g->verbose_ir) { - fprintf(stderr, "fn %s() { // (analyzed)\n", buf_ptr(&fn_table_entry->symbol_name)); - ir_print(g, stderr, &fn_table_entry->analyzed_executable, 4); + fprintf(stderr, "fn %s() { // (analyzed)\n", buf_ptr(&fn->symbol_name)); + ir_print(g, stderr, &fn->analyzed_executable, 4); fprintf(stderr, "}\n"); } - fn_table_entry->anal_state = FnAnalStateComplete; + fn->anal_state = FnAnalStateComplete; } static void analyze_fn_body(CodeGen *g, ZigFn *fn_table_entry) { diff --git a/test/compile_errors.zig b/test/compile_errors.zig index d3d2685f1b..c07786d462 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -2,6 +2,20 @@ const tests = @import("tests.zig"); const builtin = @import("builtin"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "invalid suspend in exported function", + \\export fn entry() void { + \\ var frame = async func(); + \\ var result = await frame; + \\} + \\fn func() void { + \\ suspend; + \\} + , + "tmp.zig:1:1: error: function with calling convention 'ccc' cannot be async", + 
"tmp.zig:3:18: note: await is a suspend point", + ); + cases.add( "async function indirectly depends on its own frame", \\export fn entry() void { From 64c293f8a4ce5fcbb506c32b989a88d982f005ce Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 14 Aug 2019 12:52:20 -0400 Subject: [PATCH 104/125] codegen for async call of blocking function --- src/analyze.cpp | 54 +++++++++++--- src/codegen.cpp | 195 +++++++++++++++++++++++++++--------------------- 2 files changed, 155 insertions(+), 94 deletions(-) diff --git a/src/analyze.cpp b/src/analyze.cpp index 64d6059da4..1b6de6e7df 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -3831,7 +3831,7 @@ static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) { } // This function resolves functions being inferred async. -static void analyze_fn_async(CodeGen *g, ZigFn *fn) { +static void analyze_fn_async(CodeGen *g, ZigFn *fn, bool resolve_frame) { if (fn->inferred_async_node == inferred_async_checking) { // TODO call graph cycle detected, disallow the recursion fn->inferred_async_node = inferred_async_none; @@ -3841,7 +3841,9 @@ static void analyze_fn_async(CodeGen *g, ZigFn *fn) { return; } if (fn->inferred_async_node != nullptr) { - resolve_async_fn_frame(g, fn); + if (resolve_frame) { + resolve_async_fn_frame(g, fn); + } return; } fn->inferred_async_node = inferred_async_checking; @@ -3870,7 +3872,7 @@ static void analyze_fn_async(CodeGen *g, ZigFn *fn) { } } assert(callee->anal_state == FnAnalStateComplete); - analyze_fn_async(g, callee); + analyze_fn_async(g, callee, true); if (callee->anal_state == FnAnalStateInvalid) { fn->anal_state = FnAnalStateInvalid; return; @@ -3886,7 +3888,9 @@ static void analyze_fn_async(CodeGen *g, ZigFn *fn) { fn->anal_state = FnAnalStateInvalid; return; } - resolve_async_fn_frame(g, fn); + if (resolve_frame) { + resolve_async_fn_frame(g, fn); + } return; } } @@ -4141,7 +4145,7 @@ void semantic_analyze(CodeGen *g) { // second pass over functions for detecting async for 
(g->fn_defs_index = 0; g->fn_defs_index < g->fn_defs.length; g->fn_defs_index += 1) { ZigFn *fn_entry = g->fn_defs.at(g->fn_defs_index); - analyze_fn_async(g, fn_entry); + analyze_fn_async(g, fn_entry, true); } } @@ -5212,6 +5216,36 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) { return ErrorSemanticAnalyzeFail; } } + analyze_fn_async(g, fn, false); + if (fn->anal_state == FnAnalStateInvalid) + return ErrorSemanticAnalyzeFail; + + if (!fn_is_async(fn)) { + ZigType *fn_type = fn->type_entry; + FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; + ZigType *ptr_return_type = get_pointer_to_type(g, fn_type_id->return_type, false); + + // label (grep this): [fn_frame_struct_layout] + ZigList fields = {}; + + fields.append({"@fn_ptr", g->builtin_types.entry_usize, 0}); + fields.append({"@resume_index", g->builtin_types.entry_usize, 0}); + fields.append({"@awaiter", g->builtin_types.entry_usize, 0}); + fields.append({"@prev_val", g->builtin_types.entry_usize, 0}); + + fields.append({"@result_ptr_callee", ptr_return_type, 0}); + fields.append({"@result_ptr_awaiter", ptr_return_type, 0}); + fields.append({"@result", fn_type_id->return_type, 0}); + + frame_type->data.frame.locals_struct = get_struct_type(g, buf_ptr(&frame_type->name), + fields.items, fields.length, target_fn_align(g->zig_target)); + frame_type->abi_size = frame_type->data.frame.locals_struct->abi_size; + frame_type->abi_align = frame_type->data.frame.locals_struct->abi_align; + frame_type->size_in_bits = frame_type->data.frame.locals_struct->size_in_bits; + + return ErrorNone; + } + ZigType *fn_type = get_async_fn_type(g, fn->type_entry); if (fn->analyzed_executable.need_err_code_spill) { @@ -5252,7 +5286,7 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) { frame_type->data.frame.locals_struct = g->builtin_types.entry_invalid; return ErrorSemanticAnalyzeFail; } - analyze_fn_async(g, callee); + analyze_fn_async(g, callee, true); if (!fn_is_async(callee)) continue; 
@@ -5268,6 +5302,8 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) { fn->alloca_gen_list.append(alloca_gen); call->frame_result_loc = &alloca_gen->base; } + FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; + ZigType *ptr_return_type = get_pointer_to_type(g, fn_type_id->return_type, false); // label (grep this): [fn_frame_struct_layout] ZigList fields = {}; @@ -5277,9 +5313,6 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) { fields.append({"@awaiter", g->builtin_types.entry_usize, 0}); fields.append({"@prev_val", g->builtin_types.entry_usize, 0}); - FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; - ZigType *ptr_return_type = get_pointer_to_type(g, fn_type_id->return_type, false); - fields.append({"@result_ptr_callee", ptr_return_type, 0}); fields.append({"@result_ptr_awaiter", ptr_return_type, 0}); fields.append({"@result", fn_type_id->return_type, 0}); @@ -7651,7 +7684,8 @@ static void resolve_llvm_types_anyerror(CodeGen *g) { } static void resolve_llvm_types_async_frame(CodeGen *g, ZigType *frame_type, ResolveStatus wanted_resolve_status) { - resolve_llvm_types_struct(g, frame_type->data.frame.locals_struct, wanted_resolve_status, frame_type); + ZigType *passed_frame_type = fn_is_async(frame_type->data.frame.fn) ? 
frame_type : nullptr; + resolve_llvm_types_struct(g, frame_type->data.frame.locals_struct, wanted_resolve_status, passed_frame_type); frame_type->llvm_type = frame_type->data.frame.locals_struct->llvm_type; frame_type->llvm_di_type = frame_type->data.frame.locals_struct->llvm_di_type; } diff --git a/src/codegen.cpp b/src/codegen.cpp index c50d4121b6..e9f323dd0d 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -3850,73 +3850,74 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMValueRef frame_result_loc; LLVMValueRef awaiter_init_val; LLVMValueRef ret_ptr; - if (instruction->is_async) { - awaiter_init_val = zero; + if (callee_is_async) { + if (instruction->is_async) { + if (instruction->new_stack == nullptr) { + awaiter_init_val = zero; + frame_result_loc = result_loc; - if (instruction->new_stack == nullptr) { - frame_result_loc = result_loc; + if (ret_has_bits) { + // Use the result location which is inside the frame if this is an async call. + ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, ""); + } + } else if (cc == CallingConventionAsync) { + awaiter_init_val = zero; + LLVMValueRef frame_slice_ptr = ir_llvm_value(g, instruction->new_stack); + if (ir_want_runtime_safety(g, &instruction->base)) { + LLVMValueRef given_len_ptr = LLVMBuildStructGEP(g->builder, frame_slice_ptr, slice_len_index, ""); + LLVMValueRef given_frame_len = LLVMBuildLoad(g->builder, given_len_ptr, ""); + LLVMValueRef actual_frame_len = gen_frame_size(g, fn_val); - if (ret_has_bits) { - // Use the result location which is inside the frame if this is an async call. 
- ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, ""); + LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "FrameSizeCheckFail"); + LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "FrameSizeCheckOk"); + + LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntUGE, given_frame_len, actual_frame_len, ""); + LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block); + + LLVMPositionBuilderAtEnd(g->builder, fail_block); + gen_safety_crash(g, PanicMsgIdFrameTooSmall); + + LLVMPositionBuilderAtEnd(g->builder, ok_block); + } + LLVMValueRef frame_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_slice_ptr, slice_ptr_index, ""); + LLVMValueRef frame_ptr = LLVMBuildLoad(g->builder, frame_ptr_ptr, ""); + frame_result_loc = LLVMBuildBitCast(g->builder, frame_ptr, + get_llvm_type(g, instruction->base.value.type), ""); + + if (ret_has_bits) { + // Use the result location provided to the @asyncCall builtin + ret_ptr = result_loc; + } } + + // even if prefix_arg_err_ret_stack is true, let the async function do its own + // initialization. 
} else { - LLVMValueRef frame_slice_ptr = ir_llvm_value(g, instruction->new_stack); - if (ir_want_runtime_safety(g, &instruction->base)) { - LLVMValueRef given_len_ptr = LLVMBuildStructGEP(g->builder, frame_slice_ptr, slice_len_index, ""); - LLVMValueRef given_frame_len = LLVMBuildLoad(g->builder, given_len_ptr, ""); - LLVMValueRef actual_frame_len = gen_frame_size(g, fn_val); - - LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "FrameSizeCheckFail"); - LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "FrameSizeCheckOk"); - - LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntUGE, given_frame_len, actual_frame_len, ""); - LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block); - - LLVMPositionBuilderAtEnd(g->builder, fail_block); - gen_safety_crash(g, PanicMsgIdFrameTooSmall); - - LLVMPositionBuilderAtEnd(g->builder, ok_block); - } - LLVMValueRef frame_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_slice_ptr, slice_ptr_index, ""); - LLVMValueRef frame_ptr = LLVMBuildLoad(g->builder, frame_ptr_ptr, ""); - frame_result_loc = LLVMBuildBitCast(g->builder, frame_ptr, - get_llvm_type(g, instruction->base.value.type), ""); - + frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc); + awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, ""); // caller's own frame pointer if (ret_has_bits) { - // Use the result location provided to the @asyncCall builtin - ret_ptr = result_loc; + if (result_loc == nullptr) { + // return type is a scalar, but we still need a pointer to it. Use the async fn frame. + ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, ""); + } else { + // Use the call instruction's result location. + ret_ptr = result_loc; + } + + // Store a zero in the awaiter's result ptr to indicate we do not need a copy made. 
+ LLVMValueRef awaiter_ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 1, ""); + LLVMValueRef zero_ptr = LLVMConstNull(LLVMGetElementType(LLVMTypeOf(awaiter_ret_ptr))); + LLVMBuildStore(g->builder, zero_ptr, awaiter_ret_ptr); + } + + if (prefix_arg_err_ret_stack) { + LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, + frame_index_trace_arg(g, src_return_type), ""); + LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope); + LLVMBuildStore(g->builder, my_err_ret_trace_val, err_ret_trace_ptr_ptr); } } - // even if prefix_arg_err_ret_stack is true, let the async function do its own - // initialization. - } else if (callee_is_async) { - frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc); - awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, ""); // caller's own frame pointer - if (ret_has_bits) { - if (result_loc == nullptr) { - // return type is a scalar, but we still need a pointer to it. Use the async fn frame. - ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, ""); - } else { - // Use the call instruction's result location. - ret_ptr = result_loc; - } - - // Store a zero in the awaiter's result ptr to indicate we do not need a copy made. 
- LLVMValueRef awaiter_ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 1, ""); - LLVMValueRef zero_ptr = LLVMConstNull(LLVMGetElementType(LLVMTypeOf(awaiter_ret_ptr))); - LLVMBuildStore(g->builder, zero_ptr, awaiter_ret_ptr); - } - - if (prefix_arg_err_ret_stack) { - LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, - frame_index_trace_arg(g, src_return_type), ""); - LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope); - LLVMBuildStore(g->builder, my_err_ret_trace_val, err_ret_trace_ptr_ptr); - } - } - if (instruction->is_async || callee_is_async) { assert(frame_result_loc != nullptr); LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_fn_ptr_index, ""); @@ -3934,6 +3935,29 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start, ""); LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr); } + } else if (instruction->is_async) { + // Async call of blocking function + if (instruction->new_stack != nullptr) { + zig_panic("TODO @asyncCall of non-async function"); + } + frame_result_loc = result_loc; + awaiter_init_val = LLVMConstAllOnes(usize_type_ref); + + LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_awaiter_index, ""); + LLVMBuildStore(g->builder, awaiter_init_val, awaiter_ptr); + + if (ret_has_bits) { + LLVMValueRef ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, ""); + LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start, ""); + LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr); + + if (first_arg_ret) { + gen_param_values.append(ret_ptr); + } + } + if (prefix_arg_err_ret_stack) { + gen_param_values.append(get_cur_err_ret_trace_val(g, instruction->base.scope)); + } } else { if (first_arg_ret) { 
gen_param_values.append(result_loc); @@ -3966,7 +3990,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMCallConv llvm_cc = get_llvm_cc(g, cc); LLVMValueRef result; - if (instruction->is_async || callee_is_async) { + if (callee_is_async) { uint32_t arg_start_i = frame_index_arg(g, fn_type->data.fn.fn_type_id.return_type); LLVMValueRef casted_frame; @@ -3992,39 +4016,42 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr gen_assign_raw(g, arg_ptr, get_pointer_to_type(g, gen_param_types.at(arg_i), true), gen_param_values.at(arg_i)); } - } - if (instruction->is_async) { - gen_resume(g, fn_val, frame_result_loc, ResumeIdCall, nullptr); - if (instruction->new_stack != nullptr) { - return frame_result_loc; - } - return nullptr; - } else if (callee_is_async) { - ZigType *ptr_result_type = get_pointer_to_type(g, src_return_type, true); - LLVMBasicBlockRef call_bb = gen_suspend_begin(g, "CallResume"); - - LLVMValueRef call_inst = gen_resume(g, fn_val, frame_result_loc, ResumeIdCall, nullptr); - set_tail_call_if_appropriate(g, call_inst); - LLVMBuildRetVoid(g->builder); - - LLVMPositionBuilderAtEnd(g->builder, call_bb); - gen_assert_resume_id(g, &instruction->base, ResumeIdReturn, PanicMsgIdResumedAnAwaitingFn, nullptr); - render_async_var_decls(g, instruction->base.scope); - - if (!type_has_bits(src_return_type)) + if (instruction->is_async) { + gen_resume(g, fn_val, frame_result_loc, ResumeIdCall, nullptr); + if (instruction->new_stack != nullptr) { + return frame_result_loc; + } return nullptr; + } else { + ZigType *ptr_result_type = get_pointer_to_type(g, src_return_type, true); - if (result_loc != nullptr) - return get_handle_value(g, result_loc, src_return_type, ptr_result_type); + LLVMBasicBlockRef call_bb = gen_suspend_begin(g, "CallResume"); - LLVMValueRef result_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, ""); - return LLVMBuildLoad(g->builder, result_ptr, ""); + 
LLVMValueRef call_inst = gen_resume(g, fn_val, frame_result_loc, ResumeIdCall, nullptr); + set_tail_call_if_appropriate(g, call_inst); + LLVMBuildRetVoid(g->builder); + + LLVMPositionBuilderAtEnd(g->builder, call_bb); + gen_assert_resume_id(g, &instruction->base, ResumeIdReturn, PanicMsgIdResumedAnAwaitingFn, nullptr); + render_async_var_decls(g, instruction->base.scope); + + if (!type_has_bits(src_return_type)) + return nullptr; + + if (result_loc != nullptr) + return get_handle_value(g, result_loc, src_return_type, ptr_result_type); + + LLVMValueRef result_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, ""); + return LLVMBuildLoad(g->builder, result_ptr, ""); + } } if (instruction->new_stack == nullptr) { result = ZigLLVMBuildCall(g->builder, fn_val, gen_param_values.items, (unsigned)gen_param_values.length, llvm_cc, fn_inline, ""); + } else if (instruction->is_async) { + zig_panic("TODO @asyncCall of non-async function"); } else { LLVMValueRef stacksave_fn_val = get_stacksave_fn_val(g); LLVMValueRef stackrestore_fn_val = get_stackrestore_fn_val(g); From 13b5a4bf8ca65c569e6b28ca0e41d101d12d0ff1 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 15 Aug 2019 14:05:12 -0400 Subject: [PATCH 105/125] remove `cancel` --- doc/docgen.zig | 1 - doc/langref.html.in | 14 +- src-self-hosted/main.zig | 3 - src/all_types.hpp | 33 +-- src/analyze.cpp | 53 ++-- src/analyze.hpp | 2 +- src/ast_render.cpp | 8 - src/codegen.cpp | 402 +++++++++++------------------- src/ir.cpp | 230 +++-------------- src/ir_print.cpp | 24 -- src/parser.cpp | 12 - src/tokenizer.cpp | 2 - src/tokenizer.hpp | 1 - std/event/fs.zig | 2 +- std/event/future.zig | 2 +- std/event/group.zig | 20 +- std/event/net.zig | 8 +- std/zig/parse.zig | 15 -- std/zig/parser_test.zig | 4 +- std/zig/tokenizer.zig | 2 - test/compile_errors.zig | 8 +- test/stage1/behavior.zig | 1 - test/stage1/behavior/async_fn.zig | 90 ++----- test/stage1/behavior/cancel.zig | 115 --------- 24 files changed, 
251 insertions(+), 801 deletions(-) delete mode 100644 test/stage1/behavior/cancel.zig diff --git a/doc/docgen.zig b/doc/docgen.zig index 92764d7642..458b97d2c0 100644 --- a/doc/docgen.zig +++ b/doc/docgen.zig @@ -750,7 +750,6 @@ fn tokenizeAndPrintRaw(docgen_tokenizer: *Tokenizer, out: var, source_token: Tok .Keyword_async, .Keyword_await, .Keyword_break, - .Keyword_cancel, .Keyword_catch, .Keyword_comptime, .Keyword_const, diff --git a/doc/langref.html.in b/doc/langref.html.in index 23e4dd194e..0f964373c5 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -5971,7 +5971,7 @@ test "global assembly" { {#header_open|Async Functions#}

      An async function is a function whose callsite is split into an {#syntax#}async{#endsyntax#} initiation, - followed by an {#syntax#}await{#endsyntax#} completion. They can also be canceled. + followed by an {#syntax#}await{#endsyntax#} completion.

      When you call a function, it creates a stack frame, @@ -6013,11 +6013,11 @@ test "global assembly" {

      The result of an async function call is a {#syntax#}promise->T{#endsyntax#} type, where {#syntax#}T{#endsyntax#} is the return type of the async function. Once a promise has been created, it must be - consumed, either with {#syntax#}cancel{#endsyntax#} or {#syntax#}await{#endsyntax#}: + consumed with {#syntax#}await{#endsyntax#}:

      Async functions start executing when created, so in the following example, the entire - async function completes before it is canceled: + async function completes before it is awaited:

      {#code_begin|test#} const std = @import("std"); @@ -6048,7 +6048,7 @@ fn simpleAsyncFn() void {

      When an async function suspends itself, it must be sure that it will be - resumed or canceled somehow, for example by registering its promise handle + resumed somehow, for example by registering its frame handle in an event loop. Use a suspend capture block to gain access to the promise (TODO this is outdated):

      @@ -6134,7 +6134,7 @@ async fn testResumeFromSuspend(my_result: *i32) void { resumes the awaiter.

      - A promise handle must be consumed exactly once after it is created, either by {#syntax#}cancel{#endsyntax#} or {#syntax#}await{#endsyntax#}. + A frame handle must be consumed exactly once after it is created, by using {#syntax#}await{#endsyntax#}.

      {#syntax#}await{#endsyntax#} counts as a suspend point, and therefore at every {#syntax#}await{#endsyntax#}, @@ -9764,7 +9764,6 @@ PrimaryExpr <- AsmExpr / IfExpr / KEYWORD_break BreakLabel? Expr? - / KEYWORD_cancel Expr / KEYWORD_comptime Expr / KEYWORD_continue BreakLabel? / KEYWORD_resume Expr @@ -10120,7 +10119,6 @@ KEYWORD_asm <- 'asm' end_of_word KEYWORD_async <- 'async' end_of_word KEYWORD_await <- 'await' end_of_word KEYWORD_break <- 'break' end_of_word -KEYWORD_cancel <- 'cancel' end_of_word KEYWORD_catch <- 'catch' end_of_word KEYWORD_comptime <- 'comptime' end_of_word KEYWORD_const <- 'const' end_of_word @@ -10165,7 +10163,7 @@ KEYWORD_volatile <- 'volatile' end_of_word KEYWORD_while <- 'while' end_of_word keyword <- KEYWORD_align / KEYWORD_and / KEYWORD_allowzero / KEYWORD_asm - / KEYWORD_async / KEYWORD_await / KEYWORD_break / KEYWORD_cancel + / KEYWORD_async / KEYWORD_await / KEYWORD_break / KEYWORD_catch / KEYWORD_comptime / KEYWORD_const / KEYWORD_continue / KEYWORD_defer / KEYWORD_else / KEYWORD_enum / KEYWORD_errdefer / KEYWORD_error / KEYWORD_export / KEYWORD_extern / KEYWORD_false diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig index bc5d078950..5136b32735 100644 --- a/src-self-hosted/main.zig +++ b/src-self-hosted/main.zig @@ -467,7 +467,6 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co comp.start(); // TODO const process_build_events_handle = try async processBuildEvents(comp, color); - defer cancel process_build_events_handle; loop.run(); } @@ -579,7 +578,6 @@ fn cmdLibC(allocator: *Allocator, args: []const []const u8) !void { defer zig_compiler.deinit(); // TODO const handle = try async findLibCAsync(&zig_compiler); - defer cancel handle; loop.run(); } @@ -669,7 +667,6 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void { // TODO &flags, // TODO color, // TODO ); - defer cancel main_handle; loop.run(); return result; } diff --git a/src/all_types.hpp 
b/src/all_types.hpp index 22e38b9f0c..f1c699ba10 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -476,7 +476,6 @@ enum NodeType { NodeTypeIfErrorExpr, NodeTypeIfOptional, NodeTypeErrorSetDecl, - NodeTypeCancel, NodeTypeResume, NodeTypeAwaitExpr, NodeTypeSuspend, @@ -911,10 +910,6 @@ struct AstNodeBreakExpr { AstNode *expr; // may be null }; -struct AstNodeCancelExpr { - AstNode *expr; -}; - struct AstNodeResumeExpr { AstNode *expr; }; @@ -1003,7 +998,6 @@ struct AstNode { AstNodeInferredArrayType inferred_array_type; AstNodeErrorType error_type; AstNodeErrorSetDecl err_set_decl; - AstNodeCancelExpr cancel_expr; AstNodeResumeExpr resume_expr; AstNodeAwaitExpr await_expr; AstNodeSuspend suspend; @@ -1561,7 +1555,6 @@ enum PanicMsgId { PanicMsgIdBadAwait, PanicMsgIdBadReturn, PanicMsgIdResumedAnAwaitingFn, - PanicMsgIdResumedACancelingFn, PanicMsgIdFrameTooSmall, PanicMsgIdResumedFnPendingAwait, @@ -1729,8 +1722,6 @@ struct CodeGen { LLVMValueRef cur_async_switch_instr; LLVMValueRef cur_async_resume_index_ptr; LLVMValueRef cur_async_awaiter_ptr; - LLVMValueRef cur_async_prev_val; - LLVMValueRef cur_async_prev_val_field_ptr; LLVMBasicBlockRef cur_preamble_llvm_block; size_t cur_resume_block_count; LLVMValueRef cur_err_ret_trace_val_arg; @@ -1822,7 +1813,6 @@ struct CodeGen { ZigType *align_amt_type; ZigType *stack_trace_type; - ZigType *ptr_to_stack_trace_type; ZigType *err_tag_type; ZigType *test_fn_type; @@ -1892,7 +1882,6 @@ struct CodeGen { bool system_linker_hack; bool reported_bad_link_libc_error; bool is_dynamic; // shared library rather than static library. dynamic musl rather than static musl. 
- bool cur_is_after_return; //////////////////////////// Participates in Input Parameter Cache Hash /////// Note: there is a separate cache hash for builtin.zig, when adding fields, @@ -2235,7 +2224,6 @@ enum IrInstructionId { IrInstructionIdCallGen, IrInstructionIdConst, IrInstructionIdReturn, - IrInstructionIdReturnBegin, IrInstructionIdCast, IrInstructionIdResizeSlice, IrInstructionIdContainerInitList, @@ -2345,7 +2333,6 @@ enum IrInstructionId { IrInstructionIdExport, IrInstructionIdErrorReturnTrace, IrInstructionIdErrorUnion, - IrInstructionIdCancel, IrInstructionIdAtomicRmw, IrInstructionIdAtomicLoad, IrInstructionIdSaveErrRetAddr, @@ -2370,7 +2357,6 @@ enum IrInstructionId { IrInstructionIdAwaitSrc, IrInstructionIdAwaitGen, IrInstructionIdResume, - IrInstructionIdTestCancelRequested, IrInstructionIdSpillBegin, IrInstructionIdSpillEnd, }; @@ -2649,12 +2635,6 @@ struct IrInstructionReturn { IrInstruction *operand; }; -struct IrInstructionReturnBegin { - IrInstruction base; - - IrInstruction *operand; -}; - enum CastOp { CastOpNoCast, // signifies the function call expression is not a cast CastOpNoop, // fn call expr is a cast, but does nothing @@ -3440,12 +3420,6 @@ struct IrInstructionErrorUnion { IrInstruction *payload; }; -struct IrInstructionCancel { - IrInstruction base; - - IrInstruction *frame; -}; - struct IrInstructionAtomicRmw { IrInstruction base; @@ -3647,10 +3621,6 @@ struct IrInstructionResume { IrInstruction *frame; }; -struct IrInstructionTestCancelRequested { - IrInstruction base; -}; - enum SpillId { SpillIdInvalid, SpillIdRetErrCode, @@ -3756,8 +3726,7 @@ static const size_t err_union_err_index = 1; static const size_t frame_fn_ptr_index = 0; static const size_t frame_resume_index = 1; static const size_t frame_awaiter_index = 2; -static const size_t frame_prev_val_index = 3; -static const size_t frame_ret_start = 4; +static const size_t frame_ret_start = 3; // TODO https://github.com/ziglang/zig/issues/3056 // We require this to be a power 
of 2 so that we can use shifting rather than diff --git a/src/analyze.cpp b/src/analyze.cpp index 1b6de6e7df..fc42abaf26 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -828,17 +828,15 @@ bool calling_convention_allows_zig_types(CallingConvention cc) { zig_unreachable(); } -ZigType *get_ptr_to_stack_trace_type(CodeGen *g) { +ZigType *get_stack_trace_type(CodeGen *g) { if (g->stack_trace_type == nullptr) { ConstExprValue *stack_trace_type_val = get_builtin_value(g, "StackTrace"); assert(stack_trace_type_val->type->id == ZigTypeIdMetaType); g->stack_trace_type = stack_trace_type_val->data.x_type; assertNoError(type_resolve(g, g->stack_trace_type, ResolveStatusZeroBitsKnown)); - - g->ptr_to_stack_trace_type = get_pointer_to_type(g, g->stack_trace_type, false); } - return g->ptr_to_stack_trace_type; + return g->stack_trace_type; } bool want_first_arg_sret(CodeGen *g, FnTypeId *fn_type_id) { @@ -3035,7 +3033,6 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) { case NodeTypeIfErrorExpr: case NodeTypeIfOptional: case NodeTypeErrorSetDecl: - case NodeTypeCancel: case NodeTypeResume: case NodeTypeAwaitExpr: case NodeTypeSuspend: @@ -3822,11 +3819,9 @@ static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) { } else if (fn->inferred_async_node->type == NodeTypeAwaitExpr) { add_error_note(g, msg, fn->inferred_async_node, buf_sprintf("await is a suspend point")); - } else if (fn->inferred_async_node->type == NodeTypeCancel) { - add_error_note(g, msg, fn->inferred_async_node, - buf_sprintf("cancel is a suspend point")); } else { - zig_unreachable(); + add_error_note(g, msg, fn->inferred_async_node, + buf_sprintf("suspends here")); } } @@ -5231,12 +5226,21 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) { fields.append({"@fn_ptr", g->builtin_types.entry_usize, 0}); fields.append({"@resume_index", g->builtin_types.entry_usize, 0}); fields.append({"@awaiter", g->builtin_types.entry_usize, 0}); - 
fields.append({"@prev_val", g->builtin_types.entry_usize, 0}); fields.append({"@result_ptr_callee", ptr_return_type, 0}); fields.append({"@result_ptr_awaiter", ptr_return_type, 0}); fields.append({"@result", fn_type_id->return_type, 0}); + if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) { + ZigType *ptr_to_stack_trace_type = get_pointer_to_type(g, get_stack_trace_type(g), false); + fields.append({"@ptr_stack_trace_callee", ptr_to_stack_trace_type, 0}); + fields.append({"@ptr_stack_trace_awaiter", ptr_to_stack_trace_type, 0}); + + fields.append({"@stack_trace", get_stack_trace_type(g), 0}); + fields.append({"@instruction_addresses", + get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count), 0}); + } + frame_type->data.frame.locals_struct = get_struct_type(g, buf_ptr(&frame_type->name), fields.items, fields.length, target_fn_align(g->zig_target)); frame_type->abi_size = frame_type->data.frame.locals_struct->abi_size; @@ -5311,14 +5315,15 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) { fields.append({"@fn_ptr", fn_type, 0}); fields.append({"@resume_index", g->builtin_types.entry_usize, 0}); fields.append({"@awaiter", g->builtin_types.entry_usize, 0}); - fields.append({"@prev_val", g->builtin_types.entry_usize, 0}); fields.append({"@result_ptr_callee", ptr_return_type, 0}); fields.append({"@result_ptr_awaiter", ptr_return_type, 0}); fields.append({"@result", fn_type_id->return_type, 0}); if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) { - fields.append({"@ptr_stack_trace", get_ptr_to_stack_trace_type(g), 0}); + ZigType *ptr_stack_trace_type = get_pointer_to_type(g, get_stack_trace_type(g), false); + fields.append({"@ptr_stack_trace_callee", ptr_stack_trace_type, 0}); + fields.append({"@ptr_stack_trace_awaiter", ptr_stack_trace_type, 0}); } for (size_t arg_i = 0; arg_i < fn_type_id->param_count; arg_i += 1) { @@ -5337,9 +5342,7 @@ static Error resolve_async_frame(CodeGen *g, ZigType 
*frame_type) { } if (codegen_fn_has_err_ret_tracing_stack(g, fn, true)) { - (void)get_ptr_to_stack_trace_type(g); // populate g->stack_trace_type - - fields.append({"@stack_trace", g->stack_trace_type, 0}); + fields.append({"@stack_trace", get_stack_trace_type(g), 0}); fields.append({"@instruction_addresses", get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count), 0}); } @@ -7553,7 +7556,7 @@ static void resolve_llvm_types_fn_type(CodeGen *g, ZigType *fn_type) { fn_type->data.fn.gen_return_type = gen_return_type; if (prefix_arg_error_return_trace && !is_async) { - ZigType *gen_type = get_ptr_to_stack_trace_type(g); + ZigType *gen_type = get_pointer_to_type(g, get_stack_trace_type(g), false); gen_param_types.append(get_llvm_type(g, gen_type)); param_di_types.append(get_llvm_di_type(g, gen_type)); } @@ -7727,7 +7730,6 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re field_types.append(ptr_fn_llvm_type); // fn_ptr field_types.append(usize_type_ref); // resume_index field_types.append(usize_type_ref); // awaiter - field_types.append(usize_type_ref); // prev_val bool have_result_type = result_type != nullptr && type_has_bits(result_type); if (have_result_type) { @@ -7735,7 +7737,9 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re field_types.append(get_llvm_type(g, ptr_result_type)); // result_ptr_awaiter field_types.append(get_llvm_type(g, result_type)); // result if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) { - field_types.append(get_llvm_type(g, get_ptr_to_stack_trace_type(g))); // ptr_stack_trace + ZigType *ptr_stack_trace = get_pointer_to_type(g, get_stack_trace_type(g), false); + field_types.append(get_llvm_type(g, ptr_stack_trace)); // ptr_stack_trace_callee + field_types.append(get_llvm_type(g, ptr_stack_trace)); // ptr_stack_trace_awaiter } } LLVMStructSetBody(frame_header_type, field_types.items, field_types.length, false); @@ -7792,14 +7796,23 @@ static void 
resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, result_type))); if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) { + ZigType *ptr_stack_trace = get_pointer_to_type(g, get_stack_trace_type(g), false); di_element_types.append( ZigLLVMCreateDebugMemberType(g->dbuilder, - ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace", + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace_callee", di_file, line, 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)), 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)), 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length), - ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, get_ptr_to_stack_trace_type(g)))); + ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_stack_trace))); + di_element_types.append( + ZigLLVMCreateDebugMemberType(g->dbuilder, + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace_awaiter", + di_file, line, + 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)), + 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)), + 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length), + ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_stack_trace))); } }; diff --git a/src/analyze.hpp b/src/analyze.hpp index e6336d3cdc..5752c74751 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -195,7 +195,7 @@ void add_var_export(CodeGen *g, ZigVar *fn_table_entry, Buf *symbol_name, Global ConstExprValue *get_builtin_value(CodeGen *codegen, const char *name); -ZigType *get_ptr_to_stack_trace_type(CodeGen *g); +ZigType *get_stack_trace_type(CodeGen *g); bool resolve_inferred_error_set(CodeGen *g, ZigType *err_set_type, AstNode *source_node); ZigType *get_auto_err_set_type(CodeGen *g, ZigFn *fn_entry); diff --git a/src/ast_render.cpp b/src/ast_render.cpp 
index dd4d9cf646..334dc37b59 100644 --- a/src/ast_render.cpp +++ b/src/ast_render.cpp @@ -249,8 +249,6 @@ static const char *node_type_str(NodeType node_type) { return "IfOptional"; case NodeTypeErrorSetDecl: return "ErrorSetDecl"; - case NodeTypeCancel: - return "Cancel"; case NodeTypeResume: return "Resume"; case NodeTypeAwaitExpr: @@ -1136,12 +1134,6 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) { fprintf(ar->f, "}"); break; } - case NodeTypeCancel: - { - fprintf(ar->f, "cancel "); - render_node_grouped(ar, node->data.cancel_expr.expr); - break; - } case NodeTypeResume: { fprintf(ar->f, "resume "); diff --git a/src/codegen.cpp b/src/codegen.cpp index e9f323dd0d..9bf7b0287b 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -28,8 +28,6 @@ enum ResumeId { ResumeIdManual, ResumeIdReturn, ResumeIdCall, - - ResumeIdAwaitEarlyReturn // must be last }; static void init_darwin_native(CodeGen *g) { @@ -317,8 +315,9 @@ static uint32_t frame_index_trace_arg(CodeGen *g, ZigType *return_type) { // label (grep this): [fn_frame_struct_layout] static uint32_t frame_index_arg(CodeGen *g, ZigType *return_type) { bool have_stack_trace = codegen_fn_has_err_ret_tracing_arg(g, return_type); - // [0] *StackTrace - uint32_t trace_field_count = have_stack_trace ? 1 : 0; + // [0] *StackTrace (callee's) + // [1] *StackTrace (awaiter's) + uint32_t trace_field_count = have_stack_trace ? 
2 : 0; return frame_index_trace_arg(g, return_type) + trace_field_count; } @@ -916,8 +915,6 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) { return buf_create_from_str("async function returned twice"); case PanicMsgIdResumedAnAwaitingFn: return buf_create_from_str("awaiting function resumed"); - case PanicMsgIdResumedACancelingFn: - return buf_create_from_str("canceling function resumed"); case PanicMsgIdFrameTooSmall: return buf_create_from_str("frame too small"); case PanicMsgIdResumedFnPendingAwait: @@ -946,13 +943,16 @@ static LLVMValueRef get_panic_msg_ptr_val(CodeGen *g, PanicMsgId msg_id) { return LLVMConstBitCast(val->global_refs->llvm_global, LLVMPointerType(get_llvm_type(g, str_type), 0)); } +static ZigType *ptr_to_stack_trace_type(CodeGen *g) { + return get_pointer_to_type(g, get_stack_trace_type(g), false); +} + static void gen_panic(CodeGen *g, LLVMValueRef msg_arg, LLVMValueRef stack_trace_arg) { assert(g->panic_fn != nullptr); LLVMValueRef fn_val = fn_llvm_value(g, g->panic_fn); LLVMCallConv llvm_cc = get_llvm_cc(g, g->panic_fn->type_entry->data.fn.fn_type_id.cc); if (stack_trace_arg == nullptr) { - ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(g); - stack_trace_arg = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type)); + stack_trace_arg = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type(g))); } LLVMValueRef args[] = { msg_arg, @@ -1046,7 +1046,7 @@ static LLVMValueRef get_add_error_return_trace_addr_fn(CodeGen *g) { return g->add_error_return_trace_addr_fn_val; LLVMTypeRef arg_types[] = { - get_llvm_type(g, get_ptr_to_stack_trace_type(g)), + get_llvm_type(g, ptr_to_stack_trace_type(g)), g->builtin_types.entry_usize->llvm_type, }; LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), arg_types, 2, false); @@ -1127,7 +1127,7 @@ static LLVMValueRef get_return_err_fn(CodeGen *g) { LLVMTypeRef arg_types[] = { // error return trace pointer - get_llvm_type(g, get_ptr_to_stack_trace_type(g)), + get_llvm_type(g, 
ptr_to_stack_trace_type(g)), }; LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), arg_types, 1, false); @@ -1205,7 +1205,7 @@ static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) { LLVMTypeRef fn_type_ref; if (g->have_err_ret_tracing) { LLVMTypeRef arg_types[] = { - get_llvm_type(g, g->ptr_to_stack_trace_type), + get_llvm_type(g, get_pointer_to_type(g, get_stack_trace_type(g), false)), get_llvm_type(g, g->err_tag_type), }; fn_type_ref = LLVMFunctionType(LLVMVoidType(), arg_types, 2, false); @@ -1321,14 +1321,7 @@ static LLVMValueRef get_cur_err_ret_trace_val(CodeGen *g, Scope *scope) { if (g->cur_err_ret_trace_val_stack != nullptr) { return g->cur_err_ret_trace_val_stack; } - if (g->cur_err_ret_trace_val_arg != nullptr) { - if (fn_is_async(g->cur_fn)) { - return LLVMBuildLoad(g->builder, g->cur_err_ret_trace_val_arg, ""); - } else { - return g->cur_err_ret_trace_val_arg; - } - } - return nullptr; + return g->cur_err_ret_trace_val_arg; } static void gen_safety_crash_for_err(CodeGen *g, LLVMValueRef err_val, Scope *scope) { @@ -1337,8 +1330,7 @@ static void gen_safety_crash_for_err(CodeGen *g, LLVMValueRef err_val, Scope *sc if (g->have_err_ret_tracing) { LLVMValueRef err_ret_trace_val = get_cur_err_ret_trace_val(g, scope); if (err_ret_trace_val == nullptr) { - ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(g); - err_ret_trace_val = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type)); + err_ret_trace_val = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type(g))); } LLVMValueRef args[] = { err_ret_trace_val, @@ -2044,8 +2036,8 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) { assert(g->stack_trace_type != nullptr); LLVMTypeRef param_types[] = { - get_llvm_type(g, get_ptr_to_stack_trace_type(g)), - get_llvm_type(g, get_ptr_to_stack_trace_type(g)), + get_llvm_type(g, ptr_to_stack_trace_type(g)), + get_llvm_type(g, ptr_to_stack_trace_type(g)), }; LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), param_types, 
2, false); @@ -2058,7 +2050,6 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) { addLLVMArgAttr(fn_val, (unsigned)0, "noalias"); addLLVMArgAttr(fn_val, (unsigned)0, "writeonly"); - addLLVMArgAttr(fn_val, (unsigned)1, "nonnull"); addLLVMArgAttr(fn_val, (unsigned)1, "noalias"); addLLVMArgAttr(fn_val, (unsigned)1, "readonly"); if (g->build_mode == BuildModeDebug) { @@ -2075,7 +2066,7 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) { LLVMPositionBuilderAtEnd(g->builder, entry_block); ZigLLVMClearCurrentDebugLocation(g->builder); - // if (dest_stack_trace == null) return; + // if (dest_stack_trace == null or src_stack_trace == null) return; // var frame_index: usize = undefined; // var frames_left: usize = undefined; // if (src_stack_trace.index < src_stack_trace.instruction_addresses.len) { @@ -2093,7 +2084,7 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) { // frame_index = (frame_index + 1) % src_stack_trace.instruction_addresses.len; // } LLVMBasicBlockRef return_block = LLVMAppendBasicBlock(fn_val, "Return"); - LLVMBasicBlockRef dest_non_null_block = LLVMAppendBasicBlock(fn_val, "DestNonNull"); + LLVMBasicBlockRef non_null_block = LLVMAppendBasicBlock(fn_val, "NonNull"); LLVMValueRef frame_index_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frame_index"); LLVMValueRef frames_left_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frames_left"); @@ -2103,9 +2094,12 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) { LLVMValueRef null_dest_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, dest_stack_trace_ptr, LLVMConstNull(LLVMTypeOf(dest_stack_trace_ptr)), ""); - LLVMBuildCondBr(g->builder, null_dest_bit, return_block, dest_non_null_block); + LLVMValueRef null_src_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, src_stack_trace_ptr, + LLVMConstNull(LLVMTypeOf(src_stack_trace_ptr)), ""); + LLVMValueRef null_bit = LLVMBuildOr(g->builder, null_dest_bit, 
null_src_bit, ""); + LLVMBuildCondBr(g->builder, null_bit, return_block, non_null_block); - LLVMPositionBuilderAtEnd(g->builder, dest_non_null_block); + LLVMPositionBuilderAtEnd(g->builder, non_null_block); size_t src_index_field_index = g->stack_trace_type->data.structure.fields[0].gen_index; size_t src_addresses_field_index = g->stack_trace_type->data.structure.fields[1].gen_index; LLVMValueRef src_index_field_ptr = LLVMBuildStructGEP(g->builder, src_stack_trace_ptr, @@ -2183,13 +2177,11 @@ static LLVMValueRef ir_render_save_err_ret_addr(CodeGen *g, IrExecutable *execut ZigLLVMBuildCall(g->builder, return_err_fn, &my_err_trace_val, 1, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, ""); - if (fn_is_async(g->cur_fn) && g->cur_fn->calls_or_awaits_errorable_fn && - codegen_fn_has_err_ret_tracing_arg(g, g->cur_fn->type_entry->data.fn.fn_type_id.return_type)) - { - LLVMValueRef dest_trace_ptr = LLVMBuildLoad(g->builder, g->cur_err_ret_trace_val_arg, ""); - LLVMValueRef args[] = { dest_trace_ptr, my_err_trace_val }; - ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2, - get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, ""); + ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type; + if (fn_is_async(g->cur_fn) && codegen_fn_has_err_ret_tracing_arg(g, ret_type)) { + LLVMValueRef trace_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, + frame_index_trace_arg(g, ret_type), ""); + LLVMBuildStore(g->builder, my_err_trace_val, trace_ptr_ptr); } return nullptr; @@ -2201,16 +2193,9 @@ static void gen_assert_resume_id(CodeGen *g, IrInstruction *source_instr, Resume LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume"); if (end_bb == nullptr) end_bb = LLVMAppendBasicBlock(g->cur_fn_val, "OkResume"); - LLVMValueRef ok_bit; - if (resume_id == ResumeIdAwaitEarlyReturn) { - LLVMValueRef 
last_value = LLVMConstSub(LLVMConstAllOnes(usize_type_ref), - LLVMConstInt(usize_type_ref, ResumeIdAwaitEarlyReturn, false)); - ok_bit = LLVMBuildICmp(g->builder, LLVMIntULT, LLVMGetParam(g->cur_fn_val, 1), last_value, ""); - } else { - LLVMValueRef expected_value = LLVMConstSub(LLVMConstAllOnes(usize_type_ref), - LLVMConstInt(usize_type_ref, resume_id, false)); - ok_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, LLVMGetParam(g->cur_fn_val, 1), expected_value, ""); - } + LLVMValueRef expected_value = LLVMConstSub(LLVMConstAllOnes(usize_type_ref), + LLVMConstInt(usize_type_ref, resume_id, false)); + LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, LLVMGetParam(g->cur_fn_val, 1), expected_value, ""); LLVMBuildCondBr(g->builder, ok_bit, end_bb, bad_resume_block); LLVMPositionBuilderAtEnd(g->builder, bad_resume_block); @@ -2219,36 +2204,19 @@ static void gen_assert_resume_id(CodeGen *g, IrInstruction *source_instr, Resume LLVMPositionBuilderAtEnd(g->builder, end_bb); } -static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef target_frame_ptr, - ResumeId resume_id, LLVMValueRef arg_val) -{ +static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef target_frame_ptr, ResumeId resume_id) { LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; if (fn_val == nullptr) { LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_fn_ptr_index, ""); fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, ""); } - if (arg_val == nullptr) { - arg_val = LLVMBuildSub(g->builder, LLVMConstAllOnes(usize_type_ref), - LLVMConstInt(usize_type_ref, resume_id, false), ""); - } else { - assert(resume_id == ResumeIdAwaitEarlyReturn); - } + LLVMValueRef arg_val = LLVMBuildSub(g->builder, LLVMConstAllOnes(usize_type_ref), + LLVMConstInt(usize_type_ref, resume_id, false), ""); LLVMValueRef args[] = {target_frame_ptr, arg_val}; return ZigLLVMBuildCall(g->builder, fn_val, args, 2, LLVMFastCallConv, ZigLLVM_FnInlineAuto, 
""); } -static LLVMValueRef get_cur_async_prev_val(CodeGen *g) { - if (g->cur_async_prev_val != nullptr) { - return g->cur_async_prev_val; - } - g->cur_async_prev_val = LLVMBuildLoad(g->builder, g->cur_async_prev_val_field_ptr, ""); - return g->cur_async_prev_val; -} - static LLVMBasicBlockRef gen_suspend_begin(CodeGen *g, const char *name_hint) { - // This becomes invalid when a suspend happens. - g->cur_async_prev_val = nullptr; - LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; LLVMBasicBlockRef resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, name_hint); size_t new_block_index = g->cur_resume_block_count; @@ -2259,6 +2227,10 @@ static LLVMBasicBlockRef gen_suspend_begin(CodeGen *g, const char *name_hint) { return resume_bb; } +static void set_tail_call_if_appropriate(CodeGen *g, LLVMValueRef call_inst) { + LLVMSetTailCall(call_inst, true); +} + static LLVMValueRef gen_maybe_atomic_op(CodeGen *g, LLVMAtomicRMWBinOp op, LLVMValueRef ptr, LLVMValueRef val, LLVMAtomicOrdering order) { @@ -2282,32 +2254,32 @@ static LLVMValueRef gen_maybe_atomic_op(CodeGen *g, LLVMAtomicRMWBinOp op, LLVMV } } -static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable, - IrInstructionReturnBegin *instruction) -{ +static void gen_async_return(CodeGen *g, IrInstructionReturn *instruction) { + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; + ZigType *operand_type = (instruction->operand != nullptr) ? instruction->operand->value.type : nullptr; bool operand_has_bits = (operand_type != nullptr) && type_has_bits(operand_type); - if (!fn_is_async(g->cur_fn)) { - return operand_has_bits ? 
ir_llvm_value(g, instruction->operand) : nullptr; - } + ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type; + bool ret_type_has_bits = type_has_bits(ret_type); - LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; if (operand_has_bits && instruction->operand != nullptr) { - ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type; bool need_store = instruction->operand->value.special != ConstValSpecialRuntime || !handle_is_ptr(ret_type); if (need_store) { - // It didn't get written to the result ptr. We do that now so that we do not have to spill - // the return operand. + // It didn't get written to the result ptr. We do that now. ZigType *ret_ptr_type = get_pointer_to_type(g, ret_type, true); gen_assign_raw(g, g->cur_ret_ptr, ret_ptr_type, ir_llvm_value(g, instruction->operand)); } } - // Prepare to be suspended. We might end up not having to suspend though. - LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "ReturnResume"); + // Whether we tail resume the awaiter, or do an early return, we are done and will not be resumed. 
+ if (ir_want_runtime_safety(g, &instruction->base)) { + LLVMValueRef new_resume_index = LLVMConstAllOnes(usize_type_ref); + LLVMBuildStore(g->builder, new_resume_index, g->cur_async_resume_index_ptr); + } LLVMValueRef zero = LLVMConstNull(usize_type_ref); LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref); + LLVMValueRef prev_val = gen_maybe_atomic_op(g, LLVMAtomicRMWBinOpXor, g->cur_async_awaiter_ptr, all_ones, LLVMAtomicOrderingAcquire); @@ -2316,7 +2288,6 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable, LLVMBasicBlockRef resume_them_block = LLVMAppendBasicBlock(g->cur_fn_val, "ResumeThem"); LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, resume_them_block, 2); - LLVMBasicBlockRef switch_bb = LLVMGetInsertBlock(g->builder); LLVMAddCase(switch_instr, zero, early_return_block); LLVMAddCase(switch_instr, all_ones, bad_return_block); @@ -2325,90 +2296,63 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable, LLVMPositionBuilderAtEnd(g->builder, bad_return_block); gen_assertion(g, PanicMsgIdBadReturn, &instruction->base); - // The caller has not done an await yet. So we suspend at the return instruction, until a - // cancel or await is performed. + // There is no awaiter yet, but we're completely done. LLVMPositionBuilderAtEnd(g->builder, early_return_block); LLVMBuildRetVoid(g->builder); - // Add a safety check for when getting resumed by the awaiter. - LLVMPositionBuilderAtEnd(g->builder, resume_bb); - LLVMBasicBlockRef after_resume_block = LLVMGetInsertBlock(g->builder); - gen_assert_resume_id(g, &instruction->base, ResumeIdAwaitEarlyReturn, PanicMsgIdResumedFnPendingAwait, - resume_them_block); + // We need to resume the caller by tail calling them, + // but first write through the result pointer and possibly + // error return trace pointer. + LLVMPositionBuilderAtEnd(g->builder, resume_them_block); - // We need to resume the caller by tail calling them. 
- // That will happen when rendering IrInstructionReturn after running the defers/errdefers. - // We either got here from Entry (function call) or from the switch above - g->cur_async_prev_val = LLVMBuildPhi(g->builder, usize_type_ref, ""); - LLVMValueRef incoming_values[] = { LLVMGetParam(g->cur_fn_val, 1), prev_val }; - LLVMBasicBlockRef incoming_blocks[] = { after_resume_block, switch_bb }; - LLVMAddIncoming(g->cur_async_prev_val, incoming_values, incoming_blocks, 2); + if (ret_type_has_bits) { + // If the awaiter result pointer is non-null, we need to copy the result to there. + LLVMBasicBlockRef copy_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResult"); + LLVMBasicBlockRef copy_end_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResultEnd"); + LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_ret_start + 1, ""); + LLVMValueRef awaiter_ret_ptr = LLVMBuildLoad(g->builder, awaiter_ret_ptr_ptr, ""); + LLVMValueRef zero_ptr = LLVMConstNull(LLVMTypeOf(awaiter_ret_ptr)); + LLVMValueRef need_copy_bit = LLVMBuildICmp(g->builder, LLVMIntNE, awaiter_ret_ptr, zero_ptr, ""); + LLVMBuildCondBr(g->builder, need_copy_bit, copy_block, copy_end_block); - g->cur_is_after_return = true; - LLVMBuildStore(g->builder, g->cur_async_prev_val, g->cur_async_prev_val_field_ptr); + LLVMPositionBuilderAtEnd(g->builder, copy_block); + LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0); + LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, awaiter_ret_ptr, ptr_u8, ""); + LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, g->cur_ret_ptr, ptr_u8, ""); + bool is_volatile = false; + uint32_t abi_align = get_abi_alignment(g, ret_type); + LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, ret_type), false); + ZigLLVMBuildMemCpy(g->builder, + dest_ptr_casted, abi_align, + src_ptr_casted, abi_align, byte_count_val, is_volatile); + LLVMBuildBr(g->builder, copy_end_block); - if (!operand_has_bits) { - return 
nullptr; + LLVMPositionBuilderAtEnd(g->builder, copy_end_block); + if (codegen_fn_has_err_ret_tracing_arg(g, ret_type)) { + LLVMValueRef awaiter_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, + frame_index_trace_arg(g, ret_type) + 1, ""); + LLVMValueRef dest_trace_ptr = LLVMBuildLoad(g->builder, awaiter_trace_ptr_ptr, ""); + LLVMValueRef my_err_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope); + LLVMValueRef args[] = { dest_trace_ptr, my_err_trace_val }; + ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2, + get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, ""); + } } - return get_handle_value(g, g->cur_ret_ptr, operand_type, get_pointer_to_type(g, operand_type, true)); -} - -static void set_tail_call_if_appropriate(CodeGen *g, LLVMValueRef call_inst) { - LLVMSetTailCall(call_inst, true); + // Resume the caller by tail calling them. + ZigType *any_frame_type = get_any_frame_type(g, ret_type); + LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, prev_val, get_llvm_type(g, any_frame_type), ""); + LLVMValueRef call_inst = gen_resume(g, nullptr, their_frame_ptr, ResumeIdReturn); + set_tail_call_if_appropriate(g, call_inst); + LLVMBuildRetVoid(g->builder); } static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *instruction) { if (fn_is_async(g->cur_fn)) { - LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; - ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type; - bool ret_type_has_bits = type_has_bits(ret_type); - - if (ir_want_runtime_safety(g, &instruction->base)) { - LLVMValueRef new_resume_index = LLVMConstAllOnes(usize_type_ref); - LLVMBuildStore(g->builder, new_resume_index, g->cur_async_resume_index_ptr); - } - - if (ret_type_has_bits) { - // If the awaiter result pointer is non-null, we need to copy the result to there. 
- LLVMBasicBlockRef copy_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResult"); - LLVMBasicBlockRef copy_end_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResultEnd"); - LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_ret_start + 1, ""); - LLVMValueRef awaiter_ret_ptr = LLVMBuildLoad(g->builder, awaiter_ret_ptr_ptr, ""); - LLVMValueRef zero_ptr = LLVMConstNull(LLVMTypeOf(awaiter_ret_ptr)); - LLVMValueRef need_copy_bit = LLVMBuildICmp(g->builder, LLVMIntNE, awaiter_ret_ptr, zero_ptr, ""); - LLVMBuildCondBr(g->builder, need_copy_bit, copy_block, copy_end_block); - - LLVMPositionBuilderAtEnd(g->builder, copy_block); - LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0); - LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, awaiter_ret_ptr, ptr_u8, ""); - LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, g->cur_ret_ptr, ptr_u8, ""); - bool is_volatile = false; - uint32_t abi_align = get_abi_alignment(g, ret_type); - LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, ret_type), false); - ZigLLVMBuildMemCpy(g->builder, - dest_ptr_casted, abi_align, - src_ptr_casted, abi_align, byte_count_val, is_volatile); - LLVMBuildBr(g->builder, copy_end_block); - - LLVMPositionBuilderAtEnd(g->builder, copy_end_block); - } - - // We need to resume the caller by tail calling them. 
- ZigType *any_frame_type = get_any_frame_type(g, ret_type); - LLVMValueRef one = LLVMConstInt(usize_type_ref, 1, false); - LLVMValueRef mask_val = LLVMConstNot(one); - LLVMValueRef masked_prev_val = LLVMBuildAnd(g->builder, get_cur_async_prev_val(g), mask_val, ""); - LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, masked_prev_val, - get_llvm_type(g, any_frame_type), ""); - LLVMValueRef call_inst = gen_resume(g, nullptr, their_frame_ptr, ResumeIdReturn, nullptr); - set_tail_call_if_appropriate(g, call_inst); - LLVMBuildRetVoid(g->builder); - - g->cur_is_after_return = false; - + gen_async_return(g, instruction); return nullptr; } + if (want_first_arg_sret(g, &g->cur_fn->type_entry->data.fn.fn_type_id)) { if (instruction->operand == nullptr) { LLVMBuildRetVoid(g->builder); @@ -3893,6 +3837,8 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr // even if prefix_arg_err_ret_stack is true, let the async function do its own // initialization. } else { + // async function called as a normal function + frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc); awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, ""); // caller's own frame pointer if (ret_has_bits) { @@ -3912,7 +3858,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr if (prefix_arg_err_ret_stack) { LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, - frame_index_trace_arg(g, src_return_type), ""); + frame_index_trace_arg(g, src_return_type) + 1, ""); LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope); LLVMBuildStore(g->builder, my_err_ret_trace_val, err_ret_trace_ptr_ptr); } @@ -4018,7 +3964,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr } if (instruction->is_async) { - gen_resume(g, fn_val, frame_result_loc, ResumeIdCall, nullptr); + gen_resume(g, fn_val, frame_result_loc, 
ResumeIdCall); if (instruction->new_stack != nullptr) { return frame_result_loc; } @@ -4028,7 +3974,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMBasicBlockRef call_bb = gen_suspend_begin(g, "CallResume"); - LLVMValueRef call_inst = gen_resume(g, fn_val, frame_result_loc, ResumeIdCall, nullptr); + LLVMValueRef call_inst = gen_resume(g, fn_val, frame_result_loc, ResumeIdCall); set_tail_call_if_appropriate(g, call_inst); LLVMBuildRetVoid(g->builder); @@ -4744,8 +4690,7 @@ static LLVMValueRef ir_render_error_return_trace(CodeGen *g, IrExecutable *execu { LLVMValueRef cur_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope); if (cur_err_ret_trace_val == nullptr) { - ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(g); - return LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type)); + return LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type(g))); } return cur_err_ret_trace_val; } @@ -5505,60 +5450,6 @@ static LLVMValueRef ir_render_suspend_finish(CodeGen *g, IrExecutable *executabl return nullptr; } -static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrInstructionCancel *instruction) { - LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; - LLVMValueRef zero = LLVMConstNull(usize_type_ref); - LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref); - LLVMValueRef one = LLVMConstInt(usize_type_ref, 1, false); - src_assert(instruction->frame->value.type->id == ZigTypeIdAnyFrame, instruction->base.source_node); - ZigType *result_type = instruction->frame->value.type->data.any_frame.result_type; - - LLVMValueRef target_frame_ptr = ir_llvm_value(g, instruction->frame); - LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "CancelResume"); - - // supply null for the awaiter return pointer (no copy needed) - if (type_has_bits(result_type)) { - LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start + 1, ""); 
- LLVMBuildStore(g->builder, LLVMConstNull(LLVMGetElementType(LLVMTypeOf(awaiter_ret_ptr_ptr))), - awaiter_ret_ptr_ptr); - } - - // supply null for the error return trace pointer - if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) { - LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, - frame_index_trace_arg(g, result_type), ""); - LLVMBuildStore(g->builder, LLVMConstNull(LLVMGetElementType(LLVMTypeOf(err_ret_trace_ptr_ptr))), - err_ret_trace_ptr_ptr); - } - - LLVMValueRef awaiter_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, ""); - LLVMValueRef awaiter_ored_val = LLVMBuildOr(g->builder, awaiter_val, one, ""); - LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_awaiter_index, ""); - - LLVMValueRef prev_val = gen_maybe_atomic_op(g, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_ored_val, - LLVMAtomicOrderingRelease); - - LLVMBasicBlockRef complete_suspend_block = LLVMAppendBasicBlock(g->cur_fn_val, "CancelSuspend"); - LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn"); - - LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, resume_bb, 2); - LLVMAddCase(switch_instr, zero, complete_suspend_block); - LLVMAddCase(switch_instr, all_ones, early_return_block); - - LLVMPositionBuilderAtEnd(g->builder, complete_suspend_block); - LLVMBuildRetVoid(g->builder); - - LLVMPositionBuilderAtEnd(g->builder, early_return_block); - LLVMValueRef call_inst = gen_resume(g, nullptr, target_frame_ptr, ResumeIdAwaitEarlyReturn, awaiter_ored_val); - set_tail_call_if_appropriate(g, call_inst); - LLVMBuildRetVoid(g->builder); - - LLVMPositionBuilderAtEnd(g->builder, resume_bb); - gen_assert_resume_id(g, &instruction->base, ResumeIdReturn, PanicMsgIdResumedACancelingFn, nullptr); - - return nullptr; -} - static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInstructionAwaitGen *instruction) { LLVMTypeRef usize_type_ref = 
g->builtin_types.entry_usize->llvm_type; LLVMValueRef zero = LLVMConstNull(usize_type_ref); @@ -5568,8 +5459,9 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst // Prepare to be suspended LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "AwaitResume"); + LLVMBasicBlockRef end_bb = LLVMAppendBasicBlock(g->cur_fn_val, "AwaitEnd"); - // At this point resuming the function will do the correct thing. + // At this point resuming the function will continue from resume_bb. // This code is as if it is running inside the suspend block. // supply the awaiter return pointer @@ -5591,15 +5483,15 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope); assert(my_err_ret_trace_val != nullptr); LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, - frame_index_trace_arg(g, result_type), ""); + frame_index_trace_arg(g, result_type) + 1, ""); LLVMBuildStore(g->builder, my_err_ret_trace_val, err_ret_trace_ptr_ptr); } // caller's own frame pointer LLVMValueRef awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, ""); LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_awaiter_index, ""); - LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_init_val, - LLVMAtomicOrderingRelease, g->is_single_threaded); + LLVMValueRef prev_val = gen_maybe_atomic_op(g, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_init_val, + LLVMAtomicOrderingRelease); LLVMBasicBlockRef bad_await_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadAwait"); LLVMBasicBlockRef complete_suspend_block = LLVMAppendBasicBlock(g->cur_fn_val, "CompleteSuspend"); @@ -5615,20 +5507,42 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst LLVMPositionBuilderAtEnd(g->builder, bad_await_block); gen_assertion(g, 
PanicMsgIdBadAwait, &instruction->base); - // Early return: The async function has already completed, but it is suspending before setting the result, - // populating the error return trace if applicable, and running the defers. - // Tail resume it now, so that it can complete. - LLVMPositionBuilderAtEnd(g->builder, early_return_block); - LLVMValueRef call_inst = gen_resume(g, nullptr, target_frame_ptr, ResumeIdAwaitEarlyReturn, awaiter_init_val); - set_tail_call_if_appropriate(g, call_inst); - LLVMBuildRetVoid(g->builder); - // Rely on the target to resume us from suspension. LLVMPositionBuilderAtEnd(g->builder, complete_suspend_block); LLVMBuildRetVoid(g->builder); + // Early return: The async function has already completed. We must copy the result and + // the error return trace if applicable. + LLVMPositionBuilderAtEnd(g->builder, early_return_block); + if (type_has_bits(result_type) && result_loc != nullptr) { + LLVMValueRef their_result_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start, ""); + LLVMValueRef their_result_ptr = LLVMBuildLoad(g->builder, their_result_ptr_ptr, ""); + LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0); + LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, result_loc, ptr_u8, ""); + LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, their_result_ptr, ptr_u8, ""); + bool is_volatile = false; + uint32_t abi_align = get_abi_alignment(g, result_type); + LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, result_type), false); + ZigLLVMBuildMemCpy(g->builder, + dest_ptr_casted, abi_align, + src_ptr_casted, abi_align, byte_count_val, is_volatile); + } + if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) { + LLVMValueRef their_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, + frame_index_trace_arg(g, result_type), ""); + LLVMValueRef src_trace_ptr = LLVMBuildLoad(g->builder, their_trace_ptr_ptr, ""); + LLVMValueRef dest_trace_ptr = 
get_cur_err_ret_trace_val(g, instruction->base.scope); + LLVMValueRef args[] = { dest_trace_ptr, src_trace_ptr }; + ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2, + get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, ""); + } + LLVMBuildBr(g->builder, end_bb); + LLVMPositionBuilderAtEnd(g->builder, resume_bb); gen_assert_resume_id(g, &instruction->base, ResumeIdReturn, PanicMsgIdResumedAnAwaitingFn, nullptr); + LLVMBuildBr(g->builder, end_bb); + + LLVMPositionBuilderAtEnd(g->builder, end_bb); if (type_has_bits(result_type) && result_loc != nullptr) { return get_handle_value(g, result_loc, result_type, ptr_result_type); } @@ -5640,7 +5554,7 @@ static LLVMValueRef ir_render_resume(CodeGen *g, IrExecutable *executable, IrIns ZigType *frame_type = instruction->frame->value.type; assert(frame_type->id == ZigTypeIdAnyFrame); - gen_resume(g, nullptr, frame, ResumeIdManual, nullptr); + gen_resume(g, nullptr, frame, ResumeIdManual); return nullptr; } @@ -5651,18 +5565,6 @@ static LLVMValueRef ir_render_frame_size(CodeGen *g, IrExecutable *executable, return gen_frame_size(g, fn_val); } -static LLVMValueRef ir_render_test_cancel_requested(CodeGen *g, IrExecutable *executable, - IrInstructionTestCancelRequested *instruction) -{ - if (!fn_is_async(g->cur_fn)) - return LLVMConstInt(LLVMInt1Type(), 0, false); - if (g->cur_is_after_return) { - return LLVMBuildTrunc(g->builder, get_cur_async_prev_val(g), LLVMInt1Type(), ""); - } else { - zig_panic("TODO"); - } -} - static LLVMValueRef ir_render_spill_begin(CodeGen *g, IrExecutable *executable, IrInstructionSpillBegin *instruction) { @@ -5798,8 +5700,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, case IrInstructionIdDeclVarGen: return ir_render_decl_var(g, executable, (IrInstructionDeclVarGen *)instruction); - case IrInstructionIdReturnBegin: - return ir_render_return_begin(g, executable, (IrInstructionReturnBegin *)instruction); case 
IrInstructionIdReturn: return ir_render_return(g, executable, (IrInstructionReturn *)instruction); case IrInstructionIdBinOp: @@ -5918,8 +5818,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_align_cast(g, executable, (IrInstructionAlignCast *)instruction); case IrInstructionIdErrorReturnTrace: return ir_render_error_return_trace(g, executable, (IrInstructionErrorReturnTrace *)instruction); - case IrInstructionIdCancel: - return ir_render_cancel(g, executable, (IrInstructionCancel *)instruction); case IrInstructionIdAtomicRmw: return ir_render_atomic_rmw(g, executable, (IrInstructionAtomicRmw *)instruction); case IrInstructionIdAtomicLoad: @@ -5952,8 +5850,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_frame_size(g, executable, (IrInstructionFrameSizeGen *)instruction); case IrInstructionIdAwaitGen: return ir_render_await(g, executable, (IrInstructionAwaitGen *)instruction); - case IrInstructionIdTestCancelRequested: - return ir_render_test_cancel_requested(g, executable, (IrInstructionTestCancelRequested *)instruction); case IrInstructionIdSpillBegin: return ir_render_spill_begin(g, executable, (IrInstructionSpillBegin *)instruction); case IrInstructionIdSpillEnd: @@ -7060,9 +6956,9 @@ static void do_code_gen(CodeGen *g) { ZigType *array_type = get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count); err_ret_array_val = build_alloca(g, array_type, "error_return_trace_addresses", get_abi_alignment(g, array_type)); - // populate g->stack_trace_type - (void)get_ptr_to_stack_trace_type(g); - g->cur_err_ret_trace_val_stack = build_alloca(g, g->stack_trace_type, "error_return_trace", get_abi_alignment(g, g->stack_trace_type)); + (void)get_llvm_type(g, get_stack_trace_type(g)); + g->cur_err_ret_trace_val_stack = build_alloca(g, get_stack_trace_type(g), "error_return_trace", + get_abi_alignment(g, g->stack_trace_type)); } else { 
g->cur_err_ret_trace_val_stack = nullptr; } @@ -7204,18 +7100,12 @@ static void do_code_gen(CodeGen *g) { LLVMValueRef cur_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_ret_start, ""); g->cur_ret_ptr = LLVMBuildLoad(g->builder, cur_ret_ptr_ptr, ""); } - if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) { - uint32_t trace_field_index = frame_index_trace_arg(g, fn_type_id->return_type); - g->cur_err_ret_trace_val_arg = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, trace_field_index, ""); - } uint32_t trace_field_index_stack = UINT32_MAX; if (codegen_fn_has_err_ret_tracing_stack(g, fn_table_entry, true)) { trace_field_index_stack = frame_index_trace_stack(g, fn_type_id); g->cur_err_ret_trace_val_stack = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, trace_field_index_stack, ""); } - g->cur_async_prev_val_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, - frame_prev_val_index, ""); LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, ""); LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block, 4); @@ -7227,6 +7117,13 @@ static void do_code_gen(CodeGen *g) { g->cur_resume_block_count += 1; LLVMPositionBuilderAtEnd(g->builder, entry_block->llvm_block); if (trace_field_index_stack != UINT32_MAX) { + if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) { + LLVMValueRef trace_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, + frame_index_trace_arg(g, fn_type_id->return_type), ""); + LLVMValueRef zero_ptr = LLVMConstNull(LLVMGetElementType(LLVMTypeOf(trace_ptr_ptr))); + LLVMBuildStore(g->builder, zero_ptr, trace_ptr_ptr); + } + LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, trace_field_index_stack, ""); LLVMValueRef trace_field_addrs = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, @@ -7273,8 +7170,6 @@ static void do_code_gen(CodeGen *g) { LLVMDumpModule(g->module); } - // in release mode, we're sooooo 
confident that we've generated correct ir, - // that we skip the verify module step in order to get better performance. #ifndef NDEBUG char *error = nullptr; LLVMVerifyModule(g->module, LLVMAbortProcessAction, &error); @@ -10157,6 +10052,11 @@ bool codegen_fn_has_err_ret_tracing_arg(CodeGen *g, ZigType *return_type) { } bool codegen_fn_has_err_ret_tracing_stack(CodeGen *g, ZigFn *fn, bool is_async) { - return g->have_err_ret_tracing && fn->calls_or_awaits_errorable_fn && - (is_async || !codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type)); + if (is_async) { + return g->have_err_ret_tracing && (fn->calls_or_awaits_errorable_fn || + codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type)); + } else { + return g->have_err_ret_tracing && fn->calls_or_awaits_errorable_fn && + !codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type); + } } diff --git a/src/ir.cpp b/src/ir.cpp index 3564435ddd..3e80fad270 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -526,10 +526,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionReturn *) { return IrInstructionIdReturn; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionReturnBegin *) { - return IrInstructionIdReturnBegin; -} - static constexpr IrInstructionId ir_instruction_id(IrInstructionCast *) { return IrInstructionIdCast; } @@ -974,10 +970,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionErrorUnion *) { return IrInstructionIdErrorUnion; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionCancel *) { - return IrInstructionIdCancel; -} - static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicRmw *) { return IrInstructionIdAtomicRmw; } @@ -1062,10 +1054,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionResume *) { return IrInstructionIdResume; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionTestCancelRequested *) { - return 
IrInstructionIdTestCancelRequested; -} - static constexpr IrInstructionId ir_instruction_id(IrInstructionSpillBegin *) { return IrInstructionIdSpillBegin; } @@ -1138,18 +1126,6 @@ static IrInstruction *ir_build_return(IrBuilder *irb, Scope *scope, AstNode *sou return &return_instruction->base; } -static IrInstruction *ir_build_return_begin(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *operand) -{ - IrInstructionReturnBegin *return_instruction = ir_build_instruction(irb, scope, source_node); - return_instruction->operand = operand; - - ir_ref_instruction(operand, irb->current_basic_block); - - return &return_instruction->base; -} - - static IrInstruction *ir_build_const_void(IrBuilder *irb, Scope *scope, AstNode *source_node) { IrInstructionConst *const_instruction = ir_build_instruction(irb, scope, source_node); const_instruction->base.value.type = irb->codegen->builtin_types.entry_void; @@ -3284,16 +3260,6 @@ static IrInstruction *ir_build_suspend_finish(IrBuilder *irb, Scope *scope, AstN return &instruction->base; } -static IrInstruction *ir_build_cancel(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *frame) { - IrInstructionCancel *instruction = ir_build_instruction(irb, scope, source_node); - instruction->base.value.type = irb->codegen->builtin_types.entry_void; - instruction->frame = frame; - - ir_ref_instruction(frame, irb->current_basic_block); - - return &instruction->base; -} - static IrInstruction *ir_build_await_src(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *frame, ResultLoc *result_loc) { @@ -3331,13 +3297,6 @@ static IrInstruction *ir_build_resume(IrBuilder *irb, Scope *scope, AstNode *sou return &instruction->base; } -static IrInstruction *ir_build_test_cancel_requested(IrBuilder *irb, Scope *scope, AstNode *source_node) { - IrInstructionTestCancelRequested *instruction = ir_build_instruction(irb, scope, source_node); - instruction->base.value.type = 
irb->codegen->builtin_types.entry_bool; - - return &instruction->base; -} - static IrInstructionSpillBegin *ir_build_spill_begin(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *operand, SpillId spill_id) { @@ -3532,7 +3491,6 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, } ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value)); - return_value = ir_build_return_begin(irb, scope, node, return_value); size_t defer_counts[2]; ir_count_defers(irb, scope, outer_scope, defer_counts); @@ -3545,49 +3503,40 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, return result; } bool should_inline = ir_should_inline(irb->exec, scope); - bool need_test_cancel = !should_inline && have_err_defers; IrBasicBlock *err_block = ir_create_basic_block(irb, scope, "ErrRetErr"); - IrBasicBlock *normal_defers_block = ir_create_basic_block(irb, scope, "Defers"); - IrBasicBlock *ok_block = need_test_cancel ? - ir_create_basic_block(irb, scope, "ErrRetOk") : normal_defers_block; - IrBasicBlock *all_defers_block = have_err_defers ? 
ir_create_basic_block(irb, scope, "ErrDefers") : normal_defers_block; + IrBasicBlock *ok_block = ir_create_basic_block(irb, scope, "ErrRetOk"); + + if (!have_err_defers) { + ir_gen_defers_for_block(irb, scope, outer_scope, false); + } IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, return_value, false, true); - IrInstruction *force_comptime = ir_build_const_bool(irb, scope, node, should_inline); - IrInstruction *err_is_comptime; + IrInstruction *is_comptime; if (should_inline) { - err_is_comptime = force_comptime; + is_comptime = ir_build_const_bool(irb, scope, node, should_inline); } else { - err_is_comptime = ir_build_test_comptime(irb, scope, node, is_err); + is_comptime = ir_build_test_comptime(irb, scope, node, is_err); } - ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err, err_block, ok_block, err_is_comptime)); + ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err, err_block, ok_block, is_comptime)); IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, scope, "RetStmt"); ir_set_cursor_at_end_and_append_block(irb, err_block); + if (have_err_defers) { + ir_gen_defers_for_block(irb, scope, outer_scope, true); + } if (irb->codegen->have_err_ret_tracing && !should_inline) { ir_build_save_err_ret_addr(irb, scope, node); } - ir_build_br(irb, scope, node, all_defers_block, err_is_comptime); + ir_build_br(irb, scope, node, ret_stmt_block, is_comptime); - if (need_test_cancel) { - ir_set_cursor_at_end_and_append_block(irb, ok_block); - IrInstruction *is_canceled = ir_build_test_cancel_requested(irb, scope, node); - ir_mark_gen(ir_build_cond_br(irb, scope, node, is_canceled, - all_defers_block, normal_defers_block, force_comptime)); + ir_set_cursor_at_end_and_append_block(irb, ok_block); + if (have_err_defers) { + ir_gen_defers_for_block(irb, scope, outer_scope, false); } - - if (all_defers_block != normal_defers_block) { - ir_set_cursor_at_end_and_append_block(irb, all_defers_block); - ir_gen_defers_for_block(irb, scope, outer_scope, true); 
- ir_build_br(irb, scope, node, ret_stmt_block, force_comptime); - } - - ir_set_cursor_at_end_and_append_block(irb, normal_defers_block); - ir_gen_defers_for_block(irb, scope, outer_scope, false); - ir_build_br(irb, scope, node, ret_stmt_block, force_comptime); + ir_build_br(irb, scope, node, ret_stmt_block, is_comptime); ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block); IrInstruction *result = ir_build_return(irb, scope, node, return_value); @@ -3619,8 +3568,6 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, err_val)); IrInstructionSpillBegin *spill_begin = ir_build_spill_begin(irb, scope, node, err_val, SpillIdRetErrCode); - ir_build_return_begin(irb, scope, node, err_val); - err_val = ir_build_spill_end(irb, scope, node, spill_begin); ResultLocReturn *result_loc_ret = allocate(1); result_loc_ret->base.id = ResultLocIdReturn; ir_build_reset_result(irb, scope, node, &result_loc_ret->base); @@ -3629,6 +3576,7 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, if (irb->codegen->have_err_ret_tracing && !should_inline) { ir_build_save_err_ret_addr(irb, scope, node); } + err_val = ir_build_spill_end(irb, scope, node, spill_begin); IrInstruction *ret_inst = ir_build_return(irb, scope, node, err_val); result_loc_ret->base.source_instruction = ret_inst; } @@ -3847,38 +3795,10 @@ static IrInstruction *ir_gen_block(IrBuilder *irb, Scope *parent_scope, AstNode return result; // no need for save_err_ret_addr because this cannot return error - // but if it is a canceled async function we do need to run the errdefers + // only generate unconditional defers ir_mark_gen(ir_build_add_implicit_return_type(irb, child_scope, block_node, result)); - result = ir_mark_gen(ir_build_return_begin(irb, child_scope, block_node, result)); - - size_t defer_counts[2]; - ir_count_defers(irb, child_scope, outer_block_scope, defer_counts); - bool 
have_err_defers = defer_counts[ReturnKindError] > 0; - if (!have_err_defers) { - // only generate unconditional defers - ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false); - return ir_mark_gen(ir_build_return(irb, child_scope, result->source_node, result)); - } - IrInstruction *is_canceled = ir_build_test_cancel_requested(irb, child_scope, block_node); - IrBasicBlock *all_defers_block = ir_create_basic_block(irb, child_scope, "ErrDefers"); - IrBasicBlock *normal_defers_block = ir_create_basic_block(irb, child_scope, "Defers"); - IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, child_scope, "RetStmt"); - bool should_inline = ir_should_inline(irb->exec, child_scope); - IrInstruction *errdefers_is_comptime = ir_build_const_bool(irb, child_scope, block_node, - should_inline || !have_err_defers); - ir_mark_gen(ir_build_cond_br(irb, child_scope, block_node, is_canceled, - all_defers_block, normal_defers_block, errdefers_is_comptime)); - - ir_set_cursor_at_end_and_append_block(irb, all_defers_block); - ir_gen_defers_for_block(irb, child_scope, outer_block_scope, true); - ir_build_br(irb, child_scope, block_node, ret_stmt_block, errdefers_is_comptime); - - ir_set_cursor_at_end_and_append_block(irb, normal_defers_block); ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false); - ir_build_br(irb, child_scope, block_node, ret_stmt_block, errdefers_is_comptime); - - ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block); return ir_mark_gen(ir_build_return(irb, child_scope, result->source_node, result)); } @@ -7930,31 +7850,6 @@ static IrInstruction *ir_gen_fn_proto(IrBuilder *irb, Scope *parent_scope, AstNo return ir_build_fn_proto(irb, parent_scope, node, param_types, align_value, return_type, is_var_args); } -static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node) { - assert(node->type == NodeTypeCancel); - - ZigFn *fn_entry = exec_fn_entry(irb->exec); - if (!fn_entry) { - add_node_error(irb->codegen, node, 
buf_sprintf("cancel outside function definition")); - return irb->codegen->invalid_instruction; - } - ScopeSuspend *existing_suspend_scope = get_scope_suspend(scope); - if (existing_suspend_scope) { - if (!existing_suspend_scope->reported_err) { - ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot cancel inside suspend block")); - add_error_note(irb->codegen, msg, existing_suspend_scope->base.source_node, buf_sprintf("suspend block here")); - existing_suspend_scope->reported_err = true; - } - return irb->codegen->invalid_instruction; - } - - IrInstruction *operand = ir_gen_node_extra(irb, node->data.cancel_expr.expr, scope, LValPtr, nullptr); - if (operand == irb->codegen->invalid_instruction) - return irb->codegen->invalid_instruction; - - return ir_build_cancel(irb, scope, node, operand); -} - static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) { assert(node->type == NodeTypeResume); @@ -8149,8 +8044,6 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop return ir_lval_wrap(irb, scope, ir_gen_fn_proto(irb, scope, node), lval, result_loc); case NodeTypeErrorSetDecl: return ir_lval_wrap(irb, scope, ir_gen_err_set_decl(irb, scope, node), lval, result_loc); - case NodeTypeCancel: - return ir_lval_wrap(irb, scope, ir_gen_cancel(irb, scope, node), lval, result_loc); case NodeTypeResume: return ir_lval_wrap(irb, scope, ir_gen_resume(irb, scope, node), lval, result_loc); case NodeTypeAwaitExpr: @@ -8228,7 +8121,6 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec if (!instr_is_unreachable(result)) { ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, result->source_node, result)); - result = ir_mark_gen(ir_build_return_begin(irb, scope, node, result)); // no need for save_err_ret_addr because this cannot return error ir_mark_gen(ir_build_return(irb, scope, result->source_node, result)); } @@ -8340,7 +8232,6 @@ static ConstExprValue 
*ir_exec_const_result(CodeGen *codegen, IrExecutable *exec switch (instruction->id) { case IrInstructionIdUnwrapErrPayload: case IrInstructionIdUnionFieldPtr: - case IrInstructionIdReturnBegin: continue; default: break; @@ -12745,17 +12636,17 @@ static IrInstruction *ir_analyze_instruction_add_implicit_return_type(IrAnalyze return ir_const_void(ira, &instruction->base); } -static IrInstruction *ir_analyze_instruction_return_begin(IrAnalyze *ira, IrInstructionReturnBegin *instruction) { +static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructionReturn *instruction) { IrInstruction *operand = instruction->operand->child; if (type_is_invalid(operand->value.type)) - return ira->codegen->invalid_instruction; + return ir_unreach_error(ira); if (!instr_is_comptime(operand) && handle_is_ptr(ira->explicit_return_type)) { // result location mechanism took care of it. - IrInstruction *result = ir_build_return_begin(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, operand); - copy_const_val(&result->value, &operand->value, true); - return result; + IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope, + instruction->base.source_node, nullptr); + result->value.type = ira->codegen->builtin_types.entry_unreachable; + return ir_finish_anal(ira, result); } IrInstruction *casted_operand = ir_implicit_cast(ira, operand, ira->explicit_return_type); @@ -12777,38 +12668,6 @@ static IrInstruction *ir_analyze_instruction_return_begin(IrAnalyze *ira, IrInst return ir_unreach_error(ira); } - IrInstruction *result = ir_build_return_begin(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, casted_operand); - copy_const_val(&result->value, &casted_operand->value, true); - return result; -} - -static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructionReturn *instruction) { - IrInstruction *operand = instruction->operand->child; - if (type_is_invalid(operand->value.type)) - return 
ir_unreach_error(ira); - - if (!instr_is_comptime(operand) && handle_is_ptr(ira->explicit_return_type)) { - // result location mechanism took care of it. - IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, nullptr); - result->value.type = ira->codegen->builtin_types.entry_unreachable; - return ir_finish_anal(ira, result); - } - - // This cast might have been already done from IrInstructionReturnBegin but it also - // might not have, in the case of `try`. - IrInstruction *casted_operand = ir_implicit_cast(ira, operand, ira->explicit_return_type); - if (type_is_invalid(casted_operand->value.type)) { - AstNode *source_node = ira->explicit_return_type_source_node; - if (source_node != nullptr) { - ErrorMsg *msg = ira->codegen->errors.last(); - add_error_note(ira->codegen, msg, source_node, - buf_sprintf("return type declared here")); - } - return ir_unreach_error(ira); - } - IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_operand); result->value.type = ira->codegen->builtin_types.entry_unreachable; @@ -14540,8 +14399,8 @@ static bool exec_has_err_ret_trace(CodeGen *g, IrExecutable *exec) { static IrInstruction *ir_analyze_instruction_error_return_trace(IrAnalyze *ira, IrInstructionErrorReturnTrace *instruction) { + ZigType *ptr_to_stack_trace_type = get_pointer_to_type(ira->codegen, get_stack_trace_type(ira->codegen), false); if (instruction->optional == IrInstructionErrorReturnTrace::Null) { - ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(ira->codegen); ZigType *optional_type = get_optional_type(ira->codegen, ptr_to_stack_trace_type); if (!exec_has_err_ret_trace(ira->codegen, ira->new_irb.exec)) { IrInstruction *result = ir_const(ira, &instruction->base, optional_type); @@ -14559,7 +14418,7 @@ static IrInstruction *ir_analyze_instruction_error_return_trace(IrAnalyze *ira, assert(ira->codegen->have_err_ret_tracing); 
IrInstruction *new_instruction = ir_build_error_return_trace(&ira->new_irb, instruction->base.scope, instruction->base.source_node, instruction->optional); - new_instruction->value.type = get_ptr_to_stack_trace_type(ira->codegen); + new_instruction->value.type = ptr_to_stack_trace_type; return new_instruction; } } @@ -15800,6 +15659,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c if (impl_fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) { parent_fn_entry->inferred_async_node = fn_ref->source_node; + parent_fn_entry->inferred_async_fn = impl_fn; } IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base, @@ -15923,6 +15783,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c if (fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) { parent_fn_entry->inferred_async_node = fn_ref->source_node; + parent_fn_entry->inferred_async_fn = fn_entry; } IrInstruction *result_loc; @@ -24702,21 +24563,6 @@ static IrInstruction *analyze_frame_ptr_to_anyframe_T(IrAnalyze *ira, IrInstruct return casted_frame; } -static IrInstruction *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructionCancel *instruction) { - IrInstruction *frame = analyze_frame_ptr_to_anyframe_T(ira, &instruction->base, instruction->frame->child); - if (type_is_invalid(frame->value.type)) - return ira->codegen->invalid_instruction; - - ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec); - ir_assert(fn_entry != nullptr, &instruction->base); - - if (fn_entry->inferred_async_node == nullptr) { - fn_entry->inferred_async_node = instruction->base.source_node; - } - - return ir_build_cancel(&ira->new_irb, instruction->base.scope, instruction->base.source_node, frame); -} - static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstructionAwaitSrc *instruction) { IrInstruction *frame = 
analyze_frame_ptr_to_anyframe_T(ira, &instruction->base, instruction->frame->child); if (type_is_invalid(frame->value.type)) @@ -24772,15 +24618,6 @@ static IrInstruction *ir_analyze_instruction_resume(IrAnalyze *ira, IrInstructio return ir_build_resume(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_frame); } -static IrInstruction *ir_analyze_instruction_test_cancel_requested(IrAnalyze *ira, - IrInstructionTestCancelRequested *instruction) -{ - if (ir_should_inline(ira->new_irb.exec, instruction->base.scope)) { - return ir_const_bool(ira, &instruction->base, false); - } - return ir_build_test_cancel_requested(&ira->new_irb, instruction->base.scope, instruction->base.source_node); -} - static IrInstruction *ir_analyze_instruction_spill_begin(IrAnalyze *ira, IrInstructionSpillBegin *instruction) { if (ir_should_inline(ira->new_irb.exec, instruction->base.scope)) return ir_const_void(ira, &instruction->base); @@ -24848,8 +24685,6 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction case IrInstructionIdAwaitGen: zig_unreachable(); - case IrInstructionIdReturnBegin: - return ir_analyze_instruction_return_begin(ira, (IrInstructionReturnBegin *)instruction); case IrInstructionIdReturn: return ir_analyze_instruction_return(ira, (IrInstructionReturn *)instruction); case IrInstructionIdConst: @@ -25070,8 +24905,6 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction return ir_analyze_instruction_error_return_trace(ira, (IrInstructionErrorReturnTrace *)instruction); case IrInstructionIdErrorUnion: return ir_analyze_instruction_error_union(ira, (IrInstructionErrorUnion *)instruction); - case IrInstructionIdCancel: - return ir_analyze_instruction_cancel(ira, (IrInstructionCancel *)instruction); case IrInstructionIdAtomicRmw: return ir_analyze_instruction_atomic_rmw(ira, (IrInstructionAtomicRmw *)instruction); case IrInstructionIdAtomicLoad: @@ -25114,8 +24947,6 @@ static IrInstruction 
*ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction return ir_analyze_instruction_resume(ira, (IrInstructionResume *)instruction); case IrInstructionIdAwaitSrc: return ir_analyze_instruction_await(ira, (IrInstructionAwaitSrc *)instruction); - case IrInstructionIdTestCancelRequested: - return ir_analyze_instruction_test_cancel_requested(ira, (IrInstructionTestCancelRequested *)instruction); case IrInstructionIdSpillBegin: return ir_analyze_instruction_spill_begin(ira, (IrInstructionSpillBegin *)instruction); case IrInstructionIdSpillEnd: @@ -25209,7 +25040,6 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdStorePtr: case IrInstructionIdCallSrc: case IrInstructionIdCallGen: - case IrInstructionIdReturnBegin: case IrInstructionIdReturn: case IrInstructionIdUnreachable: case IrInstructionIdSetCold: @@ -25235,7 +25065,6 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdPtrType: case IrInstructionIdSetAlignStack: case IrInstructionIdExport: - case IrInstructionIdCancel: case IrInstructionIdSaveErrRetAddr: case IrInstructionIdAddImplicitReturnType: case IrInstructionIdAtomicRmw: @@ -25355,7 +25184,6 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdHasDecl: case IrInstructionIdAllocaSrc: case IrInstructionIdAllocaGen: - case IrInstructionIdTestCancelRequested: case IrInstructionIdSpillEnd: return false; diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 63f3711266..7580f19059 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -64,12 +64,6 @@ static void ir_print_other_block(IrPrint *irp, IrBasicBlock *bb) { } } -static void ir_print_return_begin(IrPrint *irp, IrInstructionReturnBegin *instruction) { - fprintf(irp->f, "@returnBegin("); - ir_print_other_instruction(irp, instruction->operand); - fprintf(irp->f, ")"); -} - static void ir_print_return(IrPrint *irp, IrInstructionReturn *instruction) { fprintf(irp->f, "return "); ir_print_other_instruction(irp, 
instruction->operand); @@ -1394,11 +1388,6 @@ static void ir_print_error_union(IrPrint *irp, IrInstructionErrorUnion *instruct ir_print_other_instruction(irp, instruction->payload); } -static void ir_print_cancel(IrPrint *irp, IrInstructionCancel *instruction) { - fprintf(irp->f, "cancel "); - ir_print_other_instruction(irp, instruction->frame); -} - static void ir_print_atomic_rmw(IrPrint *irp, IrInstructionAtomicRmw *instruction) { fprintf(irp->f, "@atomicRmw("); if (instruction->operand_type != nullptr) { @@ -1549,10 +1538,6 @@ static void ir_print_await_gen(IrPrint *irp, IrInstructionAwaitGen *instruction) fprintf(irp->f, ")"); } -static void ir_print_test_cancel_requested(IrPrint *irp, IrInstructionTestCancelRequested *instruction) { - fprintf(irp->f, "@testCancelRequested()"); -} - static void ir_print_spill_begin(IrPrint *irp, IrInstructionSpillBegin *instruction) { fprintf(irp->f, "@spillBegin("); ir_print_other_instruction(irp, instruction->operand); @@ -1570,9 +1555,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { switch (instruction->id) { case IrInstructionIdInvalid: zig_unreachable(); - case IrInstructionIdReturnBegin: - ir_print_return_begin(irp, (IrInstructionReturnBegin *)instruction); - break; case IrInstructionIdReturn: ir_print_return(irp, (IrInstructionReturn *)instruction); break; @@ -1966,9 +1948,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdErrorUnion: ir_print_error_union(irp, (IrInstructionErrorUnion *)instruction); break; - case IrInstructionIdCancel: - ir_print_cancel(irp, (IrInstructionCancel *)instruction); - break; case IrInstructionIdAtomicRmw: ir_print_atomic_rmw(irp, (IrInstructionAtomicRmw *)instruction); break; @@ -2047,9 +2026,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdAwaitGen: ir_print_await_gen(irp, (IrInstructionAwaitGen *)instruction); break; - case 
IrInstructionIdTestCancelRequested: - ir_print_test_cancel_requested(irp, (IrInstructionTestCancelRequested *)instruction); - break; case IrInstructionIdSpillBegin: ir_print_spill_begin(irp, (IrInstructionSpillBegin *)instruction); break; diff --git a/src/parser.cpp b/src/parser.cpp index 82312aacf3..afe5735a06 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -1167,7 +1167,6 @@ static AstNode *ast_parse_prefix_expr(ParseContext *pc) { // <- AsmExpr // / IfExpr // / KEYWORD_break BreakLabel? Expr? -// / KEYWORD_cancel Expr // / KEYWORD_comptime Expr // / KEYWORD_continue BreakLabel? // / KEYWORD_resume Expr @@ -1195,14 +1194,6 @@ static AstNode *ast_parse_primary_expr(ParseContext *pc) { return res; } - Token *cancel = eat_token_if(pc, TokenIdKeywordCancel); - if (cancel != nullptr) { - AstNode *expr = ast_expect(pc, ast_parse_expr); - AstNode *res = ast_create_node(pc, NodeTypeCancel, cancel); - res->data.cancel_expr.expr = expr; - return res; - } - Token *comptime = eat_token_if(pc, TokenIdKeywordCompTime); if (comptime != nullptr) { AstNode *expr = ast_expect(pc, ast_parse_expr); @@ -3035,9 +3026,6 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont case NodeTypeErrorSetDecl: visit_node_list(&node->data.err_set_decl.decls, visit, context); break; - case NodeTypeCancel: - visit_field(&node->data.cancel_expr.expr, visit, context); - break; case NodeTypeResume: visit_field(&node->data.resume_expr.expr, visit, context); break; diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp index 38c6c7153e..84f3f2c0ec 100644 --- a/src/tokenizer.cpp +++ b/src/tokenizer.cpp @@ -114,7 +114,6 @@ static const struct ZigKeyword zig_keywords[] = { {"async", TokenIdKeywordAsync}, {"await", TokenIdKeywordAwait}, {"break", TokenIdKeywordBreak}, - {"cancel", TokenIdKeywordCancel}, {"catch", TokenIdKeywordCatch}, {"comptime", TokenIdKeywordCompTime}, {"const", TokenIdKeywordConst}, @@ -1531,7 +1530,6 @@ const char * token_name(TokenId id) { case 
TokenIdKeywordAwait: return "await"; case TokenIdKeywordResume: return "resume"; case TokenIdKeywordSuspend: return "suspend"; - case TokenIdKeywordCancel: return "cancel"; case TokenIdKeywordAlign: return "align"; case TokenIdKeywordAnd: return "and"; case TokenIdKeywordAnyFrame: return "anyframe"; diff --git a/src/tokenizer.hpp b/src/tokenizer.hpp index 98bdfea907..ce62f5dc87 100644 --- a/src/tokenizer.hpp +++ b/src/tokenizer.hpp @@ -58,7 +58,6 @@ enum TokenId { TokenIdKeywordAsync, TokenIdKeywordAwait, TokenIdKeywordBreak, - TokenIdKeywordCancel, TokenIdKeywordCatch, TokenIdKeywordCompTime, TokenIdKeywordConst, diff --git a/std/event/fs.zig b/std/event/fs.zig index 73a296ca3f..d6d8f2faef 100644 --- a/std/event/fs.zig +++ b/std/event/fs.zig @@ -1301,7 +1301,7 @@ async fn testFsWatch(loop: *Loop) !void { const ev = try async watch.channel.get(); var ev_consumed = false; - defer if (!ev_consumed) cancel ev; + defer if (!ev_consumed) await ev; // overwrite line 2 const fd = try await try async openReadWrite(loop, file_path, File.default_mode); diff --git a/std/event/future.zig b/std/event/future.zig index e5f6d984ce..45bb7759c5 100644 --- a/std/event/future.zig +++ b/std/event/future.zig @@ -110,7 +110,7 @@ async fn testFuture(loop: *Loop) void { const b_result = await b; const result = a_result + b_result; - cancel c; + await c; testing.expect(result == 12); } diff --git a/std/event/group.zig b/std/event/group.zig index 1fc4a61e93..f96b938f80 100644 --- a/std/event/group.zig +++ b/std/event/group.zig @@ -27,17 +27,6 @@ pub fn Group(comptime ReturnType: type) type { }; } - /// Cancel all the outstanding frames. Can be called even if wait was already called. - pub fn deinit(self: *Self) void { - while (self.frame_stack.pop()) |node| { - cancel node.data; - } - while (self.alloc_stack.pop()) |node| { - cancel node.data; - self.lock.loop.allocator.destroy(node); - } - } - /// Add a frame to the group. Thread-safe. 
pub fn add(self: *Self, handle: anyframe->ReturnType) (error{OutOfMemory}!void) { const node = try self.lock.loop.allocator.create(Stack.Node); @@ -64,13 +53,14 @@ pub fn Group(comptime ReturnType: type) type { const held = self.lock.acquire(); defer held.release(); + var result: ReturnType = {}; + while (self.frame_stack.pop()) |node| { if (Error == void) { await node.data; } else { (await node.data) catch |err| { - self.deinit(); - return err; + result = err; }; } } @@ -81,11 +71,11 @@ pub fn Group(comptime ReturnType: type) type { await handle; } else { (await handle) catch |err| { - self.deinit(); - return err; + result = err; }; } } + return result; } }; } diff --git a/std/event/net.zig b/std/event/net.zig index 2a28a0ef93..bed665dcdc 100644 --- a/std/event/net.zig +++ b/std/event/net.zig @@ -54,7 +54,7 @@ pub const Server = struct { self.listen_address = std.net.Address.initPosix(try os.getsockname(sockfd)); self.accept_frame = async Server.handler(self); - errdefer cancel self.accept_frame.?; + errdefer await self.accept_frame.?; self.listen_resume_node.handle = self.accept_frame.?; try self.loop.linuxAddFd(sockfd, &self.listen_resume_node, os.EPOLLIN | os.EPOLLOUT | os.EPOLLET); @@ -71,7 +71,7 @@ pub const Server = struct { } pub fn deinit(self: *Server) void { - if (self.accept_frame) |accept_frame| cancel accept_frame; + if (self.accept_frame) |accept_frame| await accept_frame; if (self.sockfd) |sockfd| os.close(sockfd); } @@ -274,13 +274,9 @@ test "listen on a port, send bytes, receive bytes" { const self = @fieldParentPtr(Self, "tcp_server", tcp_server); var socket = _socket; // TODO https://github.com/ziglang/zig/issues/1592 defer socket.close(); - // TODO guarantee elision of this allocation const next_handler = errorableHandler(self, _addr, socket) catch |err| { std.debug.panic("unable to handle connection: {}\n", err); }; - suspend { - cancel @frame(); - } } async fn errorableHandler(self: *Self, _addr: *const std.net.Address, _socket: File) !void { 
const addr = _addr.*; // TODO https://github.com/ziglang/zig/issues/1592 diff --git a/std/zig/parse.zig b/std/zig/parse.zig index 600178cdce..077870a9ca 100644 --- a/std/zig/parse.zig +++ b/std/zig/parse.zig @@ -814,7 +814,6 @@ fn parsePrefixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node { /// <- AsmExpr /// / IfExpr /// / KEYWORD_break BreakLabel? Expr? -/// / KEYWORD_cancel Expr /// / KEYWORD_comptime Expr /// / KEYWORD_continue BreakLabel? /// / KEYWORD_resume Expr @@ -839,20 +838,6 @@ fn parsePrimaryExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node return &node.base; } - if (eatToken(it, .Keyword_cancel)) |token| { - const expr_node = try expectNode(arena, it, tree, parseExpr, AstError{ - .ExpectedExpr = AstError.ExpectedExpr{ .token = it.index }, - }); - const node = try arena.create(Node.PrefixOp); - node.* = Node.PrefixOp{ - .base = Node{ .id = .PrefixOp }, - .op_token = token, - .op = Node.PrefixOp.Op.Cancel, - .rhs = expr_node, - }; - return &node.base; - } - if (eatToken(it, .Keyword_comptime)) |token| { const expr_node = try expectNode(arena, it, tree, parseExpr, AstError{ .ExpectedExpr = AstError.ExpectedExpr{ .token = it.index }, diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig index 5f2a3934fd..c5c740353e 100644 --- a/std/zig/parser_test.zig +++ b/std/zig/parser_test.zig @@ -2115,10 +2115,10 @@ test "zig fmt: async functions" { \\ await p; \\} \\ - \\test "suspend, resume, cancel" { + \\test "suspend, resume, await" { \\ const p: anyframe = async testAsyncSeq(); \\ resume p; - \\ cancel p; + \\ await p; \\} \\ ); diff --git a/std/zig/tokenizer.zig b/std/zig/tokenizer.zig index 9de20c39f2..4d4ceb07db 100644 --- a/std/zig/tokenizer.zig +++ b/std/zig/tokenizer.zig @@ -21,7 +21,6 @@ pub const Token = struct { Keyword{ .bytes = "await", .id = Id.Keyword_await }, Keyword{ .bytes = "break", .id = Id.Keyword_break }, Keyword{ .bytes = "catch", .id = Id.Keyword_catch }, - Keyword{ .bytes = "cancel", .id = 
Id.Keyword_cancel }, Keyword{ .bytes = "comptime", .id = Id.Keyword_comptime }, Keyword{ .bytes = "const", .id = Id.Keyword_const }, Keyword{ .bytes = "continue", .id = Id.Keyword_continue }, @@ -151,7 +150,6 @@ pub const Token = struct { Keyword_async, Keyword_await, Keyword_break, - Keyword_cancel, Keyword_catch, Keyword_comptime, Keyword_const, diff --git a/test/compile_errors.zig b/test/compile_errors.zig index c07786d462..f53b1c9707 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -61,13 +61,15 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { cases.add( "runtime-known async function called", \\export fn entry() void { + \\ _ = async amain(); + \\} + \\fn amain() void { \\ var ptr = afunc; \\ _ = ptr(); \\} - \\ \\async fn afunc() void {} , - "tmp.zig:3:12: error: function is not comptime-known; @asyncCall required", + "tmp.zig:6:12: error: function is not comptime-known; @asyncCall required", ); cases.add( @@ -3388,7 +3390,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ \\export fn entry() usize { return @sizeOf(@typeOf(Foo)); } , - "tmp.zig:5:18: error: unable to evaluate constant expression", + "tmp.zig:5:25: error: unable to evaluate constant expression", "tmp.zig:2:12: note: called from here", "tmp.zig:2:8: note: called from here", ); diff --git a/test/stage1/behavior.zig b/test/stage1/behavior.zig index dba43268e2..6ec1521048 100644 --- a/test/stage1/behavior.zig +++ b/test/stage1/behavior.zig @@ -41,7 +41,6 @@ comptime { _ = @import("behavior/bugs/920.zig"); _ = @import("behavior/byteswap.zig"); _ = @import("behavior/byval_arg_var.zig"); - _ = @import("behavior/cancel.zig"); _ = @import("behavior/cast.zig"); _ = @import("behavior/const_slice_child.zig"); _ = @import("behavior/defer.zig"); diff --git a/test/stage1/behavior/async_fn.zig b/test/stage1/behavior/async_fn.zig index 2d76b47244..ec0c9e52a6 100644 --- a/test/stage1/behavior/async_fn.zig +++ b/test/stage1/behavior/async_fn.zig @@ -150,7 +150,7 @@ 
test "coroutine suspend, resume" { seq('a'); var f = async testAsyncSeq(); seq('c'); - cancel f; + await f; seq('g'); } @@ -271,7 +271,6 @@ test "async function with dot syntax" { } }; const p = async S.foo(); - // can't cancel in tests because they are non-async functions expect(S.y == 2); } @@ -286,7 +285,7 @@ test "async fn pointer in a struct field" { comptime expect(@typeOf(f) == anyframe->void); expect(data == 2); resume f; - expect(data == 2); + expect(data == 4); _ = async doTheAwait(f); expect(data == 4); } @@ -394,7 +393,6 @@ async fn printTrace(p: anyframe->(anyerror!void)) void { test "break from suspend" { var my_result: i32 = 1; const p = async testBreakFromSuspend(&my_result); - // can't cancel here std.testing.expect(my_result == 2); } async fn testBreakFromSuspend(my_result: *i32) void { @@ -530,45 +528,6 @@ test "call async function which has struct return type" { S.doTheTest(); } -test "errdefers in scope get run when canceling async fn call" { - const S = struct { - var frame: anyframe = undefined; - var x: u32 = 0; - - fn doTheTest() void { - x = 9; - _ = async cancelIt(); - resume frame; - expect(x == 6); - - x = 9; - _ = async awaitIt(); - resume frame; - expect(x == 11); - } - - fn cancelIt() void { - var f = async func(); - cancel f; - } - - fn awaitIt() void { - var f = async func(); - await f; - } - - fn func() void { - defer x += 1; - errdefer x /= 2; - defer x += 1; - suspend { - frame = @frame(); - } - } - }; - S.doTheTest(); -} - test "pass string literal to async function" { const S = struct { var frame: anyframe = undefined; @@ -590,7 +549,7 @@ test "pass string literal to async function" { S.doTheTest(); } -test "cancel inside an errdefer" { +test "await inside an errdefer" { const S = struct { var frame: anyframe = undefined; @@ -601,7 +560,7 @@ test "cancel inside an errdefer" { fn amainWrap() !void { var foo = async func(); - errdefer cancel foo; + errdefer await foo; return error.Bad; } @@ -614,35 +573,6 @@ test "cancel inside 
an errdefer" { S.doTheTest(); } -test "combining try with errdefer cancel" { - const S = struct { - var frame: anyframe = undefined; - var ok = false; - - fn doTheTest() void { - _ = async amain(); - resume frame; - expect(ok); - } - - fn amain() !void { - var f = async func("https://example.com/"); - errdefer cancel f; - - _ = try await f; - } - - fn func(url: []const u8) ![]u8 { - errdefer ok = true; - frame = @frame(); - suspend; - return error.Bad; - } - - }; - S.doTheTest(); -} - test "try in an async function with error union and non-zero-bit payload" { const S = struct { var frame: anyframe = undefined; @@ -730,14 +660,22 @@ fn testAsyncAwaitTypicalUsage(comptime simulate_fail_download: bool, comptime si fn amain() !void { const allocator = std.heap.direct_allocator; // TODO once we have the debug allocator, use that, so that this can detect leaks var download_frame = async fetchUrl(allocator, "https://example.com/"); - errdefer cancel download_frame; + var download_awaited = false; + errdefer if (!download_awaited) { + if (await download_frame) |x| allocator.free(x) else |_| {} + }; var file_frame = async readFile(allocator, "something.txt"); - errdefer cancel file_frame; + var file_awaited = false; + errdefer if (!file_awaited) { + if (await file_frame) |x| allocator.free(x) else |_| {} + }; + download_awaited = true; const download_text = try await download_frame; defer allocator.free(download_text); + file_awaited = true; const file_text = try await file_frame; defer allocator.free(file_text); diff --git a/test/stage1/behavior/cancel.zig b/test/stage1/behavior/cancel.zig deleted file mode 100644 index 5dedb20159..0000000000 --- a/test/stage1/behavior/cancel.zig +++ /dev/null @@ -1,115 +0,0 @@ -const std = @import("std"); -const expect = std.testing.expect; - -var defer_f1: bool = false; -var defer_f2: bool = false; -var defer_f3: bool = false; -var f3_frame: anyframe = undefined; - -test "cancel forwards" { - _ = async atest1(); - resume f3_frame; -} - 
-fn atest1() void { - const p = async f1(); - cancel &p; - expect(defer_f1); - expect(defer_f2); - expect(defer_f3); -} - -async fn f1() void { - defer { - defer_f1 = true; - } - var f2_frame = async f2(); - await f2_frame; -} - -async fn f2() void { - defer { - defer_f2 = true; - } - f3(); -} - -async fn f3() void { - f3_frame = @frame(); - defer { - defer_f3 = true; - } - suspend; -} - -var defer_b1: bool = false; -var defer_b2: bool = false; -var defer_b3: bool = false; -var defer_b4: bool = false; - -test "cancel backwards" { - var b1_frame = async b1(); - resume b4_handle; - _ = async awaitAFrame(&b1_frame); - expect(defer_b1); - expect(defer_b2); - expect(defer_b3); - expect(defer_b4); -} - -async fn b1() void { - defer { - defer_b1 = true; - } - b2(); -} - -var b4_handle: anyframe->void = undefined; - -async fn b2() void { - const b3_handle = async b3(); - resume b4_handle; - defer { - defer_b2 = true; - } - const value = await b3_handle; - expect(value == 1234); -} - -async fn b3() i32 { - defer { - defer_b3 = true; - } - b4(); - return 1234; -} - -async fn b4() void { - defer { - defer_b4 = true; - } - suspend { - b4_handle = @frame(); - } - suspend; -} - -fn awaitAFrame(f: anyframe->void) void { - await f; -} - -test "cancel on a non-pointer" { - const S = struct { - fn doTheTest() void { - _ = async atest(); - } - fn atest() void { - var f = async func(); - cancel f; - } - fn func() void { - suspend; - } - }; - S.doTheTest(); -} From 729807203a4ef162f39656be062dd11a428af8e3 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 15 Aug 2019 14:34:52 -0400 Subject: [PATCH 106/125] force static libs for vendored dependencies --- CMakeLists.txt | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 998da172fc..ec90a69f2b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -199,11 +199,11 @@ else() "${CMAKE_SOURCE_DIR}/deps/lld/wasm/Writer.cpp" "${CMAKE_SOURCE_DIR}/deps/lld/wasm/WriterUtils.cpp" ) 
- add_library(embedded_lld_lib ${EMBEDDED_LLD_LIB_SOURCES}) - add_library(embedded_lld_elf ${EMBEDDED_LLD_ELF_SOURCES}) - add_library(embedded_lld_coff ${EMBEDDED_LLD_COFF_SOURCES}) - add_library(embedded_lld_mingw ${EMBEDDED_LLD_MINGW_SOURCES}) - add_library(embedded_lld_wasm ${EMBEDDED_LLD_WASM_SOURCES}) + add_library(embedded_lld_lib STATIC ${EMBEDDED_LLD_LIB_SOURCES}) + add_library(embedded_lld_elf STATIC ${EMBEDDED_LLD_ELF_SOURCES}) + add_library(embedded_lld_coff STATIC ${EMBEDDED_LLD_COFF_SOURCES}) + add_library(embedded_lld_mingw STATIC ${EMBEDDED_LLD_MINGW_SOURCES}) + add_library(embedded_lld_wasm STATIC ${EMBEDDED_LLD_WASM_SOURCES}) if(MSVC) set(ZIG_LLD_COMPILE_FLAGS "-std=c++11 -D_CRT_SECURE_NO_WARNINGS /w") else() @@ -400,7 +400,7 @@ set(EMBEDDED_SOFTFLOAT_SOURCES "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/ui32_to_f128M.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/ui64_to_f128M.c" ) -add_library(embedded_softfloat ${EMBEDDED_SOFTFLOAT_SOURCES}) +add_library(embedded_softfloat STATIC ${EMBEDDED_SOFTFLOAT_SOURCES}) if(MSVC) set_target_properties(embedded_softfloat PROPERTIES COMPILE_FLAGS "-std=c99 /w" From 55f5cee86b39bb2127a316f9b5d0abf532580cac Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 15 Aug 2019 15:06:05 -0400 Subject: [PATCH 107/125] fix error return traces for async calls of blocking functions --- src/analyze.cpp | 5 +++ src/codegen.cpp | 52 ++++++++++++++++++++++--------- src/ir.cpp | 4 +++ test/compile_errors.zig | 12 +++++++ test/stage1/behavior/async_fn.zig | 35 ++++++++++++++++----- 5 files changed, 85 insertions(+), 23 deletions(-) diff --git a/src/analyze.cpp b/src/analyze.cpp index fc42abaf26..21289f24a8 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -3819,6 +3819,11 @@ static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) { } else if (fn->inferred_async_node->type == NodeTypeAwaitExpr) { add_error_note(g, msg, fn->inferred_async_node, buf_sprintf("await is a suspend point")); + } else if 
(fn->inferred_async_node->type == NodeTypeFnCallExpr && + fn->inferred_async_node->data.fn_call_expr.is_builtin) + { + add_error_note(g, msg, fn->inferred_async_node, + buf_sprintf("@frame() causes function to be async")); } else { add_error_note(g, msg, fn->inferred_async_node, buf_sprintf("suspends here")); diff --git a/src/codegen.cpp b/src/codegen.cpp index 9bf7b0287b..45e2e4122f 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -3760,6 +3760,23 @@ static LLVMValueRef gen_frame_size(CodeGen *g, LLVMValueRef fn_val) { return LLVMBuildLoad(g->builder, prefix_ptr, ""); } +static void gen_init_stack_trace(CodeGen *g, LLVMValueRef trace_field_ptr, LLVMValueRef addrs_field_ptr) { + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; + LLVMValueRef zero = LLVMConstNull(usize_type_ref); + + LLVMValueRef index_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 0, ""); + LLVMBuildStore(g->builder, zero, index_ptr); + + LLVMValueRef addrs_slice_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 1, ""); + LLVMValueRef addrs_ptr_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_ptr_index, ""); + LLVMValueRef indices[] = { LLVMConstNull(usize_type_ref), LLVMConstNull(usize_type_ref) }; + LLVMValueRef trace_field_addrs_as_ptr = LLVMBuildInBoundsGEP(g->builder, addrs_field_ptr, indices, 2, ""); + LLVMBuildStore(g->builder, trace_field_addrs_as_ptr, addrs_ptr_ptr); + + LLVMValueRef addrs_len_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_len_index, ""); + LLVMBuildStore(g->builder, LLVMConstInt(usize_type_ref, stack_trace_ptr_count, false), addrs_len_ptr); +} + static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstructionCallGen *instruction) { LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; @@ -3900,9 +3917,24 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr if (first_arg_ret) { gen_param_values.append(ret_ptr); } - } - if 
(prefix_arg_err_ret_stack) { - gen_param_values.append(get_cur_err_ret_trace_val(g, instruction->base.scope)); + if (prefix_arg_err_ret_stack) { + // Set up the callee stack trace pointer pointing into the frame. + // Then we have to wire up the StackTrace pointers. + // Await is responsible for merging error return traces. + uint32_t trace_field_index_start = frame_index_trace_arg(g, src_return_type); + LLVMValueRef callee_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, + trace_field_index_start, ""); + LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, + trace_field_index_start + 2, ""); + LLVMValueRef addrs_field_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, + trace_field_index_start + 3, ""); + + LLVMBuildStore(g->builder, trace_field_ptr, callee_trace_ptr_ptr); + + gen_init_stack_trace(g, trace_field_ptr, addrs_field_ptr); + + gen_param_values.append(get_cur_err_ret_trace_val(g, instruction->base.scope)); + } } } else { if (first_arg_ret) { @@ -7126,20 +7158,10 @@ static void do_code_gen(CodeGen *g) { LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, trace_field_index_stack, ""); - LLVMValueRef trace_field_addrs = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, + LLVMValueRef addrs_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, trace_field_index_stack + 1, ""); - LLVMValueRef index_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 0, ""); - LLVMBuildStore(g->builder, zero, index_ptr); - - LLVMValueRef addrs_slice_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 1, ""); - LLVMValueRef addrs_ptr_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_ptr_index, ""); - LLVMValueRef indices[] = { LLVMConstNull(usize_type_ref), LLVMConstNull(usize_type_ref) }; - LLVMValueRef trace_field_addrs_as_ptr = LLVMBuildInBoundsGEP(g->builder, trace_field_addrs, indices, 2, ""); - LLVMBuildStore(g->builder, trace_field_addrs_as_ptr, addrs_ptr_ptr); - - 
LLVMValueRef addrs_len_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_len_index, ""); - LLVMBuildStore(g->builder, LLVMConstInt(usize_type_ref, stack_trace_ptr_count, false), addrs_len_ptr); + gen_init_stack_trace(g, trace_field_ptr, addrs_field_ptr); } render_async_var_decls(g, entry_block->instruction_list.at(0)->scope); } else { diff --git a/src/ir.cpp b/src/ir.cpp index 3e80fad270..ddaf82893a 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -22078,6 +22078,10 @@ static IrInstruction *ir_analyze_instruction_frame_handle(IrAnalyze *ira, IrInst ZigFn *fn = exec_fn_entry(ira->new_irb.exec); ir_assert(fn != nullptr, &instruction->base); + if (fn->inferred_async_node == nullptr) { + fn->inferred_async_node = instruction->base.source_node; + } + ZigType *frame_type = get_fn_frame_type(ira->codegen, fn); ZigType *ptr_frame_type = get_pointer_to_type(ira->codegen, frame_type, false); diff --git a/test/compile_errors.zig b/test/compile_errors.zig index f53b1c9707..0d579ece95 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -2,6 +2,18 @@ const tests = @import("tests.zig"); const builtin = @import("builtin"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "@frame() causes function to be async", + \\export fn entry() void { + \\ func(); + \\} + \\fn func() void { + \\ _ = @frame(); + \\} + , + "tmp.zig:1:1: error: function with calling convention 'ccc' cannot be async", + "tmp.zig:5:9: note: @frame() causes function to be async", + ); cases.add( "invalid suspend in exported function", \\export fn entry() void { diff --git a/test/stage1/behavior/async_fn.zig b/test/stage1/behavior/async_fn.zig index ec0c9e52a6..96b7b02137 100644 --- a/test/stage1/behavior/async_fn.zig +++ b/test/stage1/behavior/async_fn.zig @@ -634,17 +634,30 @@ test "returning a const error from async function" { test "async/await typical usage" { inline for ([_]bool{false, true}) |b1| { inline for ([_]bool{false, true}) |b2| { - 
testAsyncAwaitTypicalUsage(b1, b2).doTheTest(); + inline for ([_]bool{false, true}) |b3| { + inline for ([_]bool{false, true}) |b4| { + testAsyncAwaitTypicalUsage(b1, b2, b3, b4).doTheTest(); + } + } } } } -fn testAsyncAwaitTypicalUsage(comptime simulate_fail_download: bool, comptime simulate_fail_file: bool) type { +fn testAsyncAwaitTypicalUsage( + comptime simulate_fail_download: bool, + comptime simulate_fail_file: bool, + comptime suspend_download: bool, + comptime suspend_file: bool) type +{ return struct { fn doTheTest() void { _ = async amainWrap(); - resume global_file_frame; - resume global_download_frame; + if (suspend_file) { + resume global_file_frame; + } + if (suspend_download) { + resume global_download_frame; + } } fn amainWrap() void { if (amain()) |_| { @@ -685,20 +698,26 @@ fn testAsyncAwaitTypicalUsage(comptime simulate_fail_download: bool, comptime si var global_download_frame: anyframe = undefined; fn fetchUrl(allocator: *std.mem.Allocator, url: []const u8) anyerror![]u8 { - global_download_frame = @frame(); const result = try std.mem.dupe(allocator, u8, "expected download text"); errdefer allocator.free(result); - suspend; + if (suspend_download) { + suspend { + global_download_frame = @frame(); + } + } if (simulate_fail_download) return error.NoResponse; return result; } var global_file_frame: anyframe = undefined; fn readFile(allocator: *std.mem.Allocator, filename: []const u8) anyerror![]u8 { - global_file_frame = @frame(); const result = try std.mem.dupe(allocator, u8, "expected file text"); errdefer allocator.free(result); - suspend; + if (suspend_file) { + suspend { + global_file_frame = @frame(); + } + } if (simulate_fail_file) return error.FileNotFound; return result; } From d3672493cc6ad5085f202df1859b13b4ae4dec96 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 15 Aug 2019 16:46:43 -0400 Subject: [PATCH 108/125] basic docs for new async/await semantics --- doc/langref.html.in | 427 ++++++++++++++++++++++++++++++++------------ 
1 file changed, 310 insertions(+), 117 deletions(-) diff --git a/doc/langref.html.in b/doc/langref.html.in index 0f964373c5..e02a406bd4 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -5970,54 +5970,25 @@ test "global assembly" { {#header_close#} {#header_open|Async Functions#}

      + When a function is called, a frame is pushed to the stack, + the function runs until it reaches a return statement, and then the frame is popped from the stack. + At the callsite, the following code does not run until the function returns. +

      +

      An async function is a function whose callsite is split into an {#syntax#}async{#endsyntax#} initiation, - followed by an {#syntax#}await{#endsyntax#} completion. + followed by an {#syntax#}await{#endsyntax#} completion. Its frame is + provided explicitly by the caller, and it can be suspended and resumed any number of times.

      - When you call a function, it creates a stack frame, - and then the function runs until it reaches a return - statement, and then the stack frame is destroyed. - At the callsite, the next line of code does not run - until the function returns. + Zig infers that a function is {#syntax#}async{#endsyntax#} when it observes that the function contains + a suspension point. Async functions can be called the same as normal functions. A + function call of an async function is a suspend point.

      + {#header_open|Suspend and Resume#}

      - An async function is like a function, but it can be suspended - and resumed any number of times, and then it must be - explicitly destroyed. When an async function suspends, it - returns to the resumer. -

      - {#header_open|Minimal Async Function Example#} -

      - Declare an async function with the {#syntax#}async{#endsyntax#} keyword. - The expression in angle brackets must evaluate to a struct - which has these fields: -

      -
        -
      • {#syntax#}allocFn: fn (self: *Allocator, byte_count: usize, alignment: u29) Error![]u8{#endsyntax#} - where {#syntax#}Error{#endsyntax#} can be any error set.
      • -
      • {#syntax#}freeFn: fn (self: *Allocator, old_mem: []u8) void{#endsyntax#}
      • -
      -

      - You may notice that this corresponds to the {#syntax#}std.mem.Allocator{#endsyntax#} interface. - This makes it convenient to integrate with existing allocators. Note, however, - that the language feature does not depend on the standard library, and any struct which - has these fields is allowed. -

      -

      - Omitting the angle bracket expression when defining an async function makes - the function generic. Zig will infer the allocator type when the async function is called. -

      -

      - Call an async function with the {#syntax#}async{#endsyntax#} keyword. Here, the expression in angle brackets - is a pointer to the allocator struct that the async function expects. -

      -

      - The result of an async function call is a {#syntax#}promise->T{#endsyntax#} type, where {#syntax#}T{#endsyntax#} - is the return type of the async function. Once a promise has been created, it must be - consumed with {#syntax#}await{#endsyntax#}: -

      -

      - Async functions start executing when created, so in the following example, the entire - TODO + At any point, a function may suspend itself. This causes control flow to + return to the callsite (in the case of the first suspension), + or resumer (in the case of subsequent suspensions).

      {#code_begin|test#} const std = @import("std"); @@ -6025,32 +5996,25 @@ const assert = std.debug.assert; var x: i32 = 1; -test "call an async function" { - var frame = async simpleAsyncFn(); - comptime assert(@typeOf(frame) == @Frame(simpleAsyncFn)); +test "suspend with no resume" { + var frame = async func(); assert(x == 2); } -fn simpleAsyncFn() void { + +fn func() void { x += 1; suspend; + // This line is never reached because the suspend has no matching resume. x += 1; } {#code_end#} - {#header_close#} - {#header_open|Suspend and Resume#}

      - At any point, an async function may suspend itself. This causes control flow to - return to the caller or resumer. The following code demonstrates where control flow - goes: -

      -

      - TODO another test example here -

      -

      - When an async function suspends itself, it must be sure that it will be - resumed somehow, for example by registering its promise handle - in an event loop. Use a suspend capture block to gain access to the - promise (TODO this is outdated): + In the same way that each allocation should have a corresponding free, + each {#syntax#}suspend{#endsyntax#} should have a corresponding {#syntax#}resume{#endsyntax#}. + A suspend block allows a function to put a pointer to its own + frame somewhere, for example into an event loop, even if that action will perform a + {#syntax#}resume{#endsyntax#} operation on a different thread. + {#link|@frame#} provides access to the async function frame pointer.

      {#code_begin|test#} const std = @import("std"); @@ -6061,9 +6025,9 @@ var result = false; test "async function suspend with block" { _ = async testSuspendBlock(); - std.debug.assert(!result); + assert(!result); resume the_frame; - std.debug.assert(result); + assert(result); } fn testSuspendBlock() void { @@ -6075,19 +6039,15 @@ fn testSuspendBlock() void { } {#code_end#}

      - Every suspend point in an async function represents a point at which the async function - could be destroyed. If that happens, {#syntax#}defer{#endsyntax#} expressions that are in - scope are run, as well as {#syntax#}errdefer{#endsyntax#} expressions. -

      -

      - {#link|Await#} counts as a suspend point. + {#syntax#}suspend{#endsyntax#} causes a function to be {#syntax#}async{#endsyntax#}.

      + {#header_open|Resuming from Suspend Blocks#}

      Upon entering a {#syntax#}suspend{#endsyntax#} block, the async function is already considered suspended, and can be resumed. For example, if you started another kernel thread, - and had that thread call {#syntax#}resume{#endsyntax#} on the promise handle provided by the - {#syntax#}suspend{#endsyntax#} block, the new thread would begin executing after the suspend + and had that thread call {#syntax#}resume{#endsyntax#} on the frame pointer provided by + {#link|@frame#}, the new thread would begin executing after the suspend block, while the old thread continued executing the suspend block.

      @@ -6103,7 +6063,7 @@ test "resume from suspend" { _ = async testResumeFromSuspend(&my_result); std.debug.assert(my_result == 2); } -async fn testResumeFromSuspend(my_result: *i32) void { +fn testResumeFromSuspend(my_result: *i32) void { suspend { resume @frame(); } @@ -6113,32 +6073,59 @@ async fn testResumeFromSuspend(my_result: *i32) void { } {#code_end#}

      - This is guaranteed to be a tail call, and therefore will not cause a new stack frame. + This is guaranteed to tail call, and therefore will not cause a new stack frame.

      {#header_close#} {#header_close#} - {#header_open|Await#} + + {#header_open|Async and Await#} +

      + In the same way that every {#syntax#}suspend{#endsyntax#} has a matching + {#syntax#}resume{#endsyntax#}, every {#syntax#}async{#endsyntax#} has a matching {#syntax#}await{#endsyntax#}. +

      + {#code_begin|test#} +const std = @import("std"); +const assert = std.debug.assert; + +test "async and await" { + // Here we have an exception where we do not match an async + // with an await. The test block is not async and so cannot + // have a suspend point in it. + // This is well-defined behavior, and everything is OK here. + // Note however that there would be no way to collect the + // return value of amain, if it were something other than void. + _ = async amain(); +} + +fn amain() void { + var frame = async func(); + comptime assert(@typeOf(frame) == @Frame(func)); + + const ptr: anyframe->void = &frame; + const any_ptr: anyframe = ptr; + + resume any_ptr; + await ptr; +} + +fn func() void { + suspend; +} + {#code_end#}

      The {#syntax#}await{#endsyntax#} keyword is used to coordinate with an async function's - {#syntax#}return{#endsyntax#} statement. + {#syntax#}return{#endsyntax#} statement.

      - {#syntax#}await{#endsyntax#} is valid only in an {#syntax#}async{#endsyntax#} function, and it takes - as an operand a promise handle. - If the async function associated with the promise handle has already returned, - then {#syntax#}await{#endsyntax#} destroys the target async function, and gives the return value. - Otherwise, {#syntax#}await{#endsyntax#} suspends the current async function, registering its - promise handle with the target async function. It becomes the target async function's responsibility - to have ensured that it will be resumed or destroyed. When the target async function reaches - its return statement, it gives the return value to the awaiter, destroys itself, and then - resumes the awaiter. + {#syntax#}await{#endsyntax#} is a suspend point, and takes as an operand anything that + implicitly casts to {#syntax#}anyframe->T{#endsyntax#}.

      - A frame handle must be consumed exactly once after it is created with {#syntax#}await{#endsyntax#}. -

      -

      - {#syntax#}await{#endsyntax#} counts as a suspend point, and therefore at every {#syntax#}await{#endsyntax#}, - a async function can be potentially destroyed, which would run {#syntax#}defer{#endsyntax#} and {#syntax#}errdefer{#endsyntax#} expressions. + There is a common misconception that {#syntax#}await{#endsyntax#} resumes the target function. + It is the other way around: it suspends until the target function completes. + In the event that the target function has already completed, {#syntax#}await{#endsyntax#} + does not suspend; instead it copies the + return value directly from the target function's frame.

      {#code_begin|test#} const std = @import("std"); @@ -6156,14 +6143,14 @@ test "async function await" { assert(final_result == 1234); assert(std.mem.eql(u8, seq_points, "abcdefghi")); } -async fn amain() void { +fn amain() void { seq('b'); var f = async another(); seq('e'); final_result = await f; seq('h'); } -async fn another() i32 { +fn another() i32 { seq('c'); suspend { seq('d'); @@ -6183,31 +6170,156 @@ fn seq(c: u8) void { {#code_end#}

      In general, {#syntax#}suspend{#endsyntax#} is lower level than {#syntax#}await{#endsyntax#}. Most application - code will use only {#syntax#}async{#endsyntax#} and {#syntax#}await{#endsyntax#}, but event loop - implementations will make use of {#syntax#}suspend{#endsyntax#} internally. + code will use only {#syntax#}async{#endsyntax#} and {#syntax#}await{#endsyntax#}, but event loop + implementations will make use of {#syntax#}suspend{#endsyntax#} internally.

      {#header_close#} - {#header_open|Open Issues#} + + {#header_open|Async Function Example#}

      - There are a few issues with async function that are considered unresolved. Best be aware of them, - as the situation is likely to change before 1.0.0: + Putting all of this together, here is an example of typical + {#syntax#}async{#endsyntax#}/{#syntax#}await{#endsyntax#} usage: +

      + {#code_begin|exe|async#} +const std = @import("std"); +const Allocator = std.mem.Allocator; + +pub fn main() void { + _ = async amainWrap(); + + // Typically we would use an event loop to manage resuming async functions, + // but in this example we hard code what the event loop would do, + // to make things deterministic. + resume global_file_frame; + resume global_download_frame; +} + +fn amainWrap() void { + amain() catch |e| { + std.debug.warn("{}\n", e); + if (@errorReturnTrace()) |trace| { + std.debug.dumpStackTrace(trace.*); + } + std.process.exit(1); + }; +} + +fn amain() !void { + const allocator = std.heap.direct_allocator; + var download_frame = async fetchUrl(allocator, "https://example.com/"); + var awaited_download_frame = false; + errdefer if (!awaited_download_frame) { + if (await download_frame) |r| allocator.free(r) else |_| {} + }; + + var file_frame = async readFile(allocator, "something.txt"); + var awaited_file_frame = false; + errdefer if (!awaited_file_frame) { + if (await file_frame) |r| allocator.free(r) else |_| {} + }; + + awaited_file_frame = true; + const file_text = try await file_frame; + defer allocator.free(file_text); + + awaited_download_frame = true; + const download_text = try await download_frame; + defer allocator.free(download_text); + + std.debug.warn("download_text: {}\n", download_text); + std.debug.warn("file_text: {}\n", file_text); +} + +var global_download_frame: anyframe = undefined; +fn fetchUrl(allocator: *Allocator, url: []const u8) ![]u8 { + const result = try std.mem.dupe(allocator, u8, "this is the downloaded url contents"); + errdefer allocator.free(result); + suspend { + global_download_frame = @frame(); + } + std.debug.warn("fetchUrl returning\n"); + return result; +} + +var global_file_frame: anyframe = undefined; +fn readFile(allocator: *Allocator, filename: []const u8) ![]u8 { + const result = try std.mem.dupe(allocator, u8, "this is the file contents"); + errdefer allocator.free(result); + suspend 
{ + global_file_frame = @frame(); + } + std.debug.warn("readFile returning\n"); + return result; +} + {#code_end#} +

      + Now we remove the {#syntax#}suspend{#endsyntax#} and {#syntax#}resume{#endsyntax#} code, and + observe the same behavior, with one tiny difference: +

      + {#code_begin|exe|blocking#} +const std = @import("std"); +const Allocator = std.mem.Allocator; + +pub fn main() void { + _ = async amainWrap(); +} + +fn amainWrap() void { + amain() catch |e| { + std.debug.warn("{}\n", e); + if (@errorReturnTrace()) |trace| { + std.debug.dumpStackTrace(trace.*); + } + std.process.exit(1); + }; +} + +fn amain() !void { + const allocator = std.heap.direct_allocator; + var download_frame = async fetchUrl(allocator, "https://example.com/"); + var awaited_download_frame = false; + errdefer if (!awaited_download_frame) { + if (await download_frame) |r| allocator.free(r) else |_| {} + }; + + var file_frame = async readFile(allocator, "something.txt"); + var awaited_file_frame = false; + errdefer if (!awaited_file_frame) { + if (await file_frame) |r| allocator.free(r) else |_| {} + }; + + awaited_file_frame = true; + const file_text = try await file_frame; + defer allocator.free(file_text); + + awaited_download_frame = true; + const download_text = try await download_frame; + defer allocator.free(download_text); + + std.debug.warn("download_text: {}\n", download_text); + std.debug.warn("file_text: {}\n", file_text); +} + +fn fetchUrl(allocator: *Allocator, url: []const u8) ![]u8 { + const result = try std.mem.dupe(allocator, u8, "this is the downloaded url contents"); + errdefer allocator.free(result); + std.debug.warn("fetchUrl returning\n"); + return result; +} + +fn readFile(allocator: *Allocator, filename: []const u8) ![]u8 { + const result = try std.mem.dupe(allocator, u8, "this is the file contents"); + errdefer allocator.free(result); + std.debug.warn("readFile returning\n"); + return result; +} + {#code_end#} +

      + Previously, the {#syntax#}fetchUrl{#endsyntax#} and {#syntax#}readFile{#endsyntax#} functions suspended, + and were resumed in an order determined by the {#syntax#}main{#endsyntax#} function. Now, + since there are no suspend points, the order of the printed "... returning" messages + is determined by the order of {#syntax#}async{#endsyntax#} callsites.

      -
        -
      • Async functions have optimizations disabled - even in release modes - due to an - LLVM bug. -
      • -
      • - There are some situations where we can know statically that there will not be - memory allocation failure, but Zig still forces us to handle it. - TODO file an issue for this and link it here. -
      • -
      • - Zig does not take advantage of LLVM's allocation elision optimization for - async function. It crashed LLVM when I tried to do it the first time. This is - related to the other 2 bullet points here. See - #802. -
      • -
      {#header_close#} {#header_close#} @@ -6265,6 +6377,49 @@ comptime { Note: This function is deprecated. Use {#link|@typeInfo#} instead.

      {#header_close#} + + {#header_open|@asyncCall#} +
      {#syntax#}@asyncCall(frame_buffer: []u8, result_ptr, function_ptr, args: ...) anyframe->T{#endsyntax#}
      +

      + {#syntax#}@asyncCall{#endsyntax#} performs an {#syntax#}async{#endsyntax#} call on a function pointer, + which may or may not be an {#link|async function|Async Functions#}. +

      +

      + The provided {#syntax#}frame_buffer{#endsyntax#} must be large enough to fit the entire function frame. + This size can be determined with {#link|@frameSize#}. Providing a too-small buffer + invokes safety-checked {#link|Undefined Behavior#}. +

      +

      + {#syntax#}result_ptr{#endsyntax#} is optional ({#link|null#} may be provided). If provided, + the function call will write its result directly to the result pointer, which will be available to + read after {#link|await|Async and Await#} completes. Any result location provided to + {#syntax#}await{#endsyntax#} will copy the result from {#syntax#}result_ptr{#endsyntax#}. +

      + {#code_begin|test#} +const std = @import("std"); +const assert = std.debug.assert; + +test "async fn pointer in a struct field" { + var data: i32 = 1; + const Foo = struct { + bar: async fn (*i32) void, + }; + var foo = Foo{ .bar = func }; + var bytes: [64]u8 = undefined; + const f = @asyncCall(&bytes, {}, foo.bar, &data); + assert(data == 2); + resume f; + assert(data == 4); +} + +async fn func(y: *i32) void { + defer y.* += 2; + y.* += 1; + suspend; +} + {#code_end#} + {#header_close#} + {#header_open|@atomicLoad#}
      {#syntax#}@atomicLoad(comptime T: type, ptr: *const T, comptime ordering: builtin.AtomicOrder) T{#endsyntax#}

      @@ -6855,6 +7010,44 @@ export fn @"A function name that is a complete sentence."() void {} {#see_also|@intToFloat#} {#header_close#} + {#header_open|@frame#} +

      {#syntax#}@frame() *@Frame(func){#endsyntax#}
      +

      + This function returns a pointer to the frame for a given function. This type + can be {#link|implicitly cast|Implicit Casts#} to {#syntax#}anyframe->T{#endsyntax#} and + to {#syntax#}anyframe{#endsyntax#}, where {#syntax#}T{#endsyntax#} is the return type + of the function in scope. +

      +

      + This function does not mark a suspension point, but it does cause the function in scope + to become an {#link|async function|Async Functions#}. +

      + {#header_close#} + + {#header_open|@Frame#} +
      {#syntax#}@Frame(func: var) type{#endsyntax#}
      +

      + This function returns the frame type of a function. This works for {#link|Async Functions#} + as well as any function without a specific calling convention. +

      +

      + This type is suitable to be used as the return type of {#link|async|Async and Await#} which + allows one to, for example, heap-allocate an async function frame: +

      + {#code_begin|test#} +const std = @import("std"); + +test "heap allocated frame" { + const frame = try std.heap.direct_allocator.create(@Frame(func)); + frame.* = async func(); +} + +fn func() void { + suspend; +} + {#code_end#} + {#header_close#} + {#header_open|@frameAddress#}
      {#syntax#}@frameAddress() usize{#endsyntax#}

      @@ -6870,14 +7063,14 @@ export fn @"A function name that is a complete sentence."() void {}

      {#header_close#} - {#header_open|@handle#} -
      {#syntax#}@handle(){#endsyntax#}
      + {#header_open|@frameSize#} +
      {#syntax#}@frameSize() usize{#endsyntax#}

      - This function returns a {#syntax#}promise->T{#endsyntax#} type, where {#syntax#}T{#endsyntax#} - is the return type of the async function in scope. + This is the same as {#syntax#}@sizeOf(@Frame(func)){#endsyntax#}, where {#syntax#}func{#endsyntax#} + may be runtime-known.

      - This function is only valid within an async function scope. + This function is typically used in conjunction with {#link|@asyncCall#}.

      {#header_close#} From 7e75e1075e14a124706bf5f22244ca6b6e814fdb Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 15 Aug 2019 18:19:04 -0400 Subject: [PATCH 109/125] zig fmt --- std/event/loop.zig | 1 - std/hash.zig | 1 - std/os/darwin.zig | 2 +- std/os/freebsd.zig | 2 +- std/os/windows.zig | 1 - std/os/windows/bits.zig | 4 ++-- std/os/windows/ntdll.zig | 10 ++++++++-- std/os/windows/status.zig | 4 ++-- std/rb.zig | 1 - test/stage1/behavior/muladd.zig | 2 +- 10 files changed, 15 insertions(+), 13 deletions(-) diff --git a/std/event/loop.zig b/std/event/loop.zig index 827fbf3dd7..0e02addcb9 100644 --- a/std/event/loop.zig +++ b/std/event/loop.zig @@ -627,7 +627,6 @@ pub const Loop = struct { } } - /// call finishOneEvent when done pub fn beginOneEvent(self: *Loop) void { _ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); diff --git a/std/hash.zig b/std/hash.zig index 648f34b11d..ab3a0ea8f3 100644 --- a/std/hash.zig +++ b/std/hash.zig @@ -20,7 +20,6 @@ pub const SipHash128 = siphash.SipHash128; pub const murmur = @import("hash/murmur.zig"); pub const Murmur2_32 = murmur.Murmur2_32; - pub const Murmur2_64 = murmur.Murmur2_64; pub const Murmur3_32 = murmur.Murmur3_32; diff --git a/std/os/darwin.zig b/std/os/darwin.zig index c2b6801e22..0adf71affb 100644 --- a/std/os/darwin.zig +++ b/std/os/darwin.zig @@ -5,4 +5,4 @@ pub const is_the_target = switch (builtin.os) { else => false, }; pub usingnamespace std.c; -pub usingnamespace @import("bits.zig"); \ No newline at end of file +pub usingnamespace @import("bits.zig"); diff --git a/std/os/freebsd.zig b/std/os/freebsd.zig index e9efe64920..ddbf98f2bc 100644 --- a/std/os/freebsd.zig +++ b/std/os/freebsd.zig @@ -2,4 +2,4 @@ const std = @import("../std.zig"); const builtin = @import("builtin"); pub const is_the_target = builtin.os == .freebsd; pub usingnamespace std.c; -pub usingnamespace @import("bits.zig"); \ No newline at end of file +pub usingnamespace @import("bits.zig"); 
diff --git a/std/os/windows.zig b/std/os/windows.zig index 4c6bfe70d5..7c1761a4b8 100644 --- a/std/os/windows.zig +++ b/std/os/windows.zig @@ -875,7 +875,6 @@ pub fn unexpectedError(err: DWORD) std.os.UnexpectedError { return error.Unexpected; } - /// Call this when you made a windows NtDll call /// and you get an unexpected status. pub fn unexpectedStatus(status: NTSTATUS) std.os.UnexpectedError { diff --git a/std/os/windows/bits.zig b/std/os/windows/bits.zig index 79697995f4..ddfdd27e1b 100644 --- a/std/os/windows/bits.zig +++ b/std/os/windows/bits.zig @@ -209,7 +209,7 @@ pub const FILE_INFORMATION_CLASS = extern enum { FileLinkInformationExBypassAccessCheck, FileStorageReserveIdInformation, FileCaseSensitiveInformationForceAccessCheck, - FileMaximumInformation + FileMaximumInformation, }; pub const OVERLAPPED = extern struct { @@ -731,4 +731,4 @@ pub const UNICODE_STRING = extern struct { Length: USHORT, MaximumLength: USHORT, Buffer: [*]WCHAR, -}; \ No newline at end of file +}; diff --git a/std/os/windows/ntdll.zig b/std/os/windows/ntdll.zig index 746403fa6d..bfc98aba8a 100644 --- a/std/os/windows/ntdll.zig +++ b/std/os/windows/ntdll.zig @@ -1,7 +1,13 @@ usingnamespace @import("bits.zig"); pub extern "NtDll" stdcallcc fn RtlCaptureStackBackTrace(FramesToSkip: DWORD, FramesToCapture: DWORD, BackTrace: **c_void, BackTraceHash: ?*DWORD) WORD; -pub extern "NtDll" stdcallcc fn NtQueryInformationFile(FileHandle: HANDLE, IoStatusBlock: *IO_STATUS_BLOCK, FileInformation: *c_void, Length: ULONG, FileInformationClass: FILE_INFORMATION_CLASS,) NTSTATUS; +pub extern "NtDll" stdcallcc fn NtQueryInformationFile( + FileHandle: HANDLE, + IoStatusBlock: *IO_STATUS_BLOCK, + FileInformation: *c_void, + Length: ULONG, + FileInformationClass: FILE_INFORMATION_CLASS, +) NTSTATUS; pub extern "NtDll" stdcallcc fn NtCreateFile( FileHandle: *HANDLE, DesiredAccess: ACCESS_MASK, @@ -15,4 +21,4 @@ pub extern "NtDll" stdcallcc fn NtCreateFile( EaBuffer: *c_void, EaLength: ULONG, ) 
NTSTATUS; -pub extern "NtDll" stdcallcc fn NtClose(Handle: HANDLE) NTSTATUS; \ No newline at end of file +pub extern "NtDll" stdcallcc fn NtClose(Handle: HANDLE) NTSTATUS; diff --git a/std/os/windows/status.zig b/std/os/windows/status.zig index 668a736e90..b9fd2b495f 100644 --- a/std/os/windows/status.zig +++ b/std/os/windows/status.zig @@ -1,5 +1,5 @@ -/// The operation completed successfully. +/// The operation completed successfully. pub const SUCCESS = 0x00000000; /// The data was too large to fit into the specified buffer. -pub const BUFFER_OVERFLOW = 0x80000005; \ No newline at end of file +pub const BUFFER_OVERFLOW = 0x80000005; diff --git a/std/rb.zig b/std/rb.zig index 0b84950544..3f2a2d5bb0 100644 --- a/std/rb.zig +++ b/std/rb.zig @@ -549,7 +549,6 @@ test "rb" { } } - test "inserting and looking up" { var tree: Tree = undefined; tree.init(testCompare); diff --git a/test/stage1/behavior/muladd.zig b/test/stage1/behavior/muladd.zig index 143e6a93e4..d507f503f5 100644 --- a/test/stage1/behavior/muladd.zig +++ b/test/stage1/behavior/muladd.zig @@ -31,4 +31,4 @@ fn testMulAdd() void { // var c: f128 = 6.25; // expect(@mulAdd(f128, a, b, c) == 20); //} -} \ No newline at end of file +} From 0b08ae581edb5811d14d86e7c75c23aca59a3a34 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 15 Aug 2019 18:38:20 -0400 Subject: [PATCH 110/125] add assertion about control flow to fix gcc warning --- src/codegen.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/codegen.cpp b/src/codegen.cpp index 45e2e4122f..982cb821b2 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -3849,6 +3849,8 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr // Use the result location provided to the @asyncCall builtin ret_ptr = result_loc; } + } else { + zig_unreachable(); } // even if prefix_arg_err_ret_stack is true, let the async function do its own From ff7e826b82b097ffb688e809b6d762d8d56c0f3c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: 
Thu, 15 Aug 2019 18:54:23 -0400 Subject: [PATCH 111/125] fix crash with sometimes type not being resolved --- src/codegen.cpp | 4 ++++ test/stage1/behavior/async_fn.zig | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/src/codegen.cpp b/src/codegen.cpp index 982cb821b2..ebd3c72b50 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -7004,6 +7004,8 @@ static void do_code_gen(CodeGen *g) { ZigType *ptr_type = instruction->base.value.type; assert(ptr_type->id == ZigTypeIdPointer); ZigType *child_type = ptr_type->data.pointer.child_type; + if (type_resolve(g, child_type, ResolveStatusSizeKnown)) + zig_unreachable(); if (!type_has_bits(child_type)) continue; if (instruction->base.ref_count == 0) @@ -7015,6 +7017,8 @@ static void do_code_gen(CodeGen *g) { continue; } } + if (type_resolve(g, child_type, ResolveStatusLLVMFull)) + zig_unreachable(); instruction->base.llvm_value = build_alloca(g, child_type, instruction->name_hint, get_ptr_align(g, ptr_type)); } diff --git a/test/stage1/behavior/async_fn.zig b/test/stage1/behavior/async_fn.zig index 96b7b02137..1536f02913 100644 --- a/test/stage1/behavior/async_fn.zig +++ b/test/stage1/behavior/async_fn.zig @@ -734,3 +734,10 @@ test "alignment of local variables in async functions" { }; S.doTheTest(); } + +test "no reason to resolve frame still works" { + _ = async simpleNothing(); +} +fn simpleNothing() void { + var x: i32 = 1234; +} From 1e3b6816a8929bb141fb2157197077d74656e7ca Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 15 Aug 2019 22:57:06 -0400 Subject: [PATCH 112/125] note that -mllvm is unsupported closes #3045 --- src/main.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main.cpp b/src/main.cpp index 42d0850046..c0945ef180 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -85,7 +85,7 @@ static int print_full_usage(const char *arg0, FILE *file, int return_code) { " --verbose-cc enable compiler debug output for C compilation\n" " -dirafter [dir] same as -isystem but do it 
last\n" " -isystem [dir] add additional search path for other .h files\n" - " -mllvm [arg] forward an arg to LLVM's option processing\n" + " -mllvm [arg] (unsupported) forward an arg to LLVM's option processing\n" " --override-std-dir [arg] override path to Zig standard library\n" " --override-lib-dir [arg] override path to Zig lib library\n" " -ffunction-sections places each function in a seperate section\n" From 2151f84d59f8af6e28570cb01a3a134c7b068fa2 Mon Sep 17 00:00:00 2001 From: Vexu <15308111+Vexu@users.noreply.github.com> Date: Fri, 16 Aug 2019 12:24:06 +0300 Subject: [PATCH 113/125] implement new async syntax in self-hosted compiler --- doc/langref.html.in | 6 +-- src-self-hosted/ir.zig | 1 - src-self-hosted/translate_c.zig | 3 +- src/parser.cpp | 44 +++++++-------------- std/zig/ast.zig | 37 +----------------- std/zig/parse.zig | 69 ++++++++------------------------- std/zig/parser_test.zig | 4 +- std/zig/render.zig | 22 +---------- 8 files changed, 41 insertions(+), 145 deletions(-) diff --git a/doc/langref.html.in b/doc/langref.html.in index e02a406bd4..2e56bc6557 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -9987,7 +9987,7 @@ TypeExpr <- PrefixTypeOp* ErrorUnionExpr ErrorUnionExpr <- SuffixExpr (EXCLAMATIONMARK TypeExpr)? SuffixExpr - <- AsyncPrefix PrimaryTypeExpr SuffixOp* FnCallArguments + <- KEYWORD_async PrimaryTypeExpr SuffixOp* FnCallArguments / PrimaryTypeExpr (SuffixOp / FnCallArguments)* PrimaryTypeExpr @@ -10063,7 +10063,7 @@ FnCC <- KEYWORD_nakedcc / KEYWORD_stdcallcc / KEYWORD_extern - / KEYWORD_async (LARROW TypeExpr RARROW)? + / KEYWORD_async ParamDecl <- (KEYWORD_noalias / KEYWORD_comptime)? (IDENTIFIER COLON)? ParamType @@ -10168,8 +10168,6 @@ SuffixOp / DOTASTERISK / DOTQUESTIONMARK -AsyncPrefix <- KEYWORD_async (LARROW PrefixExpr RARROW)? 
- FnCallArguments <- LPAREN ExprList RPAREN # Ptr specific diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig index bc7aeffdf5..df4d436b50 100644 --- a/src-self-hosted/ir.zig +++ b/src-self-hosted/ir.zig @@ -1181,7 +1181,6 @@ pub const Builder = struct { ast.Node.Id.ErrorTag => return error.Unimplemented, ast.Node.Id.AsmInput => return error.Unimplemented, ast.Node.Id.AsmOutput => return error.Unimplemented, - ast.Node.Id.AsyncAttribute => return error.Unimplemented, ast.Node.Id.ParamDecl => return error.Unimplemented, ast.Node.Id.FieldInitializer => return error.Unimplemented, ast.Node.Id.EnumLiteral => return error.Unimplemented, diff --git a/src-self-hosted/translate_c.zig b/src-self-hosted/translate_c.zig index b18397ede2..6a91b8e7bf 100644 --- a/src-self-hosted/translate_c.zig +++ b/src-self-hosted/translate_c.zig @@ -1037,7 +1037,7 @@ fn transCreateNodeFnCall(c: *Context, fn_expr: *ast.Node) !*ast.Node.SuffixOp { .op = ast.Node.SuffixOp.Op{ .Call = ast.Node.SuffixOp.Op.Call{ .params = ast.Node.SuffixOp.Op.Call.ParamList.init(c.a()), - .async_attr = null, + .async_token = null, }, }, .rtoken = undefined, // set after appending args @@ -1355,7 +1355,6 @@ fn finishTransFnProto( .var_args_token = null, // TODO this field is broken in the AST data model .extern_export_inline_token = extern_export_inline_tok, .cc_token = cc_tok, - .async_attr = null, .body_node = null, .lib_name = null, .align_expr = null, diff --git a/src/parser.cpp b/src/parser.cpp index afe5735a06..1e7e36d0bd 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -113,7 +113,6 @@ static AstNode *ast_parse_multiply_op(ParseContext *pc); static AstNode *ast_parse_prefix_op(ParseContext *pc); static AstNode *ast_parse_prefix_type_op(ParseContext *pc); static AstNode *ast_parse_suffix_op(ParseContext *pc); -static AstNode *ast_parse_async_prefix(ParseContext *pc); static AstNode *ast_parse_fn_call_argumnets(ParseContext *pc); static AstNode *ast_parse_array_type_start(ParseContext *pc); 
static AstNode *ast_parse_ptr_type_start(ParseContext *pc); @@ -1389,22 +1388,18 @@ static AstNode *ast_parse_error_union_expr(ParseContext *pc) { } // SuffixExpr -// <- AsyncPrefix PrimaryTypeExpr SuffixOp* FnCallArguments +// <- KEYWORD_async PrimaryTypeExpr SuffixOp* FnCallArguments // / PrimaryTypeExpr (SuffixOp / FnCallArguments)* static AstNode *ast_parse_suffix_expr(ParseContext *pc) { - AstNode *async_call = ast_parse_async_prefix(pc); - if (async_call != nullptr) { + Token *async_token = eat_token_if(pc, TokenIdKeywordAsync); + if (async_token != nullptr) { if (eat_token_if(pc, TokenIdKeywordFn) != nullptr) { // HACK: If we see the keyword `fn`, then we assume that // we are parsing an async fn proto, and not a call. // We therefore put back all tokens consumed by the async // prefix... - // HACK: This loop is not actually enough to put back all the - // tokens. Let's hope this is fine for most code right now - // and wait till we get the async rework for a syntax update. - do { - put_back_token(pc); - } while (peek_token(pc)->id != TokenIdKeywordAsync); + put_back_token(pc); + put_back_token(pc); return ast_parse_primary_type_expr(pc); } @@ -1446,10 +1441,14 @@ static AstNode *ast_parse_suffix_expr(ParseContext *pc) { ast_invalid_token_error(pc, peek_token(pc)); assert(args->type == NodeTypeFnCallExpr); - async_call->data.fn_call_expr.fn_ref_expr = child; - async_call->data.fn_call_expr.params = args->data.fn_call_expr.params; - async_call->data.fn_call_expr.is_builtin = false; - return async_call; + + AstNode *res = ast_create_node(pc, NodeTypeFnCallExpr, async_token); + res->data.fn_call_expr.is_async = true; + res->data.fn_call_expr.seen = false; + res->data.fn_call_expr.fn_ref_expr = child; + res->data.fn_call_expr.params = args->data.fn_call_expr.params; + res->data.fn_call_expr.is_builtin = false; + return res; } AstNode *res = ast_parse_primary_type_expr(pc); @@ -1501,7 +1500,7 @@ static AstNode *ast_parse_suffix_expr(ParseContext *pc) { // <- 
BUILTINIDENTIFIER FnCallArguments // / CHAR_LITERAL // / ContainerDecl -// / DOT IDENTIFIER +// / DOT IDENTIFIER // / ErrorSetDecl // / FLOAT // / FnProto @@ -2016,7 +2015,7 @@ static AstNode *ast_parse_link_section(ParseContext *pc) { // <- KEYWORD_nakedcc // / KEYWORD_stdcallcc // / KEYWORD_extern -// / KEYWORD_async (LARROW TypeExpr RARROW)? +// / KEYWORD_async static Optional ast_parse_fn_cc(ParseContext *pc) { AstNodeFnProto res = {}; if (eat_token_if(pc, TokenIdKeywordNakedCC) != nullptr) { @@ -2657,19 +2656,6 @@ static AstNode *ast_parse_suffix_op(ParseContext *pc) { return nullptr; } -// AsyncPrefix <- KEYWORD_async (LARROW PrefixExpr RARROW)? -static AstNode *ast_parse_async_prefix(ParseContext *pc) { - Token *async = eat_token_if(pc, TokenIdKeywordAsync); - if (async == nullptr) - return nullptr; - - AstNode *res = ast_create_node(pc, NodeTypeFnCallExpr, async); - res->data.fn_call_expr.is_async = true; - res->data.fn_call_expr.seen = false; - - return res; -} - // FnCallArguments <- LPAREN ExprList RPAREN static AstNode *ast_parse_fn_call_argumnets(ParseContext *pc) { Token *paren = eat_token_if(pc, TokenIdLParen); diff --git a/std/zig/ast.zig b/std/zig/ast.zig index 475a0e4e13..e5781da035 100644 --- a/std/zig/ast.zig +++ b/std/zig/ast.zig @@ -434,7 +434,6 @@ pub const Node = struct { ErrorTag, AsmInput, AsmOutput, - AsyncAttribute, ParamDecl, FieldInitializer, }; @@ -838,36 +837,6 @@ pub const Node = struct { } }; - pub const AsyncAttribute = struct { - base: Node, - async_token: TokenIndex, - allocator_type: ?*Node, - rangle_bracket: ?TokenIndex, - - pub fn iterate(self: *AsyncAttribute, index: usize) ?*Node { - var i = index; - - if (self.allocator_type) |allocator_type| { - if (i < 1) return allocator_type; - i -= 1; - } - - return null; - } - - pub fn firstToken(self: *const AsyncAttribute) TokenIndex { - return self.async_token; - } - - pub fn lastToken(self: *const AsyncAttribute) TokenIndex { - if (self.rangle_bracket) |rangle_bracket| { - return 
rangle_bracket; - } - - return self.async_token; - } - }; - pub const FnProto = struct { base: Node, doc_comments: ?*DocComment, @@ -879,7 +848,6 @@ pub const Node = struct { var_args_token: ?TokenIndex, extern_export_inline_token: ?TokenIndex, cc_token: ?TokenIndex, - async_attr: ?*AsyncAttribute, body_node: ?*Node, lib_name: ?*Node, // populated if this is an extern declaration align_expr: ?*Node, // populated if align(A) is present @@ -935,7 +903,6 @@ pub const Node = struct { pub fn firstToken(self: *const FnProto) TokenIndex { if (self.visib_token) |visib_token| return visib_token; - if (self.async_attr) |async_attr| return async_attr.firstToken(); if (self.extern_export_inline_token) |extern_export_inline_token| return extern_export_inline_token; assert(self.lib_name == null); if (self.cc_token) |cc_token| return cc_token; @@ -1699,7 +1666,7 @@ pub const Node = struct { pub const Call = struct { params: ParamList, - async_attr: ?*AsyncAttribute, + async_token: ?TokenIndex, pub const ParamList = SegmentedList(*Node, 2); }; @@ -1752,7 +1719,7 @@ pub const Node = struct { pub fn firstToken(self: *const SuffixOp) TokenIndex { switch (self.op) { - .Call => |*call_info| if (call_info.async_attr) |async_attr| return async_attr.firstToken(), + .Call => |*call_info| if (call_info.async_token) |async_token| return async_token, else => {}, } return self.lhs.firstToken(); diff --git a/std/zig/parse.zig b/std/zig/parse.zig index 077870a9ca..0a2fbb4fa1 100644 --- a/std/zig/parse.zig +++ b/std/zig/parse.zig @@ -277,7 +277,7 @@ fn parseTopLevelDecl(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node /// FnProto <- FnCC? KEYWORD_fn IDENTIFIER? LPAREN ParamDeclList RPAREN ByteAlign? LinkSection? EXCLAMATIONMARK? 
(KEYWORD_var / TypeExpr) fn parseFnProto(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node { - const cc = try parseFnCC(arena, it, tree); + const cc = parseFnCC(arena, it, tree); const fn_token = eatToken(it, .Keyword_fn) orelse { if (cc == null) return null else return error.ParseError; }; @@ -320,7 +320,6 @@ fn parseFnProto(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node { .var_args_token = var_args_token, .extern_export_inline_token = null, .cc_token = null, - .async_attr = null, .body_node = null, .lib_name = null, .align_expr = align_expr, @@ -331,7 +330,6 @@ fn parseFnProto(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node { switch (kind) { .CC => |token| fn_proto_node.cc_token = token, .Extern => |token| fn_proto_node.extern_export_inline_token = token, - .Async => |node| fn_proto_node.async_attr = node, } } @@ -1092,10 +1090,19 @@ fn parseErrorUnionExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*No } /// SuffixExpr -/// <- AsyncPrefix PrimaryTypeExpr SuffixOp* FnCallArguments +/// <- KEYWORD_async PrimaryTypeExpr SuffixOp* FnCallArguments /// / PrimaryTypeExpr (SuffixOp / FnCallArguments)* fn parseSuffixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node { - if (try parseAsyncPrefix(arena, it, tree)) |async_node| { + if (eatToken(it, .Keyword_async)) |async_token| { + if (eatToken(it, .Keyword_fn)) |token_fn| { + // HACK: If we see the keyword `fn`, then we assume that + // we are parsing an async fn proto, and not a call. + // We therefore put back all tokens consumed by the async + // prefix... 
+ putBackToken(it, token_fn); + putBackToken(it, async_token); + return parsePrimaryTypeExpr(arena, it, tree); + } // TODO: Implement hack for parsing `async fn ...` in ast_parse_suffix_expr var res = try expectNode(arena, it, tree, parsePrimaryTypeExpr, AstError{ .ExpectedPrimaryTypeExpr = AstError.ExpectedPrimaryTypeExpr{ .token = it.index }, @@ -1116,7 +1123,6 @@ fn parseSuffixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node { }); return null; }; - const node = try arena.create(Node.SuffixOp); node.* = Node.SuffixOp{ .base = Node{ .id = .SuffixOp }, @@ -1124,14 +1130,13 @@ fn parseSuffixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node { .op = Node.SuffixOp.Op{ .Call = Node.SuffixOp.Op.Call{ .params = params.list, - .async_attr = async_node.cast(Node.AsyncAttribute).?, + .async_token = async_token, }, }, .rtoken = params.rparen, }; return &node.base; } - if (try parsePrimaryTypeExpr(arena, it, tree)) |expr| { var res = expr; @@ -1153,7 +1158,7 @@ fn parseSuffixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node { .op = Node.SuffixOp.Op{ .Call = Node.SuffixOp.Op.Call{ .params = params.list, - .async_attr = null, + .async_token = null, }, }, .rtoken = params.rparen, @@ -1653,36 +1658,18 @@ fn parseLinkSection(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node /// <- KEYWORD_nakedcc /// / KEYWORD_stdcallcc /// / KEYWORD_extern -/// / KEYWORD_async (LARROW TypeExpr RARROW)? 
-fn parseFnCC(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?FnCC { +/// / KEYWORD_async +fn parseFnCC(arena: *Allocator, it: *TokenIterator, tree: *Tree) ?FnCC { if (eatToken(it, .Keyword_nakedcc)) |token| return FnCC{ .CC = token }; if (eatToken(it, .Keyword_stdcallcc)) |token| return FnCC{ .CC = token }; if (eatToken(it, .Keyword_extern)) |token| return FnCC{ .Extern = token }; - if (eatToken(it, .Keyword_async)) |token| { - const node = try arena.create(Node.AsyncAttribute); - node.* = Node.AsyncAttribute{ - .base = Node{ .id = .AsyncAttribute }, - .async_token = token, - .allocator_type = null, - .rangle_bracket = null, - }; - if (eatToken(it, .AngleBracketLeft)) |_| { - const type_expr = try expectNode(arena, it, tree, parseTypeExpr, AstError{ - .ExpectedTypeExpr = AstError.ExpectedTypeExpr{ .token = it.index }, - }); - const rarrow = try expectToken(it, tree, .AngleBracketRight); - node.allocator_type = type_expr; - node.rangle_bracket = rarrow; - } - return FnCC{ .Async = node }; - } + if (eatToken(it, .Keyword_async)) |token| return FnCC{ .CC = token }; return null; } const FnCC = union(enum) { CC: TokenIndex, Extern: TokenIndex, - Async: *Node.AsyncAttribute, }; /// ParamDecl <- (KEYWORD_noalias / KEYWORD_comptime)? (IDENTIFIER COLON)? ParamType @@ -2409,28 +2396,6 @@ fn parseSuffixOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node { return &node.base; } -/// AsyncPrefix <- KEYWORD_async (LARROW PrefixExpr RARROW)? 
-fn parseAsyncPrefix(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node { - const async_token = eatToken(it, .Keyword_async) orelse return null; - var rangle_bracket: ?TokenIndex = null; - const expr_node = if (eatToken(it, .AngleBracketLeft)) |_| blk: { - const prefix_expr = try expectNode(arena, it, tree, parsePrefixExpr, AstError{ - .ExpectedPrefixExpr = AstError.ExpectedPrefixExpr{ .token = it.index }, - }); - rangle_bracket = try expectToken(it, tree, .AngleBracketRight); - break :blk prefix_expr; - } else null; - - const node = try arena.create(Node.AsyncAttribute); - node.* = Node.AsyncAttribute{ - .base = Node{ .id = .AsyncAttribute }, - .async_token = async_token, - .allocator_type = expr_node, - .rangle_bracket = rangle_bracket, - }; - return &node.base; -} - /// FnCallArguments <- LPAREN ExprList RPAREN /// ExprList <- (Expr COMMA)* Expr? fn parseFnCallArguments(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?AnnotatedParamList { diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig index c5c740353e..7d6456ed0a 100644 --- a/std/zig/parser_test.zig +++ b/std/zig/parser_test.zig @@ -210,7 +210,7 @@ test "zig fmt: spaces around slice operator" { test "zig fmt: async call in if condition" { try testCanonical( \\comptime { - \\ if (async b()) { + \\ if (async b()) { \\ a(); \\ } \\} @@ -1118,7 +1118,7 @@ test "zig fmt: first line comment in struct initializer" { \\pub async fn acquire(self: *Self) HeldLock { \\ return HeldLock{ \\ // guaranteed allocation elision - \\ .held = await (async self.lock.acquire() catch unreachable), + \\ .held = self.lock.acquire(), \\ .value = &self.private_data, \\ }; \\} diff --git a/std/zig/render.zig b/std/zig/render.zig index 2e034142c2..035526ed11 100644 --- a/std/zig/render.zig +++ b/std/zig/render.zig @@ -284,20 +284,6 @@ fn renderExpression( return renderExpression(allocator, stream, tree, indent, start_col, comptime_node.expr, space); }, - ast.Node.Id.AsyncAttribute => { - const async_attr = 
@fieldParentPtr(ast.Node.AsyncAttribute, "base", base); - - if (async_attr.allocator_type) |allocator_type| { - try renderToken(tree, stream, async_attr.async_token, indent, start_col, Space.None); // async - - try renderToken(tree, stream, tree.nextToken(async_attr.async_token), indent, start_col, Space.None); // < - try renderExpression(allocator, stream, tree, indent, start_col, allocator_type, Space.None); // allocator - return renderToken(tree, stream, tree.nextToken(allocator_type.lastToken()), indent, start_col, space); // > - } else { - return renderToken(tree, stream, async_attr.async_token, indent, start_col, space); // async - } - }, - ast.Node.Id.Suspend => { const suspend_node = @fieldParentPtr(ast.Node.Suspend, "base", base); @@ -459,8 +445,8 @@ fn renderExpression( switch (suffix_op.op) { @TagType(ast.Node.SuffixOp.Op).Call => |*call_info| { - if (call_info.async_attr) |async_attr| { - try renderExpression(allocator, stream, tree, indent, start_col, &async_attr.base, Space.Space); + if (call_info.async_token) |async_token| { + try renderToken(tree, stream, async_token, indent, start_col, Space.Space); } try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None); @@ -1121,10 +1107,6 @@ fn renderExpression( try renderToken(tree, stream, cc_token, indent, start_col, Space.Space); // stdcallcc } - if (fn_proto.async_attr) |async_attr| { - try renderExpression(allocator, stream, tree, indent, start_col, &async_attr.base, Space.Space); - } - const lparen = if (fn_proto.name_token) |name_token| blk: { try renderToken(tree, stream, fn_proto.fn_token, indent, start_col, Space.Space); // fn try renderToken(tree, stream, name_token, indent, start_col, Space.None); // name From 7874d5a40b5ab4bd94ffbf5ecf5935d707a28942 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 16 Aug 2019 10:11:53 -0400 Subject: [PATCH 114/125] zig fmt: add more test cases --- std/zig/parser_test.zig | 12 ++++++++++++ 1 file changed, 12 insertions(+) 
diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig index 7d6456ed0a..871195280f 100644 --- a/std/zig/parser_test.zig +++ b/std/zig/parser_test.zig @@ -8,6 +8,18 @@ test "zig fmt: change use to usingnamespace" { ); } +test "zig fmt: async function" { + try testCanonical( + \\pub const Server = struct { + \\ handleRequestFn: async fn (*Server, *const std.net.Address, File) void, + \\}; + \\test "hi" { + \\ var ptr = @ptrCast(async fn (i32) void, other); + \\} + \\ + ); +} + test "zig fmt: whitespace fixes" { try testTransform("test \"\" {\r\n\tconst hi = x;\r\n}\n// zig fmt: off\ntest \"\"{\r\n\tconst a = b;}\r\n", \\test "" { From 49c88e23af6fe1dc895544f91231cc3b53110c8e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 16 Aug 2019 10:13:40 -0400 Subject: [PATCH 115/125] zig fmt --- test/stage1/behavior/async_fn.zig | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/test/stage1/behavior/async_fn.zig b/test/stage1/behavior/async_fn.zig index 1536f02913..13f7be6779 100644 --- a/test/stage1/behavior/async_fn.zig +++ b/test/stage1/behavior/async_fn.zig @@ -111,12 +111,12 @@ test "@frameSize" { const S = struct { fn doTheTest() void { { - var ptr = @ptrCast(async fn(i32) void, other); + var ptr = @ptrCast(async fn (i32) void, other); const size = @frameSize(ptr); expect(size == @sizeOf(@Frame(other))); } { - var ptr = @ptrCast(async fn() void, first); + var ptr = @ptrCast(async fn () void, first); const size = @frameSize(ptr); expect(size == @sizeOf(@Frame(first))); } @@ -431,7 +431,7 @@ test "heap allocated async function frame" { test "async function call return value" { const S = struct { var frame: anyframe = undefined; - var pt = Point{.x = 10, .y = 11 }; + var pt = Point{ .x = 10, .y = 11 }; fn doTheTest() void { expectEqual(pt.x, 10); @@ -568,7 +568,6 @@ test "await inside an errdefer" { frame = @frame(); suspend; } - }; S.doTheTest(); } @@ -632,10 +631,10 @@ test "returning a const error from async function" { 
} test "async/await typical usage" { - inline for ([_]bool{false, true}) |b1| { - inline for ([_]bool{false, true}) |b2| { - inline for ([_]bool{false, true}) |b3| { - inline for ([_]bool{false, true}) |b4| { + inline for ([_]bool{ false, true }) |b1| { + inline for ([_]bool{ false, true }) |b2| { + inline for ([_]bool{ false, true }) |b3| { + inline for ([_]bool{ false, true }) |b4| { testAsyncAwaitTypicalUsage(b1, b2, b3, b4).doTheTest(); } } @@ -647,8 +646,8 @@ fn testAsyncAwaitTypicalUsage( comptime simulate_fail_download: bool, comptime simulate_fail_file: bool, comptime suspend_download: bool, - comptime suspend_file: bool) type -{ + comptime suspend_file: bool, +) type { return struct { fn doTheTest() void { _ = async amainWrap(); From 1254a453b91623849dcb0a655b1212c9a179d29a Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 16 Aug 2019 10:44:42 -0400 Subject: [PATCH 116/125] add compile error for @Frame() of generic function See #3063 --- src/analyze.cpp | 2 ++ src/ir.cpp | 6 ++++++ test/compile_errors.zig | 13 +++++++++++++ 3 files changed, 21 insertions(+) diff --git a/src/analyze.cpp b/src/analyze.cpp index 21289f24a8..4aff6da8e9 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5197,6 +5197,8 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) { return ErrorNone; ZigFn *fn = frame_type->data.frame.fn; + assert(!fn->type_entry->data.fn.is_generic); + switch (fn->anal_state) { case FnAnalStateInvalid: return ErrorSemanticAnalyzeFail; diff --git a/src/ir.cpp b/src/ir.cpp index ddaf82893a..9589000ab0 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -22095,6 +22095,12 @@ static IrInstruction *ir_analyze_instruction_frame_type(IrAnalyze *ira, IrInstru if (fn == nullptr) return ira->codegen->invalid_instruction; + if (fn->type_entry->data.fn.is_generic) { + ir_add_error(ira, &instruction->base, + buf_sprintf("@Frame() of generic function")); + return ira->codegen->invalid_instruction; + } + ZigType *ty = get_fn_frame_type(ira->codegen, 
fn); return ir_const_type(ira, &instruction->base, ty); } diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 0d579ece95..c4549be405 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -2,6 +2,18 @@ const tests = @import("tests.zig"); const builtin = @import("builtin"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "@Frame() of generic function", + \\export fn entry() void { + \\ var frame: @Frame(func) = undefined; + \\} + \\fn func(comptime T: type) void { + \\ var x: T = undefined; + \\} + , + "tmp.zig:2:16: error: @Frame() of generic function", + ); + cases.add( "@frame() causes function to be async", \\export fn entry() void { @@ -14,6 +26,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { "tmp.zig:1:1: error: function with calling convention 'ccc' cannot be async", "tmp.zig:5:9: note: @frame() causes function to be async", ); + cases.add( "invalid suspend in exported function", \\export fn entry() void { From 5df89dafef1bb410608dae2c3c97daa644e89f75 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 16 Aug 2019 10:49:00 -0400 Subject: [PATCH 117/125] add test for wrong frame type used for async call See #3063 --- test/compile_errors.zig | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/test/compile_errors.zig b/test/compile_errors.zig index c4549be405..42cead93ce 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -2,6 +2,22 @@ const tests = @import("tests.zig"); const builtin = @import("builtin"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "wrong frame type used for async call", + \\export fn entry() void { + \\ var frame: @Frame(foo) = undefined; + \\ frame = async bar(); + \\} + \\fn foo() void { + \\ suspend; + \\} + \\fn bar() void { + \\ suspend; + \\} + , + "tmp.zig:3:5: error: expected type '*@Frame(bar)', found '*@Frame(foo)'", + ); + cases.add( "@Frame() of generic function", \\export fn entry() void { From 
4ea2331e3d900079a502432dbefb06528b62afab Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 16 Aug 2019 10:54:45 -0400 Subject: [PATCH 118/125] add test for async call of generic function See #3063 --- test/stage1/behavior/async_fn.zig | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/test/stage1/behavior/async_fn.zig b/test/stage1/behavior/async_fn.zig index 13f7be6779..a6231e4609 100644 --- a/test/stage1/behavior/async_fn.zig +++ b/test/stage1/behavior/async_fn.zig @@ -740,3 +740,23 @@ test "no reason to resolve frame still works" { fn simpleNothing() void { var x: i32 = 1234; } + +test "async call a generic function" { + const S = struct { + fn doTheTest() void { + var f = async func(i32, 2); + const result = await f; + expect(result == 3); + } + + fn func(comptime T: type, inc: T) T { + var x: T = 1; + suspend { + resume @frame(); + } + x += inc; + return x; + } + }; + _ = async S.doTheTest(); +} From 7798054b5880860fba410b2cd12626455ef4394b Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 16 Aug 2019 11:00:21 -0400 Subject: [PATCH 119/125] add tests for bad implicit casting of anyframe types See #3063 --- test/compile_errors.zig | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 42cead93ce..b00f090780 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -2,6 +2,27 @@ const tests = @import("tests.zig"); const builtin = @import("builtin"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "prevent bad implicit casting of anyframe types", + \\export fn a() void { + \\ var x: anyframe = undefined; + \\ var y: anyframe->i32 = x; + \\} + \\export fn b() void { + \\ var x: i32 = undefined; + \\ var y: anyframe->i32 = x; + \\} + \\export fn c() void { + \\ var x: @Frame(func) = undefined; + \\ var y: anyframe->i32 = &x; + \\} + \\fn func() void {} + , + "tmp.zig:3:28: error: expected type 'anyframe->i32', found 
'anyframe'", + "tmp.zig:7:28: error: expected type 'anyframe->i32', found 'i32'", + "tmp.zig:11:29: error: expected type 'anyframe->i32', found '*@Frame(func)'", + ); + cases.add( "wrong frame type used for async call", \\export fn entry() void { From cba3b8291a18ee16cda2b453bb2bcd4279fa8b98 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 16 Aug 2019 11:19:01 -0400 Subject: [PATCH 120/125] codegen: LLVMConstSub instead of LLVMBuildSub in one place --- src/codegen.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index ebd3c72b50..7e809f4824 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -2210,8 +2210,8 @@ static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef tar LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_fn_ptr_index, ""); fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, ""); } - LLVMValueRef arg_val = LLVMBuildSub(g->builder, LLVMConstAllOnes(usize_type_ref), - LLVMConstInt(usize_type_ref, resume_id, false), ""); + LLVMValueRef arg_val = LLVMConstSub(LLVMConstAllOnes(usize_type_ref), + LLVMConstInt(usize_type_ref, resume_id, false)); LLVMValueRef args[] = {target_frame_ptr, arg_val}; return ZigLLVMBuildCall(g->builder, fn_val, args, 2, LLVMFastCallConv, ZigLLVM_FnInlineAuto, ""); } From 13c584d325d042879c8c56a3c41ffbf99a3346c0 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 16 Aug 2019 11:27:29 -0400 Subject: [PATCH 121/125] add compile error for casting const frame to anyframe See #3063 --- src/ir.cpp | 1 + std/event/channel.zig | 2 +- std/event/future.zig | 6 +++--- std/event/lock.zig | 10 +++++----- std/event/loop.zig | 4 ++-- test/compile_errors.zig | 18 ++++++++++++++++++ test/stage1/behavior/async_fn.zig | 14 +++++++------- test/stage1/behavior/await_struct.zig | 4 ++-- 8 files changed, 39 insertions(+), 20 deletions(-) diff --git a/src/ir.cpp b/src/ir.cpp index 9589000ab0..d6fba23856 100644 --- a/src/ir.cpp +++ 
b/src/ir.cpp @@ -12112,6 +12112,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst // *@Frame(func) to anyframe->T or anyframe if (actual_type->id == ZigTypeIdPointer && actual_type->data.pointer.ptr_len == PtrLenSingle && + !actual_type->data.pointer.is_const && actual_type->data.pointer.child_type->id == ZigTypeIdFnFrame && wanted_type->id == ZigTypeIdAnyFrame) { bool ok = true; diff --git a/std/event/channel.zig b/std/event/channel.zig index 91c4650dc1..a397d280de 100644 --- a/std/event/channel.zig +++ b/std/event/channel.zig @@ -331,7 +331,7 @@ async fn testChannelGetter(loop: *Loop, channel: *Channel(i32)) void { const value3 = channel.getOrNull(); testing.expect(value3 == null); - const last_put = async testPut(channel, 4444); + var last_put = async testPut(channel, 4444); const value4 = channel.getOrNull(); testing.expect(value4.? == 4444); await last_put; diff --git a/std/event/future.zig b/std/event/future.zig index 45bb7759c5..70e20819be 100644 --- a/std/event/future.zig +++ b/std/event/future.zig @@ -100,9 +100,9 @@ test "std.event.Future" { async fn testFuture(loop: *Loop) void { var future = Future(i32).init(loop); - const a = async waitOnFuture(&future); - const b = async waitOnFuture(&future); - const c = async resolveFuture(&future); + var a = async waitOnFuture(&future); + var b = async waitOnFuture(&future); + var c = async resolveFuture(&future); // TODO make this work: //const result = (await a) + (await b); diff --git a/std/event/lock.zig b/std/event/lock.zig index da698d9fb2..0fa65f031d 100644 --- a/std/event/lock.zig +++ b/std/event/lock.zig @@ -135,7 +135,7 @@ test "std.event.Lock" { } async fn testLock(loop: *Loop, lock: *Lock) void { - const handle1 = async lockRunner(lock); + var handle1 = async lockRunner(lock); var tick_node1 = Loop.NextTickNode{ .prev = undefined, .next = undefined, @@ -143,7 +143,7 @@ async fn testLock(loop: *Loop, lock: *Lock) void { }; loop.onNextTick(&tick_node1); - const handle2 
= async lockRunner(lock); + var handle2 = async lockRunner(lock); var tick_node2 = Loop.NextTickNode{ .prev = undefined, .next = undefined, @@ -151,7 +151,7 @@ async fn testLock(loop: *Loop, lock: *Lock) void { }; loop.onNextTick(&tick_node2); - const handle3 = async lockRunner(lock); + var handle3 = async lockRunner(lock); var tick_node3 = Loop.NextTickNode{ .prev = undefined, .next = undefined, @@ -172,8 +172,8 @@ async fn lockRunner(lock: *Lock) void { var i: usize = 0; while (i < shared_test_data.len) : (i += 1) { - const lock_promise = async lock.acquire(); - const handle = await lock_promise; + var lock_frame = async lock.acquire(); + const handle = await lock_frame; defer handle.release(); shared_test_index = 0; diff --git a/std/event/loop.zig b/std/event/loop.zig index 0e02addcb9..a4605c8928 100644 --- a/std/event/loop.zig +++ b/std/event/loop.zig @@ -900,8 +900,8 @@ test "std.event.Loop - call" { defer loop.deinit(); var did_it = false; - const handle = async Loop.call(testEventLoop); - const handle2 = async Loop.call(testEventLoop2, &handle, &did_it); + var handle = async Loop.call(testEventLoop); + var handle2 = async Loop.call(testEventLoop2, &handle, &did_it); loop.run(); diff --git a/test/compile_errors.zig b/test/compile_errors.zig index b00f090780..7c9d8fae51 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -2,6 +2,24 @@ const tests = @import("tests.zig"); const builtin = @import("builtin"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "const frame cast to anyframe", + \\export fn a() void { + \\ const f = async func(); + \\ resume f; + \\} + \\export fn b() void { + \\ const f = async func(); + \\ var x: anyframe = &f; + \\} + \\fn func() void { + \\ suspend; + \\} + , + "tmp.zig:3:12: error: expected type 'anyframe', found '*const @Frame(func)'", + "tmp.zig:7:24: error: expected type 'anyframe', found '*const @Frame(func)'", + ); + cases.add( "prevent bad implicit casting of anyframe types", 
\\export fn a() void { diff --git a/test/stage1/behavior/async_fn.zig b/test/stage1/behavior/async_fn.zig index a6231e4609..b5e1d3a63f 100644 --- a/test/stage1/behavior/async_fn.zig +++ b/test/stage1/behavior/async_fn.zig @@ -6,7 +6,7 @@ const expectEqual = std.testing.expectEqual; var global_x: i32 = 1; test "simple coroutine suspend and resume" { - const frame = async simpleAsyncFn(); + var frame = async simpleAsyncFn(); expect(global_x == 2); resume frame; expect(global_x == 3); @@ -25,7 +25,7 @@ fn simpleAsyncFn() void { var global_y: i32 = 1; test "pass parameter to coroutine" { - const p = async simpleAsyncFnWithArg(2); + var p = async simpleAsyncFnWithArg(2); expect(global_y == 3); resume p; expect(global_y == 5); @@ -60,7 +60,7 @@ test "local variable in async function" { fn doTheTest() void { expect(x == 0); - const p = async add(1, 2); + var p = async add(1, 2); expect(x == 0); resume p; expect(x == 0); @@ -201,7 +201,7 @@ var await_final_result: i32 = 0; test "coroutine await" { await_seq('a'); - const p = async await_amain(); + var p = async await_amain(); await_seq('f'); resume await_a_promise; await_seq('i'); @@ -210,7 +210,7 @@ test "coroutine await" { } async fn await_amain() void { await_seq('b'); - const p = async await_another(); + var p = async await_another(); await_seq('e'); await_final_result = await p; await_seq('h'); @@ -237,14 +237,14 @@ var early_final_result: i32 = 0; test "coroutine await early return" { early_seq('a'); - const p = async early_amain(); + var p = async early_amain(); early_seq('f'); expect(early_final_result == 1234); expect(std.mem.eql(u8, early_points, "abcdef")); } async fn early_amain() void { early_seq('b'); - const p = async early_another(); + var p = async early_another(); early_seq('d'); early_final_result = await p; early_seq('e'); diff --git a/test/stage1/behavior/await_struct.zig b/test/stage1/behavior/await_struct.zig index a649b0a39b..6e4d330ea3 100644 --- a/test/stage1/behavior/await_struct.zig +++ 
b/test/stage1/behavior/await_struct.zig @@ -11,7 +11,7 @@ var await_final_result = Foo{ .x = 0 }; test "coroutine await struct" { await_seq('a'); - const p = async await_amain(); + var p = async await_amain(); await_seq('f'); resume await_a_promise; await_seq('i'); @@ -20,7 +20,7 @@ test "coroutine await struct" { } async fn await_amain() void { await_seq('b'); - const p = async await_another(); + var p = async await_another(); await_seq('e'); await_final_result = await p; await_seq('h'); From 2cb1f93894be3f48f0c49004515fa5e8190f69d9 Mon Sep 17 00:00:00 2001 From: yvt Date: Fri, 16 Aug 2019 23:54:28 +0900 Subject: [PATCH 122/125] correct LLVM subarch names for arm --- src/target.cpp | 58 +++++++++++++++++++++++++++++++++++++++++++++++- src/zig_llvm.cpp | 14 ++++++------ 2 files changed, 64 insertions(+), 8 deletions(-) diff --git a/src/target.cpp b/src/target.cpp index d1ae64acd4..8d73af6a01 100644 --- a/src/target.cpp +++ b/src/target.cpp @@ -651,7 +651,63 @@ ZigLLVM_SubArchType target_subarch_enum(SubArchList sub_arch_list, size_t i) { } const char *target_subarch_name(ZigLLVM_SubArchType subarch) { - return ZigLLVMGetSubArchTypeName(subarch); + switch (subarch) { + case ZigLLVM_NoSubArch: + return ""; + case ZigLLVM_ARMSubArch_v8_5a: + return "v8_5a"; + case ZigLLVM_ARMSubArch_v8_4a: + return "v8_4a"; + case ZigLLVM_ARMSubArch_v8_3a: + return "v8_3a"; + case ZigLLVM_ARMSubArch_v8_2a: + return "v8_2a"; + case ZigLLVM_ARMSubArch_v8_1a: + return "v8_1a"; + case ZigLLVM_ARMSubArch_v8: + return "v8"; + case ZigLLVM_ARMSubArch_v8r: + return "v8r"; + case ZigLLVM_ARMSubArch_v8m_baseline: + return "v8m_baseline"; + case ZigLLVM_ARMSubArch_v8m_mainline: + return "v8m_mainline"; + case ZigLLVM_ARMSubArch_v7: + return "v7"; + case ZigLLVM_ARMSubArch_v7em: + return "v7em"; + case ZigLLVM_ARMSubArch_v7m: + return "v7m"; + case ZigLLVM_ARMSubArch_v7s: + return "v7s"; + case ZigLLVM_ARMSubArch_v7k: + return "v7k"; + case ZigLLVM_ARMSubArch_v7ve: + return "v7ve"; + case 
ZigLLVM_ARMSubArch_v6: + return "v6"; + case ZigLLVM_ARMSubArch_v6m: + return "v6m"; + case ZigLLVM_ARMSubArch_v6k: + return "v6k"; + case ZigLLVM_ARMSubArch_v6t2: + return "v6t2"; + case ZigLLVM_ARMSubArch_v5: + return "v5"; + case ZigLLVM_ARMSubArch_v5te: + return "v5te"; + case ZigLLVM_ARMSubArch_v4t: + return "v4t"; + case ZigLLVM_KalimbaSubArch_v3: + return "v3"; + case ZigLLVM_KalimbaSubArch_v4: + return "v4"; + case ZigLLVM_KalimbaSubArch_v5: + return "v5"; + case ZigLLVM_MipsSubArch_r6: + return "r6"; + } + zig_unreachable(); } size_t target_subarch_list_count(void) { diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp index 695f8b18ef..d9315ff549 100644 --- a/src/zig_llvm.cpp +++ b/src/zig_llvm.cpp @@ -785,23 +785,23 @@ const char *ZigLLVMGetSubArchTypeName(ZigLLVM_SubArchType sub_arch) { case ZigLLVM_NoSubArch: return ""; case ZigLLVM_ARMSubArch_v8_5a: - return "v8_5a"; + return "v8.5a"; case ZigLLVM_ARMSubArch_v8_4a: - return "v8_4a"; + return "v8.4a"; case ZigLLVM_ARMSubArch_v8_3a: - return "v8_3a"; + return "v8.3a"; case ZigLLVM_ARMSubArch_v8_2a: - return "v8_2a"; + return "v8.2a"; case ZigLLVM_ARMSubArch_v8_1a: - return "v8_1a"; + return "v8.1a"; case ZigLLVM_ARMSubArch_v8: return "v8"; case ZigLLVM_ARMSubArch_v8r: return "v8r"; case ZigLLVM_ARMSubArch_v8m_baseline: - return "v8m_baseline"; + return "v8m.base"; case ZigLLVM_ARMSubArch_v8m_mainline: - return "v8m_mainline"; + return "v8m.main"; case ZigLLVM_ARMSubArch_v7: return "v7"; case ZigLLVM_ARMSubArch_v7em: From 5a2cbe239f4299b4412209148131a501ec023ceb Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 16 Aug 2019 13:00:48 -0400 Subject: [PATCH 123/125] fix and test case for returning from suspend block See #3063 --- src/codegen.cpp | 4 +++- src/ir.cpp | 2 +- test/stage1/behavior/async_fn.zig | 14 ++++++++++++++ 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index 7e809f4824..622ade712a 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ 
-5470,7 +5470,9 @@ static LLVMValueRef ir_render_assert_non_null(CodeGen *g, IrExecutable *executab static LLVMValueRef ir_render_suspend_begin(CodeGen *g, IrExecutable *executable, IrInstructionSuspendBegin *instruction) { - instruction->resume_bb = gen_suspend_begin(g, "SuspendResume"); + if (fn_is_async(g->cur_fn)) { + instruction->resume_bb = gen_suspend_begin(g, "SuspendResume"); + } return nullptr; } diff --git a/src/ir.cpp b/src/ir.cpp index d6fba23856..3287a6bd37 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -7914,7 +7914,7 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, node->data.suspend.block, susp_res)); } - return ir_build_suspend_finish(irb, parent_scope, node, begin); + return ir_mark_gen(ir_build_suspend_finish(irb, parent_scope, node, begin)); } static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scope, diff --git a/test/stage1/behavior/async_fn.zig b/test/stage1/behavior/async_fn.zig index b5e1d3a63f..fea713b725 100644 --- a/test/stage1/behavior/async_fn.zig +++ b/test/stage1/behavior/async_fn.zig @@ -760,3 +760,17 @@ test "async call a generic function" { }; _ = async S.doTheTest(); } + +test "return from suspend block" { + const S = struct { + fn doTheTest() void { + expect(func() == 1234); + } + fn func() i32 { + suspend { + return 1234; + } + } + }; + _ = async S.doTheTest(); +} From cbca6586e72a8adefb3d8923d0c0f4590f54bfd8 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 16 Aug 2019 13:56:26 -0400 Subject: [PATCH 124/125] add test for struct parameter to async function being copied closes #1155 --- test/stage1/behavior/async_fn.zig | 43 +++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/test/stage1/behavior/async_fn.zig b/test/stage1/behavior/async_fn.zig index fea713b725..e1b173292b 100644 --- a/test/stage1/behavior/async_fn.zig +++ b/test/stage1/behavior/async_fn.zig @@ -774,3 +774,46 @@ 
test "return from suspend block" { }; _ = async S.doTheTest(); } + +test "struct parameter to async function is copied to the frame" { + const S = struct { + const Point = struct { + x: i32, + y: i32, + }; + + var frame: anyframe = undefined; + + fn doTheTest() void { + _ = async atest(); + resume frame; + } + + fn atest() void { + var f: @Frame(foo) = undefined; + bar(&f); + clobberStack(10); + } + + fn clobberStack(x: i32) void { + if (x == 0) return; + clobberStack(x - 1); + var y: i32 = x; + } + + fn bar(f: *@Frame(foo)) void { + var pt = Point{ .x = 1, .y = 2 }; + f.* = async foo(pt); + var result = await f; + expect(result == 1); + } + + fn foo(point: Point) i32 { + suspend { + frame = @frame(); + } + return point.x; + } + }; + S.doTheTest(); +} From bf7b6fbbdb7d28c0d7dba3e17c46ce156712cfc8 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 16 Aug 2019 16:30:24 -0400 Subject: [PATCH 125/125] add missing compile error for fn call bad implicit cast when the function's return type handle is a pointer but the result location's result value type handle is not a pointer closes #3055 --- src/ir.cpp | 28 +++++++++++++++++++++------- test/compile_errors.zig | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 7 deletions(-) diff --git a/src/ir.cpp b/src/ir.cpp index 3287a6bd37..0129081e22 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -9615,6 +9615,10 @@ static ZigType *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, ZigT return cur_type; } + if (prev_type == cur_type) { + continue; + } + if (prev_type->id == ZigTypeIdUnreachable) { prev_inst = cur_inst; continue; @@ -14921,7 +14925,7 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc ZigType *frame_type = get_fn_frame_type(ira->codegen, fn_entry); IrInstruction *result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc, frame_type, nullptr, true, true, false); - if (result_loc != nullptr && 
(type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc))) { + if (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)) { return result_loc; } result_loc = ir_implicit_cast(ira, result_loc, get_pointer_to_type(ira->codegen, frame_type, false)); @@ -15638,10 +15642,14 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c if (handle_is_ptr(impl_fn_type_id->return_type)) { result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc, impl_fn_type_id->return_type, nullptr, true, true, false); - if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || - instr_is_unreachable(result_loc))) - { - return result_loc; + if (result_loc != nullptr) { + if (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)) { + return result_loc; + } + if (!handle_is_ptr(result_loc->value.type->data.pointer.child_type)) { + ir_reset_result(call_instruction->result_loc); + result_loc = nullptr; + } } } else { result_loc = nullptr; @@ -15791,8 +15799,14 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c if (handle_is_ptr(return_type)) { result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc, return_type, nullptr, true, true, false); - if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc))) { - return result_loc; + if (result_loc != nullptr) { + if (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)) { + return result_loc; + } + if (!handle_is_ptr(result_loc->value.type->data.pointer.child_type)) { + ir_reset_result(call_instruction->result_loc); + result_loc = nullptr; + } } } else { result_loc = nullptr; diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 7c9d8fae51..5eec78fa7f 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -2,6 +2,40 @@ const tests = @import("tests.zig"); 
const builtin = @import("builtin"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "result location incompatibility mismatching handle_is_ptr (generic call)", + \\export fn entry() void { + \\ var damn = Container{ + \\ .not_optional = getOptional(i32), + \\ }; + \\} + \\pub fn getOptional(comptime T: type) ?T { + \\ return 0; + \\} + \\pub const Container = struct { + \\ not_optional: i32, + \\}; + , + "tmp.zig:3:36: error: expected type 'i32', found '?i32'", + ); + + cases.add( + "result location incompatibility mismatching handle_is_ptr", + \\export fn entry() void { + \\ var damn = Container{ + \\ .not_optional = getOptional(), + \\ }; + \\} + \\pub fn getOptional() ?i32 { + \\ return 0; + \\} + \\pub const Container = struct { + \\ not_optional: i32, + \\}; + , + "tmp.zig:3:36: error: expected type 'i32', found '?i32'", + ); + cases.add( "const frame cast to anyframe", \\export fn a() void {