From de369de312584c64ec162972ba14e95b7a17e531 Mon Sep 17 00:00:00 2001 From: Vexu <15308111+Vexu@users.noreply.github.com> Date: Thu, 27 Jun 2019 02:00:30 +0300 Subject: [PATCH 01/12] fix comments getting removed after empty comments --- std/zig/parser_test.zig | 14 ++++++++++++++ std/zig/render.zig | 27 ++++++++++++++++++--------- 2 files changed, 32 insertions(+), 9 deletions(-) diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig index f78e666779..2014ce02e9 100644 --- a/std/zig/parser_test.zig +++ b/std/zig/parser_test.zig @@ -2246,6 +2246,20 @@ test "zig fmt: if type expr" { ); } +test "zig fmt: comment after empty comment" { + try testTransform( + \\const x = true; // + \\// + \\// + \\//a + \\ + , + \\const x = true; + \\//a + \\ + ); +} + const std = @import("std"); const mem = std.mem; const warn = std.debug.warn; diff --git a/std/zig/render.zig b/std/zig/render.zig index 2e8e4481be..3dd5a6dd1b 100644 --- a/std/zig/render.zig +++ b/std/zig/render.zig @@ -1941,15 +1941,24 @@ fn renderTokenOffset( } } - const comment_is_empty = mem.trimRight(u8, tree.tokenSlicePtr(next_token), " ").len == 2; - if (comment_is_empty) { - switch (space) { - Space.Newline => { - try stream.writeByte('\n'); - start_col.* = 0; - return; - }, - else => {}, + while (true) { + const comment_is_empty = mem.trimRight(u8, tree.tokenSlicePtr(next_token), " ").len == 2; + if (comment_is_empty) { + switch (space) { + Space.Newline => { + offset += 1; + token = next_token; + next_token = tree.tokens.at(token_index + offset); + if (next_token.id != .LineComment) { + try stream.writeByte('\n'); + start_col.* = 0; + return; + } + }, + else => break, + } + } else { + break; } } From 7139eef4cfc7c75655f9951849a994b116e96abe Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 28 Aug 2019 11:17:20 -0400 Subject: [PATCH 02/12] implement lazy values for error union types closes #3129 --- src/all_types.hpp | 9 ++++++ src/analyze.cpp | 43 ++++++++++++++++++++++--- src/ir.cpp | 57 ++++++++++++++++++++++++---------- test/stage1/behavior/error.zig | 20 ++++++++++++ 4 files changed, 107 insertions(+), 22 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 38964d0091..48323e58ad 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -312,6 +312,7 @@ enum LazyValueId { LazyValueIdOptType, LazyValueIdSliceType, LazyValueIdFnType, + LazyValueIdErrUnionType, }; struct LazyValue { @@ -372,6 +373,14 @@ struct LazyValueFnType { bool is_generic; }; +struct LazyValueErrUnionType { + LazyValue base; + + IrAnalyze *ira; + IrInstruction *err_set_type; + IrInstruction *payload_type; +}; + struct ConstExprValue { ZigType *type; ConstValSpecial special; diff --git a/src/analyze.cpp b/src/analyze.cpp index 965bd57e02..9ae7e99547 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1015,6 +1015,7 @@ static Error type_val_resolve_zero_bits(CodeGen *g, ConstExprValue *type_val, Zi } case LazyValueIdOptType: case LazyValueIdSliceType: + case LazyValueIdErrUnionType: *is_zero_bits = false; return ErrorNone; case LazyValueIdFnType: { @@ -1040,6 +1041,7 @@ Error type_val_resolve_is_opaque_type(CodeGen *g, ConstExprValue *type_val, bool case LazyValueIdPtrType: case LazyValueIdFnType: case LazyValueIdOptType: + case LazyValueIdErrUnionType: *is_opaque_type = false; return ErrorNone; } @@ -1094,6 +1096,11 @@ static ReqCompTime type_val_resolve_requires_comptime(CodeGen *g, ConstExprValue } return ReqCompTimeNo; } + case LazyValueIdErrUnionType: { + LazyValueErrUnionType *lazy_err_union_type = + 
reinterpret_cast(type_val->data.x_lazy); + return type_val_resolve_requires_comptime(g, &lazy_err_union_type->payload_type->value); + } } zig_unreachable(); } @@ -1102,10 +1109,8 @@ static Error type_val_resolve_abi_size(CodeGen *g, AstNode *source_node, ConstEx size_t *abi_size, size_t *size_in_bits) { Error err; - if (type_val->data.x_lazy->id == LazyValueIdOptType) { - if ((err = ir_resolve_lazy(g, source_node, type_val))) - return err; - } + +start_over: if (type_val->special != ConstValSpecialLazy) { assert(type_val->special == ConstValSpecialStatic); ZigType *ty = type_val->data.x_type; @@ -1129,7 +1134,10 @@ static Error type_val_resolve_abi_size(CodeGen *g, AstNode *source_node, ConstEx *size_in_bits = g->builtin_types.entry_usize->size_in_bits; return ErrorNone; case LazyValueIdOptType: - zig_unreachable(); + case LazyValueIdErrUnionType: + if ((err = ir_resolve_lazy(g, source_node, type_val))) + return err; + goto start_over; } zig_unreachable(); } @@ -1161,6 +1169,19 @@ Error type_val_resolve_abi_align(CodeGen *g, ConstExprValue *type_val, uint32_t LazyValueOptType *lazy_opt_type = reinterpret_cast(type_val->data.x_lazy); return type_val_resolve_abi_align(g, &lazy_opt_type->payload_type->value, abi_align); } + case LazyValueIdErrUnionType: { + LazyValueErrUnionType *lazy_err_union_type = + reinterpret_cast(type_val->data.x_lazy); + uint32_t payload_abi_align; + if ((err = type_val_resolve_abi_align(g, &lazy_err_union_type->payload_type->value, + &payload_abi_align))) + { + return err; + } + *abi_align = (payload_abi_align > g->err_tag_type->abi_align) ? + payload_abi_align : g->err_tag_type->abi_align; + return ErrorNone; + } } zig_unreachable(); } @@ -1189,6 +1210,18 @@ static OnePossibleValue type_val_resolve_has_one_possible_value(CodeGen *g, Cons return OnePossibleValueNo; } } + case LazyValueIdErrUnionType: { + LazyValueErrUnionType *lazy_err_union_type = + reinterpret_cast(type_val->data.x_lazy); + switch (type_val_resolve_has_one_possible_value(g, &lazy_err_union_type->err_set_type->value)) { + case OnePossibleValueInvalid: + return OnePossibleValueInvalid; + case OnePossibleValueNo: + return OnePossibleValueNo; + case OnePossibleValueYes: + return type_val_resolve_has_one_possible_value(g, &lazy_err_union_type->payload_type->value); + } + } } zig_unreachable(); } diff --git a/src/ir.cpp b/src/ir.cpp index 15fa4ccbe1..52cf69de82 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -14626,28 +14626,23 @@ static IrInstruction *ir_analyze_instruction_error_return_trace(IrAnalyze *ira, static IrInstruction *ir_analyze_instruction_error_union(IrAnalyze *ira, IrInstructionErrorUnion *instruction) { - Error err; + IrInstruction *result = ir_const(ira, &instruction->base, ira->codegen->builtin_types.entry_type); + result->value.special = ConstValSpecialLazy; - ZigType *err_set_type = ir_resolve_type(ira, instruction->err_set->child); - if (type_is_invalid(err_set_type)) + LazyValueErrUnionType *lazy_err_union_type = allocate(1); + lazy_err_union_type->ira = ira; + result->value.data.x_lazy = &lazy_err_union_type->base; + lazy_err_union_type->base.id = LazyValueIdErrUnionType; + + lazy_err_union_type->err_set_type = instruction->err_set->child; + if (ir_resolve_type_lazy(ira, lazy_err_union_type->err_set_type) == nullptr) return ira->codegen->invalid_instruction; - ZigType *payload_type = ir_resolve_type(ira, instruction->payload->child); - if (type_is_invalid(payload_type)) + lazy_err_union_type->payload_type = instruction->payload->child; + if (ir_resolve_type_lazy(ira, 
lazy_err_union_type->payload_type) == nullptr) return ira->codegen->invalid_instruction; - if (err_set_type->id != ZigTypeIdErrorSet) { - ir_add_error(ira, instruction->err_set->child, - buf_sprintf("expected error set type, found type '%s'", - buf_ptr(&err_set_type->name))); - return ira->codegen->invalid_instruction; - } - - if ((err = type_resolve(ira->codegen, payload_type, ResolveStatusSizeKnown))) - return ira->codegen->invalid_instruction; - ZigType *result_type = get_error_union_type(ira->codegen, err_set_type, payload_type); - - return ir_const_type(ira, &instruction->base, result_type); + return result; } static IrInstruction *ir_analyze_alloca(IrAnalyze *ira, IrInstruction *source_inst, ZigType *var_type, @@ -25698,6 +25693,34 @@ static Error ir_resolve_lazy_raw(AstNode *source_node, ConstExprValue *val) { val->data.x_type = fn_type; return ErrorNone; } + case LazyValueIdErrUnionType: { + LazyValueErrUnionType *lazy_err_union_type = + reinterpret_cast(val->data.x_lazy); + IrAnalyze *ira = lazy_err_union_type->ira; + + ZigType *err_set_type = ir_resolve_type(ira, lazy_err_union_type->err_set_type); + if (type_is_invalid(err_set_type)) + return ErrorSemanticAnalyzeFail; + + ZigType *payload_type = ir_resolve_type(ira, lazy_err_union_type->payload_type); + if (type_is_invalid(payload_type)) + return ErrorSemanticAnalyzeFail; + + if (err_set_type->id != ZigTypeIdErrorSet) { + ir_add_error(ira, lazy_err_union_type->err_set_type, + buf_sprintf("expected error set type, found type '%s'", + buf_ptr(&err_set_type->name))); + return ErrorSemanticAnalyzeFail; + } + + if ((err = type_resolve(ira->codegen, payload_type, ResolveStatusSizeKnown))) + return ErrorSemanticAnalyzeFail; + + assert(val->type->id == ZigTypeIdMetaType); + val->data.x_type = get_error_union_type(ira->codegen, err_set_type, payload_type); + val->special = ConstValSpecialStatic; + return ErrorNone; + } } zig_unreachable(); } diff --git a/test/stage1/behavior/error.zig b/test/stage1/behavior/error.zig index 264f140c9d..fefd95a850 100644 --- a/test/stage1/behavior/error.zig +++ b/test/stage1/behavior/error.zig @@ -375,3 +375,23 @@ test "implicit cast to optional to error union to return result loc" { S.entry(); //comptime S.entry(); TODO } + +test "function pointer with return type that is error union with payload which is pointer of parent struct" { + const S = struct { + const Foo = struct { + fun: fn (a: i32) (anyerror!*Foo), + }; + + const Err = error{UnspecifiedErr}; + + fn bar(a: i32) anyerror!*Foo { + return Err.UnspecifiedErr; + } + + fn doTheTest() void { + var x = Foo{ .fun = bar }; + expectError(error.UnspecifiedErr, x.fun(1)); + } + }; + S.doTheTest(); +} From c98f792ff8ed7af0d5e427836600c2ed7b30965e Mon Sep 17 00:00:00 2001 From: yvt Date: Wed, 28 Aug 2019 15:35:49 +0900 Subject: [PATCH 03/12] Improve the handling of `zig fmt: off/on` This commit reworks the handling of `zig fmt: off/on`. A motivating example is shown below: const c = d; // zig fmt: off // comment const a = b; // zig fmt: on Before processing the decl `const a = b;`, `renderRoot` looks for `zig fmt: off` that appears between this decl and the previous one. If it finds one, it searches for the next `zig fmt: on` that re-enables reformatting (or EOF if none was found), and copies the input code between `zig fmt: off` and `zig fmt: on` to the output stream. After that, it proceeds to the next decl. The important thing to notice here is that `renderTopLevelDecl` emits line comment tokens that follow the decl. 
Therefore, when copying code, we must be careful not to copy the line comment tokens that already have been written to the output stream. The original code failed to take this fact into consideration. It did skip `// zig fmt: off`, but not the remaining ones. As a result, when the above example is fed as input, it duplicated the line `// comment`. --- std/zig/parser_test.zig | 97 +++++++++++++++++++++++++++++++++++ std/zig/render.zig | 109 ++++++++++++++++++++++++++++++---------- 2 files changed, 180 insertions(+), 26 deletions(-) diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig index 73089f76fd..ab56b89f27 100644 --- a/std/zig/parser_test.zig +++ b/std/zig/parser_test.zig @@ -210,6 +210,103 @@ test "zig fmt: comment to disable/enable zig fmt" { ); } +test "zig fmt: line comment following 'zig fmt: off'" { + try testCanonical( + \\// zig fmt: off + \\// Test + \\const e = f; + ); +} + +test "zig fmt: doc comment following 'zig fmt: off'" { + try testCanonical( + \\// zig fmt: off + \\/// test + \\const e = f; + ); +} + +test "zig fmt: line and doc comment following 'zig fmt: off'" { + try testCanonical( + \\// zig fmt: off + \\// test 1 + \\/// test 2 + \\const e = f; + ); +} + +test "zig fmt: doc and line comment following 'zig fmt: off'" { + try testCanonical( + \\// zig fmt: off + \\/// test 1 + \\// test 2 + \\const e = f; + ); +} + +test "zig fmt: alternating 'zig fmt: off' and 'zig fmt: on'" { + try testCanonical( + \\// zig fmt: off + \\// zig fmt: on + \\// zig fmt: off + \\const e = f; + \\// zig fmt: off + \\// zig fmt: on + \\// zig fmt: off + \\const a = b; + \\// zig fmt: on + \\const c = d; + \\// zig fmt: on + \\ + ); +} + +test "zig fmt: line comment following 'zig fmt: on'" { + try testCanonical( + \\// zig fmt: off + \\const e = f; + \\// zig fmt: on + \\// test + \\const e = f; + \\ + ); +} + +test "zig fmt: doc comment following 'zig fmt: on'" { + try testCanonical( + \\// zig fmt: off + \\const e = f; + \\// zig fmt: on + \\/// test + \\const e = f; + \\ + ); +} + +test "zig fmt: line and doc comment following 'zig fmt: on'" { + try testCanonical( + \\// zig fmt: off + \\const e = f; + \\// zig fmt: on + \\// test1 + \\/// test2 + \\const e = f; + \\ + ); +} + +test "zig fmt: doc and line comment following 'zig fmt: on'" { + try testCanonical( + \\// zig fmt: off + \\const e = f; + \\// zig fmt: on + \\/// test1 + \\// test2 + \\const e = f; + \\ + ); +} + test "zig fmt: pointer of unknown length" { try testCanonical( \\fn foo(ptr: [*]u8) void {} diff --git a/std/zig/render.zig b/std/zig/render.zig index 66edf4daef..42ca071e5f 100644 --- a/std/zig/render.zig +++ b/std/zig/render.zig @@ -89,41 +89,98 @@ fn renderRoot( var it = tree.root_node.decls.iterator(0); while (true) { var decl = (it.next() orelse return).*; - // look for zig fmt: off comment - var start_token_index = decl.firstToken(); - zig_fmt_loop: while (start_token_index != 0) { - start_token_index -= 1; - const start_token = tree.tokens.at(start_token_index); - switch (start_token.id) { + + // This loop does the following: + // + // - Iterates through line/doc comment tokens that precedes the current + // decl. + // - Figures out the first token index (`copy_start_token_index`) which + // hasn't been copied to the output stream yet. + // - Detects `zig fmt: (off|on)` in the line comment tokens, and + // determines whether the current decl should be reformatted or not. 
+ // + var token_index = decl.firstToken(); + var fmt_active = true; + var found_fmt_directive = false; + + var copy_start_token_index = token_index; + + while (token_index != 0) { + token_index -= 1; + const token = tree.tokens.at(token_index); + switch (token.id) { Token.Id.LineComment => {}, - Token.Id.DocComment => continue, + Token.Id.DocComment => { + copy_start_token_index = token_index; + continue; + }, else => break, } - if (mem.eql(u8, mem.trim(u8, tree.tokenSlicePtr(start_token)[2..], " "), "zig fmt: off")) { - var end_token_index = start_token_index; - while (true) { - end_token_index += 1; - const end_token = tree.tokens.at(end_token_index); - switch (end_token.id) { + + if (mem.eql(u8, mem.trim(u8, tree.tokenSlicePtr(token)[2..], " "), "zig fmt: off")) { + if (!found_fmt_directive) { + fmt_active = false; + found_fmt_directive = true; + } + } else if (mem.eql(u8, mem.trim(u8, tree.tokenSlicePtr(token)[2..], " "), "zig fmt: on")) { + if (!found_fmt_directive) { + fmt_active = true; + found_fmt_directive = true; + } + } + } + + if (!fmt_active) { + // Reformatting is disabled for the current decl and possibly some + // more decls that follow. + // Find the next `decl` for which reformatting is re-enabled. + token_index = decl.firstToken(); + + while (!fmt_active) { + decl = (it.next() orelse { + // If there's no next reformatted `decl`, just copy the + // remaining input tokens and bail out. + const start = tree.tokens.at(copy_start_token_index).start; + try copyFixingWhitespace(stream, tree.source[start..]); + return; + }).*; + var decl_first_token_index = decl.firstToken(); + + while (token_index < decl_first_token_index) : (token_index += 1) { + const token = tree.tokens.at(token_index); + switch (token.id) { Token.Id.LineComment => {}, - Token.Id.Eof => { - const start = tree.tokens.at(start_token_index + 1).start; - try copyFixingWhitespace(stream, tree.source[start..]); - return; - }, + Token.Id.Eof => unreachable, else => continue, } - if (mem.eql(u8, mem.trim(u8, tree.tokenSlicePtr(end_token)[2..], " "), "zig fmt: on")) { - const start = tree.tokens.at(start_token_index + 1).start; - try copyFixingWhitespace(stream, tree.source[start..end_token.end]); - try stream.writeByte('\n'); - while (tree.tokens.at(decl.firstToken()).start < end_token.end) { - decl = (it.next() orelse return).*; - } - break :zig_fmt_loop; + if (mem.eql(u8, mem.trim(u8, tree.tokenSlicePtr(token)[2..], " "), "zig fmt: on")) { + fmt_active = true; + } else if (mem.eql(u8, mem.trim(u8, tree.tokenSlicePtr(token)[2..], " "), "zig fmt: off")) { + fmt_active = false; } } } + + // Found the next `decl` for which reformatting is enabled. Copy + // the input tokens before the `decl` that haven't been copied yet. 
+ var copy_end_token_index = decl.firstToken(); + token_index = copy_end_token_index; + while (token_index != 0) { + token_index -= 1; + const token = tree.tokens.at(token_index); + switch (token.id) { + Token.Id.LineComment => {}, + Token.Id.DocComment => { + copy_end_token_index = token_index; + continue; + }, + else => break, + } + } + + const start = tree.tokens.at(copy_start_token_index).start; + const end = tree.tokens.at(copy_end_token_index).start; + try copyFixingWhitespace(stream, tree.source[start..end]); } try renderTopLevelDecl(allocator, stream, tree, 0, &start_col, decl); From af90da153178032df109aa955df0aac113de032d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 28 Aug 2019 12:16:52 -0400 Subject: [PATCH 04/12] fix implicit cast from zero sized array ptr to slice closes #1850 --- src/codegen.cpp | 28 ++++++++++++++++++---------- test/stage1/behavior/array.zig | 6 ++++++ 2 files changed, 24 insertions(+), 10 deletions(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index e8724f0d22..eda7a75d38 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -3052,8 +3052,10 @@ static LLVMValueRef ir_render_ptr_of_array_to_slice(CodeGen *g, IrExecutable *ex IrInstructionPtrOfArrayToSlice *instruction) { ZigType *actual_type = instruction->operand->value.type; - LLVMValueRef expr_val = ir_llvm_value(g, instruction->operand); - assert(expr_val); + ZigType *slice_type = instruction->base.value.type; + ZigType *slice_ptr_type = slice_type->data.structure.fields[slice_ptr_index].type_entry; + size_t ptr_index = slice_type->data.structure.fields[slice_ptr_index].gen_index; + size_t len_index = slice_type->data.structure.fields[slice_len_index].gen_index; LLVMValueRef result_loc = ir_llvm_value(g, instruction->result_loc); @@ -3061,15 +3063,21 @@ static LLVMValueRef ir_render_ptr_of_array_to_slice(CodeGen *g, IrExecutable *ex ZigType *array_type = actual_type->data.pointer.child_type; assert(array_type->id == ZigTypeIdArray); - LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, result_loc, slice_ptr_index, ""); - LLVMValueRef indices[] = { - LLVMConstNull(g->builtin_types.entry_usize->llvm_type), - LLVMConstInt(g->builtin_types.entry_usize->llvm_type, 0, false), - }; - LLVMValueRef slice_start_ptr = LLVMBuildInBoundsGEP(g->builder, expr_val, indices, 2, ""); - gen_store_untyped(g, slice_start_ptr, ptr_field_ptr, 0, false); + if (type_has_bits(actual_type)) { + LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, result_loc, ptr_index, ""); + LLVMValueRef indices[] = { + LLVMConstNull(g->builtin_types.entry_usize->llvm_type), + LLVMConstInt(g->builtin_types.entry_usize->llvm_type, 0, false), + }; + LLVMValueRef expr_val = ir_llvm_value(g, instruction->operand); + LLVMValueRef slice_start_ptr = LLVMBuildInBoundsGEP(g->builder, expr_val, indices, 2, ""); + gen_store_untyped(g, slice_start_ptr, ptr_field_ptr, 0, false); + } else if (ir_want_runtime_safety(g, &instruction->base)) { + LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, result_loc, ptr_index, ""); + gen_undef_init(g, slice_ptr_type->abi_align, slice_ptr_type, ptr_field_ptr); + } - LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, result_loc, slice_len_index, ""); + LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, result_loc, len_index, ""); LLVMValueRef len_value = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, array_type->data.array.len, false); gen_store_untyped(g, len_value, len_field_ptr, 0, false); diff --git a/test/stage1/behavior/array.zig 
b/test/stage1/behavior/array.zig index 9349af573c..462977066e 100644 --- a/test/stage1/behavior/array.zig +++ b/test/stage1/behavior/array.zig @@ -292,3 +292,9 @@ test "read/write through global variable array of struct fields initialized via }; S.doTheTest(); } + +test "implicit cast zero sized array ptr to slice" { + var b = ""; + const c: []const u8 = &b; + expect(c.len == 0); +} From e8a9caa3dd0aef7b745b8197afecec90598b2cdd Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 28 Aug 2019 14:38:55 -0400 Subject: [PATCH 05/12] add suggestion to AutoHash compile error message --- std/hash/auto_hash.zig | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/std/hash/auto_hash.zig b/std/hash/auto_hash.zig index dc65cada17..d34fc2719a 100644 --- a/std/hash/auto_hash.zig +++ b/std/hash/auto_hash.zig @@ -1,5 +1,6 @@ const std = @import("std"); const builtin = @import("builtin"); +const assert = std.debug.assert; const mem = std.mem; const meta = std.meta; @@ -165,8 +166,17 @@ pub fn hash(hasher: var, key: var, comptime strat: HashStrategy) void { /// Slices are rejected to avoid ambiguity on the user's intention. pub fn autoHash(hasher: var, key: var) void { const Key = @typeOf(key); - if (comptime meta.trait.isSlice(Key)) - @compileError("std.auto_hash.autoHash does not allow slices (here " ++ @typeName(Key) ++ " because the intent is unclear. Consider using std.auto_hash.hash or providing your own hash function instead."); + if (comptime meta.trait.isSlice(Key)) { + comptime assert(@hasDecl(std, "StringHashMap")); // detect when the following message needs updated + const extra_help = if (Key == []const u8) + " Consider std.StringHashMap for hashing the contents of []const u8." + else + ""; + + @compileError("std.auto_hash.autoHash does not allow slices (here " ++ @typeName(Key) ++ + ") because the intent is unclear. Consider using std.auto_hash.hash or providing your own hash function instead." ++ + extra_help); + } hash(hasher, key, .Shallow); } From 834a789bb96e65749299ce6c6e57688d699145eb Mon Sep 17 00:00:00 2001 From: Shritesh Date: Thu, 29 Aug 2019 09:14:45 -0500 Subject: [PATCH 06/12] Use LLVM path provided by homebrew --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9cf0893da8..ae664f52b6 100644 --- a/README.md +++ b/README.md @@ -55,7 +55,7 @@ brew install cmake llvm@8 brew outdated llvm@8 || brew upgrade llvm@8 mkdir build cd build -cmake .. -DCMAKE_PREFIX_PATH=/usr/local/Cellar/llvm/8.0.0_1 +cmake .. 
-DCMAKE_PREFIX_PATH=$(brew --prefix llvm) make install ``` From 94bbb46ca602be0ea0df97c207a98734ac459a0f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 29 Aug 2019 10:24:24 -0400 Subject: [PATCH 07/12] fix not fully resolving debug info for structs causing llvm error --- src/all_types.hpp | 2 ++ src/analyze.cpp | 15 ++++++++++++++- src/codegen.cpp | 6 ++++++ src/list.hpp | 11 +++++++++++ 4 files changed, 33 insertions(+), 1 deletion(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 48323e58ad..e55f10d5e2 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1214,6 +1214,7 @@ struct ZigTypeStruct { HashMap fields_by_name; RootStruct *root_struct; uint32_t *host_int_bytes; // available for packed structs, indexed by gen_index + size_t llvm_full_type_queue_index; uint32_t src_field_count; uint32_t gen_field_count; @@ -1861,6 +1862,7 @@ struct CodeGen { ZigList errors_by_index; ZigList caches_to_release; size_t largest_err_name_len; + ZigList type_resolve_stack; ZigPackage *std_package; ZigPackage *panic_package; diff --git a/src/analyze.cpp b/src/analyze.cpp index 9ae7e99547..2c934bcf69 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -7271,7 +7271,13 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS di_scope, di_file, line); struct_type->data.structure.resolve_status = ResolveStatusLLVMFwdDecl; - if (ResolveStatusLLVMFwdDecl >= wanted_resolve_status) return; + if (ResolveStatusLLVMFwdDecl >= wanted_resolve_status) { + struct_type->data.structure.llvm_full_type_queue_index = g->type_resolve_stack.length; + g->type_resolve_stack.append(struct_type); + return; + } else { + struct_type->data.structure.llvm_full_type_queue_index = SIZE_MAX; + } } size_t field_count = struct_type->data.structure.src_field_count; @@ -7475,6 +7481,13 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS ZigLLVMReplaceTemporary(g->dbuilder, struct_type->llvm_di_type, replacement_di_type); struct_type->llvm_di_type = replacement_di_type; struct_type->data.structure.resolve_status = ResolveStatusLLVMFull; + if (struct_type->data.structure.llvm_full_type_queue_index != SIZE_MAX) { + ZigType *last = g->type_resolve_stack.last(); + assert(last->id == ZigTypeIdStruct); + last->data.structure.llvm_full_type_queue_index = struct_type->data.structure.llvm_full_type_queue_index; + g->type_resolve_stack.swap_remove(struct_type->data.structure.llvm_full_type_queue_index); + struct_type->data.structure.llvm_full_type_queue_index = SIZE_MAX; + } } static void resolve_llvm_types_enum(CodeGen *g, ZigType *enum_type, ResolveStatus wanted_resolve_status) { diff --git a/src/codegen.cpp b/src/codegen.cpp index eda7a75d38..4065994d80 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -7208,6 +7208,12 @@ static void do_code_gen(CodeGen *g) { LLVMSetModuleInlineAsm(g->module, buf_ptr(&g->global_asm)); } + while (g->type_resolve_stack.length != 0) { + ZigType *ty = g->type_resolve_stack.last(); + if (type_resolve(g, ty, ResolveStatusLLVMFull)) + zig_unreachable(); + } + ZigLLVMDIBuilderFinalize(g->dbuilder); if (g->verbose_llvm_ir) { diff --git a/src/list.hpp b/src/list.hpp index f838e44a5b..8dce75f2b8 100644 --- a/src/list.hpp +++ b/src/list.hpp @@ -74,6 +74,17 @@ struct ZigList { capacity = better_capacity; } + T swap_remove(size_t index) { + if (length - 1 == index) return pop(); + + assert(index != SIZE_MAX); + assert(index < length); + + T old_item = items[index]; + items[index] = pop(); + return old_item; + } + T *items; size_t length; 
size_t capacity; From d9f0446b1f993c1b3c1bf5cc410b6d5f8a2f94fe Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 29 Aug 2019 12:43:56 -0400 Subject: [PATCH 08/12] make `@sizeOf` lazy --- src/all_types.hpp | 8 ++++ src/analyze.cpp | 59 +++++++++++++++++++++--- src/analyze.hpp | 2 + src/codegen.cpp | 5 +++ src/ir.cpp | 111 +++++++++++++++++++++++++++------------------- 5 files changed, 134 insertions(+), 51 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index e55f10d5e2..36d3c0a398 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -308,6 +308,7 @@ struct ConstGlobalRefs { enum LazyValueId { LazyValueIdInvalid, LazyValueIdAlignOf, + LazyValueIdSizeOf, LazyValueIdPtrType, LazyValueIdOptType, LazyValueIdSliceType, @@ -326,6 +327,13 @@ struct LazyValueAlignOf { IrInstruction *target_type; }; +struct LazyValueSizeOf { + LazyValue base; + + IrAnalyze *ira; + IrInstruction *target_type; +}; + struct LazyValueSliceType { LazyValue base; diff --git a/src/analyze.cpp b/src/analyze.cpp index 2c934bcf69..411a0d7a01 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -997,6 +997,7 @@ static Error type_val_resolve_zero_bits(CodeGen *g, ConstExprValue *type_val, Zi switch (type_val->data.x_lazy->id) { case LazyValueIdInvalid: case LazyValueIdAlignOf: + case LazyValueIdSizeOf: zig_unreachable(); case LazyValueIdPtrType: { LazyValuePtrType *lazy_ptr_type = reinterpret_cast(type_val->data.x_lazy); @@ -1036,6 +1037,7 @@ Error type_val_resolve_is_opaque_type(CodeGen *g, ConstExprValue *type_val, bool switch (type_val->data.x_lazy->id) { case LazyValueIdInvalid: case LazyValueIdAlignOf: + case LazyValueIdSizeOf: zig_unreachable(); case LazyValueIdSliceType: case LazyValueIdPtrType: @@ -1055,6 +1057,7 @@ static ReqCompTime type_val_resolve_requires_comptime(CodeGen *g, ConstExprValue switch (type_val->data.x_lazy->id) { case LazyValueIdInvalid: case LazyValueIdAlignOf: + case LazyValueIdSizeOf: zig_unreachable(); case LazyValueIdSliceType: { LazyValueSliceType *lazy_slice_type = reinterpret_cast(type_val->data.x_lazy); @@ -1105,7 +1108,7 @@ static ReqCompTime type_val_resolve_requires_comptime(CodeGen *g, ConstExprValue zig_unreachable(); } -static Error type_val_resolve_abi_size(CodeGen *g, AstNode *source_node, ConstExprValue *type_val, +Error type_val_resolve_abi_size(CodeGen *g, AstNode *source_node, ConstExprValue *type_val, size_t *abi_size, size_t *size_in_bits) { Error err; @@ -1123,12 +1126,42 @@ start_over: switch (type_val->data.x_lazy->id) { case LazyValueIdInvalid: case LazyValueIdAlignOf: + case LazyValueIdSizeOf: zig_unreachable(); - case LazyValueIdSliceType: - *abi_size = g->builtin_types.entry_usize->abi_size * 2; - *size_in_bits = g->builtin_types.entry_usize->size_in_bits * 2; + case LazyValueIdSliceType: { + LazyValueSliceType *lazy_slice_type = reinterpret_cast(type_val->data.x_lazy); + bool is_zero_bits; + if ((err = type_val_resolve_zero_bits(g, &lazy_slice_type->elem_type->value, nullptr, + nullptr, &is_zero_bits))) + { + return err; + } + if (is_zero_bits) { + *abi_size = g->builtin_types.entry_usize->abi_size; + *size_in_bits = g->builtin_types.entry_usize->size_in_bits; + } else { + *abi_size = g->builtin_types.entry_usize->abi_size * 2; + *size_in_bits = g->builtin_types.entry_usize->size_in_bits * 2; + } return ErrorNone; - case LazyValueIdPtrType: + } + case LazyValueIdPtrType: { + LazyValuePtrType *lazy_ptr_type = reinterpret_cast(type_val->data.x_lazy); + bool is_zero_bits; + if ((err = type_val_resolve_zero_bits(g, 
&lazy_ptr_type->elem_type->value, nullptr, + nullptr, &is_zero_bits))) + { + return err; + } + if (is_zero_bits) { + *abi_size = 0; + *size_in_bits = 0; + } else { + *abi_size = g->builtin_types.entry_usize->abi_size; + *size_in_bits = g->builtin_types.entry_usize->size_in_bits; + } + return ErrorNone; + } case LazyValueIdFnType: *abi_size = g->builtin_types.entry_usize->abi_size; *size_in_bits = g->builtin_types.entry_usize->size_in_bits; @@ -1159,6 +1192,7 @@ Error type_val_resolve_abi_align(CodeGen *g, ConstExprValue *type_val, uint32_t switch (type_val->data.x_lazy->id) { case LazyValueIdInvalid: case LazyValueIdAlignOf: + case LazyValueIdSizeOf: zig_unreachable(); case LazyValueIdSliceType: case LazyValueIdPtrType: @@ -1193,6 +1227,7 @@ static OnePossibleValue type_val_resolve_has_one_possible_value(CodeGen *g, Cons switch (type_val->data.x_lazy->id) { case LazyValueIdInvalid: case LazyValueIdAlignOf: + case LazyValueIdSizeOf: zig_unreachable(); case LazyValueIdSliceType: // it has the len field case LazyValueIdOptType: // it has the optional bit @@ -4202,7 +4237,12 @@ static void analyze_fn_async(CodeGen *g, ZigFn *fn, bool resolve_frame) { return; } } - assert(callee->anal_state == FnAnalStateComplete); + if (callee->anal_state != FnAnalStateComplete) { + add_node_error(g, call->base.source_node, + buf_sprintf("call to function '%s' depends on itself", buf_ptr(&callee->symbol_name))); + fn->anal_state = FnAnalStateInvalid; + return; + } analyze_fn_async(g, callee, true); if (callee->anal_state == FnAnalStateInvalid) { fn->anal_state = FnAnalStateInvalid; @@ -4480,6 +4520,8 @@ void semantic_analyze(CodeGen *g) { ZigFn *fn = g->fn_defs.at(g->fn_defs_index); g->trace_err = nullptr; analyze_fn_async(g, fn, true); + if (fn->anal_state == FnAnalStateInvalid) + continue; if (fn_is_async(fn) && fn->non_async_node != nullptr) { ErrorMsg *msg = add_node_error(g, fn->proto_node, buf_sprintf("'%s' cannot be async", buf_ptr(&fn->symbol_name))); @@ -5632,6 +5674,11 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) { return ErrorSemanticAnalyzeFail; } analyze_fn_async(g, callee, true); + if (callee->inferred_async_node == inferred_async_checking) { + assert(g->errors.length != 0); + frame_type->data.frame.locals_struct = g->builtin_types.entry_invalid; + return ErrorSemanticAnalyzeFail; + } if (!fn_is_async(callee)) continue; diff --git a/src/analyze.hpp b/src/analyze.hpp index ebfd11f514..7fa0143506 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -247,6 +247,8 @@ void resolve_llvm_types_fn(CodeGen *g, ZigFn *fn); bool fn_is_async(ZigFn *fn); Error type_val_resolve_abi_align(CodeGen *g, ConstExprValue *type_val, uint32_t *abi_align); +Error type_val_resolve_abi_size(CodeGen *g, AstNode *source_node, ConstExprValue *type_val, + size_t *abi_size, size_t *size_in_bits); ZigType *resolve_union_field_type(CodeGen *g, TypeUnionField *union_field); ZigType *resolve_struct_field_type(CodeGen *g, TypeStructField *struct_field); diff --git a/src/codegen.cpp b/src/codegen.cpp index 4065994d80..29ecb3a47d 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -6845,6 +6845,7 @@ static void set_global_tls(CodeGen *g, ZigVar *var, LLVMValueRef global_value) { } static void do_code_gen(CodeGen *g) { + Error err; assert(!g->errors.length); generate_error_name_table(g); @@ -6858,6 +6859,8 @@ static void do_code_gen(CodeGen *g) { // Generate debug info for it but that's it. 
ConstExprValue *const_val = var->const_value; assert(const_val->special != ConstValSpecialRuntime); + if ((err = ir_resolve_lazy(g, var->decl_node, const_val))) + zig_unreachable(); if (const_val->type != var->var_type) { zig_panic("TODO debug info for var with ptr casted value"); } @@ -6875,6 +6878,8 @@ static void do_code_gen(CodeGen *g) { // Generate debug info for it but that's it. ConstExprValue *const_val = var->const_value; assert(const_val->special != ConstValSpecialRuntime); + if ((err = ir_resolve_lazy(g, var->decl_node, const_val))) + zig_unreachable(); if (const_val->type != var->var_type) { zig_panic("TODO debug info for var with ptr casted value"); } diff --git a/src/ir.cpp b/src/ir.cpp index 52cf69de82..b6a73638e3 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -18066,54 +18066,20 @@ static IrInstruction *ir_analyze_instruction_array_type(IrAnalyze *ira, zig_unreachable(); } -static IrInstruction *ir_analyze_instruction_size_of(IrAnalyze *ira, - IrInstructionSizeOf *size_of_instruction) -{ - Error err; - IrInstruction *type_value = size_of_instruction->type_value->child; - ZigType *type_entry = ir_resolve_type(ira, type_value); +static IrInstruction *ir_analyze_instruction_size_of(IrAnalyze *ira, IrInstructionSizeOf *instruction) { + IrInstruction *result = ir_const(ira, &instruction->base, ira->codegen->builtin_types.entry_num_lit_int); + result->value.special = ConstValSpecialLazy; - if ((err = type_resolve(ira->codegen, type_entry, ResolveStatusSizeKnown))) + LazyValueSizeOf *lazy_size_of = allocate(1); + lazy_size_of->ira = ira; + result->value.data.x_lazy = &lazy_size_of->base; + lazy_size_of->base.id = LazyValueIdSizeOf; + + lazy_size_of->target_type = instruction->type_value->child; + if (ir_resolve_type_lazy(ira, lazy_size_of->target_type) == nullptr) return ira->codegen->invalid_instruction; - switch (type_entry->id) { - case ZigTypeIdInvalid: // handled above - zig_unreachable(); - case ZigTypeIdUnreachable: - case ZigTypeIdUndefined: - case ZigTypeIdNull: - case ZigTypeIdBoundFn: - case ZigTypeIdArgTuple: - case ZigTypeIdOpaque: - ir_add_error_node(ira, type_value->source_node, - buf_sprintf("no size available for type '%s'", buf_ptr(&type_entry->name))); - return ira->codegen->invalid_instruction; - case ZigTypeIdMetaType: - case ZigTypeIdEnumLiteral: - case ZigTypeIdComptimeFloat: - case ZigTypeIdComptimeInt: - case ZigTypeIdVoid: - case ZigTypeIdBool: - case ZigTypeIdInt: - case ZigTypeIdFloat: - case ZigTypeIdPointer: - case ZigTypeIdArray: - case ZigTypeIdStruct: - case ZigTypeIdOptional: - case ZigTypeIdErrorUnion: - case ZigTypeIdErrorSet: - case ZigTypeIdEnum: - case ZigTypeIdUnion: - case ZigTypeIdFn: - case ZigTypeIdVector: - case ZigTypeIdFnFrame: - case ZigTypeIdAnyFrame: - { - uint64_t size_in_bytes = type_size(ira->codegen, type_entry); - return ir_const_unsigned(ira, &size_of_instruction->base, size_in_bytes); - } - } - zig_unreachable(); + return result; } static IrInstruction *ir_analyze_test_non_null(IrAnalyze *ira, IrInstruction *source_inst, IrInstruction *value) { @@ -25548,6 +25514,61 @@ static Error ir_resolve_lazy_raw(AstNode *source_node, ConstExprValue *val) { bigint_init_unsigned(&val->data.x_bigint, align_in_bytes); return ErrorNone; } + case LazyValueIdSizeOf: { + LazyValueSizeOf *lazy_size_of = reinterpret_cast(val->data.x_lazy); + IrAnalyze *ira = lazy_size_of->ira; + + if (lazy_size_of->target_type->value.special == ConstValSpecialStatic) { + switch (lazy_size_of->target_type->value.data.x_type->id) { + case ZigTypeIdInvalid: // handled 
above + zig_unreachable(); + case ZigTypeIdUnreachable: + case ZigTypeIdUndefined: + case ZigTypeIdNull: + case ZigTypeIdBoundFn: + case ZigTypeIdArgTuple: + case ZigTypeIdOpaque: + ir_add_error(ira, lazy_size_of->target_type, + buf_sprintf("no size available for type '%s'", + buf_ptr(&lazy_size_of->target_type->value.data.x_type->name))); + return ErrorSemanticAnalyzeFail; + case ZigTypeIdMetaType: + case ZigTypeIdEnumLiteral: + case ZigTypeIdComptimeFloat: + case ZigTypeIdComptimeInt: + case ZigTypeIdVoid: + case ZigTypeIdBool: + case ZigTypeIdInt: + case ZigTypeIdFloat: + case ZigTypeIdPointer: + case ZigTypeIdArray: + case ZigTypeIdStruct: + case ZigTypeIdOptional: + case ZigTypeIdErrorUnion: + case ZigTypeIdErrorSet: + case ZigTypeIdEnum: + case ZigTypeIdUnion: + case ZigTypeIdFn: + case ZigTypeIdVector: + case ZigTypeIdFnFrame: + case ZigTypeIdAnyFrame: + break; + } + } + + uint64_t abi_size; + uint64_t size_in_bits; + if ((err = type_val_resolve_abi_size(ira->codegen, source_node, &lazy_size_of->target_type->value, + &abi_size, &size_in_bits))) + { + return err; + } + + val->special = ConstValSpecialStatic; + assert(val->type->id == ZigTypeIdComptimeInt); + bigint_init_unsigned(&val->data.x_bigint, abi_size); + return ErrorNone; + } case LazyValueIdSliceType: { LazyValueSliceType *lazy_slice_type = reinterpret_cast(val->data.x_lazy); IrAnalyze *ira = lazy_slice_type->ira; From 0512beca9d694a667e3ad12a656835b44457fbcd Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 29 Aug 2019 14:46:22 -0400 Subject: [PATCH 09/12] comparing against zero participates in lazy values --- src/analyze.cpp | 2 +- src/analyze.hpp | 2 + src/error.cpp | 1 + src/ir.cpp | 89 ++++++++++++++++++++++ src/userland.h | 1 + test/stage1/behavior/sizeof_and_typeof.zig | 15 ++++ 6 files changed, 109 insertions(+), 1 deletion(-) diff --git a/src/analyze.cpp b/src/analyze.cpp index 411a0d7a01..1ed4e19727 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -973,7 +973,7 @@ ConstExprValue *analyze_const_value(CodeGen *g, Scope *scope, AstNode *node, Zig nullptr, nullptr, node, type_name, nullptr, nullptr, undef); } -static Error type_val_resolve_zero_bits(CodeGen *g, ConstExprValue *type_val, ZigType *parent_type, +Error type_val_resolve_zero_bits(CodeGen *g, ConstExprValue *type_val, ZigType *parent_type, ConstExprValue *parent_type_val, bool *is_zero_bits) { Error err; diff --git a/src/analyze.hpp b/src/analyze.hpp index 7fa0143506..6e8897bf82 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -249,6 +249,8 @@ bool fn_is_async(ZigFn *fn); Error type_val_resolve_abi_align(CodeGen *g, ConstExprValue *type_val, uint32_t *abi_align); Error type_val_resolve_abi_size(CodeGen *g, AstNode *source_node, ConstExprValue *type_val, size_t *abi_size, size_t *size_in_bits); +Error type_val_resolve_zero_bits(CodeGen *g, ConstExprValue *type_val, ZigType *parent_type, + ConstExprValue *parent_type_val, bool *is_zero_bits); ZigType *resolve_union_field_type(CodeGen *g, TypeUnionField *union_field); ZigType *resolve_struct_field_type(CodeGen *g, TypeStructField *struct_field); diff --git a/src/error.cpp b/src/error.cpp index 20d429e8bf..753aeb292a 100644 --- a/src/error.cpp +++ b/src/error.cpp @@ -55,6 +55,7 @@ const char *err_str(Error err) { case ErrorBrokenPipe: return "broken pipe"; case ErrorNoSpaceLeft: return "no space left"; case ErrorNoCCompilerInstalled: return "no C compiler installed"; + case ErrorNotLazy: return "not lazy"; } return "(invalid error)"; } diff --git a/src/ir.cpp b/src/ir.cpp index 
b6a73638e3..67b5157c97 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -12932,7 +12932,52 @@ static bool optional_value_is_null(ConstExprValue *val) { } } +// Returns ErrorNotLazy when the value cannot be determined +static Error lazy_cmp_zero(AstNode *source_node, ConstExprValue *val, Cmp *result) { + Error err; + + switch (val->special) { + case ConstValSpecialRuntime: + case ConstValSpecialUndef: + return ErrorNotLazy; + case ConstValSpecialStatic: + switch (val->type->id) { + case ZigTypeIdComptimeInt: + case ZigTypeIdInt: + *result = bigint_cmp_zero(&val->data.x_bigint); + return ErrorNone; + default: + return ErrorNotLazy; + } + case ConstValSpecialLazy: + switch (val->data.x_lazy->id) { + case LazyValueIdInvalid: + zig_unreachable(); + case LazyValueIdAlignOf: + *result = CmpGT; + return ErrorNone; + case LazyValueIdSizeOf: { + LazyValueSizeOf *lazy_size_of = reinterpret_cast(val->data.x_lazy); + IrAnalyze *ira = lazy_size_of->ira; + bool is_zero_bits; + if ((err = type_val_resolve_zero_bits(ira->codegen, &lazy_size_of->target_type->value, + nullptr, nullptr, &is_zero_bits))) + { + return err; + } + *result = is_zero_bits ? CmpEQ : CmpGT; + return ErrorNone; + } + default: + return ErrorNotLazy; + } + } + zig_unreachable(); +} + static IrInstruction *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp *bin_op_instruction) { + Error err; + IrInstruction *op1 = bin_op_instruction->op1->child; if (type_is_invalid(op1->value.type)) return ira->codegen->invalid_instruction; @@ -13182,6 +13227,50 @@ static IrInstruction *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp * } if (one_possible_value || (instr_is_comptime(casted_op1) && instr_is_comptime(casted_op2))) { + { + // Before resolving the values, we special case comparisons against zero. These can often be done + // without resolving lazy values, preventing potential dependency loops. + Cmp op1_cmp_zero; + if ((err = lazy_cmp_zero(bin_op_instruction->base.source_node, &casted_op1->value, &op1_cmp_zero))) { + if (err == ErrorNotLazy) goto never_mind_just_calculate_it_normally; + return ira->codegen->invalid_instruction; + } + Cmp op2_cmp_zero; + if ((err = lazy_cmp_zero(bin_op_instruction->base.source_node, &casted_op2->value, &op2_cmp_zero))) { + if (err == ErrorNotLazy) goto never_mind_just_calculate_it_normally; + return ira->codegen->invalid_instruction; + } + bool can_cmp_zero = false; + Cmp cmp_result; + if (op1_cmp_zero == CmpEQ && op2_cmp_zero == CmpEQ) { + can_cmp_zero = true; + cmp_result = CmpEQ; + } else if (op1_cmp_zero == CmpGT && op2_cmp_zero == CmpEQ) { + can_cmp_zero = true; + cmp_result = CmpGT; + } else if (op1_cmp_zero == CmpEQ && op2_cmp_zero == CmpGT) { + can_cmp_zero = true; + cmp_result = CmpLT; + } else if (op1_cmp_zero == CmpLT && op2_cmp_zero == CmpEQ) { + can_cmp_zero = true; + cmp_result = CmpLT; + } else if (op1_cmp_zero == CmpEQ && op2_cmp_zero == CmpLT) { + can_cmp_zero = true; + cmp_result = CmpGT; + } else if (op1_cmp_zero == CmpLT && op2_cmp_zero == CmpGT) { + can_cmp_zero = true; + cmp_result = CmpLT; + } else if (op1_cmp_zero == CmpGT && op2_cmp_zero == CmpLT) { + can_cmp_zero = true; + cmp_result = CmpGT; + } + if (can_cmp_zero) { + bool answer = resolve_cmp_op_id(op_id, cmp_result); + return ir_const_bool(ira, &bin_op_instruction->base, answer); + } + } +never_mind_just_calculate_it_normally: + ConstExprValue *op1_val = one_possible_value ? 
&casted_op1->value : ir_resolve_const(ira, casted_op1, UndefBad); if (op1_val == nullptr) return ira->codegen->invalid_instruction; diff --git a/src/userland.h b/src/userland.h index 43bdbd18b1..43356438fd 100644 --- a/src/userland.h +++ b/src/userland.h @@ -75,6 +75,7 @@ enum Error { ErrorOperationAborted, ErrorBrokenPipe, ErrorNoSpaceLeft, + ErrorNotLazy, }; // ABI warning diff --git a/test/stage1/behavior/sizeof_and_typeof.zig b/test/stage1/behavior/sizeof_and_typeof.zig index cfad311e06..da79c3a270 100644 --- a/test/stage1/behavior/sizeof_and_typeof.zig +++ b/test/stage1/behavior/sizeof_and_typeof.zig @@ -74,3 +74,18 @@ test "@sizeOf on compile-time types" { expect(@sizeOf(@typeOf(.hi)) == 0); expect(@sizeOf(@typeOf(type)) == 0); } + +test "@sizeOf(T) == 0 doesn't force resolving struct size" { + const S = struct { + const Foo = struct { + y: if (@sizeOf(Foo) == 0) u64 else u32, + }; + const Bar = struct { + x: i32, + y: if (0 == @sizeOf(Bar)) u64 else u32, + }; + }; + + expect(@sizeOf(S.Foo) == 4); + expect(@sizeOf(S.Bar) == 8); +} From 8e939916347888e755d737c579042b034e215aa8 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 29 Aug 2019 16:25:24 -0400 Subject: [PATCH 10/12] avoid unnecessarily requiring alignment for array elem pointers --- src/ir.cpp | 39 +++++++++++++++++++++++++-------------- std/mem.zig | 11 ++++++----- 2 files changed, 31 insertions(+), 19 deletions(-) diff --git a/src/ir.cpp b/src/ir.cpp index 67b5157c97..ec414a5adc 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -16899,12 +16899,6 @@ static IrInstruction *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruct return ira->codegen->invalid_instruction; bool safety_check_on = elem_ptr_instruction->safety_check_on; - if ((err = type_resolve(ira->codegen, return_type->data.pointer.child_type, ResolveStatusSizeKnown))) - return ira->codegen->invalid_instruction; - - uint64_t elem_size = type_size(ira->codegen, return_type->data.pointer.child_type); - uint64_t abi_align = get_abi_alignment(ira->codegen, return_type->data.pointer.child_type); - uint64_t ptr_align = get_ptr_align(ira->codegen, return_type); if (instr_is_comptime(casted_elem_index)) { uint64_t index = bigint_as_u64(&casted_elem_index->value.data.x_bigint); if (array_type->id == ZigTypeIdArray) { @@ -16918,8 +16912,16 @@ static IrInstruction *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruct safety_check_on = false; } - { + if (return_type->data.pointer.explicit_alignment != 0) { // figure out the largest alignment possible + + if ((err = type_resolve(ira->codegen, return_type->data.pointer.child_type, ResolveStatusSizeKnown))) + return ira->codegen->invalid_instruction; + + uint64_t elem_size = type_size(ira->codegen, return_type->data.pointer.child_type); + uint64_t abi_align = get_abi_alignment(ira->codegen, return_type->data.pointer.child_type); + uint64_t ptr_align = get_ptr_align(ira->codegen, return_type); + uint64_t chosen_align = abi_align; if (ptr_align >= abi_align) { while (ptr_align > abi_align) { @@ -17148,15 +17150,24 @@ static IrInstruction *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruct case ReqCompTimeNo: break; } - if (ptr_align < abi_align) { - if (elem_size >= ptr_align && elem_size % ptr_align == 0) { - return_type = adjust_ptr_align(ira->codegen, return_type, ptr_align); + + if (return_type->data.pointer.explicit_alignment != 0) { + if ((err = type_resolve(ira->codegen, return_type->data.pointer.child_type, ResolveStatusSizeKnown))) + return ira->codegen->invalid_instruction; + + uint64_t elem_size = 
type_size(ira->codegen, return_type->data.pointer.child_type); + uint64_t abi_align = get_abi_alignment(ira->codegen, return_type->data.pointer.child_type); + uint64_t ptr_align = get_ptr_align(ira->codegen, return_type); + if (ptr_align < abi_align) { + if (elem_size >= ptr_align && elem_size % ptr_align == 0) { + return_type = adjust_ptr_align(ira->codegen, return_type, ptr_align); + } else { + // can't get here because guaranteed elem_size >= abi_align + zig_unreachable(); + } } else { - // can't get here because guaranteed elem_size >= abi_align - zig_unreachable(); + return_type = adjust_ptr_align(ira->codegen, return_type, abi_align); } - } else { - return_type = adjust_ptr_align(ira->codegen, return_type, abi_align); } } diff --git a/std/mem.zig b/std/mem.zig index ef001d5dab..49a143dffc 100644 --- a/std/mem.zig +++ b/std/mem.zig @@ -75,15 +75,16 @@ pub const Allocator = struct { new_alignment: u29, ) []u8, - /// Call `destroy` with the result. - /// Returns undefined memory. + /// Returns a pointer to undefined memory. + /// Call `destroy` with the result to free the memory. pub fn create(self: *Allocator, comptime T: type) Error!*T { if (@sizeOf(T) == 0) return &(T{}); const slice = try self.alloc(T, 1); return &slice[0]; } - /// `ptr` should be the return value of `create` + /// `ptr` should be the return value of `create`, or otherwise + /// have the same address and alignment property. pub fn destroy(self: *Allocator, ptr: var) void { const T = @typeOf(ptr).Child; if (@sizeOf(T) == 0) return; @@ -92,7 +93,7 @@ pub const Allocator = struct { assert(shrink_result.len == 0); } - pub fn alloc(self: *Allocator, comptime T: type, n: usize) ![]T { + pub fn alloc(self: *Allocator, comptime T: type, n: usize) Error![]T { return self.alignedAlloc(T, @alignOf(T), n); } @@ -101,7 +102,7 @@ pub const Allocator = struct { comptime T: type, comptime alignment: u29, n: usize, - ) ![]align(alignment) T { + ) Error![]align(alignment) T { if (n == 0) { return ([*]align(alignment) T)(undefined)[0..0]; } From 03910925f06f6127e81de47ff22ce4d24ca565b2 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 29 Aug 2019 21:51:31 -0400 Subject: [PATCH 11/12] await does not force async if callee is blocking closes #3067 --- src/all_types.hpp | 4 ++ src/analyze.cpp | 108 +++++++++++++++++++++--------- src/codegen.cpp | 78 ++++++++++++++------- src/error.cpp | 1 + src/ir.cpp | 35 +++++++--- src/userland.h | 1 + test/stage1/behavior/async_fn.zig | 10 +++ 7 files changed, 169 insertions(+), 68 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 36d3c0a398..42b3e04f49 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -36,6 +36,7 @@ struct IrInstruction; struct IrInstructionCast; struct IrInstructionAllocaGen; struct IrInstructionCallGen; +struct IrInstructionAwaitGen; struct IrBasicBlock; struct ScopeDecls; struct ZigWindowsSDK; @@ -1486,6 +1487,7 @@ struct ZigFn { AstNode **param_source_nodes; Buf **param_names; IrInstruction *err_code_spill; + AstNode *assumed_non_async; AstNode *fn_no_inline_set_node; AstNode *fn_static_eval_set_node; @@ -1503,6 +1505,7 @@ struct ZigFn { ZigList export_list; ZigList call_list; + ZigList await_list; LLVMValueRef valgrind_client_request_array; @@ -3717,6 +3720,7 @@ struct IrInstructionAwaitGen { IrInstruction *frame; IrInstruction *result_loc; + ZigFn *target_fn; }; struct IrInstructionResume { diff --git a/src/analyze.cpp b/src/analyze.cpp index 1ed4e19727..d1c79f9a1a 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -31,6 +31,7 @@ static void 
analyze_fn_body(CodeGen *g, ZigFn *fn_table_entry); static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_resolve_status); static void preview_use_decl(CodeGen *g, TldUsingNamespace *using_namespace, ScopeDecls *dest_decls_scope); static void resolve_use_decl(CodeGen *g, TldUsingNamespace *tld_using_namespace, ScopeDecls *dest_decls_scope); +static void analyze_fn_async(CodeGen *g, ZigFn *fn, bool resolve_frame); // nullptr means not analyzed yet; this one means currently being analyzed static const AstNode *inferred_async_checking = reinterpret_cast(0x1); @@ -4196,6 +4197,54 @@ static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) { } } +// ErrorNone - not async +// ErrorIsAsync - yes async +// ErrorSemanticAnalyzeFail - compile error emitted result is invalid +static Error analyze_callee_async(CodeGen *g, ZigFn *fn, ZigFn *callee, AstNode *call_node, + bool must_not_be_async) +{ + if (callee->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified) + return ErrorNone; + if (callee->anal_state == FnAnalStateReady) { + analyze_fn_body(g, callee); + if (callee->anal_state == FnAnalStateInvalid) { + return ErrorSemanticAnalyzeFail; + } + } + bool callee_is_async; + if (callee->anal_state == FnAnalStateComplete) { + analyze_fn_async(g, callee, true); + if (callee->anal_state == FnAnalStateInvalid) { + return ErrorSemanticAnalyzeFail; + } + callee_is_async = fn_is_async(callee); + } else { + // If it's already been determined, use that value. Otherwise + // assume non-async, emit an error later if it turned out to be async. + if (callee->inferred_async_node == nullptr || + callee->inferred_async_node == inferred_async_checking) + { + callee->assumed_non_async = call_node; + callee_is_async = false; + } else { + callee_is_async = callee->inferred_async_node != inferred_async_none; + } + } + if (callee_is_async) { + fn->inferred_async_node = call_node; + fn->inferred_async_fn = callee; + if (must_not_be_async) { + ErrorMsg *msg = add_node_error(g, fn->proto_node, + buf_sprintf("function with calling convention '%s' cannot be async", + calling_convention_name(fn->type_entry->data.fn.fn_type_id.cc))); + add_async_error_notes(g, msg, fn); + return ErrorSemanticAnalyzeFail; + } + return ErrorIsAsync; + } + return ErrorNone; +} + // This function resolves functions being inferred async. 
static void analyze_fn_async(CodeGen *g, ZigFn *fn, bool resolve_frame) { if (fn->inferred_async_node == inferred_async_checking) { @@ -4222,47 +4271,40 @@ static void analyze_fn_async(CodeGen *g, ZigFn *fn, bool resolve_frame) { for (size_t i = 0; i < fn->call_list.length; i += 1) { IrInstructionCallGen *call = fn->call_list.at(i); - ZigFn *callee = call->fn_entry; - if (callee == nullptr) { + if (call->fn_entry == nullptr) { // TODO function pointer call here, could be anything continue; } - - if (callee->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified) - continue; - if (callee->anal_state == FnAnalStateReady) { - analyze_fn_body(g, callee); - if (callee->anal_state == FnAnalStateInvalid) { + switch (analyze_callee_async(g, fn, call->fn_entry, call->base.source_node, must_not_be_async)) { + case ErrorSemanticAnalyzeFail: fn->anal_state = FnAnalStateInvalid; return; - } + case ErrorNone: + continue; + case ErrorIsAsync: + if (resolve_frame) { + resolve_async_fn_frame(g, fn); + } + return; + default: + zig_unreachable(); } - if (callee->anal_state != FnAnalStateComplete) { - add_node_error(g, call->base.source_node, - buf_sprintf("call to function '%s' depends on itself", buf_ptr(&callee->symbol_name))); - fn->anal_state = FnAnalStateInvalid; - return; - } - analyze_fn_async(g, callee, true); - if (callee->anal_state == FnAnalStateInvalid) { - fn->anal_state = FnAnalStateInvalid; - return; - } - if (fn_is_async(callee)) { - fn->inferred_async_node = call->base.source_node; - fn->inferred_async_fn = callee; - if (must_not_be_async) { - ErrorMsg *msg = add_node_error(g, fn->proto_node, - buf_sprintf("function with calling convention '%s' cannot be async", - calling_convention_name(fn->type_entry->data.fn.fn_type_id.cc))); - add_async_error_notes(g, msg, fn); + } + for (size_t i = 0; i < fn->await_list.length; i += 1) { + IrInstructionAwaitGen *await = fn->await_list.at(i); + switch (analyze_callee_async(g, fn, await->target_fn, await->base.source_node, must_not_be_async)) { + case ErrorSemanticAnalyzeFail: fn->anal_state = FnAnalStateInvalid; return; - } - if (resolve_frame) { - resolve_async_fn_frame(g, fn); - } - return; + case ErrorNone: + continue; + case ErrorIsAsync: + if (resolve_frame) { + resolve_async_fn_frame(g, fn); + } + return; + default: + zig_unreachable(); } } fn->inferred_async_node = inferred_async_none; diff --git a/src/codegen.cpp b/src/codegen.cpp index 29ecb3a47d..491ddcd4ea 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -3924,7 +3924,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMBuildStore(g->builder, awaiter_init_val, awaiter_ptr); if (ret_has_bits) { - LLVMValueRef ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, ""); + ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, ""); LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start, ""); LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr); @@ -4067,6 +4067,9 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMValueRef store_instr = LLVMBuildStore(g->builder, result, result_loc); LLVMSetAlignment(store_instr, get_ptr_align(g, instruction->result_loc->value.type)); return result_loc; + } else if (!callee_is_async && instruction->is_async) { + LLVMBuildStore(g->builder, result, ret_ptr); + return result_loc; } else { return result; } @@ -5498,6 +5501,44 @@ static LLVMValueRef ir_render_suspend_finish(CodeGen *g, IrExecutable 
*executabl return nullptr; } +static LLVMValueRef gen_await_early_return(CodeGen *g, IrInstruction *source_instr, + LLVMValueRef target_frame_ptr, ZigType *result_type, ZigType *ptr_result_type, + LLVMValueRef result_loc, bool non_async) +{ + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; + LLVMValueRef their_result_ptr = nullptr; + if (type_has_bits(result_type) && (non_async || result_loc != nullptr)) { + LLVMValueRef their_result_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start, ""); + their_result_ptr = LLVMBuildLoad(g->builder, their_result_ptr_ptr, ""); + if (result_loc != nullptr) { + LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0); + LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, result_loc, ptr_u8, ""); + LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, their_result_ptr, ptr_u8, ""); + bool is_volatile = false; + uint32_t abi_align = get_abi_alignment(g, result_type); + LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, result_type), false); + ZigLLVMBuildMemCpy(g->builder, + dest_ptr_casted, abi_align, + src_ptr_casted, abi_align, byte_count_val, is_volatile); + } + } + if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) { + LLVMValueRef their_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, + frame_index_trace_arg(g, result_type), ""); + LLVMValueRef src_trace_ptr = LLVMBuildLoad(g->builder, their_trace_ptr_ptr, ""); + LLVMValueRef dest_trace_ptr = get_cur_err_ret_trace_val(g, source_instr->scope); + LLVMValueRef args[] = { dest_trace_ptr, src_trace_ptr }; + ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2, + get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, ""); + } + if (non_async && type_has_bits(result_type)) { + LLVMValueRef result_ptr = (result_loc == nullptr) ? their_result_ptr : result_loc; + return get_handle_value(g, result_ptr, result_type, ptr_result_type); + } else { + return nullptr; + } +} + static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInstructionAwaitGen *instruction) { LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; LLVMValueRef zero = LLVMConstNull(usize_type_ref); @@ -5505,6 +5546,14 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst ZigType *result_type = instruction->base.value.type; ZigType *ptr_result_type = get_pointer_to_type(g, result_type, true); + LLVMValueRef result_loc = (instruction->result_loc == nullptr) ? + nullptr : ir_llvm_value(g, instruction->result_loc); + + if (instruction->target_fn != nullptr && !fn_is_async(instruction->target_fn)) { + return gen_await_early_return(g, &instruction->base, target_frame_ptr, result_type, + ptr_result_type, result_loc, true); + } + // Prepare to be suspended LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "AwaitResume"); LLVMBasicBlockRef end_bb = LLVMAppendBasicBlock(g->cur_fn_val, "AwaitEnd"); @@ -5512,9 +5561,8 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst // At this point resuming the function will continue from resume_bb. // This code is as if it is running inside the suspend block. + // supply the awaiter return pointer - LLVMValueRef result_loc = (instruction->result_loc == nullptr) ? 
- nullptr : ir_llvm_value(g, instruction->result_loc); if (type_has_bits(result_type)) { LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start + 1, ""); if (result_loc == nullptr) { @@ -5562,28 +5610,8 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst // Early return: The async function has already completed. We must copy the result and // the error return trace if applicable. LLVMPositionBuilderAtEnd(g->builder, early_return_block); - if (type_has_bits(result_type) && result_loc != nullptr) { - LLVMValueRef their_result_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start, ""); - LLVMValueRef their_result_ptr = LLVMBuildLoad(g->builder, their_result_ptr_ptr, ""); - LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0); - LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, result_loc, ptr_u8, ""); - LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, their_result_ptr, ptr_u8, ""); - bool is_volatile = false; - uint32_t abi_align = get_abi_alignment(g, result_type); - LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, result_type), false); - ZigLLVMBuildMemCpy(g->builder, - dest_ptr_casted, abi_align, - src_ptr_casted, abi_align, byte_count_val, is_volatile); - } - if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) { - LLVMValueRef their_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, - frame_index_trace_arg(g, result_type), ""); - LLVMValueRef src_trace_ptr = LLVMBuildLoad(g->builder, their_trace_ptr_ptr, ""); - LLVMValueRef dest_trace_ptr = get_cur_err_ret_trace_val(g, instruction->base.scope); - LLVMValueRef args[] = { dest_trace_ptr, src_trace_ptr }; - ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2, - get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, ""); - } + gen_await_early_return(g, &instruction->base, target_frame_ptr, result_type, ptr_result_type, + result_loc, false); LLVMBuildBr(g->builder, end_bb); LLVMPositionBuilderAtEnd(g->builder, resume_bb); diff --git a/src/error.cpp b/src/error.cpp index 753aeb292a..86df76ed4e 100644 --- a/src/error.cpp +++ b/src/error.cpp @@ -56,6 +56,7 @@ const char *err_str(Error err) { case ErrorNoSpaceLeft: return "no space left"; case ErrorNoCCompilerInstalled: return "no C compiler installed"; case ErrorNotLazy: return "not lazy"; + case ErrorIsAsync: return "is async"; } return "(invalid error)"; } diff --git a/src/ir.cpp b/src/ir.cpp index ec414a5adc..d53042fedf 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -3268,7 +3268,7 @@ static IrInstruction *ir_build_await_src(IrBuilder *irb, Scope *scope, AstNode * return &instruction->base; } -static IrInstruction *ir_build_await_gen(IrAnalyze *ira, IrInstruction *source_instruction, +static IrInstructionAwaitGen *ir_build_await_gen(IrAnalyze *ira, IrInstruction *source_instruction, IrInstruction *frame, ZigType *result_type, IrInstruction *result_loc) { IrInstructionAwaitGen *instruction = ir_build_instruction(&ira->new_irb, @@ -3280,7 +3280,7 @@ static IrInstruction *ir_build_await_gen(IrAnalyze *ira, IrInstruction *source_i ir_ref_instruction(frame, ira->new_irb.current_basic_block); if (result_loc != nullptr) ir_ref_instruction(result_loc, ira->new_irb.current_basic_block); - return &instruction->base; + return instruction; } static IrInstruction *ir_build_resume(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *frame) { @@ -24763,18 +24763,22 @@ static IrInstruction 
*ir_analyze_instruction_suspend_finish(IrAnalyze *ira, } static IrInstruction *analyze_frame_ptr_to_anyframe_T(IrAnalyze *ira, IrInstruction *source_instr, - IrInstruction *frame_ptr) + IrInstruction *frame_ptr, ZigFn **target_fn) { if (type_is_invalid(frame_ptr->value.type)) return ira->codegen->invalid_instruction; + *target_fn = nullptr; + ZigType *result_type; IrInstruction *frame; if (frame_ptr->value.type->id == ZigTypeIdPointer && frame_ptr->value.type->data.pointer.ptr_len == PtrLenSingle && frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdFnFrame) { - result_type = frame_ptr->value.type->data.pointer.child_type->data.frame.fn->type_entry->data.fn.fn_type_id.return_type; + ZigFn *func = frame_ptr->value.type->data.pointer.child_type->data.frame.fn; + result_type = func->type_entry->data.fn.fn_type_id.return_type; + *target_fn = func; frame = frame_ptr; } else { frame = ir_get_deref(ira, source_instr, frame_ptr, nullptr); @@ -24782,7 +24786,9 @@ static IrInstruction *analyze_frame_ptr_to_anyframe_T(IrAnalyze *ira, IrInstruct frame->value.type->data.pointer.ptr_len == PtrLenSingle && frame->value.type->data.pointer.child_type->id == ZigTypeIdFnFrame) { - result_type = frame->value.type->data.pointer.child_type->data.frame.fn->type_entry->data.fn.fn_type_id.return_type; + ZigFn *func = frame->value.type->data.pointer.child_type->data.frame.fn; + result_type = func->type_entry->data.fn.fn_type_id.return_type; + *target_fn = func; } else if (frame->value.type->id != ZigTypeIdAnyFrame || frame->value.type->data.any_frame.result_type == nullptr) { @@ -24803,7 +24809,11 @@ static IrInstruction *analyze_frame_ptr_to_anyframe_T(IrAnalyze *ira, IrInstruct } static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstructionAwaitSrc *instruction) { - IrInstruction *frame = analyze_frame_ptr_to_anyframe_T(ira, &instruction->base, instruction->frame->child); + IrInstruction *operand = instruction->frame->child; + if (type_is_invalid(operand->value.type)) + return ira->codegen->invalid_instruction; + ZigFn *target_fn; + IrInstruction *frame = analyze_frame_ptr_to_anyframe_T(ira, &instruction->base, operand, &target_fn); if (type_is_invalid(frame->value.type)) return ira->codegen->invalid_instruction; @@ -24812,8 +24822,11 @@ static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstruction ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec); ir_assert(fn_entry != nullptr, &instruction->base); - if (fn_entry->inferred_async_node == nullptr) { - fn_entry->inferred_async_node = instruction->base.source_node; + // If it's not @Frame(func) then it's definitely a suspend point + if (target_fn == nullptr) { + if (fn_entry->inferred_async_node == nullptr) { + fn_entry->inferred_async_node = instruction->base.source_node; + } } if (type_can_fail(result_type)) { @@ -24830,8 +24843,10 @@ static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstruction result_loc = nullptr; } - IrInstruction *result = ir_build_await_gen(ira, &instruction->base, frame, result_type, result_loc); - return ir_finish_anal(ira, result); + IrInstructionAwaitGen *result = ir_build_await_gen(ira, &instruction->base, frame, result_type, result_loc); + result->target_fn = target_fn; + fn_entry->await_list.append(result); + return ir_finish_anal(ira, &result->base); } static IrInstruction *ir_analyze_instruction_resume(IrAnalyze *ira, IrInstructionResume *instruction) { diff --git a/src/userland.h b/src/userland.h index 43356438fd..c92caf327e 100644 --- a/src/userland.h +++ 
b/src/userland.h @@ -76,6 +76,7 @@ enum Error { ErrorBrokenPipe, ErrorNoSpaceLeft, ErrorNotLazy, + ErrorIsAsync, }; // ABI warning diff --git a/test/stage1/behavior/async_fn.zig b/test/stage1/behavior/async_fn.zig index ccfc4d1ea6..dfed1c4ab7 100644 --- a/test/stage1/behavior/async_fn.zig +++ b/test/stage1/behavior/async_fn.zig @@ -844,3 +844,13 @@ test "cast fn to async fn when it is inferred to be async" { resume S.frame; expect(S.ok); } + +test "await does not force async if callee is blocking" { + const S = struct { + fn simple() i32 { + return 1234; + } + }; + var x = async S.simple(); + expect(await x == 1234); +} From e9a4bcbcc6ac89c5526a6baaf2b0df49d0577eb4 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 29 Aug 2019 22:44:07 -0400 Subject: [PATCH 12/12] fix regressions --- src/analyze.cpp | 22 +++++++++++++++++++--- src/ir.cpp | 4 +++- test/compile_errors.zig | 8 ++++---- 3 files changed, 26 insertions(+), 8 deletions(-) diff --git a/src/analyze.cpp b/src/analyze.cpp index d1c79f9a1a..0bc42cc971 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -4174,8 +4174,14 @@ static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) { assert(fn->inferred_async_node != inferred_async_checking); assert(fn->inferred_async_node != inferred_async_none); if (fn->inferred_async_fn != nullptr) { - ErrorMsg *new_msg = add_error_note(g, msg, fn->inferred_async_node, - buf_sprintf("async function call here")); + ErrorMsg *new_msg; + if (fn->inferred_async_node->type == NodeTypeAwaitExpr) { + new_msg = add_error_note(g, msg, fn->inferred_async_node, + buf_create_from_str("await here is a suspend point")); + } else { + new_msg = add_error_note(g, msg, fn->inferred_async_node, + buf_sprintf("async function call here")); + } return add_async_error_notes(g, new_msg, fn->inferred_async_fn); } else if (fn->inferred_async_node->type == NodeTypeFnProto) { add_error_note(g, msg, fn->inferred_async_node, @@ -4185,7 +4191,7 @@ static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) { buf_sprintf("suspends here")); } else if (fn->inferred_async_node->type == NodeTypeAwaitExpr) { add_error_note(g, msg, fn->inferred_async_node, - buf_sprintf("await is a suspend point")); + buf_sprintf("await here is a suspend point")); } else if (fn->inferred_async_node->type == NodeTypeFnCallExpr && fn->inferred_async_node->data.fn_call_expr.is_builtin) { @@ -4240,6 +4246,16 @@ static Error analyze_callee_async(CodeGen *g, ZigFn *fn, ZigFn *callee, AstNode add_async_error_notes(g, msg, fn); return ErrorSemanticAnalyzeFail; } + if (fn->assumed_non_async != nullptr) { + ErrorMsg *msg = add_node_error(g, fn->proto_node, + buf_sprintf("unable to infer whether '%s' should be async", + buf_ptr(&fn->symbol_name))); + add_error_note(g, msg, fn->assumed_non_async, + buf_sprintf("assumed to be non-async here")); + add_async_error_notes(g, msg, fn); + fn->anal_state = FnAnalStateInvalid; + return ErrorSemanticAnalyzeFail; + } return ErrorIsAsync; } return ErrorNone; diff --git a/src/ir.cpp b/src/ir.cpp index d53042fedf..dfe9132e2d 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -10640,7 +10640,9 @@ static void ir_finish_bb(IrAnalyze *ira) { static IrInstruction *ir_unreach_error(IrAnalyze *ira) { ira->old_bb_index = SIZE_MAX; - assert(ira->new_irb.exec->first_err_trace_msg != nullptr); + if (ira->new_irb.exec->first_err_trace_msg == nullptr) { + ira->new_irb.exec->first_err_trace_msg = ira->codegen->trace_err; + } return ira->codegen->unreach_instruction; } diff --git a/test/compile_errors.zig 
b/test/compile_errors.zig
b/test/compile_errors.zig index 812b236716..91916e6f38 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -273,7 +273,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\} , "tmp.zig:1:1: error: function with calling convention 'ccc' cannot be async", - "tmp.zig:3:18: note: await is a suspend point", + "tmp.zig:3:18: note: await here is a suspend point", ); cases.add( @@ -507,11 +507,11 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { cases.add( "@sizeOf bad type", - \\export fn entry() void { - \\ _ = @sizeOf(@typeOf(null)); + \\export fn entry() usize { + \\ return @sizeOf(@typeOf(null)); \\} , - "tmp.zig:2:17: error: no size available for type '(null)'", + "tmp.zig:2:20: error: no size available for type '(null)'", ); cases.add(