From 0046551852fe97dca2c295b84be3b0b4c3004e45 Mon Sep 17 00:00:00 2001
From: Nathan Sharp
Date: Mon, 23 Jul 2018 23:24:53 -0700
Subject: [PATCH 01/37] std.io: PeekStream and SliceStream
SliceStream is a read-only stream wrapper around a slice of bytes. It
allows algorithms which work on InStreams to be used on in-memory data.

PeekStream is a stream wrapper which allows "putting back" bytes into
the stream so that they can be read again, which makes look-ahead
parsers easier to write.
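
A minimal usage sketch, mirroring the new tests below (bytes and dest
are assumed to be a source slice and a destination buffer):

    var ss = io.SliceStream.init(bytes);
    var ps = io.PeekStream(2, io.SliceStream.Error).init(&ss.stream);
    ps.putBackByte(0xff); // returned by the next read, before the slice data
    const n = try ps.stream.read(dest[0..]);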
---
std/io.zig | 98 +++++++++++++++++++++++++++++++++++++++++++++++++
std/io_test.zig | 51 +++++++++++++++++++++++++
2 files changed, 149 insertions(+)
diff --git a/std/io.zig b/std/io.zig
index 1c468f6f4f..71a9822399 100644
--- a/std/io.zig
+++ b/std/io.zig
@@ -331,6 +331,104 @@ pub fn BufferedInStreamCustom(comptime buffer_size: usize, comptime Error: type)
};
}
+/// Creates a stream which supports 'un-reading' data, so that it can be read again.
+/// This makes look-ahead style parsing much easier.
+pub fn PeekStream(comptime buffer_size: usize, comptime InStreamError: type) type {
+ return struct {
+ const Self = this;
+ pub const Error = InStreamError;
+ pub const Stream = InStream(Error);
+
+ pub stream: Stream,
+ base: *Stream,
+
+ // Right now the look-ahead space is statically allocated, but a version with dynamic allocation
+ // is not too difficult to derive from this.
+ buffer: [buffer_size]u8,
+ index: usize,
+ at_end: bool,
+
+ pub fn init(base: *Stream) Self {
+ return Self{
+ .base = base,
+ .buffer = undefined,
+ .index = 0,
+ .at_end = false,
+ .stream = Stream{ .readFn = readFn },
+ };
+ }
+
+ pub fn putBackByte(self: *Self, byte: u8) void {
+ self.buffer[self.index] = byte;
+ self.index += 1;
+ }
+
+ pub fn putBack(self: *Self, bytes: []const u8) void {
+ var pos = bytes.len;
+ while (pos != 0) {
+ pos -= 1;
+ self.putBackByte(bytes[pos]);
+ }
+ }
+
+ fn readFn(in_stream: *Stream, dest: []u8) Error!usize {
+ const self = @fieldParentPtr(Self, "stream", in_stream);
+
+ // copy over anything putBack()'d
+ var pos: usize = 0;
+ while (pos < dest.len and self.index != 0) {
+ dest[pos] = self.buffer[self.index - 1];
+ self.index -= 1;
+ pos += 1;
+ }
+
+ if (pos == dest.len or self.at_end) {
+ return pos;
+ }
+
+ // ask the backing stream for more
+ const left = dest.len - pos;
+ const read = try self.base.read(dest[pos..]);
+ assert(read <= left);
+
+ self.at_end = (read < left);
+ return pos + read;
+ }
+
+ };
+}
+
+pub const SliceStream = struct {
+ const Self = this;
+ pub const Error = error { };
+ pub const Stream = InStream(Error);
+
+ pub stream: Stream,
+
+ pos: usize,
+ slice: []const u8,
+
+ pub fn init(slice: []const u8) Self {
+ return Self{
+ .slice = slice,
+ .pos = 0,
+ .stream = Stream{ .readFn = readFn },
+ };
+ }
+
+ fn readFn(in_stream: *Stream, dest: []u8) Error!usize {
+ const self = @fieldParentPtr(Self, "stream", in_stream);
+ const size = math.min(dest.len, self.slice.len - self.pos);
+ const end = self.pos + size;
+
+ mem.copy(u8, dest[0..size], self.slice[self.pos..end]);
+ self.pos = end;
+
+ return size;
+ }
+
+};
+
pub fn BufferedOutStream(comptime Error: type) type {
return BufferedOutStreamCustom(os.page_size, Error);
}
diff --git a/std/io_test.zig b/std/io_test.zig
index 301a9a4cd0..8d5c35c5fd 100644
--- a/std/io_test.zig
+++ b/std/io_test.zig
@@ -60,3 +60,54 @@ test "BufferOutStream" {
assert(mem.eql(u8, buffer.toSlice(), "x: 42\ny: 1234\n"));
}
+
+test "SliceStream" {
+ const bytes = []const u8 { 1, 2, 3, 4, 5, 6, 7 };
+ var ss = io.SliceStream.init(bytes);
+
+ var dest: [4]u8 = undefined;
+
+ var read = try ss.stream.read(dest[0..4]);
+ assert(read == 4);
+ assert(mem.eql(u8, dest[0..4], bytes[0..4]));
+
+ read = try ss.stream.read(dest[0..4]);
+ assert(read == 3);
+ assert(mem.eql(u8, dest[0..3], bytes[4..7]));
+
+ read = try ss.stream.read(dest[0..4]);
+ assert(read == 0);
+}
+
+test "PeekStream" {
+ const bytes = []const u8 { 1, 2, 3, 4, 5, 6, 7, 8 };
+ var ss = io.SliceStream.init(bytes);
+ var ps = io.PeekStream(2, io.SliceStream.Error).init(&ss.stream);
+
+ var dest: [4]u8 = undefined;
+
+ ps.putBackByte(9);
+ ps.putBackByte(10);
+
+ var read = try ps.stream.read(dest[0..4]);
+ assert(read == 4);
+ assert(dest[0] == 10);
+ assert(dest[1] == 9);
+ assert(mem.eql(u8, dest[2..4], bytes[0..2]));
+
+ read = try ps.stream.read(dest[0..4]);
+ assert(read == 4);
+ assert(mem.eql(u8, dest[0..4], bytes[2..6]));
+
+ read = try ps.stream.read(dest[0..4]);
+ assert(read == 2);
+ assert(mem.eql(u8, dest[0..2], bytes[6..8]));
+
+ ps.putBackByte(11);
+ ps.putBackByte(12);
+
+ read = try ps.stream.read(dest[0..4]);
+ assert(read == 2);
+ assert(dest[0] == 12);
+ assert(dest[1] == 11);
+}
From 29e19ace362e7a1910b9f105257f2bce2491e32b Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 24 Jul 2018 10:13:40 -0400
Subject: [PATCH 02/37] fix logic for determining whether param requires
comptime
closes #778
closes #1213
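
For illustration, the shape of code this now catches (taken from the
new compile-error test below); marking the parameter comptime is the
assumed fix:

    fn f(_: fn (var) void) void {} // error: parameter of type
                                   // 'fn(var)var' must be declared comptime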
---
src/analyze.cpp | 14 +++++++++-----
test/compile_errors.zig | 11 +++++++++++
2 files changed, 20 insertions(+), 5 deletions(-)
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 6bbe5f6037..f399ab8305 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -1585,10 +1585,6 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c
case TypeTableEntryIdBlock:
case TypeTableEntryIdBoundFn:
case TypeTableEntryIdMetaType:
- add_node_error(g, param_node->data.param_decl.type,
- buf_sprintf("parameter of type '%s' must be declared comptime",
- buf_ptr(&type_entry->name)));
- return g->builtin_types.entry_invalid;
case TypeTableEntryIdVoid:
case TypeTableEntryIdBool:
case TypeTableEntryIdInt:
@@ -1603,6 +1599,13 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c
case TypeTableEntryIdUnion:
case TypeTableEntryIdFn:
case TypeTableEntryIdPromise:
+ type_ensure_zero_bits_known(g, type_entry);
+ if (type_requires_comptime(type_entry)) {
+ add_node_error(g, param_node->data.param_decl.type,
+ buf_sprintf("parameter of type '%s' must be declared comptime",
+ buf_ptr(&type_entry->name)));
+ return g->builtin_types.entry_invalid;
+ }
break;
}
FnTypeParamInfo *param_info = &fn_type_id.param_info[fn_type_id.next_param_index];
@@ -5019,9 +5022,10 @@ bool type_requires_comptime(TypeTableEntry *type_entry) {
} else {
return type_requires_comptime(type_entry->data.pointer.child_type);
}
+ case TypeTableEntryIdFn:
+ return type_entry->data.fn.is_generic;
case TypeTableEntryIdEnum:
case TypeTableEntryIdErrorSet:
- case TypeTableEntryIdFn:
case TypeTableEntryIdBool:
case TypeTableEntryIdInt:
case TypeTableEntryIdFloat:
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index d5582b1584..b7bd39f29e 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -1,6 +1,17 @@
const tests = @import("tests.zig");
pub fn addCases(cases: *tests.CompileErrorContext) void {
+ cases.add(
+ "generic fn as parameter without comptime keyword",
+ \\fn f(_: fn (var) void) void {}
+ \\fn g(_: var) void {}
+ \\export fn entry() void {
+ \\ f(g);
+ \\}
+ ,
+ ".tmp_source.zig:1:9: error: parameter of type 'fn(var)var' must be declared comptime",
+ );
+
cases.add(
"optional pointer to void in extern struct",
\\comptime {
From 1d4a94b63525b7f9a980069de1807d03d0ad98e0 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 24 Jul 2018 11:04:01 -0400
Subject: [PATCH 03/37] remove old section from readme
we still want all these people, but I think there are better ways
to communicate this than the readme file.
---
README.md | 38 --------------------------------------
1 file changed, 38 deletions(-)
diff --git a/README.md b/README.md
index 6e582a27e7..2ee3f178ce 100644
--- a/README.md
+++ b/README.md
@@ -74,44 +74,6 @@ that counts as "freestanding" for the purposes of this table.
* Reddit: [/r/zig](https://www.reddit.com/r/zig)
* Email list: [ziglang@googlegroups.com](https://groups.google.com/forum/#!forum/ziglang)
-### Wanted: Windows Developers
-
-Flesh out the standard library for Windows, streamline Zig installation and
-distribution for Windows. Work with LLVM and LLD teams to improve
-PDB/CodeView/MSVC debugging. Implement stack traces for Windows in the MinGW
-environment and the MSVC environment.
-
-### Wanted: MacOS and iOS Developers
-
-Flesh out the standard library for MacOS. Improve the MACH-O linker. Implement
-stack traces for MacOS. Streamline the process of using Zig to build for
-iOS.
-
-### Wanted: Android Developers
-
-Flesh out the standard library for Android. Streamline the process of using
-Zig to build for Android and for depending on Zig code on Android.
-
-### Wanted: Web Developers
-
-Figure out what are the use cases for compiling Zig to WebAssembly. Create demo
-projects with it and streamline experience for users trying to output
-WebAssembly. Work on the documentation generator outputting useful searchable html
-documentation. Create Zig modules for common web tasks such as WebSockets and gzip.
-
-### Wanted: Embedded Developers
-
-Flesh out the standard library for uncommon CPU architectures and OS targets.
-Drive issue discussion for cross compiling and using Zig in constrained
-or unusual environments.
-
-### Wanted: Game Developers
-
-Create cross platform Zig modules to compete with SDL and GLFW. Create an
-OpenGL library that does not depend on libc. Drive the usability of Zig
-for video games. Create a general purpose allocator that does not depend on
-libc. Create demo games using Zig.
-
## Building
[![Build Status](https://travis-ci.org/ziglang/zig.svg?branch=master)](https://travis-ci.org/ziglang/zig)
From 2ea08561cf69dabc99722ffc24cb0e4327605506 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 24 Jul 2018 14:20:49 -0400
Subject: [PATCH 04/37] self-hosted: function types use table lookup
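
The lookup follows the same get-or-create pattern already used for the
int, array, and pointer type tables. Excerpted from the new Type.Fn.get
below:

    const held = await (async comp.fn_type_table.acquire() catch unreachable);
    defer held.release();
    if (held.value.get(&key)) |entry| {
        entry.value.base.base.ref();
        return entry.value; // existing interned fn type
    }
    // otherwise: construct the type, then insert it under the same lock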
---
src-self-hosted/codegen.zig | 3 +-
src-self-hosted/compilation.zig | 69 ++++++-
src-self-hosted/ir.zig | 8 +-
src-self-hosted/type.zig | 342 ++++++++++++++++++++++++++------
src/analyze.cpp | 8 +-
5 files changed, 358 insertions(+), 72 deletions(-)
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index ad3dce061e..88293c845e 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -168,6 +168,7 @@ pub fn renderToLlvmModule(ofile: *ObjectFile, fn_val: *Value.Fn, code: *ir.Code)
//}
const fn_type = fn_val.base.typ.cast(Type.Fn).?;
+ const fn_type_normal = &fn_type.key.data.Normal;
try addLLVMFnAttr(ofile, llvm_fn, "nounwind");
//add_uwtable_attr(g, fn_table_entry->llvm_value);
@@ -209,7 +210,7 @@ pub fn renderToLlvmModule(ofile: *ObjectFile, fn_val: *Value.Fn, code: *ir.Code)
// addLLVMArgAttr(fn_table_entry->llvm_value, (unsigned)err_ret_trace_arg_index, "nonnull");
//}
- const cur_ret_ptr = if (fn_type.return_type.handleIsPtr()) llvm.GetParam(llvm_fn, 0) else null;
+ const cur_ret_ptr = if (fn_type_normal.return_type.handleIsPtr()) llvm.GetParam(llvm_fn, 0) else null;
// build all basic blocks
for (code.basic_block_list.toSlice()) |bb| {
diff --git a/src-self-hosted/compilation.zig b/src-self-hosted/compilation.zig
index 093aab21da..8d41e2439b 100644
--- a/src-self-hosted/compilation.zig
+++ b/src-self-hosted/compilation.zig
@@ -220,12 +220,14 @@ pub const Compilation = struct {
int_type_table: event.Locked(IntTypeTable),
array_type_table: event.Locked(ArrayTypeTable),
ptr_type_table: event.Locked(PtrTypeTable),
+ fn_type_table: event.Locked(FnTypeTable),
c_int_types: [CInt.list.len]*Type.Int,
const IntTypeTable = std.HashMap(*const Type.Int.Key, *Type.Int, Type.Int.Key.hash, Type.Int.Key.eql);
const ArrayTypeTable = std.HashMap(*const Type.Array.Key, *Type.Array, Type.Array.Key.hash, Type.Array.Key.eql);
const PtrTypeTable = std.HashMap(*const Type.Pointer.Key, *Type.Pointer, Type.Pointer.Key.hash, Type.Pointer.Key.eql);
+ const FnTypeTable = std.HashMap(*const Type.Fn.Key, *Type.Fn, Type.Fn.Key.hash, Type.Fn.Key.eql);
const TypeTable = std.HashMap([]const u8, *Type, mem.hash_slice_u8, mem.eql_slice_u8);
const CompileErrList = std.ArrayList(*Msg);
@@ -384,6 +386,7 @@ pub const Compilation = struct {
.int_type_table = event.Locked(IntTypeTable).init(loop, IntTypeTable.init(loop.allocator)),
.array_type_table = event.Locked(ArrayTypeTable).init(loop, ArrayTypeTable.init(loop.allocator)),
.ptr_type_table = event.Locked(PtrTypeTable).init(loop, PtrTypeTable.init(loop.allocator)),
+ .fn_type_table = event.Locked(FnTypeTable).init(loop, FnTypeTable.init(loop.allocator)),
.c_int_types = undefined,
.meta_type = undefined,
@@ -414,6 +417,7 @@ pub const Compilation = struct {
comp.int_type_table.private_data.deinit();
comp.array_type_table.private_data.deinit();
comp.ptr_type_table.private_data.deinit();
+ comp.fn_type_table.private_data.deinit();
comp.arena_allocator.deinit();
comp.loop.allocator.destroy(comp);
}
@@ -1160,10 +1164,47 @@ async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void {
fn_decl.value = Decl.Fn.Val{ .Fn = fn_val };
symbol_name_consumed = true;
+ // Define local parameter variables
+ //for (size_t i = 0; i < fn_type_id->param_count; i += 1) {
+ // FnTypeParamInfo *param_info = &fn_type_id->param_info[i];
+ // AstNode *param_decl_node = get_param_decl_node(fn_table_entry, i);
+ // Buf *param_name;
+ // bool is_var_args = param_decl_node && param_decl_node->data.param_decl.is_var_args;
+ // if (param_decl_node && !is_var_args) {
+ // param_name = param_decl_node->data.param_decl.name;
+ // } else {
+ // param_name = buf_sprintf("arg%" ZIG_PRI_usize "", i);
+ // }
+ // if (param_name == nullptr) {
+ // continue;
+ // }
+
+ // TypeTableEntry *param_type = param_info->type;
+ // bool is_noalias = param_info->is_noalias;
+
+ // if (is_noalias && get_codegen_ptr_type(param_type) == nullptr) {
+ // add_node_error(g, param_decl_node, buf_sprintf("noalias on non-pointer parameter"));
+ // }
+
+ // VariableTableEntry *var = add_variable(g, param_decl_node, fn_table_entry->child_scope,
+ // param_name, true, create_const_runtime(param_type), nullptr);
+ // var->src_arg_index = i;
+ // fn_table_entry->child_scope = var->child_scope;
+ // var->shadowable = var->shadowable || is_var_args;
+
+ // if (type_has_bits(param_type)) {
+ // fn_table_entry->variable_list.append(var);
+ // }
+
+ // if (fn_type->data.fn.gen_param_info) {
+ // var->gen_arg_index = fn_type->data.fn.gen_param_info[i].gen_index;
+ // }
+ //}
+
const analyzed_code = try await (async comp.genAndAnalyzeCode(
&fndef_scope.base,
body_node,
- fn_type.return_type,
+ fn_type.key.data.Normal.return_type,
) catch unreachable);
errdefer analyzed_code.destroy(comp.gpa());
@@ -1199,14 +1240,13 @@ async fn analyzeFnType(comp: *Compilation, scope: *Scope, fn_proto: *ast.Node.Fn
var params = ArrayList(Type.Fn.Param).init(comp.gpa());
var params_consumed = false;
- defer if (params_consumed) {
+ defer if (!params_consumed) {
for (params.toSliceConst()) |param| {
param.typ.base.deref(comp);
}
params.deinit();
};
- const is_var_args = false;
{
var it = fn_proto.params.iterator(0);
while (it.next()) |param_node_ptr| {
@@ -1219,8 +1259,29 @@ async fn analyzeFnType(comp: *Compilation, scope: *Scope, fn_proto: *ast.Node.Fn
});
}
}
- const fn_type = try Type.Fn.create(comp, return_type, params.toOwnedSlice(), is_var_args);
+
+ const key = Type.Fn.Key{
+ .alignment = null,
+ .data = Type.Fn.Key.Data{
+ .Normal = Type.Fn.Normal{
+ .return_type = return_type,
+ .params = params.toOwnedSlice(),
+ .is_var_args = false, // TODO
+ .cc = Type.Fn.CallingConvention.Auto, // TODO
+ },
+ },
+ };
params_consumed = true;
+ var key_consumed = false;
+ defer if (!key_consumed) {
+ for (key.data.Normal.params) |param| {
+ param.typ.base.deref(comp);
+ }
+ comp.gpa().free(key.data.Normal.params);
+ };
+
+ const fn_type = try await (async Type.Fn.get(comp, key) catch unreachable);
+ key_consumed = true;
errdefer fn_type.base.base.deref(comp);
return fn_type;
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index c34f06753d..45355bbf2c 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -281,11 +281,13 @@ pub const Inst = struct {
return error.SemanticAnalysisFailed;
};
- if (fn_type.params.len != self.params.args.len) {
+ const fn_type_param_count = fn_type.paramCount();
+
+ if (fn_type_param_count != self.params.args.len) {
try ira.addCompileError(
self.base.span,
"expected {} arguments, found {}",
- fn_type.params.len,
+ fn_type_param_count,
self.params.args.len,
);
return error.SemanticAnalysisFailed;
@@ -299,7 +301,7 @@ pub const Inst = struct {
.fn_ref = fn_ref,
.args = args,
});
- new_inst.val = IrVal{ .KnownType = fn_type.return_type };
+ new_inst.val = IrVal{ .KnownType = fn_type.key.data.Normal.return_type };
return new_inst;
}
diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig
index 217c1d50a7..3b57260447 100644
--- a/src-self-hosted/type.zig
+++ b/src-self-hosted/type.zig
@@ -221,57 +221,267 @@ pub const Type = struct {
pub const Fn = struct {
base: Type,
- return_type: *Type,
- params: []Param,
- is_var_args: bool,
+ key: Key,
+ garbage_node: std.atomic.Stack(*Fn).Node,
+
+ pub const Key = struct {
+ data: Data,
+ alignment: ?u32,
+
+ pub const Data = union(enum) {
+ Generic: Generic,
+ Normal: Normal,
+ };
+
+ pub fn hash(self: *const Key) u32 {
+ var result: u32 = 0;
+ result +%= hashAny(self.alignment, 0);
+ switch (self.data) {
+ Data.Generic => |generic| {
+ result +%= hashAny(generic.param_count, 1);
+ switch (generic.cc) {
+ CallingConvention.Async => |allocator_type| result +%= hashAny(allocator_type, 2),
+ else => result +%= hashAny(CallingConvention(generic.cc), 3),
+ }
+ },
+ Data.Normal => |normal| {
+ result +%= hashAny(normal.return_type, 4);
+ result +%= hashAny(normal.is_var_args, 5);
+ result +%= hashAny(normal.cc, 6);
+ for (normal.params) |param| {
+ result +%= hashAny(param.is_noalias, 7);
+ result +%= hashAny(param.typ, 8);
+ }
+ },
+ }
+ return result;
+ }
+
+ pub fn eql(self: *const Key, other: *const Key) bool {
+ if ((self.alignment == null) != (other.alignment == null)) return false;
+ if (self.alignment) |self_align| {
+ if (self_align != other.alignment.?) return false;
+ }
+ if (@TagType(Data)(self.data) != @TagType(Data)(other.data)) return false;
+ switch (self.data) {
+ Data.Generic => |*self_generic| {
+ const other_generic = &other.data.Generic;
+ if (self_generic.param_count != other_generic.param_count) return false;
+ if (CallingConvention(self_generic.cc) != CallingConvention(other_generic.cc)) return false;
+ switch (self_generic.cc) {
+ CallingConvention.Async => |self_allocator_type| {
+ const other_allocator_type = other_generic.cc.Async;
+ if (self_allocator_type != other_allocator_type) return false;
+ },
+ else => {},
+ }
+ },
+ Data.Normal => |*self_normal| {
+ const other_normal = &other.data.Normal;
+ if (self_normal.cc != other_normal.cc) return false;
+ if (self_normal.is_var_args != other_normal.is_var_args) return false;
+ if (self_normal.return_type != other_normal.return_type) return false;
+ for (self_normal.params) |*self_param, i| {
+ const other_param = &other_normal.params[i];
+ if (self_param.is_noalias != other_param.is_noalias) return false;
+ if (self_param.typ != other_param.typ) return false;
+ }
+ },
+ }
+ return true;
+ }
+
+ pub fn deref(key: Key, comp: *Compilation) void {
+ switch (key.data) {
+ Key.Data.Generic => |generic| {
+ switch (generic.cc) {
+ CallingConvention.Async => |allocator_type| allocator_type.base.deref(comp),
+ else => {},
+ }
+ },
+ Key.Data.Normal => |normal| {
+ normal.return_type.base.deref(comp);
+ for (normal.params) |param| {
+ param.typ.base.deref(comp);
+ }
+ },
+ }
+ }
+
+ pub fn ref(key: Key) void {
+ switch (key.data) {
+ Key.Data.Generic => |generic| {
+ switch (generic.cc) {
+ CallingConvention.Async => |allocator_type| allocator_type.base.ref(),
+ else => {},
+ }
+ },
+ Key.Data.Normal => |normal| {
+ normal.return_type.base.ref();
+ for (normal.params) |param| {
+ param.typ.base.ref();
+ }
+ },
+ }
+ }
+ };
+
+ pub const Normal = struct {
+ params: []Param,
+ return_type: *Type,
+ is_var_args: bool,
+ cc: CallingConvention,
+ };
+
+ pub const Generic = struct {
+ param_count: usize,
+ cc: CC,
+
+ pub const CC = union(CallingConvention) {
+ Auto,
+ C,
+ Cold,
+ Naked,
+ Stdcall,
+ Async: *Type, // allocator type
+ };
+ };
+
+ pub const CallingConvention = enum {
+ Auto,
+ C,
+ Cold,
+ Naked,
+ Stdcall,
+ Async,
+ };
pub const Param = struct {
is_noalias: bool,
typ: *Type,
};
- pub fn create(comp: *Compilation, return_type: *Type, params: []Param, is_var_args: bool) !*Fn {
- const result = try comp.gpa().create(Fn{
- .base = undefined,
- .return_type = return_type,
- .params = params,
- .is_var_args = is_var_args,
- });
- errdefer comp.gpa().destroy(result);
+ fn ccFnTypeStr(cc: CallingConvention) []const u8 {
+ return switch (cc) {
+ CallingConvention.Auto => "",
+ CallingConvention.C => "extern ",
+ CallingConvention.Cold => "coldcc ",
+ CallingConvention.Naked => "nakedcc ",
+ CallingConvention.Stdcall => "stdcallcc ",
+ CallingConvention.Async => unreachable,
+ };
+ }
- result.base.init(comp, Id.Fn, "TODO fn type name");
+ pub fn paramCount(self: *Fn) usize {
+ return switch (self.key.data) {
+ Key.Data.Generic => |generic| generic.param_count,
+ Key.Data.Normal => |normal| normal.params.len,
+ };
+ }
- result.return_type.base.ref();
- for (result.params) |param| {
- param.typ.base.ref();
+ /// takes ownership of key.Normal.params on success
+ pub async fn get(comp: *Compilation, key: Key) !*Fn {
+ {
+ const held = await (async comp.fn_type_table.acquire() catch unreachable);
+ defer held.release();
+
+ if (held.value.get(&key)) |entry| {
+ entry.value.base.base.ref();
+ return entry.value;
+ }
}
- return result;
+
+ key.ref();
+ errdefer key.deref(comp);
+
+ const self = try comp.gpa().create(Fn{
+ .base = undefined,
+ .key = key,
+ .garbage_node = undefined,
+ });
+ errdefer comp.gpa().destroy(self);
+
+ var name_buf = try std.Buffer.initSize(comp.gpa(), 0);
+ defer name_buf.deinit();
+
+ const name_stream = &std.io.BufferOutStream.init(&name_buf).stream;
+
+ switch (key.data) {
+ Key.Data.Generic => |generic| {
+ switch (generic.cc) {
+ CallingConvention.Async => |async_allocator_type| {
+ try name_stream.print("async<{}> ", async_allocator_type.name);
+ },
+ else => {
+ const cc_str = ccFnTypeStr(generic.cc);
+ try name_stream.write(cc_str);
+ },
+ }
+ try name_stream.write("fn(");
+ var param_i: usize = 0;
+ while (param_i < generic.param_count) : (param_i += 1) {
+ const arg = if (param_i == 0) "var" else ", var";
+ try name_stream.write(arg);
+ }
+ try name_stream.write(")");
+ if (key.alignment) |alignment| {
+ try name_stream.print(" align<{}>", alignment);
+ }
+ try name_stream.write(" var");
+ },
+ Key.Data.Normal => |normal| {
+ const cc_str = ccFnTypeStr(normal.cc);
+ try name_stream.print("{}fn(", cc_str);
+ for (normal.params) |param, i| {
+ if (i != 0) try name_stream.write(", ");
+ if (param.is_noalias) try name_stream.write("noalias ");
+ try name_stream.write(param.typ.name);
+ }
+ if (normal.is_var_args) {
+ if (normal.params.len != 0) try name_stream.write(", ");
+ try name_stream.write("...");
+ }
+ try name_stream.write(")");
+ if (key.alignment) |alignment| {
+ try name_stream.print(" align<{}>", alignment);
+ }
+ try name_stream.print(" {}", normal.return_type.name);
+ },
+ }
+
+ self.base.init(comp, Id.Fn, name_buf.toOwnedSlice());
+
+ {
+ const held = await (async comp.fn_type_table.acquire() catch unreachable);
+ defer held.release();
+
+ _ = try held.value.put(&self.key, self);
+ }
+ return self;
}
pub fn destroy(self: *Fn, comp: *Compilation) void {
- self.return_type.base.deref(comp);
- for (self.params) |param| {
- param.typ.base.deref(comp);
- }
+ self.key.deref(comp);
comp.gpa().destroy(self);
}
pub fn getLlvmType(self: *Fn, allocator: *Allocator, llvm_context: llvm.ContextRef) !llvm.TypeRef {
- const llvm_return_type = switch (self.return_type.id) {
+ const normal = &self.key.data.Normal;
+ const llvm_return_type = switch (normal.return_type.id) {
Type.Id.Void => llvm.VoidTypeInContext(llvm_context) orelse return error.OutOfMemory,
- else => try self.return_type.getLlvmType(allocator, llvm_context),
+ else => try normal.return_type.getLlvmType(allocator, llvm_context),
};
- const llvm_param_types = try allocator.alloc(llvm.TypeRef, self.params.len);
+ const llvm_param_types = try allocator.alloc(llvm.TypeRef, normal.params.len);
defer allocator.free(llvm_param_types);
for (llvm_param_types) |*llvm_param_type, i| {
- llvm_param_type.* = try self.params[i].typ.getLlvmType(allocator, llvm_context);
+ llvm_param_type.* = try normal.params[i].typ.getLlvmType(allocator, llvm_context);
}
return llvm.FunctionType(
llvm_return_type,
llvm_param_types.ptr,
@intCast(c_uint, llvm_param_types.len),
- @boolToInt(self.is_var_args),
+ @boolToInt(normal.is_var_args),
) orelse error.OutOfMemory;
}
};
@@ -347,8 +557,10 @@ pub const Type = struct {
is_signed: bool,
pub fn hash(self: *const Key) u32 {
- const rands = [2]u32{ 0xa4ba6498, 0x75fc5af7 };
- return rands[@boolToInt(self.is_signed)] *% self.bit_count;
+ var result: u32 = 0;
+ result +%= hashAny(self.is_signed, 0);
+ result +%= hashAny(self.bit_count, 1);
+ return result;
}
pub fn eql(self: *const Key, other: *const Key) bool {
@@ -443,15 +655,16 @@ pub const Type = struct {
alignment: Align,
pub fn hash(self: *const Key) u32 {
- const align_hash = switch (self.alignment) {
+ var result: u32 = 0;
+ result +%= switch (self.alignment) {
Align.Abi => 0xf201c090,
- Align.Override => |x| x,
+ Align.Override => |x| hashAny(x, 0),
};
- return hash_usize(@ptrToInt(self.child_type)) *%
- hash_enum(self.mut) *%
- hash_enum(self.vol) *%
- hash_enum(self.size) *%
- align_hash;
+ result +%= hashAny(self.child_type, 1);
+ result +%= hashAny(self.mut, 2);
+ result +%= hashAny(self.vol, 3);
+ result +%= hashAny(self.size, 4);
+ return result;
}
pub fn eql(self: *const Key, other: *const Key) bool {
@@ -605,7 +818,10 @@ pub const Type = struct {
len: usize,
pub fn hash(self: *const Key) u32 {
- return hash_usize(@ptrToInt(self.elem_type)) *% hash_usize(self.len);
+ var result: u32 = 0;
+ result +%= hashAny(self.elem_type, 0);
+ result +%= hashAny(self.len, 1);
+ return result;
}
pub fn eql(self: *const Key, other: *const Key) bool {
@@ -818,27 +1034,37 @@ pub const Type = struct {
};
};
-fn hash_usize(x: usize) u32 {
- return switch (@sizeOf(usize)) {
- 4 => x,
- 8 => @truncate(u32, x *% 0xad44ee2d8e3fc13d),
- else => @compileError("implement this hash function"),
- };
-}
-
-fn hash_enum(x: var) u32 {
- const rands = []u32{
- 0x85ebf64f,
- 0x3fcb3211,
- 0x240a4e8e,
- 0x40bb0e3c,
- 0x78be45af,
- 0x1ca98e37,
- 0xec56053a,
- 0x906adc48,
- 0xd4fe9763,
- 0x54c80dac,
- };
- comptime assert(@memberCount(@typeOf(x)) < rands.len);
- return rands[@enumToInt(x)];
+fn hashAny(x: var, comptime seed: u64) u32 {
+ switch (@typeInfo(@typeOf(x))) {
+ builtin.TypeId.Int => |info| {
+ comptime var rng = comptime std.rand.DefaultPrng.init(seed);
+ const unsigned_x = @bitCast(@IntType(false, info.bits), x);
+ if (info.bits <= 32) {
+ return u32(unsigned_x) *% comptime rng.random.scalar(u32);
+ } else {
+ return @truncate(u32, unsigned_x *% comptime rng.random.scalar(@typeOf(unsigned_x)));
+ }
+ },
+ builtin.TypeId.Pointer => |info| {
+ switch (info.size) {
+ builtin.TypeInfo.Pointer.Size.One => return hashAny(@ptrToInt(x), seed),
+ builtin.TypeInfo.Pointer.Size.Many => @compileError("implement hash function"),
+ builtin.TypeInfo.Pointer.Size.Slice => @compileError("implement hash function"),
+ }
+ },
+ builtin.TypeId.Enum => return hashAny(@enumToInt(x), seed),
+ builtin.TypeId.Bool => {
+ comptime var rng = comptime std.rand.DefaultPrng.init(seed);
+ const vals = comptime [2]u32{ rng.random.scalar(u32), rng.random.scalar(u32) };
+ return vals[@boolToInt(x)];
+ },
+ builtin.TypeId.Optional => {
+ if (x) |non_opt| {
+ return hashAny(non_opt, seed);
+ } else {
+ return hashAny(u32(1), seed);
+ }
+ },
+ else => @compileError("implement hash function for " ++ @typeName(@typeOf(x))),
+ }
}
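
A note on the hashAny helper added above: rather than the hand-written
random tables of hash_usize/hash_enum, each call site passes a distinct
comptime seed, and a comptime-evaluated PRNG derives a random multiplier
per field. Reduced to its core (hypothetical standalone sketch):

    fn fieldHash(x: u32, comptime seed: u64) u32 {
        comptime var rng = comptime std.rand.DefaultPrng.init(seed);
        return x *% comptime rng.random.scalar(u32);
    }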
diff --git a/src/analyze.cpp b/src/analyze.cpp
index f399ab8305..a4bfff78c3 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -3941,7 +3941,7 @@ AstNode *get_param_decl_node(FnTableEntry *fn_entry, size_t index) {
return nullptr;
}
-static void define_local_param_variables(CodeGen *g, FnTableEntry *fn_table_entry, VariableTableEntry **arg_vars) {
+static void define_local_param_variables(CodeGen *g, FnTableEntry *fn_table_entry) {
TypeTableEntry *fn_type = fn_table_entry->type_entry;
assert(!fn_type->data.fn.is_generic);
FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
@@ -3979,10 +3979,6 @@ static void define_local_param_variables(CodeGen *g, FnTableEntry *fn_table_entr
if (fn_type->data.fn.gen_param_info) {
var->gen_arg_index = fn_type->data.fn.gen_param_info[i].gen_index;
}
-
- if (arg_vars) {
- arg_vars[i] = var;
- }
}
}
@@ -4082,7 +4078,7 @@ static void analyze_fn_body(CodeGen *g, FnTableEntry *fn_table_entry) {
if (!fn_table_entry->child_scope)
fn_table_entry->child_scope = &fn_table_entry->fndef_scope->base;
- define_local_param_variables(g, fn_table_entry, nullptr);
+ define_local_param_variables(g, fn_table_entry);
TypeTableEntry *fn_type = fn_table_entry->type_entry;
assert(!fn_type->data.fn.is_generic);
From adefd1a52b812813dd3e3590d398f927ffc5b9af Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 24 Jul 2018 20:24:05 -0400
Subject: [PATCH 05/37] self-hosted: function calling another function
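
A hedged example of the kind of program this lets the self-hosted
compiler handle (illustrative only, not taken from this patch's tests):

    fn add(a: i32, b: i32) i32 {
        return a + b;
    }
    export fn entry() i32 {
        return add(1, 2);
    }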
---
src-self-hosted/codegen.zig | 160 ++++++++++++++++++++++++-
src-self-hosted/compilation.zig | 64 +++++-----
src-self-hosted/ir.zig | 197 +++++++++++++++++++++++++++----
src-self-hosted/llvm.zig | 15 ++-
src-self-hosted/scope.zig | 201 ++++++++++++++++++++------------
src-self-hosted/type.zig | 105 +++++++++++------
src-self-hosted/value.zig | 22 +++-
7 files changed, 593 insertions(+), 171 deletions(-)
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index 88293c845e..5ca01ca7e7 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -6,6 +6,7 @@ const c = @import("c.zig");
const ir = @import("ir.zig");
const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
+const Scope = @import("scope.zig").Scope;
const event = std.event;
const assert = std.debug.assert;
const DW = std.dwarf;
@@ -156,7 +157,7 @@ pub fn renderToLlvmModule(ofile: *ObjectFile, fn_val: *Value.Fn, code: *ir.Code)
llvm_fn_type,
) orelse return error.OutOfMemory;
- const want_fn_safety = fn_val.block_scope.safety.get(ofile.comp);
+ const want_fn_safety = fn_val.block_scope.?.safety.get(ofile.comp);
if (want_fn_safety and ofile.comp.haveLibC()) {
try addLLVMFnAttr(ofile, llvm_fn, "sspstrong");
try addLLVMFnAttrStr(ofile, llvm_fn, "stack-protector-buffer-size", "4");
@@ -227,9 +228,86 @@ pub fn renderToLlvmModule(ofile: *ObjectFile, fn_val: *Value.Fn, code: *ir.Code)
// TODO set up error return tracing
// TODO allocate temporary stack values
- // TODO create debug variable declarations for variables and allocate all local variables
+
+ const var_list = fn_type.non_key.Normal.variable_list.toSliceConst();
+ // create debug variable declarations for variables and allocate all local variables
+ for (var_list) |var_scope, i| {
+ const var_type = switch (var_scope.data) {
+ Scope.Var.Data.Const => unreachable,
+ Scope.Var.Data.Param => |param| param.typ,
+ };
+ // if (!type_has_bits(var->value->type)) {
+ // continue;
+ // }
+ // if (ir_get_var_is_comptime(var))
+ // continue;
+ // if (type_requires_comptime(var->value->type))
+ // continue;
+ // if (var->src_arg_index == SIZE_MAX) {
+ // var->value_ref = build_alloca(g, var->value->type, buf_ptr(&var->name), var->align_bytes);
+
+ // var->di_loc_var = ZigLLVMCreateAutoVariable(g->dbuilder, get_di_scope(g, var->parent_scope),
+ // buf_ptr(&var->name), import->di_file, (unsigned)(var->decl_node->line + 1),
+ // var->value->type->di_type, !g->strip_debug_symbols, 0);
+
+ // } else {
+ // it's a parameter
+ // assert(var->gen_arg_index != SIZE_MAX);
+ // TypeTableEntry *gen_type;
+ // FnGenParamInfo *gen_info = &fn_table_entry->type_entry->data.fn.gen_param_info[var->src_arg_index];
+
+ if (var_type.handleIsPtr()) {
+ // if (gen_info->is_byval) {
+ // gen_type = var->value->type;
+ // } else {
+ // gen_type = gen_info->type;
+ // }
+ var_scope.data.Param.llvm_value = llvm.GetParam(llvm_fn, @intCast(c_uint, i));
+ } else {
+ // gen_type = var->value->type;
+ var_scope.data.Param.llvm_value = try renderAlloca(ofile, var_type, var_scope.name, Type.Pointer.Align.Abi);
+ }
+ // if (var->decl_node) {
+ // var->di_loc_var = ZigLLVMCreateParameterVariable(g->dbuilder, get_di_scope(g, var->parent_scope),
+ // buf_ptr(&var->name), import->di_file,
+ // (unsigned)(var->decl_node->line + 1),
+ // gen_type->di_type, !g->strip_debug_symbols, 0, (unsigned)(var->gen_arg_index + 1));
+ // }
+
+ // }
+ }
+
// TODO finishing error return trace setup. we have to do this after all the allocas.
- // TODO create debug variable declarations for parameters
+
+ // create debug variable declarations for parameters
+ // rely on the first variables in the variable_list being parameters.
+ //size_t next_var_i = 0;
+ for (fn_type.key.data.Normal.params) |param, i| {
+ //FnGenParamInfo *info = &fn_table_entry->type_entry->data.fn.gen_param_info[param_i];
+ //if (info->gen_index == SIZE_MAX)
+ // continue;
+ const scope_var = var_list[i];
+ //assert(variable->src_arg_index != SIZE_MAX);
+ //next_var_i += 1;
+ //assert(variable);
+ //assert(variable->value_ref);
+
+ if (!param.typ.handleIsPtr()) {
+ //clear_debug_source_node(g);
+ const llvm_param = llvm.GetParam(llvm_fn, @intCast(c_uint, i));
+ _ = renderStoreUntyped(
+ ofile,
+ llvm_param,
+ scope_var.data.Param.llvm_value,
+ Type.Pointer.Align.Abi,
+ Type.Pointer.Vol.Non,
+ );
+ }
+
+ //if (variable->decl_node) {
+ // gen_var_debug_decl(g, variable);
+ //}
+ }
for (code.basic_block_list.toSlice()) |current_block| {
llvm.PositionBuilderAtEnd(ofile.builder, current_block.llvm_block);
@@ -294,3 +372,79 @@ fn addLLVMFnAttrStr(ofile: *ObjectFile, fn_val: llvm.ValueRef, attr_name: []cons
fn addLLVMFnAttrInt(ofile: *ObjectFile, fn_val: llvm.ValueRef, attr_name: []const u8, attr_val: u64) !void {
return addLLVMAttrInt(ofile, fn_val, @maxValue(llvm.AttributeIndex), attr_name, attr_val);
}
+
+fn renderLoadUntyped(
+ ofile: *ObjectFile,
+ ptr: llvm.ValueRef,
+ alignment: Type.Pointer.Align,
+ vol: Type.Pointer.Vol,
+ name: [*]const u8,
+) !llvm.ValueRef {
+ const result = llvm.BuildLoad(ofile.builder, ptr, name) orelse return error.OutOfMemory;
+ switch (vol) {
+ Type.Pointer.Vol.Non => {},
+ Type.Pointer.Vol.Volatile => llvm.SetVolatile(result, 1),
+ }
+ llvm.SetAlignment(result, resolveAlign(ofile, alignment, llvm.GetElementType(llvm.TypeOf(ptr))));
+ return result;
+}
+
+fn renderLoad(ofile: *ObjectFile, ptr: llvm.ValueRef, ptr_type: *Type.Pointer, name: [*]const u8) !llvm.ValueRef {
+ return renderLoadUntyped(ofile, ptr, ptr_type.key.alignment, ptr_type.key.vol, name);
+}
+
+pub fn getHandleValue(ofile: *ObjectFile, ptr: llvm.ValueRef, ptr_type: *Type.Pointer) !?llvm.ValueRef {
+ const child_type = ptr_type.key.child_type;
+ if (!child_type.hasBits()) {
+ return null;
+ }
+ if (child_type.handleIsPtr()) {
+ return ptr;
+ }
+ return try renderLoad(ofile, ptr, ptr_type, c"");
+}
+
+pub fn renderStoreUntyped(
+ ofile: *ObjectFile,
+ value: llvm.ValueRef,
+ ptr: llvm.ValueRef,
+ alignment: Type.Pointer.Align,
+ vol: Type.Pointer.Vol,
+) !llvm.ValueRef {
+ const result = llvm.BuildStore(ofile.builder, value, ptr) orelse return error.OutOfMemory;
+ switch (vol) {
+ Type.Pointer.Vol.Non => {},
+ Type.Pointer.Vol.Volatile => llvm.SetVolatile(result, 1),
+ }
+ llvm.SetAlignment(result, resolveAlign(ofile, alignment, llvm.TypeOf(value)));
+ return result;
+}
+
+pub fn renderStore(
+ ofile: *ObjectFile,
+ value: llvm.ValueRef,
+ ptr: llvm.ValueRef,
+ ptr_type: *Type.Pointer,
+) !llvm.ValueRef {
+ return renderStoreUntyped(ofile, value, ptr, ptr_type.key.alignment, ptr_type.key.vol);
+}
+
+pub fn renderAlloca(
+ ofile: *ObjectFile,
+ var_type: *Type,
+ name: []const u8,
+ alignment: Type.Pointer.Align,
+) !llvm.ValueRef {
+ const llvm_var_type = try var_type.getLlvmType(ofile.arena, ofile.context);
+ const name_with_null = try std.cstr.addNullByte(ofile.arena, name);
+ const result = llvm.BuildAlloca(ofile.builder, llvm_var_type, name_with_null.ptr) orelse return error.OutOfMemory;
+ llvm.SetAlignment(result, resolveAlign(ofile, alignment, llvm_var_type));
+ return result;
+}
+
+pub fn resolveAlign(ofile: *ObjectFile, alignment: Type.Pointer.Align, llvm_type: llvm.TypeRef) u32 {
+ return switch (alignment) {
+ Type.Pointer.Align.Abi => return llvm.ABIAlignmentOfType(ofile.comp.target_data_ref, llvm_type),
+ Type.Pointer.Align.Override => |a| a,
+ };
+}
diff --git a/src-self-hosted/compilation.zig b/src-self-hosted/compilation.zig
index 8d41e2439b..1564abfbd3 100644
--- a/src-self-hosted/compilation.zig
+++ b/src-self-hosted/compilation.zig
@@ -1165,49 +1165,47 @@ async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void {
symbol_name_consumed = true;
// Define local parameter variables
- //for (size_t i = 0; i < fn_type_id->param_count; i += 1) {
- // FnTypeParamInfo *param_info = &fn_type_id->param_info[i];
- // AstNode *param_decl_node = get_param_decl_node(fn_table_entry, i);
- // Buf *param_name;
- // bool is_var_args = param_decl_node && param_decl_node->data.param_decl.is_var_args;
- // if (param_decl_node && !is_var_args) {
- // param_name = param_decl_node->data.param_decl.name;
- // } else {
- // param_name = buf_sprintf("arg%" ZIG_PRI_usize "", i);
- // }
- // if (param_name == nullptr) {
- // continue;
- // }
+ const root_scope = fn_decl.base.findRootScope();
+ for (fn_type.key.data.Normal.params) |param, i| {
+ //AstNode *param_decl_node = get_param_decl_node(fn_table_entry, i);
+ const param_decl = @fieldParentPtr(ast.Node.ParamDecl, "base", fn_decl.fn_proto.params.at(i).*);
+ const name_token = param_decl.name_token orelse {
+ try comp.addCompileError(root_scope, Span{
+ .first = param_decl.firstToken(),
+ .last = param_decl.type_node.firstToken(),
+ }, "missing parameter name");
+ return error.SemanticAnalysisFailed;
+ };
+ const param_name = root_scope.tree.tokenSlice(name_token);
- // TypeTableEntry *param_type = param_info->type;
- // bool is_noalias = param_info->is_noalias;
+ // if (is_noalias && get_codegen_ptr_type(param_type) == nullptr) {
+ // add_node_error(g, param_decl_node, buf_sprintf("noalias on non-pointer parameter"));
+ // }
- // if (is_noalias && get_codegen_ptr_type(param_type) == nullptr) {
- // add_node_error(g, param_decl_node, buf_sprintf("noalias on non-pointer parameter"));
- // }
+ // TODO check for shadowing
- // VariableTableEntry *var = add_variable(g, param_decl_node, fn_table_entry->child_scope,
- // param_name, true, create_const_runtime(param_type), nullptr);
- // var->src_arg_index = i;
- // fn_table_entry->child_scope = var->child_scope;
- // var->shadowable = var->shadowable || is_var_args;
+ const var_scope = try Scope.Var.createParam(
+ comp,
+ fn_val.child_scope,
+ param_name,
+ &param_decl.base,
+ i,
+ param.typ,
+ );
+ fn_val.child_scope = &var_scope.base;
- // if (type_has_bits(param_type)) {
- // fn_table_entry->variable_list.append(var);
- // }
-
- // if (fn_type->data.fn.gen_param_info) {
- // var->gen_arg_index = fn_type->data.fn.gen_param_info[i].gen_index;
- // }
- //}
+ try fn_type.non_key.Normal.variable_list.append(var_scope);
+ }
const analyzed_code = try await (async comp.genAndAnalyzeCode(
- &fndef_scope.base,
+ fn_val.child_scope,
body_node,
fn_type.key.data.Normal.return_type,
) catch unreachable);
errdefer analyzed_code.destroy(comp.gpa());
+ assert(fn_val.block_scope != null);
+
// Kick off rendering to LLVM module, but it doesn't block the fn decl
// analysis from being complete.
try comp.prelink_group.call(codegen.renderToLlvm, comp, fn_val, analyzed_code);
@@ -1263,7 +1261,7 @@ async fn analyzeFnType(comp: *Compilation, scope: *Scope, fn_proto: *ast.Node.Fn
const key = Type.Fn.Key{
.alignment = null,
.data = Type.Fn.Key.Data{
- .Normal = Type.Fn.Normal{
+ .Normal = Type.Fn.Key.Normal{
.return_type = return_type,
.params = params.toOwnedSlice(),
.is_var_args = false, // TODO
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index 45355bbf2c..619cd4f330 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -10,8 +10,10 @@ const assert = std.debug.assert;
const Token = std.zig.Token;
const Span = @import("errmsg.zig").Span;
const llvm = @import("llvm.zig");
-const ObjectFile = @import("codegen.zig").ObjectFile;
+const codegen = @import("codegen.zig");
+const ObjectFile = codegen.ObjectFile;
const Decl = @import("decl.zig").Decl;
+const mem = std.mem;
pub const LVal = enum {
None,
@@ -122,6 +124,8 @@ pub const Inst = struct {
Id.Br => return @fieldParentPtr(Br, "base", base).analyze(ira),
Id.AddImplicitReturnType => return @fieldParentPtr(AddImplicitReturnType, "base", base).analyze(ira),
Id.PtrType => return await (async @fieldParentPtr(PtrType, "base", base).analyze(ira) catch unreachable),
+ Id.VarPtr => return await (async @fieldParentPtr(VarPtr, "base", base).analyze(ira) catch unreachable),
+ Id.LoadPtr => return await (async @fieldParentPtr(LoadPtr, "base", base).analyze(ira) catch unreachable),
}
}
@@ -130,6 +134,8 @@ pub const Inst = struct {
Id.Return => return @fieldParentPtr(Return, "base", base).render(ofile, fn_val),
Id.Const => return @fieldParentPtr(Const, "base", base).render(ofile, fn_val),
Id.Call => return @fieldParentPtr(Call, "base", base).render(ofile, fn_val),
+ Id.VarPtr => return @fieldParentPtr(VarPtr, "base", base).render(ofile, fn_val),
+ Id.LoadPtr => return @fieldParentPtr(LoadPtr, "base", base).render(ofile, fn_val),
Id.DeclRef => unreachable,
Id.PtrType => unreachable,
Id.Ref => @panic("TODO"),
@@ -248,6 +254,8 @@ pub const Inst = struct {
Call,
DeclRef,
PtrType,
+ VarPtr,
+ LoadPtr,
};
pub const Call = struct {
@@ -491,6 +499,133 @@ pub const Inst = struct {
}
};
+ pub const VarPtr = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {
+ var_scope: *Scope.Var,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(inst: *const VarPtr) void {
+ std.debug.warn("{}", inst.params.var_scope.name);
+ }
+
+ pub fn hasSideEffects(inst: *const VarPtr) bool {
+ return false;
+ }
+
+ pub async fn analyze(self: *const VarPtr, ira: *Analyze) !*Inst {
+ switch (self.params.var_scope.data) {
+ Scope.Var.Data.Const => @panic("TODO"),
+ Scope.Var.Data.Param => |param| {
+ const new_inst = try ira.irb.build(
+ Inst.VarPtr,
+ self.base.scope,
+ self.base.span,
+ Inst.VarPtr.Params{ .var_scope = self.params.var_scope },
+ );
+ const ptr_type = try await (async Type.Pointer.get(ira.irb.comp, Type.Pointer.Key{
+ .child_type = param.typ,
+ .mut = Type.Pointer.Mut.Const,
+ .vol = Type.Pointer.Vol.Non,
+ .size = Type.Pointer.Size.One,
+ .alignment = Type.Pointer.Align.Abi,
+ }) catch unreachable);
+ new_inst.val = IrVal{ .KnownType = &ptr_type.base };
+ return new_inst;
+ },
+ }
+ }
+
+ pub fn render(self: *VarPtr, ofile: *ObjectFile, fn_val: *Value.Fn) llvm.ValueRef {
+ switch (self.params.var_scope.data) {
+ Scope.Var.Data.Const => unreachable, // turned into Inst.Const in analyze pass
+ Scope.Var.Data.Param => |param| return param.llvm_value,
+ }
+ }
+ };
+
+ pub const LoadPtr = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {
+ target: *Inst,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(inst: *const LoadPtr) void {}
+
+ pub fn hasSideEffects(inst: *const LoadPtr) bool {
+ return false;
+ }
+
+ pub async fn analyze(self: *const LoadPtr, ira: *Analyze) !*Inst {
+ const target = try self.params.target.getAsParam();
+ const target_type = target.getKnownType();
+ if (target_type.id != Type.Id.Pointer) {
+ try ira.addCompileError(self.base.span, "dereference of non pointer type '{}'", target_type.name);
+ return error.SemanticAnalysisFailed;
+ }
+ const ptr_type = @fieldParentPtr(Type.Pointer, "base", target_type);
+ // if (instr_is_comptime(ptr)) {
+ // if (ptr->value.data.x_ptr.mut == ConstPtrMutComptimeConst ||
+ // ptr->value.data.x_ptr.mut == ConstPtrMutComptimeVar)
+ // {
+ // ConstExprValue *pointee = const_ptr_pointee(ira->codegen, &ptr->value);
+ // if (pointee->special != ConstValSpecialRuntime) {
+ // IrInstruction *result = ir_create_const(&ira->new_irb, source_instruction->scope,
+ // source_instruction->source_node, child_type);
+ // copy_const_val(&result->value, pointee, ptr->value.data.x_ptr.mut == ConstPtrMutComptimeConst);
+ // result->value.type = child_type;
+ // return result;
+ // }
+ // }
+ // }
+ const new_inst = try ira.irb.build(
+ Inst.LoadPtr,
+ self.base.scope,
+ self.base.span,
+ Inst.LoadPtr.Params{ .target = target },
+ );
+ new_inst.val = IrVal{ .KnownType = ptr_type.key.child_type };
+ return new_inst;
+ }
+
+ pub fn render(self: *LoadPtr, ofile: *ObjectFile, fn_val: *Value.Fn) !?llvm.ValueRef {
+ const child_type = self.base.getKnownType();
+ if (!child_type.hasBits()) {
+ return null;
+ }
+ const ptr = self.params.target.llvm_value.?;
+ const ptr_type = self.params.target.getKnownType().cast(Type.Pointer).?;
+
+ return try codegen.getHandleValue(ofile, ptr, ptr_type);
+
+ //uint32_t unaligned_bit_count = ptr_type->data.pointer.unaligned_bit_count;
+ //if (unaligned_bit_count == 0)
+ // return get_handle_value(g, ptr, child_type, ptr_type);
+
+ //bool big_endian = g->is_big_endian;
+
+ //assert(!handle_is_ptr(child_type));
+ //LLVMValueRef containing_int = gen_load(g, ptr, ptr_type, "");
+
+ //uint32_t bit_offset = ptr_type->data.pointer.bit_offset;
+ //uint32_t host_bit_count = LLVMGetIntTypeWidth(LLVMTypeOf(containing_int));
+ //uint32_t shift_amt = big_endian ? host_bit_count - bit_offset - unaligned_bit_count : bit_offset;
+
+ //LLVMValueRef shift_amt_val = LLVMConstInt(LLVMTypeOf(containing_int), shift_amt, false);
+ //LLVMValueRef shifted_value = LLVMBuildLShr(g->builder, containing_int, shift_amt_val, "");
+
+ //return LLVMBuildTrunc(g->builder, shifted_value, child_type->type_ref, "");
+ }
+ };
+
pub const PtrType = struct {
base: Inst,
params: Params,
@@ -1160,6 +1295,7 @@ pub const Builder = struct {
Scope.Id.Block,
Scope.Id.Defer,
Scope.Id.DeferExpr,
+ Scope.Id.Var,
=> scope = scope.parent.?,
}
}
@@ -1261,8 +1397,8 @@ pub const Builder = struct {
var child_scope = outer_block_scope;
if (parent_scope.findFnDef()) |fndef_scope| {
- if (fndef_scope.fn_val.child_scope == parent_scope) {
- fndef_scope.fn_val.block_scope = block_scope;
+ if (fndef_scope.fn_val.?.block_scope == null) {
+ fndef_scope.fn_val.?.block_scope = block_scope;
}
}
@@ -1492,20 +1628,23 @@ pub const Builder = struct {
error.OutOfMemory => return error.OutOfMemory,
}
- //VariableTableEntry *var = find_variable(irb->codegen, scope, variable_name);
- //if (var) {
- // IrInstruction *var_ptr = ir_build_var_ptr(irb, scope, node, var);
- // if (lval == LValPtr)
- // return var_ptr;
- // else
- // return ir_build_load_ptr(irb, scope, node, var_ptr);
- //}
-
- if (await (async irb.findDecl(scope, name) catch unreachable)) |decl| {
- return irb.build(Inst.DeclRef, scope, src_span, Inst.DeclRef.Params{
- .decl = decl,
- .lval = lval,
- });
+ switch (await (async irb.findIdent(scope, name) catch unreachable)) {
+ Ident.Decl => |decl| {
+ return irb.build(Inst.DeclRef, scope, src_span, Inst.DeclRef.Params{
+ .decl = decl,
+ .lval = lval,
+ });
+ },
+ Ident.VarScope => |var_scope| {
+ const var_ptr = try irb.build(Inst.VarPtr, scope, src_span, Inst.VarPtr.Params{ .var_scope = var_scope });
+ switch (lval) {
+ LVal.Ptr => return var_ptr,
+ LVal.None => {
+ return irb.build(Inst.LoadPtr, scope, src_span, Inst.LoadPtr.Params{ .target = var_ptr });
+ },
+ }
+ },
+ Ident.NotFound => {},
}
//if (node->owner->any_imports_failed) {
@@ -1546,6 +1685,7 @@ pub const Builder = struct {
Scope.Id.Block,
Scope.Id.Decls,
Scope.Id.Root,
+ Scope.Id.Var,
=> scope = scope.parent orelse break,
Scope.Id.DeferExpr => unreachable,
@@ -1596,6 +1736,7 @@ pub const Builder = struct {
Scope.Id.CompTime,
Scope.Id.Block,
+ Scope.Id.Var,
=> scope = scope.parent orelse return is_noreturn,
Scope.Id.DeferExpr => unreachable,
@@ -1674,8 +1815,10 @@ pub const Builder = struct {
Type.Pointer.Size,
LVal,
*Decl,
+ *Scope.Var,
=> {},
- // it's ok to add more types here, just make sure any instructions are ref'd appropriately
+ // it's ok to add more types here, just make sure that
+ // any instructions and basic blocks are ref'd appropriately
else => @compileError("unrecognized type in Params: " ++ @typeName(FieldType)),
}
}
@@ -1773,18 +1916,30 @@ pub const Builder = struct {
//// the above blocks are rendered by ir_gen after the rest of codegen
}
- async fn findDecl(irb: *Builder, scope: *Scope, name: []const u8) ?*Decl {
+ const Ident = union(enum) {
+ NotFound,
+ Decl: *Decl,
+ VarScope: *Scope.Var,
+ };
+
+ async fn findIdent(irb: *Builder, scope: *Scope, name: []const u8) Ident {
var s = scope;
while (true) {
switch (s.id) {
+ Scope.Id.Root => return Ident.NotFound,
Scope.Id.Decls => {
const decls = @fieldParentPtr(Scope.Decls, "base", s);
const table = await (async decls.getTableReadOnly() catch unreachable);
if (table.get(name)) |entry| {
- return entry.value;
+ return Ident{ .Decl = entry.value };
+ }
+ },
+ Scope.Id.Var => {
+ const var_scope = @fieldParentPtr(Scope.Var, "base", s);
+ if (mem.eql(u8, var_scope.name, name)) {
+ return Ident{ .VarScope = var_scope };
}
},
- Scope.Id.Root => return null,
else => {},
}
s = s.parent.?;
diff --git a/src-self-hosted/llvm.zig b/src-self-hosted/llvm.zig
index 8bb45ac616..778d3fae07 100644
--- a/src-self-hosted/llvm.zig
+++ b/src-self-hosted/llvm.zig
@@ -30,6 +30,7 @@ pub const AddGlobal = c.LLVMAddGlobal;
pub const AddModuleCodeViewFlag = c.ZigLLVMAddModuleCodeViewFlag;
pub const AddModuleDebugInfoFlag = c.ZigLLVMAddModuleDebugInfoFlag;
pub const ArrayType = c.LLVMArrayType;
+pub const BuildLoad = c.LLVMBuildLoad;
pub const ClearCurrentDebugLocation = c.ZigLLVMClearCurrentDebugLocation;
pub const ConstAllOnes = c.LLVMConstAllOnes;
pub const ConstArray = c.LLVMConstArray;
@@ -95,13 +96,25 @@ pub const SetInitializer = c.LLVMSetInitializer;
pub const SetLinkage = c.LLVMSetLinkage;
pub const SetTarget = c.LLVMSetTarget;
pub const SetUnnamedAddr = c.LLVMSetUnnamedAddr;
+pub const SetVolatile = c.LLVMSetVolatile;
pub const StructTypeInContext = c.LLVMStructTypeInContext;
pub const TokenTypeInContext = c.LLVMTokenTypeInContext;
-pub const TypeOf = c.LLVMTypeOf;
pub const VoidTypeInContext = c.LLVMVoidTypeInContext;
pub const X86FP80TypeInContext = c.LLVMX86FP80TypeInContext;
pub const X86MMXTypeInContext = c.LLVMX86MMXTypeInContext;
+pub const GetElementType = LLVMGetElementType;
+extern fn LLVMGetElementType(Ty: TypeRef) TypeRef;
+
+pub const TypeOf = LLVMTypeOf;
+extern fn LLVMTypeOf(Val: ValueRef) TypeRef;
+
+pub const BuildStore = LLVMBuildStore;
+extern fn LLVMBuildStore(arg0: BuilderRef, Val: ValueRef, Ptr: ValueRef) ?ValueRef;
+
+pub const BuildAlloca = LLVMBuildAlloca;
+extern fn LLVMBuildAlloca(arg0: BuilderRef, Ty: TypeRef, Name: ?[*]const u8) ?ValueRef;
+
pub const ConstInBoundsGEP = LLVMConstInBoundsGEP;
pub extern fn LLVMConstInBoundsGEP(ConstantVal: ValueRef, ConstantIndices: [*]ValueRef, NumIndices: c_uint) ?ValueRef;
diff --git a/src-self-hosted/scope.zig b/src-self-hosted/scope.zig
index 7a41083f44..a38e765c6e 100644
--- a/src-self-hosted/scope.zig
+++ b/src-self-hosted/scope.zig
@@ -6,23 +6,26 @@ const Compilation = @import("compilation.zig").Compilation;
const mem = std.mem;
const ast = std.zig.ast;
const Value = @import("value.zig").Value;
+const Type = @import("type.zig").Type;
const ir = @import("ir.zig");
const Span = @import("errmsg.zig").Span;
const assert = std.debug.assert;
const event = std.event;
+const llvm = @import("llvm.zig");
pub const Scope = struct {
id: Id,
parent: ?*Scope,
- ref_count: usize,
+ ref_count: std.atomic.Int(usize),
+ /// Thread-safe
pub fn ref(base: *Scope) void {
- base.ref_count += 1;
+ _ = base.ref_count.incr();
}
+ /// Thread-safe
pub fn deref(base: *Scope, comp: *Compilation) void {
- base.ref_count -= 1;
- if (base.ref_count == 0) {
+ if (base.ref_count.decr() == 1) {
if (base.parent) |parent| parent.deref(comp);
switch (base.id) {
Id.Root => @fieldParentPtr(Root, "base", base).destroy(comp),
@@ -32,6 +35,7 @@ pub const Scope = struct {
Id.CompTime => @fieldParentPtr(CompTime, "base", base).destroy(comp),
Id.Defer => @fieldParentPtr(Defer, "base", base).destroy(comp),
Id.DeferExpr => @fieldParentPtr(DeferExpr, "base", base).destroy(comp),
+ Id.Var => @fieldParentPtr(Var, "base", base).destroy(comp),
}
}
}
@@ -49,15 +53,15 @@ pub const Scope = struct {
var scope = base;
while (true) {
switch (scope.id) {
- Id.FnDef => return @fieldParentPtr(FnDef, "base", base),
- Id.Decls => return null,
+ Id.FnDef => return @fieldParentPtr(FnDef, "base", scope),
+ Id.Root, Id.Decls => return null,
Id.Block,
Id.Defer,
Id.DeferExpr,
Id.CompTime,
- Id.Root,
- => scope = scope.parent orelse return null,
+ Id.Var,
+ => scope = scope.parent.?,
}
}
}
@@ -66,7 +70,7 @@ pub const Scope = struct {
var scope = base;
while (true) {
switch (scope.id) {
- Id.DeferExpr => return @fieldParentPtr(DeferExpr, "base", base),
+ Id.DeferExpr => return @fieldParentPtr(DeferExpr, "base", scope),
Id.FnDef,
Id.Decls,
@@ -76,11 +80,21 @@ pub const Scope = struct {
Id.Defer,
Id.CompTime,
Id.Root,
+ Id.Var,
=> scope = scope.parent orelse return null,
}
}
}
+ fn init(base: *Scope, id: Id, parent: *Scope) void {
+ base.* = Scope{
+ .id = id,
+ .parent = parent,
+ .ref_count = std.atomic.Int(usize).init(1),
+ };
+ parent.ref();
+ }
+
pub const Id = enum {
Root,
Decls,
@@ -89,6 +103,7 @@ pub const Scope = struct {
CompTime,
Defer,
DeferExpr,
+ Var,
};
pub const Root = struct {
@@ -100,16 +115,16 @@ pub const Scope = struct {
/// Takes ownership of realpath
/// Takes ownership of tree, will deinit and destroy when done.
pub fn create(comp: *Compilation, tree: *ast.Tree, realpath: []u8) !*Root {
- const self = try comp.gpa().create(Root{
+ const self = try comp.gpa().createOne(Root);
+ self.* = Root{
.base = Scope{
.id = Id.Root,
.parent = null,
- .ref_count = 1,
+ .ref_count = std.atomic.Int(usize).init(1),
},
.tree = tree,
.realpath = realpath,
- });
- errdefer comp.gpa().destroy(self);
+ };
return self;
}
@@ -137,16 +152,13 @@ pub const Scope = struct {
/// Creates a Decls scope with 1 reference
pub fn create(comp: *Compilation, parent: *Scope) !*Decls {
- const self = try comp.gpa().create(Decls{
- .base = Scope{
- .id = Id.Decls,
- .parent = parent,
- .ref_count = 1,
- },
+ const self = try comp.gpa().createOne(Decls);
+ self.* = Decls{
+ .base = undefined,
.table = event.Locked(Decl.Table).init(comp.loop, Decl.Table.init(comp.gpa())),
.name_future = event.Future(void).init(comp.loop),
- });
- parent.ref();
+ };
+ self.base.init(Id.Decls, parent);
return self;
}
@@ -199,21 +211,16 @@ pub const Scope = struct {
/// Creates a Block scope with 1 reference
pub fn create(comp: *Compilation, parent: *Scope) !*Block {
- const self = try comp.gpa().create(Block{
- .base = Scope{
- .id = Id.Block,
- .parent = parent,
- .ref_count = 1,
- },
+ const self = try comp.gpa().createOne(Block);
+ self.* = Block{
+ .base = undefined,
.incoming_values = undefined,
.incoming_blocks = undefined,
.end_block = undefined,
.is_comptime = undefined,
.safety = Safety.Auto,
- });
- errdefer comp.gpa().destroy(self);
-
- parent.ref();
+ };
+ self.base.init(Id.Block, parent);
return self;
}
@@ -226,22 +233,17 @@ pub const Scope = struct {
base: Scope,
/// This reference is not counted so that the scope can get destroyed with the function
- fn_val: *Value.Fn,
+ fn_val: ?*Value.Fn,
/// Creates a FnDef scope with 1 reference
/// Must set the fn_val later
pub fn create(comp: *Compilation, parent: *Scope) !*FnDef {
- const self = try comp.gpa().create(FnDef{
- .base = Scope{
- .id = Id.FnDef,
- .parent = parent,
- .ref_count = 1,
- },
- .fn_val = undefined,
- });
-
- parent.ref();
-
+ const self = try comp.gpa().createOne(FnDef);
+ self.* = FnDef{
+ .base = undefined,
+ .fn_val = null,
+ };
+ self.base.init(Id.FnDef, parent);
return self;
}
@@ -255,15 +257,9 @@ pub const Scope = struct {
/// Creates a CompTime scope with 1 reference
pub fn create(comp: *Compilation, parent: *Scope) !*CompTime {
- const self = try comp.gpa().create(CompTime{
- .base = Scope{
- .id = Id.CompTime,
- .parent = parent,
- .ref_count = 1,
- },
- });
-
- parent.ref();
+ const self = try comp.gpa().createOne(CompTime);
+ self.* = CompTime{ .base = undefined };
+ self.base.init(Id.CompTime, parent);
return self;
}
@@ -289,20 +285,14 @@ pub const Scope = struct {
kind: Kind,
defer_expr_scope: *DeferExpr,
) !*Defer {
- const self = try comp.gpa().create(Defer{
- .base = Scope{
- .id = Id.Defer,
- .parent = parent,
- .ref_count = 1,
- },
+ const self = try comp.gpa().createOne(Defer);
+ self.* = Defer{
+ .base = undefined,
.defer_expr_scope = defer_expr_scope,
.kind = kind,
- });
- errdefer comp.gpa().destroy(self);
-
+ };
+ self.base.init(Id.Defer, parent);
defer_expr_scope.base.ref();
-
- parent.ref();
return self;
}
@@ -319,18 +309,13 @@ pub const Scope = struct {
/// Creates a DeferExpr scope with 1 reference
pub fn create(comp: *Compilation, parent: *Scope, expr_node: *ast.Node) !*DeferExpr {
- const self = try comp.gpa().create(DeferExpr{
- .base = Scope{
- .id = Id.DeferExpr,
- .parent = parent,
- .ref_count = 1,
- },
+ const self = try comp.gpa().createOne(DeferExpr);
+ self.* = DeferExpr{
+ .base = undefined,
.expr_node = expr_node,
.reported_err = false,
- });
- errdefer comp.gpa().destroy(self);
-
- parent.ref();
+ };
+ self.base.init(Id.DeferExpr, parent);
return self;
}
@@ -338,4 +323,74 @@ pub const Scope = struct {
comp.gpa().destroy(self);
}
};
+
+ pub const Var = struct {
+ base: Scope,
+ name: []const u8,
+ src_node: *ast.Node,
+ data: Data,
+
+ pub const Data = union(enum) {
+ Param: Param,
+ Const: *Value,
+ };
+
+ pub const Param = struct {
+ index: usize,
+ typ: *Type,
+ llvm_value: llvm.ValueRef,
+ };
+
+ pub fn createParam(
+ comp: *Compilation,
+ parent: *Scope,
+ name: []const u8,
+ src_node: *ast.Node,
+ param_index: usize,
+ param_type: *Type,
+ ) !*Var {
+ const self = try create(comp, parent, name, src_node);
+ self.data = Data{
+ .Param = Param{
+ .index = param_index,
+ .typ = param_type,
+ .llvm_value = undefined,
+ },
+ };
+ return self;
+ }
+
+ pub fn createConst(
+ comp: *Compilation,
+ parent: *Scope,
+ name: []const u8,
+ src_node: *ast.Node,
+ value: *Value,
+ ) !*Var {
+ const self = try create(comp, parent, name, src_node);
+ self.data = Data{ .Const = value };
+ value.ref();
+ return self;
+ }
+
+ fn create(comp: *Compilation, parent: *Scope, name: []const u8, src_node: *ast.Node) !*Var {
+ const self = try comp.gpa().createOne(Var);
+ self.* = Var{
+ .base = undefined,
+ .name = name,
+ .src_node = src_node,
+ .data = undefined,
+ };
+ self.base.init(Id.Var, parent);
+ return self;
+ }
+
+ pub fn destroy(self: *Var, comp: *Compilation) void {
+ switch (self.data) {
+ Data.Param => {},
+ Data.Const => |value| value.deref(comp),
+ }
+ comp.gpa().destroy(self);
+ }
+ };
};
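
The refactor above replaces the per-scope-kind copies of the reference-counting setup with a shared Scope.init. A minimal sketch of the resulting two-step pattern, using a hypothetical scope kind (Example, payload, and Id.Example are illustrative, not part of the patch):

    pub const Example = struct {
        base: Scope,
        payload: usize,

        pub fn create(comp: *Compilation, parent: *Scope) !*Example {
            // allocate uninitialized, fill in the kind-specific fields...
            const self = try comp.gpa().createOne(Example);
            self.* = Example{
                .base = undefined,
                .payload = 0,
            };
            // ...then Scope.init sets id/parent/ref_count and refs the parent
            self.base.init(Id.Example, parent);
            return self;
        }
    };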
diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig
index 3b57260447..6783130fc7 100644
--- a/src-self-hosted/type.zig
+++ b/src-self-hosted/type.zig
@@ -141,9 +141,13 @@ pub const Type = struct {
Id.Promise,
=> return true,
+ Id.Pointer => {
+ const ptr_type = @fieldParentPtr(Pointer, "base", base);
+ return ptr_type.key.child_type.hasBits();
+ },
+
Id.ErrorSet => @panic("TODO"),
Id.Enum => @panic("TODO"),
- Id.Pointer => @panic("TODO"),
Id.Struct => @panic("TODO"),
Id.Array => @panic("TODO"),
Id.Optional => @panic("TODO"),
@@ -222,29 +226,65 @@ pub const Type = struct {
pub const Fn = struct {
base: Type,
key: Key,
+ non_key: NonKey,
garbage_node: std.atomic.Stack(*Fn).Node,
+ pub const Kind = enum {
+ Normal,
+ Generic,
+ };
+
+ pub const NonKey = union {
+ Normal: Normal,
+ Generic: void,
+
+ pub const Normal = struct {
+ variable_list: std.ArrayList(*Scope.Var),
+ };
+ };
+
pub const Key = struct {
data: Data,
alignment: ?u32,
- pub const Data = union(enum) {
+ pub const Data = union(Kind) {
Generic: Generic,
Normal: Normal,
};
+ pub const Normal = struct {
+ params: []Param,
+ return_type: *Type,
+ is_var_args: bool,
+ cc: CallingConvention,
+ };
+
+ pub const Generic = struct {
+ param_count: usize,
+ cc: CC,
+
+ pub const CC = union(CallingConvention) {
+ Auto,
+ C,
+ Cold,
+ Naked,
+ Stdcall,
+ Async: *Type, // allocator type
+ };
+ };
+
pub fn hash(self: *const Key) u32 {
var result: u32 = 0;
result +%= hashAny(self.alignment, 0);
switch (self.data) {
- Data.Generic => |generic| {
+ Kind.Generic => |generic| {
result +%= hashAny(generic.param_count, 1);
switch (generic.cc) {
CallingConvention.Async => |allocator_type| result +%= hashAny(allocator_type, 2),
else => result +%= hashAny(CallingConvention(generic.cc), 3),
}
},
- Data.Normal => |normal| {
+ Kind.Normal => |normal| {
result +%= hashAny(normal.return_type, 4);
result +%= hashAny(normal.is_var_args, 5);
result +%= hashAny(normal.cc, 6);
@@ -264,7 +304,7 @@ pub const Type = struct {
}
if (@TagType(Data)(self.data) != @TagType(Data)(other.data)) return false;
switch (self.data) {
- Data.Generic => |*self_generic| {
+ Kind.Generic => |*self_generic| {
const other_generic = &other.data.Generic;
if (self_generic.param_count != other_generic.param_count) return false;
if (CallingConvention(self_generic.cc) != CallingConvention(other_generic.cc)) return false;
@@ -276,7 +316,7 @@ pub const Type = struct {
else => {},
}
},
- Data.Normal => |*self_normal| {
+ Kind.Normal => |*self_normal| {
const other_normal = &other.data.Normal;
if (self_normal.cc != other_normal.cc) return false;
if (self_normal.is_var_args != other_normal.is_var_args) return false;
@@ -293,13 +333,13 @@ pub const Type = struct {
pub fn deref(key: Key, comp: *Compilation) void {
switch (key.data) {
- Key.Data.Generic => |generic| {
+ Kind.Generic => |generic| {
switch (generic.cc) {
CallingConvention.Async => |allocator_type| allocator_type.base.deref(comp),
else => {},
}
},
- Key.Data.Normal => |normal| {
+ Kind.Normal => |normal| {
normal.return_type.base.deref(comp);
for (normal.params) |param| {
param.typ.base.deref(comp);
@@ -310,13 +350,13 @@ pub const Type = struct {
pub fn ref(key: Key) void {
switch (key.data) {
- Key.Data.Generic => |generic| {
+ Kind.Generic => |generic| {
switch (generic.cc) {
CallingConvention.Async => |allocator_type| allocator_type.base.ref(),
else => {},
}
},
- Key.Data.Normal => |normal| {
+ Kind.Normal => |normal| {
normal.return_type.base.ref();
for (normal.params) |param| {
param.typ.base.ref();
@@ -326,27 +366,6 @@ pub const Type = struct {
}
};
- pub const Normal = struct {
- params: []Param,
- return_type: *Type,
- is_var_args: bool,
- cc: CallingConvention,
- };
-
- pub const Generic = struct {
- param_count: usize,
- cc: CC,
-
- pub const CC = union(CallingConvention) {
- Auto,
- C,
- Cold,
- Naked,
- Stdcall,
- Async: *Type, // allocator type
- };
- };
-
pub const CallingConvention = enum {
Auto,
C,
@@ -374,8 +393,8 @@ pub const Type = struct {
pub fn paramCount(self: *Fn) usize {
return switch (self.key.data) {
- Key.Data.Generic => |generic| generic.param_count,
- Key.Data.Normal => |normal| normal.params.len,
+ Kind.Generic => |generic| generic.param_count,
+ Kind.Normal => |normal| normal.params.len,
};
}
@@ -394,11 +413,13 @@ pub const Type = struct {
key.ref();
errdefer key.deref(comp);
- const self = try comp.gpa().create(Fn{
+ const self = try comp.gpa().createOne(Fn);
+ self.* = Fn{
.base = undefined,
.key = key,
+ .non_key = undefined,
.garbage_node = undefined,
- });
+ };
errdefer comp.gpa().destroy(self);
var name_buf = try std.Buffer.initSize(comp.gpa(), 0);
@@ -407,7 +428,8 @@ pub const Type = struct {
const name_stream = &std.io.BufferOutStream.init(&name_buf).stream;
switch (key.data) {
- Key.Data.Generic => |generic| {
+ Kind.Generic => |generic| {
+ self.non_key = NonKey{ .Generic = {} };
switch (generic.cc) {
CallingConvention.Async => |async_allocator_type| {
try name_stream.print("async<{}> ", async_allocator_type.name);
@@ -429,7 +451,10 @@ pub const Type = struct {
}
try name_stream.write(" var");
},
- Key.Data.Normal => |normal| {
+ Kind.Normal => |normal| {
+ self.non_key = NonKey{
+ .Normal = NonKey.Normal{ .variable_list = std.ArrayList(*Scope.Var).init(comp.gpa()) },
+ };
const cc_str = ccFnTypeStr(normal.cc);
try name_stream.print("{}fn(", cc_str);
for (normal.params) |param, i| {
@@ -462,6 +487,12 @@ pub const Type = struct {
pub fn destroy(self: *Fn, comp: *Compilation) void {
self.key.deref(comp);
+ switch (self.key.data) {
+ Kind.Generic => {},
+ Kind.Normal => {
+ self.non_key.Normal.variable_list.deinit();
+ },
+ }
comp.gpa().destroy(self);
}
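
Switching Key.Data from union(enum) to union(Kind) lets Key.Data and the new NonKey union share one tag enum, which is why the switch prongs above are spelled Kind.Generic / Kind.Normal. A small self-contained sketch of the feature (KeyData and describe are illustrative names, not from the patch):

    const Kind = enum {
        Normal,
        Generic,
    };

    const KeyData = union(Kind) {
        Normal: u32,
        Generic: void,
    };

    fn describe(data: KeyData) []const u8 {
        // because the union's tag type is Kind, prongs are spelled via Kind
        return switch (data) {
            Kind.Normal => "normal",
            Kind.Generic => "generic",
        };
    }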
diff --git a/src-self-hosted/value.zig b/src-self-hosted/value.zig
index 2005e3c119..e6dca4eff7 100644
--- a/src-self-hosted/value.zig
+++ b/src-self-hosted/value.zig
@@ -60,7 +60,7 @@ pub const Value = struct {
pub fn getLlvmConst(base: *Value, ofile: *ObjectFile) (error{OutOfMemory}!?llvm.ValueRef) {
switch (base.id) {
Id.Type => unreachable,
- Id.Fn => @panic("TODO"),
+ Id.Fn => return @fieldParentPtr(Fn, "base", base).getLlvmConst(ofile),
Id.FnProto => return @fieldParentPtr(FnProto, "base", base).getLlvmConst(ofile),
Id.Void => return null,
Id.Bool => return @fieldParentPtr(Bool, "base", base).getLlvmConst(ofile),
@@ -180,7 +180,7 @@ pub const Value = struct {
child_scope: *Scope,
/// parent is child_scope
- block_scope: *Scope.Block,
+ block_scope: ?*Scope.Block,
/// Path to the object file that contains this function
containing_object: Buffer,
@@ -205,7 +205,7 @@ pub const Value = struct {
},
.fndef_scope = fndef_scope,
.child_scope = &fndef_scope.base,
- .block_scope = undefined,
+ .block_scope = null,
.symbol_name = symbol_name,
.containing_object = Buffer.initNull(comp.gpa()),
.link_set_node = link_set_node,
@@ -231,6 +231,22 @@ pub const Value = struct {
self.symbol_name.deinit();
comp.gpa().destroy(self);
}
+
+ /// We know that the function definition will end up in an .o file somewhere.
+ /// Here, all we have to do is generate a global prototype.
+ /// TODO cache the prototype per ObjectFile
+ pub fn getLlvmConst(self: *Fn, ofile: *ObjectFile) !?llvm.ValueRef {
+ const llvm_fn_type = try self.base.typ.getLlvmType(ofile.arena, ofile.context);
+ const llvm_fn = llvm.AddFunction(
+ ofile.module,
+ self.symbol_name.ptr(),
+ llvm_fn_type,
+ ) orelse return error.OutOfMemory;
+
+ // TODO port more logic from codegen.cpp:fn_llvm_value
+
+ return llvm_fn;
+ }
};
pub const Void = struct {
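
Two fields in this patch (Scope.FnDef.fn_val and Value.Fn.block_scope) change from undefined placeholders to optionals initialized to null. A tiny sketch of why, with a hypothetical Node type: a null optional fails visibly, while an undefined pointer is silent undefined behavior.

    const Node = struct {
        // null until the field is assigned, instead of undefined
        next: ?*Node,

        fn nextNode(self: *Node) *Node {
            // trips safety checks if used before initialization, where an
            // undefined pointer would be read as garbage
            return self.next orelse unreachable;
        }
    };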
From 02713e8d8aa9641616bd85e77dda784009c96113 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 24 Jul 2018 21:28:54 -0400
Subject: [PATCH 06/37] fix race conditions in self-hosted compiler; add test
* fix race condition in std.event.Channel deinit
* add support to zig build for --no-rosegment
* add passing self-hosted compare-output test for calling a function
* put a global lock on LLD linking because it's not thread safe
---
build.zig | 4 ++++
src-self-hosted/compilation.zig | 3 +++
src-self-hosted/link.zig | 33 +++++++++++++++++++++++----------
std/build.zig | 21 +++++++++++++++++++++
std/event/channel.zig | 25 +++----------------------
test/stage2/compare_output.zig | 13 +++++++++++++
6 files changed, 67 insertions(+), 32 deletions(-)
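
Of these, the LLD lock is the interesting concurrency change: LLD keeps global state, so only one link may run per process at a time. A minimal sketch of the acquire/release pattern used in link.zig below, assuming this tree's std.event.Lock API (useExclusive is an illustrative name):

    async fn useExclusive(lock: *event.Lock) void {
        // acquire() is itself async; the returned handle releases the lock
        const held = await (async lock.acquire() catch unreachable);
        defer held.release();

        // ... exclusive access to the non-thread-safe resource here ...
    }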
diff --git a/build.zig b/build.zig
index e7a5c5cba1..dd939365a2 100644
--- a/build.zig
+++ b/build.zig
@@ -45,6 +45,7 @@ pub fn build(b: *Builder) !void {
.c_header_files = nextValue(&index, build_info),
.dia_guids_lib = nextValue(&index, build_info),
.llvm = undefined,
+ .no_rosegment = b.option(bool, "no-rosegment", "Workaround to enable valgrind builds") orelse false,
};
ctx.llvm = try findLLVM(b, ctx.llvm_config_exe);
@@ -228,6 +229,8 @@ fn configureStage2(b: *Builder, exe: var, ctx: Context) !void {
// TODO turn this into -Dextra-lib-path=/lib option
exe.addLibPath("/lib");
+ exe.setNoRoSegment(ctx.no_rosegment);
+
exe.addIncludeDir("src");
exe.addIncludeDir(ctx.cmake_binary_dir);
addCppLib(b, exe, ctx.cmake_binary_dir, "zig_cpp");
@@ -286,4 +289,5 @@ const Context = struct {
c_header_files: []const u8,
dia_guids_lib: []const u8,
llvm: LibraryDep,
+ no_rosegment: bool,
};
diff --git a/src-self-hosted/compilation.zig b/src-self-hosted/compilation.zig
index 1564abfbd3..5ff8b1a858 100644
--- a/src-self-hosted/compilation.zig
+++ b/src-self-hosted/compilation.zig
@@ -35,6 +35,7 @@ const CInt = @import("c_int.zig").CInt;
pub const EventLoopLocal = struct {
loop: *event.Loop,
llvm_handle_pool: std.atomic.Stack(llvm.ContextRef),
+ lld_lock: event.Lock,
/// TODO pool these so that it doesn't have to lock
prng: event.Locked(std.rand.DefaultPrng),
@@ -55,6 +56,7 @@ pub const EventLoopLocal = struct {
return EventLoopLocal{
.loop = loop,
+ .lld_lock = event.Lock.init(loop),
.llvm_handle_pool = std.atomic.Stack(llvm.ContextRef).init(),
.prng = event.Locked(std.rand.DefaultPrng).init(loop, std.rand.DefaultPrng.init(seed)),
.native_libc = event.Future(LibCInstallation).init(loop),
@@ -63,6 +65,7 @@ pub const EventLoopLocal = struct {
/// Must be called only after EventLoop.run completes.
fn deinit(self: *EventLoopLocal) void {
+ self.lld_lock.deinit();
while (self.llvm_handle_pool.pop()) |node| {
c.LLVMContextDispose(node.data);
self.loop.allocator.destroy(node);
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index b9eefa9d4f..3b79c5b891 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -80,15 +80,22 @@ pub async fn link(comp: *Compilation) !void {
const extern_ofmt = toExternObjectFormatType(comp.target.getObjectFormat());
const args_slice = ctx.args.toSlice();
- // Not evented I/O. LLD does its own multithreading internally.
- if (!ZigLLDLink(extern_ofmt, args_slice.ptr, args_slice.len, linkDiagCallback, @ptrCast(*c_void, &ctx))) {
- if (!ctx.link_msg.isNull()) {
- // TODO capture these messages and pass them through the system, reporting them through the
- // event system instead of printing them directly here.
- // perhaps try to parse and understand them.
- std.debug.warn("{}\n", ctx.link_msg.toSliceConst());
+
+ {
+ // LLD is not thread-safe, so we grab a global lock.
+ const held = await (async comp.event_loop_local.lld_lock.acquire() catch unreachable);
+ defer held.release();
+
+ // Not evented I/O. LLD does its own multithreading internally.
+ if (!ZigLLDLink(extern_ofmt, args_slice.ptr, args_slice.len, linkDiagCallback, @ptrCast(*c_void, &ctx))) {
+ if (!ctx.link_msg.isNull()) {
+ // TODO capture these messages and pass them through the system, reporting them through the
+ // event system instead of printing them directly here.
+ // perhaps try to parse and understand them.
+ std.debug.warn("{}\n", ctx.link_msg.toSliceConst());
+ }
+ return error.LinkFailed;
}
- return error.LinkFailed;
}
}
@@ -672,7 +679,13 @@ const DarwinPlatform = struct {
};
var had_extra: bool = undefined;
- try darwinGetReleaseVersion(ver_str, &result.major, &result.minor, &result.micro, &had_extra,);
+ try darwinGetReleaseVersion(
+ ver_str,
+ &result.major,
+ &result.minor,
+ &result.micro,
+ &had_extra,
+ );
if (had_extra or result.major != 10 or result.minor >= 100 or result.micro >= 100) {
return error.InvalidDarwinVersionString;
}
@@ -713,7 +726,7 @@ fn darwinGetReleaseVersion(str: []const u8, major: *u32, minor: *u32, micro: *u3
return error.InvalidDarwinVersionString;
var start_pos: usize = 0;
- for ([]*u32{major, minor, micro}) |v| {
+ for ([]*u32{ major, minor, micro }) |v| {
const dot_pos = mem.indexOfScalarPos(u8, str, start_pos, '.');
const end_pos = dot_pos orelse str.len;
v.* = std.fmt.parseUnsigned(u32, str[start_pos..end_pos], 10) catch return error.InvalidDarwinVersionString;
diff --git a/std/build.zig b/std/build.zig
index cea760e8a2..68cf13c1eb 100644
--- a/std/build.zig
+++ b/std/build.zig
@@ -807,6 +807,7 @@ pub const LibExeObjStep = struct {
disable_libc: bool,
frameworks: BufSet,
verbose_link: bool,
+ no_rosegment: bool,
// zig only stuff
root_src: ?[]const u8,
@@ -874,6 +875,7 @@ pub const LibExeObjStep = struct {
fn initExtraArgs(builder: *Builder, name: []const u8, root_src: ?[]const u8, kind: Kind, static: bool, ver: *const Version) LibExeObjStep {
var self = LibExeObjStep{
+ .no_rosegment = false,
.strip = false,
.builder = builder,
.verbose_link = false,
@@ -914,6 +916,7 @@ pub const LibExeObjStep = struct {
fn initC(builder: *Builder, name: []const u8, kind: Kind, version: *const Version, static: bool) LibExeObjStep {
var self = LibExeObjStep{
+ .no_rosegment = false,
.builder = builder,
.name = name,
.kind = kind,
@@ -953,6 +956,10 @@ pub const LibExeObjStep = struct {
return self;
}
+ pub fn setNoRoSegment(self: *LibExeObjStep, value: bool) void {
+ self.no_rosegment = value;
+ }
+
fn computeOutFileNames(self: *LibExeObjStep) void {
switch (self.kind) {
Kind.Obj => {
@@ -1306,6 +1313,10 @@ pub const LibExeObjStep = struct {
}
}
+ if (self.no_rosegment) {
+ try zig_args.append("--no-rosegment");
+ }
+
try builder.spawnChild(zig_args.toSliceConst());
if (self.kind == Kind.Lib and !self.static and self.target.wantSharedLibSymLinks()) {
@@ -1598,6 +1609,7 @@ pub const TestStep = struct {
include_dirs: ArrayList([]const u8),
lib_paths: ArrayList([]const u8),
object_files: ArrayList([]const u8),
+ no_rosegment: bool,
pub fn init(builder: *Builder, root_src: []const u8) TestStep {
const step_name = builder.fmt("test {}", root_src);
@@ -1615,9 +1627,14 @@ pub const TestStep = struct {
.include_dirs = ArrayList([]const u8).init(builder.allocator),
.lib_paths = ArrayList([]const u8).init(builder.allocator),
.object_files = ArrayList([]const u8).init(builder.allocator),
+ .no_rosegment = false,
};
}
+ pub fn setNoRoSegment(self: *TestStep, value: bool) void {
+ self.no_rosegment = value;
+ }
+
pub fn addLibPath(self: *TestStep, path: []const u8) void {
self.lib_paths.append(path) catch unreachable;
}
@@ -1761,6 +1778,10 @@ pub const TestStep = struct {
try zig_args.append(lib_path);
}
+ if (self.no_rosegment) {
+ try zig_args.append("--no-rosegment");
+ }
+
try builder.spawnChild(zig_args.toSliceConst());
}
};
diff --git a/std/event/channel.zig b/std/event/channel.zig
index d4d713bdee..03a036042b 100644
--- a/std/event/channel.zig
+++ b/std/event/channel.zig
@@ -71,11 +71,6 @@ pub fn Channel(comptime T: type) type {
/// puts a data item in the channel. The promise completes when the value has been added to the
/// buffer, or in the case of a zero size buffer, when the item has been retrieved by a getter.
pub async fn put(self: *SelfChannel, data: T) void {
- // TODO should be able to group memory allocation failure before first suspend point
- // so that the async invocation catches it
- var dispatch_tick_node_ptr: *Loop.NextTickNode = undefined;
- _ = async self.dispatch(&dispatch_tick_node_ptr) catch unreachable;
-
suspend |handle| {
var my_tick_node = Loop.NextTickNode{
.next = undefined,
@@ -91,18 +86,13 @@ pub fn Channel(comptime T: type) type {
self.putters.put(&queue_node);
_ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
- self.loop.onNextTick(dispatch_tick_node_ptr);
+ self.dispatch();
}
}
/// await this function to get an item from the channel. If the buffer is empty, the promise will
/// complete when the next item is put in the channel.
pub async fn get(self: *SelfChannel) T {
- // TODO should be able to group memory allocation failure before first suspend point
- // so that the async invocation catches it
- var dispatch_tick_node_ptr: *Loop.NextTickNode = undefined;
- _ = async self.dispatch(&dispatch_tick_node_ptr) catch unreachable;
-
// TODO integrate this function with named return values
// so we can get rid of this extra result copy
var result: T = undefined;
@@ -121,21 +111,12 @@ pub fn Channel(comptime T: type) type {
self.getters.put(&queue_node);
_ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
- self.loop.onNextTick(dispatch_tick_node_ptr);
+ self.dispatch();
}
return result;
}
- async fn dispatch(self: *SelfChannel, tick_node_ptr: **Loop.NextTickNode) void {
- // resumed by onNextTick
- suspend |handle| {
- var tick_node = Loop.NextTickNode{
- .data = handle,
- .next = undefined,
- };
- tick_node_ptr.* = &tick_node;
- }
-
+ fn dispatch(self: *SelfChannel) void {
// set the "need dispatch" flag
_ = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);

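With dispatch() now a plain function, put() and get() no longer pre-spawn a helper coroutine, and the allocation-before-first-suspend TODOs above disappear. A usage sketch, assuming this tree's std.event.Channel API (sendOne is an illustrative name):

    async fn sendOne(channel: *Channel(i32)) void {
        // put() enqueues this coroutine and calls dispatch() synchronously;
        // no NextTickNode is allocated before the first suspend point
        await (async channel.put(42) catch unreachable);
    }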
diff --git a/test/stage2/compare_output.zig b/test/stage2/compare_output.zig
index 35adcbb96b..fdc3d49145 100644
--- a/test/stage2/compare_output.zig
+++ b/test/stage2/compare_output.zig
@@ -2,6 +2,7 @@ const std = @import("std");
const TestContext = @import("../../src-self-hosted/test.zig").TestContext;
pub fn addCases(ctx: *TestContext) !void {
+ // hello world
try ctx.testCompareOutputLibC(
\\extern fn puts([*]const u8) void;
\\export fn main() c_int {
@@ -9,4 +10,16 @@ pub fn addCases(ctx: *TestContext) !void {
\\ return 0;
\\}
, "Hello, world!" ++ std.cstr.line_sep);
+
+ // function calling another function
+ try ctx.testCompareOutputLibC(
+ \\extern fn puts(s: [*]const u8) void;
+ \\export fn main() c_int {
+ \\ return foo(c"OK");
+ \\}
+ \\fn foo(s: [*]const u8) c_int {
+ \\ puts(s);
+ \\ return 0;
+ \\}
+ , "OK" ++ std.cstr.line_sep);
}
From 95f45cfc34cd5e77dad2318cab27194535e14d16 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 25 Jul 2018 02:36:29 -0400
Subject: [PATCH 07/37] patch LLD to fix COFF crashing when linking twice in
same process
closes #1289
---
deps/lld/COFF/Driver.cpp | 3 +++
1 file changed, 3 insertions(+)
diff --git a/deps/lld/COFF/Driver.cpp b/deps/lld/COFF/Driver.cpp
index 0f3d8fb0b4..34b968fe5e 100644
--- a/deps/lld/COFF/Driver.cpp
+++ b/deps/lld/COFF/Driver.cpp
@@ -72,6 +72,9 @@ bool link(ArrayRef<const char *> Args, bool CanExitEarly, raw_ostream &Diag) {
exitLld(errorCount() ? 1 : 0);
freeArena();
+ ObjFile::Instances.clear();
+ ImportFile::Instances.clear();
+ BitcodeFile::Instances.clear();
return !errorCount();
}
From 2257660916a8c92d953a5a71da6c2d4f7cc031e6 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 25 Jul 2018 13:12:03 -0400
Subject: [PATCH 08/37] fix assertion failure when some compile errors happen
I don't actually know of a test case that triggers this.
The self-hosted compiler won't have this problem, because
get_pointer_to_type will return error.SemanticAnalysisFailed.
---
src/ir.cpp | 3 +++
1 file changed, 3 insertions(+)
diff --git a/src/ir.cpp b/src/ir.cpp
index fe5fb77085..fd2558c5eb 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -18991,6 +18991,9 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_err_payload(IrAnalyze *ira,
return ira->codegen->builtin_types.entry_invalid;
} else if (type_entry->id == TypeTableEntryIdErrorUnion) {
TypeTableEntry *payload_type = type_entry->data.error_union.payload_type;
+ if (type_is_invalid(payload_type)) {
+ return ira->codegen->builtin_types.entry_invalid;
+ }
TypeTableEntry *result_type = get_pointer_to_type_extra(ira->codegen, payload_type,
ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile,
PtrLenSingle,
From 84195467ad974f9b7201e4e1bbd6dccbd5e7ab90 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 25 Jul 2018 17:08:55 -0400
Subject: [PATCH 09/37] add compile error for non-inline for loop on comptime
type
---
src/ir.cpp | 2 +-
test/compile_errors.zig | 14 ++++++++++++++
2 files changed, 15 insertions(+), 1 deletion(-)
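
For reference, the shape of code this error points toward: since each element's type member is comptime-only, the loop has to be unrolled at compile time. A hedged sketch of the inline form (not part of the patch, and assuming the array is comptime-known):

    const Foo = struct {
        name: []const u8,
        T: type,
    };
    const xx = []Foo{
        Foo{ .name = "a", .T = u8 },
        Foo{ .name = "b", .T = u16 },
    };
    export fn entry() void {
        // inline unrolls the loop, so each f is comptime-known
        inline for (xx) |f| {
            _ = f.name;
        }
    }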
diff --git a/src/ir.cpp b/src/ir.cpp
index fd2558c5eb..424987823b 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -12142,7 +12142,7 @@ static TypeTableEntry *ir_analyze_instruction_decl_var(IrAnalyze *ira, IrInstruc
result_type = ira->codegen->builtin_types.entry_invalid;
} else if (type_requires_comptime(result_type)) {
var_class_requires_const = true;
- if (!var->src_is_const && !is_comptime_var) {
+ if (!var->gen_is_const && !is_comptime_var) {
ir_add_error_node(ira, source_node,
buf_sprintf("variable of type '%s' must be const or comptime",
buf_ptr(&result_type->name)));
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index b7bd39f29e..91693e091c 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -1,6 +1,20 @@
const tests = @import("tests.zig");
pub fn addCases(cases: *tests.CompileErrorContext) void {
+ cases.add(
+ "non-inline for loop on a type that requires comptime",
+ \\const Foo = struct {
+ \\ name: []const u8,
+ \\ T: type,
+ \\};
+ \\export fn entry() void {
+ \\ const xx: [2]Foo = undefined;
+ \\ for (xx) |f| {}
+ \\}
+ ,
+ ".tmp_source.zig:7:15: error: variable of type 'Foo' must be const or comptime",
+ );
+
cases.add(
"generic fn as parameter without comptime keyword",
\\fn f(_: fn (var) void) void {}
From fd575fe1f3b45806f2cf823a2abe3727d381d4ed Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 25 Jul 2018 18:15:55 -0400
Subject: [PATCH 10/37] add compile error for missing parameter name of generic
function
---
src/ir.cpp | 1 +
test/compile_errors.zig | 11 +++++++++++
2 files changed, 12 insertions(+)
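
The fixed-up form of the new test case is simply to name the generic parameter; a sketch (x is an arbitrary name):

    fn dump(x: var) void {}
    export fn entry() void {
        var a: u8 = 9;
        dump(a);
    }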
diff --git a/src/ir.cpp b/src/ir.cpp
index 424987823b..e40c129953 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -12591,6 +12591,7 @@ static bool ir_analyze_fn_call_generic_arg(IrAnalyze *ira, AstNode *fn_proto_nod
}
Buf *param_name = param_decl_node->data.param_decl.name;
+ if (!param_name) return false;
if (!is_var_args) {
VariableTableEntry *var = add_variable(ira->codegen, param_decl_node,
*child_scope, param_name, true, arg_val, nullptr);
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 91693e091c..83bf715f78 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -1,6 +1,17 @@
const tests = @import("tests.zig");
pub fn addCases(cases: *tests.CompileErrorContext) void {
+ cases.add(
+ "missing parameter name of generic function",
+ \\fn dump(var) void {}
+ \\export fn entry() void {
+ \\ var a: u8 = 9;
+ \\ dump(a);
+ \\}
+ ,
+ ".tmp_source.zig:1:9: error: missing parameter name",
+ );
+
cases.add(
"non-inline for loop on a type that requires comptime",
\\const Foo = struct {
From 2cbad364c1d23b64ae064f8547590c133b4f070a Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 26 Jul 2018 18:29:07 -0400
Subject: [PATCH 11/37] add compile error for ignoring return value of while
loop bodies
closes #1049
---
src/analyze.cpp | 2 +-
src/ir.cpp | 12 +++++++++---
src/ir_print.cpp | 4 ++++
test/compile_errors.zig | 22 ++++++++++++++++++++++
4 files changed, 36 insertions(+), 4 deletions(-)
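
The accepted form discards the value explicitly; a sketch of the fix for the first rejected function in the new test (mirrors the test, not part of the patch):

    fn returns() usize {
        return 2;
    }
    export fn f1() void {
        while (true) {
            // block body; the usize result is discarded on purpose
            _ = returns();
        }
    }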
diff --git a/src/analyze.cpp b/src/analyze.cpp
index a4bfff78c3..aadee29fc8 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -4056,7 +4056,7 @@ void analyze_fn_ir(CodeGen *g, FnTableEntry *fn_table_entry, AstNode *return_typ
}
if (g->verbose_ir) {
- fprintf(stderr, "{ // (analyzed)\n");
+ fprintf(stderr, "fn %s() { // (analyzed)\n", buf_ptr(&fn_table_entry->symbol_name));
ir_print(g, stderr, &fn_table_entry->analyzed_executable, 4);
fprintf(stderr, "}\n");
}
diff --git a/src/ir.cpp b/src/ir.cpp
index e40c129953..a6007852e0 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -5251,8 +5251,10 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n
if (body_result == irb->codegen->invalid_instruction)
return body_result;
- if (!instr_is_unreachable(body_result))
+ if (!instr_is_unreachable(body_result)) {
+ ir_mark_gen(ir_build_check_statement_is_void(irb, payload_scope, node->data.while_expr.body, body_result));
ir_mark_gen(ir_build_br(irb, payload_scope, node, continue_block, is_comptime));
+ }
if (continue_expr_node) {
ir_set_cursor_at_end_and_append_block(irb, continue_block);
@@ -5331,8 +5333,10 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n
if (body_result == irb->codegen->invalid_instruction)
return body_result;
- if (!instr_is_unreachable(body_result))
+ if (!instr_is_unreachable(body_result)) {
+ ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, node->data.while_expr.body, body_result));
ir_mark_gen(ir_build_br(irb, child_scope, node, continue_block, is_comptime));
+ }
if (continue_expr_node) {
ir_set_cursor_at_end_and_append_block(irb, continue_block);
@@ -5392,8 +5396,10 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n
if (body_result == irb->codegen->invalid_instruction)
return body_result;
- if (!instr_is_unreachable(body_result))
+ if (!instr_is_unreachable(body_result)) {
+ ir_mark_gen(ir_build_check_statement_is_void(irb, scope, node->data.while_expr.body, body_result));
ir_mark_gen(ir_build_br(irb, scope, node, continue_block, is_comptime));
+ }
if (continue_expr_node) {
ir_set_cursor_at_end_and_append_block(irb, continue_block);
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 6182958d0a..127afa94a5 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -45,6 +45,10 @@ static void ir_print_var_instruction(IrPrint *irp, IrInstruction *instruction) {
}
static void ir_print_other_instruction(IrPrint *irp, IrInstruction *instruction) {
+ if (instruction == nullptr) {
+ fprintf(irp->f, "(null)");
+ return;
+ }
if (instruction->value.special != ConstValSpecialRuntime) {
ir_print_const_value(irp, &instruction->value);
} else {
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 83bf715f78..2c4c9208eb 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -1,6 +1,28 @@
const tests = @import("tests.zig");
pub fn addCases(cases: *tests.CompileErrorContext) void {
+ cases.add(
+ "while loop body expression ignored",
+ \\fn returns() usize {
+ \\ return 2;
+ \\}
+ \\export fn f1() void {
+ \\ while (true) returns();
+ \\}
+ \\export fn f2() void {
+ \\ var x: ?i32 = null;
+ \\ while (x) |_| returns();
+ \\}
+ \\export fn f3() void {
+ \\ var x: error!i32 = error.Bad;
+ \\ while (x) |_| returns() else |_| unreachable;
+ \\}
+ ,
+ ".tmp_source.zig:5:25: error: expression value is ignored",
+ ".tmp_source.zig:9:26: error: expression value is ignored",
+ ".tmp_source.zig:13:26: error: expression value is ignored",
+ );
+
cases.add(
"missing parameter name of generic function",
\\fn dump(var) void {}
From b3f4182ca1756ccf84fe5bbc88594a91ead617b5 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 26 Jul 2018 22:26:00 -0400
Subject: [PATCH 12/37] coroutines have 3 more bits of atomic state
---
src/all_types.hpp | 2 +-
src/analyze.cpp | 13 +++++---
src/ir.cpp | 80 +++++++++++++++++++++++++++++++++--------------
3 files changed, 66 insertions(+), 29 deletions(-)
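
The scheme: the promise's old ?promise awaiter field becomes one usize. Promise frames are forced to at least 8-byte alignment (see the get_abi_alignment change below), so the low 3 bits of an awaiter's address are always zero and can carry flags: 0b001 = canceled, 0b010 = suspended. A sketch of the encoding in Zig, with hypothetical helper names:

    const IS_CANCELED: usize = 0x1; // 0b001
    const IS_SUSPENDED: usize = 0x2; // 0b010
    const PTR_MASK: usize = ~usize(0x7); // 0b111...000

    fn awaiterAddr(atomic_state: usize) usize {
        // high bits hold the awaiter's promise address, or 0 if none
        return atomic_state & PTR_MASK;
    }

    fn isCanceled(atomic_state: usize) bool {
        return (atomic_state & IS_CANCELED) != 0;
    }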
diff --git a/src/all_types.hpp b/src/all_types.hpp
index bcd6a04cc3..70ea629c59 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -3245,7 +3245,7 @@ static const size_t stack_trace_ptr_count = 30;
#define RESULT_FIELD_NAME "result"
#define ASYNC_ALLOC_FIELD_NAME "allocFn"
#define ASYNC_FREE_FIELD_NAME "freeFn"
-#define AWAITER_HANDLE_FIELD_NAME "awaiter_handle"
+#define ATOMIC_STATE_FIELD_NAME "atomic_state"
// these point to data belonging to the awaiter
#define ERR_RET_TRACE_PTR_FIELD_NAME "err_ret_trace_ptr"
#define RESULT_PTR_FIELD_NAME "result_ptr"
diff --git a/src/analyze.cpp b/src/analyze.cpp
index aadee29fc8..74d59f966a 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -519,11 +519,11 @@ TypeTableEntry *get_promise_frame_type(CodeGen *g, TypeTableEntry *return_type)
return return_type->promise_frame_parent;
}
- TypeTableEntry *awaiter_handle_type = get_optional_type(g, g->builtin_types.entry_promise);
+ TypeTableEntry *atomic_state_type = g->builtin_types.entry_usize;
TypeTableEntry *result_ptr_type = get_pointer_to_type(g, return_type, false);
ZigList<const char *> field_names = {};
- field_names.append(AWAITER_HANDLE_FIELD_NAME);
+ field_names.append(ATOMIC_STATE_FIELD_NAME);
field_names.append(RESULT_FIELD_NAME);
field_names.append(RESULT_PTR_FIELD_NAME);
if (g->have_err_ret_tracing) {
@@ -533,7 +533,7 @@ TypeTableEntry *get_promise_frame_type(CodeGen *g, TypeTableEntry *return_type)
}
ZigList<TypeTableEntry *> field_types = {};
- field_types.append(awaiter_handle_type);
+ field_types.append(atomic_state_type);
field_types.append(return_type);
field_types.append(result_ptr_type);
if (g->have_err_ret_tracing) {
@@ -6228,7 +6228,12 @@ uint32_t get_abi_alignment(CodeGen *g, TypeTableEntry *type_entry) {
} else if (type_entry->id == TypeTableEntryIdOpaque) {
return 1;
} else {
- return LLVMABIAlignmentOfType(g->target_data_ref, type_entry->type_ref);
+ uint32_t llvm_alignment = LLVMABIAlignmentOfType(g->target_data_ref, type_entry->type_ref);
+ // promises have at least alignment 8 so that we can have 3 extra bits when doing atomicrmw
+ if (type_entry->id == TypeTableEntryIdPromise && llvm_alignment < 8) {
+ return 8;
+ }
+ return llvm_alignment;
}
}
diff --git a/src/ir.cpp b/src/ir.cpp
index a6007852e0..5466e64e55 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -3097,19 +3097,47 @@ static IrInstruction *ir_gen_async_return(IrBuilder *irb, Scope *scope, AstNode
return return_inst;
}
+ IrBasicBlock *canceled_block = ir_create_basic_block(irb, scope, "Canceled");
+ IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
+ IrBasicBlock *suspended_block = ir_create_basic_block(irb, scope, "Suspended");
+ IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, scope, "NotSuspended");
+
ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_field_ptr, return_value);
- IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node,
- get_optional_type(irb->codegen, irb->codegen->builtin_types.entry_promise));
- // TODO replace replacement_value with @intToPtr(?promise, 0x1) when it doesn't crash zig
- IrInstruction *replacement_value = irb->exec->coro_handle;
- IrInstruction *maybe_await_handle = ir_build_atomic_rmw(irb, scope, node,
- promise_type_val, irb->exec->coro_awaiter_field_ptr, nullptr, replacement_value, nullptr,
- AtomicRmwOp_xchg, AtomicOrderSeqCst);
- ir_build_store_ptr(irb, scope, node, irb->exec->await_handle_var_ptr, maybe_await_handle);
- IrInstruction *is_non_null = ir_build_test_nonnull(irb, scope, node, maybe_await_handle);
+ IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
+ IrInstruction *replacement_value = ir_build_const_usize(irb, scope, node, 0xa); // 0b1010
+ IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
+ usize_type_val, irb->exec->coro_awaiter_field_ptr, nullptr, replacement_value, nullptr,
+ AtomicRmwOp_or, AtomicOrderSeqCst);
+
+ IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false);
- return ir_build_cond_br(irb, scope, node, is_non_null, irb->exec->coro_normal_final, irb->exec->coro_early_final,
- is_comptime);
+ IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
+ IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
+ IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
+ ir_build_cond_br(irb, scope, node, is_canceled_bool, canceled_block, not_canceled_block, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, canceled_block);
+ ir_mark_gen(ir_build_br(irb, scope, node, irb->exec->coro_final_cleanup_block, is_comptime));
+
+ ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
+ IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111
+ IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, inverted_ptr_mask, false);
+ IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
+ ir_build_cond_br(irb, scope, node, is_suspended_bool, suspended_block, not_suspended_block, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, suspended_block);
+ ir_build_unreachable(irb, scope, node);
+
+ ir_set_cursor_at_end_and_append_block(irb, not_suspended_block);
+ IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
+ IrInstruction *await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
+ IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_promise);
+ // if we ever add null checking safety to the ptrtoint instruction, it needs to be disabled here
+ IrInstruction *await_handle = ir_build_int_to_ptr(irb, scope, node, promise_type_val, await_handle_addr);
+ ir_build_store_ptr(irb, scope, node, irb->exec->await_handle_var_ptr, await_handle);
+ IrInstruction *is_non_null = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
+ return ir_build_cond_br(irb, scope, node, is_non_null, irb->exec->coro_normal_final,
+ irb->exec->coro_early_final, is_comptime);
// the above blocks are rendered by ir_gen after the rest of codegen
}
@@ -6708,9 +6736,9 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *parent_scope, Ast
ir_build_store_ptr(irb, parent_scope, node, err_ret_trace_ptr_field_ptr, err_ret_trace_ptr);
}
- Buf *awaiter_handle_field_name = buf_create_from_str(AWAITER_HANDLE_FIELD_NAME);
- IrInstruction *awaiter_field_ptr = ir_build_field_ptr(irb, parent_scope, node, coro_promise_ptr,
- awaiter_handle_field_name);
+ Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
+ IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, parent_scope, node, coro_promise_ptr,
+ atomic_state_field_name);
IrInstruction *const_bool_false = ir_build_const_bool(irb, parent_scope, node, false);
VariableTableEntry *result_var = ir_create_var(irb, node, parent_scope, nullptr,
@@ -6723,12 +6751,16 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *parent_scope, Ast
IrInstruction *my_result_var_ptr = ir_build_var_ptr(irb, parent_scope, node, result_var);
ir_build_store_ptr(irb, parent_scope, node, result_ptr_field_ptr, my_result_var_ptr);
IrInstruction *save_token = ir_build_coro_save(irb, parent_scope, node, irb->exec->coro_handle);
- IrInstruction *promise_type_val = ir_build_const_type(irb, parent_scope, node,
- get_optional_type(irb->codegen, irb->codegen->builtin_types.entry_promise));
- IrInstruction *maybe_await_handle = ir_build_atomic_rmw(irb, parent_scope, node,
- promise_type_val, awaiter_field_ptr, nullptr, irb->exec->coro_handle, nullptr,
- AtomicRmwOp_xchg, AtomicOrderSeqCst);
- IrInstruction *is_non_null = ir_build_test_nonnull(irb, parent_scope, node, maybe_await_handle);
+ IrInstruction *usize_type_val = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_usize);
+ IrInstruction *coro_handle_addr = ir_build_ptr_to_int(irb, parent_scope, node, irb->exec->coro_handle);
+ IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, parent_scope, node,
+ usize_type_val, atomic_state_ptr, nullptr, coro_handle_addr, nullptr,
+ AtomicRmwOp_or, AtomicOrderSeqCst);
+ IrInstruction *zero = ir_build_const_usize(irb, parent_scope, node, 0);
+ IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, parent_scope, node, 0x7); // 0b111
+ IrInstruction *ptr_mask = ir_build_un_op(irb, parent_scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
+ IrInstruction *await_handle_addr = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
+ IrInstruction *is_non_null = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
IrBasicBlock *yes_suspend_block = ir_create_basic_block(irb, parent_scope, "YesSuspend");
IrBasicBlock *no_suspend_block = ir_create_basic_block(irb, parent_scope, "NoSuspend");
IrBasicBlock *merge_block = ir_create_basic_block(irb, parent_scope, "MergeSuspend");
@@ -7087,10 +7119,11 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
IrInstruction *coro_mem_ptr = ir_build_ptr_cast(irb, coro_scope, node, u8_ptr_type, maybe_coro_mem_ptr);
irb->exec->coro_handle = ir_build_coro_begin(irb, coro_scope, node, coro_id, coro_mem_ptr);
- Buf *awaiter_handle_field_name = buf_create_from_str(AWAITER_HANDLE_FIELD_NAME);
+ Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
irb->exec->coro_awaiter_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
- awaiter_handle_field_name);
- ir_build_store_ptr(irb, scope, node, irb->exec->coro_awaiter_field_ptr, null_value);
+ atomic_state_field_name);
+ IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
+ ir_build_store_ptr(irb, scope, node, irb->exec->coro_awaiter_field_ptr, zero);
Buf *result_field_name = buf_create_from_str(RESULT_FIELD_NAME);
irb->exec->coro_result_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name);
result_ptr_field_name = buf_create_from_str(RESULT_PTR_FIELD_NAME);
@@ -7108,7 +7141,6 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
// coordinate with builtin.zig
Buf *index_name = buf_create_from_str("index");
IrInstruction *index_ptr = ir_build_field_ptr(irb, scope, node, err_ret_trace_ptr, index_name);
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
ir_build_store_ptr(irb, scope, node, index_ptr, zero);
Buf *instruction_addresses_name = buf_create_from_str("instruction_addresses");
From 7113f109a4111acadf0533ca84e529d229892c8c Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 27 Jul 2018 15:50:26 -0400
Subject: [PATCH 13/37] update coroutine return codegen with new status bits
---
src/all_types.hpp | 2 +-
src/ir.cpp | 49 +++++++++++++++++++++++------------------------
2 files changed, 25 insertions(+), 26 deletions(-)
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 70ea629c59..3ac7afe474 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -60,7 +60,7 @@ struct IrExecutable {
ZigList<Tld *> tld_list;
IrInstruction *coro_handle;
- IrInstruction *coro_awaiter_field_ptr; // this one is shared and in the promise
+ IrInstruction *atomic_state_field_ptr; // this one is shared and in the promise
IrInstruction *coro_result_ptr_field_ptr;
IrInstruction *coro_result_field_ptr;
IrInstruction *await_handle_var_ptr; // this one is where we put the one we extracted from the promise
diff --git a/src/ir.cpp b/src/ir.cpp
index 5466e64e55..8ebac847ac 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -3097,31 +3097,26 @@ static IrInstruction *ir_gen_async_return(IrBuilder *irb, Scope *scope, AstNode
return return_inst;
}
- IrBasicBlock *canceled_block = ir_create_basic_block(irb, scope, "Canceled");
- IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
IrBasicBlock *suspended_block = ir_create_basic_block(irb, scope, "Suspended");
IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, scope, "NotSuspended");
+ IrBasicBlock *store_awaiter_block = ir_create_basic_block(irb, scope, "StoreAwaiter");
+ IrBasicBlock *check_canceled_block = ir_create_basic_block(irb, scope, "CheckCanceled");
+
+ IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111
+ IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
+ IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
+ IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
+ IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_promise);
+ IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false);
+ IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_field_ptr, return_value);
IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
- IrInstruction *replacement_value = ir_build_const_usize(irb, scope, node, 0xa); // 0b1010
IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
- usize_type_val, irb->exec->coro_awaiter_field_ptr, nullptr, replacement_value, nullptr,
+ usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, ptr_mask, nullptr,
AtomicRmwOp_or, AtomicOrderSeqCst);
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
- IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false);
- IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
- IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
- IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_canceled_bool, canceled_block, not_canceled_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, canceled_block);
- ir_mark_gen(ir_build_br(irb, scope, node, irb->exec->coro_final_cleanup_block, is_comptime));
-
- ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
- IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111
- IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, inverted_ptr_mask, false);
+ IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
ir_build_cond_br(irb, scope, node, is_suspended_bool, suspended_block, not_suspended_block, is_comptime);
@@ -3129,16 +3124,20 @@ static IrInstruction *ir_gen_async_return(IrBuilder *irb, Scope *scope, AstNode
ir_build_unreachable(irb, scope, node);
ir_set_cursor_at_end_and_append_block(irb, not_suspended_block);
- IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
IrInstruction *await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
- IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_promise);
// if we ever add null checking safety to the ptrtoint instruction, it needs to be disabled here
+ IrInstruction *have_await_handle = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
+ ir_build_cond_br(irb, scope, node, have_await_handle, store_awaiter_block, check_canceled_block, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, store_awaiter_block);
IrInstruction *await_handle = ir_build_int_to_ptr(irb, scope, node, promise_type_val, await_handle_addr);
ir_build_store_ptr(irb, scope, node, irb->exec->await_handle_var_ptr, await_handle);
- IrInstruction *is_non_null = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
- return ir_build_cond_br(irb, scope, node, is_non_null, irb->exec->coro_normal_final,
- irb->exec->coro_early_final, is_comptime);
- // the above blocks are rendered by ir_gen after the rest of codegen
+ ir_build_br(irb, scope, node, irb->exec->coro_normal_final, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, check_canceled_block);
+ IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
+ IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
+ return ir_build_cond_br(irb, scope, node, is_canceled_bool, irb->exec->coro_final_cleanup_block, irb->exec->coro_early_final, is_comptime);
}
static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval) {
@@ -7120,10 +7119,10 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
irb->exec->coro_handle = ir_build_coro_begin(irb, coro_scope, node, coro_id, coro_mem_ptr);
Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
- irb->exec->coro_awaiter_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
+ irb->exec->atomic_state_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
atomic_state_field_name);
IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
- ir_build_store_ptr(irb, scope, node, irb->exec->coro_awaiter_field_ptr, zero);
+ ir_build_store_ptr(irb, scope, node, irb->exec->atomic_state_field_ptr, zero);
Buf *result_field_name = buf_create_from_str(RESULT_FIELD_NAME);
irb->exec->coro_result_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name);
result_ptr_field_name = buf_create_from_str(RESULT_PTR_FIELD_NAME);
From 10764ee0e66e5d9a815073340d8f16a58e225422 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 27 Jul 2018 17:00:41 -0400
Subject: [PATCH 14/37] resume clears suspend bit
---
src/ir.cpp | 137 +++++++++++++++++++++++++++++++++--------------------
1 file changed, 85 insertions(+), 52 deletions(-)
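
The core of the change is an atomic and that clears the suspend flag while fetching the prior state, so resume can tell whether the coroutine was already canceled. A sketch in Zig of the same read-modify-write, assuming this era's builtin enums (tryResume is an illustrative name):

    const builtin = @import("builtin");
    const AtomicRmwOp = builtin.AtomicRmwOp;
    const AtomicOrder = builtin.AtomicOrder;

    fn tryResume(atomic_state_ptr: *usize) bool {
        // clear the 0b010 suspend bit and fetch the previous value in one step
        const prev = @atomicRmw(usize, atomic_state_ptr, AtomicRmwOp.And, ~usize(0x2), AtomicOrder.SeqCst);
        // only resume if the 0b001 canceled bit was not set
        return (prev & 0x1) == 0;
    }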
diff --git a/src/ir.cpp b/src/ir.cpp
index 8ebac847ac..2eff986694 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -6686,20 +6686,53 @@ static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *parent_scope, AstNode
return ir_build_cancel(irb, parent_scope, node, target_inst);
}
-static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *parent_scope, AstNode *node) {
+static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypeResume);
- IrInstruction *target_inst = ir_gen_node(irb, node->data.resume_expr.expr, parent_scope);
+ IrInstruction *target_inst = ir_gen_node(irb, node->data.resume_expr.expr, scope);
if (target_inst == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
- return ir_build_coro_resume(irb, parent_scope, node, target_inst);
+ IrBasicBlock *done_block = ir_create_basic_block(irb, scope, "ResumeDone");
+ IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
+
+ IrInstruction *inverted_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
+ IrInstruction *mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_mask);
+ IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
+ IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false);
+ IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
+ IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
+ IrInstruction *promise_T_type_val = ir_build_const_type(irb, scope, node,
+ get_promise_type(irb->codegen, irb->codegen->builtin_types.entry_void));
+
+ // TODO relies on Zig not re-ordering fields
+ IrInstruction *casted_target_inst = ir_build_ptr_cast(irb, scope, node, promise_T_type_val, target_inst);
+ IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, casted_target_inst);
+ Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
+ IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
+ atomic_state_field_name);
+
+ // clear the is_suspended bit
+ IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
+ usize_type_val, atomic_state_ptr, nullptr, mask, nullptr,
+ AtomicRmwOp_and, AtomicOrderSeqCst);
+
+ IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
+ IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
+ ir_build_cond_br(irb, scope, node, is_canceled_bool, done_block, not_canceled_block, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
+ ir_build_coro_resume(irb, scope, node, target_inst);
+ ir_build_br(irb, scope, node, done_block, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, done_block);
+ return ir_build_const_void(irb, scope, node);
}
-static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *parent_scope, AstNode *node) {
+static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypeAwaitExpr);
- IrInstruction *target_inst = ir_gen_node(irb, node->data.await_expr.expr, parent_scope);
+ IrInstruction *target_inst = ir_gen_node(irb, node->data.await_expr.expr, scope);
if (target_inst == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
@@ -6713,7 +6746,7 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *parent_scope, Ast
return irb->codegen->invalid_instruction;
}
- ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(parent_scope);
+ ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(scope);
if (scope_defer_expr) {
if (!scope_defer_expr->reported_err) {
add_node_error(irb->codegen, node, buf_sprintf("cannot await inside defer expression"));
@@ -6724,85 +6757,85 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *parent_scope, Ast
Scope *outer_scope = irb->exec->begin_scope;
- IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, parent_scope, node, target_inst);
+ IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, target_inst);
Buf *result_ptr_field_name = buf_create_from_str(RESULT_PTR_FIELD_NAME);
- IrInstruction *result_ptr_field_ptr = ir_build_field_ptr(irb, parent_scope, node, coro_promise_ptr, result_ptr_field_name);
+ IrInstruction *result_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_ptr_field_name);
if (irb->codegen->have_err_ret_tracing) {
- IrInstruction *err_ret_trace_ptr = ir_build_error_return_trace(irb, parent_scope, node, IrInstructionErrorReturnTrace::NonNull);
+ IrInstruction *err_ret_trace_ptr = ir_build_error_return_trace(irb, scope, node, IrInstructionErrorReturnTrace::NonNull);
Buf *err_ret_trace_ptr_field_name = buf_create_from_str(ERR_RET_TRACE_PTR_FIELD_NAME);
- IrInstruction *err_ret_trace_ptr_field_ptr = ir_build_field_ptr(irb, parent_scope, node, coro_promise_ptr, err_ret_trace_ptr_field_name);
- ir_build_store_ptr(irb, parent_scope, node, err_ret_trace_ptr_field_ptr, err_ret_trace_ptr);
+ IrInstruction *err_ret_trace_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr_field_name);
+ ir_build_store_ptr(irb, scope, node, err_ret_trace_ptr_field_ptr, err_ret_trace_ptr);
}
Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
- IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, parent_scope, node, coro_promise_ptr,
+ IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
atomic_state_field_name);
- IrInstruction *const_bool_false = ir_build_const_bool(irb, parent_scope, node, false);
- VariableTableEntry *result_var = ir_create_var(irb, node, parent_scope, nullptr,
+ IrInstruction *const_bool_false = ir_build_const_bool(irb, scope, node, false);
+ VariableTableEntry *result_var = ir_create_var(irb, node, scope, nullptr,
false, false, true, const_bool_false);
- IrInstruction *undefined_value = ir_build_const_undefined(irb, parent_scope, node);
- IrInstruction *target_promise_type = ir_build_typeof(irb, parent_scope, node, target_inst);
- IrInstruction *promise_result_type = ir_build_promise_result_type(irb, parent_scope, node, target_promise_type);
- ir_build_await_bookkeeping(irb, parent_scope, node, promise_result_type);
- ir_build_var_decl(irb, parent_scope, node, result_var, promise_result_type, nullptr, undefined_value);
- IrInstruction *my_result_var_ptr = ir_build_var_ptr(irb, parent_scope, node, result_var);
- ir_build_store_ptr(irb, parent_scope, node, result_ptr_field_ptr, my_result_var_ptr);
- IrInstruction *save_token = ir_build_coro_save(irb, parent_scope, node, irb->exec->coro_handle);
- IrInstruction *usize_type_val = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_usize);
- IrInstruction *coro_handle_addr = ir_build_ptr_to_int(irb, parent_scope, node, irb->exec->coro_handle);
- IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, parent_scope, node,
+ IrInstruction *undefined_value = ir_build_const_undefined(irb, scope, node);
+ IrInstruction *target_promise_type = ir_build_typeof(irb, scope, node, target_inst);
+ IrInstruction *promise_result_type = ir_build_promise_result_type(irb, scope, node, target_promise_type);
+ ir_build_await_bookkeeping(irb, scope, node, promise_result_type);
+ ir_build_var_decl(irb, scope, node, result_var, promise_result_type, nullptr, undefined_value);
+ IrInstruction *my_result_var_ptr = ir_build_var_ptr(irb, scope, node, result_var);
+ ir_build_store_ptr(irb, scope, node, result_ptr_field_ptr, my_result_var_ptr);
+ IrInstruction *save_token = ir_build_coro_save(irb, scope, node, irb->exec->coro_handle);
+ IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
+ IrInstruction *coro_handle_addr = ir_build_ptr_to_int(irb, scope, node, irb->exec->coro_handle);
+ IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
usize_type_val, atomic_state_ptr, nullptr, coro_handle_addr, nullptr,
AtomicRmwOp_or, AtomicOrderSeqCst);
- IrInstruction *zero = ir_build_const_usize(irb, parent_scope, node, 0);
- IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, parent_scope, node, 0x7); // 0b111
- IrInstruction *ptr_mask = ir_build_un_op(irb, parent_scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
- IrInstruction *await_handle_addr = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
- IrInstruction *is_non_null = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
- IrBasicBlock *yes_suspend_block = ir_create_basic_block(irb, parent_scope, "YesSuspend");
- IrBasicBlock *no_suspend_block = ir_create_basic_block(irb, parent_scope, "NoSuspend");
- IrBasicBlock *merge_block = ir_create_basic_block(irb, parent_scope, "MergeSuspend");
- ir_build_cond_br(irb, parent_scope, node, is_non_null, no_suspend_block, yes_suspend_block, const_bool_false);
+ IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
+ IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111
+ IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
+ IrInstruction *await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
+ IrInstruction *is_non_null = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
+ IrBasicBlock *yes_suspend_block = ir_create_basic_block(irb, scope, "YesSuspend");
+ IrBasicBlock *no_suspend_block = ir_create_basic_block(irb, scope, "NoSuspend");
+ IrBasicBlock *merge_block = ir_create_basic_block(irb, scope, "MergeSuspend");
+ ir_build_cond_br(irb, scope, node, is_non_null, no_suspend_block, yes_suspend_block, const_bool_false);
ir_set_cursor_at_end_and_append_block(irb, no_suspend_block);
if (irb->codegen->have_err_ret_tracing) {
Buf *err_ret_trace_field_name = buf_create_from_str(ERR_RET_TRACE_FIELD_NAME);
- IrInstruction *src_err_ret_trace_ptr = ir_build_field_ptr(irb, parent_scope, node, coro_promise_ptr, err_ret_trace_field_name);
- IrInstruction *dest_err_ret_trace_ptr = ir_build_error_return_trace(irb, parent_scope, node, IrInstructionErrorReturnTrace::NonNull);
- ir_build_merge_err_ret_traces(irb, parent_scope, node, coro_promise_ptr, src_err_ret_trace_ptr, dest_err_ret_trace_ptr);
+ IrInstruction *src_err_ret_trace_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_field_name);
+ IrInstruction *dest_err_ret_trace_ptr = ir_build_error_return_trace(irb, scope, node, IrInstructionErrorReturnTrace::NonNull);
+ ir_build_merge_err_ret_traces(irb, scope, node, coro_promise_ptr, src_err_ret_trace_ptr, dest_err_ret_trace_ptr);
}
Buf *result_field_name = buf_create_from_str(RESULT_FIELD_NAME);
- IrInstruction *promise_result_ptr = ir_build_field_ptr(irb, parent_scope, node, coro_promise_ptr, result_field_name);
+ IrInstruction *promise_result_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name);
// If the type of the result handle_is_ptr then this does not actually perform a load. But we need it to,
// because we're about to destroy the memory. So we store it into our result variable.
- IrInstruction *no_suspend_result = ir_build_load_ptr(irb, parent_scope, node, promise_result_ptr);
- ir_build_store_ptr(irb, parent_scope, node, my_result_var_ptr, no_suspend_result);
- ir_build_cancel(irb, parent_scope, node, target_inst);
- ir_build_br(irb, parent_scope, node, merge_block, const_bool_false);
+ IrInstruction *no_suspend_result = ir_build_load_ptr(irb, scope, node, promise_result_ptr);
+ ir_build_store_ptr(irb, scope, node, my_result_var_ptr, no_suspend_result);
+ ir_build_cancel(irb, scope, node, target_inst);
+ ir_build_br(irb, scope, node, merge_block, const_bool_false);
ir_set_cursor_at_end_and_append_block(irb, yes_suspend_block);
- IrInstruction *suspend_code = ir_build_coro_suspend(irb, parent_scope, node, save_token, const_bool_false);
- IrBasicBlock *cleanup_block = ir_create_basic_block(irb, parent_scope, "SuspendCleanup");
- IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume");
+ IrInstruction *suspend_code = ir_build_coro_suspend(irb, scope, node, save_token, const_bool_false);
+ IrBasicBlock *cleanup_block = ir_create_basic_block(irb, scope, "SuspendCleanup");
+ IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "SuspendResume");
IrInstructionSwitchBrCase *cases = allocate<IrInstructionSwitchBrCase>(2);
- cases[0].value = ir_build_const_u8(irb, parent_scope, node, 0);
+ cases[0].value = ir_build_const_u8(irb, scope, node, 0);
cases[0].block = resume_block;
- cases[1].value = ir_build_const_u8(irb, parent_scope, node, 1);
+ cases[1].value = ir_build_const_u8(irb, scope, node, 1);
cases[1].block = cleanup_block;
- ir_build_switch_br(irb, parent_scope, node, suspend_code, irb->exec->coro_suspend_block,
+ ir_build_switch_br(irb, scope, node, suspend_code, irb->exec->coro_suspend_block,
2, cases, const_bool_false, nullptr);
ir_set_cursor_at_end_and_append_block(irb, cleanup_block);
- ir_gen_defers_for_block(irb, parent_scope, outer_scope, true);
- ir_mark_gen(ir_build_br(irb, parent_scope, node, irb->exec->coro_final_cleanup_block, const_bool_false));
+ ir_gen_defers_for_block(irb, scope, outer_scope, true);
+ ir_mark_gen(ir_build_br(irb, scope, node, irb->exec->coro_final_cleanup_block, const_bool_false));
ir_set_cursor_at_end_and_append_block(irb, resume_block);
- ir_build_br(irb, parent_scope, node, merge_block, const_bool_false);
+ ir_build_br(irb, scope, node, merge_block, const_bool_false);
ir_set_cursor_at_end_and_append_block(irb, merge_block);
- return ir_build_load_ptr(irb, parent_scope, node, my_result_var_ptr);
+ return ir_build_load_ptr(irb, scope, node, my_result_var_ptr);
}
static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNode *node) {
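The patches that follow build a small lock-free state machine around the promise's atomic state field. The awaiter's coroutine handle and three status flags are packed into a single usize, which relies on coroutine handles being at least 8-byte aligned (hence the 0x7 mask above), leaving the low three bits of the address free. A sketch of the layout in Zig, with illustrative constant names that do not appear in the compiler source:

    const IS_CANCELED: usize = 0x1; // 0b001, set by cancel
    const IS_SUSPENDED: usize = 0x2; // 0b010, set by suspend, cleared by resume
    const IS_AWAITED: usize = 0x4; // 0b100, set by await
    const PTR_MASK: usize = ~usize(0x7); // 0b111...000, the awaiter's handle address

Each transition below is a single seq-cst atomic read-modify-write on this word, so every participant (canceler, awaiter, the coroutine itself) can decode from the returned previous value exactly what had already happened.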
From 442e244b4dd371d674436a163d62efdcd4e17a00 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 27 Jul 2018 17:16:00 -0400
Subject: [PATCH 15/37] suspend sets suspend bit
---
src/ir.cpp | 19 ++++++++++++++++++-
test/cases/coroutines.zig | 4 ++--
2 files changed, 20 insertions(+), 3 deletions(-)
diff --git a/src/ir.cpp b/src/ir.cpp
index 2eff986694..cd791fb189 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -6874,9 +6874,26 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod
IrBasicBlock *cleanup_block = ir_create_basic_block(irb, parent_scope, "SuspendCleanup");
IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume");
+ IrBasicBlock *suspended_block = ir_create_basic_block(irb, parent_scope, "AlreadySuspended");
+ IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, parent_scope, "NotAlreadySuspended");
- IrInstruction *suspend_code;
IrInstruction *const_bool_false = ir_build_const_bool(irb, parent_scope, node, false);
+ IrInstruction *usize_type_val = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_usize);
+ IrInstruction *is_suspended_mask = ir_build_const_usize(irb, parent_scope, node, 0x2); // 0b010
+ IrInstruction *zero = ir_build_const_usize(irb, parent_scope, node, 0);
+
+ IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, parent_scope, node,
+ usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, is_suspended_mask, nullptr,
+ AtomicRmwOp_or, AtomicOrderSeqCst);
+ IrInstruction *is_suspended_value = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
+ IrInstruction *is_suspended_bool = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
+ ir_build_cond_br(irb, parent_scope, node, is_suspended_bool, suspended_block, not_suspended_block, const_bool_false);
+
+ ir_set_cursor_at_end_and_append_block(irb, suspended_block);
+ ir_build_unreachable(irb, parent_scope, node);
+
+ ir_set_cursor_at_end_and_append_block(irb, not_suspended_block);
+ IrInstruction *suspend_code;
if (node->data.suspend.block == nullptr) {
suspend_code = ir_build_coro_suspend(irb, parent_scope, node, nullptr, const_bool_false);
} else {
diff --git a/test/cases/coroutines.zig b/test/cases/coroutines.zig
index f7f2af62a6..72a4ed0b38 100644
--- a/test/cases/coroutines.zig
+++ b/test/cases/coroutines.zig
@@ -244,8 +244,8 @@ test "break from suspend" {
std.debug.assert(my_result == 2);
}
async fn testBreakFromSuspend(my_result: *i32) void {
- s: suspend |p| {
- break :s;
+ suspend |p| {
+ resume p;
}
my_result.* += 1;
suspend;
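In Zig-flavored sketch form, assuming the illustrative constants above and a `var state: usize` standing in for the promise's atomic state field, the suspend path generated here behaves roughly like:

    const builtin = @import("builtin");

    const prev = @atomicRmw(usize, &state, builtin.AtomicRmwOp.Or, IS_SUSPENDED, builtin.AtomicOrder.SeqCst);
    if (prev & IS_SUSPENDED != 0) {
        unreachable; // suspending an already-suspended coroutine is a bug
    }
    // otherwise fall through to the real llvm.coro suspend

The test change also previews the new idiom: rather than breaking out of the suspend block, the coroutine resumes its own promise handle, which is legal here because the suspended bit is already set by the time the block body runs.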
From 02c5bda704d30e95e6af23804f9a552e9d8ca2d7 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 27 Jul 2018 17:27:03 -0400
Subject: [PATCH 16/37] remove ability to break from suspend blocks
closes #803
---
doc/langref.html.in | 2 +-
src/all_types.hpp | 2 --
src/analyze.cpp | 1 -
src/ir.cpp | 17 ++---------------
src/parser.cpp | 25 ++-----------------------
5 files changed, 5 insertions(+), 42 deletions(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 60ba09d391..d91fb6e8fb 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -7336,7 +7336,7 @@ Defer(body) = ("defer" | "deferror") body
IfExpression(body) = "if" "(" Expression ")" body option("else" BlockExpression(body))
-SuspendExpression(body) = option(Symbol ":") "suspend" option(("|" Symbol "|" body))
+SuspendExpression(body) = "suspend" option(("|" Symbol "|" body))
IfErrorExpression(body) = "if" "(" Expression ")" option("|" option("*") Symbol "|") body "else" "|" Symbol "|" BlockExpression(body)
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 3ac7afe474..2f09e70301 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -898,7 +898,6 @@ struct AstNodeAwaitExpr {
};
struct AstNodeSuspend {
- Buf *name;
AstNode *block;
AstNode *promise_symbol;
};
@@ -1929,7 +1928,6 @@ struct ScopeLoop {
struct ScopeSuspend {
Scope base;
- Buf *name;
IrBasicBlock *resume_block;
bool reported_err;
};
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 74d59f966a..03cfa5b67b 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -161,7 +161,6 @@ ScopeSuspend *create_suspend_scope(AstNode *node, Scope *parent) {
assert(node->type == NodeTypeSuspend);
ScopeSuspend *scope = allocate<ScopeSuspend>(1);
init_scope(&scope->base, ScopeIdSuspend, node, parent);
- scope->name = node->data.suspend.name;
return scope;
}
diff --git a/src/ir.cpp b/src/ir.cpp
index cd791fb189..799d7e3bc5 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -6186,15 +6186,6 @@ static IrInstruction *ir_gen_return_from_block(IrBuilder *irb, Scope *break_scop
return ir_build_br(irb, break_scope, node, dest_block, is_comptime);
}
-static IrInstruction *ir_gen_break_from_suspend(IrBuilder *irb, Scope *break_scope, AstNode *node, ScopeSuspend *suspend_scope) {
- IrInstruction *is_comptime = ir_build_const_bool(irb, break_scope, node, false);
-
- IrBasicBlock *dest_block = suspend_scope->resume_block;
- ir_gen_defers_for_block(irb, break_scope, dest_block->scope, false);
-
- return ir_build_br(irb, break_scope, node, dest_block, is_comptime);
-}
-
static IrInstruction *ir_gen_break(IrBuilder *irb, Scope *break_scope, AstNode *node) {
assert(node->type == NodeTypeBreak);
@@ -6235,12 +6226,8 @@ static IrInstruction *ir_gen_break(IrBuilder *irb, Scope *break_scope, AstNode *
return ir_gen_return_from_block(irb, break_scope, node, this_block_scope);
}
} else if (search_scope->id == ScopeIdSuspend) {
- ScopeSuspend *this_suspend_scope = (ScopeSuspend *)search_scope;
- if (node->data.break_expr.name != nullptr &&
- (this_suspend_scope->name != nullptr && buf_eql_buf(node->data.break_expr.name, this_suspend_scope->name)))
- {
- return ir_gen_break_from_suspend(irb, break_scope, node, this_suspend_scope);
- }
+ add_node_error(irb->codegen, node, buf_sprintf("cannot break out of suspend block"));
+ return irb->codegen->invalid_instruction;
}
search_scope = search_scope->parent;
}
diff --git a/src/parser.cpp b/src/parser.cpp
index adb1633f5d..a93d8de830 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -648,30 +648,12 @@ static AstNode *ast_parse_asm_expr(ParseContext *pc, size_t *token_index, bool m
}
/*
-SuspendExpression(body) = option(Symbol ":") "suspend" option(("|" Symbol "|" body))
+SuspendExpression(body) = "suspend" option(("|" Symbol "|" body))
*/
static AstNode *ast_parse_suspend_block(ParseContext *pc, size_t *token_index, bool mandatory) {
size_t orig_token_index = *token_index;
- Token *name_token = nullptr;
- Token *token = &pc->tokens->at(*token_index);
- if (token->id == TokenIdSymbol) {
- *token_index += 1;
- Token *colon_token = &pc->tokens->at(*token_index);
- if (colon_token->id == TokenIdColon) {
- *token_index += 1;
- name_token = token;
- token = &pc->tokens->at(*token_index);
- } else if (mandatory) {
- ast_expect_token(pc, colon_token, TokenIdColon);
- zig_unreachable();
- } else {
- *token_index = orig_token_index;
- return nullptr;
- }
- }
-
- Token *suspend_token = token;
+ Token *suspend_token = &pc->tokens->at(*token_index);
if (suspend_token->id == TokenIdKeywordSuspend) {
*token_index += 1;
} else if (mandatory) {
@@ -693,9 +675,6 @@ static AstNode *ast_parse_suspend_block(ParseContext *pc, size_t *token_index, b
}
AstNode *node = ast_create_node(pc, NodeTypeSuspend, suspend_token);
- if (name_token != nullptr) {
- node->data.suspend.name = token_buf(name_token);
- }
node->data.suspend.promise_symbol = ast_parse_symbol(pc, token_index);
ast_eat_token(pc, token_index, TokenIdBinOr);
node->data.suspend.block = ast_parse_block(pc, token_index, true);
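For code that relied on the removed label syntax, the migration is the one already shown in the coroutine test above: resume the provided promise handle instead of breaking.

    // no longer accepted:
    // s: suspend |p| {
    //     break :s;
    // }

    // written instead as:
    suspend |p| {
        resume p; // the suspended bit is already set, so this resume is legal
    }

A labeled break jumped past the suspension bookkeeping entirely; going through resume keeps the atomic state word consistent with what actually happened.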
From 0b7a9c072203c2f9999ddcc2231a42334cd028e3 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 27 Jul 2018 17:42:09 -0400
Subject: [PATCH 17/37] cancel sets the cancel bit
---
src/ir.cpp | 37 ++++++++++++++++++++++++++++++++++---
1 file changed, 34 insertions(+), 3 deletions(-)
diff --git a/src/ir.cpp b/src/ir.cpp
index 799d7e3bc5..4a381a26fa 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -6663,14 +6663,45 @@ static IrInstruction *ir_gen_fn_proto(IrBuilder *irb, Scope *parent_scope, AstNo
async_allocator_type_value, is_var_args);
}
-static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *parent_scope, AstNode *node) {
+static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypeCancel);
- IrInstruction *target_inst = ir_gen_node(irb, node->data.cancel_expr.expr, parent_scope);
+ IrInstruction *target_inst = ir_gen_node(irb, node->data.cancel_expr.expr, scope);
if (target_inst == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
- return ir_build_cancel(irb, parent_scope, node, target_inst);
+ IrBasicBlock *done_block = ir_create_basic_block(irb, scope, "CancelDone");
+ IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
+
+ IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
+ IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
+ IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false);
+ IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
+ IrInstruction *promise_T_type_val = ir_build_const_type(irb, scope, node,
+ get_promise_type(irb->codegen, irb->codegen->builtin_types.entry_void));
+
+ // TODO relies on Zig not re-ordering fields
+ IrInstruction *casted_target_inst = ir_build_ptr_cast(irb, scope, node, promise_T_type_val, target_inst);
+ IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, casted_target_inst);
+ Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
+ IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
+ atomic_state_field_name);
+
+ // set the is_canceled bit
+ IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
+ usize_type_val, atomic_state_ptr, nullptr, is_canceled_mask, nullptr,
+ AtomicRmwOp_and, AtomicOrderSeqCst);
+
+ IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
+ IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
+ ir_build_cond_br(irb, scope, node, is_canceled_bool, done_block, not_canceled_block, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
+ ir_build_cancel(irb, scope, node, target_inst);
+ ir_build_br(irb, scope, node, done_block, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, done_block);
+ return ir_build_const_void(irb, scope, node);
}
static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) {
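The cancel transition races against other cancelers by setting the canceled bit and inspecting the prior value. Note the RMW op in this patch is AtomicRmwOp_and; [PATCH 32/37] at the end of this digest corrects it to AtomicRmwOp_or, which is the semantics this sketch shows (same illustrative names and state variable as before):

    const prev = @atomicRmw(usize, &state, builtin.AtomicRmwOp.Or, IS_CANCELED, builtin.AtomicOrder.SeqCst);
    if (prev & IS_CANCELED == 0) {
        // we won the bit: this path alone runs the real cancellation
        // (ir_build_cancel in the generated IR)
    }

Only one racer can observe the bit as previously clear, which is what makes a double cancel harmless.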
From 341bd0dfa42a729aefff56e48a67a21cb3ea0822 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 27 Jul 2018 17:47:15 -0400
Subject: [PATCH 18/37] await sets the await bit
---
src/ir.cpp | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/src/ir.cpp b/src/ir.cpp
index 4a381a26fa..a933106884 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -6791,9 +6791,15 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
atomic_state_field_name);
IrInstruction *const_bool_false = ir_build_const_bool(irb, scope, node, false);
+ IrInstruction *undefined_value = ir_build_const_undefined(irb, scope, node);
+ IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
+ IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
+ IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111
+ IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
+ IrInstruction *await_mask = ir_build_const_usize(irb, scope, node, 0x4); // 0b100
+
VariableTableEntry *result_var = ir_create_var(irb, node, scope, nullptr,
false, false, true, const_bool_false);
- IrInstruction *undefined_value = ir_build_const_undefined(irb, scope, node);
IrInstruction *target_promise_type = ir_build_typeof(irb, scope, node, target_inst);
IrInstruction *promise_result_type = ir_build_promise_result_type(irb, scope, node, target_promise_type);
ir_build_await_bookkeeping(irb, scope, node, promise_result_type);
@@ -6801,14 +6807,12 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
IrInstruction *my_result_var_ptr = ir_build_var_ptr(irb, scope, node, result_var);
ir_build_store_ptr(irb, scope, node, result_ptr_field_ptr, my_result_var_ptr);
IrInstruction *save_token = ir_build_coro_save(irb, scope, node, irb->exec->coro_handle);
- IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
+
IrInstruction *coro_handle_addr = ir_build_ptr_to_int(irb, scope, node, irb->exec->coro_handle);
+ IrInstruction *mask_bits = ir_build_bin_op(irb, scope, node, IrBinOpBinOr, coro_handle_addr, await_mask, false);
IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
- usize_type_val, atomic_state_ptr, nullptr, coro_handle_addr, nullptr,
+ usize_type_val, atomic_state_ptr, nullptr, mask_bits, nullptr,
AtomicRmwOp_or, AtomicOrderSeqCst);
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
- IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111
- IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
IrInstruction *await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
IrInstruction *is_non_null = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
IrBasicBlock *yes_suspend_block = ir_create_basic_block(irb, scope, "YesSuspend");
From e491c381896b6f36366bf554f149f79d5be8f9dd Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 27 Jul 2018 18:01:26 -0400
Subject: [PATCH 19/37] resume detects resuming when not suspended
---
src/ir.cpp | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/src/ir.cpp b/src/ir.cpp
index a933106884..bc3ef11481 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -6713,13 +6713,15 @@ static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node)
IrBasicBlock *done_block = ir_create_basic_block(irb, scope, "ResumeDone");
IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
+ IrBasicBlock *suspended_block = ir_create_basic_block(irb, scope, "IsSuspended");
+ IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, scope, "IsNotSuspended");
- IrInstruction *inverted_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
- IrInstruction *mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_mask);
+ IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
+ IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
+ IrInstruction *and_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, is_suspended_mask);
IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false);
IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
IrInstruction *promise_T_type_val = ir_build_const_type(irb, scope, node,
get_promise_type(irb->codegen, irb->codegen->builtin_types.entry_void));
@@ -6732,7 +6734,7 @@ static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node)
// clear the is_suspended bit
IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
- usize_type_val, atomic_state_ptr, nullptr, mask, nullptr,
+ usize_type_val, atomic_state_ptr, nullptr, and_mask, nullptr,
AtomicRmwOp_and, AtomicOrderSeqCst);
IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
@@ -6740,6 +6742,14 @@ static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node)
ir_build_cond_br(irb, scope, node, is_canceled_bool, done_block, not_canceled_block, is_comptime);
ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
+ IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
+ IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
+ ir_build_cond_br(irb, scope, node, is_suspended_bool, suspended_block, not_suspended_block, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, not_suspended_block);
+ ir_build_unreachable(irb, scope, node);
+
+ ir_set_cursor_at_end_and_append_block(irb, suspended_block);
ir_build_coro_resume(irb, scope, node, target_inst);
ir_build_br(irb, scope, node, done_block, is_comptime);
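Resume is the mirror image: it clears the suspended bit with an AND of the inverted mask and validates the prior state. Roughly, under the same assumptions as the earlier sketches:

    const prev = @atomicRmw(usize, &state, builtin.AtomicRmwOp.And, ~IS_SUSPENDED, builtin.AtomicOrder.SeqCst);
    if (prev & IS_CANCELED != 0) {
        return; // already canceled: the resume is a no-op
    }
    if (prev & IS_SUSPENDED == 0) {
        unreachable; // resuming a coroutine that was not suspended
    }
    // otherwise perform the real coro resume

The check order matches the blocks built above: the canceled case branches to ResumeDone before the suspended bit is ever examined.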
From e5beca886ddf3138351765e1239b626e7bd612c6 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 27 Jul 2018 18:07:30 -0400
Subject: [PATCH 20/37] suspend checks the cancel bit
---
src/ir.cpp | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/src/ir.cpp b/src/ir.cpp
index bc3ef11481..c94efdf61f 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -6907,16 +6907,24 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod
IrBasicBlock *cleanup_block = ir_create_basic_block(irb, parent_scope, "SuspendCleanup");
IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume");
IrBasicBlock *suspended_block = ir_create_basic_block(irb, parent_scope, "AlreadySuspended");
+ IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, parent_scope, "NotCanceled");
IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, parent_scope, "NotAlreadySuspended");
IrInstruction *const_bool_false = ir_build_const_bool(irb, parent_scope, node, false);
IrInstruction *usize_type_val = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_usize);
+ IrInstruction *is_canceled_mask = ir_build_const_usize(irb, parent_scope, node, 0x1); // 0b001
IrInstruction *is_suspended_mask = ir_build_const_usize(irb, parent_scope, node, 0x2); // 0b010
IrInstruction *zero = ir_build_const_usize(irb, parent_scope, node, 0);
IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, parent_scope, node,
usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, is_suspended_mask, nullptr,
AtomicRmwOp_or, AtomicOrderSeqCst);
+
+ IrInstruction *is_canceled_value = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
+ IrInstruction *is_canceled_bool = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
+ ir_build_cond_br(irb, parent_scope, node, is_canceled_bool, cleanup_block, not_canceled_block, const_bool_false);
+
+ ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
IrInstruction *is_suspended_value = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
IrInstruction *is_suspended_bool = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
ir_build_cond_br(irb, parent_scope, node, is_suspended_bool, suspended_block, not_suspended_block, const_bool_false);
From f0c049d02b68a8740096df633a41b78d90fd34cd Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 27 Jul 2018 18:37:30 -0400
Subject: [PATCH 21/37] detect double await
---
src/ir.cpp | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/src/ir.cpp b/src/ir.cpp
index c94efdf61f..2f5ba86627 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -6796,6 +6796,9 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
ir_build_store_ptr(irb, scope, node, err_ret_trace_ptr_field_ptr, err_ret_trace_ptr);
}
+ IrBasicBlock *already_awaited_block = ir_create_basic_block(irb, scope, "AlreadyAwaited");
+ IrBasicBlock *not_awaited_block = ir_create_basic_block(irb, scope, "NotAwaited");
+
Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
atomic_state_field_name);
@@ -6823,6 +6826,15 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
usize_type_val, atomic_state_ptr, nullptr, mask_bits, nullptr,
AtomicRmwOp_or, AtomicOrderSeqCst);
+
+ IrInstruction *is_awaited_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, await_mask, false);
+ IrInstruction *is_awaited_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_awaited_value, zero, false);
+ ir_build_cond_br(irb, scope, node, is_awaited_bool, already_awaited_block, not_awaited_block, const_bool_false);
+
+ ir_set_cursor_at_end_and_append_block(irb, already_awaited_block);
+ ir_build_unreachable(irb, scope, node);
+
+ ir_set_cursor_at_end_and_append_block(irb, not_awaited_block);
IrInstruction *await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
IrInstruction *is_non_null = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
IrBasicBlock *yes_suspend_block = ir_create_basic_block(irb, scope, "YesSuspend");
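Await now publishes the awaiter's handle and the await bit in one RMW and decodes the previous value. In sketch form, with my_handle_addr standing for the awaiter's (8-byte aligned) coroutine handle address:

    const prev = @atomicRmw(usize, &state, builtin.AtomicRmwOp.Or, my_handle_addr | IS_AWAITED, builtin.AtomicOrder.SeqCst);
    if (prev & IS_AWAITED != 0) {
        unreachable; // double await of the same promise
    }
    if (prev & PTR_MASK != 0) {
        // target already finished: copy the result out, no suspend needed
    } else {
        // target still running: suspend; it will resume us on completion
    }

Because the handle and the bit are stored atomically together, the target can never observe the await bit without also knowing whom to resume.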
From 6fed777637daafc077ec2c079b0557f3c8bdce51 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 28 Jul 2018 01:22:51 -0400
Subject: [PATCH 22/37] cancel detects if the target handle has already
returned
---
src/ir.cpp | 15 +++++++++++++++
1 file changed, 15 insertions(+)
diff --git a/src/ir.cpp b/src/ir.cpp
index 2f5ba86627..6d6e1430fe 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -6672,6 +6672,8 @@ static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node)
IrBasicBlock *done_block = ir_create_basic_block(irb, scope, "CancelDone");
IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
+ IrBasicBlock *post_return_block = ir_create_basic_block(irb, scope, "PostReturn");
+ IrBasicBlock *do_cancel_block = ir_create_basic_block(irb, scope, "DoCancel");
IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
@@ -6679,6 +6681,9 @@ static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node)
IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
IrInstruction *promise_T_type_val = ir_build_const_type(irb, scope, node,
get_promise_type(irb->codegen, irb->codegen->builtin_types.entry_void));
+ IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111
+ IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
+ IrInstruction *await_mask = ir_build_const_usize(irb, scope, node, 0x4); // 0b100
// TODO relies on Zig not re-ordering fields
IrInstruction *casted_target_inst = ir_build_ptr_cast(irb, scope, node, promise_T_type_val, target_inst);
@@ -6697,6 +6702,16 @@ static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node)
ir_build_cond_br(irb, scope, node, is_canceled_bool, done_block, not_canceled_block, is_comptime);
ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
+ IrInstruction *awaiter_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
+ IrInstruction *is_returned_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpEq, awaiter_addr, ptr_mask, false);
+ ir_build_cond_br(irb, scope, node, is_returned_bool, post_return_block, do_cancel_block, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, post_return_block);
+ IrInstruction *is_awaited_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, await_mask, false);
+ IrInstruction *is_awaited_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_awaited_value, zero, false);
+ ir_build_cond_br(irb, scope, node, is_awaited_bool, done_block, do_cancel_block, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, do_cancel_block);
ir_build_cancel(irb, scope, node, target_inst);
ir_build_br(irb, scope, node, done_block, is_comptime);
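The sentinel tested here is an all-ones pointer field: if the pointer bits of the previous value equal PTR_MASK itself, the target's return path has already run, since no real aligned awaiter handle would have every address bit set. In sketch form:

    const awaiter_addr = prev & PTR_MASK;
    const already_returned = awaiter_addr == PTR_MASK;

This presumably pairs with the coroutine epilogue elsewhere in the series writing all ones into the pointer bits when the function returns.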
From c6f9a4c0445000e59bca16838cc413140d399f35 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 28 Jul 2018 01:26:11 -0400
Subject: [PATCH 23/37] cancel detects suspend bit
---
src/ir.cpp | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/src/ir.cpp b/src/ir.cpp
index 6d6e1430fe..48148e6768 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -6672,6 +6672,7 @@ static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node)
IrBasicBlock *done_block = ir_create_basic_block(irb, scope, "CancelDone");
IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
+ IrBasicBlock *pre_return_block = ir_create_basic_block(irb, scope, "PreReturn");
IrBasicBlock *post_return_block = ir_create_basic_block(irb, scope, "PostReturn");
IrBasicBlock *do_cancel_block = ir_create_basic_block(irb, scope, "DoCancel");
@@ -6684,6 +6685,7 @@ static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node)
IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111
IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
IrInstruction *await_mask = ir_build_const_usize(irb, scope, node, 0x4); // 0b100
+ IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
// TODO relies on Zig not re-ordering fields
IrInstruction *casted_target_inst = ir_build_ptr_cast(irb, scope, node, promise_T_type_val, target_inst);
@@ -6704,13 +6706,18 @@ static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node)
ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
IrInstruction *awaiter_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
IrInstruction *is_returned_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpEq, awaiter_addr, ptr_mask, false);
- ir_build_cond_br(irb, scope, node, is_returned_bool, post_return_block, do_cancel_block, is_comptime);
+ ir_build_cond_br(irb, scope, node, is_returned_bool, post_return_block, pre_return_block, is_comptime);
ir_set_cursor_at_end_and_append_block(irb, post_return_block);
IrInstruction *is_awaited_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, await_mask, false);
IrInstruction *is_awaited_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_awaited_value, zero, false);
ir_build_cond_br(irb, scope, node, is_awaited_bool, done_block, do_cancel_block, is_comptime);
+ ir_set_cursor_at_end_and_append_block(irb, pre_return_block);
+ IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
+ IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
+ ir_build_cond_br(irb, scope, node, is_suspended_bool, do_cancel_block, done_block, is_comptime);
+
ir_set_cursor_at_end_and_append_block(irb, do_cancel_block);
ir_build_cancel(irb, scope, node, target_inst);
ir_build_br(irb, scope, node, done_block, is_comptime);
From 60cda3713fdc2d8bce01bfc229109a3c8b3efb6f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 28 Jul 2018 12:04:09 -0400
Subject: [PATCH 24/37] suspend cancels awaiter when it gets canceled
---
src/ir.cpp | 37 ++++++++++++++++++++++++++++---------
1 file changed, 28 insertions(+), 9 deletions(-)
diff --git a/src/ir.cpp b/src/ir.cpp
index 48148e6768..4b0080d562 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -6663,13 +6663,7 @@ static IrInstruction *ir_gen_fn_proto(IrBuilder *irb, Scope *parent_scope, AstNo
async_allocator_type_value, is_var_args);
}
-static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node) {
- assert(node->type == NodeTypeCancel);
-
- IrInstruction *target_inst = ir_gen_node(irb, node->data.cancel_expr.expr, scope);
- if (target_inst == irb->codegen->invalid_instruction)
- return irb->codegen->invalid_instruction;
-
+static IrInstruction *ir_gen_cancel_target(IrBuilder *irb, Scope *scope, AstNode *node, IrInstruction *target_inst) {
IrBasicBlock *done_block = ir_create_basic_block(irb, scope, "CancelDone");
IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
IrBasicBlock *pre_return_block = ir_create_basic_block(irb, scope, "PreReturn");
@@ -6726,6 +6720,16 @@ static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node)
return ir_build_const_void(irb, scope, node);
}
+static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node) {
+ assert(node->type == NodeTypeCancel);
+
+ IrInstruction *target_inst = ir_gen_node(irb, node->data.cancel_expr.expr, scope);
+ if (target_inst == irb->codegen->invalid_instruction)
+ return irb->codegen->invalid_instruction;
+
+ return ir_gen_cancel_target(irb, scope, node, target_inst);
+}
+
static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypeResume);
@@ -6941,14 +6945,19 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod
IrBasicBlock *cleanup_block = ir_create_basic_block(irb, parent_scope, "SuspendCleanup");
IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume");
IrBasicBlock *suspended_block = ir_create_basic_block(irb, parent_scope, "AlreadySuspended");
+ IrBasicBlock *canceled_block = ir_create_basic_block(irb, parent_scope, "IsCanceled");
IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, parent_scope, "NotCanceled");
IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, parent_scope, "NotAlreadySuspended");
+ IrBasicBlock *cancel_awaiter_block = ir_create_basic_block(irb, parent_scope, "CancelAwaiter");
+ IrInstruction *promise_type_val = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_promise);
IrInstruction *const_bool_false = ir_build_const_bool(irb, parent_scope, node, false);
IrInstruction *usize_type_val = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_usize);
IrInstruction *is_canceled_mask = ir_build_const_usize(irb, parent_scope, node, 0x1); // 0b001
IrInstruction *is_suspended_mask = ir_build_const_usize(irb, parent_scope, node, 0x2); // 0b010
IrInstruction *zero = ir_build_const_usize(irb, parent_scope, node, 0);
+ IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, parent_scope, node, 0x7); // 0b111
+ IrInstruction *ptr_mask = ir_build_un_op(irb, parent_scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, parent_scope, node,
usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, is_suspended_mask, nullptr,
@@ -6956,7 +6965,17 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod
IrInstruction *is_canceled_value = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
IrInstruction *is_canceled_bool = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
- ir_build_cond_br(irb, parent_scope, node, is_canceled_bool, cleanup_block, not_canceled_block, const_bool_false);
+ ir_build_cond_br(irb, parent_scope, node, is_canceled_bool, canceled_block, not_canceled_block, const_bool_false);
+
+ ir_set_cursor_at_end_and_append_block(irb, canceled_block);
+ IrInstruction *await_handle_addr = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
+ IrInstruction *have_await_handle = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
+ ir_build_cond_br(irb, parent_scope, node, have_await_handle, cancel_awaiter_block, cleanup_block, const_bool_false);
+
+ ir_set_cursor_at_end_and_append_block(irb, cancel_awaiter_block);
+ IrInstruction *await_handle = ir_build_int_to_ptr(irb, parent_scope, node, promise_type_val, await_handle_addr);
+ ir_gen_cancel_target(irb, parent_scope, node, await_handle);
+ ir_build_br(irb, parent_scope, node, cleanup_block, const_bool_false);
ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
IrInstruction *is_suspended_value = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
@@ -6995,7 +7014,7 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod
cases[0].value = ir_mark_gen(ir_build_const_u8(irb, parent_scope, node, 0));
cases[0].block = resume_block;
cases[1].value = ir_mark_gen(ir_build_const_u8(irb, parent_scope, node, 1));
- cases[1].block = cleanup_block;
+ cases[1].block = canceled_block;
ir_mark_gen(ir_build_switch_br(irb, parent_scope, node, suspend_code, irb->exec->coro_suspend_block,
2, cases, const_bool_false, nullptr));
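A canceled suspend now propagates the cancellation to whoever was awaiting the coroutine before tearing itself down. Roughly:

    if (prev & IS_CANCELED != 0) {
        const awaiter_addr = prev & PTR_MASK;
        if (awaiter_addr != 0) {
            // recover the awaiter's promise from the address bits
            // (ir_build_int_to_ptr with the promise type in the generated IR)
            // and run the shared cancel path on it (ir_gen_cancel_target above)
        }
        // then run defers and jump to the final cleanup block
    }

The switch-case change at the bottom matters for the same reason: the "canceled" arm of the coro.suspend switch now routes through this block instead of going straight to cleanup.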
From 0ba2bc38d76537a83bd6c27143779857dbb711cf Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 28 Jul 2018 12:23:47 -0400
Subject: [PATCH 25/37] await checks the cancel bit
---
src/ir.cpp | 17 ++++++++++++-----
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/src/ir.cpp b/src/ir.cpp
index 4b0080d562..c246b75d78 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -6824,6 +6824,12 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
IrBasicBlock *already_awaited_block = ir_create_basic_block(irb, scope, "AlreadyAwaited");
IrBasicBlock *not_awaited_block = ir_create_basic_block(irb, scope, "NotAwaited");
+ IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
+ IrBasicBlock *yes_suspend_block = ir_create_basic_block(irb, scope, "YesSuspend");
+ IrBasicBlock *no_suspend_block = ir_create_basic_block(irb, scope, "NoSuspend");
+ IrBasicBlock *merge_block = ir_create_basic_block(irb, scope, "MergeSuspend");
+ IrBasicBlock *cleanup_block = ir_create_basic_block(irb, scope, "SuspendCleanup");
+ IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "SuspendResume");
Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
@@ -6836,6 +6842,7 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111
IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
IrInstruction *await_mask = ir_build_const_usize(irb, scope, node, 0x4); // 0b100
+ IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
VariableTableEntry *result_var = ir_create_var(irb, node, scope, nullptr,
false, false, true, const_bool_false);
@@ -6861,11 +6868,13 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
ir_build_unreachable(irb, scope, node);
ir_set_cursor_at_end_and_append_block(irb, not_awaited_block);
+ IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
+ IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
+ ir_build_cond_br(irb, scope, node, is_canceled_bool, cleanup_block, not_canceled_block, const_bool_false);
+
+ ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
IrInstruction *await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
IrInstruction *is_non_null = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
- IrBasicBlock *yes_suspend_block = ir_create_basic_block(irb, scope, "YesSuspend");
- IrBasicBlock *no_suspend_block = ir_create_basic_block(irb, scope, "NoSuspend");
- IrBasicBlock *merge_block = ir_create_basic_block(irb, scope, "MergeSuspend");
ir_build_cond_br(irb, scope, node, is_non_null, no_suspend_block, yes_suspend_block, const_bool_false);
ir_set_cursor_at_end_and_append_block(irb, no_suspend_block);
@@ -6886,8 +6895,6 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
ir_set_cursor_at_end_and_append_block(irb, yes_suspend_block);
IrInstruction *suspend_code = ir_build_coro_suspend(irb, scope, node, save_token, const_bool_false);
- IrBasicBlock *cleanup_block = ir_create_basic_block(irb, scope, "SuspendCleanup");
- IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "SuspendResume");
IrInstructionSwitchBrCase *cases = allocate<IrInstructionSwitchBrCase>(2);
cases[0].value = ir_build_const_u8(irb, scope, node, 0);
From dd272d13161d7a7069af78669d891d280c65529f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 28 Jul 2018 12:36:02 -0400
Subject: [PATCH 26/37] await cancels the await target when it is canceled
---
src/ir.cpp | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/src/ir.cpp b/src/ir.cpp
index c246b75d78..f298cff24b 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -6830,11 +6830,13 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
IrBasicBlock *merge_block = ir_create_basic_block(irb, scope, "MergeSuspend");
IrBasicBlock *cleanup_block = ir_create_basic_block(irb, scope, "SuspendCleanup");
IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "SuspendResume");
+ IrBasicBlock *cancel_target_block = ir_create_basic_block(irb, scope, "CancelTarget");
Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
atomic_state_field_name);
+ IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_promise);
IrInstruction *const_bool_false = ir_build_const_bool(irb, scope, node, false);
IrInstruction *undefined_value = ir_build_const_undefined(irb, scope, node);
IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
@@ -6900,10 +6902,15 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
cases[0].value = ir_build_const_u8(irb, scope, node, 0);
cases[0].block = resume_block;
cases[1].value = ir_build_const_u8(irb, scope, node, 1);
- cases[1].block = cleanup_block;
+ cases[1].block = cancel_target_block;
ir_build_switch_br(irb, scope, node, suspend_code, irb->exec->coro_suspend_block,
2, cases, const_bool_false, nullptr);
+ ir_set_cursor_at_end_and_append_block(irb, cancel_target_block);
+ IrInstruction *await_handle = ir_build_int_to_ptr(irb, scope, node, promise_type_val, await_handle_addr);
+ ir_gen_cancel_target(irb, scope, node, await_handle);
+ ir_mark_gen(ir_build_br(irb, scope, node, cleanup_block, const_bool_false));
+
ir_set_cursor_at_end_and_append_block(irb, cleanup_block);
ir_gen_defers_for_block(irb, scope, outer_scope, true);
ir_mark_gen(ir_build_br(irb, scope, node, irb->exec->coro_final_cleanup_block, const_bool_false));
From 05456eb275dd1d5aee6630ffdc743057db73872f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 28 Jul 2018 12:53:33 -0400
Subject: [PATCH 27/37] make some functions in std.event.Loop public
---
std/event/loop.zig | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/std/event/loop.zig b/std/event/loop.zig
index cd805f891f..4e219653be 100644
--- a/std/event/loop.zig
+++ b/std/event/loop.zig
@@ -55,7 +55,7 @@ pub const Loop = struct {
/// After initialization, call run().
/// TODO copy elision / named return values so that the threads referencing *Loop
/// have the correct pointer value.
- fn initSingleThreaded(self: *Loop, allocator: *mem.Allocator) !void {
+ pub fn initSingleThreaded(self: *Loop, allocator: *mem.Allocator) !void {
return self.initInternal(allocator, 1);
}
@@ -64,7 +64,7 @@ pub const Loop = struct {
/// After initialization, call run().
/// TODO copy elision / named return values so that the threads referencing *Loop
/// have the correct pointer value.
- fn initMultiThreaded(self: *Loop, allocator: *mem.Allocator) !void {
+ pub fn initMultiThreaded(self: *Loop, allocator: *mem.Allocator) !void {
const core_count = try std.os.cpuCount(allocator);
return self.initInternal(allocator, core_count);
}
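A minimal caller of the newly public initializers might look like this hypothetical test (the allocator choice and the run/deinit entry points are assumed from the surrounding std.event.Loop API, which this patch does not show):

    const std = @import("std");
    const Loop = std.event.Loop;

    test "spin up an event loop" {
        var loop: Loop = undefined;
        try loop.initSingleThreaded(std.debug.global_allocator);
        defer loop.deinit();
        loop.run(); // returns once the loop has no pending work
    }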
From 0d79e0381601404b844b4912d15d20f4b529e837 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 28 Jul 2018 13:52:48 -0400
Subject: [PATCH 28/37] canceling an await also cancels things awaiting it
---
src/ir.cpp | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/src/ir.cpp b/src/ir.cpp
index f298cff24b..7134b4d9ac 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -6831,6 +6831,8 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
IrBasicBlock *cleanup_block = ir_create_basic_block(irb, scope, "SuspendCleanup");
IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "SuspendResume");
IrBasicBlock *cancel_target_block = ir_create_basic_block(irb, scope, "CancelTarget");
+ IrBasicBlock *do_cancel_block = ir_create_basic_block(irb, scope, "DoCancel");
+ IrBasicBlock *do_defers_block = ir_create_basic_block(irb, scope, "DoDefers");
Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
@@ -6912,6 +6914,20 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
ir_mark_gen(ir_build_br(irb, scope, node, cleanup_block, const_bool_false));
ir_set_cursor_at_end_and_append_block(irb, cleanup_block);
+ IrInstruction *my_mask_bits = ir_build_bin_op(irb, scope, node, IrBinOpBinOr, ptr_mask, is_canceled_mask, false);
+ IrInstruction *my_prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
+ usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, my_mask_bits, nullptr,
+ AtomicRmwOp_or, AtomicOrderSeqCst);
+ IrInstruction *my_await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, my_prev_atomic_value, ptr_mask, false);
+ IrInstruction *have_my_await_handle = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, my_await_handle_addr, zero, false);
+ ir_build_cond_br(irb, scope, node, have_my_await_handle, do_cancel_block, do_defers_block, const_bool_false);
+
+ ir_set_cursor_at_end_and_append_block(irb, do_cancel_block);
+ IrInstruction *my_await_handle = ir_build_int_to_ptr(irb, scope, node, promise_type_val, my_await_handle_addr);
+ ir_gen_cancel_target(irb, scope, node, my_await_handle);
+ ir_mark_gen(ir_build_br(irb, scope, node, do_defers_block, const_bool_false));
+
+ ir_set_cursor_at_end_and_append_block(irb, do_defers_block);
ir_gen_defers_for_block(irb, scope, outer_scope, true);
ir_mark_gen(ir_build_br(irb, scope, node, irb->exec->coro_final_cleanup_block, const_bool_false));
From 3ce0ea884f8a64e1173ab814de10b6c33833b3b8 Mon Sep 17 00:00:00 2001
From: dbandstra
Date: Sat, 28 Jul 2018 17:30:05 -0700
Subject: [PATCH 29/37] add int writing functions to OutStream
added: writeInt, writeIntLe, and writeIntBe
---
std/io.zig | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/std/io.zig b/std/io.zig
index 71a9822399..1b1c56b69b 100644
--- a/std/io.zig
+++ b/std/io.zig
@@ -230,6 +230,20 @@ pub fn OutStream(comptime WriteError: type) type {
try self.writeFn(self, slice);
}
}
+
+ pub fn writeIntLe(self: *Self, comptime T: type, value: T) !void {
+ return self.writeInt(builtin.Endian.Little, T, value);
+ }
+
+ pub fn writeIntBe(self: *Self, comptime T: type, value: T) !void {
+ return self.writeInt(builtin.Endian.Big, T, value);
+ }
+
+ pub fn writeInt(self: *Self, endian: builtin.Endian, comptime T: type, value: T) !void {
+ var bytes: [@sizeOf(T)]u8 = undefined;
+ mem.writeInt(bytes[0..], value, endian);
+ return self.writeFn(self, bytes);
+ }
};
}
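Usage mirrors the existing readInt helpers on InStream. For any OutStream implementation out_stream (the names below are placeholders):

    // a big-endian u32 length prefix followed by a little-endian u16 version
    try out_stream.writeIntBe(u32, payload_len);
    try out_stream.writeIntLe(u16, version);

Since T is comptime, any fixed-width integer type works.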
From f36faa32c46897a12852da9f382ab10979511d6f Mon Sep 17 00:00:00 2001
From: dbandstra
Date: Sat, 28 Jul 2018 17:34:28 -0700
Subject: [PATCH 30/37] add skipBytes function to InStream
This reads N bytes, discarding their values.
---
std/io.zig | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/std/io.zig b/std/io.zig
index 1b1c56b69b..5d73b4e7d8 100644
--- a/std/io.zig
+++ b/std/io.zig
@@ -200,6 +200,13 @@ pub fn InStream(comptime ReadError: type) type {
try self.readNoEof(input_slice);
return mem.readInt(input_slice, T, endian);
}
+
+ pub fn skipBytes(self: *Self, num_bytes: usize) !void {
+ var i: usize = 0;
+ while (i < num_bytes) : (i += 1) {
+ _ = try self.readByte();
+ }
+ }
};
}
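Because it simply loops over readByte, skipBytes works on any InStream with no seek support required, at the cost of touching every byte. For example, to ignore a fixed-size header:

    try in_stream.skipBytes(16); // discard a 16-byte header we don't parse

It propagates an end-of-stream error if the stream ends early.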
From 608ff52dc3ea356b23fb6ae92fbca9fbb18c7892 Mon Sep 17 00:00:00 2001
From: dbandstra
Date: Sun, 29 Jul 2018 11:52:10 -0700
Subject: [PATCH 31/37] add SliceOutStream, rename SliceStream to SliceInStream
(#1301)
---
std/io.zig | 48 +++++++++++++++++++++++++++++++++++++++++++++++-
std/io_test.zig | 29 +++++++++++++++++++++++++----
2 files changed, 72 insertions(+), 5 deletions(-)
diff --git a/std/io.zig b/std/io.zig
index 5d73b4e7d8..ff73c04f78 100644
--- a/std/io.zig
+++ b/std/io.zig
@@ -419,7 +419,7 @@ pub fn PeekStream(comptime buffer_size: usize, comptime InStreamError: type) typ
};
}
-pub const SliceStream = struct {
+pub const SliceInStream = struct {
const Self = this;
pub const Error = error { };
pub const Stream = InStream(Error);
@@ -447,7 +447,53 @@ pub const SliceStream = struct {
return size;
}
+};
+/// This is a simple OutStream that writes to a slice, and returns an error
+/// when it runs out of space.
+pub const SliceOutStream = struct {
+ pub const Error = error{OutOfSpace};
+ pub const Stream = OutStream(Error);
+
+ pub stream: Stream,
+
+ pos: usize,
+ slice: []u8,
+
+ pub fn init(slice: []u8) SliceOutStream {
+ return SliceOutStream{
+ .slice = slice,
+ .pos = 0,
+ .stream = Stream{ .writeFn = writeFn },
+ };
+ }
+
+ pub fn getWritten(self: *const SliceOutStream) []const u8 {
+ return self.slice[0..self.pos];
+ }
+
+ pub fn reset(self: *SliceOutStream) void {
+ self.pos = 0;
+ }
+
+ fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void {
+ const self = @fieldParentPtr(SliceOutStream, "stream", out_stream);
+
+ assert(self.pos <= self.slice.len);
+
+ const n =
+ if (self.pos + bytes.len <= self.slice.len)
+ bytes.len
+ else
+ self.slice.len - self.pos;
+
+ std.mem.copy(u8, self.slice[self.pos..self.pos + n], bytes[0..n]);
+ self.pos += n;
+
+ if (n < bytes.len) {
+ return Error.OutOfSpace;
+ }
+ }
};
pub fn BufferedOutStream(comptime Error: type) type {
diff --git a/std/io_test.zig b/std/io_test.zig
index 8d5c35c5fd..56f8a9a6ad 100644
--- a/std/io_test.zig
+++ b/std/io_test.zig
@@ -2,6 +2,7 @@ const std = @import("index.zig");
const io = std.io;
const DefaultPrng = std.rand.DefaultPrng;
const assert = std.debug.assert;
+const assertError = std.debug.assertError;
const mem = std.mem;
const os = std.os;
const builtin = @import("builtin");
@@ -61,9 +62,9 @@ test "BufferOutStream" {
assert(mem.eql(u8, buffer.toSlice(), "x: 42\ny: 1234\n"));
}
-test "SliceStream" {
+test "SliceInStream" {
const bytes = []const u8 { 1, 2, 3, 4, 5, 6, 7 };
- var ss = io.SliceStream.init(bytes);
+ var ss = io.SliceInStream.init(bytes);
var dest: [4]u8 = undefined;
@@ -81,8 +82,8 @@ test "SliceStream" {
test "PeekStream" {
const bytes = []const u8 { 1, 2, 3, 4, 5, 6, 7, 8 };
- var ss = io.SliceStream.init(bytes);
- var ps = io.PeekStream(2, io.SliceStream.Error).init(&ss.stream);
+ var ss = io.SliceInStream.init(bytes);
+ var ps = io.PeekStream(2, io.SliceInStream.Error).init(&ss.stream);
var dest: [4]u8 = undefined;
@@ -111,3 +112,23 @@ test "PeekStream" {
assert(dest[0] == 12);
assert(dest[1] == 11);
}
+
+test "SliceOutStream" {
+ var buffer: [10]u8 = undefined;
+ var ss = io.SliceOutStream.init(buffer[0..]);
+
+ try ss.stream.write("Hello");
+ assert(mem.eql(u8, ss.getWritten(), "Hello"));
+
+ try ss.stream.write("world");
+ assert(mem.eql(u8, ss.getWritten(), "Helloworld"));
+
+ assertError(ss.stream.write("!"), error.OutOfSpace);
+ assert(mem.eql(u8, ss.getWritten(), "Helloworld"));
+
+ ss.reset();
+ assert(ss.getWritten().len == 0);
+
+ assertError(ss.stream.write("Hello world!"), error.OutOfSpace);
+ assert(mem.eql(u8, ss.getWritten(), "Hello worl"));
+}
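The new pieces compose; a hypothetical test in the style of the ones above, reusing the imports at the top of std/io_test.zig:

    test "skip, read, and re-emit through the slice streams" {
        const bytes = []const u8{ 1, 2, 3, 4, 5 };
        var in = io.SliceInStream.init(bytes);
        try in.stream.skipBytes(2);

        var dest: [3]u8 = undefined;
        try in.stream.readNoEof(dest[0..]);

        var out_buf: [3]u8 = undefined;
        var out = io.SliceOutStream.init(out_buf[0..]);
        try out.stream.write(dest[0..]);
        assert(mem.eql(u8, out.getWritten(), []const u8{ 3, 4, 5 }));
    }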
From 09304aab77a7b6af5693600fa6b3a35322f7469d Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 29 Jul 2018 23:25:40 -0400
Subject: [PATCH 32/37] fix cancel and await semantics
---
src/ir.cpp | 65 ++++++++++++++++++++++++++++-----------------
std/debug/index.zig | 14 ++++++++--
2 files changed, 52 insertions(+), 27 deletions(-)
diff --git a/src/ir.cpp b/src/ir.cpp
index 7134b4d9ac..e91fa14170 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -6663,7 +6663,9 @@ static IrInstruction *ir_gen_fn_proto(IrBuilder *irb, Scope *parent_scope, AstNo
async_allocator_type_value, is_var_args);
}
-static IrInstruction *ir_gen_cancel_target(IrBuilder *irb, Scope *scope, AstNode *node, IrInstruction *target_inst) {
+static IrInstruction *ir_gen_cancel_target(IrBuilder *irb, Scope *scope, AstNode *node,
+ IrInstruction *target_inst, bool cancel_non_suspended, bool cancel_awaited)
+{
IrBasicBlock *done_block = ir_create_basic_block(irb, scope, "CancelDone");
IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
IrBasicBlock *pre_return_block = ir_create_basic_block(irb, scope, "PreReturn");
@@ -6691,7 +6693,7 @@ static IrInstruction *ir_gen_cancel_target(IrBuilder *irb, Scope *scope, AstNode
// set the is_canceled bit
IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
usize_type_val, atomic_state_ptr, nullptr, is_canceled_mask, nullptr,
- AtomicRmwOp_and, AtomicOrderSeqCst);
+ AtomicRmwOp_or, AtomicOrderSeqCst);
IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
@@ -6703,14 +6705,26 @@ static IrInstruction *ir_gen_cancel_target(IrBuilder *irb, Scope *scope, AstNode
ir_build_cond_br(irb, scope, node, is_returned_bool, post_return_block, pre_return_block, is_comptime);
ir_set_cursor_at_end_and_append_block(irb, post_return_block);
- IrInstruction *is_awaited_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, await_mask, false);
- IrInstruction *is_awaited_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_awaited_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_awaited_bool, done_block, do_cancel_block, is_comptime);
+ if (cancel_awaited) {
+ ir_build_br(irb, scope, node, do_cancel_block, is_comptime);
+ } else {
+ IrInstruction *is_awaited_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, await_mask, false);
+ IrInstruction *is_awaited_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_awaited_value, zero, false);
+ ir_build_cond_br(irb, scope, node, is_awaited_bool, done_block, do_cancel_block, is_comptime);
+ }
ir_set_cursor_at_end_and_append_block(irb, pre_return_block);
- IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
- IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_suspended_bool, do_cancel_block, done_block, is_comptime);
+ if (cancel_awaited) {
+ if (cancel_non_suspended) {
+ ir_build_br(irb, scope, node, do_cancel_block, is_comptime);
+ } else {
+ IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
+ IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
+ ir_build_cond_br(irb, scope, node, is_suspended_bool, do_cancel_block, done_block, is_comptime);
+ }
+ } else {
+ ir_build_br(irb, scope, node, done_block, is_comptime);
+ }
ir_set_cursor_at_end_and_append_block(irb, do_cancel_block);
ir_build_cancel(irb, scope, node, target_inst);
@@ -6727,7 +6741,7 @@ static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node)
if (target_inst == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
- return ir_gen_cancel_target(irb, scope, node, target_inst);
+ return ir_gen_cancel_target(irb, scope, node, target_inst, false, true);
}
static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) {
@@ -6872,15 +6886,19 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
ir_build_unreachable(irb, scope, node);
ir_set_cursor_at_end_and_append_block(irb, not_awaited_block);
- IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
- IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_canceled_bool, cleanup_block, not_canceled_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
IrInstruction *await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
IrInstruction *is_non_null = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
+ IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
+ IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
+ ir_build_cond_br(irb, scope, node, is_canceled_bool, cancel_target_block, not_canceled_block, const_bool_false);
+
+ ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
ir_build_cond_br(irb, scope, node, is_non_null, no_suspend_block, yes_suspend_block, const_bool_false);
+ ir_set_cursor_at_end_and_append_block(irb, cancel_target_block);
+ ir_build_cancel(irb, scope, node, target_inst);
+ ir_mark_gen(ir_build_br(irb, scope, node, cleanup_block, const_bool_false));
+
ir_set_cursor_at_end_and_append_block(irb, no_suspend_block);
if (irb->codegen->have_err_ret_tracing) {
Buf *err_ret_trace_field_name = buf_create_from_str(ERR_RET_TRACE_FIELD_NAME);
@@ -6897,6 +6915,7 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
ir_build_cancel(irb, scope, node, target_inst);
ir_build_br(irb, scope, node, merge_block, const_bool_false);
+
ir_set_cursor_at_end_and_append_block(irb, yes_suspend_block);
IrInstruction *suspend_code = ir_build_coro_suspend(irb, scope, node, save_token, const_bool_false);
@@ -6904,32 +6923,28 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
cases[0].value = ir_build_const_u8(irb, scope, node, 0);
cases[0].block = resume_block;
cases[1].value = ir_build_const_u8(irb, scope, node, 1);
- cases[1].block = cancel_target_block;
+ cases[1].block = cleanup_block;
ir_build_switch_br(irb, scope, node, suspend_code, irb->exec->coro_suspend_block,
2, cases, const_bool_false, nullptr);
- ir_set_cursor_at_end_and_append_block(irb, cancel_target_block);
- IrInstruction *await_handle = ir_build_int_to_ptr(irb, scope, node, promise_type_val, await_handle_addr);
- ir_gen_cancel_target(irb, scope, node, await_handle);
- ir_mark_gen(ir_build_br(irb, scope, node, cleanup_block, const_bool_false));
-
ir_set_cursor_at_end_and_append_block(irb, cleanup_block);
IrInstruction *my_mask_bits = ir_build_bin_op(irb, scope, node, IrBinOpBinOr, ptr_mask, is_canceled_mask, false);
IrInstruction *my_prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, my_mask_bits, nullptr,
AtomicRmwOp_or, AtomicOrderSeqCst);
IrInstruction *my_await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, my_prev_atomic_value, ptr_mask, false);
- IrInstruction *have_my_await_handle = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, my_await_handle_addr, zero, false);
- ir_build_cond_br(irb, scope, node, have_my_await_handle, do_cancel_block, do_defers_block, const_bool_false);
+ IrInstruction *dont_have_my_await_handle = ir_build_bin_op(irb, scope, node, IrBinOpCmpEq, my_await_handle_addr, zero, false);
+ IrInstruction *dont_destroy_ourselves = ir_build_bin_op(irb, scope, node, IrBinOpBoolAnd, dont_have_my_await_handle, is_canceled_bool, false);
+ ir_build_cond_br(irb, scope, node, dont_have_my_await_handle, do_defers_block, do_cancel_block, const_bool_false);
ir_set_cursor_at_end_and_append_block(irb, do_cancel_block);
IrInstruction *my_await_handle = ir_build_int_to_ptr(irb, scope, node, promise_type_val, my_await_handle_addr);
- ir_gen_cancel_target(irb, scope, node, my_await_handle);
+ ir_gen_cancel_target(irb, scope, node, my_await_handle, true, false);
ir_mark_gen(ir_build_br(irb, scope, node, do_defers_block, const_bool_false));
ir_set_cursor_at_end_and_append_block(irb, do_defers_block);
ir_gen_defers_for_block(irb, scope, outer_scope, true);
- ir_mark_gen(ir_build_br(irb, scope, node, irb->exec->coro_final_cleanup_block, const_bool_false));
+ ir_mark_gen(ir_build_cond_br(irb, scope, node, dont_destroy_ourselves, irb->exec->coro_early_final, irb->exec->coro_final_cleanup_block, const_bool_false));
ir_set_cursor_at_end_and_append_block(irb, resume_block);
ir_build_br(irb, scope, node, merge_block, const_bool_false);
@@ -7004,7 +7019,7 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod
ir_set_cursor_at_end_and_append_block(irb, cancel_awaiter_block);
IrInstruction *await_handle = ir_build_int_to_ptr(irb, parent_scope, node, promise_type_val, await_handle_addr);
- ir_gen_cancel_target(irb, parent_scope, node, await_handle);
+ ir_gen_cancel_target(irb, parent_scope, node, await_handle, true, false);
ir_build_br(irb, parent_scope, node, cleanup_block, const_bool_false);
ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
diff --git a/std/debug/index.zig b/std/debug/index.zig
index 3070e0b40b..ab50d79db3 100644
--- a/std/debug/index.zig
+++ b/std/debug/index.zig
@@ -27,7 +27,7 @@ pub fn warn(comptime fmt: []const u8, args: ...) void {
const stderr = getStderrStream() catch return;
stderr.print(fmt, args) catch return;
}
-fn getStderrStream() !*io.OutStream(io.FileOutStream.Error) {
+pub fn getStderrStream() !*io.OutStream(io.FileOutStream.Error) {
if (stderr_stream) |st| {
return st;
} else {
@@ -172,6 +172,16 @@ pub fn writeStackTrace(stack_trace: *const builtin.StackTrace, out_stream: var,
}
}
+/// Walks up `frame_count` stack frames by following the saved frame
+/// pointer chain, then returns that frame's return address.
+pub inline fn getReturnAddress(frame_count: usize) usize {
+ var fp = @ptrToInt(@frameAddress());
+ var i: usize = 0;
+ while (fp != 0 and i < frame_count) {
+ // the first word a frame pointer points at is the caller's saved fp
+ fp = @intToPtr(*const usize, fp).*;
+ i += 1;
+ }
+ // the return address sits one word above the saved frame pointer
+ return @intToPtr(*const usize, fp + @sizeOf(usize)).*;
+}
+
pub fn writeCurrentStackTrace(out_stream: var, allocator: *mem.Allocator, debug_info: *ElfStackTrace, tty_color: bool, start_addr: ?usize) !void {
const AddressState = union(enum) {
NotLookingForStartAddress,
@@ -205,7 +215,7 @@ pub fn writeCurrentStackTrace(out_stream: var, allocator: *mem.Allocator, debug_
}
}
-fn printSourceAtAddress(debug_info: *ElfStackTrace, out_stream: var, address: usize, tty_color: bool) !void {
+pub fn printSourceAtAddress(debug_info: *ElfStackTrace, out_stream: var, address: usize, tty_color: bool) !void {
switch (builtin.os) {
builtin.Os.windows => return error.UnsupportedDebugInfo,
builtin.Os.macosx => {
From 6fd6bc94f57b815774f9717d52bacbf304a496be Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 30 Jul 2018 12:22:54 -0400
Subject: [PATCH 33/37] await sets suspend bit; return clears suspend bit
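An awaiter that is suspended is indistinguishable from one that has
merely parked its handle unless the suspend bit is accurate, so await
now sets the is_suspended bit with the same OR-based RMW before
suspending, and the return path resumes the awaiter through the full
resume logic (split out as ir_gen_resume_target), which clears the
bit, rather than through a raw coro_resume. If await finds the
suspend bit already set, that is a compiler bug and the generated
code traps. The suspend cleanup block now uses a phi over its
predecessor blocks to decide whether the frame destroys itself or
takes the early-final path.

A minimal sketch of the new pre-suspend check in await, reusing the
illustrative names from the previous commit message:

    fn enterAwaitSuspend(atomic_state: *usize) void {
        const prev = @atomicRmw(usize, atomic_state, builtin.AtomicRmwOp.Or,
            is_suspended_mask, builtin.AtomicOrder.SeqCst);
        // double suspension means the compiler generated bad code
        if ((prev & is_suspended_mask) != 0) unreachable;
        if ((prev & is_canceled_mask) != 0) {
            // canceled on the way into the await:
            // skip the suspend and go straight to cleanup
        }
    }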
---
src/ir.cpp | 65 ++++++++++++++++++++++++++++++++++++++++++++----------
1 file changed, 53 insertions(+), 12 deletions(-)
diff --git a/src/ir.cpp b/src/ir.cpp
index e91fa14170..699baa152e 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -6744,13 +6744,9 @@ static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node)
return ir_gen_cancel_target(irb, scope, node, target_inst, false, true);
}
-static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) {
- assert(node->type == NodeTypeResume);
-
- IrInstruction *target_inst = ir_gen_node(irb, node->data.resume_expr.expr, scope);
- if (target_inst == irb->codegen->invalid_instruction)
- return irb->codegen->invalid_instruction;
-
+static IrInstruction *ir_gen_resume_target(IrBuilder *irb, Scope *scope, AstNode *node,
+ IrInstruction *target_inst)
+{
IrBasicBlock *done_block = ir_create_basic_block(irb, scope, "ResumeDone");
IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
IrBasicBlock *suspended_block = ir_create_basic_block(irb, scope, "IsSuspended");
@@ -6797,6 +6793,16 @@ static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node)
return ir_build_const_void(irb, scope, node);
}
+static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) {
+ assert(node->type == NodeTypeResume);
+
+ IrInstruction *target_inst = ir_gen_node(irb, node->data.resume_expr.expr, scope);
+ if (target_inst == irb->codegen->invalid_instruction)
+ return irb->codegen->invalid_instruction;
+
+ return ir_gen_resume_target(irb, scope, node, target_inst);
+}
+
static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypeAwaitExpr);
@@ -6847,6 +6853,10 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
IrBasicBlock *cancel_target_block = ir_create_basic_block(irb, scope, "CancelTarget");
IrBasicBlock *do_cancel_block = ir_create_basic_block(irb, scope, "DoCancel");
IrBasicBlock *do_defers_block = ir_create_basic_block(irb, scope, "DoDefers");
+ IrBasicBlock *destroy_block = ir_create_basic_block(irb, scope, "DestroyBlock");
+ IrBasicBlock *my_suspended_block = ir_create_basic_block(irb, scope, "AlreadySuspended");
+ IrBasicBlock *my_not_suspended_block = ir_create_basic_block(irb, scope, "NotAlreadySuspended");
+ IrBasicBlock *do_suspend_block = ir_create_basic_block(irb, scope, "DoSuspend");
Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
@@ -6861,6 +6871,7 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
IrInstruction *await_mask = ir_build_const_usize(irb, scope, node, 0x4); // 0b100
IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
+ IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
VariableTableEntry *result_var = ir_create_var(irb, node, scope, nullptr,
false, false, true, const_bool_false);
@@ -6917,22 +6928,42 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
ir_set_cursor_at_end_and_append_block(irb, yes_suspend_block);
+ IrInstruction *my_prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
+ usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, is_suspended_mask, nullptr,
+ AtomicRmwOp_or, AtomicOrderSeqCst);
+ IrInstruction *my_is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, my_prev_atomic_value, is_suspended_mask, false);
+ IrInstruction *my_is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, my_is_suspended_value, zero, false);
+ ir_build_cond_br(irb, scope, node, my_is_suspended_bool, my_suspended_block, my_not_suspended_block, const_bool_false);
+
+ ir_set_cursor_at_end_and_append_block(irb, my_suspended_block);
+ ir_build_unreachable(irb, scope, node);
+
+ ir_set_cursor_at_end_and_append_block(irb, my_not_suspended_block);
+ IrInstruction *my_is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, my_prev_atomic_value, is_canceled_mask, false);
+ IrInstruction *my_is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, my_is_canceled_value, zero, false);
+ ir_build_cond_br(irb, scope, node, my_is_canceled_bool, cleanup_block, do_suspend_block, const_bool_false);
+
+ ir_set_cursor_at_end_and_append_block(irb, do_suspend_block);
IrInstruction *suspend_code = ir_build_coro_suspend(irb, scope, node, save_token, const_bool_false);
IrInstructionSwitchBrCase *cases = allocate<IrInstructionSwitchBrCase>(2);
cases[0].value = ir_build_const_u8(irb, scope, node, 0);
cases[0].block = resume_block;
cases[1].value = ir_build_const_u8(irb, scope, node, 1);
- cases[1].block = cleanup_block;
+ cases[1].block = destroy_block;
ir_build_switch_br(irb, scope, node, suspend_code, irb->exec->coro_suspend_block,
2, cases, const_bool_false, nullptr);
+ ir_set_cursor_at_end_and_append_block(irb, destroy_block);
+ ir_gen_cancel_target(irb, scope, node, target_inst, false, true);
+ ir_mark_gen(ir_build_br(irb, scope, node, cleanup_block, const_bool_false));
+
ir_set_cursor_at_end_and_append_block(irb, cleanup_block);
IrInstruction *my_mask_bits = ir_build_bin_op(irb, scope, node, IrBinOpBinOr, ptr_mask, is_canceled_mask, false);
- IrInstruction *my_prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
+ IrInstruction *b_my_prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, my_mask_bits, nullptr,
AtomicRmwOp_or, AtomicOrderSeqCst);
- IrInstruction *my_await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, my_prev_atomic_value, ptr_mask, false);
+ IrInstruction *my_await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, b_my_prev_atomic_value, ptr_mask, false);
IrInstruction *dont_have_my_await_handle = ir_build_bin_op(irb, scope, node, IrBinOpCmpEq, my_await_handle_addr, zero, false);
IrInstruction *dont_destroy_ourselves = ir_build_bin_op(irb, scope, node, IrBinOpBoolAnd, dont_have_my_await_handle, is_canceled_bool, false);
ir_build_cond_br(irb, scope, node, dont_have_my_await_handle, do_defers_block, do_cancel_block, const_bool_false);
@@ -6996,6 +7027,7 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod
IrBasicBlock *cancel_awaiter_block = ir_create_basic_block(irb, parent_scope, "CancelAwaiter");
IrInstruction *promise_type_val = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_promise);
+ IrInstruction *const_bool_true = ir_build_const_bool(irb, parent_scope, node, true);
IrInstruction *const_bool_false = ir_build_const_bool(irb, parent_scope, node, false);
IrInstruction *usize_type_val = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_usize);
IrInstruction *is_canceled_mask = ir_build_const_usize(irb, parent_scope, node, 0x1); // 0b001
@@ -7015,11 +7047,13 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod
ir_set_cursor_at_end_and_append_block(irb, canceled_block);
IrInstruction *await_handle_addr = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
IrInstruction *have_await_handle = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
+ IrBasicBlock *post_canceled_block = irb->current_basic_block;
ir_build_cond_br(irb, parent_scope, node, have_await_handle, cancel_awaiter_block, cleanup_block, const_bool_false);
ir_set_cursor_at_end_and_append_block(irb, cancel_awaiter_block);
IrInstruction *await_handle = ir_build_int_to_ptr(irb, parent_scope, node, promise_type_val, await_handle_addr);
ir_gen_cancel_target(irb, parent_scope, node, await_handle, true, false);
+ IrBasicBlock *post_cancel_awaiter_block = irb->current_basic_block;
ir_build_br(irb, parent_scope, node, cleanup_block, const_bool_false);
ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
@@ -7064,8 +7098,15 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod
2, cases, const_bool_false, nullptr));
ir_set_cursor_at_end_and_append_block(irb, cleanup_block);
+ IrBasicBlock **incoming_blocks = allocate<IrBasicBlock *>(2);
+ IrInstruction **incoming_values = allocate<IrInstruction *>(2);
+ incoming_blocks[0] = post_canceled_block;
+ incoming_values[0] = const_bool_true;
+ incoming_blocks[1] = post_cancel_awaiter_block;
+ incoming_values[1] = const_bool_false;
+ IrInstruction *destroy_ourselves = ir_build_phi(irb, parent_scope, node, 2, incoming_blocks, incoming_values);
ir_gen_defers_for_block(irb, parent_scope, outer_scope, true);
- ir_mark_gen(ir_build_br(irb, parent_scope, node, irb->exec->coro_final_cleanup_block, const_bool_false));
+ ir_mark_gen(ir_build_cond_br(irb, parent_scope, node, destroy_ourselves, irb->exec->coro_final_cleanup_block, irb->exec->coro_early_final, const_bool_false));
ir_set_cursor_at_end_and_append_block(irb, resume_block);
return ir_mark_gen(ir_build_const_void(irb, parent_scope, node));
@@ -7450,7 +7491,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
ir_build_cond_br(irb, scope, node, resume_awaiter, resume_block, irb->exec->coro_suspend_block, const_bool_false);
ir_set_cursor_at_end_and_append_block(irb, resume_block);
- ir_build_coro_resume(irb, scope, node, awaiter_handle);
+ ir_gen_resume_target(irb, scope, node, awaiter_handle);
ir_build_br(irb, scope, node, irb->exec->coro_suspend_block, const_bool_false);
}
From c91c781952bd9750c029a48d0dd7a5a24ec089cf Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 30 Jul 2018 12:49:53 -0400
Subject: [PATCH 34/37] add behavior tests for cancel semantics
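"cancel forwards" cancels a coroutine that is awaiting a chain of
other coroutines and asserts that every frame in the chain runs its
defers; "cancel backwards" cancels the coroutine at the deep end of
the chain directly and asserts that the frames awaiting it run their
defers as well. The shape of both, abridged from the forwards test:

    var da = std.heap.DirectAllocator.init();
    defer da.deinit();

    const p = async<&da.allocator> f1() catch unreachable;
    cancel p; // runs the defers of f1 and of everything it awaits
    std.debug.assert(defer_f1);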
---
test/behavior.zig | 1 +
test/cases/cancel.zig | 92 +++++++++++++++++++++++++++++++++++++++++++
2 files changed, 93 insertions(+)
create mode 100644 test/cases/cancel.zig
diff --git a/test/behavior.zig b/test/behavior.zig
index 21b1c597e1..b03336eb71 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -16,6 +16,7 @@ comptime {
_ = @import("cases/bugs/828.zig");
_ = @import("cases/bugs/920.zig");
_ = @import("cases/byval_arg_var.zig");
+ _ = @import("cases/cancel.zig");
_ = @import("cases/cast.zig");
_ = @import("cases/const_slice_child.zig");
_ = @import("cases/coroutine_await_struct.zig");
diff --git a/test/cases/cancel.zig b/test/cases/cancel.zig
new file mode 100644
index 0000000000..edf11d687d
--- /dev/null
+++ b/test/cases/cancel.zig
@@ -0,0 +1,92 @@
+const std = @import("std");
+
+var defer_f1: bool = false;
+var defer_f2: bool = false;
+var defer_f3: bool = false;
+
+test "cancel forwards" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
+ const p = async<&da.allocator> f1() catch unreachable;
+ cancel p;
+ std.debug.assert(defer_f1);
+ std.debug.assert(defer_f2);
+ std.debug.assert(defer_f3);
+}
+
+async fn f1() void {
+ defer {
+ defer_f1 = true;
+ }
+ await (async f2() catch unreachable);
+}
+
+async fn f2() void {
+ defer {
+ defer_f2 = true;
+ }
+ await (async f3() catch unreachable);
+}
+
+async fn f3() void {
+ defer {
+ defer_f3 = true;
+ }
+ suspend;
+}
+
+var defer_b1: bool = false;
+var defer_b2: bool = false;
+var defer_b3: bool = false;
+var defer_b4: bool = false;
+
+test "cancel backwards" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
+ const p = async<&da.allocator> b1() catch unreachable;
+ cancel p;
+ std.debug.assert(defer_b1);
+ std.debug.assert(defer_b2);
+ std.debug.assert(defer_b3);
+ std.debug.assert(defer_b4);
+}
+
+async fn b1() void {
+ defer {
+ defer_b1 = true;
+ }
+ await (async b2() catch unreachable);
+}
+
+var b4_handle: promise = undefined;
+
+async fn b2() void {
+ const b3_handle = async b3() catch unreachable;
+ resume b4_handle;
+ cancel b4_handle;
+ defer {
+ defer_b2 = true;
+ }
+ const value = await b3_handle;
+ @panic("unreachable");
+}
+
+async fn b3() i32 {
+ defer {
+ defer_b3 = true;
+ }
+ await (async b4() catch unreachable);
+ return 1234;
+}
+
+async fn b4() void {
+ defer {
+ defer_b4 = true;
+ }
+ suspend |p| {
+ b4_handle = p;
+ }
+ suspend;
+}
From cfe03c764de0d2edfbad74a71d7c18f0fd68b506 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 30 Jul 2018 13:07:04 -0400
Subject: [PATCH 35/37] fix docs for break from suspend
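The docs showed a labeled break out of a suspend block; the supported
pattern is now for the coroutine to resume itself from inside the
suspend block, in which case it never returns to its resumer:

    suspend |p| {
        resume p; // never returns to the resumer; execution continues
    }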
---
doc/langref.html.in | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index d91fb6e8fb..0499c632e2 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -4665,24 +4665,24 @@ async fn testSuspendBlock() void {
block, while the old thread continued executing the suspend block.
- However, if you use labeled break on the suspend block, the coroutine
+ However, the coroutine can be directly resumed from the suspend block, in which case it
never returns to its resumer and continues executing.
{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;
-test "break from suspend" {
+test "resume from suspend" {
var buf: [500]u8 = undefined;
var a = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator;
var my_result: i32 = 1;
- const p = try async testBreakFromSuspend(&my_result);
+ const p = try async testResumeFromSuspend(&my_result);
cancel p;
std.debug.assert(my_result == 2);
}
-async fn testBreakFromSuspend(my_result: *i32) void {
- s: suspend |p| {
- break :s;
+async fn testResumeFromSuspend(my_result: *i32) void {
+ suspend |p| {
+ resume p;
}
my_result.* += 1;
suspend;
From 0db33e9c86ff43d3fe7f7f8fb2e29333c2fad2af Mon Sep 17 00:00:00 2001
From: "Matthew D. Steele"
Date: Mon, 30 Jul 2018 22:27:07 -0400
Subject: [PATCH 36/37] Add "Comments" section to language reference (#1309)
The contents of this section come from the discussion on issue #1305.
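The new section distinguishes ordinary comments, which run to the
next LF byte, from doc comments, which begin with exactly three
slashes and merge across consecutive lines to document whatever
immediately follows them. An illustrative snippet (not part of the
patch):

    /// These two lines are merged into a single
    /// multiline doc comment attached to foo.
    fn foo() void {}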
---
doc/langref.html.in | 52 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 52 insertions(+)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 0499c632e2..7fde550338 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -134,6 +134,58 @@ pub fn main() void {
{#see_also|Values|@import|Errors|Root Source File#}
{#header_close#}
+ {#header_open|Comments#}
+ {#code_begin|test|comments#}
+const assert = @import("std").debug.assert;
+
+test "comments" {
+ // Comments in Zig start with "//" and end at the next LF byte (end of line).
+ // The below line is a comment, and won't be executed.
+
+ //assert(false);
+
+ const x = true; // another comment
+ assert(x);
+}
+ {#code_end#}
+
+ There are no multiline comments in Zig (e.g. like /* */
+ comments in C). This helps give Zig the property that each line
+ of code can be tokenized out of context.
+
+ {#header_open|Doc comments#}
+
+ A doc comment is one that begins with exactly three slashes (i.e.
+ /// but not ////);
+ multiple doc comments in a row are merged together to form a multiline
+ doc comment. The doc comment documents whatever immediately follows it.
+
+ {#code_begin|syntax|doc_comments#}
+/// A structure for storing a timestamp, with nanosecond precision (this is a
+/// multiline doc comment).
+const Timestamp = struct {
+ /// The number of seconds since the epoch (this is also a doc comment).
+ seconds: i64, // signed so we can represent pre-1970 (not a doc comment)
+ /// The number of nanoseconds past the second (doc comment again).
+ nanos: u32,
+
+ /// Returns a `Timestamp` struct representing the Unix epoch; that is, the
+ /// moment of 1970 Jan 1 00:00:00 UTC (this is a doc comment too).
+ pub fn unixEpoch() Timestamp {
+ return Timestamp{
+ .seconds = 0,
+ .nanos = 0,
+ };
+ }
+};
+ {#code_end#}
+
+ Doc comments are only allowed in certain places; eventually, it will
+ become a compile error to have a doc comment in an unexpected place,
+ such as in the middle of an expression, or just before a non-doc comment.
+
+ {#header_close#}
+ {#header_close#}
{#header_open|Values#}
{#code_begin|exe|values#}
const std = @import("std");
From 058bfb254c4c0e1cfb254791f771c88c74f299e8 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 31 Jul 2018 11:34:42 -0400
Subject: [PATCH 37/37] std.fmt.format: add '*' for formatting things as
pointers
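{*} formats a pointer argument as the pointed-to type's name followed
by '@' and the address in hexadecimal. From the new test:

    const value = @intToPtr(*i32, 0xdeadbeef);
    try testFmt("pointer: i32@deadbeef\n", "pointer: {*}\n", value);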
closes #1285
---
std/fmt/index.zig | 34 ++++++++++++++++++++++++++++++++++
1 file changed, 34 insertions(+)
diff --git a/std/fmt/index.zig b/std/fmt/index.zig
index 2188cc5803..8daec50f17 100644
--- a/std/fmt/index.zig
+++ b/std/fmt/index.zig
@@ -18,6 +18,7 @@ pub fn format(context: var, comptime Errors: type, output: fn (@typeOf(context),
OpenBrace,
CloseBrace,
FormatString,
+ Pointer,
};
comptime var start_index = 0;
@@ -54,6 +55,7 @@ pub fn format(context: var, comptime Errors: type, output: fn (@typeOf(context),
state = State.Start;
start_index = i + 1;
},
+ '*' => state = State.Pointer,
else => {
state = State.FormatString;
},
@@ -75,6 +77,17 @@ pub fn format(context: var, comptime Errors: type, output: fn (@typeOf(context),
},
else => {},
},
+ State.Pointer => switch (c) {
+ '}' => {
+ try output(context, @typeName(@typeOf(args[next_arg]).Child));
+ try output(context, "@");
+ try formatInt(@ptrToInt(args[next_arg]), 16, false, 0, context, Errors, output);
+ next_arg += 1;
+ state = State.Start;
+ start_index = i + 1;
+ },
+ else => @compileError("Unexpected format character after '*'"),
+ },
}
}
comptime {
@@ -861,6 +874,27 @@ test "fmt.format" {
const value: u8 = 'a';
try testFmt("u8: a\n", "u8: {c}\n", value);
}
+ {
+ const value: [3]u8 = "abc";
+ try testFmt("array: abc\n", "array: {}\n", value);
+ try testFmt("array: abc\n", "array: {}\n", &value);
+
+ var buf: [100]u8 = undefined;
+ try testFmt(
+ try bufPrint(buf[0..], "array: [3]u8@{x}\n", @ptrToInt(&value)),
+ "array: {*}\n",
+ &value,
+ );
+ }
+ {
+ const value: []const u8 = "abc";
+ try testFmt("slice: abc\n", "slice: {}\n", value);
+ }
+ {
+ const value = @intToPtr(*i32, 0xdeadbeef);
+ try testFmt("pointer: i32@deadbeef\n", "pointer: {}\n", value);
+ try testFmt("pointer: i32@deadbeef\n", "pointer: {*}\n", value);
+ }
try testFmt("buf: Test \n", "buf: {s5}\n", "Test");
try testFmt("buf: Test\n Other text", "buf: {s}\n Other text", "Test");
try testFmt("cstr: Test C\n", "cstr: {s}\n", c"Test C");